CombinedText stringlengths 4 3.42M |
|---|
from django.contrib import admin
from models import *
class CitiesAdmin(admin.ModelAdmin):
    # Shared base for all cities admins: the alt_names M2M is large, so use
    # a raw-ID widget instead of a multi-select box.
    raw_id_fields = ['alt_names']
class ContinenteAdmin(CitiesAdmin):
    # Continents: a plain name sort is enough.
    ordering = ['name']
admin.site.register(Continente, ContinenteAdmin)
class CountryAdmin(CitiesAdmin):
    """Changelist for countries, searchable by name and the various codes."""
    search_fields = ['name', 'code', 'code3', 'tld', 'phone']
    list_display = ['name', 'code', 'code3', 'tld', 'phone', 'continent', 'area', 'population']
admin.site.register(Country, CountryAdmin)
class RegionAdmin(CitiesAdmin):
    """Changelist for first-level administrative divisions."""
    search_fields = ['name', 'name_std', 'code']
    list_display = ['name_std', 'code', 'country']
    ordering = ['name_std']
admin.site.register(Region, RegionAdmin)
class SubregionAdmin(CitiesAdmin):
    """Changelist for second-level administrative divisions."""
    # The region FK is also large, so it gets a raw-ID widget too.
    raw_id_fields = ['alt_names', 'region']
    search_fields = ['name', 'name_std', 'code']
    list_display = ['name_std', 'code', 'region']
    ordering = ['name_std']
admin.site.register(Subregion, SubregionAdmin)
class CityAdmin(CitiesAdmin):
    """Changelist for cities."""
    raw_id_fields = ['alt_names', 'region', 'subregion']
    search_fields = ['name_std']
    list_display = ['name_std', 'subregion', 'region', 'country', 'population']
    ordering = ['name_std']
admin.site.register(City, CityAdmin)
class DistrictAdmin(CitiesAdmin):
    """Changelist for city districts."""
    search_fields = ['name', 'name_std']
    list_display = ['name_std', 'city']
    raw_id_fields = ['alt_names', 'city']
admin.site.register(District, DistrictAdmin)
class AltNameAdmin(admin.ModelAdmin):
    """Changelist for alternative names.

    Subclasses ModelAdmin directly, unlike the other admins here, so no
    raw_id_fields are inherited.
    """
    search_fields = ['name']
    list_filter = ['is_preferred', 'is_short', 'language']
    list_display = ['name', 'language', 'is_preferred', 'is_short']
    ordering = ['name']
admin.site.register(AlternativeName, AltNameAdmin)
class PostalCodeAdmin(CitiesAdmin):
    """Changelist for postal codes."""
    search_fields = ['code', 'country__name', 'region_name', 'subregion_name']
    list_display = ['code', 'subregion_name', 'region_name', 'country']
    ordering = ['code']
admin.site.register(PostalCode, PostalCodeAdmin)
# otimizando consulta para admin de city (optimizing the query for the City admin)
from django.contrib import admin
from models import *
class CitiesAdmin(admin.ModelAdmin):
    # Shared base for all cities admins: raw-ID widget for the large
    # alt_names M2M.
    raw_id_fields = ['alt_names']
class ContinenteAdmin(CitiesAdmin):
    # Continents: simple name sort.
    ordering = ['name']
admin.site.register(Continente, ContinenteAdmin)
class CountryAdmin(CitiesAdmin):
    # Countries: rich changelist, searchable by name and codes.
    list_display = ['name', 'code', 'code3', 'tld', 'phone', 'continent', 'area', 'population']
    search_fields = ['name', 'code', 'code3', 'tld', 'phone']
admin.site.register(Country, CountryAdmin)
class RegionAdmin(CitiesAdmin):
    # First-level administrative divisions.
    ordering = ['name_std']
    list_display = ['name_std', 'code', 'country']
    search_fields = ['name', 'name_std', 'code']
admin.site.register(Region, RegionAdmin)
class SubregionAdmin(CitiesAdmin):
    # Second-level administrative divisions; region FK also uses raw-ID.
    ordering = ['name_std']
    list_display = ['name_std', 'code', 'region']
    search_fields = ['name', 'name_std', 'code']
    raw_id_fields = ['alt_names', 'region']
admin.site.register(Subregion, SubregionAdmin)
class CityAdmin(CitiesAdmin):
    # Changelist trimmed relative to the earlier version ("otimizando
    # consulta"): the subregion and population columns were dropped,
    # presumably to cut per-row query cost.
    # NOTE(review): list_select_related on region/country could reduce the
    # remaining FK lookups - confirm against the Django version in use.
    ordering = ['name_std']
    list_display = ['name_std', 'region', 'country']
    search_fields = ['name_std']
    raw_id_fields = ['alt_names', 'region', 'subregion']
admin.site.register(City, CityAdmin)
class DistrictAdmin(CitiesAdmin):
    # City districts; city FK uses a raw-ID widget.
    raw_id_fields = ['alt_names', 'city']
    list_display = ['name_std', 'city']
    search_fields = ['name', 'name_std']
admin.site.register(District, DistrictAdmin)
class AltNameAdmin(admin.ModelAdmin):
    # Alternative names; subclasses ModelAdmin directly (no alt_names
    # raw-ID field inherited).
    ordering = ['name']
    list_display = ['name', 'language', 'is_preferred', 'is_short']
    list_filter = ['is_preferred', 'is_short', 'language']
    search_fields = ['name']
admin.site.register(AlternativeName, AltNameAdmin)
class PostalCodeAdmin(CitiesAdmin):
    # Postal codes, searchable by code and the denormalized region names.
    ordering = ['code']
    list_display = ['code', 'subregion_name', 'region_name', 'country']
    search_fields = ['code', 'country__name', 'region_name', 'subregion_name']
admin.site.register(PostalCode, PostalCodeAdmin)
|
# Orca
#
# Copyright 2010 Joanmarie Diggs.
# Copyright 2014-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs." \
"Copyright (c) 2014-2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
import urllib
from orca import debug
from orca import input_event
from orca import orca
from orca import orca_state
from orca import script_utilities
from orca import settings
from orca import settings_manager
_settingsManager = settings_manager.getManager()
class Utilities(script_utilities.Utilities):
    def __init__(self, script):
        """Initialize web-content utilities for script.

        Arguments:
        - script: the web script these utilities serve
        """
        super().__init__(script)

        # Memoization caches keyed by hash(obj); emptied by
        # clearCachedObjects() and/or clearContentCache().
        self._currentAttrs = {}
        self._caretContexts = {}
        self._inDocumentContent = {}
        self._isTextBlockElement = {}
        self._isGridDescendant = {}
        self._isLayoutOnly = {}
        self._isMath = {}
        self._mathNestingLevel = {}
        self._isOffScreenLabel = {}
        self._hasExplicitName = {}
        self._hasNoSize = {}
        self._hasLongDesc = {}
        self._hasUselessCanvasDescendant = {}
        self._isClickableElement = {}
        self._isAnchor = {}
        self._isLandmark = {}
        self._isLiveRegion = {}
        self._isLink = {}
        self._isNonNavigablePopup = {}
        self._isNonEntryTextWidget = {}
        self._isUselessImage = {}
        self._isParentOfNullChild = {}
        self._inferredLabels = {}
        self._roleDescription = {}
        self._text = {}
        self._tag = {}
        self._treatAsDiv = {}

        # Cached "contents" lists ([obj, start, end, string] items) for the
        # object/sentence/line/word/character currently being presented.
        self._currentObjectContents = None
        self._currentSentenceContents = None
        self._currentLineContents = None
        self._currentWordContents = None
        self._currentCharacterContents = None

        # Roles whose children are valid only if they have the listed role.
        self._validChildRoles = {pyatspi.ROLE_LIST: [pyatspi.ROLE_LIST_ITEM]}
def _cleanupContexts(self):
toRemove = []
for key, [obj, offset] in self._caretContexts.items():
if self.isZombie(obj):
toRemove.append(key)
for key in toRemove:
self._caretContexts.pop(key, None)
    def clearCachedObjects(self):
        """Empty the per-object memoization caches and drop stale caret
        contexts. Called when cached accessibles may no longer be valid."""
        debug.println(debug.LEVEL_INFO, "WEB: cleaning up cached objects")
        self._inDocumentContent = {}
        self._isTextBlockElement = {}
        self._isGridDescendant = {}
        self._isLayoutOnly = {}
        self._isMath = {}
        self._mathNestingLevel = {}
        self._isOffScreenLabel = {}
        self._hasExplicitName = {}
        self._hasNoSize = {}
        self._hasLongDesc = {}
        self._hasUselessCanvasDescendant = {}
        self._isClickableElement = {}
        self._isAnchor = {}
        self._isLandmark = {}
        self._isLiveRegion = {}
        self._isLink = {}
        self._isNonNavigablePopup = {}
        self._isNonEntryTextWidget = {}
        self._isUselessImage = {}
        self._isParentOfNullChild = {}
        self._inferredLabels = {}
        self._roleDescription = {}
        self._tag = {}
        self._treatAsDiv = {}
        self._cleanupContexts()
def clearContentCache(self):
self._currentObjectContents = None
self._currentSentenceContents = None
self._currentLineContents = None
self._currentWordContents = None
self._currentCharacterContents = None
self._currentAttrs = {}
self._text = {}
def isDocument(self, obj):
roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB, pyatspi.ROLE_EMBEDDED]
try:
rv = obj.getRole() in roles
except:
msg = "WEB: Exception getting role for %s" % obj
debug.println(debug.LEVEL_INFO, msg)
rv = False
return rv
def inDocumentContent(self, obj=None):
if not obj:
obj = orca_state.locusOfFocus
if self.isDocument(obj):
return True
rv = self._inDocumentContent.get(hash(obj))
if rv is not None:
return rv
document = self.getDocumentForObject(obj)
rv = document is not None
self._inDocumentContent[hash(obj)] = rv
return rv
def getDocumentForObject(self, obj):
if not obj:
return None
if self.isDocument(obj):
msg = "WEB: %s is document" % obj
debug.println(debug.LEVEL_INFO, msg)
return obj
document = pyatspi.findAncestor(obj, self.isDocument)
msg = "WEB: Document for %s is %s" % (obj, document)
debug.println(debug.LEVEL_INFO, msg)
return document
def _getDocumentsEmbeddedBy(self, frame):
isEmbeds = lambda r: r.getRelationType() == pyatspi.RELATION_EMBEDS
relations = list(filter(isEmbeds, frame.getRelationSet()))
if not relations:
return []
relation = relations[0]
targets = [relation.getTarget(i) for i in range(relation.getNTargets())]
if not targets:
return []
roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
isDocument = lambda x: x and x.getRole() in roles
return list(filter(isDocument, targets))
def documentFrame(self, obj=None):
isShowing = lambda x: x and x.getState().contains(pyatspi.STATE_SHOWING)
try:
windows = [child for child in self._script.app]
except:
msg = "WEB: Exception getting children for app %s" % self._script.app
debug.println(debug.LEVEL_INFO, msg)
windows = []
if orca_state.activeWindow in windows:
windows = [orca_state.activeWindow]
for window in windows:
documents = self._getDocumentsEmbeddedBy(window)
documents = list(filter(isShowing, documents))
if len(documents) == 1:
return documents[0]
return self.getDocumentForObject(obj or orca_state.locusOfFocus)
def documentFrameURI(self):
documentFrame = self.documentFrame()
if documentFrame and not self.isZombie(documentFrame):
document = documentFrame.queryDocument()
return document.getAttributeValue('DocURL')
return None
    def setCaretPosition(self, obj, offset):
        """Move the caret context - and, when appropriate, the toolkit caret
        and focus - to offset in obj."""
        if self._script.flatReviewContext:
            self._script.toggleFlatReviewMode()
        obj, offset = self.findFirstCaretContext(obj, offset)
        self.setCaretContext(obj, offset, documentFrame=None)
        if self._script.focusModeIsSticky():
            return
        try:
            state = obj.getState()
        except:
            msg = "WEB: Exception getting state for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return
        # Update the locus of focus quietly so it is not re-presented.
        orca.setLocusOfFocus(None, obj, notifyScript=False)
        if state.contains(pyatspi.STATE_FOCUSABLE):
            try:
                obj.queryComponent().grabFocus()
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return
            except:
                msg = "WEB: Exception grabbing focus on %s" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return
        text = self.queryNonEmptyText(obj)
        if text:
            text.setCaretOffset(offset)
        # Keep the presentation mode in sync with the kind of object we
        # just moved to.
        if self._script.useFocusMode(obj) != self._script.inFocusMode():
            self._script.togglePresentationMode(None)
        obj.clearCache()
        # TODO - JD: This is private.
        self._script._saveFocusedObjectInfo(obj)
    def getNextObjectInDocument(self, obj, documentFrame):
        """Return the object following obj in documentFrame's traversal
        order (an explicit FLOWS_TO relation wins), or None."""
        if not obj:
            return None
        for relation in obj.getRelationSet():
            if relation.getRelationType() == pyatspi.RELATION_FLOWS_TO:
                return relation.getTarget(0)
        if obj == documentFrame:
            # Resume from the caret context: first child past the caret.
            obj, offset = self.getCaretContext(documentFrame)
            for child in documentFrame:
                if self.characterOffsetInParent(child) > offset:
                    return child
        if obj and obj.childCount:
            return obj[0]
        # No descendants: climb until a next sibling exists or we reach
        # the document frame.
        nextObj = None
        while obj and not nextObj:
            index = obj.getIndexInParent() + 1
            if 0 < index < obj.parent.childCount:
                nextObj = obj.parent[index]
            elif obj.parent != documentFrame:
                obj = obj.parent
            else:
                break
        return nextObj
    def getPreviousObjectInDocument(self, obj, documentFrame):
        """Return the object preceding obj in documentFrame's traversal
        order (an explicit FLOWS_FROM relation wins), or None."""
        if not obj:
            return None
        for relation in obj.getRelationSet():
            if relation.getRelationType() == pyatspi.RELATION_FLOWS_FROM:
                return relation.getTarget(0)
        if obj == documentFrame:
            obj, offset = self.getCaretContext(documentFrame)
            for child in documentFrame:
                if self.characterOffsetInParent(child) < offset:
                    return child
        index = obj.getIndexInParent() - 1
        if not 0 <= index < obj.parent.childCount:
            obj = obj.parent
            index = obj.getIndexInParent() - 1
        # NOTE(review): if obj is again a first child here, index is -1 and
        # obj.parent[-1] presumably wraps to the last sibling - confirm
        # this is the intended behavior.
        previousObj = obj.parent[index]
        # Descend to the deepest, last child of the previous sibling.
        while previousObj and previousObj.childCount:
            previousObj = previousObj[previousObj.childCount - 1]
        return previousObj
def getTopOfFile(self):
return self.findFirstCaretContext(self.documentFrame(), 0)
def getBottomOfFile(self):
obj = self.getLastObjectInDocument(self.documentFrame())
offset = 0
text = self.queryNonEmptyText(obj)
if text:
offset = text.characterCount - 1
while obj:
lastobj, lastoffset = self.nextContext(obj, offset)
if not lastobj:
break
obj, offset = lastobj, lastoffset
return [obj, offset]
def getLastObjectInDocument(self, documentFrame):
try:
lastChild = documentFrame[documentFrame.childCount - 1]
except:
lastChild = documentFrame
while lastChild:
lastObj = self.getNextObjectInDocument(lastChild, documentFrame)
if lastObj and lastObj != lastChild:
lastChild = lastObj
else:
break
if lastChild and self.doNotDescendForCaret(lastChild):
lastChild = lastChild.parent
return lastChild
def getRoleDescription(self, obj):
rv = self._roleDescription.get(hash(obj))
if rv is not None:
return rv
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
attrs = {}
rv = attrs.get('roledescription', '')
self._roleDescription[hash(obj)] = rv
return rv
def _getTag(self, obj):
rv = self._tag.get(hash(obj))
if rv is not None:
return rv
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return None
rv = attrs.get('tag')
self._tag[hash(obj)] = rv
return rv
def inFindToolbar(self, obj=None):
if not obj:
obj = orca_state.locusOfFocus
if obj and obj.parent \
and obj.parent.getRole() == pyatspi.ROLE_AUTOCOMPLETE:
return False
return super().inFindToolbar(obj)
def isEmpty(self, obj):
if not self.isTextBlockElement(obj):
return False
return self.queryNonEmptyText(obj, False) is None
def isHidden(self, obj):
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return False
return attrs.get('hidden', False)
def isTextArea(self, obj):
if self.isLink(obj):
return False
return super().isTextArea(obj)
def isReadOnlyTextArea(self, obj):
# NOTE: This method is deliberately more conservative than isTextArea.
if obj.getRole() != pyatspi.ROLE_ENTRY:
return False
state = obj.getState()
readOnly = state.contains(pyatspi.STATE_FOCUSABLE) \
and not state.contains(pyatspi.STATE_EDITABLE)
return readOnly
    def setCaretOffset(self, obj, characterOffset):
        """Position the caret at characterOffset in obj and refresh braille."""
        self.setCaretPosition(obj, characterOffset)
        self._script.updateBraille(obj)
def nextContext(self, obj=None, offset=-1, skipSpace=False):
if not obj:
obj, offset = self.getCaretContext()
nextobj, nextoffset = self.findNextCaretInOrder(obj, offset)
if (obj, offset) == (nextobj, nextoffset):
nextobj, nextoffset = self.findNextCaretInOrder(nextobj, nextoffset)
if skipSpace:
text = self.queryNonEmptyText(nextobj)
while text and text.getText(nextoffset, nextoffset + 1).isspace():
nextobj, nextoffset = self.findNextCaretInOrder(nextobj, nextoffset)
text = self.queryNonEmptyText(nextobj)
return nextobj, nextoffset
def previousContext(self, obj=None, offset=-1, skipSpace=False):
if not obj:
obj, offset = self.getCaretContext()
prevobj, prevoffset = self.findPreviousCaretInOrder(obj, offset)
if (obj, offset) == (prevobj, prevoffset):
prevobj, prevoffset = self.findPreviousCaretInOrder(prevobj, prevoffset)
if skipSpace:
text = self.queryNonEmptyText(prevobj)
while text and text.getText(prevoffset, prevoffset + 1).isspace():
prevobj, prevoffset = self.findPreviousCaretInOrder(prevobj, prevoffset)
text = self.queryNonEmptyText(prevobj)
return prevobj, prevoffset
def lastContext(self, root):
offset = 0
text = self.queryNonEmptyText(root)
if text:
offset = text.characterCount - 1
def _isInRoot(o):
return o == root or pyatspi.utils.findAncestor(o, lambda x: x == root)
obj = root
while obj:
lastobj, lastoffset = self.nextContext(obj, offset)
if not (lastobj and _isInRoot(lastobj)):
break
obj, offset = lastobj, lastoffset
return obj, offset
def contextsAreOnSameLine(self, a, b):
if a == b:
return True
aObj, aOffset = a
bObj, bOffset = b
aExtents = self.getExtents(aObj, aOffset, aOffset + 1)
bExtents = self.getExtents(bObj, bOffset, bOffset + 1)
return self.extentsAreOnSameLine(aExtents, bExtents)
@staticmethod
def extentsAreOnSameLine(a, b, pixelDelta=5):
if a == b:
return True
aX, aY, aWidth, aHeight = a
bX, bY, bWidth, bHeight = b
if aWidth == 0 and aHeight == 0:
return bY <= aY <= bY + bHeight
if bWidth == 0 and bHeight == 0:
return aY <= bY <= aY + aHeight
highestBottom = min(aY + aHeight, bY + bHeight)
lowestTop = max(aY, bY)
if lowestTop >= highestBottom:
return False
aMiddle = aY + aHeight / 2
bMiddle = bY + bHeight / 2
if abs(aMiddle - bMiddle) > pixelDelta:
return False
return True
    @staticmethod
    def getExtents(obj, startOffset, endOffset):
        """Return [x, y, width, height] for the given text range in obj,
        falling back to component extents; [0, 0, 0, 0] on failure."""
        if not obj:
            return [0, 0, 0, 0]
        try:
            text = obj.queryText()
            if text.characterCount:
                return list(text.getRangeExtents(startOffset, endOffset, 0))
        except NotImplementedError:
            pass
        except:
            msg = "WEB: Exception getting range extents for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]
        role = obj.getRole()
        parentRole = obj.parent.getRole()
        # For items in combo/list boxes, use the parent's extents.
        if role in [pyatspi.ROLE_MENU, pyatspi.ROLE_LIST_ITEM] \
           and parentRole in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_LIST_BOX]:
            try:
                ext = obj.parent.queryComponent().getExtents(0)
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj.parent
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
            except:
                msg = "WEB: Exception getting extents for %s" % obj.parent
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
        else:
            try:
                ext = obj.queryComponent().getExtents(0)
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
            except:
                msg = "WEB: Exception getting extents for %s" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
        return [ext.x, ext.y, ext.width, ext.height]
    def expandEOCs(self, obj, startOffset=0, endOffset=-1):
        """Return obj's text in [startOffset, endOffset) with every
        embedded-object character replaced by the child's expanded text."""
        if not self.inDocumentContent(obj):
            return ""
        text = self.queryNonEmptyText(obj)
        if not text:
            return ""
        string = text.getText(startOffset, endOffset)
        if self.EMBEDDED_OBJECT_CHARACTER in string:
            # If we're not getting the full text of this object, but
            # rather a substring, we need to figure out the offset of
            # the first child within this substring.
            childOffset = 0
            for child in obj:
                if self.characterOffsetInParent(child) >= startOffset:
                    break
                childOffset += 1
            toBuild = list(string)
            count = toBuild.count(self.EMBEDDED_OBJECT_CHARACTER)
            for i in range(count):
                index = toBuild.index(self.EMBEDDED_OBJECT_CHARACTER)
                try:
                    child = obj[i + childOffset]
                except:
                    continue
                # Recurse into the child; a trailing space keeps adjacent
                # children from running together.
                childText = self.expandEOCs(child)
                if not childText:
                    childText = ""
                toBuild[index] = "%s " % childText
            string = "".join(toBuild).strip()
        return string
def substring(self, obj, startOffset, endOffset):
if not self.inDocumentContent(obj):
return super().substring(obj, startOffset, endOffset)
text = self.queryNonEmptyText(obj)
if text:
return text.getText(startOffset, endOffset)
return ""
def textAttributes(self, acc, offset, get_defaults=False):
attrsForObj = self._currentAttrs.get(hash(acc)) or {}
if offset in attrsForObj:
return attrsForObj.get(offset)
attrs = super().textAttributes(acc, offset, get_defaults)
self._currentAttrs[hash(acc)] = {offset:attrs}
return attrs
def findObjectInContents(self, obj, offset, contents):
if not obj or not contents:
return -1
offset = max(0, offset)
matches = [x for x in contents if x[0] == obj]
match = [x for x in matches if x[1] <= offset < x[2]]
if match and match[0] and match[0] in contents:
return contents.index(match[0])
return -1
def isNonEntryTextWidget(self, obj):
rv = self._isNonEntryTextWidget.get(hash(obj))
if rv is not None:
return rv
roles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_PAGE_TAB,
pyatspi.ROLE_RADIO_MENU_ITEM,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_TOGGLE_BUTTON]
role = obj.getRole()
if role in roles:
rv = True
elif role in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_TABLE_CELL]:
rv = not self.isTextBlockElement(obj)
self._isNonEntryTextWidget[hash(obj)] = rv
return rv
    def queryNonEmptyText(self, obj, excludeNonEntryTextWidgets=True):
        """Return obj's text interface if it has presentable text, else
        None. Results are memoized in self._text."""
        if not obj:
            return None
        if hash(obj) in self._text:
            return self._text.get(hash(obj))
        try:
            rv = obj.queryText()
            characterCount = rv.characterCount
        except:
            rv = None
        else:
            if not characterCount:
                rv = None
        # Live regions keep their text; for everything else, filter out
        # objects whose text should not be presented directly.
        if not self.isLiveRegion(obj):
            doNotQuery = [pyatspi.ROLE_TABLE_ROW,
                          pyatspi.ROLE_TOOL_BAR]
            role = obj.getRole()
            if rv and role in doNotQuery:
                rv = None
            if rv and excludeNonEntryTextWidgets and self.isNonEntryTextWidget(obj):
                rv = None
            if rv and (self.isHidden(obj) or self.isOffScreenLabel(obj)):
                rv = None
            if rv and role == pyatspi.ROLE_LINK \
               and (self.hasExplicitName(obj) or self.hasUselessCanvasDescendant(obj)):
                rv = None
        self._text[hash(obj)] = rv
        return rv
def _treatTextObjectAsWhole(self, obj):
roles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_TOGGLE_BUTTON]
role = obj.getRole()
if role in roles:
return True
if role == pyatspi.ROLE_TABLE_CELL and self.isFocusModeWidget(obj):
return True
return False
def __findRange(self, text, offset, start, end, boundary):
# We should not have to do any of this. Seriously. This is why
# We can't have nice things.
allText = text.getText(0, -1)
extents = list(text.getRangeExtents(offset, offset + 1, 0))
def _inThisSpan(span):
return span[0] <= offset <= span[1]
def _onThisLine(span):
rangeExtents = list(text.getRangeExtents(span[0], span[0] + 1, 0))
return self.extentsAreOnSameLine(extents, rangeExtents)
spans = []
charCount = text.characterCount
if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START:
spans = [m.span() for m in re.finditer("\S*[^\.\?\!]+((?<!\w)[\.\?\!]+(?!\w)|\S*)", allText)]
elif boundary is not None:
spans = [m.span() for m in re.finditer("[^\n\r]+", allText)]
if not spans:
spans = [(0, charCount)]
rangeStart, rangeEnd = 0, charCount
for span in spans:
if _inThisSpan(span):
rangeStart, rangeEnd = span[0], span[1] + 1
break
string = allText[rangeStart:rangeEnd]
if string and boundary in [pyatspi.TEXT_BOUNDARY_SENTENCE_START, None]:
return string, rangeStart, rangeEnd
words = [m.span() for m in re.finditer("[^\s\ufffc]+", string)]
words = list(map(lambda x: (x[0] + rangeStart, x[1] + rangeStart), words))
if boundary == pyatspi.TEXT_BOUNDARY_WORD_START:
spans = list(filter(_inThisSpan, words))
if boundary == pyatspi.TEXT_BOUNDARY_LINE_START:
spans = list(filter(_onThisLine, words))
if spans:
rangeStart, rangeEnd = spans[0][0], spans[-1][1] + 1
string = allText[rangeStart:rangeEnd]
return string, rangeStart, rangeEnd
    def _attemptBrokenTextRecovery(self):
        """Whether _getTextAtOffset() should sanity-check and repair broken
        toolkit replies. Defaults to False; presumably overridden by
        toolkit-specific subclasses - confirm at the call sites."""
        return False
def _getTextAtOffset(self, obj, offset, boundary):
if not obj:
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '', Start: 0, End: 0. (obj is None)" % (offset, obj, boundary)
debug.println(debug.LEVEL_INFO, msg)
return '', 0, 0
text = self.queryNonEmptyText(obj)
if not text:
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '', Start: 0, End: 1. (queryNonEmptyText() returned None)" \
% (offset, obj, boundary)
debug.println(debug.LEVEL_INFO, msg)
return '', 0, 1
if boundary == pyatspi.TEXT_BOUNDARY_CHAR:
string, start, end = text.getText(offset, offset + 1), offset, offset + 1
s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
debug.println(debug.LEVEL_INFO, msg)
return string, start, end
if not boundary:
string, start, end = text.getText(offset, -1), offset, text.characterCount
s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
debug.println(debug.LEVEL_INFO, msg)
return string, start, end
if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START \
and not obj.getState().contains(pyatspi.STATE_EDITABLE):
allText = text.getText(0, -1)
if obj.getRole() in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_HEADING] \
or not (re.search("\w", allText) and self.isTextBlockElement(obj)):
string, start, end = allText, 0, text.characterCount
s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
debug.println(debug.LEVEL_INFO, msg)
return string, start, end
offset = max(0, offset)
string, start, end = text.getTextAtOffset(offset, boundary)
# The above should be all that we need to do, but....
if not self._attemptBrokenTextRecovery():
s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i.\n" \
" Not checking for broken text." % (offset, obj, boundary, s, start, end)
debug.println(debug.LEVEL_INFO, msg)
return string, start, end
needSadHack = False
testString, testStart, testEnd = text.getTextAtOffset(start, boundary)
if (string, start, end) != (testString, testStart, testEnd):
s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
s2 = testString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "FAIL: Bad results for text at offset for %s using %s.\n" \
" For offset %i - String: '%s', Start: %i, End: %i.\n" \
" For offset %i - String: '%s', Start: %i, End: %i.\n" \
" The bug is the above results should be the same.\n" \
" This very likely needs to be fixed by the toolkit." \
% (obj, boundary, offset, s1, start, end, start, s2, testStart, testEnd)
debug.println(debug.LEVEL_INFO, msg)
needSadHack = True
elif not string and 0 <= offset < text.characterCount:
s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
s2 = text.getText(0, -1).replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i.\n" \
" The bug is no text reported for a valid offset.\n" \
" Character count: %i, Full text: '%s'.\n" \
" This very likely needs to be fixed by the toolkit." \
% (offset, obj, boundary, s1, start, end, text.characterCount, s2)
debug.println(debug.LEVEL_INFO, msg)
needSadHack = True
elif not (start <= offset < end):
s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i.\n" \
" The bug is the range returned is outside of the offset.\n" \
" This very likely needs to be fixed by the toolkit." \
% (offset, obj, boundary, s1, start, end)
debug.println(debug.LEVEL_INFO, msg)
needSadHack = True
if needSadHack:
sadString, sadStart, sadEnd = self.__findRange(text, offset, start, end, boundary)
s = sadString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "HACK: Attempting to recover from above failure.\n" \
" String: '%s', Start: %i, End: %i." % (s, sadStart, sadEnd)
debug.println(debug.LEVEL_INFO, msg)
return sadString, sadStart, sadEnd
s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
msg = "WEB: Results for text at offset %i for %s using %s:\n" \
" String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
debug.println(debug.LEVEL_INFO, msg)
return string, start, end
    def _getContentsForObj(self, obj, offset, boundary):
        """Return [[obj, start, end, string]] describing the content at
        (obj, offset) for boundary, descending into embedded children."""
        if not obj:
            return []
        # A line containing math is presented as the whole top-level math
        # object.
        if boundary == pyatspi.TEXT_BOUNDARY_LINE_START and self.isMath(obj):
            if self.isMathTopLevel(obj):
                math = obj
            else:
                math = self.getMathAncestor(obj)
            return [[math, 0, 1, '']]
        role = obj.getRole()
        if role == pyatspi.ROLE_INTERNAL_FRAME and obj.childCount == 1:
            return self._getContentsForObj(obj[0], 0, boundary)
        string, start, end = self._getTextAtOffset(obj, offset, boundary)
        # Check for ROLE_SECTION due to https://bugzilla.mozilla.org/show_bug.cgi?id=1210630
        if not string or (self.isLandmark(obj) and role != pyatspi.ROLE_SECTION):
            return [[obj, start, end, string]]
        stringOffset = offset - start
        try:
            char = string[stringOffset]
        except:
            pass
        else:
            # The offset lands on an embedded object: recurse into that
            # child instead.
            if char == self.EMBEDDED_OBJECT_CHARACTER:
                childIndex = self.getChildIndex(obj, offset)
                try:
                    child = obj[childIndex]
                except:
                    pass
                else:
                    return self._getContentsForObj(child, 0, boundary)
        # Split on embedded-object characters and keep only the run that
        # contains stringOffset.
        ranges = [m.span() for m in re.finditer("[^\ufffc]+", string)]
        strings = list(filter(lambda x: x[0] <= stringOffset <= x[1], ranges))
        if len(strings) == 1:
            rangeStart, rangeEnd = strings[0]
            start += rangeStart
            string = string[rangeStart:rangeEnd]
            end = start + len(string)
        return [[obj, start, end, string]]
def getSentenceContentsAtOffset(self, obj, offset, useCache=True):
if not obj:
return []
offset = max(0, offset)
if useCache:
if self.findObjectInContents(obj, offset, self._currentSentenceContents) != -1:
return self._currentSentenceContents
boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
objects = self._getContentsForObj(obj, offset, boundary)
state = obj.getState()
if state.contains(pyatspi.STATE_EDITABLE) \
and state.contains(pyatspi.STATE_FOCUSED):
return objects
def _treatAsSentenceEnd(x):
xObj, xStart, xEnd, xString = x
if not self.isTextBlockElement(xObj):
return False
text = self.queryNonEmptyText(xObj)
if text and 0 < text.characterCount <= xEnd:
return True
if 0 <= xStart <= 5:
xString = " ".join(xString.split()[1:])
match = re.search("\S[\.\!\?]+(\s|\Z)", xString)
return match is not None
# Check for things in the same sentence before this object.
firstObj, firstStart, firstEnd, firstString = objects[0]
while firstObj and firstString:
if firstStart == 0 and self.isTextBlockElement(firstObj):
break
prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
onLeft = list(filter(lambda x: x not in objects, onLeft))
endsOnLeft = list(filter(_treatAsSentenceEnd, onLeft))
if endsOnLeft:
i = onLeft.index(endsOnLeft[-1])
onLeft = onLeft[i+1:]
if not onLeft:
break
objects[0:0] = onLeft
firstObj, firstStart, firstEnd, firstString = objects[0]
# Check for things in the same sentence after this object.
while not _treatAsSentenceEnd(objects[-1]):
lastObj, lastStart, lastEnd, lastString = objects[-1]
nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
onRight = self._getContentsForObj(nextObj, nOffset, boundary)
onRight = list(filter(lambda x: x not in objects, onRight))
if not onRight:
break
objects.extend(onRight)
if useCache:
self._currentSentenceContents = objects
return objects
def getCharacterAtOffset(self, obj, offset):
text = self.queryNonEmptyText(obj)
if text:
return text.getText(offset, offset + 1)
return ""
def getCharacterContentsAtOffset(self, obj, offset, useCache=True):
if not obj:
return []
offset = max(0, offset)
if useCache:
if self.findObjectInContents(obj, offset, self._currentCharacterContents) != -1:
return self._currentCharacterContents
boundary = pyatspi.TEXT_BOUNDARY_CHAR
objects = self._getContentsForObj(obj, offset, boundary)
if useCache:
self._currentCharacterContents = objects
return objects
    def getWordContentsAtOffset(self, obj, offset, useCache=True):
        """Return the list of [obj, start, end, string] items making up the
        word at (obj, offset), merging adjacent non-space content that is
        on the same visual line."""
        if not obj:
            return []
        offset = max(0, offset)
        if useCache:
            if self.findObjectInContents(obj, offset, self._currentWordContents) != -1:
                return self._currentWordContents
        boundary = pyatspi.TEXT_BOUNDARY_WORD_START
        objects = self._getContentsForObj(obj, offset, boundary)
        extents = self.getExtents(obj, offset, offset + 1)

        def _include(x):
            # Accept only new, non-empty content on the same line.
            if x in objects:
                return False
            xObj, xStart, xEnd, xString = x
            if xStart == xEnd or not xString:
                return False
            xExtents = self.getExtents(xObj, xStart, xStart + 1)
            return self.extentsAreOnSameLine(extents, xExtents)

        # Check for things in the same word to the left of this object.
        firstObj, firstStart, firstEnd, firstString = objects[0]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
        while prevObj and firstString:
            text = self.queryNonEmptyText(prevObj)
            if not text or text.getText(pOffset, pOffset + 1).isspace():
                break
            onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
            onLeft = list(filter(_include, onLeft))
            if not onLeft:
                break
            objects[0:0] = onLeft
            firstObj, firstStart, firstEnd, firstString = objects[0]
            prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)

        # Check for things in the same word to the right of this object.
        lastObj, lastStart, lastEnd, lastString = objects[-1]
        while lastObj and lastString and not lastString[-1].isspace():
            nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
            onRight = self._getContentsForObj(nextObj, nOffset, boundary)
            onRight = list(filter(_include, onRight))
            if not onRight:
                break
            objects.extend(onRight)
            lastObj, lastStart, lastEnd, lastString = objects[-1]

        # We want to treat the list item marker as its own word.
        firstObj, firstStart, firstEnd, firstString = objects[0]
        if firstStart == 0 and firstObj.getRole() == pyatspi.ROLE_LIST_ITEM:
            objects = [objects[0]]

        if useCache:
            self._currentWordContents = objects

        return objects
def getObjectContentsAtOffset(self, obj, offset=0, useCache=True):
    """Return the list of [obj, start, end, string] contents for all of obj.

    Walks the caret order rightwards from obj's own contents, appending any
    contents that still belong to obj's subtree.
    """
    if not obj:
        return []
    offset = max(0, offset)
    # Return the cached object contents when they already cover this position.
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentObjectContents) != -1:
            return self._currentObjectContents
    objIsLandmark = self.isLandmark(obj)
    def _isInObject(x):
        # True if x is obj or any descendant of obj (walks up via parent).
        if not x:
            return False
        if x == obj:
            return True
        return _isInObject(x.parent)
    def _include(x):
        if x in objects:
            return False
        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False
        # Do not absorb a nested landmark into this landmark's contents.
        if objIsLandmark and self.isLandmark(xObj) and obj != xObj:
            return False
        return _isInObject(xObj)
    objects = self._getContentsForObj(obj, offset, None)
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    while nextObj:
        onRight = self._getContentsForObj(nextObj, nOffset, None)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break
        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    if useCache:
        self._currentObjectContents = objects
    return objects
def _contentIsSubsetOf(self, contentA, contentB):
objA, startA, endA, stringA = contentA
objB, startB, endB, stringB = contentB
if objA == objB:
setA = set(range(startA, endA))
setB = set(range(startB, endB))
return setA.issubset(setB)
return False
def getLineContentsAtOffset(self, obj, offset, layoutMode=None, useCache=True):
    """Return the list of [obj, start, end, string] contents for the line at offset.

    In layout mode, adjacent contents whose extents fall on the same visual
    line are merged in; otherwise only the line contents of obj itself are
    returned. Results may be cached in self._currentLineContents.
    """
    if not obj:
        return []
    text = self.queryNonEmptyText(obj)
    # An offset at characterCount is past the last character; step back onto it.
    if text and offset == text.characterCount:
        offset -= 1
    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentLineContents) != -1:
            return self._currentLineContents
    if layoutMode == None:
        layoutMode = _settingsManager.getSetting('layoutMode')
    objects = []
    extents = self.getExtents(obj, offset, offset + 1)
    def _include(x):
        # Accept non-empty contents whose extents share this line's extents.
        if x in objects:
            return False
        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False
        xExtents = self.getExtents(xObj, xStart, xStart + 1)
        if self.isMathTopLevel(xObj):
            # Math can be taller than the surrounding text; compare using the
            # line height of the current extents.
            onSameLine = self.extentsAreOnSameLine(extents, xExtents, extents[3])
        else:
            onSameLine = self.extentsAreOnSameLine(extents, xExtents)
        return onSameLine
    boundary = pyatspi.TEXT_BOUNDARY_LINE_START
    objects = self._getContentsForObj(obj, offset, boundary)
    if not layoutMode:
        if useCache:
            self._currentLineContents = objects
        return objects
    firstObj, firstStart, firstEnd, firstString = objects[0]
    # Zero-sized extents (or math) are unreliable; recompute from the contents.
    if (extents[2] == 0 and extents[3] == 0) or self.isMath(firstObj):
        extents = self.getExtents(firstObj, firstStart, firstEnd)
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    # Check for things on the same line to the left of this object.
    while prevObj:
        text = self.queryNonEmptyText(prevObj)
        # Skip over a single plain or non-breaking space before comparing.
        if text and text.getText(pOffset, pOffset + 1) in [" ", "\xa0"]:
            prevObj, pOffset = self.findPreviousCaretInOrder(prevObj, pOffset)
        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(_include, onLeft))
        if not onLeft:
            break
        # Drop our first content if the left neighbor's contents subsume it.
        if self._contentIsSubsetOf(objects[0], onLeft[-1]):
            objects.pop(0)
        objects[0:0] = onLeft
        firstObj, firstStart = objects[0][0], objects[0][1]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    # Check for things on the same line to the right of this object.
    while nextObj:
        text = self.queryNonEmptyText(nextObj)
        if text and text.getText(nOffset, nOffset + 1) in [" ", "\xa0"]:
            nextObj, nOffset = self.findNextCaretInOrder(nextObj, nOffset)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break
        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    if useCache:
        self._currentLineContents = objects
    return objects
def getPreviousLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line before the one containing obj/offset.

    Defaults to the current caret context. Returns [] when a previous line
    cannot be determined.
    """
    if obj is None:
        obj, offset = self.getCaretContext()
    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    # A zombie context means our cache is stale; rebuild it and retry.
    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie. Clearing cache." % obj
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.getCaretContext()
        msg = "WEB: Now Current context is: %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)
    if not (line and line[0]):
        return []
    firstObj, firstOffset = line[0][0], line[0][1]
    msg = "WEB: First context on line is: %s, %i" % (firstObj, firstOffset)
    debug.println(debug.LEVEL_INFO, msg)
    obj, offset = self.previousContext(firstObj, firstOffset, True)
    # No previous context may also mean stale caches; clear and retry once.
    if not obj and firstObj:
        msg = "WEB: Previous context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.previousContext(firstObj, firstOffset, True)
    msg = "WEB: Previous context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []
    return contents

def getNextLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line after the one containing obj/offset.

    Defaults to the current caret context. Returns [] when a next line
    cannot be determined.
    """
    if obj is None:
        obj, offset = self.getCaretContext()
    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie. Clearing cache." % obj
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.getCaretContext()
        msg = "WEB: Now Current context is: %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)
    if not (line and line[0]):
        return []
    # When inside math, jump past the entire math expression rather than
    # navigating within it.
    math = self.getMathAncestor(obj)
    if math:
        lastObj, lastOffset = self.lastContext(math)
    else:
        lastObj, lastOffset = line[-1][0], line[-1][2] - 1
    msg = "WEB: Last context on line is: %s, %i" % (lastObj, lastOffset)
    debug.println(debug.LEVEL_INFO, msg)
    obj, offset = self.nextContext(lastObj, lastOffset, True)
    if not obj and lastObj:
        msg = "WEB: Next context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.nextContext(lastObj, lastOffset, True)
    msg = "WEB: Next context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)
    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []
    return contents
def isFocusModeWidget(self, obj):
    """Return True if obj is a widget for which focus mode should be used."""
    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    # Editable or expandable widgets always take focus mode.
    if state.contains(pyatspi.STATE_EDITABLE) or state.contains(pyatspi.STATE_EXPANDABLE):
        return True
    focusModeRoles = (pyatspi.ROLE_COMBO_BOX,
                      pyatspi.ROLE_ENTRY,
                      pyatspi.ROLE_LIST_BOX,
                      pyatspi.ROLE_LIST_ITEM,
                      pyatspi.ROLE_MENU,
                      pyatspi.ROLE_MENU_ITEM,
                      pyatspi.ROLE_CHECK_MENU_ITEM,
                      pyatspi.ROLE_RADIO_MENU_ITEM,
                      pyatspi.ROLE_PAGE_TAB,
                      pyatspi.ROLE_PASSWORD_TEXT,
                      pyatspi.ROLE_PROGRESS_BAR,
                      pyatspi.ROLE_SLIDER,
                      pyatspi.ROLE_SPIN_BUTTON,
                      pyatspi.ROLE_TOOL_BAR,
                      pyatspi.ROLE_TABLE_CELL,
                      pyatspi.ROLE_TABLE_ROW,
                      pyatspi.ROLE_TABLE,
                      pyatspi.ROLE_TREE_TABLE,
                      pyatspi.ROLE_TREE)
    # A focus-mode role only counts when the object is not plain block text.
    if role in focusModeRoles and not self.isTextBlockElement(obj):
        return True
    return self.isGridDescendant(obj)
def isTextBlockElement(self, obj):
    """Return True if obj is a non-interactive block of text (cached)."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isTextBlockElement.get(hash(obj))
    if rv is not None:
        return rv
    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    textBlockElements = [pyatspi.ROLE_CAPTION,
                         pyatspi.ROLE_COLUMN_HEADER,
                         pyatspi.ROLE_DOCUMENT_FRAME,
                         pyatspi.ROLE_DOCUMENT_WEB,
                         pyatspi.ROLE_FOOTER,
                         pyatspi.ROLE_FORM,
                         pyatspi.ROLE_HEADING,
                         pyatspi.ROLE_LABEL,
                         pyatspi.ROLE_LIST,
                         pyatspi.ROLE_LIST_ITEM,
                         pyatspi.ROLE_PANEL,
                         pyatspi.ROLE_PARAGRAPH,
                         pyatspi.ROLE_ROW_HEADER,
                         pyatspi.ROLE_SECTION,
                         pyatspi.ROLE_TEXT,
                         pyatspi.ROLE_TABLE_CELL]
    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        textBlockElements.append(pyatspi.ROLE_STATIC)
    except:
        pass
    if not role in textBlockElements:
        rv = False
    elif state.contains(pyatspi.STATE_EDITABLE):
        rv = False
    elif role in [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]:
        rv = True
    elif not state.contains(pyatspi.STATE_FOCUSABLE) and not state.contains(pyatspi.STATE_FOCUSED):
        rv = True
    else:
        rv = False
    self._isTextBlockElement[hash(obj)] = rv
    return rv

def treatAsDiv(self, obj):
    """Return True if obj should be treated as a plain div because it (or its
    parent) contains children whose roles are invalid for its role (cached)."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._treatAsDiv.get(hash(obj))
    if rv is not None:
        return rv
    try:
        role = obj.getRole()
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting role and childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    rv = False
    validRoles = self._validChildRoles.get(role)
    if validRoles:
        if not childCount:
            # A role with expected children but none at all: treat as div.
            rv = True
        else:
            rv = bool([x for x in obj if x and x.getRole() not in validRoles])
    if not rv:
        # NOTE(review): this lookup is keyed on the parent accessible itself,
        # not on its role like the lookup above — confirm this is intended.
        validRoles = self._validChildRoles.get(obj.parent)
        if validRoles:
            rv = bool([x for x in obj.parent if x and x.getRole() not in validRoles])
    self._treatAsDiv[hash(obj)] = rv
    return rv

def speakMathSymbolNames(self, obj=None):
    """Return True if math symbol names should be spoken for obj (defaults to
    the locus of focus), i.e. when the object is math content."""
    obj = obj or orca_state.locusOfFocus
    return self.isMath(obj)

def isInMath(self):
    """Return True if the locus of focus is inside math content."""
    return self.isMath(orca_state.locusOfFocus)
def isMath(self, obj):
    """Return True if obj's markup tag is a MathML element tag (cached)."""
    cached = self._isMath.get(hash(obj))
    if cached is not None:
        return cached
    # The full set of MathML element tags we recognize as math content.
    mathTags = frozenset((
        'math', 'maction', 'maligngroup', 'malignmark', 'menclose',
        'merror', 'mfenced', 'mfrac', 'mglyph', 'mi', 'mlabeledtr',
        'mlongdiv', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'ms', 'mscarries',
        'mscarry', 'msgroup', 'msline', 'mspace', 'msqrt', 'msrow',
        'mstack', 'mstyle', 'msub', 'msup', 'msubsup', 'mtable',
        'mtd', 'mtext', 'mtr', 'munder', 'munderover'))
    result = self._getTag(obj) in mathTags
    self._isMath[hash(obj)] = result
    return result
# Small predicates classifying objects by their MathML tag.

def isNoneElement(self, obj):
    """Return True if obj's tag is 'none' (the MathML empty placeholder)."""
    return self._getTag(obj) == 'none'

def isMathLayoutOnly(self, obj):
    """Return True if obj is a purely presentational MathML wrapper."""
    return self._getTag(obj) in ['mrow', 'mstyle', 'merror', 'mpadded']

def isMathMultiline(self, obj):
    """Return True if obj is a multi-line MathML construct."""
    return self._getTag(obj) in ['mtable', 'mstack', 'mlongdiv']

def isMathEnclose(self, obj):
    """Return True if obj is an menclose element."""
    return self._getTag(obj) == 'menclose'

def isMathFenced(self, obj):
    """Return True if obj is an mfenced element."""
    return self._getTag(obj) == 'mfenced'

def isMathFraction(self, obj):
    """Return True if obj is an mfrac element."""
    return self._getTag(obj) == 'mfrac'

def isMathFractionWithoutBar(self, obj):
    """Return True if obj is a fraction whose linethickness attribute contains
    no non-zero digit, i.e. a fraction rendered without a bar."""
    if not self.isMathFraction(obj):
        return False
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return False
    linethickness = attrs.get('linethickness')
    if not linethickness:
        return False
    # Any non-zero digit (e.g. '2', '0.5px') means a visible bar.
    for char in linethickness:
        if char.isnumeric() and char != '0':
            return False
    return True

def isMathPhantom(self, obj):
    """Return True if obj is an mphantom element."""
    return self._getTag(obj) == 'mphantom'

def isMathRoot(self, obj):
    """Return True if obj is any root (square or nth)."""
    return self.isMathSquareRoot(obj) or self.isMathNthRoot(obj)

def isMathNthRoot(self, obj):
    """Return True if obj is an mroot element."""
    return self._getTag(obj) == 'mroot'

def isMathMultiScript(self, obj):
    """Return True if obj is an mmultiscripts element."""
    return self._getTag(obj) == 'mmultiscripts'

def _isMathPrePostScriptSeparator(self, obj):
    """Return True if obj is the mprescripts separator element."""
    return self._getTag(obj) == 'mprescripts'

def isMathSubOrSuperScript(self, obj):
    """Return True if obj is a sub/superscript construct."""
    return self._getTag(obj) in ['msub', 'msup', 'msubsup']

def isMathTable(self, obj):
    """Return True if obj is an mtable element."""
    return self._getTag(obj) == 'mtable'

def isMathTableRow(self, obj):
    """Return True if obj is a (possibly labeled) math table row."""
    return self._getTag(obj) in ['mtr', 'mlabeledtr']

def isMathTableCell(self, obj):
    """Return True if obj is an mtd element."""
    return self._getTag(obj) == 'mtd'

def isMathUnderOrOverScript(self, obj):
    """Return True if obj is an under/overscript construct."""
    return self._getTag(obj) in ['mover', 'munder', 'munderover']

def _isMathSubElement(self, obj):
    """Return True if obj is an msub element."""
    return self._getTag(obj) == 'msub'

def _isMathSupElement(self, obj):
    """Return True if obj is an msup element."""
    return self._getTag(obj) == 'msup'

def _isMathSubsupElement(self, obj):
    """Return True if obj is an msubsup element."""
    return self._getTag(obj) == 'msubsup'

def _isMathUnderElement(self, obj):
    """Return True if obj is an munder element."""
    return self._getTag(obj) == 'munder'

def _isMathOverElement(self, obj):
    """Return True if obj is an mover element."""
    return self._getTag(obj) == 'mover'

def _isMathUnderOverElement(self, obj):
    """Return True if obj is an munderover element."""
    return self._getTag(obj) == 'munderover'

def isMathSquareRoot(self, obj):
    """Return True if obj is an msqrt element."""
    return self._getTag(obj) == 'msqrt'

def isMathToken(self, obj):
    """Return True if obj is a MathML token (leaf) element."""
    return self._getTag(obj) in ['mi', 'mn', 'mo', 'mtext', 'ms', 'mspace']
def isMathTopLevel(self, obj):
    """Return True if obj is the top-level math accessible."""
    return obj.getRole() == pyatspi.ROLE_MATH

def getMathAncestor(self, obj):
    """Return the top-level math ancestor of obj, or None if obj is not math."""
    if not self.isMath(obj):
        return None
    if self.isMathTopLevel(obj):
        return obj
    return pyatspi.findAncestor(obj, self.isMathTopLevel)

def getMathDenominator(self, obj):
    """Return the denominator child of a fraction, or None."""
    if not self.isMathFraction(obj):
        return None
    return obj[1]

def getMathNumerator(self, obj):
    """Return the numerator child of a fraction, or None."""
    if not self.isMathFraction(obj):
        return None
    return obj[0]

def getMathRootBase(self, obj):
    """Return the base of a root: first child for mroot, the msqrt itself
    for square roots, else None."""
    if self.isMathNthRoot(obj):
        return obj[0]
    if self.isMathSquareRoot(obj):
        return obj
    return None

def getMathRootIndex(self, obj):
    """Return the index (second child) of an mroot, or None if absent."""
    if not self.isMathNthRoot(obj):
        return None
    try:
        return obj[1]
    except:
        # A malformed mroot may lack its index child.
        pass
    return None

def getMathScriptBase(self, obj):
    """Return the base (first child) of any scripted construct, or None."""
    if self.isMathSubOrSuperScript(obj) \
       or self.isMathUnderOrOverScript(obj) \
       or self.isMathMultiScript(obj):
        return obj[0]
    return None

def getMathScriptSubscript(self, obj):
    """Return the subscript child of an msub/msubsup, or None."""
    if self._isMathSubElement(obj) or self._isMathSubsupElement(obj):
        return obj[1]
    return None

def getMathScriptSuperscript(self, obj):
    """Return the superscript child of an msup/msubsup, or None."""
    if self._isMathSupElement(obj):
        return obj[1]
    if self._isMathSubsupElement(obj):
        return obj[2]
    return None

def getMathScriptUnderscript(self, obj):
    """Return the underscript child of an munder/munderover, or None."""
    if self._isMathUnderElement(obj) or self._isMathUnderOverElement(obj):
        return obj[1]
    return None

def getMathScriptOverscript(self, obj):
    """Return the overscript child of an mover/munderover, or None."""
    if self._isMathOverElement(obj):
        return obj[1]
    if self._isMathUnderOverElement(obj):
        return obj[2]
    return None

def _getMathPrePostScriptSeparator(self, obj):
    """Return the mprescripts child separating post- from prescripts, or None."""
    for child in obj:
        if self._isMathPrePostScriptSeparator(child):
            return child
    return None
def getMathPrescripts(self, obj):
    """Return the children after the mprescripts separator (the prescripts),
    or [] when there is no separator."""
    separator = self._getMathPrePostScriptSeparator(obj)
    if not separator:
        return []
    index = separator.getIndexInParent()
    return [obj[i] for i in range(index+1, obj.childCount)]

def getMathPostscripts(self, obj):
    """Return the children between the base and the mprescripts separator
    (the postscripts). Child 0 is the base and is always excluded."""
    separator = self._getMathPrePostScriptSeparator(obj)
    if separator:
        index = separator.getIndexInParent()
    else:
        index = obj.childCount
    return [obj[i] for i in range(1, index)]

def getMathEnclosures(self, obj):
    """Return the list of notation keywords of an menclose element.

    Defaults to ['longdiv'] per the MathML notation attribute default.
    """
    if not self.isMathEnclose(obj):
        return []
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return []
    return attrs.get('notation', 'longdiv').split()

def getMathFencedSeparators(self, obj):
    """Return the list of separator characters of an mfenced element
    (default ',' per MathML)."""
    if not self.isMathFenced(obj):
        return ['']
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return ['']
    return list(attrs.get('separators', ','))

def getMathFences(self, obj):
    """Return the [open, close] fence characters of an mfenced element
    (defaults '(' and ')' per MathML)."""
    if not self.isMathFenced(obj):
        return ['', '']
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return ['', '']
    return [attrs.get('open', '('), attrs.get('close', ')')]
def getMathNestingLevel(self, obj, test=None):
    """Return the number of ancestors of obj satisfying test (cached).

    By default an ancestor counts when it has the same markup tag as obj.
    """
    cached = self._mathNestingLevel.get(hash(obj))
    if cached is not None:
        return cached
    if not test:
        test = lambda x: self._getTag(x) == self._getTag(obj)
    # Walk up, counting how many matching ancestors we pass through.
    level, ancestor = -1, obj
    while ancestor:
        ancestor = pyatspi.findAncestor(ancestor, test)
        level += 1
    self._mathNestingLevel[hash(obj)] = level
    return level
def filterContentsForPresentation(self, contents, inferLabels=False):
    """Return contents with items that should not be presented filtered out.

    Single-item content lists are returned unchanged so something is always
    presented.
    """
    def _include(x):
        obj, start, end, string = x
        if not obj:
            return False
        # Skip empty text blocks, anchors, zero-size empties, off-screen
        # labels, useless images, and labels for other items in contents.
        if (self.isTextBlockElement(obj) and not string.strip()) \
           or self.isAnchor(obj) \
           or (self.hasNoSize(obj) and not string.strip()) \
           or self.isOffScreenLabel(obj) \
           or self.isUselessImage(obj) \
           or self.isLabellingContents(x, contents):
            return False
        # Inferred labels for radio buttons and checkboxes are always
        # filtered; others only when inferLabels is requested.
        widget = self.isInferredLabelForContents(x, contents)
        alwaysFilter = [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]
        if widget and (inferLabels or widget.getRole() in alwaysFilter):
            return False
        return True
    if len(contents) == 1:
        return contents
    return list(filter(_include, contents))
def needsSeparator(self, lastChar, nextChar):
    """Return True if a separator is needed between lastChar and nextChar."""
    if lastChar.isspace() or nextChar.isspace():
        return False
    opening = ("(", "[", "{", "<")
    closing = (".", "?", "!", ":", ",", ";", ")", "]", "}", ">")
    # Ending punctuation before us, or opening punctuation after us,
    # wants a separator.
    if lastChar in closing or nextChar in opening:
        return True
    # Just after an opener or just before a closer, no separator.
    if lastChar in opening or nextChar in closing:
        return False
    return lastChar.isalnum()
def supportsSelectionAndTable(self, obj):
    """Return True if obj implements both the Table and Selection interfaces."""
    interfaces = pyatspi.listInterfaces(obj)
    return 'Table' in interfaces and 'Selection' in interfaces

def isGridDescendant(self, obj):
    """Return True if obj is inside a selectable table (an ARIA grid). Cached."""
    if not obj:
        return False
    rv = self._isGridDescendant.get(hash(obj))
    if rv is not None:
        return rv
    rv = pyatspi.findAncestor(obj, self.supportsSelectionAndTable) is not None
    self._isGridDescendant[hash(obj)] = rv
    return rv

def isLayoutOnly(self, obj):
    """Return True if obj exists purely for layout purposes (cached).

    Math is never layout-only here; everything else defers to the superclass.
    """
    if not obj:
        return False
    rv = self._isLayoutOnly.get(hash(obj))
    if rv is not None:
        return rv
    if self.isMath(obj):
        rv = False
    else:
        rv = super().isLayoutOnly(obj)
    self._isLayoutOnly[hash(obj)] = rv
    return rv
def isOffScreenLabel(self, obj):
    """Return True if obj labels another object and is positioned off screen
    (negative x or y), a common technique for visually-hidden labels. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isOffScreenLabel.get(hash(obj))
    if rv is not None:
        return rv
    rv = False
    isLabelFor = lambda x: x.getRelationType() == pyatspi.RELATION_LABEL_FOR
    try:
        relationSet = obj.getRelationSet()
    except:
        pass
    else:
        relations = list(filter(isLabelFor, relationSet))
        if relations:
            try:
                text = obj.queryText()
                end = text.characterCount
            except:
                # No text interface: measure the first character cell only.
                end = 1
            x, y, width, height = self.getExtents(obj, 0, end)
            if x < 0 or y < 0:
                rv = True
    self._isOffScreenLabel[hash(obj)] = rv
    return rv
def isDetachedDocument(self, obj):
    """Return True if obj is a document accessible with no parent, i.e. one
    that has become detached from the accessibility tree."""
    docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
    if obj and obj.getRole() in docRoles and obj.parent is None:
        # Fixed typo in the log message ("detatched" -> "detached").
        msg = "WEB: %s is a detached document" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return True
    return False
def iframeForDetachedDocument(self, obj, root=None):
    """Return the internal frame under root which contains the detached
    document obj, or None if no such iframe can be found."""
    root = root or self.documentFrame()
    isIframe = lambda x: x and x.getRole() == pyatspi.ROLE_INTERNAL_FRAME
    try:
        iframes = pyatspi.findAllDescendants(root, isIframe)
    except:
        msg = "WEB: Exception getting descendant iframes of %s" % root
        debug.println(debug.LEVEL_INFO, msg)
        return None
    for iframe in iframes:
        if obj in iframe:
            # We won't change behavior, but we do want to log all bogosity.
            self._isBrokenChildParentTree(obj, iframe)
            msg = "WEB: Returning %s as iframe parent of detached %s" % (iframe, obj)
            debug.println(debug.LEVEL_INFO, msg)
            return iframe
    return None

def _isBrokenChildParentTree(self, child, parent):
    """Return True (and log) when the child/parent relationship is asymmetric:
    child-in-parent and child.parent==parent disagree, which indicates a
    broken accessibility tree in the toolkit."""
    if not (child and parent):
        return False
    try:
        childIsChildOfParent = child in parent
    except:
        msg = "WEB: Exception checking if %s is in %s" % (child, parent)
        debug.println(debug.LEVEL_INFO, msg)
        childIsChildOfParent = False
    else:
        msg = "WEB: %s is child of %s: %s" % (child, parent, childIsChildOfParent)
        debug.println(debug.LEVEL_INFO, msg)
    try:
        parentIsParentOfChild = child.parent == parent
    except:
        msg = "WEB: Exception getting parent of %s" % child
        debug.println(debug.LEVEL_INFO, msg)
        parentIsParentOfChild = False
    else:
        msg = "WEB: %s is parent of %s: %s" % (parent, child, parentIsParentOfChild)
        debug.println(debug.LEVEL_INFO, msg)
    if parentIsParentOfChild != childIsChildOfParent:
        msg = "FAIL: The above is broken and likely needs to be fixed by the toolkit."
        debug.println(debug.LEVEL_INFO, msg)
        return True
    return False
def isInferredLabelForContents(self, content, contents):
    """Return the widget in contents whose inferred label is content, or None."""
    obj, _start, _end, string = content
    candidates = [x[0] for x in contents if self.shouldInferLabelFor(x[0])]
    for candidate in candidates:
        label, sources = self.inferLabelFor(candidate)
        # content labels candidate when it contributed to the inferred text.
        if obj in sources and label.strip() == string.strip():
            return candidate
    return None
def isLabellingContents(self, content, contents):
    """Return the accessible in contents which content labels (via a
    LABEL_FOR relation), or None when content is not such a label.

    Fix: the inner loop previously reused the name 'content', shadowing the
    parameter; the target-matching now uses a distinct variable.
    """
    obj, start, end, string = content
    if obj.getRole() != pyatspi.ROLE_LABEL:
        return None
    relationSet = obj.getRelationSet()
    if not relationSet:
        return None
    for relation in relationSet:
        if relation.getRelationType() != pyatspi.RELATION_LABEL_FOR:
            continue
        for i in range(relation.getNTargets()):
            target = relation.getTarget(i)
            # The label only matters if its target is among the contents
            # being presented.
            if any(c[0] == target for c in contents):
                return target
    return None
def isAnchor(self, obj):
    """Return True if obj is a non-interactive link target (a named anchor):
    a link with no focusability, no actions, and no text. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isAnchor.get(hash(obj))
    if rv is not None:
        return rv
    rv = False
    if obj.getRole() == pyatspi.ROLE_LINK \
       and not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not 'Action' in pyatspi.listInterfaces(obj) \
       and not self.queryNonEmptyText(obj):
        rv = True
    self._isAnchor[hash(obj)] = rv
    return rv
def isChromeAlert(self, obj):
    """Return True if obj is an alert belonging to the browser chrome,
    i.e. an alert which is not part of the document content."""
    if not (obj and obj.getRole() == pyatspi.ROLE_ALERT):
        return False
    return not self.inDocumentContent(obj)

def isTopLevelChromeAlert(self, obj):
    """Return True if obj is a chrome alert parented directly by a frame."""
    return self.isChromeAlert(obj) \
        and obj.parent.getRole() == pyatspi.ROLE_FRAME
def isClickableElement(self, obj):
    """Return True if obj is not focusable and not a focus-mode widget, but
    exposes a 'click' action (e.g. an element with a JS click handler). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isClickableElement.get(hash(obj))
    if rv is not None:
        return rv
    rv = False
    if not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not self.isFocusModeWidget(obj):
        try:
            action = obj.queryAction()
            names = [action.getName(i) for i in range(action.nActions)]
        except NotImplementedError:
            rv = False
        else:
            rv = "click" in names
    self._isClickableElement[hash(obj)] = rv
    return rv

def isLandmark(self, obj):
    """Return True if obj is a landmark, either via its role or via an
    ARIA landmark value in its xml-roles attribute. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isLandmark.get(hash(obj))
    if rv is not None:
        return rv
    if obj.getRole() == pyatspi.ROLE_LANDMARK:
        rv = True
    else:
        try:
            attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
        except:
            attrs = {}
        rv = attrs.get('xml-roles') in settings.ariaLandmarks
    self._isLandmark[hash(obj)] = rv
    return rv
def isLiveRegion(self, obj):
    """Return True if obj exposes the 'container-live' object attribute,
    i.e. is an ARIA live region. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isLiveRegion.get(hash(obj))
    if rv is not None:
        return rv
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        attrs = {}
    rv = 'container-live' in attrs
    self._isLiveRegion[hash(obj)] = rv
    return rv

def isLink(self, obj):
    """Return True if obj is a real link (not an anchor), or the text child
    which renders a link's name. Cached."""
    if not obj:
        return False
    rv = self._isLink.get(hash(obj))
    if rv is not None:
        return rv
    role = obj.getRole()
    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        if role == pyatspi.ROLE_STATIC:
            role = pyatspi.ROLE_TEXT
    except:
        pass
    if role == pyatspi.ROLE_LINK and not self.isAnchor(obj):
        rv = True
    elif role == pyatspi.ROLE_TEXT \
         and obj.parent.getRole() == pyatspi.ROLE_LINK \
         and obj.name and obj.name == obj.parent.name:
        rv = True
    self._isLink[hash(obj)] = rv
    return rv
def isNonNavigablePopup(self, obj):
    """Return True if obj is a popup which cannot be caret-navigated
    (currently: tooltips). Cached per object.

    Fix: rv previously defaulted to None when the role was not TOOL_TIP, so
    negative results were cached as None (never hitting the cache on later
    calls) and returned as None rather than False, unlike the sibling
    predicates in this class.
    """
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isNonNavigablePopup.get(hash(obj))
    if rv is not None:
        return rv
    rv = obj.getRole() == pyatspi.ROLE_TOOL_TIP
    self._isNonNavigablePopup[hash(obj)] = rv
    return rv
def hasUselessCanvasDescendant(self, obj):
    """Return True if obj contains at least one canvas descendant deemed a
    useless image. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._hasUselessCanvasDescendant.get(hash(obj))
    if rv is not None:
        return rv
    isCanvas = lambda x: x and x.getRole() == pyatspi.ROLE_CANVAS
    try:
        canvases = pyatspi.findAllDescendants(obj, isCanvas)
    except:
        msg = "WEB: Exception getting descendant canvases of %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        rv = False
    else:
        rv = len(list(filter(self.isUselessImage, canvases))) > 0
    self._hasUselessCanvasDescendant[hash(obj)] = rv
    return rv

def isUselessImage(self, obj):
    """Return True if obj is an image/canvas conveying no information:
    no name, description, children, actions, link target, image description,
    meaningful size, or text. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isUselessImage.get(hash(obj))
    if rv is not None:
        return rv
    # Assume useless, then look for any evidence of usefulness.
    rv = True
    if obj.getRole() not in [pyatspi.ROLE_IMAGE, pyatspi.ROLE_CANVAS]:
        rv = False
    if rv and (obj.name or obj.description or obj.childCount):
        rv = False
    if rv and (self.isClickableElement(obj) or self.hasLongDesc(obj)):
        rv = False
    if rv and obj.parent.getRole() == pyatspi.ROLE_LINK:
        # An image inside a real (non-javascript) link is the link's content.
        uri = self.uri(obj.parent)
        if uri and not uri.startswith('javascript'):
            rv = False
    if rv and 'Image' in pyatspi.listInterfaces(obj):
        image = obj.queryImage()
        if image.imageDescription:
            rv = False
        else:
            # Anything larger than 25x25 is presumed to be meaningful content.
            width, height = image.getImageSize()
            if width > 25 and height > 25:
                rv = False
    if rv and 'Text' in pyatspi.listInterfaces(obj):
        rv = self.queryNonEmptyText(obj) is None
    self._isUselessImage[hash(obj)] = rv
    return rv
def isParentOfNullChild(self, obj):
    """Return True if obj claims children but its first child is None,
    indicating a broken accessibility tree. Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._isParentOfNullChild.get(hash(obj))
    if rv is not None:
        return rv
    rv = False
    try:
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        childCount = 0
    if childCount and obj[0] is None:
        msg = "ERROR: %s reports %i children, but obj[0] is None" % (obj, childCount)
        debug.println(debug.LEVEL_INFO, msg)
        rv = True
    self._isParentOfNullChild[hash(obj)] = rv
    return rv

def hasExplicitName(self, obj):
    """Return True if obj's accessible name was explicitly set by the author
    (object attribute explicit-name == 'true'). Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._hasExplicitName.get(hash(obj))
    if rv is not None:
        return rv
    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        attrs = {}
    rv = attrs.get('explicit-name') == 'true'
    self._hasExplicitName[hash(obj)] = rv
    return rv
def hasLongDesc(self, obj):
    """Return True if obj exposes a 'showlongdesc' action (an HTML longdesc).
    Cached."""
    if not (obj and self.inDocumentContent(obj)):
        return False
    rv = self._hasLongDesc.get(hash(obj))
    if rv is not None:
        return rv
    try:
        action = obj.queryAction()
    except NotImplementedError:
        rv = False
    else:
        names = [action.getName(i) for i in range(action.nActions)]
        rv = "showlongdesc" in names
    self._hasLongDesc[hash(obj)] = rv
    return rv

def inferLabelFor(self, obj):
    """Return (label, sources) inferred for obj by the label-inference
    heuristics, or (None, []) when inference should not be attempted. Cached."""
    if not self.shouldInferLabelFor(obj):
        return None, []
    rv = self._inferredLabels.get(hash(obj))
    if rv is not None:
        return rv
    rv = self._script.labelInference.infer(obj, False)
    self._inferredLabels[hash(obj)] = rv
    return rv
def shouldInferLabelFor(self, obj):
    """Return True if we should attempt heuristic label inference for obj:
    an unnamed, unlabelled form widget, outside of SayAll and (mostly)
    outside of caret navigation."""
    try:
        name = obj.name
    except:
        msg = "WEB: Exception getting name for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
    else:
        # An author-provided name makes inference unnecessary.
        if name:
            return False
    if self._script.inSayAll():
        return False
    if not self.inDocumentContent():
        return False
    try:
        role = obj.getRole()
    except:
        msg = "WEB: Exception getting role for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False
    # TODO - JD: This is private.
    # During caret navigation, only radio buttons and checkboxes get
    # inferred labels.
    if self._script._lastCommandWasCaretNav \
       and role not in [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]:
        return False
    roles = [pyatspi.ROLE_CHECK_BOX,
             pyatspi.ROLE_COMBO_BOX,
             pyatspi.ROLE_ENTRY,
             pyatspi.ROLE_LIST_BOX,
             pyatspi.ROLE_PASSWORD_TEXT,
             pyatspi.ROLE_RADIO_BUTTON]
    if role not in roles:
        return False
    if self.displayedLabel(obj):
        return False
    return True
def isSpinnerEntry(self, obj):
    """Return True if obj is the editable entry inside a spin button."""
    if not self.inDocumentContent(obj):
        return False
    # TODO - JD: Ideally, things that look and act like spinners (such number inputs)
    # would look and act like platform native spinners. That's not true for Gecko. And
    # the only thing that's funkier is what we get from WebKitGtk. Try to at least get
    # the two engines into alignment before migrating Epiphany support to the web script.
    if obj.getState().contains(pyatspi.STATE_EDITABLE) \
       and obj.parent.getRole() == pyatspi.ROLE_SPIN_BUTTON:
        return True
    return False

def eventIsSpinnerNoise(self, event):
    """Return True if event is a text change in a spinner entry caused by
    the Up/Down arrows, which is already presented as a value change."""
    if event.type.startswith("object:text-changed") and self.isSpinnerEntry(event.source):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey in ["Down", "Up"]:
            return True
    return False

def treatEventAsSpinnerValueChange(self, event):
    """Return True if event is a caret move in the focused spinner entry
    triggered by Up/Down, i.e. really a spinner value change."""
    if event.type.startswith("object:text-caret-moved") and self.isSpinnerEntry(event.source):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey in ["Down", "Up"]:
            obj, offset = self.getCaretContext()
            return event.source == obj
    return False

def eventIsStatusBarNoise(self, event):
    """Return True if event is a text or name change on a chrome status bar,
    which should not be presented."""
    if self.inDocumentContent(event.source):
        return False
    eType = event.type
    if eType.startswith("object:text-") or eType.endswith("accessible-name"):
        try:
            role = event.source.getRole()
        except:
            msg = "WEB: Exception getting role for %s" % event.source
            debug.println(debug.LEVEL_INFO, msg)
        else:
            return role == pyatspi.ROLE_STATUS_BAR
    return False
def eventIsAutocompleteNoise(self, event):
    """Return True if event is text noise from autocomplete selection inside
    document content (navigating suggestions with Up/Down)."""
    if not self.inDocumentContent(event.source):
        return False
    isListBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_LIST_BOX
    isMenuItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_MENU
    isComboBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_COMBO_BOX
    if event.source.getState().contains(pyatspi.STATE_EDITABLE) \
       and event.type.startswith("object:text-"):
        obj, offset = self.getCaretContext()
        if isListBoxItem(obj) or isMenuItem(obj):
            return True
        if obj == event.source and isComboBoxItem(obj):
            lastKey, mods = self.lastKeyAndModifiers()
            if lastKey in ["Down", "Up"]:
                return True
    return False

def eventIsChromeAutocompleteNoise(self, event):
    """Return True if event is selection noise from the browser chrome's own
    autocomplete popup (e.g. the URL bar) while an entry has focus."""
    if self.inDocumentContent(event.source):
        return False
    selection = ["object:selection-changed", "object:state-changed:selected"]
    if not event.type in selection:
        return False
    try:
        focusRole = orca_state.locusOfFocus.getRole()
        focusState = orca_state.locusOfFocus.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % orca_state.locusOfFocus
        debug.println(debug.LEVEL_INFO, msg)
        return False
    try:
        role = event.source.getRole()
    except:
        msg = "WEB: Exception getting role for %s" % event.source
        debug.println(debug.LEVEL_INFO, msg)
        return False
    # Menu selection while typing in a focused entry is noise; Up/Down means
    # the user is deliberately navigating the suggestions.
    if role in [pyatspi.ROLE_MENU, pyatspi.ROLE_MENU_ITEM] \
       and focusRole == pyatspi.ROLE_ENTRY \
       and focusState.contains(pyatspi.STATE_FOCUSED):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey not in ["Down", "Up"]:
            return True
    return False
def textEventIsDueToInsertion(self, event):
    """Return True if event is a text event in the focused editable caused by
    the user typing a printable key."""
    if not event.type.startswith("object:text-"):
        return False
    if not self.inDocumentContent(event.source) \
       or not event.source.getState().contains(pyatspi.STATE_EDITABLE) \
       or not event.source == orca_state.locusOfFocus:
        return False
    if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
        inputEvent = orca_state.lastNonModifierKeyEvent
        return inputEvent and inputEvent.isPrintableKey()
    return False

def textEventIsForNonNavigableTextObject(self, event):
    """Return True if event's source is a text object we treat as a whole
    rather than navigating within."""
    if not event.type.startswith("object:text-"):
        return False
    return self._treatTextObjectAsWhole(event.source)

# TODO - JD: As an experiment, we're stopping these at the event manager.
# If that works, this can be removed.
def eventIsEOCAdded(self, event):
    """Return True if event is the insertion of an embedded object character."""
    if not self.inDocumentContent(event.source):
        return False
    if event.type.startswith("object:text-changed:insert"):
        return self.EMBEDDED_OBJECT_CHARACTER in event.any_data
    return False

def caretMovedToSamePageFragment(self, event):
    """Return True if the caret move was caused by following a link to a
    fragment within the current page (link URI equals the document URI)."""
    if not event.type.startswith("object:text-caret-moved"):
        return False
    linkURI = self.uri(orca_state.locusOfFocus)
    docURI = self.documentFrameURI()
    if linkURI == docURI:
        return True
    return False
    @staticmethod
    def getHyperlinkRange(obj):
        """Return (startIndex, endIndex) of obj's hyperlink range within its
        parent's text; (-1, -1) if the interface is missing or errors out.

        Arguments:
        - obj: the accessible whose hyperlink range is sought
        """

        try:
            hyperlink = obj.queryHyperlink()
            start, end = hyperlink.startIndex, hyperlink.endIndex
        except NotImplementedError:
            msg = "WEB: %s does not implement the hyperlink interface" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return -1, -1
        except:
            msg = "WEB: Exception getting hyperlink indices for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return -1, -1

        return start, end
def characterOffsetInParent(self, obj):
start, end, length = self._rangeInParentWithLength(obj)
return start
def _rangeInParentWithLength(self, obj):
if not obj:
return -1, -1, 0
text = self.queryNonEmptyText(obj.parent)
if not text:
return -1, -1, 0
start, end = self.getHyperlinkRange(obj)
return start, end, text.characterCount
    @staticmethod
    def getChildIndex(obj, offset):
        """Return the index of the child at character offset in obj's
        hypertext; -1 if the interface is missing or errors out.

        Arguments:
        - obj: the accessible containing the offset
        - offset: the character offset of interest
        """

        try:
            hypertext = obj.queryHypertext()
        except NotImplementedError:
            msg = "WEB: %s does not implement the hypertext interface" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return -1
        except:
            msg = "WEB: Exception querying hypertext interface for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return -1

        return hypertext.getLinkIndex(offset)
def getChildAtOffset(self, obj, offset):
index = self.getChildIndex(obj, offset)
if index == -1:
return None
try:
child = obj[index]
except:
return None
return child
    def hasNoSize(self, obj):
        """Return True if obj reports zero width or height. Results are
        cached; extent-query failures are treated as having no size.

        Arguments:
        - obj: an accessible in document content
        """

        if not (obj and self.inDocumentContent(obj)):
            return False

        rv = self._hasNoSize.get(hash(obj))
        if rv is not None:
            return rv

        try:
            extents = obj.queryComponent().getExtents(0)
        except:
            msg = "WEB: Exception getting extents for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            # If we cannot get extents, assume the object is sizeless.
            rv = True
        else:
            rv = not (extents.width and extents.height)
            if rv:
                msg = "WEB: %s has no size %s" % (obj, extents)
                debug.println(debug.LEVEL_INFO, msg)

        self._hasNoSize[hash(obj)] = rv
        return rv
    def doNotDescendForCaret(self, obj):
        """Return True if caret navigation should not descend into obj's
        children (e.g. widgets whose children are presented as a whole).

        Arguments:
        - obj: the accessible being considered as a navigation target
        """

        if not obj or self.isZombie(obj):
            return True

        try:
            childCount = obj.childCount
        except:
            msg = "WEB: Exception getting childCount for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return True

        if not childCount or self.isParentOfNullChild(obj):
            return True

        if self.isHidden(obj) or self.isOffScreenLabel(obj):
            return True

        role = obj.getRole()
        # Links with an explicit name (or a useless canvas child) are
        # presented by name; their children add nothing.
        if role == pyatspi.ROLE_LINK \
           and (self.hasExplicitName(obj) or self.hasUselessCanvasDescendant(obj)):
            return True

        if self.isTextBlockElement(obj):
            return False

        doNotDescend = [pyatspi.ROLE_COMBO_BOX,
                        pyatspi.ROLE_LIST_BOX,
                        pyatspi.ROLE_MENU_BAR,
                        pyatspi.ROLE_MENU,
                        pyatspi.ROLE_MENU_ITEM,
                        pyatspi.ROLE_PUSH_BUTTON,
                        pyatspi.ROLE_TOGGLE_BUTTON,
                        pyatspi.ROLE_TOOL_BAR,
                        pyatspi.ROLE_TOOL_TIP,
                        pyatspi.ROLE_TREE,
                        pyatspi.ROLE_TREE_TABLE]
        return role in doNotDescend
    def _searchForCaretContext(self, obj):
        """Descend from obj, following each text object's caretOffset into
        embedded children, to locate the deepest caret context. The result
        is normalized via findNextCaretInOrder. Returns (None, -1) if no
        context can be found.

        Arguments:
        - obj: the accessible (typically a document frame) to search from
        """

        contextObj, contextOffset = None, -1
        while obj:
            try:
                offset = obj.queryText().caretOffset
            except:
                obj = None
            else:
                contextObj, contextOffset = obj, offset
                childIndex = self.getChildIndex(obj, offset)
                if childIndex >= 0 and obj.childCount:
                    obj = obj[childIndex]
                else:
                    break

        if contextObj:
            # Back up one character so findNextCaretInOrder lands on the
            # context itself rather than skipping past it.
            return self.findNextCaretInOrder(contextObj, max(-1, contextOffset - 1))

        return None, -1
    def _getCaretContextViaLocusOfFocus(self):
        """Fallback caret context: the locus of focus and its caret offset
        (0 if it lacks a text interface, -1 on any other error)."""

        obj = orca_state.locusOfFocus
        try:
            offset = obj.queryText().caretOffset
        except NotImplementedError:
            offset = 0
        except:
            offset = -1

        return obj, offset
    def getCaretContext(self, documentFrame=None):
        """Return the (obj, offset) caret context for documentFrame,
        computing and caching it on first use.

        Arguments:
        - documentFrame: the document of interest; defaults to the current one
        """

        if not documentFrame or self.isZombie(documentFrame):
            documentFrame = self.documentFrame()
        if not documentFrame:
            return self._getCaretContextViaLocusOfFocus()

        # NOTE: a cached (None, -1) is a truthy tuple and is returned as-is.
        context = self._caretContexts.get(hash(documentFrame.parent))
        if context:
            return context

        obj, offset = self._searchForCaretContext(documentFrame)
        self.setCaretContext(obj, offset, documentFrame)
        return obj, offset
def clearCaretContext(self, documentFrame=None):
self.clearContentCache()
documentFrame = documentFrame or self.documentFrame()
if not documentFrame:
return
parent = documentFrame.parent
self._caretContexts.pop(hash(parent), None)
def setCaretContext(self, obj=None, offset=-1, documentFrame=None):
documentFrame = documentFrame or self.documentFrame()
if not documentFrame:
return
parent = documentFrame.parent
self._caretContexts[hash(parent)] = obj, offset
    def findFirstCaretContext(self, obj, offset):
        """Return the first valid (obj, offset) caret context at or after
        the given position, descending into children and skipping embedded
        object characters as needed. Returns (None, -1) on failure.

        Arguments:
        - obj: the accessible to start from
        - offset: the character offset to start from
        """

        try:
            role = obj.getRole()
        except:
            msg = "WEB: Exception getting first caret context for %s %i" % (obj, offset)
            debug.println(debug.LEVEL_INFO, msg)
            return None, -1

        # Containers whose own text is not the right place for the caret;
        # descend into the first child instead.
        lookInChild = [pyatspi.ROLE_LIST,
                       pyatspi.ROLE_INTERNAL_FRAME,
                       pyatspi.ROLE_TABLE,
                       pyatspi.ROLE_TABLE_ROW]
        if role in lookInChild and obj.childCount and not self.treatAsDiv(obj):
            msg = "WEB: First caret context for %s, %i will look in child %s" % (obj, offset, obj[0])
            debug.println(debug.LEVEL_INFO, msg)
            return self.findFirstCaretContext(obj[0], 0)

        text = self.queryNonEmptyText(obj)
        if not text:
            # Textless blocks and anchors: try the next context in order.
            if self.isTextBlockElement(obj) or self.isAnchor(obj):
                nextObj, nextOffset = self.nextContext(obj, offset)
                if nextObj:
                    msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, nextObj, nextOffset)
                    debug.println(debug.LEVEL_INFO, msg)
                    return nextObj, nextOffset

            msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, 0)
            debug.println(debug.LEVEL_INFO, msg)
            return obj, 0

        if offset >= text.characterCount:
            msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, text.characterCount)
            debug.println(debug.LEVEL_INFO, msg)
            return obj, text.characterCount

        allText = text.getText(0, -1)
        offset = max (0, offset)
        if allText[offset] != self.EMBEDDED_OBJECT_CHARACTER:
            msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, offset)
            debug.println(debug.LEVEL_INFO, msg)
            return obj, offset

        # The offset holds an embedded object character: recurse into the
        # corresponding child.
        child = self.getChildAtOffset(obj, offset)
        if not child:
            msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, None, -1)
            debug.println(debug.LEVEL_INFO, msg)
            return None, -1

        return self.findFirstCaretContext(child, 0)
    def findNextCaretInOrder(self, obj=None, offset=-1):
        """Return the next valid (obj, offset) caret position after the
        given context in document order; (None, -1) if there is none.

        Arguments:
        - obj: the accessible to start from; defaults to the caret context
        - offset: the character offset to start from
        """

        if not obj:
            obj, offset = self.getCaretContext()

        if not obj or not self.inDocumentContent(obj):
            return None, -1

        if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
            text = self.queryNonEmptyText(obj)
            if text:
                # Scan forward for the next real character, recursing into
                # children found at embedded object characters.
                allText = text.getText(0, -1)
                for i in range(offset + 1, len(allText)):
                    child = self.getChildAtOffset(obj, i)
                    if child and not self.isZombie(child) and not self.isAnchor(child) \
                       and not self.isUselessImage(child):
                        return self.findNextCaretInOrder(child, -1)
                    if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                        return obj, i
            elif not self.doNotDescendForCaret(obj) and obj.childCount:
                return self.findNextCaretInOrder(obj[0], -1)
            elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj) \
                 and not self.isUselessImage(obj) and not self.isParentOfNullChild(obj):
                return obj, 0

        # If we're here, start looking up the tree, up to the document.
        documentFrame = self.documentFrame()
        if self.isSameObject(obj, documentFrame):
            return None, -1

        while obj.parent:
            if self.isDetachedDocument(obj.parent):
                obj = self.iframeForDetachedDocument(obj.parent)
                continue

            parent = obj.parent
            if self.isZombie(parent):
                # Try to find a live replacement for the defunct parent.
                replicant = self.findReplicant(self.documentFrame(), parent)
                if replicant and not self.isZombie(replicant):
                    parent = replicant
                elif parent.parent:
                    obj = parent
                    continue
                else:
                    break

            start, end, length = self._rangeInParentWithLength(obj)
            if start + 1 == end and 0 <= start < end <= length:
                return self.findNextCaretInOrder(parent, start)

            index = obj.getIndexInParent() + 1
            try:
                parentChildCount = parent.childCount
            except:
                msg = "WEB: Exception getting childCount for %s" % parent
                debug.println(debug.LEVEL_INFO, msg)
            else:
                if 0 < index < parentChildCount:
                    return self.findNextCaretInOrder(parent[index], -1)
            obj = parent

        return None, -1
    def findPreviousCaretInOrder(self, obj=None, offset=-1):
        """Return the previous valid (obj, offset) caret position before the
        given context in document order; (None, -1) if there is none.

        Arguments:
        - obj: the accessible to start from; defaults to the caret context
        - offset: the character offset to start from (-1 means the end)
        """

        if not obj:
            obj, offset = self.getCaretContext()

        if not obj or not self.inDocumentContent(obj):
            return None, -1

        if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
            text = self.queryNonEmptyText(obj)
            if text:
                # Scan backward for the previous real character, recursing
                # into children found at embedded object characters.
                allText = text.getText(0, -1)
                if offset == -1 or offset > len(allText):
                    offset = len(allText)
                for i in range(offset - 1, -1, -1):
                    child = self.getChildAtOffset(obj, i)
                    if child and not self.isZombie(child) and not self.isAnchor(child) \
                       and not self.isUselessImage(child):
                        return self.findPreviousCaretInOrder(child, -1)
                    if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                        return obj, i
            elif not self.doNotDescendForCaret(obj) and obj.childCount:
                return self.findPreviousCaretInOrder(obj[obj.childCount - 1], -1)
            elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj) \
                 and not self.isUselessImage(obj) and not self.isParentOfNullChild(obj):
                return obj, 0

        # If we're here, start looking up the tree, up to the document.
        documentFrame = self.documentFrame()
        if self.isSameObject(obj, documentFrame):
            return None, -1

        while obj.parent:
            if self.isDetachedDocument(obj.parent):
                obj = self.iframeForDetachedDocument(obj.parent)
                continue

            parent = obj.parent
            if self.isZombie(parent):
                # Try to find a live replacement for the defunct parent.
                replicant = self.findReplicant(self.documentFrame(), parent)
                if replicant and not self.isZombie(replicant):
                    parent = replicant
                elif parent.parent:
                    obj = parent
                    continue
                else:
                    break

            start, end, length = self._rangeInParentWithLength(obj)
            if start + 1 == end and 0 <= start < end <= length:
                return self.findPreviousCaretInOrder(parent, start)

            index = obj.getIndexInParent() - 1
            try:
                parentChildCount = parent.childCount
            except:
                msg = "WEB: Exception getting childCount for %s" % parent
                debug.println(debug.LEVEL_INFO, msg)
            else:
                if 0 <= index < parentChildCount:
                    return self.findPreviousCaretInOrder(parent[index], -1)
            obj = parent

        return None, -1
def handleAsLiveRegion(self, event):
if not _settingsManager.getSetting('inferLiveRegions'):
return False
return self.isLiveRegion(event.source)
def getPageSummary(self, obj):
docframe = self.documentFrame(obj)
col = docframe.queryCollection()
headings = 0
forms = 0
tables = 0
vlinks = 0
uvlinks = 0
percentRead = None
stateset = pyatspi.StateSet()
roles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK, pyatspi.ROLE_TABLE,
pyatspi.ROLE_FORM]
rule = col.createMatchRule(stateset.raw(), col.MATCH_NONE,
"", col.MATCH_NONE,
roles, col.MATCH_ANY,
"", col.MATCH_NONE,
False)
matches = col.getMatches(rule, col.SORT_ORDER_CANONICAL, 0, True)
col.freeMatchRule(rule)
for obj in matches:
role = obj.getRole()
if role == pyatspi.ROLE_HEADING:
headings += 1
elif role == pyatspi.ROLE_FORM:
forms += 1
elif role == pyatspi.ROLE_TABLE and not self.isLayoutOnly(obj):
tables += 1
elif role == pyatspi.ROLE_LINK:
if obj.getState().contains(pyatspi.STATE_VISITED):
vlinks += 1
else:
uvlinks += 1
return [headings, forms, tables, vlinks, uvlinks, percentRead]
# Treat role embedded as a focus-mode widget
# Orca
#
# Copyright 2010 Joanmarie Diggs.
# Copyright 2014-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2010 Joanmarie Diggs." \
"Copyright (c) 2014-2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import re
import urllib
from orca import debug
from orca import input_event
from orca import orca
from orca import orca_state
from orca import script_utilities
from orca import settings
from orca import settings_manager
_settingsManager = settings_manager.getManager()
class Utilities(script_utilities.Utilities):
    def __init__(self, script):
        """Initialize the web-script utilities.

        Arguments:
        - script: the script with which these utilities are associated
        """

        super().__init__(script)

        # Per-object caches, keyed by hash(obj). Cleared en masse by
        # clearCachedObjects() / clearContentCache().
        self._currentAttrs = {}
        self._caretContexts = {}
        self._inDocumentContent = {}
        self._isTextBlockElement = {}
        self._isGridDescendant = {}
        self._isLayoutOnly = {}
        self._isMath = {}
        self._mathNestingLevel = {}
        self._isOffScreenLabel = {}
        self._hasExplicitName = {}
        self._hasNoSize = {}
        self._hasLongDesc = {}
        self._hasUselessCanvasDescendant = {}
        self._isClickableElement = {}
        self._isAnchor = {}
        self._isLandmark = {}
        self._isLiveRegion = {}
        self._isLink = {}
        self._isNonNavigablePopup = {}
        self._isNonEntryTextWidget = {}
        self._isUselessImage = {}
        self._isParentOfNullChild = {}
        self._inferredLabels = {}
        self._roleDescription = {}
        self._text = {}
        self._tag = {}
        self._treatAsDiv = {}

        # Caches of the contents at the current caret position.
        self._currentObjectContents = None
        self._currentSentenceContents = None
        self._currentLineContents = None
        self._currentWordContents = None
        self._currentCharacterContents = None

        # Maps a container role to the child roles it may legitimately hold.
        self._validChildRoles = {pyatspi.ROLE_LIST: [pyatspi.ROLE_LIST_ITEM]}
def _cleanupContexts(self):
toRemove = []
for key, [obj, offset] in self._caretContexts.items():
if self.isZombie(obj):
toRemove.append(key)
for key in toRemove:
self._caretContexts.pop(key, None)
def clearCachedObjects(self):
debug.println(debug.LEVEL_INFO, "WEB: cleaning up cached objects")
self._inDocumentContent = {}
self._isTextBlockElement = {}
self._isGridDescendant = {}
self._isLayoutOnly = {}
self._isMath = {}
self._mathNestingLevel = {}
self._isOffScreenLabel = {}
self._hasExplicitName = {}
self._hasNoSize = {}
self._hasLongDesc = {}
self._hasUselessCanvasDescendant = {}
self._isClickableElement = {}
self._isAnchor = {}
self._isLandmark = {}
self._isLiveRegion = {}
self._isLink = {}
self._isNonNavigablePopup = {}
self._isNonEntryTextWidget = {}
self._isUselessImage = {}
self._isParentOfNullChild = {}
self._inferredLabels = {}
self._roleDescription = {}
self._tag = {}
self._treatAsDiv = {}
self._cleanupContexts()
    def clearContentCache(self):
        """Reset the caches tied to the current content and caret position."""

        self._currentObjectContents = None
        self._currentSentenceContents = None
        self._currentLineContents = None
        self._currentWordContents = None
        self._currentCharacterContents = None
        self._currentAttrs = {}
        self._text = {}
def isDocument(self, obj):
roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB, pyatspi.ROLE_EMBEDDED]
try:
rv = obj.getRole() in roles
except:
msg = "WEB: Exception getting role for %s" % obj
debug.println(debug.LEVEL_INFO, msg)
rv = False
return rv
    def inDocumentContent(self, obj=None):
        """Return True if obj (or the locus of focus) is inside web
        document content. Results are cached per object.

        Arguments:
        - obj: the accessible to test; defaults to the locus of focus
        """

        if not obj:
            obj = orca_state.locusOfFocus
        if self.isDocument(obj):
            return True

        rv = self._inDocumentContent.get(hash(obj))
        if rv is not None:
            return rv

        document = self.getDocumentForObject(obj)
        rv = document is not None
        self._inDocumentContent[hash(obj)] = rv
        return rv
    def getDocumentForObject(self, obj):
        """Return the document containing obj (obj itself if it is a
        document), or None.

        Arguments:
        - obj: the accessible whose containing document is sought
        """

        if not obj:
            return None

        if self.isDocument(obj):
            msg = "WEB: %s is document" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return obj

        document = pyatspi.findAncestor(obj, self.isDocument)
        msg = "WEB: Document for %s is %s" % (obj, document)
        debug.println(debug.LEVEL_INFO, msg)
        return document
def _getDocumentsEmbeddedBy(self, frame):
isEmbeds = lambda r: r.getRelationType() == pyatspi.RELATION_EMBEDS
relations = list(filter(isEmbeds, frame.getRelationSet()))
if not relations:
return []
relation = relations[0]
targets = [relation.getTarget(i) for i in range(relation.getNTargets())]
if not targets:
return []
roles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
isDocument = lambda x: x and x.getRole() in roles
return list(filter(isDocument, targets))
    def documentFrame(self, obj=None):
        """Return the active document frame: preferably the single showing
        document embedded by the active window; otherwise the document
        ancestor of obj or of the locus of focus.

        Arguments:
        - obj: optional accessible used for the ancestor-based fallback
        """

        isShowing = lambda x: x and x.getState().contains(pyatspi.STATE_SHOWING)

        try:
            windows = [child for child in self._script.app]
        except:
            msg = "WEB: Exception getting children for app %s" % self._script.app
            debug.println(debug.LEVEL_INFO, msg)
            windows = []

        # Narrow the search to the active window when it belongs to this app.
        if orca_state.activeWindow in windows:
            windows = [orca_state.activeWindow]

        for window in windows:
            documents = self._getDocumentsEmbeddedBy(window)
            documents = list(filter(isShowing, documents))
            # Only trust the result when it is unambiguous.
            if len(documents) == 1:
                return documents[0]

        return self.getDocumentForObject(obj or orca_state.locusOfFocus)
def documentFrameURI(self):
documentFrame = self.documentFrame()
if documentFrame and not self.isZombie(documentFrame):
document = documentFrame.queryDocument()
return document.getAttributeValue('DocURL')
return None
    def setCaretPosition(self, obj, offset):
        """Move the caret (and, when appropriate, the focus) to the given
        position, updating the cached caret context and the presentation
        mode along the way.

        Arguments:
        - obj: the accessible to position the caret in
        - offset: the character offset for the caret
        """

        if self._script.flatReviewContext:
            self._script.toggleFlatReviewMode()
        obj, offset = self.findFirstCaretContext(obj, offset)
        self.setCaretContext(obj, offset, documentFrame=None)
        if self._script.focusModeIsSticky():
            return

        try:
            state = obj.getState()
        except:
            msg = "WEB: Exception getting state for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return

        # Update the locus of focus quietly; presentation happens elsewhere.
        orca.setLocusOfFocus(None, obj, notifyScript=False)
        if state.contains(pyatspi.STATE_FOCUSABLE):
            try:
                obj.queryComponent().grabFocus()
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return
            except:
                msg = "WEB: Exception grabbing focus on %s" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return

        text = self.queryNonEmptyText(obj)
        if text:
            text.setCaretOffset(offset)

        # Switch between browse and focus mode if the new object calls for it.
        if self._script.useFocusMode(obj) != self._script.inFocusMode():
            self._script.togglePresentationMode(None)

        obj.clearCache()

        # TODO - JD: This is private.
        self._script._saveFocusedObjectInfo(obj)
    def getNextObjectInDocument(self, obj, documentFrame):
        """Return the object following obj in document order, honoring
        FLOWS_TO relations; None if there is none.

        Arguments:
        - obj: the current accessible
        - documentFrame: the containing document frame
        """

        if not obj:
            return None

        # An explicit FLOWS_TO relation overrides tree order.
        for relation in obj.getRelationSet():
            if relation.getRelationType() == pyatspi.RELATION_FLOWS_TO:
                return relation.getTarget(0)

        if obj == documentFrame:
            obj, offset = self.getCaretContext(documentFrame)
            for child in documentFrame:
                if self.characterOffsetInParent(child) > offset:
                    return child

        if obj and obj.childCount:
            return obj[0]

        # No children: climb until a next sibling is found.
        nextObj = None
        while obj and not nextObj:
            index = obj.getIndexInParent() + 1
            if 0 < index < obj.parent.childCount:
                nextObj = obj.parent[index]
            elif obj.parent != documentFrame:
                obj = obj.parent
            else:
                break

        return nextObj
    def getPreviousObjectInDocument(self, obj, documentFrame):
        """Return the object preceding obj in document order, honoring
        FLOWS_FROM relations.

        Arguments:
        - obj: the current accessible
        - documentFrame: the containing document frame
        """

        if not obj:
            return None

        # An explicit FLOWS_FROM relation overrides tree order.
        for relation in obj.getRelationSet():
            if relation.getRelationType() == pyatspi.RELATION_FLOWS_FROM:
                return relation.getTarget(0)

        if obj == documentFrame:
            obj, offset = self.getCaretContext(documentFrame)
            for child in documentFrame:
                if self.characterOffsetInParent(child) < offset:
                    return child

        index = obj.getIndexInParent() - 1
        if not 0 <= index < obj.parent.childCount:
            obj = obj.parent
            index = obj.getIndexInParent() - 1

        # Descend to the deepest last child of the previous sibling.
        previousObj = obj.parent[index]
        while previousObj and previousObj.childCount:
            previousObj = previousObj[previousObj.childCount - 1]

        return previousObj
def getTopOfFile(self):
return self.findFirstCaretContext(self.documentFrame(), 0)
def getBottomOfFile(self):
obj = self.getLastObjectInDocument(self.documentFrame())
offset = 0
text = self.queryNonEmptyText(obj)
if text:
offset = text.characterCount - 1
while obj:
lastobj, lastoffset = self.nextContext(obj, offset)
if not lastobj:
break
obj, offset = lastobj, lastoffset
return [obj, offset]
    def getLastObjectInDocument(self, documentFrame):
        """Return the last object in documentFrame in document order.

        Arguments:
        - documentFrame: the document whose last object is sought
        """

        try:
            lastChild = documentFrame[documentFrame.childCount - 1]
        except:
            lastChild = documentFrame

        # Walk forward until no further object exists.
        while lastChild:
            lastObj = self.getNextObjectInDocument(lastChild, documentFrame)
            if lastObj and lastObj != lastChild:
                lastChild = lastObj
            else:
                break

        # Back out of containers we would not descend into for the caret.
        if lastChild and self.doNotDescendForCaret(lastChild):
            lastChild = lastChild.parent

        return lastChild
def getRoleDescription(self, obj):
rv = self._roleDescription.get(hash(obj))
if rv is not None:
return rv
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
attrs = {}
rv = attrs.get('roledescription', '')
self._roleDescription[hash(obj)] = rv
return rv
def _getTag(self, obj):
rv = self._tag.get(hash(obj))
if rv is not None:
return rv
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return None
rv = attrs.get('tag')
self._tag[hash(obj)] = rv
return rv
def inFindToolbar(self, obj=None):
if not obj:
obj = orca_state.locusOfFocus
if obj and obj.parent \
and obj.parent.getRole() == pyatspi.ROLE_AUTOCOMPLETE:
return False
return super().inFindToolbar(obj)
def isEmpty(self, obj):
if not self.isTextBlockElement(obj):
return False
return self.queryNonEmptyText(obj, False) is None
def isHidden(self, obj):
try:
attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
except:
return False
return attrs.get('hidden', False)
def isTextArea(self, obj):
if self.isLink(obj):
return False
return super().isTextArea(obj)
def isReadOnlyTextArea(self, obj):
# NOTE: This method is deliberately more conservative than isTextArea.
if obj.getRole() != pyatspi.ROLE_ENTRY:
return False
state = obj.getState()
readOnly = state.contains(pyatspi.STATE_FOCUSABLE) \
and not state.contains(pyatspi.STATE_EDITABLE)
return readOnly
    def setCaretOffset(self, obj, characterOffset):
        """Position the caret at characterOffset in obj and refresh braille."""

        self.setCaretPosition(obj, characterOffset)
        self._script.updateBraille(obj)
    def nextContext(self, obj=None, offset=-1, skipSpace=False):
        """Return the next caret context after (obj, offset); (None, -1)
        if there is none.

        Arguments:
        - obj: the accessible to start from; defaults to the caret context
        - offset: the character offset to start from
        - skipSpace: if True, also skip over whitespace characters
        """

        if not obj:
            obj, offset = self.getCaretContext()

        nextobj, nextoffset = self.findNextCaretInOrder(obj, offset)
        # Guard against a no-progress result: retry once from the result.
        if (obj, offset) == (nextobj, nextoffset):
            nextobj, nextoffset = self.findNextCaretInOrder(nextobj, nextoffset)

        if skipSpace:
            text = self.queryNonEmptyText(nextobj)
            while text and text.getText(nextoffset, nextoffset + 1).isspace():
                nextobj, nextoffset = self.findNextCaretInOrder(nextobj, nextoffset)
                text = self.queryNonEmptyText(nextobj)

        return nextobj, nextoffset
    def previousContext(self, obj=None, offset=-1, skipSpace=False):
        """Return the previous caret context before (obj, offset);
        (None, -1) if there is none.

        Arguments:
        - obj: the accessible to start from; defaults to the caret context
        - offset: the character offset to start from
        - skipSpace: if True, also skip over whitespace characters
        """

        if not obj:
            obj, offset = self.getCaretContext()

        prevobj, prevoffset = self.findPreviousCaretInOrder(obj, offset)
        # Guard against a no-progress result: retry once from the result.
        if (obj, offset) == (prevobj, prevoffset):
            prevobj, prevoffset = self.findPreviousCaretInOrder(prevobj, prevoffset)

        if skipSpace:
            text = self.queryNonEmptyText(prevobj)
            while text and text.getText(prevoffset, prevoffset + 1).isspace():
                prevobj, prevoffset = self.findPreviousCaretInOrder(prevobj, prevoffset)
                text = self.queryNonEmptyText(prevobj)

        return prevobj, prevoffset
    def lastContext(self, root):
        """Return the last (obj, offset) caret context within root.

        Arguments:
        - root: the accessible whose final context is sought
        """

        offset = 0
        text = self.queryNonEmptyText(root)
        if text:
            offset = text.characterCount - 1

        def _isInRoot(o):
            return o == root or pyatspi.utils.findAncestor(o, lambda x: x == root)

        # Advance through contexts until the next one escapes root.
        obj = root
        while obj:
            lastobj, lastoffset = self.nextContext(obj, offset)
            if not (lastobj and _isInRoot(lastobj)):
                break
            obj, offset = lastobj, lastoffset

        return obj, offset
def contextsAreOnSameLine(self, a, b):
if a == b:
return True
aObj, aOffset = a
bObj, bOffset = b
aExtents = self.getExtents(aObj, aOffset, aOffset + 1)
bExtents = self.getExtents(bObj, bOffset, bOffset + 1)
return self.extentsAreOnSameLine(aExtents, bExtents)
@staticmethod
def extentsAreOnSameLine(a, b, pixelDelta=5):
if a == b:
return True
aX, aY, aWidth, aHeight = a
bX, bY, bWidth, bHeight = b
if aWidth == 0 and aHeight == 0:
return bY <= aY <= bY + bHeight
if bWidth == 0 and bHeight == 0:
return aY <= bY <= aY + aHeight
highestBottom = min(aY + aHeight, bY + bHeight)
lowestTop = max(aY, bY)
if lowestTop >= highestBottom:
return False
aMiddle = aY + aHeight / 2
bMiddle = bY + bHeight / 2
if abs(aMiddle - bMiddle) > pixelDelta:
return False
return True
    @staticmethod
    def getExtents(obj, startOffset, endOffset):
        """Return [x, y, width, height] for the given character range of
        obj, falling back to component extents; [0, 0, 0, 0] on failure.

        Arguments:
        - obj: the accessible of interest
        - startOffset, endOffset: the character range to measure
        """

        if not obj:
            return [0, 0, 0, 0]

        try:
            text = obj.queryText()
            if text.characterCount:
                return list(text.getRangeExtents(startOffset, endOffset, 0))
        except NotImplementedError:
            pass
        except:
            msg = "WEB: Exception getting range extents for %s" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return [0, 0, 0, 0]

        role = obj.getRole()
        parentRole = obj.parent.getRole()
        # Items inside combo boxes and list boxes: use the container's
        # extents rather than the (possibly unpositioned) item's.
        if role in [pyatspi.ROLE_MENU, pyatspi.ROLE_LIST_ITEM] \
           and parentRole in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_LIST_BOX]:
            try:
                ext = obj.parent.queryComponent().getExtents(0)
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj.parent
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
            except:
                msg = "WEB: Exception getting extents for %s" % obj.parent
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
        else:
            try:
                ext = obj.queryComponent().getExtents(0)
            except NotImplementedError:
                msg = "WEB: %s does not implement the component interface" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]
            except:
                msg = "WEB: Exception getting extents for %s" % obj
                debug.println(debug.LEVEL_INFO, msg)
                return [0, 0, 0, 0]

        return [ext.x, ext.y, ext.width, ext.height]
    def expandEOCs(self, obj, startOffset=0, endOffset=-1):
        """Return obj's text in the given range with each embedded object
        character recursively replaced by the child's expanded text.

        Arguments:
        - obj: the accessible whose text is to be expanded
        - startOffset, endOffset: the character range of interest
        """

        if not self.inDocumentContent(obj):
            return ""

        text = self.queryNonEmptyText(obj)
        if not text:
            return ""

        string = text.getText(startOffset, endOffset)

        if self.EMBEDDED_OBJECT_CHARACTER in string:
            # If we're not getting the full text of this object, but
            # rather a substring, we need to figure out the offset of
            # the first child within this substring.
            childOffset = 0
            for child in obj:
                if self.characterOffsetInParent(child) >= startOffset:
                    break
                childOffset += 1

            # Replace each embedded object character, left to right, with
            # the corresponding child's expanded text.
            toBuild = list(string)
            count = toBuild.count(self.EMBEDDED_OBJECT_CHARACTER)
            for i in range(count):
                index = toBuild.index(self.EMBEDDED_OBJECT_CHARACTER)
                try:
                    child = obj[i + childOffset]
                except:
                    continue
                childText = self.expandEOCs(child)
                if not childText:
                    childText = ""
                toBuild[index] = "%s " % childText

            string = "".join(toBuild).strip()

        return string
def substring(self, obj, startOffset, endOffset):
if not self.inDocumentContent(obj):
return super().substring(obj, startOffset, endOffset)
text = self.queryNonEmptyText(obj)
if text:
return text.getText(startOffset, endOffset)
return ""
def textAttributes(self, acc, offset, get_defaults=False):
attrsForObj = self._currentAttrs.get(hash(acc)) or {}
if offset in attrsForObj:
return attrsForObj.get(offset)
attrs = super().textAttributes(acc, offset, get_defaults)
self._currentAttrs[hash(acc)] = {offset:attrs}
return attrs
def findObjectInContents(self, obj, offset, contents):
if not obj or not contents:
return -1
offset = max(0, offset)
matches = [x for x in contents if x[0] == obj]
match = [x for x in matches if x[1] <= offset < x[2]]
if match and match[0] and match[0] in contents:
return contents.index(match[0])
return -1
    def isNonEntryTextWidget(self, obj):
        """Return True if obj is a widget (button, checkbox, etc.) whose
        text should not be treated as entry text. Results are cached.

        Arguments:
        - obj: the accessible of interest
        """

        rv = self._isNonEntryTextWidget.get(hash(obj))
        if rv is not None:
            return rv

        roles = [pyatspi.ROLE_CHECK_BOX,
                 pyatspi.ROLE_CHECK_MENU_ITEM,
                 pyatspi.ROLE_MENU,
                 pyatspi.ROLE_MENU_ITEM,
                 pyatspi.ROLE_PAGE_TAB,
                 pyatspi.ROLE_RADIO_MENU_ITEM,
                 pyatspi.ROLE_RADIO_BUTTON,
                 pyatspi.ROLE_PUSH_BUTTON,
                 pyatspi.ROLE_TOGGLE_BUTTON]
        role = obj.getRole()
        if role in roles:
            rv = True
        elif role in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_TABLE_CELL]:
            rv = not self.isTextBlockElement(obj)

        # NOTE: for any other role rv remains None; caching None means the
        # value is recomputed on the next call.
        self._isNonEntryTextWidget[hash(obj)] = rv
        return rv
    def queryNonEmptyText(self, obj, excludeNonEntryTextWidgets=True):
        """Return obj's text interface iff obj has text we should present;
        otherwise None. Results are cached per object.

        Arguments:
        - obj: the accessible of interest
        - excludeNonEntryTextWidgets: if True, widgets like buttons and
          checkboxes are treated as having no queryable text
        """

        if not obj:
            return None

        if hash(obj) in self._text:
            return self._text.get(hash(obj))

        try:
            rv = obj.queryText()
            characterCount = rv.characterCount
        except:
            rv = None
        else:
            if not characterCount:
                rv = None

        # Live regions keep their text; everything else is filtered.
        if not self.isLiveRegion(obj):
            doNotQuery = [pyatspi.ROLE_TABLE_ROW,
                          pyatspi.ROLE_TOOL_BAR]
            role = obj.getRole()
            if rv and role in doNotQuery:
                rv = None
            if rv and excludeNonEntryTextWidgets and self.isNonEntryTextWidget(obj):
                rv = None
            if rv and (self.isHidden(obj) or self.isOffScreenLabel(obj)):
                rv = None
            # Explicitly-named links are presented by name, not content.
            if rv and role == pyatspi.ROLE_LINK \
               and (self.hasExplicitName(obj) or self.hasUselessCanvasDescendant(obj)):
                rv = None

        self._text[hash(obj)] = rv
        return rv
def _treatTextObjectAsWhole(self, obj):
roles = [pyatspi.ROLE_CHECK_BOX,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_MENU,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM,
pyatspi.ROLE_RADIO_BUTTON,
pyatspi.ROLE_PUSH_BUTTON,
pyatspi.ROLE_TOGGLE_BUTTON]
role = obj.getRole()
if role in roles:
return True
if role == pyatspi.ROLE_TABLE_CELL and self.isFocusModeWidget(obj):
return True
return False
    def __findRange(self, text, offset, start, end, boundary):
        """Manually compute the (string, start, end) range containing offset
        for the given text boundary, compensating for broken toolkit
        implementations of getTextAtOffset.

        Arguments:
        - text: the AT-SPI text interface of the object
        - offset: the character offset of interest
        - start, end: the range suggested by the toolkit (unused here)
        - boundary: the pyatspi TEXT_BOUNDARY_* constant, or None
        """

        # We should not have to do any of this. Seriously. This is why
        # We can't have nice things.
        allText = text.getText(0, -1)
        extents = list(text.getRangeExtents(offset, offset + 1, 0))

        def _inThisSpan(span):
            return span[0] <= offset <= span[1]

        def _onThisLine(span):
            rangeExtents = list(text.getRangeExtents(span[0], span[0] + 1, 0))
            return self.extentsAreOnSameLine(extents, rangeExtents)

        spans = []
        charCount = text.characterCount
        if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START:
            spans = [m.span() for m in re.finditer("\S*[^\.\?\!]+((?<!\w)[\.\?\!]+(?!\w)|\S*)", allText)]
        elif boundary is not None:
            spans = [m.span() for m in re.finditer("[^\n\r]+", allText)]
        if not spans:
            spans = [(0, charCount)]

        # Find the span containing the offset; default to the whole text.
        rangeStart, rangeEnd = 0, charCount
        for span in spans:
            if _inThisSpan(span):
                rangeStart, rangeEnd = span[0], span[1] + 1
                break

        string = allText[rangeStart:rangeEnd]
        if string and boundary in [pyatspi.TEXT_BOUNDARY_SENTENCE_START, None]:
            return string, rangeStart, rangeEnd

        # For word and line boundaries, refine within the found span.
        words = [m.span() for m in re.finditer("[^\s\ufffc]+", string)]
        words = list(map(lambda x: (x[0] + rangeStart, x[1] + rangeStart), words))
        if boundary == pyatspi.TEXT_BOUNDARY_WORD_START:
            spans = list(filter(_inThisSpan, words))
        if boundary == pyatspi.TEXT_BOUNDARY_LINE_START:
            spans = list(filter(_onThisLine, words))
        if spans:
            rangeStart, rangeEnd = spans[0][0], spans[-1][1] + 1
        string = allText[rangeStart:rangeEnd]

        return string, rangeStart, rangeEnd
    def _attemptBrokenTextRecovery(self):
        """Whether to try to compensate for broken text-boundary results.
        This implementation opts out; presumably toolkit-specific
        subclasses override it — confirm against the callers."""
        return False
def _getTextAtOffset(self, obj, offset, boundary):
    """Return (string, start, end) for the text at offset in obj.

    Handles degenerate inputs, char/no-boundary/sentence special cases,
    and optionally sanity-checks the toolkit's getTextAtOffset() results,
    recovering via __findRange() when they are broken.
    """
    if not obj:
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
              " String: '', Start: 0, End: 0. (obj is None)" % (offset, obj, boundary)
        debug.println(debug.LEVEL_INFO, msg)
        return '', 0, 0

    text = self.queryNonEmptyText(obj)
    if not text:
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
              " String: '', Start: 0, End: 1. (queryNonEmptyText() returned None)" \
              % (offset, obj, boundary)
        debug.println(debug.LEVEL_INFO, msg)
        return '', 0, 1

    if boundary == pyatspi.TEXT_BOUNDARY_CHAR:
        string, start, end = text.getText(offset, offset + 1), offset, offset + 1
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
              " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end

    if not boundary:
        # No boundary requested: everything from offset to the end.
        string, start, end = text.getText(offset, -1), offset, text.characterCount
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
              " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end

    if boundary == pyatspi.TEXT_BOUNDARY_SENTENCE_START \
       and not obj.getState().contains(pyatspi.STATE_EDITABLE):
        # Non-editable list items, headings, and non-word-bearing text
        # blocks are treated as a single sentence.
        allText = text.getText(0, -1)
        if obj.getRole() in [pyatspi.ROLE_LIST_ITEM, pyatspi.ROLE_HEADING] \
           or not (re.search("\w", allText) and self.isTextBlockElement(obj)):
            string, start, end = allText, 0, text.characterCount
            s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
            msg = "WEB: Results for text at offset %i for %s using %s:\n" \
                  " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
            debug.println(debug.LEVEL_INFO, msg)
            return string, start, end

    offset = max(0, offset)
    string, start, end = text.getTextAtOffset(offset, boundary)

    # The above should be all that we need to do, but....
    if not self._attemptBrokenTextRecovery():
        s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "WEB: Results for text at offset %i for %s using %s:\n" \
              " String: '%s', Start: %i, End: %i.\n" \
              " Not checking for broken text." % (offset, obj, boundary, s, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        return string, start, end

    # Sanity-check the toolkit's results in three ways; any failure
    # triggers the __findRange() fallback below.
    needSadHack = False
    testString, testStart, testEnd = text.getTextAtOffset(start, boundary)
    if (string, start, end) != (testString, testStart, testEnd):
        # Asking again at the returned start offset should be idempotent.
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        s2 = testString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset for %s using %s.\n" \
              " For offset %i - String: '%s', Start: %i, End: %i.\n" \
              " For offset %i - String: '%s', Start: %i, End: %i.\n" \
              " The bug is the above results should be the same.\n" \
              " This very likely needs to be fixed by the toolkit." \
              % (obj, boundary, offset, s1, start, end, start, s2, testStart, testEnd)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True
    elif not string and 0 <= offset < text.characterCount:
        # A valid offset should never yield an empty string.
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        s2 = text.getText(0, -1).replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
              " String: '%s', Start: %i, End: %i.\n" \
              " The bug is no text reported for a valid offset.\n" \
              " Character count: %i, Full text: '%s'.\n" \
              " This very likely needs to be fixed by the toolkit." \
              % (offset, obj, boundary, s1, start, end, text.characterCount, s2)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True
    elif not (start <= offset < end):
        # The returned range must contain the requested offset.
        s1 = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "FAIL: Bad results for text at offset %i for %s using %s:\n" \
              " String: '%s', Start: %i, End: %i.\n" \
              " The bug is the range returned is outside of the offset.\n" \
              " This very likely needs to be fixed by the toolkit." \
              % (offset, obj, boundary, s1, start, end)
        debug.println(debug.LEVEL_INFO, msg)
        needSadHack = True

    if needSadHack:
        sadString, sadStart, sadEnd = self.__findRange(text, offset, start, end, boundary)
        s = sadString.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
        msg = "HACK: Attempting to recover from above failure.\n" \
              " String: '%s', Start: %i, End: %i." % (s, sadStart, sadEnd)
        debug.println(debug.LEVEL_INFO, msg)
        return sadString, sadStart, sadEnd

    s = string.replace(self.EMBEDDED_OBJECT_CHARACTER, "[OBJ]").replace("\n", "\\n")
    msg = "WEB: Results for text at offset %i for %s using %s:\n" \
          " String: '%s', Start: %i, End: %i." % (offset, obj, boundary, s, start, end)
    debug.println(debug.LEVEL_INFO, msg)
    return string, start, end
def _getContentsForObj(self, obj, offset, boundary):
    """Return a list of [obj, start, end, string] quads for obj at offset.

    Descends into embedded objects; for math on line boundaries the whole
    top-level math element is substituted.
    """
    if not obj:
        return []

    if boundary == pyatspi.TEXT_BOUNDARY_LINE_START and self.isMath(obj):
        if self.isMathTopLevel(obj):
            math = obj
        else:
            math = self.getMathAncestor(obj)
        return [[math, 0, 1, '']]

    role = obj.getRole()
    if role == pyatspi.ROLE_INTERNAL_FRAME and obj.childCount == 1:
        # A single-child iframe is treated as its child document.
        return self._getContentsForObj(obj[0], 0, boundary)

    string, start, end = self._getTextAtOffset(obj, offset, boundary)
    # Check for ROLE_SECTION due to https://bugzilla.mozilla.org/show_bug.cgi?id=1210630
    if not string or (self.isLandmark(obj) and role != pyatspi.ROLE_SECTION):
        return [[obj, start, end, string]]

    stringOffset = offset - start
    try:
        char = string[stringOffset]
    except:
        pass
    else:
        if char == self.EMBEDDED_OBJECT_CHARACTER:
            # Recurse into the embedded child at this offset.
            childIndex = self.getChildIndex(obj, offset)
            try:
                child = obj[childIndex]
            except:
                pass
            else:
                return self._getContentsForObj(child, 0, boundary)

    # Trim to the run of non-embedded-object characters containing offset.
    ranges = [m.span() for m in re.finditer("[^\ufffc]+", string)]
    strings = list(filter(lambda x: x[0] <= stringOffset <= x[1], ranges))
    if len(strings) == 1:
        rangeStart, rangeEnd = strings[0]
        start += rangeStart
        string = string[rangeStart:rangeEnd]
        end = start + len(string)

    return [[obj, start, end, string]]
def getSentenceContentsAtOffset(self, obj, offset, useCache=True):
    """Return the contents of the sentence containing offset in obj.

    Extends the initial result leftward and rightward until something
    that looks like a sentence end is reached. Results are cached unless
    useCache is False.
    """
    if not obj:
        return []

    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentSentenceContents) != -1:
            return self._currentSentenceContents

    boundary = pyatspi.TEXT_BOUNDARY_SENTENCE_START
    objects = self._getContentsForObj(obj, offset, boundary)
    state = obj.getState()
    if state.contains(pyatspi.STATE_EDITABLE) \
       and state.contains(pyatspi.STATE_FOCUSED):
        # Don't cross object boundaries inside a focused editable.
        return objects

    def _treatAsSentenceEnd(x):
        xObj, xStart, xEnd, xString = x
        if not self.isTextBlockElement(xObj):
            return False

        text = self.queryNonEmptyText(xObj)
        if text and 0 < text.characterCount <= xEnd:
            return True

        if 0 <= xStart <= 5:
            # Drop a possible leading marker/word before testing punctuation.
            xString = " ".join(xString.split()[1:])

        match = re.search("\S[\.\!\?]+(\s|\Z)", xString)
        return match is not None

    # Check for things in the same sentence before this object.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    while firstObj and firstString:
        if firstStart == 0 and self.isTextBlockElement(firstObj):
            break

        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(lambda x: x not in objects, onLeft))
        endsOnLeft = list(filter(_treatAsSentenceEnd, onLeft))
        if endsOnLeft:
            # Keep only what follows the last sentence end.
            i = onLeft.index(endsOnLeft[-1])
            onLeft = onLeft[i+1:]

        if not onLeft:
            break

        objects[0:0] = onLeft
        firstObj, firstStart, firstEnd, firstString = objects[0]

    # Check for things in the same sentence after this object.
    while not _treatAsSentenceEnd(objects[-1]):
        lastObj, lastStart, lastEnd, lastString = objects[-1]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(lambda x: x not in objects, onRight))
        if not onRight:
            break

        objects.extend(onRight)

    if useCache:
        self._currentSentenceContents = objects

    return objects
def getCharacterAtOffset(self, obj, offset):
    """Return the single character at offset in obj, or "" without text."""
    text = self.queryNonEmptyText(obj)
    if not text:
        return ""
    return text.getText(offset, offset + 1)
def getCharacterContentsAtOffset(self, obj, offset, useCache=True):
    """Return the content list for the character at offset in obj."""
    if not obj:
        return []

    offset = max(0, offset)
    if useCache and self.findObjectInContents(
            obj, offset, self._currentCharacterContents) != -1:
        return self._currentCharacterContents

    contents = self._getContentsForObj(obj, offset, pyatspi.TEXT_BOUNDARY_CHAR)
    if useCache:
        self._currentCharacterContents = contents
    return contents
def getWordContentsAtOffset(self, obj, offset, useCache=True):
    """Return the contents of the word containing offset in obj.

    Extends across object boundaries while adjacent characters are
    non-whitespace and on the same line. Cached unless useCache is False.
    """
    if not obj:
        return []

    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentWordContents) != -1:
            return self._currentWordContents

    boundary = pyatspi.TEXT_BOUNDARY_WORD_START
    objects = self._getContentsForObj(obj, offset, boundary)
    extents = self.getExtents(obj, offset, offset + 1)

    def _include(x):
        if x in objects:
            return False

        xObj, xStart, xEnd, xString = x
        if xStart == xEnd or not xString:
            return False

        xExtents = self.getExtents(xObj, xStart, xStart + 1)
        return self.extentsAreOnSameLine(extents, xExtents)

    # Check for things in the same word to the left of this object.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    while prevObj and firstString:
        text = self.queryNonEmptyText(prevObj)
        if not text or text.getText(pOffset, pOffset + 1).isspace():
            break

        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(_include, onLeft))
        if not onLeft:
            break

        objects[0:0] = onLeft
        firstObj, firstStart, firstEnd, firstString = objects[0]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)

    # Check for things in the same word to the right of this object.
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    while lastObj and lastString and not lastString[-1].isspace():
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break

        objects.extend(onRight)
        lastObj, lastStart, lastEnd, lastString = objects[-1]

    # We want to treat the list item marker as its own word.
    firstObj, firstStart, firstEnd, firstString = objects[0]
    if firstStart == 0 and firstObj.getRole() == pyatspi.ROLE_LIST_ITEM:
        objects = [objects[0]]

    if useCache:
        self._currentWordContents = objects

    return objects
def getObjectContentsAtOffset(self, obj, offset=0, useCache=True):
    """Return the contents of obj (and its descendants) from offset on.

    Accumulates content rightward while it remains inside obj. Cached
    unless useCache is False.
    """
    if not obj:
        return []

    offset = max(0, offset)
    if useCache:
        if self.findObjectInContents(obj, offset, self._currentObjectContents) != -1:
            return self._currentObjectContents

    objIsLandmark = self.isLandmark(obj)

    def _isInObject(x):
        if not x:
            return False
        if x == obj:
            return True
        return _isInObject(x.parent)

    def _include(x):
        if x in objects:
            return False

        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False

        if objIsLandmark and self.isLandmark(xObj) and obj != xObj:
            # Do not descend into nested landmarks.
            return False

        return _isInObject(xObj)

    objects = self._getContentsForObj(obj, offset, None)
    lastObj, lastStart, lastEnd, lastString = objects[-1]
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)
    while nextObj:
        onRight = self._getContentsForObj(nextObj, nOffset, None)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break

        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)

    if useCache:
        self._currentObjectContents = objects

    return objects
def _contentIsSubsetOf(self, contentA, contentB):
objA, startA, endA, stringA = contentA
objB, startB, endB, stringB = contentB
if objA == objB:
setA = set(range(startA, endA))
setB = set(range(startB, endB))
return setA.issubset(setB)
return False
def getLineContentsAtOffset(self, obj, offset, layoutMode=None, useCache=True):
    """Return the contents of the line containing offset in obj.

    In layout mode, the line is extended with visually-adjacent content
    to the left and right. Results are cached unless useCache is False.
    """
    if not obj:
        return []

    text = self.queryNonEmptyText(obj)
    if text and offset == text.characterCount:
        offset -= 1
    offset = max(0, offset)

    if useCache:
        if self.findObjectInContents(obj, offset, self._currentLineContents) != -1:
            return self._currentLineContents

    # Fix: compare to None with 'is', not '==' (PEP 8).
    if layoutMode is None:
        layoutMode = _settingsManager.getSetting('layoutMode')

    objects = []
    extents = self.getExtents(obj, offset, offset + 1)

    def _include(x):
        if x in objects:
            return False

        xObj, xStart, xEnd, xString = x
        if xStart == xEnd:
            return False

        xExtents = self.getExtents(xObj, xStart, xStart + 1)
        if self.isMathTopLevel(xObj):
            onSameLine = self.extentsAreOnSameLine(extents, xExtents, extents[3])
        else:
            onSameLine = self.extentsAreOnSameLine(extents, xExtents)
        return onSameLine

    boundary = pyatspi.TEXT_BOUNDARY_LINE_START
    objects = self._getContentsForObj(obj, offset, boundary)
    if not layoutMode:
        if useCache:
            self._currentLineContents = objects
        return objects

    firstObj, firstStart, firstEnd, firstString = objects[0]
    if (extents[2] == 0 and extents[3] == 0) or self.isMath(firstObj):
        extents = self.getExtents(firstObj, firstStart, firstEnd)

    lastObj, lastStart, lastEnd, lastString = objects[-1]
    prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)
    nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)

    # Check for things on the same line to the left of this object.
    while prevObj:
        text = self.queryNonEmptyText(prevObj)
        if text and text.getText(pOffset, pOffset + 1) in [" ", "\xa0"]:
            # Skip over a single space / non-breaking space.
            prevObj, pOffset = self.findPreviousCaretInOrder(prevObj, pOffset)

        onLeft = self._getContentsForObj(prevObj, pOffset, boundary)
        onLeft = list(filter(_include, onLeft))
        if not onLeft:
            break

        if self._contentIsSubsetOf(objects[0], onLeft[-1]):
            objects.pop(0)

        objects[0:0] = onLeft
        firstObj, firstStart = objects[0][0], objects[0][1]
        prevObj, pOffset = self.findPreviousCaretInOrder(firstObj, firstStart)

    # Check for things on the same line to the right of this object.
    while nextObj:
        text = self.queryNonEmptyText(nextObj)
        if text and text.getText(nOffset, nOffset + 1) in [" ", "\xa0"]:
            nextObj, nOffset = self.findNextCaretInOrder(nextObj, nOffset)

        onRight = self._getContentsForObj(nextObj, nOffset, boundary)
        onRight = list(filter(_include, onRight))
        if not onRight:
            break

        objects.extend(onRight)
        lastObj, lastEnd = objects[-1][0], objects[-1][2]
        nextObj, nOffset = self.findNextCaretInOrder(lastObj, lastEnd - 1)

    if useCache:
        self._currentLineContents = objects

    return objects
def getPreviousLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line above the given (or caret) context."""
    if obj is None:
        obj, offset = self.getCaretContext()

    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)

    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie. Clearing cache." % obj
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()

        obj, offset = self.getCaretContext()
        msg = "WEB: Now Current context is: %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)

    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)

    if not (line and line[0]):
        return []

    firstObj, firstOffset = line[0][0], line[0][1]
    msg = "WEB: First context on line is: %s, %i" % (firstObj, firstOffset)
    debug.println(debug.LEVEL_INFO, msg)

    obj, offset = self.previousContext(firstObj, firstOffset, True)
    if not obj and firstObj:
        # A failed lookup may mean stale cached objects; clear and retry once.
        msg = "WEB: Previous context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.previousContext(firstObj, firstOffset, True)

    msg = "WEB: Previous context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)

    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []

    return contents
def getNextLineContents(self, obj=None, offset=-1, layoutMode=None, useCache=True):
    """Return the contents of the line below the given (or caret) context."""
    if obj is None:
        obj, offset = self.getCaretContext()

    msg = "WEB: Current context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)

    if obj and self.isZombie(obj):
        msg = "WEB: Current context obj %s is zombie. Clearing cache." % obj
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()

        obj, offset = self.getCaretContext()
        msg = "WEB: Now Current context is: %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)

    line = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    msg = "WEB: Line contents for %s, %i: %s" % (obj, offset, line)
    debug.println(debug.LEVEL_INFO, msg)

    if not (line and line[0]):
        return []

    # If we're inside math, advance from the end of the whole math element.
    math = self.getMathAncestor(obj)
    if math:
        lastObj, lastOffset = self.lastContext(math)
    else:
        lastObj, lastOffset = line[-1][0], line[-1][2] - 1
    msg = "WEB: Last context on line is: %s, %i" % (lastObj, lastOffset)
    debug.println(debug.LEVEL_INFO, msg)

    obj, offset = self.nextContext(lastObj, lastOffset, True)
    if not obj and lastObj:
        # A failed lookup may mean stale cached objects; clear and retry once.
        msg = "WEB: Next context is: %s, %i. Trying again." % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        self.clearCachedObjects()
        obj, offset = self.nextContext(lastObj, lastOffset, True)

    msg = "WEB: Next context is: %s, %i" % (obj, offset)
    debug.println(debug.LEVEL_INFO, msg)

    contents = self.getLineContentsAtOffset(obj, offset, layoutMode, useCache)
    if not contents:
        msg = "WEB: Could not get line contents for %s, %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return []

    return contents
def isFocusModeWidget(self, obj):
    """Return True if obj is a widget that should trigger focus mode."""
    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False

    # Anything editable or expandable always gets focus mode.
    if state.contains(pyatspi.STATE_EDITABLE) \
       or state.contains(pyatspi.STATE_EXPANDABLE):
        return True

    focusModeRoles = [pyatspi.ROLE_COMBO_BOX,
                      pyatspi.ROLE_EMBEDDED,
                      pyatspi.ROLE_ENTRY,
                      pyatspi.ROLE_LIST_BOX,
                      pyatspi.ROLE_LIST_ITEM,
                      pyatspi.ROLE_MENU,
                      pyatspi.ROLE_MENU_ITEM,
                      pyatspi.ROLE_CHECK_MENU_ITEM,
                      pyatspi.ROLE_RADIO_MENU_ITEM,
                      pyatspi.ROLE_PAGE_TAB,
                      pyatspi.ROLE_PASSWORD_TEXT,
                      pyatspi.ROLE_PROGRESS_BAR,
                      pyatspi.ROLE_SLIDER,
                      pyatspi.ROLE_SPIN_BUTTON,
                      pyatspi.ROLE_TOOL_BAR,
                      pyatspi.ROLE_TABLE_CELL,
                      pyatspi.ROLE_TABLE_ROW,
                      pyatspi.ROLE_TABLE,
                      pyatspi.ROLE_TREE_TABLE,
                      pyatspi.ROLE_TREE]

    # A focus-mode role that is really a text block (e.g. a layout table
    # cell) does not count.
    if role in focusModeRoles \
       and not self.isTextBlockElement(obj):
        return True

    if self.isGridDescendant(obj):
        return True

    return False
def isTextBlockElement(self, obj):
    """Return True if obj is a static block of text in document content.

    Result is cached per object. Editable or focusable elements are not
    text blocks (documents themselves excepted).
    """
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isTextBlockElement.get(hash(obj))
    if rv is not None:
        return rv

    try:
        role = obj.getRole()
        state = obj.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False

    textBlockElements = [pyatspi.ROLE_CAPTION,
                         pyatspi.ROLE_COLUMN_HEADER,
                         pyatspi.ROLE_DOCUMENT_FRAME,
                         pyatspi.ROLE_DOCUMENT_WEB,
                         pyatspi.ROLE_FOOTER,
                         pyatspi.ROLE_FORM,
                         pyatspi.ROLE_HEADING,
                         pyatspi.ROLE_LABEL,
                         pyatspi.ROLE_LIST,
                         pyatspi.ROLE_LIST_ITEM,
                         pyatspi.ROLE_PANEL,
                         pyatspi.ROLE_PARAGRAPH,
                         pyatspi.ROLE_ROW_HEADER,
                         pyatspi.ROLE_SECTION,
                         pyatspi.ROLE_TEXT,
                         pyatspi.ROLE_TABLE_CELL]

    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        textBlockElements.append(pyatspi.ROLE_STATIC)
    except:
        pass

    if not role in textBlockElements:
        rv = False
    elif state.contains(pyatspi.STATE_EDITABLE):
        rv = False
    elif role in [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]:
        rv = True
    elif not state.contains(pyatspi.STATE_FOCUSABLE) and not state.contains(pyatspi.STATE_FOCUSED):
        rv = True
    else:
        rv = False

    self._isTextBlockElement[hash(obj)] = rv
    return rv
def treatAsDiv(self, obj):
    """Return True if obj should be treated as a generic div (cached).

    An element is div-like when it (or its parent) contains children whose
    roles are not valid for it per self._validChildRoles.
    """
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._treatAsDiv.get(hash(obj))
    if rv is not None:
        return rv

    try:
        role = obj.getRole()
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting role and childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False

    rv = False
    validRoles = self._validChildRoles.get(role)
    if validRoles:
        if not childCount:
            rv = True
        else:
            rv = bool([x for x in obj if x and x.getRole() not in validRoles])

    if not rv:
        # NOTE(review): this lookup is keyed by the parent accessible itself,
        # unlike the role-keyed lookup above -- confirm whether
        # obj.parent.getRole() was intended.
        validRoles = self._validChildRoles.get(obj.parent)
        if validRoles:
            rv = bool([x for x in obj.parent if x and x.getRole() not in validRoles])

    self._treatAsDiv[hash(obj)] = rv
    return rv
def speakMathSymbolNames(self, obj=None):
    """Return True if math symbol names should be spoken for obj."""
    target = obj or orca_state.locusOfFocus
    return self.isMath(target)
def isInMath(self):
    """Return True if the locus of focus is inside math content."""
    focus = orca_state.locusOfFocus
    return self.isMath(focus)
def isMath(self, obj):
    """Return True if obj is a MathML element (cached per object)."""
    cached = self._isMath.get(hash(obj))
    if cached is not None:
        return cached

    mathTags = frozenset((
        'math', 'maction', 'maligngroup', 'malignmark', 'menclose',
        'merror', 'mfenced', 'mfrac', 'mglyph', 'mi', 'mlabeledtr',
        'mlongdiv', 'mmultiscripts', 'mn', 'mo', 'mover', 'mpadded',
        'mphantom', 'mprescripts', 'mroot', 'mrow', 'ms', 'mscarries',
        'mscarry', 'msgroup', 'msline', 'mspace', 'msqrt', 'msrow',
        'mstack', 'mstyle', 'msub', 'msup', 'msubsup', 'mtable', 'mtd',
        'mtext', 'mtr', 'munder', 'munderover'))

    rv = self._getTag(obj) in mathTags
    self._isMath[hash(obj)] = rv
    return rv
def isNoneElement(self, obj):
    """Return True if obj is a MathML <none/> placeholder."""
    tag = self._getTag(obj)
    return tag == 'none'
def isMathLayoutOnly(self, obj):
    """Return True if obj is a purely-presentational MathML wrapper."""
    tag = self._getTag(obj)
    return tag in ('mrow', 'mstyle', 'merror', 'mpadded')
def isMathMultiline(self, obj):
    """Return True if obj is a multi-line MathML construct."""
    tag = self._getTag(obj)
    return tag in ('mtable', 'mstack', 'mlongdiv')
def isMathEnclose(self, obj):
    """Return True if obj is an <menclose> element."""
    tag = self._getTag(obj)
    return tag == 'menclose'
def isMathFenced(self, obj):
    """Return True if obj is an <mfenced> element."""
    tag = self._getTag(obj)
    return tag == 'mfenced'
def isMathFraction(self, obj):
    """Return True if obj is an <mfrac> element."""
    tag = self._getTag(obj)
    return tag == 'mfrac'
def isMathFractionWithoutBar(self, obj):
    """Return True if obj is an <mfrac> with a zero linethickness."""
    if not self.isMathFraction(obj):
        return False

    try:
        attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        return False

    linethickness = attrs.get('linethickness')
    if not linethickness:
        return False

    # Any nonzero digit means a visible bar.
    # NOTE(review): values containing no digits at all (e.g. "thin")
    # fall through and return True -- confirm this is intended.
    for char in linethickness:
        if char.isnumeric() and char != '0':
            return False

    return True
def isMathPhantom(self, obj):
    """Return True if obj is an <mphantom> element."""
    tag = self._getTag(obj)
    return tag == 'mphantom'
def isMathRoot(self, obj):
    """Return True if obj is a square root or an nth root."""
    if self.isMathSquareRoot(obj):
        return True
    return self.isMathNthRoot(obj)
def isMathNthRoot(self, obj):
    """Return True if obj is an <mroot> element."""
    tag = self._getTag(obj)
    return tag == 'mroot'
def isMathMultiScript(self, obj):
    """Return True if obj is an <mmultiscripts> element."""
    tag = self._getTag(obj)
    return tag == 'mmultiscripts'
def _isMathPrePostScriptSeparator(self, obj):
    """Return True if obj is an <mprescripts> separator element."""
    tag = self._getTag(obj)
    return tag == 'mprescripts'
def isMathSubOrSuperScript(self, obj):
    """Return True if obj is an <msub>, <msup>, or <msubsup> element."""
    tag = self._getTag(obj)
    return tag in ('msub', 'msup', 'msubsup')
def isMathTable(self, obj):
    """Return True if obj is an <mtable> element."""
    tag = self._getTag(obj)
    return tag == 'mtable'
def isMathTableRow(self, obj):
    """Return True if obj is an <mtr> or <mlabeledtr> element."""
    tag = self._getTag(obj)
    return tag in ('mtr', 'mlabeledtr')
def isMathTableCell(self, obj):
    """Return True if obj is an <mtd> element."""
    tag = self._getTag(obj)
    return tag == 'mtd'
def isMathUnderOrOverScript(self, obj):
    """Return True if obj is an <mover>, <munder>, or <munderover> element."""
    tag = self._getTag(obj)
    return tag in ('mover', 'munder', 'munderover')
def _isMathSubElement(self, obj):
    """Return True if obj is an <msub> element."""
    tag = self._getTag(obj)
    return tag == 'msub'
def _isMathSupElement(self, obj):
    """Return True if obj is an <msup> element."""
    tag = self._getTag(obj)
    return tag == 'msup'
def _isMathSubsupElement(self, obj):
    """Return True if obj is an <msubsup> element."""
    tag = self._getTag(obj)
    return tag == 'msubsup'
def _isMathUnderElement(self, obj):
    """Return True if obj is an <munder> element."""
    tag = self._getTag(obj)
    return tag == 'munder'
def _isMathOverElement(self, obj):
    """Return True if obj is an <mover> element."""
    tag = self._getTag(obj)
    return tag == 'mover'
def _isMathUnderOverElement(self, obj):
    """Return True if obj is an <munderover> element."""
    tag = self._getTag(obj)
    return tag == 'munderover'
def isMathSquareRoot(self, obj):
    """Return True if obj is an <msqrt> element."""
    tag = self._getTag(obj)
    return tag == 'msqrt'
def isMathToken(self, obj):
    """Return True if obj is a MathML token element."""
    tag = self._getTag(obj)
    return tag in ('mi', 'mn', 'mo', 'mtext', 'ms', 'mspace')
def isMathTopLevel(self, obj):
    """Return True if obj is the top-level <math> element."""
    role = obj.getRole()
    return role == pyatspi.ROLE_MATH
def getMathAncestor(self, obj):
    """Return the top-level <math> element containing obj, or None."""
    if self.isMath(obj):
        if self.isMathTopLevel(obj):
            return obj
        return pyatspi.findAncestor(obj, self.isMathTopLevel)
    return None
def getMathDenominator(self, obj):
    """Return the denominator (second child) of an <mfrac>, or None."""
    return obj[1] if self.isMathFraction(obj) else None
def getMathNumerator(self, obj):
    """Return the numerator (first child) of an <mfrac>, or None."""
    return obj[0] if self.isMathFraction(obj) else None
def getMathRootBase(self, obj):
    """Return the base of a root: <mroot>'s first child, or the <msqrt> itself."""
    if self.isMathNthRoot(obj):
        return obj[0]
    return obj if self.isMathSquareRoot(obj) else None
def getMathRootIndex(self, obj):
    """Return the index (second child) of an <mroot>, or None."""
    if not self.isMathNthRoot(obj):
        return None
    try:
        return obj[1]
    except:
        return None
def getMathScriptBase(self, obj):
    """Return the base (first child) of a scripted element, or None."""
    isScripted = self.isMathSubOrSuperScript(obj) \
        or self.isMathUnderOrOverScript(obj) \
        or self.isMathMultiScript(obj)
    return obj[0] if isScripted else None
def getMathScriptSubscript(self, obj):
    """Return the subscript child of <msub>/<msubsup>, or None."""
    hasSubscript = self._isMathSubElement(obj) or self._isMathSubsupElement(obj)
    return obj[1] if hasSubscript else None
def getMathScriptSuperscript(self, obj):
    """Return the superscript child of <msup>/<msubsup>, or None."""
    if self._isMathSupElement(obj):
        return obj[1]
    return obj[2] if self._isMathSubsupElement(obj) else None
def getMathScriptUnderscript(self, obj):
    """Return the underscript child of <munder>/<munderover>, or None."""
    hasUnderscript = self._isMathUnderElement(obj) or self._isMathUnderOverElement(obj)
    return obj[1] if hasUnderscript else None
def getMathScriptOverscript(self, obj):
    """Return the overscript child of <mover>/<munderover>, or None."""
    if self._isMathOverElement(obj):
        return obj[1]
    return obj[2] if self._isMathUnderOverElement(obj) else None
def _getMathPrePostScriptSeparator(self, obj):
    """Return obj's <mprescripts> child, or None if it has none."""
    return next(
        (child for child in obj if self._isMathPrePostScriptSeparator(child)),
        None)
def getMathPrescripts(self, obj):
    """Return the children following the <mprescripts> separator, if any."""
    separator = self._getMathPrePostScriptSeparator(obj)
    if not separator:
        return []
    start = separator.getIndexInParent() + 1
    return [obj[i] for i in range(start, obj.childCount)]
def getMathPostscripts(self, obj):
    """Return the script children between the base and any <mprescripts>."""
    separator = self._getMathPrePostScriptSeparator(obj)
    end = separator.getIndexInParent() if separator else obj.childCount
    return [obj[i] for i in range(1, end)]
def getMathEnclosures(self, obj):
    """Return the notation types of an <menclose> (default 'longdiv')."""
    if not self.isMathEnclose(obj):
        return []
    try:
        attrs = dict(attr.split(':', 1) for attr in obj.getAttributes())
    except:
        return []
    return attrs.get('notation', 'longdiv').split()
def getMathFencedSeparators(self, obj):
    """Return the separator characters of an <mfenced> (default ',')."""
    if not self.isMathFenced(obj):
        return ['']
    try:
        attrs = dict(attr.split(':', 1) for attr in obj.getAttributes())
    except:
        return ['']
    return list(attrs.get('separators', ','))
def getMathFences(self, obj):
    """Return [open, close] fence characters of an <mfenced> (default parens)."""
    if not self.isMathFenced(obj):
        return ['', '']
    try:
        attrs = dict(attr.split(':', 1) for attr in obj.getAttributes())
    except:
        return ['', '']
    return [attrs.get('open', '('), attrs.get('close', ')')]
def getMathNestingLevel(self, obj, test=None):
    """Return how many ancestors of obj satisfy test (cached per object).

    By default, counts ancestors sharing obj's tag, e.g. nested fractions.
    """
    rv = self._mathNestingLevel.get(hash(obj))
    if rv is not None:
        return rv

    if not test:
        test = lambda x: self._getTag(x) == self._getTag(obj)

    rv = -1
    ancestor = obj
    while ancestor:
        ancestor = pyatspi.findAncestor(ancestor, test)
        rv += 1

    self._mathNestingLevel[hash(obj)] = rv
    return rv
def filterContentsForPresentation(self, contents, inferLabels=False):
    """Filter out of contents anything that should not be presented."""
    def _include(x):
        obj, start, end, string = x
        if not obj:
            return False

        # Empty text blocks, anchors, zero-size empties, off-screen
        # labels, useless images, and labels for other content are dropped.
        if (self.isTextBlockElement(obj) and not string.strip()) \
           or self.isAnchor(obj) \
           or (self.hasNoSize(obj) and not string.strip()) \
           or self.isOffScreenLabel(obj) \
           or self.isUselessImage(obj) \
           or self.isLabellingContents(x, contents):
            return False

        widget = self.isInferredLabelForContents(x, contents)
        alwaysFilter = [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]
        if widget and (inferLabels or widget.getRole() in alwaysFilter):
            return False

        return True

    # A single item is always presented as-is.
    if len(contents) == 1:
        return contents

    return list(filter(_include, contents))
def needsSeparator(self, lastChar, nextChar):
    """Decide whether a spoken separator belongs between two characters."""
    if lastChar.isspace() or nextChar.isspace():
        return False

    opening = ("(", "[", "{", "<")
    closing = (".", "?", "!", ":", ",", ";", ")", "]", "}", ">")

    if lastChar in closing or nextChar in opening:
        return True
    if lastChar in opening or nextChar in closing:
        return False

    # Plain alphanumeric boundaries get a separator.
    return lastChar.isalnum()
def supportsSelectionAndTable(self, obj):
    """Return True if obj implements both the Table and Selection interfaces."""
    ifaces = pyatspi.listInterfaces(obj)
    return 'Selection' in ifaces and 'Table' in ifaces
def isGridDescendant(self, obj):
    """Return True if obj is inside a selectable table / grid (cached)."""
    if not obj:
        return False

    cached = self._isGridDescendant.get(hash(obj))
    if cached is not None:
        return cached

    ancestor = pyatspi.findAncestor(obj, self.supportsSelectionAndTable)
    rv = ancestor is not None
    self._isGridDescendant[hash(obj)] = rv
    return rv
def isLayoutOnly(self, obj):
    """Return True if obj exists purely for layout (cached).

    Math content is never considered layout-only.
    """
    if not obj:
        return False

    cached = self._isLayoutOnly.get(hash(obj))
    if cached is not None:
        return cached

    rv = False if self.isMath(obj) else super().isLayoutOnly(obj)
    self._isLayoutOnly[hash(obj)] = rv
    return rv
def isOffScreenLabel(self, obj):
    """Return True if obj labels something but is positioned off screen (cached)."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isOffScreenLabel.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    isLabelFor = lambda x: x.getRelationType() == pyatspi.RELATION_LABEL_FOR
    try:
        relationSet = obj.getRelationSet()
    except:
        pass
    else:
        relations = list(filter(isLabelFor, relationSet))
        if relations:
            try:
                text = obj.queryText()
                end = text.characterCount
            except:
                end = 1
            x, y, width, height = self.getExtents(obj, 0, end)
            # Negative coordinates are the common off-screen-labelling trick.
            if x < 0 or y < 0:
                rv = True

    self._isOffScreenLabel[hash(obj)] = rv
    return rv
def isDetachedDocument(self, obj):
    """Return True if obj is a document with no parent (i.e. detached)."""
    docRoles = [pyatspi.ROLE_DOCUMENT_FRAME, pyatspi.ROLE_DOCUMENT_WEB]
    if (obj and obj.getRole() in docRoles):
        if obj.parent is None:
            # Fix: corrected "detatched" -> "detached" in the debug message.
            msg = "WEB: %s is a detached document" % obj
            debug.println(debug.LEVEL_INFO, msg)
            return True

    return False
def iframeForDetachedDocument(self, obj, root=None):
    """Return the iframe under root that claims obj as a child, or None."""
    root = root or self.documentFrame()
    isIframe = lambda x: x and x.getRole() == pyatspi.ROLE_INTERNAL_FRAME
    try:
        iframes = pyatspi.findAllDescendants(root, isIframe)
    except:
        msg = "WEB: Exception getting descendant iframes of %s" % root
        debug.println(debug.LEVEL_INFO, msg)
        return None

    for iframe in iframes:
        if obj in iframe:
            # We won't change behavior, but we do want to log all bogosity.
            self._isBrokenChildParentTree(obj, iframe)

            msg = "WEB: Returning %s as iframe parent of detached %s" % (iframe, obj)
            debug.println(debug.LEVEL_INFO, msg)
            return iframe

    return None
def _isBrokenChildParentTree(self, child, parent):
    """Log and report whether child-in-parent and child.parent disagree.

    Returns True when the accessibility tree is inconsistent, i.e.
    (child in parent) != (child.parent == parent).
    """
    if not (child and parent):
        return False

    try:
        childIsChildOfParent = child in parent
    except:
        msg = "WEB: Exception checking if %s is in %s" % (child, parent)
        debug.println(debug.LEVEL_INFO, msg)
        childIsChildOfParent = False
    else:
        msg = "WEB: %s is child of %s: %s" % (child, parent, childIsChildOfParent)
        debug.println(debug.LEVEL_INFO, msg)

    try:
        parentIsParentOfChild = child.parent == parent
    except:
        msg = "WEB: Exception getting parent of %s" % child
        debug.println(debug.LEVEL_INFO, msg)
        parentIsParentOfChild = False
    else:
        msg = "WEB: %s is parent of %s: %s" % (parent, child, parentIsParentOfChild)
        debug.println(debug.LEVEL_INFO, msg)

    if parentIsParentOfChild != childIsChildOfParent:
        msg = "FAIL: The above is broken and likely needs to be fixed by the toolkit."
        debug.println(debug.LEVEL_INFO, msg)
        return True

    return False
def isInferredLabelForContents(self, content, contents):
    """Return the widget in contents whose inferred label matches content, or None."""
    obj, start, end, string = content
    objs = list(filter(self.shouldInferLabelFor, [x[0] for x in contents]))
    if not objs:
        return None

    for o in objs:
        label, sources = self.inferLabelFor(o)
        if obj in sources and label.strip() == string.strip():
            return o

    return None
def isLabellingContents(self, content, contents):
    """If content is a label for something in contents, return that target.

    content is an [obj, start, end, string] quad. Returns the labelled
    target accessible, or None when content labels nothing here.
    """
    obj, start, end, string = content
    if obj.getRole() != pyatspi.ROLE_LABEL:
        return None

    relationSet = obj.getRelationSet()
    if not relationSet:
        return None

    for relation in relationSet:
        if relation.getRelationType() != pyatspi.RELATION_LABEL_FOR:
            continue
        for i in range(relation.getNTargets()):
            target = relation.getTarget(i)
            # Fix: renamed the inner loop variable; the original shadowed
            # the 'content' parameter.
            for candidate in contents:
                if candidate[0] == target:
                    return target

    return None
def isAnchor(self, obj):
    """Return True if obj is a named anchor: an unfocusable, actionless,
    textless link (cached)."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isAnchor.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    if obj.getRole() == pyatspi.ROLE_LINK \
       and not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not 'Action' in pyatspi.listInterfaces(obj) \
       and not self.queryNonEmptyText(obj):
        rv = True

    self._isAnchor[hash(obj)] = rv
    return rv
def isChromeAlert(self, obj):
    """Return True if obj is an alert living outside document content."""
    if not obj:
        return False
    if obj.getRole() != pyatspi.ROLE_ALERT:
        return False
    return not self.inDocumentContent(obj)
def isTopLevelChromeAlert(self, obj):
    """Return True if obj is a chrome alert parented directly by a frame."""
    if self.isChromeAlert(obj):
        return obj.parent.getRole() == pyatspi.ROLE_FRAME
    return False
def isClickableElement(self, obj):
    """Return True if obj is unfocusable but exposes a "click" action (cached)."""
    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isClickableElement.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    if not obj.getState().contains(pyatspi.STATE_FOCUSABLE) \
       and not self.isFocusModeWidget(obj):
        try:
            action = obj.queryAction()
            names = [action.getName(i) for i in range(action.nActions)]
        except NotImplementedError:
            # No Action interface means not clickable.
            rv = False
        else:
            rv = "click" in names

    self._isClickableElement[hash(obj)] = rv
    return rv
def isLandmark(self, obj):
    """Return True if obj is an ARIA landmark in document content.
    Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isLandmark.get(hash(obj))
    if rv is not None:
        return rv

    if obj.getRole() == pyatspi.ROLE_LANDMARK:
        rv = True
    else:
        # Older toolkits expose landmarks only through the xml-roles
        # object attribute rather than a dedicated role.
        try:
            attrs = dict([attr.split(':', 1) for attr in obj.getAttributes()])
        except:
            attrs = {}
        rv = attrs.get('xml-roles') in settings.ariaLandmarks

    self._isLandmark[hash(obj)] = rv
    return rv
def isLiveRegion(self, obj):
    """Return True if obj is a live region in document content, i.e. it
    carries a 'container-live' object attribute. Cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    key = hash(obj)
    cached = self._isLiveRegion.get(key)
    if cached is not None:
        return cached

    try:
        attributes = dict([attr.split(':', 1) for attr in obj.getAttributes()])
    except:
        attributes = {}

    result = 'container-live' in attributes
    self._isLiveRegion[key] = result
    return result
def isLink(self, obj):
    """Return True if obj should be treated as a link proper (not a
    same-page anchor). Results are cached per object."""

    if not obj:
        return False

    rv = self._isLink.get(hash(obj))
    if rv is not None:
        return rv

    # Previously rv could remain None when obj was not link-like: a
    # non-boolean return, and a permanent cache miss because None looks
    # like an empty cache slot. Initialize to False like the sibling
    # predicates (isAnchor, isClickableElement, ...).
    rv = False
    role = obj.getRole()

    # TODO - JD: This protection won't be needed once we bump dependencies to 2.16.
    try:
        if role == pyatspi.ROLE_STATIC:
            role = pyatspi.ROLE_TEXT
    except:
        pass

    if role == pyatspi.ROLE_LINK and not self.isAnchor(obj):
        rv = True
    elif role == pyatspi.ROLE_TEXT \
         and obj.parent.getRole() == pyatspi.ROLE_LINK \
         and obj.name and obj.name == obj.parent.name:
        # A text leaf carrying its parent link's name stands in for the
        # link itself.
        rv = True

    self._isLink[hash(obj)] = rv
    return rv
def isNonNavigablePopup(self, obj):
    """Return True if obj is a popup (currently: a tooltip) that caret
    navigation should not enter. Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isNonNavigablePopup.get(hash(obj))
    if rv is not None:
        return rv

    # Previously rv was left as None for non-tooltips, which returned a
    # non-boolean and defeated the cache (None is indistinguishable from
    # a miss). Initialize to a boolean like the sibling predicates.
    rv = obj.getRole() == pyatspi.ROLE_TOOL_TIP

    self._isNonNavigablePopup[hash(obj)] = rv
    return rv
def hasUselessCanvasDescendant(self, obj):
    """Return True if obj has at least one canvas descendant which is a
    useless image (see isUselessImage). Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._hasUselessCanvasDescendant.get(hash(obj))
    if rv is not None:
        return rv

    isCanvas = lambda x: x and x.getRole() == pyatspi.ROLE_CANVAS
    try:
        canvases = pyatspi.findAllDescendants(obj, isCanvas)
    except:
        msg = "WEB: Exception getting descendant canvases of %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        rv = False
    else:
        rv = len(list(filter(self.isUselessImage, canvases))) > 0

    self._hasUselessCanvasDescendant[hash(obj)] = rv
    return rv
def isUselessImage(self, obj):
    """Return True if obj is an image/canvas with nothing worth
    presenting: no name, description, children, click action, longdesc,
    meaningful size, or text. Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isUselessImage.get(hash(obj))
    if rv is not None:
        return rv

    # Assume useless until some evidence of usefulness is found.
    rv = True
    if obj.getRole() not in [pyatspi.ROLE_IMAGE, pyatspi.ROLE_CANVAS]:
        rv = False
    if rv and (obj.name or obj.description or obj.childCount):
        rv = False
    if rv and (self.isClickableElement(obj) or self.hasLongDesc(obj)):
        rv = False
    if rv and obj.parent.getRole() == pyatspi.ROLE_LINK:
        # An image inside a real (non-javascript) link is the link body.
        uri = self.uri(obj.parent)
        if uri and not uri.startswith('javascript'):
            rv = False
    if rv and 'Image' in pyatspi.listInterfaces(obj):
        image = obj.queryImage()
        if image.imageDescription:
            rv = False
        else:
            # Larger images are presumably content even if unnamed.
            width, height = image.getImageSize()
            if width > 25 and height > 25:
                rv = False
    if rv and 'Text' in pyatspi.listInterfaces(obj):
        rv = self.queryNonEmptyText(obj) is None

    self._isUselessImage[hash(obj)] = rv
    return rv
def isParentOfNullChild(self, obj):
    """Return True if obj claims children but obj[0] is None, indicating
    a broken accessibility tree. Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._isParentOfNullChild.get(hash(obj))
    if rv is not None:
        return rv

    rv = False
    try:
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        childCount = 0
    if childCount and obj[0] is None:
        msg = "ERROR: %s reports %i children, but obj[0] is None" % (obj, childCount)
        debug.println(debug.LEVEL_INFO, msg)
        rv = True

    self._isParentOfNullChild[hash(obj)] = rv
    return rv
def hasExplicitName(self, obj):
    """Return True if the author explicitly named obj, as reported via
    the 'explicit-name' object attribute. Results are cached."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    key = hash(obj)
    cached = self._hasExplicitName.get(key)
    if cached is not None:
        return cached

    try:
        pairs = [attr.split(':', 1) for attr in obj.getAttributes()]
        attributes = dict(pairs)
    except:
        attributes = {}

    result = attributes.get('explicit-name') == 'true'
    self._hasExplicitName[key] = result
    return result
def hasLongDesc(self, obj):
    """Return True if obj exposes a 'showlongdesc' action, i.e. the
    image has a long description. Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    key = hash(obj)
    cached = self._hasLongDesc.get(key)
    if cached is not None:
        return cached

    try:
        action = obj.queryAction()
    except NotImplementedError:
        result = False
    else:
        names = [action.getName(i) for i in range(action.nActions)]
        result = "showlongdesc" in names

    self._hasLongDesc[key] = result
    return result
def inferLabelFor(self, obj):
    """Return the (label, sources) guessed for obj, consulting the cache
    first. Returns (None, []) when no label should be inferred."""

    if not self.shouldInferLabelFor(obj):
        return None, []

    key = hash(obj)
    cached = self._inferredLabels.get(key)
    if cached is not None:
        return cached

    result = self._script.labelInference.infer(obj, False)
    self._inferredLabels[key] = result
    return result
def shouldInferLabelFor(self, obj):
    """Return True if we should try to guess a label for obj: an
    unnamed form field in document content with no displayed label."""

    try:
        name = obj.name
    except:
        msg = "WEB: Exception getting name for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
    else:
        # An author-provided name wins; no inference needed.
        if name:
            return False

    if self._script.inSayAll():
        return False

    if not self.inDocumentContent():
        return False

    try:
        role = obj.getRole()
    except:
        msg = "WEB: Exception getting role for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return False

    # TODO - JD: This is private.
    # During caret navigation, infer labels only for radio buttons and
    # check boxes; other fields present their context differently.
    if self._script._lastCommandWasCaretNav \
       and role not in [pyatspi.ROLE_RADIO_BUTTON, pyatspi.ROLE_CHECK_BOX]:
        return False

    # Only these form-field roles are candidates for label inference.
    roles = [pyatspi.ROLE_CHECK_BOX,
             pyatspi.ROLE_COMBO_BOX,
             pyatspi.ROLE_ENTRY,
             pyatspi.ROLE_LIST_BOX,
             pyatspi.ROLE_PASSWORD_TEXT,
             pyatspi.ROLE_RADIO_BUTTON]
    if role not in roles:
        return False

    # An explicitly-associated, displayed label makes guessing moot.
    if self.displayedLabel(obj):
        return False

    return True
def isSpinnerEntry(self, obj):
    """Return True if obj is the editable text child of a spin button."""

    if not self.inDocumentContent(obj):
        return False

    # TODO - JD: Ideally, things that look and act like spinners (such number inputs)
    # would look and act like platform native spinners. That's not true for Gecko. And
    # the only thing that's funkier is what we get from WebKitGtk. Try to at least get
    # the two engines into alignment before migrating Epiphany support to the web script.
    isEditable = obj.getState().contains(pyatspi.STATE_EDITABLE)
    return isEditable and obj.parent.getRole() == pyatspi.ROLE_SPIN_BUTTON
def eventIsSpinnerNoise(self, event):
    """Return True if event is a text change in a spinner entry caused
    by Up/Down value stepping rather than typing."""

    if not event.type.startswith("object:text-changed"):
        return False
    if not self.isSpinnerEntry(event.source):
        return False

    lastKey, mods = self.lastKeyAndModifiers()
    return lastKey in ["Down", "Up"]
def treatEventAsSpinnerValueChange(self, event):
    """Return True if a caret-moved event in a spinner entry should be
    presented as a value change (Up/Down with the caret in that entry)."""

    if not event.type.startswith("object:text-caret-moved"):
        return False
    if not self.isSpinnerEntry(event.source):
        return False

    lastKey, mods = self.lastKeyAndModifiers()
    if lastKey not in ["Down", "Up"]:
        return False

    obj, offset = self.getCaretContext()
    return event.source == obj
def eventIsStatusBarNoise(self, event):
    """Return True if event is a text or name change on the app status
    bar (browser chrome, not document content), which is not presented."""

    if self.inDocumentContent(event.source):
        return False

    eType = event.type
    if eType.startswith("object:text-") or eType.endswith("accessible-name"):
        try:
            role = event.source.getRole()
        except:
            msg = "WEB: Exception getting role for %s" % event.source
            debug.println(debug.LEVEL_INFO, msg)
        else:
            return role == pyatspi.ROLE_STATUS_BAR

    return False
def eventIsAutocompleteNoise(self, event):
    """Return True if a text event in an editable field is merely a side
    effect of autocomplete navigation, not user typing."""

    if not self.inDocumentContent(event.source):
        return False

    # Helpers identifying where the caret context currently lives.
    isListBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_LIST_BOX
    isMenuItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_MENU
    isComboBoxItem = lambda x: x and x.parent and x.parent.getRole() == pyatspi.ROLE_COMBO_BOX

    if event.source.getState().contains(pyatspi.STATE_EDITABLE) \
       and event.type.startswith("object:text-"):
        obj, offset = self.getCaretContext()
        # The caret is in the suggestion list, not the entry itself.
        if isListBoxItem(obj) or isMenuItem(obj):
            return True

        # Arrowing through combo box suggestions rewrites the entry text.
        if obj == event.source and isComboBoxItem(obj):
            lastKey, mods = self.lastKeyAndModifiers()
            if lastKey in ["Down", "Up"]:
                return True

    return False
def eventIsChromeAutocompleteNoise(self, event):
    """Return True if a selection event from the browser chrome's
    autocomplete popup should be ignored (focus remains in the entry
    and the user is typing, not arrowing through suggestions)."""

    if self.inDocumentContent(event.source):
        return False

    selection = ["object:selection-changed", "object:state-changed:selected"]
    if not event.type in selection:
        return False

    try:
        focusRole = orca_state.locusOfFocus.getRole()
        focusState = orca_state.locusOfFocus.getState()
    except:
        msg = "WEB: Exception getting role and state for %s" % orca_state.locusOfFocus
        debug.println(debug.LEVEL_INFO, msg)
        return False

    try:
        role = event.source.getRole()
    except:
        msg = "WEB: Exception getting role for %s" % event.source
        debug.println(debug.LEVEL_INFO, msg)
        return False

    # Selection changes in the popup menu while the focused entry is
    # being typed into (no Up/Down navigation) are noise.
    if role in [pyatspi.ROLE_MENU, pyatspi.ROLE_MENU_ITEM] \
       and focusRole == pyatspi.ROLE_ENTRY \
       and focusState.contains(pyatspi.STATE_FOCUSED):
        lastKey, mods = self.lastKeyAndModifiers()
        if lastKey not in ["Down", "Up"]:
            return True

    return False
def textEventIsDueToInsertion(self, event):
    """Return True if a text event in the focused, editable source was
    caused by the user typing a printable character."""

    if not event.type.startswith("object:text-"):
        return False

    source = event.source
    if not self.inDocumentContent(source):
        return False
    if not source.getState().contains(pyatspi.STATE_EDITABLE):
        return False
    if not source == orca_state.locusOfFocus:
        return False

    if isinstance(orca_state.lastInputEvent, input_event.KeyboardEvent):
        keyEvent = orca_state.lastNonModifierKeyEvent
        return keyEvent and keyEvent.isPrintableKey()

    return False
def textEventIsForNonNavigableTextObject(self, event):
    """Return True if event is a text event on an object that we treat
    as a single whole rather than navigating within it."""

    if event.type.startswith("object:text-"):
        return self._treatTextObjectAsWhole(event.source)
    return False
# TODO - JD: As an experiment, we're stopping these at the event manager.
# If that works, this can be removed.
def eventIsEOCAdded(self, event):
    """Return True if event is the insertion of an embedded object
    character into document content."""

    if not self.inDocumentContent(event.source):
        return False
    if not event.type.startswith("object:text-changed:insert"):
        return False
    return self.EMBEDDED_OBJECT_CHARACTER in event.any_data
def caretMovedToSamePageFragment(self, event):
    """Return True if a caret-moved event resulted from following a link
    to a named fragment on the current page (link URI == document URI)."""

    if not event.type.startswith("object:text-caret-moved"):
        return False

    linkURI = self.uri(orca_state.locusOfFocus)
    return linkURI == self.documentFrameURI()
@staticmethod
def getHyperlinkRange(obj):
    """Return (startIndex, endIndex) from obj's hyperlink interface, or
    (-1, -1) when the interface is missing or the query fails."""

    try:
        hyperlink = obj.queryHyperlink()
        return hyperlink.startIndex, hyperlink.endIndex
    except NotImplementedError:
        msg = "WEB: %s does not implement the hyperlink interface" % obj
        debug.println(debug.LEVEL_INFO, msg)
    except:
        msg = "WEB: Exception getting hyperlink indices for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)

    return -1, -1
def characterOffsetInParent(self, obj):
    """Return obj's starting character offset in its parent's text, or
    -1 when that cannot be determined."""

    return self._rangeInParentWithLength(obj)[0]
def _rangeInParentWithLength(self, obj):
    """Return (start, end, parentTextLength) for obj's hyperlink range
    inside its parent's text, or (-1, -1, 0) when unavailable."""

    if not obj:
        return -1, -1, 0

    parentText = self.queryNonEmptyText(obj.parent)
    if not parentText:
        return -1, -1, 0

    start, end = self.getHyperlinkRange(obj)
    return start, end, parentText.characterCount
@staticmethod
def getChildIndex(obj, offset):
    """Return the index of the child hyperlink at offset, or -1 if the
    hypertext interface is unavailable or the query fails."""

    try:
        hypertext = obj.queryHypertext()
    except NotImplementedError:
        debug.println(debug.LEVEL_INFO,
                      "WEB: %s does not implement the hypertext interface" % obj)
        return -1
    except:
        debug.println(debug.LEVEL_INFO,
                      "WEB: Exception querying hypertext interface for %s" % obj)
        return -1

    return hypertext.getLinkIndex(offset)
def getChildAtOffset(self, obj, offset):
    """Return the child object embedded at offset in obj, or None."""

    index = self.getChildIndex(obj, offset)
    if index == -1:
        return None

    try:
        return obj[index]
    except:
        return None
def hasNoSize(self, obj):
    """Return True if obj's extents report zero width or height, or the
    extents query fails. Results are cached per object."""

    if not (obj and self.inDocumentContent(obj)):
        return False

    rv = self._hasNoSize.get(hash(obj))
    if rv is not None:
        return rv

    try:
        extents = obj.queryComponent().getExtents(0)
    except:
        msg = "WEB: Exception getting extents for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        rv = True
    else:
        rv = not (extents.width and extents.height)
        if rv:
            msg = "WEB: %s has no size %s" % (obj, extents)
            debug.println(debug.LEVEL_INFO, msg)

    self._hasNoSize[hash(obj)] = rv
    return rv
def doNotDescendForCaret(self, obj):
    """Return True if caret navigation should treat obj as a leaf and
    not descend into its children."""

    if not obj or self.isZombie(obj):
        return True

    try:
        childCount = obj.childCount
    except:
        msg = "WEB: Exception getting childCount for %s" % obj
        debug.println(debug.LEVEL_INFO, msg)
        return True
    if not childCount or self.isParentOfNullChild(obj):
        return True

    if self.isHidden(obj) or self.isOffScreenLabel(obj):
        return True

    role = obj.getRole()
    # A link with an explicit name (or containing only useless canvases)
    # is presented as a whole rather than descended into.
    if role == pyatspi.ROLE_LINK \
       and (self.hasExplicitName(obj) or self.hasUselessCanvasDescendant(obj)):
        return True

    if self.isTextBlockElement(obj):
        return False

    # Widget containers whose children are navigated with widget
    # semantics, not the caret.
    doNotDescend = [pyatspi.ROLE_COMBO_BOX,
                    pyatspi.ROLE_LIST_BOX,
                    pyatspi.ROLE_MENU_BAR,
                    pyatspi.ROLE_MENU,
                    pyatspi.ROLE_MENU_ITEM,
                    pyatspi.ROLE_PUSH_BUTTON,
                    pyatspi.ROLE_TOGGLE_BUTTON,
                    pyatspi.ROLE_TOOL_BAR,
                    pyatspi.ROLE_TOOL_TIP,
                    pyatspi.ROLE_TREE,
                    pyatspi.ROLE_TREE_TABLE]
    return role in doNotDescend
def _searchForCaretContext(self, obj):
    """Descend from obj following caret offsets to locate the deepest
    (object, offset) caret context, or return (None, -1)."""

    contextObj, contextOffset = None, -1
    while obj:
        try:
            offset = obj.queryText().caretOffset
        except:
            # No (working) text interface: stop the descent here.
            obj = None
        else:
            contextObj, contextOffset = obj, offset
            childIndex = self.getChildIndex(obj, offset)
            if childIndex >= 0 and obj.childCount:
                # The caret sits on an embedded object; descend into it.
                obj = obj[childIndex]
            else:
                break

    if contextObj:
        # Normalize to the next valid caret position at/after the context.
        return self.findNextCaretInOrder(contextObj, max(-1, contextOffset - 1))

    return None, -1
def _getCaretContextViaLocusOfFocus(self):
    """Fall back to the locus of focus for a caret context. Returns
    (obj, caretOffset); offset is 0 when obj has no text interface and
    -1 when querying it fails for any other reason."""

    obj = orca_state.locusOfFocus
    try:
        offset = obj.queryText().caretOffset
    except NotImplementedError:
        return obj, 0
    except:
        return obj, -1

    return obj, offset
def getCaretContext(self, documentFrame=None):
    """Return the (obj, offset) caret context for the given document,
    using the per-document cache and computing/caching it when missing.
    Falls back to the locus of focus if there is no document frame."""

    if not documentFrame or self.isZombie(documentFrame):
        documentFrame = self.documentFrame()

    if not documentFrame:
        return self._getCaretContextViaLocusOfFocus()

    # Contexts are keyed on the frame's parent so they survive document
    # frame replacement.
    context = self._caretContexts.get(hash(documentFrame.parent))
    if context:
        return context

    obj, offset = self._searchForCaretContext(documentFrame)
    self.setCaretContext(obj, offset, documentFrame)

    return obj, offset
def clearCaretContext(self, documentFrame=None):
    """Forget the cached caret context (and content cache) for the given
    document, defaulting to the current document frame."""

    self.clearContentCache()
    documentFrame = documentFrame or self.documentFrame()
    if documentFrame:
        self._caretContexts.pop(hash(documentFrame.parent), None)
def setCaretContext(self, obj=None, offset=-1, documentFrame=None):
    """Cache (obj, offset) as the caret context for the given document,
    defaulting to the current document frame."""

    documentFrame = documentFrame or self.documentFrame()
    if documentFrame:
        self._caretContexts[hash(documentFrame.parent)] = obj, offset
def findFirstCaretContext(self, obj, offset):
    """Return the first valid (obj, offset) caret context at or after
    the given position, descending into embedded objects as needed."""

    try:
        role = obj.getRole()
    except:
        msg = "WEB: Exception getting first caret context for %s %i" % (obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return None, -1

    # Containers whose own text is just embedded-object characters; the
    # real context lives in the first child.
    lookInChild = [pyatspi.ROLE_LIST,
                   pyatspi.ROLE_INTERNAL_FRAME,
                   pyatspi.ROLE_TABLE,
                   pyatspi.ROLE_TABLE_ROW]
    if role in lookInChild and obj.childCount and not self.treatAsDiv(obj):
        msg = "WEB: First caret context for %s, %i will look in child %s" % (obj, offset, obj[0])
        debug.println(debug.LEVEL_INFO, msg)
        return self.findFirstCaretContext(obj[0], 0)

    text = self.queryNonEmptyText(obj)
    if not text:
        if self.isTextBlockElement(obj) or self.isAnchor(obj):
            # Textless block/anchor: use the context that follows it.
            nextObj, nextOffset = self.nextContext(obj, offset)
            if nextObj:
                msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, nextObj, nextOffset)
                debug.println(debug.LEVEL_INFO, msg)
                return nextObj, nextOffset

        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, 0)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, 0

    if offset >= text.characterCount:
        # Clamp to the end of the text.
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, text.characterCount)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, text.characterCount

    allText = text.getText(0, -1)
    offset = max (0, offset)
    if allText[offset] != self.EMBEDDED_OBJECT_CHARACTER:
        # A plain character: this position is already a valid context.
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, obj, offset)
        debug.println(debug.LEVEL_INFO, msg)
        return obj, offset

    # The character is an embedded object; recurse into that child.
    child = self.getChildAtOffset(obj, offset)
    if not child:
        msg = "WEB: First caret context for %s, %i is %s, %i" % (obj, offset, None, -1)
        debug.println(debug.LEVEL_INFO, msg)
        return None, -1

    return self.findFirstCaretContext(child, 0)
def findNextCaretInOrder(self, obj=None, offset=-1):
    """Return the next (obj, offset) caret position in document order
    after the given position, or (None, -1) at the end of the document."""

    if not obj:
        obj, offset = self.getCaretContext()

    if not obj or not self.inDocumentContent(obj):
        return None, -1

    if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
        text = self.queryNonEmptyText(obj)
        if text:
            # Scan forward through the text, descending into embedded
            # objects and stopping on the first real character.
            allText = text.getText(0, -1)
            for i in range(offset + 1, len(allText)):
                child = self.getChildAtOffset(obj, i)
                if child and not self.isZombie(child) and not self.isAnchor(child) \
                   and not self.isUselessImage(child):
                    return self.findNextCaretInOrder(child, -1)
                if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                    return obj, i
        elif not self.doNotDescendForCaret(obj) and obj.childCount:
            return self.findNextCaretInOrder(obj[0], -1)
        elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj) \
             and not self.isUselessImage(obj) and not self.isParentOfNullChild(obj):
            # A leaf widget is itself a caret position.
            return obj, 0

    # If we're here, start looking up the tree, up to the document.
    documentFrame = self.documentFrame()
    if self.isSameObject(obj, documentFrame):
        return None, -1

    while obj.parent:
        if self.isDetachedDocument(obj.parent):
            # Skip over detached documents to their hosting iframe.
            obj = self.iframeForDetachedDocument(obj.parent)
            continue

        parent = obj.parent
        if self.isZombie(parent):
            # Try to find a live replacement for a defunct parent.
            replicant = self.findReplicant(self.documentFrame(), parent)
            if replicant and not self.isZombie(replicant):
                parent = replicant
            elif parent.parent:
                obj = parent
                continue
            else:
                break

        start, end, length = self._rangeInParentWithLength(obj)
        if start + 1 == end and 0 <= start < end <= length:
            # obj occupies one character in its parent; resume there.
            return self.findNextCaretInOrder(parent, start)

        index = obj.getIndexInParent() + 1
        try:
            parentChildCount = parent.childCount
        except:
            msg = "WEB: Exception getting childCount for %s" % parent
            debug.println(debug.LEVEL_INFO, msg)
        else:
            if 0 < index < parentChildCount:
                # Continue with the next sibling.
                return self.findNextCaretInOrder(parent[index], -1)

        obj = parent

    return None, -1
def findPreviousCaretInOrder(self, obj=None, offset=-1):
    """Return the previous (obj, offset) caret position in document
    order before the given position, or (None, -1) at the start."""

    if not obj:
        obj, offset = self.getCaretContext()

    if not obj or not self.inDocumentContent(obj):
        return None, -1

    if not (self.isHidden(obj) or self.isOffScreenLabel(obj) or self.isNonNavigablePopup(obj)):
        text = self.queryNonEmptyText(obj)
        if text:
            # Scan backward through the text, descending into embedded
            # objects and stopping on the first real character.
            allText = text.getText(0, -1)
            if offset == -1 or offset > len(allText):
                offset = len(allText)
            for i in range(offset - 1, -1, -1):
                child = self.getChildAtOffset(obj, i)
                if child and not self.isZombie(child) and not self.isAnchor(child) \
                   and not self.isUselessImage(child):
                    return self.findPreviousCaretInOrder(child, -1)
                if allText[i] != self.EMBEDDED_OBJECT_CHARACTER:
                    return obj, i
        elif not self.doNotDescendForCaret(obj) and obj.childCount:
            return self.findPreviousCaretInOrder(obj[obj.childCount - 1], -1)
        elif offset < 0 and not self.isTextBlockElement(obj) and not self.hasNoSize(obj) \
             and not self.isUselessImage(obj) and not self.isParentOfNullChild(obj):
            # A leaf widget is itself a caret position.
            return obj, 0

    # If we're here, start looking up the tree, up to the document.
    documentFrame = self.documentFrame()
    if self.isSameObject(obj, documentFrame):
        return None, -1

    while obj.parent:
        if self.isDetachedDocument(obj.parent):
            # Skip over detached documents to their hosting iframe.
            obj = self.iframeForDetachedDocument(obj.parent)
            continue

        parent = obj.parent
        if self.isZombie(parent):
            # Try to find a live replacement for a defunct parent.
            replicant = self.findReplicant(self.documentFrame(), parent)
            if replicant and not self.isZombie(replicant):
                parent = replicant
            elif parent.parent:
                obj = parent
                continue
            else:
                break

        start, end, length = self._rangeInParentWithLength(obj)
        if start + 1 == end and 0 <= start < end <= length:
            # obj occupies one character in its parent; resume there.
            return self.findPreviousCaretInOrder(parent, start)

        index = obj.getIndexInParent() - 1
        try:
            parentChildCount = parent.childCount
        except:
            msg = "WEB: Exception getting childCount for %s" % parent
            debug.println(debug.LEVEL_INFO, msg)
        else:
            if 0 <= index < parentChildCount:
                # Continue with the previous sibling.
                return self.findPreviousCaretInOrder(parent[index], -1)

        obj = parent

    return None, -1
def handleAsLiveRegion(self, event):
    """Return True if event should be presented as a live region update:
    live-region inference is enabled and the source is a live region."""

    if _settingsManager.getSetting('inferLiveRegions'):
        return self.isLiveRegion(event.source)
    return False
def getPageSummary(self, obj):
    """Return [headings, forms, tables, visitedLinks, unvisitedLinks,
    percentRead] for the document containing obj. percentRead is not
    computed here and is always None."""

    docframe = self.documentFrame(obj)
    col = docframe.queryCollection()
    headings = 0
    forms = 0
    tables = 0
    vlinks = 0
    uvlinks = 0
    percentRead = None

    # Match any object bearing one of these roles, regardless of state.
    stateset = pyatspi.StateSet()
    roles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK, pyatspi.ROLE_TABLE,
             pyatspi.ROLE_FORM]
    rule = col.createMatchRule(stateset.raw(), col.MATCH_NONE,
                               "", col.MATCH_NONE,
                               roles, col.MATCH_ANY,
                               "", col.MATCH_NONE,
                               False)
    matches = col.getMatches(rule, col.SORT_ORDER_CANONICAL, 0, True)
    col.freeMatchRule(rule)
    # NOTE(review): this loop reuses the name of the obj parameter.
    for obj in matches:
        role = obj.getRole()
        if role == pyatspi.ROLE_HEADING:
            headings += 1
        elif role == pyatspi.ROLE_FORM:
            forms += 1
        elif role == pyatspi.ROLE_TABLE and not self.isLayoutOnly(obj):
            # Layout-only tables are presentation, not data.
            tables += 1
        elif role == pyatspi.ROLE_LINK:
            if obj.getState().contains(pyatspi.STATE_VISITED):
                vlinks += 1
            else:
                uvlinks += 1

    return [headings, forms, tables, vlinks, uvlinks, percentRead]
|
import time
import os
import errno
import uuid
import math
import pandas as pd
import numpy as np
import collections
import natsort
import uuid
import shutil
import itertools
import json
from itertools import combinations
import matplotlib
from fba_tools.fba_toolsClient import fba_tools
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from Workspace.WorkspaceClient import Workspace as Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
from KBaseReport.KBaseReportClient import KBaseReport
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    prefix = '\n' if prefix_newline else ''
    timestamp = '{0:.2f}'.format(time.time())
    print(prefix + timestamp + ': ' + str(message))
class MutualInfoUtil:
def __init__(self, config):
    """Initialize service clients and scratch space from the SDK config.

    config keys used: workspace-url, SDK_CALLBACK_URL, KB_AUTH_TOKEN,
    shock-url, scratch.
    """
    self.ws_url = config["workspace-url"]
    self.callback_url = config['SDK_CALLBACK_URL']
    self.token = config['KB_AUTH_TOKEN']
    self.shock_url = config['shock-url']
    # Client for moving files to/from shock.
    self.dfu = DataFileUtil(self.callback_url)
    # Authenticated workspace client.
    self.ws = Workspace(self.ws_url, token=self.token)
    # Local scratch directory for intermediate files.
    self.scratch = config['scratch']
def _mkdir_p(self, path):
    """
    _mkdir_p: make directory for given path (no-op if it already exists
    or if path is empty)
    """
    if not path:
        return
    try:
        os.makedirs(path)
    except OSError as exc:
        # Re-raise anything other than "already exists as a directory".
        if not (exc.errno == errno.EEXIST and os.path.isdir(path)):
            raise
def _validate_run_flux_mutual_information_analysis_params(self, params):
    """
    _validate_run_flux_mutual_information_analysis_params:
        validates params passed to run_flux_mutual_information_analysis method
    """
    log('start validating validate_run_flux_mutual_information_analysis params')

    # check for required parameters
    required = ('fbamodel_id', 'compounds', 'media_id', 'workspace_name')
    missing = [p for p in required if p not in params]
    if missing:
        raise ValueError('"{}" parameter is required, but missing'.format(missing[0]))
def _get_file_from_ws(self, ref):
    """Fetch a single object (data + info) from the workspace by ref."""
    try:
        result = self.ws.get_objects2({'objects': [{'ref': ref}]})
        return result['data'][0]
    except Exception as e:
        raise ValueError(
            'Unable to get object from workspace: (' +
            ref + ')' + str(e))
def _make_media_files(self, ws_name, base, compounds):
    """
    Build and store media objects for each combination of compound added to the base media.

    :param ws_name: workspace to save the generated (hidden) media into
    :param base: The base media file (object name, or a full ref containing '/')
    :param compounds: the set of compound to test
    :return: (media_ids, media_matrix, myuuid) where media_matrix is a
        header row followed by one [id, 0/1 per compound] row per media,
        and myuuid prefixes the names of the saved media objects
    """
    # A ref containing '/' is already qualified; otherwise qualify it.
    ref = ws_name + "/" + base
    if base.find("/") != -1:
        ref = base
    output = self._get_file_from_ws(ref)
    base_media = output['data']
    base = output['info'][1]

    # Unique prefix so helper media from different runs don't collide.
    myuuid = str(uuid.uuid4())
    media_ids = [base]
    new_media_list = []
    media_matrix = [[""] + compounds]
    # BUGFIX: was append([[base] + ...]) which nested the base-media row
    # one list deeper than every other row, corrupting the matrix shape.
    media_matrix.append([base] + [0] * len(compounds))
    for n_comp in range(1, len(compounds) + 1):
        for combo in combinations(compounds, n_comp):
            new_media_id = base + '_v%s' % len(media_matrix)
            media_ids.append(new_media_id)
            media_matrix.append(
                [new_media_id] + [1 if comp in combo else 0 for comp in compounds])
            new_media = deepcopy(base_media)
            new_media['id'] = new_media_id
            new_media['name'] = new_media_id
            for new_comp in combo:
                new_media['mediacompounds'].append(
                    {'compound_ref': '48/1/1/compounds/id/%s' % new_comp.split('_')[0],
                     'concentration': 1.0, 'maxFlux': 1000, 'minFlux': -1000})
            new_media_list.append(new_media)

    print("Made %s Media Files" % (len(media_ids) - 1))
    info = self.ws.save_objects(
        {'workspace': ws_name,
         "objects": [{
             "hidden": 1,
             "type": "KBaseBiochem.Media",
             "data": media,
             "name": myuuid + "-" + media['name']
         } for media in new_media_list]
        })
    # print() call (was the Python-2 statement `print info`, inconsistent
    # with the print() calls used elsewhere in this module).
    print(info)
    return media_ids, media_matrix, myuuid
def _run_fba(self, workspace_name, media_id_list, fbamodel_id, myuuid, base_media):
    """Run FBA across all generated media and dump the results to CSVs.

    :return: [biomass_path, secretion_path, flux_path,
              full_secretion_path, full_flux_path]

    The "full" files hold raw flux values; flux.csv/secretion.csv hold
    discretized values (0 when |flux| < 1e-7; otherwise 1, or -1 for
    negative secretion fluxes).
    """
    fba_tool_obj = fba_tools(self.callback_url)

    # The base media was saved under its own name; generated media were
    # saved with this run's uuid prefix (see _make_media_files).
    new_media_list = []
    for media in media_id_list:
        if media == base_media:
            new_media_list.append(workspace_name + "/" + media)
        else:
            new_media_list.append(workspace_name + "/" + myuuid + "-" + media)

    fba_tool_obj.run_flux_balance_analysis({
        "workspace": workspace_name,
        "fbamodel_id": fbamodel_id,
        "fba_output_id": fbamodel_id + ".mifba",
        "fbamodel_workspace": workspace_name,
        "media_id_list": new_media_list,
        "target_reaction": "bio1",
        "minimize_flux": 1
    })
    output = self.ws.get_objects2({
        'objects': [{
            'ref': workspace_name + "/" + fbamodel_id + '.mifba'
        }]
    })
    fba = output['data'][0]['data']

    # Accumulate each CSV as a list of fragments and join once at the
    # end; the previous repeated `str +=` was quadratic in the number of
    # reactions x media.
    header = "," + ','.join(media_id_list) + "\n"
    biomass_parts = ["FBAs,Biomass\n"]
    secretion_parts = [header]
    full_secretion_parts = [header]
    full_flux_parts = [header]
    flux_parts = [header]

    for i, objective in enumerate(fba['other_objectives']):
        biomass_parts.append(media_id_list[i] + "," + str(objective) + "\n")

    for var in fba['FBAReactionVariables']:
        # (renamed from `id`, which shadowed the builtin)
        rxn_id = var['modelreaction_ref'].split("/").pop()
        flux_parts.append(rxn_id)
        full_flux_parts.append(rxn_id)
        for flux in var['other_values']:
            full_flux_parts.append("," + str(flux))
            # Treat tiny fluxes as numerically zero.
            flux_parts.append(",0" if abs(flux) < 1e-7 else ",1")
        flux_parts.append("\n")
        full_flux_parts.append("\n")

    for var in fba['FBACompoundVariables']:
        cpd_id = var['modelcompound_ref'].split("/").pop()
        secretion_parts.append(cpd_id)
        full_secretion_parts.append(cpd_id)
        for flux in var['other_values']:
            full_secretion_parts.append("," + str(flux))
            if abs(flux) < 1e-7:
                secretion_parts.append(",0")
            elif flux < 0:
                secretion_parts.append(",-1")
            else:
                secretion_parts.append(",1")
        secretion_parts.append("\n")
        full_secretion_parts.append("\n")

    output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
    self._mkdir_p(output_directory)
    biomass_path = os.path.join(output_directory, 'biomass.csv')
    secretion_path = os.path.join(output_directory, 'secretion.csv')
    flux_path = os.path.join(output_directory, 'flux.csv')
    full_secretion_path = os.path.join(output_directory, 'full_secretion.csv')
    full_flux_path = os.path.join(output_directory, 'full_flux.csv')
    with open(biomass_path, 'w') as biomass_f:
        biomass_f.write(''.join(biomass_parts))
    with open(secretion_path, 'w') as secretion_f:
        secretion_f.write(''.join(secretion_parts))
    with open(flux_path, 'w') as flux_f:
        flux_f.write(''.join(flux_parts))
    with open(full_secretion_path, 'w') as full_secretion_f:
        full_secretion_f.write(''.join(full_secretion_parts))
    with open(full_flux_path, 'w') as full_flux_f:
        full_flux_f.write(''.join(full_flux_parts))
    return [biomass_path, secretion_path, flux_path, full_secretion_path, full_flux_path]
def _generate_html_report(self, result_directory, mutual_info_dict):
    """
    _generate_html_report: generate html summary report
    """
    log('start generating html report')

    output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
    self._mkdir_p(output_directory)
    result_file_path = os.path.join(output_directory, 'mutual_information_report.html')
    # The MI plot must sit next to the html so the report can embed it.
    shutil.copy(os.path.join(result_directory, 'MI_plot.png'),
                os.path.join(output_directory, 'MI_plot.png'))

    # Assemble the overview table, one row per compound combination.
    pieces = [
        '<table><tr><th>Mutual Information for various chemical compound combinations',
        ' Object</th></td>',
        '<tr><th>Input Chemical Compound Combination</th>',
        '<th>Mutual Information (in Bits)</th>',
        '</tr>',
    ]
    for combination, bits in mutual_info_dict.items():
        pieces.append('<tr><td>{}</td><td>{}</td></tr>'.format(combination, bits))
    pieces.append('</table>')
    overview_content = ''.join(pieces)

    template_path = os.path.join(os.path.dirname(__file__), 'report_template.html')
    with open(result_file_path, 'w') as result_file:
        with open(template_path, 'r') as report_template_file:
            report_template = report_template_file.read()
            report_template = report_template.replace('<p>Overview_Content</p>',
                                                      overview_content)
            result_file.write(report_template)

    report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                              'pack': 'zip'})['shock_id']
    return [{'shock_id': report_shock_id,
             'name': os.path.basename(result_file_path),
             'label': os.path.basename(result_file_path),
             'description': 'HTML summary report for Mutual Information App'}]
def _generate_report(self, result_directory, mutual_info_dict, params, paths):
    """
    _generate_report: generate summary report

    :param result_directory: directory holding index.html assets to upload
    :param mutual_info_dict: computed MI values (unused here; kept for
        interface compatibility)
    :param params: must contain 'workspace_name'
    :param paths: [biomass, secretion, flux, full_secretion, full_flux]
        csv paths, in the order returned by _run_fba
    :return: {'report_name': ..., 'report_ref': ...}
    """
    uuidStr = str(uuid.uuid4())

    self._mkdir_p(result_directory + '/' + uuidStr)
    shutil.copy('/kb/module/data/index.html', result_directory + '/' + uuidStr + '/index.html')

    # DataFileUtils to shock
    report_shock_id = self.dfu.file_to_shock({'file_path': result_directory + '/' + uuidStr,
                                              'make_handler': 0,
                                              'pack': 'zip'})['shock_id']
    report_file = {'name': 'index.html',
                   'description': 'the report',
                   'shock_id': report_shock_id}

    # BUGFIX: paths arrive in _run_fba's order; the previous indices
    # attached paths[1] (secretion) as flux_file, paths[2] (flux) as
    # full_flux_file, paths[3] (full_secretion) as secretion_file and
    # paths[4] (full_flux) as full_secretion_file. Indices now match
    # the names to the actual files.
    biomass_file = {'name': 'biomass_file.csv',
                    'description': 'biomass_file',
                    'path': paths[0]}
    flux_file = {'name': 'flux_file.csv',
                 'description': 'flux_file',
                 'path': paths[2]}
    full_flux_file = {'name': 'full_flux_file.csv',
                      'description': 'full_flux_file',
                      'path': paths[4]}
    secretion_file = {'name': 'secretion_file.csv',
                      'description': 'secretion_file',
                      'path': paths[1]}
    full_secretion_file = {'name': 'full_secretion_file.csv',
                           'description': 'full_secretion_file',
                           'path': paths[3]}

    log('creating report')
    report_params = {'message': '',
                     'workspace_name': params.get('workspace_name'),
                     'html_links': [report_file],
                     'file_links': [biomass_file, flux_file, full_flux_file,
                                    secretion_file, full_secretion_file],
                     'direct_html_link_index': 0,
                     'html_window_height': 333,
                     'report_object_name': 'MutualInfomation_report_' + uuidStr}
    kbase_report_client = KBaseReport(self.callback_url)
    output = kbase_report_client.create_extended_report(report_params)
    return {'report_name': output['name'], 'report_ref': output['ref']}
    def _generate_mutual_info(self, media_matrix, fba_file, mi_options):
        """
        Compute mutual information between compound combinations and FBA
        outcomes, write 'pdata.json' plot data, and return an OrderedDict
        mapping compound-combination keys to mutual information (bits).

        :param media_matrix: pandas DataFrame; first column is the media id,
            remaining columns are 0/1 compound indicators (mutated in place)
        :param fba_file: path to the binary flux CSV produced by _run_fba
        :param mi_options: currently unused -- TODO confirm before removing
        """
        df1 = pd.read_csv(fba_file)
        # NOTE(review): as_matrix() result is discarded (and the API is
        # deprecated in modern pandas); this call appears to be a no-op.
        df1.as_matrix()
        #----Input validation of Media/FBAs with Binary Matrix FBAs------
        # 1.0 Number of rows in Media.csv file = (Number of columns -1)
        # 1.0. If they are different: report mismatched number of FBAs in media and binary matrix.
        # 1.1 Check whether the elements in Media.csv file contain only binary values (i.e. 0 and 1)
        # 1.1. If the elements are different: report not appropriate input values
        # 1.2 Check whether the compounds in Media.csv file match with number of FBAs
        # 1.2. If the compounds are different from number of FBAs: report not appropriate input values
        print media_matrix
        s_df1 = df1.shape
        s_df2 = media_matrix.shape
        Temp_df2 = np.array(media_matrix.values)
        # Create matrix with only the elements: drop the first (id) column, keep all rows
        Temp_df2 = Temp_df2[0:,1:]
        Bin_val_check = np.array_equal(Temp_df2, Temp_df2.astype(bool))
        num_compounds = (s_df2[1])-1
        # NOTE(review): validation only prints; it does not raise, so bad
        # input still falls through to the computation below.
        if ((s_df1[1]-1) != s_df2[0]) or (Bin_val_check != True) or (int(math.log(s_df2[0],2)) != num_compounds):
            print ('invalid input values')
        #-----All possible combination of the chemical compounds----------------------
        # 2.0 Separating m0 from rest of the labels
        Temp1_df2 = media_matrix
        cols = Temp1_df2.columns
        # Replace each 1 with its column label (mutates media_matrix in place)
        for i in range(1,len(cols)):
            Temp1_df2.loc[Temp1_df2[cols[i]] == 1 , cols[i]] = cols[i]
        print Temp1_df2
        # 2.1 Creating a dictionary for all FBAs except m0
        print len(Temp1_df2)
        mydict = {}
        for x in range(0, len(Temp1_df2)):
            for i in range(1,s_df2[1]):
                currentvalue = Temp1_df2.iloc[x,i]
                currentid = Temp1_df2.iloc[x,0]
                # duplicate assignment kept as in the original
                currentvalue = Temp1_df2.iloc[x,i]
                mydict.setdefault(currentid,[])
                # collect the column labels present in this media row
                if currentvalue > 0:
                    mydict[currentid].append(currentvalue)
        # Add the first key as m0
        media_0_name = 'm0'
        # NOTE(review): this stores the STRING "['0']", not a list; set(v)
        # below therefore iterates its characters -- verify intended.
        mydict[media_0_name] = "['0']"
        #Sort the keys naturally (m1, m2, ..., m10)
        mydict = collections.OrderedDict(natsort.natsorted(mydict.items()))
        print mydict
        for k,v in mydict.iteritems():
            print k,v
        # List of all compound index combinations (includes the empty set)
        my_combi_list = []
        Compounds_Combi = list(range(1,num_compounds+1))
        for L in range(0, len(Compounds_Combi)+1):
            for subset in itertools.combinations(Compounds_Combi, L):
                my_combi_list.append(list(subset))
        print my_combi_list
        # Created a dictionary where the keys:
        # list of compounds combination
        # values are corresponding FBAs list in df2
        result_dict = {}
        for element in my_combi_list[1:]:
            for k, v in mydict.iteritems():
                if set(v).issubset(set(map(lambda x:str(x), element))):
                    key = ','.join(map(lambda x:str(x), element))
                    if result_dict.get(key):
                        media_list = result_dict[key]
                        media_list.append(k)
                        media_list = list(set(media_list))
                        result_dict.update({key: media_list})
                    else:
                        # m0 (base media) is seeded into every combination
                        result_dict.update({key: [media_0_name, k]})
        print result_dict
        # Created a dictionary where the keys are:
        # list of compounds combination
        # values are compounds combination FBAs with df1 values
        All_Comp_Combi_dic = {}
        for column, value in result_dict.items():
            All_Comp_Combi_dic.update({column : df1.get(value)})
        #To print an item from the All_Comp_Combi_dic
        df = (pd.DataFrame(All_Comp_Combi_dic.items()))
        #print df[0]
        #print df[1][7]
        MI_dict = {}
        for k in range(0, len(df[0])):
            # Collapse duplicate FBA outcome rows/columns for this combination
            drop_rows_df = df[1][k].drop_duplicates(keep="first")
            drop_columns_df = drop_rows_df.T.drop_duplicates(keep="first").T
            remove = []
            removed = {}
            cols = df[1][k].columns
            # Group identical FBA columns: `removed` maps a representative
            # column to the list of its duplicates
            for i in range(len(cols)-1):
                duplicated = []
                v = df[1][k][cols[i]].values
                for j in range(i+1,len(cols)):
                    if np.array_equal(v,df[1][k][cols[j]].values):
                        remove.append(cols[j])
                        duplicated.append(cols[j])
                if duplicated and cols[i] not in remove:
                    removed.update({cols[i]:duplicated})
            count = {}
            for key, value in removed.items():
                count.update({key: len(value)})
            #print v
            # print drop_columns_df
            values = count.values()
            # print values
            # group sizes (duplicates plus the representative itself)
            values = map(lambda x: x+1, values)
            # print values
            # d: group size -> how many groups of that size
            d = {x:values.count(x) for x in values}
            #-------Mutual Information (MI) calculation-------------
            FBAs = len(df[1][k].columns)
            pure_entropy = math.log(FBAs,2)
            #print pure_entropy
            # If No duplicates exist and list "value" is empty
            if not values:
                #print("List is empty")
                No_duplicate_FBAs = len(drop_columns_df.columns)
                conditional_entropy = -1 * (No_duplicate_FBAs*((1/No_duplicate_FBAs)*((1/1)*math.log(1.0/1.0,2))));
                Mutual_Info = pure_entropy - conditional_entropy
                #print('Mutual Info:', Mutual_Info)
            if values:
                # If duplicates exist and list "value" is not empty
                conditional_entropy = 0
                for key in d:
                    #print key, d[key]
                    Temp = -1 * d[key] * (key/float(FBAs)) * key * (1.0/key) * math.log(1.0/key,2)
                    conditional_entropy = Temp + conditional_entropy
                    #print "%3f" %Temp
                Mutual_Info = pure_entropy - conditional_entropy
            MI_dict[df[0][k]] = Mutual_Info
        #Sorted MI_dict: by combination length (descending), then key
        MI_dict = sorted(MI_dict.items(), key=lambda x: (-len(x[0]), x[0]))
        MI_dict = OrderedDict(MI_dict)
        # Bucket results by the number of compounds in the combination
        x_groups = [[] for x in range(num_compounds)]
        y_groups = [[] for x in range(num_compounds)]
        names = [[] for x in range(num_compounds)]
        Comp_Mapping = [[] for x in range(num_compounds)]
        for key, val in MI_dict.iteritems():
            del_count = key.count(',')
            x_groups[del_count].append(key)
            y_groups[del_count].append(val)
        # for x, y in zip(x_groups, y_groups):
        #     data.append(go.Bar(x=x, y=y, name='test'))
        # NOTE(review): hard-coded compound names -- assumes exactly these
        # seven compounds in this order; confirm against the app inputs.
        compound_IDs = ['H2', 'Vitamin K', 'Hematine', 'Glucose', 'Acetate', 'Formate', 'B12']
        pdata = []
        for i in range(0, len(x_groups)):
            names[i] = str(i + 1) + ' Compound Combination'
            Comp_Mapping = str(i + 1) + '-' + compound_IDs[i]
            record = {}
            record["x"] = []
            for e in x_groups[i]:
                record["x"].append("c" + e)
            record["y"] = y_groups[i]
            record["names"] = names[i]
            record["Comp_Mapping"] = Comp_Mapping
            pdata.append(record)
        print pdata
        # Plot data consumed by the HTML report (written to the CWD)
        with open('pdata.json', 'w') as outfile:
            json.dump(pdata, outfile)
        return MI_dict
Fixing flux printing
import time
import os
import errno
import uuid
import math
import pandas as pd
import numpy as np
import collections
import natsort
import uuid
import shutil
import itertools
import json
from itertools import combinations
import matplotlib
from fba_tools.fba_toolsClient import fba_tools
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from Workspace.WorkspaceClient import Workspace as Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
from KBaseReport.KBaseReportClient import KBaseReport
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    leading = '\n' if prefix_newline else ''
    timestamp = '{0:.2f}'.format(time.time())
    print(leading + timestamp + ': ' + str(message))
class MutualInfoUtil:
    """Helpers for the flux mutual-information KBase app: builds media
    combinations, runs FBA, computes mutual information and reports."""

    def __init__(self, config):
        """Wire up service clients from the SDK config dict."""
        # Service endpoints and credentials come from the SDK config.
        self.ws_url = config["workspace-url"]
        self.callback_url = config['SDK_CALLBACK_URL']
        self.token = config['KB_AUTH_TOKEN']
        self.shock_url = config['shock-url']
        self.dfu = DataFileUtil(self.callback_url)
        self.ws = Workspace(self.ws_url, token=self.token)
        # scratch: local working directory for intermediate files
        self.scratch = config['scratch']
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_run_flux_mutual_information_analysis_params(self, params):
"""
_validate_run_flux_mutual_information_analysis_params:
validates params passed to run_flux_mutual_information_analysis method
"""
log('start validating validate_run_flux_mutual_information_analysis params')
# check for required parameters
for p in ['fbamodel_id', 'compounds', 'media_id', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
def _get_file_from_ws(self, ref):
try:
file_path = self.ws.get_objects2({'objects':[{'ref': ref}]})
file_path = file_path['data'][0]
except Exception as e:
raise ValueError(
'Unable to get object from workspace: (' +
ref + ')' + str(e))
return file_path
    def _make_media_files(self, ws_name, base, compounds):
        """
        Build and store media objects for each combination of compound added to the base media.

        :param ws_name: workspace to read the base media from and save into
        :param base: The base media file (object name, or a full ref containing '/')
        :param compounds: the set of compound to test
        :return: (media_ids, media_matrix, myuuid) -- the media ids, a matrix
            with each media combination defined, and the uuid prefix used to
            name the saved (hidden) media objects
        """
        # Accept either a bare object name (qualified with ws_name) or a full ref
        ref = ws_name + "/" + base
        if base.find("/") != -1:
            ref = base
        output = self._get_file_from_ws(ref)
        base_media = output['data']
        base = output['info'][1]
        myuuid = str(uuid.uuid4())
        media_ids = [base]
        new_media_list = []
        # header row: empty cell followed by the compound names
        media_matrix = [[""]+compounds]
        # NOTE(review): this row is wrapped in an extra list, so it is nested
        # one level deeper than the combination rows appended below -- verify
        # downstream consumers expect that.
        media_matrix.append([[base]+[0]*len(compounds)])
        for n_comp in range(1, len(compounds)+1):
            for combo in combinations(compounds, n_comp):
                new_media_id = base + '_v%s' % len(media_matrix)
                media_ids.append(new_media_id)
                # 0/1 indicator row for this combination
                media_matrix.append([new_media_id]+[1 if comp in combo else 0 for comp in compounds])
                new_media = deepcopy(base_media)
                new_media['id'] = new_media_id
                new_media['name'] = new_media_id
                for new_comp in combo:
                    new_media['mediacompounds'].append(
                        {'compound_ref': '48/1/1/compounds/id/%s' % new_comp.split('_')[0],
                         'concentration': 1.0, 'maxFlux': 1000, 'minFlux': -1000})
                new_media_list.append(new_media)
        print("Made %s Media Files" % (len(media_ids)-1))
        # Save every generated media as a hidden workspace object,
        # name-spaced with the run uuid to avoid collisions.
        info = self.ws.save_objects(
            {'workspace': ws_name,
             "objects": [{
                 "hidden": 1,
                 "type": "KBaseBiochem.Media",
                 "data": media,
                 "name": myuuid + "-" + media['name']
             } for media in new_media_list]
             })
        print info
        return media_ids, media_matrix, myuuid
    def _run_fba(self, workspace_name, media_id_list, fbamodel_id, myuuid, base_media):
        """
        Run flux balance analysis over every generated media and dump the
        results to CSV files in a fresh scratch directory.

        :param media_id_list: media ids from _make_media_files
        :param myuuid: uuid prefix the generated media were saved under
        :param base_media: id of the base media (not uuid-prefixed)
        :return: paths, in this order:
            [biomass.csv, secretion.csv, flux.csv, full_secretion.csv, full_flux.csv]
            where 'flux'/'secretion' hold thresholded indicator values and the
            'full_*' variants hold the raw flux numbers.
        """
        fba_tool_obj = fba_tools(self.callback_url)
        # Re-qualify media ids: generated media were saved uuid-prefixed,
        # the base media keeps its original name.
        new_media_list = []
        for media in media_id_list:
            if media == base_media:
                new_media_list.append(workspace_name + "/" + media)
            else:
                new_media_list.append(workspace_name + "/" + myuuid + "-" + media)
        fba_tool_obj.run_flux_balance_analysis({
            "workspace" : workspace_name,
            "fbamodel_id" : fbamodel_id,
            "fba_output_id" : fbamodel_id + ".mifba",
            "fbamodel_workspace" : workspace_name,
            "media_id_list" : new_media_list,
            "target_reaction" : "bio1",
            "minimize_flux" : 1
        })
        # Pull the FBA result object back out of the workspace
        output = self.ws.get_objects2({
            'objects' : [{
                'ref' : workspace_name + "/" + fbamodel_id + '.mifba'
            }]
        })
        fba = output['data'][0]['data']
        # CSV accumulators: one header row, then one row per reaction/compound
        biomass_data = "FBAs,Biomass\n"
        secretion_file = ","+','.join(media_id_list)+"\n"
        full_secretion_file = ","+','.join(media_id_list)+"\n"
        full_flux_file = ","+','.join(media_id_list)+"\n"
        flux_file = ","+','.join(media_id_list)+"\n"
        objectives = fba['other_objectives']
        for i in range(0, len(objectives)):
            biomass_data = biomass_data + media_id_list[i] + "," + str(objectives[i]) + "\n"
        flux_vars = fba['FBAReactionVariables']
        for var in flux_vars:
            # reaction id is the last path component of the ref
            # (NOTE: `id` shadows the builtin, kept as-is)
            id = var['modelreaction_ref'].split("/").pop()
            flux_file = flux_file + id
            full_flux_file = full_flux_file + id
            fluxes = var['other_values']
            for i in range(0, len(objectives)):
                if objectives[i] == 0:
                    # no growth in this media: record zero flux
                    full_flux_file = full_flux_file + ",0"
                    flux_file = flux_file + ",0"
                else:
                    full_flux_file = full_flux_file + "," + str(fluxes[i])
                    # binarize: |flux| below 1e-7 counts as zero
                    if abs(fluxes[i]) < 1e-7:
                        flux_file = flux_file + ",0"
                    else:
                        flux_file = flux_file + ",1"
            flux_file = flux_file + "\n"
            full_flux_file = full_flux_file + "\n"
        secretion_vars = fba['FBACompoundVariables']
        for var in secretion_vars:
            id = var['modelcompound_ref'].split("/").pop()
            secretion_file = secretion_file + id
            full_secretion_file = full_secretion_file + id
            fluxes = var['other_values']
            for i in range(0, len(objectives)):
                if objectives[i] == 0:
                    full_secretion_file = full_secretion_file + ",0"
                    secretion_file = secretion_file + ",0"
                else:
                    full_secretion_file = full_secretion_file + "," + str(fluxes[i])
                    # ternarize: -1 uptake, 0 inactive, 1 secretion
                    if abs(fluxes[i]) < 1e-7:
                        secretion_file = secretion_file + ",0"
                    elif fluxes[i] < 0:
                        secretion_file = secretion_file + ",-1"
                    else:
                        secretion_file = secretion_file + ",1"
            secretion_file = secretion_file + "\n"
            full_secretion_file = full_secretion_file + "\n"
        # Write all five CSVs into a unique scratch subdirectory
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self._mkdir_p(output_directory)
        biomass_path = os.path.join(output_directory, 'biomass.csv')
        secretion_path = os.path.join(output_directory, 'secretion.csv')
        flux_path = os.path.join(output_directory, 'flux.csv')
        full_secretion_path = os.path.join(output_directory, 'full_secretion.csv')
        full_flux_path = os.path.join(output_directory, 'full_flux.csv')
        with open(biomass_path, 'w') as biomass_f:
            biomass_f.write(biomass_data)
        with open(secretion_path, 'w') as secretion_f:
            secretion_f.write(secretion_file)
        with open(flux_path, 'w') as flux_f:
            flux_f.write(flux_file)
        with open(full_secretion_path, 'w') as full_secretion_f:
            full_secretion_f.write(full_secretion_file)
        with open(full_flux_path, 'w') as full_flux_f:
            full_flux_f.write(full_flux_file)
        return [biomass_path,secretion_path,flux_path,full_secretion_path,full_flux_path]
    def _generate_html_report(self, result_directory, mutual_info_dict):
        """
        _generate_html_report: generate html summary report

        Copies MI_plot.png from result_directory, fills report_template.html
        with a table of mutual information per compound combination, pushes
        the folder to Shock and returns the html_links entry for KBaseReport.
        """
        #scratch, uui, datafileutil, file_to_shock, shockId, extended report
        log('start generating html report')
        html_report = list()
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self._mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'mutual_information_report.html')
        # MI_plot.png must already exist in result_directory
        shutil.copy(os.path.join(result_directory, 'MI_plot.png'),
                    os.path.join(output_directory, 'MI_plot.png'))
        # Build the overview table markup
        overview_content = ''
        overview_content += '<table><tr><th>Mutual Information for various chemical compound combinations'
        overview_content += ' Object</th></td>'
        overview_content += '<tr><th>Input Chemical Compound Combination</th>'
        overview_content += '<th>Mutual Information (in Bits)</th>'
        overview_content += '</tr>'
        for k, v in mutual_info_dict.items():
            overview_content += '<tr><td>{}</td><td>{}</td></tr>'.format(k, v)
        overview_content += '</table>'
        # Substitute the table into the packaged HTML template
        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                report_template = report_template.replace('<p>Overview_Content</p>',
                                                          overview_content)
                result_file.write(report_template)
        # Upload the whole report folder to Shock as a zip
        report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                                  'pack': 'zip'})['shock_id']
        html_report.append({'shock_id': report_shock_id,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Mutual Information App'})
        return html_report
def _generate_report(self, result_directory, mutual_info_dict, params,paths):
"""
_generate_report: generate summary report
"""
uuidStr = str(uuid.uuid4())
self._mkdir_p(result_directory + '/' + uuidStr)
shutil.copy('/kb/module/data/index.html', result_directory + '/' + uuidStr + '/index.html')
#shutil.copy('pdata.json', result_directory + '/' + uuidStr + '/pdata.json')
# DataFileUtils to shock
report_shock_id = self.dfu.file_to_shock({'file_path': result_directory + '/' + uuidStr,
'make_handler': 0,
'pack': 'zip'})['shock_id']
report_file = {'name': 'index.html',
'description': 'the report',
'shock_id': report_shock_id}
biomass_file = {'name': 'biomass_file.csv',
'description': 'biomass_file',
'path': paths[0]}
flux_file = {'name': 'flux_file.csv',
'description': 'flux_file',
'path': paths[1]}
full_flux_file = {'name': 'full_flux_file.csv',
'description': 'full_flux_file',
'path': paths[2]}
secretion_file = {'name': 'secretion_file.csv',
'description': 'secretion_file',
'path': paths[3]}
full_secretion_file = {'name': 'full_secretion_file.csv',
'description': 'full_secretion_file',
'path': paths[4]}
log('creating report')
#output_html_files = self._generate_html_report(result_directory,
# mutual_info_dict)
report_params = {'message': '',
'workspace_name': params.get('workspace_name'),
'html_links': [report_file],
'file_links': [biomass_file,flux_file,full_flux_file,secretion_file,full_secretion_file],
'direct_html_link_index': 0,
'html_window_height': 333,
'report_object_name': 'MutualInfomation_report_' + uuidStr}
kbase_report_client = KBaseReport(self.callback_url)
output = kbase_report_client.create_extended_report(report_params)
report_output = {'report_name': output['name'], 'report_ref': output['ref']}
return report_output
    def _generate_mutual_info(self, media_matrix, fba_file, mi_options):
        """
        Compute mutual information between compound combinations and FBA
        outcomes, write 'pdata.json' plot data, and return an OrderedDict
        mapping compound-combination keys to mutual information (bits).

        :param media_matrix: pandas DataFrame; first column is the media id,
            remaining columns are 0/1 compound indicators (mutated in place)
        :param fba_file: path to the binary flux CSV produced by _run_fba
        :param mi_options: currently unused -- TODO confirm before removing
        """
        df1 = pd.read_csv(fba_file)
        # NOTE(review): as_matrix() result is discarded (and the API is
        # deprecated in modern pandas); this call appears to be a no-op.
        df1.as_matrix()
        #----Input validation of Media/FBAs with Binary Matrix FBAs------
        # 1.0 Number of rows in Media.csv file = (Number of columns -1)
        # 1.0. If they are different: report mismatched number of FBAs in media and binary matrix.
        # 1.1 Check whether the elements in Media.csv file contain only binary values (i.e. 0 and 1)
        # 1.1. If the elements are different: report not appropriate input values
        # 1.2 Check whether the compounds in Media.csv file match with number of FBAs
        # 1.2. If the compounds are different from number of FBAs: report not appropriate input values
        print media_matrix
        s_df1 = df1.shape
        s_df2 = media_matrix.shape
        Temp_df2 = np.array(media_matrix.values)
        # Create matrix with only the elements: drop the first (id) column, keep all rows
        Temp_df2 = Temp_df2[0:,1:]
        Bin_val_check = np.array_equal(Temp_df2, Temp_df2.astype(bool))
        num_compounds = (s_df2[1])-1
        # NOTE(review): validation only prints; it does not raise, so bad
        # input still falls through to the computation below.
        if ((s_df1[1]-1) != s_df2[0]) or (Bin_val_check != True) or (int(math.log(s_df2[0],2)) != num_compounds):
            print ('invalid input values')
        #-----All possible combination of the chemical compounds----------------------
        # 2.0 Separating m0 from rest of the labels
        Temp1_df2 = media_matrix
        cols = Temp1_df2.columns
        # Replace each 1 with its column label (mutates media_matrix in place)
        for i in range(1,len(cols)):
            Temp1_df2.loc[Temp1_df2[cols[i]] == 1 , cols[i]] = cols[i]
        print Temp1_df2
        # 2.1 Creating a dictionary for all FBAs except m0
        print len(Temp1_df2)
        mydict = {}
        for x in range(0, len(Temp1_df2)):
            for i in range(1,s_df2[1]):
                currentvalue = Temp1_df2.iloc[x,i]
                currentid = Temp1_df2.iloc[x,0]
                # duplicate assignment kept as in the original
                currentvalue = Temp1_df2.iloc[x,i]
                mydict.setdefault(currentid,[])
                # collect the column labels present in this media row
                if currentvalue > 0:
                    mydict[currentid].append(currentvalue)
        # Add the first key as m0
        media_0_name = 'm0'
        # NOTE(review): this stores the STRING "['0']", not a list; set(v)
        # below therefore iterates its characters -- verify intended.
        mydict[media_0_name] = "['0']"
        #Sort the keys naturally (m1, m2, ..., m10)
        mydict = collections.OrderedDict(natsort.natsorted(mydict.items()))
        print mydict
        for k,v in mydict.iteritems():
            print k,v
        # List of all compound index combinations (includes the empty set)
        my_combi_list = []
        Compounds_Combi = list(range(1,num_compounds+1))
        for L in range(0, len(Compounds_Combi)+1):
            for subset in itertools.combinations(Compounds_Combi, L):
                my_combi_list.append(list(subset))
        print my_combi_list
        # Created a dictionary where the keys:
        # list of compounds combination
        # values are corresponding FBAs list in df2
        result_dict = {}
        for element in my_combi_list[1:]:
            for k, v in mydict.iteritems():
                if set(v).issubset(set(map(lambda x:str(x), element))):
                    key = ','.join(map(lambda x:str(x), element))
                    if result_dict.get(key):
                        media_list = result_dict[key]
                        media_list.append(k)
                        media_list = list(set(media_list))
                        result_dict.update({key: media_list})
                    else:
                        # m0 (base media) is seeded into every combination
                        result_dict.update({key: [media_0_name, k]})
        print result_dict
        # Created a dictionary where the keys are:
        # list of compounds combination
        # values are compounds combination FBAs with df1 values
        All_Comp_Combi_dic = {}
        for column, value in result_dict.items():
            All_Comp_Combi_dic.update({column : df1.get(value)})
        #To print an item from the All_Comp_Combi_dic
        df = (pd.DataFrame(All_Comp_Combi_dic.items()))
        #print df[0]
        #print df[1][7]
        MI_dict = {}
        for k in range(0, len(df[0])):
            # Collapse duplicate FBA outcome rows/columns for this combination
            drop_rows_df = df[1][k].drop_duplicates(keep="first")
            drop_columns_df = drop_rows_df.T.drop_duplicates(keep="first").T
            remove = []
            removed = {}
            cols = df[1][k].columns
            # Group identical FBA columns: `removed` maps a representative
            # column to the list of its duplicates
            for i in range(len(cols)-1):
                duplicated = []
                v = df[1][k][cols[i]].values
                for j in range(i+1,len(cols)):
                    if np.array_equal(v,df[1][k][cols[j]].values):
                        remove.append(cols[j])
                        duplicated.append(cols[j])
                if duplicated and cols[i] not in remove:
                    removed.update({cols[i]:duplicated})
            count = {}
            for key, value in removed.items():
                count.update({key: len(value)})
            #print v
            # print drop_columns_df
            values = count.values()
            # print values
            # group sizes (duplicates plus the representative itself)
            values = map(lambda x: x+1, values)
            # print values
            # d: group size -> how many groups of that size
            d = {x:values.count(x) for x in values}
            #-------Mutual Information (MI) calculation-------------
            FBAs = len(df[1][k].columns)
            pure_entropy = math.log(FBAs,2)
            #print pure_entropy
            # If No duplicates exist and list "value" is empty
            if not values:
                #print("List is empty")
                No_duplicate_FBAs = len(drop_columns_df.columns)
                conditional_entropy = -1 * (No_duplicate_FBAs*((1/No_duplicate_FBAs)*((1/1)*math.log(1.0/1.0,2))));
                Mutual_Info = pure_entropy - conditional_entropy
                #print('Mutual Info:', Mutual_Info)
            if values:
                # If duplicates exist and list "value" is not empty
                conditional_entropy = 0
                for key in d:
                    #print key, d[key]
                    Temp = -1 * d[key] * (key/float(FBAs)) * key * (1.0/key) * math.log(1.0/key,2)
                    conditional_entropy = Temp + conditional_entropy
                    #print "%3f" %Temp
                Mutual_Info = pure_entropy - conditional_entropy
            MI_dict[df[0][k]] = Mutual_Info
        #Sorted MI_dict: by combination length (descending), then key
        MI_dict = sorted(MI_dict.items(), key=lambda x: (-len(x[0]), x[0]))
        MI_dict = OrderedDict(MI_dict)
        # Bucket results by the number of compounds in the combination
        x_groups = [[] for x in range(num_compounds)]
        y_groups = [[] for x in range(num_compounds)]
        names = [[] for x in range(num_compounds)]
        Comp_Mapping = [[] for x in range(num_compounds)]
        for key, val in MI_dict.iteritems():
            del_count = key.count(',')
            x_groups[del_count].append(key)
            y_groups[del_count].append(val)
        # for x, y in zip(x_groups, y_groups):
        #     data.append(go.Bar(x=x, y=y, name='test'))
        # NOTE(review): hard-coded compound names -- assumes exactly these
        # seven compounds in this order; confirm against the app inputs.
        compound_IDs = ['H2', 'Vitamin K', 'Hematine', 'Glucose', 'Acetate', 'Formate', 'B12']
        pdata = []
        for i in range(0, len(x_groups)):
            names[i] = str(i + 1) + ' Compound Combination'
            Comp_Mapping = str(i + 1) + '-' + compound_IDs[i]
            record = {}
            record["x"] = []
            for e in x_groups[i]:
                record["x"].append("c" + e)
            record["y"] = y_groups[i]
            record["names"] = names[i]
            record["Comp_Mapping"] = Comp_Mapping
            pdata.append(record)
        print pdata
        # Plot data consumed by the HTML report (written to the CWD)
        with open('pdata.json', 'w') as outfile:
            json.dump(pdata, outfile)
        return MI_dict
|
#!/usr/bin/env python
#######################################################################################
## Created by David Kirkby, University of California, Irvine <dkirkby@uci.edu>
#######################################################################################
"""
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o lsst_i --pixel-scale 0.200 --width 4096 --height 4096 --exposure-time 6900 --sky-brightness 20.0 --zenith-fwhm 0.67 --zero-point 41.5
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o des_i --pixel-scale 0.263 --width 3115 --height 3115 --exposure-time 1000 --sky-brightness 20.1 --zenith-fwhm 0.79 --zero-point 12.5
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o cfht_i --pixel-scale 0.185 --width 4428 --height 4428 --exposure-time 4300 --sky-brightness 20.3 --zenith-fwhm 0.64 --zero-point 10.0
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o lsst_r --pixel-scale 0.200 --width 4096 --height 4096 --exposure-time 6900 --sky-brightness 21.3 --zenith-fwhm 0.70 --zero-point 55.8
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o des_r --pixel-scale 0.263 --width 3115 --height 3115 --exposure-time 800 --sky-brightness 21.1 --zenith-fwhm 0.79 --zero-point 16.8
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o cfht_r --pixel-scale 0.185 --width 4428 --height 4428 --exposure-time 2000 --sky-brightness 20.8 --zenith-fwhm 0.71 --zero-point 13.5
"""
import sys
import os
import math
import argparse
import logging
import galsim
import pyfits
import numpy
twopi = 2*math.pi
deg2rad = math.pi/180.
deg2arcsec = 3600.
deg2arcmin = 60.
def createComponent(type,electrons,xc,yc,hlr,q,beta,g1,g2):
    """
    Return a sheared and shifted GalSim radial profile.

    type: GSObject class accepting (flux, half_light_radius), e.g.
        galsim.Exponential or galsim.DeVaucouleurs (NOTE: parameter name
        shadows the `type` builtin inside this function).
    electrons: total flux in electrons.
    xc,yc: centroid shift applied last.
    hlr: half-light radius.
    q,beta: intrinsic axis ratio and position angle (radians).
    g1,g2: cosmic shear applied after the intrinsic shape.
    """
    # create a radial profile of the requested type and size
    comp = type(flux = electrons, half_light_radius = hlr)
    # set the intrinsic shape
    comp.applyShear(q = q, beta = beta*galsim.radians)
    # apply cosmic shear (order matters: after the intrinsic shear)
    comp.applyShear(g1 = g1, g2 = g2)
    # shift to this component's centroid
    comp.applyShift(dx = xc, dy = yc)
    return comp
"""
Returns a (disk,bulge) tuple of source objects using the specified parameters.
Note that f_d and f_b are fractions of the total flux and need not sum to one.
"""
def createSource(
    total_flux,f_d,f_b,
    x_d,y_d,hlr_d,q_d,beta_d,
    x_b,y_b,hlr_b,q_b,beta_b,
    dx,dy,relsize,dbeta,
    g1,g2):
    """
    Return a (disk, bulge) tuple of source components. f_d and f_b are
    fractions of total_flux and need not sum to one; a component with a
    non-positive fraction is returned as None. dx,dy,relsize,dbeta are
    common offsets applied to both components; g1,g2 is the cosmic shear.
    """
    disk = None
    if f_d > 0:
        disk = createComponent(galsim.Exponential, total_flux*f_d,
            x_d+dx, y_d+dy, hlr_d*relsize, q_d, beta_d+dbeta, g1, g2)
    bulge = None
    if f_b > 0:
        bulge = createComponent(galsim.DeVaucouleurs, total_flux*f_b,
            x_b+dx, y_b+dy, hlr_b*relsize, q_b, beta_b+dbeta, g1, g2)
    return (disk,bulge)
"""
Renders the convolution of [src,psf,pix] into the specified bounding box.
If psf is None, only [src,pix] are convolved. If src is None, an empty
stamp is returned (we use this below when either the bulge or disk is absent).
"""
def renderStamp(src,psf,pix,bbox):
    """
    Render the convolution of [src,psf,pix] into a stamp covering bbox.
    If psf is None only [src,pix] are convolved; if src is None the empty
    (all-zero) stamp is returned.
    """
    stamp = galsim.ImageD(bbox)
    stamp.setScale(pix.getXWidth())
    if src:
        models = [src,pix] if psf is None else [src,psf,pix]
        # allow large FFTs for big/sharp profiles
        gsp = galsim.GSParams(maximum_fft_size=16384)
        obj = galsim.Convolve(models,gsparams=gsp)
        obj.draw(image = stamp)
    return stamp
"""
Renders the specified source convolved with a psf (which might be None)
and pixel response into a postage stamp with the specified bounding box.
"""
def createStamp(src,psf,pix,bbox):
    """
    Render the (disk, bulge) pair in src, each convolved with psf (which
    may be None) and the pixel response, and return the summed stamp.
    """
    disk, bulge = src
    return renderStamp(disk, psf, pix, bbox) + renderStamp(bulge, psf, pix, bbox)
"""
Calculate the centroid, size, and shape of the convolution of [src,psf,pix]
in the specified bounding box using a high-resolution image whose pixels
are smaller by a factor of oversampling (in each direction), and whose
stamp is larger by a factor of zoom (in each direction). The centroid and
size are returned in arcsecs. The centroid is relative to the center of
the input bounding box.
"""
def getStampMoments(src,psf,pix,bbox,oversampling=10,zoom=1):
    """
    Return (x, y, sigma, eps1, eps2) for the convolution of [src,psf,pix]
    rendered in bbox: centroid in arcsec relative to the bbox center,
    size sigma = det(Q)**0.25 in arcsec, and ellipticity components from
    the second-moments matrix. Measured on a high-resolution rendering
    with pixels smaller by `oversampling` and a stamp larger by `zoom`.
    """
    # Create a high-resolution pixel grid that covers the same area, and
    # preserves the even/oddness and mean (min+max)/2. of each dimension.
    (x1,x2,y1,y2) = (bbox.getXMin(),bbox.getXMax(),bbox.getYMin(),bbox.getYMax())
    xmid = (x1+x2)/2.
    dx = oversampling*(x2-x1)/2.
    x1 = int(math.floor(xmid - zoom*dx))
    x2 = int(math.ceil(xmid + zoom*dx))
    assert (x1+x2)/2. == xmid
    ymid = (y1+y2)/2.
    dy = oversampling*(y2-y1)/2.
    y1 = int(math.floor(ymid - zoom*dy))
    y2 = int(math.ceil(ymid + zoom*dy))
    assert (y1+y2)/2. == ymid
    bigBbox = galsim.BoundsI(x1,x2,y1,y2)
    scale = pix.getXWidth()/oversampling
    smallPix = galsim.Pixel(scale)
    # Render a high-resolution stamp of this source
    stamp = createStamp(src,psf,smallPix,bigBbox)
    # Calculate this stamp's moments from 1D projections
    pixels = stamp.array
    xproj = numpy.sum(pixels,axis=0)
    yproj = numpy.sum(pixels,axis=1)
    total = numpy.sum(pixels)
    # Calculate the mean in pixels relative to the stamp center
    xcoords = numpy.arange(x1,x2+1) - xmid
    ycoords = numpy.arange(y1,y2+1) - ymid
    x = numpy.sum(xproj*xcoords)/total
    y = numpy.sum(yproj*ycoords)/total
    # Calculate the second-moments matrix (coords re-centered on the mean)
    xcoords -= x
    ycoords -= y
    xycoords = numpy.outer(ycoords,xcoords)
    xx = numpy.sum(xproj*xcoords**2)/total
    yy = numpy.sum(yproj*ycoords**2)/total
    xy = numpy.sum(pixels*xycoords)/total
    # Calculate the ellipticity and size
    detQ = xx*yy - xy*xy
    denom = xx + yy + 2*math.sqrt(detQ)
    eps1 = (xx - yy)/denom
    eps2 = 2*xy/denom
    sigma = math.pow(detQ,0.25)*scale
    return (x*scale,y*scale,sigma,eps1,eps2)
"""
Returns (dx,dy) for the bounding box of a surface brightness profile
SB(x,y) whose isophotes are ellipses with the shape (q,beta) and which
has an underlying normalized radial profile p(r). The inputs are:
maxSB = totalFlux*p(0) = maximum surface brightness before shear
thresholdSB = threshold surface brightness after shear
q = ratio of minor to major axes of ellipse with 0 < q <= 1
beta = angle of ellipse's major axis in radians
rFunction = returns R(b) such that p(R) = b*p(0) with 0 < b < 1
The returned (dx,dy) are in arcsecs, and defined such that SB(x,y) < f0
is guaranteed for |x| > dx or |y| > dy. The result only depends on the
ratio thresholdSB/maxSB so they must be in the same (abitrary) units.
"""
def boundingBox(maxSB,thresholdSB,q,beta,rFunction):
    """
    Return (dx, dy): half width and half height, in arcsec, of the box
    guaranteed to contain all surface brightness above thresholdSB for a
    profile with peak maxSB, isophote shape (q, beta), and radial inverse
    rFunction(b) = R such that p(R) = b*p(0). Returns (0, 0) when the
    (sheared) peak is already below threshold; raises RuntimeError when the
    inputs give a non-positive brightness ratio.
    """
    # Reduced-shear components of an ellipse with axis ratio q at angle beta.
    shear = (1 - q)/(1 + q)
    gc = shear*math.cos(2*beta)
    gs = shear*math.sin(2*beta)
    detM = 1 - (gc*gc + gs*gs)
    # Dimensionless surface-brightness ratio at the threshold isophote.
    ratio = thresholdSB/(maxSB*detM)
    if ratio <= 0:
        raise RuntimeError('boundingBox: invalid input parameters')
    if ratio >= 1:
        # The max surface brightness is below our threshold SB(0,0) <= f0
        return (0,0)
    # Radius where the circular profile drops to ratio*p(0).
    rcut = rFunction(ratio)
    # Shear the rcut circle; the half-dimensions of its bounding box are:
    dx = rcut*math.sqrt(((1+gc)**2 + gs*gs)/detM)
    dy = rcut*math.sqrt(((1-gc)**2 + gs*gs)/detM)
    return (dx,dy)
"""
Returns (dx,dy) for the bounding box of a Sersic profile with n = 1 or 4.
The input flux should be in electrons, hlr in arscecs, beta in radians, f0 in
elec/arcsec^2. 0 < q <= 1 is dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def sersicBounds(n,flux,hlr,q,beta,f0):
    """
    Return (dx, dy) in arcsec for the bounding box of a Sersic profile with
    index n = 1 (exponential) or n = 4 (de Vaucouleurs). flux is in
    electrons, hlr in arcsec, beta in radians, f0 the threshold surface
    brightness in elec/arcsec^2. Raises RuntimeError for other n.
    """
    # hlr/r0 ratio and normalization prefactor (n*Gamma[2*n]) per index.
    hlrRatio = {1: 1.67835, 4: 3459.49}
    prefactor = {1: 1, 4: 20160}
    if n not in hlrRatio:
        raise RuntimeError('Sersic index n = %d is not supported.' % n)
    r0 = hlr/hlrRatio[n]
    norm = prefactor[n]*twopi*r0*r0
    # Invert the radial profile: p(R) = b*p(0) at R = r0*(-log b)**n.
    return boundingBox(flux/norm, f0, q, beta,
        lambda b: r0*math.pow(-math.log(b), n))
"""
Returns (dx,dy) for the bounding box of a Moffat profile. The input flux
should be in electrons, fwhm in arcsecs, beta in radians, f0 in elec/arcsec^2.
0 < q <= 1 and moffatBeta > 1 are dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def moffatBounds(moffatBeta,flux,fwhm,q,beta,f0):
    """
    Return (dx, dy) in arcsec for the bounding box of a Moffat profile.
    flux is in electrons, fwhm in arcsec, beta in radians, f0 the threshold
    surface brightness in elec/arcsec^2; moffatBeta > 1 is required.
    """
    # Check that moffatBeta is valid
    if moffatBeta <= 1:
        raise RuntimeError('Moffat beta < 1 is not valid.')
    # Scale radius corresponding to the fwhm.
    scale = 0.5*fwhm/math.sqrt(math.pow(2,1./moffatBeta)-1)
    # Normalization factor norm = 1/p(0).
    norm = math.pi*scale*scale/(moffatBeta-1)

    def radiusAt(b):
        # Radius where the profile falls to b*p(0).
        return scale*math.sqrt(1-math.pow(b,(moffatBeta-1.)/moffatBeta))

    return boundingBox(flux/norm, f0, q, beta, radiusAt)
"""
Returns a mask image of values 0 or 1 depending on whether the corresponding
input image pixel value is above or below the specified threshold in electrons.
Note that if all pixels are below threshold, then the returned mask will
contain only the central pixel with image.array.sum() == 0.
"""
def createMask(image,threshold,args):
    """
    Return a trimmed galsim.ImageS mask that is 1 where image >= threshold
    and 0 elsewhere, cropped to the smallest bounds containing all masked
    pixels (always at least the central pixel). If any border pixel exceeds
    the threshold the stamp is too small, so the function recurses using the
    border maximum as the new threshold. `args` is accepted but unused here.
    """
    # create an empty mask image with the same dimensions as the input image
    box = image.bounds
    mask = galsim.ImageS(box)
    mask.setScale(image.getScale())
    borderMax = 0.
    lastRow = box.ymax - box.ymin
    lastPixel = box.xmax - box.xmin
    # initialize our trimmed bounds to just the central pixel
    # (the numerator should always be even for odd width,height)
    xmin = (box.getXMin()+box.getXMax())
    ymin = (box.getYMin()+box.getYMax())
    assert xmin%2 == 0 and ymin%2 == 0
    xmin = xmin/2
    ymin = ymin/2
    xmax = xmin
    ymax = ymin
    # loop over image pixels
    for (rowIndex,row) in enumerate(image.array):
        y = box.getYMin()+rowIndex
        for (pixelIndex,pixelValue) in enumerate(row):
            x = box.getXMin()+pixelIndex
            if rowIndex == 0 or rowIndex == lastRow or pixelIndex == 0 or pixelIndex == lastPixel:
                # update the largest pixel value on our 1-pixel wide border
                borderMax = max(borderMax,pixelValue)
            if pixelValue >= threshold:
                mask.array[rowIndex,pixelIndex] = 1
                # grow the trimmed bounds to include this pixel
                xmin = min(x,xmin)
                xmax = max(x,xmax)
                ymin = min(y,ymin)
                ymax = max(y,ymax)
    # is the stamp too small to contain the threshold contour?
    if borderMax > threshold:
        print '### stamp truncated at %.1f > %.1f electrons' % (borderMax,threshold)
        # build a new mask using the border max as the threshold
        return createMask(image,borderMax,args)
    # crop the mask to the trimmed bounds
    trimmed = galsim.BoundsI(xmin,xmax,ymin,ymax)
    mask = mask[trimmed]
    return mask
"""
Performs any final processing on stamp, controlled by args, then appends it to stamps.
Returns True if the stamp was saved, or otherwise False.
"""
def saveStamp(stamps,stamp,args):
    """
    Clip stamp to the field (unless args.no_clip), convert from
    elec/exposure to elec/second, and append it to stamps. Returns True if
    the stamp was saved, False if it fell entirely outside the field.
    """
    if not args.no_clip:
        # Clipping can shrink files but may leave sources off-center.
        field = galsim.BoundsI(1,args.width,1,args.height)
        overlap = stamp.bounds & field
        if overlap.area() == 0:
            # nothing left after trimming: skip this stamp
            return False
        stamp = stamp[overlap]
    # normalize from elec/exposure to elec/second and remember the stamp
    stamps.append(stamp/args.exposure_time)
    return True
"""
Performs initializations for the psf we will be using.
"""
def initializeForPsf(psf,pix,size):
    """
    Performs initializations for the psf we will be using.

    Renders a centered (2*size x 2*size)-pixel image of [psf,pix], builds a
    circularized radial profile from it, and returns (estimator, sigma)
    where estimator(flux, threshold) gives the pixel extent needed to
    contain psf pixels above threshold (clipped at 2*size) and sigma is the
    psf size measured from a high-resolution rendering.
    """
    # Render a centered psf image
    bbox = galsim.BoundsI(1,2*size,1,2*size)
    stamp = galsim.ImageD(bbox)
    scale = pix.getXWidth()
    stamp.setScale(scale)
    obj = galsim.Convolve([psf,pix])
    obj.draw(image=stamp)
    # Build the circularized psf profile: max pixel value in each radial bin
    profile = numpy.zeros(size,dtype=float)
    for x in range(2*size):
        for y in range(2*size):
            dx = x - size + 0.5
            dy = y - size + 0.5
            r = math.sqrt(dx*dx + dy*dy)
            ipix = int(math.floor(r))
            if ipix < size:
                profile[ipix] = max(profile[ipix],stamp.array[x,y])
    # Bind the pixel extent under a name that cannot be clobbered below.
    # BUG FIX: the original closure read `size`, which is reassigned to the
    # moments sigma (a float in arcsec) before estimator is ever called,
    # silently breaking both the loop bound and the clipped return value.
    nProfile = size
    # Create a function that gives the size of bounding box necessary to contain
    # psf pixels down to the specified threshold assuming the specified total flux.
    # The return value is clipped at 2*size for large fluxes.
    def estimator(flux,threshold):
        index = 0
        while index < nProfile:
            if flux*profile[index] < threshold:
                return 2*index+1
            index += 1
        return 2*nProfile
    # Calculate the psf size from a high-resolution rendering
    (xc,yc,size,e1,e2) = getStampMoments((psf,None),None,pix,bbox)
    assert abs(xc) < 0.01*scale and abs(yc) < 0.01*scale
    assert abs(e1) < 1e-6 and abs(e2) < 1e-6
    return (estimator,size)
# Returns the combined size and ellipticity for the specified disk and bulge components,
# assuming they have the same centroid.
def combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,f_b):
    """Combine disk and bulge shapes via their second-moment tensors.

    f_b is the bulge flux fraction. Returns (size,e1,e2) where
    size = det(Q)**0.25 for the flux-weighted sum Q of the component
    second-moment tensors, and (e1,e2) is the corresponding distortion.
    """
    # For single-component models, force the absent component to be round so
    # that it cannot contribute a spurious ellipticity.
    if f_b == 0:
        q_b = 1
    elif f_b == 1:
        q_d = 1
    def momentTensor(hlr,q,pa,norm):
        # second-moment tensor of one component, assuming unit total flux
        e = (1-q)/(1+q)
        e1 = e*math.cos(2*pa)
        e2 = e*math.sin(2*pa)
        n = norm*(hlr/(1-e*e))**2
        return (n*(1+e*e+2*e1), n*2*e2, n*(1+e*e-2*e1))
    # normalization constants for the disk (n=1) and bulge (n=4) profiles
    (Qd11,Qd12,Qd22) = momentTensor(hlr_d,q_d,pa_d,1.06502)
    (Qb11,Qb12,Qb22) = momentTensor(hlr_b,q_b,pa_b,10.8396)
    # flux-weighted sum of the component second-moment tensors
    Q11 = (1-f_b)*Qd11 + f_b*Qb11
    Q12 = (1-f_b)*Qd12 + f_b*Qb12
    Q22 = (1-f_b)*Qd22 + f_b*Qb22
    detQ = Q11*Q22 - Q12*Q12
    size = math.pow(detQ,0.25)
    # distortion of the combined tensor
    denom = Q11 + Q22 + 2*math.sqrt(detQ)
    return (size,(Q11 - Q22)/denom,2*Q12/denom)
def signalToNoiseRatio(stamp,pixelNoise):
    """Return the matched-filter S/N ratio of stamp for the given per-pixel noise variance."""
    pixels = stamp.array.flatten()
    return math.sqrt(numpy.dot(pixels,pixels)/pixelNoise)
# Returns True if the stamps s1 and s2 have overlapping pixels with non-zero flux.
def overlapping(s1,s2):
    """Return True when s1 and s2 share at least one pixel with non-zero flux."""
    common = s1.bounds & s2.bounds
    if common.area() == 0:
        # the bounding boxes do not even intersect
        return False
    # any shared flux shows up as a non-zero pixel-product sum over the overlap
    return bool(numpy.sum(s1[common].array * s2[common].array) != 0)
# Assigns a group ID to each stamp in stamps based on its overlaps with other stamps.
def analyzeOverlaps(stamps):
    """Partition stamps into overlap groups.

    Returns (groupID,groupSize) where groupID[i] is the group of stamp i
    (groups are labeled by the smallest member index) and groupSize[g] counts
    the members of group g (zero for labels that were merged away).
    """
    count = len(stamps)
    # each stamp starts out alone in its own group
    groupID = list(range(count))
    groupSize = [1]*count
    for i1 in range(count):
        for i2 in range(i1):
            if not overlapping(stamps[i1],stamps[i2]):
                continue
            gid1 = groupID[i1]
            gid2 = groupID[i2]
            if gid1 == gid2:
                # already merged via some earlier overlap
                continue
            # the higher-labeled group is absorbed into the lower-labeled one
            gnew = min(gid1,gid2)
            gold = max(gid1,gid2)
            for i in range(i1+1):
                if groupID[i] == gold:
                    groupID[i] = gnew
                    groupSize[gnew] += 1
                    groupSize[gold] -= 1
    return (groupID,groupSize)
# Builds the Fisher matrix from the specified array of npar*(npar+1)/2 Fisher images and
# calculates the corresponding shape-measurement error, if possible.
def shapeError(npar,fisherImages,fisherDenominator,mask):
    """Return sigma(eps) from the masked Fisher images, or 0 when undefined.

    fisherImages holds the npar*(npar+1)/2 upper-triangle element images in
    row-major order; the last two parameters are assumed to be (g1,g2).
    """
    # sum the pixels of each Fisher image to get the matrix elements
    fisher = numpy.zeros((npar,npar))
    k = 0
    for row in range(npar):
        for col in range(row,npar):
            element = numpy.sum(fisherImages[k]/fisherDenominator*mask)
            fisher[row,col] = element
            fisher[col,row] = element
            k += 1
    # invert to get the covariance; this fails for a singular Fisher matrix
    try:
        cov = numpy.linalg.inv(fisher)
        # this is where we assume that the last 2 variations are g1,g2
        varEps = 0.5*(cov[-2,-2]+cov[-1,-1])
        # variance might be negative if the inverse has large numerical errors
        sigmaEps = math.sqrt(varEps) if varEps > 0 else 0
    except numpy.linalg.LinAlgError:
        # report zero error when the Fisher matrix is not invertible
        sigmaEps = 0.
    return sigmaEps
# Calculate shape measurement errors with the specified purity cuts. Returns a tuple of the
# corresponding errors, in a list, and an integer-valued image that identifies the purity
# regions by assigning each pixel the value of the largest index such that
# nominal > purity[index]*field (or zero if this criteria is not met for any purity).
def shapeErrorsAnalysis(npar,nominal,fisherImages,noiseVariance,field,purities,isolated=True):
    """Run shapeError for one object at each purity cut.

    nominal is this object's stamp, field the full-field image containing all
    objects, and noiseVariance the per-pixel sky noise variance added to the
    Fisher denominator. With isolated=True (the default) the denominator uses
    only this object's own pixels instead of the full field.
    Returns (errors,regions) with one error per purity plus the integer
    regions image described above.
    """
    # find the overlap of this object in the full field
    overlap = nominal.bounds & field.bounds
    # get the pixel values for this object and all objects in the overlap
    subNominal = nominal[overlap].array
    subField = field[overlap].array
    # calculate the denominator array for our Fisher matrix elements, including
    # all objects unless we are pretending that this object is isolated
    fisherDenominator = (subNominal if isolated else subField) + noiseVariance
    # initialize our integer regions image
    regions = galsim.ImageI(nominal.bounds)
    # NOTE: regionsArray is rebound by numpy.maximum below, so the galsim
    # image is only updated by the explicit copy after the loop.
    regionsArray = regions.array
    errors = [ ]
    for (i,purity) in enumerate(purities):
        # boolean mask of pixels passing this purity cut
        mask = (subNominal > purity*subField)
        # i*mask is zero for the first purity, so index 0 is indistinguishable
        # from "no purity met" -- consistent with the contract above
        regionsArray = numpy.maximum(regionsArray,i*mask)
        sigeps = shapeError(npar,fisherImages,fisherDenominator,mask)
        errors.append(sigeps)
    # copy the accumulated region indices back into the galsim image
    regions.array[:] = regionsArray[:]
    return (errors,regions)
def main():
    """Simulate a catalog of galaxies as rendered postage stamps and field images.

    Reads the input source catalog, renders each disk+bulge galaxy convolved
    with the atmospheric+instrumental psf and pixel response, accumulates the
    full field image, and optionally writes stamp datacubes, partial-derivative
    stamps, overlap groups, purity regions, noise-added field images, and an
    output catalog with sizes, shapes, S/N and shape-measurement errors.
    """
    # Parse command-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", action = "store_true",
        help = "provide more verbose output")
    parser.add_argument("-i", "--input", default = 'gcat.dat',
        help = "name of input catalog to read")
    parser.add_argument("-o","--output", default = 'catout',
        help = "base name of output files to write")
    parser.add_argument("-x","--x-center", type = float, default = 0.5,
        help = "central RA of image (degrees)")
    parser.add_argument("-y","--y-center", type = float, default = 0.0,
        help = "central DEC of image (degrees)")
    parser.add_argument("--width", type = int, default = 512,
        help = "image width (pixels)")
    parser.add_argument("--height", type = int, default = 512,
        help = "image height (pixels)")
    parser.add_argument("--max-size", type = float, default = 20.,
        help = "flux from any object is truncated beyond this size (arcsecs)")
    parser.add_argument("--no-margin", action = "store_true",
        help = "do not simulate the tails of objects just outside the field")
    parser.add_argument("--pixel-scale", type = float, default = 0.2,
        help = "pixel scale (arscecs/pixel)")
    parser.add_argument("--airmass", type = float, default = 1.2,
        help = "airmass value to use for atmospheric PSF and extinction")
    parser.add_argument("--extinction", type = float, default = 0.07,
        help = "atmospheric extinction coefficient")
    parser.add_argument("--zenith-fwhm", type = float, default = 0.67,
        help = "atmospheric psf full-width-half-max in arcsecs at zenith")
    parser.add_argument("--instrumental-fwhm", type = float, default = 0.4,
        help = "instrumental psf full-width-half-max in arcsecs")
    parser.add_argument("--psf-beta", type = float, default = 0.0,
        help = "psf Moffat parameter beta (uses Kolmogorov psf if beta <= 0)")
    parser.add_argument("--band", choices = ['u','g','r','i','z','y'], default = 'i',
        help = "LSST imaging band to use for source fluxes")
    parser.add_argument("--zero-point", type = float, default = 41.5,
        help = "zero point for converting magnitude to detected signal in elec/sec")
    parser.add_argument("--sky-brightness", type = float, default = 20.0,
        help = "sky brightness in mag/sq.arcsec.")
    parser.add_argument("--sn-cut", type = float, default = 0.5,
        help = "keep all pixels above this signal-to-noise ratio cut")
    parser.add_argument("--exposure-time", type = float, default = 6900.,
        help = "full-depth exposure time in seconds")
    parser.add_argument("--g1", type = float, default = 0.,
        help = "constant shear component g1 to apply")
    parser.add_argument("--g2", type = float, default = 0.,
        help = "constant shear component g2 to apply")
    parser.add_argument("--save-field", action = "store_true",
        help = "save full field image without noise")
    parser.add_argument("--save-noise", action = "store_true",
        help = "save full field image with random noise added")
    parser.add_argument("--stamps", action = "store_true",
        help = "save postage stamps for each source (normalized to 1 exposure)")
    parser.add_argument("--no-clip", action = "store_true",
        help = "do not clip stamps to the image bounds")
    parser.add_argument("--no-disk", action = "store_true",
        help = "do not include any galactic disk (Sersic n=1) components")
    parser.add_argument("--no-bulge", action = "store_true",
        help = "do not include any galactic bulge (Sersic n=4) components")
    parser.add_argument("--shape", action = "store_true",
        help = "run HSM adaptive moments calculation on no-psf stamp")
    parser.add_argument("--partials", action = "store_true",
        help = "calculate and save partial derivatives wrt shape parameters (normalized to 1 exposure)")
    parser.add_argument("--partials-order", type = int, default = 1,
        help = "order of finite difference equation to use for evaluating partials")
    parser.add_argument("--only-line", type = int, default = 0,
        help = "only use the specified line number from the input catalog (when non-zero)")
    args = parser.parse_args()
    # Configure the GalSim logger
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("galsimcat")
    logger.info('Using output prefix %r' % args.output)
    # Define the pixel response
    pix = galsim.Pixel(args.pixel_scale)
    # Define the psf to use
    atmos_fwhm = args.zenith_fwhm*math.pow(args.airmass,0.6)
    fwhm = math.sqrt(atmos_fwhm**2 + args.instrumental_fwhm**2)
    logger.info('Using PSF fwhm = %.4f" (%.4f" zenith => %.4f" at X = %.3f, %.4f" instrumental)' %
        (fwhm,args.zenith_fwhm,atmos_fwhm,args.airmass,args.instrumental_fwhm))
    if fwhm > 0:
        if args.psf_beta > 0:
            psf = galsim.Moffat(beta = args.psf_beta, fwhm = fwhm)
        else:
            psf = galsim.Kolmogorov(fwhm = fwhm)
        (psfBounds,psfSize) = initializeForPsf(psf,pix,int(math.ceil(0.5*args.max_size/args.pixel_scale)))
        logger.info('PSF size = %.5f arcsec' % psfSize)
    else:
        psf = None
    # Create an empty image that represents the whole field
    field = galsim.ImageD(args.width,args.height)
    field.setScale(pix.getXWidth())
    # Calculate the relative scaling of RA and angles relative to the image center
    RAscale = math.cos(args.y_center*deg2rad)
    # Calculate the corners of the image in degrees
    RAmin = args.x_center - 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    RAmax = args.x_center + 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    DECmin = args.y_center - 0.5*args.height*args.pixel_scale/deg2arcsec
    DECmax = args.y_center + 0.5*args.height*args.pixel_scale/deg2arcsec
    # Calculate margin size in degrees (sources outside of our image margins
    # are always skipped, for speed, even if their tails might overlap our image)
    if args.no_margin:
        margin = 0
    else:
        margin = 0.5*args.max_size/deg2arcsec
    # Calculate the sky background rate in elec/sec/pixel
    skyRate = args.zero_point*math.pow(10,-0.4*(args.sky_brightness-24))*args.pixel_scale**2
    # Calculate the mean sky noise level for the full exposure time in elec/pixel
    skyNoise = math.sqrt(args.exposure_time*skyRate)
    # Calculate the pixel threshold cut to use in detected electrons during the full exposure
    pixelCut = args.sn_cut*skyNoise
    # Calculate the corresponding surface brightness cut to use
    sbCut = pixelCut/(args.pixel_scale*args.pixel_scale)
    print 'Simulating %s-band observations with AB24 zero point %.3f elec/sec, sky rate = %.3f elec/sec/pixel' %(
        args.band,args.zero_point,skyRate)
    print 'Simulating %.1fs exposure with total sky noise level %.3f elec/pixel (%.3f mag/sq.arcsec.)' % (
        args.exposure_time,skyNoise,args.sky_brightness)
    print 'Will keep all stacked pixels > %.3f elec (%.1f elec/arcsec^2)' % (pixelCut,sbCut)
    # Initialize finite difference calculations if necessary
    if args.partials:
        args.stamps = True
        if args.partials_order < 1 or args.partials_order > 4:
            logger.error('Bad parameter: partials-order must be an integer 1-4.')
            sys.exit(-1)
        # Initialize the finite difference coefficients to use
        if args.partials_order == 1:
            fdCoefs = (1./2.,)
        elif args.partials_order == 2:
            fdCoefs = (2./3.,-1./12.)
        elif args.partials_order == 3:
            fdCoefs = (3./4.,-3./20.,1./60.)
        else:
            fdCoefs = (4./5.,-1./5.,4./105.,-1./280.)
    # Open the source input catalog to use and initialize a keyword-based lookup for catalog entries
    cat = open(args.input)
    catFields = cat.readline().split()
    catDict = dict(zip(catFields,range(len(catFields))))
    if args.verbose:
        logger.info('Reading input catalog %r with fields:\n%s' % (args.input,','.join(catFields)))
    # Initialize the output catalog in memory
    outputCatalog = [ ]
    # Initialize the list of per-object stamp HDUs we will fill
    hdu = pyfits.PrimaryHDU()
    hduList = pyfits.HDUList([hdu])
    stampList = [ ]
    fisherImagesList = [ ]
    nvar = 0 # declared here so it stays in scope after loop over galaxies
    # Loop over catalog entries
    nkeep = lineno = 0
    for line in cat:
        lineno += 1
        if args.only_line > 0 and lineno != args.only_line:
            continue
        # prepare to read this catalog entry
        entryCols = line.split()
        def catalog(fieldName,type=float):
            return type(entryCols[catDict[fieldName]])
        entryID = catalog('id',int)
        # position on the sky in degrees
        RA = catalog('ra')
        DEC = catalog('dec')
        # skip sources outside our margins
        if RA < RAmin-margin or RA > RAmax+margin or DEC < DECmin-margin or DEC > DECmax+margin:
            continue
        # Calculate the offsets of this source from our image's bottom left corner in pixels
        # (which might be negative or beyond our image bounds because of the margins)
        xoffset = (RA - RAmin)*deg2arcsec/args.pixel_scale*RAscale
        yoffset = (DEC - DECmin)*deg2arcsec/args.pixel_scale
        # Look up redshift
        z = catalog('redshift')
        # Look up source AB magnitude in the requested band
        abMag = catalog(args.band + '_ab')
        # Correct for extinction
        abMag += args.extinction*(args.airmass - 1)
        # Calculate total detected signal in electrons
        flux = args.exposure_time*args.zero_point*math.pow(10,-0.4*(abMag-24))
        # Skip objects whose total flux is below our pixel threshold
        if flux < pixelCut:
            continue
        # Look up the component flux relative normalizations
        diskFluxNorm = catalog('fluxnorm_disk')
        bulgeFluxNorm = catalog('fluxnorm_bulge')
        agnFluxNorm = catalog('fluxnorm_agn')
        totalFluxNorm = diskFluxNorm + bulgeFluxNorm + agnFluxNorm
        # Calculate the disk and bulge fluxes to simulate
        if args.no_disk:
            diskFlux = 0
        else:
            diskFlux = flux*diskFluxNorm/totalFluxNorm
        if args.no_bulge:
            bulgeFlux = 0
        else:
            bulgeFlux = flux*bulgeFluxNorm/totalFluxNorm
        if diskFlux == 0 and bulgeFlux == 0:
            continue
        # Get disk component parameters
        if diskFlux > 0:
            hlr_d = catalog('DiskHalfLightRadius') # in arcsecs
            pa_d = catalog('pa_disk') # position angle in degrees
            a_d = catalog('a_d') # major axis length in arcsecs
            b_d = catalog('b_d') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_d = b_d/a_d # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_d = pa_d*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_d,h_d) = sersicBounds(1,diskFlux+bulgeFlux,hlr_d,q_d,pa_d,sbCut)
        else:
            (w_d,h_d) = (0,0)
        # Get bulge component parameters
        if bulgeFlux > 0:
            hlr_b = catalog('BulgeHalfLightRadius') # in arcsecs
            pa_b = catalog('pa_bulge') # position angle in degrees
            a_b = catalog('a_b') # major axis length in arcsecs
            b_b = catalog('b_b') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_b = b_b/a_b # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_b = pa_b*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_b,h_b) = sersicBounds(4,diskFlux+bulgeFlux,hlr_b,q_b,pa_b,sbCut)
        else:
            (w_b,h_b) = (0,0)
        # If a component is missing, set its nominal size and shape from the other component.
        if diskFlux == 0:
            (hlr_d,q_d,pa_d) = (hlr_b,q_b,pa_b)
        if bulgeFlux == 0:
            (hlr_b,q_b,pa_b) = (hlr_d,q_d,pa_d)
        # Combine the bulge and disk ellipticities
        (size,e1,e2) = combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,bulgeFlux/(bulgeFlux+diskFlux))
        # Combine the bulge and disk bounding boxes
        width = max(w_d,w_b)
        height = max(h_d,h_b)
        # Estimate the (round) bounding box for the psf in arscecs given our total flux
        psfSize = psfBounds(flux,pixelCut)*args.pixel_scale if psf else 0
        # Add the psf size in quadrature
        width = math.sqrt(width*width + psfSize*psfSize)
        height = math.sqrt(height*height + psfSize*psfSize)
        # Truncate the bounding box, if necessary
        if width > args.max_size or height > args.max_size:
            logger.info('...truncating bbbox from (%.1f,%.1f)' % (width,height))
            width = min(width,args.max_size)
            height = min(height,args.max_size)
        # Skip this source if its pixels would all be below pixelCut (can this ever happen?)
        if (width,height) == (0,0):
            continue
        # Calculate the integer coordinates of the image pixel that contains the source center
        # (using the convention that the bottom left corner pixel has coordinates 1,1)
        xpixels = int(math.ceil(xoffset))
        ypixels = int(math.ceil(yoffset))
        # Calculate the stamp size to use as width = 2*xhalf+1 and height = 2*yhalf+1.
        # We always round up to an odd integer so that flux is consistently centered
        # (see Issue #380).
        xhalf = int(math.ceil(width/args.pixel_scale))
        yhalf = int(math.ceil(height/args.pixel_scale))
        # Trim the stamp so that the source is still centered but we do not extend
        # beyond the final field image. This will only trim pixels above pixelCut
        # that lie outside the field.
        if xpixels-xhalf < 1 and xpixels+xhalf > args.width:
            xhalf = max(xpixels-1,args.width-xpixels)
        if ypixels-yhalf < 1 and ypixels+yhalf > args.height:
            yhalf = max(ypixels-1,args.height-ypixels)
        # Build this source's stamp bounding box
        bbox = galsim.BoundsI(xpixels-xhalf,xpixels+xhalf,ypixels-yhalf,ypixels+yhalf)
        # Skip objects that don't overlap our field
        if (bbox & field.bounds).area() == 0:
            continue
        # If we get this far, we are definitely rendering this source (but it might
        # still get trimmed out later)
        logger.info('Rendering input catalog line %d (entry id %d) with w x h = %d x %d' %
            (lineno,entryID,2*xhalf+1,2*yhalf+1))
        # Calculate the pixel coordinates of the stamp center.
        xstamp = 0.5*(bbox.xmin + bbox.xmax)
        ystamp = 0.5*(bbox.ymin + bbox.ymax)
        # Calculate the subpixel shift in arcsecs (not pixels!) of the source center
        # relative to the stamp center. Note that the resulting shift may be more than
        # one pixel in either direction because of the clipping operation above.
        xshift = (xoffset - (xstamp-0.5))*args.pixel_scale
        yshift = (yoffset - (ystamp-0.5))*args.pixel_scale
        if args.verbose:
            logger.info('    flux: %.3g electrons (%s-band AB %.1f)' % (flux,args.band,abMag))
            logger.info('  bounds: [%d:%d,%d:%d] pixels' % (bbox.xmin,bbox.xmax,bbox.ymin,bbox.ymax))
            logger.info('   shift: (%f,%f) arcsecs = (%f,%f) pixels' %
                (xshift,yshift,xshift/args.pixel_scale,yshift/args.pixel_scale))
            logger.info('    disk: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (diskFlux/flux,hlr_d,q_d,pa_d))
            logger.info('   bulge: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (bulgeFlux/flux,hlr_b,q_b,pa_b))
            logger.info('     agn: frac = %f' % (agnFluxNorm/flux))
            logger.info('    bbox: disk (%.1f,%.1f) bulge (%.1f,%.1f) psf %.1f arcsec' %
                (w_d,h_d,w_b,h_b,psfSize))
            logger.info('    size: %.2f arcsec' % size)
            logger.info('   shape: (e1,e2) = (%.6f,%.6f)' % (e1,e2))
        # Define the nominal source parameters for rendering this object within its stamp
        params = {
            'total_flux': diskFlux + bulgeFlux,
            'f_d': diskFlux/(diskFlux+bulgeFlux), 'f_b': bulgeFlux/(diskFlux+bulgeFlux),
            'x_d': xshift, 'y_d': yshift, 'hlr_d': hlr_d, 'q_d': q_d, 'beta_d': pa_d,
            'x_b': xshift, 'y_b': yshift, 'hlr_b': hlr_b, 'q_b': q_b, 'beta_b': pa_b,
            'dx': 0., 'dy': 0., 'relsize': 1., 'dbeta': 0.,
            'g1': args.g1, 'g2': args.g2
        }
        # Render the nominal stamps for this galaxy
        gal = createSource(**params)
        nominal = createStamp(gal,psf,pix,bbox)
        # Create a mask for pixels above threshold
        mask = createMask(nominal,pixelCut,args)
        if mask.array.sum() == 0:
            # this stamp has no pixels above threshold
            logger.info('*** line %d (id %d) is below threshold' % (lineno,entryID))
            continue
        trimmed = mask.bounds
        if args.verbose:
            logger.info(' trimmed: [%d:%d,%d:%d] pixels' %
                (trimmed.xmin,trimmed.xmax,trimmed.ymin,trimmed.ymax))
        # Add the nominal galaxy to the full field image after applying the threshold mask
        # (the mask must be the second term in the product so that the result is double precision)
        maskedNominal = nominal[trimmed]*mask
        fieldOverlap = trimmed & field.bounds
        if fieldOverlap.area() == 0:
            # this stamp's mask falls completely outside our field
            logger.info('*** line %d (id %d) does not overlap field' % (lineno,entryID))
            continue
        field[fieldOverlap] += maskedNominal[fieldOverlap]
        # Remember the nominal stamp (clipped to the field) for overlap calculations.
        stampList.append(maskedNominal[fieldOverlap])
        # Calculate this object's nominal flux S/N ratio at full depth using only masked pixels.
        # Note that this value cannot be reproduced from the saved stamp when a stamp is clipped
        # to the field boundary (use --no-clip to disable this).
        snr = signalToNoiseRatio(maskedNominal,args.exposure_time*skyRate)
        if args.verbose:
            logger.info('     S/N: %.6f' % snr)
        # Initialize the datacube of stamps that we will save for this object
        datacube = [ ]
        partialsArray = [ ]
        # Save the nominal (masked and trimmed) stamp
        assert saveStamp(datacube,maskedNominal,args)
        if args.partials:
            # Specify the amount to vary each parameter for partial derivatives
            # (we don't use a dictionary here since we want to control the order)
            variations = [
                ('f_d',0.01), ('f_b',0.01),
                ('dx',args.pixel_scale/3.),('dy',args.pixel_scale/3.),
                ('relsize',0.05),
                ('g1',0.03), ('g2',0.03)
            ]
            # the shape measurement parameters must always be the last 2 variations
            # since we make this assumption when slicing the covariance matrix below
            assert variations[-2][0] == 'g1'
            assert variations[-1][0] == 'g2'
            # loop over source parameters to vary
            for (pname,delta) in variations:
                # create stamps for each variation of this parameter
                newparams = params.copy()
                partial = galsim.ImageD(bbox)
                partial.setScale(pix.getXWidth())
                # delta might be zero, e.g., for hlr_b when bulge fraction = 0
                if delta > 0:
                    for step in range(args.partials_order):
                        # create and save the positive variation stamp
                        newparams[pname] = params[pname] + (step+1)*delta
                        newsource = createSource(**newparams)
                        plus = createStamp(newsource,psf,pix,bbox)
                        # create and save the negative variation stamp
                        newparams[pname] = params[pname] - (step+1)*delta
                        newsource = createSource(**newparams)
                        minus = createStamp(newsource,psf,pix,bbox)
                        # update the finite difference calculation of this partial
                        partial += (fdCoefs[step]/delta)*(plus - minus)
                # append this partial to our datacube after trimming and masking
                maskedPartial = partial[trimmed]*mask
                assert saveStamp(datacube,maskedPartial,args)
                # remember this partial's numpy image array
                partialsArray.append(maskedPartial[fieldOverlap].array)
            # calculate the Fisher matrix images for this object (note that we haven't
            # included the Fisher denominator here since that might include overlapping
            # objects that we have not seen yet)
            nvar = len(partialsArray)
            nfisher = ((nvar+1)*nvar)/2
            (h,w) = partialsArray[0].shape
            fisherImage = numpy.zeros((nfisher,h,w))
            index = 0
            for i in range(nvar):
                for j in range(i,nvar):
                    fisherImage[index] = partialsArray[i]*partialsArray[j]
                    index += 1
            fisherImagesList.append(fisherImage)
        # Add a new HDU with a datacube for this object's stamps
        # We don't use compression = 'gzip_tile' for now since it is lossy
        # and mathematica cannot Import it.
        galsim.fits.writeCube(datacube, hdu_list = hduList)
        # Add a catalog entry for this galaxy
        entry = [entryID,xoffset,yoffset,abMag,flux/args.exposure_time,size,e1,e2,
            bulgeFlux/(diskFlux+bulgeFlux),z,snr]
        outputCatalog.append(entry)
        nkeep += 1
        logger.info("saved entry id %d as stamp %d" % (entryID,nkeep))
    # Close the input catalog
    cat.close()
    # Loop over all saved objects to test for overlaps and build overlap groups
    (groupID,groupSize) = analyzeOverlaps(stampList)
    # Add group id to output catalog
    for (i,entry) in enumerate(outputCatalog):
        entry.append(groupID[i])
    # Do shape measurement error analysis for each galaxy
    purities = (0,0.5,0.9)
    regionsList = [ ]
    for i in range(nkeep):
        (errors,regions) = shapeErrorsAnalysis(
            nvar,stampList[i],fisherImagesList[i],args.exposure_time*skyRate,field,purities)
        outputCatalog[i].extend(errors)
        regionsList.append(regions)
    # Save the regions for each object
    outname = args.output + '_regions.fits'
    logger.info('Saving regions to %r' % outname)
    galsim.fits.writeMulti(regionsList,outname)
    # Save group sizes to a file
    outname = args.output + '_groups.dat'
    out = open(outname,'w')
    ntot = 0
    for (i,n) in enumerate(groupSize):
        if n > 0:
            print >>out,i,n
            ntot += n
    out.close()
    assert ntot == len(groupSize)
    # Write the full field image without noise
    if args.save_field:
        # First without noise
        outname = args.output + '_field.fits'
        logger.info('Saving full field to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the full field image with random noise added
    if args.save_noise:
        rng = galsim.BaseDeviate(123)
        noise = galsim.PoissonNoise(rng,sky_level = args.exposure_time*skyRate)
        field.addNoise(noise)
        outname = args.output + '_noise.fits'
        logger.info('Saving full field with noise added to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the object stamp datacubes
    if args.stamps:
        outname = args.output + '_stamps.fits'
        logger.info('Saving %d stamps to %r' % (nkeep,outname))
        galsim.fits.writeFile(outname, hduList)
    # Write the output catalog from memory
    outname = args.output + '_catalog.dat'
    out = open(outname,'w')
    for entry in outputCatalog:
        print >>out, ' '.join(map(str,entry))
    out.close()
    logger.info('Wrote %d of %d catalog entries to %r' % (nkeep,lineno,outname))
if __name__ == "__main__":
    main()
Bug fix: in initializeForPsf, do not re-use the integer `size` variable to hold the float PSF size returned by getStampMoments — the `estimator` closure reads `size` at call time, so rebinding it silently replaced the stamp half-size with the float moments size and corrupted the bounding-box estimates.
#!/usr/bin/env python
#######################################################################################
## Created by David Kirkby, University of California, Irvine <dkirkby@uci.edu>
#######################################################################################
"""
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o lsst_i --pixel-scale 0.200 --width 4096 --height 4096 --exposure-time 6900 --sky-brightness 20.0 --zenith-fwhm 0.67 --zero-point 41.5
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o des_i --pixel-scale 0.263 --width 3115 --height 3115 --exposure-time 1000 --sky-brightness 20.1 --zenith-fwhm 0.79 --zero-point 12.5
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.07 -o cfht_i --pixel-scale 0.185 --width 4428 --height 4428 --exposure-time 4300 --sky-brightness 20.3 --zenith-fwhm 0.64 --zero-point 10.0
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o lsst_r --pixel-scale 0.200 --width 4096 --height 4096 --exposure-time 6900 --sky-brightness 21.3 --zenith-fwhm 0.70 --zero-point 55.8
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o des_r --pixel-scale 0.263 --width 3115 --height 3115 --exposure-time 800 --sky-brightness 21.1 --zenith-fwhm 0.79 --zero-point 16.8
./galsimcat.py -i OneDegSq.dat -x 0.5 -y 0.0 --max-size 30 --stamps --partials --save-field --save-noise --airmass 1.2 --extinction 0.10 -o cfht_r --pixel-scale 0.185 --width 4428 --height 4428 --exposure-time 2000 --sky-brightness 20.8 --zenith-fwhm 0.71 --zero-point 13.5
"""
import sys
import os
import math
import argparse
import logging
import galsim
import pyfits
import numpy
twopi = 2*math.pi
deg2rad = math.pi/180.
deg2arcsec = 3600.
deg2arcmin = 60.
def createComponent(type,electrons,xc,yc,hlr,q,beta,g1,g2):
    """Create one sheared, shifted galaxy component.

    type is a GalSim profile class (callers pass galsim.Exponential or
    galsim.DeVaucouleurs), electrons the component flux, hlr its half-light
    radius, (q,beta) the intrinsic axis ratio and position angle in radians,
    (g1,g2) the applied cosmic shear, and (xc,yc) the centroid shift.
    """
    # create a radial profile of the requested type and size
    comp = type(flux = electrons, half_light_radius = hlr)
    # set the intrinsic shape
    comp.applyShear(q = q, beta = beta*galsim.radians)
    # apply cosmic shear
    comp.applyShear(g1 = g1, g2 = g2)
    # shift to this component's centroid
    comp.applyShift(dx = xc, dy = yc)
    return comp
"""
Returns a (disk,bulge) tuple of source objects using the specified parameters.
Note that f_d and f_b are fractions of the total flux and need not sum to one.
"""
def createSource(
total_flux,f_d,f_b,
x_d,y_d,hlr_d,q_d,beta_d,
x_b,y_b,hlr_b,q_b,beta_b,
dx,dy,relsize,dbeta,
g1,g2):
# Define the disk component, if any
if f_d > 0:
disk = createComponent(galsim.Exponential,
total_flux*f_d,x_d+dx,y_d+dy,hlr_d*relsize,q_d,beta_d+dbeta,g1,g2)
else:
disk = None
# Define the bulge component, if any
if f_b > 0:
bulge = createComponent(galsim.DeVaucouleurs,
total_flux*f_b,x_b+dx,y_b+dy,hlr_b*relsize,q_b,beta_b+dbeta,g1,g2)
else:
bulge = None
return (disk,bulge)
"""
Renders the convolution of [src,psf,pix] into the specified bounding box.
If psf is None, only [src,pix] are convolved. If src is None, an empty
stamp is returned (we use this below when either the bulge or disk is absent).
"""
def renderStamp(src,psf,pix,bbox):
stamp = galsim.ImageD(bbox)
stamp.setScale(pix.getXWidth())
if src:
models = [src,pix] if psf is None else [src,psf,pix]
gsp = galsim.GSParams(maximum_fft_size=16384)
obj = galsim.Convolve(models,gsparams=gsp)
obj.draw(image = stamp)
return stamp
"""
Renders the specified source convolved with a psf (which might be None)
and pixel response into a postage stamp with the specified bounding box.
"""
def createStamp(src,psf,pix,bbox):
(disk,bulge) = src
diskStamp = renderStamp(disk,psf,pix,bbox)
bulgeStamp = renderStamp(bulge,psf,pix,bbox)
return diskStamp + bulgeStamp
"""
Calculate the centroid, size, and shape of the convolution of [src,psf,pix]
in the specified bounding box using a high-resolution image whose pixels
are smaller by a factor of oversampling (in each direction), and whose
stamp is larger by a factor of zoom (in each direction). The centroid and
size are returned in arcsecs. The centroid is relative to the center of
the input bounding box.
"""
def getStampMoments(src,psf,pix,bbox,oversampling=10,zoom=1):
# Create a high-resolution pixel grid that covers the same area, and
# preserves the even/oddness and mean (min+max)/2. of each dimension.
(x1,x2,y1,y2) = (bbox.getXMin(),bbox.getXMax(),bbox.getYMin(),bbox.getYMax())
xmid = (x1+x2)/2.
dx = oversampling*(x2-x1)/2.
x1 = int(math.floor(xmid - zoom*dx))
x2 = int(math.ceil(xmid + zoom*dx))
assert (x1+x2)/2. == xmid
ymid = (y1+y2)/2.
dy = oversampling*(y2-y1)/2.
y1 = int(math.floor(ymid - zoom*dy))
y2 = int(math.ceil(ymid + zoom*dy))
assert (y1+y2)/2. == ymid
bigBbox = galsim.BoundsI(x1,x2,y1,y2)
scale = pix.getXWidth()/oversampling
smallPix = galsim.Pixel(scale)
# Render a high-resolution stamp of this source
stamp = createStamp(src,psf,smallPix,bigBbox)
# Calculate this stamp's moments
pixels = stamp.array
xproj = numpy.sum(pixels,axis=0)
yproj = numpy.sum(pixels,axis=1)
total = numpy.sum(pixels)
# Calculate the mean in pixels relative to the stamp center
xcoords = numpy.arange(x1,x2+1) - xmid
ycoords = numpy.arange(y1,y2+1) - ymid
x = numpy.sum(xproj*xcoords)/total
y = numpy.sum(yproj*ycoords)/total
# Calculate the second-moments matrix
xcoords -= x
ycoords -= y
xycoords = numpy.outer(ycoords,xcoords)
xx = numpy.sum(xproj*xcoords**2)/total
yy = numpy.sum(yproj*ycoords**2)/total
xy = numpy.sum(pixels*xycoords)/total
# Calculate the ellipticity and size
detQ = xx*yy - xy*xy
denom = xx + yy + 2*math.sqrt(detQ)
eps1 = (xx - yy)/denom
eps2 = 2*xy/denom
sigma = math.pow(detQ,0.25)*scale
return (x*scale,y*scale,sigma,eps1,eps2)
"""
Returns (dx,dy) for the bounding box of a surface brightness profile
SB(x,y) whose isophotes are ellipses with the shape (q,beta) and which
has an underlying normalized radial profile p(r). The inputs are:
maxSB = totalFlux*p(0) = maximum surface brightness before shear
thresholdSB = threshold surface brightness after shear
q = ratio of minor to major axes of ellipse with 0 < q <= 1
beta = angle of ellipse's major axis in radians
rFunction = returns R(b) such that p(R) = b*p(0) with 0 < b < 1
The returned (dx,dy) are in arcsecs, and defined such that SB(x,y) < f0
is guaranteed for |x| > dx or |y| > dy. The result only depends on the
ratio thresholdSB/maxSB so they must be in the same (abitrary) units.
"""
def boundingBox(maxSB,thresholdSB,q,beta,rFunction):
# Calculate shear affine transform parameters
g = (1-q)/(1+q)
gp = g*math.cos(2*beta)
gx = g*math.sin(2*beta)
detM = 1 - gp*gp - gx*gx
# Calculate the dimensionless surface brightness ratio at threshold.
b = thresholdSB/(maxSB*detM)
if b <= 0:
raise RuntimeError('boundingBox: invalid input parameters')
if b >= 1:
# The max surface brightness is below our threshold SB(0,0) <= f0
return (0,0)
# Calculate the threshold radius of the radial profile.
rcut = rFunction(b)
# Shear this circle and return its bounding box dimensions
dx = rcut*math.sqrt(((1+gp)*(1+gp)+gx*gx)/detM) # half width in arcsecs
dy = rcut*math.sqrt(((1-gp)*(1-gp)+gx*gx)/detM) # half height in arcsecs
return (dx,dy)
"""
Returns (dx,dy) for the bounding box of a Sersic profile with n = 1 or 4.
The input flux should be in electrons, hlr in arscecs, beta in radians, f0 in
elec/arcsec^2. 0 < q <= 1 is dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def sersicBounds(n,flux,hlr,q,beta,f0):
# Convert the half-light radius to the appropriate scale radius r0
# and calculate the Sersic normalization constant
if n == 1:
r0 = hlr/1.67835
norm = twopi*r0*r0
elif n == 4:
r0 = hlr/3459.49
norm = 20160*twopi*r0*r0 # 20160 = n*Gamma[2*n]
else:
raise RuntimeError('Sersic index n = %d is not supported.' % n)
# Calculate and return the bounding box
return boundingBox(flux/norm,f0,q,beta,
lambda b: r0*math.pow(-math.log(b),n))
"""
Returns (dx,dy) for the bounding box of a Moffat profile. The input flux
should be in electrons, fwhm in arcsecs, beta in radians, f0 in elec/arcsec^2.
0 < q <= 1 and moffatBeta > 1 are dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def moffatBounds(moffatBeta,flux,fwhm,q,beta,f0):
# Check that moffatBeta is valid
if moffatBeta <= 1:
raise RuntimeError('Moffat beta < 1 is not valid.')
# Convert the fwhm to the corresponding scale radius
r0 = 0.5*fwhm/math.sqrt(math.pow(2,1./moffatBeta)-1)
# Calculate the normalization factor norm = 1/p(0)
norm = math.pi*r0*r0/(moffatBeta-1)
# Calculate and return the bounding box
return boundingBox(flux/norm,f0,q,beta,
lambda b: r0*math.sqrt(1-math.pow(b,(moffatBeta-1.)/moffatBeta)))
"""
Returns a mask image of values 0 or 1 depending on whether the corresponding
input image pixel value is above or below the specified threshold in electrons.
Note that if all pixels are below threshold, then the returned mask will
contain only the central pixel with image.array.sum() == 0.
"""
def createMask(image,threshold,args):
# create an empty mask image with the same dimensions as the input image
box = image.bounds
mask = galsim.ImageS(box)
mask.setScale(image.getScale())
borderMax = 0.
lastRow = box.ymax - box.ymin
lastPixel = box.xmax - box.xmin
# initialize our trimmed bounds to just the central pixel
# (the numerator should always be even for odd width,height)
xmin = (box.getXMin()+box.getXMax())
ymin = (box.getYMin()+box.getYMax())
assert xmin%2 == 0 and ymin%2 == 0
xmin = xmin/2
ymin = ymin/2
xmax = xmin
ymax = ymin
# loop over image pixels
for (rowIndex,row) in enumerate(image.array):
y = box.getYMin()+rowIndex
for (pixelIndex,pixelValue) in enumerate(row):
x = box.getXMin()+pixelIndex
if rowIndex == 0 or rowIndex == lastRow or pixelIndex == 0 or pixelIndex == lastPixel:
# update the largest pixel value on our 1-pixel wide border
borderMax = max(borderMax,pixelValue)
if pixelValue >= threshold:
mask.array[rowIndex,pixelIndex] = 1
xmin = min(x,xmin)
xmax = max(x,xmax)
ymin = min(y,ymin)
ymax = max(y,ymax)
# is the stamp too small to contain the threshold contour?
if borderMax > threshold:
print '### stamp truncated at %.1f > %.1f electrons' % (borderMax,threshold)
# build a new mask using the border max as the threshold
return createMask(image,borderMax,args)
trimmed = galsim.BoundsI(xmin,xmax,ymin,ymax)
mask = mask[trimmed]
return mask
"""
Performs any final processing on stamp, controlled by args, then appends it to stamps.
Returns True if the stamp was saved, or otherwise False.
"""
def saveStamp(stamps,stamp,args):
# Clip the stamp so that does not extend beyond the field image. This results
# in potentially smaller files with sources that might not be centered.
if not args.no_clip:
overlap = stamp.bounds & galsim.BoundsI(1,args.width,1,args.height)
if overlap.area() == 0:
# skip this stamp if it falls completely outside our field (after trimming)
return False
stamp = stamp[overlap]
# Convert normalization from elec/exposure to elec/second
stamp = stamp/args.exposure_time
# Remember this stamp.
stamps.append(stamp)
return True
"""
Performs initializations for the psf we will be using.
"""
def initializeForPsf(psf,pix,size):
# Render a centered psf image
bbox = galsim.BoundsI(1,2*size,1,2*size)
stamp = galsim.ImageD(bbox)
scale = pix.getXWidth()
stamp.setScale(scale)
obj = galsim.Convolve([psf,pix])
obj.draw(image=stamp)
# Build the circularized psf profile
profile = numpy.zeros(size,dtype=float)
for x in range(2*size):
for y in range(2*size):
dx = x - size + 0.5
dy = y - size + 0.5
r = math.sqrt(dx*dx + dy*dy)
ipix = int(math.floor(r))
if ipix < size:
profile[ipix] = max(profile[ipix],stamp.array[x,y])
# Create a function that gives the size of bounding box necessary to contain
# psf pixels down to the specified threshold assuming the specified total flux.
# The return value is clipped at 2*size for large fluxes.
def estimator(flux,threshold):
index = 0
while index < size:
if flux*profile[index] < threshold:
return 2*index+1
index += 1
return 2*size
# Calculate the psf size from a high-resolution rendering
(xc,yc,psfSize,e1,e2) = getStampMoments((psf,None),None,pix,bbox)
assert abs(xc) < 0.01*scale and abs(yc) < 0.01*scale
assert abs(e1) < 1e-6 and abs(e2) < 1e-6
return (estimator,psfSize)
def combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,f_b):
    """
    Returns the combined (size,e1,e2) for the specified disk and bulge
    components, assuming they have the same centroid.

    hlr_d,hlr_b are the component half-light radii, q_d,q_b the minor/major
    axis ratios, pa_d,pa_b the position angles in radians, and f_b is the
    bulge flux fraction with 0 <= f_b <= 1. Size is the fourth root of the
    determinant of the flux-weighted second-moment tensor.
    """
    # ensure that single-component models give correct results
    if f_b == 0:
        q_b = 1
    elif f_b == 1:
        q_d = 1
    # calculate the disk and bulge component ellipticities
    ed = (1-q_d)/(1+q_d)
    ed1 = ed*math.cos(2*pa_d)
    ed2 = ed*math.sin(2*pa_d)
    eb = (1-q_b)/(1+q_b)
    eb1 = eb*math.cos(2*pa_b)
    eb2 = eb*math.sin(2*pa_b)
    # calculate the corresponding second-moment tensors assuming unit total flux
    # (cd,cb are the n=1 and n=4 Sersic moment normalization constants)
    cd = 1.06502
    nd = cd*(hlr_d/(1-ed*ed))**2
    Qd11 = nd*(1+ed*ed+2*ed1)
    Qd12 = nd*2*ed2
    Qd22 = nd*(1+ed*ed-2*ed1)
    cb = 10.8396
    nb = cb*(hlr_b/(1-eb*eb))**2
    Qb11 = nb*(1+eb*eb+2*eb1)
    Qb12 = nb*2*eb2
    Qb22 = nb*(1+eb*eb-2*eb1)
    # add the component second-moment tensors weighted by flux fraction
    Q11 = (1-f_b)*Qd11 + f_b*Qb11
    Q12 = (1-f_b)*Qd12 + f_b*Qb12
    Q22 = (1-f_b)*Qd22 + f_b*Qb22
    detQ = Q11*Q22 - Q12*Q12
    size = math.pow(detQ,0.25)
    # calculate the corresponding combined ellipticity
    denom = Q11 + Q22 + 2*math.sqrt(detQ)
    e1 = (Q11 - Q22)/denom
    e2 = 2*Q12/denom
    return (size,e1,e2)
def signalToNoiseRatio(stamp,pixelNoise):
    """Return the S/N of stamp for a constant per-pixel noise variance."""
    # flatten the stamp, then S/N = sqrt(sum(pixel^2)/pixelNoise)
    pixelValues = stamp.array.reshape(-1)
    sumOfSquares = numpy.dot(pixelValues,pixelValues)
    return math.sqrt(sumOfSquares/pixelNoise)
def overlapping(s1,s2):
    """Return True if stamps s1 and s2 share any pixels with non-zero flux."""
    # the bounding boxes must intersect before any pixels can overlap
    common = s1.bounds & s2.bounds
    if common.area() == 0:
        return False
    # a non-zero product over the shared pixels means both stamps carry flux there
    return bool(numpy.sum(s1[common].array * s2[common].array) != 0)
# Assigns a group ID to each stamp in stamps based on its overlaps with other stamps.
# Returns (groupID,groupSize) where groupID[i] is the group of stamps[i] and
# groupSize[g] is the number of stamps currently in group g (merged-away groups
# keep a size of zero). Groups are built by a naive union of overlapping pairs.
def analyzeOverlaps(stamps):
    # start with each stamp in its own group (Python 2: range() returns a list,
    # which is required here since groupID entries are reassigned below)
    groupID = range(len(stamps))
    groupSize = [1]*len(stamps)
    # consider every unordered pair (i2 < i1) exactly once
    for (i1,s1) in enumerate(stamps):
        for (i2,s2) in enumerate(stamps[:i1]):
            if overlapping(s1,s2):
                # get the current group IDs of these overlapping stamps
                gid1 = groupID[i1]
                gid2 = groupID[i2]
                if gid1 == gid2:
                    continue
                # decide which group joins the other
                gnew = min(gid1,gid2)
                gold = max(gid1,gid2)
                # re-assign all stamps in gold to gnew (only stamps up to i1
                # can belong to gold at this point)
                for i in range(i1+1):
                    if groupID[i] == gold:
                        groupID[i] = gnew
                        groupSize[gnew] += 1
                        groupSize[gold] -= 1
    return (groupID,groupSize)
# Builds the Fisher matrix from the specified array of npar*(npar+1)/2 Fisher images and
# calculates the corresponding shape-measurment error, if possible.
def shapeError(npar,fisherImages,fisherDenominator,mask):
    # Accumulate the upper triangle of the symmetric Fisher matrix by summing
    # the masked pixels of each Fisher image, mirroring into the lower triangle.
    fisherMatrix = numpy.zeros((npar,npar))
    imageIndex = 0
    for row in range(npar):
        for col in range(row,npar):
            element = numpy.sum(fisherImages[imageIndex]/fisherDenominator*mask)
            fisherMatrix[row,col] = element
            fisherMatrix[col,row] = element
            imageIndex += 1
    # Invert the Fisher matrix to estimate the shape-measurement error; this
    # fails when the matrix is singular.
    try:
        fullCov = numpy.linalg.inv(fisherMatrix)
        # this is where we assume that the last 2 variations are g1,g2
        varEps = 0.5*(fullCov[-2,-2]+fullCov[-1,-1])
        # variance might be negative if inverse has large numerical errors
        return math.sqrt(varEps) if varEps > 0 else 0
    except numpy.linalg.LinAlgError:
        # assign a shape-measurement error of zero if the Fisher matrix is not invertible
        return 0.
# Calculate shape measurment errors with the specified purity cuts. Returns a tuple of the
# corresponding errors, in a list, and an integer-valued image that identifies the purity
# regions by assigning each pixel the value of the largest index such that
# nominal > purity[index]*field (or zero if this criteria is not met for any purity).
def shapeErrorsAnalysis(npar,nominal,fisherImages,noiseVariance,field,purities,isolated=True):
    # find the overlap of this object in the full field
    overlap = nominal.bounds & field.bounds
    # get the pixel values for this object and all objects in the overlap
    subNominal = nominal[overlap].array
    subField = field[overlap].array
    # calculate the denominator array for our Fisher matrix elements, including
    # all objects unless we are pretending that this object is isolated
    fisherDenominator = (subNominal if isolated else subField) + noiseVariance
    # initialize our integer regions image
    regions = galsim.ImageI(nominal.bounds)
    regionsArray = regions.array
    errors = [ ]
    for (i,purity) in enumerate(purities):
        # boolean mask of pixels where this object dominates at this purity level
        mask = (subNominal > purity*subField)
        # label each masked pixel with the largest purity index seen so far
        regionsArray = numpy.maximum(regionsArray,i*mask)
        # shape-measurement error using only this purity's pixels
        sigeps = shapeError(npar,fisherImages,fisherDenominator,mask)
        errors.append(sigeps)
    # copy the accumulated labels back into the galsim image
    regions.array[:] = regionsArray[:]
    return (errors,regions)
def main():
    """Run the galsimcat simulation driver.

    Reads the input source catalog, renders each visible galaxy (disk +
    bulge Sersic components convolved with the psf and pixel response) into
    a simulated field image, optionally calculates finite-difference
    partial-derivative stamps and Fisher-matrix shape-measurement errors,
    and writes the field, stamps, regions, groups, and catalog output files
    using the --output prefix.
    """
    # Parse command-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", action = "store_true",
        help = "provide more verbose output")
    parser.add_argument("-i", "--input", default = 'gcat.dat',
        help = "name of input catalog to read")
    parser.add_argument("-o","--output", default = 'catout',
        help = "base name of output files to write")
    parser.add_argument("-x","--x-center", type = float, default = 0.5,
        help = "central RA of image (degrees)")
    parser.add_argument("-y","--y-center", type = float, default = 0.0,
        help = "central DEC of image (degrees)")
    parser.add_argument("--width", type = int, default = 512,
        help = "image width (pixels)")
    parser.add_argument("--height", type = int, default = 512,
        help = "image height (pixels)")
    parser.add_argument("--max-size", type = float, default = 20.,
        help = "flux from any object is truncated beyond this size (arcsecs)")
    parser.add_argument("--no-margin", action = "store_true",
        help = "do not simulate the tails of objects just outside the field")
    parser.add_argument("--pixel-scale", type = float, default = 0.2,
        help = "pixel scale (arscecs/pixel)")
    parser.add_argument("--airmass", type = float, default = 1.2,
        help = "airmass value to use for atmospheric PSF and extinction")
    parser.add_argument("--extinction", type = float, default = 0.07,
        help = "atmospheric extinction coefficient")
    parser.add_argument("--zenith-fwhm", type = float, default = 0.67,
        help = "atmospheric psf full-width-half-max in arcsecs at zenith")
    parser.add_argument("--instrumental-fwhm", type = float, default = 0.4,
        help = "instrumental psf full-width-half-max in arcsecs")
    parser.add_argument("--psf-beta", type = float, default = 0.0,
        help = "psf Moffat parameter beta (uses Kolmogorov psf if beta <= 0)")
    parser.add_argument("--band", choices = ['u','g','r','i','z','y'], default = 'i',
        help = "LSST imaging band to use for source fluxes")
    parser.add_argument("--zero-point", type = float, default = 41.5,
        help = "zero point for converting magnitude to detected signal in elec/sec")
    parser.add_argument("--sky-brightness", type = float, default = 20.0,
        help = "sky brightness in mag/sq.arcsec.")
    parser.add_argument("--sn-cut", type = float, default = 0.5,
        help = "keep all pixels above this signal-to-noise ratio cut")
    parser.add_argument("--exposure-time", type = float, default = 6900.,
        help = "full-depth exposure time in seconds")
    parser.add_argument("--g1", type = float, default = 0.,
        help = "constant shear component g1 to apply")
    parser.add_argument("--g2", type = float, default = 0.,
        help = "constant shear component g2 to apply")
    parser.add_argument("--save-field", action = "store_true",
        help = "save full field image without noise")
    parser.add_argument("--save-noise", action = "store_true",
        help = "save full field image with random noise added")
    parser.add_argument("--stamps", action = "store_true",
        help = "save postage stamps for each source (normalized to 1 exposure)")
    parser.add_argument("--no-clip", action = "store_true",
        help = "do not clip stamps to the image bounds")
    parser.add_argument("--no-disk", action = "store_true",
        help = "do not include any galactic disk (Sersic n=1) components")
    parser.add_argument("--no-bulge", action = "store_true",
        help = "do not include any galactic bulge (Sersic n=4) components")
    parser.add_argument("--shape", action = "store_true",
        help = "run HSM adaptive moments calculation on no-psf stamp")
    parser.add_argument("--partials", action = "store_true",
        help = "calculate and save partial derivatives wrt shape parameters (normalized to 1 exposure)")
    parser.add_argument("--partials-order", type = int, default = 1,
        help = "order of finite difference equation to use for evaluating partials")
    parser.add_argument("--only-line", type = int, default = 0,
        help = "only use the specified line number from the input catalog (when non-zero)")
    args = parser.parse_args()
    # Configure the GalSim logger
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("galsimcat")
    logger.info('Using output prefix %r' % args.output)
    # Define the pixel response
    pix = galsim.Pixel(args.pixel_scale)
    # Define the psf to use: atmospheric fwhm scales with airmass^0.6 and is
    # combined in quadrature with the instrumental fwhm
    atmos_fwhm = args.zenith_fwhm*math.pow(args.airmass,0.6)
    fwhm = math.sqrt(atmos_fwhm**2 + args.instrumental_fwhm**2)
    logger.info('Using PSF fwhm = %.4f" (%.4f" zenith => %.4f" at X = %.3f, %.4f" instrumental)' %
        (fwhm,args.zenith_fwhm,atmos_fwhm,args.airmass,args.instrumental_fwhm))
    if fwhm > 0:
        if args.psf_beta > 0:
            psf = galsim.Moffat(beta = args.psf_beta, fwhm = fwhm)
        else:
            psf = galsim.Kolmogorov(fwhm = fwhm)
        (psfBounds,psfSize) = initializeForPsf(psf,pix,int(math.ceil(0.5*args.max_size/args.pixel_scale)))
        logger.info('PSF size = %.5f arcsec' % psfSize)
    else:
        psf = None
    # Create an empty image that represents the whole field
    field = galsim.ImageD(args.width,args.height)
    field.setScale(pix.getXWidth())
    # Calculate the relative scaling of RA and angles relative to the image center
    RAscale = math.cos(args.y_center*deg2rad)
    # Calculate the corners of the image in degrees
    RAmin = args.x_center - 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    RAmax = args.x_center + 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    DECmin = args.y_center - 0.5*args.height*args.pixel_scale/deg2arcsec
    DECmax = args.y_center + 0.5*args.height*args.pixel_scale/deg2arcsec
    # Calculate margin size in degrees (sources outside of our image margins
    # are always skipped, for speed, even if their tails might overlap our image)
    if args.no_margin:
        margin = 0
    else:
        margin = 0.5*args.max_size/deg2arcsec
    # Calculate the sky background rate in elec/sec/pixel
    skyRate = args.zero_point*math.pow(10,-0.4*(args.sky_brightness-24))*args.pixel_scale**2
    # Calculate the mean sky noise level for the full exposure time in elec/pixel
    skyNoise = math.sqrt(args.exposure_time*skyRate)
    # Calculate the pixel threshold cut to use in detected electrons during the full exposure
    pixelCut = args.sn_cut*skyNoise
    # Calculate the corresponding surface brightness cut to use
    sbCut = pixelCut/(args.pixel_scale*args.pixel_scale)
    print 'Simulating %s-band observations with AB24 zero point %.3f elec/sec, sky rate = %.3f elec/sec/pixel' %(
        args.band,args.zero_point,skyRate)
    print 'Simulating %.1fs exposure with total sky noise level %.3f elec/pixel (%.3f mag/sq.arcsec.)' % (
        args.exposure_time,skyNoise,args.sky_brightness)
    print 'Will keep all stacked pixels > %.3f elec (%.1f elec/arcsec^2)' % (pixelCut,sbCut)
    # Initialize finite difference calculations if necessary
    if args.partials:
        # partial-derivative output is saved as stamps, so --partials implies --stamps
        args.stamps = True
        if args.partials_order < 1 or args.partials_order > 4:
            logger.error('Bad parameter: partials-order must be an integer 1-4.')
            sys.exit(-1)
        # Initialize the finite difference coefficients to use
        if args.partials_order == 1:
            fdCoefs = (1./2.,)
        elif args.partials_order == 2:
            fdCoefs = (2./3.,-1./12.)
        elif args.partials_order == 3:
            fdCoefs = (3./4.,-3./20.,1./60.)
        else:
            fdCoefs = (4./5.,-1./5.,4./105.,-1./280.)
    # Open the source input catalog to use and initialize a keyword-based lookup for catalog entries
    cat = open(args.input)
    catFields = cat.readline().split()
    catDict = dict(zip(catFields,range(len(catFields))))
    if args.verbose:
        logger.info('Reading input catalog %r with fields:\n%s' % (args.input,','.join(catFields)))
    # Initialize the output catalog in memory
    outputCatalog = [ ]
    # Initialize the list of per-object stamp HDUs we will fill
    hdu = pyfits.PrimaryHDU()
    hduList = pyfits.HDUList([hdu])
    stampList = [ ]
    fisherImagesList = [ ]
    nvar = 0 # declared here so it stays in scope after loop over galaxies
    # Loop over catalog entries
    nkeep = lineno = 0
    for line in cat:
        lineno += 1
        if args.only_line > 0 and lineno != args.only_line:
            continue
        # prepare to read this catalog entry
        entryCols = line.split()
        def catalog(fieldName,type=float):
            # look up the named field of the current entry, converted to the requested type
            return type(entryCols[catDict[fieldName]])
        entryID = catalog('id',int)
        # position on the sky in degrees
        RA = catalog('ra')
        DEC = catalog('dec')
        # skip sources outside our margins
        if RA < RAmin-margin or RA > RAmax+margin or DEC < DECmin-margin or DEC > DECmax+margin:
            continue
        # Calculate the offsets of this source from our image's bottom left corner in pixels
        # (which might be negative or beyond our image bounds because of the margins)
        xoffset = (RA - RAmin)*deg2arcsec/args.pixel_scale*RAscale
        yoffset = (DEC - DECmin)*deg2arcsec/args.pixel_scale
        # Look up redshift
        z = catalog('redshift')
        # Look up source AB magnitude in the requested band
        abMag = catalog(args.band + '_ab')
        # Correct for extinction
        abMag += args.extinction*(args.airmass - 1)
        # Calculate total detected signal in electrons
        flux = args.exposure_time*args.zero_point*math.pow(10,-0.4*(abMag-24))
        # Skip objects whose total flux is below our pixel threshold
        if flux < pixelCut:
            continue
        # Look up the component flux relative normalizations
        diskFluxNorm = catalog('fluxnorm_disk')
        bulgeFluxNorm = catalog('fluxnorm_bulge')
        agnFluxNorm = catalog('fluxnorm_agn')
        totalFluxNorm = diskFluxNorm + bulgeFluxNorm + agnFluxNorm
        # Calculate the disk and bulge fluxes to simulate
        if args.no_disk:
            diskFlux = 0
        else:
            diskFlux = flux*diskFluxNorm/totalFluxNorm
        if args.no_bulge:
            bulgeFlux = 0
        else:
            bulgeFlux = flux*bulgeFluxNorm/totalFluxNorm
        if diskFlux == 0 and bulgeFlux == 0:
            continue
        # Get disk component parameters
        if diskFlux > 0:
            hlr_d = catalog('DiskHalfLightRadius') # in arcsecs
            pa_d = catalog('pa_disk') # position angle in degrees
            a_d = catalog('a_d') # major axis length in arcsecs
            b_d = catalog('b_d') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_d = b_d/a_d # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_d = pa_d*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_d,h_d) = sersicBounds(1,diskFlux+bulgeFlux,hlr_d,q_d,pa_d,sbCut)
        else:
            (w_d,h_d) = (0,0)
        # Get bulge component parameters
        if bulgeFlux > 0:
            hlr_b = catalog('BulgeHalfLightRadius') # in arcsecs
            pa_b = catalog('pa_bulge') # position angle in degrees
            a_b = catalog('a_b') # major axis length in arcsecs
            b_b = catalog('b_b') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_b = b_b/a_b # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_b = pa_b*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_b,h_b) = sersicBounds(4,diskFlux+bulgeFlux,hlr_b,q_b,pa_b,sbCut)
        else:
            (w_b,h_b) = (0,0)
        # If a component is missing, set its nominal size and shape from the other component.
        if diskFlux == 0:
            (hlr_d,q_d,pa_d) = (hlr_b,q_b,pa_b)
        if bulgeFlux == 0:
            (hlr_b,q_b,pa_b) = (hlr_d,q_d,pa_d)
        # Combine the bulge and disk ellipticities
        (size,e1,e2) = combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,bulgeFlux/(bulgeFlux+diskFlux))
        # Combine the bulge and disk bounding boxes
        width = max(w_d,w_b)
        height = max(h_d,h_b)
        # Estimate the (round) bounding box for the psf in arscecs given our total flux
        psfSize = psfBounds(flux,pixelCut)*args.pixel_scale if psf else 0
        # Add the psf size in quadrature
        width = math.sqrt(width*width + psfSize*psfSize)
        height = math.sqrt(height*height + psfSize*psfSize)
        # Truncate the bounding box, if necessary
        if width > args.max_size or height > args.max_size:
            logger.info('...truncating bbbox from (%.1f,%.1f)' % (width,height))
            width = min(width,args.max_size)
            height = min(height,args.max_size)
        # Skip this source if its pixels would all be below pixelCut (can this ever happen?)
        if (width,height) == (0,0):
            continue
        # Calculate the integer coordinates of the image pixel that contains the source center
        # (using the convention that the bottom left corner pixel has coordinates 1,1)
        xpixels = int(math.ceil(xoffset))
        ypixels = int(math.ceil(yoffset))
        # Calculate the stamp size to use as width = 2*xhalf+1 and height = 2*yhalf+1.
        # We always round up to an odd integer so that flux is consistently centered
        # (see Issue #380).
        xhalf = int(math.ceil(width/args.pixel_scale))
        yhalf = int(math.ceil(height/args.pixel_scale))
        # Trim the stamp so that the source is still centered but we do not extend
        # beyond the final field image. This will only trim pixels above pixelCut
        # that lie outside the field.
        if xpixels-xhalf < 1 and xpixels+xhalf > args.width:
            xhalf = max(xpixels-1,args.width-xpixels)
        if ypixels-yhalf < 1 and ypixels+yhalf > args.height:
            yhalf = max(ypixels-1,args.height-ypixels)
        # Build this source's stamp bounding box
        bbox = galsim.BoundsI(xpixels-xhalf,xpixels+xhalf,ypixels-yhalf,ypixels+yhalf)
        # Skip objects that don't overlap our field
        if (bbox & field.bounds).area() == 0:
            continue
        # If we get this far, we are definitely rendering this source (but it might
        # still get trimmed out later)
        logger.info('Rendering input catalog line %d (entry id %d) with w x h = %d x %d' %
            (lineno,entryID,2*xhalf+1,2*yhalf+1))
        # Calculate the pixel coordinates of the stamp center.
        xstamp = 0.5*(bbox.xmin + bbox.xmax)
        ystamp = 0.5*(bbox.ymin + bbox.ymax)
        # Calculate the subpixel shift in arcsecs (not pixels!) of the source center
        # relative to the stamp center. Note that the resulting shift may be more than
        # one pixel in either direction because of the clipping operation above.
        xshift = (xoffset - (xstamp-0.5))*args.pixel_scale
        yshift = (yoffset - (ystamp-0.5))*args.pixel_scale
        if args.verbose:
            logger.info(' flux: %.3g electrons (%s-band AB %.1f)' % (flux,args.band,abMag))
            logger.info(' bounds: [%d:%d,%d:%d] pixels' % (bbox.xmin,bbox.xmax,bbox.ymin,bbox.ymax))
            logger.info(' shift: (%f,%f) arcsecs = (%f,%f) pixels' %
                (xshift,yshift,xshift/args.pixel_scale,yshift/args.pixel_scale))
            logger.info(' disk: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (diskFlux/flux,hlr_d,q_d,pa_d))
            logger.info(' bulge: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (bulgeFlux/flux,hlr_b,q_b,pa_b))
            logger.info(' agn: frac = %f' % (agnFluxNorm/flux))
            logger.info(' bbox: disk (%.1f,%.1f) bulge (%.1f,%.1f) psf %.1f arcsec' %
                (w_d,h_d,w_b,h_b,psfSize))
            logger.info(' size: %.2f arcsec' % size)
            logger.info(' shape: (e1,e2) = (%.6f,%.6f)' % (e1,e2))
        # Define the nominal source parameters for rendering this object within its stamp
        params = {
            'total_flux': diskFlux + bulgeFlux,
            'f_d': diskFlux/(diskFlux+bulgeFlux), 'f_b': bulgeFlux/(diskFlux+bulgeFlux),
            'x_d': xshift, 'y_d': yshift, 'hlr_d': hlr_d, 'q_d': q_d, 'beta_d': pa_d,
            'x_b': xshift, 'y_b': yshift, 'hlr_b': hlr_b, 'q_b': q_b, 'beta_b': pa_b,
            'dx': 0., 'dy': 0., 'relsize': 1., 'dbeta': 0.,
            'g1': args.g1, 'g2': args.g2
        }
        # Render the nominal stamps for this galaxy
        gal = createSource(**params)
        nominal = createStamp(gal,psf,pix,bbox)
        # Create a mask for pixels above threshold
        mask = createMask(nominal,pixelCut,args)
        if mask.array.sum() == 0:
            # this stamp has no pixels above threshold
            logger.info('*** line %d (id %d) is below threshold' % (lineno,entryID))
            continue
        trimmed = mask.bounds
        if args.verbose:
            logger.info(' trimmed: [%d:%d,%d:%d] pixels' %
                (trimmed.xmin,trimmed.xmax,trimmed.ymin,trimmed.ymax))
        # Add the nominal galaxy to the full field image after applying the threshold mask
        # (the mask must be the second term in the product so that the result is double precision)
        maskedNominal = nominal[trimmed]*mask
        fieldOverlap = trimmed & field.bounds
        if fieldOverlap.area() == 0:
            # this stamp's mask falls completely outside our field
            logger.info('*** line %d (id %d) does not overlap field' % (lineno,entryID))
            continue
        field[fieldOverlap] += maskedNominal[fieldOverlap]
        # Remember the nominal stamp (clipped to the field) for overlap calculations.
        stampList.append(maskedNominal[fieldOverlap])
        # Calculate this object's nominal flux S/N ratio at full depth using only masked pixels.
        # Note that this value cannot be reproduced from the saved stamp when a stamp is clipped
        # to the field boundary (use --no-clip to disable this).
        snr = signalToNoiseRatio(maskedNominal,args.exposure_time*skyRate)
        if args.verbose:
            logger.info(' S/N: %.6f' % snr)
        # Initialize the datacube of stamps that we will save for this object
        datacube = [ ]
        partialsArray = [ ]
        # Save the nominal (masked and trimmed) stamp
        assert saveStamp(datacube,maskedNominal,args)
        if args.partials:
            # Specify the amount to vary each parameter for partial derivatives
            # (we don't use a dictionary here since we want to control the order)
            variations = [
                ('f_d',0.01), ('f_b',0.01),
                ('dx',args.pixel_scale/3.),('dy',args.pixel_scale/3.),
                ('relsize',0.05),
                ('g1',0.03), ('g2',0.03)
            ]
            # the shape measurement parameters must always be the last 2 variations
            # since we make this assumption when slicing the covariance matrix below
            assert variations[-2][0] == 'g1'
            assert variations[-1][0] == 'g2'
            # loop over source parameters to vary
            for (pname,delta) in variations:
                # create stamps for each variation of this parameter
                newparams = params.copy()
                partial = galsim.ImageD(bbox)
                partial.setScale(pix.getXWidth())
                # delta might be zero, e.g., for hlr_b when bulge fraction = 0
                if delta > 0:
                    for step in range(args.partials_order):
                        # create and save the positive variation stamp
                        newparams[pname] = params[pname] + (step+1)*delta
                        newsource = createSource(**newparams)
                        plus = createStamp(newsource,psf,pix,bbox)
                        # create and save the negative variation stamp
                        newparams[pname] = params[pname] - (step+1)*delta
                        newsource = createSource(**newparams)
                        minus = createStamp(newsource,psf,pix,bbox)
                        # update the finite difference calculation of this partial
                        partial += (fdCoefs[step]/delta)*(plus - minus)
                # append this partial to our datacube after trimming and masking
                maskedPartial = partial[trimmed]*mask
                assert saveStamp(datacube,maskedPartial,args)
                # remember this partial's numpy image array
                partialsArray.append(maskedPartial[fieldOverlap].array)
            # calculate the Fisher matrix images for this object (note that we haven't
            # included the Fisher denominator here since that might include overlapping
            # objects that we have not seen yet)
            nvar = len(partialsArray)
            nfisher = ((nvar+1)*nvar)/2
            (h,w) = partialsArray[0].shape
            fisherImage = numpy.zeros((nfisher,h,w))
            index = 0
            for i in range(nvar):
                for j in range(i,nvar):
                    fisherImage[index] = partialsArray[i]*partialsArray[j]
                    index += 1
            fisherImagesList.append(fisherImage)
        # Add a new HDU with a datacube for this object's stamps
        # We don't use compression = 'gzip_tile' for now since it is lossy
        # and mathematica cannot Import it.
        galsim.fits.writeCube(datacube, hdu_list = hduList)
        # Add a catalog entry for this galaxy
        entry = [entryID,xoffset,yoffset,abMag,flux/args.exposure_time,size,e1,e2,
            bulgeFlux/(diskFlux+bulgeFlux),z,snr]
        outputCatalog.append(entry)
        nkeep += 1
        logger.info("saved entry id %d as stamp %d" % (entryID,nkeep))
    # Close the input catalog
    cat.close()
    # Loop over all saved objects to test for overlaps and build overlap groups
    (groupID,groupSize) = analyzeOverlaps(stampList)
    # Add group id to output catalog
    for (i,entry) in enumerate(outputCatalog):
        entry.append(groupID[i])
    # Do shape measurement error analysis for each galaxy
    purities = (0,0.5,0.9)
    regionsList = [ ]
    for i in range(nkeep):
        (errors,regions) = shapeErrorsAnalysis(
            nvar,stampList[i],fisherImagesList[i],args.exposure_time*skyRate,field,purities)
        outputCatalog[i].extend(errors)
        regionsList.append(regions)
    # Save the regions for each object
    outname = args.output + '_regions.fits'
    logger.info('Saving regions to %r' % outname)
    galsim.fits.writeMulti(regionsList,outname)
    # Save group sizes to a file
    outname = args.output + '_groups.dat'
    out = open(outname,'w')
    ntot = 0
    for (i,n) in enumerate(groupSize):
        if n > 0:
            print >>out,i,n
            ntot += n
    out.close()
    # every stamp must be accounted for in exactly one group
    assert ntot == len(groupSize)
    # Write the full field image without noise
    if args.save_field:
        # First without noise
        outname = args.output + '_field.fits'
        logger.info('Saving full field to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the full field image with random noise added
    if args.save_noise:
        # fixed seed so the noise realization is reproducible
        rng = galsim.BaseDeviate(123)
        noise = galsim.PoissonNoise(rng,sky_level = args.exposure_time*skyRate)
        field.addNoise(noise)
        outname = args.output + '_noise.fits'
        logger.info('Saving full field with noise added to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the object stamp datacubes
    if args.stamps:
        outname = args.output + '_stamps.fits'
        logger.info('Saving %d stamps to %r' % (nkeep,outname))
        galsim.fits.writeFile(outname, hduList)
    # Write the output catalog from memory
    outname = args.output + '_catalog.dat'
    out = open(outname,'w')
    for entry in outputCatalog:
        print >>out, ' '.join(map(str,entry))
    out.close()
    logger.info('Wrote %d of %d catalog entries to %r' % (nkeep,lineno,outname))

if __name__ == "__main__":
    main()
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver related activity. Health checks. requests."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import time
import traceback
import random
import sys
# external dependencies (from third_party)
import dns.exception
import dns.query
import dns.message
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.reversename
import dns.resolver
import util
# Pick the most accurate timer for a platform. Stolen from timeit.py:
# (on Windows, time.clock has far better resolution than time.time)
if sys.platform == "win32":
    DEFAULT_TIMER = time.clock
else:
    DEFAULT_TIMER = time.time

# Expected answer prefixes/records used by the hijack-detection health checks
# below; a response matching none of these is flagged as hijacked.
GOOGLE_CLASS_B = ('74.125',)
WWW_GOOGLE_RESPONSE = ('CNAME www.l.google.com',)
WWW_PAYPAL_RESPONSE = ('66.211.169.', '64.4.241.')
WWW_TPB_RESPONSE = ('194.71.107.',)
OPENDNS_NS = '208.67.220.220'
# Domains with wildcard DNS, used to create unique records for cache probing.
WILDCARD_DOMAINS = ('live.com.', 'blogspot.com.', 'wordpress.com.')
# TTL delta bounds (ms) for considering two nameservers to share a cache.
MIN_SHARING_DELTA_MS = 3
MAX_SHARING_DELTA_MS = 90
# Timeout multiplier applied to health_timeout for shared-cache queries,
# which tend to run slower than ordinary health checks.
SHARED_CACHE_TIMEOUT_MULTIPLIER = 2.25
class NameServer(object):
    """Hold information about a particular nameserver.

    Tracks identity (ip/name), health-check results (self.checks), warnings,
    and cache-sharing probe state for a single DNS server.
    """

    def __init__(self, ip, name=None, internal=False, primary=False):
        self.name = name
        self.ip = ip
        # True if this server came from the local system's resolver config.
        self.is_system = internal
        self.system_position = None
        self.is_primary = primary
        # Timeouts in seconds: normal queries vs. (stricter) health checks.
        self.timeout = 60
        self.health_timeout = 30
        self.warnings = []
        self.shared_with = []
        # False, or a string describing why this server was excluded.
        self.disabled = False
        # List of (test_name, is_broken, warning, duration_ms) tuples.
        self.checks = []
        self.share_check_count = 0
        # (hostname, ttl) from the last saved wildcard-cache probe, or None.
        self.cache_check = None
        self.is_slower_replica = False

    @property
    def check_duration(self):
        """Total duration (ms) of all recorded health checks."""
        return sum([x[3] for x in self.checks])

    @property
    def failure(self):
        """The first failed check tuple, or None if all checks passed."""
        failures = [x for x in self.checks if x[1]]
        if failures:
            return failures[0]
        else:
            return None

    @property
    def warnings_string(self):
        """Human-readable summary of warnings (or the exclusion reason)."""
        if self.disabled:
            return '(excluded: %s)' % self.disabled
        else:
            return ', '.join(self.warnings)

    @property
    def warnings_comment(self):
        """warnings_string formatted as a '#'-prefixed config comment."""
        if self.warnings or self.disabled:
            return '# ' + self.warnings_string
        else:
            return ''

    @property
    def hostname(self):
        """Reverse-resolved (PTR) hostname for this IP, or '' on failure."""
        try:
            answer = dns.resolver.query(dns.reversename.from_address(self.ip), 'PTR')
            if answer:
                return str(answer[0])
        # NOTE(review): bare except hides all errors; also, an empty (falsy)
        # answer with no exception falls through to an implicit None rather
        # than '' -- confirm callers tolerate None here.
        except:
            return ''

    def __str__(self):
        return '%s [%s]' % (self.name, self.ip)

    def __repr__(self):
        return self.__str__()

    def CreateRequest(self, record, request_type, return_type):
        """Function to work around any dnspython make_query quirks."""
        return dns.message.make_query(record, request_type, return_type)

    def Query(self, request, timeout):
        """Send the request to this server over UDP port 53."""
        return dns.query.udp(request, self.ip, timeout, 53)

    def TimedRequest(self, type_string, record_string, timeout=None,
                     timer=DEFAULT_TIMER):
        """Make a DNS request, returning the reply and duration it took.

        Args:
          type_string: DNS record type to query (string)
          record_string: DNS record name to query (string)
          timeout: optional timeout (float)
          timer: clock function used to measure duration (defaults per-platform)

        Returns:
          A tuple of (response, duration in ms [float], exception)

        In the case of a DNS response timeout, the response object will be None.
        """
        request_type = dns.rdatatype.from_text(type_string)
        record = dns.name.from_text(record_string, None)
        request = None
        # Sometimes it takes great effort just to craft a UDP packet.
        try:
            request = self.CreateRequest(record, request_type, dns.rdataclass.IN)
        except ValueError, exc:
            if not request:
                # Malformed query name/type: report with zero duration.
                return (None, 0, exc)
        if not timeout:
            timeout = self.timeout
        exc = None
        duration = None
        try:
            start_time = timer()
            response = self.Query(request, timeout)
            duration = timer() - start_time
        except (dns.exception.Timeout), exc:
            response = None
        except (dns.query.BadResponse, dns.message.TrailingJunk,
                dns.query.UnexpectedSource), exc:
            response = None
        except (KeyboardInterrupt, SystemExit, SystemError), exc:
            # Never swallow interpreter-level exits.
            raise exc
        except:
            (exc, error) = sys.exc_info()[0:2]
            print "* Error with %s: %s (%s)" % (self, exc, error)
            response = None
        if not duration:
            # An exception fired before duration was set; measure to now.
            duration = timer() - start_time
        return (response, util.SecondsToMilliseconds(duration), exc)

    def TestAnswers(self, record_type, record, expected, fatal=True):
        """Test to see that an answer returns correct IP's.

        Args:
          record_type: text record type for NS query (A, CNAME, etc)
          record: string to query for
          expected: tuple of strings expected in all answers
          fatal: whether a missing answer marks the server broken

        Returns:
          (is_broken, warning, duration)
        """
        is_broken = False
        warning = None
        (response, duration, exc) = self.TimedRequest(record_type, record,
                                                      timeout=self.health_timeout)
        failures = []
        if not response:
            is_broken = True
            # NOTE(review): this stores the exception *class* object, while
            # TestNegativeResponse stores its string name -- confirm intended.
            warning = exc.__class__
        elif not response.answer:
            if fatal:
                is_broken = True
            # Avoid preferring broken DNS servers that respond quickly
            duration = self.health_timeout
            warning = 'No answer for %s' % record
        else:
            # Every answer record must contain at least one expected substring.
            for a in response.answer:
                failed = True
                for string in expected:
                    if string in str(a):
                        failed=False
                        break
                if failed:
                    failures.append(a)
        if failures:
            answers = [' + '.join(map(str, x.items)) for x in response.answer]
            answer_text = ' -> '.join(answers)
            warning = '%s hijacked (%s)' % (record, answer_text)
        return (is_broken, warning, duration)

    def ResponseToAscii(self, response):
        """Render a DNS response's answer section as a short ASCII string."""
        if not response:
            return None
        if response.answer:
            answers = [' + '.join(map(str, x.items)) for x in response.answer]
            return ' -> '.join(answers)
        else:
            return 'no answer'

    def TestGoogleComResponse(self):
        """Health check: google.com A record must be in Google's class B."""
        return self.TestAnswers('A', 'google.com.', GOOGLE_CLASS_B)

    def TestWwwGoogleComResponse(self):
        """Health check: www.google.com must CNAME to www.l.google.com."""
        return self.TestAnswers('CNAME', 'www.google.com.', WWW_GOOGLE_RESPONSE)

    def TestWwwPaypalComResponse(self):
        """Health check: www.paypal.com must resolve to known PayPal ranges."""
        return self.TestAnswers('A', 'www.paypal.com.', WWW_PAYPAL_RESPONSE)

    def TestWwwTpbOrgResponse(self):
        """Health check (non-fatal): detect censorship of thepiratebay.org."""
        return self.TestAnswers('A', 'www.thepiratebay.org.', WWW_TPB_RESPONSE,
                                fatal=False)

    def TestNegativeResponse(self):
        """Test for NXDOMAIN hijacking.

        Queries a random nonexistent google.com subdomain; any answer means
        the server rewrites NXDOMAIN responses.

        Returns:
          (is_broken, warning, duration)
        """
        is_broken = False
        warning = None
        poison_test = 'nb.%s.google.com.' % random.random()
        (response, duration, exc) = self.TimedRequest('A', poison_test,
                                                      timeout=self.health_timeout)
        if not response:
            is_broken = True
            warning = str(exc.__class__.__name__)
        elif response.answer:
            warning = 'NXDOMAIN Hijacking'
        return (is_broken, warning, duration)

    def QueryWildcardCache(self, hostname=None, save=True, timeout=None):
        """Make a query to a random wildcard DNS host, storing the record.

        Args:
          hostname: record to query; a random wildcard host if None
          save: whether to remember (hostname, ttl) in self.cache_check
          timeout: query timeout; defaults to self.health_timeout

        Returns:
          (response, is_broken, warning, duration)
        """
        if not timeout:
            timeout = self.health_timeout
        is_broken = False
        warning = None
        if not hostname:
            domain = random.choice(WILDCARD_DOMAINS)
            hostname = 'namebench%s.%s' % (random.randint(1,2**32), domain)
        (response, duration, exc) = self.TimedRequest('A', hostname,
                                                      timeout=timeout)
        ttl = None
        if not response:
            is_broken = True
            warning = exc.__class__.__name__
        elif not response.answer:
            is_broken = True
            warning = 'No response'
        else:
            ttl = response.answer[0].ttl
        if save:
            self.cache_check = (hostname, ttl)
        return (response, is_broken, warning, duration)

    def StoreWildcardCache(self):
        """Seed self.cache_check for later TestSharedCache comparisons."""
        (is_broken, warning, duration) = self.QueryWildcardCache(save=True)[1:]
        if warning:
            self.warnings.append(warning)
        if is_broken:
            self.disabled = 'Failed CacheWildcard: %s' % warning
        # Is this really a good idea to count?
        #self.checks.append(('wildcard store', is_broken, warning, duration))

    def TestSharedCache(self, other_ns):
        """Is this nameserver sharing a cache with another nameserver?

        Compares the TTL we see for other_ns's cached wildcard record against
        the TTL other_ns saw; a small nonzero delta implies a shared cache.

        Args:
          other_ns: A nameserver to compare it to.

        Returns:
          A tuple containing:
            - Boolean of whether or not this host has a shared cache
            - The faster NameServer object
            - The slower NameServer object
        """
        if other_ns.cache_check:
            (cache_id, other_ttl) = other_ns.cache_check
        else:
            print "* cache check for %s is missing (skipping)" % other_ns
            return (False, None, None)
        # These queries tend to run slow, and we've already narrowed down the worst.
        timeout = self.health_timeout * SHARED_CACHE_TIMEOUT_MULTIPLIER
        (response, is_broken, warning, duration) = self.QueryWildcardCache(
            cache_id,
            save=False,
            timeout=timeout
        )
        # Try again, but only once. Do penalize them for the first fail however.
        if is_broken:
            sys.stdout.write('_')
            (response, is_broken, warning, duration2) = self.QueryWildcardCache(
                cache_id,
                save=False,
                timeout=timeout
            )
            if is_broken:
                sys.stdout.write('o')
        # Is this really a good idea to count?
        #self.checks.append((cache_id, is_broken, warning, duration))
        if is_broken:
            self.disabled = 'Failed shared-cache: %s' % warning
        else:
            my_ttl = response.answer[0].ttl
            delta = abs(other_ttl - my_ttl)
            if delta > 0:
                # Lower TTL means the record has aged in that cache longer:
                # that server (upstream) populated the entry first.
                if my_ttl < other_ttl:
                    upstream = self
                    upstream_ttl = my_ttl
                    downstream_ttl = other_ttl
                    downstream = other_ns
                else:
                    upstream = other_ns
                    downstream = self
                    upstream_ttl = other_ttl
                    downstream_ttl = my_ttl
                if other_ns.check_duration > self.check_duration:
                    slower = other_ns
                    faster = self
                else:
                    slower = self
                    faster = other_ns
                if delta > MIN_SHARING_DELTA_MS and delta < MAX_SHARING_DELTA_MS:
                    print "%s [%s] -> %s [%s] for %s" % (downstream, downstream_ttl,
                                                         upstream, upstream_ttl, cache_id)
                    return (True, slower, faster)
        return (False, None, None)

    def CheckHealth(self):
        """Qualify a nameserver to see if it is any good.

        Runs each health-check test in order, recording results in
        self.checks/self.warnings; stops at the first fatal failure.

        Returns:
          self.disabled (False, or the string reason for exclusion)
        """
        tests = [self.TestWwwGoogleComResponse,
                 self.TestGoogleComResponse,
                 self.TestNegativeResponse,
                 self.TestWwwPaypalComResponse,
                 self.TestWwwTpbOrgResponse]
        self.checks = []
        self.warnings = []
        for test in tests:
            (is_broken, warning, duration) = test()
            self.checks.append((test.__name__, is_broken, warning, duration))
            if warning:
                # print "found %s [%s] to have %s: %s" % (self.name, self.ip, test, warning)
                self.warnings.append('%s: %s' % (test.__name__, warning))
            if is_broken:
                self.disabled = 'Failed %s: %s' % (test.__name__, warning)
                break
        # if self.warnings:
        #   print '%s [%s] - %s' % (self.name, self.ip, self.warnings)
        return self.disabled
Go ahead and print warnings if a system nameserver is giving grief
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver related activity. Health checks. requests."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import time
import traceback
import random
import sys
# external dependencies (from third_party)
import dns.exception
import dns.query
import dns.message
import dns.name
import dns.rdataclass
import dns.rdatatype
import dns.reversename
import dns.resolver
import util
# Pick the most accurate timer for a platform. Stolen from timeit.py:
# (on Windows, time.clock has far better resolution than time.time)
if sys.platform == "win32":
    DEFAULT_TIMER = time.clock
else:
    DEFAULT_TIMER = time.time

# Expected answer prefixes/records used by the hijack-detection health checks
# below; a response matching none of these is flagged as hijacked.
GOOGLE_CLASS_B = ('74.125',)
WWW_GOOGLE_RESPONSE = ('CNAME www.l.google.com',)
WWW_PAYPAL_RESPONSE = ('66.211.169.', '64.4.241.')
WWW_TPB_RESPONSE = ('194.71.107.',)
OPENDNS_NS = '208.67.220.220'
# Domains with wildcard DNS, used to create unique records for cache probing.
WILDCARD_DOMAINS = ('live.com.', 'blogspot.com.', 'wordpress.com.')
# TTL delta bounds (ms) for considering two nameservers to share a cache.
MIN_SHARING_DELTA_MS = 3
MAX_SHARING_DELTA_MS = 90
# Timeout multiplier applied to health_timeout for shared-cache queries,
# which tend to run slower than ordinary health checks.
SHARED_CACHE_TIMEOUT_MULTIPLIER = 2.25
class NameServer(object):
    """Hold information about a particular nameserver.

    Tracks identity (ip/name), health-check results (self.checks), warnings,
    and cache-sharing probe state for a single DNS server. Failures on system
    (locally-configured) nameservers are printed loudly to aid the user.
    """

    def __init__(self, ip, name=None, internal=False, primary=False):
        self.name = name
        self.ip = ip
        # True if this server came from the local system's resolver config.
        self.is_system = internal
        self.system_position = None
        self.is_primary = primary
        # Timeouts in seconds: normal queries vs. (stricter) health checks.
        self.timeout = 60
        self.health_timeout = 30
        self.warnings = []
        self.shared_with = []
        # False, or a string describing why this server was excluded.
        self.disabled = False
        # List of (test_name, is_broken, warning, duration_ms) tuples.
        self.checks = []
        self.share_check_count = 0
        # (hostname, ttl) from the last saved wildcard-cache probe, or None.
        self.cache_check = None
        self.is_slower_replica = False

    @property
    def check_duration(self):
        """Total duration (ms) of all recorded health checks."""
        return sum([x[3] for x in self.checks])

    @property
    def failure(self):
        """The first failed check tuple, or None if all checks passed."""
        failures = [x for x in self.checks if x[1]]
        if failures:
            return failures[0]
        else:
            return None

    @property
    def warnings_string(self):
        """Human-readable summary of warnings (or the exclusion reason)."""
        if self.disabled:
            return '(excluded: %s)' % self.disabled
        else:
            return ', '.join(self.warnings)

    @property
    def warnings_comment(self):
        """warnings_string formatted as a '#'-prefixed config comment."""
        if self.warnings or self.disabled:
            return '# ' + self.warnings_string
        else:
            return ''

    @property
    def hostname(self):
        """Reverse-resolved (PTR) hostname for this IP, or '' on failure."""
        try:
            answer = dns.resolver.query(dns.reversename.from_address(self.ip), 'PTR')
            if answer:
                return str(answer[0])
        # NOTE(review): bare except hides all errors; also, an empty (falsy)
        # answer with no exception falls through to an implicit None rather
        # than '' -- confirm callers tolerate None here.
        except:
            return ''

    def __str__(self):
        return '%s [%s]' % (self.name, self.ip)

    def __repr__(self):
        return self.__str__()

    def CreateRequest(self, record, request_type, return_type):
        """Function to work around any dnspython make_query quirks."""
        return dns.message.make_query(record, request_type, return_type)

    def Query(self, request, timeout):
        """Send the request to this server over UDP port 53."""
        return dns.query.udp(request, self.ip, timeout, 53)

    def TimedRequest(self, type_string, record_string, timeout=None,
                     timer=DEFAULT_TIMER):
        """Make a DNS request, returning the reply and duration it took.

        Args:
          type_string: DNS record type to query (string)
          record_string: DNS record name to query (string)
          timeout: optional timeout (float)
          timer: clock function used to measure duration (defaults per-platform)

        Returns:
          A tuple of (response, duration in ms [float], exception)

        In the case of a DNS response timeout, the response object will be None.
        """
        request_type = dns.rdatatype.from_text(type_string)
        record = dns.name.from_text(record_string, None)
        request = None
        # Sometimes it takes great effort just to craft a UDP packet.
        try:
            request = self.CreateRequest(record, request_type, dns.rdataclass.IN)
        except ValueError, exc:
            if not request:
                # Malformed query name/type: report with zero duration.
                return (None, 0, exc)
        if not timeout:
            timeout = self.timeout
        exc = None
        duration = None
        try:
            start_time = timer()
            response = self.Query(request, timeout)
            duration = timer() - start_time
        except (dns.exception.Timeout), exc:
            response = None
        except (dns.query.BadResponse, dns.message.TrailingJunk,
                dns.query.UnexpectedSource), exc:
            response = None
        except (KeyboardInterrupt, SystemExit, SystemError), exc:
            # Never swallow interpreter-level exits.
            raise exc
        except:
            (exc, error) = sys.exc_info()[0:2]
            print "* Error with %s: %s (%s)" % (self, exc, error)
            response = None
        if not duration:
            # An exception fired before duration was set; measure to now.
            duration = timer() - start_time
        return (response, util.SecondsToMilliseconds(duration), exc)

    def TestAnswers(self, record_type, record, expected, fatal=True):
        """Test to see that an answer returns correct IP's.

        Args:
          record_type: text record type for NS query (A, CNAME, etc)
          record: string to query for
          expected: tuple of strings expected in all answers
          fatal: whether a missing answer marks the server broken

        Returns:
          (is_broken, warning, duration)
        """
        is_broken = False
        warning = None
        (response, duration, exc) = self.TimedRequest(record_type, record,
                                                      timeout=self.health_timeout)
        failures = []
        if not response:
            is_broken = True
            # NOTE(review): this stores the exception *class* object, while
            # TestNegativeResponse stores its string name -- confirm intended.
            warning = exc.__class__
        elif not response.answer:
            if fatal:
                is_broken = True
            # Avoid preferring broken DNS servers that respond quickly
            duration = self.health_timeout
            warning = 'No answer for %s' % record
        else:
            # Every answer record must contain at least one expected substring.
            for a in response.answer:
                failed = True
                for string in expected:
                    if string in str(a):
                        failed=False
                        break
                if failed:
                    failures.append(a)
        if failures:
            answers = [' + '.join(map(str, x.items)) for x in response.answer]
            answer_text = ' -> '.join(answers)
            warning = '%s hijacked (%s)' % (record, answer_text)
        return (is_broken, warning, duration)

    def ResponseToAscii(self, response):
        """Render a DNS response's answer section as a short ASCII string."""
        if not response:
            return None
        if response.answer:
            answers = [' + '.join(map(str, x.items)) for x in response.answer]
            return ' -> '.join(answers)
        else:
            return 'no answer'

    def TestGoogleComResponse(self):
        """Health check: google.com A record must be in Google's class B."""
        return self.TestAnswers('A', 'google.com.', GOOGLE_CLASS_B)

    def TestWwwGoogleComResponse(self):
        """Health check: www.google.com must CNAME to www.l.google.com."""
        return self.TestAnswers('CNAME', 'www.google.com.', WWW_GOOGLE_RESPONSE)

    def TestWwwPaypalComResponse(self):
        """Health check: www.paypal.com must resolve to known PayPal ranges."""
        return self.TestAnswers('A', 'www.paypal.com.', WWW_PAYPAL_RESPONSE)

    def TestWwwTpbOrgResponse(self):
        """Health check (non-fatal): detect censorship of thepiratebay.org."""
        return self.TestAnswers('A', 'www.thepiratebay.org.', WWW_TPB_RESPONSE,
                                fatal=False)

    def TestNegativeResponse(self):
        """Test for NXDOMAIN hijacking.

        Queries a random nonexistent google.com subdomain; any answer means
        the server rewrites NXDOMAIN responses.

        Returns:
          (is_broken, warning, duration)
        """
        is_broken = False
        warning = None
        poison_test = 'nb.%s.google.com.' % random.random()
        (response, duration, exc) = self.TimedRequest('A', poison_test,
                                                      timeout=self.health_timeout)
        if not response:
            is_broken = True
            warning = str(exc.__class__.__name__)
        elif response.answer:
            warning = 'NXDOMAIN Hijacking'
        return (is_broken, warning, duration)

    def QueryWildcardCache(self, hostname=None, save=True, timeout=None):
        """Make a query to a random wildcard DNS host, storing the record.

        Args:
          hostname: record to query; a random wildcard host if None
          save: whether to remember (hostname, ttl) in self.cache_check
          timeout: query timeout; defaults to self.health_timeout

        Returns:
          (response, is_broken, warning, duration)
        """
        if not timeout:
            timeout = self.health_timeout
        is_broken = False
        warning = None
        if not hostname:
            domain = random.choice(WILDCARD_DOMAINS)
            hostname = 'namebench%s.%s' % (random.randint(1,2**32), domain)
        (response, duration, exc) = self.TimedRequest('A', hostname,
                                                      timeout=timeout)
        ttl = None
        if not response:
            is_broken = True
            warning = exc.__class__.__name__
        elif not response.answer:
            is_broken = True
            warning = 'No response'
        else:
            ttl = response.answer[0].ttl
        if save:
            self.cache_check = (hostname, ttl)
        return (response, is_broken, warning, duration)

    def StoreWildcardCache(self):
        """Seed self.cache_check for later TestSharedCache comparisons."""
        (is_broken, warning, duration) = self.QueryWildcardCache(save=True)[1:]
        if warning:
            self.warnings.append(warning)
        if is_broken:
            # A failing system nameserver deserves a loud notice.
            if self.is_system:
                print 'Ouch, %s failed StoreWildcardCache' % self
            self.disabled = 'Failed CacheWildcard: %s' % warning
        # Is this really a good idea to count?
        #self.checks.append(('wildcard store', is_broken, warning, duration))

    def TestSharedCache(self, other_ns):
        """Is this nameserver sharing a cache with another nameserver?

        Compares the TTL we see for other_ns's cached wildcard record against
        the TTL other_ns saw; a small nonzero delta implies a shared cache.

        Args:
          other_ns: A nameserver to compare it to.

        Returns:
          A tuple containing:
            - Boolean of whether or not this host has a shared cache
            - The faster NameServer object
            - The slower NameServer object
        """
        if other_ns.cache_check:
            (cache_id, other_ttl) = other_ns.cache_check
        else:
            print "* cache check for %s is missing (skipping)" % other_ns
            return (False, None, None)
        # These queries tend to run slow, and we've already narrowed down the worst.
        timeout = self.health_timeout * SHARED_CACHE_TIMEOUT_MULTIPLIER
        (response, is_broken, warning, duration) = self.QueryWildcardCache(
            cache_id,
            save=False,
            timeout=timeout
        )
        # Try again, but only once. Do penalize them for the first fail however.
        if is_broken:
            sys.stdout.write('_')
            (response, is_broken, warning, duration2) = self.QueryWildcardCache(
                cache_id,
                save=False,
                timeout=timeout
            )
            if is_broken:
                sys.stdout.write('o')
        # Is this really a good idea to count?
        #self.checks.append((cache_id, is_broken, warning, duration))
        if is_broken:
            # A failing system nameserver deserves a loud notice.
            if self.is_system:
                print 'Ouch, %s failed TestSharedCache' % self
            self.disabled = 'Failed shared-cache: %s' % warning
        else:
            my_ttl = response.answer[0].ttl
            delta = abs(other_ttl - my_ttl)
            if delta > 0:
                # Lower TTL means the record has aged in that cache longer:
                # that server (upstream) populated the entry first.
                if my_ttl < other_ttl:
                    upstream = self
                    upstream_ttl = my_ttl
                    downstream_ttl = other_ttl
                    downstream = other_ns
                else:
                    upstream = other_ns
                    downstream = self
                    upstream_ttl = other_ttl
                    downstream_ttl = my_ttl
                if other_ns.check_duration > self.check_duration:
                    slower = other_ns
                    faster = self
                else:
                    slower = self
                    faster = other_ns
                if delta > MIN_SHARING_DELTA_MS and delta < MAX_SHARING_DELTA_MS:
                    print "%s [%s] -> %s [%s] for %s" % (downstream, downstream_ttl,
                                                         upstream, upstream_ttl, cache_id)
                    return (True, slower, faster)
        return (False, None, None)

    def CheckHealth(self):
        """Qualify a nameserver to see if it is any good.

        Runs each health-check test in order, recording results in
        self.checks/self.warnings; stops at the first fatal failure.
        Warnings on system nameservers are printed for the user's benefit.

        Returns:
          self.disabled (False, or the string reason for exclusion)
        """
        tests = [self.TestWwwGoogleComResponse,
                 self.TestGoogleComResponse,
                 self.TestNegativeResponse,
                 self.TestWwwPaypalComResponse,
                 self.TestWwwTpbOrgResponse]
        self.checks = []
        self.warnings = []
        for test in tests:
            (is_broken, warning, duration) = test()
            self.checks.append((test.__name__, is_broken, warning, duration))
            if warning:
                if self.is_system:
                    print "found %s [%s] to have %s: %s" % (self.name, self.ip, test, warning)
                self.warnings.append('%s: %s' % (test.__name__, warning))
            if is_broken:
                self.disabled = 'Failed %s: %s' % (test.__name__, warning)
                break
        # if self.warnings:
        #   print '%s [%s] - %s' % (self.name, self.ip, self.warnings)
        return self.disabled
|
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
class _DataLoaderIterBase(object):
    """
    Base iterator implementation for DataLoader: snapshots the loader's
    configuration and prepares the sampler/collate machinery that the
    single- and multi-process subclasses feed mini-batches through.

    Args:
        loader(instance of DataLoader): instance of `fluid.io.DataLoader`
    """

    def __init__(self, loader):
        # Snapshot loader configuration onto this iterator.
        self._dataset = loader.dataset
        self._feed_list = loader.feed_list or []
        self._places = loader.places
        self._return_list = loader.return_list
        self._batch_sampler = loader.batch_sampler
        self._auto_collate_batch = loader.auto_collate_batch
        self._num_workers = loader.num_workers
        self._use_buffer_reader = loader.use_buffer_reader
        self._use_shared_memory = loader.use_shared_memory
        self._timeout = (loader.timeout
                         if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL)
        self._worker_init_fn = loader.worker_init_fn
        self._dataset_kind = loader.dataset_kind
        self._pin_memory = loader.pin_memory

        self._sampler_iter, self._collate_fn = \
            self._select_sampler_and_collate(loader)

        # LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data to self._blocking_queue, mini-batch data
        # will be get from:
        # 1. multi-process mode: get data from workers' result queue
        # 2. single-process mode: read mini-batch data in main process
        self._blocking_queue = None
        self._thread = None
        self._thread_done_event = threading.Event()

    def _select_sampler_and_collate(self, loader):
        # Choose the index iterator and collate function according to
        # whether batches are auto-collated and the dataset kind.
        if self._auto_collate_batch:
            return (iter(loader.batch_sampler),
                    loader.collate_fn or default_collate_fn)
        if self._dataset_kind == _DatasetKind.MAP:
            index_iter = iter(list(range(len(self._dataset))))
        else:
            index_iter = iter(_InfiniteIterableSampler(self._dataset, 1))
        return index_iter, loader.collate_fn or default_convert_fn

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single process implement of DataLoaderIter, loading data from
    loader.data in main process

    A background thread fetches batches from the dataset and pushes them
    into a blocking queue; __next__ reads them back via a py_reader.
    """

    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)

        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collate_batch,
            self._collate_fn, True)

        # NOTE: _structrue_infos used to record the data structure of
        # batch to restore batch structure after reading Tensor
        # from blocking_queue in single-process mode. Note that
        # only single process is used in single-process mode, we
        # can record the data structure sequencely in a list without
        # recording the send and recv index
        self._structure_infos = []

        # NOTE: len(self._places) batch data compose as an output
        # iteration, set blocking_queue can cache 2 iteration datas
        # at most here
        self._blocking_queue_capacity = 2 * len(self._places)

        self._init_thread()

    def _init_thread(self):
        # Feed-variable metadata needed by the C++ py_reader.
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        # Daemon thread so an abandoned iterator cannot block interpreter exit.
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _thread_loop(self, legacy_expected_place):
        try:
            #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
            # and it will call platform::SetDeviceId() in c++ internally.
            # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
            # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
            # APIs in this thread.
            _set_expected_place(legacy_expected_place)

            for indices in self._sampler_iter:
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices)

                # flat batch and record structure infos
                batch, structure = _flatten_batch(batch)
                self._structure_infos.append(structure)

                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if isinstance(slot, paddle.Tensor):
                        slot = slot.value().get_tensor()
                    elif not isinstance(slot, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp
                    array.append(slot)

                # push returns False when the queue was closed/killed;
                # stop producing in that case.
                if not self._blocking_queue.push(array):
                    break

            self._blocking_queue.close()
            self._thread = None
        except StopIteration:
            self._blocking_queue.close()
        except Exception:
            # kill() (not close()) so the consumer side errors out promptly.
            self._blocking_queue.kill()
            self._thread = None
            logging.warning("DataLoader reader thread raised an exception.")
            six.reraise(*sys.exc_info())

    def __next__(self):
        try:
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # One structure info per place was recorded for this
                    # iteration; consume exactly that many.
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()

            return data
        except StopIteration:
            self._reader.shutdown()
            six.reraise(*sys.exc_info())

    # python2 compatibility
    def next(self):
        return self.__next__()

    def __del__(self):
        # _blocking_queue in keep order mode holds sub-threads
        # need to release thread resources on unexpected exit
        if self._blocking_queue:
            self._blocking_queue.close()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
def __init__(self, loader):
super(_DataLoaderIterMultiProcess, self).__init__(loader)
assert self._num_workers > 0, "Multi-process DataLoader " \
"invalid num_workers({})".format(self._num_workers)
# subprocess wrokers' result queue
self._data_queue = None
# data get from _data_queue will be reordered by _rcvd_idx
# for data order keeping, data index not equal _rcvd_idx
# will be cached in _task_infos
self._send_idx = 0
self._rcvd_idx = 0
self._batches_outstanding = 0
self._task_infos = {}
self._structure_infos = []
# indices outstand as _outstanding_capacity at first, and
# blocking_queue capacity is also _outstanding_capacity.
# _outstanding_capacity here to make sure each indices_queue
# has at least 2 indices, and outstanding batch cached
# output data for at least 2 iterations(Note that len(_places)
# batches will be composed as an iteration output)
self._outstanding_capacity = 2 * max(self._num_workers,
len(self._places))
# see _try_put_indices
self._thread_lock = threading.Lock()
# init workers and indices queues and put 2 indices in each indices queue
self._init_workers()
for _ in range(self._outstanding_capacity):
self._try_put_indices()
self._init_thread()
self._shutdown = False
def _init_workers(self):
# multiprocess worker and indice queue list initial as empty
self._workers = []
self._worker_status = []
self._indices_queues = []
self._workers_idx_cycle = itertools.cycle(range(self._num_workers))
# create data_queue for workers
self._data_queue = multiprocessing.Queue()
# event for workers and thread, thread event is only need
# in multi-processing mode
self._workers_done_event = multiprocessing.Event()
self._thread_done_event = threading.Event()
for i in range(self._num_workers):
indices_queue = multiprocessing.Queue()
self._indices_queues.append(indices_queue)
worker = multiprocessing.Process(
target=_worker_loop,
args=(self._dataset, self._dataset_kind, indices_queue,
self._data_queue, self._workers_done_event,
self._auto_collate_batch, self._collate_fn,
self._worker_init_fn, i, self._num_workers,
self._use_shared_memory))
worker.daemon = True
worker.start()
self._workers.append(worker)
self._worker_status.append(True)
core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
_set_SIGCHLD_handler()
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except:
self._data_queue.cancel_join_thread()
self._data_queue.close()
break
def _init_thread(self):
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
# if only 1 place, do not need to keep order
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._outstanding_capacity, len(self._places) > 1)
self._reader = core.create_py_reader(
self._blocking_queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_buffer_reader, True,
self._pin_memory)
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._thread_loop, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _shutdown_worker(self, worker_id):
if self._worker_status[worker_id]:
self._indices_queues[worker_id].put(None)
self._worker_status[worker_id] = False
def _try_shutdown_all(self):
if not self._shutdown:
try:
self._exit_thread_expectedly()
self._clear_and_remove_data_queue()
# set _workers_done_event should be set before put None
# to indices_queue, workers wll exit on reading None from
# indices_queue
self._workers_done_event.set()
for i in range(self._num_workers):
self._shutdown_worker(i)
for w in self._workers:
w.join()
for q in self._indices_queues:
q.cancel_join_thread()
q.close()
finally:
core._erase_process_pids(id(self))
self._shutdown = True
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _thread_loop(self, legacy_expected_place):
#NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
# and it will call platform::SetDeviceId() in c++ internally.
# If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
# Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
# APIs in this thread.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
batch = self._get_data()
if not self._thread_done_event.is_set():
if batch is None:
self._exit_thread_expectedly()
else:
try:
# pack as LoDTensorArray
array = core.LoDTensorArray()
if self._use_shared_memory:
for tensor in batch:
array.append(tensor)
else:
# LoDTensor not in shared memory is not
# serializable, cannot be create in workers
for slot in batch:
if isinstance(slot, paddle.Tensor):
slot = slot.value().get_tensor()
elif not isinstance(slot, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(slot, core.CPUPlace())
slot = tmp
array.append(slot)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
finally:
self._rcvd_idx += 1
    def _get_data(self):
        """Fetch the next in-order batch from the workers' result queue.

        Returns the batch for index ``_rcvd_idx``, or ``None`` when data
        is drained. Out-of-order results are cached in ``_task_infos``
        until their turn. Blocks with a timeout so failed workers can be
        detected instead of hanging forever.
        """
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices is generated infinitely
            # for each worker to raise StopIteration, but a StopIteration
            # raising process will discard a batch indices which is count
            # in _send_idx but will not increase _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    sys.stdout.flush()
                    info = self._task_infos[self._rcvd_idx]
                    # len(info) == 3 means the result already arrived;
                    # otherwise keep the entry only if its worker lives
                    if len(info) == 3 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    #       workers, if batches among workers drained, there
                    #       may also be data in blocking queue
                    if self._batches_outstanding < len(self._places):
                        return None
                    continue
            # fast path: the batch for _rcvd_idx was already received
            # out of order and cached in _task_infos
            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 3:
                info = self._task_infos.pop(self._rcvd_idx)
                self._structure_infos.append(info[2])
                return info[1]
            try:
                # [ avoid hang ]: main process may blocking at _reader.read_next when
                # KeyboardInterrupt, we do following tradeoff:
                # 1. get data with timeout, MP_STATUS_CHECK_INTERVAL(5s) as timeout
                #    default, if KeyboardInterrupt blocking, failed workers will be
                #    checked and raise RuntimeError to quit DataLoader in timeout
                #    exception handling.
                # 2. if get data timeout and check workers all alive, continue to
                #    get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue
                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                "pids: {}".format(len(failed_workers), pids))
                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue
                # any other exception is fatal for the reader thread
                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker get StopIteraion, we shutdown this worker,
                    # note that this batch indices to trigger StopIteration
                    # is discard, outstanding batch number should be decrease
                    # and another indices should be put for other workers
                    # may still working.
                    self._shutdown_worker(data.worker_id)
                    self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue
                idx, batch, structure = data
                if isinstance(batch, _WorkerException):
                    # a worker raised: fail the reader thread and re-raise
                    # the worker's exception in this process
                    self._exit_thread_unexpectedly()
                    batch.reraise()
                if idx == self._rcvd_idx:
                    del self._task_infos[idx]
                    self._structure_infos.append(structure)
                    return batch
                else:
                    # out-of-order result: stash it until _rcvd_idx catches up
                    self._task_infos[idx] += (batch, structure)
                    continue
    def _try_put_indices(self):
        """Send the next batch of sample indices to an alive worker.

        No-op when the sampler is exhausted or no worker is alive.
        Thread-safe via ``_thread_lock`` (called from both the main
        process and the reader thread).
        """
        assert self._batches_outstanding <= self._outstanding_capacity, \
                    "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in main process(for our implement has blocking queue,
        # and blocking queue read is in main process) and thread, which may
        # cause error following error
        #   1. "ValueError: generator already executing" in next(self._sampler_iter)
        #   2. re-enter in increase _send_idx
        # add a lock for threading save, for _try_put_indices is only a slight
        # function which is not in data reading pipeline, this lock almost no
        # influence on performance
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return
            # round-robin over workers, skipping dead ones; the for/else
            # returns when no worker is alive to take the indices
            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return
            self._indices_queues[worker_idx].put((self._send_idx, indices))
            # record which worker owns this batch index for _get_data
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1
    def __del__(self):
        """Best-effort resource release on garbage collection; safe to run
        multiple times because _try_shutdown_all is idempotent."""
        self._try_shutdown_all()
    def __next__(self):
        """Return the next output batch, restoring the original (nested)
        batch structure recorded at flatten time.

        Raises StopIteration when data is drained, shutting down the
        reader and all workers first.
        """
        try:
            # _batches_outstanding here record the total batch data number
            # in 'from after _try_put_indices to beforeoutput data', this
            # value should be _outstanding_capacity if data is not drained,
            # if _batches_outstanding is less than _places number, there are
            # no enough data to generate next output, close blocking_queue and
            # set _thread_done_event here, py_reader will raise StopIteration,
            # end workers and indices_queues in StopIteration handling
            if self._batches_outstanding < len(self._places):
                self._thread_done_event.set()
                self._blocking_queue.close()
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # restore one structure per device/place
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            self._reader.shutdown()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())
# python2 compatibility
    def next(self):
        """Python 2 iterator protocol: delegate to __next__."""
        return self.__next__()
def _on_output_batch(self):
for _ in range(len(self._places)):
self._batches_outstanding -= 1
self._try_put_indices()
fix dataloader exit error (#32550)
* Fix a DataLoader exit error that occurred when the user exited the program while the DataLoader was still iterating. test=develop
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import six
import sys
import time
import signal
import numbers
import logging
import itertools
import threading
import numpy as np
import multiprocessing
from collections import namedtuple
from paddle.fluid.framework import _set_expected_place, _current_expected_place
# NOTE: queue has a different name in python2 and python3
if six.PY2:
import Queue as queue
else:
import queue
import paddle
from .. import core, layers
from ..framework import in_dygraph_mode
from ..multiprocess_utils import _set_SIGCHLD_handler, MP_STATUS_CHECK_INTERVAL, CleanupFuncRegistrar
from .fetcher import _IterableDatasetFetcher, _MapDatasetFetcher
from .batch_sampler import _InfiniteIterableSampler
from .collate import default_collate_fn, default_convert_fn
from .worker import ParentWatchDog, get_worker_info, _worker_loop, \
_DatasetKind, _IterableDatasetStopIteration, _WorkerException
from .flat import _flatten_batch, _restore_batch
__all__ = ['get_worker_info']
class _DataLoaderIterBase(object):
    """
    Iterator implement of DataLoader, will load and feed mini-batch
    data by setting in given dataloader.

    Args:
        loader(instance of DataLoader): instance of `fluid.io.DataLoader`
    """

    def __init__(self, loader):
        # snapshot loader configuration; the iterator never reads the
        # loader again after construction
        self._dataset = loader.dataset
        self._feed_list = loader.feed_list or []
        self._places = loader.places
        self._return_list = loader.return_list
        self._batch_sampler = loader.batch_sampler
        self._auto_collate_batch = loader.auto_collate_batch
        self._num_workers = loader.num_workers
        self._use_buffer_reader = loader.use_buffer_reader
        self._use_shared_memory = loader.use_shared_memory
        # non-positive timeout falls back to the worker status check interval
        self._timeout = loader.timeout if loader.timeout > 0 else MP_STATUS_CHECK_INTERVAL
        self._worker_init_fn = loader.worker_init_fn
        self._dataset_kind = loader.dataset_kind
        self._pin_memory = loader.pin_memory
        if self._auto_collate_batch:
            # sampler yields lists of indices; collate assembles batches
            self._sampler_iter = iter(loader.batch_sampler)
            self._collate_fn = loader.collate_fn or default_collate_fn
        else:
            # no batching: iterate single indices (or an infinite sampler
            # for iterable datasets) and only convert, not collate
            if self._dataset_kind == _DatasetKind.MAP:
                self._sampler_iter = iter(list(range(len(self._dataset))))
            else:
                self._sampler_iter = iter(
                    _InfiniteIterableSampler(self._dataset, 1))
            self._collate_fn = loader.collate_fn or default_convert_fn

        # LoDTensorBlockingQueue instance for create_py_reader and a thread
        # to put mini-batch data to self._blocking_queue, mini-batch data
        # will be get from:
        # 1. multi-process mode: get data from workers' result queue
        # 2. single-process mode: read mini-batch data in main process
        self._blocking_queue = None
        self._thread = None
        self._thread_done_event = threading.Event()

    def __iter__(self):
        return self

    def __len__(self):
        return len(self._batch_sampler)
class _DataLoaderIterSingleProcess(_DataLoaderIterBase):
    """
    Single process implement of DataLoaderIter, loading data from
    loader.data in main process
    """

    def __init__(self, loader):
        super(_DataLoaderIterSingleProcess, self).__init__(loader)

        # fetcher reads and collates batches directly in this process
        self._dataset_fetcher = _DatasetKind.create_fetcher(
            self._dataset_kind, self._dataset, self._auto_collate_batch,
            self._collate_fn, True)

        # NOTE: _structrue_infos used to record the data structure of
        # batch to restore batch structure after reading Tensor
        # from blocking_queue in single-process mode. Note that
        # only single process is used in single-process mode, we
        # can record the data structure sequencely in a list without
        # recording the send and recv index
        self._structure_infos = []

        # NOTE: len(self._places) batch data compose as an output
        # iteration, set blocking_queue can cache 2 iteration datas
        # at most here
        self._blocking_queue_capacity = 2 * len(self._places)

        self._init_thread()

        # if user exit python program when dataloader is still
        # iterating, resource may no release safely, so we
        # add __del__ function to to CleanupFuncRegistrar
        # to make sure __del__ is always called when program
        # exit for resoure releasing safely
        CleanupFuncRegistrar.register(self.__del__)

    def _init_thread(self):
        """Create the blocking queue, the C++ py_reader and the daemon
        thread that feeds batches into the queue."""
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _thread_loop(self, legacy_expected_place):
        """Reader-thread loop: fetch, flatten and push every batch from
        the sampler into the blocking queue."""
        try:
            #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
            # and it will call platform::SetDeviceId() in c++ internally.
            # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
            # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
            # APIs in this thread.
            _set_expected_place(legacy_expected_place)

            for indices in self._sampler_iter:
                # read data from dataset in mini-batch
                batch = self._dataset_fetcher.fetch(indices)

                # flat batch and record structure infos
                batch, structure = _flatten_batch(batch)
                self._structure_infos.append(structure)

                # pack as LoDTensorArray
                array = core.LoDTensorArray()
                for slot in batch:
                    if isinstance(slot, paddle.Tensor):
                        slot = slot.value().get_tensor()
                    elif not isinstance(slot, core.LoDTensor):
                        tmp = core.LoDTensor()
                        tmp.set(slot, core.CPUPlace())
                        slot = tmp

                    array.append(slot)

                # push returns False when the queue was closed (shutdown)
                if not self._blocking_queue.push(array):
                    break

                if self._thread_done_event.is_set():
                    break

            # sampler exhausted: normal end of data
            self._blocking_queue.close()
            self._shutdown_thread()
        except StopIteration:
            self._blocking_queue.close()
        except Exception:
            # unexpected failure: kill the queue so readers fail fast
            self._blocking_queue.kill()
            self._shutdown_thread()
            logging.warning("DataLoader reader thread raised an exception.")
            six.reraise(*sys.exc_info())

    def __next__(self):
        """Return the next batch with its original structure restored;
        raise StopIteration (after reader shutdown) when drained."""
        try:
            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # restore one structure per device/place
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()

            return data
        except StopIteration:
            self._reader.shutdown()
            six.reraise(*sys.exc_info())

    def _shutdown_thread(self):
        """Stop the reader thread; join only when called from another
        thread to avoid self-join deadlock."""
        if self._thread:
            self._thread_done_event.set()
            if self._thread is not threading.current_thread():
                self._thread.join()
            self._thread = None

    # python2 compatibility
    def next(self):
        return self.__next__()

    def __del__(self):
        # _blocking_queue in keep order mode holds sub-threads
        # need to release thread resources on unexpected exit
        if self._blocking_queue:
            self._blocking_queue.close()
        # NOTE: blocking queue should be closed firstly for
        # blocking queue read may hang and _thread_done_event
        # cannot be checked
        self._shutdown_thread()
class _DataLoaderIterMultiProcess(_DataLoaderIterBase):
    """Multi-process implement of DataLoaderIter: worker subprocesses
    fetch batches and send them back through a result queue; a reader
    thread reorders them and feeds the C++ blocking queue."""

    def __init__(self, loader):
        super(_DataLoaderIterMultiProcess, self).__init__(loader)

        assert self._num_workers > 0,  "Multi-process DataLoader " \
            "invalid num_workers({})".format(self._num_workers)

        # subprocess wrokers' result queue
        self._data_queue = None

        # data get from _data_queue will be reordered by _rcvd_idx
        # for data order keeping, data index not equal _rcvd_idx
        # will be cached in _task_infos
        self._send_idx = 0
        self._rcvd_idx = 0
        self._batches_outstanding = 0
        self._task_infos = {}
        self._structure_infos = []

        # indices outstand as _outstanding_capacity at first, and
        # blocking_queue capacity is also _outstanding_capacity.
        # _outstanding_capacity here to make sure each indices_queue
        # has at least 2 indices, and outstanding batch cached
        # output data for at least 2 iterations(Note that len(_places)
        # batches will be composed as an iteration output)
        self._outstanding_capacity = 2 * max(self._num_workers,
                                             len(self._places))

        # see _try_put_indices
        self._thread_lock = threading.Lock()

        # init workers and indices queues and put 2 indices in each indices queue
        self._init_workers()
        for _ in range(self._outstanding_capacity):
            self._try_put_indices()

        self._init_thread()
        self._shutdown = False

        # if user exit python program when dataloader is still
        # iterating, resource may no release safely, so we
        # add __del__ function to to CleanupFuncRegistrar
        # to make sure __del__ is always called when program
        # exit for resoure releasing safely
        CleanupFuncRegistrar.register(self.__del__)

    def _init_workers(self):
        """Spawn ``num_workers`` daemon subprocesses, each with its own
        indices queue, all writing to a shared result queue."""
        # multiprocess worker and indice queue list initial as empty
        self._workers = []
        self._worker_status = []
        self._indices_queues = []
        self._workers_idx_cycle = itertools.cycle(range(self._num_workers))

        # create data_queue for workers
        self._data_queue = multiprocessing.Queue()

        # event for workers and thread, thread event is only need
        # in multi-processing mode
        self._workers_done_event = multiprocessing.Event()
        self._thread_done_event = threading.Event()

        for i in range(self._num_workers):
            indices_queue = multiprocessing.Queue()
            self._indices_queues.append(indices_queue)
            worker = multiprocessing.Process(
                target=_worker_loop,
                args=(self._dataset, self._dataset_kind, indices_queue,
                      self._data_queue, self._workers_done_event,
                      self._auto_collate_batch, self._collate_fn,
                      self._worker_init_fn, i, self._num_workers,
                      self._use_shared_memory))
            worker.daemon = True
            worker.start()
            self._workers.append(worker)
            self._worker_status.append(True)

        # register pids so worker processes are reaped on interpreter exit
        core._set_process_pids(id(self), tuple(w.pid for w in self._workers))
        _set_SIGCHLD_handler()

    def _clear_and_remove_data_queue(self):
        """Drain and close the workers' result queue so shutdown does not
        block on unflushed queue data."""
        if self._data_queue is not None:
            while True:
                try:
                    self._data_queue.get_nowait()
                except:
                    # NOTE: bare except is intentional best-effort drain;
                    # queue.Empty (or IOError on a closing queue) ends it
                    self._data_queue.cancel_join_thread()
                    self._data_queue.close()
                    break

    def _init_thread(self):
        """Create the blocking queue, the C++ py_reader and the daemon
        reader thread that moves worker results into the queue."""
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._outstanding_capacity, len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread_done_event = threading.Event()
        self._thread = threading.Thread(
            target=self._thread_loop, args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()

    def _shutdown_worker(self, worker_id):
        """Ask one worker to exit (it stops on reading None) and mark it
        dead so no more indices are routed to it."""
        if self._worker_status[worker_id]:
            self._indices_queues[worker_id].put(None)
            self._worker_status[worker_id] = False

    def _try_shutdown_all(self):
        """Shut down the reader thread, worker processes and queues.
        Idempotent: guarded by ``self._shutdown``."""
        if not self._shutdown:
            try:
                # stop the reader thread first so nothing reads from
                # the data queue while it is being drained/closed
                self._exit_thread_expectedly()
                self._clear_and_remove_data_queue()

                # set _workers_done_event should be set before put None
                # to indices_queue, workers wll exit on reading None from
                # indices_queue
                self._workers_done_event.set()
                for i in range(self._num_workers):
                    self._shutdown_worker(i)

                for w in self._workers:
                    w.join()
                for q in self._indices_queues:
                    q.cancel_join_thread()
                    q.close()
            finally:
                core._erase_process_pids(id(self))
                self._shutdown = True

    def _exit_thread_expectedly(self):
        """Normal end of data: mark thread done, close blocking queue."""
        self._thread_done_event.set()
        self._blocking_queue.close()

    def _exit_thread_unexpectedly(self):
        """Abnormal exit: mark thread done and kill the blocking queue so
        waiting readers fail fast."""
        self._thread_done_event.set()
        self._blocking_queue.kill()
        logging.error("DataLoader reader thread raised an exception!")

    def _thread_loop(self, legacy_expected_place):
        """Reader-thread main loop: pull batches via ``_get_data`` and
        push them, packed as LoDTensorArray, into the blocking queue."""
        #NOTE(zhiqiu): Set the expected place for new thread as the same as father thread,
        # and it will call platform::SetDeviceId() in c++ internally.
        # If we do not set cudaDeviceId in new thread, the default cudaDeviceId will be 0,
        # Which may cost hundreds of MB of GPU memory on CUDAPlace(0) if calling some cuda
        # APIs in this thread.
        _set_expected_place(legacy_expected_place)

        while not self._thread_done_event.is_set():
            batch = self._get_data()
            if not self._thread_done_event.is_set():
                if batch is None:
                    # None means data drained: exit normally
                    self._exit_thread_expectedly()
                else:
                    try:
                        # pack as LoDTensorArray
                        array = core.LoDTensorArray()
                        if self._use_shared_memory:
                            for tensor in batch:
                                array.append(tensor)
                        else:
                            # LoDTensor not in shared memory is not
                            # serializable, cannot be create in workers
                            for slot in batch:
                                if isinstance(slot, paddle.Tensor):
                                    slot = slot.value().get_tensor()
                                elif not isinstance(slot, core.LoDTensor):
                                    tmp = core.LoDTensor()
                                    tmp.set(slot, core.CPUPlace())
                                    slot = tmp
                                array.append(slot)

                        if not self._blocking_queue.push(array):
                            self._blocking_queue.close()
                    except:
                        self._exit_thread_unexpectedly()
                        six.reraise(*sys.exc_info())
                    finally:
                        # one batch has been consumed from _task_infos
                        self._rcvd_idx += 1

    def _get_data(self):
        """Fetch the next in-order batch from the workers' result queue,
        or ``None`` when data is drained; out-of-order results are cached
        in ``_task_infos`` until their turn."""
        while not self._thread_done_event.is_set():
            # For IterableDataset, batch indices is generated infinitely
            # for each worker to raise StopIteration, but a StopIteration
            # raising process will discard a batch indices which is count
            # in _send_idx but will not increase _rcvd_idx, so we check
            # whether the worker is still alive here to skip the discarded
            # batch indices and increase _rcvd_idx
            if self._dataset_kind == _DatasetKind.ITER:
                while self._rcvd_idx < self._send_idx:
                    sys.stdout.flush()
                    info = self._task_infos[self._rcvd_idx]
                    # len(info) == 3 means the result already arrived;
                    # otherwise keep the entry only if its worker lives
                    if len(info) == 3 or self._worker_status[info[0]]:
                        break
                    del self._task_infos[self._rcvd_idx]
                    self._rcvd_idx += 1
                    self._batches_outstanding -= 1
                else:
                    # NOTE: _rcvd_idx and _send_idx only record batches among
                    #       workers, if batches among workers drained, there
                    #       may also be data in blocking queue
                    if self._batches_outstanding < len(self._places):
                        return None
                    continue

            # fast path: result for _rcvd_idx already cached out of order
            if self._rcvd_idx in self._task_infos and \
                    len(self._task_infos[self._rcvd_idx]) == 3:
                info = self._task_infos.pop(self._rcvd_idx)
                self._structure_infos.append(info[2])
                return info[1]

            try:
                # [ avoid hang ]: main process may blocking at _reader.read_next when
                # KeyboardInterrupt, we do following tradeoff:
                # 1. get data with timeout, MP_STATUS_CHECK_INTERVAL(5s) as timeout
                #    default, if KeyboardInterrupt blocking, failed workers will be
                #    checked and raise RuntimeError to quit DataLoader in timeout
                #    exception handling.
                # 2. if get data timeout and check workers all alive, continue to
                #    get data again
                data = self._data_queue.get(timeout=self._timeout)
            except Exception as e:
                # check if thread done event set when waiting data
                if self._thread_done_event.is_set():
                    continue

                # check failed workers
                failed_workers = []
                for i, w in enumerate(self._workers):
                    if self._worker_status[i] and not w.is_alive():
                        failed_workers.append(w)
                        self._shutdown_worker(i)
                if len(failed_workers) > 0:
                    self._exit_thread_unexpectedly()
                    pids = ', '.join(str(w.pid) for w in failed_workers)
                    raise RuntimeError("DataLoader {} workers exit unexpectedly, " \
                                "pids: {}".format(len(failed_workers), pids))

                # get(timeout) will call _poll(timeout) and may raise IOError
                if isinstance(e, queue.Empty) or isinstance(e, IOError):
                    # continue on timeout to keep getting data from queue
                    continue

                # any other exception is fatal for the reader thread
                self._exit_thread_unexpectedly()
                logging.error("DataLoader reader thread failed({}) to read data from " \
                              "workers' result queue.".format(e))
                six.reraise(*sys.exc_info())
            else:
                if self._dataset_kind == _DatasetKind.ITER and isinstance(
                        data, _IterableDatasetStopIteration):
                    # if a worker get StopIteraion, we shutdown this worker,
                    # note that this batch indices to trigger StopIteration
                    # is discard, outstanding batch number should be decrease
                    # and another indices should be put for other workers
                    # may still working.
                    self._shutdown_worker(data.worker_id)
                    self._batches_outstanding -= 1
                    self._try_put_indices()
                    continue

                idx, batch, structure = data
                if isinstance(batch, _WorkerException):
                    # a worker raised: fail the reader thread and re-raise
                    # the worker's exception in this process
                    self._exit_thread_unexpectedly()
                    batch.reraise()

                if idx == self._rcvd_idx:
                    del self._task_infos[idx]
                    self._structure_infos.append(structure)
                    return batch
                else:
                    # out-of-order result: stash until _rcvd_idx catches up
                    self._task_infos[idx] += (batch, structure)
                    continue

    def _try_put_indices(self):
        """Send the next batch of sample indices to an alive worker;
        no-op when the sampler is exhausted or no worker is alive."""
        assert self._batches_outstanding <= self._outstanding_capacity, \
                    "too many indices have been put to queue"
        # In multi-process mode for IterableDataset, _try_put_indices will
        # be called both in main process(for our implement has blocking queue,
        # and blocking queue read is in main process) and thread, which may
        # cause error following error
        #   1. "ValueError: generator already executing" in next(self._sampler_iter)
        #   2. re-enter in increase _send_idx
        # add a lock for threading save, for _try_put_indices is only a slight
        # function which is not in data reading pipeline, this lock almost no
        # influence on performance
        with self._thread_lock:
            try:
                indices = next(self._sampler_iter)
            except StopIteration:
                return

            # round-robin over workers, skipping dead ones; the for/else
            # returns when no worker is alive to take the indices
            for i in range(self._num_workers):
                worker_idx = next(self._workers_idx_cycle)
                if self._worker_status[worker_idx]:
                    break
            else:
                return

            self._indices_queues[worker_idx].put((self._send_idx, indices))
            # record which worker owns this batch index for _get_data
            self._task_infos[self._send_idx] = (worker_idx, )
            self._batches_outstanding += 1
            self._send_idx += 1

    def __del__(self):
        """Best-effort resource release on garbage collection."""
        self._try_shutdown_all()

    def __next__(self):
        """Return the next output batch with its structure restored;
        raise StopIteration (after full shutdown) when drained."""
        try:
            # _batches_outstanding here record the total batch data number
            # in 'from after _try_put_indices to beforeoutput data', this
            # value should be _outstanding_capacity if data is not drained,
            # if _batches_outstanding is less than _places number, there are
            # no enough data to generate next output, close blocking_queue and
            # set _thread_done_event here, py_reader will raise StopIteration,
            # end workers and indices_queues in StopIteration handling
            if self._batches_outstanding < len(self._places):
                self._thread_done_event.set()
                self._blocking_queue.close()

            if in_dygraph_mode():
                data = self._reader.read_next_var_list()
                data = _restore_batch(data, self._structure_infos.pop(0))
            else:
                if self._return_list:
                    data = self._reader.read_next_list()
                    # restore one structure per device/place
                    data = [
                        _restore_batch(d, s)
                        for d, s in zip(data, self._structure_infos[:len(
                            self._places)])
                    ]
                    self._structure_infos = self._structure_infos[len(
                        self._places):]
                    # static graph organized data on multi-device with list, if
                    # place number is 1, there is only 1 device, extra the data
                    # from list for devices to be compatible with dygraph mode
                    if len(self._places) == 1:
                        data = data[0]
                else:
                    data = self._reader.read_next()
            self._on_output_batch()
            return data
        except StopIteration:
            self._reader.shutdown()
            self._try_shutdown_all()
            six.reraise(*sys.exc_info())

    # python2 compatibility
    def next(self):
        return self.__next__()

    def _on_output_batch(self):
        """Account for one consumed output (len(self._places) batches)
        and replenish the worker indices queues."""
        for _ in range(len(self._places)):
            self._batches_outstanding -= 1
            self._try_put_indices()
|
import sys
import datetime
import logging
import json
import subprocess
import socket
import time
import os
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from collections import defaultdict, deque
from zope.interface import implements
from decimal import Decimal
from lbryum import SimpleConfig, Network
from lbryum.lbrycrd import COIN
from lbryum.wallet import WalletStorage, Wallet
from lbryum.commands import known_commands, Commands
from lbryum.transaction import Transaction
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.metadata.LBRYMetadata import Metadata
log = logging.getLogger(__name__)
alert = logging.getLogger("lbryalert." + __name__)
class ReservedPoints(object):
    """A claim on wallet points set aside for a future payment.

    Attributes:
        identifier: the peer or address the reservation is for
        amount: the number of points reserved
    """

    def __init__(self, identifier, amount):
        self.identifier, self.amount = identifier, amount
def _catch_connection_error(f):
def w(*args):
try:
return f(*args)
except socket.error:
raise ValueError("Unable to connect to an lbrycrd server. Make sure an lbrycrd server " +
"is running and that this application can connect to it.")
return w
class LBRYWallet(object):
"""This class implements the LBRYWallet interface for the LBRYcrd payment system"""
implements(ILBRYWallet)
_FIRST_RUN_UNKNOWN = 0
_FIRST_RUN_YES = 1
_FIRST_RUN_NO = 2
    def __init__(self, db_dir):
        """
        @param db_dir: directory where the wallet's database files live
        """
        self.db_dir = db_dir
        self.db = None  # adbapi connection pool, opened in start()
        self.next_manage_call = None  # IDelayedCall for the next manage() tick
        self.wallet_balance = Decimal(0.0)
        self.total_reserved_points = Decimal(0.0)
        self.peer_addresses = {}  # {Peer: string}
        self.queued_payments = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.expected_balances = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.current_address_given_to_peer = {}  # {Peer: address(string)}
        self.expected_balance_at_time = deque()  # (Peer, address(string), amount(Decimal), time(datetime), count(int),
                                                 # incremental_amount(float))
        self.max_expected_payment_time = datetime.timedelta(minutes=3)
        self.stopped = True
        self.is_lagging = None

        # manage() re-entrancy guard and full-sweep batching counters
        self.manage_running = False
        self._manage_count = 0
        self._balance_refresh_time = 3  # seconds between manage() ticks
        self._batch_count = 20  # every Nth manage() does a full sweep
        self._first_run = self._FIRST_RUN_UNKNOWN
    def start(self):
        """Open the database, clean bad records, start the backend and the
        periodic manage() loop.

        @return: Deferred firing True when the wallet is running
        """
        def start_manage():
            self.stopped = False
            self.manage()
            return True

        d = self._open_db()
        d.addCallback(lambda _: self._clean_bad_records())
        d.addCallback(lambda _: self._start())
        d.addCallback(lambda _: start_manage())
        return d
    @staticmethod
    def log_stop_error(err):
        """Errback: log a failure raised while stopping the wallet."""
        log.error("An error occurred stopping the wallet: %s", err.getTraceback())
    def stop(self):
        """Stop the manage loop, flush pending work with a final full
        manage() pass, then stop the backend.

        @return: Deferred firing when shutdown is complete
        """
        self.stopped = True
        # If self.next_manage_call is None, then manage is currently running or else
        # start has not been called, so set stopped and do nothing else.
        if self.next_manage_call is not None:
            self.next_manage_call.cancel()
            self.next_manage_call = None

        d = self.manage(do_full=True)
        d.addErrback(self.log_stop_error)
        d.addCallback(lambda _: self._stop())
        d.addErrback(self.log_stop_error)
        return d
    def manage(self, do_full=False):
        """Periodic wallet maintenance tick.

        Refreshes the cached balance every call; every ``_batch_count``-th
        call (or when ``do_full`` is True) also checks expected balances
        and sends queued payments. Re-schedules itself unless stopped.

        @param do_full: force a full sweep (expected balances + payments)

        @return: Deferred firing when this tick has finished
        """
        self.next_manage_call = None
        # mutable cell so inner closures can record whether *this* call
        # took ownership of the manage_running flag
        have_set_manage_running = [False]
        self._manage_count += 1
        if self._manage_count % self._batch_count == 0:
            self._manage_count = 0
            do_full = True

        def check_if_manage_running():
            # resolves True when we acquired the run flag; a non-full tick
            # that finds manage already running just skips (False), while a
            # full tick polls every second until it can run
            d = defer.Deferred()

            def fire_if_not_running():
                if self.manage_running is False:
                    self.manage_running = True
                    have_set_manage_running[0] = True
                    d.callback(True)
                elif do_full is False:
                    d.callback(False)
                else:
                    task.deferLater(reactor, 1, fire_if_not_running)

            fire_if_not_running()
            return d

        d = check_if_manage_running()

        def do_manage():
            if do_full:
                d = self._check_expected_balances()
                d.addCallback(lambda _: self._send_payments())
            else:
                d = defer.succeed(True)

            d.addCallback(lambda _: self.get_balance())

            def set_wallet_balance(balance):
                if self.wallet_balance != balance:
                    log.info("Got a new balance: %s", str(balance))
                self.wallet_balance = balance

            d.addCallback(set_wallet_balance)
            return d

        d.addCallback(lambda should_run: do_manage() if should_run else None)

        def set_next_manage_call():
            if not self.stopped:
                self.next_manage_call = reactor.callLater(self._balance_refresh_time, self.manage)

        d.addCallback(lambda _: set_next_manage_call())

        def log_error(err):
            log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
            return err

        d.addErrback(log_error)

        def set_manage_not_running(arg):
            # only release the flag if this call was the one that set it
            if have_set_manage_running[0] is True:
                self.manage_running = False
            return arg

        d.addBoth(set_manage_not_running)
        return d
    def get_info_exchanger(self):
        """Return the request creator used to exchange payment addresses with peers."""
        return LBRYcrdAddressRequester(self)
    def get_wallet_info_query_handler_factory(self):
        """Return the query-handler factory that answers peers' address queries."""
        return LBRYcrdAddressQueryHandlerFactory(self)
def reserve_points(self, identifier, amount):
"""
Ensure a certain amount of points are available to be sent as payment, before the service is rendered
@param identifier: The peer to which the payment will ultimately be sent
@param amount: The amount of points to reserve
@return: A ReservedPoints object which is given to send_points once the service has been rendered
"""
rounded_amount = Decimal(str(round(amount, 8)))
#if peer in self.peer_addresses:
if self.wallet_balance >= self.total_reserved_points + rounded_amount:
self.total_reserved_points += rounded_amount
return ReservedPoints(identifier, rounded_amount)
return None
def cancel_point_reservation(self, reserved_points):
"""
Return all of the points that were reserved previously for some ReservedPoints object
@param reserved_points: ReservedPoints previously returned by reserve_points
@return: None
"""
self.total_reserved_points -= reserved_points.amount
    def send_points(self, reserved_points, amount):
        """
        Schedule a payment to be sent to a peer

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send, must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        peer = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        assert(peer in self.peer_addresses)
        # queue the payment; _send_payments will create the transaction
        self.queued_payments[self.peer_addresses[peer]] += rounded_amount
        # make any unused points available
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("ordering that %s points be sent to %s", str(rounded_amount),
                 str(self.peer_addresses[peer]))
        peer.update_stats('points_sent', amount)
        return defer.succeed(True)
    def send_points_to_address(self, reserved_points, amount):
        """
        Schedule a payment to be sent to an address

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send. must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        address = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        # queue the payment; _send_payments will create the transaction
        self.queued_payments[address] += rounded_amount
        # make any unused points available
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("Ordering that %s points be sent to %s", str(rounded_amount),
                 str(address))
        return defer.succeed(True)
def add_expected_payment(self, peer, amount):
    """Increase the number of points expected to be paid by a peer"""
    rounded_amount = Decimal(str(round(amount, 8)))
    assert(peer in self.current_address_given_to_peer)
    address = self.current_address_given_to_peer[peer]
    log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
    self.expected_balances[address] += rounded_amount
    expected_balance = self.expected_balances[address]
    expected_time = datetime.datetime.now() + self.max_expected_payment_time
    # Tuple layout: (peer, address, expected_balance, deadline, strike_count, amount);
    # consumed by _check_expected_balances.
    self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount))
    peer.update_stats('expected_points', amount)
def update_peer_address(self, peer, address):
    # Remember the payment address most recently associated with this peer.
    self.peer_addresses[peer] = address
def get_new_address_for_peer(self, peer):
    """Fetch a fresh wallet address, remember it as the one handed to *peer*, and return it."""
    def _remember(new_address):
        # Track which address this peer was given so expected payments can be matched up.
        self.current_address_given_to_peer[peer] = new_address
        return new_address
    deferred_address = self.get_new_address()
    deferred_address.addCallback(_remember)
    return deferred_address
def _send_payments(self):
    """Flush all queued payments in a single send-many transaction.

    Returns a Deferred firing once the transaction is submitted, or an
    already-fired Deferred when nothing was queued.
    """
    payments_to_send = {}
    # NOTE(review): deleting entries while looping relies on .items()
    # returning a snapshot list (Python 2 semantics) — under Python 3 this
    # would raise RuntimeError; confirm before porting.
    for address, points in self.queued_payments.items():
        log.info("Should be sending %s points to %s", str(points), str(address))
        payments_to_send[address] = points
        self.total_reserved_points -= points
        self.wallet_balance -= points
        del self.queued_payments[address]
    if payments_to_send:
        log.info("Creating a transaction with outputs %s", str(payments_to_send))
        d = self._do_send_many(payments_to_send)
        d.addCallback(lambda txid: log.debug("Sent transaction %s", txid))
        return d
    log.debug("There were no payments to send")
    return defer.succeed(True)
def get_stream_info_for_name(self, name):
    """Resolve *name* and return a Deferred firing with its validated Metadata."""
    d = self._get_value_for_name(name)
    d.addCallback(self._get_stream_info_from_value, name)
    return d
def get_txid_for_name(self, name):
    """Resolve *name* and return the txid of its current claim (None when absent)."""
    deferred_value = self._get_value_for_name(name)
    deferred_value.addCallback(lambda result: result.get('txid'))
    return deferred_value
def get_stream_info_from_txid(self, name, txid):
    """Look up *name*'s claim inside transaction *txid* and return its Metadata.

    Fires with a Failure(UnknownNameError) when the tx contains no claim for the name.
    """
    d = self.get_claims_from_tx(txid)

    def get_claim_for_name(claims):
        for claim in claims:
            if claim['name'] == name:
                # Annotate the claim with its txid for downstream processing.
                claim['txid'] = txid
                return claim
        return Failure(UnknownNameError(name))

    d.addCallback(get_claim_for_name)
    d.addCallback(self._get_stream_info_from_value, name)
    return d
def _get_stream_info_from_value(self, result, name):
    """Validate a raw name-claim lookup *result* and return its Metadata.

    Returns Failure(UnknownNameError) when the lookup reported an error and
    Failure(InvalidStreamInfoError) when the claim value is not valid JSON
    Metadata. As a side effect, records name/txid/sd_hash in the local db.
    """
    def _check_result_fields(r):
        # Guard against malformed backend responses before using the fields.
        for k in ['value', 'txid', 'n', 'height', 'amount']:
            assert k in r, "getvalueforname response missing field %s" % k

    def _log_success(claim_id):
        log.info("lbry://%s complies with %s, claimid: %s", name, metadata.version, claim_id)
        return defer.succeed(None)
    if 'error' in result:
        log.warning("Got an error looking up a name: %s", result['error'])
        return Failure(UnknownNameError(name))
    _check_result_fields(result)
    try:
        metadata = Metadata(json.loads(result['value']))
    except (ValueError, TypeError):
        return Failure(InvalidStreamInfoError(name))
    txid = result['txid']
    sd_hash = metadata['sources']['lbry_sd_hash']
    d = self._save_name_metadata(name, txid, sd_hash)
    d.addCallback(lambda _: self.get_claimid(name, txid))
    d.addCallback(lambda cid: _log_success(cid))
    d.addCallback(lambda _: metadata)
    return d
def get_claim(self, name, claim_id):
    """Return the claim on *name* whose claimId equals *claim_id*.

    NOTE(review): next() has no default here, so a missing claim_id surfaces
    as a StopIteration failure on the Deferred — confirm callers expect that.
    """
    d = self.get_claims_for_name(name)
    d.addCallback(lambda claims: next(claim for claim in claims['claims'] if claim['claimId'] == claim_id))
    return d
def get_claimid(self, name, txid):
    """Return the claimId for (*name*, *txid*), consulting the local cache first.

    On a cache miss the claimId is fetched from the transaction itself and
    written back to the claim_ids table.
    """
    def _get_id_for_return(claim_id):
        if claim_id:
            # Cache hit: claimId already stored locally.
            return defer.succeed(claim_id)
        else:
            d = self.get_claims_from_tx(txid)
            d.addCallback(lambda claims: next(c['claimId'] for c in claims if c['name'] == name))
            d.addCallback(lambda cid: self._update_claimid(cid, name, txid))
            return d
    d = self._get_claimid_for_tx(name, txid)
    d.addCallback(_get_id_for_return)
    return d
def get_claim_info(self, name, txid=None):
    """Return detailed claim info for *name* (current claim, or the one in *txid*).

    Any lookup failure is collapsed to False via the errback — callers treat
    a falsy result as "no claim".
    """
    if not txid:
        # No txid given: resolve the name to find the winning claim's txid.
        d = self._get_value_for_name(name)
        d.addCallback(lambda r: self._get_claim_info(name, r['txid']))
    else:
        d = self._get_claim_info(name, txid)
    d.addErrback(lambda _: False)
    return d
def _get_claim_info(self, name, txid):
    """Build a dict describing the claim on *name* at *txid*.

    The result includes claim_id, amount, height, value (Metadata when
    compliant, raw string otherwise), supports, meta_version and an is_mine
    flag computed against this wallet's own claims.
    """
    def _build_response(claim):
        result = {}
        try:
            metadata = Metadata(json.loads(claim['value']))
            meta_ver = metadata.version
            sd_hash = metadata['sources']['lbry_sd_hash']
            # Compliant metadata: persist the name->txid->sd_hash mapping.
            d = self._save_name_metadata(name, txid, sd_hash)
        except AssertionError:
            # Metadata validation failed: expose the raw value instead.
            metadata = claim['value']
            meta_ver = "Non-compliant"
            d = defer.succeed(None)
        claim_id = claim['claimId']
        result['claim_id'] = claim_id
        result['amount'] = claim['nEffectiveAmount']
        result['height'] = claim['nHeight']
        result['name'] = name
        result['txid'] = txid
        result['value'] = metadata
        result['supports'] = [{'txid': support['txid'], 'n': support['n']} for support in claim['supports']]
        result['meta_version'] = meta_ver
        log.info("get claim info lbry://%s metadata: %s, claimid: %s", name, meta_ver, claim_id)
        d.addCallback(lambda _: self.get_name_claims())
        d.addCallback(lambda r: [c['txid'] for c in r])
        d.addCallback(lambda my_claims: _add_is_mine(result, my_claims))
        return d

    def _add_is_mine(response, my_txs):
        # The claim is ours when its txid appears in our own claim list.
        response['is_mine'] = response['txid'] in my_txs
        return response

    d = self.get_claimid(name, txid)
    d.addCallback(lambda claim_id: self.get_claim(name, claim_id))
    d.addCallback(_build_response)
    return d
def get_claims_for_name(self, name):
    """Return a Deferred firing with every claim currently made on *name*."""
    return self._get_claims_for_name(name)
def update_metadata(self, new_metadata, old_metadata):
    """Overlay *new_metadata* onto *old_metadata* and return validated Metadata.

    When old_metadata is a dict it is updated in place (matching the original
    merge-by-assignment behavior); any other type is replaced wholesale.
    """
    merged = old_metadata if isinstance(old_metadata, dict) else {}
    merged.update(new_metadata)
    return defer.succeed(Metadata(merged))
def claim_name(self, name, bid, m):
    """Claim *name* on the blockchain: create, contest, or update a claim.

    @param name: the lbry:// name to claim
    @param bid: amount of credits to bid
    @param m: metadata dict for the claim (validated through Metadata)
    @return: Deferred firing with the txid of the claim transaction
    """
    def _save_metadata(txid, metadata):
        # Persist the name -> txid -> sd_hash mapping, then pass the txid on.
        log.info("Saving metadata for claim %s" % txid)
        d = self._save_name_metadata(name, txid, metadata['sources']['lbry_sd_hash'])
        d.addCallback(lambda _: txid)
        return d

    def _claim_or_update(claim, metadata, _bid):
        if not claim:
            log.info("No claim yet, making a new one")
            return self._send_name_claim(name, json.dumps(metadata), _bid)
        if not claim['is_mine']:
            log.info("Making a contesting claim")
            # BUG FIX: was json.dump(metadata) — json.dump writes to a file
            # object and would raise TypeError here; json.dumps returns the
            # serialized string this call needs.
            return self._send_name_claim(name, json.dumps(metadata), _bid)
        else:
            log.info("Updating over own claim")
            d = self.update_metadata(metadata, claim['value'])
            d.addCallback(lambda new_metadata: self._send_name_claim_update(
                name, claim['claim_id'], claim['txid'], new_metadata, _bid))
            return d

    meta = Metadata(m)
    d = self.get_claim_info(name)
    d.addCallback(lambda claim: _claim_or_update(claim, meta, bid))
    d.addCallback(lambda txid: _save_metadata(txid, meta))
    return d
def abandon_name(self, txid):
    """Abandon the claim in *txid*, sending its full value back to a fresh address.

    Runs the new-address fetch and the claim-value lookup in parallel, then
    issues the abandon transaction once both succeed.
    """
    d1 = self.get_new_address()
    d2 = self.get_claims_from_tx(txid)

    def get_txout_of_claim(claims):
        for claim in claims:
            if 'name' in claim and 'nOut' in claim:
                return claim['nOut']
        return defer.fail(ValueError("No claims in tx"))

    def get_value_of_txout(nOut):
        d = self._get_raw_tx(txid)
        d.addCallback(self._get_decoded_tx)
        d.addCallback(lambda tx: tx['vout'][nOut]['value'])
        return d

    d2.addCallback(get_txout_of_claim)
    d2.addCallback(get_value_of_txout)
    dl = defer.DeferredList([d1, d2], consumeErrors=True)

    def abandon(results):
        # results: [(success, address), (success, value)]
        if results[0][0] and results[1][0]:
            address = results[0][1]
            amount = float(results[1][1])
            return self._send_abandon(txid, address, amount)
        elif results[0][0] is False:
            return defer.fail(Failure(ValueError("Couldn't get a new address")))
        else:
            # Propagate the claim-lookup failure unchanged.
            return results[1][1]

    dl.addCallback(abandon)
    return dl
def support_claim(self, name, claim_id, amount):
    # Delegate to the backend-specific support-claim implementation.
    return self._support_claim(name, claim_id, amount)
def get_tx(self, txid):
    """Fetch the raw transaction for *txid* and return it decoded."""
    d = self._get_raw_tx(txid)
    d.addCallback(self._get_decoded_tx)
    return d
def get_history(self):
    """Return a Deferred firing with the wallet's transaction history."""
    return self._get_history()
def get_tx_json(self, txid):
    """Fetch raw transaction *txid* and decode it into a dict of plain JSON types.

    Decimal amounts (satoshis) are converted to float coin values.
    """
    def _decode(raw_tx):
        tx = Transaction(raw_tx).deserialize()
        decoded_tx = {}
        for txkey in tx.keys():
            if isinstance(tx[txkey], list):
                decoded_tx[txkey] = []
                for i in tx[txkey]:
                    tmp = {}
                    for k in i.keys():
                        if isinstance(i[k], Decimal):
                            # BUG FIX: was float(i[k] / 1e8) — Decimal / float
                            # raises TypeError; convert to float before scaling.
                            tmp[k] = float(i[k]) / 1e8
                        else:
                            tmp[k] = i[k]
                    decoded_tx[txkey].append(tmp)
            else:
                decoded_tx[txkey] = tx[txkey]
        return decoded_tx
    d = self._get_raw_tx(txid)
    d.addCallback(_decode)
    return d
def get_name_and_validity_for_sd_hash(self, sd_hash):
    """Look up the (name, status) pair for *sd_hash*, or None when unknown locally."""
    d = self._get_claim_metadata_for_sd_hash(sd_hash)
    # name_txid is (name, txid) from the local db; only query the chain when present.
    d.addCallback(lambda name_txid: self._get_status_of_claim(name_txid[1], name_txid[0], sd_hash) if name_txid is not None else None)
    return d
def get_available_balance(self):
    """Return the spendable balance (total wallet balance minus reservations) as a float."""
    unreserved = self.wallet_balance - self.total_reserved_points
    return float(unreserved)
def is_first_run(self):
    """Return a Deferred firing True when this wallet has never been used.

    The answer is computed once via _check_first_run and cached in the
    tri-state self._first_run (_FIRST_RUN_UNKNOWN / _YES / _NO).
    """
    if self._first_run == self._FIRST_RUN_UNKNOWN:
        d = self._check_first_run()

        def set_first_run(is_first):
            # Cache the result so the backend is only queried once.
            self._first_run = self._FIRST_RUN_YES if is_first else self._FIRST_RUN_NO
        d.addCallback(set_first_run)
    else:
        d = defer.succeed(self._FIRST_RUN_YES if self._first_run else self._FIRST_RUN_NO)
    d.addCallback(lambda _: self._first_run == self._FIRST_RUN_YES)
    return d
def _get_status_of_claim(self, txid, name, sd_hash):
    """Return (name, status) for the claim on *name* in *txid* whose metadata
    references *sd_hash*, where status is one of "valid", "invalid",
    "pending" or "unconfirmed"; None when no matching claim is found.
    """
    d = self.get_claims_from_tx(txid)

    def get_status(claims):
        if claims is None:
            claims = []
        for claim in claims:
            if 'in claim trie' in claim:
                if 'name' in claim and str(claim['name']) == name and 'value' in claim:
                    try:
                        value_dict = json.loads(claim['value'])
                    except (ValueError, TypeError):
                        return None
                    claim_sd_hash = None
                    if 'stream_hash' in value_dict:
                        claim_sd_hash = str(value_dict['stream_hash'])
                    # BUG FIX: the membership test checked 'lbrynet_sd_hash'
                    # while the read used 'lbry_sd_hash', so a passing check
                    # always raised KeyError. Metadata elsewhere in this file
                    # uses 'lbry_sd_hash'; test and read the same key.
                    if 'sources' in value_dict and 'lbry_sd_hash' in value_dict['sources']:
                        claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
                    if claim_sd_hash is not None and claim_sd_hash == sd_hash:
                        if 'is controlling' in claim and claim['is controlling']:
                            return name, "valid"
                        if claim['in claim trie']:
                            return name, "invalid"
                        if 'in queue' in claim and claim['in queue']:
                            return name, "pending"
                        return name, "unconfirmed"
        return None

    d.addCallback(get_status)
    return d
def _check_expected_balances(self):
    """Verify that peers paid what they promised by their deadlines.

    Pops every expectation whose deadline has passed, checks the received
    balance at its address, and adjusts peer scores: unpaid expectations get
    up to two extensions (strikes) before a heavy penalty.
    """
    now = datetime.datetime.now()
    balances_to_check = []
    try:
        # expected_balance_at_time is ordered by deadline; pop all expired entries.
        while self.expected_balance_at_time[0][3] < now:
            balances_to_check.append(self.expected_balance_at_time.popleft())
    except IndexError:
        pass
    ds = []
    for balance_to_check in balances_to_check:
        log.info("Checking balance of address %s", str(balance_to_check[1]))
        d = self._get_balance_for_address(balance_to_check[1])
        # BUG FIX: the callback previously closed over the loop variable
        # (lambda bal: bal >= balance_to_check[2]); since these callbacks fire
        # after the loop finishes, every check compared against the LAST
        # entry's expected balance. Bind the expected value as a default.
        d.addCallback(lambda bal, expected=balance_to_check[2]: bal >= expected)
        ds.append(d)
    dl = defer.DeferredList(ds)

    def handle_checks(results):
        from future_builtins import zip
        for balance, (success, result) in zip(balances_to_check, results):
            peer = balance[0]
            if success is True:
                if result is False:
                    if balance[4] <= 1:  # first or second strike, give them another chance
                        new_expected_balance = (balance[0],
                                                balance[1],
                                                balance[2],
                                                datetime.datetime.now() + self.max_expected_payment_time,
                                                balance[4] + 1,
                                                balance[5])
                        self.expected_balance_at_time.append(new_expected_balance)
                        peer.update_score(-5.0)
                    else:
                        peer.update_score(-50.0)
                else:
                    if balance[4] == 0:
                        # Paid on the first attempt: reward the peer.
                        peer.update_score(balance[5])
                    peer.update_stats('points_received', balance[5])
            else:
                log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                            "expected balance: %s, expected time: %s, count: %s, error: %s",
                            str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                            str(balance[4]), str(result.getErrorMessage()))

    dl.addCallback(handle_checks)
    return dl
def _open_db(self):
    """Open (and create when missing) the local blockchainname.db sqlite tables."""
    # check_same_thread=False because adbapi runs queries from a thread pool.
    self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
                                    check_same_thread=False)

    def create_tables(transaction):
        # name_metadata: maps claimed names to their txid and stream sd_hash.
        transaction.execute("create table if not exists name_metadata (" +
                            "    name text, " +
                            "    txid text, " +
                            "    sd_hash text)")
        # claim_ids: caches the claimId for each (name, txid) pair.
        transaction.execute("create table if not exists claim_ids (" +
                            "    claimId text, " +
                            "    name text, " +
                            "    txid text)")

    return self.db.runInteraction(create_tables)
def _clean_bad_records(self):
    # Purge rows whose txid is malformed (a txid is exactly 64 hex chars).
    d = self.db.runQuery("delete from name_metadata where length(txid) > 64 or txid is null")
    return d
def _save_name_metadata(self, name, txid, sd_hash):
    """Upsert the (name, txid, sd_hash) row: delete any duplicate, then insert."""
    assert len(txid) == 64, "That's not a txid: %s" % str(txid)
    d = self.db.runQuery("delete from name_metadata where name=? and txid=? and sd_hash=?", (name, txid, sd_hash))
    d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?)", (name, txid, sd_hash)))
    return d
def _get_claim_metadata_for_sd_hash(self, sd_hash):
    """Return the first (name, txid) row stored for *sd_hash*, or None."""
    d = self.db.runQuery("select name, txid from name_metadata where sd_hash=?", (sd_hash,))
    d.addCallback(lambda r: r[0] if r else None)
    return d
def _update_claimid(self, claim_id, name, txid):
    """Upsert the claimId cache row for (name, txid) and fire with claim_id."""
    assert len(txid) == 64, "That's not a txid: %s" % str(txid)
    d = self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=?", (claim_id, name, txid))
    d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?)", (claim_id, name, txid)))
    d.addCallback(lambda _: claim_id)
    return d
def _get_claimid_for_tx(self, name, txid):
    """Return the cached claimId for (name, txid), or None on a cache miss."""
    assert len(txid) == 64, "That's not a txid: %s" % str(txid)
    d = self.db.runQuery("select claimId from claim_ids where name=? and txid=?", (name, txid))
    d.addCallback(lambda r: r[0][0] if r else None)
    return d
######### Must be overridden #########
def get_balance(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_new_address(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_block(self, blockhash):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_most_recent_blocktime(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_best_blockhash(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_name_claims(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_claims_for_name(self, name):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _check_first_run(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_raw_tx(self, txid):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _send_name_claim(self, name, val, amount):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_decoded_tx(self, raw_tx):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _send_abandon(self, txid, address, amount):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _support_claim(self, name, claim_id, amount):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _do_send_many(self, payments_to_send):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_value_for_name(self, name):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def get_claims_from_tx(self, txid):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_balance_for_address(self, address):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _get_history(self):
    # Abstract: concrete wallet backends must override.
    return defer.fail(NotImplementedError())
def _start(self):
    # Hook for subclasses: backend-specific startup work. No-op by default.
    pass
def _stop(self):
    # Hook for subclasses: backend-specific shutdown work. No-op by default.
    pass
class LBRYcrdWallet(LBRYWallet):
    """LBRYWallet backed by a local lbrycrdd daemon reached over JSON-RPC.

    When lbrycrdd_path is given, the daemon is launched (and later stopped)
    by this class; otherwise an already-running daemon is expected.
    """

    def __init__(self, db_dir, wallet_dir=None, wallet_conf=None, lbrycrdd_path=None):
        LBRYWallet.__init__(self, db_dir)
        self.started_lbrycrdd = False
        self.wallet_dir = wallet_dir
        self.wallet_conf = wallet_conf
        self.lbrycrdd = None
        self.lbrycrdd_path = lbrycrdd_path

        settings = self._get_rpc_conf()
        rpc_user = settings["username"]
        rpc_pass = settings["password"]
        rpc_port = settings["rpc_port"]
        rpc_url = "127.0.0.1"
        self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port))

    def _start(self):
        return threads.deferToThread(self._make_connection)

    def _stop(self):
        # Only stop the daemon if we were responsible for starting it.
        if self.lbrycrdd_path is not None:
            return self._stop_daemon()

    def _make_connection(self):
        """Launch lbrycrdd when configured to, then verify the RPC connection."""
        alert.info("Connecting to lbrycrdd...")
        if self.lbrycrdd_path is not None:
            self._start_daemon()
        self._get_info_rpc()
        log.info("Connected!")
        alert.info("Connected to lbrycrdd.")

    def _get_rpc_conf(self):
        """Read RPC credentials/port from the wallet conf file, with defaults."""
        settings = {"username": "rpcuser",
                    "password": "rpcpassword",
                    "rpc_port": 9245}
        if self.wallet_conf and os.path.exists(self.wallet_conf):
            # FIX: use a context manager so the conf file handle is closed.
            with open(self.wallet_conf) as conf:
                for l in conf:
                    if l.startswith("rpcuser="):
                        settings["username"] = l[8:].rstrip('\n')
                    if l.startswith("rpcpassword="):
                        settings["password"] = l[12:].rstrip('\n')
                    if l.startswith("rpcport="):
                        settings["rpc_port"] = int(l[8:].rstrip('\n'))
        return settings

    def _check_first_run(self):
        d = self.get_balance()
        # A nonzero balance means not a first run; the sentinel 2 skips the
        # address count so the next callback evaluates to False.
        d.addCallback(lambda bal: threads.deferToThread(self._get_num_addresses_rpc) if bal == 0 else 2)
        d.addCallback(lambda num_addresses: True if num_addresses <= 1 else False)
        return d

    def get_new_address(self):
        return threads.deferToThread(self._get_new_address_rpc)

    def get_balance(self):
        return threads.deferToThread(self._get_wallet_balance_rpc)

    def get_most_recent_blocktime(self):
        d = threads.deferToThread(self._get_best_blockhash_rpc)
        d.addCallback(lambda blockhash: threads.deferToThread(self._get_block_rpc, blockhash))
        d.addCallback(
            lambda block: block['time'] if 'time' in block else Failure(ValueError("Could not get a block time")))
        return d

    def get_name_claims(self):
        return threads.deferToThread(self._get_name_claims_rpc)

    def get_block(self, blockhash):
        return threads.deferToThread(self._get_block_rpc, blockhash)

    def get_best_blockhash(self):
        d = threads.deferToThread(self._get_blockchain_info_rpc)
        d.addCallback(lambda blockchain_info: blockchain_info['bestblockhash'])
        return d

    def get_nametrie(self):
        return threads.deferToThread(self._get_nametrie_rpc)

    def start_miner(self):
        d = threads.deferToThread(self._get_gen_status_rpc)
        d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, True) if not status
                      else "Miner was already running")
        return d

    def stop_miner(self):
        d = threads.deferToThread(self._get_gen_status_rpc)
        d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, False) if status
                      else "Miner wasn't running")
        return d

    def get_miner_status(self):
        return threads.deferToThread(self._get_gen_status_rpc)

    def _get_balance_for_address(self, address):
        return threads.deferToThread(self._get_balance_for_address_rpc, address)

    def _do_send_many(self, payments_to_send):
        outputs = {address: float(points) for address, points in payments_to_send.iteritems()}
        return threads.deferToThread(self._do_send_many_rpc, outputs)

    def _send_name_claim(self, name, value, amount):
        return threads.deferToThread(self._send_name_claim_rpc, name, value, amount)

    def _get_raw_tx(self, txid):
        return threads.deferToThread(self._get_raw_tx_rpc, txid)

    def _get_decoded_tx(self, raw_tx):
        return threads.deferToThread(self._get_decoded_tx_rpc, raw_tx)

    def _send_abandon(self, txid, address, amount):
        return threads.deferToThread(self._send_abandon_rpc, txid, address, amount)

    def _send_name_claim_update(self, name, claim_id, txid, value, amount):
        return threads.deferToThread(self._update_name_rpc, txid, value, amount)

    def _support_claim(self, name, claim_id, amount):
        return threads.deferToThread(self._support_claim_rpc, name, claim_id, amount)

    def _get_claims_for_name(self, name):
        return threads.deferToThread(self._get_claims_for_name_rpc, name)

    def get_claims_from_tx(self, txid):
        return threads.deferToThread(self._get_claims_from_tx_rpc, txid)

    def _get_value_for_name(self, name):
        return threads.deferToThread(self._get_value_for_name_rpc, name)

    def _get_history(self):
        return threads.deferToThread(self._list_transactions_rpc)

    def _get_rpc_conn(self):
        return AuthServiceProxy(self.rpc_conn_string)

    def _start_daemon(self):
        """Launch lbrycrdd unless one is already answering RPC, then wait for it."""
        tries = 0
        try:
            rpc_conn = self._get_rpc_conn()
            try:
                rpc_conn.getinfo()
            except ValueError:
                log.exception('Failed to get rpc info. Rethrowing with a hopefully more useful error message')
                raise Exception('Failed to get rpc info from lbrycrdd. Try restarting lbrycrdd')
            log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
            return
        except (socket.error, JSONRPCException):
            tries += 1
            log.info("lbrcyrdd was not running when LBRYcrdWallet was started. Attempting to start it.")

        try:
            if os.name == "nt":
                # BUG FIX: STARTUPINFO must be instantiated — the original
                # assigned the class itself, mutating class attributes and
                # passing a class (not an instance) to Popen.
                si = subprocess.STARTUPINFO()
                si.dwFlags = subprocess.STARTF_USESHOWWINDOW
                si.wShowWindow = subprocess.SW_HIDE
                self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                                  "-conf=%s" % self.wallet_conf], startupinfo=si)
            else:
                if sys.platform == 'darwin':
                    os.chdir("/Applications/LBRY.app/Contents/Resources")
                self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                                  "-conf=%s" % self.wallet_conf])
            self.started_lbrycrdd = True
        except OSError:
            import traceback
            log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
            raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)

        # Poll with exponential backoff until the daemon answers RPC.
        while tries < 6:
            try:
                rpc_conn = self._get_rpc_conn()
                rpc_conn.getinfo()
                break
            except (socket.error, JSONRPCException):
                tries += 1
                log.warning("Failed to connect to lbrycrdd.")
                if tries < 6:
                    time.sleep(2 ** tries)
                    log.warning("Trying again in %d seconds", 2 ** tries)
                else:
                    log.warning("Giving up.")
        else:
            self.lbrycrdd.terminate()
            raise ValueError("Couldn't open lbrycrdd")

    def _stop_daemon(self):
        if self.lbrycrdd is not None and self.started_lbrycrdd is True:
            alert.info("Stopping lbrycrdd...")
            d = threads.deferToThread(self._stop_rpc)
            d.addCallback(lambda _: alert.info("Stopped lbrycrdd."))
            return d
        return defer.succeed(True)

    @_catch_connection_error
    def _get_balance_for_address_rpc(self, address):
        rpc_conn = self._get_rpc_conn()
        balance = rpc_conn.getreceivedbyaddress(address)
        log.debug("received balance for %s: %s", str(address), str(balance))
        return balance

    @_catch_connection_error
    def _do_send_many_rpc(self, payments):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.sendmany("", payments)

    @_catch_connection_error
    def _get_info_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getinfo()

    @_catch_connection_error
    def _get_name_claims_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.listnameclaims()

    @_catch_connection_error
    def _get_gen_status_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getgenerate()

    @_catch_connection_error
    def _set_gen_status_rpc(self, b):
        if b:
            log.info("Starting miner")
        else:
            log.info("Stopping miner")
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.setgenerate(b)

    @_catch_connection_error
    def _get_raw_tx_rpc(self, txid):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getrawtransaction(txid)

    @_catch_connection_error
    def _get_decoded_tx_rpc(self, raw):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.decoderawtransaction(raw)

    @_catch_connection_error
    def _send_abandon_rpc(self, txid, address, amount):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.abandonclaim(txid, address, amount)

    @_catch_connection_error
    def _get_blockchain_info_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getblockchaininfo()

    @_catch_connection_error
    def _get_block_rpc(self, blockhash):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getblock(blockhash)

    @_catch_connection_error
    def _get_claims_from_tx_rpc(self, txid):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimsfortx(txid)

    @_catch_connection_error
    def _get_claims_for_name_rpc(self, name):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimsforname(name)

    @_catch_connection_error
    def _get_nametrie_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimtrie()

    @_catch_connection_error
    def _get_wallet_balance_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getbalance("")

    @_catch_connection_error
    def _get_new_address_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getnewaddress()

    @_catch_connection_error
    def _get_value_for_name_rpc(self, name):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getvalueforname(name)

    @_catch_connection_error
    def _update_name_rpc(self, txid, value, amount):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.updateclaim(txid, json.dumps(value), amount)

    @_catch_connection_error
    def _send_name_claim_rpc(self, name, value, amount):
        rpc_conn = self._get_rpc_conn()
        try:
            return str(rpc_conn.claimname(name, value, amount))
        except JSONRPCException as e:
            # Translate the daemon's error message into our exception types.
            if 'message' in e.error and e.error['message'] == "Insufficient funds":
                raise InsufficientFundsError()
            elif 'message' in e.error:
                raise ValueError(e.error['message'])

    @_catch_connection_error
    def _support_claim_rpc(self, name, claim_id, amount):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.supportclaim(name, claim_id, amount)

    @_catch_connection_error
    def _get_num_addresses_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return len(rpc_conn.getaddressesbyaccount(""))

    @_catch_connection_error
    def _get_best_blockhash_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getbestblockhash()

    @_catch_connection_error
    def _list_transactions_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.listtransactions()

    @_catch_connection_error
    def _stop_rpc(self):
        # check if our lbrycrdd is actually running, or if we connected to one that was already
        # running and ours failed to start
        if self.lbrycrdd.poll() is None:
            rpc_conn = self._get_rpc_conn()
            rpc_conn.stop()
            self.lbrycrdd.wait()
class LBRYumWallet(LBRYWallet):
def __init__(self, db_dir):
LBRYWallet.__init__(self, db_dir)
self.config = None
self.network = None
self.wallet = None
self.cmd_runner = None
self.first_run = False
self.printed_retrieving_headers = False
self._start_check = None
self._catch_up_check = None
self._caught_up_counter = 0
self._lag_counter = 0
self.blocks_behind_alert = 0
self.catchup_progress = 0
self.max_behind = 0
def _start(self):
network_start_d = defer.Deferred()
def setup_network():
self.config = SimpleConfig({'auto_connect': True})
self.network = Network(self.config)
alert.info("Loading the wallet...")
return defer.succeed(self.network.start())
d = setup_network()
def check_started():
if self.network.is_connecting():
if not self.printed_retrieving_headers and self.network.blockchain.retrieving_headers:
alert.info("Running the wallet for the first time...this may take a moment.")
self.printed_retrieving_headers = True
return False
self._start_check.stop()
self._start_check = None
if self.network.is_connected():
network_start_d.callback(True)
else:
network_start_d.errback(ValueError("Failed to connect to network."))
self._start_check = task.LoopingCall(check_started)
d.addCallback(lambda _: self._start_check.start(.1))
d.addCallback(lambda _: network_start_d)
d.addCallback(lambda _: self._load_wallet())
d.addCallback(lambda _: self._get_cmd_runner())
return d
def _stop(self):
if self._start_check is not None:
self._start_check.stop()
self._start_check = None
if self._catch_up_check is not None:
self._catch_up_check.stop()
self._catch_up_check = None
d = defer.Deferred()
def check_stopped():
if self.network:
if self.network.is_connected():
return False
stop_check.stop()
self.network = None
d.callback(True)
if self.network:
self.network.stop()
stop_check = task.LoopingCall(check_stopped)
stop_check.start(.1)
return d
def _load_wallet(self):
def get_wallet():
path = self.config.get_wallet_path()
storage = WalletStorage(path)
wallet = Wallet(storage)
if not storage.file_exists:
self.first_run = True
seed = wallet.make_seed()
wallet.add_seed(seed, None)
wallet.create_master_keys(None)
wallet.create_main_account()
wallet.synchronize()
self.wallet = wallet
blockchain_caught_d = defer.Deferred()
def check_caught_up():
local_height = self.network.get_catchup_progress()
remote_height = self.network.get_server_height()
if remote_height != 0 and remote_height - local_height <= 5:
msg = ""
if self._caught_up_counter != 0:
msg += "All caught up. "
msg += "Wallet loaded."
alert.info(msg)
self._catch_up_check.stop()
self._catch_up_check = None
blockchain_caught_d.callback(True)
elif remote_height != 0:
past_blocks_behind = self.blocks_behind_alert
self.blocks_behind_alert = remote_height - local_height
if self.blocks_behind_alert < past_blocks_behind:
self._lag_counter = 0
self.is_lagging = False
else:
self._lag_counter += 1
if self._lag_counter >= 900:
self.is_lagging = True
if self.blocks_behind_alert > self.max_behind:
self.max_behind = self.blocks_behind_alert
self.catchup_progress = int(100 * (self.blocks_behind_alert / (5 + self.max_behind)))
if self._caught_up_counter == 0:
alert.info('Catching up with the blockchain...showing blocks left...')
if self._caught_up_counter % 30 == 0:
alert.info('%d...', (remote_height - local_height))
self._caught_up_counter += 1
self._catch_up_check = task.LoopingCall(check_caught_up)
d = threads.deferToThread(get_wallet)
d.addCallback(self._save_wallet)
d.addCallback(lambda _: self.wallet.start_threads(self.network))
d.addCallback(lambda _: self._catch_up_check.start(.1))
d.addCallback(lambda _: blockchain_caught_d)
return d
def _get_cmd_runner(self):
self.cmd_runner = Commands(self.config, self.wallet, self.network)
def get_balance(self):
cmd = known_commands['getbalance']
func = getattr(self.cmd_runner, cmd.name)
d = threads.deferToThread(func)
d.addCallback(lambda result: result['unmatured'] if 'unmatured' in result else result['confirmed'])
d.addCallback(Decimal)
return d
def get_new_address(self):
d = threads.deferToThread(self.wallet.create_new_address)
d.addCallback(self._save_wallet)
return d
def get_block(self, blockhash):
cmd = known_commands['getblock']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, blockhash)
def get_most_recent_blocktime(self):
header = self.network.get_header(self.network.get_local_height())
return defer.succeed(header['timestamp'])
def get_best_blockhash(self):
height = self.network.get_local_height()
d = threads.deferToThread(self.network.blockchain.read_header, height)
d.addCallback(lambda header: self.network.blockchain.hash_header(header))
return d
def get_name_claims(self):
cmd = known_commands['getnameclaims']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func)
def _check_first_run(self):
return defer.succeed(self.first_run)
def _get_raw_tx(self, txid):
cmd = known_commands['gettransaction']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, txid)
def _send_name_claim(self, name, val, amount):
def send_claim(address):
cmd = known_commands['claimname']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, address, amount, name, val)
d = self.get_new_address()
d.addCallback(send_claim)
d.addCallback(self._broadcast_transaction)
return d
def _get_claims_for_name(self, name):
cmd = known_commands['getclaimsforname']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, name)
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
def send_claim_update(address):
decoded_claim_id = claim_id.decode('hex')[::-1]
metadata = json.dumps(Metadata(value))
log.info("updateclaim %s %s %f %s %s '%s'", txid, address, amount, name, decoded_claim_id.encode('hex'), json.dumps(metadata))
cmd = known_commands['updateclaim']
func = getattr(self.cmd_runner, cmd.name)
return threads.deferToThread(func, txid, address, amount, name, decoded_claim_id, metadata)
d = self.get_new_address()
d.addCallback(send_claim_update)
d.addCallback(self._broadcast_transaction)
return d
def _get_decoded_tx(self, raw_tx):
tx = Transaction(raw_tx)
decoded_tx = {}
decoded_tx['vout'] = []
for output in tx.outputs():
out = {}
out['value'] = Decimal(output[2]) / Decimal(COIN)
decoded_tx['vout'].append(out)
return decoded_tx
def _send_abandon(self, txid, address, amount):
log.info("Abandon %s %s %f" % (txid, address, amount))
cmd = known_commands['abandonclaim']
func = getattr(self.cmd_runner, cmd.name)
d = threads.deferToThread(func, txid, address, amount)
d.addCallback(self._broadcast_transaction)
return d
def _support_claim(self, name, claim_id, amount):
def _send_support(d, a, n, c):
cmd = known_commands['supportclaim']
func = getattr(self.cmd_runner, cmd.name)
d = threads.deferToThread(func, d, a, n, c)
return d
d = self.get_new_address()
d.addCallback(lambda address: _send_support(address, amount, name, claim_id))
d.addCallback(self._broadcast_transaction)
return d
def _broadcast_transaction(self, raw_tx):
    """Broadcast `raw_tx`, verify it was accepted (response is a 64-char txid),
    then persist the wallet. Fires with the txid or a 'Transaction rejected' failure."""
    def check_accepted(response):
        log.info("Broadcast tx: %s", response)
        if len(response) != 64:
            return defer.fail(Exception("Transaction rejected"))
        return response
    broadcast_cmd = known_commands['broadcast']
    run_cmd = getattr(self.cmd_runner, broadcast_cmd.name)
    d = threads.deferToThread(run_cmd, raw_tx)
    d.addCallback(check_accepted)
    d.addCallback(self._save_wallet)
    return d
def _do_send_many(self, payments_to_send):
    """Build and send one transaction paying every (address -> amount) entry."""
    log.warning("Doing send many. payments to send: %s", str(payments_to_send))
    send_cmd = known_commands['paytomanyandsend']
    run_cmd = getattr(self.cmd_runner, send_cmd.name)
    return threads.deferToThread(run_cmd, payments_to_send.iteritems())
def _get_value_for_name(self, name):
    """Resolve the current winning claim value for `name`."""
    value_cmd = known_commands['getvalueforname']
    run_cmd = getattr(self.cmd_runner, value_cmd.name)
    return threads.deferToThread(run_cmd, name)
def get_claims_from_tx(self, txid):
    """List the claims contained in transaction `txid`."""
    claims_cmd = known_commands['getclaimsfromtx']
    run_cmd = getattr(self.cmd_runner, claims_cmd.name)
    return threads.deferToThread(run_cmd, txid)
def _get_balance_for_address(self, address):
    """Return the total ever received by `address`, in whole coins."""
    received = Decimal(self.wallet.get_addr_received(address))
    return defer.succeed(received / COIN)
def get_nametrie(self):
    """Fetch the full claim trie from the server."""
    trie_cmd = known_commands['getclaimtrie']
    run_cmd = getattr(self.cmd_runner, trie_cmd.name)
    return threads.deferToThread(run_cmd)
def _get_history(self):
    """Fetch the wallet's transaction history."""
    history_cmd = known_commands['history']
    run_cmd = getattr(self.cmd_runner, history_cmd.name)
    return threads.deferToThread(run_cmd)
def get_pub_keys(self, wallet):
    """Fetch the public keys for `wallet`."""
    keys_cmd = known_commands['getpubkeys']
    run_cmd = getattr(self.cmd_runner, keys_cmd.name)
    return threads.deferToThread(run_cmd, wallet)
def _save_wallet(self, val):
    """Write the wallet to disk off the reactor thread; pass `val` through unchanged."""
    def pass_through(_):
        return val
    d = threads.deferToThread(self.wallet.storage.write)
    d.addCallback(pass_through)
    return d
class LBRYcrdAddressRequester(object):
    """Asks each connected peer for an LBRYcrd address so payments can be sent to it."""
    implements([IRequestCreator])
    def __init__(self, wallet):
        self.wallet = wallet
        self._protocols = []
    ######### IRequestCreator #########
    def send_next_request(self, peer, protocol):
        """Request a payment address once per protocol.

        Returns a deferred firing True if a request was sent, False if this
        protocol was already asked.
        """
        # Idiomatic membership test ('protocol not in', was 'not protocol in').
        if protocol not in self._protocols:
            r = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address')
            d = protocol.add_request(r)
            d.addCallback(self._handle_address_response, peer, r, protocol)
            d.addErrback(self._request_failed, peer)
            self._protocols.append(protocol)
            return defer.succeed(True)
        else:
            return defer.succeed(False)
    ######### internal calls #########
    def _handle_address_response(self, response_dict, peer, request, protocol):
        """Record the address the peer sent back so payments can be queued to it."""
        assert request.response_identifier in response_dict, \
            "Expected %s in dict but did not get it" % request.response_identifier
        assert protocol in self._protocols, "Responding protocol is not in our list of protocols"
        address = response_dict[request.response_identifier]
        self.wallet.update_peer_address(peer, address)
    def _request_failed(self, err, peer):
        """Log unexpected failures (cancellations are expected); re-raise the error."""
        if not err.check(RequestCanceledError):
            log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                        err.getErrorMessage(), str(peer))
        return err
class LBRYcrdAddressQueryHandlerFactory(object):
    """Builds LBRYcrdAddressQueryHandler instances bound to a single wallet."""
    implements(IQueryHandlerFactory)
    def __init__(self, wallet):
        self.wallet = wallet
    ######### IQueryHandlerFactory #########
    def build_query_handler(self):
        return LBRYcrdAddressQueryHandler(self.wallet)
    def get_primary_query_identifier(self):
        return 'lbrycrd_address'
    def get_description(self):
        return "LBRYcrd Address - an address for receiving payments via LBRYcrd"
class LBRYcrdAddressQueryHandler(object):
    """Answers a peer's 'lbrycrd_address' query with a fresh wallet address."""
    implements(IQueryHandler)
    def __init__(self, wallet):
        self.wallet = wallet
        self.query_identifiers = ['lbrycrd_address']
        self.address = None
        self.peer = None
    ######### IQueryHandler #########
    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)
    def handle_queries(self, queries):
        """Hand out a new address when queried; once an address has been given,
        later query batches without the identifier are tolerated."""
        if self.query_identifiers[0] in queries:
            d = self.wallet.get_new_address_for_peer(self.peer)
            def to_response(new_address):
                self.address = new_address
                return {'lbrycrd_address': new_address}
            d.addCallback(to_response)
            return d
        if self.address is None:
            log.warning("Expected a request for an address, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
        return defer.succeed({})
Fix JSON bug.
import datetime
import functools
import json
import logging
import os
import socket
import subprocess
import sys
import time
from collections import defaultdict, deque
from decimal import Decimal
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from zope.interface import implements
from lbryum import SimpleConfig, Network
from lbryum.lbrycrd import COIN
from lbryum.wallet import WalletStorage, Wallet
from lbryum.commands import known_commands, Commands
from lbryum.transaction import Transaction
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.metadata.LBRYMetadata import Metadata
log = logging.getLogger(__name__)
# Separate channel for user-facing alerts (daemon start/stop progress, etc.).
alert = logging.getLogger("lbryalert." + __name__)
class ReservedPoints(object):
    """A reservation of wallet points earmarked for a future payment.

    identifier: the peer or address the payment will ultimately go to.
    amount: the number of points reserved.
    """
    def __init__(self, identifier, amount):
        self.identifier = identifier
        self.amount = amount
    def __repr__(self):
        # Makes reservations readable when they appear in logs/debugging.
        return "ReservedPoints(%r, %r)" % (self.identifier, self.amount)
def _catch_connection_error(f):
def w(*args):
try:
return f(*args)
except socket.error:
raise ValueError("Unable to connect to an lbrycrd server. Make sure an lbrycrd server " +
"is running and that this application can connect to it.")
return w
class LBRYWallet(object):
    """Abstract base implementing the LBRYWallet interface for the LBRYcrd payment system.

    Tracks the wallet balance, point reservations, queued outgoing payments,
    expected incoming payments from peers, and name-claim metadata (cached in
    a local sqlite database). Concrete backends implement the hooks under
    "Must be overridden" below.
    """
    implements(ILBRYWallet)
    # Tri-state cache used by is_first_run().
    _FIRST_RUN_UNKNOWN = 0
    _FIRST_RUN_YES = 1
    _FIRST_RUN_NO = 2
    def __init__(self, db_dir):
        self.db_dir = db_dir
        self.db = None
        self.next_manage_call = None
        self.wallet_balance = Decimal(0.0)
        self.total_reserved_points = Decimal(0.0)
        self.peer_addresses = {}  # {Peer: string}
        self.queued_payments = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.expected_balances = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.current_address_given_to_peer = {}  # {Peer: address(string)}
        self.expected_balance_at_time = deque()  # (Peer, address(string), amount(Decimal), time(datetime), count(int),
                                                 # incremental_amount(float))
        self.max_expected_payment_time = datetime.timedelta(minutes=3)
        self.stopped = True
        self.is_lagging = None
        self.manage_running = False
        self._manage_count = 0
        self._balance_refresh_time = 3  # seconds between manage() passes
        self._batch_count = 20  # every Nth manage() pass does the full (payments) work
        self._first_run = self._FIRST_RUN_UNKNOWN
    def start(self):
        """Open the local db, start the backend, and begin the manage loop."""
        def start_manage():
            self.stopped = False
            self.manage()
            return True
        d = self._open_db()
        d.addCallback(lambda _: self._clean_bad_records())
        d.addCallback(lambda _: self._start())
        d.addCallback(lambda _: start_manage())
        return d
    @staticmethod
    def log_stop_error(err):
        log.error("An error occurred stopping the wallet: %s", err.getTraceback())
    def stop(self):
        """Cancel the manage loop, run one final full manage pass, then stop the backend."""
        self.stopped = True
        # If self.next_manage_call is None, then manage is currently running or else
        # start has not been called, so set stopped and do nothing else.
        if self.next_manage_call is not None:
            self.next_manage_call.cancel()
            self.next_manage_call = None
        d = self.manage(do_full=True)
        d.addErrback(self.log_stop_error)
        d.addCallback(lambda _: self._stop())
        d.addErrback(self.log_stop_error)
        return d
    def manage(self, do_full=False):
        """Periodic housekeeping: check expected balances, flush queued payments
        (on full passes), refresh the cached balance, and reschedule itself."""
        self.next_manage_call = None
        have_set_manage_running = [False]  # mutable cell so closures can flag ownership
        self._manage_count += 1
        if self._manage_count % self._batch_count == 0:
            self._manage_count = 0
            do_full = True
        def check_if_manage_running():
            # Full passes wait (poll every second) for a running manage to finish;
            # light passes are simply skipped if one is already running.
            d = defer.Deferred()
            def fire_if_not_running():
                if self.manage_running is False:
                    self.manage_running = True
                    have_set_manage_running[0] = True
                    d.callback(True)
                elif do_full is False:
                    d.callback(False)
                else:
                    task.deferLater(reactor, 1, fire_if_not_running)
            fire_if_not_running()
            return d
        d = check_if_manage_running()
        def do_manage():
            if do_full:
                d = self._check_expected_balances()
                d.addCallback(lambda _: self._send_payments())
            else:
                d = defer.succeed(True)
            d.addCallback(lambda _: self.get_balance())
            def set_wallet_balance(balance):
                if self.wallet_balance != balance:
                    log.info("Got a new balance: %s", str(balance))
                self.wallet_balance = balance
            d.addCallback(set_wallet_balance)
            return d
        d.addCallback(lambda should_run: do_manage() if should_run else None)
        def set_next_manage_call():
            if not self.stopped:
                self.next_manage_call = reactor.callLater(self._balance_refresh_time, self.manage)
        d.addCallback(lambda _: set_next_manage_call())
        def log_error(err):
            log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
            return err
        d.addErrback(log_error)
        def set_manage_not_running(arg):
            # Only release the flag if this invocation was the one that set it.
            if have_set_manage_running[0] is True:
                self.manage_running = False
            return arg
        d.addBoth(set_manage_not_running)
        return d
    def get_info_exchanger(self):
        return LBRYcrdAddressRequester(self)
    def get_wallet_info_query_handler_factory(self):
        return LBRYcrdAddressQueryHandlerFactory(self)
    def reserve_points(self, identifier, amount):
        """
        Ensure a certain amount of points are available to be sent as payment, before the service is rendered

        @param identifier: The peer to which the payment will ultimately be sent

        @param amount: The amount of points to reserve

        @return: A ReservedPoints object which is given to send_points once the service has been rendered
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        #if peer in self.peer_addresses:
        if self.wallet_balance >= self.total_reserved_points + rounded_amount:
            self.total_reserved_points += rounded_amount
            return ReservedPoints(identifier, rounded_amount)
        return None
    def cancel_point_reservation(self, reserved_points):
        """
        Return all of the points that were reserved previously for some ReservedPoints object

        @param reserved_points: ReservedPoints previously returned by reserve_points

        @return: None
        """
        self.total_reserved_points -= reserved_points.amount
    def send_points(self, reserved_points, amount):
        """
        Schedule a payment to be sent to a peer

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send, must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        peer = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        assert(peer in self.peer_addresses)
        self.queued_payments[self.peer_addresses[peer]] += rounded_amount
        # make any unused points available
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("ordering that %s points be sent to %s", str(rounded_amount),
                 str(self.peer_addresses[peer]))
        peer.update_stats('points_sent', amount)
        return defer.succeed(True)
    def send_points_to_address(self, reserved_points, amount):
        """
        Schedule a payment to be sent to an address

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send. must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        address = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        self.queued_payments[address] += rounded_amount
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("Ordering that %s points be sent to %s", str(rounded_amount),
                 str(address))
        return defer.succeed(True)
    def add_expected_payment(self, peer, amount):
        """Increase the number of points expected to be paid by a peer"""
        rounded_amount = Decimal(str(round(amount, 8)))
        assert(peer in self.current_address_given_to_peer)
        address = self.current_address_given_to_peer[peer]
        log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
        self.expected_balances[address] += rounded_amount
        expected_balance = self.expected_balances[address]
        expected_time = datetime.datetime.now() + self.max_expected_payment_time
        self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount))
        peer.update_stats('expected_points', amount)
    def update_peer_address(self, peer, address):
        self.peer_addresses[peer] = address
    def get_new_address_for_peer(self, peer):
        """Get a fresh address and remember it as the one given to this peer."""
        def set_address_for_peer(address):
            self.current_address_given_to_peer[peer] = address
            return address
        d = self.get_new_address()
        d.addCallback(set_address_for_peer)
        return d
    def _send_payments(self):
        """Drain the queued-payments map into one send-many transaction."""
        payments_to_send = {}
        # .items() snapshots the dict (Python 2), so deleting while iterating is safe.
        for address, points in self.queued_payments.items():
            log.info("Should be sending %s points to %s", str(points), str(address))
            payments_to_send[address] = points
            self.total_reserved_points -= points
            self.wallet_balance -= points
            del self.queued_payments[address]
        if payments_to_send:
            log.info("Creating a transaction with outputs %s", str(payments_to_send))
            d = self._do_send_many(payments_to_send)
            d.addCallback(lambda txid: log.debug("Sent transaction %s", txid))
            return d
        log.debug("There were no payments to send")
        return defer.succeed(True)
    def get_stream_info_for_name(self, name):
        d = self._get_value_for_name(name)
        d.addCallback(self._get_stream_info_from_value, name)
        return d
    def get_txid_for_name(self, name):
        d = self._get_value_for_name(name)
        d.addCallback(lambda r: None if 'txid' not in r else r['txid'])
        return d
    def get_stream_info_from_txid(self, name, txid):
        d = self.get_claims_from_tx(txid)
        def get_claim_for_name(claims):
            for claim in claims:
                if claim['name'] == name:
                    claim['txid'] = txid
                    return claim
            return Failure(UnknownNameError(name))
        d.addCallback(get_claim_for_name)
        d.addCallback(self._get_stream_info_from_value, name)
        return d
    def _get_stream_info_from_value(self, result, name):
        """Validate a name-lookup result and return its Metadata, caching the
        name -> (txid, sd_hash) mapping locally."""
        def _check_result_fields(r):
            for k in ['value', 'txid', 'n', 'height', 'amount']:
                assert k in r, "getvalueforname response missing field %s" % k
        def _log_success(claim_id):
            log.info("lbry://%s complies with %s, claimid: %s", name, metadata.version, claim_id)
            return defer.succeed(None)
        if 'error' in result:
            log.warning("Got an error looking up a name: %s", result['error'])
            return Failure(UnknownNameError(name))
        _check_result_fields(result)
        try:
            metadata = Metadata(json.loads(result['value']))
        except (ValueError, TypeError):
            return Failure(InvalidStreamInfoError(name))
        txid = result['txid']
        sd_hash = metadata['sources']['lbry_sd_hash']
        d = self._save_name_metadata(name, txid, sd_hash)
        d.addCallback(lambda _: self.get_claimid(name, txid))
        d.addCallback(lambda cid: _log_success(cid))
        d.addCallback(lambda _: metadata)
        return d
    def get_claim(self, name, claim_id):
        d = self.get_claims_for_name(name)
        d.addCallback(lambda claims: next(claim for claim in claims['claims'] if claim['claimId'] == claim_id))
        return d
    def get_claimid(self, name, txid):
        """Look up the claim id for (name, txid), using the local cache first."""
        def _get_id_for_return(claim_id):
            if claim_id:
                return defer.succeed(claim_id)
            else:
                d = self.get_claims_from_tx(txid)
                d.addCallback(lambda claims: next(c['claimId'] for c in claims if c['name'] == name))
                d.addCallback(lambda cid: self._update_claimid(cid, name, txid))
                return d
        d = self._get_claimid_for_tx(name, txid)
        d.addCallback(_get_id_for_return)
        return d
    def get_claim_info(self, name, txid=None):
        # Without a txid, resolve the current winning claim first.
        if not txid:
            d = self._get_value_for_name(name)
            d.addCallback(lambda r: self._get_claim_info(name, r['txid']))
        else:
            d = self._get_claim_info(name, txid)
        # Any lookup failure is reported as False rather than an errback.
        d.addErrback(lambda _: False)
        return d
    def _get_claim_info(self, name, txid):
        """Assemble a full claim-info dict (metadata, supports, is_mine flag)."""
        def _build_response(claim):
            result = {}
            try:
                metadata = Metadata(json.loads(claim['value']))
                meta_ver = metadata.version
                sd_hash = metadata['sources']['lbry_sd_hash']
                d = self._save_name_metadata(name, txid, sd_hash)
            except AssertionError:
                # Claim value that doesn't validate as Metadata is passed through raw.
                metadata = claim['value']
                meta_ver = "Non-compliant"
                d = defer.succeed(None)
            claim_id = claim['claimId']
            result['claim_id'] = claim_id
            result['amount'] = claim['nEffectiveAmount']
            result['height'] = claim['nHeight']
            result['name'] = name
            result['txid'] = txid
            result['value'] = metadata
            result['supports'] = [{'txid': support['txid'], 'n': support['n']} for support in claim['supports']]
            result['meta_version'] = meta_ver
            log.info("get claim info lbry://%s metadata: %s, claimid: %s", name, meta_ver, claim_id)
            d.addCallback(lambda _: self.get_name_claims())
            d.addCallback(lambda r: [c['txid'] for c in r])
            d.addCallback(lambda my_claims: _add_is_mine(result, my_claims))
            return d
        def _add_is_mine(response, my_txs):
            response['is_mine'] = response['txid'] in my_txs
            return response
        d = self.get_claimid(name, txid)
        d.addCallback(lambda claim_id: self.get_claim(name, claim_id))
        d.addCallback(_build_response)
        return d
    def get_claims_for_name(self, name):
        d = self._get_claims_for_name(name)
        return d
    def update_metadata(self, new_metadata, old_metadata):
        """Merge new_metadata over old_metadata and validate the result."""
        meta_for_return = old_metadata if isinstance(old_metadata, dict) else {}
        for k in new_metadata:
            meta_for_return[k] = new_metadata[k]
        return defer.succeed(Metadata(meta_for_return))
    def claim_name(self, name, bid, m):
        """Claim (or update/contest) `name` with metadata `m` and bid `bid`;
        fires with the resulting txid."""
        def _save_metadata(txid, metadata):
            log.info("Saving metadata for claim %s" % txid)
            d = self._save_name_metadata(name, txid, metadata['sources']['lbry_sd_hash'])
            d.addCallback(lambda _: txid)
            return d
        def _claim_or_update(claim, metadata, _bid):
            if not claim:
                log.info("No claim yet, making a new one")
                return self._send_name_claim(name, metadata, _bid)
            if not claim['is_mine']:
                log.info("Making a contesting claim")
                return self._send_name_claim(name, metadata, _bid)
            else:
                log.info("Updating over own claim")
                d = self.update_metadata(metadata, claim['value'])
                d.addCallback(lambda new_metadata: self._send_name_claim_update(name, claim['claim_id'], claim['txid'], new_metadata, _bid))
                return d
        meta = Metadata(m)
        d = self.get_claim_info(name)
        d.addCallback(lambda claim: _claim_or_update(claim, meta, bid))
        d.addCallback(lambda txid: _save_metadata(txid, meta))
        return d
    def abandon_name(self, txid):
        """Abandon the claim in `txid`, returning its amount to a fresh address."""
        d1 = self.get_new_address()
        d2 = self.get_claims_from_tx(txid)
        def get_txout_of_claim(claims):
            for claim in claims:
                if 'name' in claim and 'nOut' in claim:
                    return claim['nOut']
            return defer.fail(ValueError("No claims in tx"))
        def get_value_of_txout(nOut):
            d = self._get_raw_tx(txid)
            d.addCallback(self._get_decoded_tx)
            d.addCallback(lambda tx: tx['vout'][nOut]['value'])
            return d
        d2.addCallback(get_txout_of_claim)
        d2.addCallback(get_value_of_txout)
        dl = defer.DeferredList([d1, d2], consumeErrors=True)
        def abandon(results):
            if results[0][0] and results[1][0]:
                address = results[0][1]
                amount = float(results[1][1])
                return self._send_abandon(txid, address, amount)
            elif results[0][0] is False:
                return defer.fail(Failure(ValueError("Couldn't get a new address")))
            else:
                # Propagate the claim-lookup failure.
                return results[1][1]
        dl.addCallback(abandon)
        return dl
    def support_claim(self, name, claim_id, amount):
        return self._support_claim(name, claim_id, amount)
    def get_tx(self, txid):
        d = self._get_raw_tx(txid)
        d.addCallback(self._get_decoded_tx)
        return d
    def get_history(self):
        d = self._get_history()
        return d
    def get_tx_json(self, txid):
        """Fetch and deserialize a transaction, converting Decimal satoshi
        amounts into float coin amounts for JSON-friendliness."""
        def _decode(raw_tx):
            tx = Transaction(raw_tx).deserialize()
            decoded_tx = {}
            for txkey in tx.keys():
                if isinstance(tx[txkey], list):
                    decoded_tx[txkey] = []
                    for i in tx[txkey]:
                        tmp = {}
                        for k in i.keys():
                            if isinstance(i[k], Decimal):
                                tmp[k] = float(i[k] / 1e8)
                            else:
                                tmp[k] = i[k]
                        decoded_tx[txkey].append(tmp)
                else:
                    decoded_tx[txkey] = tx[txkey]
            return decoded_tx
        d = self._get_raw_tx(txid)
        d.addCallback(_decode)
        return d
    def get_name_and_validity_for_sd_hash(self, sd_hash):
        d = self._get_claim_metadata_for_sd_hash(sd_hash)
        d.addCallback(lambda name_txid: self._get_status_of_claim(name_txid[1], name_txid[0], sd_hash) if name_txid is not None else None)
        return d
    def get_available_balance(self):
        return float(self.wallet_balance - self.total_reserved_points)
    def is_first_run(self):
        """Fires True on the wallet's first ever run; result is cached."""
        if self._first_run == self._FIRST_RUN_UNKNOWN:
            d = self._check_first_run()
            def set_first_run(is_first):
                self._first_run = self._FIRST_RUN_YES if is_first else self._FIRST_RUN_NO
            d.addCallback(set_first_run)
        else:
            d = defer.succeed(self._FIRST_RUN_YES if self._first_run else self._FIRST_RUN_NO)
        d.addCallback(lambda _: self._first_run == self._FIRST_RUN_YES)
        return d
    def _get_status_of_claim(self, txid, name, sd_hash):
        """Classify the claim in txid for (name, sd_hash) as valid / invalid /
        pending / unconfirmed, or None if it can't be matched."""
        d = self.get_claims_from_tx(txid)
        def get_status(claims):
            if claims is None:
                claims = []
            for claim in claims:
                if 'in claim trie' in claim:
                    if 'name' in claim and str(claim['name']) == name and 'value' in claim:
                        try:
                            value_dict = json.loads(claim['value'])
                        except (ValueError, TypeError):
                            return None
                        claim_sd_hash = None
                        if 'stream_hash' in value_dict:
                            claim_sd_hash = str(value_dict['stream_hash'])
                        if 'sources' in value_dict and 'lbrynet_sd_hash' in value_dict['sources']:
                            claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
                        if claim_sd_hash is not None and claim_sd_hash == sd_hash:
                            if 'is controlling' in claim and claim['is controlling']:
                                return name, "valid"
                            if claim['in claim trie']:
                                return name, "invalid"
                            if 'in queue' in claim and claim['in queue']:
                                return name, "pending"
                            return name, "unconfirmed"
            return None
        d.addCallback(get_status)
        return d
    def _check_expected_balances(self):
        """Check addresses whose expected payments have come due; adjust peer
        scores based on whether the points actually arrived."""
        now = datetime.datetime.now()
        balances_to_check = []
        try:
            while self.expected_balance_at_time[0][3] < now:
                balances_to_check.append(self.expected_balance_at_time.popleft())
        except IndexError:
            pass
        ds = []
        for balance_to_check in balances_to_check:
            log.info("Checking balance of address %s", str(balance_to_check[1]))
            d = self._get_balance_for_address(balance_to_check[1])
            d.addCallback(lambda bal: bal >= balance_to_check[2])
            ds.append(d)
        dl = defer.DeferredList(ds)
        def handle_checks(results):
            from future_builtins import zip
            for balance, (success, result) in zip(balances_to_check, results):
                peer = balance[0]
                if success is True:
                    if result is False:
                        if balance[4] <= 1:  # first or second strike, give them another chance
                            new_expected_balance = (balance[0],
                                                    balance[1],
                                                    balance[2],
                                                    datetime.datetime.now() + self.max_expected_payment_time,
                                                    balance[4] + 1,
                                                    balance[5])
                            self.expected_balance_at_time.append(new_expected_balance)
                            peer.update_score(-5.0)
                        else:
                            peer.update_score(-50.0)
                    else:
                        if balance[4] == 0:
                            peer.update_score(balance[5])
                        peer.update_stats('points_received', balance[5])
                else:
                    log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                                "expected balance: %s, expected time: %s, count: %s, error: %s",
                                str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                                str(balance[4]), str(result.getErrorMessage()))
        dl.addCallback(handle_checks)
        return dl
    def _open_db(self):
        """Open (creating if needed) the local name/claim-id cache database."""
        self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
                                        check_same_thread=False)
        def create_tables(transaction):
            transaction.execute("create table if not exists name_metadata (" +
                                "    name text, " +
                                "    txid text, " +
                                "    sd_hash text)")
            transaction.execute("create table if not exists claim_ids (" +
                                "    claimId text, " +
                                "    name text, " +
                                "    txid text)")
        return self.db.runInteraction(create_tables)
    def _clean_bad_records(self):
        # Drop rows whose txid is malformed (longer than 64 chars) or missing.
        d = self.db.runQuery("delete from name_metadata where length(txid) > 64 or txid is null")
        return d
    def _save_name_metadata(self, name, txid, sd_hash):
        assert len(txid) == 64, "That's not a txid: %s" % str(txid)
        # Delete-then-insert keeps the (name, txid, sd_hash) row unique.
        d = self.db.runQuery("delete from name_metadata where name=? and txid=? and sd_hash=?", (name, txid, sd_hash))
        d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?)", (name, txid, sd_hash)))
        return d
    def _get_claim_metadata_for_sd_hash(self, sd_hash):
        d = self.db.runQuery("select name, txid from name_metadata where sd_hash=?", (sd_hash,))
        d.addCallback(lambda r: r[0] if r else None)
        return d
    def _update_claimid(self, claim_id, name, txid):
        assert len(txid) == 64, "That's not a txid: %s" % str(txid)
        d = self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=?", (claim_id, name, txid))
        d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?)", (claim_id, name, txid)))
        d.addCallback(lambda _: claim_id)
        return d
    def _get_claimid_for_tx(self, name, txid):
        assert len(txid) == 64, "That's not a txid: %s" % str(txid)
        d = self.db.runQuery("select claimId from claim_ids where name=? and txid=?", (name, txid))
        d.addCallback(lambda r: r[0][0] if r else None)
        return d
    ######### Must be overridden #########
    def get_balance(self):
        return defer.fail(NotImplementedError())
    def get_new_address(self):
        return defer.fail(NotImplementedError())
    def get_block(self, blockhash):
        return defer.fail(NotImplementedError())
    def get_most_recent_blocktime(self):
        return defer.fail(NotImplementedError())
    def get_best_blockhash(self):
        return defer.fail(NotImplementedError())
    def get_name_claims(self):
        return defer.fail(NotImplementedError())
    def _get_claims_for_name(self, name):
        return defer.fail(NotImplementedError())
    def _check_first_run(self):
        return defer.fail(NotImplementedError())
    def _get_raw_tx(self, txid):
        return defer.fail(NotImplementedError())
    def _send_name_claim(self, name, val, amount):
        return defer.fail(NotImplementedError())
    def _get_decoded_tx(self, raw_tx):
        return defer.fail(NotImplementedError())
    def _send_abandon(self, txid, address, amount):
        return defer.fail(NotImplementedError())
    def _send_name_claim_update(self, name, claim_id, txid, value, amount):
        return defer.fail(NotImplementedError())
    def _support_claim(self, name, claim_id, amount):
        return defer.fail(NotImplementedError())
    def _do_send_many(self, payments_to_send):
        return defer.fail(NotImplementedError())
    def _get_value_for_name(self, name):
        return defer.fail(NotImplementedError())
    def get_claims_from_tx(self, txid):
        return defer.fail(NotImplementedError())
    def _get_balance_for_address(self, address):
        return defer.fail(NotImplementedError())
    def _get_history(self):
        return defer.fail(NotImplementedError())
    def _start(self):
        # Optional backend startup hook.
        pass
    def _stop(self):
        # Optional backend shutdown hook.
        pass
class LBRYcrdWallet(LBRYWallet):
def __init__(self, db_dir, wallet_dir=None, wallet_conf=None, lbrycrdd_path=None):
    """Wallet backed by a (possibly self-started) lbrycrdd JSON-RPC daemon.

    db_dir: directory for the local blockchain-name database.
    wallet_dir: lbrycrdd data directory (used when launching lbrycrdd).
    wallet_conf: path to lbrycrd.conf, parsed for RPC credentials.
    lbrycrdd_path: if set, this wallet launches/stops lbrycrdd itself.
    """
    LBRYWallet.__init__(self, db_dir)
    self.started_lbrycrdd = False
    self.wallet_dir = wallet_dir
    self.wallet_conf = wallet_conf
    self.lbrycrdd = None
    self.lbrycrdd_path = lbrycrdd_path
    # RPC credentials come from the conf file, with lbrycrd defaults as fallback.
    settings = self._get_rpc_conf()
    rpc_user = settings["username"]
    rpc_pass = settings["password"]
    rpc_port = settings["rpc_port"]
    rpc_url = "127.0.0.1"
    self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port))
def _start(self):
    # Connect (and possibly launch lbrycrdd) off the reactor thread.
    return threads.deferToThread(self._make_connection)
def _stop(self):
    # Only shut down lbrycrdd if this wallet may have started it.
    if self.lbrycrdd_path is not None:
        return self._stop_daemon()
def _make_connection(self):
    """Ensure lbrycrdd is running and reachable; raises if the RPC check fails."""
    alert.info("Connecting to lbrycrdd...")
    if self.lbrycrdd_path is not None:
        self._start_daemon()
    self._get_info_rpc()
    log.info("Connected!")
    alert.info("Connected to lbrycrdd.")
def _get_rpc_conf(self):
    """Read RPC credentials and port from the lbrycrd conf file.

    Returns a dict with 'username', 'password' and 'rpc_port', falling back
    to lbrycrd's defaults for any value the conf file does not set.
    """
    settings = {"username": "rpcuser",
                "password": "rpcpassword",
                "rpc_port": 9245}
    if self.wallet_conf and os.path.exists(self.wallet_conf):
        # 'with' ensures the conf file handle is closed; the old code
        # opened it and never closed it.
        with open(self.wallet_conf) as conf:
            for l in conf:
                if l.startswith("rpcuser="):
                    settings["username"] = l[8:].rstrip('\n')
                if l.startswith("rpcpassword="):
                    settings["password"] = l[12:].rstrip('\n')
                if l.startswith("rpcport="):
                    settings["rpc_port"] = int(l[8:].rstrip('\n'))
    return settings
def _check_first_run(self):
    # Heuristic: a wallet with zero balance and at most one address has never
    # been used. A non-zero balance short-circuits to 2, which fails the
    # `num_addresses <= 1` test below.
    d = self.get_balance()
    d.addCallback(lambda bal: threads.deferToThread(self._get_num_addresses_rpc) if bal == 0 else 2)
    d.addCallback(lambda num_addresses: True if num_addresses <= 1 else False)
    return d
# Backend hooks: each defers a blocking *_rpc helper to the thread pool so the
# reactor thread is never blocked on a JSON-RPC round trip.
def get_new_address(self):
    return threads.deferToThread(self._get_new_address_rpc)
def get_balance(self):
    return threads.deferToThread(self._get_wallet_balance_rpc)
def get_most_recent_blocktime(self):
    # best blockhash -> block -> its 'time' field (or a failure if absent).
    d = threads.deferToThread(self._get_best_blockhash_rpc)
    d.addCallback(lambda blockhash: threads.deferToThread(self._get_block_rpc, blockhash))
    d.addCallback(
        lambda block: block['time'] if 'time' in block else Failure(ValueError("Could not get a block time")))
    return d
def get_name_claims(self):
    return threads.deferToThread(self._get_name_claims_rpc)
def get_block(self, blockhash):
    return threads.deferToThread(self._get_block_rpc, blockhash)
def get_best_blockhash(self):
    d = threads.deferToThread(self._get_blockchain_info_rpc)
    d.addCallback(lambda blockchain_info: blockchain_info['bestblockhash'])
    return d
def get_nametrie(self):
    return threads.deferToThread(self._get_nametrie_rpc)
def start_miner(self):
    # Only flips generation on if it isn't already running.
    d = threads.deferToThread(self._get_gen_status_rpc)
    d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, True) if not status
                  else "Miner was already running")
    return d
def stop_miner(self):
    d = threads.deferToThread(self._get_gen_status_rpc)
    d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, False) if status
                  else "Miner wasn't running")
    return d
def get_miner_status(self):
    return threads.deferToThread(self._get_gen_status_rpc)
def _get_balance_for_address(self, address):
    return threads.deferToThread(self._get_balance_for_address_rpc, address)
def _do_send_many(self, payments_to_send):
    # sendmany wants plain floats, not Decimals.
    outputs = {address: float(points) for address, points in payments_to_send.iteritems()}
    return threads.deferToThread(self._do_send_many_rpc, outputs)
def _send_name_claim(self, name, value, amount):
    return threads.deferToThread(self._send_name_claim_rpc, name, value, amount)
def _get_raw_tx(self, txid):
    return threads.deferToThread(self._get_raw_tx_rpc, txid)
def _get_decoded_tx(self, raw_tx):
    return threads.deferToThread(self._get_decoded_tx_rpc, raw_tx)
def _send_abandon(self, txid, address, amount):
    return threads.deferToThread(self._send_abandon_rpc, txid, address, amount)
def _send_name_claim_update(self, name, claim_id, txid, value, amount):
    return threads.deferToThread(self._update_name_rpc, txid, value, amount)
def _support_claim(self, name, claim_id, amount):
    return threads.deferToThread(self._support_claim_rpc, name, claim_id, amount)
def _get_claims_for_name(self, name):
    return threads.deferToThread(self._get_claims_for_name_rpc, name)
def get_claims_from_tx(self, txid):
    return threads.deferToThread(self._get_claims_from_tx_rpc, txid)
def _get_value_for_name(self, name):
    return threads.deferToThread(self._get_value_for_name_rpc, name)
def _get_history(self):
    return threads.deferToThread(self._list_transactions_rpc)
def _get_rpc_conn(self):
    # A fresh proxy per call; AuthServiceProxy is not shared across threads.
    return AuthServiceProxy(self.rpc_conn_string)
def _start_daemon(self):
    """Launch lbrycrdd if it isn't already running, then wait (with exponential
    backoff, up to 6 tries) until its RPC interface answers.

    Raises ValueError if lbrycrdd can't be launched or never becomes reachable.
    """
    tries = 0
    try:
        rpc_conn = self._get_rpc_conn()
        try:
            rpc_conn.getinfo()
        except ValueError:
            log.exception('Failed to get rpc info. Rethrowing with a hopefully more useful error message')
            raise Exception('Failed to get rpc info from lbrycrdd. Try restarting lbrycrdd')
        log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
        return
    except (socket.error, JSONRPCException):
        tries += 1
        log.info("lbrcyrdd was not running when LBRYcrdWallet was started. Attempting to start it.")
    try:
        if os.name == "nt":
            # BUG FIX: STARTUPINFO must be instantiated. The old code assigned
            # the flags onto the class object itself, which both mutated the
            # class globally and never actually hid the console window.
            si = subprocess.STARTUPINFO()
            si.dwFlags = subprocess.STARTF_USESHOWWINDOW
            si.wShowWindow = subprocess.SW_HIDE
            self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                              "-conf=%s" % self.wallet_conf], startupinfo=si)
        else:
            if sys.platform == 'darwin':
                os.chdir("/Applications/LBRY.app/Contents/Resources")
            self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                              "-conf=%s" % self.wallet_conf])
        self.started_lbrycrdd = True
    except OSError:
        import traceback
        log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
        raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)
    while tries < 6:
        try:
            rpc_conn = self._get_rpc_conn()
            rpc_conn.getinfo()
            break
        except (socket.error, JSONRPCException):
            tries += 1
            log.warning("Failed to connect to lbrycrdd.")
            if tries < 6:
                time.sleep(2 ** tries)
                log.warning("Trying again in %d seconds", 2 ** tries)
            else:
                log.warning("Giving up.")
    else:
        # Loop exhausted without a successful connection.
        self.lbrycrdd.terminate()
        raise ValueError("Couldn't open lbrycrdd")
def _stop_daemon(self):
    """Stop lbrycrdd, but only if this wallet actually started it."""
    if self.lbrycrdd is not None and self.started_lbrycrdd is True:
        alert.info("Stopping lbrycrdd...")
        d = threads.deferToThread(self._stop_rpc)
        d.addCallback(lambda _: alert.info("Stopped lbrycrdd."))
        return d
    return defer.succeed(True)
# Blocking JSON-RPC helpers. Each opens a fresh proxy connection and is wrapped
# by _catch_connection_error so socket failures become user-facing ValueErrors.
@_catch_connection_error
def _get_balance_for_address_rpc(self, address):
    rpc_conn = self._get_rpc_conn()
    balance = rpc_conn.getreceivedbyaddress(address)
    log.debug("received balance for %s: %s", str(address), str(balance))
    return balance
@_catch_connection_error
def _do_send_many_rpc(self, payments):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.sendmany("", payments)
@_catch_connection_error
def _get_info_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getinfo()
@_catch_connection_error
def _get_name_claims_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.listnameclaims()
@_catch_connection_error
def _get_gen_status_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getgenerate()
@_catch_connection_error
def _set_gen_status_rpc(self, b):
    if b:
        log.info("Starting miner")
    else:
        log.info("Stopping miner")
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.setgenerate(b)
@_catch_connection_error
def _get_raw_tx_rpc(self, txid):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getrawtransaction(txid)
@_catch_connection_error
def _get_decoded_tx_rpc(self, raw):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.decoderawtransaction(raw)
@_catch_connection_error
def _send_abandon_rpc(self, txid, address, amount):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.abandonclaim(txid, address, amount)
@_catch_connection_error
def _get_blockchain_info_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getblockchaininfo()
@_catch_connection_error
def _get_block_rpc(self, blockhash):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getblock(blockhash)
@_catch_connection_error
def _get_claims_from_tx_rpc(self, txid):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getclaimsfortx(txid)
@_catch_connection_error
def _get_claims_for_name_rpc(self, name):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getclaimsforname(name)
@_catch_connection_error
def _get_nametrie_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getclaimtrie()
@_catch_connection_error
def _get_wallet_balance_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getbalance("")
@_catch_connection_error
def _get_new_address_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getnewaddress()
@_catch_connection_error
def _get_value_for_name_rpc(self, name):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getvalueforname(name)
@_catch_connection_error
def _update_name_rpc(self, txid, value, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.updateclaim(txid, json.dumps(value), amount)
@_catch_connection_error
def _send_name_claim_rpc(self, name, value, amount):
rpc_conn = self._get_rpc_conn()
try:
return str(rpc_conn.claimname(name, json.dumps(value), amount))
except JSONRPCException as e:
if 'message' in e.error and e.error['message'] == "Insufficient funds":
raise InsufficientFundsError()
elif 'message' in e.error:
raise ValueError(e.error['message'])
@_catch_connection_error
def _support_claim_rpc(self, name, claim_id, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.supportclaim(name, claim_id, amount)
@_catch_connection_error
def _get_num_addresses_rpc(self):
rpc_conn = self._get_rpc_conn()
return len(rpc_conn.getaddressesbyaccount(""))
@_catch_connection_error
def _get_best_blockhash_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getbestblockhash()
@_catch_connection_error
def _list_transactions_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.listtransactions()
    @_catch_connection_error
    def _stop_rpc(self):
        """Ask lbrycrdd to shut down over RPC and wait for the process to exit."""
        # check if our lbrycrdd is actually running, or if we connected to one that was already
        # running and ours failed to start
        if self.lbrycrdd.poll() is None:
            rpc_conn = self._get_rpc_conn()
            rpc_conn.stop()
            self.lbrycrdd.wait()
class LBRYumWallet(LBRYWallet):
    """LBRYWallet implementation backed by the lbryum SPV client.

    Talks to the network through a lbryum ``Network`` object and performs
    wallet operations via lbryum ``Commands``, deferring blocking calls
    to threads.
    """

    def __init__(self, db_dir):
        LBRYWallet.__init__(self, db_dir)
        self.config = None
        self.network = None
        self.wallet = None
        self.cmd_runner = None
        self.first_run = False
        self.printed_retrieving_headers = False
        self._start_check = None       # LoopingCall polling for network startup
        self._catch_up_check = None    # LoopingCall polling blockchain catch-up
        self._caught_up_counter = 0
        self._lag_counter = 0
        self.blocks_behind_alert = 0
        self.catchup_progress = 0
        self.max_behind = 0

    def _start(self):
        """Start the lbryum network, wait for a connection, then load the wallet."""
        network_start_d = defer.Deferred()

        def setup_network():
            self.config = SimpleConfig({'auto_connect': True})
            self.network = Network(self.config)
            alert.info("Loading the wallet...")
            return defer.succeed(self.network.start())

        d = setup_network()

        def check_started():
            # polled every 0.1s until the network connects or gives up
            if self.network.is_connecting():
                if not self.printed_retrieving_headers and self.network.blockchain.retrieving_headers:
                    alert.info("Running the wallet for the first time...this may take a moment.")
                    self.printed_retrieving_headers = True
                return False
            self._start_check.stop()
            self._start_check = None
            if self.network.is_connected():
                network_start_d.callback(True)
            else:
                network_start_d.errback(ValueError("Failed to connect to network."))

        self._start_check = task.LoopingCall(check_started)
        d.addCallback(lambda _: self._start_check.start(.1))
        d.addCallback(lambda _: network_start_d)
        d.addCallback(lambda _: self._load_wallet())
        d.addCallback(lambda _: self._get_cmd_runner())
        return d

    def _stop(self):
        """Stop the looping checks and the network; fires once disconnected."""
        if self._start_check is not None:
            self._start_check.stop()
            self._start_check = None
        if self._catch_up_check is not None:
            self._catch_up_check.stop()
            self._catch_up_check = None
        d = defer.Deferred()

        def check_stopped():
            if self.network:
                if self.network.is_connected():
                    return False
            stop_check.stop()
            self.network = None
            d.callback(True)

        if self.network:
            self.network.stop()

        stop_check = task.LoopingCall(check_stopped)
        stop_check.start(.1)
        return d

    def _load_wallet(self):
        """Open (or create on first run) the wallet and wait for chain catch-up."""

        def get_wallet():
            path = self.config.get_wallet_path()
            storage = WalletStorage(path)
            wallet = Wallet(storage)
            if not storage.file_exists:
                # brand new wallet: generate a seed and the default account
                self.first_run = True
                seed = wallet.make_seed()
                wallet.add_seed(seed, None)
                wallet.create_master_keys(None)
                wallet.create_main_account()
                wallet.synchronize()
            self.wallet = wallet

        blockchain_caught_d = defer.Deferred()

        def check_caught_up():
            local_height = self.network.get_catchup_progress()
            remote_height = self.network.get_server_height()

            if remote_height != 0 and remote_height - local_height <= 5:
                # within 5 blocks of the server: consider ourselves caught up
                msg = ""
                if self._caught_up_counter != 0:
                    msg += "All caught up. "
                msg += "Wallet loaded."
                alert.info(msg)
                self._catch_up_check.stop()
                self._catch_up_check = None
                blockchain_caught_d.callback(True)
            elif remote_height != 0:
                past_blocks_behind = self.blocks_behind_alert
                self.blocks_behind_alert = remote_height - local_height
                if self.blocks_behind_alert < past_blocks_behind:
                    # making progress again: reset the lag counter
                    self._lag_counter = 0
                    # NOTE(review): is_lagging is first assigned here, not in
                    # __init__ -- reading it before catch-up starts would raise.
                    self.is_lagging = False
                else:
                    self._lag_counter += 1
                    if self._lag_counter >= 900:
                        # ~90 seconds of 0.1s polls with no progress
                        self.is_lagging = True
                if self.blocks_behind_alert > self.max_behind:
                    self.max_behind = self.blocks_behind_alert
                # NOTE(review): under Python 2 this is integer division and
                # blocks_behind_alert <= max_behind < 5 + max_behind, so the
                # ratio floors to 0 and catchup_progress stays 0 -- confirm
                # whether a float division was intended.
                self.catchup_progress = int(100 * (self.blocks_behind_alert / (5 + self.max_behind)))
                if self._caught_up_counter == 0:
                    alert.info('Catching up with the blockchain...showing blocks left...')
                if self._caught_up_counter % 30 == 0:
                    alert.info('%d...', (remote_height - local_height))
                self._caught_up_counter += 1

        self._catch_up_check = task.LoopingCall(check_caught_up)

        d = threads.deferToThread(get_wallet)
        d.addCallback(self._save_wallet)
        d.addCallback(lambda _: self.wallet.start_threads(self.network))
        d.addCallback(lambda _: self._catch_up_check.start(.1))
        d.addCallback(lambda _: blockchain_caught_d)
        return d

    def _get_cmd_runner(self):
        """Create the lbryum Commands object used by the RPC-style helpers below."""
        self.cmd_runner = Commands(self.config, self.wallet, self.network)

    def get_balance(self):
        """Return the wallet balance as a Decimal (unmatured if present, else confirmed)."""
        cmd = known_commands['getbalance']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func)
        d.addCallback(lambda result: result['unmatured'] if 'unmatured' in result else result['confirmed'])
        d.addCallback(Decimal)
        return d

    def get_new_address(self):
        """Create a new receiving address, persisting the wallet before returning it."""
        d = threads.deferToThread(self.wallet.create_new_address)
        d.addCallback(self._save_wallet)
        return d

    def get_block(self, blockhash):
        """Fetch the block identified by *blockhash* via lbryum."""
        cmd = known_commands['getblock']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, blockhash)

    def get_most_recent_blocktime(self):
        """Return the timestamp of the best locally known block header."""
        header = self.network.get_header(self.network.get_local_height())
        return defer.succeed(header['timestamp'])

    def get_best_blockhash(self):
        """Return the hash of the best locally known block header."""
        height = self.network.get_local_height()
        d = threads.deferToThread(self.network.blockchain.read_header, height)
        d.addCallback(lambda header: self.network.blockchain.hash_header(header))
        return d

    def get_name_claims(self):
        """List this wallet's name claims."""
        cmd = known_commands['getnameclaims']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def _check_first_run(self):
        # set by _load_wallet when a brand new wallet file was created
        return defer.succeed(self.first_run)

    def _get_raw_tx(self, txid):
        """Fetch the raw transaction for *txid*."""
        cmd = known_commands['gettransaction']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, txid)

    def _send_name_claim(self, name, val, amount):
        """Claim *name* with JSON-encoded *val* from a fresh address and broadcast."""

        def send_claim(address):
            cmd = known_commands['claimname']
            func = getattr(self.cmd_runner, cmd.name)
            return threads.deferToThread(func, address, amount, name, json.dumps(val))

        d = self.get_new_address()
        d.addCallback(send_claim)
        d.addCallback(self._broadcast_transaction)
        return d

    def _get_claims_for_name(self, name):
        """Return all competing claims for *name*."""
        cmd = known_commands['getclaimsforname']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, name)

    def _send_name_claim_update(self, name, claim_id, txid, value, amount):
        """Update an existing claim from a fresh address and broadcast the result."""

        def send_claim_update(address):
            # lbryum expects the claim id as reversed raw bytes
            decoded_claim_id = claim_id.decode('hex')[::-1]
            metadata = json.dumps(value)
            log.info("updateclaim %s %s %f %s %s '%s'", txid, address, amount, name, decoded_claim_id.encode('hex'), metadata)
            cmd = known_commands['updateclaim']
            func = getattr(self.cmd_runner, cmd.name)
            return threads.deferToThread(func, txid, address, amount, name, decoded_claim_id, metadata)

        d = self.get_new_address()
        d.addCallback(send_claim_update)
        d.addCallback(self._broadcast_transaction)
        return d

    def _get_decoded_tx(self, raw_tx):
        """Decode *raw_tx* locally, returning only the output values in LBC."""
        tx = Transaction(raw_tx)
        decoded_tx = {}
        decoded_tx['vout'] = []
        for output in tx.outputs():
            out = {}
            out['value'] = Decimal(output[2]) / Decimal(COIN)
            decoded_tx['vout'].append(out)
        return decoded_tx

    def _send_abandon(self, txid, address, amount):
        """Abandon the claim in *txid* and broadcast the abandon transaction."""
        log.info("Abandon %s %s %f" % (txid, address, amount))
        cmd = known_commands['abandonclaim']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func, txid, address, amount)
        d.addCallback(self._broadcast_transaction)
        return d

    def _support_claim(self, name, claim_id, amount):
        """Support an existing claim from a fresh address and broadcast."""

        def _send_support(d, a, n, c):
            # NOTE(review): these one-letter parameters are bound as
            # (address, amount, name, claim_id) by the caller below and the
            # local `d` shadows the outer deferred -- confirm the argument
            # order matches lbryum's supportclaim command.
            cmd = known_commands['supportclaim']
            func = getattr(self.cmd_runner, cmd.name)
            d = threads.deferToThread(func, d, a, n, c)
            return d

        d = self.get_new_address()
        d.addCallback(lambda address: _send_support(address, amount, name, claim_id))
        d.addCallback(self._broadcast_transaction)
        return d

    def _broadcast_transaction(self, raw_tx):
        """Broadcast *raw_tx*; a non-txid (not 64 chars) response is treated as rejection."""

        def _log_tx(r):
            log.info("Broadcast tx: %s", r)
            return r

        cmd = known_commands['broadcast']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func, raw_tx)
        d.addCallback(_log_tx)
        d.addCallback(lambda r: r if len(r) == 64 else defer.fail(Exception("Transaction rejected")))
        d.addCallback(self._save_wallet)
        return d

    def _do_send_many(self, payments_to_send):
        """Pay several addresses in a single transaction."""
        log.warning("Doing send many. payments to send: %s", str(payments_to_send))
        cmd = known_commands['paytomanyandsend']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, payments_to_send.iteritems())

    def _get_value_for_name(self, name):
        """Return the winning claim value for *name*."""
        cmd = known_commands['getvalueforname']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, name)

    def get_claims_from_tx(self, txid):
        """Return the claims contained in transaction *txid*."""
        cmd = known_commands['getclaimsfromtx']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, txid)

    def _get_balance_for_address(self, address):
        # wallet tracks satoshis; convert to LBC
        return defer.succeed(Decimal(self.wallet.get_addr_received(address))/COIN)

    def get_nametrie(self):
        """Return the full claim trie."""
        cmd = known_commands['getclaimtrie']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def _get_history(self):
        """Return the wallet's transaction history."""
        cmd = known_commands['history']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def get_pub_keys(self, wallet):
        """Return the public keys for *wallet*."""
        cmd = known_commands['getpubkeys']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, wallet)

    def _save_wallet(self, val):
        """Persist the wallet file, passing *val* through for callback chaining."""
        d = threads.deferToThread(self.wallet.storage.write)
        d.addCallback(lambda _: val)
        return d
class LBRYcrdAddressRequester(object):
    """Requests a payment address from each peer, at most once per protocol."""
    implements([IRequestCreator])

    def __init__(self, wallet):
        self.wallet = wallet
        self._protocols = []  # protocols we have already asked for an address

    ######### IRequestCreator #########

    def send_next_request(self, peer, protocol):
        """Send an address request on *protocol* unless one was already sent.

        Returns a deferred firing True when a request was sent, False otherwise.
        """
        if not protocol in self._protocols:
            r = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address')
            d = protocol.add_request(r)
            d.addCallback(self._handle_address_response, peer, r, protocol)
            d.addErrback(self._request_failed, peer)
            self._protocols.append(protocol)
            return defer.succeed(True)
        else:
            return defer.succeed(False)

    ######### internal calls #########

    def _handle_address_response(self, response_dict, peer, request, protocol):
        # record the address the peer wants to be paid at
        assert request.response_identifier in response_dict, \
            "Expected %s in dict but did not get it" % request.response_identifier
        assert protocol in self._protocols, "Responding protocol is not in our list of protocols"
        address = response_dict[request.response_identifier]
        self.wallet.update_peer_address(peer, address)

    def _request_failed(self, err, peer):
        # cancellations are expected; anything else is logged and propagated
        if not err.check(RequestCanceledError):
            log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                        err.getErrorMessage(), str(peer))
            return err
class LBRYcrdAddressQueryHandlerFactory(object):
    """Builds LBRYcrdAddressQueryHandler instances bound to a wallet."""
    implements(IQueryHandlerFactory)

    def __init__(self, wallet):
        self.wallet = wallet

    ######### IQueryHandlerFactory #########

    def build_query_handler(self):
        """Return a new query handler sharing this factory's wallet."""
        return LBRYcrdAddressQueryHandler(self.wallet)

    def get_primary_query_identifier(self):
        """Identifier this handler answers to."""
        return 'lbrycrd_address'

    def get_description(self):
        """Human-readable description of the query."""
        return "LBRYcrd Address - an address for receiving payments via LBRYcrd"
class LBRYcrdAddressQueryHandler(object):
    """Answers 'lbrycrd_address' queries with a fresh address for the peer."""
    implements(IQueryHandler)

    def __init__(self, wallet):
        self.wallet = wallet
        self.query_identifiers = ['lbrycrd_address']
        self.address = None  # last address handed out; also marks that we answered
        self.peer = None

    ######### IQueryHandler #########

    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)

    def handle_queries(self, queries):
        """Return {'lbrycrd_address': addr} for an address query.

        Fails with ValueError if no address query arrived and we never
        answered one before; returns an empty dict once an address has
        already been provided.
        """

        def create_response(address):
            self.address = address
            fields = {'lbrycrd_address': address}
            return fields

        if self.query_identifiers[0] in queries:
            d = self.wallet.get_new_address_for_peer(self.peer)
            d.addCallback(create_response)
            return d
        if self.address is None:
            log.warning("Expected a request for an address, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
        else:
            return defer.succeed({})
|
# -*- encoding: utf-8 -*-
"""Tests for vumi.blinkenlights.heartbeat.monitor"""
import time
import json
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from vumi.tests.utils import get_stubbed_worker
from vumi.blinkenlights.heartbeat import publisher
from vumi.blinkenlights.heartbeat import monitor
from vumi.blinkenlights.heartbeat.storage import issue_key
from vumi.utils import generate_worker_id
def expected_wkr_dict():
    """Worker dict these tests expect Worker.to_dict() to produce."""
    return {
        'id': 'system-1:foo',
        'name': 'foo',
        'system_id': 'system-1',
        'min_procs': 1,
        'hosts': [{'host': 'host-1', 'proc_count': 1}],
    }
def expected_sys_dict():
    """System dict these tests expect System.to_dict() to produce."""
    return {
        'name': 'system-1',
        'id': 'system-1',
        'timestamp': 435,
        'workers': [expected_wkr_dict()],
    }
class TestWorkerInstance(TestCase):

    def test_create(self):
        worker = monitor.WorkerInstance('foo', 34)
        self.assertEqual(worker.hostname, 'foo')
        self.assertEqual(worker.pid, 34)

    def test_equiv(self):
        # assertNotEqual replaces the deprecated failIfEqual alias
        self.assertEqual(monitor.WorkerInstance('foo', 34),
                         monitor.WorkerInstance('foo', 34))
        self.assertNotEqual(monitor.WorkerInstance('foo', 4),
                            monitor.WorkerInstance('foo', 34))
        self.assertNotEqual(monitor.WorkerInstance('fo', 34),
                            monitor.WorkerInstance('foo', 34))

    def test_hash(self):
        # equal instances hash equal; changing pid or hostname changes the hash
        worker1 = monitor.WorkerInstance('foo', 34)
        worker2 = monitor.WorkerInstance('foo', 34)
        worker3 = monitor.WorkerInstance('foo', 35)
        worker4 = monitor.WorkerInstance('bar', 34)
        self.assertEqual(hash(worker1), hash(worker2))
        self.assertNotEqual(hash(worker1), hash(worker3))
        self.assertNotEqual(hash(worker1), hash(worker4))
class TestWorker(TestCase):

    def test_to_dict(self):
        # serialization should match the canonical expected dict
        worker = monitor.Worker('system-1', 'foo', 1)
        worker.reset()
        worker.record('host-1', 34)
        self.assertEqual(worker.to_dict(), expected_wkr_dict())

    def test_compute_host_info(self):
        # two pids recorded on the same host -> proc count of 2
        worker = monitor.Worker('system-1', 'foo', 1)
        worker.reset()
        worker.record('host-1', 34)
        worker.record('host-1', 546)
        host_counts = worker._compute_host_info(worker._instances)
        self.assertEqual(host_counts['host-1'], 2)
class TestSystem(TestCase):

    def test_to_dict(self):
        # a System serializes to the expected dict (timestamp pinned for comparison)
        worker = monitor.Worker('system-1', 'foo', 1)
        system = monitor.System('system-1', 'system-1', [worker])
        worker.reset()
        worker.record('host-1', 34)
        serialized = system.to_dict()
        serialized['timestamp'] = 435
        self.assertEqual(serialized, expected_sys_dict())
class TestHeartBeatMonitor(TestCase):
    # Exercises HeartBeatMonitor message processing, auditing, and
    # redis persistence against a stubbed worker with fake redis.
    #
    # Method docstrings are written as comments so that nose reports
    # the test names instead of the docstrings, matching the rest of
    # the suite.

    def setUp(self):
        config = {
            'deadline': 30,
            'redis_manager': {
                'key_prefix': 'heartbeats',
                'db': 5,
                'FAKE_REDIS': True,
            },
            'monitored_systems': {
                'system-1': {
                    'system_name': 'system-1',
                    'system_id': 'system-1',
                    'workers': {
                        'twitter_transport': {
                            'name': 'twitter_transport',
                            'min_procs': 2,
                        }
                    }
                }
            }
        }
        self.worker = get_stubbed_worker(monitor.HeartBeatMonitor, config)

    def tearDown(self):
        self.worker.stopWorker()

    def gen_fake_attrs(self, timestamp):
        sys_id = 'system-1'
        wkr_name = 'twitter_transport'
        wkr_id = generate_worker_id(sys_id, wkr_name)
        attrs = {
            'version': publisher.HeartBeatMessage.VERSION_20130319,
            'system_id': sys_id,
            'worker_id': wkr_id,
            'worker_name': wkr_name,
            'hostname': "test-host-1",
            'timestamp': timestamp,
            'pid': 345,
        }
        return attrs

    @inlineCallbacks
    def test_update(self):
        # Test the processing of a message.
        yield self.worker.startWorker()
        attrs1 = self.gen_fake_attrs(time.time())
        attrs2 = self.gen_fake_attrs(time.time())
        # process the fake message (and process it twice to verify idempotency)
        self.worker.update(attrs1)
        self.worker.update(attrs1)
        # retrieve the instance set corresponding to the worker_id in the
        # fake message
        wkr = self.worker._workers[attrs1['worker_id']]
        self.assertEqual(len(wkr._instances), 1)
        inst = wkr._instances.pop()
        wkr._instances.add(inst)
        self.assertEqual(inst.hostname, "test-host-1")
        self.assertEqual(inst.pid, 345)
        # now process a message from another instance of the worker
        # and verify that there are two recorded instances
        attrs2['hostname'] = 'test-host-2'
        self.worker.update(attrs2)
        self.assertEqual(len(wkr._instances), 2)

    @inlineCallbacks
    def test_audit_fail(self):
        # here we test the verification of a worker who
        # who had less than min_procs check in
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        wkr_id = attrs['worker_id']
        # process the fake message ()
        self.worker.update(attrs)
        wkr = self.worker._workers[attrs['worker_id']]
        wkr.audit(self.worker._storage)
        # test that an issue was opened
        self.assertEqual(wkr.procs_count, 1)
        key = issue_key(wkr_id)
        issue = json.loads((yield fkredis.get(key)))
        self.assertEqual(issue['issue_type'], 'min-procs-fail')

    @inlineCallbacks
    def test_audit_pass(self):
        # here we test the verification of a worker who
        # who had more than min_procs check in
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        wkr_id = attrs['worker_id']
        # process the fake message ()
        self.worker.update(attrs)
        attrs['pid'] = 2342
        self.worker.update(attrs)
        wkr = self.worker._workers[attrs['worker_id']]
        wkr.audit(self.worker._storage)
        # verify that no issue has been opened
        self.assertEqual(wkr.procs_count, 2)
        key = issue_key(wkr_id)
        issue = yield fkredis.get(key)
        self.assertEqual(issue, None)

    @inlineCallbacks
    def test_prepare_storage(self):
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        self.worker._prepare_storage()
        # Systems
        systems = yield fkredis.smembers('systems')
        self.assertEqual(tuple(systems), ('system-1',))

    @inlineCallbacks
    def test_serialize_to_redis(self):
        # This covers a lot of the serialization methods
        # as well as the _sync_to_storage() function.
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        # process the fake message
        self.worker.update(attrs)
        self.worker._sync_to_storage()
        # this blob is what should be persisted into redis (as JSON)
        expected = {
            u'name': u'system-1',
            u'id': u'system-1',
            u'timestamp': 2,
            u'workers': [{
                u'id': u'system-1:twitter_transport',
                u'name': u'twitter_transport',
                u'system_id': u'system-1',
                u'min_procs': 2,
                u'hosts': [{u'host': u'test-host-1', u'proc_count': 1}]
            }],
        }
        # verify that the system data was persisted correctly
        system = json.loads((yield fkredis.get('system:system-1')))
        system['timestamp'] = 2
        self.assertEqual(system, expected)
Replace test docstrings with comments, because docstrings cause annoyances with nosetest (a commonly used Python test runner that prints out docstrings instead of test names when docstrings exist -- this gets really irritating moments after one first encounters it).
# -*- encoding: utf-8 -*-
"""Tests for vumi.blinkenlights.heartbeat.monitor"""
import time
import json
from twisted.trial.unittest import TestCase
from twisted.internet.defer import inlineCallbacks
from vumi.tests.utils import get_stubbed_worker
from vumi.blinkenlights.heartbeat import publisher
from vumi.blinkenlights.heartbeat import monitor
from vumi.blinkenlights.heartbeat.storage import issue_key
from vumi.utils import generate_worker_id
def expected_wkr_dict():
    """Worker dict these tests expect Worker.to_dict() to produce."""
    return {
        'id': 'system-1:foo',
        'name': 'foo',
        'system_id': 'system-1',
        'min_procs': 1,
        'hosts': [{'host': 'host-1', 'proc_count': 1}],
    }
def expected_sys_dict():
    """System dict these tests expect System.to_dict() to produce."""
    return {
        'name': 'system-1',
        'id': 'system-1',
        'timestamp': 435,
        'workers': [expected_wkr_dict()],
    }
class TestWorkerInstance(TestCase):

    def test_create(self):
        worker = monitor.WorkerInstance('foo', 34)
        self.assertEqual(worker.hostname, 'foo')
        self.assertEqual(worker.pid, 34)

    def test_equiv(self):
        # assertNotEqual replaces the deprecated failIfEqual alias
        self.assertEqual(monitor.WorkerInstance('foo', 34),
                         monitor.WorkerInstance('foo', 34))
        self.assertNotEqual(monitor.WorkerInstance('foo', 4),
                            monitor.WorkerInstance('foo', 34))
        self.assertNotEqual(monitor.WorkerInstance('fo', 34),
                            monitor.WorkerInstance('foo', 34))

    def test_hash(self):
        # equal instances hash equal; changing pid or hostname changes the hash
        worker1 = monitor.WorkerInstance('foo', 34)
        worker2 = monitor.WorkerInstance('foo', 34)
        worker3 = monitor.WorkerInstance('foo', 35)
        worker4 = monitor.WorkerInstance('bar', 34)
        self.assertEqual(hash(worker1), hash(worker2))
        self.assertNotEqual(hash(worker1), hash(worker3))
        self.assertNotEqual(hash(worker1), hash(worker4))
class TestWorker(TestCase):

    def test_to_dict(self):
        # serialization should match the canonical expected dict
        worker = monitor.Worker('system-1', 'foo', 1)
        worker.reset()
        worker.record('host-1', 34)
        self.assertEqual(worker.to_dict(), expected_wkr_dict())

    def test_compute_host_info(self):
        # two pids recorded on the same host -> proc count of 2
        worker = monitor.Worker('system-1', 'foo', 1)
        worker.reset()
        worker.record('host-1', 34)
        worker.record('host-1', 546)
        host_counts = worker._compute_host_info(worker._instances)
        self.assertEqual(host_counts['host-1'], 2)
class TestSystem(TestCase):

    def test_to_dict(self):
        # a System serializes to the expected dict (timestamp pinned for comparison)
        worker = monitor.Worker('system-1', 'foo', 1)
        system = monitor.System('system-1', 'system-1', [worker])
        worker.reset()
        worker.record('host-1', 34)
        serialized = system.to_dict()
        serialized['timestamp'] = 435
        self.assertEqual(serialized, expected_sys_dict())
class TestHeartBeatMonitor(TestCase):
    # Exercises HeartBeatMonitor message processing, auditing, and redis
    # persistence against a stubbed worker with fake redis.  Comments are
    # used instead of docstrings so nose reports test names.

    def setUp(self):
        # one monitored system with a single worker that requires 2 procs
        config = {
            'deadline': 30,
            'redis_manager': {
                'key_prefix': 'heartbeats',
                'db': 5,
                'FAKE_REDIS': True,
            },
            'monitored_systems': {
                'system-1': {
                    'system_name': 'system-1',
                    'system_id': 'system-1',
                    'workers': {
                        'twitter_transport': {
                            'name': 'twitter_transport',
                            'min_procs': 2,
                        }
                    }
                }
            }
        }
        self.worker = get_stubbed_worker(monitor.HeartBeatMonitor, config)

    def tearDown(self):
        self.worker.stopWorker()

    def gen_fake_attrs(self, timestamp):
        # build a well-formed heartbeat message dict for the monitored worker
        sys_id = 'system-1'
        wkr_name = 'twitter_transport'
        wkr_id = generate_worker_id(sys_id, wkr_name)
        attrs = {
            'version': publisher.HeartBeatMessage.VERSION_20130319,
            'system_id': sys_id,
            'worker_id': wkr_id,
            'worker_name': wkr_name,
            'hostname': "test-host-1",
            'timestamp': timestamp,
            'pid': 345,
        }
        return attrs

    @inlineCallbacks
    def test_update(self):
        # Test the processing of a message.
        yield self.worker.startWorker()
        attrs1 = self.gen_fake_attrs(time.time())
        attrs2 = self.gen_fake_attrs(time.time())
        # process the fake message (and process it twice to verify idempotency)
        self.worker.update(attrs1)
        self.worker.update(attrs1)
        # retrieve the instance set corresponding to the worker_id in the
        # fake message
        wkr = self.worker._workers[attrs1['worker_id']]
        self.assertEqual(len(wkr._instances), 1)
        inst = wkr._instances.pop()
        wkr._instances.add(inst)
        self.assertEqual(inst.hostname, "test-host-1")
        self.assertEqual(inst.pid, 345)
        # now process a message from another instance of the worker
        # and verify that there are two recorded instances
        attrs2['hostname'] = 'test-host-2'
        self.worker.update(attrs2)
        self.assertEqual(len(wkr._instances), 2)

    @inlineCallbacks
    def test_audit_fail(self):
        # here we test the verification of a worker who
        # who had less than min_procs check in
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        wkr_id = attrs['worker_id']
        # process the fake message ()
        self.worker.update(attrs)
        wkr = self.worker._workers[attrs['worker_id']]
        wkr.audit(self.worker._storage)
        # test that an issue was opened
        self.assertEqual(wkr.procs_count, 1)
        key = issue_key(wkr_id)
        issue = json.loads((yield fkredis.get(key)))
        self.assertEqual(issue['issue_type'], 'min-procs-fail')

    @inlineCallbacks
    def test_audit_pass(self):
        # here we test the verification of a worker who
        # who had more than min_procs check in
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        wkr_id = attrs['worker_id']
        # process the fake message ()
        self.worker.update(attrs)
        attrs['pid'] = 2342
        self.worker.update(attrs)
        wkr = self.worker._workers[attrs['worker_id']]
        wkr.audit(self.worker._storage)
        # verify that no issue has been opened
        self.assertEqual(wkr.procs_count, 2)
        key = issue_key(wkr_id)
        issue = yield fkredis.get(key)
        self.assertEqual(issue, None)

    @inlineCallbacks
    def test_prepare_storage(self):
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        self.worker._prepare_storage()
        # Systems
        systems = yield fkredis.smembers('systems')
        self.assertEqual(tuple(systems), ('system-1',))

    @inlineCallbacks
    def test_serialize_to_redis(self):
        # This covers a lot of the serialization methods
        # as well as the _sync_to_storage() function.
        yield self.worker.startWorker()
        fkredis = self.worker._redis
        attrs = self.gen_fake_attrs(time.time())
        # process the fake message
        self.worker.update(attrs)
        self.worker._sync_to_storage()
        # this blob is what should be persisted into redis (as JSON)
        expected = {
            u'name': u'system-1',
            u'id': u'system-1',
            u'timestamp': 2,
            u'workers': [{
                u'id': u'system-1:twitter_transport',
                u'name': u'twitter_transport',
                u'system_id': u'system-1',
                u'min_procs': 2,
                u'hosts': [{u'host': u'test-host-1', u'proc_count': 1}]
            }],
        }
        # verify that the system data was persisted correctly
        system = json.loads((yield fkredis.get('system:system-1')))
        system['timestamp'] = 2
        self.assertEqual(system, expected)
|
"""QR decomposition functions."""
import numpy
# Local imports
from blas import get_blas_funcs
from lapack import get_lapack_funcs, find_best_lapack_type
from misc import _datacopied
# XXX: what is qr_old, should it be kept?
__all__ = ['qr', 'qr_multiply', 'rq', 'qr_old']
def safecall(f, name, *args, **kwargs):
    """Call a LAPACK routine, performing the lwork workspace query if needed.

    When no ``lwork`` is supplied, *f* is first called with ``lwork=-1`` so
    the routine reports its optimal workspace size (first element of the
    second-to-last return value), then called for real with that size.
    A negative ``info`` (last return value) indicates an illegal argument
    and raises ValueError; the routine's results are returned without the
    trailing (work, info) pair.
    """
    lwork = kwargs.pop("lwork", None)
    if lwork is None:
        # workspace query: the optimal size comes back in the work array
        ret = f(*args, lwork=-1, **kwargs)
        # use the builtin int: numpy.int was only a deprecated alias for it
        # and is removed in numpy >= 1.24
        lwork = ret[-2][0].real.astype(int)
    ret = f(*args, lwork=lwork, **kwargs)
    if ret[-1] < 0:
        raise ValueError("illegal value in %d-th argument of internal %s"
                         % (-ret[-1], name))
    return ret[:-2]
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False):
    """Compute QR decomposition of a matrix.

    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular.

    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    lwork : int, optional
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic'}
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes).
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition. If pivoting, compute the decomposition
        :lm:`A P = Q R` as above, but where P is chosen such that the diagonal
        of R is non-increasing.

    Returns
    -------
    Q : float or complex ndarray
        Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if
        ``mode='r'``.
    R : float or complex ndarray
        Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
    P : integer ndarray
        Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``.

    Raises
    ------
    LinAlgError
        Raised if decomposition fails

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, zungqr, dgeqp3, and zgeqp3.

    If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
    of (M,M) and (M,N), with ``K=min(M,N)``.

    Examples
    --------
    >>> from scipy import random, linalg, dot, diag, all, allclose
    >>> a = random.randn(9, 6)

    >>> q, r = linalg.qr(a)
    >>> allclose(a, dot(q, r))
    True
    >>> q.shape, r.shape
    ((9, 9), (9, 6))

    >>> r2 = linalg.qr(a, mode='r')
    >>> allclose(r, r2)
    True

    >>> q3, r3 = linalg.qr(a, mode='economic')
    >>> q3.shape, r3.shape
    ((9, 6), (6, 6))

    >>> q4, r4, p4 = linalg.qr(a, pivoting=True)
    >>> d = abs(diag(r4))
    >>> all(d[1:] <= d[:-1])
    True
    >>> allclose(a[:, p4], dot(q4, r4))
    True
    >>> q4.shape, r4.shape, p4.shape
    ((9, 9), (9, 6), (6,))

    >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
    >>> q5.shape, r5.shape, p5.shape
    ((9, 6), (6, 6), (6,))

    """
    # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
    # 'qr' are used below.
    # 'raw' is used only internally by qr_multiply, not documented on purpose
    if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")

    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError("expected 2D array")
    M, N = a1.shape
    overwrite_a = overwrite_a or (_datacopied(a1, a))

    if pivoting:
        # geqp3 performs QR with column pivoting
        geqp3, = get_lapack_funcs(('geqp3',), (a1,))
        qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
        jpvt -= 1  # geqp3 returns a 1-based index array, so subtract 1
    else:
        geqrf, = get_lapack_funcs(('geqrf',), (a1,))
        qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
                           overwrite_a=overwrite_a)

    # R lives in the upper triangle of the geqrf/geqp3 result; for
    # economic/raw with M >= N only the leading N rows are kept
    if mode not in ['economic', 'raw'] or M < N:
        R = numpy.triu(qr)
    else:
        R = numpy.triu(qr[:N, :])

    if pivoting:
        Rj = R, jpvt
    else:
        Rj = R,

    if mode == 'r':
        return Rj
    elif mode == 'raw':
        # hand back the Householder reflectors and tau for qr_multiply
        return ((qr, tau),) + Rj

    # orgqr for real types, ungqr for complex: both expand the elementary
    # reflectors returned by geqrf/geqp3 into an explicit Q
    if find_best_lapack_type((a1,))[0] in ('s', 'd'):
        gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
    else:
        gor_un_gqr, = get_lapack_funcs(('ungqr',), (qr,))

    if M < N:
        # wide matrix: Q is built from the first M reflector columns
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
                      lwork=lwork, overwrite_a=1)
    elif mode == 'economic':
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
                      overwrite_a=1)
    else:
        # full mode with M >= N: pad the reflectors into an (M, M) work array
        t = qr.dtype.char
        qqr = numpy.empty((M, M), dtype=t)
        qqr[:, :N] = qr
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
                      overwrite_a=1)

    return (Q,) + Rj
def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
                overwrite_a=False, overwrite_c=False):
    """Calculate the QR decomposition and multiply Q with a matrix.
    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular. Multiply Q with a vector or a matrix c.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    c : array, one- or two-dimensional
        calculate the product of c and q, depending on the mode:
    mode : {'left', 'right'}
        dot(Q, c) is returned if mode is 'left',
        dot(c, Q) is returned if mode is 'right'.
        the shape of c must be appropriate for the matrix multiplications,
        if mode is 'left', min(a.shape) == c.shape[0],
        if mode is 'right', a.shape[0] == c.shape[1].
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition, see the documentation of qr.
    conjugate : bool, optional
        Whether Q should be complex-conjugated. This might be faster
        than explicit conjugation.
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    overwrite_c: bool, optional
        Whether data in c is overwritten (may improve performance).
        If this is used, c must be big enough to keep the result,
        i.e. c.shape[0] = a.shape[0] if mode is 'left'.
    Returns
    -------
    CQ : float or complex ndarray
        the product of Q and c, as defined in mode
    R : float or complex ndarray
        Of shape (K, N), ``K = min(M, N)``.
    P : integer ndarray
        Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``.
    Raises
    ------
    LinAlgError
        Raised if decomposition fails
    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dormqr, zunmqr, dgeqp3, and zgeqp3.
    """
    if not mode in ['left', 'right']:
        raise ValueError("Mode argument should be one of ['left', 'right']")
    c = numpy.asarray_chkfinite(c)
    onedim = c.ndim == 1
    if onedim:
        # Promote a vector to a one-row matrix (the LAPACK multiply
        # routines expect 2D); the result is raveled back at the end.
        c = c.reshape(1, len(c))
        if mode == "left":
            c = c.T
    a = numpy.asarray(a)  # chkfinite done in qr
    M, N = a.shape
    # Shape-compatibility check; with overwrite_c the caller must supply a
    # buffer already big enough to hold the (possibly larger) product.
    if not (mode == "left" and
                (not overwrite_c and min(M, N) == c.shape[0] or
                     overwrite_c and M == c.shape[0]) or
            mode == "right" and M == c.shape[1]):
        raise ValueError("objects are not aligned")
    # 'raw' mode returns the packed Householder factorization (qr, tau)
    # so Q never has to be formed explicitly.
    raw = qr(a, overwrite_a, None, "raw", pivoting)
    Q, tau = raw[0]
    # or* routines for real input, un* for complex; trans may be reset below.
    if find_best_lapack_type((Q,))[0] in ('s', 'd'):
        gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
        trans = "T"
    else:
        gor_un_mqr, = get_lapack_funcs(('unmqr',), (Q,))
        trans = "C"
    # Only the first min(M, N) reflectors are meaningful.
    Q = Q[:, :min(M, N)]
    if M > N and mode == "left" and not overwrite_c:
        # The product Q*c is taller than c: copy c into a zero-padded
        # Fortran-ordered buffer and let *mqr work in place.
        if conjugate:
            cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
            cc[:, :N] = c.T
        else:
            cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
            cc[:N, :] = c
        trans = "N"
        # NOTE(review): the side/transpose juggling here and below realizes
        # the conjugated product without forming conj(Q) explicitly —
        # confirm against the LAPACK dormqr/zunmqr documentation.
        lr = "R" if conjugate else "L"
        overwrite_c = True
    elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
        # Work on the transpose to get a Fortran-contiguous operand.
        cc = c.T
        lr = "R" if mode == "left" else "L"
    else:
        trans = "N"
        cc = c
        lr = "L" if mode == "left" else "R"
    cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
                   overwrite_c=overwrite_c)
    if trans != "N":
        # Undo the transpose applied to the operand above.
        cQ = cQ.T
    if mode == "right":
        # Only the first K = min(M, N) columns of c*Q are defined.
        cQ = cQ[:, :min(M, N)]
    if onedim:
        cQ = cQ.ravel()
    return (cQ,) + raw[1:]
def qr_old(a, overwrite_a=False, lwork=None):
    """Compute QR decomposition of a matrix.
    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    overwrite_a : boolean
        Whether data in a is overwritten (may improve performance)
    lwork : integer
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    Returns
    -------
    Q : float or complex array, shape (M, M)
    R : float or complex array, shape (M, N)
    Size K = min(M, N)
    Raises LinAlgError if decomposition fails

    Legacy implementation, superseded by `qr`: it accumulates Q from the
    Householder reflectors with one gemm call per reflector, which is much
    slower than LAPACK's or/ungqr used by `qr`.
    """
    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    M,N = a1.shape
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    geqrf, = get_lapack_funcs(('geqrf',), (a1,))
    if lwork is None or lwork == -1:
        # get optimal work array
        qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1)
        # NOTE(review): for complex dtypes work[0] is complex; the sibling
        # rq() takes .real here — confirm whether that is needed for geqrf.
        lwork = work[0]
    qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal geqrf'
                         % -info)
    gemm, = get_blas_funcs(('gemm',), (qr,))
    t = qr.dtype.char
    R = numpy.triu(qr)
    Q = numpy.identity(M, dtype=t)
    ident = numpy.identity(M, dtype=t)
    zeros = numpy.zeros
    for i in range(min(M, N)):
        # Rebuild the i-th Householder vector from the packed factorization
        # (unit diagonal, subdiagonal entries stored in column i of qr).
        v = zeros((M,), t)
        v[i] = 1
        v[i+1:M] = qr[i+1:M, i]
        # H = I - tau * v * v^H  (trans_b=2 conjugate-transposes v).
        H = gemm(-tau[i], v, v, 1+0j, ident, trans_b=2)
        Q = gemm(1, Q, H)
    return Q, R
def rq(a, overwrite_a=False, lwork=None, mode='full'):
    """Compute RQ decomposition of a square real matrix.
    Calculate the decomposition :lm:`A = R Q` where Q is unitary/orthogonal
    and R upper triangular.
    Parameters
    ----------
    a : array, shape (M, M)
        Matrix to be decomposed
    overwrite_a : boolean
        Whether data in a is overwritten (may improve performance)
    lwork : integer
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic'}
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes).
    Returns
    -------
    R : float array, shape (M, N)
    Q : float or complex array, shape (M, M)
    Raises LinAlgError if decomposition fails
    Examples
    --------
    >>> from scipy import linalg
    >>> from numpy import random, dot, allclose
    >>> a = random.randn(6, 9)
    >>> r, q = linalg.rq(a)
    >>> allclose(a, dot(r, q))
    True
    >>> r.shape, q.shape
    ((6, 9), (9, 9))
    >>> r2 = linalg.rq(a, mode='r')
    >>> allclose(r, r2)
    True
    >>> r3, q3 = linalg.rq(a, mode='economic')
    >>> r3.shape, q3.shape
    ((6, 6), (6, 9))
    """
    if not mode in ['full', 'r', 'economic']:
        raise ValueError(\
            "Mode argument should be one of ['full', 'r', 'economic']")
    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    M, N = a1.shape
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
    if lwork is None or lwork == -1:
        # get optimal work array
        rq, tau, work, info = gerqf(a1, lwork=-1, overwrite_a=1)
        # NOTE(review): numpy.int is a deprecated alias (removed in
        # NumPy >= 1.24); this should be plain int (four occurrences below).
        lwork = work[0].real.astype(numpy.int)
    rq, tau, work, info = gerqf(a1, lwork=lwork, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gerqf'
                         % -info)
    if not mode == 'economic' or N < M:
        # The triangle sits in the last M columns, hence the N-M offset.
        R = numpy.triu(rq, N-M)
    else:
        R = numpy.triu(rq[-M:, -M:])
    if mode == 'r':
        return R
    # or* routines generate real Q, un* complex Q.
    if find_best_lapack_type((a1,))[0] in ('s', 'd'):
        gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
    else:
        gor_un_grq, = get_lapack_funcs(('ungrq',), (rq,))
    if N < M:
        # get optimal work array
        Q, work, info = gor_un_grq(rq[-N:], tau, lwork=-1, overwrite_a=1)
        lwork = work[0].real.astype(numpy.int)
        Q, work, info = gor_un_grq(rq[-N:], tau, lwork=lwork, overwrite_a=1)
    elif mode == 'economic':
        # get optimal work array
        Q, work, info = gor_un_grq(rq, tau, lwork=-1, overwrite_a=1)
        lwork = work[0].real.astype(numpy.int)
        Q, work, info = gor_un_grq(rq, tau, lwork=lwork, overwrite_a=1)
    else:
        # Embed the factorization into an N x N array so or/ungrq can
        # generate the full square Q.
        rq1 = numpy.empty((N, N), dtype=rq.dtype)
        rq1[-M:] = rq
        # get optimal work array
        Q, work, info = gor_un_grq(rq1, tau, lwork=-1, overwrite_a=1)
        lwork = work[0].real.astype(numpy.int)
        Q, work, info = gor_un_grq(rq1, tau, lwork=lwork, overwrite_a=1)
    if info < 0:
        raise ValueError("illegal value in %d-th argument of internal orgrq"
                         % -info)
    return R, Q
linalg: deprecate qr_old
"""QR decomposition functions."""
import numpy
# Local imports
from blas import get_blas_funcs
from lapack import get_lapack_funcs, find_best_lapack_type
from misc import _datacopied
# XXX: what is qr_old, should it be kept?
__all__ = ['qr', 'qr_multiply', 'rq', 'qr_old']
def safecall(f, name, *args, **kwargs):
    """Call a LAPACK routine, sizing the workspace and checking for errors.

    Parameters
    ----------
    f : callable
        LAPACK wrapper; must accept an ``lwork`` keyword and return a tuple
        whose last two items are the work array and the integer info code.
    name : str
        Routine name used in the error message.
    *args, **kwargs
        Passed through to `f`.  If no ``lwork`` keyword is supplied (or it
        is None), an automatic workspace-size query (``lwork=-1``) is made
        first and the optimal size is used.

    Returns
    -------
    tuple
        The routine's return tuple with the trailing (work, info) pair
        stripped off.

    Raises
    ------
    ValueError
        If the routine reports an illegal argument (``info < 0``).
    """
    lwork = kwargs.pop("lwork", None)
    if lwork is None:
        # Workspace query: LAPACK reports the optimal size in work[0].
        ret = f(*args, lwork=-1, **kwargs)
        # Use builtin int(): numpy.int was a deprecated alias for int and
        # was removed in NumPy >= 1.24.  .real handles complex routines,
        # which return the size in a complex work array.
        lwork = int(ret[-2][0].real)
    ret = f(*args, lwork=lwork, **kwargs)
    if ret[-1] < 0:
        raise ValueError("illegal value in %d-th argument of internal %s"
                         % (-ret[-1], name))
    return ret[:-2]
def qr(a, overwrite_a=False, lwork=None, mode='full', pivoting=False):
    """Compute QR decomposition of a matrix.
    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    lwork : int, optional
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic'}
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes).
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition. If pivoting, compute the decomposition
        :lm:`A P = Q R` as above, but where P is chosen such that the diagonal
        of R is non-increasing.
    Returns
    -------
    Q : float or complex ndarray
        Of shape (M, M), or (M, K) for ``mode='economic'``. Not returned if
        ``mode='r'``.
    R : float or complex ndarray
        Of shape (M, N), or (K, N) for ``mode='economic'``. ``K = min(M, N)``.
    P : integer ndarray
        Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``.
    Raises
    ------
    LinAlgError
        Raised if decomposition fails
    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, zungqr, dgeqp3, and zgeqp3.
    If ``mode=economic``, the shapes of Q and R are (M, K) and (K, N) instead
    of (M,M) and (M,N), with ``K=min(M,N)``.
    Examples
    --------
    >>> from scipy import random, linalg, dot, diag, all, allclose
    >>> a = random.randn(9, 6)
    >>> q, r = linalg.qr(a)
    >>> allclose(a, dot(q, r))
    True
    >>> q.shape, r.shape
    ((9, 9), (9, 6))
    >>> r2 = linalg.qr(a, mode='r')
    >>> allclose(r, r2)
    True
    >>> q3, r3 = linalg.qr(a, mode='economic')
    >>> q3.shape, r3.shape
    ((9, 6), (6, 6))
    >>> q4, r4, p4 = linalg.qr(a, pivoting=True)
    >>> d = abs(diag(r4))
    >>> all(d[1:] <= d[:-1])
    True
    >>> allclose(a[:, p4], dot(q4, r4))
    True
    >>> q4.shape, r4.shape, p4.shape
    ((9, 9), (9, 6), (6,))
    >>> q5, r5, p5 = linalg.qr(a, mode='economic', pivoting=True)
    >>> q5.shape, r5.shape, p5.shape
    ((9, 6), (6, 6), (6,))
    """
    # 'qr' was the old default, equivalent to 'full'. Neither 'full' nor
    # 'qr' are used below.
    # 'raw' is used only internally by qr_multiply, not documented on purpose
    if mode not in ['full', 'qr', 'r', 'economic', 'raw']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")
    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError("expected 2D array")
    M, N = a1.shape
    # Working in place is safe when asarray_chkfinite already made a copy.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    if pivoting:
        # geqp3 computes the column-pivoted factorization A*P = Q*R.
        geqp3, = get_lapack_funcs(('geqp3',), (a1,))
        qr, jpvt, tau = safecall(geqp3, "geqp3", a1, overwrite_a=overwrite_a)
        jpvt -= 1 # geqp3 returns a 1-based index array, so subtract 1
    else:
        geqrf, = get_lapack_funcs(('geqrf',), (a1,))
        qr, tau = safecall(geqrf, "geqrf", a1, lwork=lwork,
                           overwrite_a=overwrite_a)
    if mode not in ['economic', 'raw'] or M < N:
        R = numpy.triu(qr)
    else:
        # Economy-size R: keep only the leading N rows.
        R = numpy.triu(qr[:N, :])
    # Rj: everything returned after Q — R, plus the pivot array if any.
    if pivoting:
        Rj = R, jpvt
    else:
        Rj = R,
    if mode == 'r':
        return Rj
    elif mode == 'raw':
        # Hand the packed Householder factorization to qr_multiply.
        return ((qr, tau),) + Rj
    # or* routines generate real Q, un* complex Q.
    if find_best_lapack_type((a1,))[0] in ('s', 'd'):
        gor_un_gqr, = get_lapack_funcs(('orgqr',), (qr,))
    else:
        gor_un_gqr, = get_lapack_funcs(('ungqr',), (qr,))
    if M < N:
        # Wide matrix: Q is M x M, built from the first M columns only.
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr[:, :M], tau,
                      lwork=lwork, overwrite_a=1)
    elif mode == 'economic':
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qr, tau, lwork=lwork,
                      overwrite_a=1)
    else:
        # Full mode with M >= N: pad the factorization into an M x M array
        # so or/ungqr can generate the complete square Q.
        t = qr.dtype.char
        qqr = numpy.empty((M, M), dtype=t)
        qqr[:, :N] = qr
        Q, = safecall(gor_un_gqr, "gorgqr/gungqr", qqr, tau, lwork=lwork,
                      overwrite_a=1)
    return (Q,) + Rj
def qr_multiply(a, c, mode='right', pivoting=False, conjugate=False,
                overwrite_a=False, overwrite_c=False):
    """Calculate the QR decomposition and multiply Q with a matrix.
    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular. Multiply Q with a vector or a matrix c.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    c : array, one- or two-dimensional
        calculate the product of c and q, depending on the mode:
    mode : {'left', 'right'}
        dot(Q, c) is returned if mode is 'left',
        dot(c, Q) is returned if mode is 'right'.
        the shape of c must be appropriate for the matrix multiplications,
        if mode is 'left', min(a.shape) == c.shape[0],
        if mode is 'right', a.shape[0] == c.shape[1].
    pivoting : bool, optional
        Whether or not factorization should include pivoting for rank-revealing
        qr decomposition, see the documentation of qr.
    conjugate : bool, optional
        Whether Q should be complex-conjugated. This might be faster
        than explicit conjugation.
    overwrite_a : bool, optional
        Whether data in a is overwritten (may improve performance)
    overwrite_c: bool, optional
        Whether data in c is overwritten (may improve performance).
        If this is used, c must be big enough to keep the result,
        i.e. c.shape[0] = a.shape[0] if mode is 'left'.
    Returns
    -------
    CQ : float or complex ndarray
        the product of Q and c, as defined in mode
    R : float or complex ndarray
        Of shape (K, N), ``K = min(M, N)``.
    P : integer ndarray
        Of shape (N,) for ``pivoting=True``. Not returned if ``pivoting=False``.
    Raises
    ------
    LinAlgError
        Raised if decomposition fails
    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dormqr, zunmqr, dgeqp3, and zgeqp3.
    """
    if not mode in ['left', 'right']:
        raise ValueError("Mode argument should be one of ['left', 'right']")
    c = numpy.asarray_chkfinite(c)
    onedim = c.ndim == 1
    if onedim:
        # Promote a vector to a one-row matrix (the LAPACK multiply
        # routines expect 2D); the result is raveled back at the end.
        c = c.reshape(1, len(c))
        if mode == "left":
            c = c.T
    a = numpy.asarray(a)  # chkfinite done in qr
    M, N = a.shape
    # Shape-compatibility check; with overwrite_c the caller must supply a
    # buffer already big enough to hold the (possibly larger) product.
    if not (mode == "left" and
                (not overwrite_c and min(M, N) == c.shape[0] or
                     overwrite_c and M == c.shape[0]) or
            mode == "right" and M == c.shape[1]):
        raise ValueError("objects are not aligned")
    # 'raw' mode returns the packed Householder factorization (qr, tau)
    # so Q never has to be formed explicitly.
    raw = qr(a, overwrite_a, None, "raw", pivoting)
    Q, tau = raw[0]
    # or* routines for real input, un* for complex; trans may be reset below.
    if find_best_lapack_type((Q,))[0] in ('s', 'd'):
        gor_un_mqr, = get_lapack_funcs(('ormqr',), (Q,))
        trans = "T"
    else:
        gor_un_mqr, = get_lapack_funcs(('unmqr',), (Q,))
        trans = "C"
    # Only the first min(M, N) reflectors are meaningful.
    Q = Q[:, :min(M, N)]
    if M > N and mode == "left" and not overwrite_c:
        # The product Q*c is taller than c: copy c into a zero-padded
        # Fortran-ordered buffer and let *mqr work in place.
        if conjugate:
            cc = numpy.zeros((c.shape[1], M), dtype=c.dtype, order="F")
            cc[:, :N] = c.T
        else:
            cc = numpy.zeros((M, c.shape[1]), dtype=c.dtype, order="F")
            cc[:N, :] = c
        trans = "N"
        # NOTE(review): the side/transpose juggling here and below realizes
        # the conjugated product without forming conj(Q) explicitly —
        # confirm against the LAPACK dormqr/zunmqr documentation.
        lr = "R" if conjugate else "L"
        overwrite_c = True
    elif c.flags["C_CONTIGUOUS"] and trans == "T" or conjugate:
        # Work on the transpose to get a Fortran-contiguous operand.
        cc = c.T
        lr = "R" if mode == "left" else "L"
    else:
        trans = "N"
        cc = c
        lr = "L" if mode == "left" else "R"
    cQ, = safecall(gor_un_mqr, "gormqr/gunmqr", lr, trans, Q, tau, cc,
                   overwrite_c=overwrite_c)
    if trans != "N":
        # Undo the transpose applied to the operand above.
        cQ = cQ.T
    if mode == "right":
        # Only the first K = min(M, N) columns of c*Q are defined.
        cQ = cQ[:, :min(M, N)]
    if onedim:
        cQ = cQ.ravel()
    return (cQ,) + raw[1:]
@numpy.deprecate
def qr_old(a, overwrite_a=False, lwork=None):
    """Compute QR decomposition of a matrix.
    Calculate the decomposition :lm:`A = Q R` where Q is unitary/orthogonal
    and R upper triangular.
    Parameters
    ----------
    a : array, shape (M, N)
        Matrix to be decomposed
    overwrite_a : boolean
        Whether data in a is overwritten (may improve performance)
    lwork : integer
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    Returns
    -------
    Q : float or complex array, shape (M, M)
    R : float or complex array, shape (M, N)
    Size K = min(M, N)
    Raises LinAlgError if decomposition fails

    Deprecated: use `qr` instead; this accumulates Q with one gemm call
    per Householder reflector, which is much slower than or/ungqr.
    NOTE(review): numpy.deprecate was removed in NumPy >= 1.25 — this
    decorator will need replacing if NumPy is upgraded.
    """
    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    M,N = a1.shape
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    geqrf, = get_lapack_funcs(('geqrf',), (a1,))
    if lwork is None or lwork == -1:
        # get optimal work array
        qr, tau, work, info = geqrf(a1, lwork=-1, overwrite_a=1)
        lwork = work[0]
    qr, tau, work, info = geqrf(a1, lwork=lwork, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal geqrf'
                         % -info)
    gemm, = get_blas_funcs(('gemm',), (qr,))
    t = qr.dtype.char
    R = numpy.triu(qr)
    Q = numpy.identity(M, dtype=t)
    ident = numpy.identity(M, dtype=t)
    zeros = numpy.zeros
    for i in range(min(M, N)):
        # Rebuild the i-th Householder vector from the packed factorization
        # (unit diagonal, subdiagonal entries stored in column i of qr).
        v = zeros((M,), t)
        v[i] = 1
        v[i+1:M] = qr[i+1:M, i]
        # H = I - tau * v * v^H  (trans_b=2 conjugate-transposes v).
        H = gemm(-tau[i], v, v, 1+0j, ident, trans_b=2)
        Q = gemm(1, Q, H)
    return Q, R
def rq(a, overwrite_a=False, lwork=None, mode='full'):
    """Compute RQ decomposition of a square real matrix.

    Calculate the decomposition :lm:`A = R Q` where Q is unitary/orthogonal
    and R upper triangular.

    Parameters
    ----------
    a : array, shape (M, M)
        Matrix to be decomposed
    overwrite_a : boolean
        Whether data in a is overwritten (may improve performance)
    lwork : integer
        Work array size, lwork >= a.shape[1]. If None or -1, an optimal size
        is computed.
    mode : {'full', 'r', 'economic'}
        Determines what information is to be returned: either both Q and R
        ('full', default), only R ('r') or both Q and R but computed in
        economy-size ('economic', see Notes).

    Returns
    -------
    R : float array, shape (M, N)
    Q : float or complex array, shape (M, M)

    Raises LinAlgError if decomposition fails

    Examples
    --------
    >>> from scipy import linalg
    >>> from numpy import random, dot, allclose
    >>> a = random.randn(6, 9)
    >>> r, q = linalg.rq(a)
    >>> allclose(a, dot(r, q))
    True
    >>> r.shape, q.shape
    ((6, 9), (9, 9))
    >>> r2 = linalg.rq(a, mode='r')
    >>> allclose(r, r2)
    True
    >>> r3, q3 = linalg.rq(a, mode='economic')
    >>> r3.shape, q3.shape
    ((6, 6), (6, 9))
    """
    if mode not in ['full', 'r', 'economic']:
        raise ValueError(
            "Mode argument should be one of ['full', 'r', 'economic']")
    a1 = numpy.asarray_chkfinite(a)
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    M, N = a1.shape
    # Working in place is safe when asarray_chkfinite already made a copy.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    gerqf, = get_lapack_funcs(('gerqf',), (a1,))
    if lwork is None or lwork == -1:
        # Workspace query: gerqf reports the optimal size in work[0].
        rq, tau, work, info = gerqf(a1, lwork=-1, overwrite_a=1)
        # int() instead of the deprecated numpy.int alias (removed in
        # NumPy >= 1.24); .real handles complex work arrays.
        lwork = int(work[0].real)
    rq, tau, work, info = gerqf(a1, lwork=lwork, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of internal gerqf'
                         % -info)
    if not mode == 'economic' or N < M:
        # Full-size R: the triangle sits in the last M columns (offset N-M).
        R = numpy.triu(rq, N-M)
    else:
        R = numpy.triu(rq[-M:, -M:])
    if mode == 'r':
        return R
    # or* routines generate real Q, un* complex Q.
    if find_best_lapack_type((a1,))[0] in ('s', 'd'):
        gor_un_grq, = get_lapack_funcs(('orgrq',), (rq,))
    else:
        gor_un_grq, = get_lapack_funcs(('ungrq',), (rq,))
    def _generate_q(factor):
        # Run or/ungrq twice: once as a workspace query, once for real.
        Q, work, info = gor_un_grq(factor, tau, lwork=-1, overwrite_a=1)
        lw = int(work[0].real)
        return gor_un_grq(factor, tau, lwork=lw, overwrite_a=1)
    if N < M:
        # Only the last N rows carry reflectors when the matrix is tall.
        Q, work, info = _generate_q(rq[-N:])
    elif mode == 'economic':
        Q, work, info = _generate_q(rq)
    else:
        # Embed the factorization into an N x N array so or/ungrq can
        # generate the full square Q.
        rq1 = numpy.empty((N, N), dtype=rq.dtype)
        rq1[-M:] = rq
        Q, work, info = _generate_q(rq1)
    if info < 0:
        raise ValueError("illegal value in %d-th argument of internal orgrq"
                         % -info)
    return R, Q
|
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from sqlalchemy import *
from iatidq import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
## TEST RUNTIME-SPECIFIC DATA
class PackageStatus(db.Model):
    """Per-run fetch/test status of a registry package (one row per run)."""
    __tablename__ = 'packagestatus'
    id = Column(Integer, primary_key=True)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    # Integer status code; the meaning of each value is defined by callers.
    status = Column(Integer, nullable=False)
    runtime_datetime = Column(DateTime)
    def __init__(self):
        # Timestamp the status at creation time (UTC).
        self.runtime_datetime = datetime.utcnow()
    def __repr__(self):
        return unicode(self.runtime_datetime)+u' '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Runtime(db.Model):
    """A single execution of the test suite; results reference it."""
    __tablename__ = 'runtime'
    id = Column(Integer, primary_key=True)
    runtime_datetime = Column(DateTime, nullable=False)
    def __init__(self):
        # Timestamp the run at creation time (UTC).
        self.runtime_datetime = datetime.utcnow()
    def __repr__(self):
        return unicode(self.runtime_datetime)+u' '+unicode(self.id)
## IATI REGISTRY PACKAGEGROUPS AND PACKAGES
class PackageGroup(db.Model):
    """A publisher (group of packages) as mirrored from the IATI Registry.

    Most columns mirror CKAN publisher metadata verbatim, hence the loose
    UnicodeText types.
    """
    __tablename__ = 'packagegroup'
    id = Column(Integer, primary_key=True)
    # 'man'/'auto': whether the row was created manually or by the crawler
    # — presumably; verify against the import code.
    man_auto = Column(UnicodeText)
    name = Column(UnicodeText, nullable=False)
    ckan_id = Column(UnicodeText)
    revision_id = Column(UnicodeText)
    title = Column(UnicodeText)
    created_date = Column(UnicodeText)
    state = Column(UnicodeText)
    publisher_iati_id = Column(UnicodeText)
    publisher_segmentation = Column(UnicodeText)
    publisher_type = Column(UnicodeText)
    publisher_ui = Column(UnicodeText)
    publisher_organization_type = Column(UnicodeText)
    publisher_frequency = Column(UnicodeText)
    publisher_thresholds = Column(UnicodeText)
    publisher_units = Column(UnicodeText)
    publisher_contact = Column(UnicodeText)
    publisher_agencies = Column(UnicodeText)
    publisher_field_exclusions = Column(UnicodeText)
    publisher_description = Column(UnicodeText)
    publisher_record_exclusions = Column(UnicodeText)
    publisher_timeliness = Column(UnicodeText)
    license_id = Column(UnicodeText)
    publisher_country = Column(UnicodeText)
    publisher_refs = Column(UnicodeText)
    publisher_constraints = Column(UnicodeText)
    publisher_data_quality = Column(UnicodeText)
    def __init__(self, man_auto=None, name=None):
        # Both arguments are optional so the ORM can construct empty rows.
        if man_auto is not None:
            self.man_auto = man_auto
        if name is not None:
            self.name = name
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Package(db.Model):
    """A single IATI data file (CKAN package) belonging to a PackageGroup."""
    __tablename__ = 'package'
    id = Column(Integer, primary_key=True)
    # 'man'/'auto': manual vs. automatic creation — presumably; verify
    # against the import code.
    man_auto = Column(UnicodeText)
    source_url = Column(UnicodeText)
    package_ckan_id = Column(UnicodeText)
    package_name = Column(UnicodeText, nullable=False)
    package_title = Column(UnicodeText)
    package_license_id = Column(UnicodeText)
    package_license = Column(UnicodeText)
    package_metadata_created = Column(UnicodeText)
    package_metadata_modified = Column(UnicodeText)
    package_group = Column(Integer, ForeignKey('packagegroup.id'))
    package_activity_from = Column(UnicodeText)
    package_activity_to = Column(UnicodeText)
    package_activity_count = Column(UnicodeText)
    package_country = Column(UnicodeText)
    package_archive_file = Column(UnicodeText)
    package_verified = Column(UnicodeText)
    package_filetype = Column(UnicodeText)
    package_revision_id = Column(UnicodeText)
    # Inactive packages are kept for history but excluded from runs.
    active = Column(Boolean)
    __table_args__ = (UniqueConstraint('package_name'),)
    def __init__(self, man_auto=None, source_url=None):
        if man_auto is not None:
            self.man_auto = man_auto
        if source_url is not None:
            self.source_url = source_url
    def __repr__(self):
        # Guard against a missing URL so repr never raises.
        source_url = self.source_url or "None"
        return source_url+u", "+str(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## RESULTS
class Result(db.Model):
    """Outcome of one test against one activity in one package during a run."""
    __tablename__ = 'result'
    id = Column(Integer, primary_key=True)
    runtime_id = Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    # Pass/fail (or similar) code for the test — exact encoding defined
    # by the test runner.
    result_data = Column(Integer)
    # IATI identifier of the activity the result refers to.
    result_identifier = Column(UnicodeText)
    result_hierarchy = Column(Integer)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
# Indexes to speed up the common Result queries: lookup by
# (runtime, package, activity identifier) and filtering by test.
db.Index('result_runpack',
         Result.runtime_id, Result.package_id, Result.result_identifier)
db.Index('result_test',
         Result.test_id)
class AggregateResult(db.Model):
    """Pre-computed aggregate of Result rows per package/test/aggregation type."""
    __tablename__='aggregateresult'
    id = Column(Integer,primary_key=True)
    runtime_id=Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    aggregateresulttype_id = Column(Integer, ForeignKey('aggregationtype.id'),
                                    nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    result_hierarchy = Column(Integer)
    # Aggregated value (e.g. a percentage) and the number of underlying
    # results it was computed from.
    results_data = Column(Float)
    results_num = Column(Integer)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
# AggregationType allows for different aggregations
# Particularly used for looking only at current data
class AggregationType(db.Model):
    """A way of aggregating results (e.g. restricted to current data only).

    Rows with a matching test_id/test_result define which Result rows are
    included in the aggregation.
    """
    __tablename__ = 'aggregationtype'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    test_id = Column(Integer, ForeignKey('test.id'))
    test_result = Column(Integer, nullable=False)
    active = Column(Integer)
    def setup(self,
              name,
              description,
              test_id,
              test_result,
              active,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.description = description
        self.test_id = test_id
        self.test_result = test_result
        self.active = active
        if id is not None:
            self.id = id
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## TESTS
class Test(db.Model):
    """A data-quality test that can be run against published IATI data."""
    __tablename__ = 'test'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText, nullable=False)
    test_group = Column(UnicodeText)
    # Source file and line the test was loaded from; note setup() below
    # does not populate these.
    file = Column(UnicodeText)
    line = Column(Integer)
    test_level = Column(Integer, nullable=False)
    active = Column(Boolean)
    def setup(self,
              name,
              description,
              test_group,
              test_level,
              active,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.description = description
        self.test_group = test_group
        self.test_level = test_level
        self.active = active
        if id is not None:
            self.id = id
    def __repr__(self):
        return self.name+u', '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## CODELISTS
class Codelist(db.Model):
    """An IATI codelist (a named, closed set of codes)."""
    __tablename__ = 'codelist'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    # Where the codelist was obtained from; not settable via setup().
    source = Column(UnicodeText)
    def setup(self,
              name,
              description,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.description = description
        if id is not None:
            self.id = id
    def __repr__(self):
        return self.name+u', '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class CodelistCode(db.Model):
    """A single code (value + human-readable name) within a Codelist."""
    __tablename__ = 'codelistcode'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    code = Column(UnicodeText, nullable=False)
    codelist_id = Column(Integer, ForeignKey('codelist.id'), nullable=False)
    # Where the code was obtained from; not settable via setup().
    source = Column(UnicodeText)
    def setup(self,
              name,
              code,
              codelist_id,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.code = code
        self.codelist_id = codelist_id
        if id is not None:
            self.id = id
    def __repr__(self):
        return self.name+u', '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## INDICATORS
class IndicatorGroup(db.Model):
    """A named grouping of Indicators."""
    __tablename__ = 'indicatorgroup'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    def setup(self,
              name,
              description,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.description = description
        if id is not None:
            self.id = id
    def __repr__(self):
        return self.name+u', '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class Indicator(db.Model):
    """A published data-quality indicator, composed of one or more Tests
    (linked via IndicatorTest) and belonging to an IndicatorGroup."""
    __tablename__ = 'indicator'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    longdescription = Column(UnicodeText)
    indicatorgroup_id = Column(Integer, ForeignKey('indicatorgroup.id'),
                               nullable=False)
    indicator_type = Column(UnicodeText)
    indicator_category_name = Column(UnicodeText)
    indicator_subcategory_name = Column(UnicodeText)
    def setup(self,
              name,
              description,
              longdescription,
              indicatorgroup_id,
              indicator_type,
              indicator_category_name,
              indicator_subcategory_name,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.name = name
        self.description = description
        self.longdescription = longdescription
        self.indicatorgroup_id = indicatorgroup_id
        self.indicator_type = indicator_type
        self.indicator_category_name = indicator_category_name
        self.indicator_subcategory_name = indicator_subcategory_name
        if id is not None:
            self.id = id
    def __repr__(self):
        return self.name+u', '+unicode(self.id)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class IndicatorTest(db.Model):
    """Many-to-many link between Indicators and Tests."""
    __tablename__ = 'indicatortest'
    id = Column(Integer, primary_key=True)
    indicator_id = Column(Integer, ForeignKey('indicator.id'), nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    def setup(self,
              indicator_id,
              test_id,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.indicator_id = indicator_id
        self.test_id = test_id
        if id is not None:
            self.id = id
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class IndicatorInfoType(db.Model):
    """Many-to-many link between Indicators and InfoTypes."""
    __tablename__ = 'indicatorinfotype'
    id = Column(Integer, primary_key=True)
    indicator_id = Column(Integer, ForeignKey('indicator.id'), nullable=False)
    infotype_id = Column(Integer, ForeignKey('info_type.id'), nullable=False)
    def setup(self,
              indicator_id,
              infotype_id,
              id=None):
        """Populate the record's fields; optionally force a primary key."""
        self.indicator_id = indicator_id
        self.infotype_id = infotype_id
        if id is not None:
            self.id = id
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class OrganisationCondition(db.Model):
    """A per-organisation rule controlling whether a test's result is shown."""
    __tablename__ = 'organisationcondition'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    operation = Column(Integer) # show (1) or don't show (0) result
    condition = Column(UnicodeText) # activity level, hierarchy 2
    condition_value = Column(UnicodeText) # True, 2, etc.
    description = Column(UnicodeText)
    # Source file/line the condition was loaded from.
    file = Column(UnicodeText)
    line = Column(Integer)
    active = Column(Boolean)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class OrganisationConditionFeedback(db.Model):
    """User-submitted feedback about an organisation's conditions.

    Note organisation_id is stored as free text here, not as a foreign key.
    """
    __tablename__ ='organisationconditionfeedback'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(UnicodeText, nullable=False)
    uses = Column(UnicodeText)
    element = Column(UnicodeText)
    where = Column(UnicodeText)
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## ORGANISATIONS; RELATIONS WITH PACKAGES
class Organisation(db.Model):
    """An organisation whose IATI publications are assessed.

    ``organisation_code`` doubles as the key used to communicate with
    implementation schedules, so it must remain unique and stable.
    """
    __tablename__ = 'organisation'
    id = Column(Integer, primary_key=True)
    organisation_name = Column(UnicodeText, nullable=False)
    organisation_code = Column(UnicodeText, nullable=False)
    organisation_total_spend = Column(Float(precision=2))
    organisation_total_spend_source = Column(UnicodeText)
    organisation_currency = Column(UnicodeText)
    organisation_currency_conversion = Column(Float(precision=4))
    organisation_currency_conversion_source = Column(UnicodeText)
    organisation_largest_recipient = Column(UnicodeText)
    organisation_largest_recipient_source = Column(UnicodeText)
    frequency = Column(UnicodeText)
    frequency_comment = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_name'),
                      UniqueConstraint('organisation_code'))
    def setup(self,
              organisation_name,
              organisation_code,
              organisation_total_spend=None,
              organisation_total_spend_source=None,
              organisation_currency=None,
              organisation_currency_conversion=None,
              organisation_currency_conversion_source=None,
              organisation_largest_recipient=None,
              organisation_largest_recipient_source=None,
              id=None):
        """Populate the record's fields; optionally force a primary key.

        Fixes two defects in the previous version: trailing commas turned
        four of these assignments into 1-tuples (``(value,)`` was stored
        instead of ``value``), and the ``organisation_total_spend_source``
        and ``organisation_largest_recipient_source`` arguments were
        silently dropped.
        """
        self.organisation_name = organisation_name
        self.organisation_code = organisation_code
        self.organisation_total_spend = organisation_total_spend
        self.organisation_total_spend_source = organisation_total_spend_source
        self.organisation_currency = organisation_currency
        self.organisation_currency_conversion = \
            organisation_currency_conversion
        self.organisation_currency_conversion_source = \
            organisation_currency_conversion_source
        self.organisation_largest_recipient = organisation_largest_recipient
        self.organisation_largest_recipient_source = \
            organisation_largest_recipient_source
        if id is not None:
            self.id = id
    def as_dict(self):
        # Serialise every mapped column to a plain dict.
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class OrganisationPackage(db.Model):
    """Link table associating an Organisation with a Package."""
    __tablename__ = 'organisationpackage'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    # Optional condition restricting when this association applies.
    condition = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_id', 'package_id', name='_organisation_package_uc'),
                      )

    def setup(self,
              organisation_id,
              package_id,
              condition=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.package_id = package_id
        self.condition = condition
        if id is not None:
            self.id = id


class OrganisationPackageGroup(db.Model):
    """Link table associating an Organisation with a PackageGroup."""
    __tablename__ = 'organisationpackagegroup'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    packagegroup_id = Column(Integer, ForeignKey('packagegroup.id'),
                             nullable=False)
    # Optional condition restricting when this association applies.
    condition = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_id', 'packagegroup_id'),)

    def setup(self,
              organisation_id,
              packagegroup_id,
              condition=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.packagegroup_id = packagegroup_id
        self.condition = condition
        if id is not None:
            self.id = id
## INFORESULTS
# TODO: IMPLEMENT
# ==> total amount of disbursements in this package
# e.g. 1 = total disbursements
class InfoResult(db.Model):
    """A numeric info value computed for a package during one test run."""
    __tablename__ = 'info_result'
    id = Column(Integer, primary_key=True)
    runtime_id = Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    info_id = Column(Integer, ForeignKey('info_type.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    result_data = Column(Float)


class InfoType(db.Model):
    """A kind of info value that InfoResult rows can record."""
    __tablename__ = 'info_type'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    level = Column(Integer, nullable=False)

    def setup(self,
              name,
              level,
              description=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.level = level
        self.description = description
        if id is not None:
            self.id = id
## USERS
class User(db.Model):
    """An application user with a hashed password."""
    __tablename__ = 'user'
    id = Column(Integer, primary_key=True)
    username = Column(UnicodeText)
    first_name = Column(UnicodeText)
    last_name = Column(UnicodeText)
    email_address = Column(UnicodeText)
    reset_password_key = Column(UnicodeText)
    pw_hash = db.Column(String(255))

    def __init__(self, username, password, first_name, last_name, email_address):
        """Create a user, hashing the plaintext password immediately.

        Bug fix: first_name, last_name and email_address were previously
        accepted but silently discarded.
        """
        self.username = username
        self.pw_hash = generate_password_hash(password)
        self.first_name = first_name
        self.last_name = last_name
        self.email_address = email_address

    def check_password(self, password):
        """Return True if the plaintext password matches the stored hash."""
        return check_password_hash(self.pw_hash, password)

    def __repr__(self):
        # Bug fix: the old version returned a tuple (invalid for __repr__,
        # which must return a str) and referenced a nonexistent
        # self.password2 attribute, raising AttributeError when called.
        return '<User %r (%r)>' % (self.username, self.id)
class UserOption(db.Model):
    """A per-user setting: one Option value (and optional qualifier)."""
    __tablename__ = 'useroption'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('user.id'))
    option_id = Column(Integer, ForeignKey('option.id'))
    # basically two different values are permitted
    # given different names for clarity
    useroption_value = Column(UnicodeText)
    useroption_qualifier = Column(UnicodeText)


# Option: e.g. permission_view, survey_access
class Option(db.Model):
    """A named setting/permission that users can hold via UserOption."""
    __tablename__ = 'option'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    # Nonzero when a UserOption for this option must carry a qualifier.
    qualifier_required = Column(Integer)
## ORGANISATION SURVEYS
class OrganisationSurvey(db.Model):
    """A survey for one organisation, tracking its current workflow stage."""
    __tablename__ = 'organisationsurvey'
    id = Column(Integer,primary_key=True)
    currentworkflow_id = Column(Integer, ForeignKey('workflow.id'))
    currentworkflow_deadline = Column(DateTime)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))

    def setup(self,
              organisation_id,
              currentworkflow_id,
              currentworkflow_deadline=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.currentworkflow_id = currentworkflow_id
        self.currentworkflow_deadline = currentworkflow_deadline
        if id is not None:
            self.id = id


class OrganisationSurveyData(db.Model):
    """One indicator's answer within an organisation survey."""
    __tablename__ = 'organisationsurveydata'
    id = Column(Integer,primary_key=True)
    organisationsurvey_id = Column(Integer, ForeignKey('organisationsurvey.id'))
    indicator_id = Column(Integer, ForeignKey('indicator.id'))
    workflow_id = Column(Integer, ForeignKey('workflow.id'))
    published_status = Column(Integer, ForeignKey('publishedstatus.id'))
    published_source = Column(UnicodeText)
    published_comment = Column(UnicodeText)
    published_accepted = Column(Integer)

    def setup(self,
              organisationsurvey_id,
              indicator_id,
              workflow_id=None,
              published_status=None,
              published_source=None,
              published_comment=None,
              published_accepted=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisationsurvey_id = organisationsurvey_id
        self.workflow_id = workflow_id
        self.indicator_id = indicator_id
        self.published_status = published_status
        self.published_source = published_source
        self.published_comment = published_comment
        self.published_accepted = published_accepted
        if id is not None:
            self.id = id
class PublishedStatus(db.Model):
    """A publication status label plus its CSS/display class."""
    __tablename__ = 'publishedstatus'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    publishedstatus_class = Column(UnicodeText)

    def setup(self,
              name,
              publishedstatus_class,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.publishedstatus_class = publishedstatus_class
        if id is not None:
            self.id = id


class Workflow(db.Model):
    """A stage in the survey workflow; leadsto is a self-reference to the
    next stage."""
    __tablename__='workflow'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText)
    title = Column(UnicodeText)
    leadsto = Column(Integer, ForeignKey('workflow.id'))
    workflow_type = Column(Integer, ForeignKey('workflowtype.id'))
    duration = Column(Integer)

    def setup(self,
              name,
              title,
              leadsto,
              workflow_type=None,
              duration=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.title = title
        self.leadsto = leadsto
        self.workflow_type = workflow_type
        self.duration = duration
        if id is not None:
            self.id = id
# WorkflowType: define what sort of workflow this should be.
# Will initially be hardcoded but this should make it easier
# to expand and define later.
class WorkflowType(db.Model):
    """The kind of a Workflow stage."""
    __tablename__='workflowtype'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText)

    def setup(self,
              name,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        if id is not None:
            self.id = id


class WorkflowNotification(db.Model):
    """A notice to emit when a survey moves between two workflow stages."""
    __tablename__='workflownotifications'
    id = Column(Integer, primary_key=True)
    workflow_from = Column(Integer, ForeignKey('workflow.id'))
    workflow_to = Column(Integer, ForeignKey('workflow.id'))
    workflow_notice = Column(UnicodeText)
Update models table, remove unused tables
# IATI Data Quality, tools for Data QA on IATI-formatted publications
# by Mark Brough, Martin Keegan, Ben Webb and Jennifer Smith
#
# Copyright (C) 2013 Publish What You Fund
#
# This programme is free software; you may redistribute and/or modify
# it under the terms of the GNU Affero General Public License v3.0
from sqlalchemy import *
from iatidq import db
from datetime import datetime
from werkzeug.security import generate_password_hash, check_password_hash
## TEST RUNTIME-SPECIFIC DATA
class PackageStatus(db.Model):
    """Status of one package at a point in time; timestamped on creation."""
    __tablename__ = 'packagestatus'
    id = Column(Integer, primary_key=True)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    status = Column(Integer, nullable=False)
    runtime_datetime = Column(DateTime)

    def __init__(self):
        # Stamp with the current UTC time at construction.
        self.runtime_datetime = datetime.utcnow()

    def __repr__(self):
        return unicode(self.runtime_datetime)+u' '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class Runtime(db.Model):
    """One test run; rows are timestamped with UTC time on creation."""
    __tablename__ = 'runtime'
    id = Column(Integer, primary_key=True)
    runtime_datetime = Column(DateTime, nullable=False)

    def __init__(self):
        self.runtime_datetime = datetime.utcnow()

    def __repr__(self):
        return unicode(self.runtime_datetime)+u' '+unicode(self.id)
## IATI REGISTRY PACKAGEGROUPS AND PACKAGES
class PackageGroup(db.Model):
    """A publisher group from the IATI registry, with CKAN metadata."""
    __tablename__ = 'packagegroup'
    id = Column(Integer, primary_key=True)
    # Whether the row was added manually or automatically harvested.
    man_auto = Column(UnicodeText)
    name = Column(UnicodeText, nullable=False)
    ckan_id = Column(UnicodeText)
    revision_id = Column(UnicodeText)
    title = Column(UnicodeText)
    created_date = Column(UnicodeText)
    state = Column(UnicodeText)
    publisher_iati_id = Column(UnicodeText)
    publisher_segmentation = Column(UnicodeText)
    publisher_type = Column(UnicodeText)
    publisher_ui = Column(UnicodeText)
    publisher_organization_type = Column(UnicodeText)
    publisher_frequency = Column(UnicodeText)
    publisher_thresholds = Column(UnicodeText)
    publisher_units = Column(UnicodeText)
    publisher_contact = Column(UnicodeText)
    publisher_agencies = Column(UnicodeText)
    publisher_field_exclusions = Column(UnicodeText)
    publisher_description = Column(UnicodeText)
    publisher_record_exclusions = Column(UnicodeText)
    publisher_timeliness = Column(UnicodeText)
    license_id = Column(UnicodeText)
    publisher_country = Column(UnicodeText)
    publisher_refs = Column(UnicodeText)
    publisher_constraints = Column(UnicodeText)
    publisher_data_quality = Column(UnicodeText)

    def __init__(self, man_auto=None, name=None):
        """Only man_auto and name may be set at construction; both optional."""
        if man_auto is not None:
            self.man_auto = man_auto
        if name is not None:
            self.name = name

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class Package(db.Model):
    """A data package (file) from the IATI registry, with CKAN metadata."""
    __tablename__ = 'package'
    id = Column(Integer, primary_key=True)
    man_auto = Column(UnicodeText)
    source_url = Column(UnicodeText)
    package_ckan_id = Column(UnicodeText)
    package_name = Column(UnicodeText, nullable=False)
    package_title = Column(UnicodeText)
    package_license_id = Column(UnicodeText)
    package_license = Column(UnicodeText)
    package_metadata_created = Column(UnicodeText)
    package_metadata_modified = Column(UnicodeText)
    package_group = Column(Integer, ForeignKey('packagegroup.id'))
    package_activity_from = Column(UnicodeText)
    package_activity_to = Column(UnicodeText)
    package_activity_count = Column(UnicodeText)
    package_country = Column(UnicodeText)
    package_archive_file = Column(UnicodeText)
    package_verified = Column(UnicodeText)
    package_filetype = Column(UnicodeText)
    package_revision_id = Column(UnicodeText)
    active = Column(Boolean)
    __table_args__ = (UniqueConstraint('package_name'),)

    def __init__(self, man_auto=None, source_url=None):
        """Only man_auto and source_url may be set at construction."""
        if man_auto is not None:
            self.man_auto = man_auto
        if source_url is not None:
            self.source_url = source_url

    def __repr__(self):
        source_url = self.source_url or "None"
        return source_url+u", "+str(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## RESULTS
class Result(db.Model):
    """Outcome of one test against one activity/element in one run."""
    __tablename__ = 'result'
    id = Column(Integer, primary_key=True)
    runtime_id = Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    result_data = Column(Integer)
    result_identifier = Column(UnicodeText)
    result_hierarchy = Column(Integer)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


# Indexes supporting lookups by (run, package, identifier) and by test.
db.Index('result_runpack',
         Result.runtime_id, Result.package_id, Result.result_identifier)
db.Index('result_test',
         Result.test_id)


class AggregateResult(db.Model):
    """Aggregated pass-rate for one test over a package in one run."""
    __tablename__='aggregateresult'
    id = Column(Integer,primary_key=True)
    runtime_id=Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    aggregateresulttype_id = Column(Integer, ForeignKey('aggregationtype.id'),
                                    nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    result_hierarchy = Column(Integer)
    results_data = Column(Float)
    # Number of underlying results contributing to results_data.
    results_num = Column(Integer)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
# AggregationType allows for different aggregations
# Particularly used for looking only at current data
class AggregationType(db.Model):
    """A way of aggregating results (e.g. restricted to current data)."""
    __tablename__ = 'aggregationtype'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    test_id = Column(Integer, ForeignKey('test.id'))
    test_result = Column(Integer, nullable=False)
    active = Column(Integer)

    def setup(self,
              name,
              description,
              test_id,
              test_result,
              active,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.description = description
        self.test_id = test_id
        self.test_result = test_result
        self.active = active
        if id is not None:
            self.id = id

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## TESTS
class Test(db.Model):
    """A data-quality test definition; file/line record where it came from."""
    __tablename__ = 'test'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText, nullable=False)
    test_group = Column(UnicodeText)
    file = Column(UnicodeText)
    line = Column(Integer)
    test_level = Column(Integer, nullable=False)
    active = Column(Boolean)

    def setup(self,
              name,
              description,
              test_group,
              test_level,
              active,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key.

        Note: file and line are not set here.
        """
        self.name = name
        self.description = description
        self.test_group = test_group
        self.test_level = test_level
        self.active = active
        if id is not None:
            self.id = id

    def __repr__(self):
        return self.name+u', '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## CODELISTS
class Codelist(db.Model):
    """A named codelist (controlled vocabulary) that codes belong to."""
    __tablename__ = 'codelist'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    source = Column(UnicodeText)

    def setup(self,
              name,
              description,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.description = description
        if id is not None:
            self.id = id

    def __repr__(self):
        return self.name+u', '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class CodelistCode(db.Model):
    """One code within a Codelist."""
    __tablename__ = 'codelistcode'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    code = Column(UnicodeText, nullable=False)
    codelist_id = Column(Integer, ForeignKey('codelist.id'), nullable=False)
    source = Column(UnicodeText)

    def setup(self,
              name,
              code,
              codelist_id,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.code = code
        self.codelist_id = codelist_id
        if id is not None:
            self.id = id

    def __repr__(self):
        return self.name+u', '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## INDICATORS
class IndicatorGroup(db.Model):
    """A named grouping of Indicators."""
    __tablename__ = 'indicatorgroup'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)

    def setup(self,
              name,
              description,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.description = description
        if id is not None:
            self.id = id

    def __repr__(self):
        return self.name+u', '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class Indicator(db.Model):
    """An indicator (belonging to an IndicatorGroup) aggregating tests."""
    __tablename__ = 'indicator'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    longdescription = Column(UnicodeText)
    indicatorgroup_id = Column(Integer, ForeignKey('indicatorgroup.id'),
                               nullable=False)
    indicator_type = Column(UnicodeText)
    indicator_category_name = Column(UnicodeText)
    indicator_subcategory_name = Column(UnicodeText)

    def setup(self,
              name,
              description,
              longdescription,
              indicatorgroup_id,
              indicator_type,
              indicator_category_name,
              indicator_subcategory_name,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.description = description
        self.longdescription = longdescription
        self.indicatorgroup_id = indicatorgroup_id
        self.indicator_type = indicator_type
        self.indicator_category_name = indicator_category_name
        self.indicator_subcategory_name = indicator_subcategory_name
        if id is not None:
            self.id = id

    def __repr__(self):
        return self.name+u', '+unicode(self.id)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class IndicatorTest(db.Model):
    """Link table associating an Indicator with a Test."""
    __tablename__ = 'indicatortest'
    id = Column(Integer, primary_key=True)
    indicator_id = Column(Integer, ForeignKey('indicator.id'), nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)

    def setup(self,
              indicator_id,
              test_id,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.indicator_id = indicator_id
        self.test_id = test_id
        if id is not None:
            self.id = id

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class IndicatorInfoType(db.Model):
    """Link table associating an Indicator with an InfoType."""
    __tablename__ = 'indicatorinfotype'
    id = Column(Integer, primary_key=True)
    indicator_id = Column(Integer, ForeignKey('indicator.id'), nullable=False)
    infotype_id = Column(Integer, ForeignKey('info_type.id'), nullable=False)

    def setup(self,
              indicator_id,
              infotype_id,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.indicator_id = indicator_id
        self.infotype_id = infotype_id
        if id is not None:
            self.id = id

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class OrganisationCondition(db.Model):
    """A per-organisation rule altering how a test's results are shown."""
    __tablename__ = 'organisationcondition'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    test_id = Column(Integer, ForeignKey('test.id'), nullable=False)
    operation = Column(Integer) # show (1) or don't show (0) result
    condition = Column(UnicodeText) # activity level, hierarchy 2
    condition_value = Column(UnicodeText) # True, 2, etc.
    description = Column(UnicodeText)
    file = Column(UnicodeText)
    line = Column(Integer)
    active = Column(Boolean)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}


class OrganisationConditionFeedback(db.Model):
    """User feedback about an organisation's conditions.

    NOTE(review): organisation_id is UnicodeText here, not a ForeignKey to
    organisation.id as elsewhere — presumably free-text from a feedback
    form; confirm against the submitting view.
    """
    __tablename__ ='organisationconditionfeedback'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(UnicodeText, nullable=False)
    uses = Column(UnicodeText)
    element = Column(UnicodeText)
    where = Column(UnicodeText)

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
## ORGANISATIONS; RELATIONS WITH PACKAGES
class Organisation(db.Model):
    """An IATI publisher organisation, with spend/currency metadata.

    organisation_code is also used to communicate with implementation
    schedules.
    """
    __tablename__ = 'organisation'
    id = Column(Integer, primary_key=True)
    organisation_name = Column(UnicodeText, nullable=False)
    organisation_code = Column(UnicodeText, nullable=False)
    organisation_total_spend = Column(Float(precision=2))
    organisation_total_spend_source = Column(UnicodeText)
    organisation_currency = Column(UnicodeText)
    organisation_currency_conversion = Column(Float(precision=4))
    organisation_currency_conversion_source = Column(UnicodeText)
    organisation_largest_recipient = Column(UnicodeText)
    organisation_largest_recipient_source = Column(UnicodeText)
    frequency = Column(UnicodeText)
    frequency_comment = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_name'),
                      UniqueConstraint('organisation_code'))

    def setup(self,
              organisation_name,
              organisation_code,
              organisation_total_spend=None,
              organisation_total_spend_source=None,
              organisation_currency=None,
              organisation_currency_conversion=None,
              organisation_currency_conversion_source=None,
              organisation_largest_recipient=None,
              organisation_largest_recipient_source=None,
              id=None):
        """Populate this instance's fields; a non-None id overrides the
        autoincremented primary key.

        Bug fix: the previous version had trailing commas on four
        assignments, which silently stored 1-tuples instead of scalar
        values, and it never stored organisation_total_spend_source or
        organisation_largest_recipient_source at all.
        """
        self.organisation_name = organisation_name
        self.organisation_code = organisation_code
        self.organisation_total_spend = organisation_total_spend
        self.organisation_total_spend_source = organisation_total_spend_source
        self.organisation_currency = organisation_currency
        self.organisation_currency_conversion = organisation_currency_conversion
        self.organisation_currency_conversion_source = organisation_currency_conversion_source
        self.organisation_largest_recipient = organisation_largest_recipient
        self.organisation_largest_recipient_source = organisation_largest_recipient_source
        if id is not None:
            self.id = id

    def as_dict(self):
        """Return a plain dict of all mapped columns (for JSON output)."""
        return {c.name: getattr(self, c.name) for c in self.__table__.columns}
class OrganisationPackage(db.Model):
    """Link table associating an Organisation with a Package."""
    __tablename__ = 'organisationpackage'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    # Optional condition restricting when this association applies.
    condition = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_id', 'package_id', name='_organisation_package_uc'),
                      )

    def setup(self,
              organisation_id,
              package_id,
              condition=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.package_id = package_id
        self.condition = condition
        if id is not None:
            self.id = id


class OrganisationPackageGroup(db.Model):
    """Link table associating an Organisation with a PackageGroup."""
    __tablename__ = 'organisationpackagegroup'
    id = Column(Integer, primary_key=True)
    organisation_id = Column(Integer, ForeignKey('organisation.id'),
                             nullable=False)
    packagegroup_id = Column(Integer, ForeignKey('packagegroup.id'),
                             nullable=False)
    # Optional condition restricting when this association applies.
    condition = Column(UnicodeText)
    __table_args__ = (UniqueConstraint('organisation_id', 'packagegroup_id'),)

    def setup(self,
              organisation_id,
              packagegroup_id,
              condition=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.packagegroup_id = packagegroup_id
        self.condition = condition
        if id is not None:
            self.id = id
## INFORESULTS
# TODO: IMPLEMENT
# ==> total amount of disbursements in this package
# e.g. 1 = total disbursements
class InfoResult(db.Model):
    """A numeric info value computed for a package during one test run."""
    __tablename__ = 'info_result'
    id = Column(Integer, primary_key=True)
    runtime_id = Column(Integer, ForeignKey('runtime.id'), nullable=False)
    package_id = Column(Integer, ForeignKey('package.id'), nullable=False)
    info_id = Column(Integer, ForeignKey('info_type.id'), nullable=False)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))
    result_data = Column(Float)


class InfoType(db.Model):
    """A kind of info value that InfoResult rows can record."""
    __tablename__ = 'info_type'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText, nullable=False)
    description = Column(UnicodeText)
    level = Column(Integer, nullable=False)

    def setup(self,
              name,
              level,
              description=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.level = level
        self.description = description
        if id is not None:
            self.id = id
## USERS
class User(db.Model):
    """An application user with a hashed password (table 'dquser')."""
    __tablename__ = 'dquser'
    id = Column(Integer, primary_key=True)
    username = Column(UnicodeText)
    name = Column(UnicodeText)
    email_address = Column(UnicodeText)
    reset_password_key = Column(UnicodeText)
    pw_hash = db.Column(String(255))

    def setup(self,
              username,
              password,
              name,
              email_address=None,
              id=None):
        """Populate fields, hashing the plaintext password immediately;
        a non-None id overrides the autoincrement key."""
        self.username = username
        self.pw_hash = generate_password_hash(password)
        self.name = name
        self.email_address = email_address
        if id is not None:
            self.id = id

    def check_password(self, password):
        """Return True if the plaintext password matches the stored hash."""
        return check_password_hash(self.pw_hash, password)
## ORGANISATION SURVEYS
class OrganisationSurvey(db.Model):
    """A survey for one organisation, tracking its current workflow stage."""
    __tablename__ = 'organisationsurvey'
    id = Column(Integer,primary_key=True)
    currentworkflow_id = Column(Integer, ForeignKey('workflow.id'))
    currentworkflow_deadline = Column(DateTime)
    organisation_id = Column(Integer, ForeignKey('organisation.id'))

    def setup(self,
              organisation_id,
              currentworkflow_id,
              currentworkflow_deadline=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisation_id = organisation_id
        self.currentworkflow_id = currentworkflow_id
        self.currentworkflow_deadline = currentworkflow_deadline
        if id is not None:
            self.id = id


class OrganisationSurveyData(db.Model):
    """One indicator's answer within an organisation survey."""
    __tablename__ = 'organisationsurveydata'
    id = Column(Integer,primary_key=True)
    organisationsurvey_id = Column(Integer, ForeignKey('organisationsurvey.id'))
    indicator_id = Column(Integer, ForeignKey('indicator.id'))
    workflow_id = Column(Integer, ForeignKey('workflow.id'))
    published_status = Column(Integer, ForeignKey('publishedstatus.id'))
    published_source = Column(UnicodeText)
    published_comment = Column(UnicodeText)
    published_accepted = Column(Integer)

    def setup(self,
              organisationsurvey_id,
              indicator_id,
              workflow_id=None,
              published_status=None,
              published_source=None,
              published_comment=None,
              published_accepted=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.organisationsurvey_id = organisationsurvey_id
        self.workflow_id = workflow_id
        self.indicator_id = indicator_id
        self.published_status = published_status
        self.published_source = published_source
        self.published_comment = published_comment
        self.published_accepted = published_accepted
        if id is not None:
            self.id = id
class PublishedStatus(db.Model):
    """A publication status label plus its CSS/display class."""
    __tablename__ = 'publishedstatus'
    id = Column(Integer, primary_key=True)
    name = Column(UnicodeText)
    publishedstatus_class = Column(UnicodeText)

    def setup(self,
              name,
              publishedstatus_class,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.publishedstatus_class = publishedstatus_class
        if id is not None:
            self.id = id


class Workflow(db.Model):
    """A stage in the survey workflow; leadsto is a self-reference to the
    next stage."""
    __tablename__='workflow'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText)
    title = Column(UnicodeText)
    leadsto = Column(Integer, ForeignKey('workflow.id'))
    workflow_type = Column(Integer, ForeignKey('workflowtype.id'))
    duration = Column(Integer)

    def setup(self,
              name,
              title,
              leadsto,
              workflow_type=None,
              duration=None,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        self.title = title
        self.leadsto = leadsto
        self.workflow_type = workflow_type
        self.duration = duration
        if id is not None:
            self.id = id
# WorkflowType: define what sort of workflow this should be.
# Will initially be hardcoded but this should make it easier
# to expand and define later.
class WorkflowType(db.Model):
    """The kind of a Workflow stage."""
    __tablename__='workflowtype'
    id = Column(Integer,primary_key=True)
    name = Column(UnicodeText)

    def setup(self,
              name,
              id=None):
        """Populate fields; a non-None id overrides the autoincrement key."""
        self.name = name
        if id is not None:
            self.id = id


class WorkflowNotification(db.Model):
    """A notice to emit when a survey moves between two workflow stages."""
    __tablename__='workflownotifications'
    id = Column(Integer, primary_key=True)
    workflow_from = Column(Integer, ForeignKey('workflow.id'))
    workflow_to = Column(Integer, ForeignKey('workflow.id'))
    workflow_notice = Column(UnicodeText)
|
import logging
from time import sleep
from nxt.locator import find_one_brick, BrickNotFoundError
from nxt.motcont import MotCont
from nxt.motor import Motor, PORT_A, PORT_B, BlockedException, PORT_C
class BrickController(object):
    """Controller for a LEGO NXT brick driving a steerable vehicle.

    All state lives on the class so a single brick connection is shared
    process-wide. Steering is on port A; drive motors on ports B and C.
    """
    motCont = None
    brick = None
    brick_found = False
    brick_searching = False
    steering_motor = None
    # Last steering position we commanded, in degrees (signed).
    actual_abs_degs = 0
    main_motors = None
    last_commands = {
        'steering': 0,
        'throttle': 0,
        'reverse': 0,
    }

    STEERING_KEY = 'steering'
    THROTTLE_KEY = 'throttle'
    REVERSE_KEY = 'reverse'

    FULL_SIDE_STEER = 90
    # Seconds needed to turn the steering motor one degree at full power.
    # Bug fix: the old value was additionally multiplied by 1000 (a
    # milliseconds figure), but time.sleep() takes SECONDS, so every
    # steering adjustment blocked roughly 1000x too long.
    DEGREE_TIME = (1/2.95)*(1/360.0)

    @classmethod
    def init_brick(cls):
        """Locate the NXT brick (idempotent). Returns brick availability."""
        if cls.brick_found:
            return cls.brick_found
        if cls.brick_searching:
            # A search is already in flight; don't start a second one.
            logging.warning('Aborting request for brick searching.')
            return cls.brick_found
        logging.info('Starting new brick brick searching.')
        cls.brick_searching = True
        try:
            cls.brick = find_one_brick(debug=True)
        except BrickNotFoundError:
            logging.warning('Brick not found.')
            cls.brick_searching, cls.brick_found = False, False
            return cls.brick_found
        cls.brick_searching, cls.brick_found = False, True
        logging.info('Brick successfully found.')
        cls.init_motors()
        return cls.brick_found

    @classmethod
    def init_motors(cls):
        """Bind motor objects and start the motor-control thread."""
        cls.steering_motor = Motor(cls.brick, PORT_A)
        cls.main_motors = (Motor(cls.brick, PORT_B), Motor(cls.brick, PORT_C))
        cls.motCont = MotCont(cls.brick)
        cls.motCont.start()

    @classmethod
    def process(cls, **commands):
        """Apply a command dict, acting only on the values that changed."""
        if not cls.brick_found:
            return
        if commands.get(cls.STEERING_KEY, 0) != cls.last_commands.get(cls.STEERING_KEY, 0):
            abs_degs = int(commands.get(cls.STEERING_KEY, 0) * cls.FULL_SIDE_STEER)
            cls.set_steering(abs_degs)
        if commands.get(cls.THROTTLE_KEY, 0) != cls.last_commands.get(cls.THROTTLE_KEY, 0) or \
                commands.get(cls.REVERSE_KEY, 0) != cls.last_commands.get(cls.REVERSE_KEY, 0):
            cls.set_throttle(commands.get(cls.THROTTLE_KEY, 0), commands.get(cls.REVERSE_KEY, 0))
        cls.last_commands = commands

    @classmethod
    def set_steering(cls, abs_degs):
        """Steer to the absolute position abs_degs via a timed motor run.

        Dead `if False:` MotCont branch and commented-out turn() attempts
        removed; logging.warn (deprecated alias) replaced by warning.
        """
        if cls.actual_abs_degs == abs_degs:
            logging.warning('OK')
            return
        if abs_degs > cls.actual_abs_degs:
            tacho = 100
        else:
            tacho = -100
        degs = abs(abs_degs - cls.actual_abs_degs)
        logging.info('Steer {} degs to {}.'.format(degs, ('left', 'right')[tacho > 0]))
        try:
            # Run at full power for the time the turn should take, then brake.
            cls.steering_motor.run(tacho)
            sleep(degs*cls.DEGREE_TIME)
            cls.steering_motor.brake()
        except BlockedException:
            logging.warning('Steering motor blocked!')
        # Trust the commanded position rather than reading the tachometer.
        cls.actual_abs_degs = abs_degs

    @classmethod
    def set_throttle(cls, throttle, reverse):
        """Drive the main motors; throttle in <0,100>, reverse in {0, 1}."""
        if reverse not in (0, 1):
            logging.warning('Unknown reverse command, setting to default!')
            reverse = 0
        if throttle > 100 or throttle < 0:
            logging.warning('Unknown throttle command, setting to 0!')
            throttle = 0.0
        if throttle <= 0:
            cls.main_motors[1].idle()
            cls.main_motors[0].idle()
            return
        # Map throttle <0,100> onto motor power <(0.5*127),127>.
        # (Comment fixed: the old comment said 0.7 but the code uses 0.5.)
        motor_throttle = int((0.5 + 0.5 * (throttle / 100.0)) * 127)
        # The two drive motors are mounted mirrored, so they run in
        # opposite signs for the same direction of travel.
        if reverse:
            cls.main_motors[0].run(-motor_throttle)
            cls.main_motors[1].run(motor_throttle)
        else:
            cls.main_motors[0].run(motor_throttle)
            cls.main_motors[1].run(-motor_throttle)

    @classmethod
    def get_state(cls):
        """Return a snapshot of controller state for the caller/UI."""
        return {
            'brick_found': cls.brick_found,
            'brick_searching': cls.brick_searching,
            'steering_motor': cls.steering_motor._get_state().to_list() if cls.steering_motor else []
        }

    def __del__(self):
        # Best-effort shutdown; narrow the old bare `except:` so
        # KeyboardInterrupt/SystemExit are not swallowed.
        print('Brick deleted')
        try:
            BrickController.motCont.stop()
            BrickController.brick.sock.close()
        except Exception:
            pass
server: fix sleep duration units (time.sleep takes seconds, not milliseconds)
import logging
from time import sleep
from nxt.locator import find_one_brick, BrickNotFoundError
from nxt.motcont import MotCont
from nxt.motor import Motor, PORT_A, PORT_B, BlockedException, PORT_C
class BrickController(object):
    """Controller for a LEGO NXT brick driving a steerable vehicle.

    All state lives on the class so a single brick connection is shared
    process-wide. Steering is on port A; drive motors on ports B and C.
    """
    motCont = None
    brick = None
    brick_found = False
    brick_searching = False
    steering_motor = None
    # Last steering position we commanded, in degrees (signed).
    actual_abs_degs = 0
    main_motors = None
    last_commands = {
        'steering': 0,
        'throttle': 0,
        'reverse': 0,
    }

    STEERING_KEY = 'steering'
    THROTTLE_KEY = 'throttle'
    REVERSE_KEY = 'reverse'

    FULL_SIDE_STEER = 90
    # Seconds needed to turn the steering motor one degree at full power
    # (time.sleep() takes seconds, so this must stay a seconds figure).
    DEGREE_TIME = (1/2.95)*(1/360.0)

    @classmethod
    def init_brick(cls):
        """Locate the NXT brick (idempotent). Returns brick availability."""
        if cls.brick_found:
            return cls.brick_found
        if cls.brick_searching:
            # A search is already in flight; don't start a second one.
            logging.warning('Aborting request for brick searching.')
            return cls.brick_found
        logging.info('Starting new brick brick searching.')
        cls.brick_searching = True
        try:
            cls.brick = find_one_brick(debug=True)
        except BrickNotFoundError:
            logging.warning('Brick not found.')
            cls.brick_searching, cls.brick_found = False, False
            return cls.brick_found
        cls.brick_searching, cls.brick_found = False, True
        logging.info('Brick successfully found.')
        cls.init_motors()
        return cls.brick_found

    @classmethod
    def init_motors(cls):
        """Bind motor objects and start the motor-control thread."""
        cls.steering_motor = Motor(cls.brick, PORT_A)
        cls.main_motors = (Motor(cls.brick, PORT_B), Motor(cls.brick, PORT_C))
        cls.motCont = MotCont(cls.brick)
        cls.motCont.start()

    @classmethod
    def process(cls, **commands):
        """Apply a command dict, acting only on the values that changed."""
        if not cls.brick_found:
            return
        if commands.get(cls.STEERING_KEY, 0) != cls.last_commands.get(cls.STEERING_KEY, 0):
            abs_degs = int(commands.get(cls.STEERING_KEY, 0) * cls.FULL_SIDE_STEER)
            cls.set_steering(abs_degs)
        if commands.get(cls.THROTTLE_KEY, 0) != cls.last_commands.get(cls.THROTTLE_KEY, 0) or \
                commands.get(cls.REVERSE_KEY, 0) != cls.last_commands.get(cls.REVERSE_KEY, 0):
            cls.set_throttle(commands.get(cls.THROTTLE_KEY, 0), commands.get(cls.REVERSE_KEY, 0))
        cls.last_commands = commands

    @classmethod
    def set_steering(cls, abs_degs):
        """Steer to the absolute position abs_degs via a timed motor run.

        Dead `if False:` MotCont branch and commented-out turn() attempts
        removed; logging.warn (deprecated alias) replaced by warning.
        """
        if cls.actual_abs_degs == abs_degs:
            logging.warning('OK')
            return
        if abs_degs > cls.actual_abs_degs:
            tacho = 100
        else:
            tacho = -100
        degs = abs(abs_degs - cls.actual_abs_degs)
        logging.info('Steer {} degs to {}.'.format(degs, ('left', 'right')[tacho > 0]))
        try:
            # Run at full power for the time the turn should take, then brake.
            cls.steering_motor.run(tacho)
            sleep(degs*cls.DEGREE_TIME)
            cls.steering_motor.brake()
        except BlockedException:
            logging.warning('Steering motor blocked!')
        # Trust the commanded position rather than reading the tachometer.
        cls.actual_abs_degs = abs_degs

    @classmethod
    def set_throttle(cls, throttle, reverse):
        """Drive the main motors; throttle in <0,100>, reverse in {0, 1}."""
        if reverse not in (0, 1):
            logging.warning('Unknown reverse command, setting to default!')
            reverse = 0
        if throttle > 100 or throttle < 0:
            logging.warning('Unknown throttle command, setting to 0!')
            throttle = 0.0
        if throttle <= 0:
            cls.main_motors[1].idle()
            cls.main_motors[0].idle()
            return
        # Map throttle <0,100> onto motor power <(0.5*127),127>.
        # (Comment fixed: the old comment said 0.7 but the code uses 0.5.)
        motor_throttle = int((0.5 + 0.5 * (throttle / 100.0)) * 127)
        # The two drive motors are mounted mirrored, so they run in
        # opposite signs for the same direction of travel.
        if reverse:
            cls.main_motors[0].run(-motor_throttle)
            cls.main_motors[1].run(motor_throttle)
        else:
            cls.main_motors[0].run(motor_throttle)
            cls.main_motors[1].run(-motor_throttle)

    @classmethod
    def get_state(cls):
        """Return a snapshot of controller state for the caller/UI."""
        return {
            'brick_found': cls.brick_found,
            'brick_searching': cls.brick_searching,
            'steering_motor': cls.steering_motor._get_state().to_list() if cls.steering_motor else []
        }

    def __del__(self):
        # Best-effort shutdown; narrow the old bare `except:` so
        # KeyboardInterrupt/SystemExit are not swallowed.
        print('Brick deleted')
        try:
            BrickController.motCont.stop()
            BrickController.brick.sock.close()
        except Exception:
            pass
AL-1367 bump version number
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012 Hayaki Saito <user@zuse.jp>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ***** END LICENSE BLOCK *****
import tff
from mouse import IFocusListener, IMouseListener, MouseDecoder
from interface import IInnerFrame, IInnerFrameListener
from output import Canossa
from screen import Screen
# Result codes returned by IMouseListenerImpl._hittest(): which part of the
# frame (if any) an outer-screen coordinate falls on.
_HITTEST_NONE = 0
_HITTEST_CLIENTAREA = 1
_HITTEST_TITLEBAR = 2
_HITTEST_FRAME_LEFT = 3
_HITTEST_FRAME_TOP = 4
_HITTEST_FRAME_RIGHT = 5
_HITTEST_FRAME_BOTTOM = 6
_HITTEST_FRAME_TOPLEFT = 7
_HITTEST_FRAME_TOPRIGHT = 8
_HITTEST_FRAME_BOTTOMLEFT = 9
_HITTEST_FRAME_BOTTOMRIGHT = 10
_HITTEST_BUTTON_CLOSE = 11
# SGR color sequences used to paint the titlebar in each state.
_TITLESTYLE_INACTIVE = '\x1b[30;47m'  # black on white
_TITLESTYLE_ACTIVE = '\x1b[30;42m'  # black on green
_TITLESTYLE_HOVER = '\x1b[30;46m'  # black on cyan
_TITLESTYLE_DRAG = '\x1b[30;43m'  # black on yellow
# Kind of drag currently in progress (moving or resizing the frame).
_DRAGTYPE_NONE = 0
_DRAGTYPE_TITLEBAR = 1
_DRAGTYPE_BOTTOMRIGHT = 2
_DRAGTYPE_BOTTOMLEFT = 3
_DRAGTYPE_BOTTOM = 4
_DRAGTYPE_LEFT = 5
_DRAGTYPE_RIGHT = 6
# Which frame element the pointer currently hovers over (for highlighting).
_HOVERTYPE_NONE = 0
_HOVERTYPE_TITLEBAR = 1
_HOVERTYPE_BOTTOMRIGHT = 2
_HOVERTYPE_BOTTOMLEFT = 3
_HOVERTYPE_BOTTOM = 4
_HOVERTYPE_LEFT = 5
_HOVERTYPE_RIGHT = 6
_HOVERTYPE_BUTTON_CLOSE = 7
class IFocusListenerImpl(IFocusListener):
    """IFocusListener implementation: reflects focus changes in the
    titlebar style used by InnerFrame.draw().

    (The former ``\"\"\" IFocusListener implementation \"\"\"`` string sat after
    __init__ and was a discarded expression; promoted to a real docstring.)
    """

    def __init__(self):
        pass

    def onfocusin(self):
        # highlight the titlebar while the frame owns the focus
        self._titlestyle = _TITLESTYLE_ACTIVE

    def onfocusout(self):
        self._titlestyle = _TITLESTYLE_INACTIVE
class IMouseListenerImpl(IMouseListener):
    """IMouseListener implementation mixed into InnerFrame.

    Hit-tests outer-screen mouse coordinates against the frame decorations
    (titlebar, borders, close button) and forwards client-area events to the
    inner terminal re-encoded as xterm mouse reports (CSI M Cb Cx Cy).
    Also drives window move/resize through the drag state machine.
    """

    def __init__(self):
        # last hit-test result; reused by events that do not re-test
        self._lasthittest = None
        self._dragtype = _DRAGTYPE_NONE
        self._hovertype = _HOVERTYPE_NONE
        self._dragpos = None
        self._titlestyle = _TITLESTYLE_INACTIVE

    def onmousedown(self, context, x, y):
        """Focus/blur the frame; forward presses inside the client area."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            self._window.blur()
            self._session.blur_subprocess(self._tty)
            return False
        if not self._window.is_active():
            self._window.focus()
            self._session.focus_subprocess(self._tty)
            self._titlestyle = _TITLESTYLE_ACTIVE
            return True
        if hittest == _HITTEST_CLIENTAREA:
            # translate to client coordinates, then to the xterm mouse
            # encoding (1-based position + 32)
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (0 + 32, x, y))
        return True

    def onmouseup(self, context, x, y):
        """Forward button release to the client area when active."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (3 + 32, x, y))
        return True

    def onclick(self, context, x, y):
        """Handle clicks; a click on the close button tears the frame down."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            return False
        if hittest == _HITTEST_BUTTON_CLOSE:
            self._window.close()
            self._listener.onclose(self, context)
            self.close()
            return True
        return True

    def ondoubleclick(self, context, x, y):
        """Consume double clicks that land on the frame."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        return True

    def onmousehover(self, context, x, y):
        """Track the hovered frame element and update highlight styles."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            self._hovertype = _HOVERTYPE_NONE
            return False
        if hittest == _HITTEST_CLIENTAREA:
            if self._window.is_active():
                x -= self.left + self.offset_left
                y -= self.top + self.offset_top
                x += 33
                y += 33
                if x < 0x80 and y < 0x80:
                    context.puts(u"\x1b[M%c%c%c" % (32 + 32, x, y))
                self._titlestyle = _TITLESTYLE_ACTIVE
                self._hovertype = _HOVERTYPE_NONE
        elif hittest == _HITTEST_TITLEBAR:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_TITLEBAR
        elif hittest == _HITTEST_FRAME_BOTTOMLEFT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOMLEFT
        elif hittest == _HITTEST_FRAME_BOTTOMRIGHT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOMRIGHT
        elif hittest == _HITTEST_FRAME_BOTTOM:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOM
        elif hittest == _HITTEST_FRAME_LEFT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_LEFT
        elif hittest == _HITTEST_FRAME_RIGHT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_RIGHT
        elif hittest == _HITTEST_BUTTON_CLOSE:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BUTTON_CLOSE
        else:
            self._titlestyle = _TITLESTYLE_ACTIVE
            self._hovertype = _HOVERTYPE_NONE
        return True

    """ scroll """
    def onscrolldown(self, context, x, y):
        """Forward wheel-down to the client area (xterm button 64)."""
        # NOTE: a no-op `self._lasthittest = hittest` self-assignment removed
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (64 + 32, x, y))
        return True

    def onscrollup(self, context, x, y):
        """Forward wheel-up to the client area (xterm button 65)."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        elif hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (65 + 32, x, y))
        return True

    """ drag and drop """
    def ondragstart(self, s, x, y):
        """Begin a move (titlebar) or resize (border/corner) drag."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_TITLEBAR:
            self._dragtype = _DRAGTYPE_TITLEBAR
            self._dragpos = (x, y)
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOMLEFT:
            self._dragtype = _DRAGTYPE_BOTTOMLEFT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOMRIGHT:
            self._dragtype = _DRAGTYPE_BOTTOMRIGHT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOM:
            self._dragtype = _DRAGTYPE_BOTTOM
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_LEFT:
            self._dragtype = _DRAGTYPE_LEFT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_RIGHT:
            self._dragtype = _DRAGTYPE_RIGHT
            self._titlestyle = _TITLESTYLE_DRAG
        return True

    def ondragend(self, s, x, y):
        """Commit the drag offset into the frame position and reset state."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if self._dragtype == _DRAGTYPE_NONE:
            return True
        if not self._window.is_active():
            return True
        self.left += self.offset_left
        self.top += self.offset_top
        self.offset_left = 0
        self.offset_top = 0
        self._dragtype = _DRAGTYPE_NONE
        self._dragpos = None
        self._titlestyle = _TITLESTYLE_ACTIVE
        # removed: `self._dragstype = _DRAGTYPE_NONE` -- a typo for _dragtype,
        # which is already reset above; the misspelled attribute is never read
        return True

    def ondragmove(self, context, x, y):
        """Apply an in-progress drag: move or resize the frame."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if self._dragtype == _DRAGTYPE_NONE:
            return False
        if not self._window.is_active():
            return True
        if self._dragtype == _DRAGTYPE_TITLEBAR:
            # move: clamp the offset so the frame stays on the outer screen
            origin_x, origin_y = self._dragpos
            offset_x = x - origin_x
            offset_y = y - origin_y
            screen = self._outerscreen
            innerscreen = self.innerscreen
            width = innerscreen.width + 2
            height = innerscreen.height + 2
            if self.left + width + offset_x < 1:
                offset_x = 1 - self.left - width
            elif self.left + offset_x > screen.width - 1:
                offset_x = screen.width - self.left - 1
            if self.top + height + offset_y < 1:
                offset_y = 1 - self.top - height
            elif self.top + offset_y > screen.height - 1:
                offset_y = screen.height - self.top - 1
            self.offset_left = offset_x
            self.offset_top = offset_y
            left = self.left + self.offset_left - 1
            top = self.top + self.offset_top - 1
            width = innerscreen.width + 2
            height = innerscreen.height + 2
            self._window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOMRIGHT:
            # resize by the bottom-right corner (minimum 5 rows x 8 cols)
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = max(y - top, 5)
            col = max(x - left, 8)
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOMLEFT:
            # resize by the bottom-left corner: left edge moves, right stays
            screen = self.innerscreen
            window = self._window
            left = min(max(x, 0), self.left + screen.width - 10)
            top = self.top
            row = max(y - top, 5)
            col = self.left + screen.width - left + 1
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 2
            top -= 1
            width = col + 2
            height = row + 2
            self.left = left + 1
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOM:
            # resize vertically only
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = max(y - top, 5)
            col = screen.width
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_LEFT:
            # resize horizontally by moving the left edge
            screen = self.innerscreen
            outerscreen = self._outerscreen
            window = self._window
            left = min(max(x, 0), self.left + screen.width - 10)
            top = self.top
            row = screen.height
            col = self.left + screen.width - left + 1
            left -= 2
            top -= 1
            width = col + 2
            height = row + 2
            if left > outerscreen.width - 1:
                return
            screen.resize(row, col)
            self._tty.resize(row, col)
            self.left = left + 1
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_RIGHT:
            # resize horizontally by moving the right edge
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = screen.height
            col = max(x - left, 8)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            screen.resize(row, col)
            self._tty.resize(row, col)
            window.realloc(left, top, width, height)
        else:
            # defensive fallback; unreachable for the drag types listed above
            hittest = self._hittest(x, y)
            self._lasthittest = hittest
            if self._dragtype == _DRAGTYPE_NONE:
                return False
            elif hittest == _HITTEST_CLIENTAREA:
                x -= self.left + self.offset_left
                y -= self.top + self.offset_top
                x += 33
                y += 33
                if x < 0x80 and y < 0x80:
                    context.puts("\x1b[M%c%c%c" % (32 + 32, x, y))
        return True

    def _get_left(self):
        # outer-screen column of the frame's left border
        return self.left + self.offset_left - 1

    def _get_right(self):
        # one past the frame's right border
        return self.left + self.offset_left + self.innerscreen.width + 1

    def _get_top(self):
        # outer-screen row of the titlebar
        return self.top + self.offset_top - 1

    def _get_bottom(self):
        # one past the frame's bottom border
        return self.top + self.offset_top + self.innerscreen.height + 1

    def _hittest(self, x, y):
        """Classify an outer-screen coordinate against the frame geometry."""
        # removed unused local `screen = self.innerscreen`
        left = self._get_left()
        top = self._get_top()
        right = self._get_right()
        bottom = self._get_bottom()
        if x < left:
            return _HITTEST_NONE
        if x > right - 1:
            return _HITTEST_NONE
        if y < top:
            return _HITTEST_NONE
        if y > bottom - 1:
            return _HITTEST_NONE
        if y == top:
            # the close button occupies the cell just left of the top-right corner
            if x == right - 2:
                return _HITTEST_BUTTON_CLOSE
            if x >= left and x <= right:
                return _HITTEST_TITLEBAR
        if y == bottom - 1:
            if x == left:
                return _HITTEST_FRAME_BOTTOMLEFT
            if x == right - 1:
                return _HITTEST_FRAME_BOTTOMRIGHT
            return _HITTEST_FRAME_BOTTOM
        if x == left:
            return _HITTEST_FRAME_LEFT
        if x == right - 1:
            return _HITTEST_FRAME_RIGHT
        return _HITTEST_CLIENTAREA
class InnerFrame(tff.DefaultHandler,
                 IInnerFrame,
                 IMouseListenerImpl,
                 IFocusListenerImpl):  # aggregate mouse and focus listener
    """A movable, resizable terminal frame drawn inside the outer screen.

    Owns an inner Screen plus the subprocess tty attached to it, and renders
    a one-cell decoration (titlebar with close button, side/bottom borders)
    around the client area.
    """

    def __init__(self, session, listener, inputhandler, screen,
                 top, left, row, col,
                 command, termenc, termprop):
        IMouseListenerImpl.__init__(self)
        IFocusListenerImpl.__init__(self)
        self.enabled = True
        innerscreen = Screen(row, col, 0, 0, termenc, termprop)
        canossa = Canossa(innerscreen, visibility=False)
        self._session = session
        window = screen.create_window(self)
        # the window is one cell larger on every side than the client area
        window.alloc(left - 1, top - 1, col + 2, row + 2)
        self._window = window
        self.top = top
        self.left = left
        self.offset_top = 0
        self.offset_left = 0
        self._termprop = termprop
        self.innerscreen = innerscreen
        self._outerscreen = screen
        self._listener = listener
        self._inputhandler = inputhandler
        self._tty = session.add_subtty('xterm', 'ja_JP.UTF-8',
                                       command, row, col, termenc,
                                       self, canossa, self)
        self._title = command

    # --- tff.EventObserver overrides -----------------------------------
    def handle_end(self, context):
        """Subprocess terminated: close the window and notify the listener."""
        self._window.close()
        self._listener.onclose(self, context)

    def handle_csi(self, context, parameter, intermediate, final):
        """Delegate CSI sequences to the input handler."""
        if self._inputhandler.handle_csi(context, parameter, intermediate, final):
            return True
        return False

    def handle_char(self, context, c):
        """Delegate single characters to the input handler."""
        if self._inputhandler.handle_char(context, c):
            return True
        return False

    def moveto(self, row, col):
        """Move the cursor (1-based), rejecting off-screen coordinates."""
        if col >= self._outerscreen.width + 1:
            raise Exception("range error col=%s" % col)
        if row >= self._outerscreen.height + 1:
            raise Exception("range error row=%s" % row)
        if row < 1:
            raise Exception("range error row=%s" % row)
        if col < 1:
            raise Exception("range error col=%s" % col)
        self._window.write('\x1b[%d;%dH' % (row, col))

    # --- IWidget override ----------------------------------------------
    def draw(self, region):
        """Render the titlebar, borders, client contents and cursor into
        the dirty *region*."""
        if self.enabled:
            window = self._window
            screen = self.innerscreen
            outerscreen = self._outerscreen
            left = self.left + self.offset_left
            top = self.top + self.offset_top
            width = screen.width
            height = screen.height
            dirtyregion = region.add(left - 1, top - 1, width + 2, height + 2)
            # draw the title
            termprop = self._termprop
            title_length = termprop.wcswidth(self._title)
            width = screen.width + 2
            if title_length < width - 11:
                pad_left = (width - title_length) / 2
                pad_right = width - title_length - pad_left
                title = ' ' * pad_left + self._title + ' ' * (pad_right - 3) + '[x]'
            elif width > 10:
                # too long: truncate with an ellipsis, keep the close button
                title = ' ' + self._title[0:width - 2 - 9] + u'... [x]'
            else:
                title = ' ' * (width - 3) + '[x]'
            window.write('\x1b[?25l')
            window.write(self._titlestyle)
            dirtyrange = dirtyregion[top - 1]
            if not dirtyrange:
                return
            dirty_left = min(dirtyrange)
            if dirty_left < left - 1:
                dirty_left = left - 1
            if dirty_left < 0:
                dirty_left = 0
            dirty_right = max(dirtyrange) + 1
            if dirty_right > left + screen.width + 1:
                dirty_right = left + screen.width + 1
            if dirty_right > outerscreen.width:
                dirty_right = outerscreen.width
            n = left - 1
            for c in title:
                length = termprop.wcwidth(ord(c))
                if n >= outerscreen.width:
                    break
                if n >= dirty_right:
                    break
                if n == dirty_left:
                    self.moveto(top, n + 1)
                if n >= dirty_left:
                    if n in dirtyrange:
                        if n == left + width - 4 and self._hovertype == _HOVERTYPE_BUTTON_CLOSE:
                            window.write('\x1b[37m')
                        window.write(c)
                    else:
                        # cell is clean: skip over it instead of redrawing
                        window.write("\x1b[%dC" % length)
                n += length
            window.write('\x1b[m')
            # draw the frame contents row by row
            for index in xrange(0, height):
                if top + index < outerscreen.height:
                    if top + index >= 0:
                        dirtyrange = dirtyregion[top + index]
                        if dirtyrange:
                            dirty_left = min(dirtyrange)
                            if dirty_left < 0:
                                dirty_left = 0
                            if dirty_left < left:
                                dirty_left = left
                            dirty_right = max(dirtyrange)
                            if dirty_right > outerscreen.width:
                                dirty_right = outerscreen.width
                            if dirty_right > left + screen.width + 1:
                                dirty_right = left + screen.width + 1
                            dirty_width = dirty_right - dirty_left
                            if dirty_width < 0:
                                continue
                            # draw the left frame edge
                            if left > 0 and left >= dirty_left and left - 1 < outerscreen.width:
                                row = top + index
                                col = left - 1
                                self.moveto(row + 1, col + 1)
                                if self._dragtype == _DRAGTYPE_LEFT:
                                    window.write('\x1b[41m')
                                elif self._hovertype == _HOVERTYPE_LEFT:
                                    window.write('\x1b[43m')
                                window.write('|')
                                window.write('\x1b[m')
                            # draw the client contents
                            screen.copyrect(window, dirty_left - left, index, dirty_width, 1,
                                            dirty_left, top + index, lazy=True)
                            # draw the right frame edge
                            col = left + screen.width
                            if col <= dirty_right and col < outerscreen.width:
                                row = top + index
                                self.moveto(row + 1, col + 1)
                                if self._dragtype == _DRAGTYPE_RIGHT:
                                    window.write('\x1b[41m')
                                elif self._hovertype == _HOVERTYPE_RIGHT:
                                    window.write('\x1b[43m')
                                window.write('|')
                                window.write('\x1b[m')
            # draw the bottom frame
            if top + height < outerscreen.height:
                # NOTE(review): reuses the final loop `index`; presumably
                # guards the offscreen-top case -- confirm intent
                if top + index >= 0:
                    dirtyrange = dirtyregion[top + height]
                    # FIX: guard against an empty range -- min()/max() below
                    # raised ValueError when the bottom row had no dirty cells
                    if dirtyrange:
                        dirty_left = min(dirtyrange)
                        if dirty_left < left - 1:
                            dirty_left = left - 1
                        if dirty_left < 0:
                            dirty_left = 0
                        dirty_right = max(dirtyrange) + 1
                        if dirty_right > left + screen.width + 1:
                            dirty_right = left + screen.width + 1
                        if dirty_right > outerscreen.width:
                            dirty_right = outerscreen.width
                        window.write('\x1b[m')
                        self.moveto(top + height + 1, dirty_left + 1)
                        n = left - 1
                        if n >= 0 and n >= dirty_left:
                            # bottom-left corner
                            if self._dragtype == _DRAGTYPE_BOTTOMLEFT:
                                window.write('\x1b[41m')
                            elif self._hovertype == _HOVERTYPE_BOTTOMLEFT:
                                window.write('\x1b[43m')
                            else:
                                window.write('\x1b[m')
                            window.write('+')
                            n += 1
                        if self._dragtype == _DRAGTYPE_BOTTOM:
                            window.write('\x1b[41m')
                        elif self._hovertype == _HOVERTYPE_BOTTOM:
                            window.write('\x1b[43m')
                        else:
                            window.write('\x1b[m')
                        while True:
                            if n >= dirty_right - 1:
                                if n == left + screen.width:
                                    # bottom-right corner
                                    if self._dragtype == _DRAGTYPE_BOTTOMRIGHT:
                                        window.write('\x1b[41m')
                                    elif self._hovertype == _HOVERTYPE_BOTTOMRIGHT:
                                        window.write('\x1b[43m')
                                    else:
                                        window.write('\x1b[m')
                                    window.write('+')
                                break
                            if n >= outerscreen.width:
                                break
                            n += 1
                            if n < dirty_left + 1:
                                continue
                            window.write('-')
            window.write('\x1b[?25h')
            # draw the cursor only when it lies inside the outer screen.
            # FIX: the original compared `row` against outerscreen.width and
            # duplicated the row checks; rows bound to height, cols to width.
            cursor = screen.cursor
            row = cursor.row + top + 1
            col = cursor.col + left + 1
            if row < 1 or row > outerscreen.height:
                return
            if col < 1 or col > outerscreen.width:
                return
            cursor.draw(window)
            self.moveto(row, col)

    def close(self):
        """Destroy the subprocess attached to this frame."""
        session = self._session
        fd = self._tty.fileno()
        session.destruct_subprocess(fd)
def test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
# Allow running the module directly to execute its doctests.
if __name__ == "__main__":
    test()
Fix for an exception caused by applying min() to empty set
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012 Hayaki Saito <user@zuse.jp>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# ***** END LICENSE BLOCK *****
import tff
from mouse import IFocusListener, IMouseListener, MouseDecoder
from interface import IInnerFrame, IInnerFrameListener
from output import Canossa
from screen import Screen
# Result codes returned by IMouseListenerImpl._hittest(): which part of the
# frame (if any) an outer-screen coordinate falls on.
_HITTEST_NONE = 0
_HITTEST_CLIENTAREA = 1
_HITTEST_TITLEBAR = 2
_HITTEST_FRAME_LEFT = 3
_HITTEST_FRAME_TOP = 4
_HITTEST_FRAME_RIGHT = 5
_HITTEST_FRAME_BOTTOM = 6
_HITTEST_FRAME_TOPLEFT = 7
_HITTEST_FRAME_TOPRIGHT = 8
_HITTEST_FRAME_BOTTOMLEFT = 9
_HITTEST_FRAME_BOTTOMRIGHT = 10
_HITTEST_BUTTON_CLOSE = 11
# SGR color sequences used to paint the titlebar in each state.
_TITLESTYLE_INACTIVE = '\x1b[30;47m'  # black on white
_TITLESTYLE_ACTIVE = '\x1b[30;42m'  # black on green
_TITLESTYLE_HOVER = '\x1b[30;46m'  # black on cyan
_TITLESTYLE_DRAG = '\x1b[30;43m'  # black on yellow
# Kind of drag currently in progress (moving or resizing the frame).
_DRAGTYPE_NONE = 0
_DRAGTYPE_TITLEBAR = 1
_DRAGTYPE_BOTTOMRIGHT = 2
_DRAGTYPE_BOTTOMLEFT = 3
_DRAGTYPE_BOTTOM = 4
_DRAGTYPE_LEFT = 5
_DRAGTYPE_RIGHT = 6
# Which frame element the pointer currently hovers over (for highlighting).
_HOVERTYPE_NONE = 0
_HOVERTYPE_TITLEBAR = 1
_HOVERTYPE_BOTTOMRIGHT = 2
_HOVERTYPE_BOTTOMLEFT = 3
_HOVERTYPE_BOTTOM = 4
_HOVERTYPE_LEFT = 5
_HOVERTYPE_RIGHT = 6
_HOVERTYPE_BUTTON_CLOSE = 7
class IFocusListenerImpl(IFocusListener):
    """IFocusListener implementation: reflects focus changes in the
    titlebar style used by InnerFrame.draw().

    (The former ``\"\"\" IFocusListener implementation \"\"\"`` string sat after
    __init__ and was a discarded expression; promoted to a real docstring.)
    """

    def __init__(self):
        pass

    def onfocusin(self):
        # highlight the titlebar while the frame owns the focus
        self._titlestyle = _TITLESTYLE_ACTIVE

    def onfocusout(self):
        self._titlestyle = _TITLESTYLE_INACTIVE
class IMouseListenerImpl(IMouseListener):
    """IMouseListener implementation mixed into InnerFrame.

    Hit-tests outer-screen mouse coordinates against the frame decorations
    (titlebar, borders, close button) and forwards client-area events to the
    inner terminal re-encoded as xterm mouse reports (CSI M Cb Cx Cy).
    Also drives window move/resize through the drag state machine.
    """

    def __init__(self):
        # last hit-test result; reused by events that do not re-test
        self._lasthittest = None
        self._dragtype = _DRAGTYPE_NONE
        self._hovertype = _HOVERTYPE_NONE
        self._dragpos = None
        self._titlestyle = _TITLESTYLE_INACTIVE

    def onmousedown(self, context, x, y):
        """Focus/blur the frame; forward presses inside the client area."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            self._window.blur()
            self._session.blur_subprocess(self._tty)
            return False
        if not self._window.is_active():
            self._window.focus()
            self._session.focus_subprocess(self._tty)
            self._titlestyle = _TITLESTYLE_ACTIVE
            return True
        if hittest == _HITTEST_CLIENTAREA:
            # translate to client coordinates, then to the xterm mouse
            # encoding (1-based position + 32)
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (0 + 32, x, y))
        return True

    def onmouseup(self, context, x, y):
        """Forward button release to the client area when active."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (3 + 32, x, y))
        return True

    def onclick(self, context, x, y):
        """Handle clicks; a click on the close button tears the frame down."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            return False
        if hittest == _HITTEST_BUTTON_CLOSE:
            self._window.close()
            self._listener.onclose(self, context)
            self.close()
            return True
        return True

    def ondoubleclick(self, context, x, y):
        """Consume double clicks that land on the frame."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        return True

    def onmousehover(self, context, x, y):
        """Track the hovered frame element and update highlight styles."""
        hittest = self._hittest(x, y)
        self._lasthittest = hittest
        if hittest == _HITTEST_NONE:
            self._hovertype = _HOVERTYPE_NONE
            return False
        if hittest == _HITTEST_CLIENTAREA:
            if self._window.is_active():
                x -= self.left + self.offset_left
                y -= self.top + self.offset_top
                x += 33
                y += 33
                if x < 0x80 and y < 0x80:
                    context.puts(u"\x1b[M%c%c%c" % (32 + 32, x, y))
                self._titlestyle = _TITLESTYLE_ACTIVE
                self._hovertype = _HOVERTYPE_NONE
        elif hittest == _HITTEST_TITLEBAR:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_TITLEBAR
        elif hittest == _HITTEST_FRAME_BOTTOMLEFT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOMLEFT
        elif hittest == _HITTEST_FRAME_BOTTOMRIGHT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOMRIGHT
        elif hittest == _HITTEST_FRAME_BOTTOM:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BOTTOM
        elif hittest == _HITTEST_FRAME_LEFT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_LEFT
        elif hittest == _HITTEST_FRAME_RIGHT:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_RIGHT
        elif hittest == _HITTEST_BUTTON_CLOSE:
            self._titlestyle = _TITLESTYLE_HOVER
            self._hovertype = _HOVERTYPE_BUTTON_CLOSE
        else:
            self._titlestyle = _TITLESTYLE_ACTIVE
            self._hovertype = _HOVERTYPE_NONE
        return True

    """ scroll """
    def onscrolldown(self, context, x, y):
        """Forward wheel-down to the client area (xterm button 64)."""
        # NOTE: a no-op `self._lasthittest = hittest` self-assignment removed
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (64 + 32, x, y))
        return True

    def onscrollup(self, context, x, y):
        """Forward wheel-up to the client area (xterm button 65)."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        elif hittest == _HITTEST_CLIENTAREA:
            x -= self.left + self.offset_left
            y -= self.top + self.offset_top
            x += 33
            y += 33
            if x < 0x80 and y < 0x80:
                context.puts(u'\x1b[M%c%c%c' % (65 + 32, x, y))
        return True

    """ drag and drop """
    def ondragstart(self, s, x, y):
        """Begin a move (titlebar) or resize (border/corner) drag."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if not self._window.is_active():
            return True
        if hittest == _HITTEST_TITLEBAR:
            self._dragtype = _DRAGTYPE_TITLEBAR
            self._dragpos = (x, y)
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOMLEFT:
            self._dragtype = _DRAGTYPE_BOTTOMLEFT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOMRIGHT:
            self._dragtype = _DRAGTYPE_BOTTOMRIGHT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_BOTTOM:
            self._dragtype = _DRAGTYPE_BOTTOM
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_LEFT:
            self._dragtype = _DRAGTYPE_LEFT
            self._titlestyle = _TITLESTYLE_DRAG
        elif hittest == _HITTEST_FRAME_RIGHT:
            self._dragtype = _DRAGTYPE_RIGHT
            self._titlestyle = _TITLESTYLE_DRAG
        return True

    def ondragend(self, s, x, y):
        """Commit the drag offset into the frame position and reset state."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if self._dragtype == _DRAGTYPE_NONE:
            return True
        if not self._window.is_active():
            return True
        self.left += self.offset_left
        self.top += self.offset_top
        self.offset_left = 0
        self.offset_top = 0
        self._dragtype = _DRAGTYPE_NONE
        self._dragpos = None
        self._titlestyle = _TITLESTYLE_ACTIVE
        # removed: `self._dragstype = _DRAGTYPE_NONE` -- a typo for _dragtype,
        # which is already reset above; the misspelled attribute is never read
        return True

    def ondragmove(self, context, x, y):
        """Apply an in-progress drag: move or resize the frame."""
        hittest = self._lasthittest
        if hittest == _HITTEST_NONE:
            return False
        if self._dragtype == _DRAGTYPE_NONE:
            return False
        if not self._window.is_active():
            return True
        if self._dragtype == _DRAGTYPE_TITLEBAR:
            # move: clamp the offset so the frame stays on the outer screen
            origin_x, origin_y = self._dragpos
            offset_x = x - origin_x
            offset_y = y - origin_y
            screen = self._outerscreen
            innerscreen = self.innerscreen
            width = innerscreen.width + 2
            height = innerscreen.height + 2
            if self.left + width + offset_x < 1:
                offset_x = 1 - self.left - width
            elif self.left + offset_x > screen.width - 1:
                offset_x = screen.width - self.left - 1
            if self.top + height + offset_y < 1:
                offset_y = 1 - self.top - height
            elif self.top + offset_y > screen.height - 1:
                offset_y = screen.height - self.top - 1
            self.offset_left = offset_x
            self.offset_top = offset_y
            left = self.left + self.offset_left - 1
            top = self.top + self.offset_top - 1
            width = innerscreen.width + 2
            height = innerscreen.height + 2
            self._window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOMRIGHT:
            # resize by the bottom-right corner (minimum 5 rows x 8 cols)
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = max(y - top, 5)
            col = max(x - left, 8)
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOMLEFT:
            # resize by the bottom-left corner: left edge moves, right stays
            screen = self.innerscreen
            window = self._window
            left = min(max(x, 0), self.left + screen.width - 10)
            top = self.top
            row = max(y - top, 5)
            col = self.left + screen.width - left + 1
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 2
            top -= 1
            width = col + 2
            height = row + 2
            self.left = left + 1
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_BOTTOM:
            # resize vertically only
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = max(y - top, 5)
            col = screen.width
            screen.resize(row, col)
            self._tty.resize(row, col)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_LEFT:
            # resize horizontally by moving the left edge
            screen = self.innerscreen
            outerscreen = self._outerscreen
            window = self._window
            left = min(max(x, 0), self.left + screen.width - 10)
            top = self.top
            row = screen.height
            col = self.left + screen.width - left + 1
            left -= 2
            top -= 1
            width = col + 2
            height = row + 2
            if left > outerscreen.width - 1:
                return
            screen.resize(row, col)
            self._tty.resize(row, col)
            self.left = left + 1
            window.realloc(left, top, width, height)
        elif self._dragtype == _DRAGTYPE_RIGHT:
            # resize horizontally by moving the right edge
            screen = self.innerscreen
            window = self._window
            left = self.left
            top = self.top
            row = screen.height
            col = max(x - left, 8)
            left -= 1
            top -= 1
            width = col + 2
            height = row + 2
            screen.resize(row, col)
            self._tty.resize(row, col)
            window.realloc(left, top, width, height)
        else:
            # defensive fallback; unreachable for the drag types listed above
            hittest = self._hittest(x, y)
            self._lasthittest = hittest
            if self._dragtype == _DRAGTYPE_NONE:
                return False
            elif hittest == _HITTEST_CLIENTAREA:
                x -= self.left + self.offset_left
                y -= self.top + self.offset_top
                x += 33
                y += 33
                if x < 0x80 and y < 0x80:
                    context.puts("\x1b[M%c%c%c" % (32 + 32, x, y))
        return True

    def _get_left(self):
        # outer-screen column of the frame's left border
        return self.left + self.offset_left - 1

    def _get_right(self):
        # one past the frame's right border
        return self.left + self.offset_left + self.innerscreen.width + 1

    def _get_top(self):
        # outer-screen row of the titlebar
        return self.top + self.offset_top - 1

    def _get_bottom(self):
        # one past the frame's bottom border
        return self.top + self.offset_top + self.innerscreen.height + 1

    def _hittest(self, x, y):
        """Classify an outer-screen coordinate against the frame geometry."""
        # removed unused local `screen = self.innerscreen`
        left = self._get_left()
        top = self._get_top()
        right = self._get_right()
        bottom = self._get_bottom()
        if x < left:
            return _HITTEST_NONE
        if x > right - 1:
            return _HITTEST_NONE
        if y < top:
            return _HITTEST_NONE
        if y > bottom - 1:
            return _HITTEST_NONE
        if y == top:
            # the close button occupies the cell just left of the top-right corner
            if x == right - 2:
                return _HITTEST_BUTTON_CLOSE
            if x >= left and x <= right:
                return _HITTEST_TITLEBAR
        if y == bottom - 1:
            if x == left:
                return _HITTEST_FRAME_BOTTOMLEFT
            if x == right - 1:
                return _HITTEST_FRAME_BOTTOMRIGHT
            return _HITTEST_FRAME_BOTTOM
        if x == left:
            return _HITTEST_FRAME_LEFT
        if x == right - 1:
            return _HITTEST_FRAME_RIGHT
        return _HITTEST_CLIENTAREA
class InnerFrame(tff.DefaultHandler,
                 IInnerFrame,
                 IMouseListenerImpl,
                 IFocusListenerImpl):  # aggregate mouse and focus listener
    """A child terminal frame rendered inside the outer screen.

    Runs *command* in its own sub-tty attached to an inner Screen, and
    draws that screen inside a one-cell border with a title bar and a
    ``[x]`` close button.  Border segments are highlighted according to
    the current hover/drag state (attributes provided by the mouse
    listener mixin).
    """

    def __init__(self, session, listener, inputhandler, screen,
                 top, left, row, col,
                 command, termenc, termprop):
        IMouseListenerImpl.__init__(self)
        IFocusListenerImpl.__init__(self)
        self.enabled = True
        # Inner terminal screen (row x col) the subprocess draws into;
        # Canossa mirrors the subprocess output into it.
        innerscreen = Screen(row, col, 0, 0, termenc, termprop)
        canossa = Canossa(innerscreen, visibility=False)
        self._session = session
        window = screen.create_window(self)
        # One extra cell on every side for the border/title decorations.
        window.alloc(left - 1, top - 1, col + 2, row + 2)
        self._window = window
        self.top = top
        self.left = left
        self.offset_top = 0
        self.offset_left = 0
        self._termprop = termprop
        self.innerscreen = innerscreen
        self._outerscreen = screen
        self._listener = listener
        self._inputhandler = inputhandler
        # Spawn the subprocess on a new sub-tty of the session.
        self._tty = session.add_subtty('xterm', 'ja_JP.UTF-8',
                                       command, row, col, termenc,
                                       self, canossa, self)
        self._title = command

    """ tff.EventObserver override """
    def handle_end(self, context):
        # Subprocess terminated: close our window and notify the owner.
        self._window.close()
        self._listener.onclose(self, context)

    def handle_csi(self, context, parameter, intermediate, final):
        # Give the input handler first crack at CSI sequences.
        if self._inputhandler.handle_csi(context, parameter, intermediate, final):
            return True
        return False

    def handle_char(self, context, c):
        if self._inputhandler.handle_char(context, c):
            return True
        return False

    def moveto(self, row, col):
        """Move the cursor to 1-based (row, col), validating the range first."""
        if col >= self._outerscreen.width + 1:
            raise Exception("range error col=%s" % col)
        if row >= self._outerscreen.height + 1:
            raise Exception("range error row=%s" % row)
        if row < 1:
            raise Exception("range error row=%s" % row)
        if col < 1:
            raise Exception("range error col=%s" % col)
        # CUP escape sequence.
        self._window.write('\x1b[%d;%dH' % (row, col))

    """ IWidget override """
    def draw(self, region):
        """Redraw the parts of the frame intersecting the dirty *region*."""
        if self.enabled:
            window = self._window
            screen = self.innerscreen
            outerscreen = self._outerscreen
            left = self.left + self.offset_left
            top = self.top + self.offset_top
            width = screen.width
            height = screen.height
            # Mark the frame's full extent (content + 1-cell border) dirty.
            dirtyregion = region.add(left - 1, top - 1, width + 2, height + 2)

            # Draw the title bar.
            termprop = self._termprop
            title_length = termprop.wcswidth(self._title)
            width = screen.width + 2
            if title_length < width - 11:
                # Center the title, reserving 3 cells for the close button.
                pad_left = (width - title_length) / 2
                pad_right = width - title_length - pad_left
                title = ' ' * pad_left + self._title + ' ' * (pad_right - 3) + '[x]'
            elif width > 10:
                # Too long: truncate with an ellipsis.
                title = ' ' + self._title[0:width - 2 - 9] + u'... [x]'
            else:
                title = ' ' * (width - 3) + '[x]'
            window.write('\x1b[?25l')  # hide cursor while drawing
            window.write(self._titlestyle)
            dirtyrange = dirtyregion[top - 1]
            if not dirtyrange:
                return
            # Clamp the dirty span to the title bar and the outer screen.
            dirty_left = min(dirtyrange)
            if dirty_left < left - 1:
                dirty_left = left - 1
            if dirty_left < 0:
                dirty_left = 0
            dirty_right = max(dirtyrange) + 1
            if dirty_right > left + screen.width + 1:
                dirty_right = left + screen.width + 1
            if dirty_right > outerscreen.width:
                dirty_right = outerscreen.width
            n = left - 1
            for c in title:
                length = termprop.wcwidth(ord(c))
                if n >= outerscreen.width:
                    break
                if n >= dirty_right:
                    break
                if n == dirty_left:
                    self.moveto(top, n + 1)
                if n >= dirty_left:
                    if n in dirtyrange:
                        # Highlight the close button while hovered.
                        if n == left + width - 4 and self._hovertype == _HOVERTYPE_BUTTON_CLOSE:
                            window.write('\x1b[37m')
                        window.write(c)
                    else:
                        # Skip clean cells with a cursor-forward escape.
                        window.write("\x1b[%dC" % length)
                n += length
            window.write('\x1b[m')

            # Draw the frame contents row by row.
            for index in xrange(0, height):
                if top + index < outerscreen.height:
                    if top + index >= 0:
                        dirtyrange = dirtyregion[top + index]
                        if dirtyrange:
                            dirty_left = min(dirtyrange)
                            if dirty_left < 0:
                                dirty_left = 0
                            if dirty_left < left:
                                dirty_left = left
                            dirty_right = max(dirtyrange)
                            if dirty_right > outerscreen.width:
                                dirty_right = outerscreen.width
                            if dirty_right > left + screen.width + 1:
                                dirty_right = left + screen.width + 1
                            dirty_width = dirty_right - dirty_left
                            if dirty_width < 0:
                                continue
                            # Draw the left edge of the frame.
                            if left > 0 and left >= dirty_left and left - 1 < outerscreen.width:
                                row = top + index
                                col = left - 1
                                self.moveto(row + 1, col + 1)
                                if self._dragtype == _DRAGTYPE_LEFT:
                                    window.write('\x1b[41m')
                                elif self._hovertype == _HOVERTYPE_LEFT:
                                    window.write('\x1b[43m')
                                window.write('|')
                                window.write('\x1b[m')
                            # Copy the inner screen's dirty cells into place.
                            screen.copyrect(window, dirty_left - left, index, dirty_width, 1,
                                            dirty_left, top + index, lazy=True)
                            # Draw the right edge of the frame.
                            col = left + screen.width
                            if col <= dirty_right and col < outerscreen.width:
                                row = top + index
                                self.moveto(row + 1, col + 1)
                                if self._dragtype == _DRAGTYPE_RIGHT:
                                    window.write('\x1b[41m')
                                elif self._hovertype == _HOVERTYPE_RIGHT:
                                    window.write('\x1b[43m')
                                window.write('|')
                                window.write('\x1b[m')

            # Draw the bottom border of the frame.
            # (Note: reuses `index` from the loop above, i.e. height - 1.)
            if top + height < outerscreen.height:
                if top + index >= 0:
                    dirtyrange = dirtyregion[top + height]
                    if dirtyrange:
                        dirty_left = min(dirtyrange)
                        if dirty_left < left - 1:
                            dirty_left = left - 1
                        if dirty_left < 0:
                            dirty_left = 0
                        dirty_right = max(dirtyrange) + 1
                        if dirty_right > left + screen.width + 1:
                            dirty_right = left + screen.width + 1
                        if dirty_right > outerscreen.width:
                            dirty_right = outerscreen.width
                        window.write('\x1b[m')
                        self.moveto(top + height + 1, dirty_left + 1)
                        n = left - 1
                        # Bottom-left corner.
                        if n >= 0 and n >= dirty_left:
                            if self._dragtype == _DRAGTYPE_BOTTOMLEFT:
                                window.write('\x1b[41m')
                            elif self._hovertype == _HOVERTYPE_BOTTOMLEFT:
                                window.write('\x1b[43m')
                            else:
                                window.write('\x1b[m')
                            window.write('+')
                            n += 1
                        if self._dragtype == _DRAGTYPE_BOTTOM:
                            window.write('\x1b[41m')
                        elif self._hovertype == _HOVERTYPE_BOTTOM:
                            window.write('\x1b[43m')
                        else:
                            window.write('\x1b[m')
                        while True:
                            if n >= dirty_right - 1:
                                # Bottom-right corner.
                                if n == left + screen.width:
                                    if self._dragtype == _DRAGTYPE_BOTTOMRIGHT:
                                        window.write('\x1b[41m')
                                    elif self._hovertype == _HOVERTYPE_BOTTOMRIGHT:
                                        window.write('\x1b[43m')
                                    else:
                                        window.write('\x1b[m')
                                    window.write('+')
                                break
                            if n >= outerscreen.width:
                                break
                            n += 1
                            if n < dirty_left + 1:
                                continue
                            window.write('-')
            window.write('\x1b[?25h')  # show cursor again

            # Re-position the cursor at the inner screen's cursor cell.
            cursor = screen.cursor
            row = cursor.row + top + 1
            if row < 1:
                return
            # NOTE(review): compares row against the outer screen's *width*;
            # the re-check against height below suggests this was meant to be
            # height — confirm before changing.
            elif row > outerscreen.width:
                return
            col = cursor.col + left + 1
            if col < 1:
                return
            elif col > outerscreen.width:
                return
            if row < 1:
                return
            elif row > outerscreen.height:
                return
            cursor.draw(window)
            self.moveto(row, col)

    def close(self):
        """Tear down the subprocess attached to this frame's sub-tty."""
        session = self._session
        fd = self._tty.fileno()
        session.destruct_subprocess(fd)
def test():
    """Execute all doctest examples embedded in this module."""
    import doctest as _doctest
    _doctest.testmod()
# Run the module self-test when executed as a script.
if __name__ == "__main__":
    test()
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windows DirectSound audio implementation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import math
import time
from pyglet.media import AudioPlayer, Listener, MediaException
from pyglet.media.drivers.directsound import lib_dsound as lib
from pyglet.window.win32 import _user32
class DirectSoundException(MediaException):
    """Raised when the DirectSound audio driver reports an error."""
    pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return int(1000 * math.log(min(gain, 1)))
class DirectSoundAudioPlayer(AudioPlayer):
    """AudioPlayer implementation backed by a looping DirectSound buffer.

    Audio data is streamed into a fixed-size circular buffer; packet
    timestamps and end-of-source markers are recorded by (unwrapped)
    write-cursor position and consumed as the play cursor passes them.
    """

    # Size of the circular DirectSound buffer, in bytes.
    _buffer_size = 44800 * 1

    # Minimum free space required before accepting more source data.
    _update_buffer_size = _buffer_size // 4

    # Duration of the buffer, in seconds (set in __init__).
    _buffer_size_secs = None

    _cone_inner_angle = 360
    _cone_outer_angle = 360

    UPDATE_PERIOD = 0.05

    def __init__(self, audio_format):
        super(DirectSoundAudioPlayer, self).__init__(audio_format)

        self._playing = False
        self._timestamp = 0.

        self._buffer = None
        self._buffer_playing = False
        self._data_size = 0     # amount of buffer filled by this player
        self._play_cursor = 0
        self._buffer_time = 0.  # ts of buffer at buffer_time_pos
        self._buffer_time_pos = 0
        self._write_cursor = 0
        self._timestamps = []
        self._eos_count = 0
        self._dirty_size = 0    # bytes of silence still to be written

        # Describe the source's PCM wave format.
        wfx = lib.WAVEFORMATEX()
        wfx.wFormatTag = lib.WAVE_FORMAT_PCM
        wfx.nChannels = audio_format.channels
        wfx.nSamplesPerSec = audio_format.sample_rate
        wfx.wBitsPerSample = audio_format.sample_size
        wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
        wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign

        dsbdesc = lib.DSBUFFERDESC()
        dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
        dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
                           lib.DSBCAPS_GETCURRENTPOSITION2 |
                           lib.DSBCAPS_CTRLFREQUENCY |
                           lib.DSBCAPS_CTRLVOLUME)
        # 3D positioning is only available for mono buffers.
        if audio_format.channels == 1:
            dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
        dsbdesc.dwBufferBytes = self._buffer_size
        dsbdesc.lpwfxFormat = ctypes.pointer(wfx)

        self._buffer = lib.IDirectSoundBuffer()
        dsound.CreateSoundBuffer(dsbdesc, ctypes.byref(self._buffer), None)

        if audio_format.channels == 1:
            self._buffer3d = lib.IDirectSound3DBuffer()
            self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
                                        ctypes.byref(self._buffer3d))
        else:
            self._buffer3d = None

        self._buffer_size_secs = \
            self._buffer_size / float(audio_format.bytes_per_second)
        self._buffer.SetCurrentPosition(0)

    def __del__(self):
        # Best-effort release of COM interfaces; the interpreter may
        # already be shutting down.
        try:
            self._buffer.Stop()
            self._buffer.Release()
            if self._buffer3d:
                self._buffer3d.Release()
        except:
            pass

    def get_write_size(self):
        """Return how many bytes may currently be written to the buffer."""
        if self._data_size < self._buffer_size:
            # Buffer has never been filled: everything beyond the data
            # written so far is free.
            return self._buffer_size - self._data_size

        play_cursor = self._play_cursor
        if self._write_cursor == play_cursor and self._buffer_playing:
            # Polling too fast, no play cursor movement
            return 0
        elif self._write_cursor == play_cursor and not self._playing:
            # Paused and up-to-date
            return 0
        elif self._write_cursor < play_cursor:
            # Play cursor ahead of write cursor
            write_size = play_cursor - self._write_cursor
        else:
            # Play cursor behind write cursor, wraps around
            write_size = self._buffer_size - self._write_cursor + play_cursor

        # Tiny writes aren't worth the locking overhead -- but when
        # end-of-stream silence padding is pending (_dirty_size), accept
        # any size, otherwise the final silence is never written and the
        # stream can't drain.  (Fix: previously the minimum applied
        # unconditionally, stalling write_end() padding.)
        if write_size < self._update_buffer_size and not self._dirty_size:
            return 0
        return write_size

    def write(self, audio_data, length=None):
        # Pass audio_data=None, length>0 to write silence
        if length is None:
            write_size = self.get_write_size()
            length = min(audio_data.length, write_size)
        if length == 0:
            return 0

        if self._data_size < self._buffer_size:
            self._data_size = min(self._data_size + length, self._buffer_size)

        # Lock may return two regions when the write wraps the buffer end.
        p1 = ctypes.c_void_p()
        l1 = lib.DWORD()
        p2 = ctypes.c_void_p()
        l2 = lib.DWORD()
        self._buffer.Lock(self._write_cursor, length,
                          ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
        assert length == l1.value + l2.value

        if audio_data:
            # Record the timestamp at the (unwrapped) write position.
            if self._write_cursor >= self._play_cursor:
                wc = self._write_cursor
            else:
                wc = self._write_cursor + self._buffer_size
            self._timestamps.append((wc, audio_data.timestamp))

            ctypes.memmove(p1, audio_data.data, l1.value)
            audio_data.consume(l1.value, self.audio_format)
            if l2.value:
                ctypes.memmove(p2, audio_data.data, l2.value)
                audio_data.consume(l2.value, self.audio_format)
        else:
            # Silence fill.
            ctypes.memset(p1, 0, l1.value)
            if l2.value:
                ctypes.memset(p2, 0, l2.value)
        self._buffer.Unlock(p1, l1, p2, l2)

        self._write_cursor += length
        self._write_cursor %= self._buffer_size

    def write_eos(self):
        # Mark end-of-source at the current (unwrapped) write position.
        if self._write_cursor > self._play_cursor:
            wc = self._write_cursor
        else:
            wc = self._write_cursor + self._buffer_size
        self._timestamps.append((wc, 'eos'))

    def write_end(self):
        # Source exhausted: pad the remainder of the buffer with silence.
        if not self._dirty_size:
            self._dirty_size = self._buffer_size

    def pump(self):
        # Update play cursor, check for wraparound and EOS markers
        play_cursor = lib.DWORD()
        self._buffer.GetCurrentPosition(play_cursor, None)
        if play_cursor.value < self._play_cursor:
            # Wrapped around
            self._buffer_time_pos -= self._buffer_size
            self._timestamps = \
                [(a - self._buffer_size, t) for a, t in self._timestamps]
        self._play_cursor = play_cursor.value

        # Consume any timestamps/EOS markers the play cursor has passed.
        try:
            while self._timestamps[0][0] < self._play_cursor:
                pos, timestamp = self._timestamps.pop(0)
                if timestamp == 'eos':
                    self._eos_count += 1
                else:
                    self._buffer_time = timestamp
                    self._buffer_time_pos = pos
        except IndexError:
            pass

        # Interpolate the current timestamp from the last known marker.
        self._timestamp = self._buffer_time + \
            (self._play_cursor - self._buffer_time_pos) \
            / float(self.audio_format.bytes_per_second)

        # Write silence
        if self._dirty_size:
            write_size = self.get_write_size()
            length = min(write_size, self._dirty_size)
            self.write(None, length)
            self._dirty_size -= length
            if self._dirty_size < 0:
                self._dirty_size = 0

        # Restart playback if requested while the buffer was stopped.
        if self._playing and not self._buffer_playing:
            self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
            self._buffer_playing = True

    def get_time(self):
        return self._timestamp

    def play(self):
        if self._playing:
            return
        self._playing = True
        self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
        self._buffer_playing = True

    def stop(self):
        if not self._playing:
            return
        self._playing = False
        self._buffer.Stop()
        self._buffer_playing = False

    def clear(self):
        # Reset all stream state and rewind the buffer.
        self._eos_count = 0
        self._timestamps = []
        self._write_cursor = 0
        self._buffer.SetCurrentPosition(0)
        self._buffer_time = 0.
        self._buffer_time_pos = 0
        self._data_size = 0

    def clear_eos(self):
        # Pop one pending EOS marker, if any.
        if self._eos_count > 0:
            self._eos_count -= 1
            return True
        return False

    def _get_source(self):
        if self._sources:
            return self._sources[0]
        return None

    def set_volume(self, volume):
        volume = _db(volume)
        self._buffer.SetVolume(volume)

    def set_position(self, position):
        if self._buffer3d:
            x, y, z = position
            # DirectSound uses a left-handed coordinate system: negate z.
            self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)

    def set_min_distance(self, min_distance):
        if self._buffer3d:
            self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)

    def set_max_distance(self, max_distance):
        if self._buffer3d:
            self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)

    def set_pitch(self, pitch):
        # Pitch is implemented by scaling the playback frequency.
        frequency = int(pitch * self.audio_format.sample_rate)
        self._buffer.SetFrequency(frequency)

    def set_cone_orientation(self, cone_orientation):
        if self._buffer3d:
            x, y, z = cone_orientation
            self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)

    def set_cone_inner_angle(self, cone_inner_angle):
        if self._buffer3d:
            self._cone_inner_angle = int(cone_inner_angle)
            self._set_cone_angles()

    def set_cone_outer_angle(self, cone_outer_angle):
        if self._buffer3d:
            self._cone_outer_angle = int(cone_outer_angle)
            self._set_cone_angles()

    def _set_cone_angles(self):
        # DirectSound requires inner <= outer.
        inner = min(self._cone_inner_angle, self._cone_outer_angle)
        outer = max(self._cone_inner_angle, self._cone_outer_angle)
        self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)

    def set_cone_outer_gain(self, cone_outer_gain):
        if self._buffer3d:
            volume = _db(cone_outer_gain)
            self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
class DirectSoundListener(Listener):
    """3D audio listener backed by the primary DirectSound buffer."""

    def _init(self):
        # Called after driver_init()
        self._buffer = lib.IDirectSoundBuffer()
        dsbd = lib.DSBUFFERDESC()
        dsbd.dwSize = ctypes.sizeof(dsbd)
        dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
                        lib.DSBCAPS_CTRLVOLUME |
                        lib.DSBCAPS_PRIMARYBUFFER)
        dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)

        self._listener = lib.IDirectSound3DListener()
        self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
                                    ctypes.byref(self._listener))

    def __del__(self):
        # Best-effort COM release during teardown.
        try:
            self._buffer.Release()
            self._listener.Release()
        except:
            pass

    def _set_volume(self, volume):
        self._volume = volume
        self._buffer.SetVolume(_db(volume))

    def _set_position(self, position):
        self._position = position
        x, y, z = position
        # DirectSound uses a left-handed coordinate system: negate z.
        self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)

    def _set_forward_orientation(self, orientation):
        self._forward_orientation = orientation
        self._set_orientation()

    def _set_up_orientation(self, orientation):
        self._up_orientation = orientation
        self._set_orientation()

    def _set_orientation(self):
        # DirectSound takes both vectors in a single call.
        x, y, z = self._forward_orientation
        ux, uy, uz = self._up_orientation
        self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
# Module-level IDirectSound device; created by driver_init().
dsound = None
def driver_init():
    """Create the global IDirectSound device and initialise the listener."""
    global dsound
    dsound = lib.IDirectSound()
    lib.DirectSoundCreate(None, ctypes.byref(dsound), None)

    # A trick used by mplayer.. use desktop as window handle since it would
    # be complex to use pyglet window handles (and what to do when application
    # is audio only?).
    hwnd = _user32.GetDesktopWindow()
    dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)

    driver_listener._init()

    # Force a context switch, as some Windows audio drivers don't get time
    # to process short sounds if Python hogs all the CPU. See issue #163.
    from pyglet import clock
    clock.Clock._force_sleep = True
# Entry points looked up by pyglet.media's driver loader.
driver_listener = DirectSoundListener()
driver_audio_player_class = DirectSoundAudioPlayer
directsound: write silence data even when below the minimum write size.
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Windows DirectSound audio implementation.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import ctypes
import math
import time
from pyglet.media import AudioPlayer, Listener, MediaException
from pyglet.media.drivers.directsound import lib_dsound as lib
from pyglet.window.win32 import _user32
class DirectSoundException(MediaException):
    """Raised when the DirectSound audio driver reports an error."""
    pass
def _db(gain):
'''Convert linear gain in range [0.0, 1.0] to 100ths of dB.'''
if gain <= 0:
return -10000
return int(1000 * math.log(min(gain, 1)))
class DirectSoundAudioPlayer(AudioPlayer):
    """AudioPlayer implementation backed by a looping DirectSound buffer.

    Audio data is streamed into a fixed-size circular buffer; packet
    timestamps and end-of-source markers are recorded by (unwrapped)
    write-cursor position and consumed as the play cursor passes them.
    """

    # Size of the circular DirectSound buffer, in bytes.
    _buffer_size = 44800 * 1

    # Minimum free space required before accepting more source data.
    _update_buffer_size = _buffer_size // 4

    # Duration of the buffer, in seconds (set in __init__).
    _buffer_size_secs = None

    _cone_inner_angle = 360
    _cone_outer_angle = 360

    UPDATE_PERIOD = 0.05

    def __init__(self, audio_format):
        super(DirectSoundAudioPlayer, self).__init__(audio_format)

        self._playing = False
        self._timestamp = 0.

        self._buffer = None
        self._buffer_playing = False
        self._data_size = 0     # amount of buffer filled by this player
        self._play_cursor = 0
        self._buffer_time = 0.  # ts of buffer at buffer_time_pos
        self._buffer_time_pos = 0
        self._write_cursor = 0
        self._timestamps = []
        self._eos_count = 0
        self._dirty_size = 0    # bytes of silence still to be written

        # Describe the source's PCM wave format.
        wfx = lib.WAVEFORMATEX()
        wfx.wFormatTag = lib.WAVE_FORMAT_PCM
        wfx.nChannels = audio_format.channels
        wfx.nSamplesPerSec = audio_format.sample_rate
        wfx.wBitsPerSample = audio_format.sample_size
        wfx.nBlockAlign = wfx.wBitsPerSample * wfx.nChannels // 8
        wfx.nAvgBytesPerSec = wfx.nSamplesPerSec * wfx.nBlockAlign

        dsbdesc = lib.DSBUFFERDESC()
        dsbdesc.dwSize = ctypes.sizeof(dsbdesc)
        dsbdesc.dwFlags = (lib.DSBCAPS_GLOBALFOCUS |
                           lib.DSBCAPS_GETCURRENTPOSITION2 |
                           lib.DSBCAPS_CTRLFREQUENCY |
                           lib.DSBCAPS_CTRLVOLUME)
        # 3D positioning is only available for mono buffers.
        if audio_format.channels == 1:
            dsbdesc.dwFlags |= lib.DSBCAPS_CTRL3D
        dsbdesc.dwBufferBytes = self._buffer_size
        dsbdesc.lpwfxFormat = ctypes.pointer(wfx)

        self._buffer = lib.IDirectSoundBuffer()
        dsound.CreateSoundBuffer(dsbdesc, ctypes.byref(self._buffer), None)

        if audio_format.channels == 1:
            self._buffer3d = lib.IDirectSound3DBuffer()
            self._buffer.QueryInterface(lib.IID_IDirectSound3DBuffer,
                                        ctypes.byref(self._buffer3d))
        else:
            self._buffer3d = None

        self._buffer_size_secs = \
            self._buffer_size / float(audio_format.bytes_per_second)
        self._buffer.SetCurrentPosition(0)

    def __del__(self):
        # Best-effort release of COM interfaces; the interpreter may
        # already be shutting down.
        try:
            self._buffer.Stop()
            self._buffer.Release()
            if self._buffer3d:
                self._buffer3d.Release()
        except:
            pass

    def get_write_size(self):
        """Return how many bytes may currently be written to the buffer."""
        if self._data_size < self._buffer_size:
            # Buffer has never been filled: everything beyond the data
            # written so far is free.
            return self._buffer_size - self._data_size

        play_cursor = self._play_cursor
        if self._write_cursor == play_cursor and self._buffer_playing:
            # Polling too fast, no play cursor movement
            return 0
        elif self._write_cursor == play_cursor and not self._playing:
            # Paused and up-to-date
            return 0
        elif self._write_cursor < play_cursor:
            # Play cursor ahead of write cursor
            write_size = play_cursor - self._write_cursor
        else:
            # Play cursor behind write cursor, wraps around
            write_size = self._buffer_size - self._write_cursor + play_cursor

        # Tiny writes aren't worth the locking overhead -- but when
        # end-of-stream silence padding is pending (_dirty_size), accept
        # any size so the stream can drain.
        if write_size < self._update_buffer_size and not self._dirty_size:
            return 0
        return write_size

    def write(self, audio_data, length=None):
        # Pass audio_data=None, length>0 to write silence
        if length is None:
            write_size = self.get_write_size()
            length = min(audio_data.length, write_size)
        if length == 0:
            return 0

        if self._data_size < self._buffer_size:
            self._data_size = min(self._data_size + length, self._buffer_size)

        # Lock may return two regions when the write wraps the buffer end.
        p1 = ctypes.c_void_p()
        l1 = lib.DWORD()
        p2 = ctypes.c_void_p()
        l2 = lib.DWORD()
        self._buffer.Lock(self._write_cursor, length,
                          ctypes.byref(p1), l1, ctypes.byref(p2), l2, 0)
        assert length == l1.value + l2.value

        if audio_data:
            # Record the timestamp at the (unwrapped) write position.
            if self._write_cursor >= self._play_cursor:
                wc = self._write_cursor
            else:
                wc = self._write_cursor + self._buffer_size
            self._timestamps.append((wc, audio_data.timestamp))

            ctypes.memmove(p1, audio_data.data, l1.value)
            audio_data.consume(l1.value, self.audio_format)
            if l2.value:
                ctypes.memmove(p2, audio_data.data, l2.value)
                audio_data.consume(l2.value, self.audio_format)
        else:
            # Silence fill.
            ctypes.memset(p1, 0, l1.value)
            if l2.value:
                ctypes.memset(p2, 0, l2.value)
            pass
        self._buffer.Unlock(p1, l1, p2, l2)

        self._write_cursor += length
        self._write_cursor %= self._buffer_size

    def write_eos(self):
        # Mark end-of-source at the current (unwrapped) write position.
        if self._write_cursor > self._play_cursor:
            wc = self._write_cursor
        else:
            wc = self._write_cursor + self._buffer_size
        self._timestamps.append((wc, 'eos'))

    def write_end(self):
        # Source exhausted: pad the remainder of the buffer with silence.
        if not self._dirty_size:
            self._dirty_size = self._buffer_size

    def pump(self):
        # Update play cursor, check for wraparound and EOS markers
        play_cursor = lib.DWORD()
        self._buffer.GetCurrentPosition(play_cursor, None)
        if play_cursor.value < self._play_cursor:
            # Wrapped around
            self._buffer_time_pos -= self._buffer_size
            self._timestamps = \
                [(a - self._buffer_size, t) for a, t in self._timestamps]
        self._play_cursor = play_cursor.value

        # Consume any timestamps/EOS markers the play cursor has passed.
        try:
            while self._timestamps[0][0] < self._play_cursor:
                pos, timestamp = self._timestamps.pop(0)
                if timestamp == 'eos':
                    self._eos_count += 1
                else:
                    self._buffer_time = timestamp
                    self._buffer_time_pos = pos
        except IndexError:
            pass

        # Interpolate the current timestamp from the last known marker.
        self._timestamp = self._buffer_time + \
            (self._play_cursor - self._buffer_time_pos) \
            / float(self.audio_format.bytes_per_second)

        # Write silence
        if self._dirty_size:
            write_size = self.get_write_size()
            length = min(write_size, self._dirty_size)
            self.write(None, length)
            self._dirty_size -= length
            if self._dirty_size < 0:
                self._dirty_size = 0

        # Restart playback if requested while the buffer was stopped.
        if self._playing and not self._buffer_playing:
            self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
            self._buffer_playing = True

    def get_time(self):
        return self._timestamp

    def play(self):
        if self._playing:
            return
        self._playing = True
        self._buffer.Play(0, 0, lib.DSBPLAY_LOOPING)
        self._buffer_playing = True

    def stop(self):
        if not self._playing:
            return
        self._playing = False
        self._buffer.Stop()
        self._buffer_playing = False

    def clear(self):
        # Reset all stream state and rewind the buffer.
        self._eos_count = 0
        self._timestamps = []
        self._write_cursor = 0
        self._buffer.SetCurrentPosition(0)
        self._buffer_time = 0.
        self._buffer_time_pos = 0
        self._data_size = 0

    def clear_eos(self):
        # Pop one pending EOS marker, if any.
        if self._eos_count > 0:
            self._eos_count -= 1
            return True
        return False

    def _get_source(self):
        if self._sources:
            return self._sources[0]
        return None

    def set_volume(self, volume):
        volume = _db(volume)
        self._buffer.SetVolume(volume)

    def set_position(self, position):
        if self._buffer3d:
            x, y, z = position
            # DirectSound uses a left-handed coordinate system: negate z.
            self._buffer3d.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)

    def set_min_distance(self, min_distance):
        if self._buffer3d:
            self._buffer3d.SetMinDistance(min_distance, lib.DS3D_IMMEDIATE)

    def set_max_distance(self, max_distance):
        if self._buffer3d:
            self._buffer3d.SetMaxDistance(max_distance, lib.DS3D_IMMEDIATE)

    def set_pitch(self, pitch):
        # Pitch is implemented by scaling the playback frequency.
        frequency = int(pitch * self.audio_format.sample_rate)
        self._buffer.SetFrequency(frequency)

    def set_cone_orientation(self, cone_orientation):
        if self._buffer3d:
            x, y, z = cone_orientation
            self._buffer3d.SetConeOrientation(x, y, -z, lib.DS3D_IMMEDIATE)

    def set_cone_inner_angle(self, cone_inner_angle):
        if self._buffer3d:
            self._cone_inner_angle = int(cone_inner_angle)
            self._set_cone_angles()

    def set_cone_outer_angle(self, cone_outer_angle):
        if self._buffer3d:
            self._cone_outer_angle = int(cone_outer_angle)
            self._set_cone_angles()

    def _set_cone_angles(self):
        # DirectSound requires inner <= outer.
        inner = min(self._cone_inner_angle, self._cone_outer_angle)
        outer = max(self._cone_inner_angle, self._cone_outer_angle)
        self._buffer3d.SetConeAngles(inner, outer, lib.DS3D_IMMEDIATE)

    def set_cone_outer_gain(self, cone_outer_gain):
        if self._buffer3d:
            volume = _db(cone_outer_gain)
            self._buffer3d.SetConeOutsideVolume(volume, lib.DS3D_IMMEDIATE)
class DirectSoundListener(Listener):
    """3D audio listener backed by the primary DirectSound buffer."""

    def _init(self):
        # Called after driver_init()
        self._buffer = lib.IDirectSoundBuffer()
        dsbd = lib.DSBUFFERDESC()
        dsbd.dwSize = ctypes.sizeof(dsbd)
        dsbd.dwFlags = (lib.DSBCAPS_CTRL3D |
                        lib.DSBCAPS_CTRLVOLUME |
                        lib.DSBCAPS_PRIMARYBUFFER)
        dsound.CreateSoundBuffer(dsbd, ctypes.byref(self._buffer), None)

        self._listener = lib.IDirectSound3DListener()
        self._buffer.QueryInterface(lib.IID_IDirectSound3DListener,
                                    ctypes.byref(self._listener))

    def __del__(self):
        # Best-effort COM release during teardown.
        try:
            self._buffer.Release()
            self._listener.Release()
        except:
            pass

    def _set_volume(self, volume):
        self._volume = volume
        self._buffer.SetVolume(_db(volume))

    def _set_position(self, position):
        self._position = position
        x, y, z = position
        # DirectSound uses a left-handed coordinate system: negate z.
        self._listener.SetPosition(x, y, -z, lib.DS3D_IMMEDIATE)

    def _set_forward_orientation(self, orientation):
        self._forward_orientation = orientation
        self._set_orientation()

    def _set_up_orientation(self, orientation):
        self._up_orientation = orientation
        self._set_orientation()

    def _set_orientation(self):
        # DirectSound takes both vectors in a single call.
        x, y, z = self._forward_orientation
        ux, uy, uz = self._up_orientation
        self._listener.SetOrientation(x, y, -z, ux, uy, -uz, lib.DS3D_IMMEDIATE)
# Module-level IDirectSound device; created by driver_init().
dsound = None
def driver_init():
    """Create the global IDirectSound device and initialise the listener."""
    global dsound
    dsound = lib.IDirectSound()
    lib.DirectSoundCreate(None, ctypes.byref(dsound), None)

    # A trick used by mplayer.. use desktop as window handle since it would
    # be complex to use pyglet window handles (and what to do when application
    # is audio only?).
    hwnd = _user32.GetDesktopWindow()
    dsound.SetCooperativeLevel(hwnd, lib.DSSCL_NORMAL)

    driver_listener._init()

    # Force a context switch, as some Windows audio drivers don't get time
    # to process short sounds if Python hogs all the CPU. See issue #163.
    from pyglet import clock
    clock.Clock._force_sleep = True
# Entry points looked up by pyglet.media's driver loader.
driver_listener = DirectSoundListener()
driver_audio_player_class = DirectSoundAudioPlayer
|
import shutil
from datetime import timedelta, date
from django.core.urlresolvers import reverse
from django.http import HttpRequest, JsonResponse
from django.test import Client, TestCase, override_settings
from django.utils.timezone import now
from cl.api.management.commands.cl_make_bulk_data import Command
from cl.api.utils import BulkJsonHistory
from cl.api.views import coverage_data
from cl.audio.models import Audio
from cl.scrapers.management.commands.cl_scrape_oral_arguments import \
Command as OralArgumentCommand
from cl.scrapers.test_assets import test_oral_arg_scraper
from cl.search.models import \
Docket, Court, Opinion, OpinionCluster, OpinionsCited
class BulkDataTest(TestCase):
    """Tests for the bulk-data export management command."""
    fixtures = ['court_data.json']
    tmp_data_dir = '/tmp/bulk-dir/'

    def setUp(self):
        docket = Docket(
            case_name=u'foo',
            court=Court.objects.get(pk='test'),
        )
        docket.save()
        # Must be more than a year old for all tests to be runnable.
        last_month = now().date() - timedelta(days=400)
        self.doc_cluster = OpinionCluster(
            case_name=u"foo",
            docket=docket,
            date_filed=last_month
        )
        # index=False skips search-engine indexing; not needed here.
        self.doc_cluster.save(index=False)
        opinion = Opinion.objects.create(
            cluster=self.doc_cluster,
            type='Lead Opinion'
        )
        opinion2 = Opinion.objects.create(
            cluster=self.doc_cluster,
            type='Concurrence'
        )
        OpinionsCited.objects.create(
            citing_opinion=opinion2,
            cited_opinion=opinion
        )

        # Scrape the audio "site" and add its contents
        site = test_oral_arg_scraper.Site().parse()
        OralArgumentCommand().scrape_court(site, full_crawl=True)

    def tearDown(self):
        OpinionCluster.objects.all().delete()
        Docket.objects.all().delete()
        # Remove any bulk files the command under test wrote.
        try:
            shutil.rmtree(self.tmp_data_dir)
        except OSError:
            pass

    @override_settings(BULK_DATA_DIR=tmp_data_dir)
    def test_make_all_bulk_files(self):
        """Can we successfully generate all bulk files?"""
        Command().execute()

    def test_database_has_objects_for_bulk_export(self):
        # Sanity-check that setUp and the fixtures populated the DB.
        self.assertTrue(Opinion.objects.count() > 0, 'Opinions exist')
        self.assertTrue(Audio.objects.count() > 0, 'Audio exist')
        self.assertTrue(Docket.objects.count() > 0, 'Docket exist')
        self.assertTrue(Court.objects.count() > 0, 'Court exist')
        self.assertEqual(
            Court.objects.get(pk='test').full_name,
            'Testing Supreme Court'
        )
class BasicAPIPageTest(TestCase):
    """Test the basic views"""
    fixtures = ['judge_judy.json', 'test_objects_search.json']

    def setUp(self):
        self.client = Client()

        # Need pagerank file for test_pagerank_file()
        from cl.search.management.commands.cl_calculate_pagerank_networkx \
            import Command
        Command().do_pagerank(chown=False)

    def _assert_named_url_ok(self, url_name):
        # Shared helper: GET a named URL and expect a 200 response.
        response = self.client.get(reverse(url_name))
        self.assertEqual(response.status_code, 200)

    def test_api_index(self):
        self._assert_named_url_ok('api_index')

    def test_court_index(self):
        self._assert_named_url_ok('court_index')

    def test_rest_docs(self):
        self._assert_named_url_ok('rest_docs')

    def test_bulk_data_index(self):
        self._assert_named_url_ok('bulk_data_index')

    def test_pagerank_file(self):
        self._assert_named_url_ok('pagerank_file')

    def test_coverage_api(self):
        url = reverse('coverage_data',
                      kwargs={'version': 2, 'court': 'ca9'})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_coverage_api_via_url(self):
        # Should hit something like:
        # https://www.courtlistener.com/api/rest/v2/coverage/ca2/
        response = self.client.get('/api/rest/v2/coverage/ca2/')
        self.assertEqual(response.status_code, 200)

    def test_api_info_page_displays_latest_rest_docs_by_default(self):
        response = self.client.get('/api/rest-info/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'rest-docs-vlatest.html')

    def test_api_info_page_can_display_different_versions_of_rest_docs(self):
        for version in ('v1', 'v2'):
            response = self.client.get('/api/rest-info/%s/' % (version,))
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'rest-docs-%s.html' % (version,))
            header = 'REST API – %s' % (version.upper(),)
            self.assertContains(response, header)
class ApiViewTest(TestCase):
    """Tests views in API module via direct calls and not HTTP"""

    def test_coverage_data_view_provides_court_data(self):
        # Call the view function directly instead of going through the URLconf.
        json_response = coverage_data(HttpRequest(), 'v2', 'ca9')
        self.assertIsInstance(json_response, JsonResponse)
        self.assertEqual(json_response.status_code, 200)
        self.assertContains(json_response, 'annual_counts')
        self.assertContains(json_response, 'total')
def assertCount(cls, path, q, expected_count):
    """Assert that GETting *path* with query dict *q* yields *expected_count* results.

    NOTE(review): despite the name, *cls* is a TestCase *instance* (callers
    pass ``self``), not a class — consider renaming.
    """
    cls.client.login(username='pandora', password='password')
    r = cls.client.get(path, q)
    cls.assertEqual(len(r.data['results']), expected_count,
                    msg="r.data was: %s" % r.data)
class DRFJudgeApiFilterTests(TestCase):
    """Do the filters work properly?"""
    fixtures = ['judge_judy.json', 'user_with_judge_access.json']

    def test_judge_filtering_by_first_name(self):
        """Can we filter by first name?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        # Filtering with good values brings back 1 result.
        q = {'name_first__istartswith': 'judith'}
        assertCount(self, path, q, 1)
        # Filtering with bad values brings back no results.
        q = {'name_first__istartswith': 'XXX'}
        assertCount(self, path, q, 0)

    def test_judge_filtering_by_date(self):
        """Do the various date filters work properly?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        # Exact match for her birthday
        correct_date = date(1942, 10, 21)
        q = {'date_dob': correct_date.isoformat()}
        assertCount(self, path, q, 1)
        # People born after the day before her birthday
        before = correct_date - timedelta(days=1)
        q = {'date_dob__gt': before.isoformat()}
        assertCount(self, path, q, 1)
        # Flip the logic. This should return no results.
        q = {'date_dob__lt': before.isoformat()}
        assertCount(self, path, q, 0)

    def test_nested_judge_filtering(self):
        """Can we filter across various relations?

        Each of these assertions adds another parameter making our final test
        a pretty complex combination.
        """
        path = reverse('judge-list', kwargs={'version': 'v3'})
        q = dict()
        # No results for a bad query
        q['educations__degree'] = 'XXX'
        assertCount(self, path, q, 0)
        # One result for a good query
        q['educations__degree'] = 'JD'
        assertCount(self, path, q, 1)
        # Again, no results
        q['educations__degree_year'] = 1400
        assertCount(self, path, q, 0)
        # But with the correct year...one result
        q['educations__degree_year'] = 1965
        assertCount(self, path, q, 1)
        # Judy went to "New York Law School"
        q['educations__school__name__istartswith'] = "New York Law"
        assertCount(self, path, q, 1)
        # Moving on to careers. Bad value, then good.
        q['careers__job_title__icontains'] = 'XXX'
        assertCount(self, path, q, 0)
        q['careers__job_title__icontains'] = 'lawyer'
        assertCount(self, path, q, 1)
        # Moving on to titles...bad value, then good.
        q['titles__title_name'] = 'XXX'
        assertCount(self, path, q, 0)
        q['titles__title_name'] = 'c-jud'
        assertCount(self, path, q, 1)
        # Political affiliation filtering...bad, then good.
        q['political_affiliations__political_party'] = 'XXX'
        assertCount(self, path, q, 0)
        q['political_affiliations__political_party'] = 'd'
        assertCount(self, path, q, 1)
        # Sources
        about_now = '2015-12-17T00:00:00Z'
        q['sources__date_modified__gt'] = about_now
        assertCount(self, path, q, 0)
        q.pop('sources__date_modified__gt')  # Next key doesn't overwrite.
        q['sources__date_modified__lt'] = about_now
        assertCount(self, path, q, 1)
        # ABA Ratings
        q['aba_ratings__rating'] = 'q'
        assertCount(self, path, q, 0)
        q['aba_ratings__rating'] = 'nq'
        assertCount(self, path, q, 1)

    def test_education_filtering(self):
        """Can we filter education objects?"""
        path = reverse('education-list', kwargs={'version': 'v3'})
        q = dict()
        # Filter by degree
        q['degree'] = 'XXX'
        assertCount(self, path, q, 0)
        q['degree'] = 'JD'
        assertCount(self, path, q, 1)
        # Filter by degree's related field, School
        q['school__name__istartswith'] = 'XXX'
        assertCount(self, path, q, 0)
        q['school__name__istartswith'] = 'New York'
        assertCount(self, path, q, 1)

    def test_title_filtering(self):
        """Can Judge Titles be filtered?"""
        path = reverse('title-list', kwargs={'version': 'v3'})
        q = dict()
        # Filter by title_name
        q['title_name'] = 'XXX'
        assertCount(self, path, q, 0)
        q['title_name'] = 'c-jud'
        assertCount(self, path, q, 1)

    def test_reverse_filtering(self):
        """Can we filter Source objects by judge name?"""
        # I want any source notes about judge judy.
        path = reverse('source-list', kwargs={'version': 'v3'})
        q = {'judge': 1}
        assertCount(self, path, q, 1)

    def test_position_filters(self):
        """Can we filter on positions"""
        path = reverse('position-list', kwargs={'version': 'v3'})
        q = dict()
        # I want positions to do with judge #1 (Judy)
        q['judge'] = 1
        assertCount(self, path, q, 1)
        # Retention events
        # NOTE(review): 'rentention_events' looks like a typo for
        # 'retention_events'; if the filter backend ignores unknown params
        # this assertion may pass vacuously. Verify the filter key.
        q['rentention_events__retention_type'] = 'reapp_gov'
        assertCount(self, path, q, 1)
        # Appointer was Bill, a Democrat
        q['appointer__name_first__istartswith'] = 'bill'
        q['appointer__political_affiliations__political_party'] = 'd'
        assertCount(self, path, q, 1)
        # She was not appointed by a Republican
        q['appointer__political_affiliations__political_party'] = 'r'
        assertCount(self, path, q, 0)

    def test_racial_filters(self):
        """Can we filter by race?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        q = {'race': 'w'}
        assertCount(self, path, q, 1)
        # Do an OR. This returns judges that are either black or white (not
        # that it matters, MJ)
        q['race'] = ['w', 'b']
        assertCount(self, path, q, 1)

    def test_circular_relationships(self):
        """Do filters configured using strings instead of classes work?"""
        path = reverse('education-list', kwargs={'version': 'v3'})
        q = dict()
        # Traverse judges, careers
        q['judge__careers__job_title__icontains'] = 'xxx'
        assertCount(self, path, q, 0)
        q['judge__careers__job_title__icontains'] = 'lawyer'
        assertCount(self, path, q, 1)
        # Just traverse to the judge table
        q['judge__name_first'] = "Judy"  # Nope.
        assertCount(self, path, q, 0)
        q['judge__name_first'] = "Judith"  # Yep.
        assertCount(self, path, q, 1)

    def test_exclusion_filters(self):
        """Can we exclude using !'s?"""
        path = reverse('position-list', kwargs={'version': 'v3'})
        q = dict()
        # I want positions to do with any judge other than judge #1
        # Note the exclamation mark. In a URL this would look like
        # "?judge!=1". Fun stuff.
        q['judge!'] = 1
        assertCount(self, path, q, 0)  # Alas, there are none.
class DRFSearchAndAudioAppsApiFilterTest(TestCase):
    """Exercise the DRF filters on the search and audio API endpoints."""
    fixtures = ['judge_judy.json', 'test_objects_search.json',
                'test_objects_audio.json', 'court_data.json',
                'user_with_judge_access.json']

    def test_cluster_filters(self):
        """Do a variety of cluster filters work?"""
        path = reverse('opinioncluster-list', kwargs={'version': 'v3'})
        q = dict()
        # Related filters
        q['panel__id'] = 1
        assertCount(self, path, q, 1)
        q['non_participating_judges!'] = 1  # Exclusion filter.
        assertCount(self, path, q, 1)
        q['sub_opinions__author'] = 1
        assertCount(self, path, q, 4)
        # Boolean filter
        q['per_curiam'] = False
        assertCount(self, path, q, 4)
        # Integer lookups
        q = dict()
        q['scdb_votes_majority__gt'] = 10
        assertCount(self, path, q, 0)
        q['scdb_votes_majority__gt'] = 1
        assertCount(self, path, q, 1)

    def test_opinion_filter(self):
        """Do a variety of opinion filters work?"""
        path = reverse('opinion-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filters
        q['sha1'] = 'asdfasdfasdfasdfasdfasddf-nope'
        assertCount(self, path, q, 0)
        q['sha1'] = 'asdfasdfasdfasdfasdfasddf'
        assertCount(self, path, q, 6)
        # Related filters
        q['cluster__panel'] = 2
        assertCount(self, path, q, 0)
        q['cluster__panel'] = 1
        assertCount(self, path, q, 4)
        q = dict()
        q['author__name_first__istartswith'] = "Nope"
        assertCount(self, path, q, 0)
        q['author__name_first__istartswith'] = "jud"
        assertCount(self, path, q, 6)
        q['joined_by__name_first__istartswith'] = "Nope"
        assertCount(self, path, q, 0)
        q['joined_by__name_first__istartswith'] = "jud"
        assertCount(self, path, q, 1)
        q = dict()
        # Multiple-choice filter. The list stored in q is the same object
        # as `types`, so appending to `types` widens the query in place.
        types = ['010combined']
        q['type'] = types
        assertCount(self, path, q, 5)
        types.append('020lead')
        assertCount(self, path, q, 6)

    def test_docket_filters(self):
        """Do a variety of docket filters work?"""
        path = reverse('docket-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filter
        q['docket_number'] = '14-1165-nope'
        assertCount(self, path, q, 0)
        q['docket_number'] = 'docket number'
        assertCount(self, path, q, 3)
        # Related filters
        q['court'] = 'test-nope'
        assertCount(self, path, q, 0)
        q['court'] = 'test'
        assertCount(self, path, q, 3)
        q['clusters__panel__name_first__istartswith'] = 'jud-nope'
        assertCount(self, path, q, 0)
        q['clusters__panel__name_first__istartswith'] = 'jud'
        assertCount(self, path, q, 1)
        q['audio_files__sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3-nope'
        assertCount(self, path, q, 0)
        q['audio_files__sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3'
        assertCount(self, path, q, 1)

    def test_audio_filters(self):
        # Filters on the oral-argument audio endpoint.
        path = reverse('audio-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filter
        q['sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3-nope'
        assertCount(self, path, q, 0)
        q['sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3'
        assertCount(self, path, q, 1)
        # Related filter
        q['docket__court'] = 'test-nope'
        assertCount(self, path, q, 0)
        q['docket__court'] = 'test'
        assertCount(self, path, q, 1)
        # Multiple choice filter
        q = dict()
        sources = ['C']
        q['source'] = sources
        assertCount(self, path, q, 2)
        sources.append('CR')
        assertCount(self, path, q, 3)

    def test_opinion_cited_filters(self):
        """Do the filters on the opinions_cited work?"""
        path = reverse('opinionscited-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple related filter
        q['citing_opinion__sha1'] = 'asdf-nope'
        assertCount(self, path, q, 0)
        q['citing_opinion__sha1'] = 'asdfasdfasdfasdfasdfasddf'
        assertCount(self, path, q, 4)
        # Fancy filter: Citing Opinions written by judges with first name
        # istartingwith "jud"
        q['citing_opinion__author__name_first__istartswith'] = 'jud-nope'
        assertCount(self, path, q, 0)
        q['citing_opinion__author__name_first__istartswith'] = 'jud'
        assertCount(self, path, q, 4)
class DRFFieldSelectionTest(TestCase):
    """Can the ?fields= parameter restrict which fields the API returns?"""
    fixtures = ['judge_judy.json', 'test_objects_search.json',
                'user_with_judge_access.json']

    def test_only_some_fields_returned(self):
        """Can we return only some of the fields?"""
        # First check the Judge endpoint, one of our more complicated ones.
        path = reverse('judge-list', kwargs={'version': 'v3'})
        fields_to_return = ['educations', 'date_modified', 'slug']
        q = {'fields': ','.join(fields_to_return)}
        self.client.login(username='pandora', password='password')
        r = self.client.get(path, q)
        self.assertEqual(len(r.data['results'][0].keys()),
                         len(fields_to_return))

        # One more check for good measure.
        path = reverse('opinioncluster-list', kwargs={'version': 'v3'})
        fields_to_return = ['per_curiam', 'slug']
        # Bug fix: rebuild the query from the NEW field list. Previously the
        # stale `q` (still naming the three Judge fields) was sent, so the
        # length comparison below checked the wrong request.
        q = {'fields': ','.join(fields_to_return)}
        r = self.client.get(path, q)
        self.assertEqual(len(r.data['results'][0].keys()),
                         len(fields_to_return))
class BulkJsonHistoryTest(TestCase):
    """Tests for the BulkJsonHistory on-disk bookkeeping helper."""

    def setUp(self):
        self.history = BulkJsonHistory('test')

    def tearDown(self):
        # Remove the serialized JSON file so runs don't leak state.
        self.history.delete_from_disk()

    def test_load_the_file(self):
        # A fresh history loads as an empty dict.
        data = self.history.load_json_file()
        self.assertEqual(
            {},
            data,
        )

    def test_load_date_when_none(self):
        # No successful run recorded yet -> no date.
        d = self.history.get_last_good_date()
        self.assertIsNone(d)

    def test_set_date_then_load_it(self):
        self.history.add_current_attempt_and_save()
        self.history.mark_success_and_save()
        d = self.history.get_last_good_date()
        self.assertAlmostEqual(
            # The date serialized is within ten seconds of now.
            d,
            now(),
            delta=timedelta(seconds=10)
        )

    def test_add_current_attempt(self):
        self.history.add_current_attempt_and_save()
        d = self.history.get_last_attempt()
        self.assertAlmostEqual(
            # Round-trips through disk, hence the tolerance.
            d,
            now(),
            delta=timedelta(seconds=10)
        )
TESTS: Better cleanup in API tests.
import shutil
from datetime import timedelta, date
from django.core.urlresolvers import reverse
from django.http import HttpRequest, JsonResponse
from django.test import Client, TestCase, override_settings
from django.utils.timezone import now
from cl.api.management.commands.cl_make_bulk_data import Command
from cl.api.utils import BulkJsonHistory
from cl.api.views import coverage_data
from cl.audio.models import Audio
from cl.scrapers.management.commands.cl_scrape_oral_arguments import \
Command as OralArgumentCommand
from cl.scrapers.test_assets import test_oral_arg_scraper
from cl.search.models import \
Docket, Court, Opinion, OpinionCluster, OpinionsCited
class BulkDataTest(TestCase):
    """End-to-end checks for bulk-data file generation."""
    fixtures = ['court_data.json']
    tmp_data_dir = '/tmp/bulk-dir/'

    def setUp(self):
        # Build a minimal docket -> cluster -> opinions graph plus one
        # citation so every bulk exporter has at least one row.
        docket = Docket(
            case_name=u'foo',
            court=Court.objects.get(pk='test'),
        )
        docket.save()
        # Must be more than a year old for all tests to be runnable.
        last_month = now().date() - timedelta(days=400)
        self.doc_cluster = OpinionCluster(
            case_name=u"foo",
            docket=docket,
            date_filed=last_month
        )
        self.doc_cluster.save(index=False)
        opinion = Opinion.objects.create(
            cluster=self.doc_cluster,
            type='Lead Opinion'
        )
        opinion2 = Opinion.objects.create(
            cluster=self.doc_cluster,
            type='Concurrence'
        )
        OpinionsCited.objects.create(
            citing_opinion=opinion2,
            cited_opinion=opinion
        )
        # Scrape the audio "site" and add its contents
        site = test_oral_arg_scraper.Site().parse()
        OralArgumentCommand().scrape_court(site, full_crawl=True)

    def tearDown(self):
        OpinionCluster.objects.all().delete()
        Docket.objects.all().delete()
        Audio.objects.all().delete()
        try:
            shutil.rmtree(self.tmp_data_dir)
        except OSError:
            # Directory may not exist if the bulk command never ran.
            pass

    @override_settings(BULK_DATA_DIR=tmp_data_dir)
    def test_make_all_bulk_files(self):
        """Can we successfully generate all bulk files?"""
        Command().execute()

    def test_database_has_objects_for_bulk_export(self):
        # Sanity-check that setUp populated every model the exporter reads.
        self.assertTrue(Opinion.objects.count() > 0, 'Opinions exist')
        self.assertTrue(Audio.objects.count() > 0, 'Audio exist')
        self.assertTrue(Docket.objects.count() > 0, 'Docket exist')
        self.assertTrue(Court.objects.count() > 0, 'Court exist')
        self.assertEqual(
            Court.objects.get(pk='test').full_name,
            'Testing Supreme Court'
        )
class BasicAPIPageTest(TestCase):
    """Test the basic views"""
    fixtures = ['judge_judy.json', 'test_objects_search.json']

    def setUp(self):
        self.client = Client()

        # Need pagerank file for test_pagerank_file()
        from cl.search.management.commands.cl_calculate_pagerank_networkx \
            import Command
        command = Command()
        command.do_pagerank(chown=False)

    def test_api_index(self):
        r = self.client.get(reverse('api_index'))
        self.assertEqual(r.status_code, 200)

    def test_court_index(self):
        r = self.client.get(reverse('court_index'))
        self.assertEqual(r.status_code, 200)

    def test_rest_docs(self):
        r = self.client.get(reverse('rest_docs'))
        self.assertEqual(r.status_code, 200)

    def test_bulk_data_index(self):
        r = self.client.get(reverse('bulk_data_index'))
        self.assertEqual(r.status_code, 200)

    def test_pagerank_file(self):
        # Served from the file generated in setUp().
        r = self.client.get(reverse('pagerank_file'))
        self.assertEqual(r.status_code, 200)

    def test_coverage_api(self):
        r = self.client.get(reverse('coverage_data',
                                    kwargs={'version': 2, 'court': 'ca9'}))
        self.assertEqual(r.status_code, 200)

    def test_coverage_api_via_url(self):
        # Should hit something like:
        # https://www.courtlistener.com/api/rest/v2/coverage/ca2/
        # Uses a hard-coded path (not reverse()) so a URLconf regression
        # is caught as well.
        r = self.client.get('/api/rest/v2/coverage/ca2/')
        self.assertEqual(r.status_code, 200)

    def test_api_info_page_displays_latest_rest_docs_by_default(self):
        response = self.client.get('/api/rest-info/')
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, 'rest-docs-vlatest.html')

    def test_api_info_page_can_display_different_versions_of_rest_docs(self):
        # Each archived docs version has its own template and page header.
        for version in ['v1', 'v2']:
            response = self.client.get('/api/rest-info/%s/' % (version,))
            self.assertEqual(response.status_code, 200)
            self.assertTemplateUsed(response, 'rest-docs-%s.html' % (version,))
            header = 'REST API – %s' % (version.upper(),)
            self.assertContains(response, header)
class ApiViewTest(TestCase):
    """Tests views in API module via direct calls and not HTTP"""

    def test_coverage_data_view_provides_court_data(self):
        # Call the view function directly with a bare HttpRequest; no URL
        # routing or middleware is involved.
        response = coverage_data(HttpRequest(), 'v2', 'ca9')
        self.assertEqual(response.status_code, 200)
        self.assertIsInstance(response, JsonResponse)
        self.assertContains(response, 'annual_counts')
        self.assertContains(response, 'total')
def assertCount(cls, path, q, expected_count):
    """Log in, GET *path* with query dict *q*, and assert that the API
    returned exactly *expected_count* results.

    *cls* is the calling TestCase instance; this helper is shared by the
    DRF filter test classes below.
    """
    cls.client.login(username='pandora', password='password')
    response = cls.client.get(path, q)
    actual = len(response.data['results'])
    cls.assertEqual(
        actual,
        expected_count,
        msg="r.data was: %s" % response.data,
    )
class DRFJudgeApiFilterTests(TestCase):
    """Do the filters work properly?"""
    fixtures = ['judge_judy.json', 'user_with_judge_access.json']

    def test_judge_filtering_by_first_name(self):
        """Can we filter by first name?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        # Filtering with good values brings back 1 result.
        q = {'name_first__istartswith': 'judith'}
        assertCount(self, path, q, 1)
        # Filtering with bad values brings back no results.
        q = {'name_first__istartswith': 'XXX'}
        assertCount(self, path, q, 0)

    def test_judge_filtering_by_date(self):
        """Do the various date filters work properly?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        # Exact match for her birthday
        correct_date = date(1942, 10, 21)
        q = {'date_dob': correct_date.isoformat()}
        assertCount(self, path, q, 1)
        # People born after the day before her birthday
        before = correct_date - timedelta(days=1)
        q = {'date_dob__gt': before.isoformat()}
        assertCount(self, path, q, 1)
        # Flip the logic. This should return no results.
        q = {'date_dob__lt': before.isoformat()}
        assertCount(self, path, q, 0)

    def test_nested_judge_filtering(self):
        """Can we filter across various relations?

        Each of these assertions adds another parameter making our final test
        a pretty complex combination.
        """
        path = reverse('judge-list', kwargs={'version': 'v3'})
        q = dict()
        # No results for a bad query
        q['educations__degree'] = 'XXX'
        assertCount(self, path, q, 0)
        # One result for a good query
        q['educations__degree'] = 'JD'
        assertCount(self, path, q, 1)
        # Again, no results
        q['educations__degree_year'] = 1400
        assertCount(self, path, q, 0)
        # But with the correct year...one result
        q['educations__degree_year'] = 1965
        assertCount(self, path, q, 1)
        # Judy went to "New York Law School"
        q['educations__school__name__istartswith'] = "New York Law"
        assertCount(self, path, q, 1)
        # Moving on to careers. Bad value, then good.
        q['careers__job_title__icontains'] = 'XXX'
        assertCount(self, path, q, 0)
        q['careers__job_title__icontains'] = 'lawyer'
        assertCount(self, path, q, 1)
        # Moving on to titles...bad value, then good.
        q['titles__title_name'] = 'XXX'
        assertCount(self, path, q, 0)
        q['titles__title_name'] = 'c-jud'
        assertCount(self, path, q, 1)
        # Political affiliation filtering...bad, then good.
        q['political_affiliations__political_party'] = 'XXX'
        assertCount(self, path, q, 0)
        q['political_affiliations__political_party'] = 'd'
        assertCount(self, path, q, 1)
        # Sources
        about_now = '2015-12-17T00:00:00Z'
        q['sources__date_modified__gt'] = about_now
        assertCount(self, path, q, 0)
        q.pop('sources__date_modified__gt')  # Next key doesn't overwrite.
        q['sources__date_modified__lt'] = about_now
        assertCount(self, path, q, 1)
        # ABA Ratings
        q['aba_ratings__rating'] = 'q'
        assertCount(self, path, q, 0)
        q['aba_ratings__rating'] = 'nq'
        assertCount(self, path, q, 1)

    def test_education_filtering(self):
        """Can we filter education objects?"""
        path = reverse('education-list', kwargs={'version': 'v3'})
        q = dict()
        # Filter by degree
        q['degree'] = 'XXX'
        assertCount(self, path, q, 0)
        q['degree'] = 'JD'
        assertCount(self, path, q, 1)
        # Filter by degree's related field, School
        q['school__name__istartswith'] = 'XXX'
        assertCount(self, path, q, 0)
        q['school__name__istartswith'] = 'New York'
        assertCount(self, path, q, 1)

    def test_title_filtering(self):
        """Can Judge Titles be filtered?"""
        path = reverse('title-list', kwargs={'version': 'v3'})
        q = dict()
        # Filter by title_name
        q['title_name'] = 'XXX'
        assertCount(self, path, q, 0)
        q['title_name'] = 'c-jud'
        assertCount(self, path, q, 1)

    def test_reverse_filtering(self):
        """Can we filter Source objects by judge name?"""
        # I want any source notes about judge judy.
        path = reverse('source-list', kwargs={'version': 'v3'})
        q = {'judge': 1}
        assertCount(self, path, q, 1)

    def test_position_filters(self):
        """Can we filter on positions"""
        path = reverse('position-list', kwargs={'version': 'v3'})
        q = dict()
        # I want positions to do with judge #1 (Judy)
        q['judge'] = 1
        assertCount(self, path, q, 1)
        # Retention events
        # NOTE(review): 'rentention_events' looks like a typo for
        # 'retention_events'; if the filter backend ignores unknown params
        # this assertion may pass vacuously. Verify the filter key.
        q['rentention_events__retention_type'] = 'reapp_gov'
        assertCount(self, path, q, 1)
        # Appointer was Bill, a Democrat
        q['appointer__name_first__istartswith'] = 'bill'
        q['appointer__political_affiliations__political_party'] = 'd'
        assertCount(self, path, q, 1)
        # She was not appointed by a Republican
        q['appointer__political_affiliations__political_party'] = 'r'
        assertCount(self, path, q, 0)

    def test_racial_filters(self):
        """Can we filter by race?"""
        path = reverse('judge-list', kwargs={'version': 'v3'})
        q = {'race': 'w'}
        assertCount(self, path, q, 1)
        # Do an OR. This returns judges that are either black or white (not
        # that it matters, MJ)
        q['race'] = ['w', 'b']
        assertCount(self, path, q, 1)

    def test_circular_relationships(self):
        """Do filters configured using strings instead of classes work?"""
        path = reverse('education-list', kwargs={'version': 'v3'})
        q = dict()
        # Traverse judges, careers
        q['judge__careers__job_title__icontains'] = 'xxx'
        assertCount(self, path, q, 0)
        q['judge__careers__job_title__icontains'] = 'lawyer'
        assertCount(self, path, q, 1)
        # Just traverse to the judge table
        q['judge__name_first'] = "Judy"  # Nope.
        assertCount(self, path, q, 0)
        q['judge__name_first'] = "Judith"  # Yep.
        assertCount(self, path, q, 1)

    def test_exclusion_filters(self):
        """Can we exclude using !'s?"""
        path = reverse('position-list', kwargs={'version': 'v3'})
        q = dict()
        # I want positions to do with any judge other than judge #1
        # Note the exclamation mark. In a URL this would look like
        # "?judge!=1". Fun stuff.
        q['judge!'] = 1
        assertCount(self, path, q, 0)  # Alas, there are none.
class DRFSearchAndAudioAppsApiFilterTest(TestCase):
    """Exercise the DRF filters on the search and audio API endpoints."""
    fixtures = ['judge_judy.json', 'test_objects_search.json',
                'test_objects_audio.json', 'court_data.json',
                'user_with_judge_access.json']

    def test_cluster_filters(self):
        """Do a variety of cluster filters work?"""
        path = reverse('opinioncluster-list', kwargs={'version': 'v3'})
        q = dict()
        # Related filters
        q['panel__id'] = 1
        assertCount(self, path, q, 1)
        q['non_participating_judges!'] = 1  # Exclusion filter.
        assertCount(self, path, q, 1)
        q['sub_opinions__author'] = 1
        assertCount(self, path, q, 4)
        # Boolean filter
        q['per_curiam'] = False
        assertCount(self, path, q, 4)
        # Integer lookups
        q = dict()
        q['scdb_votes_majority__gt'] = 10
        assertCount(self, path, q, 0)
        q['scdb_votes_majority__gt'] = 1
        assertCount(self, path, q, 1)

    def test_opinion_filter(self):
        """Do a variety of opinion filters work?"""
        path = reverse('opinion-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filters
        q['sha1'] = 'asdfasdfasdfasdfasdfasddf-nope'
        assertCount(self, path, q, 0)
        q['sha1'] = 'asdfasdfasdfasdfasdfasddf'
        assertCount(self, path, q, 6)
        # Related filters
        q['cluster__panel'] = 2
        assertCount(self, path, q, 0)
        q['cluster__panel'] = 1
        assertCount(self, path, q, 4)
        q = dict()
        q['author__name_first__istartswith'] = "Nope"
        assertCount(self, path, q, 0)
        q['author__name_first__istartswith'] = "jud"
        assertCount(self, path, q, 6)
        q['joined_by__name_first__istartswith'] = "Nope"
        assertCount(self, path, q, 0)
        q['joined_by__name_first__istartswith'] = "jud"
        assertCount(self, path, q, 1)
        q = dict()
        # Multiple-choice filter. The list stored in q is the same object
        # as `types`, so appending to `types` widens the query in place.
        types = ['010combined']
        q['type'] = types
        assertCount(self, path, q, 5)
        types.append('020lead')
        assertCount(self, path, q, 6)

    def test_docket_filters(self):
        """Do a variety of docket filters work?"""
        path = reverse('docket-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filter
        q['docket_number'] = '14-1165-nope'
        assertCount(self, path, q, 0)
        q['docket_number'] = 'docket number'
        assertCount(self, path, q, 3)
        # Related filters
        q['court'] = 'test-nope'
        assertCount(self, path, q, 0)
        q['court'] = 'test'
        assertCount(self, path, q, 3)
        q['clusters__panel__name_first__istartswith'] = 'jud-nope'
        assertCount(self, path, q, 0)
        q['clusters__panel__name_first__istartswith'] = 'jud'
        assertCount(self, path, q, 1)
        q['audio_files__sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3-nope'
        assertCount(self, path, q, 0)
        q['audio_files__sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3'
        assertCount(self, path, q, 1)

    def test_audio_filters(self):
        # Filters on the oral-argument audio endpoint.
        path = reverse('audio-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple filter
        q['sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3-nope'
        assertCount(self, path, q, 0)
        q['sha1'] = 'de8cff186eb263dc06bdc5340860eb6809f898d3'
        assertCount(self, path, q, 1)
        # Related filter
        q['docket__court'] = 'test-nope'
        assertCount(self, path, q, 0)
        q['docket__court'] = 'test'
        assertCount(self, path, q, 1)
        # Multiple choice filter
        q = dict()
        sources = ['C']
        q['source'] = sources
        assertCount(self, path, q, 2)
        sources.append('CR')
        assertCount(self, path, q, 3)

    def test_opinion_cited_filters(self):
        """Do the filters on the opinions_cited work?"""
        path = reverse('opinionscited-list', kwargs={'version': 'v3'})
        q = dict()
        # Simple related filter
        q['citing_opinion__sha1'] = 'asdf-nope'
        assertCount(self, path, q, 0)
        q['citing_opinion__sha1'] = 'asdfasdfasdfasdfasdfasddf'
        assertCount(self, path, q, 4)
        # Fancy filter: Citing Opinions written by judges with first name
        # istartingwith "jud"
        q['citing_opinion__author__name_first__istartswith'] = 'jud-nope'
        assertCount(self, path, q, 0)
        q['citing_opinion__author__name_first__istartswith'] = 'jud'
        assertCount(self, path, q, 4)
class DRFFieldSelectionTest(TestCase):
    """Can the ?fields= parameter restrict which fields the API returns?"""
    fixtures = ['judge_judy.json', 'test_objects_search.json',
                'user_with_judge_access.json']

    def test_only_some_fields_returned(self):
        """Can we return only some of the fields?"""
        # First check the Judge endpoint, one of our more complicated ones.
        path = reverse('judge-list', kwargs={'version': 'v3'})
        fields_to_return = ['educations', 'date_modified', 'slug']
        q = {'fields': ','.join(fields_to_return)}
        self.client.login(username='pandora', password='password')
        r = self.client.get(path, q)
        self.assertEqual(len(r.data['results'][0].keys()),
                         len(fields_to_return))

        # One more check for good measure.
        path = reverse('opinioncluster-list', kwargs={'version': 'v3'})
        fields_to_return = ['per_curiam', 'slug']
        # Bug fix: rebuild the query from the NEW field list. Previously the
        # stale `q` (still naming the three Judge fields) was sent, so the
        # length comparison below checked the wrong request.
        q = {'fields': ','.join(fields_to_return)}
        r = self.client.get(path, q)
        self.assertEqual(len(r.data['results'][0].keys()),
                         len(fields_to_return))
class BulkJsonHistoryTest(TestCase):
    """Tests for the BulkJsonHistory on-disk bookkeeping helper."""

    def setUp(self):
        self.history = BulkJsonHistory('test')

    def tearDown(self):
        # Remove the serialized JSON file so runs don't leak state.
        self.history.delete_from_disk()

    def test_load_the_file(self):
        # A fresh history loads as an empty dict.
        data = self.history.load_json_file()
        self.assertEqual(
            {},
            data,
        )

    def test_load_date_when_none(self):
        # No successful run recorded yet -> no date.
        d = self.history.get_last_good_date()
        self.assertIsNone(d)

    def test_set_date_then_load_it(self):
        self.history.add_current_attempt_and_save()
        self.history.mark_success_and_save()
        d = self.history.get_last_good_date()
        self.assertAlmostEqual(
            # The date serialized is within ten seconds of now.
            d,
            now(),
            delta=timedelta(seconds=10)
        )

    def test_add_current_attempt(self):
        self.history.add_current_attempt_and_save()
        d = self.history.get_last_attempt()
        self.assertAlmostEqual(
            # Round-trips through disk, hence the tolerance.
            d,
            now(),
            delta=timedelta(seconds=10)
        )
|
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010-2011 Orca Team
# Copyright 2011-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
# Module metadata.  The "$...$" markers are legacy CVS/SVN keyword strings.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
# NOTE(review): these adjacent string literals concatenate with no
# separator, so __copyright__ reads "...Inc.Copyright (c)..." -- probably
# missing newlines between the lines. Confirm against upstream intent.
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc." \
                "Copyright (c) 2010-2011 Orca Team" \
                "Copyright (c) 2011-2015 Igalia, S.L."
__license__   = "LGPL"
import pyatspi
import urllib
from orca import debug
from orca import messages
from orca import object_properties
from orca import orca_state
from orca import settings_manager
from orca import speech_generator
_settingsManager = settings_manager.getManager()
class SpeechGenerator(speech_generator.SpeechGenerator):
    def __init__(self, script):
        """Initialize the web speech generator for *script*."""
        super().__init__(script)
    def _generateAncestors(self, obj, **args):
        """Generate speech for the ancestry context of obj.

        Outside of document content this defers to the default generator.
        Within a document, ancestry is suppressed for the focused object
        during SayAll, trimmed for links/landmarks/math, and bounded by the
        stopAtRoles/skipRoles lists set below.
        """
        if not self._script.utilities.inDocumentContent(obj):
            return super()._generateAncestors(obj, **args)

        # During SayAll, the focused object's ancestry is noise.
        if self._script.inSayAll() and obj == orca_state.locusOfFocus:
            return []

        result = []
        priorObj = args.get('priorObj')
        if priorObj and self._script.utilities.inDocumentContent(priorObj):
            priorDoc = self._script.utilities.getDocumentForObject(priorObj)
            doc = self._script.utilities.getDocumentForObject(obj)
            # Crossed into a different top-level document: speak the new
            # document's name first.
            if priorDoc != doc and not self._script.utilities.getDocumentForObject(doc):
                result = [super()._generateName(doc)]

        if self._script.utilities.isLink(obj) \
           or self._script.utilities.isLandmark(obj) \
           or self._script.utilities.isMath(obj):
            return result

        args['stopAtRoles'] = [pyatspi.ROLE_DOCUMENT_FRAME,
                               pyatspi.ROLE_DOCUMENT_WEB,
                               pyatspi.ROLE_EMBEDDED,
                               pyatspi.ROLE_INTERNAL_FRAME,
                               pyatspi.ROLE_MATH,
                               pyatspi.ROLE_MENU_BAR,
                               pyatspi.ROLE_TOOL_BAR]
        args['skipRoles'] = [pyatspi.ROLE_PARAGRAPH,
                             pyatspi.ROLE_LABEL,
                             pyatspi.ROLE_LINK,
                             pyatspi.ROLE_LIST_ITEM,
                             pyatspi.ROLE_TEXT]
        result.extend(super()._generateAncestors(obj, **args))

        return result
def _generateAllTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAllTextSelection(obj, **args)
def _generateAnyTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAnyTextSelection(obj, **args)
def _generateClickable(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'clickable'
if self._script.utilities.isClickableElement(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
    def _generateDescription(self, obj, **args):
        """Generate speech for obj's accessible description.

        Suppressed for dead objects, objects whose description is preferred
        over the name, unfocused objects (except alerts/dialogs), plain text
        outside of whereAmI, and links reached via caret navigation.
        """
        if self._script.utilities.isZombie(obj):
            return []

        # _generateName speaks the description in this case; don't repeat.
        if self._script.utilities.preferDescriptionOverName(obj):
            return []

        role = args.get('role', obj.getRole())
        if obj != orca_state.locusOfFocus:
            if role in [pyatspi.ROLE_ALERT, pyatspi.ROLE_DIALOG]:
                return super()._generateDescription(obj, **args)
            return []

        formatType = args.get('formatType')
        if formatType == 'basicWhereAmI' and self._script.utilities.isLiveRegion(obj):
            return self._script.liveRegionManager.generateLiveRegionDescription(obj, **args)

        if role == pyatspi.ROLE_TEXT and formatType != 'basicWhereAmI':
            return []

        # TODO - JD: This is private.
        if role == pyatspi.ROLE_LINK and self._script._lastCommandWasCaretNav:
            return []

        return super()._generateDescription(obj, **args)
def _generateHasLongDesc(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'haslongdesc'
if self._script.utilities.hasLongDesc(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
def _generateLabelOrName(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj) \
and not self._script.utilities.isLandmark(obj) \
and not self._script.utilities.isDPub(obj):
return []
if self._script.utilities.inDocumentContent(obj) and obj.name:
result = [obj.name]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateLabelOrName(obj, **args)
    def _generateName(self, obj, **args):
        """Generate speech for obj's accessible name.

        In document content the raw name (or the description, when
        preferred) is used directly; otherwise the default generator runs.
        """
        if self._script.utilities.isTextBlockElement(obj) \
           and not self._script.utilities.isLandmark(obj) \
           and not self._script.utilities.isDPub(obj):
            return []

        # Skip a name that merely repeats the parent's name, unless focused.
        if obj.parent and obj.name and obj.name == obj.parent.name \
           and obj != orca_state.locusOfFocus:
            return []

        # TODO - JD: Once the formatting strings are vastly cleaned up
        # or simply removed, hacks like this won't be needed.
        role = args.get('role', obj.getRole())
        if role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_SPIN_BUTTON]:
            return super()._generateName(obj, **args)

        # NOTE(review): presumably links without an explicit name are named
        # from their content, which gets spoken elsewhere -- confirm.
        if self._script.utilities.isLink(obj) \
           and not self._script.utilities.hasExplicitName(obj):
            return []

        if self._script.utilities.inDocumentContent(obj) and obj.name:
            if self._script.utilities.preferDescriptionOverName(obj):
                result = [obj.description]
            else:
                result = [obj.name]
            result.extend(self.voice(speech_generator.DEFAULT))
            return result

        return super()._generateName(obj, **args)
def _generateLabel(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj):
return []
label, objects = self._script.utilities.inferLabelFor(obj)
if label:
result = [label]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateLabel(obj, **args)
def _generateNewNodeLevel(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj) \
or self._script.utilities.isLink(obj):
return []
return super()._generateNewNodeLevel(obj, **args)
def _generateLeaving(self, obj, **args):
if not args.get('leaving'):
return []
if self._script.utilities.inDocumentContent(obj) \
and not self._script.utilities.inDocumentContent(orca_state.locusOfFocus):
result = ['']
result.extend(self.voice(speech_generator.SYSTEM))
return result
return super()._generateLeaving(obj, **args)
    def _generateNewRadioButtonGroup(self, obj, **args):
        """Deliberately generate nothing for a newly-entered radio button group."""
        # TODO - JD: Looking at the default speech generator's method, this
        # is all kinds of broken. Until that can be sorted out, try to filter
        # out some of the noise....
        return []
    def _generateNumberOfChildren(self, obj, **args):
        """Speak the number of items in a list or list box, e.g. '5 items'."""
        if _settingsManager.getSetting('onlySpeakDisplayedText'):
            return []
        # We handle things even for non-document content due to issues in
        # other toolkits (e.g. exposing list items to us that are not
        # exposed to sighted users)
        role = args.get('role', obj.getRole())
        if role not in [pyatspi.ROLE_LIST, pyatspi.ROLE_LIST_BOX]:
            return super()._generateNumberOfChildren(obj, **args)
        # NOTE(review): obj[0] raises if the list exposes no children --
        # presumably callers only reach here with a populated list; confirm.
        setsize = self._script.utilities.getSetSize(obj[0])
        if setsize is None:
            # Fall back to counting the list-item children directly.
            children = [x for x in obj if x.getRole() == pyatspi.ROLE_LIST_ITEM]
            setsize = len(children)
        if not setsize:
            return []
        result = [messages.listItemCount(setsize)]
        result.extend(self.voice(speech_generator.SYSTEM))
        return result
# TODO - JD: Yet another dumb generator method we should kill.
def _generateTextRole(self, obj, **args):
if self._script.inSayAll():
return []
return self._generateRoleName(obj, **args)
def getLocalizedRoleName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super().getLocalizedRoleName(obj, **args)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
return roledescription
return super().getLocalizedRoleName(obj, **args)
def _generateRealActiveDescendantDisplayedText(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRealActiveDescendantDisplayedText(obj, **args)
return self._generateDisplayedText(obj, **args)
def _generateRoleName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRoleName(obj, **args)
result = []
acss = self.voice(speech_generator.SYSTEM)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
result = [roledescription]
result.extend(acss)
return result
role = args.get('role', obj.getRole())
enabled, disabled = self._getEnabledAndDisabledContextRoles()
if role in disabled:
return []
force = args.get('force', False)
start = args.get('startOffset')
end = args.get('endOffset')
index = args.get('index', 0)
total = args.get('total', 1)
if not force:
doNotSpeak = [pyatspi.ROLE_FOOTER,
pyatspi.ROLE_FORM,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_REDUNDANT_OBJECT,
pyatspi.ROLE_UNKNOWN]
else:
doNotSpeak = [pyatspi.ROLE_UNKNOWN]
if not force:
doNotSpeak.append(pyatspi.ROLE_TABLE_CELL)
doNotSpeak.append(pyatspi.ROLE_TEXT)
doNotSpeak.append(pyatspi.ROLE_STATIC)
if args.get('formatType', 'unfocused') != 'basicWhereAmI':
doNotSpeak.append(pyatspi.ROLE_LIST_ITEM)
doNotSpeak.append(pyatspi.ROLE_LIST)
if (start or end):
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_FRAME)
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_WEB)
doNotSpeak.append(pyatspi.ROLE_ALERT)
if self._script.utilities.isAnchor(obj):
doNotSpeak.append(obj.getRole())
if total > 1:
doNotSpeak.append(pyatspi.ROLE_ROW_HEADER)
if obj.getState().contains(pyatspi.STATE_EDITABLE):
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if ((lastKey in ["Down", "Right"] and not mods) or self._script.inSayAll()) and start:
return []
if lastKey in ["Up", "Left"] and not mods:
text = self._script.utilities.queryNonEmptyText(obj)
if text and end not in [None, text.characterCount]:
return []
if role in [pyatspi.ROLE_ENTRY, pyatspi.ROLE_PASSWORD_TEXT, pyatspi.ROLE_SPIN_BUTTON]:
result.append(self.getLocalizedRoleName(obj, **args))
elif obj.parent and not obj.parent.getState().contains(pyatspi.STATE_EDITABLE):
if lastKey not in ["Home", "End", "Up", "Down", "Left", "Right", "Page_Up", "Page_Down"]:
result.append(object_properties.ROLE_EDITABLE_CONTENT)
elif role not in doNotSpeak:
result.append(self.getLocalizedRoleName(obj, **args))
if result:
result.extend(acss)
elif role == pyatspi.ROLE_HEADING:
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
level = self._script.utilities.headingLevel(obj)
if level:
result.append(object_properties.ROLE_HEADING_LEVEL_SPEECH % {
'role': self.getLocalizedRoleName(obj, **args),
'level': level})
result.extend(acss)
else:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif self._script.utilities.isLink(obj):
if obj.parent.getRole() == pyatspi.ROLE_IMAGE:
result.append(messages.IMAGE_MAP_LINK)
result.extend(acss)
else:
if self._script.utilities.hasUselessCanvasDescendant(obj):
result.append(self.getLocalizedRoleName(obj, role=pyatspi.ROLE_IMAGE))
result.extend(acss)
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif role not in doNotSpeak and args.get('priorObj') != obj:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
if self._script.utilities.isMath(obj) and not self._script.utilities.isMathTopLevel(obj):
return result
ancestorRoles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
if index == total - 1 \
and (role == pyatspi.ROLE_IMAGE or self._script.utilities.queryNonEmptyText(obj)):
speakRoles = lambda x: x and x.getRole() in ancestorRoles
ancestor = pyatspi.findAncestor(obj, speakRoles)
if ancestor and ancestor.getRole() != role:
result.extend(self._generateRoleName(ancestor))
return result
def _generatePageSummary(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
onlyIfFound = args.get('formatType') != 'detailedWhereAmI'
string = self._script.utilities.getPageSummary(obj, onlyIfFound)
if not string:
return []
result = [string]
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateSiteDescription(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
link_uri = self._script.utilities.uri(obj)
if not link_uri:
return []
link_uri_info = urllib.parse.urlparse(link_uri)
doc_uri = self._script.utilities.documentFrameURI()
if not doc_uri:
return []
result = []
doc_uri_info = urllib.parse.urlparse(doc_uri)
if link_uri_info[1] == doc_uri_info[1]:
if link_uri_info[2] == doc_uri_info[2]:
result.append(messages.LINK_SAME_PAGE)
else:
result.append(messages.LINK_SAME_SITE)
else:
linkdomain = link_uri_info[1].split('.')
docdomain = doc_uri_info[1].split('.')
if len(linkdomain) > 1 and len(docdomain) > 1 \
and linkdomain[-1] == docdomain[-1] \
and linkdomain[-2] == docdomain[-2]:
result.append(messages.LINK_SAME_SITE)
else:
result.append(messages.LINK_DIFFERENT_SITE)
if result:
result.extend(self.voice(speech_generator.HYPERLINK))
return result
def _generateExpandedEOCs(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateExpandedEOCs(obj, **args)
result = []
startOffset = args.get('startOffset', 0)
endOffset = args.get('endOffset', -1)
text = self._script.utilities.expandEOCs(obj, startOffset, endOffset)
if text:
result.append(text)
return result
    def _generatePositionInList(self, obj, **args):
        """Speak obj's position within its set, e.g. 'item 3 of 10'."""
        if _settingsManager.getSetting('onlySpeakDisplayedText'):
            return []
        if not args.get('forceList', False) \
           and not _settingsManager.getSetting('enablePositionSpeaking'):
            return []
        # TODO - JD: We cannot do this for XUL (or whatever Firefox is
        # using in its non-webcontent dialogs)
        #if not self._script.utilities.inDocumentContent(obj):
        #    return super()._generatePositionInList(obj, **args)
        menuRoles = [pyatspi.ROLE_MENU_ITEM,
                     pyatspi.ROLE_TEAROFF_MENU_ITEM,
                     pyatspi.ROLE_CHECK_MENU_ITEM,
                     pyatspi.ROLE_RADIO_MENU_ITEM,
                     pyatspi.ROLE_MENU]
        if obj.getRole() in menuRoles:
            return super()._generatePositionInList(obj, **args)
        # Speaking position while the user edits a combo box would be noise.
        if self._script.utilities.isEditableComboBox(obj):
            return []
        # Prefer the utilities' posinset/setsize; fall back to the default
        # generator when they cannot be determined.
        position = self._script.utilities.getPositionInSet(obj)
        total = self._script.utilities.getSetSize(obj)
        if position is None or total is None:
            return super()._generatePositionInList(obj, **args)
        position = int(position)
        total = int(total)
        # Negative values mean the position/size is indeterminate.
        if position < 0 or total < 0:
            return []
        result = []
        result.append(self._script.formatting.getString(
            mode='speech',
            stringType='groupindex') \
            % {"index" : position,
               "total" : total})
        result.extend(self.voice(speech_generator.SYSTEM))
        return result
def _generateTableCellRow(self, obj, **args):
if not self._script.inFocusMode():
return super()._generateTableCellRow(obj, **args)
if not self._script.utilities.shouldReadFullRow(obj):
return self._generateRealTableCell(obj, **args)
isRow = lambda x: x and x.getRole() == pyatspi.ROLE_TABLE_ROW
row = pyatspi.findAncestor(obj, isRow)
if row and row.name and not self._script.utilities.isLayoutOnly(row):
return self.generate(row)
return super()._generateTableCellRow(obj, **args)
    def generateSpeech(self, obj, **args):
        """Generate the full speech presentation for obj.

        Non-document objects are deferred to the default generator. Otherwise
        the role is temporarily overridden so the formatting strings match how
        the object should be presented (link, section/div, or an alternative
        role), and restored before returning.
        """
        if not self._script.utilities.inDocumentContent(obj):
            msg = "WEB: %s is not in document content. Calling default speech generator." % obj
            debug.println(debug.LEVEL_INFO, msg, True)
            return super().generateSpeech(obj, **args)
        msg = "WEB: Generating speech for document object %s" % obj
        debug.println(debug.LEVEL_INFO, msg, True)
        result = []
        if args.get('formatType') == 'detailedWhereAmI':
            oldRole = self._overrideRole('default', args)
        elif self._script.utilities.isLink(obj):
            oldRole = self._overrideRole(pyatspi.ROLE_LINK, args)
        elif self._script.utilities.treatAsDiv(obj, offset=args.get('startOffset')):
            oldRole = self._overrideRole(pyatspi.ROLE_SECTION, args)
        else:
            oldRole = self._overrideRole(self._getAlternativeRole(obj, **args), args)
        # The prior context determines which ancestors need to be (re)spoken.
        if not 'priorObj' in args:
            args['priorObj'] = self._script.utilities.getPriorContext()[0]
        # Objects which label other content are presented via their context.
        if self._script.utilities.isLabellingContents(obj):
            result = list(filter(lambda x: x, self.generateContext(obj, **args)))
        if not result:
            result = list(filter(lambda x: x, super().generateSpeech(obj, **args)))
        self._restoreRole(oldRole, args)
        msg = "WEB: Speech generation for document object %s complete:" % obj
        # NOTE(review): other println calls here pass True as the third arg --
        # confirm whether its omission below is intentional.
        debug.println(debug.LEVEL_INFO, msg)
        for element in result:
            debug.println(debug.LEVEL_ALL, " %s" % element)
        return result
    def generateContents(self, contents, **args):
        """Generate speech for a list of (obj, startOffset, endOffset, string)
        content tuples, e.g. the contents of a line."""
        if not len(contents):
            return []
        result = []
        contents = self._script.utilities.filterContentsForPresentation(contents, True)
        msg = "WEB: Generating speech contents (length: %i)" % len(contents)
        debug.println(debug.LEVEL_INFO, msg, True)
        for i, content in enumerate(contents):
            obj, start, end, string = content
            msg = "ITEM %i: %s, start: %i, end: %i, string: '%s'" \
                  % (i, obj, start, end, string)
            debug.println(debug.LEVEL_INFO, msg, True)
            # index/total tell the per-object generator where this chunk falls
            # within the overall contents (e.g. to avoid repeating role names).
            utterance = self.generateSpeech(
                obj, startOffset=start, endOffset=end, string=string,
                index=i, total=len(contents), **args)
            if isinstance(utterance, list):
                isNotEmptyList = lambda x: not (isinstance(x, list) and not x)
                utterance = list(filter(isNotEmptyList, utterance))
            if utterance and utterance[0]:
                result.append(utterance)
                args['priorObj'] = obj
        if not result:
            # Present "blank" rather than silence, except during Say All.
            if self._script.inSayAll():
                string = ""
            else:
                string = messages.BLANK
            result = [string, self.voice(speech_generator.DEFAULT)]
        return result
# Change note: ensure we do not repeat the "comment" role name when a single
# comment's content spans multiple objects.
# Orca
#
# Copyright 2005-2009 Sun Microsystems Inc.
# Copyright 2010-2011 Orca Team
# Copyright 2011-2015 Igalia, S.L.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., Franklin Street, Fifth Floor,
# Boston MA 02110-1301 USA.
__id__ = "$Id$"
__version__ = "$Revision$"
__date__ = "$Date$"
__copyright__ = "Copyright (c) 2005-2009 Sun Microsystems Inc." \
"Copyright (c) 2010-2011 Orca Team" \
"Copyright (c) 2011-2015 Igalia, S.L."
__license__ = "LGPL"
import pyatspi
import urllib
from orca import debug
from orca import messages
from orca import object_properties
from orca import orca_state
from orca import settings_manager
from orca import speech_generator
_settingsManager = settings_manager.getManager()
class SpeechGenerator(speech_generator.SpeechGenerator):
    def __init__(self, script):
        """Initialize the web speech generator for the given script."""
        super().__init__(script)
    def _generateAncestors(self, obj, **args):
        """Speak the relevant ancestry of obj (e.g. containers being entered),
        stopping at document/application boundaries."""
        if not self._script.utilities.inDocumentContent(obj):
            return super()._generateAncestors(obj, **args)
        # During Say All, speaking ancestry would interrupt the content flow.
        if self._script.inSayAll() and obj == orca_state.locusOfFocus:
            return []
        result = []
        priorObj = args.get('priorObj')
        if priorObj and self._script.utilities.inDocumentContent(priorObj):
            priorDoc = self._script.utilities.getDocumentForObject(priorObj)
            doc = self._script.utilities.getDocumentForObject(obj)
            # Announce the document's name when crossing into a new top-level
            # document (one which is not itself inside another document).
            if priorDoc != doc and not self._script.utilities.getDocumentForObject(doc):
                result = [super()._generateName(doc)]
        if self._script.utilities.isLink(obj) \
           or self._script.utilities.isLandmark(obj) \
           or self._script.utilities.isMath(obj):
            return result
        args['stopAtRoles'] = [pyatspi.ROLE_DOCUMENT_FRAME,
                               pyatspi.ROLE_DOCUMENT_WEB,
                               pyatspi.ROLE_EMBEDDED,
                               pyatspi.ROLE_INTERNAL_FRAME,
                               pyatspi.ROLE_MATH,
                               pyatspi.ROLE_MENU_BAR,
                               pyatspi.ROLE_TOOL_BAR]
        args['skipRoles'] = [pyatspi.ROLE_PARAGRAPH,
                             pyatspi.ROLE_LABEL,
                             pyatspi.ROLE_LINK,
                             pyatspi.ROLE_LIST_ITEM,
                             pyatspi.ROLE_TEXT]
        result.extend(super()._generateAncestors(obj, **args))
        return result
def _generateAllTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAllTextSelection(obj, **args)
def _generateAnyTextSelection(self, obj, **args):
if self._script.utilities.isZombie(obj) \
or obj != orca_state.locusOfFocus:
return []
# TODO - JD: These (and the default script's) need to
# call utility methods rather than generate it.
return super()._generateAnyTextSelection(obj, **args)
def _generateClickable(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'clickable'
if self._script.utilities.isClickableElement(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
    def _generateDescription(self, obj, **args):
        """Speak obj's accessible description, when it adds information."""
        if self._script.utilities.isZombie(obj):
            return []
        # If the description is being presented in place of the name, avoid
        # presenting it a second time here.
        if self._script.utilities.preferDescriptionOverName(obj):
            return []
        role = args.get('role', obj.getRole())
        # For unfocused objects, only alert/dialog descriptions are relevant.
        if obj != orca_state.locusOfFocus:
            if role in [pyatspi.ROLE_ALERT, pyatspi.ROLE_DIALOG]:
                return super()._generateDescription(obj, **args)
            return []
        formatType = args.get('formatType')
        if formatType == 'basicWhereAmI' and self._script.utilities.isLiveRegion(obj):
            return self._script.liveRegionManager.generateLiveRegionDescription(obj, **args)
        if role == pyatspi.ROLE_TEXT and formatType != 'basicWhereAmI':
            return []
        # TODO - JD: This is private.
        if role == pyatspi.ROLE_LINK and self._script._lastCommandWasCaretNav:
            return []
        return super()._generateDescription(obj, **args)
def _generateHasLongDesc(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
if not args.get('mode', None):
args['mode'] = self._mode
args['stringType'] = 'haslongdesc'
if self._script.utilities.hasLongDesc(obj):
result = [self._script.formatting.getString(**args)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
return []
def _generateLabelOrName(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj) \
and not self._script.utilities.isLandmark(obj) \
and not self._script.utilities.isDPub(obj):
return []
if self._script.utilities.inDocumentContent(obj) and obj.name:
result = [obj.name]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateLabelOrName(obj, **args)
def _generateName(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj) \
and not self._script.utilities.isLandmark(obj) \
and not self._script.utilities.isDPub(obj):
return []
if obj.parent and obj.name and obj.name == obj.parent.name \
and obj != orca_state.locusOfFocus:
return []
# TODO - JD: Once the formatting strings are vastly cleaned up
# or simply removed, hacks like this won't be needed.
role = args.get('role', obj.getRole())
if role in [pyatspi.ROLE_COMBO_BOX, pyatspi.ROLE_SPIN_BUTTON]:
return super()._generateName(obj, **args)
if self._script.utilities.isLink(obj) \
and not self._script.utilities.hasExplicitName(obj):
return []
if self._script.utilities.inDocumentContent(obj) and obj.name:
if self._script.utilities.preferDescriptionOverName(obj):
result = [obj.description]
else:
result = [obj.name]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateName(obj, **args)
def _generateLabel(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj):
return []
label, objects = self._script.utilities.inferLabelFor(obj)
if label:
result = [label]
result.extend(self.voice(speech_generator.DEFAULT))
return result
return super()._generateLabel(obj, **args)
def _generateNewNodeLevel(self, obj, **args):
if self._script.utilities.isTextBlockElement(obj) \
or self._script.utilities.isLink(obj):
return []
return super()._generateNewNodeLevel(obj, **args)
def _generateLeaving(self, obj, **args):
if not args.get('leaving'):
return []
if self._script.utilities.inDocumentContent(obj) \
and not self._script.utilities.inDocumentContent(orca_state.locusOfFocus):
result = ['']
result.extend(self.voice(speech_generator.SYSTEM))
return result
return super()._generateLeaving(obj, **args)
def _generateNewRadioButtonGroup(self, obj, **args):
# TODO - JD: Looking at the default speech generator's method, this
# is all kinds of broken. Until that can be sorted out, try to filter
# out some of the noise....
return []
def _generateNumberOfChildren(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
# We handle things even for non-document content due to issues in
# other toolkits (e.g. exposing list items to us that are not
# exposed to sighted users)
role = args.get('role', obj.getRole())
if role not in [pyatspi.ROLE_LIST, pyatspi.ROLE_LIST_BOX]:
return super()._generateNumberOfChildren(obj, **args)
setsize = self._script.utilities.getSetSize(obj[0])
if setsize is None:
children = [x for x in obj if x.getRole() == pyatspi.ROLE_LIST_ITEM]
setsize = len(children)
if not setsize:
return []
result = [messages.listItemCount(setsize)]
result.extend(self.voice(speech_generator.SYSTEM))
return result
# TODO - JD: Yet another dumb generator method we should kill.
def _generateTextRole(self, obj, **args):
if self._script.inSayAll():
return []
return self._generateRoleName(obj, **args)
def getLocalizedRoleName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super().getLocalizedRoleName(obj, **args)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
return roledescription
return super().getLocalizedRoleName(obj, **args)
def _generateRealActiveDescendantDisplayedText(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRealActiveDescendantDisplayedText(obj, **args)
return self._generateDisplayedText(obj, **args)
def _generateRoleName(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateRoleName(obj, **args)
result = []
acss = self.voice(speech_generator.SYSTEM)
roledescription = self._script.utilities.getRoleDescription(obj)
if roledescription:
result = [roledescription]
result.extend(acss)
return result
role = args.get('role', obj.getRole())
enabled, disabled = self._getEnabledAndDisabledContextRoles()
if role in disabled:
return []
force = args.get('force', False)
start = args.get('startOffset')
end = args.get('endOffset')
index = args.get('index', 0)
total = args.get('total', 1)
if not force:
doNotSpeak = [pyatspi.ROLE_FOOTER,
pyatspi.ROLE_FORM,
pyatspi.ROLE_LABEL,
pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_PARAGRAPH,
pyatspi.ROLE_SECTION,
pyatspi.ROLE_REDUNDANT_OBJECT,
pyatspi.ROLE_UNKNOWN]
else:
doNotSpeak = [pyatspi.ROLE_UNKNOWN]
if not force:
doNotSpeak.append(pyatspi.ROLE_TABLE_CELL)
doNotSpeak.append(pyatspi.ROLE_TEXT)
doNotSpeak.append(pyatspi.ROLE_STATIC)
if args.get('formatType', 'unfocused') != 'basicWhereAmI':
doNotSpeak.append(pyatspi.ROLE_LIST_ITEM)
doNotSpeak.append(pyatspi.ROLE_LIST)
if (start or end):
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_FRAME)
doNotSpeak.append(pyatspi.ROLE_DOCUMENT_WEB)
doNotSpeak.append(pyatspi.ROLE_ALERT)
if self._script.utilities.isAnchor(obj):
doNotSpeak.append(obj.getRole())
if total > 1:
doNotSpeak.append(pyatspi.ROLE_ROW_HEADER)
if obj.getState().contains(pyatspi.STATE_EDITABLE):
lastKey, mods = self._script.utilities.lastKeyAndModifiers()
if ((lastKey in ["Down", "Right"] and not mods) or self._script.inSayAll()) and start:
return []
if lastKey in ["Up", "Left"] and not mods:
text = self._script.utilities.queryNonEmptyText(obj)
if text and end not in [None, text.characterCount]:
return []
if role in [pyatspi.ROLE_ENTRY, pyatspi.ROLE_PASSWORD_TEXT, pyatspi.ROLE_SPIN_BUTTON]:
result.append(self.getLocalizedRoleName(obj, **args))
elif obj.parent and not obj.parent.getState().contains(pyatspi.STATE_EDITABLE):
if lastKey not in ["Home", "End", "Up", "Down", "Left", "Right", "Page_Up", "Page_Down"]:
result.append(object_properties.ROLE_EDITABLE_CONTENT)
elif role not in doNotSpeak:
result.append(self.getLocalizedRoleName(obj, **args))
if result:
result.extend(acss)
elif role == pyatspi.ROLE_HEADING:
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
level = self._script.utilities.headingLevel(obj)
if level:
result.append(object_properties.ROLE_HEADING_LEVEL_SPEECH % {
'role': self.getLocalizedRoleName(obj, **args),
'level': level})
result.extend(acss)
else:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif self._script.utilities.isLink(obj):
if obj.parent.getRole() == pyatspi.ROLE_IMAGE:
result.append(messages.IMAGE_MAP_LINK)
result.extend(acss)
else:
if self._script.utilities.hasUselessCanvasDescendant(obj):
result.append(self.getLocalizedRoleName(obj, role=pyatspi.ROLE_IMAGE))
result.extend(acss)
if index == total - 1 or not self._script.utilities.isFocusableWithMathChild(obj):
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif role == pyatspi.ROLE_COMMENT:
if index == 0:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
elif role not in doNotSpeak and args.get('priorObj') != obj:
result.append(self.getLocalizedRoleName(obj, **args))
result.extend(acss)
if self._script.utilities.isMath(obj) and not self._script.utilities.isMathTopLevel(obj):
return result
ancestorRoles = [pyatspi.ROLE_HEADING, pyatspi.ROLE_LINK]
if index == total - 1 \
and (role == pyatspi.ROLE_IMAGE or self._script.utilities.queryNonEmptyText(obj)):
speakRoles = lambda x: x and x.getRole() in ancestorRoles
ancestor = pyatspi.findAncestor(obj, speakRoles)
if ancestor and ancestor.getRole() != role:
result.extend(self._generateRoleName(ancestor))
return result
def _generatePageSummary(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
onlyIfFound = args.get('formatType') != 'detailedWhereAmI'
string = self._script.utilities.getPageSummary(obj, onlyIfFound)
if not string:
return []
result = [string]
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateSiteDescription(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return []
link_uri = self._script.utilities.uri(obj)
if not link_uri:
return []
link_uri_info = urllib.parse.urlparse(link_uri)
doc_uri = self._script.utilities.documentFrameURI()
if not doc_uri:
return []
result = []
doc_uri_info = urllib.parse.urlparse(doc_uri)
if link_uri_info[1] == doc_uri_info[1]:
if link_uri_info[2] == doc_uri_info[2]:
result.append(messages.LINK_SAME_PAGE)
else:
result.append(messages.LINK_SAME_SITE)
else:
linkdomain = link_uri_info[1].split('.')
docdomain = doc_uri_info[1].split('.')
if len(linkdomain) > 1 and len(docdomain) > 1 \
and linkdomain[-1] == docdomain[-1] \
and linkdomain[-2] == docdomain[-2]:
result.append(messages.LINK_SAME_SITE)
else:
result.append(messages.LINK_DIFFERENT_SITE)
if result:
result.extend(self.voice(speech_generator.HYPERLINK))
return result
def _generateExpandedEOCs(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
return super()._generateExpandedEOCs(obj, **args)
result = []
startOffset = args.get('startOffset', 0)
endOffset = args.get('endOffset', -1)
text = self._script.utilities.expandEOCs(obj, startOffset, endOffset)
if text:
result.append(text)
return result
def _generatePositionInList(self, obj, **args):
if _settingsManager.getSetting('onlySpeakDisplayedText'):
return []
if not args.get('forceList', False) \
and not _settingsManager.getSetting('enablePositionSpeaking'):
return []
# TODO - JD: We cannot do this for XUL (or whatever Firefox is
# using in its non-webcontent dialogs)
#if not self._script.utilities.inDocumentContent(obj):
# return super()._generatePositionInList(obj, **args)
menuRoles = [pyatspi.ROLE_MENU_ITEM,
pyatspi.ROLE_TEAROFF_MENU_ITEM,
pyatspi.ROLE_CHECK_MENU_ITEM,
pyatspi.ROLE_RADIO_MENU_ITEM,
pyatspi.ROLE_MENU]
if obj.getRole() in menuRoles:
return super()._generatePositionInList(obj, **args)
if self._script.utilities.isEditableComboBox(obj):
return []
position = self._script.utilities.getPositionInSet(obj)
total = self._script.utilities.getSetSize(obj)
if position is None or total is None:
return super()._generatePositionInList(obj, **args)
position = int(position)
total = int(total)
if position < 0 or total < 0:
return []
result = []
result.append(self._script.formatting.getString(
mode='speech',
stringType='groupindex') \
% {"index" : position,
"total" : total})
result.extend(self.voice(speech_generator.SYSTEM))
return result
def _generateTableCellRow(self, obj, **args):
if not self._script.inFocusMode():
return super()._generateTableCellRow(obj, **args)
if not self._script.utilities.shouldReadFullRow(obj):
return self._generateRealTableCell(obj, **args)
isRow = lambda x: x and x.getRole() == pyatspi.ROLE_TABLE_ROW
row = pyatspi.findAncestor(obj, isRow)
if row and row.name and not self._script.utilities.isLayoutOnly(row):
return self.generate(row)
return super()._generateTableCellRow(obj, **args)
def generateSpeech(self, obj, **args):
if not self._script.utilities.inDocumentContent(obj):
msg = "WEB: %s is not in document content. Calling default speech generator." % obj
debug.println(debug.LEVEL_INFO, msg, True)
return super().generateSpeech(obj, **args)
msg = "WEB: Generating speech for document object %s" % obj
debug.println(debug.LEVEL_INFO, msg, True)
result = []
if args.get('formatType') == 'detailedWhereAmI':
oldRole = self._overrideRole('default', args)
elif self._script.utilities.isLink(obj):
oldRole = self._overrideRole(pyatspi.ROLE_LINK, args)
elif self._script.utilities.treatAsDiv(obj, offset=args.get('startOffset')):
oldRole = self._overrideRole(pyatspi.ROLE_SECTION, args)
else:
oldRole = self._overrideRole(self._getAlternativeRole(obj, **args), args)
if not 'priorObj' in args:
args['priorObj'] = self._script.utilities.getPriorContext()[0]
if self._script.utilities.isLabellingContents(obj):
result = list(filter(lambda x: x, self.generateContext(obj, **args)))
if not result:
result = list(filter(lambda x: x, super().generateSpeech(obj, **args)))
self._restoreRole(oldRole, args)
msg = "WEB: Speech generation for document object %s complete:" % obj
debug.println(debug.LEVEL_INFO, msg)
for element in result:
debug.println(debug.LEVEL_ALL, " %s" % element)
return result
def generateContents(self, contents, **args):
if not len(contents):
return []
result = []
contents = self._script.utilities.filterContentsForPresentation(contents, True)
msg = "WEB: Generating speech contents (length: %i)" % len(contents)
debug.println(debug.LEVEL_INFO, msg, True)
for i, content in enumerate(contents):
obj, start, end, string = content
msg = "ITEM %i: %s, start: %i, end: %i, string: '%s'" \
% (i, obj, start, end, string)
debug.println(debug.LEVEL_INFO, msg, True)
utterance = self.generateSpeech(
obj, startOffset=start, endOffset=end, string=string,
index=i, total=len(contents), **args)
if isinstance(utterance, list):
isNotEmptyList = lambda x: not (isinstance(x, list) and not x)
utterance = list(filter(isNotEmptyList, utterance))
if utterance and utterance[0]:
result.append(utterance)
args['priorObj'] = obj
if not result:
if self._script.inSayAll():
string = ""
else:
string = messages.BLANK
result = [string, self.voice(speech_generator.DEFAULT)]
return result
|
import time
import os
import errno
import uuid
import math
import pandas as pd
import numpy as np
import collections
import natsort
import uuid
import shutil
import itertools
import json
from itertools import combinations
import matplotlib
from fba_tools.fba_toolsClient import fba_tools
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from Workspace.WorkspaceClient import Workspace as Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
from KBaseReport.KBaseReportClient import KBaseReport
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    newline = '\n' if prefix_newline else ''
    print('{}{:.2f}: {}'.format(newline, time.time(), str(message)))
class MutualInfoUtil:
    def __init__(self, config):
        """Set up workspace/service clients from the SDK *config* dict.

        Expects keys: workspace-url, SDK_CALLBACK_URL, KB_AUTH_TOKEN,
        shock-url, scratch.
        """
        self.ws_url = config["workspace-url"]
        self.callback_url = config['SDK_CALLBACK_URL']
        self.token = config['KB_AUTH_TOKEN']
        self.shock_url = config['shock-url']
        # Client for moving files between scratch and Shock/Workspace.
        self.dfu = DataFileUtil(self.callback_url)
        self.ws = Workspace(self.ws_url, token=self.token)
        self.scratch = config['scratch']
def _mkdir_p(self, path):
"""
_mkdir_p: make directory for given path
"""
if not path:
return
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _validate_run_flux_mutual_information_analysis_params(self, params):
"""
_validate_run_flux_mutual_information_analysis_params:
validates params passed to run_flux_mutual_information_analysis method
"""
log('start validating validate_run_flux_mutual_information_analysis params')
# check for required parameters
for p in ['fbamodel_id', 'compounds', 'media_id', 'workspace_name']:
if p not in params:
raise ValueError('"{}" parameter is required, but missing'.format(p))
    def _get_file_from_ws(self, workspace, obj_name):
        """Fetch a full workspace object (data plus metadata) by name.

        :param workspace: workspace name to read from
        :param obj_name: object name within that workspace
        :raises ValueError: if the object cannot be retrieved
        """
        try:
            file_path = self.ws.get_objects(
                [{'name': obj_name,
                  'workspace': workspace}])[0]
        except Exception as e:
            raise ValueError(
                'Unable to get object from workspace: (' +
                workspace + '/' + obj_name + ')' + str(e))
        return file_path
def _make_media_files(self, ws_name, base, compounds):
"""
Build and store media objects for each combination of compound added to the base media.
:param base: The base media file
:param compounds: the set of compound to test
:return: A list of media ids and a matrix with each media combination defined
"""
base_media = self._get_file_from_ws(ws_name, base)['data']
uuid = str(uuid.uuid4()))
media_ids = [base_media['id']]
new_media_list = []
media_matrix = [[""]+compounds]
media_matrix.append([[base_media['id']]+[0]*len(compounds)])
for n_comp in range(1, len(compounds)+1):
for combo in combinations(compounds, n_comp):
new_media_id = base_media['id'] + '_v%s' % len(media_matrix)
media_ids.append(new_media_id)
media_matrix.append([new_media_id]+[1 if comp in combo else 0 for comp in compounds])
new_media = deepcopy(base_media)
new_media['id'] = new_media_id
new_media['name'] = new_media_id
for new_comp in combo:
new_media['mediacompounds'].append(
{'compound_ref': '48/1/1/compounds/id/%s' % new_comp.split('_')[0],
'concentration': 1.0, 'maxFlux': 1000, 'minFlux': -1000})
new_media_list.append(new_media)
print("Made %s Media Files" % (len(media_ids)-1))
info = self.ws.save_objects(
{'workspace': ws_name,
"objects": [{
"hidden": 1,
"type": "KBaseBiochem.Media",
"data": media,
"name": uuid + "-" + media['name']
} for media in new_media_list]
})
print info
return media_ids, media_matrix, uuid
def _run_fba(self, workspace_name, media_id_list, fbamodel_id, uuid):
fba_tool_obj = fba_tools(self.callback_url)
new_media_list = []
for media in media_id_list:
new_media_list.append(uuid + "-" + media)
fba_tool_obj.run_flux_balance_analysis({
"workspace" : workspace_name,
"fbamodel_id" : fbamodel_id,
"fba_output_id" : fbamodel_id + ".mifba",
"fbamodel_workspace" : workspace_name,
"media_id_list" : new_media_list,
"target_reaction" : "bio1",
"minimize_flux" : 1
})
output = self.ws.get_objects2({
'objects' : [{
'ref' : workspace_name + "/" + fbamodel_id + '.mifba'
}]
})
fba = output['data'][0]['data']
biomass_data = "FBAs,Biomass\n"
secretion_file = ","+','.join(media_list)+"\n"
full_secretion_file = ","+','.join(media_list)+"\n"
full_flux_file = ","+','.join(media_list)+"\n"
flux_file = ","+','.join(media_list)+"\n"
objectives = fba['other_objectives']
for i in range(0, len(objectives)):
biomass_data = biomass_data + media_list[i] + "," + objectives[i] + "\n"
flux_vars = fba['FBAReactionVariables']
for var in flux_vars:
id = var['modelreaction_ref'].split("/").pop()
flux_file = flux_file + id
full_flux_file = full_flux_file + id
fluxes = var['other_values']
for i in range(0, len(fluxes)):
full_flux_file = full_flux_file + "," + fluxes[i]
if abs(fluxes[i]) < 1e-7:
flux_file = flux_file + ",0"
else:
flux_file = flux_file + ",1"
flux_file = flux_file + "\n"
full_flux_file = full_flux_file + "\n"
secretion_vars = fba['FBACompoundVariables']
for var in secretion_vars:
id = var['modelcompound_ref'].split("/").pop()
secretion_file = secretion_file + id
full_secretion_file = full_secretion_file + id
fluxes = var['other_values']
for i in range(0, len(fluxes)):
full_secretion_file = full_secretion_file + "," + fluxes[i]
if abs(fluxes[i]) < 1e-7:
secretion_file = secretion_file + ",0"
elif fluxes[i] < 0:
secretion_file = secretion_file + ",-1"
else:
secretion_file = secretion_file + ",1"
secretion_file = secretion_file + "\n"
full_secretion_file = full_secretion_file + "\n"
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(output_directory)
biomass_path = os.path.join(output_directory, 'biomass.csv')
secretion_path = os.path.join(output_directory, 'secretion.csv')
flux_path = os.path.join(output_directory, 'flux.csv')
full_secretion_path = os.path.join(output_directory, 'full_secretion.csv')
full_flux_path = os.path.join(output_directory, 'full_flux.csv')
with open(biomass_path, 'w') as biomass_f:
biomass_f.write(biomass_data)
with open(secretion_path, 'w') as secretion_f:
secretion_f.write(secretion_file)
with open(flux_path, 'w') as flux_f:
flux_f.write(flux_file)
with open(full_secretion_path, 'w') as full_secretion_f:
full_secretion_f.write(full_secretion_file)
with open(full_flux_path, 'w') as full_flux_f:
full_flux_f.write(full_flux_file)
return [biomass_path,secretion_path,flux_path,full_secretion_path,full_flux_path]
    def _generate_html_report(self, result_directory, mutual_info_dict):
        """
        _generate_html_report: generate html summary report

        Copies MI_plot.png from *result_directory* into a fresh scratch
        directory, renders the {combination: MI} table into
        report_template.html, uploads the directory to shock, and returns the
        html-link descriptor list for a KBaseReport.
        """
        #scratch, uui, datafileutil, file_to_shock, shockId, extended report
        log('start generating html report')
        html_report = list()
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self._mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'mutual_information_report.html')
        shutil.copy(os.path.join(result_directory, 'MI_plot.png'),
                    os.path.join(output_directory, 'MI_plot.png'))
        overview_content = ''
        overview_content += '<table><tr><th>Mutual Information for various chemical compound combinations'
        overview_content += ' Object</th></td>'
        overview_content += '<tr><th>Input Chemical Compound Combination</th>'
        overview_content += '<th>Mutual Information (in Bits)</th>'
        overview_content += '</tr>'
        # One table row per compound-combination -> MI value.
        for k, v in mutual_info_dict.items():
            overview_content += '<tr><td>{}</td><td>{}</td></tr>'.format(k, v)
        overview_content += '</table>'
        # Splice the table into the static template shipped next to this file.
        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                report_template = report_template.replace('<p>Overview_Content</p>',
                                                          overview_content)
                result_file.write(report_template)
        report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                                  'pack': 'zip'})['shock_id']
        html_report.append({'shock_id': report_shock_id,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Mutual Information App'})
        return html_report
    def _generate_report(self, result_directory, mutual_info_dict, params, paths):
        """
        _generate_report: generate summary report

        Packs the static index.html plus pdata.json (written by
        _generate_mutual_info) into a zip uploaded to shock, attaches the five
        CSV outputs as file links, and creates a KBaseReport extended report.

        :return: {'report_name': ..., 'report_ref': ...}
        """
        uuidStr = str(uuid.uuid4())
        self._mkdir_p(result_directory + '/' + uuidStr)
        shutil.copy('/kb/module/data/index.html', result_directory + '/' + uuidStr + '/index.html')
        # pdata.json is expected in the current working directory.
        shutil.copy('pdata.json', result_directory + '/' + uuidStr + '/pdata.json')
        # DataFileUtils to shock
        report_shock_id = self.dfu.file_to_shock({'file_path': result_directory + '/' + uuidStr,
                                                  'make_handler': 0,
                                                  'pack': 'zip'})['shock_id']
        report_file = {'name': 'index.html',
                       'description': 'the report',
                       'shock_id': report_shock_id}
        # NOTE(review): _run_fba returns [biomass, secretion, flux,
        # full_secretion, full_flux], but the labels below assign paths[1] to
        # flux and paths[3] to secretion -- confirm against the caller whether
        # the file links are mislabeled.
        biomass_file = {'name': 'biomass_file.csv',
                        'description': 'biomass_file',
                        'path': paths[0]}
        flux_file = {'name': 'flux_file.csv',
                     'description': 'flux_file',
                     'path': paths[1]}
        full_flux_file = {'name': 'full_flux_file.csv',
                          'description': 'full_flux_file',
                          'path': paths[2]}
        secretion_file = {'name': 'secretion_file.csv',
                          'description': 'secretion_file',
                          'path': paths[3]}
        full_secretion_file = {'name': 'full_secretion_file.csv',
                               'description': 'full_secretion_file',
                               'path': paths[4]}
        log('creating report')
        #output_html_files = self._generate_html_report(result_directory,
        #                                               mutual_info_dict)
        report_params = {'message': '',
                         'workspace_name': params.get('workspace_name'),
                         'html_links': [report_file],
                         'file_links': [biomass_file, flux_file, full_flux_file, secretion_file, full_secretion_file],
                         'direct_html_link_index': 0,
                         'html_window_height': 333,
                         'report_object_name': 'MutualInfomation_report_' + uuidStr}
        kbase_report_client = KBaseReport(self.callback_url)
        output = kbase_report_client.create_extended_report(report_params)
        report_output = {'report_name': output['name'], 'report_ref': output['ref']}
        return report_output
    def _generate_mutual_info(self, media_matrix, fba_file, mi_options):
        """
        Compute mutual information between compound combinations and FBA outcomes.

        :param media_matrix: pandas DataFrame, one row per media; column 0 is
            the media id, remaining columns are 0/1 compound flags
        :param fba_file: path to the binarized FBA csv written by _run_fba
        :param mi_options: unused in this body -- TODO confirm intended use
        :return: OrderedDict mapping compound-combination key (e.g. "1,3") to
            mutual information in bits, sorted by combination size then key

        Also writes 'pdata.json' (grouped plot data) to the current directory.
        NOTE: this body is Python 2 (print statements, dict.iteritems).
        """
        df1 = pd.read_csv(fba_file)
        df1.as_matrix()
        #----Input validation of Media/FBAs with Binary Matrix FBAs------
        # 1.0 Number of rows in Media.csv file = (Number of columns -1)
        # 1.0. If they are different: Through an ERROR saying missed match number of FBAs in media and binary matrix.
        # 1.1 Check whether the elements in Media.csv file contains only binary values (i.e. 0 and 1)
        # 1.1. If the elements are different: Through an ERROR saying not approapriate input values
        # 1.2 Check whether the compounds in Media.csv file match with number of FBAs
        # 1.2. If the compounds are different from number of FBAs: Through an ERROR saying not appropriate input values
        print media_matrix
        s_df1 = df1.shape
        s_df2 = media_matrix.shape
        Temp_df2 = np.array(media_matrix.values)
        # Create matrix with only the elements remove first column and all the rows
        Temp_df2 = Temp_df2[0:,1:]
        Bin_val_check = np.array_equal(Temp_df2, Temp_df2.astype(bool))
        num_compounds = (s_df2[1])-1
        # Validation only prints a warning; processing continues regardless.
        if ((s_df1[1]-1) != s_df2[0]) or (Bin_val_check != True) or (int(math.log(s_df2[0],2)) != num_compounds):
            print ('invalid input values')
        #-----All possible combination of the chemical compounds----------------------
        # 2.0 Sperating m0 from rest of the lables
        Temp1_df2 = media_matrix
        cols = Temp1_df2.columns
        # Replace each 1 flag with its column label (mutates media_matrix in place).
        for i in range(1,len(cols)):
            Temp1_df2.loc[Temp1_df2[cols[i]] == 1 , cols[i]] = cols[i]
        print Temp1_df2
        # 2.1 Creating a disctionary for all FBAs except m0
        print len(Temp1_df2)
        mydict = {}
        for x in range(0, len(Temp1_df2)):
            for i in range(1,s_df2[1]):
                currentvalue = Temp1_df2.iloc[x,i]
                currentid = Temp1_df2.iloc[x,0]
                currentvalue = Temp1_df2.iloc[x,i]
                mydict.setdefault(currentid,[])
                if currentvalue > 0:
                    mydict[currentid].append(currentvalue)
        # Add the first key as m0
        media_0_name = 'm0'
        # NOTE(review): this assigns the literal string "['0']", not a list --
        # set(v) below then iterates its characters; confirm intent.
        mydict[media_0_name] = "['0']"
        #Sort the keys
        mydict = collections.OrderedDict(natsort.natsorted(mydict.items()))
        print mydict
        for k,v in mydict.iteritems():
            print k,v
        # List of Compounds combination in the list
        my_combi_list = []
        Compounds_Combi = list(range(1,num_compounds+1))
        for L in range(0, len(Compounds_Combi)+1):
            for subset in itertools.combinations(Compounds_Combi, L):
                my_combi_list.append(list(subset))
        print my_combi_list
        # Created a dictionary where the keys:
        # list of compounds combination
        # values are corresponding FBAs list in df2
        result_dict = {}
        for element in my_combi_list[1:]:
            for k, v in mydict.iteritems():
                if set(v).issubset(set(map(lambda x:str(x), element))):
                    key = ','.join(map(lambda x:str(x), element))
                    if result_dict.get(key):
                        media_list = result_dict[key]
                        media_list.append(k)
                        media_list = list(set(media_list))
                        result_dict.update({key: media_list})
                    else:
                        result_dict.update({key: [media_0_name, k]})
        print result_dict
        # Created a dictionary where the keys are:
        # list of compounds combination
        # values are compounds combination FBAs with df1 vaules
        All_Comp_Combi_dic = {}
        for column, value in result_dict.items():
            All_Comp_Combi_dic.update({column : df1.get(value)})
        #To print an item from the All_Comp_Combi_dic
        df = (pd.DataFrame(All_Comp_Combi_dic.items()))
        #print df[0]
        #print df[1][7]
        MI_dict = {}
        for k in range(0, len(df[0])):
            drop_rows_df = df[1][k].drop_duplicates(keep="first")
            drop_columns_df = drop_rows_df.T.drop_duplicates(keep="first").T
            remove = []
            removed = {}
            cols = df[1][k].columns
            # Group identical FBA columns: `removed` maps a kept column to the
            # list of duplicate columns that match it.
            for i in range(len(cols)-1):
                duplicated = []
                v = df[1][k][cols[i]].values
                for j in range(i+1,len(cols)):
                    if np.array_equal(v,df[1][k][cols[j]].values):
                        remove.append(cols[j])
                        duplicated.append(cols[j])
                if duplicated and cols[i] not in remove:
                    removed.update({cols[i]:duplicated})
            count = {}
            for key, value in removed.items():
                count.update({key: len(value)})
            #print v
            # print drop_columns_df
            values = count.values()
            # print values
            # Group sizes including the kept column itself.
            values = map(lambda x: x+1, values)
            # print values
            d = {x:values.count(x) for x in values}
            #-------Mutual Inforamtion (MI) calculation-------------
            FBAs = len(df[1][k].columns)
            pure_entropy = math.log(FBAs,2)
            #print pure_entropy
            # If No duplicates exist and list "value" is empty
            if not values:
                #print("List is empty")
                No_duplicate_FBAs = len(drop_columns_df.columns)
                # log(1)=0, so this whole expression evaluates to 0.
                conditional_entropy = -1 * (No_duplicate_FBAs*((1/No_duplicate_FBAs)*((1/1)*math.log(1.0/1.0,2))));
                Mutual_Info = pure_entropy - conditional_entropy
                #print('Mutaul Info:', Mutual_Info)
            if values:
                # If duplicates exist and list "value" is not empty
                conditional_entropy = 0
                for key in d:
                    #print key, d[key]
                    Temp = -1 * d[key] * (key/float(FBAs)) * key * (1.0/key) * math.log(1.0/key,2)
                    conditional_entropy = Temp + conditional_entropy
                #print "%3f" %Temp
                Mutual_Info = pure_entropy - conditional_entropy
            MI_dict[df[0][k]] = Mutual_Info
        #Sorted MI_dict
        MI_dict = sorted(MI_dict.items(), key=lambda x: (-len(x[0]), x[0]))
        MI_dict = OrderedDict(MI_dict)
        # Bucket results by combination size (comma count in the key).
        x_groups = [[] for x in range(num_compounds)]
        y_groups = [[] for x in range(num_compounds)]
        names = [[] for x in range(num_compounds)]
        Comp_Mapping = [[] for x in range(num_compounds)]
        for key, val in MI_dict.iteritems():
            del_count = key.count(',')
            x_groups[del_count].append(key)
            y_groups[del_count].append(val)
        # for x, y in zip(x_groups, y_groups):
        #     data.append(go.Bar(x=x, y=y, name='test'))
        # NOTE(review): hard-coded 7-compound mapping -- breaks for other runs.
        compound_IDs = ['H2', 'Vitamin K', 'Hematine', 'Glucose', 'Acetate', 'Formate', 'B12']
        pdata = []
        for i in range(0, len(x_groups)):
            names[i] = str(i + 1) + ' Compound Combination'
            Comp_Mapping = str(i + 1) + '-' + compound_IDs[i]
            record = {}
            record["x"] = []
            for e in x_groups[i]:
                record["x"].append("c" + e)
            record["y"] = y_groups[i]
            record["names"] = names[i]
            record["Comp_Mapping"] = Comp_Mapping
            pdata.append(record)
        print pdata
        with open('pdata.json', 'w') as outfile:
            json.dump(pdata, outfile)
        return MI_dict
Fixing syntax error
import time
import os
import errno
import uuid
import math
import pandas as pd
import numpy as np
import collections
import natsort
import uuid
import shutil
import itertools
import json
from itertools import combinations
import matplotlib
from fba_tools.fba_toolsClient import fba_tools
import matplotlib.pyplot as plt
from collections import OrderedDict
from copy import deepcopy
from Workspace.WorkspaceClient import Workspace as Workspace
from DataFileUtil.DataFileUtilClient import DataFileUtil
from KBaseReport.KBaseReportClient import KBaseReport
def log(message, prefix_newline=False):
    """Logging function, provides a hook to suppress or redirect log messages."""
    # Prefix every message with an epoch timestamp (2 decimal places).
    print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
class MutualInfoUtil:
    def __init__(self, config):
        """
        Set up workspace / data-transfer clients from an SDK config dict.

        config keys read: 'workspace-url', 'SDK_CALLBACK_URL',
        'KB_AUTH_TOKEN', 'shock-url', 'scratch'.
        """
        self.ws_url = config["workspace-url"]
        self.callback_url = config['SDK_CALLBACK_URL']
        self.token = config['KB_AUTH_TOKEN']
        self.shock_url = config['shock-url']
        # DataFileUtil talks to the SDK callback server for file/shock transfer
        self.dfu = DataFileUtil(self.callback_url)
        self.ws = Workspace(self.ws_url, token=self.token)
        # scratch: local writable directory for intermediate files
        self.scratch = config['scratch']
    def _mkdir_p(self, path):
        """
        _mkdir_p: make directory for given path

        Creates intermediate directories as needed (like ``mkdir -p``);
        silently succeeds if the directory already exists. Falsy path: no-op.
        """
        if not path:
            return
        try:
            os.makedirs(path)
        except OSError as exc:
            # Ignore "already exists" only when it really is a directory.
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise
    def _validate_run_flux_mutual_information_analysis_params(self, params):
        """
        _validate_run_flux_mutual_information_analysis_params:
        validates params passed to run_flux_mutual_information_analysis method

        Raises ValueError naming the first required key that is absent.
        """
        log('start validating validate_run_flux_mutual_information_analysis params')
        # check for required parameters
        for p in ['fbamodel_id', 'compounds', 'media_id', 'workspace_name']:
            if p not in params:
                raise ValueError('"{}" parameter is required, but missing'.format(p))
    def _get_file_from_ws(self, workspace, obj_name):
        """
        Fetch object *obj_name* from *workspace* via the Workspace client.

        Any client failure is wrapped in a ValueError naming the object.
        """
        try:
            file_path = self.ws.get_objects(
                [{'name': obj_name,
                  'workspace': workspace}])[0]
        except Exception as e:
            raise ValueError(
                'Unable to get object from workspace: (' +
                workspace + '/' + obj_name + ')' + str(e))
        return file_path
def _make_media_files(self, ws_name, base, compounds):
"""
Build and store media objects for each combination of compound added to the base media.
:param base: The base media file
:param compounds: the set of compound to test
:return: A list of media ids and a matrix with each media combination defined
"""
base_media = self._get_file_from_ws(ws_name, base)['data']
uuid = str(uuid.uuid4())
media_ids = [base_media['id']]
new_media_list = []
media_matrix = [[""]+compounds]
media_matrix.append([[base_media['id']]+[0]*len(compounds)])
for n_comp in range(1, len(compounds)+1):
for combo in combinations(compounds, n_comp):
new_media_id = base_media['id'] + '_v%s' % len(media_matrix)
media_ids.append(new_media_id)
media_matrix.append([new_media_id]+[1 if comp in combo else 0 for comp in compounds])
new_media = deepcopy(base_media)
new_media['id'] = new_media_id
new_media['name'] = new_media_id
for new_comp in combo:
new_media['mediacompounds'].append(
{'compound_ref': '48/1/1/compounds/id/%s' % new_comp.split('_')[0],
'concentration': 1.0, 'maxFlux': 1000, 'minFlux': -1000})
new_media_list.append(new_media)
print("Made %s Media Files" % (len(media_ids)-1))
info = self.ws.save_objects(
{'workspace': ws_name,
"objects": [{
"hidden": 1,
"type": "KBaseBiochem.Media",
"data": media,
"name": uuid + "-" + media['name']
} for media in new_media_list]
})
print info
return media_ids, media_matrix, uuid
def _run_fba(self, workspace_name, media_id_list, fbamodel_id, uuid):
fba_tool_obj = fba_tools(self.callback_url)
new_media_list = []
for media in media_id_list:
new_media_list.append(uuid + "-" + media)
fba_tool_obj.run_flux_balance_analysis({
"workspace" : workspace_name,
"fbamodel_id" : fbamodel_id,
"fba_output_id" : fbamodel_id + ".mifba",
"fbamodel_workspace" : workspace_name,
"media_id_list" : new_media_list,
"target_reaction" : "bio1",
"minimize_flux" : 1
})
output = self.ws.get_objects2({
'objects' : [{
'ref' : workspace_name + "/" + fbamodel_id + '.mifba'
}]
})
fba = output['data'][0]['data']
biomass_data = "FBAs,Biomass\n"
secretion_file = ","+','.join(media_list)+"\n"
full_secretion_file = ","+','.join(media_list)+"\n"
full_flux_file = ","+','.join(media_list)+"\n"
flux_file = ","+','.join(media_list)+"\n"
objectives = fba['other_objectives']
for i in range(0, len(objectives)):
biomass_data = biomass_data + media_list[i] + "," + objectives[i] + "\n"
flux_vars = fba['FBAReactionVariables']
for var in flux_vars:
id = var['modelreaction_ref'].split("/").pop()
flux_file = flux_file + id
full_flux_file = full_flux_file + id
fluxes = var['other_values']
for i in range(0, len(fluxes)):
full_flux_file = full_flux_file + "," + fluxes[i]
if abs(fluxes[i]) < 1e-7:
flux_file = flux_file + ",0"
else:
flux_file = flux_file + ",1"
flux_file = flux_file + "\n"
full_flux_file = full_flux_file + "\n"
secretion_vars = fba['FBACompoundVariables']
for var in secretion_vars:
id = var['modelcompound_ref'].split("/").pop()
secretion_file = secretion_file + id
full_secretion_file = full_secretion_file + id
fluxes = var['other_values']
for i in range(0, len(fluxes)):
full_secretion_file = full_secretion_file + "," + fluxes[i]
if abs(fluxes[i]) < 1e-7:
secretion_file = secretion_file + ",0"
elif fluxes[i] < 0:
secretion_file = secretion_file + ",-1"
else:
secretion_file = secretion_file + ",1"
secretion_file = secretion_file + "\n"
full_secretion_file = full_secretion_file + "\n"
output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
self._mkdir_p(output_directory)
biomass_path = os.path.join(output_directory, 'biomass.csv')
secretion_path = os.path.join(output_directory, 'secretion.csv')
flux_path = os.path.join(output_directory, 'flux.csv')
full_secretion_path = os.path.join(output_directory, 'full_secretion.csv')
full_flux_path = os.path.join(output_directory, 'full_flux.csv')
with open(biomass_path, 'w') as biomass_f:
biomass_f.write(biomass_data)
with open(secretion_path, 'w') as secretion_f:
secretion_f.write(secretion_file)
with open(flux_path, 'w') as flux_f:
flux_f.write(flux_file)
with open(full_secretion_path, 'w') as full_secretion_f:
full_secretion_f.write(full_secretion_file)
with open(full_flux_path, 'w') as full_flux_f:
full_flux_f.write(full_flux_file)
return [biomass_path,secretion_path,flux_path,full_secretion_path,full_flux_path]
    def _generate_html_report(self, result_directory, mutual_info_dict):
        """
        _generate_html_report: generate html summary report

        Copies MI_plot.png from *result_directory* into a fresh scratch
        directory, renders the {combination: MI} table into
        report_template.html, uploads the directory to shock, and returns the
        html-link descriptor list for a KBaseReport.
        """
        #scratch, uui, datafileutil, file_to_shock, shockId, extended report
        log('start generating html report')
        html_report = list()
        output_directory = os.path.join(self.scratch, str(uuid.uuid4()))
        self._mkdir_p(output_directory)
        result_file_path = os.path.join(output_directory, 'mutual_information_report.html')
        shutil.copy(os.path.join(result_directory, 'MI_plot.png'),
                    os.path.join(output_directory, 'MI_plot.png'))
        overview_content = ''
        overview_content += '<table><tr><th>Mutual Information for various chemical compound combinations'
        overview_content += ' Object</th></td>'
        overview_content += '<tr><th>Input Chemical Compound Combination</th>'
        overview_content += '<th>Mutual Information (in Bits)</th>'
        overview_content += '</tr>'
        # One table row per compound-combination -> MI value.
        for k, v in mutual_info_dict.items():
            overview_content += '<tr><td>{}</td><td>{}</td></tr>'.format(k, v)
        overview_content += '</table>'
        # Splice the table into the static template shipped next to this file.
        with open(result_file_path, 'w') as result_file:
            with open(os.path.join(os.path.dirname(__file__), 'report_template.html'),
                      'r') as report_template_file:
                report_template = report_template_file.read()
                report_template = report_template.replace('<p>Overview_Content</p>',
                                                          overview_content)
                result_file.write(report_template)
        report_shock_id = self.dfu.file_to_shock({'file_path': output_directory,
                                                  'pack': 'zip'})['shock_id']
        html_report.append({'shock_id': report_shock_id,
                            'name': os.path.basename(result_file_path),
                            'label': os.path.basename(result_file_path),
                            'description': 'HTML summary report for Mutual Information App'})
        return html_report
    def _generate_report(self, result_directory, mutual_info_dict, params, paths):
        """
        _generate_report: generate summary report

        Packs the static index.html plus pdata.json (written by
        _generate_mutual_info) into a zip uploaded to shock, attaches the five
        CSV outputs as file links, and creates a KBaseReport extended report.

        :return: {'report_name': ..., 'report_ref': ...}
        """
        uuidStr = str(uuid.uuid4())
        self._mkdir_p(result_directory + '/' + uuidStr)
        shutil.copy('/kb/module/data/index.html', result_directory + '/' + uuidStr + '/index.html')
        # pdata.json is expected in the current working directory.
        shutil.copy('pdata.json', result_directory + '/' + uuidStr + '/pdata.json')
        # DataFileUtils to shock
        report_shock_id = self.dfu.file_to_shock({'file_path': result_directory + '/' + uuidStr,
                                                  'make_handler': 0,
                                                  'pack': 'zip'})['shock_id']
        report_file = {'name': 'index.html',
                       'description': 'the report',
                       'shock_id': report_shock_id}
        # NOTE(review): _run_fba returns [biomass, secretion, flux,
        # full_secretion, full_flux], but the labels below assign paths[1] to
        # flux and paths[3] to secretion -- confirm against the caller whether
        # the file links are mislabeled.
        biomass_file = {'name': 'biomass_file.csv',
                        'description': 'biomass_file',
                        'path': paths[0]}
        flux_file = {'name': 'flux_file.csv',
                     'description': 'flux_file',
                     'path': paths[1]}
        full_flux_file = {'name': 'full_flux_file.csv',
                          'description': 'full_flux_file',
                          'path': paths[2]}
        secretion_file = {'name': 'secretion_file.csv',
                          'description': 'secretion_file',
                          'path': paths[3]}
        full_secretion_file = {'name': 'full_secretion_file.csv',
                               'description': 'full_secretion_file',
                               'path': paths[4]}
        log('creating report')
        #output_html_files = self._generate_html_report(result_directory,
        #                                               mutual_info_dict)
        report_params = {'message': '',
                         'workspace_name': params.get('workspace_name'),
                         'html_links': [report_file],
                         'file_links': [biomass_file, flux_file, full_flux_file, secretion_file, full_secretion_file],
                         'direct_html_link_index': 0,
                         'html_window_height': 333,
                         'report_object_name': 'MutualInfomation_report_' + uuidStr}
        kbase_report_client = KBaseReport(self.callback_url)
        output = kbase_report_client.create_extended_report(report_params)
        report_output = {'report_name': output['name'], 'report_ref': output['ref']}
        return report_output
    def _generate_mutual_info(self, media_matrix, fba_file, mi_options):
        """
        Compute mutual information between compound combinations and FBA outcomes.

        :param media_matrix: pandas DataFrame, one row per media; column 0 is
            the media id, remaining columns are 0/1 compound flags
        :param fba_file: path to the binarized FBA csv written by _run_fba
        :param mi_options: unused in this body -- TODO confirm intended use
        :return: OrderedDict mapping compound-combination key (e.g. "1,3") to
            mutual information in bits, sorted by combination size then key

        Also writes 'pdata.json' (grouped plot data) to the current directory.
        NOTE: this body is Python 2 (print statements, dict.iteritems).
        """
        df1 = pd.read_csv(fba_file)
        df1.as_matrix()
        #----Input validation of Media/FBAs with Binary Matrix FBAs------
        # 1.0 Number of rows in Media.csv file = (Number of columns -1)
        # 1.0. If they are different: Through an ERROR saying missed match number of FBAs in media and binary matrix.
        # 1.1 Check whether the elements in Media.csv file contains only binary values (i.e. 0 and 1)
        # 1.1. If the elements are different: Through an ERROR saying not approapriate input values
        # 1.2 Check whether the compounds in Media.csv file match with number of FBAs
        # 1.2. If the compounds are different from number of FBAs: Through an ERROR saying not appropriate input values
        print media_matrix
        s_df1 = df1.shape
        s_df2 = media_matrix.shape
        Temp_df2 = np.array(media_matrix.values)
        # Create matrix with only the elements remove first column and all the rows
        Temp_df2 = Temp_df2[0:,1:]
        Bin_val_check = np.array_equal(Temp_df2, Temp_df2.astype(bool))
        num_compounds = (s_df2[1])-1
        # Validation only prints a warning; processing continues regardless.
        if ((s_df1[1]-1) != s_df2[0]) or (Bin_val_check != True) or (int(math.log(s_df2[0],2)) != num_compounds):
            print ('invalid input values')
        #-----All possible combination of the chemical compounds----------------------
        # 2.0 Sperating m0 from rest of the lables
        Temp1_df2 = media_matrix
        cols = Temp1_df2.columns
        # Replace each 1 flag with its column label (mutates media_matrix in place).
        for i in range(1,len(cols)):
            Temp1_df2.loc[Temp1_df2[cols[i]] == 1 , cols[i]] = cols[i]
        print Temp1_df2
        # 2.1 Creating a disctionary for all FBAs except m0
        print len(Temp1_df2)
        mydict = {}
        for x in range(0, len(Temp1_df2)):
            for i in range(1,s_df2[1]):
                currentvalue = Temp1_df2.iloc[x,i]
                currentid = Temp1_df2.iloc[x,0]
                currentvalue = Temp1_df2.iloc[x,i]
                mydict.setdefault(currentid,[])
                if currentvalue > 0:
                    mydict[currentid].append(currentvalue)
        # Add the first key as m0
        media_0_name = 'm0'
        # NOTE(review): this assigns the literal string "['0']", not a list --
        # set(v) below then iterates its characters; confirm intent.
        mydict[media_0_name] = "['0']"
        #Sort the keys
        mydict = collections.OrderedDict(natsort.natsorted(mydict.items()))
        print mydict
        for k,v in mydict.iteritems():
            print k,v
        # List of Compounds combination in the list
        my_combi_list = []
        Compounds_Combi = list(range(1,num_compounds+1))
        for L in range(0, len(Compounds_Combi)+1):
            for subset in itertools.combinations(Compounds_Combi, L):
                my_combi_list.append(list(subset))
        print my_combi_list
        # Created a dictionary where the keys:
        # list of compounds combination
        # values are corresponding FBAs list in df2
        result_dict = {}
        for element in my_combi_list[1:]:
            for k, v in mydict.iteritems():
                if set(v).issubset(set(map(lambda x:str(x), element))):
                    key = ','.join(map(lambda x:str(x), element))
                    if result_dict.get(key):
                        media_list = result_dict[key]
                        media_list.append(k)
                        media_list = list(set(media_list))
                        result_dict.update({key: media_list})
                    else:
                        result_dict.update({key: [media_0_name, k]})
        print result_dict
        # Created a dictionary where the keys are:
        # list of compounds combination
        # values are compounds combination FBAs with df1 vaules
        All_Comp_Combi_dic = {}
        for column, value in result_dict.items():
            All_Comp_Combi_dic.update({column : df1.get(value)})
        #To print an item from the All_Comp_Combi_dic
        df = (pd.DataFrame(All_Comp_Combi_dic.items()))
        #print df[0]
        #print df[1][7]
        MI_dict = {}
        for k in range(0, len(df[0])):
            drop_rows_df = df[1][k].drop_duplicates(keep="first")
            drop_columns_df = drop_rows_df.T.drop_duplicates(keep="first").T
            remove = []
            removed = {}
            cols = df[1][k].columns
            # Group identical FBA columns: `removed` maps a kept column to the
            # list of duplicate columns that match it.
            for i in range(len(cols)-1):
                duplicated = []
                v = df[1][k][cols[i]].values
                for j in range(i+1,len(cols)):
                    if np.array_equal(v,df[1][k][cols[j]].values):
                        remove.append(cols[j])
                        duplicated.append(cols[j])
                if duplicated and cols[i] not in remove:
                    removed.update({cols[i]:duplicated})
            count = {}
            for key, value in removed.items():
                count.update({key: len(value)})
            #print v
            # print drop_columns_df
            values = count.values()
            # print values
            # Group sizes including the kept column itself.
            values = map(lambda x: x+1, values)
            # print values
            d = {x:values.count(x) for x in values}
            #-------Mutual Inforamtion (MI) calculation-------------
            FBAs = len(df[1][k].columns)
            pure_entropy = math.log(FBAs,2)
            #print pure_entropy
            # If No duplicates exist and list "value" is empty
            if not values:
                #print("List is empty")
                No_duplicate_FBAs = len(drop_columns_df.columns)
                # log(1)=0, so this whole expression evaluates to 0.
                conditional_entropy = -1 * (No_duplicate_FBAs*((1/No_duplicate_FBAs)*((1/1)*math.log(1.0/1.0,2))));
                Mutual_Info = pure_entropy - conditional_entropy
                #print('Mutaul Info:', Mutual_Info)
            if values:
                # If duplicates exist and list "value" is not empty
                conditional_entropy = 0
                for key in d:
                    #print key, d[key]
                    Temp = -1 * d[key] * (key/float(FBAs)) * key * (1.0/key) * math.log(1.0/key,2)
                    conditional_entropy = Temp + conditional_entropy
                #print "%3f" %Temp
                Mutual_Info = pure_entropy - conditional_entropy
            MI_dict[df[0][k]] = Mutual_Info
        #Sorted MI_dict
        MI_dict = sorted(MI_dict.items(), key=lambda x: (-len(x[0]), x[0]))
        MI_dict = OrderedDict(MI_dict)
        # Bucket results by combination size (comma count in the key).
        x_groups = [[] for x in range(num_compounds)]
        y_groups = [[] for x in range(num_compounds)]
        names = [[] for x in range(num_compounds)]
        Comp_Mapping = [[] for x in range(num_compounds)]
        for key, val in MI_dict.iteritems():
            del_count = key.count(',')
            x_groups[del_count].append(key)
            y_groups[del_count].append(val)
        # for x, y in zip(x_groups, y_groups):
        #     data.append(go.Bar(x=x, y=y, name='test'))
        # NOTE(review): hard-coded 7-compound mapping -- breaks for other runs.
        compound_IDs = ['H2', 'Vitamin K', 'Hematine', 'Glucose', 'Acetate', 'Formate', 'B12']
        pdata = []
        for i in range(0, len(x_groups)):
            names[i] = str(i + 1) + ' Compound Combination'
            Comp_Mapping = str(i + 1) + '-' + compound_IDs[i]
            record = {}
            record["x"] = []
            for e in x_groups[i]:
                record["x"].append("c" + e)
            record["y"] = y_groups[i]
            record["names"] = names[i]
            record["Comp_Mapping"] = Comp_Mapping
            pdata.append(record)
        print pdata
        with open('pdata.json', 'w') as outfile:
            json.dump(pdata, outfile)
        return MI_dict
|
#!/usr/bin/env python
#######################################################################################
## Created by David Kirkby, University of California, Irvine <dkirkby@uci.edu>
#######################################################################################
import sys
import os
import math
import argparse
import logging
import galsim
import pyfits
import numpy
# Unit-conversion constants.
twopi = 2*math.pi
deg2rad = math.pi/180.   # degrees -> radians
deg2arcsec = 3600.       # degrees -> arcseconds
deg2arcmin = 60.         # degrees -> arcminutes
def createComponent(type,electrons,xc,yc,hlr,q,beta,g1,g2):
    """
    Build one galaxy component: a radial profile of the given galsim *type*
    with the requested flux (electrons) and half-light radius, sheared to its
    intrinsic shape (q, beta), then by cosmic shear (g1, g2), and finally
    shifted to its centroid (xc, yc).
    """
    component = type(flux = electrons, half_light_radius = hlr)
    # intrinsic ellipticity, then cosmic shear, then centroid shift
    component.applyShear(q = q, beta = beta*galsim.radians)
    component.applyShear(g1 = g1, g2 = g2)
    component.applyShift(dx = xc, dy = yc)
    return component
"""
Returns a (disk,bulge) tuple of source objects using the specified parameters.
"""
def createSource(flux,bulgeFraction,xc,yc,hlr_d,q_d,beta_d,hlr_b,q_b,beta_b,g1,g2):
# Define the disk component, if any
if bulgeFraction < 1:
disk = createComponent(galsim.Exponential,flux*(1-bulgeFraction),xc,yc,hlr_d,q_d,beta_d,g1,g2)
else:
disk = None
# Define the bulge component, if any
if bulgeFraction > 0:
bulge = createComponent(galsim.DeVaucouleurs,flux*bulgeFraction,xc,yc,hlr_b,q_b,beta_b,g1,g2)
else:
bulge = None
return (disk,bulge)
"""
Renders the specified source convolved with a psf (which might be None)
and pixel response into a postage stamp with the specified bounding box.
"""
def renderStamp(src,psf,pix,bbox):
stamp = galsim.ImageD(bbox)
stamp.setScale(pix.getXWidth())
if src:
if psf == None:
obj = galsim.Convolve([src,pix], real_space = True)
else:
gsp = galsim.GSParams(maximum_fft_size=16384)
obj = galsim.Convolve([src,psf,pix],gsparams=gsp)
obj.draw(image = stamp)
return stamp
def createStamp(src,psf,pix,bbox):
    """Render the disk and bulge components of *src* separately, then sum."""
    disk, bulge = src
    disk_stamp = renderStamp(disk,psf,pix,bbox)
    bulge_stamp = renderStamp(bulge,psf,pix,bbox)
    return disk_stamp + bulge_stamp
"""
Returns (dx,dy) for the bounding box of a surface brightness profile
SB(x,y) whose isophotes are ellipses with the shape (q,beta) and which
has an underlying normalized radial profile p(r). The inputs are:
maxSB = totalFlux*p(0) = maximum surface brightness before shear
thresholdSB = threshold surface brightness after shear
q = ratio of minor to major axes of ellipse with 0 < q <= 1
beta = angle of ellipse's major axis in radians
rFunction = returns R(b) such that p(R) = b*p(0) with 0 < b < 1
The returned (dx,dy) are in arcsecs, and defined such that SB(x,y) < f0
is guaranteed for |x| > dx or |y| > dy. The result only depends on the
ratio thresholdSB/maxSB so they must be in the same (abitrary) units.
"""
def boundingBox(maxSB,thresholdSB,q,beta,rFunction):
    """
    Return the (dx,dy) half-dimensions in arcsecs of the box bounding the
    region where a sheared profile with shape (q,beta), peak surface
    brightness maxSB, and radial inverse rFunction can exceed thresholdSB.
    Only the ratio thresholdSB/maxSB matters, so any common units work.
    Returns (0,0) when the peak never reaches the threshold and raises
    RuntimeError when the brightness ratio is non-positive.
    """
    # shear affine-transform parameters of the isophote ellipses
    shear = (1-q)/(1+q)
    gPlus = shear*math.cos(2*beta)
    gCross = shear*math.sin(2*beta)
    jacobian = 1 - gPlus*gPlus - gCross*gCross
    # dimensionless surface brightness ratio at the threshold
    ratio = thresholdSB/(maxSB*jacobian)
    if ratio <= 0:
        raise RuntimeError('boundingBox: invalid input parameters')
    if ratio >= 1:
        # the maximum surface brightness is already below the threshold
        return (0,0)
    # threshold radius of the unsheared radial profile
    radius = rFunction(ratio)
    # bounding box of the sheared threshold circle, in arcsecs
    halfWidth = radius*math.sqrt(((1+gPlus)*(1+gPlus)+gCross*gCross)/jacobian)
    halfHeight = radius*math.sqrt(((1-gPlus)*(1-gPlus)+gCross*gCross)/jacobian)
    return (halfWidth,halfHeight)
"""
Returns (dx,dy) for the bounding box of a Sersic profile with n = 1 or 4.
The input flux should be in electrons, hlr in arcsecs, beta in radians, f0 in
elec/arcsec^2. 0 < q <= 1 is dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def sersicBounds(n,flux,hlr,q,beta,f0):
    """
    Return the (dx,dy) bounding box in arcsecs of a Sersic profile with
    n = 1 or 4, total flux in electrons, half-light radius hlr in arcsecs,
    shape (q,beta), and surface brightness threshold f0 in elec/arcsec^2.
    Raises RuntimeError for any other Sersic index.
    """
    # convert half-light radius to the scale radius r0 and compute the
    # normalization constant norm = 1/p(0) per unit flux
    if n == 1:
        r0 = hlr/1.67835
        norm = twopi*r0*r0
    elif n == 4:
        r0 = hlr/3459.49
        norm = 20160*twopi*r0*r0 # 20160 = n*Gamma[2*n]
    else:
        raise RuntimeError('Sersic index n = %d is not supported.' % n)
    # inverse of the normalized radial profile: p(R) = b*p(0) at R(b)
    def thresholdRadius(b):
        return r0*math.pow(-math.log(b),n)
    return boundingBox(flux/norm,f0,q,beta,thresholdRadius)
"""
Returns (dx,dy) for the bounding box of a Moffat profile. The input flux
should be in electrons, fwhm in arcsecs, beta in radians, f0 in elec/arcsec^2.
0 < q <= 1 and moffatBeta > 1 are dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def moffatBounds(moffatBeta,flux,fwhm,q,beta,f0):
    """
    Return the (dx,dy) bounding box in arcsecs of a Moffat profile. The
    input flux should be in electrons, fwhm in arcsecs, beta in radians,
    f0 in elec/arcsec^2. 0 < q <= 1 and moffatBeta > 1 are dimensionless.
    See boundingBox for details.

    Raises RuntimeError unless moffatBeta > 1 (the normalization below
    divides by moffatBeta-1).
    """
    # Check that moffatBeta is valid (the guard rejects <= 1, so the
    # message now says so; the old text claimed only < 1 was invalid)
    if moffatBeta <= 1:
        raise RuntimeError('Moffat beta <= 1 is not valid.')
    # Convert the fwhm to the corresponding scale radius
    r0 = 0.5*fwhm/math.sqrt(math.pow(2,1./moffatBeta)-1)
    # Calculate the normalization factor norm = 1/p(0)
    norm = math.pi*r0*r0/(moffatBeta-1)
    # Calculate and return the bounding box
    return boundingBox(flux/norm,f0,q,beta,
        lambda b: r0*math.sqrt(1-math.pow(b,(moffatBeta-1.)/moffatBeta)))
"""
Returns a mask image of values 0 or 1 depending on whether the corresponding
input image pixel value is above or below the specified threshold in electrons.
Note that if all pixels are below threshold, then the returned mask will
contain only the central pixel with image.array.sum() == 0.
"""
def createMask(image,threshold,args):
    """
    Return a 0/1 mask image marking pixels of image at or above threshold
    (in electrons). Unless args.no_trim is set, the mask is trimmed to the
    bounding box of its above-threshold pixels (at minimum the central
    pixel). If any border pixel exceeds threshold, the threshold contour is
    truncated by the stamp edge and we recurse with the border maximum as
    the new threshold.
    """
    # create an empty mask image with the same dimensions as the input image
    box = image.bounds
    mask = galsim.ImageS(box)
    mask.setScale(image.getScale())
    # track the largest value on the stamp's 1-pixel outer border so we can
    # detect a threshold contour that spills off the stamp
    borderMax = 0.
    lastRow = box.ymax - box.ymin
    lastPixel = box.xmax - box.xmin
    if not args.no_trim:
        # initialize our trimmed bounds to just the central pixel
        # (the numerator should always be even for odd width,height)
        xmin = (box.getXMin()+box.getXMax())
        ymin = (box.getYMin()+box.getYMax())
        assert xmin%2 == 0 and ymin%2 == 0
        xmin = xmin/2
        ymin = ymin/2
        xmax = xmin
        ymax = ymin
    # loop over image pixels (numpy rows correspond to y, columns to x)
    for (rowIndex,row) in enumerate(image.array):
        y = box.getYMin()+rowIndex
        for (pixelIndex,pixelValue) in enumerate(row):
            x = box.getXMin()+pixelIndex
            if rowIndex == 0 or rowIndex == lastRow or pixelIndex == 0 or pixelIndex == lastPixel:
                # update the largest pixel value on our 1-pixel wide border
                borderMax = max(borderMax,pixelValue)
            if pixelValue >= threshold:
                mask.array[rowIndex,pixelIndex] = 1
                if not args.no_trim:
                    # grow the trimmed bounds to include this pixel
                    xmin = min(x,xmin)
                    xmax = max(x,xmax)
                    ymin = min(y,ymin)
                    ymax = max(y,ymax)
    # is the stamp too small to contain the threshold contour?
    if borderMax > threshold:
        print '### stamp truncated at %.1f > %.1f electrons' % (borderMax,threshold)
        # build a new mask using the border max as the threshold
        return createMask(image,borderMax,args)
    if not args.no_trim:
        # shrink the mask to the bounding box of its above-threshold pixels
        trimmed = galsim.BoundsI(xmin,xmax,ymin,ymax)
        mask = mask[trimmed]
    return mask
"""
Performs any final processing on stamp, controlled by args, then appends it to stamps.
Returns True if the stamp was saved, or otherwise False.
"""
def saveStamp(stamps,stamp,trimmed,args):
    """
    Apply the trimming/clipping selected by args to stamp, then append it
    to stamps. Returns True if the stamp was kept, False if it was dropped
    for falling entirely outside the field.
    """
    if not args.no_trim:
        # restrict the stamp to its threshold bounding box
        stamp = stamp[trimmed]
    if not args.no_clip:
        # clip to the field image; clipped sources may not be centered,
        # but the saved files are potentially smaller
        fieldBounds = galsim.BoundsI(1,args.width,1,args.height)
        overlap = stamp.bounds & fieldBounds
        if overlap.area() == 0:
            # nothing left inside the field after trimming
            return False
        stamp = stamp[overlap]
    stamps.append(stamp)
    return True
def getPsfBoundsEstimator(psf,pix,size):
    """
    Render the psf convolved with the pixel response onto a 2*size square
    stamp and return an estimator(flux,threshold) function that gives the
    (odd) stamp size in pixels needed to contain all psf pixels above
    threshold for the given total flux, clipped at 2*size.
    """
    image = galsim.ImageD(2*size,2*size)
    image.setScale(pix.getXWidth())
    galsim.Convolve([psf,pix]).draw(image = image)
    # circularize: record the maximum rendered value in each radial bin
    profile = numpy.zeros(size,dtype=float)
    for ix in range(2*size):
        for iy in range(2*size):
            offx = ix - size + 0.5
            offy = iy - size + 0.5
            radius = math.sqrt(offx*offx + offy*offy)
            rbin = int(math.floor(radius))
            if rbin < size:
                profile[rbin] = max(profile[rbin],image.array[ix,iy])
    def estimator(flux,threshold):
        # first radial bin where the scaled profile drops below threshold
        for rbin in range(size):
            if flux*profile[rbin] < threshold:
                return 2*rbin+1
        return 2*size
    return estimator
def combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,f_b):
    """
    Return (size,e1,e2) combining the specified disk and bulge components,
    assuming they share the same centroid.

    hlr_d,hlr_b are half-light radii in arcsecs, q_d,q_b are minor/major
    axis ratios, pa_d,pa_b are position angles in radians, and f_b is the
    bulge fraction of the total flux (0 <= f_b <= 1). The combined size is
    det(Q)**0.25 of the flux-weighted second-moment tensor Q and (e1,e2)
    are the corresponding ellipticity components.
    """
    # ensure that single-component models give correct results
    if f_b == 0:
        q_b = 1
    elif f_b == 1:
        q_d = 1
    # calculate the disk and bulge component ellipticities
    ed = (1-q_d)/(1+q_d)
    ed1 = ed*math.cos(2*pa_d)
    ed2 = ed*math.sin(2*pa_d)
    eb = (1-q_b)/(1+q_b)
    eb1 = eb*math.cos(2*pa_b)
    eb2 = eb*math.sin(2*pa_b)
    # calculate the corresponding second-moment tensors assuming unit total
    # flux (cd,cb scale half-light radius to second moments per component)
    cd = 1.06502
    nd = cd*(hlr_d/(1-ed*ed))**2
    Qd11 = nd*(1+ed*ed+2*ed1)
    Qd12 = nd*2*ed2
    Qd22 = nd*(1+ed*ed-2*ed1)
    cb = 10.8396
    nb = cb*(hlr_b/(1-eb*eb))**2
    Qb11 = nb*(1+eb*eb+2*eb1)
    Qb12 = nb*2*eb2
    Qb22 = nb*(1+eb*eb-2*eb1)
    # add the component second-moment tensors weighted by flux fraction
    Q11 = (1-f_b)*Qd11 + f_b*Qb11
    Q12 = (1-f_b)*Qd12 + f_b*Qb12
    Q22 = (1-f_b)*Qd22 + f_b*Qb22
    detQ = Q11*Q22 - Q12*Q12
    size = math.pow(detQ,0.25)
    # calculate the corresponding combined ellipticity
    denom = Q11 + Q22 + 2*math.sqrt(detQ)
    e1 = (Q11 - Q22)/denom
    e2 = 2*Q12/denom
    return (size,e1,e2)
def signalToNoiseRatio(stamp,pixelNoise):
    """
    Return the flux S/N ratio of stamp given the per-pixel noise variance
    pixelNoise, i.e. sqrt(sum of squared pixel values / pixelNoise).
    """
    pixels = stamp.array.ravel()
    return math.sqrt(numpy.dot(pixels,pixels)/pixelNoise)
def main():
    """
    Command-line driver for the galaxy catalog simulation.

    Parses the arguments, builds the PSF and pixel response, loops over the
    input catalog rendering each visible source into the field image (plus
    optional per-object stamp datacubes and finite-difference partials),
    then writes the requested field images, stamp file, and output catalog.
    """
    # Parse command-line args
    parser = argparse.ArgumentParser()
    parser.add_argument("-v", "--verbose", action = "store_true",
        help = "provide more verbose output")
    parser.add_argument("-i", "--input", default = 'gcat.dat',
        help = "name of input catalog to read")
    parser.add_argument("-o","--output", default = 'catout',
        help = "base name of output files to write")
    parser.add_argument("-x","--x-center", type = float, default = 0.5,
        help = "central RA of image (degrees)")
    parser.add_argument("-y","--y-center", type = float, default = 0.0,
        help = "central DEC of image (degrees)")
    parser.add_argument("--width", type = int, default = 512,
        help = "image width (pixels)")
    parser.add_argument("--height", type = int, default = 512,
        help = "image height (pixels)")
    parser.add_argument("--max-size", type = float, default = 20.,
        help = "flux from any object is truncated beyond this size (arcsecs)")
    parser.add_argument("--no-margin", action = "store_true",
        help = "do not simulate the tails of objects just outside the field")
    parser.add_argument("--pixel-scale", type = float, default = 0.2,
        help = "pixel scale (arscecs/pixel)")
    parser.add_argument("--airmass", type = float, default = 1.2,
        help = "airmass value to use for atmospheric PSF and extinction")
    parser.add_argument("--extinction", type = float, default = 0.07,
        help = "atmospheric extinction coefficient")
    parser.add_argument("--zenith-fwhm", type = float, default = 0.67,
        help = "atmospheric psf full-width-half-max in arcsecs at zenith")
    parser.add_argument("--instrumental-fwhm", type = float, default = 0.4,
        help = "instrumental psf full-width-half-max in arcsecs")
    parser.add_argument("--psf-beta", type = float, default = 0.0,
        help = "psf Moffat parameter beta (uses Kolmogorov psf if beta <= 0)")
    parser.add_argument("--band", choices = ['u','g','r','i','z','y'], default = 'i',
        help = "LSST imaging band to use for source fluxes")
    parser.add_argument("--zero-point", type = float, default = 41.5,
        help = "zero point for converting magnitude to detected signal in elec/sec")
    parser.add_argument("--sky-brightness", type = float, default = 20.0,
        help = "sky brightness in mag/sq.arcsec.")
    parser.add_argument("--sn-cut", type = float, default = 0.5,
        help = "keep all pixels above this signal-to-noise ratio cut")
    parser.add_argument("--exposure-time", type = float, default = 6900.,
        help = "full-depth exposure time in seconds")
    parser.add_argument("--g1", type = float, default = 0.,
        help = "constant shear component g1 to apply")
    parser.add_argument("--g2", type = float, default = 0.,
        help = "constant shear component g2 to apply")
    parser.add_argument("--save-field", action = "store_true",
        help = "save full field image without noise")
    parser.add_argument("--save-noise", action = "store_true",
        help = "save full field image with random noise added")
    parser.add_argument("--stamps", action = "store_true",
        help = "save postage stamps for each source (normalized to 1 exposure)")
    parser.add_argument("--no-clip", action = "store_true",
        help = "do not clip stamps to the image bounds")
    parser.add_argument("--no-trim", action = "store_true",
        help = "do not trim stamps to their threshold bounding box")
    parser.add_argument("--no-disk", action = "store_true",
        help = "do not include any galactic disk (Sersic n=1) components")
    parser.add_argument("--no-bulge", action = "store_true",
        help = "do not include any galactic bulge (Sersic n=4) components")
    parser.add_argument("--shape", action = "store_true",
        help = "run HSM adaptive moments calculation on no-psf stamp")
    parser.add_argument("--render-nopsf", action = "store_true",
        help = "save a stamp rendered without any psf for each galaxy")
    parser.add_argument("--partials", action = "store_true",
        help = "calculate and save partial derivatives wrt shape parameters (normalized to 1 exposure)")
    parser.add_argument("--partials-order", type = int, default = 1,
        help = "order of finite difference equation to use for evaluating partials")
    parser.add_argument("--only-line", type = int, default = 0,
        help = "only use the specified line number from the input catalog (when non-zero)")
    args = parser.parse_args()
    # Configure the GalSim logger
    logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
    logger = logging.getLogger("galsimcat")
    logger.info('Using output prefix %r' % args.output)
    # Define the pixel response
    pix = galsim.Pixel(args.pixel_scale)
    # Define the psf to use
    atmos_fwhm = args.zenith_fwhm*math.pow(args.airmass,0.6)
    fwhm = math.sqrt(atmos_fwhm**2 + args.instrumental_fwhm**2)
    # NOTE(review): '%.3d' formats the airmass as a zero-padded integer
    # (1.2 -> '001'); '%.3f' was probably intended -- confirm.
    logger.info('Using PSF fwhm = %.4f" (%.4f" zenith => %.4f" at X = %.3d, %.4f" instrumental)' %
        (fwhm,args.zenith_fwhm,atmos_fwhm,args.airmass,args.instrumental_fwhm))
    if fwhm > 0:
        if args.psf_beta > 0:
            psf = galsim.Moffat(beta = args.psf_beta, fwhm = fwhm)
        else:
            psf = galsim.Kolmogorov(fwhm = fwhm)
        psfBounds = getPsfBoundsEstimator(psf,pix,int(math.ceil(0.5*args.max_size/args.pixel_scale)))
    else:
        psf = None
    # Create an empty image that represents the whole field
    field = galsim.ImageD(args.width,args.height)
    field.setScale(pix.getXWidth())
    # Calculate the relative scaling of RA and angles relative to the image center
    RAscale = math.cos(args.y_center*deg2rad)
    # Calculate the corners of the image in degrees
    RAmin = args.x_center - 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    RAmax = args.x_center + 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
    DECmin = args.y_center - 0.5*args.height*args.pixel_scale/deg2arcsec
    DECmax = args.y_center + 0.5*args.height*args.pixel_scale/deg2arcsec
    # Calculate margin size in degrees (sources outside of our image margins
    # are always skipped, for speed, even if their tails might overlap our image)
    if args.no_margin:
        margin = 0
    else:
        margin = 0.5*args.max_size/deg2arcsec
    # Calculate the sky background rate in elec/sec/pixel
    skyRate = args.zero_point*math.pow(10,-0.4*(args.sky_brightness-24))*args.pixel_scale**2
    # Calculate the mean sky noise level for the full exposure time in elec/pixel
    skyNoise = math.sqrt(args.exposure_time*skyRate)
    # Calculate the pixel threshold cut to use in detected electrons during the full exposure
    pixelCut = args.sn_cut*skyNoise
    # Calculate the corresponding surface brightness cut to use
    sbCut = pixelCut/(args.pixel_scale*args.pixel_scale)
    print 'Simulating %s-band observations with AB24 zero point %.3f elec/sec, sky rate = %.3f elec/sec/pixel' %(
        args.band,args.zero_point,skyRate)
    print 'Simulating %.1fs exposure with total sky noise level %.3f elec/pixel (%.3f mag/sq.arcsec.)' % (
        args.exposure_time,skyNoise,args.sky_brightness)
    print 'Will keep all stacked pixels > %.3f elec (%.1f elec/arcsec^2)' % (pixelCut,sbCut)
    # Initialize finite difference calculations if necessary
    if args.partials:
        args.stamps = True
        if args.partials_order < 1 or args.partials_order > 4:
            logger.error('Bad parameter: partials-order must be an integer 1-4.')
            sys.exit(-1)
        # Initialize the finite difference coefficients to use
        if args.partials_order == 1:
            fdCoefs = (1./2.,)
        elif args.partials_order == 2:
            fdCoefs = (2./3.,-1./12.)
        elif args.partials_order == 3:
            fdCoefs = (3./4.,-3./20.,1./60.)
        else:
            fdCoefs = (4./5.,-1./5.,4./105.,-1./280.)
    # Open the source input catalog to use and initialize a keyword-based lookup for catalog entries
    cat = open(args.input)
    catFields = cat.readline().split()
    catDict = dict(zip(catFields,range(len(catFields))))
    if args.verbose:
        logger.info('Reading input catalog %r with fields:\n%s' % (args.input,','.join(catFields)))
    # Initialize the output catalog in memory
    outputCatalog = [ ]
    # Initialize the list of per-object stamp HDUs we will fill
    hdu = pyfits.PrimaryHDU()
    hduList = pyfits.HDUList([hdu])
    # Loop over catalog entries
    nkeep = lineno = 0
    for line in cat:
        lineno += 1
        if args.only_line > 0 and lineno != args.only_line:
            continue
        # prepare to read this catalog entry
        entryCols = line.split()
        def catalog(fieldName,type=float):
            return type(entryCols[catDict[fieldName]])
        entryID = catalog('id',int)
        # position on the sky in degrees
        RA = catalog('ra')
        DEC = catalog('dec')
        # skip sources outside our margins
        if RA < RAmin-margin or RA > RAmax+margin or DEC < DECmin-margin or DEC > DECmax+margin:
            continue
        # Calculate the offsets of this source from our image's bottom left corner in pixels
        # (which might be negative or beyond our image bounds because of the margins)
        xoffset = (RA - RAmin)*deg2arcsec/args.pixel_scale*RAscale
        yoffset = (DEC - DECmin)*deg2arcsec/args.pixel_scale
        # Look up redshift
        z = catalog('redshift')
        # Look up source AB magnitude in the requested band
        abMag = catalog(args.band + '_ab')
        # Correct for extinction
        abMag += args.extinction*(args.airmass - 1)
        # Calculate total detected signal in electrons
        flux = args.exposure_time*args.zero_point*math.pow(10,-0.4*(abMag-24))
        # Skip objects whose total flux is below our pixel threshold
        if flux < pixelCut:
            continue
        # Look up the component flux relative normalizations
        diskFluxNorm = catalog('fluxnorm_disk')
        bulgeFluxNorm = catalog('fluxnorm_bulge')
        agnFluxNorm = catalog('fluxnorm_agn')
        totalFluxNorm = diskFluxNorm + bulgeFluxNorm + agnFluxNorm
        # Calculate the disk and bulge fluxes to simulate
        if args.no_disk:
            diskFlux = 0
        else:
            diskFlux = flux*diskFluxNorm/totalFluxNorm
        if args.no_bulge:
            bulgeFlux = 0
        else:
            bulgeFlux = flux*bulgeFluxNorm/totalFluxNorm
        if diskFlux == 0 and bulgeFlux == 0:
            continue
        # Get disk component parameters
        if diskFlux > 0:
            hlr_d = catalog('DiskHalfLightRadius') # in arcsecs
            pa_d = catalog('pa_disk') # position angle in degrees
            a_d = catalog('a_d') # major axis length in arcsecs
            b_d = catalog('b_d') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_d = b_d/a_d # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_d = pa_d*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_d,h_d) = sersicBounds(1,diskFlux+bulgeFlux,hlr_d,q_d,pa_d,sbCut)
        else:
            (w_d,h_d) = (0,0)
        # Get bulge component parameters
        if bulgeFlux > 0:
            hlr_b = catalog('BulgeHalfLightRadius') # in arcsecs
            pa_b = catalog('pa_bulge') # position angle in degrees
            a_b = catalog('a_b') # major axis length in arcsecs
            b_b = catalog('b_b') # minor axis length in arcsecs
            # Calculate sheared ellipse aspect ratio
            q_b = b_b/a_b # between 0.2 and 1
            # Convert position angle from degrees to radians
            pa_b = pa_b*deg2rad
            # Calculate bounding box in arcsecs without psf or pixel convolution
            (w_b,h_b) = sersicBounds(4,diskFlux+bulgeFlux,hlr_b,q_b,pa_b,sbCut)
        else:
            (w_b,h_b) = (0,0)
        # If a component is missing, set its nominal size and shape from the other component.
        if diskFlux == 0:
            (hlr_d,q_d,pa_d) = (hlr_b,q_b,pa_b)
        if bulgeFlux == 0:
            (hlr_b,q_b,pa_b) = (hlr_d,q_d,pa_d)
        # Combine the bulge and disk ellipticities
        (size,e1,e2) = combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,bulgeFlux/(bulgeFlux+diskFlux))
        # Combine the bulge and disk bounding boxes
        width = max(w_d,w_b)
        height = max(h_d,h_b)
        # Estimate the (round) bounding box for the psf in arcsecs given our total flux
        psfSize = psfBounds(flux,pixelCut)*args.pixel_scale if psf else 0
        # Add the psf size in quadrature
        width = math.sqrt(width*width + psfSize*psfSize)
        height = math.sqrt(height*height + psfSize*psfSize)
        # Truncate the bounding box, if necessary
        if width > args.max_size or height > args.max_size:
            logger.info('...truncating bbbox from (%.1f,%.1f)' % (width,height))
            width = min(width,args.max_size)
            height = min(height,args.max_size)
        # Skip this source if its pixels would all be below pixelCut (can this ever happen?)
        if (width,height) == (0,0):
            continue
        # Calculate the integer coordinates of the image pixel that contains the source center
        # (using the convention that the bottom left corner pixel has coordinates 1,1)
        xpixels = int(math.ceil(xoffset))
        ypixels = int(math.ceil(yoffset))
        # Calculate the stamp size to use as width = 2*xhalf+1 and height = 2*yhalf+1.
        # We always round up to an odd integer so that flux is consistently centered
        # (see Issue #380).
        xhalf = int(math.ceil(width/args.pixel_scale))
        yhalf = int(math.ceil(height/args.pixel_scale))
        # Trim the stamp so that the source is still centered but we do not extend
        # beyond the final field image. This will only trim pixels above pixelCut
        # that lie outside the field.
        if xpixels-xhalf < 1 and xpixels+xhalf > args.width:
            xhalf = max(xpixels-1,args.width-xpixels)
        if ypixels-yhalf < 1 and ypixels+yhalf > args.height:
            yhalf = max(ypixels-1,args.height-ypixels)
        # Build this source's stamp bounding box
        bbox = galsim.BoundsI(xpixels-xhalf,xpixels+xhalf,ypixels-yhalf,ypixels+yhalf)
        # Skip objects that don't overlap our field
        if (bbox & field.bounds).area() == 0:
            continue
        # If we get this far, we are definitely rendering this source (but it might
        # still get trimmed out later)
        logger.info('Rendering input catalog line %d (entry id %d) with w x h = %d x %d' %
            (lineno,entryID,2*xhalf+1,2*yhalf+1))
        # Calculate the pixel coordinates of the stamp center.
        xstamp = 0.5*(bbox.xmin + bbox.xmax)
        ystamp = 0.5*(bbox.ymin + bbox.ymax)
        # Calculate the subpixel shift in arcsecs (not pixels!) of the source center
        # relative to the stamp center. Note that the resulting shift may be more than
        # one pixel in either direction because of the clipping operation above.
        xshift = (xoffset - (xstamp-0.5))*args.pixel_scale
        yshift = (yoffset - (ystamp-0.5))*args.pixel_scale
        if args.verbose:
            logger.info(' flux: %.3g electrons (%s-band AB %.1f)' % (flux,args.band,abMag))
            logger.info(' bounds: [%d:%d,%d:%d] pixels' % (bbox.xmin,bbox.xmax,bbox.ymin,bbox.ymax))
            logger.info(' shift: (%f,%f) arcsecs = (%f,%f) pixels' %
                (xshift,yshift,xshift/args.pixel_scale,yshift/args.pixel_scale))
            logger.info(' disk: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (diskFlux/flux,hlr_d,q_d,pa_d))
            logger.info(' bulge: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
                (bulgeFlux/flux,hlr_b,q_b,pa_b))
            logger.info(' agn: frac = %f' % (agnFluxNorm/flux))
            logger.info(' bbox: disk (%.1f,%.1f) bulge (%.1f,%.1f) psf %.1f arcsec' %
                (w_d,h_d,w_b,h_b,psfSize))
            logger.info(' size: %.2f pixels' % size)
            logger.info(' shear: (g1,g2) = (%.6f,%.6f)' % (e1,e2))
        # Define the nominal source parameters for rendering this object within its stamp
        params = {
            'flux':diskFlux+bulgeFlux, 'bulgeFraction': bulgeFlux/(diskFlux+bulgeFlux),
            'xc':xshift, 'yc':yshift,
            'hlr_d':hlr_d, 'q_d':q_d, 'beta_d':pa_d,
            'hlr_b':hlr_b, 'q_b':q_b, 'beta_b':pa_b,
            'g1':args.g1, 'g2': args.g2
        }
        # Create stamps for the galaxy with and w/o the psf applied
        gal = createSource(**params)
        if args.render_nopsf:
            # We don't do this by default for speed
            nopsf = createStamp(gal,None,pix,bbox)
        else:
            # Use an empty placeholder so we don't change the shape of the output
            nopsf = galsim.ImageD(bbox)
        nominal = createStamp(gal,psf,pix,bbox)
        # Create a mask for pixels above threshold
        mask = createMask(nominal,pixelCut,args)
        if mask.array.sum() == 0:
            # this stamp has no pixels above threshold
            logger.info('*** line %d (id %d) is below threshold' % (lineno,entryID))
            continue
        trimmed = mask.bounds
        if not args.no_trim and args.verbose:
            logger.info(' trimmed: [%d:%d,%d:%d] pixels' %
                (trimmed.xmin,trimmed.xmax,trimmed.ymin,trimmed.ymax))
        # Add the nominal galaxy to the full field image after applying the threshold mask
        # (the mask must be the second term in the product so that the result is double precision)
        masked = nominal[trimmed]*mask
        overlap = trimmed & field.bounds
        if overlap.area() == 0:
            # this stamp's mask falls completely outside our field
            logger.info('*** line %d (id %d) does not overlap field' % (lineno,entryID))
            continue
        field[overlap] += masked[overlap]
        # Calculate this object's nominal flux S/N ratio at full depth using only masked pixels.
        # Note that this value cannot be reproduced from the saved stamp when a stamp is clipped
        # to the field boundary (use --no-clip to disable this).
        snr = signalToNoiseRatio(masked,args.exposure_time*skyRate)
        if args.verbose:
            logger.info(' S/N: %.6f' % snr)
        # Initialize the datacube of stamps that we will save for this object
        datacube = [ ]
        # Save individual stamps in units of elec/sec
        assert saveStamp(datacube,nopsf/args.exposure_time,trimmed,args)
        assert saveStamp(datacube,nominal/args.exposure_time,trimmed,args)
        assert saveStamp(datacube,mask,trimmed,args)
        if args.partials:
            # Specify the amount to vary each parameter for partial derivatives
            # (we don't use a dictionary here since we want to control the order)
            variations = [
                ('xc',args.pixel_scale/3.), ('yc',args.pixel_scale/3.),
                ('hlr_d',0.05*hlr_d),('hlr_b',0.05*hlr_b),
                ('g1',0.03), ('g2',0.03)
            ]
            # loop over source parameters to vary
            for (pname,delta) in variations:
                # create stamps for each variation of this parameter
                newparams = params.copy()
                partial = galsim.ImageD(bbox)
                partial.setScale(pix.getXWidth())
                # delta might be zero, e.g., for hlr_b when bulge fraction = 0
                if delta > 0:
                    for step in range(args.partials_order):
                        # create and save the positive variation stamp
                        newparams[pname] = params[pname] + (step+1)*delta
                        newsource = createSource(**newparams)
                        plus = createStamp(newsource,psf,pix,bbox)
                        # create and save the negative variation stamp
                        newparams[pname] = params[pname] - (step+1)*delta
                        newsource = createSource(**newparams)
                        minus = createStamp(newsource,psf,pix,bbox)
                        # update the finite difference calculation of this partial
                        partial += (fdCoefs[step]/delta)*(plus - minus)
                # append this partial to our datacube
                assert saveStamp(datacube,partial/args.exposure_time,trimmed,args)
        # Add a new HDU with a datacube for this object's stamps
        # We don't use compression = 'gzip_tile' for now since it is lossy
        # and mathematica cannot Import it.
        galsim.fits.writeCube(datacube, hdu_list = hduList)
        # Add a catalog entry for this galaxy
        entry = (entryID,xoffset,yoffset,abMag,flux/args.exposure_time,size,e1,e2,
            bulgeFlux/(diskFlux+bulgeFlux),z,snr)
        outputCatalog.append(entry)
        nkeep += 1
        logger.info("saved entry id %d as stamp %d" % (entryID,nkeep))
    # Write the full field image without noise
    if args.save_field:
        # First without noise
        outname = args.output + '_field.fits'
        logger.info('Saving full field to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the full field image with random noise added
    if args.save_noise:
        rng = galsim.BaseDeviate(123)
        noise = galsim.PoissonNoise(rng,sky_level = args.exposure_time*skyRate)
        field.addNoise(noise)
        outname = args.output + '_noise.fits'
        logger.info('Saving full field with noise added to %r' % outname)
        galsim.fits.write(field,outname)
    # Write the object stamp datacubes
    if args.stamps:
        outname = args.output + '_stamps.fits'
        logger.info('Saving %d stamps to %r' % (nkeep,outname))
        galsim.fits.writeFile(outname, hduList)
    # Close the input catalog
    cat.close()
    # Write the output catalog from memory
    outname = args.output + '_catalog.dat'
    out = open(outname,'w')
    for entry in outputCatalog:
        print >>out, ' '.join(map(str,entry))
    out.close()
    logger.info('Wrote %d of %d catalog entries to %r' % (nkeep,lineno,outname))
# Standard script entry point.
if __name__ == "__main__":
    main()
Rework model space and choice of parameters to vary for the Fisher matrix
#!/usr/bin/env python
#######################################################################################
## Created by David Kirkby, University of California, Irvine <dkirkby@uci.edu>
#######################################################################################
import sys
import os
import math
import argparse
import logging
import galsim
import pyfits
import numpy
twopi = 2*math.pi
deg2rad = math.pi/180.
deg2arcsec = 3600.
deg2arcmin = 60.
def createComponent(type,electrons,xc,yc,hlr,q,beta,g1,g2):
    """
    Build one galaxy component of the given profile type with the requested
    flux (electrons) and half-light radius (arcsecs), then apply its
    intrinsic shape (q,beta in radians), the cosmic shear (g1,g2), and
    finally shift it to its centroid (xc,yc).
    """
    profile = type(flux = electrons, half_light_radius = hlr)
    # intrinsic shape first, then cosmic shear, then the centroid shift
    profile.applyShear(q = q, beta = beta*galsim.radians)
    profile.applyShear(g1 = g1, g2 = g2)
    profile.applyShift(dx = xc, dy = yc)
    return profile
"""
Returns a (disk,bulge) tuple of source objects using the specified parameters.
Note that f_d and f_b are fractions of the total flux and need not sum to one.
"""
def createSource(
    total_flux,f_d,f_b,
    x_d,y_d,hlr_d,q_d,beta_d,
    x_b,y_b,hlr_b,q_b,beta_b,
    dx,dy,relsize,dbeta,
    g1,g2):
    """
    Build the (disk,bulge) pair of source objects for one galaxy.

    f_d and f_b are fractions of total_flux and need not sum to one; a
    component with zero fraction is returned as None. (dx,dy,relsize,dbeta)
    perturb both components' centroids, sizes, and position angles.
    """
    disk = None
    if f_d > 0:
        disk = createComponent(galsim.Exponential,
            total_flux*f_d,x_d+dx,y_d+dy,hlr_d*relsize,q_d,beta_d+dbeta,g1,g2)
    bulge = None
    if f_b > 0:
        bulge = createComponent(galsim.DeVaucouleurs,
            total_flux*f_b,x_b+dx,y_b+dy,hlr_b*relsize,q_b,beta_b+dbeta,g1,g2)
    return (disk,bulge)
"""
Renders the specified source convolved with a psf (which might be None)
and pixel response into a postage stamp with the specified bounding box.
"""
def renderStamp(src,psf,pix,bbox):
    """
    Render src (possibly None) convolved with the psf (which might be None)
    and the pixel response into a new postage stamp covering bbox. A None
    src yields an all-zero stamp.
    """
    image = galsim.ImageD(bbox)
    image.setScale(pix.getXWidth())
    if src:
        if psf is None:
            # convolve source and pixel response in real space
            model = galsim.Convolve([src,pix], real_space = True)
        else:
            # permit a larger FFT for the source*psf*pixel convolution
            params = galsim.GSParams(maximum_fft_size=16384)
            model = galsim.Convolve([src,psf,pix],gsparams=params)
        model.draw(image = image)
    return image
def createStamp(src,psf,pix,bbox):
    """Render the disk and bulge of src over bbox and return their sum."""
    diskPart = renderStamp(src[0],psf,pix,bbox)
    bulgePart = renderStamp(src[1],psf,pix,bbox)
    return diskPart + bulgePart
"""
Returns (dx,dy) for the bounding box of a surface brightness profile
SB(x,y) whose isophotes are ellipses with the shape (q,beta) and which
has an underlying normalized radial profile p(r). The inputs are:
maxSB = totalFlux*p(0) = maximum surface brightness before shear
thresholdSB = threshold surface brightness after shear
q = ratio of minor to major axes of ellipse with 0 < q <= 1
beta = angle of ellipse's major axis in radians
rFunction = returns R(b) such that p(R) = b*p(0) with 0 < b < 1
The returned (dx,dy) are in arcsecs, and defined such that SB(x,y) < f0
is guaranteed for |x| > dx or |y| > dy. The result only depends on the
ratio thresholdSB/maxSB so they must be in the same (arbitrary) units.
"""
def boundingBox(maxSB,thresholdSB,q,beta,rFunction):
    """
    Half-dimensions (dx,dy) in arcsecs of the box bounding the region where
    a sheared profile with isophote shape (q,beta), peak surface brightness
    maxSB, and normalized radial inverse rFunction can exceed thresholdSB.
    Only the ratio thresholdSB/maxSB matters. Returns (0,0) when the peak
    is already below threshold; raises RuntimeError for a non-positive
    brightness ratio.
    """
    # ellipticity of the isophotes and its affine-transform components
    e = (1-q)/(1+q)
    e1 = e*math.cos(2*beta)
    e2 = e*math.sin(2*beta)
    det = 1 - e1*e1 - e2*e2
    # dimensionless brightness ratio at the threshold isophote
    frac = thresholdSB/(maxSB*det)
    if frac <= 0:
        raise RuntimeError('boundingBox: invalid input parameters')
    if frac >= 1:
        # SB(0,0) never reaches the threshold
        return (0,0)
    rmax = rFunction(frac)
    # bounding box of the sheared circle of radius rmax
    return (rmax*math.sqrt(((1+e1)*(1+e1)+e2*e2)/det),
        rmax*math.sqrt(((1-e1)*(1-e1)+e2*e2)/det))
"""
Returns (dx,dy) for the bounding box of a Sersic profile with n = 1 or 4.
The input flux should be in electrons, hlr in arcsecs, beta in radians, f0 in
elec/arcsec^2. 0 < q <= 1 is dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def sersicBounds(n,flux,hlr,q,beta,f0):
    """
    Bounding box (dx,dy) in arcsecs of a Sersic profile with n = 1 or 4,
    total flux in electrons, half-light radius hlr in arcsecs, shape
    (q,beta in radians), and surface brightness threshold f0 in
    elec/arcsec^2. Raises RuntimeError for any other index.
    """
    # half-light-radius -> scale-radius conversion factors for n = 1, 4
    hlrScales = {1: 1.67835, 4: 3459.49}
    if n not in hlrScales:
        raise RuntimeError('Sersic index n = %d is not supported.' % n)
    r0 = hlr/hlrScales[n]
    # normalization constant norm = 1/p(0) per unit flux
    if n == 1:
        norm = twopi*r0*r0
    else:
        norm = 20160*twopi*r0*r0 # 20160 = n*Gamma[2*n]
    # inverse of the normalized radial profile
    def thresholdRadius(b):
        return r0*math.pow(-math.log(b),n)
    return boundingBox(flux/norm,f0,q,beta,thresholdRadius)
"""
Returns (dx,dy) for the bounding box of a Moffat profile. The input flux
should be in electrons, fwhm in arcsecs, beta in radians, f0 in elec/arcsec^2.
0 < q <= 1 and moffatBeta > 1 are dimensionless. The returned (dx,dy) are in
arcsecs. See boundingBox above for details.
"""
def moffatBounds(moffatBeta,flux,fwhm,q,beta,f0):
    """
    Bounding box (dx,dy) in arcsecs of a Moffat profile with total flux in
    electrons, fwhm in arcsecs, shape (q,beta in radians), and surface
    brightness threshold f0 in elec/arcsec^2. Requires moffatBeta > 1.
    """
    # the normalization below divides by moffatBeta-1
    if moffatBeta <= 1:
        raise RuntimeError('Moffat beta < 1 is not valid.')
    # scale radius corresponding to the requested fwhm
    scale = 0.5*fwhm/math.sqrt(math.pow(2,1./moffatBeta)-1)
    # normalization factor: peakNorm = 1/p(0)
    peakNorm = math.pi*scale*scale/(moffatBeta-1)
    exponent = (moffatBeta-1.)/moffatBeta
    def thresholdRadius(b):
        return scale*math.sqrt(1-math.pow(b,exponent))
    return boundingBox(flux/peakNorm,f0,q,beta,thresholdRadius)
"""
Returns a mask image of values 0 or 1 depending on whether the corresponding
input image pixel value is above or below the specified threshold in electrons.
Note that if all pixels are below threshold, then the returned mask will
contain only the central pixel with image.array.sum() == 0.
"""
def createMask(image,threshold,args):
# create an empty mask image with the same dimensions as the input image
box = image.bounds
mask = galsim.ImageS(box)
mask.setScale(image.getScale())
borderMax = 0.
lastRow = box.ymax - box.ymin
lastPixel = box.xmax - box.xmin
if not args.no_trim:
# initialize our trimmed bounds to just the central pixel
# (the numerator should always be even for odd width,height)
xmin = (box.getXMin()+box.getXMax())
ymin = (box.getYMin()+box.getYMax())
assert xmin%2 == 0 and ymin%2 == 0
xmin = xmin/2
ymin = ymin/2
xmax = xmin
ymax = ymin
# loop over image pixels
for (rowIndex,row) in enumerate(image.array):
y = box.getYMin()+rowIndex
for (pixelIndex,pixelValue) in enumerate(row):
x = box.getXMin()+pixelIndex
if rowIndex == 0 or rowIndex == lastRow or pixelIndex == 0 or pixelIndex == lastPixel:
# update the largest pixel value on our 1-pixel wide border
borderMax = max(borderMax,pixelValue)
if pixelValue >= threshold:
mask.array[rowIndex,pixelIndex] = 1
if not args.no_trim:
xmin = min(x,xmin)
xmax = max(x,xmax)
ymin = min(y,ymin)
ymax = max(y,ymax)
# is the stamp too small to contain the threshold contour?
if borderMax > threshold:
print '### stamp truncated at %.1f > %.1f electrons' % (borderMax,threshold)
# build a new mask using the border max as the threshold
return createMask(image,borderMax,args)
if not args.no_trim:
trimmed = galsim.BoundsI(xmin,xmax,ymin,ymax)
mask = mask[trimmed]
return mask
"""
Performs any final processing on stamp, controlled by args, then appends it to stamps.
Returns True if the stamp was saved, or otherwise False.
"""
def saveStamp(stamps,stamp,trimmed,args):
# Trim the stamp to its threshold bounding box
if not args.no_trim:
stamp = stamp[trimmed]
# Clip the stamp so that does not extend beyond the field image. This results
# in potentially smaller files with sources that might not be centered.
if not args.no_clip:
overlap = stamp.bounds & galsim.BoundsI(1,args.width,1,args.height)
if overlap.area() == 0:
# skip this stamp if it falls completely outside our field (after trimming)
return False
stamp = stamp[overlap]
# Remember this stamp.
stamps.append(stamp)
return True
def getPsfBoundsEstimator(psf,pix,size):
    # Returns an estimator function for the bounding-box size (in pixels)
    # needed to contain psf pixels above a flux threshold.
    # psf: galsim profile; pix: galsim pixel response; size: half-width in
    # pixels of the sampling grid.
    # Draw the pixel-convolved psf on a (2*size) x (2*size) grid.
    stamp = galsim.ImageD(2*size,2*size)
    stamp.setScale(pix.getXWidth())
    obj = galsim.Convolve([psf,pix])
    obj.draw(image=stamp)
    # build the circularized psf profile: profile[i] is the max pixel value
    # at integer radius i from the stamp center (unit total flux)
    profile = numpy.zeros(size,dtype=float)
    for x in range(2*size):
        for y in range(2*size):
            dx = x - size + 0.5
            dy = y - size + 0.5
            r = math.sqrt(dx*dx + dy*dy)
            ipix = int(math.floor(r))
            if ipix < size:
                # NOTE(review): numpy indexing is [row,col] = [y,x]; the
                # [x,y] order here is harmless because r is symmetric in
                # dx,dy, so the circularized max is unaffected — confirm.
                profile[ipix] = max(profile[ipix],stamp.array[x,y])
    # Create a function that gives the size of bounding box necessary to contain
    # psf pixels down to the specified threshold assuming the specified total flux.
    # The return value is clipped at 2*size for large fluxes.
    def estimator(flux,threshold):
        # scan outwards until the scaled profile drops below threshold
        index = 0
        while index < size:
            if flux*profile[index] < threshold:
                return 2*index+1
            index += 1
        return 2*size
    return estimator
def combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,f_b):
    """
    Return the combined (size,e1,e2) for the specified disk and bulge
    components, assuming they share a centroid. f_b is the bulge flux
    fraction; hlr in arcsecs, pa in radians, 0 < q <= 1.
    """
    # Single-component models: force the unused component round so its
    # zero-weighted moments cannot introduce spurious ellipticity.
    if f_b == 0:
        q_b = 1
    elif f_b == 1:
        q_d = 1

    def secondMoments(hlr,q,pa,coef):
        # Second-moment tensor (Q11,Q12,Q22) of one component with unit
        # flux; coef is the profile-dependent normalization constant
        # (1.06502 for Sersic n=1, 10.8396 for n=4).
        e = (1-q)/(1+q)
        e1 = e*math.cos(2*pa)
        e2 = e*math.sin(2*pa)
        scale = coef*(hlr/(1-e*e))**2
        return (scale*(1+e*e+2*e1), scale*2*e2, scale*(1+e*e-2*e1))

    # Flux-weighted sum of the disk (n=1) and bulge (n=4) moment tensors.
    Qd11,Qd12,Qd22 = secondMoments(hlr_d,q_d,pa_d,1.06502)
    Qb11,Qb12,Qb22 = secondMoments(hlr_b,q_b,pa_b,10.8396)
    Q11 = (1-f_b)*Qd11 + f_b*Qb11
    Q12 = (1-f_b)*Qd12 + f_b*Qb12
    Q22 = (1-f_b)*Qd22 + f_b*Qb22
    detQ = Q11*Q22 - Q12*Q12
    # Size is |Q|^(1/4); for a single round component this reduces to
    # sqrt(coef)*hlr. (Cross-checked analytically for the pa_d == pa_b
    # case: emag = em/(ep+sqrt(ep^2-em^2)) with flux-weighted ep,em.)
    size = math.pow(detQ,0.25)
    # Corresponding combined ellipticity in the (g1,g2) convention.
    denom = Q11 + Q22 + 2*math.sqrt(detQ)
    e1 = (Q11 - Q22)/denom
    e2 = 2*Q12/denom
    return (size,e1,e2)
def signalToNoiseRatio(stamp,pixelNoise):
    """
    Return the matched-filter signal-to-noise ratio of stamp:
    sqrt(sum(pixel^2)/pixelNoise), where pixelNoise is the per-pixel
    noise variance in electrons^2.
    """
    pixels = stamp.array.reshape(-1)
    return math.sqrt(numpy.dot(pixels,pixels)/pixelNoise)
def main():
# Parse command-line args
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--verbose", action = "store_true",
help = "provide more verbose output")
parser.add_argument("-i", "--input", default = 'gcat.dat',
help = "name of input catalog to read")
parser.add_argument("-o","--output", default = 'catout',
help = "base name of output files to write")
parser.add_argument("-x","--x-center", type = float, default = 0.5,
help = "central RA of image (degrees)")
parser.add_argument("-y","--y-center", type = float, default = 0.0,
help = "central DEC of image (degrees)")
parser.add_argument("--width", type = int, default = 512,
help = "image width (pixels)")
parser.add_argument("--height", type = int, default = 512,
help = "image height (pixels)")
parser.add_argument("--max-size", type = float, default = 20.,
help = "flux from any object is truncated beyond this size (arcsecs)")
parser.add_argument("--no-margin", action = "store_true",
help = "do not simulate the tails of objects just outside the field")
parser.add_argument("--pixel-scale", type = float, default = 0.2,
help = "pixel scale (arscecs/pixel)")
parser.add_argument("--airmass", type = float, default = 1.2,
help = "airmass value to use for atmospheric PSF and extinction")
parser.add_argument("--extinction", type = float, default = 0.07,
help = "atmospheric extinction coefficient")
parser.add_argument("--zenith-fwhm", type = float, default = 0.67,
help = "atmospheric psf full-width-half-max in arcsecs at zenith")
parser.add_argument("--instrumental-fwhm", type = float, default = 0.4,
help = "instrumental psf full-width-half-max in arcsecs")
parser.add_argument("--psf-beta", type = float, default = 0.0,
help = "psf Moffat parameter beta (uses Kolmogorov psf if beta <= 0)")
parser.add_argument("--band", choices = ['u','g','r','i','z','y'], default = 'i',
help = "LSST imaging band to use for source fluxes")
parser.add_argument("--zero-point", type = float, default = 41.5,
help = "zero point for converting magnitude to detected signal in elec/sec")
parser.add_argument("--sky-brightness", type = float, default = 20.0,
help = "sky brightness in mag/sq.arcsec.")
parser.add_argument("--sn-cut", type = float, default = 0.5,
help = "keep all pixels above this signal-to-noise ratio cut")
parser.add_argument("--exposure-time", type = float, default = 6900.,
help = "full-depth exposure time in seconds")
parser.add_argument("--g1", type = float, default = 0.,
help = "constant shear component g1 to apply")
parser.add_argument("--g2", type = float, default = 0.,
help = "constant shear component g2 to apply")
parser.add_argument("--save-field", action = "store_true",
help = "save full field image without noise")
parser.add_argument("--save-noise", action = "store_true",
help = "save full field image with random noise added")
parser.add_argument("--stamps", action = "store_true",
help = "save postage stamps for each source (normalized to 1 exposure)")
parser.add_argument("--no-clip", action = "store_true",
help = "do not clip stamps to the image bounds")
parser.add_argument("--no-trim", action = "store_true",
help = "do not trim stamps to their threshold bounding box")
parser.add_argument("--no-disk", action = "store_true",
help = "do not include any galactic disk (Sersic n=1) components")
parser.add_argument("--no-bulge", action = "store_true",
help = "do not include any galactic bulge (Sersic n=4) components")
parser.add_argument("--shape", action = "store_true",
help = "run HSM adaptive moments calculation on no-psf stamp")
parser.add_argument("--render-nopsf", action = "store_true",
help = "save a stamp rendered without any psf for each galaxy")
parser.add_argument("--partials", action = "store_true",
help = "calculate and save partial derivatives wrt shape parameters (normalized to 1 exposure)")
parser.add_argument("--partials-order", type = int, default = 1,
help = "order of finite difference equation to use for evaluating partials")
parser.add_argument("--only-line", type = int, default = 0,
help = "only use the specified line number from the input catalog (when non-zero)")
args = parser.parse_args()
# Configure the GalSim logger
logging.basicConfig(format="%(message)s", level=logging.INFO, stream=sys.stdout)
logger = logging.getLogger("galsimcat")
logger.info('Using output prefix %r' % args.output)
# Define the pixel response
pix = galsim.Pixel(args.pixel_scale)
# Define the psf to use
atmos_fwhm = args.zenith_fwhm*math.pow(args.airmass,0.6)
fwhm = math.sqrt(atmos_fwhm**2 + args.instrumental_fwhm**2)
logger.info('Using PSF fwhm = %.4f" (%.4f" zenith => %.4f" at X = %.3d, %.4f" instrumental)' %
(fwhm,args.zenith_fwhm,atmos_fwhm,args.airmass,args.instrumental_fwhm))
if fwhm > 0:
if args.psf_beta > 0:
psf = galsim.Moffat(beta = args.psf_beta, fwhm = fwhm)
else:
psf = galsim.Kolmogorov(fwhm = fwhm)
psfBounds = getPsfBoundsEstimator(psf,pix,int(math.ceil(0.5*args.max_size/args.pixel_scale)))
else:
psf = None
# Create an empty image that represents the whole field
field = galsim.ImageD(args.width,args.height)
field.setScale(pix.getXWidth())
# Calculate the relative scaling of RA and angles relative to the image center
RAscale = math.cos(args.y_center*deg2rad)
# Calculate the corners of the image in degrees
RAmin = args.x_center - 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
RAmax = args.x_center + 0.5*args.width*args.pixel_scale/deg2arcsec/RAscale
DECmin = args.y_center - 0.5*args.height*args.pixel_scale/deg2arcsec
DECmax = args.y_center + 0.5*args.height*args.pixel_scale/deg2arcsec
# Calculate margin size in degrees (sources outside of our image margins
# are always skipped, for speed, even if their tails might overlap our image)
if args.no_margin:
margin = 0
else:
margin = 0.5*args.max_size/deg2arcsec
# Calculate the sky background rate in elec/sec/pixel
skyRate = args.zero_point*math.pow(10,-0.4*(args.sky_brightness-24))*args.pixel_scale**2
# Calculate the mean sky noise level for the full exposure time in elec/pixel
skyNoise = math.sqrt(args.exposure_time*skyRate)
# Calculate the pixel threshold cut to use in detected electrons during the full exposure
pixelCut = args.sn_cut*skyNoise
# Calculate the corresponding surface brightness cut to use
sbCut = pixelCut/(args.pixel_scale*args.pixel_scale)
print 'Simulating %s-band observations with AB24 zero point %.3f elec/sec, sky rate = %.3f elec/sec/pixel' %(
args.band,args.zero_point,skyRate)
print 'Simulating %.1fs exposure with total sky noise level %.3f elec/pixel (%.3f mag/sq.arcsec.)' % (
args.exposure_time,skyNoise,args.sky_brightness)
print 'Will keep all stacked pixels > %.3f elec (%.1f elec/arcsec^2)' % (pixelCut,sbCut)
# Initialize finite difference calculations if necessary
if args.partials:
args.stamps = True
if args.partials_order < 1 or args.partials_order > 4:
logger.error('Bad parameter: partials-order must be an integer 1-4.')
sys.exit(-1)
# Initialize the finite difference coefficients to use
if args.partials_order == 1:
fdCoefs = (1./2.,)
elif args.partials_order == 2:
fdCoefs = (2./3.,-1./12.)
elif args.partials_order == 3:
fdCoefs = (3./4.,-3./20.,1./60.)
else:
fdCoefs = (4./5.,-1./5.,4./105.,-1./280.)
# Open the source input catalog to use and initialize a keyword-based lookup for catalog entries
cat = open(args.input)
catFields = cat.readline().split()
catDict = dict(zip(catFields,range(len(catFields))))
if args.verbose:
logger.info('Reading input catalog %r with fields:\n%s' % (args.input,','.join(catFields)))
# Initialize the output catalog in memory
outputCatalog = [ ]
# Initialize the list of per-object stamp HDUs we will fill
hdu = pyfits.PrimaryHDU()
hduList = pyfits.HDUList([hdu])
# Loop over catalog entries
nkeep = lineno = 0
for line in cat:
lineno += 1
if args.only_line > 0 and lineno != args.only_line:
continue
# prepare to read this catalog entry
entryCols = line.split()
def catalog(fieldName,type=float):
return type(entryCols[catDict[fieldName]])
entryID = catalog('id',int)
# position on the sky in degrees
RA = catalog('ra')
DEC = catalog('dec')
# skip sources outside our margins
if RA < RAmin-margin or RA > RAmax+margin or DEC < DECmin-margin or DEC > DECmax+margin:
continue
# Calculate the offsets of this source from our image's bottom left corner in pixels
# (which might be negative or byeond our image bounds because of the margins)
xoffset = (RA - RAmin)*deg2arcsec/args.pixel_scale*RAscale
yoffset = (DEC - DECmin)*deg2arcsec/args.pixel_scale
# Look up redshift
z = catalog('redshift')
# Look up source AB magnitude in the requested band
abMag = catalog(args.band + '_ab')
# Correct for extinction
abMag += args.extinction*(args.airmass - 1)
# Calculate total detected signal in electrons
flux = args.exposure_time*args.zero_point*math.pow(10,-0.4*(abMag-24))
# Skip objects whose total flux is below our pixel threshold
if flux < pixelCut:
continue
# Look up the component flux relative normalizations
diskFluxNorm = catalog('fluxnorm_disk')
bulgeFluxNorm = catalog('fluxnorm_bulge')
agnFluxNorm = catalog('fluxnorm_agn')
totalFluxNorm = diskFluxNorm + bulgeFluxNorm + agnFluxNorm
# Calculate the disk and bulge fluxes to simulate
if args.no_disk:
diskFlux = 0
else:
diskFlux = flux*diskFluxNorm/totalFluxNorm
if args.no_bulge:
bulgeFlux = 0
else:
bulgeFlux = flux*bulgeFluxNorm/totalFluxNorm
if diskFlux == 0 and bulgeFlux == 0:
continue
# Get disk component parameters
if diskFlux > 0:
hlr_d = catalog('DiskHalfLightRadius') # in arcsecs
pa_d = catalog('pa_disk') # position angle in degrees
a_d = catalog('a_d') # major axis length in arcsecs
b_d = catalog('b_d') # minor axis length in arcsecs
# Calculate sheared ellipse aspect ratio
q_d = b_d/a_d # between 0.2 and 1
# Convert position angle from degrees to radians
pa_d = pa_d*deg2rad
# Calculate bounding box in arcsecs without psf or pixel convolution
(w_d,h_d) = sersicBounds(1,diskFlux+bulgeFlux,hlr_d,q_d,pa_d,sbCut)
else:
(w_d,h_d) = (0,0)
# Get bulge component parameters
if bulgeFlux > 0:
hlr_b = catalog('BulgeHalfLightRadius') # in arcsecs
pa_b = catalog('pa_bulge') # position angle in degrees
a_b = catalog('a_b') # major axis length in arcsecs
b_b = catalog('b_b') # minor axis length in arcsecs
# Calculate sheared ellipse aspect ratio
q_b = b_b/a_b # between 0.2 and 1
# Convert position angle from degrees to radians
pa_b = pa_b*deg2rad
# Calculate bounding box in arcsecs without psf or pixel convolution
(w_b,h_b) = sersicBounds(4,diskFlux+bulgeFlux,hlr_b,q_b,pa_b,sbCut)
else:
(w_b,h_b) = (0,0)
# If a component is missing, set its nominal size and shape from the other component.
if diskFlux == 0:
(hlr_d,q_d,pa_d) = (hlr_b,q_b,pa_b)
if bulgeFlux == 0:
(hlr_b,q_b,pa_b) = (hlr_d,q_d,pa_d)
# Combine the bulge and disk ellipticities
(size,e1,e2) = combineEllipticities(hlr_d,q_d,pa_d,hlr_b,q_b,pa_b,bulgeFlux/(bulgeFlux+diskFlux))
# Combine the bulge and disk bounding boxes
width = max(w_d,w_b)
height = max(h_d,h_b)
# Estimate the (round) bounding box for the psf in arscecs given our total flux
psfSize = psfBounds(flux,pixelCut)*args.pixel_scale if psf else 0
# Add the psf size in quadrature
width = math.sqrt(width*width + psfSize*psfSize)
height = math.sqrt(height*height + psfSize*psfSize)
# Truncate the bounding box, if necessary
if width > args.max_size or height > args.max_size:
logger.info('...truncating bbbox from (%.1f,%.1f)' % (width,height))
width = min(width,args.max_size)
height = min(height,args.max_size)
# Skip this source if its pixels would all be below pixelCut (can this ever happen?)
if (width,height) == (0,0):
continue
# Calculate the integer coordinates of the image pixel that contains the source center
# (using the convention that the bottom left corner pixel has coordinates 1,1)
xpixels = int(math.ceil(xoffset))
ypixels = int(math.ceil(yoffset))
# Calculate the stamp size to use as width = 2*xhalf+1 and height = 2*yhalf+1.
# We always round up to an odd integer so that flux is consistently centered
# (see Issue #380).
xhalf = int(math.ceil(width/args.pixel_scale))
yhalf = int(math.ceil(height/args.pixel_scale))
# Trim the stamp so that the source is still centered but we do not extend
# beyond the final field image. This will only trim pixels above pixelCut
# that lie outside the field.
if xpixels-xhalf < 1 and xpixels+xhalf > args.width:
xhalf = max(xpixels-1,args.width-xpixels)
if ypixels-yhalf < 1 and ypixels+yhalf > args.height:
yhalf = max(ypixels-1,args.height-ypixels)
# Build this source's stamp bounding box
bbox = galsim.BoundsI(xpixels-xhalf,xpixels+xhalf,ypixels-yhalf,ypixels+yhalf)
# Skip objects that don't overlap our field
if (bbox & field.bounds).area() == 0:
continue
# If we get this far, we are definitely rendering this source (but it might
# still get trimmed out later)
logger.info('Rendering input catalog line %d (entry id %d) with w x h = %d x %d' %
(lineno,entryID,2*xhalf+1,2*yhalf+1))
# Calculate the pixel coordinates of the stamp center.
xstamp = 0.5*(bbox.xmin + bbox.xmax)
ystamp = 0.5*(bbox.ymin + bbox.ymax)
# Calculate the subpixel shift in arcsecs (not pixels!) of the source center
# relative to the stamp center. Note that the resulting shift may be more than
# one pixel in either direction because of the clipping operation above.
xshift = (xoffset - (xstamp-0.5))*args.pixel_scale
yshift = (yoffset - (ystamp-0.5))*args.pixel_scale
if args.verbose:
logger.info(' flux: %.3g electrons (%s-band AB %.1f)' % (flux,args.band,abMag))
logger.info(' bounds: [%d:%d,%d:%d] pixels' % (bbox.xmin,bbox.xmax,bbox.ymin,bbox.ymax))
logger.info(' shift: (%f,%f) arcsecs = (%f,%f) pixels' %
(xshift,yshift,xshift/args.pixel_scale,yshift/args.pixel_scale))
logger.info(' disk: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
(diskFlux/flux,hlr_d,q_d,pa_d))
logger.info(' bulge: frac = %f, hlr = %f arcsec, q = %f, beta = %f rad' %
(bulgeFlux/flux,hlr_b,q_b,pa_b))
logger.info(' agn: frac = %f' % (agnFluxNorm/flux))
logger.info(' bbox: disk (%.1f,%.1f) bulge (%.1f,%.1f) psf %.1f arcsec' %
(w_d,h_d,w_b,h_b,psfSize))
logger.info(' size: %.2f pixels' % size)
logger.info(' shear: (g1,g2) = (%.6f,%.6f)' % (e1,e2))
# Define the nominal source parameters for rendering this object within its stamp
params = {
'total_flux': diskFlux + bulgeFlux,
'f_d': diskFlux/(diskFlux+bulgeFlux), 'f_b': bulgeFlux/(diskFlux+bulgeFlux),
'x_d': xshift, 'y_d': yshift, 'hlr_d': hlr_d, 'q_d': q_d, 'beta_d': pa_d,
'x_b': xshift, 'y_b': yshift, 'hlr_b': hlr_b, 'q_b': q_b, 'beta_b': pa_b,
'dx': 0., 'dy': 0., 'relsize': 1., 'dbeta': 0.,
'g1': args.g1, 'g2': args.g2
}
# Create stamps for the galaxy with and w/o the psf applied
gal = createSource(**params)
if args.render_nopsf:
# We don't do this by default for speed
nopsf = createStamp(gal,None,pix,bbox)
else:
# Use an empty placeholder so we don't change the shape of the output
nopsf = galsim.ImageD(bbox)
nominal = createStamp(gal,psf,pix,bbox)
# Create a mask for pixels above threshold
mask = createMask(nominal,pixelCut,args)
if mask.array.sum() == 0:
# this stamp has no pixels above threshold
logger.info('*** line %d (id %d) is below threshold' % (lineno,entryID))
continue
trimmed = mask.bounds
if not args.no_trim and args.verbose:
logger.info(' trimmed: [%d:%d,%d:%d] pixels' %
(trimmed.xmin,trimmed.xmax,trimmed.ymin,trimmed.ymax))
# Add the nominal galaxy to the full field image after applying the threshold mask
# (the mask must be the second term in the product so that the result is double precision)
masked = nominal[trimmed]*mask
overlap = trimmed & field.bounds
if overlap.area() == 0:
# this stamp's mask falls completely outside our field
logger.info('*** line %d (id %d) does not overlap field' % (lineno,entryID))
continue
field[overlap] += masked[overlap]
# Calculate this object's nominal flux S/N ratio at full depth using only masked pixels.
# Note that this value cannot be reproduced from the saved stamp when a stamp is clipped
# to the field boundary (use --no-clip to disable this).
snr = signalToNoiseRatio(masked,args.exposure_time*skyRate)
if args.verbose:
logger.info(' S/N: %.6f' % snr)
# Initialize the datacube of stamps that we will save for this object
datacube = [ ]
# Save individual stamps in units of elec/sec
assert saveStamp(datacube,nopsf/args.exposure_time,trimmed,args)
assert saveStamp(datacube,nominal/args.exposure_time,trimmed,args)
assert saveStamp(datacube,mask,trimmed,args)
if args.partials:
# Specify the amount to vary each parameter for partial derivatives
# (we don't use a dictionary here since we want to control the order)
variations = [
('f_d',0.01), ('f_b',0.01),
('dx',args.pixel_scale/3.),('dy',args.pixel_scale/3.),
('relsize',0.05),
('g1',0.03), ('g2',0.03)
]
# loop over source parameters to vary
for (pname,delta) in variations:
# create stamps for each variation of this parameter
newparams = params.copy()
partial = galsim.ImageD(bbox)
partial.setScale(pix.getXWidth())
# delta might be zero, e.g., for hlr_b when bulge fraction = 0
if delta > 0:
for step in range(args.partials_order):
# create and save the positive variation stamp
newparams[pname] = params[pname] + (step+1)*delta
newsource = createSource(**newparams)
plus = createStamp(newsource,psf,pix,bbox)
# create and save the negative variation stamp
newparams[pname] = params[pname] - (step+1)*delta
newsource = createSource(**newparams)
minus = createStamp(newsource,psf,pix,bbox)
# update the finite difference calculation of this partial
partial += (fdCoefs[step]/delta)*(plus - minus)
# append this partial to our datacube
assert saveStamp(datacube,partial/args.exposure_time,trimmed,args)
# Add a new HDU with a datacube for this object's stamps
# We don't use compression = 'gzip_tile' for now since it is lossy
# and mathematica cannot Import it.
galsim.fits.writeCube(datacube, hdu_list = hduList)
# Add a catalog entry for this galaxy
entry = (entryID,xoffset,yoffset,abMag,flux/args.exposure_time,size,e1,e2,
bulgeFlux/(diskFlux+bulgeFlux),z,snr)
outputCatalog.append(entry)
nkeep += 1
logger.info("saved entry id %d as stamp %d" % (entryID,nkeep))
# Write the full field image without noise
if args.save_field:
# First without noise
outname = args.output + '_field.fits'
logger.info('Saving full field to %r' % outname)
galsim.fits.write(field,outname)
# Write the full field image with random noise added
if args.save_noise:
rng = galsim.BaseDeviate(123)
noise = galsim.PoissonNoise(rng,sky_level = args.exposure_time*skyRate)
field.addNoise(noise)
outname = args.output + '_noise.fits'
logger.info('Saving full field with noise added to %r' % outname)
galsim.fits.write(field,outname)
# Write the object stamp datacubes
if args.stamps:
outname = args.output + '_stamps.fits'
logger.info('Saving %d stamps to %r' % (nkeep,outname))
galsim.fits.writeFile(outname, hduList)
# Close the input catalog
cat.close()
# Write the output catalog from memory
outname = args.output + '_catalog.dat'
out = open(outname,'w')
for entry in outputCatalog:
print >>out, ' '.join(map(str,entry))
out.close()
logger.info('Wrote %d of %d catalog entries to %r' % (nkeep,lineno,outname))
if __name__ == "__main__":
main()
|
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver related activity. Health checks. requests."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import time
import traceback
import sys
# See if a third_party library exists -- use it if so.
try:
import third_party
except ImportError:
pass
# external dependencies (from third_party)
import dns.exception
import dns.query
import dns.message
import dns.name
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.reversename
import dns.resolver
import health_checks
import util
# Pick the most accurate timer for a platform. Stolen from timeit.py:
if sys.platform[:3] == 'win':
    # NOTE(review): time.clock is the Python 2-era high-resolution timer on
    # Windows; it was removed in Python 3.8 — fine for this py2 code base.
    DEFAULT_TIMER = time.clock
else:
    DEFAULT_TIMER = time.time

# How many failures before we disable system nameservers
MAX_SYSTEM_FAILURES = 4
# Error-rate percentage (see NameServer.error_rate) at or above which a
# nameserver is reported as error-prone.
ERROR_PRONE_RATE = 10
class NameServer(health_checks.NameServerHealthChecks):
"""Hold information about a particular nameserver."""
def __init__(self, ip, name=None, internal=False, preferred=False):
self.name = name
# We use _ for IPV6 representation in our configuration due to ConfigParser issues.
self.ip = ip.replace('_', ':')
self.is_system = internal
self.system_position = None
self.is_preferred = preferred
self.timeout = 10
self.health_timeout = 10
self.warnings = set()
self.shared_with = set()
self.disabled = False
self.checks = []
self.request_count = 0
self.error_count = 0
self.failed_test_count = 0
self.share_check_count = 0
self.cache_checks = []
self.is_slower_replica = False
self.timer = DEFAULT_TIMER
@property
def check_average(self):
return util.CalculateListAverage([x[3] for x in self.checks])
@property
def check_duration(self):
return sum([x[3] for x in self.checks])
@property
def failure(self):
failures = [x for x in self.checks if x[1]]
if failures:
return failures[0]
else:
return None
@property
def warnings_string(self):
if self.disabled:
return '(excluded: %s)' % self.disabled
else:
return ', '.join(map(str,self.warnings))
@property
def warnings_comment(self):
if self.warnings or self.disabled:
return '# ' + self.warnings_string
else:
return ''
@property
def hostname(self):
try:
answer = dns.resolver.query(dns.reversename.from_address(self.ip), 'PTR')
if answer:
return str(answer[0])
except:
return ''
@property
def is_error_prone(self):
if self.error_rate >= ERROR_PRONE_RATE:
return True
else:
return False
@property
def error_rate(self):
if not self.error_count or not self.request_count:
return 0
else:
return (float(self.error_count) / float(self.request_count)) * 100
def __str__(self):
return '%s [%s]' % (self.name, self.ip)
def __repr__(self):
return self.__str__()
def AddFailure(self, message):
"""Add a failure for this nameserver. This will effectively disable it's use."""
self.failed_test_count += 1
if self.is_system:
print "* System DNS fail #%s/%s: %s %s" % (self.failed_test_count, MAX_SYSTEM_FAILURES, self, message)
if self.failed_test_count >= MAX_SYSTEM_FAILURES:
print "\n* Disabling %s - %s failures" % (self, self.failed_test_count)
self.disabled = message
else:
self.disabled = message
if self.is_preferred:
print "\n* Failed test: %s %s" % (self, message)
def CreateRequest(self, record, request_type, return_type):
"""Function to work around any dnspython make_query quirks."""
return dns.message.make_query(record, request_type, return_type)
def Query(self, request, timeout):
return dns.query.udp(request, self.ip, timeout, 53)
def TimedRequest(self, type_string, record_string, timeout=None):
"""Make a DNS request, returning the reply and duration it took.
Args:
type_string: DNS record type to query (string)
record_string: DNS record name to query (string)
timeout: optional timeout (float)
Returns:
A tuple of (response, duration in ms [float], exception)
In the case of a DNS response timeout, the response object will be None.
"""
request_type = dns.rdatatype.from_text(type_string)
record = dns.name.from_text(record_string, None)
request = None
self.request_count += 1
# Sometimes it takes great effort just to craft a UDP packet.
try:
request = self.CreateRequest(record, request_type, dns.rdataclass.IN)
except ValueError, exc:
if not request:
return (None, 0, exc)
if not timeout:
timeout = self.timeout
exc = None
duration = None
try:
start_time = self.timer()
response = self.Query(request, timeout)
duration = self.timer() - start_time
except (dns.exception.Timeout), exc:
response = None
except (dns.query.BadResponse, dns.message.TrailingJunk,
dns.query.UnexpectedSource), exc:
response = None
except (KeyboardInterrupt, SystemExit, SystemError), exc:
raise exc
except:
(exc, error) = sys.exc_info()[0:2]
print "* Error with %s: %s (%s)" % (self, exc, error)
response = None
if not response:
self.error_count += 1
if not duration:
duration = self.timer() - start_time
return (response, util.SecondsToMilliseconds(duration), exc)
def ResponseToAscii(self, response):
if not response:
return None
if response.answer:
answers = [' + '.join(map(str, x.items)) for x in response.answer]
return ' -> '.join(answers)
else:
return dns.rcode.to_text(response.rcode())
Give preferred nameservers a chance to retry tests
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for all nameserver related activity. Health checks. requests."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import datetime
import socket
import sys
import time
import traceback
# See if a third_party library exists -- use it if so.
try:
import third_party
except ImportError:
pass
# external dependencies (from third_party)
import dns.exception
import dns.query
import dns.message
import dns.name
import dns.rcode
import dns.rdataclass
import dns.rdatatype
import dns.reversename
import dns.resolver
import health_checks
import util
# Pick the most accurate timer for a platform. Stolen from timeit.py:
if sys.platform[:3] == 'win':
    # NOTE(review): time.clock is the Python 2-era high-resolution timer on
    # Windows; it was removed in Python 3.8 — fine for this py2 code base.
    DEFAULT_TIMER = time.clock
else:
    DEFAULT_TIMER = time.time

# How many failures before we disable system nameservers
MAX_SYSTEM_FAILURES = 4
# Preferred nameservers get fewer retries than system ones before disabling.
MAX_PREFERRED_FAILURES = 2
# Error-rate percentage (see NameServer.error_rate) at or above which a
# nameserver is reported as error-prone.
ERROR_PRONE_RATE = 10
class NameServer(health_checks.NameServerHealthChecks):
    """Hold information about a particular nameserver."""

    def __init__(self, ip, name=None, internal=False, preferred=False):
        """Create a nameserver record.

        Args:
            ip: IP address (string). '_' is accepted in place of ':' for IPv6.
            name: human-readable name for the server (string or None)
            internal: True if this is a system-configured resolver
            preferred: True if this is a user-preferred resolver
        """
        self.name = name
        # We use _ for IPV6 representation in our configuration due to ConfigParser issues.
        self.ip = ip.replace('_', ':')
        self.is_system = internal
        self.system_position = None
        self.is_preferred = preferred
        self.timeout = 10
        self.health_timeout = 10
        self.warnings = set()
        self.shared_with = set()
        # False while healthy; set to the failure message string once disabled.
        self.disabled = False
        # Health-check records; index 1 holds a failure message, index 3 a
        # duration (see check_average / failure below).
        self.checks = []
        self.request_count = 0
        self.error_count = 0
        self.failed_test_count = 0
        self.share_check_count = 0
        self.cache_checks = []
        self.is_slower_replica = False
        self.timer = DEFAULT_TIMER
        # System/preferred servers get a failure budget before being disabled;
        # other servers are disabled on the first failure (max_failures=0).
        if self.is_system:
            self.max_failures = MAX_SYSTEM_FAILURES
        elif self.is_preferred:
            self.max_failures = MAX_PREFERRED_FAILURES
        else:
            self.max_failures = 0

    @property
    def check_average(self):
        # Mean duration (index 3) across all recorded health checks.
        return util.CalculateListAverage([x[3] for x in self.checks])

    @property
    def check_duration(self):
        # Total time spent on health checks.
        return sum([x[3] for x in self.checks])

    @property
    def failure(self):
        # First check whose failure slot (index 1) is truthy, else None.
        failures = [x for x in self.checks if x[1]]
        if failures:
            return failures[0]
        else:
            return None

    @property
    def warnings_string(self):
        if self.disabled:
            return '(excluded: %s)' % self.disabled
        else:
            return ', '.join(map(str, self.warnings))

    @property
    def warnings_comment(self):
        if self.warnings or self.disabled:
            return '# ' + self.warnings_string
        else:
            return ''

    @property
    def hostname(self):
        """Reverse-resolve our IP to a PTR hostname; '' on lookup failure."""
        try:
            answer = dns.resolver.query(dns.reversename.from_address(self.ip), 'PTR')
            if answer:
                return str(answer[0])
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit
            # here; a falsy 'answer' returns None rather than '' — confirm callers
            # tolerate None.
            return ''

    @property
    def is_error_prone(self):
        if self.error_rate >= ERROR_PRONE_RATE:
            return True
        else:
            return False

    @property
    def error_rate(self):
        # Percentage of requests that errored; 0 before any requests are made.
        if not self.error_count or not self.request_count:
            return 0
        else:
            return (float(self.error_count) / float(self.request_count)) * 100

    def __str__(self):
        return '%s [%s]' % (self.name, self.ip)

    def __repr__(self):
        return self.__str__()

    def AddFailure(self, message):
        """Add a failure for this nameserver. This will effectively disable it's use."""
        self.failed_test_count += 1
        if self.is_system or self.is_preferred:
            print "\n* %s failed test #%s/%s: %s" % (self, self.failed_test_count, self.max_failures, message)
        if self.failed_test_count >= self.max_failures:
            # Disabling is done by storing the offending message in self.disabled.
            self.disabled = message

    def CreateRequest(self, record, request_type, return_type):
        """Function to work around any dnspython make_query quirks."""
        return dns.message.make_query(record, request_type, return_type)

    def Query(self, request, timeout):
        # Plain UDP query directly to port 53.
        return dns.query.udp(request, self.ip, timeout, 53)

    def TimedRequest(self, type_string, record_string, timeout=None):
        """Make a DNS request, returning the reply and duration it took.

        Args:
            type_string: DNS record type to query (string)
            record_string: DNS record name to query (string)
            timeout: optional timeout (float)

        Returns:
            A tuple of (response, duration in ms [float], error_msg)

        In the case of a DNS response timeout, the response object will be None.
        """
        request_type = dns.rdatatype.from_text(type_string)
        record = dns.name.from_text(record_string, None)
        request = None
        self.request_count += 1
        # Sometimes it takes great effort just to craft a UDP packet.
        try:
            request = self.CreateRequest(record, request_type, dns.rdataclass.IN)
        except ValueError, exc:
            if not request:
                return (None, 0, util.GetLastExceptionString())
        if not timeout:
            timeout = self.timeout
        error_msg = None
        exc = None
        duration = None
        try:
            start_time = self.timer()
            response = self.Query(request, timeout)
            duration = self.timer() - start_time
        except (dns.exception.Timeout), exc:
            response = None
        except (dns.query.BadResponse, dns.message.TrailingJunk,
                dns.query.UnexpectedSource, socket.error), exc:
            response = None
        except (KeyboardInterrupt, SystemExit, SystemError), exc:
            # Never swallow interpreter-level exits.
            raise exc
        except:
            error_msg = util.GetLastExceptionString()
            print "* Unusual error with %s: %s" % (self, error_msg)
            response = None
        if not response:
            self.error_count += 1
        if not duration:
            # Query raised before duration was computed; measure up to now.
            duration = self.timer() - start_time
        if exc and not error_msg:
            error_msg = util.GetLastExceptionString()
        return (response, util.SecondsToMilliseconds(duration), error_msg)

    def ResponseToAscii(self, response):
        # Render answer rrsets as 'a + b -> c'; fall back to rcode text.
        if not response:
            return None
        if response.answer:
            answers = [' + '.join(map(str, x.items)) for x in response.answer]
            return ' -> '.join(answers)
        else:
            return dns.rcode.to_text(response.rcode())
|
import sys
import datetime
import logging
import json
import subprocess
import socket
import time
import os
import requests
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from twisted.enterprise import adbapi
from collections import defaultdict, deque
from zope.interface import implements
from decimal import Decimal
from googlefinance import getQuotes
from lbryum import SimpleConfig, Network
from lbryum.lbrycrd import COIN, TYPE_ADDRESS
from lbryum.wallet import WalletStorage, Wallet
from lbryum.commands import known_commands, Commands
from lbryum.transaction import Transaction
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet.conf import SOURCE_TYPES
from lbrynet.core.LBRYMetadata import Metadata
log = logging.getLogger(__name__)
alert = logging.getLogger("lbryalert." + __name__)
class ReservedPoints(object):
    """A record of points reserved for a future payment.

    Attributes:
        identifier: the peer or address the reservation is for
        amount: number of points reserved (a Decimal in practice)
    """

    def __init__(self, identifier, amount):
        self.identifier = identifier
        self.amount = amount

    def __repr__(self):
        # Added for debuggability: reservations otherwise print as bare objects.
        return "ReservedPoints(%r, %r)" % (self.identifier, self.amount)
def _catch_connection_error(f):
def w(*args):
try:
return f(*args)
except socket.error:
raise ValueError("Unable to connect to an lbrycrd server. Make sure an lbrycrd server " +
"is running and that this application can connect to it.")
return w
class LBRYWallet(object):
    """This class implements the LBRYWallet interface for the LBRYcrd payment system"""
    implements(ILBRYWallet)

    # Tri-state flag for whether this wallet has ever been used before.
    _FIRST_RUN_UNKNOWN = 0
    _FIRST_RUN_YES = 1
    _FIRST_RUN_NO = 2

    def __init__(self, db_dir):
        self.db_dir = db_dir
        self.db = None
        self.next_manage_call = None
        self.wallet_balance = Decimal(0.0)
        self.total_reserved_points = Decimal(0.0)
        self.peer_addresses = {}  # {Peer: string}
        self.queued_payments = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.expected_balances = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.current_address_given_to_peer = {}  # {Peer: address(string)}
        self.expected_balance_at_time = deque()  # (Peer, address(string), amount(Decimal), time(datetime), count(int),
                                                 # incremental_amount(float))
        self.max_expected_payment_time = datetime.timedelta(minutes=3)
        self.stopped = True
        self.is_lagging = None
        self.manage_running = False
        self._manage_count = 0
        # Seconds between periodic manage() runs.
        self._balance_refresh_time = 3
        # Every _batch_count-th manage() run does the full balance/payment pass.
        self._batch_count = 20
        self._first_run = self._FIRST_RUN_UNKNOWN

    def start(self):
        """Open the database, run the subclass _start() hook, then begin the
        periodic manage() loop. Returns a Deferred firing True on success."""
        def start_manage():
            self.stopped = False
            self.manage()
            return True

        d = self._open_db()
        d.addCallback(lambda _: self._start())
        d.addCallback(lambda _: start_manage())
        return d

    @staticmethod
    def log_stop_error(err):
        log.error("An error occurred stopping the wallet: %s", err.getTraceback())

    def stop(self):
        """Cancel the periodic manage() loop, run one final full manage pass,
        then run the subclass _stop() hook."""
        self.stopped = True
        # If self.next_manage_call is None, then manage is currently running or else
        # start has not been called, so set stopped and do nothing else.
        if self.next_manage_call is not None:
            self.next_manage_call.cancel()
            self.next_manage_call = None

        d = self.manage(do_full=True)
        d.addErrback(self.log_stop_error)
        d.addCallback(lambda _: self._stop())
        d.addErrback(self.log_stop_error)
        return d

    def manage(self, do_full=False):
        """Periodic maintenance: refresh the balance, and (on full runs)
        verify expected incoming payments and flush queued outgoing payments.
        Reschedules itself unless the wallet is stopped."""
        self.next_manage_call = None
        # Mutable cell so nested closures can record whether WE set the
        # manage_running flag (and therefore must clear it).
        have_set_manage_running = [False]
        self._manage_count += 1
        if self._manage_count % self._batch_count == 0:
            self._manage_count = 0
            do_full = True

        def check_if_manage_running():
            # Fires True once we acquire the "running" flag; fires False to
            # skip a non-full run if another manage() is already in progress.
            d = defer.Deferred()

            def fire_if_not_running():
                if self.manage_running is False:
                    self.manage_running = True
                    have_set_manage_running[0] = True
                    d.callback(True)
                elif do_full is False:
                    d.callback(False)
                else:
                    # Full runs must not be skipped: poll again in a second.
                    task.deferLater(reactor, 1, fire_if_not_running)

            fire_if_not_running()
            return d

        d = check_if_manage_running()

        def do_manage():
            if do_full:
                d = self._check_expected_balances()
                d.addCallback(lambda _: self._send_payments())
            else:
                d = defer.succeed(True)

            d.addCallback(lambda _: self.get_balance())

            def set_wallet_balance(balance):
                if self.wallet_balance != balance:
                    log.info("Got a new balance: %s", str(balance))
                self.wallet_balance = balance

            d.addCallback(set_wallet_balance)
            return d

        d.addCallback(lambda should_run: do_manage() if should_run else None)

        def set_next_manage_call():
            if not self.stopped:
                self.next_manage_call = reactor.callLater(self._balance_refresh_time, self.manage)

        d.addCallback(lambda _: set_next_manage_call())

        def log_error(err):
            log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
            return err

        d.addErrback(log_error)

        def set_manage_not_running(arg):
            # Only release the flag if this invocation acquired it.
            if have_set_manage_running[0] is True:
                self.manage_running = False
            return arg

        d.addBoth(set_manage_not_running)
        return d

    def get_info_exchanger(self):
        return LBRYcrdAddressRequester(self)

    def get_wallet_info_query_handler_factory(self):
        return LBRYcrdAddressQueryHandlerFactory(self)

    def reserve_points(self, identifier, amount):
        """
        Ensure a certain amount of points are available to be sent as payment, before the service is rendered

        @param identifier: The peer to which the payment will ultimately be sent

        @param amount: The amount of points to reserve

        @return: A ReservedPoints object which is given to send_points once the service has been rendered,
            or None if the balance cannot cover the reservation
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        #if peer in self.peer_addresses:
        if self.wallet_balance >= self.total_reserved_points + rounded_amount:
            self.total_reserved_points += rounded_amount
            return ReservedPoints(identifier, rounded_amount)
        return None

    def cancel_point_reservation(self, reserved_points):
        """
        Return all of the points that were reserved previously for some ReservedPoints object

        @param reserved_points: ReservedPoints previously returned by reserve_points

        @return: None
        """
        self.total_reserved_points -= reserved_points.amount

    def send_points(self, reserved_points, amount):
        """
        Schedule a payment to be sent to a peer

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send, must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        peer = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        assert(peer in self.peer_addresses)
        self.queued_payments[self.peer_addresses[peer]] += rounded_amount
        # make any unused points available
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("ordering that %s points be sent to %s", str(rounded_amount),
                 str(self.peer_addresses[peer]))
        peer.update_stats('points_sent', amount)
        return defer.succeed(True)

    def send_points_to_address(self, reserved_points, amount):
        """
        Schedule a payment to be sent to an address

        @param reserved_points: ReservedPoints object previously returned by reserve_points

        @param amount: amount of points to actually send. must be less than or equal to the
            amount reserved in reserved_points

        @return: Deferred which fires when the payment has been scheduled
        """
        rounded_amount = Decimal(str(round(amount, 8)))
        address = reserved_points.identifier
        assert(rounded_amount <= reserved_points.amount)
        self.queued_payments[address] += rounded_amount
        # Release any points reserved beyond what is actually being sent.
        self.total_reserved_points -= (reserved_points.amount - rounded_amount)
        log.info("Ordering that %s points be sent to %s", str(rounded_amount),
                 str(address))
        return defer.succeed(True)

    def add_expected_payment(self, peer, amount):
        """Increase the number of points expected to be paid by a peer"""
        rounded_amount = Decimal(str(round(amount, 8)))
        assert(peer in self.current_address_given_to_peer)
        address = self.current_address_given_to_peer[peer]
        log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
        self.expected_balances[address] += rounded_amount
        expected_balance = self.expected_balances[address]
        expected_time = datetime.datetime.now() + self.max_expected_payment_time
        self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount))
        peer.update_stats('expected_points', amount)

    def update_peer_address(self, peer, address):
        self.peer_addresses[peer] = address

    def get_new_address_for_peer(self, peer):
        # Remember which address was handed to this peer so incoming payments
        # can be attributed (see add_expected_payment).
        def set_address_for_peer(address):
            self.current_address_given_to_peer[peer] = address
            return address

        d = self.get_new_address()
        d.addCallback(set_address_for_peer)
        return d

    def _send_payments(self):
        """Flush all queued payments in a single many-output transaction."""
        payments_to_send = {}
        # Python 2 items() returns a list copy, so deleting while iterating is safe.
        for address, points in self.queued_payments.items():
            log.info("Should be sending %s points to %s", str(points), str(address))
            payments_to_send[address] = points
            self.total_reserved_points -= points
            self.wallet_balance -= points
            del self.queued_payments[address]
        if payments_to_send:
            log.info("Creating a transaction with outputs %s", str(payments_to_send))
            d = self._do_send_many(payments_to_send)
            d.addCallback(lambda txid: log.debug("Sent transaction %s", txid))
            return d
        log.info("There were no payments to send")
        return defer.succeed(True)

    def get_stream_info_for_name(self, name):
        d = self._get_value_for_name(name)
        d.addCallback(self._get_stream_info_from_value, name)
        return d

    def get_txid_for_name(self, name):
        d = self._get_value_for_name(name)
        d.addCallback(lambda r: None if 'txid' not in r else r['txid'])
        return d

    def get_stream_info_from_txid(self, name, txid):
        d = self.get_claims_from_tx(txid)

        def get_claim_for_name(claims):
            for claim in claims:
                if claim['name'] == name:
                    claim['txid'] = txid
                    return claim
            return Failure(UnknownNameError(name))

        d.addCallback(get_claim_for_name)
        d.addCallback(self._get_stream_info_from_value, name)
        return d

    def _get_stream_info_from_value(self, result, name):
        """Validate a name-lookup result and turn it into a Metadata object,
        caching the name->txid->sd_hash mapping along the way."""
        def _check_result_fields(r):
            for k in ['value', 'txid', 'n', 'height', 'amount']:
                assert k in r, "getvalueforname response missing field %s" % k

        def _log_success(claim_id):
            log.info("lbry://%s complies with %s, claimid: %s" % (name, metadata.meta_version, claim_id))
            return defer.succeed(None)

        if 'error' in result:
            log.warning("Got an error looking up a name: %s", result['error'])
            return Failure(UnknownNameError(name))
        _check_result_fields(result)
        try:
            metadata = Metadata(json.loads(result['value']))
        except (ValueError, TypeError):
            return Failure(InvalidStreamInfoError(name))
        txid = result['txid']
        sd_hash = metadata['sources']['lbry_sd_hash']
        d = self._save_name_metadata(name, txid, sd_hash)
        d.addCallback(lambda _: self.get_claimid(name, txid))
        d.addCallback(lambda cid: _log_success(cid))
        d.addCallback(lambda _: metadata)
        return d

    def _get_claim_info(self, result, name, force_good_metadata=True):
        """Like _get_stream_info_from_value, but returns the full claim dict
        (with 'value' replaced by parsed metadata and 'claim_id' added).
        When force_good_metadata is False the raw value is passed through
        without Metadata validation."""
        def _check_result_fields(r):
            for k in ['value', 'txid', 'n', 'height', 'amount']:
                assert k in r, "getvalueforname response missing field %s" % k

        def _build_response(m, result, claim_id):
            result['value'] = m
            result['claim_id'] = claim_id
            log.info("lbry://%s complies with %s, claimid: %s",
                     name,
                     m.meta_version if force_good_metadata else "not checked",
                     claim_id)
            return result

        if 'error' in result:
            log.warning("Got an error looking up a name: %s", result['error'])
            return Failure(UnknownNameError(name))
        _check_result_fields(result)
        txid = result['txid']
        if force_good_metadata:
            try:
                metadata = Metadata(json.loads(result['value']))
            except (ValueError, TypeError):
                return Failure(InvalidStreamInfoError(name))
            sd_hash = metadata['sources']['lbry_sd_hash']
            d = self._save_name_metadata(name, txid, sd_hash)
            d.addCallback(lambda _: self.get_claimid(name, txid))
        else:
            metadata = result['value']
            d = self.get_claimid(name, txid)
        d.addCallback(lambda claim_id: _build_response(metadata, result, claim_id))
        return d

    def get_claimid(self, name, txid):
        """Return the claimId for (name, txid), consulting the local cache
        first and falling back to the blockchain (then caching the answer)."""
        def _get_id_for_return(claim_id):
            if claim_id:
                return defer.succeed(claim_id)
            else:
                d = self.get_claims_from_tx(txid)
                d.addCallback(lambda claims: next(c['claimId'] for c in claims if c['name'] == name))
                d.addCallback(lambda cid: self._update_claimid(cid, name, txid))
                return d

        d = self._get_claimid_for_tx(name, txid)
        d.addCallback(_get_id_for_return)
        return d

    def get_claim_info(self, name, force_good_metadata=True, is_mine=False):
        """Look up claim info for a name; returns False on any lookup error.
        With is_mine=True, also returns False unless the claim's txid appears
        in this wallet's own name claims."""
        def _filter_my_claims(claim):
            d = self.get_name_claims()
            d.addCallback(lambda my_claims: claim if claim['txid'] in [c['txid'] for c in my_claims] else False)
            return d

        d = self._get_value_for_name(name)
        d.addCallback(lambda r: self._get_claim_info(r, name, force_good_metadata))
        d.addErrback(lambda _: False)
        if is_mine:
            d.addCallback(lambda claim: _filter_my_claims(claim) if claim is not False else False)
        return d

    def update_metadata(self, new_metadata, old_metadata):
        # Overlay new keys on top of the old metadata dict (if any) and
        # re-validate the merge as a Metadata object.
        meta_for_return = old_metadata if isinstance(old_metadata, dict) else {}
        for k in new_metadata:
            meta_for_return[k] = new_metadata[k]
        return Metadata(meta_for_return)

    def claim_name(self, name, bid, m):
        """Claim (or update our existing claim on) a name with metadata m and
        bid amount; returns a Deferred firing the resulting txid."""
        def _save_metadata(txid):
            log.info("Saving metadata for claim %s" % txid)
            d = self._save_name_metadata(name, txid, metadata['sources']['lbry_sd_hash'])
            d.addCallback(lambda _: txid)
            return d

        metadata = Metadata(m)
        d = self.get_claim_info(name, force_good_metadata=False, is_mine=True)
        # If we already own a claim on this name, update it; otherwise make a
        # fresh claim.
        d.addCallback(lambda r: self.update_name(
                                    name,
                                    r['txid'],
                                    json.dumps(self.update_metadata(metadata, r['value'])),
                                    bid
                                )
                                if r else self._send_name_claim(
                                    name,
                                    json.dumps(metadata),
                                    bid
                                )
                      )
        d.addCallback(_save_metadata)
        return d

    def abandon_name(self, txid):
        """Abandon our claim in txid, sending its full value back to a fresh
        address of ours."""
        d1 = self.get_new_address()
        d2 = self.get_claims_from_tx(txid)

        def get_txout_of_claim(claims):
            for claim in claims:
                if 'name' in claim and 'nOut' in claim:
                    return claim['nOut']
            return defer.fail(ValueError("No claims in tx"))

        def get_value_of_txout(nOut):
            d = self._get_raw_tx(txid)
            d.addCallback(self._get_decoded_tx)
            d.addCallback(lambda tx: tx['vout'][nOut]['value'])
            return d

        d2.addCallback(get_txout_of_claim)
        d2.addCallback(get_value_of_txout)
        dl = defer.DeferredList([d1, d2], consumeErrors=True)

        def abandon(results):
            if results[0][0] and results[1][0]:
                address = results[0][1]
                amount = float(results[1][1])
                return self._send_abandon(txid, address, amount)
            elif results[0][0] is False:
                return defer.fail(Failure(ValueError("Couldn't get a new address")))
            else:
                # Propagate the claim-lookup failure.
                return results[1][1]

        dl.addCallback(abandon)
        return dl

    def get_tx(self, txid):
        d = self._get_raw_tx(txid)
        d.addCallback(self._get_decoded_tx)
        return d

    def update_name(self, name, txid, value, amount):
        d = self._update_name(name, txid, value, amount)
        return d

    def get_name_and_validity_for_sd_hash(self, sd_hash):
        # Returns (name, status) per _get_status_of_claim, or None if the
        # sd_hash is not in the local name_metadata cache.
        d = self._get_claim_metadata_for_sd_hash(sd_hash)
        d.addCallback(lambda name_txid: self._get_status_of_claim(name_txid[1], name_txid[0], sd_hash) if name_txid is not None else None)
        return d

    def get_available_balance(self):
        return float(self.wallet_balance - self.total_reserved_points)

    def is_first_run(self):
        """Deferred bool: has this wallet never been used before? The answer
        is computed once and cached in self._first_run."""
        if self._first_run == self._FIRST_RUN_UNKNOWN:
            d = self._check_first_run()

            def set_first_run(is_first):
                self._first_run = self._FIRST_RUN_YES if is_first else self._FIRST_RUN_NO

            d.addCallback(set_first_run)
        else:
            d = defer.succeed(self._FIRST_RUN_YES if self._first_run else self._FIRST_RUN_NO)
        d.addCallback(lambda _: self._first_run == self._FIRST_RUN_YES)
        return d

    def _get_status_of_claim(self, txid, name, sd_hash):
        """Deferred (name, status) where status is one of 'valid', 'invalid',
        'pending', 'unconfirmed'; None if no matching claim is found."""
        d = self.get_claims_from_tx(txid)

        def get_status(claims):
            if claims is None:
                claims = []
            for claim in claims:
                if 'in claim trie' in claim:
                    if 'name' in claim and str(claim['name']) == name and 'value' in claim:
                        try:
                            value_dict = json.loads(claim['value'])
                        except (ValueError, TypeError):
                            return None
                        claim_sd_hash = None
                        if 'stream_hash' in value_dict:
                            claim_sd_hash = str(value_dict['stream_hash'])
                        # NOTE(review): key mismatch — tests for 'lbrynet_sd_hash'
                        # but reads 'lbry_sd_hash'; confirm which key the chain
                        # actually stores, this looks like a latent KeyError.
                        if 'sources' in value_dict and 'lbrynet_sd_hash' in value_dict['sources']:
                            claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
                        if claim_sd_hash is not None and claim_sd_hash == sd_hash:
                            if 'is controlling' in claim and claim['is controlling']:
                                return name, "valid"
                            if claim['in claim trie']:
                                return name, "invalid"
                            if 'in queue' in claim and claim['in queue']:
                                return name, "pending"
                            return name, "unconfirmed"
            return None

        d.addCallback(get_status)
        return d

    def _check_expected_balances(self):
        """Check addresses whose expected-payment deadline has passed, and
        adjust peer scores based on whether the payment arrived."""
        now = datetime.datetime.now()
        balances_to_check = []
        try:
            # Deque is in append order, so deadlines are non-decreasing.
            while self.expected_balance_at_time[0][3] < now:
                balances_to_check.append(self.expected_balance_at_time.popleft())
        except IndexError:
            pass
        ds = []
        for balance_to_check in balances_to_check:
            log.info("Checking balance of address %s", str(balance_to_check[1]))
            d = self._get_balance_for_address(balance_to_check[1])
            # NOTE(review): this lambda closes over the loop variable
            # 'balance_to_check' late-bound; if the deferred fires after the
            # loop completes, every callback compares against the LAST entry.
            # Classic fix is a default arg (b=balance_to_check) — confirm.
            d.addCallback(lambda bal: bal >= balance_to_check[2])
            ds.append(d)
        dl = defer.DeferredList(ds)

        def handle_checks(results):
            from future_builtins import zip
            for balance, (success, result) in zip(balances_to_check, results):
                peer = balance[0]
                if success is True:
                    if result is False:
                        if balance[4] <= 1:  # first or second strike, give them another chance
                            new_expected_balance = (balance[0],
                                                    balance[1],
                                                    balance[2],
                                                    datetime.datetime.now() + self.max_expected_payment_time,
                                                    balance[4] + 1,
                                                    balance[5])
                            self.expected_balance_at_time.append(new_expected_balance)
                            peer.update_score(-5.0)
                        else:
                            peer.update_score(-50.0)
                    else:
                        if balance[4] == 0:
                            peer.update_score(balance[5])
                        peer.update_stats('points_received', balance[5])
                else:
                    log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                                "expected balance: %s, expected time: %s, count: %s, error: %s",
                                str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                                str(balance[4]), str(result.getErrorMessage()))

        dl.addCallback(handle_checks)
        return dl

    def _open_db(self):
        """Open (creating if needed) the local blockchainname.db sqlite cache
        of name metadata and claim ids."""
        self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
                                        check_same_thread=False)

        def create_tables(transaction):
            transaction.execute("create table if not exists name_metadata (" +
                                "    name text, " +
                                "    txid text, " +
                                "    sd_hash text)")
            transaction.execute("create table if not exists claim_ids (" +
                                "    claimId text, " +
                                "    name text, " +
                                "    txid text)")

        return self.db.runInteraction(create_tables)

    def _save_name_metadata(self, name, txid, sd_hash):
        # Delete-then-insert keeps at most one row per (name, txid, sd_hash).
        d = self.db.runQuery("delete from name_metadata where name=? and txid=? and sd_hash=?", (name, txid, sd_hash))
        d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?)", (name, txid, sd_hash)))
        return d

    def _get_claim_metadata_for_sd_hash(self, sd_hash):
        d = self.db.runQuery("select name, txid from name_metadata where sd_hash=?", (sd_hash,))
        d.addCallback(lambda r: r[0] if r else None)
        return d

    def _update_claimid(self, claim_id, name, txid):
        d = self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=?", (claim_id, name, txid))
        d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?)", (claim_id, name, txid)))
        d.addCallback(lambda _: claim_id)
        return d

    def _get_claimid_for_tx(self, name, txid):
        d = self.db.runQuery("select claimId from claim_ids where name=? and txid=?", (name, txid))
        d.addCallback(lambda r: r[0][0] if r else None)
        return d

    ######### Must be overridden #########

    def get_balance(self):
        return defer.fail(NotImplementedError())

    def get_new_address(self):
        return defer.fail(NotImplementedError())

    def get_block(self, blockhash):
        return defer.fail(NotImplementedError())

    def get_most_recent_blocktime(self):
        return defer.fail(NotImplementedError())

    def get_best_blockhash(self):
        return defer.fail(NotImplementedError())

    def get_name_claims(self):
        return defer.fail(NotImplementedError())

    def _check_first_run(self):
        return defer.fail(NotImplementedError())

    def _get_raw_tx(self, txid):
        return defer.fail(NotImplementedError())

    def _send_name_claim(self, name, val, amount):
        return defer.fail(NotImplementedError())

    def _get_decoded_tx(self, raw_tx):
        return defer.fail(NotImplementedError())

    def _send_abandon(self, txid, address, amount):
        return defer.fail(NotImplementedError())

    def _update_name(self, name, txid, value, amount):
        return defer.fail(NotImplementedError())

    def _do_send_many(self, payments_to_send):
        return defer.fail(NotImplementedError())

    def _get_value_for_name(self, name):
        return defer.fail(NotImplementedError())

    def get_claims_from_tx(self, txid):
        return defer.fail(NotImplementedError())

    def _get_balance_for_address(self, address):
        return defer.fail(NotImplementedError())

    def _start(self):
        # Subclass hook run during start(); default is a no-op.
        pass

    def _stop(self):
        # Subclass hook run during stop(); default is a no-op.
        pass
class LBRYcrdWallet(LBRYWallet):
def __init__(self, db_dir, wallet_dir=None, wallet_conf=None, lbrycrdd_path=None):
LBRYWallet.__init__(self, db_dir)
self.started_lbrycrdd = False
self.wallet_dir = wallet_dir
self.wallet_conf = wallet_conf
self.lbrycrdd = None
self.lbrycrdd_path = lbrycrdd_path
settings = self._get_rpc_conf()
rpc_user = settings["username"]
rpc_pass = settings["password"]
rpc_port = settings["rpc_port"]
rpc_url = "127.0.0.1"
self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port))
def _start(self):
return threads.deferToThread(self._make_connection)
def _stop(self):
if self.lbrycrdd_path is not None:
return self._stop_daemon()
def _make_connection(self):
alert.info("Connecting to lbrycrdd...")
if self.lbrycrdd_path is not None:
self._start_daemon()
self._get_info_rpc()
log.info("Connected!")
alert.info("Connected to lbrycrdd.")
def _get_rpc_conf(self):
settings = {"username": "rpcuser",
"password": "rpcpassword",
"rpc_port": 9245}
if self.wallet_conf and os.path.exists(self.wallet_conf):
conf = open(self.wallet_conf)
for l in conf:
if l.startswith("rpcuser="):
settings["username"] = l[8:].rstrip('\n')
if l.startswith("rpcpassword="):
settings["password"] = l[12:].rstrip('\n')
if l.startswith("rpcport="):
settings["rpc_port"] = int(l[8:].rstrip('\n'))
return settings
def _check_first_run(self):
d = self.get_balance()
d.addCallback(lambda bal: threads.deferToThread(self._get_num_addresses_rpc) if bal == 0 else 2)
d.addCallback(lambda num_addresses: True if num_addresses <= 1 else False)
return d
def get_new_address(self):
return threads.deferToThread(self._get_new_address_rpc)
def get_balance(self):
return threads.deferToThread(self._get_wallet_balance_rpc)
def get_most_recent_blocktime(self):
d = threads.deferToThread(self._get_best_blockhash_rpc)
d.addCallback(lambda blockhash: threads.deferToThread(self._get_block_rpc, blockhash))
d.addCallback(
lambda block: block['time'] if 'time' in block else Failure(ValueError("Could not get a block time")))
return d
def get_name_claims(self):
return threads.deferToThread(self._get_name_claims_rpc)
def get_block(self, blockhash):
return threads.deferToThread(self._get_block_rpc, blockhash)
def get_best_blockhash(self):
d = threads.deferToThread(self._get_blockchain_info_rpc)
d.addCallback(lambda blockchain_info: blockchain_info['bestblockhash'])
return d
def get_nametrie(self):
return threads.deferToThread(self._get_nametrie_rpc)
def start_miner(self):
d = threads.deferToThread(self._get_gen_status_rpc)
d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, True) if not status
else "Miner was already running")
return d
def stop_miner(self):
d = threads.deferToThread(self._get_gen_status_rpc)
d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, False) if status
else "Miner wasn't running")
return d
def get_miner_status(self):
return threads.deferToThread(self._get_gen_status_rpc)
def _get_balance_for_address(self, address):
return threads.deferToThread(self._get_balance_for_address_rpc, address)
def _do_send_many(self, payments_to_send):
outputs = {address: float(points) for address, points in payments_to_send.iteritems()}
return threads.deferToThread(self._do_send_many_rpc, outputs)
def _send_name_claim(self, name, value, amount):
return threads.deferToThread(self._send_name_claim_rpc, name, value, amount)
def _get_raw_tx(self, txid):
return threads.deferToThread(self._get_raw_tx_rpc, txid)
def _get_decoded_tx(self, raw_tx):
return threads.deferToThread(self._get_decoded_tx_rpc, raw_tx)
def _send_abandon(self, txid, address, amount):
return threads.deferToThread(self._send_abandon_rpc, txid, address, amount)
def _update_name(self, name, txid, value, amount):
return threads.deferToThread(self._update_name_rpc, txid, value, amount)
def get_claims_from_tx(self, txid):
return threads.deferToThread(self._get_claims_from_tx_rpc, txid)
def _get_value_for_name(self, name):
return threads.deferToThread(self._get_value_for_name_rpc, name)
def _get_rpc_conn(self):
return AuthServiceProxy(self.rpc_conn_string)
def _start_daemon(self):
tries = 0
try:
rpc_conn = self._get_rpc_conn()
try:
rpc_conn.getinfo()
except ValueError:
log.exception('Failed to get rpc info. Rethrowing with a hopefully more useful error message')
raise Exception('Failed to get rpc info from lbrycrdd. Try restarting lbrycrdd')
log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
return
except (socket.error, JSONRPCException):
tries += 1
log.info("lbrcyrdd was not running when LBRYcrdWallet was started. Attempting to start it.")
try:
if os.name == "nt":
si = subprocess.STARTUPINFO
si.dwFlags = subprocess.STARTF_USESHOWWINDOW
si.wShowWindow = subprocess.SW_HIDE
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf], startupinfo=si)
else:
if sys.platform == 'darwin':
os.chdir("/Applications/LBRY.app/Contents/Resources")
self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
"-conf=%s" % self.wallet_conf])
self.started_lbrycrdd = True
except OSError:
import traceback
log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)
while tries < 6:
try:
rpc_conn = self._get_rpc_conn()
rpc_conn.getinfo()
break
except (socket.error, JSONRPCException):
tries += 1
log.warning("Failed to connect to lbrycrdd.")
if tries < 6:
time.sleep(2 ** tries)
log.warning("Trying again in %d seconds", 2 ** tries)
else:
log.warning("Giving up.")
else:
self.lbrycrdd.terminate()
raise ValueError("Couldn't open lbrycrdd")
def _stop_daemon(self):
if self.lbrycrdd is not None and self.started_lbrycrdd is True:
alert.info("Stopping lbrycrdd...")
d = threads.deferToThread(self._stop_rpc)
d.addCallback(lambda _: alert.info("Stopped lbrycrdd."))
return d
return defer.succeed(True)
@_catch_connection_error
def _get_balance_for_address_rpc(self, address):
rpc_conn = self._get_rpc_conn()
balance = rpc_conn.getreceivedbyaddress(address)
log.debug("received balance for %s: %s", str(address), str(balance))
return balance
@_catch_connection_error
def _do_send_many_rpc(self, payments):
rpc_conn = self._get_rpc_conn()
return rpc_conn.sendmany("", payments)
@_catch_connection_error
def _get_info_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getinfo()
@_catch_connection_error
def _get_name_claims_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.listnameclaims()
@_catch_connection_error
def _get_gen_status_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getgenerate()
@_catch_connection_error
def _set_gen_status_rpc(self, b):
if b:
log.info("Starting miner")
else:
log.info("Stopping miner")
rpc_conn = self._get_rpc_conn()
return rpc_conn.setgenerate(b)
@_catch_connection_error
def _get_raw_tx_rpc(self, txid):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getrawtransaction(txid)
@_catch_connection_error
def _get_decoded_tx_rpc(self, raw):
rpc_conn = self._get_rpc_conn()
return rpc_conn.decoderawtransaction(raw)
@_catch_connection_error
def _send_abandon_rpc(self, txid, address, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.abandonclaim(txid, address, amount)
@_catch_connection_error
def _get_blockchain_info_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getblockchaininfo()
@_catch_connection_error
def _get_block_rpc(self, blockhash):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getblock(blockhash)
@_catch_connection_error
def _get_claims_from_tx_rpc(self, txid):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getclaimsfortx(txid)
@_catch_connection_error
def _get_nametrie_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getclaimtrie()
@_catch_connection_error
def _get_wallet_balance_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getbalance("")
@_catch_connection_error
def _get_new_address_rpc(self):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getnewaddress()
@_catch_connection_error
def _get_value_for_name_rpc(self, name):
rpc_conn = self._get_rpc_conn()
return rpc_conn.getvalueforname(name)
@_catch_connection_error
def _update_name_rpc(self, txid, value, amount):
rpc_conn = self._get_rpc_conn()
return rpc_conn.updateclaim(txid, value, amount)
@_catch_connection_error
def _send_name_claim_rpc(self, name, value, amount):
    """Claim `name` with `value` for `amount` LBC via lbrycrdd `claimname`.

    Returns the resulting txid as a string. Known RPC failures are
    translated: "Insufficient funds" -> InsufficientFundsError, any other
    message -> ValueError. Unknown failures are re-raised; the original
    fell through and silently returned None.
    """
    rpc_conn = self._get_rpc_conn()
    try:
        return str(rpc_conn.claimname(name, value, amount))
    except JSONRPCException as e:
        if 'message' in e.error and e.error['message'] == "Insufficient funds":
            raise InsufficientFundsError()
        elif 'message' in e.error:
            raise ValueError(e.error['message'])
        raise  # unknown RPC error: propagate instead of returning None
@_catch_connection_error
def _get_num_addresses_rpc(self):
    # number of addresses in the default ("") account
    rpc_conn = self._get_rpc_conn()
    return len(rpc_conn.getaddressesbyaccount(""))

@_catch_connection_error
def _get_best_blockhash_rpc(self):
    rpc_conn = self._get_rpc_conn()
    return rpc_conn.getbestblockhash()

@_catch_connection_error
def _stop_rpc(self):
    # check if our lbrycrdd is actually running, or if we connected to one that was already
    # running and ours failed to start
    if self.lbrycrdd.poll() is None:
        rpc_conn = self._get_rpc_conn()
        rpc_conn.stop()
        self.lbrycrdd.wait()
class LBRYumWallet(LBRYWallet):
    """LBRYWallet backend that talks to the network through lbryum (SPV)."""

    def __init__(self, db_dir):
        LBRYWallet.__init__(self, db_dir)
        self.config = None       # lbryum SimpleConfig, created in _start
        self.network = None      # lbryum Network, created in _start
        self.wallet = None       # lbryum Wallet, created in _load_wallet
        self.cmd_runner = None   # lbryum Commands dispatcher, created after start
        self.first_run = False   # True when a brand-new wallet file was created
        self.printed_retrieving_headers = False
        self._start_check = None      # LoopingCall polling for network connection
        self._catch_up_check = None   # LoopingCall polling blockchain catch-up
        self._caught_up_counter = 0
        self._lag_counter = 0
        self.blocks_behind_alert = 0
        self.catchup_progress = 0
        self.max_behind = 0      # largest observed blocks-behind count
def _start(self):
    """Start the lbryum network, wait for a connection, then load the wallet.

    Returns a Deferred that fires once the wallet and command runner are ready.
    """
    network_start_d = defer.Deferred()
    def setup_network():
        self.config = SimpleConfig({'auto_connect': True})
        self.network = Network(self.config)
        alert.info("Loading the wallet...")
        return defer.succeed(self.network.start())
    d = setup_network()
    def check_started():
        # polled by a LoopingCall until the network leaves the connecting state
        if self.network.is_connecting():
            if not self.printed_retrieving_headers and self.network.blockchain.retrieving_headers:
                alert.info("Running the wallet for the first time...this may take a moment.")
                self.printed_retrieving_headers = True
            return False
        self._start_check.stop()
        self._start_check = None
        if self.network.is_connected():
            network_start_d.callback(True)
        else:
            network_start_d.errback(ValueError("Failed to connect to network."))
    self._start_check = task.LoopingCall(check_started)
    d.addCallback(lambda _: self._start_check.start(.1))
    d.addCallback(lambda _: network_start_d)
    d.addCallback(lambda _: self._load_wallet())
    d.addCallback(lambda _: self._get_cmd_runner())
    return d
def _stop(self):
    """Stop any poll loops and the lbryum network; fires when disconnected."""
    if self._start_check is not None:
        self._start_check.stop()
        self._start_check = None
    if self._catch_up_check is not None:
        self._catch_up_check.stop()
        self._catch_up_check = None
    d = defer.Deferred()
    def check_stopped():
        # polled until the network reports it is no longer connected
        if self.network:
            if self.network.is_connected():
                return False
        stop_check.stop()
        self.network = None
        d.callback(True)
    if self.network:
        self.network.stop()
    stop_check = task.LoopingCall(check_stopped)
    stop_check.start(.1)
    return d
def _load_wallet(self):
    """Open (or create) the lbryum wallet file and wait for chain catch-up.

    Returns a Deferred that fires once the local chain is within 5 blocks of
    the server's height. Progress and lag counters are updated while waiting.
    """
    def get_wallet():
        # runs in a thread: create the wallet file on first run
        path = self.config.get_wallet_path()
        storage = WalletStorage(path)
        wallet = Wallet(storage)
        if not storage.file_exists:
            self.first_run = True
            seed = wallet.make_seed()
            wallet.add_seed(seed, None)
            wallet.create_master_keys(None)
            wallet.create_main_account()
            wallet.synchronize()
        self.wallet = wallet
    blockchain_caught_d = defer.Deferred()
    def check_caught_up():
        local_height = self.network.get_catchup_progress()
        remote_height = self.network.get_server_height()
        if remote_height != 0 and remote_height - local_height <= 5:
            msg = ""
            if self._caught_up_counter != 0:
                msg += "All caught up. "
            msg += "Wallet loaded."
            alert.info(msg)
            self._catch_up_check.stop()
            self._catch_up_check = None
            blockchain_caught_d.callback(True)
        elif remote_height != 0:
            past_blocks_behind = self.blocks_behind_alert
            self.blocks_behind_alert = remote_height - local_height
            if self.blocks_behind_alert < past_blocks_behind:
                self._lag_counter = 0
                self.is_lagging = False
            else:
                self._lag_counter += 1
                if self._lag_counter >= 900:
                    self.is_lagging = True
            if self.blocks_behind_alert > self.max_behind:
                self.max_behind = self.blocks_behind_alert
            # BUG FIX: under Python 2 this was integer division, which always
            # truncated the fraction to 0, so catchup_progress stayed 0.
            self.catchup_progress = int(100 * (float(self.blocks_behind_alert) / (5 + self.max_behind)))
            if self._caught_up_counter == 0:
                alert.info('Catching up with the blockchain...showing blocks left...')
            if self._caught_up_counter % 30 == 0:
                alert.info('%d...', (remote_height - local_height))
            self._caught_up_counter += 1
    self._catch_up_check = task.LoopingCall(check_caught_up)
    d = threads.deferToThread(get_wallet)
    d.addCallback(self._save_wallet)
    d.addCallback(lambda _: self.wallet.start_threads(self.network))
    d.addCallback(lambda _: self._catch_up_check.start(.1))
    d.addCallback(lambda _: blockchain_caught_d)
    return d
def _get_cmd_runner(self):
    # build the lbryum command dispatcher once config/wallet/network exist
    self.cmd_runner = Commands(self.config, self.wallet, self.network)

def get_balance(self):
    """Wallet balance as a Decimal (prefers 'unmatured', falls back to 'confirmed')."""
    cmd = known_commands['getbalance']
    func = getattr(self.cmd_runner, cmd.name)
    d = threads.deferToThread(func)
    d.addCallback(lambda result: result['unmatured'] if 'unmatured' in result else result['confirmed'])
    d.addCallback(Decimal)
    return d
def _update_name(self, name, txid, value, amount):
    # serialize+validate metadata, resolve the claim id, then spend into an update
    serialized_metadata = Metadata(value).serialize()
    d = self.get_claimid(name, txid)
    d.addCallback(lambda claim_id: self._send_claim_update(txid, amount, name, claim_id, serialized_metadata))
    return d

def get_new_address(self):
    d = threads.deferToThread(self.wallet.create_new_address)
    d.addCallback(self._save_wallet)  # persist the new address before handing it out
    return d

def get_block(self, blockhash):
    cmd = known_commands['getblock']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, blockhash)

def get_most_recent_blocktime(self):
    # timestamp of the current local chain tip
    header = self.network.get_header(self.network.get_local_height())
    return defer.succeed(header['timestamp'])

def get_best_blockhash(self):
    height = self.network.get_local_height()
    d = threads.deferToThread(self.network.blockchain.read_header, height)
    d.addCallback(lambda header: self.network.blockchain.hash_header(header))
    return d

def get_name_claims(self):
    cmd = known_commands['getnameclaims']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func)

def _check_first_run(self):
    # set by _load_wallet when it had to create a fresh wallet file
    return defer.succeed(self.first_run)

def _get_raw_tx(self, txid):
    cmd = known_commands['gettransaction']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, txid)
def _send_name_claim(self, name, val, amount):
    """Make a fresh claim on `name`, paying to a newly generated address,
    then broadcast the resulting transaction."""
    def send_claim(address):
        cmd = known_commands['claimname']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, address, amount, name, val)
    d = self.get_new_address()
    d.addCallback(send_claim)
    d.addCallback(self._broadcast_transaction)
    return d
def _send_claim_update(self, txid, amount, name, claim_id, val):
    """Spend the claim in `txid` into an update carrying `val`, paying to a
    fresh address, then broadcast it.

    Returns a Deferred firing with the broadcast result.
    """
    def send_claim(address):
        cmd = known_commands['updateclaim']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, txid, address, amount, name, claim_id, val)
    log.info("Update lbry://%s %s %f %s %s" % (name, txid, amount, claim_id, val))
    d = self.get_new_address()
    d.addCallback(send_claim)
    d.addCallback(self._broadcast_transaction)
    # BUG FIX: the Deferred was never returned, so _update_name's callback
    # chain received None instead of the broadcast result.
    return d
def _get_decoded_tx(self, raw_tx):
    """Decode a raw transaction into a minimal dict: {'vout': [{'value': LBC}, ...]}."""
    outputs = Transaction(raw_tx).outputs()
    # each output tuple carries the satoshi amount at index 2
    return {'vout': [{'value': Decimal(txo[2]) / Decimal(COIN)} for txo in outputs]}
def _send_abandon(self, txid, address, amount):
    """Abandon the claim in `txid`, paying `amount` back to `address`, and broadcast."""
    log.info("Abandon %s %s %f" % (txid, address, amount))
    cmd = known_commands['abandonclaim']
    func = getattr(self.cmd_runner, cmd.name)
    d = threads.deferToThread(func, txid, address, amount)
    d.addCallback(self._broadcast_transaction)
    return d

def _broadcast_transaction(self, raw_tx):
    """Broadcast a signed raw transaction, then persist the wallet state."""
    log.info("Broadcast: %s" % str(raw_tx))
    cmd = known_commands['broadcast']
    func = getattr(self.cmd_runner, cmd.name)
    d = threads.deferToThread(func, raw_tx)
    d.addCallback(self._save_wallet)
    return d

def _do_send_many(self, payments_to_send):
    # single transaction with one output per (address, amount) pair
    log.warning("Doing send many. payments to send: %s", str(payments_to_send))
    cmd = known_commands['paytomanyandsend']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, payments_to_send.iteritems())
def _get_value_for_name(self, name):
    cmd = known_commands['getvalueforname']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, name)

def get_claims_from_tx(self, txid):
    cmd = known_commands['getclaimsfromtx']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, txid)

def _get_balance_for_address(self, address):
    # total ever received by `address`, converted from satoshis to LBC
    return defer.succeed(Decimal(self.wallet.get_addr_received(address))/COIN)

def get_nametrie(self):
    cmd = known_commands['getclaimtrie']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func)

def get_history(self):
    cmd = known_commands['history']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func)
def get_tx_json(self, txid):
    """Fetch `txid` and return it as a JSON-friendly dict.

    Decimal amounts inside list entries are converted to floats (satoshis
    divided by 1e8); everything else passes through unchanged.
    """
    def _decode(raw_tx):
        tx = Transaction(raw_tx).deserialize()
        decoded_tx = {}
        for txkey in tx.keys():
            if isinstance(tx[txkey], list):
                decoded_tx[txkey] = []
                for i in tx[txkey]:
                    tmp = {}
                    for k in i.keys():
                        if isinstance(i[k], Decimal):
                            # satoshis -> LBC as float for JSON serializability
                            tmp[k] = float(i[k] / 1e8)
                        else:
                            tmp[k] = i[k]
                    decoded_tx[txkey].append(tmp)
            else:
                decoded_tx[txkey] = tx[txkey]
        return decoded_tx
    d = self._get_raw_tx(txid)
    d.addCallback(_decode)
    return d
def get_pub_keys(self, wallet):
    cmd = known_commands['getpubkeys']
    func = getattr(self.cmd_runner, cmd.name)
    return threads.deferToThread(func, wallet)

def _save_wallet(self, val):
    # write wallet storage to disk, then pass `val` through unchanged so this
    # can be dropped into any callback chain
    d = threads.deferToThread(self.wallet.storage.write)
    d.addCallback(lambda _: val)
    return d
class LBRYcrdAddressRequester(object):
    """Asks each connected peer, once, for a payment address and records it
    on the wallet via update_peer_address."""
    implements([IRequestCreator])

    def __init__(self, wallet):
        self.wallet = wallet
        self._protocols = []  # protocols already asked, so each is asked once

    ######### IRequestCreator #########

    def send_next_request(self, peer, protocol):
        """Request a payment address from `protocol` unless already asked.

        Returns a Deferred firing True if a request was sent, False otherwise.
        """
        # guard clause (was `if not protocol in ...` — non-idiomatic negation)
        if protocol in self._protocols:
            return defer.succeed(False)
        r = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address')
        d = protocol.add_request(r)
        d.addCallback(self._handle_address_response, peer, r, protocol)
        d.addErrback(self._request_failed, peer)
        self._protocols.append(protocol)
        return defer.succeed(True)

    ######### internal calls #########

    def _handle_address_response(self, response_dict, peer, request, protocol):
        # the peer must echo back the identifier we asked for
        assert request.response_identifier in response_dict, \
            "Expected %s in dict but did not get it" % request.response_identifier
        assert protocol in self._protocols, "Responding protocol is not in our list of protocols"
        address = response_dict[request.response_identifier]
        self.wallet.update_peer_address(peer, address)

    def _request_failed(self, err, peer):
        # cancellations are expected; anything else is logged and propagated
        if not err.check(RequestCanceledError):
            log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                        err.getErrorMessage(), str(peer))
            return err
class LBRYcrdAddressQueryHandlerFactory(object):
    """Builds LBRYcrdAddressQueryHandler instances bound to one wallet."""
    implements(IQueryHandlerFactory)

    def __init__(self, wallet):
        self.wallet = wallet

    ######### IQueryHandlerFactory #########

    def build_query_handler(self):
        # a fresh handler per connection
        return LBRYcrdAddressQueryHandler(self.wallet)

    def get_primary_query_identifier(self):
        return 'lbrycrd_address'

    def get_description(self):
        return "LBRYcrd Address - an address for receiving payments via LBRYcrd"
class LBRYcrdAddressQueryHandler(object):
    """Answers a peer's 'lbrycrd_address' query with a fresh payment address."""
    implements(IQueryHandler)

    def __init__(self, wallet):
        self.wallet = wallet
        self.query_identifiers = ['lbrycrd_address']
        self.address = None  # address handed to the current peer, once generated
        self.peer = None

    ######### IQueryHandler #########

    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)

    def handle_queries(self, queries):
        """Return a Deferred with {'lbrycrd_address': address} for a matching
        query; fail if no address query arrived when one was expected."""
        def create_response(address):
            self.address = address
            fields = {'lbrycrd_address': address}
            return fields
        if self.query_identifiers[0] in queries:
            d = self.wallet.get_new_address_for_peer(self.peer)
            d.addCallback(create_response)
            return d
        if self.address is None:
            log.warning("Expected a request for an address, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
        else:
            return defer.succeed({})
# add get_claims_for_name  (stray commit-message text; kept as a comment so the file parses)
import datetime
import functools
import json
import logging
import os
import socket
import subprocess
import sys
import time
from collections import defaultdict, deque
from decimal import Decimal

import requests
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from googlefinance import getQuotes
from twisted.enterprise import adbapi
from twisted.internet import threads, reactor, defer, task
from twisted.python.failure import Failure
from zope.interface import implements
from lbryum import SimpleConfig, Network
from lbryum.commands import known_commands, Commands
from lbryum.lbrycrd import COIN, TYPE_ADDRESS
from lbryum.transaction import Transaction
from lbryum.wallet import WalletStorage, Wallet

from lbrynet.conf import SOURCE_TYPES
from lbrynet.core.Error import UnknownNameError, InvalidStreamInfoError, RequestCanceledError
from lbrynet.core.Error import InsufficientFundsError
from lbrynet.core.LBRYMetadata import Metadata
from lbrynet.core.client.ClientRequest import ClientRequest
from lbrynet.core.sqlite_helpers import rerun_if_locked
from lbrynet.interfaces import IRequestCreator, IQueryHandlerFactory, IQueryHandler, ILBRYWallet
log = logging.getLogger(__name__)
# user-facing progress messages are routed through the "lbryalert" logger tree
alert = logging.getLogger("lbryalert." + __name__)
class ReservedPoints(object):
    """Token representing points set aside for a pending payment.

    Returned by LBRYWallet.reserve_points and later consumed by
    send_points / send_points_to_address / cancel_point_reservation.
    """

    def __init__(self, identifier, amount):
        # identifier: the peer or address the reservation is destined for
        self.identifier = identifier
        # amount: the reserved number of points (Decimal, rounded to 8 places)
        self.amount = amount
def _catch_connection_error(f):
def w(*args):
try:
return f(*args)
except socket.error:
raise ValueError("Unable to connect to an lbrycrd server. Make sure an lbrycrd server " +
"is running and that this application can connect to it.")
return w
class LBRYWallet(object):
    """This class implements the LBRYWallet interface for the LBRYcrd payment system"""
    implements(ILBRYWallet)

    # tri-state cache used by is_first_run()
    _FIRST_RUN_UNKNOWN = 0
    _FIRST_RUN_YES = 1
    _FIRST_RUN_NO = 2

    def __init__(self, db_dir):
        self.db_dir = db_dir
        self.db = None  # adbapi ConnectionPool, opened by start() via _open_db
        self.next_manage_call = None  # IDelayedCall for the next manage() pass
        self.wallet_balance = Decimal(0.0)
        self.total_reserved_points = Decimal(0.0)
        self.peer_addresses = {}  # {Peer: string}
        self.queued_payments = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.expected_balances = defaultdict(Decimal)  # {address(string): amount(Decimal)}
        self.current_address_given_to_peer = {}  # {Peer: address(string)}
        self.expected_balance_at_time = deque()  # (Peer, address(string), amount(Decimal), time(datetime), count(int),
                                                 # incremental_amount(float))
        self.max_expected_payment_time = datetime.timedelta(minutes=3)
        self.stopped = True
        self.is_lagging = None
        self.manage_running = False
        self._manage_count = 0
        self._balance_refresh_time = 3  # seconds between manage() passes
        self._batch_count = 20  # every Nth manage() pass is a full settle pass
        self._first_run = self._FIRST_RUN_UNKNOWN
def start(self):
    """Open the DB, start the backend, then kick off the manage loop."""
    def start_manage():
        self.stopped = False
        self.manage()
        return True
    d = self._open_db()
    d.addCallback(lambda _: self._start())
    d.addCallback(lambda _: start_manage())
    return d

@staticmethod
def log_stop_error(err):
    log.error("An error occurred stopping the wallet: %s", err.getTraceback())

def stop(self):
    """Run one final full manage pass, then shut the backend down."""
    self.stopped = True
    # If self.next_manage_call is None, then manage is currently running or else
    # start has not been called, so set stopped and do nothing else.
    if self.next_manage_call is not None:
        self.next_manage_call.cancel()
        self.next_manage_call = None
    d = self.manage(do_full=True)
    d.addErrback(self.log_stop_error)
    d.addCallback(lambda _: self._stop())
    d.addErrback(self.log_stop_error)
    return d
def manage(self, do_full=False):
    """Periodic bookkeeping: refresh the balance and, on full passes, settle
    expected balances and flush queued payments.

    Re-schedules itself every _balance_refresh_time seconds until stopped.
    Every _batch_count-th pass (or do_full=True) is a full pass.
    """
    self.next_manage_call = None
    have_set_manage_running = [False]  # mutable cell so the closures can flag it
    self._manage_count += 1
    if self._manage_count % self._batch_count == 0:
        self._manage_count = 0
        do_full = True
    def check_if_manage_running():
        d = defer.Deferred()
        def fire_if_not_running():
            if self.manage_running is False:
                self.manage_running = True
                have_set_manage_running[0] = True
                d.callback(True)
            elif do_full is False:
                # a light pass just skips if another manage is already in flight
                d.callback(False)
            else:
                # full passes must run: retry until the other manage finishes
                task.deferLater(reactor, 1, fire_if_not_running)
        fire_if_not_running()
        return d
    d = check_if_manage_running()
    def do_manage():
        if do_full:
            d = self._check_expected_balances()
            d.addCallback(lambda _: self._send_payments())
        else:
            d = defer.succeed(True)
        d.addCallback(lambda _: self.get_balance())
        def set_wallet_balance(balance):
            if self.wallet_balance != balance:
                log.info("Got a new balance: %s", str(balance))
                self.wallet_balance = balance
        d.addCallback(set_wallet_balance)
        return d
    d.addCallback(lambda should_run: do_manage() if should_run else None)
    def set_next_manage_call():
        if not self.stopped:
            self.next_manage_call = reactor.callLater(self._balance_refresh_time, self.manage)
    d.addCallback(lambda _: set_next_manage_call())
    def log_error(err):
        log.error("Something went wrong during manage. Error message: %s", err.getErrorMessage())
        return err
    d.addErrback(log_error)
    def set_manage_not_running(arg):
        # release the manage_running flag only if this invocation acquired it
        if have_set_manage_running[0] is True:
            self.manage_running = False
        return arg
    d.addBoth(set_manage_not_running)
    return d
def get_info_exchanger(self):
    # request creator used by connections to learn peers' payment addresses
    return LBRYcrdAddressRequester(self)

def get_wallet_info_query_handler_factory(self):
    # factory for handlers answering peers' 'lbrycrd_address' queries
    return LBRYcrdAddressQueryHandlerFactory(self)

def reserve_points(self, identifier, amount):
    """
    Ensure a certain amount of points are available to be sent as payment, before the service is rendered

    @param identifier: The peer to which the payment will ultimately be sent

    @param amount: The amount of points to reserve

    @return: A ReservedPoints object which is given to send_points once the service has been rendered
    """
    rounded_amount = Decimal(str(round(amount, 8)))
    #if peer in self.peer_addresses:
    if self.wallet_balance >= self.total_reserved_points + rounded_amount:
        self.total_reserved_points += rounded_amount
        return ReservedPoints(identifier, rounded_amount)
    # not enough unreserved balance available
    return None

def cancel_point_reservation(self, reserved_points):
    """
    Return all of the points that were reserved previously for some ReservedPoints object

    @param reserved_points: ReservedPoints previously returned by reserve_points

    @return: None
    """
    self.total_reserved_points -= reserved_points.amount
def send_points(self, reserved_points, amount):
    """
    Schedule a payment to be sent to a peer

    @param reserved_points: ReservedPoints object previously returned by reserve_points

    @param amount: amount of points to actually send, must be less than or equal to the
        amount reserved in reserved_points

    @return: Deferred which fires when the payment has been scheduled
    """
    rounded_amount = Decimal(str(round(amount, 8)))
    peer = reserved_points.identifier
    assert(rounded_amount <= reserved_points.amount)
    assert(peer in self.peer_addresses)
    self.queued_payments[self.peer_addresses[peer]] += rounded_amount
    # make any unused points available
    self.total_reserved_points -= (reserved_points.amount - rounded_amount)
    log.info("ordering that %s points be sent to %s", str(rounded_amount),
             str(self.peer_addresses[peer]))
    peer.update_stats('points_sent', amount)
    return defer.succeed(True)

def send_points_to_address(self, reserved_points, amount):
    """
    Schedule a payment to be sent to an address

    @param reserved_points: ReservedPoints object previously returned by reserve_points

    @param amount: amount of points to actually send. must be less than or equal to the
        amount reserved in reserved_points

    @return: Deferred which fires when the payment has been scheduled
    """
    rounded_amount = Decimal(str(round(amount, 8)))
    address = reserved_points.identifier
    assert(rounded_amount <= reserved_points.amount)
    self.queued_payments[address] += rounded_amount
    # make any unused points available again
    self.total_reserved_points -= (reserved_points.amount - rounded_amount)
    log.info("Ordering that %s points be sent to %s", str(rounded_amount),
             str(address))
    return defer.succeed(True)
def add_expected_payment(self, peer, amount):
    """Increase the number of points expected to be paid by a peer"""
    rounded_amount = Decimal(str(round(amount, 8)))
    assert(peer in self.current_address_given_to_peer)
    address = self.current_address_given_to_peer[peer]
    log.info("expecting a payment at address %s in the amount of %s", str(address), str(rounded_amount))
    self.expected_balances[address] += rounded_amount
    expected_balance = self.expected_balances[address]
    # deadline after which _check_expected_balances will verify this payment
    expected_time = datetime.datetime.now() + self.max_expected_payment_time
    self.expected_balance_at_time.append((peer, address, expected_balance, expected_time, 0, amount))
    peer.update_stats('expected_points', amount)

def update_peer_address(self, peer, address):
    self.peer_addresses[peer] = address

def get_new_address_for_peer(self, peer):
    """Generate a fresh address and record it as the one given to `peer`."""
    def set_address_for_peer(address):
        self.current_address_given_to_peer[peer] = address
        return address
    d = self.get_new_address()
    d.addCallback(set_address_for_peer)
    return d
def _send_payments(self):
    """Flush all queued per-address payments into one send-many transaction.

    Returns a Deferred firing when the transaction has been handed to the
    backend, or an already-fired Deferred when nothing was queued.
    """
    payments_to_send = {}
    # snapshot the items first: deleting from the dict while iterating it is
    # only safe on Python 2's list-returning items(); this form is safe
    # everywhere and behaves identically.
    for address, points in list(self.queued_payments.items()):
        log.info("Should be sending %s points to %s", str(points), str(address))
        payments_to_send[address] = points
        self.total_reserved_points -= points
        self.wallet_balance -= points
        del self.queued_payments[address]
    if payments_to_send:
        log.info("Creating a transaction with outputs %s", str(payments_to_send))
        d = self._do_send_many(payments_to_send)
        d.addCallback(lambda txid: log.debug("Sent transaction %s", txid))
        return d
    log.info("There were no payments to send")
    return defer.succeed(True)
def get_stream_info_for_name(self, name):
    """Resolve lbry://`name` to validated stream Metadata."""
    d = self._get_value_for_name(name)
    d.addCallback(self._get_stream_info_from_value, name)
    return d

def get_txid_for_name(self, name):
    d = self._get_value_for_name(name)
    d.addCallback(lambda r: None if 'txid' not in r else r['txid'])
    return d

def get_stream_info_from_txid(self, name, txid):
    """Resolve stream Metadata for `name` from a specific transaction."""
    d = self.get_claims_from_tx(txid)
    def get_claim_for_name(claims):
        for claim in claims:
            if claim['name'] == name:
                claim['txid'] = txid
                return claim
        return Failure(UnknownNameError(name))
    d.addCallback(get_claim_for_name)
    d.addCallback(self._get_stream_info_from_value, name)
    return d
def _get_stream_info_from_value(self, result, name):
    """Validate a name-lookup result and fire with its Metadata.

    Fails with UnknownNameError on lookup errors and InvalidStreamInfoError
    when the value does not parse as Metadata; also records the
    name -> txid -> sd_hash mapping in the local DB.
    """
    def _check_result_fields(r):
        for k in ['value', 'txid', 'n', 'height', 'amount']:
            assert k in r, "getvalueforname response missing field %s" % k
    def _log_success(claim_id):
        log.info("lbry://%s complies with %s, claimid: %s" % (name, metadata.meta_version, claim_id))
        return defer.succeed(None)
    if 'error' in result:
        log.warning("Got an error looking up a name: %s", result['error'])
        return Failure(UnknownNameError(name))
    _check_result_fields(result)
    try:
        metadata = Metadata(json.loads(result['value']))
    except (ValueError, TypeError):
        return Failure(InvalidStreamInfoError(name))
    txid = result['txid']
    sd_hash = metadata['sources']['lbry_sd_hash']
    d = self._save_name_metadata(name, txid, sd_hash)
    d.addCallback(lambda _: self.get_claimid(name, txid))
    d.addCallback(lambda cid: _log_success(cid))
    d.addCallback(lambda _: metadata)
    return d
def _get_claim_info(self, result, name, force_good_metadata=True):
    """Turn a name-lookup result into a claim dict with 'value' and 'claim_id'.

    With force_good_metadata the value must parse as valid Metadata (and the
    sd hash is cached locally); otherwise the raw value passes through.
    """
    def _check_result_fields(r):
        for k in ['value', 'txid', 'n', 'height', 'amount']:
            assert k in r, "getvalueforname response missing field %s" % k
    def _build_response(m, result, claim_id):
        result['value'] = m
        result['claim_id'] = claim_id
        log.info("lbry://%s complies with %s, claimid: %s",
                 name,
                 m.meta_version if force_good_metadata else "not checked",
                 claim_id)
        return result
    if 'error' in result:
        log.warning("Got an error looking up a name: %s", result['error'])
        return Failure(UnknownNameError(name))
    _check_result_fields(result)
    txid = result['txid']
    if force_good_metadata:
        try:
            metadata = Metadata(json.loads(result['value']))
        except (ValueError, TypeError):
            return Failure(InvalidStreamInfoError(name))
        sd_hash = metadata['sources']['lbry_sd_hash']
        d = self._save_name_metadata(name, txid, sd_hash)
        d.addCallback(lambda _: self.get_claimid(name, txid))
    else:
        metadata = result['value']
        d = self.get_claimid(name, txid)
    d.addCallback(lambda claim_id: _build_response(metadata, result, claim_id))
    return d
def get_claimid(self, name, txid):
    """Resolve the claim id for (name, txid), consulting the local DB first.

    On a cache miss the id is read from the transaction's claims and stored
    for next time.
    """
    def _get_id_for_return(claim_id):
        if claim_id:
            return defer.succeed(claim_id)
        else:
            d = self.get_claims_from_tx(txid)
            # NOTE(review): next() raises StopIteration if no claim in the tx
            # matches `name` — verify callers can tolerate that failure mode
            d.addCallback(lambda claims: next(c['claimId'] for c in claims if c['name'] == name))
            d.addCallback(lambda cid: self._update_claimid(cid, name, txid))
            return d
    d = self._get_claimid_for_tx(name, txid)
    d.addCallback(_get_id_for_return)
    return d
def get_claim_info(self, name, force_good_metadata=True, is_mine=False):
    """Look up claim info for `name`.

    Fires with the claim dict, or False on any lookup/parse failure. With
    is_mine=True it also fires False unless the claim's txid matches one of
    this wallet's own name claims.
    """
    def _filter_my_claims(claim):
        # keep the claim only if its txid appears among our own claims
        d = self.get_name_claims()
        d.addCallback(lambda my_claims: claim if claim['txid'] in [c['txid'] for c in my_claims] else False)
        return d
    d = self._get_value_for_name(name)
    d.addCallback(lambda r: self._get_claim_info(r, name, force_good_metadata))
    # NOTE(review): this errback maps *any* failure to False, hiding the cause
    d.addErrback(lambda _: False)
    if is_mine:
        d.addCallback(lambda claim: _filter_my_claims(claim) if claim is not False else False)
    # BUG FIX: the Deferred was never returned, so callers (e.g. claim_name)
    # received None and crashed when chaining callbacks onto it.
    return d
def get_claims_for_name(self, name):
    """Return a Deferred with all claims on `name` (backend-specific format)."""
    return self._get_claims_for_name(name)

def update_metadata(self, new_metadata, old_metadata):
    """Overlay `new_metadata` onto `old_metadata` and return validated Metadata.

    Note: when `old_metadata` is a dict it is updated in place, matching the
    original behavior.
    """
    base = old_metadata if isinstance(old_metadata, dict) else {}
    base.update(new_metadata)
    return Metadata(base)
def claim_name(self, name, bid, m):
    """Claim or update lbry://`name` with metadata `m` at amount `bid`.

    If this wallet already owns a claim on the name, the new metadata is
    merged into the old and the claim is updated; otherwise a fresh claim is
    made. Fires with the txid after the name -> sd_hash mapping is saved.
    """
    def _save_metadata(txid):
        log.info("Saving metadata for claim %s" % txid)
        d = self._save_name_metadata(name, txid, metadata['sources']['lbry_sd_hash'])
        d.addCallback(lambda _: txid)
        return d
    metadata = Metadata(m)
    d = self.get_claim_info(name, force_good_metadata=False, is_mine=True)
    # r is our existing claim dict, or False when we have no claim on the name
    d.addCallback(lambda r: self.update_name(
        name,
        r['txid'],
        json.dumps(self.update_metadata(metadata, r['value'])),
        bid
    )
    if r else self._send_name_claim(
        name,
        json.dumps(metadata),
        bid
    )
    )
    d.addCallback(_save_metadata)
    return d
def abandon_name(self, txid):
    """Abandon the claim in `txid`, refunding its value to a fresh address."""
    d1 = self.get_new_address()
    d2 = self.get_claims_from_tx(txid)
    def get_txout_of_claim(claims):
        # find the output index (nOut) carrying the claim
        for claim in claims:
            if 'name' in claim and 'nOut' in claim:
                return claim['nOut']
        return defer.fail(ValueError("No claims in tx"))
    def get_value_of_txout(nOut):
        d = self._get_raw_tx(txid)
        d.addCallback(self._get_decoded_tx)
        d.addCallback(lambda tx: tx['vout'][nOut]['value'])
        return d
    d2.addCallback(get_txout_of_claim)
    d2.addCallback(get_value_of_txout)
    dl = defer.DeferredList([d1, d2], consumeErrors=True)
    def abandon(results):
        # results: [(success, new_address), (success, claim_value)]
        if results[0][0] and results[1][0]:
            address = results[0][1]
            amount = float(results[1][1])
            return self._send_abandon(txid, address, amount)
        elif results[0][0] is False:
            return defer.fail(Failure(ValueError("Couldn't get a new address")))
        else:
            # propagate the failure from the claim-value lookup
            return results[1][1]
    dl.addCallback(abandon)
    return dl
def get_tx(self, txid):
    """Fetch and decode a transaction (see _get_decoded_tx for the shape)."""
    d = self._get_raw_tx(txid)
    d.addCallback(self._get_decoded_tx)
    return d

def update_name(self, name, txid, value, amount):
    d = self._update_name(name, txid, value, amount)
    return d

def get_name_and_validity_for_sd_hash(self, sd_hash):
    # look up which name claimed this sd hash, then report the claim's status
    d = self._get_claim_metadata_for_sd_hash(sd_hash)
    d.addCallback(lambda name_txid: self._get_status_of_claim(name_txid[1], name_txid[0], sd_hash) if name_txid is not None else None)
    return d

def get_available_balance(self):
    # wallet balance minus outstanding reservations, as a float
    return float(self.wallet_balance - self.total_reserved_points)
def is_first_run(self):
    """Return a Deferred firing True iff this is the wallet's first run.

    The backend's answer is cached in self._first_run after the first check.
    """
    if self._first_run == self._FIRST_RUN_UNKNOWN:
        d = self._check_first_run()
        def set_first_run(is_first):
            self._first_run = self._FIRST_RUN_YES if is_first else self._FIRST_RUN_NO
        d.addCallback(set_first_run)
    else:
        # The cached value is read by the final callback below. The original
        # seeded a value here via `if self._first_run`, which was wrong
        # (_FIRST_RUN_NO == 2 is also truthy) but harmless only because the
        # value was immediately discarded; seed nothing instead.
        d = defer.succeed(None)
    d.addCallback(lambda _: self._first_run == self._FIRST_RUN_YES)
    return d
def _get_status_of_claim(self, txid, name, sd_hash):
    """Report the status of the claim on `name` in `txid` for `sd_hash`.

    Fires with (name, status) where status is one of "valid", "invalid",
    "pending", "unconfirmed" — or None when no matching claim is found or
    its value cannot be parsed.
    """
    d = self.get_claims_from_tx(txid)
    def get_status(claims):
        if claims is None:
            claims = []
        for claim in claims:
            if 'in claim trie' in claim:
                if 'name' in claim and str(claim['name']) == name and 'value' in claim:
                    try:
                        value_dict = json.loads(claim['value'])
                    except (ValueError, TypeError):
                        return None
                    claim_sd_hash = None
                    if 'stream_hash' in value_dict:
                        claim_sd_hash = str(value_dict['stream_hash'])
                    # BUG FIX: the membership test used the key 'lbrynet_sd_hash'
                    # while the read used 'lbry_sd_hash' (the key used everywhere
                    # else in this module), so it could KeyError or miss the hash.
                    if 'sources' in value_dict and 'lbry_sd_hash' in value_dict['sources']:
                        claim_sd_hash = str(value_dict['sources']['lbry_sd_hash'])
                    if claim_sd_hash is not None and claim_sd_hash == sd_hash:
                        if 'is controlling' in claim and claim['is controlling']:
                            return name, "valid"
                        if claim['in claim trie']:
                            return name, "invalid"
                        if 'in queue' in claim and claim['in queue']:
                            return name, "pending"
                        return name, "unconfirmed"
        return None
    d.addCallback(get_status)
    return d
def _check_expected_balances(self):
    """Verify that peers paid what they promised and adjust their scores.

    Pops every expectation whose deadline has passed, checks the address
    balance, and hands out strikes / score updates accordingly.
    """
    now = datetime.datetime.now()
    balances_to_check = []
    try:
        while self.expected_balance_at_time[0][3] < now:
            balances_to_check.append(self.expected_balance_at_time.popleft())
    except IndexError:
        pass  # deque exhausted
    ds = []
    for balance_to_check in balances_to_check:
        log.info("Checking balance of address %s", str(balance_to_check[1]))
        d = self._get_balance_for_address(balance_to_check[1])
        # BUG FIX: bind the expected amount now — the original lambda closed
        # over the loop variable, so late-firing callbacks all compared
        # against the LAST expectation in the list.
        d.addCallback(lambda bal, expected=balance_to_check[2]: bal >= expected)
        ds.append(d)
    dl = defer.DeferredList(ds)
    def handle_checks(results):
        from future_builtins import zip
        for balance, (success, result) in zip(balances_to_check, results):
            peer = balance[0]
            if success is True:
                if result is False:
                    if balance[4] <= 1:  # first or second strike, give them another chance
                        new_expected_balance = (balance[0],
                                                balance[1],
                                                balance[2],
                                                datetime.datetime.now() + self.max_expected_payment_time,
                                                balance[4] + 1,
                                                balance[5])
                        self.expected_balance_at_time.append(new_expected_balance)
                        peer.update_score(-5.0)
                    else:
                        peer.update_score(-50.0)
                else:
                    if balance[4] == 0:
                        peer.update_score(balance[5])
                    peer.update_stats('points_received', balance[5])
            else:
                log.warning("Something went wrong checking a balance. Peer: %s, account: %s,"
                            "expected balance: %s, expected time: %s, count: %s, error: %s",
                            str(balance[0]), str(balance[1]), str(balance[2]), str(balance[3]),
                            str(balance[4]), str(result.getErrorMessage()))
    dl.addCallback(handle_checks)
    return dl
def _open_db(self):
    """Open/create blockchainname.db with the name_metadata and claim_ids tables."""
    self.db = adbapi.ConnectionPool('sqlite3', os.path.join(self.db_dir, "blockchainname.db"),
                                    check_same_thread=False)
    def create_tables(transaction):
        transaction.execute("create table if not exists name_metadata (" +
                            "    name text, " +
                            "    txid text, " +
                            "    sd_hash text)")
        transaction.execute("create table if not exists claim_ids (" +
                            "    claimId text, " +
                            "    name text, " +
                            "    txid text)")
    return self.db.runInteraction(create_tables)
def _save_name_metadata(self, name, txid, sd_hash):
    # delete-then-insert keeps the (name, txid, sd_hash) row unique
    d = self.db.runQuery("delete from name_metadata where name=? and txid=? and sd_hash=?", (name, txid, sd_hash))
    d.addCallback(lambda _: self.db.runQuery("insert into name_metadata values (?, ?, ?)", (name, txid, sd_hash)))
    return d

def _get_claim_metadata_for_sd_hash(self, sd_hash):
    # fires with (name, txid) or None
    d = self.db.runQuery("select name, txid from name_metadata where sd_hash=?", (sd_hash,))
    d.addCallback(lambda r: r[0] if r else None)
    return d

def _update_claimid(self, claim_id, name, txid):
    # cache the claim id; fires with claim_id so callers can chain on it
    d = self.db.runQuery("delete from claim_ids where claimId=? and name=? and txid=?", (claim_id, name, txid))
    d.addCallback(lambda r: self.db.runQuery("insert into claim_ids values (?, ?, ?)", (claim_id, name, txid)))
    d.addCallback(lambda _: claim_id)
    return d

def _get_claimid_for_tx(self, name, txid):
    # fires with the cached claim id or None on a cache miss
    d = self.db.runQuery("select claimId from claim_ids where name=? and txid=?", (name, txid))
    d.addCallback(lambda r: r[0][0] if r else None)
    return d
######### Must be overridden #########
# Abstract backend interface: each concrete wallet (LBRYcrdWallet, LBRYumWallet)
# must implement these; the defaults fail with NotImplementedError.

def get_balance(self):
    return defer.fail(NotImplementedError())

def get_new_address(self):
    return defer.fail(NotImplementedError())

def get_block(self, blockhash):
    return defer.fail(NotImplementedError())

def get_most_recent_blocktime(self):
    return defer.fail(NotImplementedError())

def get_best_blockhash(self):
    return defer.fail(NotImplementedError())

def get_name_claims(self):
    return defer.fail(NotImplementedError())

def _get_claims_for_name(self, name):
    return defer.fail(NotImplementedError())

def _check_first_run(self):
    return defer.fail(NotImplementedError())

def _get_raw_tx(self, txid):
    return defer.fail(NotImplementedError())

def _send_name_claim(self, name, val, amount):
    return defer.fail(NotImplementedError())

def _get_decoded_tx(self, raw_tx):
    return defer.fail(NotImplementedError())

def _send_abandon(self, txid, address, amount):
    return defer.fail(NotImplementedError())

def _update_name(self, name, txid, value, amount):
    return defer.fail(NotImplementedError())

def _do_send_many(self, payments_to_send):
    return defer.fail(NotImplementedError())

def _get_value_for_name(self, name):
    return defer.fail(NotImplementedError())

def get_claims_from_tx(self, txid):
    return defer.fail(NotImplementedError())

def _get_balance_for_address(self, address):
    return defer.fail(NotImplementedError())

# backend start/stop hooks; no-ops by default
def _start(self):
    pass

def _stop(self):
    pass
class LBRYcrdWallet(LBRYWallet):
    """Wallet backed by an lbrycrdd daemon reached over JSON-RPC.

    If ``lbrycrdd_path`` is given, the daemon is launched by (and later
    stopped by) this object; otherwise an already-running daemon is
    assumed.  All ``*_rpc`` helpers are blocking and are therefore run off
    the reactor thread via ``threads.deferToThread``.
    """

    def __init__(self, db_dir, wallet_dir=None, wallet_conf=None, lbrycrdd_path=None):
        LBRYWallet.__init__(self, db_dir)
        self.started_lbrycrdd = False  # True only if *we* launched the daemon
        self.wallet_dir = wallet_dir
        self.wallet_conf = wallet_conf
        self.lbrycrdd = None  # subprocess.Popen handle when we own the daemon
        self.lbrycrdd_path = lbrycrdd_path

        settings = self._get_rpc_conf()
        rpc_user = settings["username"]
        rpc_pass = settings["password"]
        rpc_port = settings["rpc_port"]
        rpc_url = "127.0.0.1"
        self.rpc_conn_string = "http://%s:%s@%s:%s" % (rpc_user, rpc_pass, rpc_url, str(rpc_port))

    def _start(self):
        return threads.deferToThread(self._make_connection)

    def _stop(self):
        # Only stop the daemon if we might have been the ones running it.
        if self.lbrycrdd_path is not None:
            return self._stop_daemon()

    def _make_connection(self):
        """Blocking connect (runs in a thread, see _start)."""
        alert.info("Connecting to lbrycrdd...")
        if self.lbrycrdd_path is not None:
            self._start_daemon()
        self._get_info_rpc()
        log.info("Connected!")
        alert.info("Connected to lbrycrdd.")

    def _get_rpc_conf(self):
        """Read RPC credentials/port from the wallet conf file, with defaults."""
        settings = {"username": "rpcuser",
                    "password": "rpcpassword",
                    "rpc_port": 9245}
        if self.wallet_conf and os.path.exists(self.wallet_conf):
            # FIX: use a context manager so the conf file handle is closed
            # (it was previously left open).
            with open(self.wallet_conf) as conf:
                for l in conf:
                    if l.startswith("rpcuser="):
                        settings["username"] = l[8:].rstrip('\n')
                    if l.startswith("rpcpassword="):
                        settings["password"] = l[12:].rstrip('\n')
                    if l.startswith("rpcport="):
                        settings["rpc_port"] = int(l[8:].rstrip('\n'))
        return settings

    def _check_first_run(self):
        """First run == zero balance and at most one address in the wallet."""
        d = self.get_balance()
        # `2` is a sentinel: any value > 1 makes the next callback yield False.
        d.addCallback(lambda bal: threads.deferToThread(self._get_num_addresses_rpc) if bal == 0 else 2)
        d.addCallback(lambda num_addresses: True if num_addresses <= 1 else False)
        return d

    def get_new_address(self):
        return threads.deferToThread(self._get_new_address_rpc)

    def get_balance(self):
        return threads.deferToThread(self._get_wallet_balance_rpc)

    def get_most_recent_blocktime(self):
        d = threads.deferToThread(self._get_best_blockhash_rpc)
        d.addCallback(lambda blockhash: threads.deferToThread(self._get_block_rpc, blockhash))
        d.addCallback(
            lambda block: block['time'] if 'time' in block else Failure(ValueError("Could not get a block time")))
        return d

    def get_name_claims(self):
        return threads.deferToThread(self._get_name_claims_rpc)

    def get_block(self, blockhash):
        return threads.deferToThread(self._get_block_rpc, blockhash)

    def get_best_blockhash(self):
        d = threads.deferToThread(self._get_blockchain_info_rpc)
        d.addCallback(lambda blockchain_info: blockchain_info['bestblockhash'])
        return d

    def get_nametrie(self):
        return threads.deferToThread(self._get_nametrie_rpc)

    def start_miner(self):
        d = threads.deferToThread(self._get_gen_status_rpc)
        d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, True) if not status
                      else "Miner was already running")
        return d

    def stop_miner(self):
        d = threads.deferToThread(self._get_gen_status_rpc)
        d.addCallback(lambda status: threads.deferToThread(self._set_gen_status_rpc, False) if status
                      else "Miner wasn't running")
        return d

    def get_miner_status(self):
        return threads.deferToThread(self._get_gen_status_rpc)

    def _get_balance_for_address(self, address):
        return threads.deferToThread(self._get_balance_for_address_rpc, address)

    def _do_send_many(self, payments_to_send):
        outputs = {address: float(points) for address, points in payments_to_send.iteritems()}
        return threads.deferToThread(self._do_send_many_rpc, outputs)

    def _send_name_claim(self, name, value, amount):
        return threads.deferToThread(self._send_name_claim_rpc, name, value, amount)

    def _get_raw_tx(self, txid):
        return threads.deferToThread(self._get_raw_tx_rpc, txid)

    def _get_decoded_tx(self, raw_tx):
        return threads.deferToThread(self._get_decoded_tx_rpc, raw_tx)

    def _send_abandon(self, txid, address, amount):
        return threads.deferToThread(self._send_abandon_rpc, txid, address, amount)

    def _update_name(self, name, txid, value, amount):
        # `name` is accepted for interface parity with LBRYWallet but is not
        # forwarded -- updateclaim identifies the claim by txid.
        # NOTE(review): confirm lbrycrdd's updateclaim really ignores the name.
        return threads.deferToThread(self._update_name_rpc, txid, value, amount)

    def _get_claims_for_name(self, name):
        return threads.deferToThread(self._get_claims_for_name_rpc, name)

    def get_claims_from_tx(self, txid):
        return threads.deferToThread(self._get_claims_from_tx_rpc, txid)

    def _get_value_for_name(self, name):
        return threads.deferToThread(self._get_value_for_name_rpc, name)

    def _get_rpc_conn(self):
        return AuthServiceProxy(self.rpc_conn_string)

    def _start_daemon(self):
        """Launch lbrycrdd if it is not already answering RPC.

        Blocking; retries the RPC connection with exponential backoff and
        raises ValueError when the daemon cannot be launched or reached.
        """
        tries = 0
        try:
            rpc_conn = self._get_rpc_conn()
            try:
                rpc_conn.getinfo()
            except ValueError:
                log.exception('Failed to get rpc info. Rethrowing with a hopefully more useful error message')
                raise Exception('Failed to get rpc info from lbrycrdd. Try restarting lbrycrdd')
            log.info("lbrycrdd was already running when LBRYcrdWallet was started.")
            return
        except (socket.error, JSONRPCException):
            tries += 1
            # (also fixed the "lbrcyrdd" misspelling in this log message)
            log.info("lbrycrdd was not running when LBRYcrdWallet was started. Attempting to start it.")
            try:
                if os.name == "nt":
                    # Hide the daemon's console window on Windows.
                    # BUG FIX: STARTUPINFO must be instantiated -- previously
                    # the class object itself was configured and handed to
                    # Popen, which is not a valid startupinfo value.
                    si = subprocess.STARTUPINFO()
                    si.dwFlags = subprocess.STARTF_USESHOWWINDOW
                    si.wShowWindow = subprocess.SW_HIDE
                    self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                                      "-conf=%s" % self.wallet_conf], startupinfo=si)
                else:
                    if sys.platform == 'darwin':
                        os.chdir("/Applications/LBRY.app/Contents/Resources")
                    self.lbrycrdd = subprocess.Popen([self.lbrycrdd_path, "-datadir=%s" % self.wallet_dir,
                                                      "-conf=%s" % self.wallet_conf])
                self.started_lbrycrdd = True
            except OSError:
                import traceback
                log.error("Couldn't launch lbrycrdd at path %s: %s", self.lbrycrdd_path, traceback.format_exc())
                raise ValueError("Couldn't launch lbrycrdd. Tried %s" % self.lbrycrdd_path)

        while tries < 6:
            try:
                rpc_conn = self._get_rpc_conn()
                rpc_conn.getinfo()
                break
            except (socket.error, JSONRPCException):
                tries += 1
                log.warning("Failed to connect to lbrycrdd.")
                if tries < 6:
                    time.sleep(2 ** tries)
                    log.warning("Trying again in %d seconds", 2 ** tries)
                else:
                    log.warning("Giving up.")
        else:
            # while-else: all retries exhausted without hitting `break`.
            self.lbrycrdd.terminate()
            raise ValueError("Couldn't open lbrycrdd")

    def _stop_daemon(self):
        if self.lbrycrdd is not None and self.started_lbrycrdd is True:
            alert.info("Stopping lbrycrdd...")
            d = threads.deferToThread(self._stop_rpc)
            d.addCallback(lambda _: alert.info("Stopped lbrycrdd."))
            return d
        return defer.succeed(True)

    @_catch_connection_error
    def _get_balance_for_address_rpc(self, address):
        rpc_conn = self._get_rpc_conn()
        balance = rpc_conn.getreceivedbyaddress(address)
        log.debug("received balance for %s: %s", str(address), str(balance))
        return balance

    @_catch_connection_error
    def _do_send_many_rpc(self, payments):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.sendmany("", payments)

    @_catch_connection_error
    def _get_info_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getinfo()

    @_catch_connection_error
    def _get_name_claims_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.listnameclaims()

    @_catch_connection_error
    def _get_gen_status_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getgenerate()

    @_catch_connection_error
    def _set_gen_status_rpc(self, b):
        if b:
            log.info("Starting miner")
        else:
            log.info("Stopping miner")
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.setgenerate(b)

    @_catch_connection_error
    def _get_raw_tx_rpc(self, txid):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getrawtransaction(txid)

    @_catch_connection_error
    def _get_decoded_tx_rpc(self, raw):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.decoderawtransaction(raw)

    @_catch_connection_error
    def _send_abandon_rpc(self, txid, address, amount):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.abandonclaim(txid, address, amount)

    @_catch_connection_error
    def _get_blockchain_info_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getblockchaininfo()

    @_catch_connection_error
    def _get_block_rpc(self, blockhash):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getblock(blockhash)

    @_catch_connection_error
    def _get_claims_from_tx_rpc(self, txid):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimsfortx(txid)

    @_catch_connection_error
    def _get_claims_for_name_rpc(self, name):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimsforname(name)

    @_catch_connection_error
    def _get_nametrie_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getclaimtrie()

    @_catch_connection_error
    def _get_wallet_balance_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getbalance("")

    @_catch_connection_error
    def _get_new_address_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getnewaddress()

    @_catch_connection_error
    def _get_value_for_name_rpc(self, name):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getvalueforname(name)

    @_catch_connection_error
    def _update_name_rpc(self, txid, value, amount):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.updateclaim(txid, value, amount)

    @_catch_connection_error
    def _send_name_claim_rpc(self, name, value, amount):
        rpc_conn = self._get_rpc_conn()
        try:
            return str(rpc_conn.claimname(name, value, amount))
        except JSONRPCException as e:
            # Translate daemon-side errors into wallet exceptions.
            if 'message' in e.error and e.error['message'] == "Insufficient funds":
                raise InsufficientFundsError()
            elif 'message' in e.error:
                raise ValueError(e.error['message'])

    @_catch_connection_error
    def _get_num_addresses_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return len(rpc_conn.getaddressesbyaccount(""))

    @_catch_connection_error
    def _get_best_blockhash_rpc(self):
        rpc_conn = self._get_rpc_conn()
        return rpc_conn.getbestblockhash()

    @_catch_connection_error
    def _stop_rpc(self):
        # check if our lbrycrdd is actually running, or if we connected to one
        # that was already running and ours failed to start
        if self.lbrycrdd.poll() is None:
            rpc_conn = self._get_rpc_conn()
            rpc_conn.stop()
            self.lbrycrdd.wait()
class LBRYumWallet(LBRYWallet):
    """Wallet backed by the lbryum (Electrum-style) client library.

    Runs an in-process lbryum Network/Wallet rather than talking to a local
    daemon; blocking lbryum commands are dispatched through the Commands
    runner in threads via ``threads.deferToThread``.
    """

    def __init__(self, db_dir):
        LBRYWallet.__init__(self, db_dir)
        self.config = None
        self.network = None
        self.wallet = None
        self.cmd_runner = None
        self.first_run = False
        self.printed_retrieving_headers = False
        self._start_check = None      # LoopingCall polling for connection
        self._catch_up_check = None   # LoopingCall polling blockchain catch-up
        self._caught_up_counter = 0
        self._lag_counter = 0
        self.blocks_behind_alert = 0
        self.catchup_progress = 0
        self.max_behind = 0
        # FIX: previously only assigned inside the catch-up loop, so reading
        # wallet.is_lagging before catch-up raised AttributeError.
        self.is_lagging = False

    def _start(self):
        """Connect to the network, load the wallet, then build the cmd runner."""
        network_start_d = defer.Deferred()

        def setup_network():
            self.config = SimpleConfig({'auto_connect': True})
            self.network = Network(self.config)
            alert.info("Loading the wallet...")
            return defer.succeed(self.network.start())

        d = setup_network()

        def check_started():
            # Polled by a LoopingCall until the network settles one way or
            # the other; fires network_start_d accordingly.
            if self.network.is_connecting():
                if not self.printed_retrieving_headers and self.network.blockchain.retrieving_headers:
                    alert.info("Running the wallet for the first time...this may take a moment.")
                    self.printed_retrieving_headers = True
                return False
            self._start_check.stop()
            self._start_check = None
            if self.network.is_connected():
                network_start_d.callback(True)
            else:
                network_start_d.errback(ValueError("Failed to connect to network."))

        self._start_check = task.LoopingCall(check_started)

        d.addCallback(lambda _: self._start_check.start(.1))
        d.addCallback(lambda _: network_start_d)
        d.addCallback(lambda _: self._load_wallet())
        d.addCallback(lambda _: self._get_cmd_runner())
        return d

    def _stop(self):
        if self._start_check is not None:
            self._start_check.stop()
            self._start_check = None

        if self._catch_up_check is not None:
            self._catch_up_check.stop()
            self._catch_up_check = None

        d = defer.Deferred()

        def check_stopped():
            # Polled until the network reports disconnected, then fires d.
            if self.network:
                if self.network.is_connected():
                    return False
            stop_check.stop()
            self.network = None
            d.callback(True)

        if self.network:
            self.network.stop()

        stop_check = task.LoopingCall(check_stopped)
        stop_check.start(.1)
        return d

    def _load_wallet(self):
        """Open (or create on first run) the wallet file and wait for the
        blockchain to catch up; returns a deferred firing when caught up."""

        def get_wallet():
            path = self.config.get_wallet_path()
            storage = WalletStorage(path)
            wallet = Wallet(storage)
            if not storage.file_exists:
                # Brand new wallet: generate a seed and derive the keys.
                self.first_run = True
                seed = wallet.make_seed()
                wallet.add_seed(seed, None)
                wallet.create_master_keys(None)
                wallet.create_main_account()
                wallet.synchronize()
            self.wallet = wallet

        blockchain_caught_d = defer.Deferred()

        def check_caught_up():
            local_height = self.network.get_catchup_progress()
            remote_height = self.network.get_server_height()

            if remote_height != 0 and remote_height - local_height <= 5:
                msg = ""
                if self._caught_up_counter != 0:
                    msg += "All caught up. "
                msg += "Wallet loaded."
                alert.info(msg)
                self._catch_up_check.stop()
                self._catch_up_check = None
                blockchain_caught_d.callback(True)
            elif remote_height != 0:
                past_blocks_behind = self.blocks_behind_alert
                self.blocks_behind_alert = remote_height - local_height
                if self.blocks_behind_alert < past_blocks_behind:
                    self._lag_counter = 0
                    self.is_lagging = False
                else:
                    self._lag_counter += 1
                    # ~90s of polls (at .1s) without progress => lagging
                    if self._lag_counter >= 900:
                        self.is_lagging = True
                if self.blocks_behind_alert > self.max_behind:
                    self.max_behind = self.blocks_behind_alert
                # BUG FIX: force float division -- under Python 2 the integer
                # division truncated this to 0 in essentially every case.
                self.catchup_progress = int(100.0 * self.blocks_behind_alert / (5 + self.max_behind))
                if self._caught_up_counter == 0:
                    alert.info('Catching up with the blockchain...showing blocks left...')
                if self._caught_up_counter % 30 == 0:
                    alert.info('%d...', (remote_height - local_height))
                self._caught_up_counter += 1

        self._catch_up_check = task.LoopingCall(check_caught_up)

        d = threads.deferToThread(get_wallet)
        d.addCallback(self._save_wallet)
        d.addCallback(lambda _: self.wallet.start_threads(self.network))
        d.addCallback(lambda _: self._catch_up_check.start(.1))
        d.addCallback(lambda _: blockchain_caught_d)
        return d

    def _get_cmd_runner(self):
        self.cmd_runner = Commands(self.config, self.wallet, self.network)

    def get_balance(self):
        cmd = known_commands['getbalance']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func)
        # Prefer the unmatured figure when present so freshly mined coins count.
        d.addCallback(lambda result: result['unmatured'] if 'unmatured' in result else result['confirmed'])
        d.addCallback(Decimal)
        return d

    def _update_name(self, name, txid, value, amount):
        serialized_metadata = Metadata(value).serialize()
        d = self.get_claimid(name, txid)
        d.addCallback(lambda claim_id: self._send_claim_update(txid, amount, name, claim_id, serialized_metadata))
        return d

    def get_new_address(self):
        d = threads.deferToThread(self.wallet.create_new_address)
        d.addCallback(self._save_wallet)
        return d

    def get_block(self, blockhash):
        cmd = known_commands['getblock']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, blockhash)

    def get_most_recent_blocktime(self):
        header = self.network.get_header(self.network.get_local_height())
        return defer.succeed(header['timestamp'])

    def get_best_blockhash(self):
        height = self.network.get_local_height()
        d = threads.deferToThread(self.network.blockchain.read_header, height)
        d.addCallback(lambda header: self.network.blockchain.hash_header(header))
        return d

    def get_name_claims(self):
        cmd = known_commands['getnameclaims']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def _check_first_run(self):
        return defer.succeed(self.first_run)

    def _get_raw_tx(self, txid):
        cmd = known_commands['gettransaction']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, txid)

    def _send_name_claim(self, name, val, amount):
        def send_claim(address):
            cmd = known_commands['claimname']
            func = getattr(self.cmd_runner, cmd.name)
            return threads.deferToThread(func, address, amount, name, val)
        d = self.get_new_address()
        d.addCallback(send_claim)
        d.addCallback(self._broadcast_transaction)
        return d

    def _get_claims_for_name(self, name):
        cmd = known_commands['getclaimsforname']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, name)

    def _send_claim_update(self, txid, amount, name, claim_id, val):
        def send_claim(address):
            cmd = known_commands['updateclaim']
            func = getattr(self.cmd_runner, cmd.name)
            return threads.deferToThread(func, txid, address, amount, name, claim_id, val)
        log.info("Update lbry://%s %s %f %s %s" % (name, txid, amount, claim_id, val))
        d = self.get_new_address()
        d.addCallback(send_claim)
        d.addCallback(self._broadcast_transaction)
        # BUG FIX: the deferred was not returned, so callers (e.g.
        # _update_name) could never observe completion or failure.
        return d

    def _get_decoded_tx(self, raw_tx):
        """Minimal local decode: just the output values, in LBC."""
        tx = Transaction(raw_tx)
        decoded_tx = {}
        decoded_tx['vout'] = []
        for output in tx.outputs():
            out = {}
            out['value'] = Decimal(output[2]) / Decimal(COIN)
            decoded_tx['vout'].append(out)
        return decoded_tx

    def _send_abandon(self, txid, address, amount):
        log.info("Abandon %s %s %f" % (txid, address, amount))
        cmd = known_commands['abandonclaim']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func, txid, address, amount)
        d.addCallback(self._broadcast_transaction)
        return d

    def _broadcast_transaction(self, raw_tx):
        log.info("Broadcast: %s" % str(raw_tx))
        cmd = known_commands['broadcast']
        func = getattr(self.cmd_runner, cmd.name)
        d = threads.deferToThread(func, raw_tx)
        d.addCallback(self._save_wallet)
        return d

    def _do_send_many(self, payments_to_send):
        log.warning("Doing send many. payments to send: %s", str(payments_to_send))
        cmd = known_commands['paytomanyandsend']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, payments_to_send.iteritems())

    def _get_value_for_name(self, name):
        cmd = known_commands['getvalueforname']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, name)

    def get_claims_from_tx(self, txid):
        cmd = known_commands['getclaimsfromtx']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, txid)

    def _get_balance_for_address(self, address):
        return defer.succeed(Decimal(self.wallet.get_addr_received(address)) / COIN)

    def get_nametrie(self):
        cmd = known_commands['getclaimtrie']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def get_history(self):
        cmd = known_commands['history']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func)

    def get_tx_json(self, txid):
        def _decode(raw_tx):
            # Deserialize and convert Decimal amounts to plain floats so the
            # result is JSON-serializable.
            tx = Transaction(raw_tx).deserialize()
            decoded_tx = {}
            for txkey in tx.keys():
                if isinstance(tx[txkey], list):
                    decoded_tx[txkey] = []
                    for i in tx[txkey]:
                        tmp = {}
                        for k in i.keys():
                            if isinstance(i[k], Decimal):
                                tmp[k] = float(i[k] / 1e8)
                            else:
                                tmp[k] = i[k]
                        decoded_tx[txkey].append(tmp)
                else:
                    decoded_tx[txkey] = tx[txkey]
            return decoded_tx

        d = self._get_raw_tx(txid)
        d.addCallback(_decode)
        return d

    def get_pub_keys(self, wallet):
        cmd = known_commands['getpubkeys']
        func = getattr(self.cmd_runner, cmd.name)
        return threads.deferToThread(func, wallet)

    def _save_wallet(self, val):
        # Persist wallet storage, then pass the incoming value through so this
        # can be dropped into any callback chain.
        d = threads.deferToThread(self.wallet.storage.write)
        d.addCallback(lambda _: val)
        return d
class LBRYcrdAddressRequester(object):
    """Asks each peer (once per protocol) for a payment address and records
    the response in the wallet."""

    implements([IRequestCreator])

    def __init__(self, wallet):
        self.wallet = wallet
        self._protocols = []

    ######### IRequestCreator #########

    def send_next_request(self, peer, protocol):
        """Send an address request unless this protocol was already asked."""
        if protocol in self._protocols:
            return defer.succeed(False)
        request = ClientRequest({'lbrycrd_address': True}, 'lbrycrd_address')
        d = protocol.add_request(request)
        d.addCallback(self._handle_address_response, peer, request, protocol)
        d.addErrback(self._request_failed, peer)
        self._protocols.append(protocol)
        return defer.succeed(True)

    ######### internal calls #########

    def _handle_address_response(self, response_dict, peer, request, protocol):
        """Store the address the peer answered with."""
        assert request.response_identifier in response_dict, \
            "Expected %s in dict but did not get it" % request.response_identifier
        assert protocol in self._protocols, "Responding protocol is not in our list of protocols"
        address = response_dict[request.response_identifier]
        self.wallet.update_peer_address(peer, address)

    def _request_failed(self, err, peer):
        """Log unexpected failures; pass every error on unchanged."""
        if not err.check(RequestCanceledError):
            log.warning("A peer failed to send a valid public key response. Error: %s, peer: %s",
                        err.getErrorMessage(), str(peer))
        return err
class LBRYcrdAddressQueryHandlerFactory(object):
    """Builds LBRYcrdAddressQueryHandler instances bound to our wallet."""

    implements(IQueryHandlerFactory)

    def __init__(self, wallet):
        self.wallet = wallet

    ######### IQueryHandlerFactory #########

    def build_query_handler(self):
        return LBRYcrdAddressQueryHandler(self.wallet)

    def get_primary_query_identifier(self):
        return 'lbrycrd_address'

    def get_description(self):
        return "LBRYcrd Address - an address for receiving payments via LBRYcrd"
class LBRYcrdAddressQueryHandler(object):
    """Answers 'lbrycrd_address' queries with a fresh address for the peer."""

    implements(IQueryHandler)

    def __init__(self, wallet):
        self.wallet = wallet
        self.query_identifiers = ['lbrycrd_address']
        self.address = None
        self.peer = None

    ######### IQueryHandler #########

    def register_with_request_handler(self, request_handler, peer):
        self.peer = peer
        request_handler.register_query_handler(self, self.query_identifiers)

    def handle_queries(self, queries):

        def create_response(address):
            # Remember the address so later query batches succeed quietly.
            self.address = address
            return {'lbrycrd_address': address}

        if self.query_identifiers[0] in queries:
            d = self.wallet.get_new_address_for_peer(self.peer)
            d.addCallback(create_response)
            return d
        if self.address is None:
            # No address query yet and none previously answered: protocol error.
            log.warning("Expected a request for an address, but did not receive one")
            return defer.fail(Failure(ValueError("Expected but did not receive an address request")))
        return defer.succeed({})
|
""" Functions to construct sparse matrices
"""
__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum', 'bmat' ]
from itertools import izip
from warnings import warn
import numpy
from numpy import ones, clip, array, arange, intc, asarray, rank, zeros, \
cumsum, concatenate, empty
from sputils import upcast
from csr import csr_matrix, isspmatrix_csr
from csc import csc_matrix, isspmatrix_csc
from bsr import bsr_matrix
from coo import coo_matrix
from dok import dok_matrix
from lil import lil_matrix
from dia import dia_matrix
from base import isspmatrix
def spdiags(data, diags, m, n, format=None):
    """Construct a sparse matrix from its diagonals.

    B = spdiags(diags, offsets, m, n)

    Parameters
    ==========
        - data : matrix whose rows contain the diagonal values
        - diags : diagonals to set
            - k = 0 - the main diagonal
            - k > 0 - the k-th upper diagonal
            - k < 0 - the k-th lower diagonal
        - m, n : dimensions of the result
        - format : format of the result (e.g. "csr")
            - By default (format=None) an appropriate sparse matrix
              format is returned.  This choice is subject to change.

    See Also
    ========
        The dia_matrix class which implements the DIAgonal format.

    Example
    =======
    >>> data = array([[1,2,3,4]]).repeat(3,axis=0)
    >>> diags = array([0,-1,2])
    >>> spdiags(data,diags,4,4).todense()
    matrix([[1, 0, 3, 0],
            [1, 2, 0, 4],
            [0, 2, 3, 0],
            [0, 0, 3, 4]])
    """
    # Build in DIA form, then convert to the requested format (None is a no-op).
    result = dia_matrix((data, diags), shape=(m, n))
    return result.asformat(format)
def identity(n, dtype='d', format=None):
    """identity(n) returns a sparse (n x n) identity matrix

    The matrix is built directly in 'csr', 'csc' or 'coo' form; any other
    format is produced by converting from CSR.
    """
    if format in ['csr', 'csc']:
        indptr = arange(n+1, dtype=intc)
        indices = arange(n, dtype=intc)
        data = ones(n, dtype=dtype)
        # IMPROVED: select the constructor explicitly instead of eval()'ing
        # a generated class name.
        cls = csr_matrix if format == 'csr' else csc_matrix
        return cls((data, indices, indptr), (n, n))
    elif format == 'coo':
        row = arange(n, dtype=intc)
        col = arange(n, dtype=intc)
        data = ones(n, dtype=dtype)
        return coo_matrix((data, (row, col)), (n, n))
    else:
        # Fall back: build CSR then convert (handles format=None too).
        return identity(n, dtype=dtype, format='csr').asformat(format)
def eye(m, n, k=0, dtype='d', format=None):
"""eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
is all ones and everything else is zeros.
"""
diags = ones((1, m), dtype=dtype)
return spdiags(diags, k, m, n).asformat(format)
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ==========
        A,B    : dense or sparse matrices
        format : format of the result (e.g. "csr")
            -  By default (format=None) an appropriate sparse matrix
               format is returned.  This choice is subject to change.

    Returns
    =======
        kronecker product in a sparse matrix format

    Examples
    ========

    >>> A = csr_matrix(array([[0,2],[5,0]]))
    >>> B = csr_matrix(array([[1,2],[3,4]]))
    >>> kron(A,B).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])

    >>> kron(A,[[1,2],[3,4]]).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])

    """
    B = coo_matrix(B)

    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        # B is fairly dense, use BSR
        A = csr_matrix(A,copy=True)

        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix( output_shape )

        # Each nonzero of A becomes a dense block equal to a_ij * B.
        B = B.toarray()
        data = A.data.repeat(B.size).reshape(-1,B.shape[0],B.shape[1])
        data = data * B

        return bsr_matrix((data,A.indices,A.indptr), shape=output_shape)
    else:
        # use COO
        A = coo_matrix(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix( output_shape )

        # expand entries of a into blocks
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)

        # scale to block origins (in-place; row/col are fresh copies from repeat)
        row *= B.shape[0]
        col *= B.shape[1]

        # increment block indices
        row,col = row.reshape(-1,B.nnz), col.reshape(-1,B.nnz)
        row += B.row
        col += B.col
        row,col = row.reshape(-1), col.reshape(-1)

        # compute block entries
        data = data.reshape(-1,B.nnz) * B.data
        data = data.reshape(-1)

        return coo_matrix((data,(row,col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    Kronecker sum of two sparse matrices is a sum of two Kronecker
    products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
    and B has shape (n,n) and I_m and I_n are identity matrices
    of shape (m,m) and (n,n) respectively.

    Parameters
    ==========
        A,B    : squared dense or sparse matrices
        format : format of the result (e.g. "csr")
            -  By default (format=None) an appropriate sparse matrix
               format is returned.  This choice is subject to change.

    Returns
    =======
        kronecker sum in a sparse matrix format

    Examples
    ========

    """
    A = coo_matrix(A)
    B = coo_matrix(B)

    if A.shape[0] != A.shape[1]:
        raise ValueError('A is not square')
    if B.shape[0] != B.shape[1]:
        raise ValueError('B is not square')

    dtype = upcast(A.dtype, B.dtype)

    left_term = kron(identity(B.shape[0], dtype=dtype), A, format=format)
    right_term = kron(B, identity(A.shape[0], dtype=dtype), format=format)

    # The sum may come back in a different format, so coerce once more.
    return (left_term + right_term).asformat(format)
def bmat( blocks, format=None, dtype=None ):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ==========

    blocks -- grid of sparse matrices with compatible shapes
        - an entry of None implies an all-zero matrix
    format -- sparse format of the result (e.g. "csr")
        -  by default an appropriate sparse matrix format is returned.
           This choice is subject to change.

    Example
    =======

    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> C = coo_matrix([[7]])
    >>> bmat( [[A,B],[None,C]] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6],
            [0, 0, 7]])

    >>> bmat( [[A,None],[None,C]] ).todense()
    matrix([[1, 2, 0],
            [3, 4, 0],
            [0, 0, 7]])

    """
    blocks = asarray(blocks, dtype='object')

    if rank(blocks) != 2:
        raise ValueError('blocks must have rank 2')

    M,N = blocks.shape

    block_mask   = zeros( blocks.shape,    dtype='bool' )
    brow_lengths = zeros( blocks.shape[0], dtype=int )
    bcol_lengths = zeros( blocks.shape[1], dtype=int )

    # convert everything to COO format and record block row/column sizes
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_matrix(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True

                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                else:
                    if brow_lengths[i] != A.shape[0]:
                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)

                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                else:
                    # BUG FIX: the column check compared against A.shape[0]
                    # (the row count), so mismatched column widths slipped
                    # through and square blocks of the wrong width failed.
                    if bcol_lengths[j] != A.shape[1]:
                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)

    # ensure that at least one value in each row and col is not None
    if brow_lengths.min() == 0:
        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )
    if bcol_lengths.min() == 0:
        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )

    nnz = sum([ A.nnz for A in blocks[block_mask] ])
    if dtype is None:
        dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )

    # offset of each block row/column within the assembled matrix
    row_offsets = concatenate(([0], cumsum(brow_lengths)))
    col_offsets = concatenate(([0], cumsum(bcol_lengths)))

    data = empty(nnz, dtype=dtype)
    row  = empty(nnz, dtype=intc)
    col  = empty(nnz, dtype=intc)

    # copy each block's triplets, shifted by its block offsets
    nnz = 0
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = blocks[i,j]
                data[nnz:nnz + A.nnz] = A.data
                row[nnz:nnz + A.nnz]  = A.row
                col[nnz:nnz + A.nnz]  = A.col

                row[nnz:nnz + A.nnz] += row_offsets[i]
                col[nnz:nnz + A.nnz] += col_offsets[j]

                nnz += A.nnz

    shape = (sum(brow_lengths), sum(bcol_lengths))
    return coo_matrix( (data, (row, col)), shape=shape ).asformat(format)
#################################
# Deprecated functions
################################

__all__ += [ 'speye','spidentity', 'spkron', 'lil_eye', 'lil_diags' ]

from numpy import deprecate

# Old sp*-prefixed names kept as deprecated aliases of the new functions.
spkron = deprecate(kron, oldname='spkron', newname='kron')
speye = deprecate(eye, oldname='speye', newname='eye')
# BUG FIX: oldname was misspelled 'spidenitiy', so the deprecation warning
# named a function that does not exist.
spidentity = deprecate(identity, oldname='spidentity', newname='identity')
# NOTE: Python 2 only -- the tuple parameter (r,c) is invalid syntax in
# Python 3.  Deprecated wrapper around eye(..., format='lil').
def lil_eye((r,c), k=0, dtype='d'):
    """Generate a lil_matrix of dimensions (r,c) with the k-th
    diagonal set to 1.

    Parameters
    ==========
        - r,c : int
            - row and column-dimensions of the output.
        - k : int
            - diagonal offset.  In the output matrix,
            - out[m,m+k] == 1 for all m.
        - dtype : dtype
            - data-type of the output array.

    """
    # NOTE(review): the two literals concatenate to "deprecated.use" -- a
    # missing space in the warning text; left as-is (runtime string).
    warn("lil_eye is deprecated." \
            "use scipy.sparse.eye(r, c, k, format='lil') instead", \
            DeprecationWarning)
    return eye(r,c,k,dtype=dtype,format='lil')
#TODO remove this function
# NOTE: Python 2 only -- the tuple parameter (m,n) is invalid syntax in
# Python 3.
def lil_diags(diags,offsets,(m,n),dtype='d'):
    """Generate a lil_matrix with the given diagonals.

    Parameters
    ==========
        - diags : list of list of values e.g. [[1,2,3],[4,5]]
            - values to be placed on each indicated diagonal.
        - offsets : list of ints
            - diagonal offsets.  This indicates the diagonal on which
              the given values should be placed.
        - (r,c) : tuple of ints
            - row and column dimensions of the output.
        - dtype : dtype
            - output data-type.

    Example
    =======

    >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense()
    matrix([[ 1.,  4.,  6.],
            [ 0.,  2.,  5.],
            [ 0.,  0.,  3.]])

    """
    offsets_unsorted = list(offsets)
    diags_unsorted = list(diags)
    if len(diags) != len(offsets):
        raise ValueError("Number of diagonals provided should "
                         "agree with offsets.")

    # Process diagonals in order of increasing offset.
    sort_indices = numpy.argsort(offsets_unsorted)
    diags = [diags_unsorted[k] for k in sort_indices]
    offsets = [offsets_unsorted[k] for k in sort_indices]

    for i,k in enumerate(offsets):
        if len(diags[i]) < m-abs(k):
            raise ValueError("Not enough values specified to fill "
                             "diagonal %s." % k)

    out = lil_matrix((m,n),dtype=dtype)
    for k,diag in izip(offsets,diags):
        # Walk the columns covered by diagonal k, clipped to the matrix bounds.
        for ix,c in enumerate(xrange(clip(k,0,n),clip(m+k,0,n))):
            out.rows[c-k].append(c)
            out.data[c-k].append(diag[ix])
    return out
Fixed a typo in a deprecation string (``spidenitiy`` corrected to ``spidentity``).
""" Functions to construct sparse matrices
"""
__all__ = [ 'spdiags', 'eye', 'identity', 'kron', 'kronsum', 'bmat' ]
from itertools import izip
from warnings import warn
import numpy
from numpy import ones, clip, array, arange, intc, asarray, rank, zeros, \
cumsum, concatenate, empty
from sputils import upcast
from csr import csr_matrix, isspmatrix_csr
from csc import csc_matrix, isspmatrix_csc
from bsr import bsr_matrix
from coo import coo_matrix
from dok import dok_matrix
from lil import lil_matrix
from dia import dia_matrix
from base import isspmatrix
def spdiags(data, diags, m, n, format=None):
    """Return a sparse (m x n) matrix whose diagonals are given.

    B = spdiags(diags, offsets, m, n)

    Parameters
    ==========
        - data : matrix whose rows contain the diagonal values
        - diags : diagonals to set
            - k = 0 - the main diagonal
            - k > 0 - the k-th upper diagonal
            - k < 0 - the k-th lower diagonal
        - m, n : dimensions of the result
        - format : format of the result (e.g. "csr")
            - By default (format=None) an appropriate sparse matrix
              format is returned.  This choice is subject to change.

    See Also
    ========
        The dia_matrix class which implements the DIAgonal format.

    Example
    =======
    >>> data = array([[1,2,3,4]]).repeat(3,axis=0)
    >>> diags = array([0,-1,2])
    >>> spdiags(data,diags,4,4).todense()
    matrix([[1, 0, 3, 0],
            [1, 2, 0, 4],
            [0, 2, 3, 0],
            [0, 0, 3, 4]])
    """
    shape = (m, n)
    # DIA is the natural container; asformat(None) simply returns it.
    return dia_matrix((data, diags), shape=shape).asformat(format)
def identity(n, dtype='d', format=None):
    """identity(n) returns a sparse (n x n) identity matrix

    The matrix is built directly in 'csr', 'csc' or 'coo' form; any other
    format is produced by converting from CSR.
    """
    if format in ['csr', 'csc']:
        indptr = arange(n+1, dtype=intc)
        indices = arange(n, dtype=intc)
        data = ones(n, dtype=dtype)
        # IMPROVED: pick the constructor explicitly rather than eval()'ing
        # a generated class name.
        cls = csr_matrix if format == 'csr' else csc_matrix
        return cls((data, indices, indptr), (n, n))
    elif format == 'coo':
        row = arange(n, dtype=intc)
        col = arange(n, dtype=intc)
        data = ones(n, dtype=dtype)
        return coo_matrix((data, (row, col)), (n, n))
    else:
        # Fall back: build CSR then convert (handles format=None too).
        return identity(n, dtype=dtype, format='csr').asformat(format)
def eye(m, n, k=0, dtype='d', format=None):
    """eye(m, n) returns a sparse (m x n) matrix where the k-th diagonal
    is all ones and everything else is zeros.
    """
    diag_values = ones((1, m), dtype=dtype)
    result = spdiags(diag_values, k, m, n)
    return result.asformat(format)
def kron(A, B, format=None):
    """kronecker product of sparse matrices A and B

    Parameters
    ==========
        A,B : dense or sparse matrices
        format : format of the result (e.g. "csr")
            - By default (format=None) an appropriate sparse matrix
              format is returned.  This choice is subject to change.

    Returns
    =======
        kronecker product in a sparse matrix format

    Examples
    ========
    >>> A = csr_matrix(array([[0,2],[5,0]]))
    >>> B = csr_matrix(array([[1,2],[3,4]]))
    >>> kron(A,B).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])

    >>> kron(A,[[1,2],[3,4]]).todense()
    matrix([[ 0,  0,  2,  4],
            [ 0,  0,  6,  8],
            [ 5, 10,  0,  0],
            [15, 20,  0,  0]])
    """
    B = coo_matrix(B)

    if (format is None or format == "bsr") and 2*B.nnz >= B.shape[0] * B.shape[1]:
        # B is fairly dense (at least half its entries stored): use BSR,
        # where each stored entry of A becomes one dense block (B scaled
        # by that entry).
        A = csr_matrix(A, copy=True)

        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape)

        B = B.toarray()
        # one dense (B.shape) block per stored entry of A, scaled elementwise
        data = A.data.repeat(B.size).reshape(-1, B.shape[0], B.shape[1])
        data = data * B

        return bsr_matrix((data, A.indices, A.indptr), shape=output_shape)
    else:
        # B is sparse: compute the product directly in COO coordinates
        A = coo_matrix(A)
        output_shape = (A.shape[0]*B.shape[0], A.shape[1]*B.shape[1])

        if A.nnz == 0 or B.nnz == 0:
            # kronecker product is the zero matrix
            return coo_matrix(output_shape)

        # expand entries of A into blocks: one copy of B's pattern per entry
        row = A.row.repeat(B.nnz)
        col = A.col.repeat(B.nnz)
        data = A.data.repeat(B.nnz)

        # scale A's coordinates up to the origin of each block
        row *= B.shape[0]
        col *= B.shape[1]

        # increment block indices by B's coordinates (broadcast per block)
        row, col = row.reshape(-1, B.nnz), col.reshape(-1, B.nnz)
        row += B.row
        col += B.col
        row, col = row.reshape(-1), col.reshape(-1)

        # compute block entries: each A value times every B value
        data = data.reshape(-1, B.nnz) * B.data
        data = data.reshape(-1)

        return coo_matrix((data, (row, col)), shape=output_shape).asformat(format)
def kronsum(A, B, format=None):
    """kronecker sum of sparse matrices A and B

    Kronecker sum of two sparse matrices is a sum of two Kronecker
    products kron(I_n,A) + kron(B,I_m) where A has shape (m,m)
    and B has shape (n,n) and I_m and I_n are identity matrices
    of shape (m,m) and (n,n) respectively.

    Parameters
    ==========
        A,B : square dense or sparse matrices
        format : format of the result (e.g. "csr")
            - By default (format=None) an appropriate sparse matrix
              format is returned.  This choice is subject to change.

    Returns
    =======
        kronecker sum in a sparse matrix format
    """
    A = coo_matrix(A)
    B = coo_matrix(B)

    m_A, n_A = A.shape
    m_B, n_B = B.shape
    if m_A != n_A:
        raise ValueError('A is not square')
    if m_B != n_B:
        raise ValueError('B is not square')

    dtype = upcast(A.dtype, B.dtype)

    left = kron(identity(m_B, dtype=dtype), A, format=format)
    right = kron(B, identity(m_A, dtype=dtype), format=format)

    # L + R is not guaranteed to come back in `format`, so coerce explicitly
    return (left + right).asformat(format)
def bmat( blocks, format=None, dtype=None ):
    """
    Build a sparse matrix from sparse sub-blocks

    Parameters
    ==========
        blocks -- grid of sparse matrices with compatible shapes
            - an entry of None implies an all-zero matrix
        format -- sparse format of the result (e.g. "csr")
            - by default an appropriate sparse matrix format is returned.
              This choice is subject to change.
        dtype  -- dtype of the result; inferred from the blocks when None

    Example
    =======
    >>> from scipy.sparse import coo_matrix, bmat
    >>> A = coo_matrix([[1,2],[3,4]])
    >>> B = coo_matrix([[5],[6]])
    >>> C = coo_matrix([[7]])
    >>> bmat( [[A,B],[None,C]] ).todense()
    matrix([[1, 2, 5],
            [3, 4, 6],
            [0, 0, 7]])

    >>> bmat( [[A,None],[None,C]] ).todense()
    matrix([[1, 2, 0],
            [3, 4, 0],
            [0, 0, 7]])
    """
    blocks = asarray(blocks, dtype='object')

    # .ndim replaces the deprecated/removed numpy.rank()
    if blocks.ndim != 2:
        raise ValueError('blocks must have rank 2')

    M,N = blocks.shape

    block_mask   = zeros( blocks.shape, dtype='bool' )
    brow_lengths = zeros( blocks.shape[0], dtype=int )
    bcol_lengths = zeros( blocks.shape[1], dtype=int )

    # convert everything to COO format and validate block shapes
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = coo_matrix(blocks[i,j])
                blocks[i,j] = A
                block_mask[i,j] = True

                if brow_lengths[i] == 0:
                    brow_lengths[i] = A.shape[0]
                else:
                    if brow_lengths[i] != A.shape[0]:
                        raise ValueError('blocks[%d,:] has incompatible row dimensions' % i)

                if bcol_lengths[j] == 0:
                    bcol_lengths[j] = A.shape[1]
                else:
                    # BUG FIX: this compared against A.shape[0] (the row
                    # count), so non-square blocks in a valid grid were
                    # rejected and mismatched columns could slip through.
                    if bcol_lengths[j] != A.shape[1]:
                        raise ValueError('blocks[:,%d] has incompatible column dimensions' % j)

    # ensure that at least one value in each row and col is not None
    if brow_lengths.min() == 0:
        raise ValueError('blocks[%d,:] is all None' % brow_lengths.argmin() )
    if bcol_lengths.min() == 0:
        raise ValueError('blocks[:,%d] is all None' % bcol_lengths.argmin() )

    nnz = sum([ A.nnz for A in blocks[block_mask] ])
    if dtype is None:
        dtype = upcast( *tuple([A.dtype for A in blocks[block_mask]]) )

    # offsets of each block's origin inside the assembled matrix
    row_offsets = concatenate(([0],cumsum(brow_lengths)))
    col_offsets = concatenate(([0],cumsum(bcol_lengths)))

    data = empty(nnz, dtype=dtype)
    row  = empty(nnz, dtype=intc)
    col  = empty(nnz, dtype=intc)

    # copy each block's COO triplets, shifted by the block's origin
    nnz = 0
    for i in range(M):
        for j in range(N):
            if blocks[i,j] is not None:
                A = blocks[i,j]
                data[nnz:nnz + A.nnz] = A.data
                row[nnz:nnz + A.nnz]  = A.row
                col[nnz:nnz + A.nnz]  = A.col

                row[nnz:nnz + A.nnz] += row_offsets[i]
                col[nnz:nnz + A.nnz] += col_offsets[j]

                nnz += A.nnz

    shape = (sum(brow_lengths),sum(bcol_lengths))
    return coo_matrix( (data, (row, col)), shape=shape ).asformat(format)
#################################
# Deprecated functions
################################

__all__ += [ 'speye','spidentity', 'spkron', 'lil_eye', 'lil_diags' ]

from numpy import deprecate

# Old public names kept for backward compatibility; each wrapper emits a
# DeprecationWarning pointing callers at the new name.
spkron = deprecate(kron, oldname='spkron', newname='kron')
speye = deprecate(eye, oldname='speye', newname='eye')
spidentity = deprecate(identity, oldname='spidentity', newname='identity')
def lil_eye(shape, k=0, dtype='d'):
    """Generate a lil_matrix of dimensions (r,c) with the k-th
    diagonal set to 1.

    Parameters
    ==========
        - shape : (r, c) tuple
            - row and column-dimensions of the output.
        - k : int
            - diagonal offset.  In the output matrix,
              out[m,m+k] == 1 for all m.
        - dtype : dtype
            - data-type of the output array.

    Notes
    =====
    The original signature used a Python 2 tuple parameter
    ``def lil_eye((r,c), ...)``; unpacking it in the body keeps the call
    syntax identical while remaining valid on Python 3.
    """
    r, c = shape
    # FIX: the two string fragments previously concatenated to
    # "deprecated.use" with no separating space.
    warn("lil_eye is deprecated. "
         "use scipy.sparse.eye(r, c, k, format='lil') instead",
         DeprecationWarning)
    return eye(r, c, k, dtype=dtype, format='lil')
#TODO remove this function
def lil_diags(diags, offsets, shape, dtype='d'):
    """Generate a lil_matrix with the given diagonals.

    Parameters
    ==========
        - diags : list of list of values e.g. [[1,2,3],[4,5]]
            - values to be placed on each indicated diagonal.
        - offsets : list of ints
            - diagonal offsets.  This indicates the diagonal on which
              the given values should be placed.
        - shape : (r, c) tuple of ints
            - row and column dimensions of the output.
        - dtype : dtype
            - output data-type.

    Notes
    =====
    The original signature used a Python 2 tuple parameter ``(m,n)``;
    unpacking it in the body keeps the call syntax identical while
    remaining valid on Python 3 (``izip``/``xrange`` likewise become
    ``zip``/``range`` with identical behavior).

    Example
    =======
    >>> lil_diags([[1,2,3],[4,5],[6]],[0,1,2],(3,3)).todense()
    matrix([[ 1.,  4.,  6.],
            [ 0.,  2.,  5.],
            [ 0.,  0.,  3.]])
    """
    m, n = shape
    offsets_unsorted = list(offsets)
    diags_unsorted = list(diags)
    if len(diags) != len(offsets):
        raise ValueError("Number of diagonals provided should "
                         "agree with offsets.")

    # process the diagonals in order of increasing offset so that the
    # column lists of the lil_matrix stay sorted
    sort_indices = numpy.argsort(offsets_unsorted)
    diags = [diags_unsorted[k] for k in sort_indices]
    offsets = [offsets_unsorted[k] for k in sort_indices]

    for i, k in enumerate(offsets):
        if len(diags[i]) < m - abs(k):
            raise ValueError("Not enough values specified to fill "
                             "diagonal %s." % k)

    out = lil_matrix((m, n), dtype=dtype)
    for k, diag in zip(offsets, diags):
        # clip keeps the diagonal inside the matrix for any offset sign
        for ix, c in enumerate(range(clip(k, 0, n), clip(m + k, 0, n))):
            out.rows[c - k].append(c)
            out.data[c - k].append(diag[ix])
    return out
|
# Dependencies: flask, tornado
from __future__ import absolute_import, division, print_function
# HTTP / HTML
import tornado.wsgi
import tornado.httpserver
import flask
from flask import request, redirect, url_for, make_response
import optparse
import logging
import socket
import simplejson as json
# IBEIS
import ibeis
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species
import utool as ut
# Web Internal
from ibeis.web import appfuncs as ap
# Others
import ibeis.constants as const
import random
BROWSER = ut.get_argflag('--browser')
DEFAULT_PORT = 5000
app = flask.Flask(__name__)
################################################################################
def encounter_image_processed(gid_list):
    """Return one bool per gid: True when the image's reviewed flag == 1."""
    review_flags = app.ibs.get_image_reviewed(gid_list)
    return [flag == 1 for flag in review_flags]
def encounter_annot_viewpoint_processed(aid_list):
    """Return one bool per aid: True when the annotation has a yaw set."""
    yaw_list = app.ibs.get_annot_yaws(aid_list)
    return [yaw is not None for yaw in yaw_list]
def encounter_annot_quality_processed(aid_list):
    """Return one bool per aid: True when a quality is set and is not -1.

    BUG FIX: the original used ``reviewed is not -1``; identity comparison
    with an int literal relies on CPython's small-int cache and is flagged
    as a SyntaxWarning on modern Pythons.  Use ``!=`` for value comparison.
    """
    quality_list = app.ibs.get_annot_qualities(aid_list)
    return [quality is not None and quality != -1 for quality in quality_list]
################################################################################
# @app.after_request
# def add_header(response):
# response.headers['Cache-Control'] = 'public, max-age=%d' % (60 * 60 * 24, )
# return response
@app.route('/')
def root():
    """Render the site's landing page (default template)."""
    return ap.template(None)
@app.route('/view')
def view():
    """Render the top-level 'view' dashboard: id lists and counts for all
    encounters (eids), images (gids), annotations (aids), and names (nids)."""
    eids = app.ibs.get_valid_eids()
    gids = app.ibs.get_valid_gids()
    aids = app.ibs.get_valid_aids()
    nids = app.ibs.get_valid_nids()

    def _as_csv(id_list):
        # comma-separated string form used by the template's links
        return ','.join(map(str, id_list))

    return ap.template('view',
                       eid_list=eids,
                       eid_list_str=_as_csv(eids),
                       num_eids=len(eids),
                       gid_list=gids,
                       gid_list_str=_as_csv(gids),
                       num_gids=len(gids),
                       aid_list=aids,
                       aid_list_str=_as_csv(aids),
                       num_aids=len(aids),
                       nid_list=nids,
                       nid_list_str=_as_csv(nids),
                       num_nids=len(nids))
@app.route('/view/encounters')
def view_encounters():
    """Render the encounter list page with per-encounter review progress.

    Query arg ``eid`` (comma separated) filters which encounters are shown;
    otherwise every valid encounter is listed.  For each encounter, counts of
    reviewed images / viewpoint-reviewed annots / quality-reviewed annots are
    computed so the template can show progress.
    """
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        # parse comma-separated ids; the literal 'None' maps to Python None
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = app.ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = app.ibs.get_encounter_start_time_posix(eid_list)
    datetime_list = [
        ut.unixtime_to_datetime(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    gids_list = [ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ app.ibs.get_valid_aids(include_only_gid_list=gid_list) for gid_list in gids_list ]
    # per-encounter review flags (one bool per image / annotation)
    images_reviewed_list           = [ encounter_image_processed(gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list   = [ encounter_annot_quality_processed(aid_list) for aid_list in aids_list ]
    # counts of reviewed items per encounter
    image_processed_list           = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list   = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # an encounter is fully reviewed when every image and every annot is done
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    encounter_list = zip(
        eid_list,
        app.ibs.get_encounter_enctext(eid_list),
        app.ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        app.ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    )
    # sort by start time (tuple index 7); relies on Python 2 zip() returning a list
    encounter_list.sort(key=lambda t: t[7])
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@app.route('/view/images')
def view_images():
    """Render the image list page, optionally filtered by gids or eids.

    Query args (checked in priority order):
        gid: comma-separated image ids to show.
        eid: comma-separated encounter ids whose images to show.
    With neither, all images are listed unfiltered.
    """
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUG FIX: was ``eid=eid`` (the raw query string), which ignored each
        # parsed encounter id; use eid_ exactly as view_annotations does.
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = app.ibs.get_valid_gids()
        filtered = False
    image_unixtime_list = app.ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetime(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    image_list = zip(
        gid_list,
        [ eid_list_[0] for eid_list_ in app.ibs.get_image_eids(gid_list) ],
        app.ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        app.ibs.get_image_gps(gid_list),
        app.ibs.get_image_party_tag(gid_list),
        app.ibs.get_image_contributor_tag(gid_list),
        app.ibs.get_image_notes(gid_list),
        encounter_image_processed(gid_list),
    )
    # sort by unixtime (tuple index 3); relies on Python 2 zip() returning a list
    image_list.sort(key=lambda t: t[3])
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list))
@app.route('/view/annotations')
def view_annotations():
    """Render the annotation list page, optionally filtered by aids, gids,
    or eids (checked in that priority order)."""
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    else:
        aid_list = app.ibs.get_valid_aids()
        filtered = False
    annotation_list = zip(
        aid_list,
        app.ibs.get_annot_gids(aid_list),
        [ eid_list_[0] for eid_list_ in app.ibs.get_annot_eids(aid_list) ],
        app.ibs.get_annot_image_names(aid_list),
        app.ibs.get_annot_names(aid_list),
        app.ibs.get_annot_exemplar_flags(aid_list),
        app.ibs.get_annot_species_texts(aid_list),
        app.ibs.get_annot_yaw_texts(aid_list),
        app.ibs.get_annot_quality_texts(aid_list),
        # "fully processed" == both viewpoint and quality reviewed
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(aid_list), encounter_annot_quality_processed(aid_list)) ],
    )
    # sort by aid (tuple index 0); relies on Python 2 zip() returning a list
    annotation_list.sort(key=lambda t: t[0])
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list))
@app.route('/turk')
def turk():
    """Render the turking landing page for an optional encounter id."""
    eid_arg = request.args.get('eid', '')
    eid = None if eid_arg in ('None', '') else int(eid_arg)
    return ap.template('turk', None, eid=eid)
@app.route('/turk/detection')
def turk_detection():
    """Serve the detection-turking page for one image.

    Picks a random not-yet-reviewed image from the encounter (or uses ?gid=)
    and renders it with any existing annotation boxes scaled to the 700px
    display width, plus the overall review progress.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)

        gid_list = app.ibs.get_valid_gids(eid=eid)
        reviewed_list = encounter_image_processed(gid_list)
        # percent of the encounter's images already reviewed
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )

        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        gid = request.args.get('gid', '')
        if len(gid) > 0:
            gid = int(gid)
        else:
            # choose a random unreviewed image from the encounter
            gid_list = app.ibs.get_valid_gids(eid=eid)
            reviewed_list = encounter_image_processed(gid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            gid_list_ = ut.filter_items(gid_list, flag_list)
            if len(gid_list_) == 0:
                gid = None
            else:
                # gid = gid_list_[0]
                gid = random.choice(gid_list_)
        previous = request.args.get('previous', None)
        finished = gid is None
        review = 'review' in request.args.keys()
        display_instructions = request.cookies.get('detection_instructions_seen', 0) == 0
        display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
        if not finished:
            gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image, filter_width=False)
            # Get annotations
            width, height = app.ibs.get_image_sizes(gid)
            # boxes are displayed at 700px wide; scale image coords down to that
            scale_factor = 700.0 / float(width)
            aid_list = app.ibs.get_image_aids(gid)
            annot_bbox_list = app.ibs.get_annot_bboxes(aid_list)
            annot_thetas_list = app.ibs.get_annot_thetas(aid_list)
            species_list = app.ibs.get_annot_species_texts(aid_list)
            # Get annotation bounding boxes
            annotation_list = []
            for annot_bbox, annot_theta, species in zip(annot_bbox_list, annot_thetas_list, species_list):
                temp = {}
                temp['left']   = int(scale_factor * annot_bbox[0])
                temp['top']    = int(scale_factor * annot_bbox[1])
                temp['width']  = int(scale_factor * (annot_bbox[2]))
                temp['height'] = int(scale_factor * (annot_bbox[3]))
                temp['label']  = species
                temp['angle']  = float(annot_theta)
                annotation_list.append(temp)
            if len(species_list) > 0:
                species = max(set(species_list), key=species_list.count)  # Get most common species
            elif app.default_species is not None:
                species = app.default_species
            else:
                species = KEY_DEFAULTS[SPECIES_KEY]
        else:
            gpath = None
            species = None
            image_src = None
            annotation_list = []
        return ap.template('turk', 'detection',
                           eid=eid,
                           gid=gid,
                           species=species,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           annotation_list=annotation_list,
                           display_instructions=display_instructions,
                           display_species_examples=display_species_examples,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/viewpoint')
def turk_viewpoint():
    """Serve the viewpoint-turking page.

    Picks a random annotation without a yaw yet (or uses ?aid=) and renders
    its chip together with the encounter's viewpoint-review progress.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        reviewed_list = encounter_annot_viewpoint_processed(aid_list)
        # percent of annotations with a viewpoint already set
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            # choose a random annotation without a viewpoint yet
            gid_list = app.ibs.get_valid_gids(eid=eid)
            aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
            reviewed_list = encounter_annot_viewpoint_processed(aid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        display_instructions = request.cookies.get('viewpoint_instructions_seen', 0) == 0
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'viewpoint',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/quality')
def turk_quality():
    """Serve the quality-turking page.

    Picks a random not-yet-quality-reviewed annotation from the encounter
    (or uses ?aid=) and renders its chip with the quality-review progress.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        # BUG FIX: progress previously counted *viewpoint*-reviewed annots
        # (copy-paste from turk_viewpoint); this page tracks quality review,
        # matching the selection logic below.
        reviewed_list = encounter_annot_quality_processed(aid_list)
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            # choose a random annotation without a quality yet
            gid_list = app.ibs.get_valid_gids(eid=eid)
            aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
            reviewed_list = encounter_annot_quality_processed(aid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        # display_instructions = request.cookies.get('quality_instructions_seen', 0) == 0
        display_instructions = False
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'quality',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/submit/detection', methods=['POST'])
def submit_detection():
    """Handle the detection-turk form.

    Depending on the submit button: delete is currently a no-op, clear
    removes the image's annots, otherwise the boxes drawn in the browser are
    scaled from the 700px display back to image coordinates and stored, and
    the image is marked reviewed.  Redirects to the referer or next task.
    """
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # app.ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = app.ibs.get_image_aids(gid)
        app.ibs.delete_annots(aid_list)
        # typo fix: log line previously read '(CLAEARED)'
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        aid_list = app.ibs.get_image_aids(gid)
        # Make new annotations
        width, height = app.ibs.get_image_sizes(gid)
        # inverse of the 700px display scaling used by turk_detection
        scale_factor = float(width) / 700.0
        # Get aids
        app.ibs.delete_annots(aid_list)
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        app.ibs.add_annots([gid] * len(annotation_list), bbox_list, theta_list=theta_list, species_list=species_list)
        app.ibs.set_image_reviewed([gid], [1])
        # BUG FIX: the log line was labeled bbox_list but interpolated the
        # raw annotation_list; log the actual stored boxes
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, bbox_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
@app.route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Handle the viewpoint-turk form: delete the annot or store its yaw.

    Redirects back to the referer (if given) or to the next viewpoint task.
    """
    # NOTE(review): reads 'detection-submit' rather than a viewpoint-specific
    # key -- presumably the form reuses that field name; confirm in template.
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        value = int(request.form['viewpoint-value'])
        def convert_old_viewpoint_to_yaw(view_angle):
            ''' we initially had viewpoint coordinates inverted

            Example:
                >>> import math
                >>> TAU = 2 * math.pi
                >>> old_viewpoint_labels = [
                >>>     ('left'       ,   0.000 * TAU,),
                >>>     ('frontleft'  ,   0.125 * TAU,),
                >>>     ('front'      ,   0.250 * TAU,),
                >>>     ('frontright' ,   0.375 * TAU,),
                >>>     ('right'      ,   0.500 * TAU,),
                >>>     ('backright'  ,   0.625 * TAU,),
                >>>     ('back'       ,   0.750 * TAU,),
                >>>     ('backleft'   ,   0.875 * TAU,),
                >>> ]
                >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
                >>> for lbl, angle in old_viewpoint_labels:
                >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
            '''
            if view_angle is None:
                return None
            # flip rotation direction and offset by half a turn
            yaw = (-view_angle + (const.TAU / 2)) % const.TAU
            return yaw
        yaw = convert_old_viewpoint_to_yaw(ut.deg_to_rad(value))
        app.ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        # NOTE(review): %d truncates the float yaw in this log line
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, previous=aid))
@app.route('/submit/quality', methods=['POST'])
def submit_quality():
    """Handle the quality-turk form: delete the annot or store its quality.

    Redirects back to the referer (if given) or to the next quality task.
    """
    # NOTE(review): reads 'detection-submit' rather than a quality-specific
    # key -- presumably the form reuses that field name; confirm in template.
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        quality = int(request.form['quality-value'])
        app.ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_quality', eid=eid, previous=aid))
@app.route('/ajax/cookie')
def set_cookie():
    """Set a browser cookie from ?name=...&value=...; body is 'true'/'false'."""
    response = make_response('true')
    try:
        response.set_cookie(request.args['name'], request.args['value'])
        print('[web] Set Cookie: %r -> %r' % (request.args['name'], request.args['value'], ))
        return response
    except Exception:
        # narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit / KeyboardInterrupt
        print('[web] COOKIE FAILED: %r' % (request.args, ))
        return make_response('false')
@app.route('/ajax/image/src/<gid>')
def image_src(gid=None):
    """Serve the (precomputed) thumbnail of image ``gid`` as an image response."""
    # gpath = app.ibs.get_image_paths(gid)
    gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(gpath)
@app.route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image of annotation ``aid`` as an image response."""
    chip_path = app.ibs.get_annot_chip_fpaths(aid)
    return ap.return_src(chip_path)
@app.route('/api')
@app.route('/api/<function>.json', methods=['GET', 'POST'])
def api(function=None):
    """JSON dispatcher: call a zero-argument IBEIS controller method by name.

    Returns a JSON envelope with a status, the resolved function name, and
    its return value.  Only names passing ap.check_valid_function_name are
    considered.
    """
    template = {
        'status': {
            'success': False,
            'code': '',
        },
    }
    print('[web] Function:', function)
    print('[web] POST:', dict(request.form))
    print('[web] GET:', dict(request.args))
    if function is None:
        template['status']['success'] = True
        template['status']['code'] = 'USAGE: /api/[ibeis_function_name].json'
    else:
        function = function.lower()
        if ap.check_valid_function_name(function):
            exists = True
            try:
                # SECURITY: resolve the attribute directly.  The original
                # used eval('app.ibs.<name>'), which would execute arbitrary
                # code if the name check were ever too permissive.
                func = getattr(app.ibs, function)
                ret = func()
            except AttributeError:
                exists = False
            if exists:
                template['status']['success'] = True
                # same display string the eval-based version produced
                template['function'] = 'app.ibs.%s' % function
                template['return'] = ret
            else:
                template['status']['success'] = False
                template['status']['code'] = 'ERROR: Specified IBEIS function not visible or implemented'
        else:
            template['status']['success'] = False
            template['status']['code'] = 'ERROR: Specified IBEIS function not valid Python function'
    return json.dumps(template)
@app.route('/404')
def error404(exception):
    """Log ``exception`` and render the generic 404 template."""
    print('[web] %r' % (exception, ))
    return ap.template(None, '404')
################################################################################
def start_tornado(app, port=5000, browser=BROWSER, blocking=False, reset_db=True):
def _start_tornado():
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start()
# Initialize the web server
logging.getLogger().setLevel(logging.INFO)
try:
app.server_ip_address = socket.gethostbyname(socket.gethostname())
app.port = port
except:
app.server_ip_address = '127.0.0.1'
app.port = port
url = 'http://%s:%s' % (app.server_ip_address, app.port)
print('[web] Tornado server starting at %s' % (url,))
if browser:
import webbrowser
webbrowser.open(url)
# Blocking
_start_tornado()
# if blocking:
# _start_tornado()
# else:
# import threading
# threading.Thread(target=_start_tornado).start()
def start_from_terminal():
    '''
    Parse command line options and start the server.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        '-p', '--port',
        help='which port to serve content on',
        type='int', default=DEFAULT_PORT)
    parser.add_option(
        '--db',
        help='specify an IBEIS database',
        type='str', default='testdb0')
    opts, args = parser.parse_args()
    # open the controller and precompute the image/annot assets the web
    # pages serve, so first page loads are fast
    app.ibs = ibeis.opendb(db=opts.db)
    print('[web] Pre-computing all image thumbnails...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, opts.port)
def start_from_ibeis(ibs, port=DEFAULT_PORT):
    '''
    Attach an already-open IBEIS controller, pick a default species from the
    database name, precompute served assets, and start the server.
    '''
    # exact database-name -> default species (replaces a long if/elif chain;
    # exact matches are still checked before the substring fallbacks below,
    # preserving the original precedence)
    _SPECIES_BY_DBNAME = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master': Species.GIRAFFE,
        'GZ_Master': Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master': Species.ZEB_PLAIN,
        'WD_Master': Species.WILDDOG,
    }
    dbname = ibs.get_dbname()
    if dbname in _SPECIES_BY_DBNAME:
        app.default_species = _SPECIES_BY_DBNAME[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        app.default_species = Species.ZEB_PLAIN
    else:
        app.default_species = None
    print('[web] DEFAULT SPECIES: %r' % (app.default_species, ))
    app.ibs = ibs
    # precompute the assets the web pages serve, so first loads are fast
    print('[web] Pre-computing all image thumbnails (with annots)...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, port)
if __name__ == '__main__':
    # Script entry point: parse CLI options and launch the web server.
    start_from_terminal()
hotfix
# Dependencies: flask, tornado
from __future__ import absolute_import, division, print_function
# HTTP / HTML
import tornado.wsgi
import tornado.httpserver
import flask
from flask import request, redirect, url_for, make_response
import optparse
import logging
import socket
import simplejson as json
# IBEIS
import ibeis
from ibeis.control.SQLDatabaseControl import (SQLDatabaseController, # NOQA
SQLAtomicContext)
from ibeis.constants import KEY_DEFAULTS, SPECIES_KEY, Species
import utool as ut
# Web Internal
from ibeis.web import appfuncs as ap
# Others
import ibeis.constants as const
import random
BROWSER = ut.get_argflag('--browser')
DEFAULT_PORT = 5000
app = flask.Flask(__name__)
################################################################################
def encounter_image_processed(gid_list):
    """Return one bool per gid: True when the image's reviewed flag == 1."""
    images_reviewed = [ reviewed == 1 for reviewed in app.ibs.get_image_reviewed(gid_list) ]
    return images_reviewed
def encounter_annot_viewpoint_processed(aid_list):
    """Return one bool per aid: True when the annotation has a yaw set."""
    annots_reviewed = [ reviewed is not None for reviewed in app.ibs.get_annot_yaws(aid_list) ]
    return annots_reviewed
def encounter_annot_quality_processed(aid_list):
    """Return one bool per aid: True when a quality is set and is not -1.

    BUG FIX: the original used ``reviewed is not -1``; identity comparison
    with an int literal relies on CPython's small-int cache and is flagged
    as a SyntaxWarning on modern Pythons.  Use ``!=`` for value comparison.
    """
    quality_list = app.ibs.get_annot_qualities(aid_list)
    return [quality is not None and quality != -1 for quality in quality_list]
################################################################################
# @app.after_request
# def add_header(response):
# response.headers['Cache-Control'] = 'public, max-age=%d' % (60 * 60 * 24, )
# return response
@app.route('/')
def root():
    """Render the site's landing page (default template)."""
    return ap.template(None)
@app.route('/view')
def view():
    """Render the top-level 'view' dashboard: id lists and counts for all
    encounters (eids), images (gids), annotations (aids), and names (nids)."""
    eid_list = app.ibs.get_valid_eids()
    gid_list = app.ibs.get_valid_gids()
    aid_list = app.ibs.get_valid_aids()
    nid_list = app.ibs.get_valid_nids()
    return ap.template('view',
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       nid_list=nid_list,
                       nid_list_str=','.join(map(str, nid_list)),
                       num_nids=len(nid_list))
@app.route('/view/encounters')
def view_encounters():
    """Render the encounter-list view.

    Query args:
        eid -- optional comma-separated encounter rowids; when absent, all
               valid encounters are shown and `filtered` is False.
    """
    filtered = True
    eid = request.args.get('eid', '')
    if len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
    else:
        eid_list = app.ibs.get_valid_eids()
        filtered = False
    start_time_posix_list = app.ibs.get_encounter_start_time_posix(eid_list)
    datetime_list = [
        ut.unixtime_to_datetime(start_time_posix)
        if start_time_posix is not None else
        'Unknown'
        for start_time_posix in start_time_posix_list
    ]
    # Per-encounter review status for images, annotation viewpoints and qualities.
    gids_list = [ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ]
    aids_list = [ app.ibs.get_valid_aids(include_only_gid_list=gid_list) for gid_list in gids_list ]
    images_reviewed_list = [ encounter_image_processed(gid_list) for gid_list in gids_list ]
    annots_reviewed_viewpoint_list = [ encounter_annot_viewpoint_processed(aid_list) for aid_list in aids_list ]
    annots_reviewed_quality_list = [ encounter_annot_quality_processed(aid_list) for aid_list in aids_list ]
    image_processed_list = [ images_reviewed.count(True) for images_reviewed in images_reviewed_list ]
    annot_processed_viewpoint_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_viewpoint_list ]
    annot_processed_quality_list = [ annots_reviewed.count(True) for annots_reviewed in annots_reviewed_quality_list ]
    # An encounter is "reviewed" only when every image and every annotation is done.
    reviewed_list = [ all(images_reviewed) and all(annots_reviewed_viewpoint) and all(annot_processed_quality) for images_reviewed, annots_reviewed_viewpoint, annot_processed_quality in zip(images_reviewed_list, annots_reviewed_viewpoint_list, annots_reviewed_quality_list) ]
    # NOTE: zip() returns a list here (Python 2); the .sort() below relies on that.
    encounter_list = zip(
        eid_list,
        app.ibs.get_encounter_enctext(eid_list),
        app.ibs.get_encounter_num_gids(eid_list),
        image_processed_list,
        app.ibs.get_encounter_num_aids(eid_list),
        annot_processed_viewpoint_list,
        annot_processed_quality_list,
        start_time_posix_list,
        datetime_list,
        reviewed_list,
    )
    encounter_list.sort(key=lambda t: t[7])  # sort by posix start time
    return ap.template('view', 'encounters',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       encounter_list=encounter_list,
                       num_encounters=len(encounter_list))
@app.route('/view/images')
def view_images():
    """Render the image-list view, optionally filtered by ?gid= or ?eid=.

    Query args (comma-separated id lists; gid takes precedence over eid):
        gid -- explicit image rowids to show
        eid -- encounter rowids; shows every image in those encounters
    Without either argument all valid images are shown and `filtered` is False.
    """
    filtered = True
    eid_list = []
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        # BUG FIX: was `get_valid_gids(eid=eid)`, which passed the raw query
        # *string* for every iteration instead of each parsed encounter id
        # (cf. the identical, correct loop in view_annotations).
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
    else:
        gid_list = app.ibs.get_valid_gids()
        filtered = False
    image_unixtime_list = app.ibs.get_image_unixtime(gid_list)
    datetime_list = [
        ut.unixtime_to_datetime(image_unixtime)
        if image_unixtime is not None
        else
        'Unknown'
        for image_unixtime in image_unixtime_list
    ]
    # NOTE: zip() returns a list here (Python 2); the .sort() below relies on that.
    image_list = zip(
        gid_list,
        [ eid_list_[0] for eid_list_ in app.ibs.get_image_eids(gid_list) ],
        app.ibs.get_image_gnames(gid_list),
        image_unixtime_list,
        datetime_list,
        app.ibs.get_image_gps(gid_list),
        app.ibs.get_image_party_tag(gid_list),
        app.ibs.get_image_contributor_tag(gid_list),
        app.ibs.get_image_notes(gid_list),
        encounter_image_processed(gid_list),
    )
    image_list.sort(key=lambda t: t[3])  # sort by image unixtime
    return ap.template('view', 'images',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       image_list=image_list,
                       num_images=len(image_list))
@app.route('/view/annotations')
def view_annotations():
    """Render the annotation-list view, filtered by ?aid=, ?gid= or ?eid=.

    Filter precedence: explicit aids > gids > eids > everything.
    All three query args are comma-separated id lists.
    """
    filtered = True
    eid_list = []
    gid_list = []
    aid = request.args.get('aid', '')
    gid = request.args.get('gid', '')
    eid = request.args.get('eid', '')
    if len(aid) > 0:
        aid_list = aid.strip().split(',')
        aid_list = [ None if aid_ == 'None' or aid_ == '' else int(aid_) for aid_ in aid_list ]
    elif len(gid) > 0:
        gid_list = gid.strip().split(',')
        gid_list = [ None if gid_ == 'None' or gid_ == '' else int(gid_) for gid_ in gid_list ]
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    elif len(eid) > 0:
        eid_list = eid.strip().split(',')
        eid_list = [ None if eid_ == 'None' or eid_ == '' else int(eid_) for eid_ in eid_list ]
        gid_list = ut.flatten([ app.ibs.get_valid_gids(eid=eid_) for eid_ in eid_list ])
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
    else:
        aid_list = app.ibs.get_valid_aids()
        filtered = False
    # NOTE: zip() returns a list here (Python 2); the .sort() below relies on that.
    annotation_list = zip(
        aid_list,
        app.ibs.get_annot_gids(aid_list),
        [ eid_list_[0] for eid_list_ in app.ibs.get_annot_eids(aid_list) ],
        app.ibs.get_annot_image_names(aid_list),
        app.ibs.get_annot_names(aid_list),
        app.ibs.get_annot_exemplar_flags(aid_list),
        app.ibs.get_annot_species_texts(aid_list),
        app.ibs.get_annot_yaw_texts(aid_list),
        app.ibs.get_annot_quality_texts(aid_list),
        # "reviewed" = both viewpoint and quality have been turked
        [ reviewed_viewpoint and reviewed_quality for reviewed_viewpoint, reviewed_quality in zip(encounter_annot_viewpoint_processed(aid_list), encounter_annot_quality_processed(aid_list)) ],
    )
    annotation_list.sort(key=lambda t: t[0])  # sort by aid
    return ap.template('view', 'annotations',
                       filtered=filtered,
                       eid_list=eid_list,
                       eid_list_str=','.join(map(str, eid_list)),
                       num_eids=len(eid_list),
                       gid_list=gid_list,
                       gid_list_str=','.join(map(str, gid_list)),
                       num_gids=len(gid_list),
                       aid_list=aid_list,
                       aid_list_str=','.join(map(str, aid_list)),
                       num_aids=len(aid_list),
                       annotation_list=annotation_list,
                       num_annotations=len(annotation_list))
@app.route('/turk')
def turk():
    """Render the turk landing page, optionally scoped to one encounter."""
    eid_arg = request.args.get('eid', '')
    eid = int(eid_arg) if eid_arg not in ('', 'None') else None
    return ap.template('turk', None, eid=eid)
@app.route('/turk/detection')
def turk_detection():
    """Serve the detection (bounding-box) turking page.

    Picks a random not-yet-reviewed image from the requested encounter
    (or the whole database) unless an explicit ?gid= is given.  Existing
    annotations are scaled to the client's 700px-wide canvas.  Any error
    (including an empty encounter, which would divide by zero in the
    progress computation) falls through to the 404 page.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        reviewed_list = encounter_image_processed(gid_list)
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(gid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        gid = request.args.get('gid', '')
        if len(gid) > 0:
            gid = int(gid)
        else:
            # No explicit image requested: pick a random unreviewed one.
            gid_list = app.ibs.get_valid_gids(eid=eid)
            reviewed_list = encounter_image_processed(gid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            gid_list_ = ut.filter_items(gid_list, flag_list)
            if len(gid_list_) == 0:
                gid = None
            else:
                # gid = gid_list_[0]
                gid = random.choice(gid_list_)
        previous = request.args.get('previous', None)
        finished = gid is None
        review = 'review' in request.args.keys()
        display_instructions = request.cookies.get('detection_instructions_seen', 0) == 0
        display_species_examples = False  # request.cookies.get('detection_example_species_seen', 0) == 0
        if not finished:
            gpath = app.ibs.get_image_thumbpath(gid, ensure_paths=True, draw_annots=False)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image, filter_width=False)
            # Get annotations
            width, height = app.ibs.get_image_sizes(gid)
            # The client canvas is 700px wide; scale image-space boxes down to it.
            scale_factor = 700.0 / float(width)
            aid_list = app.ibs.get_image_aids(gid)
            annot_bbox_list = app.ibs.get_annot_bboxes(aid_list)
            annot_thetas_list = app.ibs.get_annot_thetas(aid_list)
            species_list = app.ibs.get_annot_species_texts(aid_list)
            # Get annotation bounding boxes
            annotation_list = []
            for annot_bbox, annot_theta, species in zip(annot_bbox_list, annot_thetas_list, species_list):
                temp = {}
                temp['left'] = int(scale_factor * annot_bbox[0])
                temp['top'] = int(scale_factor * annot_bbox[1])
                temp['width'] = int(scale_factor * (annot_bbox[2]))
                temp['height'] = int(scale_factor * (annot_bbox[3]))
                temp['label'] = species
                temp['angle'] = float(annot_theta)
                annotation_list.append(temp)
            # Default species for new boxes: most common on this image, else
            # the database-wide default, else the global fallback.
            if len(species_list) > 0:
                species = max(set(species_list), key=species_list.count)  # Get most common species
            elif app.default_species is not None:
                species = app.default_species
            else:
                species = KEY_DEFAULTS[SPECIES_KEY]
        else:
            gpath = None
            species = None
            image_src = None
            annotation_list = []
        return ap.template('turk', 'detection',
                           eid=eid,
                           gid=gid,
                           species=species,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           annotation_list=annotation_list,
                           display_instructions=display_instructions,
                           display_species_examples=display_species_examples,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/viewpoint')
def turk_viewpoint():
    """Serve the viewpoint (yaw) turking page.

    Picks a random annotation whose yaw is still unset from the requested
    encounter (or the whole database) unless an explicit ?aid= is given.
    Any error (including an empty aid_list, which would divide by zero in
    the progress computation) falls through to the 404 page.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        reviewed_list = encounter_annot_viewpoint_processed(aid_list)
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            # No explicit annotation requested: pick a random unreviewed one.
            gid_list = app.ibs.get_valid_gids(eid=eid)
            aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
            reviewed_list = encounter_annot_viewpoint_processed(aid_list)
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        display_instructions = request.cookies.get('viewpoint_instructions_seen', 0) == 0
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'viewpoint',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/turk/quality')
def turk_quality():
    """Serve the quality turking page.

    Picks a random annotation whose quality is still unreviewed from the
    requested encounter (or the whole database) unless an explicit ?aid=
    is given.  Any error (including an empty aid_list, which would divide
    by zero in the progress computation) falls through to the 404 page.
    """
    try:
        eid = request.args.get('eid', '')
        eid = None if eid == 'None' or eid == '' else int(eid)
        gid_list = app.ibs.get_valid_gids(eid=eid)
        aid_list = app.ibs.get_valid_aids(include_only_gid_list=gid_list)
        # BUG FIX: progress was computed from the *viewpoint* review flags;
        # this page tracks quality review (the selection code below already
        # used encounter_annot_quality_processed).  Also removed leftover
        # '[web] TEST n' debug prints.
        reviewed_list = encounter_annot_quality_processed(aid_list)
        progress = '%0.2f' % (100.0 * reviewed_list.count(True) / len(aid_list), )
        enctext = None if eid is None else app.ibs.get_encounter_enctext(eid)
        aid = request.args.get('aid', '')
        if len(aid) > 0:
            aid = int(aid)
        else:
            # No explicit annotation requested: pick a random unreviewed one.
            flag_list = [ not reviewed for reviewed in reviewed_list ]
            aid_list_ = ut.filter_items(aid_list, flag_list)
            if len(aid_list_) == 0:
                aid = None
            else:
                # aid = aid_list_[0]
                aid = random.choice(aid_list_)
        previous = request.args.get('previous', None)
        value = request.args.get('value', None)
        review = 'review' in request.args.keys()
        finished = aid is None
        # display_instructions = request.cookies.get('quality_instructions_seen', 0) == 0
        display_instructions = False
        if not finished:
            gid = app.ibs.get_annot_gids(aid)
            gpath = app.ibs.get_annot_chip_fpaths(aid)
            image = ap.open_oriented_image(gpath)
            image_src = ap.embed_image_html(image)
        else:
            gid = None
            gpath = None
            image_src = None
        return ap.template('turk', 'quality',
                           eid=eid,
                           gid=gid,
                           aid=aid,
                           value=value,
                           image_path=gpath,
                           image_src=image_src,
                           previous=previous,
                           enctext=enctext,
                           progress=progress,
                           finished=finished,
                           display_instructions=display_instructions,
                           review=review)
    except Exception as e:
        return error404(e)
@app.route('/submit/detection', methods=['POST'])
def submit_detection():
    """Persist a detection turk submission.

    Dispatch on the submit button: 'delete' is disabled, 'clear' removes all
    annotations on the image, anything else replaces the image's annotations
    with the client's boxes (scaled from the 700px canvas back to image
    space) and marks the image reviewed.  Redirects to ?refer= when present,
    otherwise back to the detection turk page.
    """
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    gid = int(request.form['detection-gid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        # Image deletion is intentionally disabled.
        # app.ibs.delete_images(gid)
        # print('[web] (DELETED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        pass
    elif method.lower() == 'clear':
        aid_list = app.ibs.get_image_aids(gid)
        app.ibs.delete_annots(aid_list)
        # Typo fix: log message read '(CLAEARED)'.
        print('[web] (CLEARED) turk_id: %s, gid: %d' % (turk_id, gid, ))
        redirection = request.referrer
        if 'gid' not in redirection:
            # Prevent multiple clears
            if '?' in redirection:
                redirection = '%s&gid=%d' % (redirection, gid, )
            else:
                redirection = '%s?gid=%d' % (redirection, gid, )
        return redirect(redirection)
    else:
        aid_list = app.ibs.get_image_aids(gid)
        # Make new annotations
        width, height = app.ibs.get_image_sizes(gid)
        # The client draws on a 700px-wide canvas; scale back to image space.
        scale_factor = float(width) / 700.0
        # Replace all existing annotations with the submitted ones.
        app.ibs.delete_annots(aid_list)
        annotation_list = json.loads(request.form['detection-annotations'])
        bbox_list = [
            (
                int(scale_factor * annot['left']),
                int(scale_factor * annot['top']),
                int(scale_factor * annot['width']),
                int(scale_factor * annot['height']),
            )
            for annot in annotation_list
        ]
        theta_list = [
            float(annot['angle'])
            for annot in annotation_list
        ]
        species_list = [
            annot['label']
            for annot in annotation_list
        ]
        app.ibs.add_annots([gid] * len(annotation_list), bbox_list, theta_list=theta_list, species_list=species_list)
        app.ibs.set_image_reviewed([gid], [1])
        # Log fix: the format string says bbox_list, so log the scaled boxes
        # (previously logged the raw client annotation dicts).
        print('[web] turk_id: %s, gid: %d, bbox_list: %r, species_list: %r' % (turk_id, gid, bbox_list, species_list))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_detection', eid=eid, previous=gid))
@app.route('/submit/viewpoint', methods=['POST'])
def submit_viewpoint():
    """Persist a viewpoint turk submission.

    'delete' removes the annotation; otherwise the submitted degree value
    is converted from the legacy (inverted) viewpoint convention to a yaw
    in radians and stored.  Redirects to ?refer= when present, otherwise
    back to the viewpoint turk page.
    """
    method = request.form.get('detection-submit', '')
    eid = request.args.get('eid', '')
    eid = None if eid == 'None' or eid == '' else int(eid)
    aid = int(request.form['viewpoint-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        aid = None  # Reset AID to prevent previous
    else:
        value = int(request.form['viewpoint-value'])
        def convert_old_viewpoint_to_yaw(view_angle):
            ''' we initially had viewpoint coordinates inverted
            Example:
                >>> import math
                >>> TAU = 2 * math.pi
                >>> old_viewpoint_labels = [
                >>>     ('left'       , 0.000 * TAU,),
                >>>     ('frontleft'  , 0.125 * TAU,),
                >>>     ('front'      , 0.250 * TAU,),
                >>>     ('frontright' , 0.375 * TAU,),
                >>>     ('right'      , 0.500 * TAU,),
                >>>     ('backright'  , 0.625 * TAU,),
                >>>     ('back'       , 0.750 * TAU,),
                >>>     ('backleft'   , 0.875 * TAU,),
                >>> ]
                >>> fmtstr = 'old %15r %.2f -> new %15r %.2f'
                >>> for lbl, angle in old_viewpoint_labels:
                >>>     print(fmtstr % (lbl, angle, lbl, convert_old_viewpoint_to_yaw(angle)))
            '''
            if view_angle is None:
                return None
            # Mirror the angle about TAU/4 to undo the legacy inversion.
            yaw = (-view_angle + (const.TAU / 2)) % const.TAU
            return yaw
        yaw = convert_old_viewpoint_to_yaw(ut.deg_to_rad(value))
        app.ibs.set_annot_yaws([aid], [yaw], input_is_degrees=False)
        # NOTE(review): %d truncates the float yaw in this log line.
        print('[web] turk_id: %s, aid: %d, yaw: %d' % (turk_id, aid, yaw))
    # Return HTML
    refer = request.args.get('refer', '')
    if len(refer) > 0:
        return redirect(ap.decode_refer_url(refer))
    else:
        return redirect(url_for('turk_viewpoint', eid=eid, previous=aid))
@app.route('/submit/quality', methods=['POST'])
def submit_quality():
    """Persist a quality turk submission, then redirect to the next task."""
    method = request.form.get('detection-submit', '')
    eid_arg = request.args.get('eid', '')
    eid = int(eid_arg) if eid_arg not in ('', 'None') else None
    aid = int(request.form['quality-aid'])
    turk_id = request.cookies.get('turk_id', -1)
    if method.lower() == 'delete':
        app.ibs.delete_annots(aid)
        print('[web] (DELETED) turk_id: %s, aid: %d' % (turk_id, aid, ))
        # Clear the aid so the redirect does not carry a stale `previous`.
        aid = None
    else:
        quality = int(request.form['quality-value'])
        app.ibs.set_annot_qualities([aid], [quality])
        print('[web] turk_id: %s, aid: %d, quality: %d' % (turk_id, aid, quality))
    refer = request.args.get('refer', '')
    if refer:
        return redirect(ap.decode_refer_url(refer))
    return redirect(url_for('turk_quality', eid=eid, previous=aid))
@app.route('/ajax/cookie')
def set_cookie():
    """Set a browser cookie from ?name=&value=; return 'true' or 'false'."""
    response = make_response('true')
    try:
        response.set_cookie(request.args['name'], request.args['value'])
        print('[web] Set Cookie: %r -> %r' % (request.args['name'], request.args['value'], ))
        return response
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only missing args / cookie errors are expected here.
    except Exception:
        print('[web] COOKIE FAILED: %r' % (request.args, ))
        return make_response('false')
@app.route('/ajax/image/src/<gid>')
def image_src(gid=None):
    """Serve the cached thumbnail for the given image rowid."""
    thumb_path = app.ibs.get_image_thumbpath(gid, ensure_paths=True)
    return ap.return_src(thumb_path)
@app.route('/ajax/annotation/src/<aid>')
def annotation_src(aid=None):
    """Serve the chip image for the given annotation rowid."""
    chip_path = app.ibs.get_annot_chip_fpaths(aid)
    return ap.return_src(chip_path)
@app.route('/api')
@app.route('/api/<function>.json', methods=['GET', 'POST'])
def api(function=None):
    """JSON endpoint: call a zero-argument method of the IBEIS controller.

    GET /api returns usage; /api/<name>.json calls app.ibs.<name>() and
    returns {'status': ..., 'function': ..., 'return': ...} as JSON.
    """
    template = {
        'status': {
            'success': False,
            'code': '',
        },
    }
    print('[web] Function:', function)
    print('[web] POST:', dict(request.form))
    print('[web] GET:', dict(request.args))
    if function is None:
        template['status']['success'] = True
        template['status']['code'] = 'USAGE: /api/[ibeis_function_name].json'
    else:
        function = function.lower()
        if ap.check_valid_function_name(function):
            # SECURITY FIX: resolve the controller attribute with getattr
            # instead of eval() so URL-derived input can never execute an
            # arbitrary Python expression.
            func = getattr(app.ibs, function, None)
            exists = func is not None
            if exists:
                try:
                    ret = func()
                except AttributeError:
                    exists = False
            if exists:
                template['status']['success'] = True
                template['function'] = 'app.ibs.%s' % function
                template['return'] = ret
            else:
                template['status']['success'] = False
                template['status']['code'] = 'ERROR: Specified IBEIS function not visible or implemented'
        else:
            template['status']['success'] = False
            template['status']['code'] = 'ERROR: Specified IBEIS function not valid Python function'
    return json.dumps(template)
@app.route('/404')
def error404(exception):
    """Log the triggering exception and render the generic 404 template.

    Also called directly by the turk_* handlers as a catch-all error page.
    """
    print('[web] %r' % (exception, ))
    return ap.template(None, '404')
################################################################################
def start_tornado(app, port=5000, browser=BROWSER, blocking=False, reset_db=True):
def _start_tornado():
http_server = tornado.httpserver.HTTPServer(
tornado.wsgi.WSGIContainer(app))
http_server.listen(port)
tornado.ioloop.IOLoop.instance().start()
# Initialize the web server
logging.getLogger().setLevel(logging.INFO)
try:
app.server_ip_address = socket.gethostbyname(socket.gethostname())
app.port = port
except:
app.server_ip_address = '127.0.0.1'
app.port = port
url = 'http://%s:%s' % (app.server_ip_address, app.port)
print('[web] Tornado server starting at %s' % (url,))
if browser:
import webbrowser
webbrowser.open(url)
# Blocking
_start_tornado()
# if blocking:
# _start_tornado()
# else:
# import threading
# threading.Thread(target=_start_tornado).start()
def start_from_terminal():
    '''
    Parse command line options and start the server.

    Options:
        -p/--port -- TCP port to serve on (default DEFAULT_PORT)
        --db      -- name of the IBEIS database to open (default 'testdb0')

    Thumbnails and chips are pre-computed up front so the turk pages do
    not block on image generation.
    '''
    parser = optparse.OptionParser()
    parser.add_option(
        '-p', '--port',
        help='which port to serve content on',
        type='int', default=DEFAULT_PORT)
    parser.add_option(
        '--db',
        help='specify an IBEIS database',
        type='str', default='testdb0')
    opts, args = parser.parse_args()
    app.ibs = ibeis.opendb(db=opts.db)
    print('[web] Pre-computing all image thumbnails...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, opts.port)
def start_from_ibeis(ibs, port=DEFAULT_PORT):
    '''
    Attach an already-opened IBEIS controller, choose a default species
    from the database name, pre-compute thumbnails/chips, and start the
    web server.
    '''
    # Exact database names with a known single-species default.
    species_by_dbname = {
        'CHTA_Master': Species.CHEETAH,
        'ELPH_Master': Species.ELEPHANT_SAV,
        'GIR_Master':  Species.GIRAFFE,
        'GZ_Master':   Species.ZEB_GREVY,
        'LION_Master': Species.LION,
        'PZ_Master':   Species.ZEB_PLAIN,
        'WD_Master':   Species.WILDDOG,
    }
    dbname = ibs.get_dbname()
    if dbname in species_by_dbname:
        app.default_species = species_by_dbname[dbname]
    elif 'NNP_' in dbname or 'GZC' in dbname:
        # Substring fallbacks: Nature-conservancy and GZC databases are
        # plains-zebra collections.
        app.default_species = Species.ZEB_PLAIN
    else:
        app.default_species = None
    print('[web] DEFAULT SPECIES: %r' % (app.default_species))
    app.ibs = ibs
    print('[web] Pre-computing all image thumbnails (with annots)...')
    app.ibs.compute_all_thumbs()
    print('[web] Pre-computing all image thumbnails (without annots)...')
    app.ibs.compute_all_thumbs(draw_annots=False)
    print('[web] Pre-computing all annotation chips...')
    app.ibs.compute_all_chips()
    start_tornado(app, port)
if __name__ == '__main__':
    # Allow running this module directly as a standalone web server.
    start_from_terminal()
|
Fixed documentation for ensure_necessary_log_dirs
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012-2014, Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# ***** END LICENSE BLOCK *****
import tff
import thread
import logging
# Module-wide lock; presumably guards shared parser/screen state -- its
# acquire sites are elsewhere in this module, confirm before relying on it.
lock = thread.allocate_lock()
# Cursor Position Report (CPR) request states: no report pending, an ANSI
# CPR ("CSI 6 n") pending, or a DEC CPR pending -- TODO confirm usage sites.
_CPR_NONE = 0
_CPR_ANSI = 1
_CPR_DEC = 2
def _param_generator(params, minimum=0, offset=0, minarg=1):
param = 0
for c in params:
if c < 0x3a:
param = param * 10 + c - 0x30
elif c < 0x3c:
param += offset
if minimum > param:
yield minimum
else:
yield param
minarg -= 1
param = 0
param += offset
if minimum > param:
yield minimum
else:
yield param
minarg -= 1
if minarg > 0:
yield minimum
def _parse_params(params, minimum=0, offset=0, minarg=1):
    """Eagerly decode CSI parameters into a list (see _param_generator)."""
    return list(_param_generator(params, minimum, offset, minarg))
import re
import os
import select
_dimension_pattern = re.compile('\033\[([0-9]+);([0-9]+)R|\033\[8;([0-9]+);([0-9]+)t')
def _getdimension_dtterm(stdin, stdout):
    """Query the terminal size using the dtterm "CSI 18 t" sequence.

    Writes the query (up to two attempts), polling stdin with a 100 ms
    select timeout for the "CSI 8 ; rows ; cols t" reply.  Returns
    (rows, cols), or None when the terminal never answers.  Assumes the
    tty is already in a non-canonical mode -- see _get_pos_and_size.
    """
    data = ""
    for i in xrange(0, 2):
        stdout.write("\x1b[18t")
        stdout.flush()
        fd = stdin.fileno()
        rfd, wfd, xfd = select.select([fd], [], [], 0.1)
        if rfd:
            data += os.read(fd, 1024)
            m = _dimension_pattern.match(data)
            if m is None:
                continue
            m1, m2, m3, m4 = m.groups()
            # Only the window-size alternative (groups 3/4) is meaningful here;
            # groups 1/2 (a CPR reply) are ignored.
            if m3 and m4:
                row = int(m3)
                col = int(m4)
                return row, col
    return None
def _getposition(stdin, stdout):
    """Query the cursor position using the ANSI "CSI 6 n" (DSR) sequence.

    Writes the query (up to two attempts), polling stdin with a 100 ms
    select timeout.  Accepts either a CPR reply ("CSI row ; col R") or a
    window-size reply, returning (row, col) in both cases, or None when
    the terminal never answers.
    """
    data = ""
    for i in xrange(0, 2):
        stdout.write("\x1b[6n")
        stdout.flush()
        fd = stdin.fileno()
        rfd, wfd, xfd = select.select([fd], [], [], 0.1)
        if rfd:
            data += os.read(fd, 1024)
            m = _dimension_pattern.match(data)
            if m is None:
                continue
            m1, m2, m3, m4 = m.groups()
            # Window-size alternative matched (e.g. a late dtterm reply).
            if m3 and m4:
                row = int(m3)
                col = int(m4)
                return row, col
            # Ordinary cursor position report.
            if m1 and m2:
                row = int(m1)
                col = int(m2)
                return row, col
    return None
def _get_pos_and_size(stdin, stdout):
    """Return (rows, cols, cursor_y, cursor_x) of the controlling tty.

    Temporarily disables ECHO and ICANON so the terminal's replies can be
    read unbuffered and without being displayed; the original settings are
    always restored.  Returns None when either query goes unanswered.
    """
    import termios
    stdin_fileno = stdin.fileno()
    # NOTE(review): 'vdisable' is queried but never used -- looks like a
    # leftover from code that disabled special control characters.
    vdisable = os.fpathconf(stdin_fileno, 'PC_VDISABLE')
    backup = termios.tcgetattr(stdin_fileno)
    new = termios.tcgetattr(stdin_fileno)
    # lflags: no echo, no line buffering; read returns after 1 byte, no timeout.
    new[3] &= ~(termios.ECHO | termios.ICANON)
    new[6][termios.VMIN] = 1
    new[6][termios.VTIME] = 0
    termios.tcsetattr(stdin_fileno, termios.TCSAFLUSH, new)
    try:
        position = _getposition(stdin, stdout)
        if position:
            y, x = position
            try:
                dimension = _getdimension_dtterm(stdin, stdout)
                if not dimension:
                    # Fallback: jump toward the far corner; the clamped
                    # cursor position equals the screen size.
                    stdout.write("\x1b[9999;9999H")
                    dimension = _getposition(stdin, stdout)
                if dimension:
                    row, col = dimension
                    return (row, col, y, x)
            finally:
                # Always restore the cursor to where it was.
                stdout.write("\033[%d;%dH" % (y, x))
    finally:
        # Always restore the original terminal attributes.
        termios.tcsetattr(stdin_fileno, termios.TCSAFLUSH, backup)
    return None
def _generate_mock_parser(screen):
    """Build a tff parser that feeds a Canossa handler for the given screen.

    Used by the doctests in CSIHandlerTrait: parsed escape sequences are
    applied to `screen` while the parser's own output is discarded into an
    in-memory buffer.  (`Canossa` is defined elsewhere in this module.)
    """
    import StringIO
    import tff
    canossa = Canossa(screen=screen, resized=False)
    outputcontext = tff.ParseContext(output=StringIO.StringIO(), handler=canossa, buffering=False)
    parser = tff.DefaultParser()
    parser.init(outputcontext)
    return parser
def _pack(s):
result = 0
for c in s:
result = result << 8 | ord(c)
return result
class CSIHandlerTrait():
    def __init__(self):
        """Build the CSI dispatch table.

        Keys are the sequence's intermediate + final characters folded into
        an int by _pack (e.g. '?h' for DECSET); values are the bound handler
        methods defined below.
        """
        self._csi_map = {
            _pack('m'): self._handle_sgr,
            _pack('H'): self._handle_cup,
            _pack('h'): self._handle_sm,
            _pack('l'): self._handle_rm,
            _pack('?h'): self._handle_decset,
            _pack('?l'): self._handle_decrst,
            _pack('?s'): self._handle_xtsave,
            _pack('?r'): self._handle_xtrest,
            _pack('K'): self._handle_el,
            _pack('J'): self._handle_ed,
            _pack('G'): self._handle_cha,
            _pack('@'): self._handle_ich,
            _pack('A'): self._handle_cuu,
            _pack('B'): self._handle_cud,
            _pack('C'): self._handle_cuf,
            _pack('D'): self._handle_cub,
            _pack('L'): self._handle_il,
            _pack('M'): self._handle_dl,
            _pack('P'): self._handle_dch,
            _pack('>c'): self._handle_da2,
            _pack('d'): self._handle_vpa,
            _pack('f'): self._handle_hvp,
            _pack('g'): self._handle_tbc,
            _pack('n'): self._handle_dsr,
            _pack('?n'): self._handle_decdsr,
            _pack('r'): self._handle_decstbm,
            _pack('?$p'): self._handle_decrqm_dec,
            _pack('$p'): self._handle_decrqm_ansi,
            _pack('"p'): self._handle_decscl,
            _pack('!p'): self._handle_decstr,
            _pack('x'): self._handle_decreqtparm,
        }
def _handle_sgr(self, context, parameter):
"""
SGR - Select Graphics Rendition
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[1;4;45mabc\x1b[mdef')
"""
if not parameter:
self.screen.reset_sgr()
else:
params = _param_generator(parameter)
self.screen.sgr(params)
return True
    def _handle_cup(self, context, parameter):
        """
        CUP - Cursor Position

        Moves the cursor to (row, col), converting 1-origin CSI parameters
        to 0-origin via offset=-1; missing parameters default to 0.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[5;5H')
        >>> screen.getyx()
        (4, 4)
        >>> parser.parse('\x1b[H')
        >>> screen.getyx()
        (0, 0)
        >>> parser.parse('\x1b[4H')
        >>> screen.getyx()
        (3, 0)
        >>> parser.parse('\x1b[5;H')
        >>> screen.getyx()
        (4, 0)
        >>> parser.parse('\x1b[;5H')
        >>> screen.getyx()
        (0, 4)
        """
        # NOTE(review): unpacking the generator assumes exactly two values;
        # a sequence with three or more parameters (e.g. '1;2;3H') would
        # raise ValueError here -- confirm upstream guarantees.
        row, col = _param_generator(parameter, offset=-1, minarg=2)
        self.screen.cup(row, col)
        return True
    def _handle_sm(self, context, parameter):
        """
        SM - Set Mode

        ANSI modes are not tracked here; the return value only controls
        whether the sequence is consumed (presumably tied to whether the
        overlay is visible -- `self._visibility` is defined by the mixing
        class; confirm against the tff filter contract).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[1h')
        """
        return not self._visibility
    def _handle_rm(self, context, parameter):
        """
        RM - Reset Mode

        Mirror of _handle_sm: the mode itself is ignored; only the
        consume/pass-through decision is made.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[1l')
        """
        return not self._visibility
def _handle_decset(self, context, parameter):
"""
DECSET - DEC Specific Set Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1h')
"""
params = _param_generator(parameter)
self.screen.decset(params)
return not self._visibility
def _handle_decrst(self, context, parameter):
"""
DECRST - DEC Specific Reset Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1l')
"""
params = _param_generator(parameter)
self.screen.decrst(params)
return not self._visibility
def _handle_xtsave(self, context, parameter):
"""
XTSAVE(DEC) - Save DEC Specific Modes
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1s')
"""
params = _parse_params(parameter)
self.screen.xt_save(params)
return not self._visibility
def _handle_xtrest(self, context, parameter):
"""
XTREST(DEC) - Restore DEC Specific Modes
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1r')
"""
params = _parse_params(parameter)
self.screen.xt_rest(params)
return not self._visibility
    def _handle_el(self, context, parameter):
        """
        EL - Erase Line(s)

        Ps selects the region: 0 = cursor to end of line, 1 = start of line
        to cursor, 2 = whole line (behavior shown by the doctest below).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> screen.resize(4, 10)
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[H')
        >>> parser.parse('1234567890')
        >>> parser.parse('\x1b[1;5H')
        >>> parser.parse('\x1b[K')
        >>> print screen.lines[0]
        <ESC>[0m1234<SP><SP><SP><SP><SP><SP>
        >>> parser.parse('\x1b[1;2H')
        >>> parser.parse('\x1b[1K')
        >>> print screen.lines[0]
        <ESC>[0m<SP>234<SP><SP><SP><SP><SP><SP>
        >>> parser.parse('\x1b[2K')
        >>> print screen.lines[0]
        <ESC>[0m<SP><SP><SP><SP><SP><SP><SP><SP><SP><SP>
        """
        # Missing parameter decodes to 0 (erase to end of line).
        ps = _parse_params(parameter)[0]
        self.screen.el(ps)
        return True
def _handle_ed(self, context, parameter):
"""
ED - Erase Display
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[J')
"""
ps = _parse_params(parameter)[0]
self.screen.ed(ps)
return True
def _handle_cha(self, context, parameter):
"""
CHA - Cursor Horizontal Absolute
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7G')
>>> screen.getyx()
(0, 6)
"""
ps = _parse_params(parameter, offset=-1, minimum=1)[0]
self.screen.cha(ps)
return True
def _handle_ich(self, context, parameter):
"""
ICH - Insert Blank Character(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7@')
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.ich(ps)
return True
    def _handle_cuu(self, context, parameter):
        """
        CUU - Cursor Up

        Move the cursor up Ps rows (default/minimum 1); the screen clamps
        at row 0 (see the final doctest step).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[10;10H')
        >>> screen.getyx()
        (9, 9)
        >>> parser.parse('\x1b[A')
        >>> screen.getyx()
        (8, 9)
        >>> parser.parse('\x1b[3A')
        >>> screen.getyx()
        (5, 9)
        >>> parser.parse('\x1b[10A')
        >>> screen.getyx()
        (0, 9)
        """
        ps = _parse_params(parameter, minimum=1)[0]
        self.screen.cuu(ps)
        return True
    def _handle_cud(self, context, parameter):
        """
        CUD - Cursor Down

        Move the cursor down Ps rows (default/minimum 1); the screen clamps
        at the bottom row (see the final doctest step).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[5;10H')
        >>> screen.getyx()
        (4, 9)
        >>> parser.parse('\x1b[B')
        >>> screen.getyx()
        (5, 9)
        >>> parser.parse('\x1b[4B')
        >>> screen.getyx()
        (9, 9)
        >>> parser.parse('\x1b[40B')
        >>> screen.getyx()
        (23, 9)
        """
        ps = _parse_params(parameter, minimum=1)[0]
        self.screen.cud(ps)
        return True
    def _handle_cuf(self, context, parameter):
        """
        CUF - Cursor Forward

        Move the cursor right Ps columns (default/minimum 1); the screen
        clamps at the last column (see the final doctest step).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[5;10H')
        >>> screen.getyx()
        (4, 9)
        >>> parser.parse('\x1b[C')
        >>> screen.getyx()
        (4, 10)
        >>> parser.parse('\x1b[4C')
        >>> screen.getyx()
        (4, 14)
        >>> parser.parse('\x1b[100C')
        >>> screen.getyx()
        (4, 79)
        """
        ps = _parse_params(parameter, minimum=1)[0]
        self.screen.cuf(ps)
        return True
    def _handle_cub(self, context, parameter):
        """
        CUB - Cursor Backward

        Move the cursor left Ps columns (default/minimum 1); the screen
        clamps at column 0 (see the final doctest step).

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b[5;10H')
        >>> screen.getyx()
        (4, 9)
        >>> parser.parse('\x1b[D')
        >>> screen.getyx()
        (4, 8)
        >>> parser.parse('\x1b[3D')
        >>> screen.getyx()
        (4, 5)
        >>> parser.parse('\x1b[10D')
        >>> screen.getyx()
        (4, 0)
        """
        ps = _parse_params(parameter, minimum=1)[0]
        self.screen.cub(ps)
        return True
def _handle_il(self, context, parameter):
"""
IL - Insert Line(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7L')
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.il(ps)
return True
def _handle_dl(self, context, parameter):
    """
    DL - Delete Line(s)
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[7M')
    """
    # delete N lines starting at the cursor row (N defaults/clamps to 1)
    self.screen.dl(_parse_params(parameter, minimum=1)[0])
    return True
def _handle_dch(self, context, parameter):
    """
    DCH - Delete Char(s)
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[7P')
    """
    # DCH is deliberately not applied to the internal screen model: the
    # implementation below is commented out, but the sequence is still
    # swallowed (return True) so it is never forwarded downstream.
    # NOTE(review): screen.dch appears disabled -- confirm it exists and
    # behaves correctly before re-enabling these two lines.
    #ps = _parse_params(parameter, minimum=1)[0]
    #self.screen.dch(ps)
    return True
def _handle_da2(self, context, parameter):
    """
    DA2 - Secondary Device Attributes
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[>c')
    """
    # never answered by the model; always pass the query through
    return False
def _handle_vpa(self, context, parameter):
    """
    VPA - Vertical Position Absolute
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[4;4H')
    >>> screen.getyx()
    (3, 3)
    >>> parser.parse('\x1b[d')
    >>> screen.getyx()
    (0, 3)
    >>> parser.parse('\x1b[6d')
    >>> screen.getyx()
    (5, 3)
    >>> parser.parse('\x1b[100d')
    >>> screen.getyx()
    (23, 3)
    """
    # 1-origin row in the sequence -> 0-origin row for the model
    destination_row = _parse_params(parameter, offset=-1)[0]
    self.screen.vpa(destination_row)
    return True
def _handle_hvp(self, context, parameter):
    """
    HVP - Horizontal and Vertical Position
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[5;5f')
    >>> screen.getyx()
    (4, 4)
    >>> parser.parse('\x1b[f')
    >>> screen.getyx()
    (0, 0)
    >>> parser.parse('\x1b[4f')
    >>> screen.getyx()
    (3, 0)
    >>> parser.parse('\x1b[5;f')
    >>> screen.getyx()
    (4, 0)
    >>> parser.parse('\x1b[;5f')
    >>> screen.getyx()
    (0, 4)
    """
    # two 1-origin coordinates, both converted to 0-origin; missing ones
    # are filled in as 0 by minarg=2
    coords = _parse_params(parameter, offset=-1, minarg=2)
    self.screen.hvp(coords[0], coords[1])
    return True
def _handle_tbc(self, context, parameter):
    """
    TBC - Tabstop Clear
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> screen.cursor.col
    0
    >>> screen._tabstop
    [0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80]
    >>> parser.parse('\x1b[g')
    >>> screen._tabstop
    [8, 16, 24, 32, 40, 48, 56, 64, 72, 80]
    >>> parser.parse('\x1b[1;9H\x1b[0g')
    >>> screen._tabstop
    [16, 24, 32, 40, 48, 56, 64, 72, 80]
    >>> parser.parse('\x1b[3g')
    >>> screen._tabstop
    []
    """
    # mode 0 clears the tabstop at the cursor, mode 3 clears them all
    mode = _parse_params(parameter)[0]
    self.screen.tbc(mode)
    return not self._visibility
def _handle_dsr(self, context, parameter):
    """
    DSR - Device Status Request
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[6n')
    >>> parser.parse('\x1b[n')
    """
    # only "CSI 6 n" (cursor position report) is answered from the model
    if len(parameter) == 1 and parameter[0] == 0x36:  # 6
        y, x = self.screen.getyx()
        context.puts("\x1b[%d;%dR" % (y + 1, x + 1))
        return True
    return not self._visibility
def _handle_decdsr(self, context, parameter):
    """
    DECDSR - DEC Specific Device Status Request
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[?6n')
    >>> parser.parse('\x1b[6n')
    """
    # only "CSI ? 6 n" (extended cursor position report) is answered here
    if len(parameter) == 2 and parameter[1] == 0x36:  # ?6
        y, x = self.screen.getyx()
        context.puts("\x1b[?%d;%dR" % (y + 1, x + 1))
        return True
    return not self._visibility
def _handle_decstbm(self, context, parameter):
    """
    DECSTBM - Set Top and Bottom Margin
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[r')
    >>> screen.getyx()
    (0, 0)
    >>> parser.parse('\x1b[4;6r')
    >>> screen.getyx()
    (3, 0)
    """
    if parameter:
        top, bottom = _parse_params(parameter, offset=-1, minarg=2)
    else:
        # no parameters: reset the scroll region to the full screen
        top = 0
        bottom = self.screen.height - 1
    self.screen.decstbm(top, bottom)
    return True
def _handle_decrqm_ansi(self, context, parameter):
    """
    DECRQM(ANSI) - Request ANSI Mode
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[$p')
    """
    # ignored by the model; swallow it only while we own the display
    return not self._visibility
def _handle_decrqm_dec(self, context, parameter):
    """
    DECRQM(DEC) - Request DEC Private Mode
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[?$p')
    """
    # ignored by the model; swallow it only while we own the display
    return not self._visibility
def _handle_decscl(self, context, parameter):
    """
    DECSCL - Set Conformance Level
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b["p')
    """
    # ignored by the model; swallow it only while we own the display
    return not self._visibility
def _handle_decstr(self, context, parameter):
    """
    DECSTR - Soft Reset
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[!p')
    """
    # apply the soft reset to the internal model and consume the sequence
    self.screen.decstr()
    return True
def _handle_decreqtparm(self, context, parameter):
    """
    DECREQTPARM - Request terminal parameters
    >>> from screen import MockScreenWithCursor
    >>> screen = MockScreenWithCursor()
    >>> parser = _generate_mock_parser(screen)
    >>> parser.parse('\x1b[x')
    """
    # ignored by the model; swallow it only while we own the display
    return not self._visibility
def dispatch_csi(self, context, parameter, intermediate, final):
if parameter and parameter[0] > 0x3b:
key = parameter[0]
else:
key = 0
key = reduce(lambda acc, x: acc << 8 | x, intermediate, key)
key = key << 8 | final
f = self._csi_map[key]
return f(context, parameter)
class ESCHandlerTrait():
    """Mixin holding the dispatch table and handlers for plain ESC sequences.

    Handlers return True when the sequence has been fully consumed by the
    internal screen model, or ``not self._visibility`` when it should also
    be passed through to the real terminal.
    """

    def __init__(self):
        # keys are the intermediate/final bytes packed into one int by _pack()
        self._esc_map = {
            _pack('7'): self._esc_cursorsave,
            _pack('8'): self._esc_cursorrestore,
            _pack('D'): self._esc_ind,
            _pack('E'): self._esc_nel,
            _pack('H'): self._esc_hts,
            _pack('M'): self._esc_ri,
            _pack('c'): self._esc_ris,
            _pack('#3'): self._esc_decdhlt,
            _pack('#4'): self._esc_decdhlb,
            _pack('#5'): self._esc_decswl,
            _pack('#6'): self._esc_decdwl,
            _pack('#8'): self._esc_decaln,
        }

    def _esc_cursorsave(self):
        """
        DECSC - Save Cursor
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b7')
        """
        self.screen.cursor.save()
        return True

    def _esc_cursorrestore(self):
        """
        DECRC - Restore Cursor
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b8')
        """
        # BUG FIX: this handler previously called cursor.save(), which
        # clobbered the state stashed by DECSC (ESC 7).  Per the VT100
        # spec, DECRC must restore that saved state.
        self.screen.cursor.restore()
        return True

    def _esc_ind(self):
        """
        IND - Index
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bD')
        """
        self.screen.ind()
        return True

    def _esc_nel(self):
        """
        NEL - Next Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bE')
        """
        self.screen.nel()
        return True

    def _esc_hts(self):
        """
        HTS - Horizontal Tab Set
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bH')
        """
        self.screen.hts()
        return True

    def _esc_ri(self):
        """
        RI - Reverse Index
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bM')
        """
        self.screen.ri()
        return True

    def _esc_ris(self):
        """
        RIS - Hard Terminal Reset
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bc')
        """
        self.screen.ris()
        return not self._visibility # pass through

    def _esc_decdhlt(self):
        """
        DECDHLT - Double Height Line (Top part)
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#3')
        """
        self.screen.decdhlt()
        return True

    def _esc_decdhlb(self):
        """
        DECDHLB - Double Height Line (Bottom part)
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#4')
        """
        self.screen.decdhlb()
        return True

    def _esc_decswl(self):
        """
        DECSWL - Single Width Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#5')
        """
        self.screen.decswl()
        return True

    def _esc_decdwl(self):
        """
        DECDWL - Double Width Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#6')
        """
        self.screen.decdwl()
        return True

    def _esc_decaln(self):
        """
        DECALN - Screen Alignment Pattern
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#8')
        """
        self.screen.decaln()
        return True

    def dispatch_esc(self, context, intermediate, final):
        """Pack the intermediate/final bytes into a key and call its handler."""
        key = reduce(lambda acc, x: acc << 8 | x, intermediate, 0)
        key = key << 8 | final
        # NOTE(review): charset designation (ESC ( / ESC )) is still disabled:
        #elif intermediate == [0x28]: # (
        #    self.screen.set_g0(final)
        #    return True
        #elif intermediate == [0x29]: # )
        #    self.screen.set_g1(final)
        #    return True
        f = self._esc_map[key]
        return f()
class Canossa(tff.DefaultHandler,
              CSIHandlerTrait,
              ESCHandlerTrait):
    # tff event handler that mirrors the terminal stream into an internal
    # screen model (and optionally redraws it when visible).

    # pending Cursor-Position-Report flag (see commented code in handle_draw)
    __cpr = False

    def __init__(self,
                 screen=None,
                 termenc="UTF-8",
                 termprop=None,
                 visibility=False,
                 resized=True):
        """Create a handler around *screen*, probing the real tty when absent.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> screen.getyx()
        (0, 0)
        >>> canossa = Canossa(screen=screen, resized=False)
        """
        if screen:
            self.screen = screen
        else:
            import sys
            from screen import Screen
            # make screen
            # get current position
            result = _get_pos_and_size(sys.stdin, sys.stdout)
            if result:
                row, col, y, x = result
                self.screen = Screen(row, col, y, x, termenc, termprop)
            # NOTE(review): if probing fails, self.screen is never set and
            # later handlers raise AttributeError -- confirm this is intended.
        self._visibility = visibility
        self.__cpr = False
        self._resized = resized
        CSIHandlerTrait.__init__(self)
        ESCHandlerTrait.__init__(self)

    def handle_csi(self, context, parameter, intermediate, final):
        """Apply a CSI sequence to the model; always swallow it on error.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        """
        if self._resized:
            # the terminal was resized since the last event: re-clamp the cursor
            self._resized = False
            self.screen.adjust_cursor()
        try:
            return self.dispatch_csi(context, parameter, intermediate, final)
        except Exception, e:
            # unknown/broken sequence: log it and report it as consumed
            mnemonic = '[%s, %s, %s]' % (repr(parameter), repr(intermediate), chr(final))
            logging.error("handle_csi: %s" % mnemonic)
            logging.error("handle_csi: %s" % e)
        return True

    def handle_esc(self, context, intermediate, final):
        """Apply an ESC sequence to the model; always swallow it on error.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        """
        if self._resized:
            self._resized = False
            self.screen.adjust_cursor()
        try:
            self.dispatch_esc(context, intermediate, final)
        except Exception, e:
            mnemonic = '[%s, %s]' % (repr(intermediate), chr(final))
            logging.error("handle_esc: %s" % mnemonic)
            logging.error("handle_esc: %s" % e)
        return True

    def handle_control_string(self, context, prefix, value):
        """Track window-title OSC strings (OSC 0 / OSC 2); ignore the rest.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b]0;abcde\\x1b\\\\')
        >>> parser.parse('\x1b]2;abcde\\x1b\\\\')
        >>> parser.parse('\x1b]Pq\\x1b\\\\')
        """
        if prefix == 0x5d: # ']'
            try:
                # split at the first ';' -- everything before is the OSC number
                pos = value.index(0x3b)
            except ValueError:
                return False
            if pos == -1:
                return False
            elif pos == 0:
                num = [0]
            else:
                try:
                    num = value[:pos]
                except:
                    num = None
            if num:
                # 0x30/0x32 are ASCII '0'/'2': icon-name+title / title
                if num[0] == 0x30 or num[0] == 0x32:
                    arg = value[pos + 1:]
                    self.screen.settitle(u''.join([unichr(x) for x in arg]))
                    s = self.screen.gettitle()
                    if s:
                        value = num + [0x3b] + [ord(x) for x in s]
                        new_title = u"".join([unichr(c) for c in value])
                        #context.putu(u"\x1b]%s\x1b\\" % new_title)
                        return True
        return False

    def handle_char(self, context, c):
        """Feed a printable/control character into the screen model.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('abc\\x07def\\x0a\\x0d\\x0c\\x0e\\x0f')
        """
        if self._resized:
            self._resized = False
            self.screen.adjust_cursor()
        screen = self.screen
        if c <= 0x20:
            # C0 controls and space are handled individually
            if c == 0x20: # SP
                screen.sp()
            elif c == 0x0a: # NL
                screen.lf()
            elif c == 0x0d: # CR
                screen.cr()
            elif c == 0x09: # HT
                screen.ht()
            elif c == 0x08: # BS
                screen.bs()
            elif c == 0x00: # NUL
                pass
            elif c == 0x05: # ENQ
                # answerback: forward to the real terminal while invisible
                return not self._visibility
            elif c == 0x07: # BEL
                pass
            elif c == 0x0b: # VT
                screen.lf()
            elif c == 0x0c: # FF
                screen.lf()
            elif c == 0x0e: # SO
                screen.so()
                return True
            elif c == 0x0f: # SI
                screen.si()
                return True
        else:
            screen.write(c)
        return True

    def handle_draw(self, context):
        # repaint the whole model only when this handler owns the display
        if self._visibility:
            self.screen.drawall(context)
        #if self.__cpr != _CPR_NONE:
        #    if self.__cpr == _CPR_ANSI:
        #        self.__cpr = _CPR_NONE
        #        context.puts("\x1b[6n")
        #    elif self.__cpr == _CPR_DEC:
        #        self.__cpr = _CPR_NONE
        #        context.puts("\x1b[?6n")

    def handle_resize(self, context, row, col):
        # serialized with the module-level lock because resize can race the
        # parser thread
        lock.acquire()
        #self._resized = True
        screen = self.screen
        try:
            screen.resize(row, col)
            screen.adjust_cursor()
        finally:
            lock.release()
def test():
    """Run this module's doctest suite."""
    import doctest
    doctest.testmod()
# Run the doctest suite when executed as a script.
if __name__ == "__main__":
    test()
# "Improve drawing" -- stray commit-message line separating two concatenated revisions of this file
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# ***** BEGIN LICENSE BLOCK *****
# Copyright (C) 2012-2014, Hayaki Saito
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
#
# ***** END LICENSE BLOCK *****
import tff
import thread
import logging
lock = thread.allocate_lock()
_CPR_NONE = 0
_CPR_ANSI = 1
_CPR_DEC = 2
def _param_generator(params, minimum=0, offset=0, minarg=1):
    """Yield numeric CSI parameters decoded from the byte values in *params*.

    ASCII digits accumulate into the current value; bytes 0x3a/0x3b act as
    separators.  Every emitted value has *offset* added and is clamped up
    to *minimum*; when fewer than *minarg* values were produced, one extra
    *minimum* is yielded so callers can rely on a minimum arity.
    """
    current = 0
    for byte in params:
        if byte < 0x3a:
            # ASCII digit: accumulate the decimal value
            current = current * 10 + byte - 0x30
        elif byte < 0x3c:
            # separator (':' or ';'): emit the finished value
            value = current + offset
            yield value if value >= minimum else minimum
            minarg -= 1
            current = 0
    value = current + offset
    yield value if value >= minimum else minimum
    minarg -= 1
    if minarg > 0:
        yield minimum

def _parse_params(params, minimum=0, offset=0, minarg=1):
    """Decode *params* eagerly and return the parameter values as a list."""
    return list(_param_generator(params, minimum, offset, minarg))
import re
import os
import select

# Matches either a CPR reply "ESC [ row ; col R" (groups 1,2) or a dtterm
# window-size report "ESC [ 8 ; rows ; cols t" (groups 3,4).
_dimension_pattern = re.compile('\033\[([0-9]+);([0-9]+)R|\033\[8;([0-9]+);([0-9]+)t')
def _getdimension_dtterm(stdin, stdout):
    """Query the screen size with the dtterm sequence CSI 18 t.

    Writes the query up to twice, waiting 0.1s for a reply each time, and
    returns (rows, cols) as ints, or None when no parseable report arrives.
    """
    data = ""
    for i in xrange(0, 2):
        stdout.write("\x1b[18t")
        stdout.flush()
        fd = stdin.fileno()
        rfd, wfd, xfd = select.select([fd], [], [], 0.1)
        if rfd:
            # accumulate across retries: the reply may arrive fragmented
            data += os.read(fd, 1024)
            m = _dimension_pattern.match(data)
            if m is None:
                continue
            m1, m2, m3, m4 = m.groups()
            if m3 and m4:
                # groups 3/4 carry the "CSI 8 ; rows ; cols t" size report
                row = int(m3)
                col = int(m4)
                return row, col
    return None
def _getposition(stdin, stdout):
    """Query the cursor position with DSR (CSI 6 n).

    Writes the query up to twice, waiting 0.1s for a reply each time, and
    returns the reported pair as 1-origin ints, or None on failure.  A
    dtterm size report that happens to match first (groups 3/4) is
    accepted as well.
    """
    data = ""
    for i in xrange(0, 2):
        stdout.write("\x1b[6n")
        stdout.flush()
        fd = stdin.fileno()
        rfd, wfd, xfd = select.select([fd], [], [], 0.1)
        if rfd:
            # accumulate across retries: the reply may arrive fragmented
            data += os.read(fd, 1024)
            m = _dimension_pattern.match(data)
            if m is None:
                continue
            m1, m2, m3, m4 = m.groups()
            if m3 and m4:
                # dtterm size report
                row = int(m3)
                col = int(m4)
                return row, col
            if m1 and m2:
                # CPR reply "ESC [ row ; col R"
                row = int(m1)
                col = int(m2)
                return row, col
    return None
def _get_pos_and_size(stdin, stdout):
    """Probe the controlling tty for (rows, cols, cursor_y, cursor_x).

    Temporarily switches the tty to no-echo, non-canonical mode, queries
    the cursor position and the screen size, then restores both the cursor
    and the original tty settings.  Returns None when probing fails.
    """
    import termios
    stdin_fileno = stdin.fileno()
    # NOTE(review): vdisable is computed but never used -- confirm dead code.
    vdisable = os.fpathconf(stdin_fileno, 'PC_VDISABLE')
    backup = termios.tcgetattr(stdin_fileno)
    new = termios.tcgetattr(stdin_fileno)
    # raw-ish mode: no echo, no line buffering, byte-at-a-time reads
    new[3] &= ~(termios.ECHO | termios.ICANON)
    new[6][termios.VMIN] = 1
    new[6][termios.VTIME] = 0
    termios.tcsetattr(stdin_fileno, termios.TCSAFLUSH, new)
    try:
        position = _getposition(stdin, stdout)
        if position:
            y, x = position
            try:
                dimension = _getdimension_dtterm(stdin, stdout)
                if not dimension:
                    # fallback: jump far past the corner; the terminal clamps
                    # the cursor, so a CPR query then reports the screen size
                    stdout.write("\x1b[9999;9999H")
                    dimension = _getposition(stdin, stdout)
                if dimension:
                    row, col = dimension
                    return (row, col, y, x)
            finally:
                # put the cursor back where it was before probing
                stdout.write("\033[%d;%dH" % (y, x))
    finally:
        termios.tcsetattr(stdin_fileno, termios.TCSAFLUSH, backup)
    return None
def _generate_mock_parser(screen):
    """Build a tff parser wired to a Canossa handler over *screen*.

    Doctest helper: output is discarded into a StringIO buffer so parsed
    sequences only mutate the given mock screen.
    """
    import StringIO
    import tff
    canossa = Canossa(screen=screen, resized=False)
    outputcontext = tff.ParseContext(output=StringIO.StringIO(), handler=canossa, buffering=False)
    parser = tff.DefaultParser()
    parser.init(outputcontext)
    return parser
def _pack(s):
    """Fold the characters of *s* into one integer key, one byte per char."""
    return reduce(lambda acc, ch: acc << 8 | ord(ch), s, 0)
class CSIHandlerTrait():
def __init__(self):
self._csi_map = {
_pack('m'): self._handle_sgr,
_pack('H'): self._handle_cup,
_pack('h'): self._handle_sm,
_pack('l'): self._handle_rm,
_pack('?h'): self._handle_decset,
_pack('?l'): self._handle_decrst,
_pack('?s'): self._handle_xtsave,
_pack('?r'): self._handle_xtrest,
_pack('K'): self._handle_el,
_pack('J'): self._handle_ed,
_pack('G'): self._handle_cha,
_pack('@'): self._handle_ich,
_pack('A'): self._handle_cuu,
_pack('B'): self._handle_cud,
_pack('C'): self._handle_cuf,
_pack('D'): self._handle_cub,
_pack('L'): self._handle_il,
_pack('M'): self._handle_dl,
_pack('P'): self._handle_dch,
_pack('>c'): self._handle_da2,
_pack('d'): self._handle_vpa,
_pack('f'): self._handle_hvp,
_pack('g'): self._handle_tbc,
_pack('n'): self._handle_dsr,
_pack('?n'): self._handle_decdsr,
_pack('r'): self._handle_decstbm,
_pack('?$p'): self._handle_decrqm_dec,
_pack('$p'): self._handle_decrqm_ansi,
_pack('"p'): self._handle_decscl,
_pack('!p'): self._handle_decstr,
_pack('x'): self._handle_decreqtparm,
}
def _handle_sgr(self, context, parameter):
"""
SGR - Select Graphics Rendition
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[1;4;45mabc\x1b[mdef')
"""
if not parameter:
self.screen.reset_sgr()
else:
params = _param_generator(parameter)
self.screen.sgr(params)
return True
def _handle_cup(self, context, parameter):
"""
CUP - Cursor Position
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[5;5H')
>>> screen.getyx()
(4, 4)
>>> parser.parse('\x1b[H')
>>> screen.getyx()
(0, 0)
>>> parser.parse('\x1b[4H')
>>> screen.getyx()
(3, 0)
>>> parser.parse('\x1b[5;H')
>>> screen.getyx()
(4, 0)
>>> parser.parse('\x1b[;5H')
>>> screen.getyx()
(0, 4)
"""
row, col = _param_generator(parameter, offset=-1, minarg=2)
self.screen.cup(row, col)
return True
def _handle_sm(self, context, parameter):
"""
SM - Set Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[1h')
"""
return not self._visibility
def _handle_rm(self, context, parameter):
"""
RM - Reset Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[1l')
"""
return not self._visibility
def _handle_decset(self, context, parameter):
"""
DECSET - DEC Specific Set Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1h')
"""
params = _param_generator(parameter)
self.screen.decset(params)
return not self._visibility
def _handle_decrst(self, context, parameter):
"""
DECRST - DEC Specific Reset Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1l')
"""
params = _param_generator(parameter)
self.screen.decrst(params)
return not self._visibility
def _handle_xtsave(self, context, parameter):
"""
XTSAVE(DEC) - Save DEC Specific Modes
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1s')
"""
params = _parse_params(parameter)
self.screen.xt_save(params)
return not self._visibility
def _handle_xtrest(self, context, parameter):
"""
XTREST(DEC) - Restore DEC Specific Modes
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?1r')
"""
params = _parse_params(parameter)
self.screen.xt_rest(params)
return not self._visibility
def _handle_el(self, context, parameter):
"""
EL - Erase Line(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> screen.resize(4, 10)
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[H')
>>> parser.parse('1234567890')
>>> parser.parse('\x1b[1;5H')
>>> parser.parse('\x1b[K')
>>> print screen.lines[0]
<ESC>[0m1234<SP><SP><SP><SP><SP><SP>
>>> parser.parse('\x1b[1;2H')
>>> parser.parse('\x1b[1K')
>>> print screen.lines[0]
<ESC>[0m<SP>234<SP><SP><SP><SP><SP><SP>
>>> parser.parse('\x1b[2K')
>>> print screen.lines[0]
<ESC>[0m<SP><SP><SP><SP><SP><SP><SP><SP><SP><SP>
"""
ps = _parse_params(parameter)[0]
self.screen.el(ps)
return True
def _handle_ed(self, context, parameter):
"""
ED - Erase Display
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[J')
"""
ps = _parse_params(parameter)[0]
self.screen.ed(ps)
return True
def _handle_cha(self, context, parameter):
"""
CHA - Cursor Horizontal Absolute
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7G')
>>> screen.getyx()
(0, 6)
"""
ps = _parse_params(parameter, offset=-1, minimum=1)[0]
self.screen.cha(ps)
return True
def _handle_ich(self, context, parameter):
"""
ICH - Insert Blank Character(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7@')
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.ich(ps)
return True
def _handle_cuu(self, context, parameter):
"""
CUU - Cursor Up
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[10;10H')
>>> screen.getyx()
(9, 9)
>>> parser.parse('\x1b[A')
>>> screen.getyx()
(8, 9)
>>> parser.parse('\x1b[3A')
>>> screen.getyx()
(5, 9)
>>> parser.parse('\x1b[10A')
>>> screen.getyx()
(0, 9)
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.cuu(ps)
return True
def _handle_cud(self, context, parameter):
"""
CUD - Cursor Down
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[5;10H')
>>> screen.getyx()
(4, 9)
>>> parser.parse('\x1b[B')
>>> screen.getyx()
(5, 9)
>>> parser.parse('\x1b[4B')
>>> screen.getyx()
(9, 9)
>>> parser.parse('\x1b[40B')
>>> screen.getyx()
(23, 9)
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.cud(ps)
return True
def _handle_cuf(self, context, parameter):
"""
CUF - Cursor Forward
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[5;10H')
>>> screen.getyx()
(4, 9)
>>> parser.parse('\x1b[C')
>>> screen.getyx()
(4, 10)
>>> parser.parse('\x1b[4C')
>>> screen.getyx()
(4, 14)
>>> parser.parse('\x1b[100C')
>>> screen.getyx()
(4, 79)
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.cuf(ps)
return True
def _handle_cub(self, context, parameter):
"""
CUB - Cursor Backward
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[5;10H')
>>> screen.getyx()
(4, 9)
>>> parser.parse('\x1b[D')
>>> screen.getyx()
(4, 8)
>>> parser.parse('\x1b[3D')
>>> screen.getyx()
(4, 5)
>>> parser.parse('\x1b[10D')
>>> screen.getyx()
(4, 0)
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.cub(ps)
return True
def _handle_il(self, context, parameter):
"""
IL - Insert Line(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7L')
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.il(ps)
return True
def _handle_dl(self, context, parameter):
"""
DL - Delete Line(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7M')
"""
ps = _parse_params(parameter, minimum=1)[0]
self.screen.dl(ps)
return True
def _handle_dch(self, context, parameter):
"""
DCH - Delete Char(s)
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[7P')
"""
#ps = _parse_params(parameter, minimum=1)[0]
#self.screen.dch(ps)
return True
def _handle_da2(self, context, parameter):
"""
DA2 - Secondary Device Attributes
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[>c')
"""
return False
def _handle_vpa(self, context, parameter):
"""
VPA - Vertical Position Absolute
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[4;4H')
>>> screen.getyx()
(3, 3)
>>> parser.parse('\x1b[d')
>>> screen.getyx()
(0, 3)
>>> parser.parse('\x1b[6d')
>>> screen.getyx()
(5, 3)
>>> parser.parse('\x1b[100d')
>>> screen.getyx()
(23, 3)
"""
ps = _parse_params(parameter, offset=-1)[0]
self.screen.vpa(ps)
return True
def _handle_hvp(self, context, parameter):
"""
HVP - Horizontal and Vertical Position
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[5;5f')
>>> screen.getyx()
(4, 4)
>>> parser.parse('\x1b[f')
>>> screen.getyx()
(0, 0)
>>> parser.parse('\x1b[4f')
>>> screen.getyx()
(3, 0)
>>> parser.parse('\x1b[5;f')
>>> screen.getyx()
(4, 0)
>>> parser.parse('\x1b[;5f')
>>> screen.getyx()
(0, 4)
"""
row, col = _parse_params(parameter, offset=-1, minarg=2)
self.screen.hvp(row, col)
return True
def _handle_tbc(self, context, parameter):
"""
TBC - Tabstop Clear
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> screen.cursor.col
0
>>> screen._tabstop
[0, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80]
>>> parser.parse('\x1b[g')
>>> screen._tabstop
[8, 16, 24, 32, 40, 48, 56, 64, 72, 80]
>>> parser.parse('\x1b[1;9H\x1b[0g')
>>> screen._tabstop
[16, 24, 32, 40, 48, 56, 64, 72, 80]
>>> parser.parse('\x1b[3g')
>>> screen._tabstop
[]
"""
ps = _parse_params(parameter)[0]
self.screen.tbc(ps)
return not self._visibility
def _handle_dsr(self, context, parameter):
"""
DSR - Device Status Request
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[6n')
>>> parser.parse('\x1b[n')
"""
if len(parameter) == 1:
if parameter[0] == 0x36: # 6
y, x = self.screen.getyx()
context.puts("\x1b[%d;%dR" % (y + 1, x + 1))
return True
return not self._visibility
def _handle_decdsr(self, context, parameter):
"""
DECDSR - DEC Specific Device Status Request
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?6n')
>>> parser.parse('\x1b[6n')
"""
if len(parameter) == 2:
if parameter[1] == 0x36: # ?6
y, x = self.screen.getyx()
context.puts("\x1b[?%d;%dR" % (y + 1, x + 1))
return True
return not self._visibility
def _handle_decstbm(self, context, parameter):
"""
DECSTBM - Set Top and Bottom Margin
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[r')
>>> screen.getyx()
(0, 0)
>>> parser.parse('\x1b[4;6r')
>>> screen.getyx()
(3, 0)
"""
if parameter:
top, bottom = _parse_params(parameter, offset=-1, minarg=2)
self.screen.decstbm(top, bottom)
else:
top, bottom = 0, self.screen.height - 1
self.screen.decstbm(top, bottom)
return True
def _handle_decrqm_ansi(self, context, parameter):
"""
DECRQM(ANSI) - Request ANSI Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[$p')
"""
return not self._visibility
def _handle_decrqm_dec(self, context, parameter):
"""
DECRQM(DEC) - Request DEC Private Mode
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[?$p')
"""
return not self._visibility
def _handle_decscl(self, context, parameter):
"""
DECSCL - Set Conformance Level
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b["p')
"""
return not self._visibility
def _handle_decstr(self, context, parameter):
"""
DECSTR - Soft Reset
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[!p')
"""
self.screen.decstr()
return True
def _handle_decreqtparm(self, context, parameter):
"""
DECREQTPARM - Request terminal parameters
>>> from screen import MockScreenWithCursor
>>> screen = MockScreenWithCursor()
>>> parser = _generate_mock_parser(screen)
>>> parser.parse('\x1b[x')
"""
return not self._visibility
def dispatch_csi(self, context, parameter, intermediate, final):
if parameter and parameter[0] > 0x3b:
key = parameter[0]
else:
key = 0
key = reduce(lambda acc, x: acc << 8 | x, intermediate, key)
key = key << 8 | final
f = self._csi_map[key]
return f(context, parameter)
class ESCHandlerTrait():
    """Mixin holding the dispatch table and handlers for plain ESC sequences.

    Handlers return True when the sequence has been fully consumed by the
    internal screen model, or ``not self._visibility`` when it should also
    be passed through to the real terminal.
    """

    def __init__(self):
        # keys are the intermediate/final bytes packed into one int by _pack()
        self._esc_map = {
            _pack('7'): self._esc_cursorsave,
            _pack('8'): self._esc_cursorrestore,
            _pack('D'): self._esc_ind,
            _pack('E'): self._esc_nel,
            _pack('H'): self._esc_hts,
            _pack('M'): self._esc_ri,
            _pack('c'): self._esc_ris,
            _pack('#3'): self._esc_decdhlt,
            _pack('#4'): self._esc_decdhlb,
            _pack('#5'): self._esc_decswl,
            _pack('#6'): self._esc_decdwl,
            _pack('#8'): self._esc_decaln,
        }

    def _esc_cursorsave(self):
        """
        DECSC - Save Cursor
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b7')
        """
        self.screen.cursor.save()
        return True

    def _esc_cursorrestore(self):
        """
        DECRC - Restore Cursor
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b8')
        """
        # BUG FIX: this handler previously called cursor.save(), which
        # clobbered the state stashed by DECSC (ESC 7).  Per the VT100
        # spec, DECRC must restore that saved state.
        self.screen.cursor.restore()
        return True

    def _esc_ind(self):
        """
        IND - Index
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bD')
        """
        self.screen.ind()
        return True

    def _esc_nel(self):
        """
        NEL - Next Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bE')
        """
        self.screen.nel()
        return True

    def _esc_hts(self):
        """
        HTS - Horizontal Tab Set
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bH')
        """
        self.screen.hts()
        return True

    def _esc_ri(self):
        """
        RI - Reverse Index
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bM')
        """
        self.screen.ri()
        return True

    def _esc_ris(self):
        """
        RIS - Hard Terminal Reset
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1bc')
        """
        self.screen.ris()
        return not self._visibility # pass through

    def _esc_decdhlt(self):
        """
        DECDHLT - Double Height Line (Top part)
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#3')
        """
        self.screen.decdhlt()
        return True

    def _esc_decdhlb(self):
        """
        DECDHLB - Double Height Line (Bottom part)
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#4')
        """
        self.screen.decdhlb()
        return True

    def _esc_decswl(self):
        """
        DECSWL - Single Width Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#5')
        """
        self.screen.decswl()
        return True

    def _esc_decdwl(self):
        """
        DECDWL - Double Width Line
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#6')
        """
        self.screen.decdwl()
        return True

    def _esc_decaln(self):
        """
        DECALN - Screen Alignment Pattern
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b#8')
        """
        self.screen.decaln()
        return True

    def dispatch_esc(self, context, intermediate, final):
        """Pack the intermediate/final bytes into a key and call its handler."""
        key = reduce(lambda acc, x: acc << 8 | x, intermediate, 0)
        key = key << 8 | final
        # NOTE(review): charset designation (ESC ( / ESC )) is still disabled:
        #elif intermediate == [0x28]: # (
        #    self.screen.set_g0(final)
        #    return True
        #elif intermediate == [0x29]: # )
        #    self.screen.set_g1(final)
        #    return True
        f = self._esc_map[key]
        return f()
class Canossa(tff.DefaultHandler,
              CSIHandlerTrait,
              ESCHandlerTrait):
    """Terminal-stream handler that mirrors the host terminal state into
    ``self.screen``, combining the CSI and ESC dispatch mixins on top of
    tff.DefaultHandler."""

    # Pending Cursor Position Report flag; only used by the commented-out
    # machinery in handle_draw.
    __cpr = False
    # True when the mirrored screen has changes not yet drawn.
    dirty = True

    def __init__(self,
                 screen=None,
                 termenc="UTF-8",
                 termprop=None,
                 visibility=False,
                 resized=True):
        """
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> screen.getyx()
        (0, 0)
        >>> canossa = Canossa(screen=screen, resized=False)
        """
        if screen:
            self.screen = screen
        else:
            import sys
            from screen import Screen
            # make screen
            # get current position
            result = _get_pos_and_size(sys.stdin, sys.stdout)
            if result:
                row, col, y, x = result
                self.screen = Screen(row, col, y, x, termenc, termprop)
            # NOTE(review): if _get_pos_and_size returns a falsy result,
            # self.screen is never assigned — confirm whether that case can
            # occur in practice.
        self._visibility = visibility
        self.__cpr = False
        self._resized = resized
        # Both trait constructors build their dispatch tables here.
        CSIHandlerTrait.__init__(self)
        ESCHandlerTrait.__init__(self)

    def handle_csi(self, context, parameter, intermediate, final):
        """
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        """
        if self._resized:
            # Apply a deferred resize before interpreting the sequence.
            self._resized = False
            self.screen.adjust_cursor()
        try:
            return self.dispatch_csi(context, parameter, intermediate, final)
        except Exception, e:
            # Unknown/failed sequences are logged and swallowed; returning
            # True claims the sequence so it is not forwarded.
            mnemonic = '[%s, %s, %s]' % (repr(parameter), repr(intermediate), chr(final))
            logging.error("handle_csi: %s" % mnemonic)
            logging.error("handle_csi: %s" % e)
            return True

    def handle_esc(self, context, intermediate, final):
        """
        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        """
        if self._resized:
            self._resized = False
            self.screen.adjust_cursor()
        try:
            # NOTE(review): unlike handle_csi, the return value of
            # dispatch_esc is discarded here, so e.g. the pass-through
            # result of _esc_ris never reaches the caller — confirm
            # whether this is intentional.
            self.dispatch_esc(context, intermediate, final)
        except Exception, e:
            mnemonic = '[%s, %s]' % (repr(intermediate), chr(final))
            logging.error("handle_esc: %s" % mnemonic)
            logging.error("handle_esc: %s" % e)
        return True

    def handle_control_string(self, context, prefix, value):
        """Handle OSC strings; updates the stored window title for OSC 0/2.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('\x1b]0;abcde\\x1b\\\\')
        >>> parser.parse('\x1b]2;abcde\\x1b\\\\')
        >>> parser.parse('\x1b]Pq\\x1b\\\\')
        """
        if prefix == 0x5d:  # ']'
            # Split "<num>;<argument>" at the first semicolon.
            try:
                pos = value.index(0x3b)
            except ValueError:
                return False
            if pos == -1:
                return False
            elif pos == 0:
                # Leading ';' means parameter 0.
                num = [0]
            else:
                try:
                    num = value[:pos]
                except:
                    # NOTE(review): a list slice cannot raise here; this
                    # bare except looks vestigial — confirm.
                    num = None
            if num:
                # NOTE(review): only the first byte is compared against
                # '0'/'2', so multi-digit parameters are not handled.
                if num[0] == 0x30 or num[0] == 0x32:
                    arg = value[pos + 1:]
                    self.screen.settitle(u''.join([unichr(x) for x in arg]))
                    s = self.screen.gettitle()
                    if s:
                        value = num + [0x3b] + [ord(x) for x in s]
                        new_title = u"".join([unichr(c) for c in value])
                        #context.putu(u"\x1b]%s\x1b\\" % new_title)
                        return True
        return False

    def handle_char(self, context, c):
        """Mirror a single character (printable or C0 control) into screen.

        >>> from screen import MockScreenWithCursor
        >>> screen = MockScreenWithCursor()
        >>> parser = _generate_mock_parser(screen)
        >>> parser.parse('abc\\x07def\\x0a\\x0d\\x0c\\x0e\\x0f')
        """
        if self._resized:
            self._resized = False
            self.screen.adjust_cursor()
        self.dirty = True
        screen = self.screen
        if c <= 0x20:
            if c == 0x20:  # SP
                screen.sp()
            elif c == 0x0a:  # NL
                screen.lf()
            elif c == 0x0d:  # CR
                screen.cr()
            elif c == 0x09:  # HT
                screen.ht()
            elif c == 0x08:  # BS
                screen.bs()
            elif c == 0x00:  # NUL
                pass
            elif c == 0x05:  # ENQ
                # Forward the answerback request only when invisible.
                return not self._visibility
            elif c == 0x07:  # BEL
                pass
            elif c == 0x0b:  # VT
                screen.lf()
            elif c == 0x0c:  # FF
                screen.lf()
            elif c == 0x0e:  # SO
                screen.so()
                return True
            elif c == 0x0f:  # SI
                screen.si()
                return True
        else:
            # Printable character.
            screen.write(c)
        return True

    def handle_draw(self, context):
        # Repaint only when the overlay is visible and something changed.
        if self._visibility and self.dirty:
            self.screen.drawall(context)
        #if self.__cpr != _CPR_NONE:
        #    if self.__cpr == _CPR_ANSI:
        #        self.__cpr = _CPR_NONE
        #        context.puts("\x1b[6n")
        #    elif self.__cpr == _CPR_DEC:
        #        self.__cpr = _CPR_NONE
        #        context.puts("\x1b[?6n")

    def handle_resize(self, context, row, col):
        # Serialize resizes against other screen users.
        lock.acquire()
        #self._resized = True
        screen = self.screen
        try:
            screen.resize(row, col)
            screen.adjust_cursor()
        finally:
            lock.release()
def test():
    """Run this module's doctest suite."""
    from doctest import testmod
    testmod()


if __name__ == "__main__":
    test()
|
import json
import random
from unittest.mock import MagicMock
import gevent
import pytest
from raiden.constants import UINT64_MAX
from raiden.messages import Processed, SecretRequest
from raiden.network.transport import MatrixTransport
from raiden.tests.utils.factories import HOP1, HOP1_KEY, UNIT_SECRETHASH
from raiden.tests.utils.transport import MockRaidenService
from raiden.transfer.queue_identifier import QueueIdentifier
from raiden.transfer.state_change import ActionUpdateTransportSyncToken
from raiden.utils.typing import Address, List, Optional, Union
USERID1 = '@Alice:Wonderland'
@pytest.fixture
def mock_matrix(
        monkeypatch,
        retry_interval,
        retries_before_backoff,
        local_matrix_server,
        private_rooms,
):
    """Return a MatrixTransport whose user/room lookups and receive hooks
    are monkeypatched, wired to a MockRaidenService with HOP1 -> USERID1."""
    from matrix_client.user import User
    monkeypatch.setattr(User, 'get_display_name', lambda _: 'random_display_name')

    def mock_get_user(klass, user: Union[User, str]) -> User:
        # Every lookup resolves to the single test user.
        return User(None, USERID1)

    def mock_get_room_ids_for_address(
            klass,
            address: Address,
            filter_private: bool = None,
    ) -> List[str]:
        # One fixed room regardless of address.
        return ['!roomID:server']

    def mock_set_room_id_for_address(self, address: Address, room_id: Optional[str]):
        # Persisting the room id is a no-op in tests.
        pass

    def mock_receive_message(klass, message):
        # We are just unit testing the matrix transport receive so do nothing
        assert message

    def mock_receive_delivered(klass, delivered):
        # We are just unit testing the matrix transport receive so do nothing
        # NOTE(review): defined but never installed via monkeypatch below —
        # confirm whether '_receive_delivered' should also be patched.
        assert delivered

    config = dict(
        retry_interval=retry_interval,
        retries_before_backoff=retries_before_backoff,
        server=local_matrix_server,
        server_name='matrix.local.raiden',
        available_servers=[],
        discovery_room='discovery',
        private_rooms=private_rooms,
    )
    transport = MatrixTransport(config)
    transport.raiden = MockRaidenService()
    transport._stop_event.clear()
    transport._address_to_userids[HOP1] = USERID1

    monkeypatch.setattr(MatrixTransport, '_get_user', mock_get_user)
    monkeypatch.setattr(
        MatrixTransport,
        '_get_room_ids_for_address',
        mock_get_room_ids_for_address,
    )
    monkeypatch.setattr(MatrixTransport, '_set_room_id_for_address', mock_set_room_id_for_address)
    monkeypatch.setattr(MatrixTransport, '_receive_message', mock_receive_message)

    return transport
@pytest.fixture()
def skip_userid_validation(monkeypatch):
    """Patch signature validation so every user resolves to HOP1."""
    monkeypatch.setattr(
        MatrixTransport,
        '_validate_userid_signature',
        lambda klass, user: HOP1,
    )
def make_message(convert_to_hex: bool = False, overwrite_data=None):
    """Build a (room, event) pair whose body carries a signed SecretRequest,
    or the given overwrite_data verbatim."""
    from matrix_client.room import Room
    room = Room(None, '!roomID:server')
    if overwrite_data:
        data = overwrite_data
    else:
        message = SecretRequest(
            message_identifier=random.randint(0, UINT64_MAX),
            payment_identifier=1,
            secrethash=UNIT_SECRETHASH,
            amount=1,
            expiration=10,
        )
        message.sign(HOP1_KEY)
        if convert_to_hex:
            data = '0x' + message.encode().hex()
        else:
            data = json.dumps(message.to_dict())
    event = {
        'type': 'm.room.message',
        'sender': USERID1,
        'content': {
            'msgtype': 'm.text',
            'body': data,
        },
    }
    return room, event
def test_normal_processing_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A well-formed hex-encoded message is accepted."""
    room, event = make_message(convert_to_hex=True)
    assert mock_matrix._handle_message(room, event)
def test_normal_processing_json(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A well-formed JSON-encoded message is accepted."""
    room, event = make_message(convert_to_hex=False)
    assert mock_matrix._handle_message(room, event)
def test_processing_invalid_json(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A syntactically broken JSON body is rejected."""
    room, event = make_message(convert_to_hex=False, overwrite_data='{"foo": 1,')
    assert not mock_matrix._handle_message(room, event)
def test_sending_nonstring_body(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A binary (non-str) body is rejected."""
    room, event = make_message(overwrite_data=b'somebinarydata')
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_json(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """Valid JSON that is not a valid Raiden message is rejected."""
    bad_message = '{"this": 1, "message": 5, "is": 3, "not_valid": 5}'
    room, event = make_message(convert_to_hex=False, overwrite_data=bad_message)
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_cmdid_json(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """A JSON message with an unknown type is rejected."""
    bad_message = '{"type": "NonExistentMessage", "is": 3, "not_valid": 5}'
    room, event = make_message(convert_to_hex=False, overwrite_data=bad_message)
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A hex body with its last character cut off is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = event['content']['body'][:-1]
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A truncated (but still decodable) hex message is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = event['content']['body'][:-4]
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_cmdid_hex(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """A hex message whose command-id byte is corrupted is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = '0xff' + event['content']['body'][4:]
    assert not mock_matrix._handle_message(room, event)
def test_matrix_message_sync(
        skip_if_not_matrix,
        local_matrix_server,
        private_rooms,
        retry_interval,
        retries_before_backoff,
):
    """Messages sent while a transport is offline must be fetched when it
    restarts with the sync token captured before it stopped."""
    transport0 = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_server,
        'server_name': 'matrix.local.raiden',
        'available_servers': [],
        'private_rooms': private_rooms,
    })
    transport1 = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_server,
        'server_name': 'matrix.local.raiden',
        'available_servers': [],
        'private_rooms': private_rooms,
    })

    latest_sync_token = None
    received_messages = []

    def hook(sync_token):
        # Capture the newest sync token after each of transport0's syncs.
        nonlocal latest_sync_token
        latest_sync_token = sync_token

    class MessageHandler:
        # Shared by both services, so traffic from both sides accumulates
        # in the same list.
        def on_message(self, _, message):
            nonlocal received_messages
            received_messages.append(message)

    transport0._client.set_post_sync_hook(hook)

    message_handler = MessageHandler()
    raiden_service0 = MockRaidenService(message_handler)
    raiden_service1 = MockRaidenService(message_handler)
    raiden_service1.handle_state_change = MagicMock()

    transport0.start(
        raiden_service0,
        message_handler,
        None,
    )
    transport1.start(
        raiden_service1,
        message_handler,
        None,
    )
    transport0.start_health_check(transport1._raiden_service.address)
    transport1.start_health_check(transport0._raiden_service.address)

    queue_identifier = QueueIdentifier(
        recipient=transport1._raiden_service.address,
        channel_identifier=1,
    )
    for i in range(5):
        message = Processed(i)
        message.sign(transport0._raiden_service.private_key)
        transport0.send_async(
            queue_identifier,
            message,
        )

    # Give the homeserver time to route everything.
    gevent.sleep(2)

    update_transport_sync_token = ActionUpdateTransportSyncToken(latest_sync_token)
    raiden_service1.handle_state_change.assert_called_with(update_transport_sync_token)

    # NOTE(review): 5 messages were sent but 10 are expected — presumably
    # each send also produces an acknowledgement that reaches the shared
    # handler; confirm against the transport implementation.
    assert len(received_messages) == 10
    for i in range(5):
        assert received_messages[i].message_identifier == i

    transport1.stop()

    assert latest_sync_token

    # Send more messages while the other end is offline
    for i in range(10, 15):
        message = Processed(i)
        message.sign(transport0._raiden_service.private_key)
        transport0.send_async(
            queue_identifier,
            message,
        )

    # Should fetch the 5 messages sent while transport1 was offline
    transport1.start(
        transport1._raiden_service,
        message_handler,
        latest_sync_token,
    )

    gevent.sleep(2)

    assert len(received_messages) == 20
    # NOTE(review): this relies on list positions 10..14 holding message
    # ids 10..14 — only true if ordering interleaves exactly as in the
    # first round; confirm.
    for i in range(10, 15):
        assert received_messages[i].message_identifier == i

    transport0.stop()
    transport1.stop()
Add a test for the Matrix transport message retry mechanism
import json
import random
from unittest.mock import MagicMock
import gevent
import pytest
from raiden.constants import UINT64_MAX
from raiden.messages import Processed, SecretRequest
from raiden.network.transport import MatrixTransport
from raiden.network.transport.matrix import UserPresence, _RetryQueue
from raiden.tests.utils.factories import HOP1, HOP1_KEY, UNIT_SECRETHASH, make_address
from raiden.tests.utils.transport import MockRaidenService
from raiden.transfer.mediated_transfer.events import CHANNEL_IDENTIFIER_GLOBAL_QUEUE
from raiden.transfer.queue_identifier import QueueIdentifier
from raiden.transfer.state_change import ActionUpdateTransportSyncToken
from raiden.utils import pex
from raiden.utils.typing import Address, List, Optional, Union
USERID1 = '@Alice:Wonderland'
@pytest.fixture
def mock_matrix(
        monkeypatch,
        retry_interval,
        retries_before_backoff,
        local_matrix_server,
        private_rooms,
):
    """Return a MatrixTransport whose user/room lookups and receive hooks
    are monkeypatched, wired to a MockRaidenService with HOP1 -> USERID1."""
    from matrix_client.user import User
    monkeypatch.setattr(User, 'get_display_name', lambda _: 'random_display_name')

    def mock_get_user(klass, user: Union[User, str]) -> User:
        # Every lookup resolves to the single test user.
        return User(None, USERID1)

    def mock_get_room_ids_for_address(
            klass,
            address: Address,
            filter_private: bool = None,
    ) -> List[str]:
        # One fixed room regardless of address.
        return ['!roomID:server']

    def mock_set_room_id_for_address(self, address: Address, room_id: Optional[str]):
        # Persisting the room id is a no-op in tests.
        pass

    def mock_receive_message(klass, message):
        # We are just unit testing the matrix transport receive so do nothing
        assert message

    def mock_receive_delivered(klass, delivered):
        # We are just unit testing the matrix transport receive so do nothing
        # NOTE(review): defined but never installed via monkeypatch below —
        # confirm whether '_receive_delivered' should also be patched.
        assert delivered

    config = dict(
        retry_interval=retry_interval,
        retries_before_backoff=retries_before_backoff,
        server=local_matrix_server,
        server_name='matrix.local.raiden',
        available_servers=[],
        discovery_room='discovery',
        private_rooms=private_rooms,
    )
    transport = MatrixTransport(config)
    transport.raiden = MockRaidenService()
    transport._stop_event.clear()
    transport._address_to_userids[HOP1] = USERID1

    monkeypatch.setattr(MatrixTransport, '_get_user', mock_get_user)
    monkeypatch.setattr(
        MatrixTransport,
        '_get_room_ids_for_address',
        mock_get_room_ids_for_address,
    )
    monkeypatch.setattr(MatrixTransport, '_set_room_id_for_address', mock_set_room_id_for_address)
    monkeypatch.setattr(MatrixTransport, '_receive_message', mock_receive_message)

    return transport
@pytest.fixture()
def skip_userid_validation(monkeypatch):
    """Patch signature validation so every user resolves to HOP1."""
    monkeypatch.setattr(
        MatrixTransport,
        '_validate_userid_signature',
        lambda klass, user: HOP1,
    )
def make_message(convert_to_hex: bool = False, overwrite_data=None):
    """Build a (room, event) pair whose body carries a signed SecretRequest,
    or the given overwrite_data verbatim."""
    from matrix_client.room import Room
    room = Room(None, '!roomID:server')
    if overwrite_data:
        data = overwrite_data
    else:
        message = SecretRequest(
            message_identifier=random.randint(0, UINT64_MAX),
            payment_identifier=1,
            secrethash=UNIT_SECRETHASH,
            amount=1,
            expiration=10,
        )
        message.sign(HOP1_KEY)
        if convert_to_hex:
            data = '0x' + message.encode().hex()
        else:
            data = json.dumps(message.to_dict())
    event = {
        'type': 'm.room.message',
        'sender': USERID1,
        'content': {
            'msgtype': 'm.text',
            'body': data,
        },
    }
    return room, event
def test_normal_processing_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A well-formed hex-encoded message is accepted."""
    room, event = make_message(convert_to_hex=True)
    assert mock_matrix._handle_message(room, event)
def test_normal_processing_json(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A well-formed JSON-encoded message is accepted."""
    room, event = make_message(convert_to_hex=False)
    assert mock_matrix._handle_message(room, event)
def test_processing_invalid_json(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A syntactically broken JSON body is rejected."""
    room, event = make_message(convert_to_hex=False, overwrite_data='{"foo": 1,')
    assert not mock_matrix._handle_message(room, event)
def test_sending_nonstring_body(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A binary (non-str) body is rejected."""
    room, event = make_message(overwrite_data=b'somebinarydata')
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_json(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """Valid JSON that is not a valid Raiden message is rejected."""
    bad_message = '{"this": 1, "message": 5, "is": 3, "not_valid": 5}'
    room, event = make_message(convert_to_hex=False, overwrite_data=bad_message)
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_cmdid_json(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """A JSON message with an unknown type is rejected."""
    bad_message = '{"type": "NonExistentMessage", "is": 3, "not_valid": 5}'
    room, event = make_message(convert_to_hex=False, overwrite_data=bad_message)
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A hex body with its last character cut off is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = event['content']['body'][:-1]
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_hex(mock_matrix, skip_userid_validation, skip_if_not_matrix):
    """A truncated (but still decodable) hex message is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = event['content']['body'][:-4]
    assert not mock_matrix._handle_message(room, event)
def test_processing_invalid_message_cmdid_hex(
        mock_matrix,
        skip_userid_validation,
        skip_if_not_matrix,
):
    """A hex message whose command-id byte is corrupted is rejected."""
    room, event = make_message(convert_to_hex=True)
    event['content']['body'] = '0xff' + event['content']['body'][4:]
    assert not mock_matrix._handle_message(room, event)
def test_matrix_message_sync(
        skip_if_not_matrix,
        local_matrix_server,
        private_rooms,
        retry_interval,
        retries_before_backoff,
):
    """Messages sent while a transport is offline must be fetched when it
    restarts with the sync token captured before it stopped."""
    transport0 = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_server,
        'server_name': 'matrix.local.raiden',
        'available_servers': [],
        'private_rooms': private_rooms,
    })
    transport1 = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_server,
        'server_name': 'matrix.local.raiden',
        'available_servers': [],
        'private_rooms': private_rooms,
    })

    latest_sync_token = None
    received_messages = []

    def hook(sync_token):
        # Capture the newest sync token after each of transport0's syncs.
        nonlocal latest_sync_token
        latest_sync_token = sync_token

    class MessageHandler:
        # Shared by both services, so traffic from both sides accumulates
        # in the same list.
        def on_message(self, _, message):
            nonlocal received_messages
            received_messages.append(message)

    transport0._client.set_post_sync_hook(hook)

    message_handler = MessageHandler()
    raiden_service0 = MockRaidenService(message_handler)
    raiden_service1 = MockRaidenService(message_handler)
    raiden_service1.handle_state_change = MagicMock()

    transport0.start(
        raiden_service0,
        message_handler,
        None,
    )
    transport1.start(
        raiden_service1,
        message_handler,
        None,
    )
    transport0.start_health_check(transport1._raiden_service.address)
    transport1.start_health_check(transport0._raiden_service.address)

    queue_identifier = QueueIdentifier(
        recipient=transport1._raiden_service.address,
        channel_identifier=1,
    )
    for i in range(5):
        message = Processed(i)
        message.sign(transport0._raiden_service.private_key)
        transport0.send_async(
            queue_identifier,
            message,
        )

    # Give the homeserver time to route everything.
    gevent.sleep(2)

    update_transport_sync_token = ActionUpdateTransportSyncToken(latest_sync_token)
    raiden_service1.handle_state_change.assert_called_with(update_transport_sync_token)

    # NOTE(review): 5 messages were sent but 10 are expected — presumably
    # each send also produces an acknowledgement that reaches the shared
    # handler; confirm against the transport implementation.
    assert len(received_messages) == 10
    for i in range(5):
        assert received_messages[i].message_identifier == i

    transport1.stop()

    assert latest_sync_token

    # Send more messages while the other end is offline
    for i in range(10, 15):
        message = Processed(i)
        message.sign(transport0._raiden_service.private_key)
        transport0.send_async(
            queue_identifier,
            message,
        )

    # Should fetch the 5 messages sent while transport1 was offline
    transport1.start(
        transport1._raiden_service,
        message_handler,
        latest_sync_token,
    )

    gevent.sleep(2)

    assert len(received_messages) == 20
    # NOTE(review): this relies on list positions 10..14 holding message
    # ids 10..14 — only true if ordering interleaves exactly as in the
    # first round; confirm.
    for i in range(10, 15):
        assert received_messages[i].message_identifier == i

    transport0.stop()
    transport1.stop()
def test_matrix_message_retry(
        skip_if_not_matrix,
        local_matrix_server,
        private_rooms,
        retry_interval,
        retries_before_backoff,
):
    """ Test the retry mechanism implemented into the matrix client.
    The test creates a transport and sends a message. Given that the
    receiver was online, the initial message is sent but the receiver
    doesn't respond in time and goes offline. The retrier should then
    wait for the `retry_interval` duration to pass and send the message
    again but this won't work because the receiver is offline. Once
    the receiver comes back again, the message should be sent again.
    """
    partner_address = make_address()

    transport = MatrixTransport({
        'discovery_room': 'discovery',
        'retries_before_backoff': retries_before_backoff,
        'retry_interval': retry_interval,
        'server': local_matrix_server,
        'server_name': 'matrix.local.raiden',
        'available_servers': [],
        'private_rooms': private_rooms,
    })
    transport._send_raw = MagicMock()
    raiden_service = MockRaidenService(None)

    transport.start(
        raiden_service,
        raiden_service.message_handler,
        None,
    )
    transport.log = MagicMock()

    # Receiver is online
    transport._address_to_presence[partner_address] = UserPresence.ONLINE

    queueid = QueueIdentifier(
        recipient=partner_address,
        channel_identifier=CHANNEL_IDENTIFIER_GLOBAL_QUEUE,
    )
    chain_state = raiden_service.wal.state_manager.current_state

    retry_queue = _RetryQueue(transport, partner_address)
    retry_queue.start()

    # Send the initial message
    message = Processed(0)
    message.sign(transport._raiden_service.private_key)
    chain_state.queueids_to_queues[queueid] = [message]
    retry_queue.enqueue_global(message)

    gevent.sleep(1)

    # BUG FIX: this was an assignment (`call_count = 1`), which silently
    # overwrote the mock's counter instead of verifying the initial send.
    assert transport._send_raw.call_count == 1

    # Receiver goes offline
    transport._address_to_presence[partner_address] = UserPresence.OFFLINE

    gevent.sleep(retry_interval)

    transport.log.debug.assert_called_with(
        'Partner not reachable. Skipping.',
        partner=pex(partner_address),
        status=UserPresence.OFFLINE,
    )

    # Retrier did not call send_raw given that the receiver is still offline
    assert transport._send_raw.call_count == 1

    # Receiver comes back online (the original comment said "goes offline")
    transport._address_to_presence[partner_address] = UserPresence.ONLINE

    gevent.sleep(retry_interval)

    # Retrier now should have sent the message again
    assert transport._send_raw.call_count == 2
|
# -*- coding: utf-8 -*-
import json
import logging
import elasticsearch_dsl
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import parallel_bulk
from elasticsearch_dsl.connections import connections
from tg import config
from pyjobsweb import model
from pyjobsweb.lib.sqlalchemy_ import current_server_timestamp
from pyjobsweb.lib.lock import acquire_inter_process_lock
from pyjobsweb.commands import AppContextCommand
class PopulateESCommand(AppContextCommand):
    """Gearbox command synchronizing Elasticsearch with Postgresql.

    Depending on the command line flags it synchronizes the jobs and/or
    companies indices and (re)populates the geocomplete index.
    """

    def __init__(self, *args, **kwargs):
        # BUG FIX: arguments were previously forwarded as two positional
        # objects (args, kwargs) instead of being unpacked.
        super(PopulateESCommand, self).__init__(*args, **kwargs)
        self._logger = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Extend the base parser with the -j, -co and -g flags."""
        parser = super(PopulateESCommand, self).get_parser(prog_name)

        jobs_help_msg = 'synchronizes the jobs index from the Elasticsearch ' \
                        'database with the jobs table from the Postgresql ' \
                        'database'
        parser.add_argument('-j', '--jobs',
                            help=jobs_help_msg,
                            dest='synchronize_jobs_index',
                            action='store_const', const=True)

        companies_help_msg = 'synchronizes the companies index from the ' \
                             'Elasticsearch database with the companies ' \
                             'table from the Postgresql database'
        parser.add_argument('-co', '--companies',
                            help=companies_help_msg,
                            dest='synchronize_companies_index',
                            action='store_const', const=True)

        geocomplete_help_msg = \
            'populates the geocomplete index of the elasticsearch database'
        parser.add_argument('-g', '--geocomplete',
                            help=geocomplete_help_msg,
                            dest='populate_geocomplete_index',
                            action='store_const', const=True)

        return parser

    def _logging(self, logging_level, message):
        """Forward *message* to the command logger at *logging_level*."""
        self._logger.log(logging_level, message)

    def _job_id_logging(self, job_id, logging_level, message):
        """Log a message tagged with the job offer id."""
        log_msg = u'[Job offer id: %s] %s' % (job_id, message)
        self._logging(logging_level, log_msg)

    def _company_id_logging(self, company_id, logging_level, message):
        """Log a message tagged with the company id."""
        log_msg = u'[Company: %s] %s' % (company_id, message)
        self._logging(logging_level, log_msg)

    def _compute_dirty_documents(self, sql_table_cls, doc_type):
        """Yield Elasticsearch documents for out-of-sync database rows."""
        self._logging(logging.INFO,
                      'Computing out of sync %s documents.' % doc_type)

        for row in sql_table_cls.get_dirty_rows():
            yield row.to_elasticsearch_document()

    @staticmethod
    def _geocompletion_documents():
        """Yield model.Geocomplete documents from the geolocation JSON file."""
        # Use a context manager so the data file handle is always released
        # (the original opened the file without ever closing it).
        with open(config.get('fr.geolocation_data.path')) as geolocation_data:
            json_dict = json.load(geolocation_data)

        for postal_code, places in json_dict.items():
            for place in places:
                yield model.Geocomplete(name=place['name'],
                                        complement=place['complement'],
                                        postal_code=postal_code,
                                        geolocation=dict(
                                            lat=float(place['lat']),
                                            lon=float(place['lon'])
                                        ),
                                        weight=place['weight'])

    def _synchronisation_op(self, elasticsearch_doctype, pending_insertions):
        """Yield one bulk operation ('update' or 'index') per pending doc."""
        self._logging(logging.INFO,
                      'Computing required operations to synchronize documents.')

        for p in pending_insertions:
            doc_dict = p.to_dict(True)

            try:
                # Document already indexed: emit a partial update.
                elasticsearch_doctype.get(p.id)
                update_op = doc_dict
                update_op['_op_type'] = 'update'
                update_op['doc'] = doc_dict['_source']
                del update_op['_source']
                sync_op = update_op
            except NotFoundError:
                # Unknown document: emit a plain index operation.
                add_op = doc_dict
                add_op['_op_type'] = 'index'
                sync_op = add_op

            yield sync_op

    def _perform_index_sync(self, sql_table_cls, es_doc_cls, id_logger):
        """Bulk-apply pending operations and mark rows as synchronized."""
        es_doc = es_doc_cls()

        elasticsearch_conn = connections.get_connection()

        sync_timestamp = current_server_timestamp()

        pending_insertions = self._compute_dirty_documents(
            sql_table_cls, es_doc.doc_type)

        bulk_op = self._synchronisation_op(es_doc, pending_insertions)

        self._logging(logging.INFO, 'Performing synchronization.')

        for ok, info in parallel_bulk(elasticsearch_conn, bulk_op):
            obj_id = info['index']['_id'] \
                if 'index' in info else info['update']['_id']

            if ok:
                # Mark the row as handled so we don't retry it next time
                self._logging(logging.INFO,
                              'Document %s has been synced successfully.'
                              % obj_id)
                sql_table_cls.update_last_sync(obj_id, sync_timestamp)
            else:
                id_logger(obj_id, logging.ERROR,
                          'Error while syncing document %s index.' % obj_id)

        # Refresh indices to increase research speed
        elasticsearch_dsl.Index(es_doc.index).refresh()

    def _synchronise_index(self, sql_table_cls, es_doc_cls, id_logger):
        """Synchronize one index under an inter-process lock."""
        es_doc = es_doc_cls()

        self._logging(logging.INFO,
                      'Synchronizing %s index.' % es_doc.index)

        with acquire_inter_process_lock('sync_%s' % es_doc.index) as acquired:
            if not acquired:
                err_msg = 'Another process is already synchronizing the %s ' \
                          'index, aborting now.' % es_doc.index
                self._logging(logging.WARNING, err_msg)
            else:
                self._perform_index_sync(sql_table_cls, es_doc_cls, id_logger)

        self._logging(logging.INFO,
                      'Index %s is now synchronized.' % es_doc.index)

    def _synchronise_jobs_index(self):
        self._synchronise_index(model.JobAlchemy,
                                model.JobElastic, self._job_id_logging)

    def _synchronise_companies_index(self):
        self._synchronise_index(model.CompanyAlchemy,
                                model.CompanyElastic, self._company_id_logging)

    def _geocomplete_index_batch(self, elasticsearch_conn, to_index):
        """Index one batch of geocomplete documents.

        BUG FIX: this was a ``@staticmethod`` without a ``self`` parameter
        while its body used ``self._logging``, raising NameError as soon as
        it ran (breaking ``gearbox populate-es -g``).
        """
        log_msg = 'Indexing documents.'
        self._logging(logging.INFO, log_msg)

        for ok, info in parallel_bulk(elasticsearch_conn, to_index):
            if not ok:
                doc_id = info['create']['_id']
                doc_type = info['create']['_type']
                doc_index = info['create']['_index']
                logging_level = logging.ERROR
                err_msg = "Couldn't index document: '%s', of type: %s, " \
                          "under index: %s." % (doc_id, doc_type, doc_index)
                self._logging(logging_level, err_msg)

    def _perform_geocomplete_index_population(self, max_doc):
        """Accumulate documents in batches of *max_doc* and index them."""
        elasticsearch_conn = connections.get_connection()

        to_index = list()

        for i, document in enumerate(self._geocompletion_documents()):
            if i % max_doc == 0:
                log_msg = 'Computing required geoloc-entry documents.'
                self._logging(logging.INFO, log_msg)

            to_index.append(document.to_dict(True))

            if len(to_index) < max_doc:
                continue

            self._geocomplete_index_batch(elasticsearch_conn, to_index)

            to_index = list()

        # Flush the final partial batch.
        if len(to_index) != 0:
            self._geocomplete_index_batch(elasticsearch_conn, to_index)

        elasticsearch_dsl.Index('geocomplete').refresh()

    def _populate_geocomplete_index(self, max_doc=1000):
        """Populate the geocomplete index under an inter-process lock."""
        log_msg = 'Populating geocomplete index.'
        self._logging(logging.INFO, log_msg)

        with acquire_inter_process_lock('populate_geocomplete') as acquired:
            if not acquired:
                err_msg = 'Another process is already populating the ' \
                          'geocomplete index, aborting now.'
                self._logging(logging.WARNING, err_msg)
            else:
                self._perform_geocomplete_index_population(max_doc)
                # Typo fixed: the original logged 'gecomplete'.
                log_msg = 'geocomplete index populated and refreshed.'
                self._logging(logging.INFO, log_msg)

    def take_action(self, parsed_args):
        """Entry point: dispatch based on the parsed command line flags."""
        super(PopulateESCommand, self).take_action(parsed_args)

        if parsed_args.populate_geocomplete_index:
            self._populate_geocomplete_index()

        if parsed_args.synchronize_jobs_index:
            self._synchronise_jobs_index()

        if parsed_args.synchronize_companies_index:
            self._synchronise_companies_index()
Bugfix: `gearbox populate-es -g` now works properly
# -*- coding: utf-8 -*-
import json
import logging
import elasticsearch_dsl
from elasticsearch.exceptions import NotFoundError
from elasticsearch.helpers import parallel_bulk
from elasticsearch_dsl.connections import connections
from tg import config
from pyjobsweb import model
from pyjobsweb.lib.sqlalchemy_ import current_server_timestamp
from pyjobsweb.lib.lock import acquire_inter_process_lock
from pyjobsweb.commands import AppContextCommand
class PopulateESCommand(AppContextCommand):
    """Gearbox command synchronizing/populating the Elasticsearch indices.

    -j/--jobs and -co/--companies synchronize their index with the matching
    Postgresql table; -g/--geocomplete (re)populates the geocomplete index
    from the geolocation JSON data file.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: forward the arguments unpacked; the original passed the
        # tuple and the dict themselves as two positional arguments.
        super(PopulateESCommand, self).__init__(*args, **kwargs)
        self._logger = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        """Return the argument parser for this sub-command."""
        parser = super(PopulateESCommand, self).get_parser(prog_name)

        jobs_help_msg = 'synchronizes the jobs index from the Elasticsearch ' \
                        'database with the jobs table from the Postgresql ' \
                        'database'
        parser.add_argument('-j', '--jobs',
                            help=jobs_help_msg,
                            dest='synchronize_jobs_index',
                            action='store_const', const=True)

        companies_help_msg = 'synchronizes the companies index from the ' \
                             'Elasticsearch database with the companies ' \
                             'table from the Postgresql database'
        parser.add_argument('-co', '--companies',
                            help=companies_help_msg,
                            dest='synchronize_companies_index',
                            action='store_const', const=True)

        geocomplete_help_msg = \
            'populates the geocomplete index of the elasticsearch database'
        parser.add_argument('-g', '--geocomplete',
                            help=geocomplete_help_msg,
                            dest='populate_geocomplete_index',
                            action='store_const', const=True)

        return parser

    def _logging(self, logging_level, message):
        # Single funnel for all log output of this command.
        self._logger.log(logging_level, message)

    def _job_id_logging(self, job_id, logging_level, message):
        log_msg = u'[Job offer id: %s] %s' % (job_id, message)
        self._logging(logging_level, log_msg)

    def _company_id_logging(self, company_id, logging_level, message):
        log_msg = u'[Company: %s] %s' % (company_id, message)
        self._logging(logging_level, log_msg)

    def _compute_dirty_documents(self, sql_table_cls, doc_type):
        """Yield an Elasticsearch document per out-of-sync SQL row."""
        self._logging(logging.INFO,
                      'Computing out of sync %s documents.' % doc_type)
        for row in sql_table_cls.get_dirty_rows():
            yield row.to_elasticsearch_document()

    @staticmethod
    def _geocompletion_documents():
        """Yield model.Geocomplete documents from the geolocation JSON file."""
        # Bug fix: close the data file deterministically (the original opened
        # it and never closed it).
        with open(config.get('fr.geolocation_data.path')) as geolocation_data:
            json_dict = json.load(geolocation_data)
        for postal_code, places in json_dict.items():
            for place in places:
                yield model.Geocomplete(name=place['name'],
                                        complement=place['complement'],
                                        postal_code=postal_code,
                                        geolocation=dict(
                                            lat=float(place['lat']),
                                            lon=float(place['lon'])
                                        ),
                                        weight=place['weight'])

    def _synchronisation_op(self, elasticsearch_doctype, pending_insertions):
        """Yield a bulk 'update' or 'index' operation per pending document."""
        self._logging(logging.INFO,
                      'Computing required operations to synchronize documents.')
        for p in pending_insertions:
            sync_op = p.to_dict(True)
            try:
                # The document already exists: turn it into an update op.
                elasticsearch_doctype.get(p.id)
                sync_op['_op_type'] = 'update'
                sync_op['doc'] = sync_op['_source']
                del sync_op['_source']
            except NotFoundError:
                # Not indexed yet: plain index op.
                sync_op['_op_type'] = 'index'
            yield sync_op

    def _perform_index_sync(self, sql_table_cls, es_doc_cls, id_logger):
        """Run the bulk synchronization for one index (lock must be held)."""
        es_doc = es_doc_cls()
        elasticsearch_conn = connections.get_connection()
        sync_timestamp = current_server_timestamp()
        pending_insertions = self._compute_dirty_documents(
            sql_table_cls, es_doc.doc_type)
        bulk_op = self._synchronisation_op(es_doc, pending_insertions)
        self._logging(logging.INFO, 'Performing synchronization.')
        for ok, info in parallel_bulk(elasticsearch_conn, bulk_op):
            obj_id = info['index']['_id'] \
                if 'index' in info else info['update']['_id']
            if ok:
                # Mark the row as synced so we don't re-process it next run
                self._logging(logging.INFO,
                              'Document %s has been synced successfully.'
                              % obj_id)
                sql_table_cls.update_last_sync(obj_id, sync_timestamp)
            else:
                id_logger(obj_id, logging.ERROR,
                          'Error while syncing document %s index.' % obj_id)
        # Refresh indices to increase research speed
        elasticsearch_dsl.Index(es_doc.index).refresh()

    def _synchronise_index(self, sql_table_cls, es_doc_cls, id_logger):
        """Synchronize one index under an inter-process lock."""
        es_doc = es_doc_cls()
        self._logging(logging.INFO,
                      'Synchronizing %s index.' % es_doc.index)
        with acquire_inter_process_lock('sync_%s' % es_doc.index) as acquired:
            if not acquired:
                # (Removed a redundant es_doc re-instantiation here.)
                err_msg = 'Another process is already synchronizing the %s ' \
                          'index, aborting now.' % es_doc.index
                self._logging(logging.WARNING, err_msg)
            else:
                self._perform_index_sync(sql_table_cls, es_doc_cls, id_logger)
                self._logging(logging.INFO,
                              'Index %s is now synchronized.' % es_doc.index)

    def _synchronise_jobs_index(self):
        self._synchronise_index(model.JobAlchemy,
                                model.JobElastic, self._job_id_logging)

    def _synchronise_companies_index(self):
        self._synchronise_index(model.CompanyAlchemy,
                                model.CompanyElastic, self._company_id_logging)

    def _geocomplete_index_batch(self, elasticsearch_conn, to_index):
        """Bulk-index one batch of geocomplete documents, logging failures."""
        self._logging(logging.INFO, 'Indexing documents.')
        for ok, info in parallel_bulk(elasticsearch_conn, to_index):
            if not ok:
                err_msg = "Couldn't index document: '%s', of type: %s, " \
                          "under index: %s." % (info['create']['_id'],
                                                info['create']['_type'],
                                                info['create']['_index'])
                self._logging(logging.ERROR, err_msg)

    def _perform_geocomplete_index_population(self, max_doc):
        """Stream geocomplete documents to Elasticsearch in max_doc batches."""
        elasticsearch_conn = connections.get_connection()
        to_index = list()
        for i, document in enumerate(self._geocompletion_documents()):
            if i % max_doc == 0:
                log_msg = 'Computing required geoloc-entry documents.'
                self._logging(logging.INFO, log_msg)
            to_index.append(document.to_dict(True))
            if len(to_index) < max_doc:
                continue
            self._geocomplete_index_batch(elasticsearch_conn, to_index)
            to_index = list()
        if len(to_index) != 0:
            # Flush the final, partially filled batch.
            self._geocomplete_index_batch(elasticsearch_conn, to_index)
        elasticsearch_dsl.Index('geocomplete').refresh()

    def _populate_geocomplete_index(self, max_doc=1000):
        """Populate the geocomplete index under an inter-process lock."""
        self._logging(logging.INFO, 'Populating geocomplete index.')
        with acquire_inter_process_lock('populate_geocomplete') as acquired:
            if not acquired:
                err_msg = 'Another process is already populating the ' \
                          'geocomplete index, aborting now.'
                self._logging(logging.WARNING, err_msg)
            else:
                self._perform_geocomplete_index_population(max_doc)
                # Message fix: used to read 'gecomplete'.
                log_msg = 'geocomplete index populated and refreshed.'
                self._logging(logging.INFO, log_msg)

    def take_action(self, parsed_args):
        """Run every operation selected on the command line."""
        super(PopulateESCommand, self).take_action(parsed_args)
        if parsed_args.populate_geocomplete_index:
            self._populate_geocomplete_index()
        if parsed_args.synchronize_jobs_index:
            self._synchronise_jobs_index()
        if parsed_args.synchronize_companies_index:
            self._synchronise_companies_index()
|
#!/usr/bin/env python
# Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Reports the state of your given job tree.
"""
import sys
import os
import xml.etree.cElementTree as ET
from xml.dom import minidom # For making stuff pretty
from sonLib.bioio import logger
from sonLib.bioio import logFile
from sonLib.bioio import getBasicOptionParser
from sonLib.bioio import parseBasicOptions
from sonLib.bioio import TempFileTree
from jobTree.src.master import getEnvironmentFileName, getJobFileDirName
from jobTree.src.master import getStatsFileName, getConfigFileName
class JTTag(object):
    """Convenience wrapper exposing an ElementTree tag's numeric attributes
    as float object attributes (nan when an attribute is missing or does
    not parse as a float).
    """
    def __init__(self, tree):
        """ Given an ElementTree tag, build a convenience object.
        """
        # Fix: the original list repeated min_clock, average_memory,
        # max_number_per_slave and max_memory; the duplicates are removed
        # (the repeated setattr calls were redundant no-ops).
        for name in ["total_time", "median_clock", "total_memory", "median_wait",
                     "total_number", "average_time", "median_memory",
                     "min_number_per_slave", "average_wait", "total_clock",
                     "median_time", "min_time", "min_wait", "max_clock",
                     "max_wait", "total_wait", "min_clock", "average_memory",
                     "max_number_per_slave", "max_memory",
                     "median_number_per_slave", "average_number_per_slave",
                     "max_time", "average_clock", "min_memory"
                     ]:
            setattr(self, name, self.__get(tree, name))
        self.name = tree.tag

    def __get(self, tag, name):
        """Return float(tag.attrib[name]), or nan when absent/unparseable."""
        if name in tag.attrib:
            value = tag.attrib[name]
        else:
            return float("nan")
        try:
            a = float(value)
        except ValueError:
            a = float("nan")
        return a
def initializeOptions(parser):
    """ Attach all jobTreeStats command line options to *parser*. """
    # Data-driven registration: (option strings, keyword arguments).
    optionSpecs = [
        (("--jobTree",),
         dict(dest="jobTree", help="Directory containing the job tree")),
        (("--outputFile",),
         dict(dest="outputFile", default=None,
              help="File in which to write results")),
        (("--raw",),
         dict(action="store_true", default=False,
              help="output the raw xml data.")),
        (("--pretty",),
         dict(action="store_true", default=False,
              help=("if not raw, prettify the numbers to be "
                    "human readable."))),
        (("--categories",),
         dict(help=("comma separated list from [time, clock, wait, "
                    "memory]"))),
        (("--sortby",),
         dict(default="time",
              help=("how to sort Target list. may be from [alpha, "
                    "time, clock, wait, memory, count]. "
                    "default=%(default)s"))),
        (("--reverse_sort",),
         dict(default=False, action="store_true",
              help="reverse sort order.")),
    ]
    for optStrings, optKwargs in optionSpecs:
        parser.add_option(*optStrings, **optKwargs)
def checkOptions(options, args, parser):
    """Validate the parsed options, normalizing them in place.

    args may carry the job tree directory as a single positional argument.
    Any validation failure is reported through parser.error (which exits).
    """
    logger.info("Parsed arguments")
    assert len(args) <= 1  # Only jobtree may be specified as argument
    if len(args) == 1:  # Allow jobTree directory as arg
        options.jobTree = args[0]
    logger.info("Checking if we have files for job tree")
    # Idiom fix: identity comparison is the correct test for None.
    if options.jobTree is None:
        parser.error("Specify --jobTree")
    if not os.path.exists(options.jobTree):
        parser.error("--jobTree %s does not exist"
                     % options.jobTree)
    if not os.path.isdir(options.jobTree):
        parser.error("--jobTree %s is not a directory"
                     % options.jobTree)
    if not os.path.isfile(getConfigFileName(options.jobTree)):
        parser.error("A valid job tree must contain the config file")
    if not os.path.isfile(getStatsFileName(options.jobTree)):
        parser.error("The job-tree was run without the --stats flag, "
                     "so no stats were created")
    # Normalize --categories into a validated list.
    defaultCategories = ["time", "clock", "wait", "memory"]
    if options.categories is None:
        options.categories = defaultCategories
    else:
        options.categories = options.categories.split(",")
    for c in options.categories:
        if c not in defaultCategories:
            parser.error("Unknown category %s. Must be from %s"
                         % (c, str(defaultCategories)))
    # --sortby accepts the categories plus two extra keys.
    extraSort = ["count", "alpha"]
    if options.sortby is not None:
        if (options.sortby not in defaultCategories and
                options.sortby not in extraSort):
            parser.error("Unknown --sortby %s. Must be from %s"
                         % (options.sortby, str(defaultCategories + extraSort)))
    logger.info("Checked arguments")
def prettyXml(elem):
    """Return a pretty-printed XML string for the ElementTree Element.
    """
    # Round-trip through minidom, which knows how to indent.
    return minidom.parseString(ET.tostring(elem, "utf-8")).toprettyxml(indent=" ")
def padStr(s, field=None):
    """Left-pad *s* with spaces up to *field* characters.

    No-op when field is None or *s* is already at least that long.
    """
    if field is None or len(s) >= field:
        return s
    return s.rjust(field)
def prettyMemory(k, field=None, isBytes=False):
    """ Given input k as kilobytes, return a nicely formatted string.

    k       -- amount of memory in kilobytes (or bytes when isBytes=True).
    field   -- optional minimum width the result is padded to.
    isBytes -- when True, k is first converted from bytes to kilobytes.
    """
    # (Removed an unused "from math import floor".)
    if isBytes:
        k /= 1024
    if k < 1024:
        return padStr("%gK" % k, field)
    if k < (1024 * 1024):
        return padStr("%.1fM" % (k / 1024.0), field)
    if k < (1024 * 1024 * 1024):
        return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
    if k < (1024 * 1024 * 1024 * 1024):
        return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
    # Bug fix: values of a petabyte or more used to fall off the end of the
    # if-chain and return None; report them in petabytes instead.
    return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field)
def prettyTime(t, field=None):
    """ Given input t as seconds, return a nicely formatted string.

    t     -- duration in seconds.
    field -- optional minimum width the result is padded to (via padStr).
    Buckets: <2min as seconds, <2h as m/s, <25h as h/m/s, <1week as
    d/h/m/s, otherwise w/d/h/m/s with plural 's' on weeks/days > 1.
    """
    from math import floor
    pluralDict = {True: "s", False: ""}
    # Under two minutes: plain seconds.
    if t < 120:
        return padStr("%ds" % t, field)
    # Under two hours: minutes and seconds.
    if t < 120 * 60:
        m = floor(t / 60.)
        s = t % 60
        return padStr("%dm%ds" % (m, s), field)
    # Under 25 hours: hours, minutes, seconds.
    if t < 25 * 60 * 60:
        h = floor(t / 60. / 60.)
        m = floor((t - (h * 60. * 60.)) / 60.)
        s = t % 60
        # NOTE(review): "%g" for minutes looks unintentional (every other
        # branch uses %d) but is preserved as-is.
        return padStr("%dh%gm%ds" % (h, m, s), field)
    # Under one week: days, hours, minutes, seconds.
    if t < 7 * 24 * 60 * 60:
        d = floor(t / 24. / 60. / 60.)
        h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
        m = floor((t
                   - (d * 24. * 60. * 60.)
                   - (h * 60. * 60.))
                  / 60.)
        s = t % 60
        dPlural = pluralDict[d > 1]
        return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
    # A week or more: weeks, days, hours, minutes, seconds.
    w = floor(t / 7. / 24. / 60. / 60.)
    d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
    h = floor((t
               - (w * 7. * 24. * 60. * 60.)
               - (d * 24. * 60. * 60.))
              / 60. / 60.)
    m = floor((t
               - (w * 7. * 24. * 60. * 60.)
               - (d * 24. * 60. * 60.)
               - (h * 60. * 60.))
              / 60.)
    s = t % 60
    wPlural = pluralDict[w > 1]
    dPlural = pluralDict[d > 1]
    return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d,
                                                dPlural, h, m, s), field)
def reportTime(t, options, field=None):
    """ Given t seconds, report back the correct format as string.

    Pretty mode delegates to prettyTime; otherwise a fixed two-decimal
    rendering, optionally right-aligned to *field* characters.
    """
    if options.pretty:
        return prettyTime(t, field=field)
    if field is None:
        return "%.2f" % t
    return "%*.2f" % (field, t)
def reportMemory(k, options, field=None, isBytes=False):
    """ Given k kilobytes, report back the correct format as string.

    Pretty mode delegates to prettyMemory; otherwise a %g rendering with a
    trailing 'K', optionally right-aligned to *field* characters.
    """
    if options.pretty:
        return prettyMemory(k, field=field, isBytes=isBytes)
    value = k / 1024 if isBytes else k
    if field is None:
        return "%gK" % value
    return "%*gK" % (field, value)
def reportNumber(n, options, field=None):
    """ Given n an integer, report back the correct format as string.

    options is unused but kept for signature parity with the other
    report* helpers.
    """
    return "%g" % n if field is None else "%*g" % (field, n)
def refineData(root, options):
    """ Walk *root* and wrap the interesting tags in JTTag objects.

    Returns (root, slave summary, target summary, list of target types).
    """
    targetTypes = [JTTag(child) for child in root.find("target_types")]
    return (root,
            JTTag(root.find("slave")),
            JTTag(root.find("target")),
            targetTypes)
def sprintTag(key, tag, options):
    """ Print out a JTTag()

    key     -- name of the tag ("slave", "target", or a target class name);
               "target" additionally gets a per-slave job-count block.
    tag     -- the JTTag whose statistics are rendered.
    options -- must carry .categories (subset of time/clock/wait/memory)
               and .pretty; each selected category adds a min/med/ave/max
               column group.
    Returns the formatted multi-line string (header, sub-header, values).
    """
    header = " %7s " % "Count"
    sub_header = " %7s " % "n"
    tag_str = " %s" % reportNumber(tag.total_number, options, field=7)
    out_str = ""
    if key == "target":
        # Extra block: distribution of jobs per slave.
        out_str += " %-12s | %7s%7s%7s%7s " % ("Slave Jobs", "min",
                                               "med", "ave", "max")
        slave_str = "%s| \n" % (" " * 14)
        for t in [tag.min_number_per_slave, tag.median_number_per_slave,
                  tag.average_number_per_slave, tag.max_number_per_slave]:
            slave_str += reportNumber(t, options, field=7)
        out_str += slave_str + "\n"
    # One min/med/ave/max column group per requested category.
    if "time" in options.categories:
        header += "| %40s " % "Time"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_time, tag.median_time,
                  tag.average_time, tag.max_time]:
            tag_str += reportTime(t, options, field=10)
    if "clock" in options.categories:
        header += "| %40s " % "Clock"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_clock, tag.median_clock,
                  tag.average_clock, tag.max_clock]:
            tag_str += reportTime(t, options, field=10)
    if "wait" in options.categories:
        header += "| %40s " % "Wait"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_wait, tag.median_wait,
                  tag.average_wait, tag.max_wait]:
            tag_str += reportTime(t, options, field=10)
    if "memory" in options.categories:
        header += "| %40s " % "Memory"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_memory, tag.median_memory,
                  tag.average_memory, tag.max_memory]:
            tag_str += reportMemory(t, options, field=10)
    out_str += header + "\n"
    out_str += sub_header + "\n"
    out_str += tag_str + "\n"
    return out_str
def get(tree, name):
    """ Return a float value attribute NAME from TREE.

    nan is returned both when the attribute is absent and when its value
    does not parse as a float.
    """
    try:
        return float(tree.attrib[name])
    except (KeyError, ValueError):
        return float("nan")
def sortTargets(targetTypes, options):
    """ Return targetTypes sorted according to options.sortby.

    Valid keys: alpha (name), time/clock/wait/memory (median values) and
    count (total_number).  Fix: the original if/elif chain silently
    returned None for an unknown key; we now raise ValueError instead.
    """
    keyAttrs = {
        "time": "median_time",
        "alpha": "name",
        "clock": "median_clock",
        "wait": "median_wait",
        "memory": "median_memory",
        "count": "total_number",
    }
    try:
        attr = keyAttrs[options.sortby]
    except KeyError:
        raise ValueError("Unknown sort key: %s" % options.sortby)
    return sorted(targetTypes, key=lambda tag: getattr(tag, attr),
                  reverse=options.reverse_sort)
def reportPrettyData(root, slave, target, target_types, options):
    """ Render the human-readable report and return it as one string.
    """
    parts = []
    parts.append("Batch System: %s\n" % root.attrib["batch_system"])
    parts.append("Default CPU: %s Default Memory: %s\n"
                 "Job Time: %s Max CPUs: %s Max Threads: %s\n" % (
                     reportNumber(get(root, "default_cpu"), options),
                     reportMemory(get(root, "default_memory"), options, isBytes=True),
                     reportTime(get(root, "job_time"), options),
                     reportNumber(get(root, "max_cpus"), options),
                     reportNumber(get(root, "max_threads"), options),
                 ))
    parts.append("Total Clock: %s Total Runtime: %s\n" % (
        reportTime(get(root, "total_clock"), options),
        reportTime(get(root, "total_run_time"), options),
    ))
    parts.append("Slave\n")
    parts.append(sprintTag("slave", slave, options))
    parts.append("Target\n")
    parts.append(sprintTag("target", target, options))
    # One block per target type, in the requested sort order.
    for t in sortTargets(target_types, options):
        parts.append(" %s\n" % t.name)
        parts.append(sprintTag(t.name, t, options))
    return "".join(parts)
def buildElement(element, items, itemName):
    """ Create a stats SubElement named *itemName* under *element*.

    items is a list of ElementTree elements carrying "time", "clock" and
    "memory" attributes; the new element aggregates totals, medians,
    averages, minima and maxima ("wait" is time minus clock, clamped at 0).
    """
    def __round(i):
        # Clamp negative measurements to zero (and log them).
        if i < 0:
            logger.debug("I got a less than 0 value: %s" % i)
            return 0.0
        return i
    itemTimes = [ __round(float(item.attrib["time"])) for item in items ]
    itemTimes.sort()
    itemClocks = [ __round(float(item.attrib["clock"])) for item in items ]
    itemClocks.sort()
    itemWaits = [ __round(__round(float(item.attrib["time"])) -
                          __round(float(item.attrib["clock"])))
                  for item in items ]
    itemWaits.sort()
    itemMemory = [ __round(float(item.attrib["memory"])) for item in items ]
    itemMemory.sort()
    assert len(itemClocks) == len(itemTimes)
    assert len(itemClocks) == len(itemWaits)
    if len(itemTimes) == 0:
        # Sentinel zeros so the min/max/median expressions below are safe.
        itemTimes.append(0)
        itemClocks.append(0)
        itemWaits.append(0)
        itemMemory.append(0)
    # Fix: median indices use // (floor division) so they stay integral on
    # Python 3 as well; on Python 2 the behavior is identical.
    return ET.SubElement(
        element, itemName,
        {"total_number":str(len(items)),
         "total_time":str(sum(itemTimes)),
         "median_time":str(itemTimes[len(itemTimes)//2]),
         "average_time":str(sum(itemTimes)/len(itemTimes)),
         "min_time":str(min(itemTimes)),
         "max_time":str(max(itemTimes)),
         "total_clock":str(sum(itemClocks)),
         "median_clock":str(itemClocks[len(itemClocks)//2]),
         "average_clock":str(sum(itemClocks)/len(itemClocks)),
         "min_clock":str(min(itemClocks)),
         "max_clock":str(max(itemClocks)),
         "total_wait":str(sum(itemWaits)),
         "median_wait":str(itemWaits[len(itemWaits)//2]),
         "average_wait":str(sum(itemWaits)/len(itemWaits)),
         "min_wait":str(min(itemWaits)),
         "max_wait":str(max(itemWaits)),
         "total_memory":str(sum(itemMemory)),
         "median_memory":str(itemMemory[len(itemMemory)//2]),
         "average_memory":str(sum(itemMemory)/len(itemMemory)),
         "min_memory":str(min(itemMemory)),
         "max_memory":str(max(itemMemory))
         })
def createSummary(element, containingItems, containingItemName, getFn):
    """ Annotate *element* with min/median/average/max counts of the items
    getFn returns for each containing item (e.g. targets per slave).
    """
    itemCounts = [len(getFn(containingItem)) for
                  containingItem in containingItems]
    itemCounts.sort()
    if len(itemCounts) == 0:
        # Sentinel zero so min/max/median below are safe on empty input.
        itemCounts.append(0)
    # Fix: the median index uses // so it stays integral on Python 3 too.
    element.attrib["median_number_per_%s" %
                   containingItemName] = str(itemCounts[len(itemCounts) // 2])
    element.attrib["average_number_per_%s" %
                   containingItemName] = str(float(sum(itemCounts)) /
                                             len(itemCounts))
    element.attrib["min_number_per_%s" %
                   containingItemName] = str(min(itemCounts))
    element.attrib["max_number_per_%s" %
                   containingItemName] = str(max(itemCounts))
def getSettings(options):
    """ Parse and return the (config, stats) XML roots of the job tree.

    An empty/corrupt config file is fatal (re-raises); an empty stats file
    just means nothing has finished yet, so we exit quietly with status 0.
    """
    configPath = getConfigFileName(options.jobTree)
    statsPath = getStatsFileName(options.jobTree)
    try:
        configRoot = ET.parse(configPath).getroot()
    except ET.ParseError:
        sys.stderr.write('The config file xml, %s, is empty.\n' % configPath)
        raise
    try:
        statsRoot = ET.parse(statsPath).getroot()
    except ET.ParseError:
        sys.stderr.write('The job tree stats file is empty. Either the job '
                         'has crashed, or no jobs have completed yet.\n')
        sys.exit(0)
    return configRoot, statsRoot
def processData(config, stats, options):
    """ Collate the stats and config into one <collated_stats> element.

    Aggregates per-slave and per-target stats, plus one element per target
    class under <target_types>.  Returns the collated root element.
    """
    if stats.find("total_time") is None:  # Hack to allow unfinished jobtrees.
        ET.SubElement(stats, "total_time", { "time":"0.0", "clock":"0.0"})
    collatedStatsTag = ET.Element(
        "collated_stats",
        {"total_run_time":stats.find("total_time").attrib["time"],
         "total_clock":stats.find("total_time").attrib["clock"],
         "batch_system":config.attrib["batch_system"],
         "job_time":config.attrib["job_time"],
         "default_memory":config.attrib["default_memory"],
         "default_cpu":config.attrib["default_cpu"],
         "max_cpus":config.attrib["max_cpus"],
         "max_threads":config.attrib["max_threads"] })
    # Add slave info
    slaves = stats.findall("slave")
    buildElement(collatedStatsTag, slaves, "slave")
    # Add aggregated target info
    targets = []
    for slave in slaves:
        targets += slave.findall("target")
    def targetsPerSlave(slave):
        # Bug fix: the original closure ignored its parameter and read the
        # leaked loop variable, so every slave was credited with the *last*
        # slave's target count.
        return list(slave.findall("target"))
    createSummary(buildElement(collatedStatsTag, targets, "target"),
                  slaves, "slave", targetsPerSlave)
    # Get info for each target class (the unused local that held the
    # per-class element has been dropped).
    targetNames = set()
    for target in targets:
        targetNames.add(target.attrib["class"])
    targetTypesTag = ET.SubElement(collatedStatsTag, "target_types")
    for targetName in targetNames:
        targetTypes = [ target for target in targets
                        if target.attrib["class"] == targetName ]
        buildElement(targetTypesTag, targetTypes, targetName)
    return collatedStatsTag
def reportData(xml_tree, options):
    """ Emit the report: raw XML (--raw) or the pretty rendering; written
    to options.outputFile when given, and always printed to stdout.
    """
    # Now dump it all out to file
    if options.raw:
        out_str = prettyXml(xml_tree)
    else:
        root, slave, target, target_types = refineData(xml_tree, options)
        out_str = reportPrettyData(root, slave, target, target_types, options)
    if options.outputFile != None:
        fileHandle = open(options.outputFile, "w")
        fileHandle.write(out_str)
        fileHandle.close()
    # Now dump onto the screen
    print out_str
def main():
    """ Reports stats on the job-tree, use with --stats option to jobTree.
    """
    parser = getBasicOptionParser(
        "usage: %prog [--jobTree] JOB_TREE_DIR [options]", "%prog 0.1")
    initializeOptions(parser)
    options, args = parseBasicOptions(parser)
    checkOptions(options, args, parser)
    # Parse, collate, and render.
    config, stats = getSettings(options)
    reportData(processData(config, stats, options), options)
def _test():
    """Run this module's doctests; returns doctest's TestResults."""
    import doctest
    return doctest.testmod()
if __name__ == "__main__":
    # Run the doctest suite first, then produce the actual report.
    _test()
    main()
added --human to mirror --pretty
#!/usr/bin/env python
# Copyright (C) 2011 by Benedict Paten (benedictpaten@gmail.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Reports the state of your given job tree.
"""
import sys
import os
import xml.etree.cElementTree as ET
from xml.dom import minidom # For making stuff pretty
from sonLib.bioio import logger
from sonLib.bioio import logFile
from sonLib.bioio import getBasicOptionParser
from sonLib.bioio import parseBasicOptions
from sonLib.bioio import TempFileTree
from jobTree.src.master import getEnvironmentFileName, getJobFileDirName
from jobTree.src.master import getStatsFileName, getConfigFileName
class JTTag(object):
    """Convenience wrapper exposing an ElementTree tag's numeric attributes
    as float object attributes (nan when an attribute is missing or does
    not parse as a float).
    """
    def __init__(self, tree):
        """ Given an ElementTree tag, build a convenience object.
        """
        # Fix: the original list repeated min_clock, average_memory,
        # max_number_per_slave and max_memory; the duplicates are removed
        # (the repeated setattr calls were redundant no-ops).
        for name in ["total_time", "median_clock", "total_memory", "median_wait",
                     "total_number", "average_time", "median_memory",
                     "min_number_per_slave", "average_wait", "total_clock",
                     "median_time", "min_time", "min_wait", "max_clock",
                     "max_wait", "total_wait", "min_clock", "average_memory",
                     "max_number_per_slave", "max_memory",
                     "median_number_per_slave", "average_number_per_slave",
                     "max_time", "average_clock", "min_memory"
                     ]:
            setattr(self, name, self.__get(tree, name))
        self.name = tree.tag

    def __get(self, tag, name):
        """Return float(tag.attrib[name]), or nan when absent/unparseable."""
        if name in tag.attrib:
            value = tag.attrib[name]
        else:
            return float("nan")
        try:
            a = float(value)
        except ValueError:
            a = float("nan")
        return a
def initializeOptions(parser):
    """ Attach all jobTreeStats command line options to *parser*. """
    # Data-driven registration: (option strings, keyword arguments).
    optionSpecs = [
        (("--jobTree",),
         dict(dest="jobTree", help="Directory containing the job tree")),
        (("--outputFile",),
         dict(dest="outputFile", default=None,
              help="File in which to write results")),
        (("--raw",),
         dict(action="store_true", default=False,
              help="output the raw xml data.")),
        # --human is an alias of --pretty; both store into options.pretty.
        (("--pretty", "--human"),
         dict(action="store_true", default=False,
              help=("if not raw, prettify the numbers to be "
                    "human readable."))),
        (("--categories",),
         dict(help=("comma separated list from [time, clock, wait, "
                    "memory]"))),
        (("--sortby",),
         dict(default="time",
              help=("how to sort Target list. may be from [alpha, "
                    "time, clock, wait, memory, count]. "
                    "default=%(default)s"))),
        (("--reverse_sort",),
         dict(default=False, action="store_true",
              help="reverse sort order.")),
    ]
    for optStrings, optKwargs in optionSpecs:
        parser.add_option(*optStrings, **optKwargs)
def checkOptions(options, args, parser):
    """Validate the parsed options, normalizing them in place.

    args may carry the job tree directory as a single positional argument.
    Any validation failure is reported through parser.error (which exits).
    """
    logger.info("Parsed arguments")
    assert len(args) <= 1  # Only jobtree may be specified as argument
    if len(args) == 1:  # Allow jobTree directory as arg
        options.jobTree = args[0]
    logger.info("Checking if we have files for job tree")
    # Idiom fix: identity comparison is the correct test for None.
    if options.jobTree is None:
        parser.error("Specify --jobTree")
    if not os.path.exists(options.jobTree):
        parser.error("--jobTree %s does not exist"
                     % options.jobTree)
    if not os.path.isdir(options.jobTree):
        parser.error("--jobTree %s is not a directory"
                     % options.jobTree)
    if not os.path.isfile(getConfigFileName(options.jobTree)):
        parser.error("A valid job tree must contain the config file")
    if not os.path.isfile(getStatsFileName(options.jobTree)):
        parser.error("The job-tree was run without the --stats flag, "
                     "so no stats were created")
    # Normalize --categories into a validated list.
    defaultCategories = ["time", "clock", "wait", "memory"]
    if options.categories is None:
        options.categories = defaultCategories
    else:
        options.categories = options.categories.split(",")
    for c in options.categories:
        if c not in defaultCategories:
            parser.error("Unknown category %s. Must be from %s"
                         % (c, str(defaultCategories)))
    # --sortby accepts the categories plus two extra keys.
    extraSort = ["count", "alpha"]
    if options.sortby is not None:
        if (options.sortby not in defaultCategories and
                options.sortby not in extraSort):
            parser.error("Unknown --sortby %s. Must be from %s"
                         % (options.sortby, str(defaultCategories + extraSort)))
    logger.info("Checked arguments")
def prettyXml(elem):
    """Return a pretty-printed XML string for the ElementTree Element.
    """
    # Round-trip through minidom, which knows how to indent.
    return minidom.parseString(ET.tostring(elem, "utf-8")).toprettyxml(indent=" ")
def padStr(s, field=None):
    """Left-pad *s* with spaces up to *field* characters.

    No-op when field is None or *s* is already at least that long.
    """
    if field is None or len(s) >= field:
        return s
    return s.rjust(field)
def prettyMemory(k, field=None, isBytes=False):
    """ Given input k as kilobytes, return a nicely formatted string.

    k       -- amount of memory in kilobytes (or bytes when isBytes=True).
    field   -- optional minimum width the result is padded to.
    isBytes -- when True, k is first converted from bytes to kilobytes.
    """
    # (Removed an unused "from math import floor".)
    if isBytes:
        k /= 1024
    if k < 1024:
        return padStr("%gK" % k, field)
    if k < (1024 * 1024):
        return padStr("%.1fM" % (k / 1024.0), field)
    if k < (1024 * 1024 * 1024):
        return padStr("%.1fG" % (k / 1024.0 / 1024.0), field)
    if k < (1024 * 1024 * 1024 * 1024):
        return padStr("%.1fT" % (k / 1024.0 / 1024.0 / 1024.0), field)
    # Bug fix: values of a petabyte or more used to fall off the end of the
    # if-chain and return None; report them in petabytes instead.
    return padStr("%.1fP" % (k / 1024.0 / 1024.0 / 1024.0 / 1024.0), field)
def prettyTime(t, field=None):
    """ Given input t as seconds, return a nicely formatted string.

    t     -- duration in seconds.
    field -- optional minimum width the result is padded to (via padStr).
    Buckets: <2min as seconds, <2h as m/s, <25h as h/m/s, <1week as
    d/h/m/s, otherwise w/d/h/m/s with plural 's' on weeks/days > 1.
    """
    from math import floor
    pluralDict = {True: "s", False: ""}
    # Under two minutes: plain seconds.
    if t < 120:
        return padStr("%ds" % t, field)
    # Under two hours: minutes and seconds.
    if t < 120 * 60:
        m = floor(t / 60.)
        s = t % 60
        return padStr("%dm%ds" % (m, s), field)
    # Under 25 hours: hours, minutes, seconds.
    if t < 25 * 60 * 60:
        h = floor(t / 60. / 60.)
        m = floor((t - (h * 60. * 60.)) / 60.)
        s = t % 60
        # NOTE(review): "%g" for minutes looks unintentional (every other
        # branch uses %d) but is preserved as-is.
        return padStr("%dh%gm%ds" % (h, m, s), field)
    # Under one week: days, hours, minutes, seconds.
    if t < 7 * 24 * 60 * 60:
        d = floor(t / 24. / 60. / 60.)
        h = floor((t - (d * 24. * 60. * 60.)) / 60. / 60.)
        m = floor((t
                   - (d * 24. * 60. * 60.)
                   - (h * 60. * 60.))
                  / 60.)
        s = t % 60
        dPlural = pluralDict[d > 1]
        return padStr("%dday%s%dh%dm%ds" % (d, dPlural, h, m, s), field)
    # A week or more: weeks, days, hours, minutes, seconds.
    w = floor(t / 7. / 24. / 60. / 60.)
    d = floor((t - (w * 7 * 24 * 60 * 60)) / 24. / 60. / 60.)
    h = floor((t
               - (w * 7. * 24. * 60. * 60.)
               - (d * 24. * 60. * 60.))
              / 60. / 60.)
    m = floor((t
               - (w * 7. * 24. * 60. * 60.)
               - (d * 24. * 60. * 60.)
               - (h * 60. * 60.))
              / 60.)
    s = t % 60
    wPlural = pluralDict[w > 1]
    dPlural = pluralDict[d > 1]
    return padStr("%dweek%s%dday%s%dh%dm%ds" % (w, wPlural, d,
                                                dPlural, h, m, s), field)
def reportTime(t, options, field=None):
    """ Given t seconds, report back the correct format as string.

    Pretty mode delegates to prettyTime; otherwise a fixed two-decimal
    rendering, optionally right-aligned to *field* characters.
    """
    if options.pretty:
        return prettyTime(t, field=field)
    if field is None:
        return "%.2f" % t
    return "%*.2f" % (field, t)
def reportMemory(k, options, field=None, isBytes=False):
    """ Given k kilobytes, report back the correct format as string.

    Pretty mode delegates to prettyMemory; otherwise a %g rendering with a
    trailing 'K', optionally right-aligned to *field* characters.
    """
    if options.pretty:
        return prettyMemory(k, field=field, isBytes=isBytes)
    value = k / 1024 if isBytes else k
    if field is None:
        return "%gK" % value
    return "%*gK" % (field, value)
def reportNumber(n, options, field=None):
    """ Given n an integer, report back the correct format as string.

    options is unused but kept for signature parity with the other
    report* helpers.
    """
    return "%g" % n if field is None else "%*g" % (field, n)
def refineData(root, options):
    """ Walk *root* and wrap the interesting tags in JTTag objects.

    Returns (root, slave summary, target summary, list of target types).
    """
    targetTypes = [JTTag(child) for child in root.find("target_types")]
    return (root,
            JTTag(root.find("slave")),
            JTTag(root.find("target")),
            targetTypes)
def sprintTag(key, tag, options):
    """ Print out a JTTag()

    key     -- name of the tag ("slave", "target", or a target class name);
               "target" additionally gets a per-slave job-count block.
    tag     -- the JTTag whose statistics are rendered.
    options -- must carry .categories (subset of time/clock/wait/memory)
               and .pretty; each selected category adds a min/med/ave/max
               column group.
    Returns the formatted multi-line string (header, sub-header, values).
    """
    header = " %7s " % "Count"
    sub_header = " %7s " % "n"
    tag_str = " %s" % reportNumber(tag.total_number, options, field=7)
    out_str = ""
    if key == "target":
        # Extra block: distribution of jobs per slave.
        out_str += " %-12s | %7s%7s%7s%7s " % ("Slave Jobs", "min",
                                               "med", "ave", "max")
        slave_str = "%s| \n" % (" " * 14)
        for t in [tag.min_number_per_slave, tag.median_number_per_slave,
                  tag.average_number_per_slave, tag.max_number_per_slave]:
            slave_str += reportNumber(t, options, field=7)
        out_str += slave_str + "\n"
    # One min/med/ave/max column group per requested category.
    if "time" in options.categories:
        header += "| %40s " % "Time"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_time, tag.median_time,
                  tag.average_time, tag.max_time]:
            tag_str += reportTime(t, options, field=10)
    if "clock" in options.categories:
        header += "| %40s " % "Clock"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_clock, tag.median_clock,
                  tag.average_clock, tag.max_clock]:
            tag_str += reportTime(t, options, field=10)
    if "wait" in options.categories:
        header += "| %40s " % "Wait"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_wait, tag.median_wait,
                  tag.average_wait, tag.max_wait]:
            tag_str += reportTime(t, options, field=10)
    if "memory" in options.categories:
        header += "| %40s " % "Memory"
        sub_header += "| %10s%10s%10s%10s " % ("min", "med", "ave", "max")
        tag_str += " | "
        for t in [tag.min_memory, tag.median_memory,
                  tag.average_memory, tag.max_memory]:
            tag_str += reportMemory(t, options, field=10)
    out_str += header + "\n"
    out_str += sub_header + "\n"
    out_str += tag_str + "\n"
    return out_str
def get(tree, name):
    """ Return a float value attribute NAME from TREE.

    nan is returned both when the attribute is absent and when its value
    does not parse as a float.
    """
    try:
        return float(tree.attrib[name])
    except (KeyError, ValueError):
        return float("nan")
def sortTargets(targetTypes, options):
    """ Return a targetTypes all sorted.

    Sorts by the criterion named in options.sortby ("time", "alpha",
    "clock", "wait", "memory" or "count"), reversed when
    options.reverse_sort is set.

    BUG FIX: the original if/elif chain had no final else, so an
    unrecognized sortby silently returned None and callers crashed later
    when iterating it.  Now raises a descriptive ValueError instead.
    """
    key_funcs = {
        "time": lambda tag: tag.median_time,
        "alpha": lambda tag: tag.name,
        "clock": lambda tag: tag.median_clock,
        "wait": lambda tag: tag.median_wait,
        "memory": lambda tag: tag.median_memory,
        "count": lambda tag: tag.total_number,
    }
    if options.sortby not in key_funcs:
        raise ValueError("Unknown sort criterion: %s" % options.sortby)
    return sorted(targetTypes, key=key_funcs[options.sortby],
                  reverse=options.reverse_sort)
def reportPrettyData(root, slave, target, target_types, options):
    """ print the important bits out.

    Renders the collated stats as human-readable text: a global header
    (batch system, defaults, total runtimes), then summary blocks for the
    slaves, for all targets, and one per target type, sorted according to
    options.sortby.  Returns the text; the caller does the printing.
    """
    out_str = "Batch System: %s\n" % root.attrib["batch_system"]
    out_str += ("Default CPU: %s Default Memory: %s\n"
                "Job Time: %s Max CPUs: %s Max Threads: %s\n" % (
        reportNumber(get(root, "default_cpu"), options),
        reportMemory(get(root, "default_memory"), options, isBytes=True),
        reportTime(get(root, "job_time"), options),
        reportNumber(get(root, "max_cpus"), options),
        reportNumber(get(root, "max_threads"), options),
        ))
    out_str += ("Total Clock: %s Total Runtime: %s\n" % (
        reportTime(get(root, "total_clock"), options),
        reportTime(get(root, "total_run_time"), options),
        ))
    out_str += "Slave\n"
    out_str += sprintTag("slave", slave, options)
    out_str += "Target\n"
    out_str += sprintTag("target", target, options)
    # Per-target-type sections in the user's chosen order.
    target_types = sortTargets(target_types, options)
    for t in target_types:
        out_str += " %s\n" % t.name
        out_str += sprintTag(t.name, t, options)
    return out_str
def buildElement(element, items, itemName):
    """ Create an element for output.

    Aggregates the "time", "clock", "memory" attributes of ITEMS (and the
    derived wait = time - clock) into total/median/average/min/max stats
    and attaches them as a new child of ELEMENT named ITEMNAME.  Returns
    the new sub-element.

    BUG FIX: the median indices previously used true division
    (len(...)/2), which yields a float under Python 3 and raises
    TypeError on list indexing.  Floor division (//) is behaviorally
    identical under Python 2 and correct under Python 3.
    """
    def __round(i):
        # Clamp negative measurements (e.g. clock skew) to zero.
        if i < 0:
            logger.debug("I got a less than 0 value: %s" % i)
            return 0.0
        return i
    itemTimes = [ __round(float(item.attrib["time"])) for item in items ]
    itemTimes.sort()
    itemClocks = [ __round(float(item.attrib["clock"])) for item in items ]
    itemClocks.sort()
    itemWaits = [ __round(__round(float(item.attrib["time"])) -
                          __round(float(item.attrib["clock"])))
                  for item in items ]
    itemWaits.sort()
    itemMemory = [ __round(float(item.attrib["memory"])) for item in items ]
    itemMemory.sort()
    assert len(itemClocks) == len(itemTimes)
    assert len(itemClocks) == len(itemWaits)
    if len(itemTimes) == 0:
        # Sentinel zero so min()/max() and the averages below stay defined.
        itemTimes.append(0)
        itemClocks.append(0)
        itemWaits.append(0)
        itemMemory.append(0)
    return ET.SubElement(
        element, itemName,
        {"total_number":str(len(items)),
         "total_time":str(sum(itemTimes)),
         "median_time":str(itemTimes[len(itemTimes)//2]),
         "average_time":str(sum(itemTimes)/len(itemTimes)),
         "min_time":str(min(itemTimes)),
         "max_time":str(max(itemTimes)),
         "total_clock":str(sum(itemClocks)),
         "median_clock":str(itemClocks[len(itemClocks)//2]),
         "average_clock":str(sum(itemClocks)/len(itemClocks)),
         "min_clock":str(min(itemClocks)),
         "max_clock":str(max(itemClocks)),
         "total_wait":str(sum(itemWaits)),
         "median_wait":str(itemWaits[len(itemWaits)//2]),
         "average_wait":str(sum(itemWaits)/len(itemWaits)),
         "min_wait":str(min(itemWaits)),
         "max_wait":str(max(itemWaits)),
         "total_memory":str(sum(itemMemory)),
         "median_memory":str(itemMemory[len(itemMemory)//2]),
         "average_memory":str(sum(itemMemory)/len(itemMemory)),
         "min_memory":str(min(itemMemory)),
         "max_memory":str(max(itemMemory))
        })
def createSummary(element, containingItems, containingItemName, getFn):
    """ Annotate ELEMENT with count statistics per container.

    For each container in CONTAININGITEMS, getFn(container) yields its
    contained items; median/average/min/max of the per-container counts
    are written onto ELEMENT as *_number_per_<containingItemName> attrs.

    BUG FIX: the median index previously used true division
    (len(...) / 2), a float under Python 3 which raises TypeError on
    list indexing.  Floor division is identical under Python 2.
    """
    itemCounts = [len(getFn(containingItem)) for
                  containingItem in containingItems]
    itemCounts.sort()
    if len(itemCounts) == 0:
        # Sentinel zero keeps min()/max() and the average defined.
        itemCounts.append(0)
    element.attrib["median_number_per_%s" %
                   containingItemName] = str(itemCounts[len(itemCounts) // 2])
    element.attrib["average_number_per_%s" %
                   containingItemName] = str(float(sum(itemCounts)) /
                                             len(itemCounts))
    element.attrib["min_number_per_%s" %
                   containingItemName] = str(min(itemCounts))
    element.attrib["max_number_per_%s" %
                   containingItemName] = str(max(itemCounts))
def getSettings(options):
    """ Load and parse the jobTree config and stats XML files.

    Returns (config_root, stats_root).  An unparseable config file is
    fatal (re-raised); an empty stats file exits cleanly, since it
    usually just means no jobs have completed yet.
    """
    config_file = getConfigFileName(options.jobTree)
    stats_file = getStatsFileName(options.jobTree)
    try:
        config = ET.parse(config_file).getroot()
    except ET.ParseError:
        sys.stderr.write('The config file xml, %s, is empty.\n' % config_file)
        raise
    try:
        stats = ET.parse(stats_file).getroot()
    except ET.ParseError:
        # Not treated as an error: stats accrue only as jobs finish.
        sys.stderr.write('The job tree stats file is empty. Either the job '
                         'has crashed, or no jobs have completed yet.\n')
        sys.exit(0)
    return config, stats
def processData(config, stats, options):
    """ Collate the raw per-slave stats into a single XML summary element.

    Builds a "collated_stats" element carrying global run attributes,
    aggregate slave and target stats, and one aggregate per target class.

    BUG FIX: the per-slave summary callback previously ignored its
    argument and closed over the loop variable `slave`, so every slave
    was credited with the LAST slave's target count.  It now uses the
    element it is handed.
    """
    ##########################################
    # Collate the stats and report
    ##########################################
    if stats.find("total_time") is None:  # Hack to allow unfinished jobtrees.
        ET.SubElement(stats, "total_time", { "time":"0.0", "clock":"0.0"})
    collatedStatsTag = ET.Element(
        "collated_stats",
        {"total_run_time":stats.find("total_time").attrib["time"],
         "total_clock":stats.find("total_time").attrib["clock"],
         "batch_system":config.attrib["batch_system"],
         "job_time":config.attrib["job_time"],
         "default_memory":config.attrib["default_memory"],
         "default_cpu":config.attrib["default_cpu"],
         "max_cpus":config.attrib["max_cpus"],
         "max_threads":config.attrib["max_threads"] })
    # Add slave info
    slaves = stats.findall("slave")
    buildElement(collatedStatsTag, slaves, "slave")
    # Add aggregated target info
    targets = []
    for slave in slaves:
        targets += slave.findall("target")
    def targetsOfSlave(slave_elem):
        # Use the element passed in, not the loop variable (see BUG FIX).
        return list(slave_elem.findall("target"))
    createSummary(buildElement(collatedStatsTag, targets, "target"),
                  slaves, "slave", targetsOfSlave)
    # Get info for each target
    targetNames = set()
    for target in targets:
        targetNames.add(target.attrib["class"])
    targetTypesTag = ET.SubElement(collatedStatsTag, "target_types")
    for targetName in targetNames:
        targetTypes = [ target for target in targets
                        if target.attrib["class"] == targetName ]
        buildElement(targetTypesTag, targetTypes, targetName)
    return collatedStatsTag
def reportData(xml_tree, options):
    """ Write the collated stats to options.outputFile (if set) and stdout.

    Raw XML is emitted when options.raw is set, otherwise the data is
    refined and pretty-printed.

    Improvements: file handle now closed via a context manager even if
    the write raises; `is not None` instead of `!= None`; print written
    as a parenthesized call, which behaves identically on Python 2.
    """
    # Now dump it all out to file
    if options.raw:
        out_str = prettyXml(xml_tree)
    else:
        root, slave, target, target_types = refineData(xml_tree, options)
        out_str = reportPrettyData(root, slave, target, target_types, options)
    if options.outputFile is not None:
        with open(options.outputFile, "w") as fileHandle:
            fileHandle.write(out_str)
    # Now dump onto the screen
    print(out_str)
def main():
    """ Reports stats on the job-tree, use with --stats option to jobTree.

    Parses command-line options, loads the config/stats XML, collates the
    statistics, and prints (and optionally writes) the report.
    """
    # Shared jobTree option-parser boilerplate.
    parser = getBasicOptionParser(
        "usage: %prog [--jobTree] JOB_TREE_DIR [options]", "%prog 0.1")
    initializeOptions(parser)
    options, args = parseBasicOptions(parser)
    checkOptions(options, args, parser)
    # Load -> collate -> report.
    config, stats = getSettings(options)
    collatedStatsTag = processData(config, stats, options)
    reportData(collatedStatsTag, options)
def _test():
import doctest
return doctest.testmod()
if __name__ == "__main__":
    # Run the doctest suite first, then generate the stats report.
    _test()
    main()
|
import os
import sys
import locale
import threading
import contextlib
locale_lock = threading.Lock()
@contextlib.contextmanager
def setlocale(temporary_locale):
    """Context manager: switch LC_ALL to *temporary_locale* for the block.

    Serialized through ``locale_lock`` because the process-wide locale is
    shared state; the previous locale is always restored on exit.
    """
    with locale_lock:
        saved_locale = locale.setlocale(locale.LC_ALL)
        try:
            yield locale.setlocale(locale.LC_ALL, temporary_locale)
        finally:
            locale.setlocale(locale.LC_ALL, saved_locale)
def raw_input_with_default(prompt, default):
    """Prompt the user, pre-filling the input buffer with *default*."""
    import readline

    def _seed_buffer():
        readline.insert_text(default)
        readline.redisplay()

    readline.set_pre_input_hook(_seed_buffer)
    try:
        return raw_input(prompt)
    finally:
        # Clear the hook so later prompts start with an empty buffer.
        readline.set_pre_input_hook(None)
def int_input_with_default(prompt, default):
    """Prompt for an integer with *default* pre-typed; empty input -> None."""
    seed = str(default) if default else ''
    answer = raw_input_with_default(prompt, seed).strip()
    return int(answer) if answer else None
setlocale: raise UnsupportedLocaleSettingError if given locale is unsupported
import os
import sys
import locale
import threading
import contextlib
class UnsupportedLocaleSettingError(locale.Error):
    """Raised by setlocale() when the requested locale is not installed."""
    pass
locale_lock = threading.Lock()
@contextlib.contextmanager
def setlocale(temporary_locale):
    """Context manager: switch LC_ALL to *temporary_locale* for the block.

    Raises UnsupportedLocaleSettingError when the requested locale is not
    available on the host; any other locale.Error is re-raised.  The
    previous locale is always restored, and the switch is serialized
    through ``locale_lock`` because the process-wide locale is shared.

    Fixes: Python-2-only ``except E, ex`` syntax replaced with
    ``except E as ex`` (valid on 2.6+ and 3.x); deprecated ``ex.message``
    replaced by ``ex.args[0]``; bare ``raise`` preserves the original
    traceback that ``raise ex`` discarded.
    """
    with locale_lock:
        primary_locale = locale.setlocale(locale.LC_ALL)
        try:
            try:
                yield locale.setlocale(locale.LC_ALL, temporary_locale)
            except locale.Error as ex:
                if ex.args and ex.args[0] == 'unsupported locale setting':
                    raise UnsupportedLocaleSettingError(temporary_locale)
                raise
        finally:
            locale.setlocale(locale.LC_ALL, primary_locale)
def raw_input_with_default(prompt, default):
    """Prompt the user, pre-filling the input buffer with DEFAULT.

    A readline pre-input hook seeds the line buffer so the user can edit
    the default in place instead of retyping it.
    """
    import readline
    def pre_input_hook():
        readline.insert_text(default)
        readline.redisplay()
    readline.set_pre_input_hook(pre_input_hook)
    try:
        return raw_input(prompt)
    finally:
        # Always clear the hook so later prompts start empty.
        readline.set_pre_input_hook(None)
def int_input_with_default(prompt, default):
    """Prompt for an integer with *default* pre-typed; empty input -> None."""
    seed = str(default) if default else ''
    reply = raw_input_with_default(prompt, seed).strip()
    if not reply:
        return None
    return int(reply)
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
from tensorflow.lite.python import lite_constants
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Lazy load since some of the performance benchmark skylark rules
# break dependencies.
_toco_python = LazyLoader(
    "tensorflow_wrap_toco", globals(),
    "tensorflow.lite.toco.python."
    "tensorflow_wrap_toco")
del LazyLoader
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Empty string means "call the in-process TOCO API, no subprocess".
  _toco_from_proto_bin = ""
else:
  _toco_from_proto_bin = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos")
# Fall back to the bare console-script name on $PATH when the bazel data
# file is absent (the pip-install case).
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
  _toco_from_proto_bin = "toco_from_protos"
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
    dtypes.float32: _types_pb2.FLOAT,
    dtypes.int32: _types_pb2.INT32,
    dtypes.int64: _types_pb2.INT64,
    dtypes.string: _types_pb2.STRING,
    dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
    dtypes.complex64: _types_pb2.COMPLEX64
}
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return output.decode()
except UnicodeDecodeError:
pass
return output
def convert_dtype_to_tflite_type(tf_dtype):
  """Converts tf.dtype to TFLite proto type.

  Args:
    tf_dtype: tf.dtype

  Raises:
    ValueError: Unsupported tf.dtype.

  Returns:
    types_flag_pb2.
  """
  try:
    return _MAP_TF_TO_TFLITE_TYPES[tf_dtype]
  except KeyError:
    raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """

  # Convert model using TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"

  # Convert model using TensorFlow ops. Not all TensorFlow ops are available.
  # WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"

  def __str__(self):
    """Render the option as its underlying string value."""
    return str(self.value)

  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return [str(option) for option in OpsSet]
class ConverterError(Exception):
  """Raised when an error occurs during model conversion."""
# Don't expose these for now.
# @_tf_export("lite.toco_convert_protos")
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).
  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # TODO(aselle): When toco does not use fatal errors for failure, we can
  # switch this on.
  # In-process path: no binary configured, call the wrapped C++ API directly.
  if not _toco_from_proto_bin:
    try:
      model_str = _toco_python.TocoConvert(model_flags_str, toco_flags_str,
                                           input_data_str)
      return model_str
    except Exception as e:
      raise ConverterError("TOCO failed: %s" % e)
  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  toco_filename, model_filename, input_filename, output_filename = (
      None, None, None, None)
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_input:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(input_data_str)
      fp_model.flush()
      fp_toco.flush()
      fp_input.flush()
    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name
    # Run
    cmd = [
        _toco_from_proto_bin, model_filename, toco_filename, input_filename,
        output_filename
    ]
    cmdline = " ".join(cmd)
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    # NOTE(review): stderr is merged into stdout above, so `stderr` here is
    # always None and _try_convert_to_unicode turns it into "".
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError(
          "TOCO failed. See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files.
    # TypeError is caught for filenames still None (early failure).
    for filename in [
        toco_filename, input_filename, model_filename, output_filename]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        pass
def tensor_name(x):
  """Return the op name of tensor *x*, i.e. its name minus any ":idx" suffix."""
  name, _, _ = x.name.partition(":")
  return name
# Don't expose these for now.
# @_tf_export("lite.build_toco_convert_protos")
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only need if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)

  Returns:
    model_flags, toco_flags: two protocol buffers describing the conversion
      process.

  Raises:
    ValueError: If the input tensor type is unknown
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  # Copy the scalar conversion settings into the TocoFlags proto.
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    # Input type defaults to the overall inference type.
    toco.inference_input_type = toco.inference_type
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.post_training_quantize = post_training_quantize
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  # Only two target_ops combinations are recognized; any other set leaves
  # the select-TF-ops flags at their proto defaults.
  if target_ops:
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True
  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    # NOTE(review): assumes quantized_input_stats is provided whenever the
    # inference input type is QUANTIZED_UINT8; a None here raises TypeError.
    # TODO confirm callers validate this.
    if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    input_array.name = tensor_name(input_tensor)
    if input_shapes is None:
      shape = input_tensor.get_shape()
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(map(int, shape))
  for output_tensor in output_tensors:
    model.output_arrays.append(tensor_name(output_tensor))
  model.allow_nonexistent_arrays = allow_nonexistent_arrays
  return model, toco
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           *args, **kwargs):
  """Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)
  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    # BUG FIX: `inference_type` is an optional kwarg (it has a default in
    # build_toco_convert_protos), so use .get() instead of indexing —
    # kwargs["inference_type"] raised KeyError when the caller omitted it.
    if kwargs.get("inference_type") == lite_constants.QUANTIZED_UINT8:
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][idx]
    input_array.name = name
    input_array.shape.dims.extend(map(int, shape))
  for name in output_arrays:
    model_flags.output_arrays.append(name)
  # Serialize the protos and hand them to the TOCO driver.
  data = toco_convert_protos(model_flags.SerializeToString(),
                             toco_flags.SerializeToString(),
                             input_data.SerializeToString())
  return data
def toco_convert_impl(input_data, input_tensors, output_tensors, *args,
                      **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  # Serialize the flag protos and the graph, then run the actual conversion.
  data = toco_convert_protos(model_flags.SerializeToString(),
                             toco_flags.SerializeToString(),
                             input_data.SerializeToString())
  return data
@_tf_export("lite.toco_convert")
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to TFLite.
  Conversion can be customized by providing arguments that are forwarded to
  `build_toco_convert_protos` (see documentation for details). This function has
  been deprecated. Please use `lite.TFLiteConverter` instead.

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Deprecated public shim; all real work happens in toco_convert_impl.
  return toco_convert_impl(input_data, input_tensors, output_tensors, *args,
                           **kwargs)
Added support for the `input_data_types` flag to the Python API.
PiperOrigin-RevId: 223994946
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
from tensorflow.lite.python import lite_constants
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Lazy load since some of the performance benchmark skylark rules
# break dependencies.
_toco_python = LazyLoader(
    "tensorflow_wrap_toco", globals(),
    "tensorflow.lite.toco.python."
    "tensorflow_wrap_toco")
del LazyLoader
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
  # Empty string means "call the in-process TOCO API, no subprocess".
  _toco_from_proto_bin = ""
else:
  _toco_from_proto_bin = _resource_loader.get_path_to_datafile(
      "../toco/python/toco_from_protos")
# Fall back to the bare console-script name on $PATH when the bazel data
# file is absent (the pip-install case).
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
  _toco_from_proto_bin = "toco_from_protos"
# Map of tf.dtypes to TFLite types_flag_pb2.
_MAP_TF_TO_TFLITE_TYPES = {
    dtypes.float32: _types_pb2.FLOAT,
    dtypes.int32: _types_pb2.INT32,
    dtypes.int64: _types_pb2.INT64,
    dtypes.string: _types_pb2.STRING,
    dtypes.uint8: _types_pb2.QUANTIZED_UINT8,
    dtypes.complex64: _types_pb2.COMPLEX64
}
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return output.decode()
except UnicodeDecodeError:
pass
return output
def convert_dtype_to_tflite_type(tf_dtype):
  """Converts tf.dtype to TFLite proto type.

  Args:
    tf_dtype: tf.dtype

  Raises:
    ValueError: Unsupported tf.dtype.

  Returns:
    types_flag_pb2.
  """
  # Dict lookup; unknown dtypes fall through to the explicit ValueError.
  result = _MAP_TF_TO_TFLITE_TYPES.get(tf_dtype)
  if result is None:
    raise ValueError("Unsupported tf.dtype {0}".format(tf_dtype))
  return result
class OpsSet(enum.Enum):
  """Enum class defining the sets of ops available to generate TFLite models.

  WARNING: Experimental interface, subject to change.
  """
  # Convert model using TensorFlow Lite builtin ops.
  TFLITE_BUILTINS = "TFLITE_BUILTINS"
  # Convert model using TensorFlow ops. Not all TensorFlow ops are available.
  # WARNING: Experimental interface, subject to change.
  SELECT_TF_OPS = "SELECT_TF_OPS"
  def __str__(self):
    # Render the option as its underlying string value.
    return self.value
  @staticmethod
  def get_options():
    """Returns a list of OpsSet options as a list of strings."""
    return [str(option) for option in list(OpsSet)]
# Wraps both in-process TOCO failures and non-zero exits of the
# toco_from_protos subprocess.
class ConverterError(Exception):
  """Raised when an error occurs during model conversion."""
  pass
# Don't expose these for now.
# @_tf_export("lite.toco_convert_protos")
def toco_convert_protos(model_flags_str, toco_flags_str, input_data_str):
  """Convert `input_data_str` according to model and toco parameters.

  Unless you know what you are doing consider using
  the more friendly `tf.lite.toco_convert`.

  Args:
    model_flags_str: Serialized proto describing model properties, see
      `toco/model_flags.proto`.
    toco_flags_str: Serialized proto describing conversion properties, see
      `toco/toco_flags.proto`.
    input_data_str: Input data in serialized form (e.g. a graphdef is common)
  Returns:
    Converted model in serialized form (e.g. a TFLITE model is common).
  Raises:
    ConverterError: When conversion fails in TFLiteConverter, usually due to
      ops not being supported.
    RuntimeError: When conversion fails, an exception is raised with the error
      message embedded.
  """
  # TODO(aselle): When toco does not use fatal errors for failure, we can
  # switch this on.
  # In-process path: no binary configured, call the wrapped C++ API directly.
  if not _toco_from_proto_bin:
    try:
      model_str = _toco_python.TocoConvert(model_flags_str, toco_flags_str,
                                           input_data_str)
      return model_str
    except Exception as e:
      raise ConverterError("TOCO failed: %s" % e)
  # Windows and TemporaryFile are not that useful together,
  # since you cannot have two readers/writers. So we have to
  # make the temporaries and close and delete them explicitly.
  toco_filename, model_filename, input_filename, output_filename = (
      None, None, None, None)
  try:
    # Build all input files
    with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_model, \
             _tempfile.NamedTemporaryFile(delete=False) as fp_input:
      toco_filename = fp_toco.name
      input_filename = fp_input.name
      model_filename = fp_model.name
      fp_model.write(model_flags_str)
      fp_toco.write(toco_flags_str)
      fp_input.write(input_data_str)
      fp_model.flush()
      fp_toco.flush()
      fp_input.flush()
    # Reserve an output file
    with _tempfile.NamedTemporaryFile(delete=False) as fp:
      output_filename = fp.name
    # Run
    cmd = [
        _toco_from_proto_bin, model_filename, toco_filename, input_filename,
        output_filename
    ]
    cmdline = " ".join(cmd)
    is_windows = _platform.system() == "Windows"
    proc = _subprocess.Popen(
        cmdline,
        shell=True,
        stdout=_subprocess.PIPE,
        stderr=_subprocess.STDOUT,
        close_fds=not is_windows)
    # NOTE(review): stderr is merged into stdout above, so `stderr` here is
    # always None and _try_convert_to_unicode turns it into "".
    stdout, stderr = proc.communicate()
    exitcode = proc.returncode
    if exitcode == 0:
      with open(output_filename, "rb") as fp:
        return fp.read()
    else:
      stdout = _try_convert_to_unicode(stdout)
      stderr = _try_convert_to_unicode(stderr)
      raise ConverterError(
          "TOCO failed. See console for info.\n%s\n%s\n" % (stdout, stderr))
  finally:
    # Must manually cleanup files.
    # TypeError is caught for filenames still None (early failure).
    for filename in [
        toco_filename, input_filename, model_filename, output_filename]:
      try:
        _os.unlink(filename)
      except (OSError, TypeError):
        pass
def tensor_name(x):
  """Return the op name of tensor *x*, i.e. its name minus any ":idx" suffix."""
  name, _, _ = x.name.partition(":")
  return name
# Don't expose these for now.
# @_tf_export("lite.build_toco_convert_protos")
def build_toco_convert_protos(input_tensors,
                              output_tensors,
                              inference_type=lite_constants.FLOAT,
                              inference_input_type=None,
                              input_format=lite_constants.TENSORFLOW_GRAPHDEF,
                              input_shapes=None,
                              output_format=lite_constants.TFLITE,
                              quantized_input_stats=None,
                              default_ranges_stats=None,
                              drop_control_dependency=True,
                              reorder_across_fake_quant=False,
                              allow_custom_ops=False,
                              change_concat_input_ranges=False,
                              post_training_quantize=False,
                              dump_graphviz_dir=None,
                              dump_graphviz_video=False,
                              target_ops=None,
                              allow_nonexistent_arrays=False):
  """Builds protocol buffers describing a conversion of a model using TOCO.

  Typically this is to convert from TensorFlow GraphDef to TFLite, in which
  case the default `input_format` and `output_format` are sufficient.

  Args:
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    inference_type: Target data type of real-number arrays in the output file.
      Must be `{tf.float32, tf.uint8}`. (default tf.float32)
    inference_input_type: Target data type of real-number input arrays. Allows
      for a different type for input arrays in the case of quantization.
      Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
    input_format: Type of data to read Currently must be
      `{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
    input_shapes: Input array shape. It needs to be a list of the same length
      as `input_tensors`, or None. (default None)
    output_format: Output file format. Currently must be `{TFLITE,
      GRAPHVIZ_DOT}`. (default TFLITE)
    quantized_input_stats: List of tuples of floats representing the mean and
      standard deviation. Each tuple maps to the corresponding input tensor.
      Only need if `inference_input_type` is `QUANTIZED_UINT8`.
      real_input_value = (quantized_input_value - mean_value) / std_dev_value.
      (default None)
    default_ranges_stats: Tuple of integers representing (min, max) range values
      for all arrays without a specified range. Intended for experimenting with
      quantization via "dummy quantization". (default None)
    drop_control_dependency: Boolean indicating whether to drop control
      dependencies silently. This is due to TFLite not supporting control
      dependencies. (default True)
    reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
      nodes in unexpected locations. Used when the location of the FakeQuant
      nodes is preventing graph transformations necessary to convert the graph.
      Results in a graph that differs from the quantized training graph,
      potentially causing differing arithmetic behavior. (default False)
    allow_custom_ops: Boolean indicating whether to allow custom operations.
      When false any unknown operation is an error. When true, custom ops are
      created for any op that is unknown. The developer will need to provide
      these to the TensorFlow Lite runtime with a custom resolver.
      (default False)
    change_concat_input_ranges: Boolean to change behavior of min/max ranges for
      inputs and outputs of the concat operator for quantized models. Changes
      the ranges of concat operator overlap when true. (default False)
    post_training_quantize: Boolean indicating whether to quantize the weights
      of the converted float model. Model size will be reduced and there will be
      latency improvements (at the cost of accuracy).
      (default False)
    dump_graphviz_dir: Full filepath of folder to dump the graphs at various
      stages of processing GraphViz .dot files. Preferred over
      --output_format=GRAPHVIZ_DOT in order to keep the requirements of the
      output file. (default None)
    dump_graphviz_video: Boolean indicating whether to dump the graph after
      every graph transformation. (default False)
    target_ops: Experimental flag, subject to change. Set of OpsSet
      options indicating which converter to use.
      (default set([OpsSet.TFLITE_BUILTINS]))
    allow_nonexistent_arrays: Allow specifying array names that don't exist
      or are unused in the final graph. (default False)

  Returns:
    model_flags, toco_flags: two protocol buffers describing the conversion
    process.

  Raises:
    ValueError: If the input tensor type is unknown, or if
      `inference_input_type` is QUANTIZED_UINT8 but `quantized_input_stats`
      was not provided for the input tensors.
    RuntimeError: If TOCO fails to convert (in which case the runtime error's
      error text will contain the TOCO error log)
  """
  toco = _toco_flags_pb2.TocoFlags()
  toco.input_format = input_format
  toco.output_format = output_format
  toco.inference_type = convert_dtype_to_tflite_type(inference_type)
  if inference_input_type:
    toco.inference_input_type = convert_dtype_to_tflite_type(
        inference_input_type)
  else:
    toco.inference_input_type = toco.inference_type
  # Fail early with a clear error instead of a TypeError when indexing
  # `quantized_input_stats[idx]` below: quantized inputs require their
  # (mean, std_dev) calibration stats.
  if (toco.inference_input_type == _types_pb2.QUANTIZED_UINT8 and
      input_tensors and quantized_input_stats is None):
    raise ValueError("quantized_input_stats must be specified when "
                     "inference_input_type is QUANTIZED_UINT8.")
  toco.drop_control_dependency = drop_control_dependency
  toco.reorder_across_fake_quant = reorder_across_fake_quant
  toco.allow_custom_ops = allow_custom_ops
  toco.post_training_quantize = post_training_quantize
  if default_ranges_stats:
    toco.default_ranges_min = default_ranges_stats[0]
    toco.default_ranges_max = default_ranges_stats[1]
  if dump_graphviz_dir:
    toco.dump_graphviz_dir = dump_graphviz_dir
  toco.dump_graphviz_include_video = dump_graphviz_video
  if target_ops:
    # Both builtin + select → allow TF ops as a fallback; select alone →
    # force every op through the TF (flex) path.
    if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
    elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
      toco.enable_select_tf_ops = True
      toco.force_select_tf_ops = True

  model = _model_flags_pb2.ModelFlags()
  model.change_concat_input_ranges = change_concat_input_ranges
  for idx, input_tensor in enumerate(input_tensors):
    input_array = model.input_arrays.add()
    input_array.name = tensor_name(input_tensor)
    input_array.data_type = convert_dtype_to_tflite_type(input_tensor.dtype)
    if toco.inference_input_type == _types_pb2.QUANTIZED_UINT8:
      input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
    if input_shapes is None:
      shape = input_tensor.get_shape()
    else:
      shape = input_shapes[idx]
    input_array.shape.dims.extend(map(int, shape))

  for output_tensor in output_tensors:
    model.output_arrays.append(tensor_name(output_tensor))

  model.allow_nonexistent_arrays = allow_nonexistent_arrays
  return model, toco
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
                           *args, **kwargs):
  """Convert a model using TOCO.

  This function is used to convert GraphDefs that cannot be loaded into
  TensorFlow to TFLite. Conversion can be customized by providing arguments
  that are forwarded to `build_toco_convert_protos` (see documentation for
  details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_arrays_with_shape: Tuple of strings representing input tensor names
      and list of integers representing input shapes
      (e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
      into TensorFlow and when `input_tensors` is None. (default None)
    output_arrays: List of output tensors to freeze graph with. Use only when
      graph cannot be loaded into TensorFlow and when `output_tensors` is None.
      (default None)
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors=[], output_tensors=[], *args, **kwargs)

  for idx, (name, shape) in enumerate(input_arrays_with_shape):
    input_array = model_flags.input_arrays.add()
    # `inference_type` may legitimately be absent (build_toco_convert_protos
    # defaults it to FLOAT); kwargs.get avoids a spurious KeyError here.
    if kwargs.get("inference_type") == lite_constants.QUANTIZED_UINT8:
      if "quantized_input_stats" not in kwargs:
        raise ValueError("quantized_input_stats must be specified when "
                         "inference_type is QUANTIZED_UINT8.")
      input_array.mean_value, input_array.std_value = kwargs[
          "quantized_input_stats"][idx]
    input_array.name = name
    input_array.shape.dims.extend(map(int, shape))

  for name in output_arrays:
    model_flags.output_arrays.append(name)

  data = toco_convert_protos(model_flags.SerializeToString(),
                             toco_flags.SerializeToString(),
                             input_data.SerializeToString())
  return data
def toco_convert_impl(input_data, input_tensors, output_tensors, *args,
                      **kwargs):
  """Convert a model using TOCO.

  Typically this function is used to convert from TensorFlow GraphDef to
  TFLite. Conversion can be customized by providing arguments that are
  forwarded to `build_toco_convert_protos` (see its documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Build the two conversion protos, then hand their serialized forms to TOCO.
  model_flags, toco_flags = build_toco_convert_protos(
      input_tensors, output_tensors, *args, **kwargs)
  return toco_convert_protos(model_flags.SerializeToString(),
                             toco_flags.SerializeToString(),
                             input_data.SerializeToString())
@_tf_export("lite.toco_convert")
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
  """Convert a model using TOCO (deprecated; use `lite.TFLiteConverter`).

  Typically this function converts a TensorFlow GraphDef to TFLite. Conversion
  can be customized with arguments forwarded to `build_toco_convert_protos`
  (see its documentation for details).

  Args:
    input_data: Input data (i.e. often `sess.graph_def`),
    input_tensors: List of input tensors. Type and shape are computed using
      `foo.get_shape()` and `foo.dtype`.
    output_tensors: List of output tensors (only .name is used from this).
    *args: See `build_toco_convert_protos`,
    **kwargs: See `build_toco_convert_protos`.

  Returns:
    The converted data. For example if TFLite was the destination, then
    this will be a tflite flatbuffer in a bytes array.

  Raises:
    Defined in `build_toco_convert_protos`.
  """
  # Thin deprecated shim over the real implementation.
  result = toco_convert_impl(input_data, input_tensors, output_tensors,
                             *args, **kwargs)
  return result
|
#!/usr/bin/env python3
#
# Copyright (C) 2015 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import iceboxdb
import re, sys
class iceconfig:
    def __init__(self):
        """Create an empty configuration; call a setup_empty_* method to size it."""
        self.clear()
    def clear(self):
        """Reset all device state to an empty, device-less configuration."""
        self.max_x = 0
        self.max_y = 0
        # Device name: "384", "1k", "5k" or "8k" (set by setup_empty_*).
        self.device = ""
        self.warmboot = True
        # Tile dicts map (x, y) -> list of 16 config bit-strings per tile.
        self.logic_tiles = dict()
        self.io_tiles = dict()
        self.ramb_tiles = dict()
        self.ramt_tiles = dict()
        # One dict per DSP tile slice; a DSP cell spans 4 vertically stacked tiles.
        self.dsp_tiles = [dict() for i in range(4)]
        self.ipcon_tiles = dict()
        self.ram_data = dict()
        self.extra_bits = set()
        self.symbols = dict()
def setup_empty_384(self):
self.clear()
self.device = "384"
self.max_x = 7
self.max_y = 9
for x in range(1, self.max_x):
for y in range(1, self.max_y):
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
def setup_empty_1k(self):
self.clear()
self.device = "1k"
self.max_x = 13
self.max_y = 17
for x in range(1, self.max_x):
for y in range(1, self.max_y):
if x in (3, 10):
if y % 2 == 1:
self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
def setup_empty_5k(self):
self.clear()
self.device = "5k"
self.max_x = 25
self.max_y = 31
for x in range(1, self.max_x):
for y in range(1, self.max_y):
if x in (6, 19):
if y % 2 == 1:
self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for x in [0, self.max_x]:
for y in range(1, self.max_y):
if y in [5, 10, 15, 23]:
self.dsp_tiles[0][(x, y)] = ["0" * 54 for i in range(16)]
elif y in [6, 11, 16, 24]:
self.dsp_tiles[1][(x, y)] = ["0" * 54 for i in range(16)]
elif y in [7, 12, 17, 25]:
self.dsp_tiles[2][(x, y)] = ["0" * 54 for i in range(16)]
elif y in [8, 13, 18, 26]:
self.dsp_tiles[3][(x, y)] = ["0" * 54 for i in range(16)]
else:
self.ipcon_tiles[(x, y)] = ["0" * 54 for i in range(16)]
def setup_empty_8k(self):
self.clear()
self.device = "8k"
self.max_x = 33
self.max_y = 33
for x in range(1, self.max_x):
for y in range(1, self.max_y):
if x in (8, 25):
if y % 2 == 1:
self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
def lookup_extra_bit(self, bit):
assert self.device in extra_bits_db
if bit in extra_bits_db[self.device]:
return extra_bits_db[self.device][bit]
return ("UNKNOWN_FUNCTION",)
def tile(self, x, y):
if (x, y) in self.io_tiles: return self.io_tiles[(x, y)]
if (x, y) in self.logic_tiles: return self.logic_tiles[(x, y)]
if (x, y) in self.ramb_tiles: return self.ramb_tiles[(x, y)]
if (x, y) in self.ramt_tiles: return self.ramt_tiles[(x, y)]
for i in range(4):
if (x, y) in self.dsp_tiles[i]: return self.dsp_tiles[i][(x, y)]
if (x, y) in self.ipcon_tiles: return self.ipcon_tiles[(x, y)]
return None
def pinloc_db(self):
if self.device == "384": return pinloc_db["384-qn32"]
if self.device == "1k": return pinloc_db["1k-tq144"]
if self.device == "5k": return pinloc_db["5k-sg48"]
if self.device == "8k": return pinloc_db["8k-ct256"]
assert False
    def gbufin_db(self):
        """Return the global buffer input database for this device."""
        return gbufin_db[self.device]
    def iolatch_db(self):
        """Return the IO latch database for this device."""
        return iolatch_db[self.device]
    def padin_pio_db(self):
        """Return the padin-to-PIO database for this device."""
        return padin_pio_db[self.device]
    def extra_bits_db(self):
        """Return the extra config bits database for this device."""
        return extra_bits_db[self.device]
    def ieren_db(self):
        """Return the IEREN (IO enable) database for this device."""
        return ieren_db[self.device]
def pll_list(self):
if self.device == "1k":
return ["1k"]
if self.device == "5k":
return ["5k"]
if self.device == "8k":
return ["8k_0", "8k_1"]
if self.device == "384":
return [ ]
assert False
# Return true if device is Ultra/UltraPlus series, i.e. has
# IpConnect/DSP at the sides instead of IO
def is_ultra(self):
return self.device in ["5k"]
def colbuf_db(self):
if self.device == "1k":
entries = list()
for x in range(self.max_x+1):
for y in range(self.max_y+1):
src_y = None
if 0 <= y <= 4: src_y = 4
if 5 <= y <= 8: src_y = 5
if 9 <= y <= 12: src_y = 12
if 13 <= y <= 17: src_y = 13
if x in [3, 10] and src_y == 4: src_y = 3
if x in [3, 10] and src_y == 12: src_y = 11
entries.append((x, src_y, x, y))
return entries
if self.device == "8k":
entries = list()
for x in range(self.max_x+1):
for y in range(self.max_y+1):
src_y = None
if 0 <= y <= 8: src_y = 8
if 9 <= y <= 16: src_y = 9
if 17 <= y <= 24: src_y = 24
if 25 <= y <= 33: src_y = 25
entries.append((x, src_y, x, y))
return entries
if self.device == "5k": #Interesting, seems the 5k has more colbufs?
entries = list()
for x in range(self.max_x+1):
for y in range(self.max_y+1):
src_y = None
if 0 <= y <= 4: src_y = 4
if 5 <= y <= 10: src_y = 5
if 11 <= y <= 14: src_y = 14
if 15 <= y <= 20: src_y = 15
if 21 <= y <= 26: src_y = 26
if 27 <= y <= 31: src_y = 27
entries.append((x, src_y, x, y))
return entries
if self.device == "384":
entries = list()
for x in range(self.max_x+1):
for y in range(self.max_y+1):
src_y = None #Is ColBufCtrl relevant?
if 0 <= y <= 2: src_y = 2 #384?
if 3 <= y <= 4: src_y = 3 #384?
if 5 <= y <= 6: src_y = 6 #384?
if 7 <= y <= 9: src_y = 7 #384?
entries.append((x, src_y, x, y))
return entries
assert False
# Return a map between HDL name and routing net and location for a given DSP cell
    def get_dsp_nets_db(self, x, y):
        """Return a map from DSP cell HDL port name to its (x, y, net) location.

        (x, y) must be the base (slice-0) tile of a DSP cell; ports are spread
        over the four stacked tiles y..y+3 (CO is read from the tile above).
        """
        assert ((x, y) in self.dsp_tiles[0])
        # Control signals
        nets = {
            "CLK": (x, y+2, "lutff_global/clk"),
            "CE": (x, y+2, "lutff_global/cen"),
            "IRSTTOP": (x, y+1, "lutff_global/s_r"),
            "IRSTBOT": (x, y+0, "lutff_global/s_r"),
            "ORSTTOP": (x, y+3, "lutff_global/s_r"),
            "ORSTBOT": (x, y+2, "lutff_global/s_r"),
            "AHOLD": (x, y+2, "lutff_0/in_0"),
            "BHOLD": (x, y+1, "lutff_0/in_0"),
            "CHOLD": (x, y+3, "lutff_0/in_0"),
            "DHOLD": (x, y+0, "lutff_0/in_0"),
            "OHOLDTOP": (x, y+3, "lutff_1/in_0"),
            "OHOLDBOT": (x, y+0, "lutff_1/in_0"),
            "ADDSUBTOP": (x, y+3, "lutff_3/in_0"),
            "ADDSUBBOT": (x, y+0, "lutff_3/in_0"),
            "OLOADTOP": (x, y+3, "lutff_2/in_0"),
            "OLOADBOT": (x, y+0, "lutff_2/in_0"),
            "CI": (x, y+0, "lutff_4/in_0"),
            "CO": (x, y+4, "slf_op_0")
        }
        #Data ports
        # Each 16-bit input port uses lutff in_3 for bits 0-7 and in_1 for 8-15.
        for i in range(8):
            nets["C_%d" % i] = (x, y+3, "lutff_%d/in_3" % i)
            nets["C_%d" % (i+8)] = (x, y+3, "lutff_%d/in_1" % i)
            nets["A_%d" % i] = (x, y+2, "lutff_%d/in_3" % i)
            nets["A_%d" % (i+8)] = (x, y+2, "lutff_%d/in_1" % i)
            nets["B_%d" % i] = (x, y+1, "lutff_%d/in_3" % i)
            nets["B_%d" % (i+8)] = (x, y+1, "lutff_%d/in_1" % i)
            nets["D_%d" % i] = (x, y+0, "lutff_%d/in_3" % i)
            nets["D_%d" % (i+8)] = (x, y+0, "lutff_%d/in_1" % i)
        # 32-bit output: 8 bits per tile, from bottom tile upwards.
        for i in range(32):
            nets["O_%d" % i] = (x, y+(i//8), "mult/O_%d" % i)
        return nets
# Return the location of configuration bits for a given DSP cell
def get_dsp_config_db(self, x, y):
assert ((x, y) in self.dsp_tiles[0])
override = { }
if (("%s_%d_%d" % (self.device, x, y)) in dsp_config_db):
override = dsp_config_db["%s_%d_%d" % (self.device, x, y)]
default_db = dsp_config_db["default"]
merged = { }
for cfgkey in default_db:
cx, cy, cbit = default_db[cfgkey]
if cfgkey in override:
cx, cy, cbit = override[cfgkey]
merged[cfgkey] = (x + cx, y + cy, cbit)
return merged
    def tile_db(self, x, y):
        """Return the config-bit database for the tile at (x, y).

        Dispatches first on edge position (IO databases), then on the tile
        kind for the current device. Asserts on unknown tiles.
        """
        # Only these devices have IO on the left and right sides.
        if self.device in ["384", "1k", "8k"]:
            if x == 0: return iotile_l_db
            if x == self.max_x: return iotile_r_db
        # The 5k needs an IO db including the extra bits
        if self.device == "5k":
            if y == 0: return iotile_b_5k_db
            if y == self.max_y: return iotile_t_5k_db
        else:
            if y == 0: return iotile_b_db
            if y == self.max_y: return iotile_t_db
        if self.device == "1k":
            if (x, y) in self.logic_tiles: return logictile_db
            if (x, y) in self.ramb_tiles: return rambtile_db
            if (x, y) in self.ramt_tiles: return ramttile_db
        elif self.device == "5k":
            if (x, y) in self.logic_tiles: return logictile_5k_db
            if (x, y) in self.ramb_tiles: return rambtile_5k_db
            if (x, y) in self.ramt_tiles: return ramttile_5k_db
            if (x, y) in self.ipcon_tiles: return ipcon_5k_db
            if (x, y) in self.dsp_tiles[0]: return dsp0_5k_db
            if (x, y) in self.dsp_tiles[1]: return dsp1_5k_db
            if (x, y) in self.dsp_tiles[2]: return dsp2_5k_db
            if (x, y) in self.dsp_tiles[3]: return dsp3_5k_db
        elif self.device == "8k":
            if (x, y) in self.logic_tiles: return logictile_8k_db
            if (x, y) in self.ramb_tiles: return rambtile_8k_db
            if (x, y) in self.ramt_tiles: return ramttile_8k_db
        elif self.device == "384":
            if (x, y) in self.logic_tiles: return logictile_384_db
        print("Tile type unknown at (%d, %d)" % (x, y))
        assert False
def tile_type(self, x, y):
if x == 0 and (not self.is_ultra()): return "IO"
if y == 0: return "IO"
if x == self.max_x and (not self.is_ultra()): return "IO"
if y == self.max_y: return "IO"
if (x, y) in self.ramb_tiles: return "RAMB"
if (x, y) in self.ramt_tiles: return "RAMT"
if (x, y) in self.logic_tiles: return "LOGIC"
if (x == 0 or x == self.max_x) and self.is_ultra():
if y in [5, 10, 15, 23]:
return "DSP0"
elif y in [6, 11, 16, 24]:
return "DSP1"
elif y in [7, 12, 17, 25]:
return "DSP2"
elif y in [8, 13, 18, 26]:
return "DSP3"
else:
return "IPCON"
assert False
def tile_pos(self, x, y):
if x == 0 and 0 < y < self.max_y: return "l"
if y == 0 and 0 < x < self.max_x: return "b"
if x == self.max_x and 0 < y < self.max_y: return "r"
if y == self.max_y and 0 < x < self.max_x: return "t"
if 0 < x < self.max_x and 0 < y < self.max_y: return "x"
return None
def tile_has_entry(self, x, y, entry):
if entry[1] in ("routing", "buffer"):
return self.tile_has_net(x, y, entry[2]) and self.tile_has_net(x, y, entry[3])
return True
    def tile_has_net(self, x, y, netname):
        """True if net `netname` exists in the tile at (x, y)."""
        # logic_op_* nets only exist on edge tiles that face fabric logic;
        # which edge depends on the direction encoded in the net name. On
        # ultra parts the left/right edges are not IO, so those cases are
        # excluded via is_ultra().
        if netname.startswith("logic_op_"):
            if netname.startswith("logic_op_bot_"):
                if y == self.max_y and 0 < x < self.max_x: return True
            if netname.startswith("logic_op_bnl_"):
                if x == self.max_x and 1 < y < self.max_y and (not self.is_ultra()): return True
                if y == self.max_y and 1 < x < self.max_x: return True
            if netname.startswith("logic_op_bnr_"):
                if x == 0 and 1 < y < self.max_y and (not self.is_ultra()): return True
                if y == self.max_y and 0 < x < self.max_x-1: return True
            if netname.startswith("logic_op_top_"):
                if y == 0 and 0 < x < self.max_x: return True
            if netname.startswith("logic_op_tnl_"):
                if x == self.max_x and 0 < y < self.max_y-1 and (not self.is_ultra()): return True
                if y == 0 and 1 < x < self.max_x: return True
            if netname.startswith("logic_op_tnr_"):
                if x == 0 and 0 < y < self.max_y-1 and (not self.is_ultra()): return True
                if y == 0 and 0 < x < self.max_x-1: return True
            if netname.startswith("logic_op_lft_"):
                if x == self.max_x and (not self.is_ultra()): return True
            if netname.startswith("logic_op_rgt_"):
                if x == 0 and (not self.is_ultra()): return True
            return False
        # All other nets: reject out-of-range tiles, then defer to the
        # position-class net table.
        if not 0 <= x <= self.max_x: return False
        if not 0 <= y <= self.max_y: return False
        return pos_has_net(self.tile_pos(x, y), netname)
    def tile_follow_net(self, x, y, direction, netname):
        """Return `netname` as seen by the tile one step in `direction`, or None.

        Tiles one step inside an edge use the interior ("x") translation rules
        when stepping toward that edge; ultra parts additionally pass corner
        positions through so callers can treat them specially.
        """
        if x == 1 and y not in (0, self.max_y) and direction == 'l': return pos_follow_net("x", "L", netname, self.is_ultra())
        if y == 1 and x not in (0, self.max_x) and direction == 'b': return pos_follow_net("x", "B", netname, self.is_ultra())
        if x == self.max_x-1 and y not in (0, self.max_y) and direction == 'r': return pos_follow_net("x", "R", netname, self.is_ultra())
        if y == self.max_y-1 and x not in (0, self.max_x) and direction == 't': return pos_follow_net("x", "T", netname, self.is_ultra())
        if self.is_ultra(): # Pass through corner positions as they must be handled differently
            if y == 1 and x in (0, self.max_x) and direction == 'b': return pos_follow_net(self.tile_pos(x, y), "B", netname, self.is_ultra())
            if y == self.max_y-1 and x in (0, self.max_x) and direction == 't': return pos_follow_net(self.tile_pos(x, y), "T", netname, self.is_ultra())
            if x == 1 and y in (0, self.max_y) and direction == 'l': return pos_follow_net(self.tile_pos(x, y), "L", netname, self.is_ultra())
            if x == self.max_x-1 and y in (0, self.max_y) and direction == 'r': return pos_follow_net(self.tile_pos(x, y), "R", netname, self.is_ultra())
        return pos_follow_net(self.tile_pos(x, y), direction, netname, self.is_ultra())
def follow_funcnet(self, x, y, func):
neighbours = set()
def do_direction(name, nx, ny):
if (0 < nx < self.max_x or self.is_ultra()) and 0 < ny < self.max_y:
neighbours.add((nx, ny, "neigh_op_%s_%d" % (name, func)))
if nx in (0, self.max_x) and 0 < ny < self.max_y and nx != x and (not self.is_ultra()):
neighbours.add((nx, ny, "logic_op_%s_%d" % (name, func)))
if ny in (0, self.max_y) and 0 < nx < self.max_x and ny != y:
neighbours.add((nx, ny, "logic_op_%s_%d" % (name, func)))
do_direction("bot", x, y+1)
do_direction("bnl", x+1, y+1)
do_direction("bnr", x-1, y+1)
do_direction("top", x, y-1)
do_direction("tnl", x+1, y-1)
do_direction("tnr", x-1, y-1)
do_direction("lft", x+1, y )
do_direction("rgt", x-1, y )
return neighbours
    def lookup_funcnet(self, nx, ny, x, y, func):
        """Resolve function net `func` seen from tile (x, y) to the driving
        net in neighbour tile (nx, ny), or None if nothing drives it.

        RAM read-data bit order depends on the device family, hence the
        per-device branches below.
        """
        npos = self.tile_pos(nx, ny)
        pos = self.tile_pos(x, y)
        if npos is not None and pos is not None:
            if npos == "x":
                # Interior driver: LUT output, DSP product bit, or RAM data.
                if (nx, ny) in self.logic_tiles:
                    return (nx, ny, "lutff_%d/out" % func)
                for i in range(4):
                    if (nx, ny) in self.dsp_tiles[i]: #TODO: check this
                        return (nx, ny, "mult/O_%d" % (i * 8 + func))
                if (nx, ny) in self.ramb_tiles:
                    if self.device == "1k":
                        return (nx, ny, "ram/RDATA_%d" % func)
                    elif self.device == "5k":
                        # 5k/8k reverse the RDATA bit order relative to 1k.
                        return (nx, ny, "ram/RDATA_%d" % (15-func))
                    elif self.device == "8k":
                        return (nx, ny, "ram/RDATA_%d" % (15-func))
                    else:
                        assert False
                if (nx, ny) in self.ramt_tiles:
                    if self.device == "1k":
                        return (nx, ny, "ram/RDATA_%d" % (8+func))
                    elif self.device == "5k":
                        return (nx, ny, "ram/RDATA_%d" % (7-func))
                    elif self.device == "8k":
                        return (nx, ny, "ram/RDATA_%d" % (7-func))
                    else:
                        assert False
            elif pos == "x" and ((npos in ("t", "b")) or ((not self.is_ultra()) and (npos in ("l", "r")))):
                # Edge IO driver: two function indices map to each input pin.
                if func in (0, 4): return (nx, ny, "io_0/D_IN_0")
                if func in (1, 5): return (nx, ny, "io_0/D_IN_1")
                if func in (2, 6): return (nx, ny, "io_1/D_IN_0")
                if func in (3, 7): return (nx, ny, "io_1/D_IN_1")
        return None
def rlookup_funcnet(self, x, y, netname):
funcnets = set()
if netname == "io_0/D_IN_0":
for net in self.follow_funcnet(x, y, 0) | self.follow_funcnet(x, y, 4):
if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
if netname == "io_0/D_IN_1":
for net in self.follow_funcnet(x, y, 1) | self.follow_funcnet(x, y, 5):
if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
if netname == "io_1/D_IN_0":
for net in self.follow_funcnet(x, y, 2) | self.follow_funcnet(x, y, 6):
if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
if netname == "io_1/D_IN_1":
for net in self.follow_funcnet(x, y, 3) | self.follow_funcnet(x, y, 7):
if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
match = re.match(r"lutff_(\d+)/out", netname)
if match:
funcnets |= self.follow_funcnet(x, y, int(match.group(1)))
match = re.match(r"ram/RDATA_(\d+)", netname)
if match:
if self.device == "1k":
funcnets |= self.follow_funcnet(x, y, int(match.group(1)) % 8)
elif self.device == "5k":
funcnets |= self.follow_funcnet(x, y, 7 - int(match.group(1)) % 8)
elif self.device == "8k":
funcnets |= self.follow_funcnet(x, y, 7 - int(match.group(1)) % 8)
else:
assert False
return funcnets
def ultraplus_follow_corner(self, corner, direction, netname):
m = re.match("span4_(horz|vert)_([lrtb])_(\d+)$", netname)
if not m:
return None
cur_edge = m.group(2)
cur_index = int(m.group(3))
if direction not in corner:
return None
if direction != cur_edge:
return None
h_idx, v_idx = self.ultraplus_trace_corner_idx(corner, cur_index)
if h_idx is None and (direction == "b" or direction == "t"):
return None
if v_idx is None and (direction == "l" or direction == "r"):
return None
if corner == "bl" and direction == "l":
return (0, 1, sp4v_normalize("sp4_v_b_%d" % v_idx))
if corner == "bl" and direction == "b":
return (1, 0, ultra_span4_horz_normalize("span4_horz_l_%d" % h_idx))
if corner == "br" and direction == "r":
return (self.max_x, 1, sp4v_normalize("sp4_v_b_%d" % v_idx))
if corner == "br" and direction == "b":
return (self.max_x-1, 0, ultra_span4_horz_normalize("span4_horz_r_%d" % h_idx))
if corner == "tl" and direction == "l":
return (0, self.max_y-1, sp4v_normalize("sp4_v_t_%d" % v_idx))
if corner == "tl" and direction == "t":
return (1, self.max_y, ultra_span4_horz_normalize("span4_horz_l_%d" % h_idx))
if corner == "tr" and direction == "r":
return (self.max_x, self.max_y-1, sp4v_normalize("sp4_v_t_%d" % v_idx))
if corner == "tr" and direction == "t":
return (self.max_x-1, self.max_y, ultra_span4_horz_normalize("span4_horz_r_%d" % h_idx))
assert False
#UltraPlus corner routing: given the corner name and net index,
#return a tuple containing H and V indexes, or none if NA
def ultraplus_trace_corner_idx(self, corner, idx):
h_idx = None
v_idx = None
if corner == "bl" or corner == "br":
if idx < 16:
v_idx = idx + 32
if idx >= 32 and idx < 48:
h_idx = idx - 32
elif corner == "tl" or corner == "tr":
if idx >= 0 and idx < 16:
v_idx = idx
h_idx = idx
return (h_idx, v_idx)
def get_corner(self, x, y):
corner = ""
if y == 0:
corner += "b"
elif y == self.max_y:
corner += "t"
else:
corner += "x"
if x == 0:
corner += "l"
elif x == self.max_x:
corner += "r"
else:
corner += "x"
return corner
def follow_net(self, netspec):
x, y, netname = netspec
neighbours = self.rlookup_funcnet(x, y, netname)
#print(netspec)
#print('\t', neighbours)
if netname == "carry_in" and y > 1:
neighbours.add((x, y-1, "lutff_7/cout"))
if netname == "lutff_7/cout" and y+1 < self.max_y:
neighbours.add((x, y+1, "carry_in"))
if netname.startswith("glb_netwk_"):
for nx in range(self.max_x+1):
for ny in range(self.max_y+1):
if self.tile_pos(nx, ny) is not None:
neighbours.add((nx, ny, netname))
match = re.match(r"sp4_r_v_b_(\d+)", netname)
if match and ((0 < x < self.max_x-1) or (self.is_ultra() and (x < self.max_x))):
neighbours.add((x+1, y, sp4v_normalize("sp4_v_b_" + match.group(1))))
#print('\tafter r_v_b', neighbours)
match = re.match(r"sp4_v_[bt]_(\d+)", netname)
if match and (1 < x < self.max_x or (self.is_ultra() and (x > 0))):
n = sp4v_normalize(netname, "b")
if n is not None:
n = n.replace("sp4_", "sp4_r_")
neighbours.add((x-1, y, n))
#print('\tafter v_[bt]', neighbours)
match = re.match(r"(logic|neigh)_op_(...)_(\d+)", netname)
if match:
if match.group(2) == "bot": nx, ny = (x, y-1)
if match.group(2) == "bnl": nx, ny = (x-1, y-1)
if match.group(2) == "bnr": nx, ny = (x+1, y-1)
if match.group(2) == "top": nx, ny = (x, y+1)
if match.group(2) == "tnl": nx, ny = (x-1, y+1)
if match.group(2) == "tnr": nx, ny = (x+1, y+1)
if match.group(2) == "lft": nx, ny = (x-1, y )
if match.group(2) == "rgt": nx, ny = (x+1, y )
n = self.lookup_funcnet(nx, ny, x, y, int(match.group(3)))
if n is not None:
neighbours.add(n)
for direction in ["l", "r", "t", "b"]:
n = self.tile_follow_net(x, y, direction, netname)
if n is not None:
if direction == "l": s = (x-1, y, n)
if direction == "r": s = (x+1, y, n)
if direction == "t": s = (x, y+1, n)
if direction == "b": s = (x, y-1, n)
if s[0] in (0, self.max_x) and s[1] in (0, self.max_y):
if self.is_ultra():
s = self.ultraplus_follow_corner(self.get_corner(s[0], s[1]), direction, n)
if s is None:
continue
elif re.match("span4_(vert|horz)_[lrtb]_\d+$", n) and not self.is_ultra():
m = re.match("span4_(vert|horz)_([lrtb])_\d+$", n)
vert_net = n.replace("_l_", "_t_").replace("_r_", "_b_").replace("_horz_", "_vert_")
horz_net = n.replace("_t_", "_l_").replace("_b_", "_r_").replace("_vert_", "_horz_")
if s[0] == 0 and s[1] == 0:
if direction == "l": s = (0, 1, vert_net)
if direction == "b": s = (1, 0, horz_net)
if s[0] == self.max_x and s[1] == self.max_y:
if direction == "r": s = (self.max_x, self.max_y-1, vert_net)
if direction == "t": s = (self.max_x-1, self.max_y, horz_net)
vert_net = netname.replace("_l_", "_t_").replace("_r_", "_b_").replace("_horz_", "_vert_")
horz_net = netname.replace("_t_", "_l_").replace("_b_", "_r_").replace("_vert_", "_horz_")
if s[0] == 0 and s[1] == self.max_y:
if direction == "l": s = (0, self.max_y-1, vert_net)
if direction == "t": s = (1, self.max_y, horz_net)
if s[0] == self.max_x and s[1] == 0:
if direction == "r": s = (self.max_x, 1, vert_net)
if direction == "b": s = (self.max_x-1, 0, horz_net)
if self.tile_has_net(s[0], s[1], s[2]):
neighbours.add((s[0], s[1], s[2]))
#print('\tafter directions', neighbours)
return neighbours
def group_segments(self, all_from_tiles=set(), extra_connections=list(), extra_segments=list(), connect_gb=True):
    """Partition the device's active nets into groups of electrically
    connected (x, y, netname) segments.

    all_from_tiles: set of (x, y) tiles whose database entries are seeded
        unconditionally, even when the corresponding config bits are clear.
    extra_connections: iterable of (seg1, seg2) pairs to treat as wired.
    extra_segments: iterable of additional seed segments.
    connect_gb: if True, also connect 'fabout' nets to the global networks.

    Returns a set of tuples; each tuple is one sorted group of segments.
    NOTE: the mutable default arguments are never mutated here.
    """
    seed_segments = set()
    seen_segments = set()
    connected_segments = dict()
    grouped_segments = set()

    seed_segments.update(extra_segments)

    for s1, s2 in extra_connections:
        connected_segments.setdefault(s1, set()).add(s2)
        connected_segments.setdefault(s2, set()).add(s1)
        seed_segments.add(s1)
        seed_segments.add(s2)

    # Seed io_N/D_OUT_0 for every IO block whose PINTYPE bits 2..5 are not
    # all zero (i.e. some output-path PINTYPE configuration is set).
    for idx, tile in self.io_tiles.items():
        tc = tileconfig(tile)
        pintypes = [list("000000"), list("000000")]
        for entry in self.tile_db(idx[0], idx[1]):
            if entry[1].startswith("IOB_") and entry[2].startswith("PINTYPE_") and tc.match(entry[0]):
                pintypes[int(entry[1][-1])][int(entry[2][-1])] = "1"
        if "".join(pintypes[0][2:6]) != "0000":
            seed_segments.add((idx[0], idx[1], "io_0/D_OUT_0"))
        if "".join(pintypes[1][2:6]) != "0000":
            seed_segments.add((idx[0], idx[1], "io_1/D_OUT_0"))

    def add_seed_segments(idx, tile, db):
        # Seed both ends of every routing/buffer switch in this tile; when
        # the switch's config bits are actually set, also record the
        # connection between the two nets.
        tc = tileconfig(tile)
        for entry in db:
            if entry[1] in ("routing", "buffer"):
                config_match = tc.match(entry[0])
                if idx in all_from_tiles or config_match:
                    if not self.tile_has_net(idx[0], idx[1], entry[2]):
                        continue
                    if not self.tile_has_net(idx[0], idx[1], entry[3]):
                        continue
                    s1 = (idx[0], idx[1], entry[2])
                    s2 = (idx[0], idx[1], entry[3])
                    if config_match:
                        connected_segments.setdefault(s1, set()).add(s2)
                        connected_segments.setdefault(s2, set()).add(s1)
                    seed_segments.add(s1)
                    seed_segments.add(s2)

    for idx, tile in self.io_tiles.items():
        add_seed_segments(idx, tile, self.tile_db(idx[0], idx[1]))

    for idx, tile in self.logic_tiles.items():
        if idx in all_from_tiles:
            seed_segments.add((idx[0], idx[1], "lutff_7/cout"))
        if self.device == "1k":
            add_seed_segments(idx, tile, logictile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, logictile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, logictile_8k_db)
        elif self.device == "384":
            add_seed_segments(idx, tile, logictile_384_db)
        else:
            assert False

    for idx, tile in self.ramb_tiles.items():
        if self.device == "1k":
            add_seed_segments(idx, tile, rambtile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, rambtile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, rambtile_8k_db)
        else:
            assert False

    for idx, tile in self.ramt_tiles.items():
        if self.device == "1k":
            add_seed_segments(idx, tile, ramttile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, ramttile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, ramttile_8k_db)
        else:
            assert False

    # DSP and IP-connect tiles are only populated on "5k" devices.
    for dsp_idx, dsp_db in enumerate((dsp0_5k_db, dsp1_5k_db, dsp2_5k_db, dsp3_5k_db)):
        for idx, tile in self.dsp_tiles[dsp_idx].items():
            if self.device == "5k":
                add_seed_segments(idx, tile, dsp_db)

    for idx, tile in self.ipcon_tiles.items():
        if self.device == "5k":
            add_seed_segments(idx, tile, ipcon_5k_db)

    # Tie each padin_N net to its global network when it is in use.
    for padin, pio in enumerate(self.padin_pio_db()):
        s1 = (pio[0], pio[1], "padin_%d" % pio[2])
        s2 = (pio[0], pio[1], "glb_netwk_%d" % padin)
        if s1 in seed_segments or (pio[0], pio[1]) in all_from_tiles:
            connected_segments.setdefault(s1, set()).add(s2)
            connected_segments.setdefault(s2, set()).add(s1)
            seed_segments.add(s1)
            seed_segments.add(s2)

    # Tie each IO-latch 'fabout' to the io_global/latch nets of the IO
    # cells along the same edge.
    for entry in self.iolatch_db():
        if entry[0] == 0 or entry[0] == self.max_x:
            iocells = [(entry[0], i) for i in range(1, self.max_y)]
        if entry[1] == 0 or entry[1] == self.max_y:
            iocells = [(i, entry[1]) for i in range(1, self.max_x)]
        for cell in iocells:
            s1 = (entry[0], entry[1], "fabout")
            s2 = (cell[0], cell[1], "io_global/latch")
            if s1 in seed_segments or s2 in seed_segments or \
                    (entry[0], entry[1]) in all_from_tiles or (cell[0], cell[1]) in all_from_tiles:
                connected_segments.setdefault(s1, set()).add(s2)
                connected_segments.setdefault(s2, set()).add(s1)
                seed_segments.add(s1)
                seed_segments.add(s2)

    if connect_gb:
        for entry in self.gbufin_db():
            s1 = (entry[0], entry[1], "fabout")
            s2 = (entry[0], entry[1], "glb_netwk_%d" % entry[2])
            # BUG FIX: this test previously read `(pio[0], pio[1]) in
            # all_from_tiles`, reusing the stale loop variable from the
            # padin loop above (and raising NameError when that loop never
            # ran).  The tile to test is this gbufin entry's own tile.
            if s1 in seed_segments or (entry[0], entry[1]) in all_from_tiles:
                connected_segments.setdefault(s1, set()).add(s2)
                connected_segments.setdefault(s2, set()).add(s1)
                seed_segments.add(s1)
                seed_segments.add(s2)

    # Flood-fill: repeatedly take a seed segment and collect everything
    # reachable from it via physical net continuation (expand_net) and
    # configured switches (connected_segments).
    while seed_segments:
        queue = set()
        segments = set()
        queue.add(seed_segments.pop())
        while queue:
            next_segment = queue.pop()
            for s in self.expand_net(next_segment):
                if s not in segments:
                    segments.add(s)
                    if s in seen_segments:
                        print("//", s, "has already been seen. Check your bitmapping.")
                        assert False
                    seen_segments.add(s)
                    seed_segments.discard(s)
                    if s in connected_segments:
                        for cs in connected_segments[s]:
                            if cs not in segments:
                                queue.add(cs)
        for s in segments:
            assert s not in seed_segments
        grouped_segments.add(tuple(sorted(segments)))

    return grouped_segments
def expand_net(self, netspec):
    """Return the set of all (x, y, netname) segments that form one
    physical net together with `netspec`, found by transitively
    following the net into neighbouring tiles via follow_net()."""
    segments = set()
    pending = set([netspec])
    while pending:
        current = pending.pop()
        segments.add(current)
        for neighbour in self.follow_net(current):
            if neighbour not in segments:
                pending.add(neighbour)
    return segments
def read_file(self, filename):
    """Parse a textual bitstream (.asc-style) file into this object.

    The file is a sequence of "." directives; tile directives are
    followed by 16 lines of binary/hex config-bit data collected into
    `current_data`.  `expected_data_lines` tracks how many data lines
    are still owed: 0 = none, -1 = inside an ignored block (.comment
    or an unknown directive).
    """
    self.clear()
    current_data = None
    expected_data_lines = 0
    with open(filename, "r") as f:
        for linenum, linetext in enumerate(f):
            # print("DEBUG: input line %d: %s" % (linenum, linetext.strip()))
            line = linetext.strip().split()
            if len(line) == 0:
                # Blank lines may only appear between blocks.
                assert expected_data_lines == 0
                continue
            if line[0][0] != ".":
                # Not a directive: either data belonging to an ignored
                # block, an unrecognized data block, or owed tile data.
                if expected_data_lines == -1:
                    continue
                if line[0][0] not in "0123456789abcdef":
                    print("Warning: ignoring data block in line %d: %s" % (linenum, linetext.strip()))
                    expected_data_lines = 0
                    continue
                assert expected_data_lines != 0
                current_data.append(line[0])
                expected_data_lines -= 1
                continue
            # A new directive must not arrive while tile data is still owed.
            assert expected_data_lines <= 0
            if line[0] in (".io_tile", ".logic_tile", ".ramb_tile", ".ramt_tile", ".ram_data", ".ipcon_tile", ".dsp0_tile", ".dsp1_tile", ".dsp2_tile", ".dsp3_tile"):
                # All of these own a 16-line data block; the per-type
                # branches below store the (shared) current_data list.
                current_data = list()
                expected_data_lines = 16
                self.max_x = max(self.max_x, int(line[1]))
                self.max_y = max(self.max_y, int(line[2]))
            if line[0] == ".io_tile":
                self.io_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".logic_tile":
                self.logic_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ramb_tile":
                self.ramb_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ramt_tile":
                self.ramt_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ipcon_tile":
                self.ipcon_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            match = re.match(r".dsp(\d)_tile", line[0])
            if match:
                self.dsp_tiles[int(match.group(1))][(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ram_data":
                self.ram_data[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".extra_bit":
                self.extra_bits.add((int(line[1]), int(line[2]), int(line[3])))
                continue
            if line[0] == ".device":
                assert line[1] in ["1k", "5k", "8k", "384"]
                self.device = line[1]
                continue
            if line[0] == ".warmboot":
                assert line[1] in ["disabled", "enabled"]
                self.warmboot = line[1] == "enabled"
                continue
            if line[0] == ".sym":
                # .sym <net index> <symbol name>
                self.symbols.setdefault(int(line[1]), set()).add(line[2])
                continue
            if line[0] == ".comment":
                expected_data_lines = -1
                continue
            print("Warning: ignoring line %d: %s" % (linenum, linetext.strip()))
            expected_data_lines = -1
def write_file(self, filename):
    """Write this configuration to `filename` in the textual format
    understood by read_file()."""
    with open(filename, "w") as f:
        f.write(".device %s\n" % self.device)
        if not self.warmboot:
            f.write(".warmboot disabled\n")
        # Emit every known tile (in y, then x order) with its 16 data lines.
        for row in range(self.max_y + 1):
            for col in range(self.max_x + 1):
                if self.tile_pos(col, row) is None:
                    continue
                f.write(".%s_tile %d %d\n" % (self.tile_type(col, row).lower(), col, row))
                for bits in self.tile(col, row):
                    f.write("%s\n" % bits)
        for col, row in sorted(self.ram_data):
            f.write(".ram_data %d %d\n" % (col, row))
            for dataline in self.ram_data[(col, row)]:
                f.write("%s\n" % dataline)
        for extra_bit in sorted(self.extra_bits):
            f.write(".extra_bit %d %d %d\n" % extra_bit)
class tileconfig:
    """Set-of-bit-names view of a tile's config bitmap.

    Every position (row k, column i) is represented as "Bk[i]" when the
    bit is "1" and "!Bk[i]" otherwise, so a pattern of bit names can be
    matched with plain set membership tests.
    """

    def __init__(self, tile):
        self.bits = set()
        for rownum, row in enumerate(tile):
            for colnum, value in enumerate(row):
                name = "B%d[%d]" % (rownum, colnum)
                self.bits.add(name if value == "1" else "!" + name)

    def match(self, pattern):
        """Return True iff every bit named in `pattern` has the required value."""
        return all(bit in self.bits for bit in pattern)
# Index sets of span nets that are considered already canonical on each
# tile edge.  The sp*_normalize helpers below rewrite any index that is
# NOT in the set for its edge into the opposite edge's name.
if False:
    ## Lattice span net name normalization
    # Sparse index sets matching Lattice's own naming; disabled branch,
    # kept for reference.
    valid_sp4_h_l = set([1, 2, 4, 5, 7, 9, 10, 11, 15, 16, 17, 21, 24, 34, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])
    valid_sp4_h_r = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 19, 21, 24, 25, 27, 30, 31, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46])
    valid_sp4_v_t = set([1, 3, 5, 9, 12, 14, 16, 17, 18, 21, 22, 23, 26, 28, 29, 30, 32, 33, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])
    valid_sp4_v_b = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19, 21, 22, 23, 24, 26, 30, 33, 36, 37, 38, 42, 46, 47])
    valid_sp12_h_l = set([3, 4, 5, 12, 14, 16, 17, 18, 21, 22, 23])
    valid_sp12_h_r = set([0, 1, 2, 3, 5, 8, 9, 10, 11, 12, 13, 14, 16, 20, 23])
    valid_sp12_v_t = set([0, 1, 2, 3, 6, 9, 10, 12, 14, 21, 22, 23])
    valid_sp12_v_b = set([0, 1, 6, 7, 8, 11, 12, 14, 16, 18, 19, 20, 21, 23])
else:
    ## IceStorm span net name normalization
    # IceStorm uses simple contiguous ranges instead of the sparse sets.
    valid_sp4_h_l = set(range(36, 48))
    valid_sp4_h_r = set(range(48))
    valid_sp4_v_t = set(range(36, 48))
    valid_sp4_v_b = set(range(48))
    valid_sp12_h_l = set(range(22, 24))
    valid_sp12_h_r = set(range(24))
    valid_sp12_v_t = set(range(22, 24))
    valid_sp12_v_b = set(range(24))
def sp4h_normalize(netname, edge=""):
m = re.match("sp4_h_([lr])_(\d+)$", netname)
assert m
if not m: return None
cur_edge = m.group(1)
cur_index = int(m.group(2))
if cur_edge == edge:
return netname
if cur_edge == "r" and (edge == "l" or (edge == "" and cur_index not in valid_sp4_h_r)):
if cur_index < 12:
return None
return "sp4_h_l_%d" % ((cur_index-12)^1)
if cur_edge == "l" and (edge == "r" or (edge == "" and cur_index not in valid_sp4_h_l)):
if cur_index >= 36:
return None
return "sp4_h_r_%d" % ((cur_index+12)^1)
return netname
# "Normalization" of span4 (not just sp4) is needed during Ultra/UltraPlus
# corner tracing
def ultra_span4_horz_normalize(netname, edge=""):
    """Normalize a span4_horz net name (used during Ultra/UltraPlus corner
    tracing, where full span4 names — not just sp4 — must be normalized).

    With edge "l"/"r", project the wire onto that edge (None if it does
    not reach it); with edge "", rewrite low-indexed "l" names into their
    "r" equivalents.
    """
    match = re.match(r"span4_horz_([rl])_(\d+)$", netname)
    assert match
    if not match:
        return None
    side, index = match.group(1), int(match.group(2))
    if side == edge:
        return netname
    if edge == "":
        if side == "l" and index < 12:
            return "span4_horz_r_%d" % (index + 4)
        return netname
    if edge == "l" and side == "r":
        return None if index < 4 else "span4_horz_l_%d" % (index - 4)
    if edge == "r" and side == "l":
        return "span4_horz_r_%d" % (index + 4) if index < 12 else None
    assert False
def sp4v_normalize(netname, edge=""):
    """Normalize an sp4 vertical span net name.

    With edge "b" or "t", return the name of the same wire as seen from
    that edge of the tile, or None if the wire does not extend there.
    With edge "", rewrite non-canonical indices (per valid_sp4_v_*) into
    the opposite edge's name.
    """
    match = re.match(r"sp4_v_([bt])_(\d+)$", netname)
    assert match
    if not match:
        return None
    side, index = match.group(1), int(match.group(2))
    if side == edge:
        return netname
    if side == "b" and (edge == "t" or (edge == "" and index not in valid_sp4_v_b)):
        return None if index < 12 else "sp4_v_t_%d" % ((index - 12) ^ 1)
    if side == "t" and (edge == "b" or (edge == "" and index not in valid_sp4_v_t)):
        return None if index >= 36 else "sp4_v_b_%d" % ((index + 12) ^ 1)
    return netname
def sp12h_normalize(netname, edge=""):
    """Normalize an sp12 horizontal span net name.

    Same contract as sp4h_normalize, but sp12 wires shift by 2 indices
    per tile and only indices 22..23 (left) / 0..23 (right) exist.
    """
    match = re.match(r"sp12_h_([lr])_(\d+)$", netname)
    assert match
    if not match:
        return None
    side, index = match.group(1), int(match.group(2))
    if side == edge:
        return netname
    if side == "r" and (edge == "l" or (edge == "" and index not in valid_sp12_h_r)):
        return None if index < 2 else "sp12_h_l_%d" % ((index - 2) ^ 1)
    if side == "l" and (edge == "r" or (edge == "" and index not in valid_sp12_h_l)):
        return None if index >= 22 else "sp12_h_r_%d" % ((index + 2) ^ 1)
    return netname
def sp12v_normalize(netname, edge=""):
    """Normalize an sp12 vertical span net name.

    Same contract as sp4v_normalize, but sp12 wires shift by 2 indices
    per tile and only indices 22..23 (top) / 0..23 (bottom) exist.
    """
    match = re.match(r"sp12_v_([bt])_(\d+)$", netname)
    assert match
    if not match:
        return None
    side, index = match.group(1), int(match.group(2))
    if side == edge:
        return netname
    if side == "b" and (edge == "t" or (edge == "" and index not in valid_sp12_v_b)):
        return None if index < 2 else "sp12_v_t_%d" % ((index - 2) ^ 1)
    if side == "t" and (edge == "b" or (edge == "" and index not in valid_sp12_v_t)):
        return None if index >= 22 else "sp12_v_b_%d" % ((index + 2) ^ 1)
    return netname
def netname_normalize(netname, edge="", ramb=False, ramt=False, ramb_8k=False, ramt_8k=False):
    """Translate a raw net name into its canonical IceStorm name.

    Span nets are delegated to the sp*_normalize helpers (with `edge`).
    The ramb/ramt[_8k] flags select the RAM address-pin naming used when
    the tile is the bottom/top half of a block RAM.
    """
    # Span nets have dedicated normalizers.
    if netname.startswith("sp4_v_"):
        return sp4v_normalize(netname, edge)
    if netname.startswith("sp4_h_"):
        return sp4h_normalize(netname, edge)
    if netname.startswith("sp12_v_"):
        return sp12v_normalize(netname, edge)
    if netname.startswith("sp12_h_"):
        return sp12h_normalize(netname, edge)

    if netname.startswith("input_2_"):
        netname = netname.replace("input_2_", "wire_logic_cluster/lc_") + "/in_2"

    # Prefix rewrites; order matters ("lc_trk_" must go before plain "lc_").
    for old, new in (("lc_trk_", "local_"),
                     ("lc_", "lutff_"),
                     ("wire_logic_cluster/", ""),
                     ("wire_io_cluster/", ""),
                     ("wire_mult/", ""),
                     ("wire_con_box/", ""),
                     ("wire_bram/", "")):
        netname = netname.replace(old, new)

    if (ramb or ramt or ramb_8k or ramt_8k) and netname.startswith("input"):
        m = re.match(r"input(\d)_(\d)", netname)
        bit = int(m.group(1)) * 4 + int(m.group(2))
        # On the 8k-style BRAM the address bits are stored in this
        # permuted order (-1 marks unused positions).
        perm = [7, 6, 5, 4, 3, 2, 1, 0, -1, -1, -1, -1, -1, 10, 9, 8]
        if ramb:
            netname = "ram/WADDR_%d" % bit
        if ramt:
            netname = "ram/RADDR_%d" % bit
        if ramb_8k:
            netname = "ram/RADDR_%d" % perm[bit]
        if ramt_8k:
            netname = "ram/WADDR_%d" % perm[bit]

    m = re.match(r"(...)_op_(.*)", netname)
    if m and m.group(1) != "slf":
        netname = "neigh_op_%s_%s" % (m.group(1), m.group(2))

    if re.match(r"lutff_7/(cen|clk|s_r)", netname):
        netname = netname.replace("lutff_7/", "lutff_global/")
    if re.match(r"io_1/(cen|inclk|outclk)", netname):
        netname = netname.replace("io_1/", "io_global/")

    if netname == "carry_in_mux/cout":
        return "carry_in_mux"
    return netname
def pos_has_net(pos, netname):
    """Return False if a tile at border position `pos` ("l"/"r"/"t"/"b",
    anything else means interior) cannot carry net `netname`; left/right
    border tiles have no vertical span nets and no edge-qualified
    horizontal ones, and vice versa for top/bottom tiles."""
    if pos in ("l", "r"):
        return not (re.search(r"_vert_\d+$", netname) or
                    re.search(r"_horz_[rl]_\d+$", netname))
    if pos in ("t", "b"):
        return not (re.search(r"_horz_\d+$", netname) or
                    re.search(r"_vert_[bt]_\d+$", netname))
    return True
def pos_follow_net(pos, direction, netname, is_ultra):
    """Project `netname` into the neighbouring tile in `direction`.

    pos: position of the current tile — "x" for interior tiles,
        "l"/"r"/"t"/"b" for border tiles.
    direction: neighbour to follow ("l"/"r"/"t"/"b"; the uppercase
        variants appear in the Ultra/UltraPlus corner handling).
    is_ultra: True on Ultra/UltraPlus devices, which route spans around
        corners instead of terminating them.

    Returns the net's name in the neighbouring tile, or None if the net
    does not continue in that direction.
    """
    # Interior tiles (and, on ultra devices, left/right border tiles)
    # carry the sp4/sp12 forms of the span nets.
    if pos == "x" or ((pos in ("l", "r")) and is_ultra):
        m = re.match("sp4_h_[lr]_(\d+)$", netname)
        if m and direction in ("l", "L"):
            n = sp4h_normalize(netname, "l")
            if n is not None:
                if direction == "l" or is_ultra:
                    n = re.sub("_l_", "_r_", n)
                    n = sp4h_normalize(n)
                else:
                    # Crossing into a border tile: switch to span4 naming.
                    n = re.sub("_l_", "_", n)
                    n = re.sub("sp4_h_", "span4_horz_", n)
            return n
        if m and direction in ("r", "R"):
            n = sp4h_normalize(netname, "r")
            if n is not None:
                if direction == "r" or is_ultra:
                    n = re.sub("_r_", "_l_", n)
                    n = sp4h_normalize(n)
                else:
                    n = re.sub("_r_", "_", n)
                    n = re.sub("sp4_h_", "span4_horz_", n)
            return n
        m = re.match("sp4_v_[tb]_(\d+)$", netname)
        if m and direction in ("t", "T"):
            n = sp4v_normalize(netname, "t")
            if n is not None:
                if is_ultra and direction == "T" and pos in ("l", "r"):
                    return re.sub("sp4_v_", "span4_vert_", n)
                elif direction == "t":
                    n = re.sub("_t_", "_b_", n)
                    n = sp4v_normalize(n)
                else:
                    n = re.sub("_t_", "_", n)
                    n = re.sub("sp4_v_", "span4_vert_", n)
            return n
        if m and direction in ("b", "B"):
            n = sp4v_normalize(netname, "b")
            if n is not None:
                if is_ultra and direction == "B" and pos in ("l", "r"):
                    return re.sub("sp4_v_", "span4_vert_", n)
                elif direction == "b":
                    n = re.sub("_b_", "_t_", n)
                    n = sp4v_normalize(n)
                else:
                    n = re.sub("_b_", "_", n)
                    n = re.sub("sp4_v_", "span4_vert_", n)
            return n
        m = re.match("sp12_h_[lr]_(\d+)$", netname)
        if m and direction in ("l", "L"):
            n = sp12h_normalize(netname, "l")
            if n is not None:
                if direction == "l" or is_ultra:
                    n = re.sub("_l_", "_r_", n)
                    n = sp12h_normalize(n)
                else:
                    n = re.sub("_l_", "_", n)
                    n = re.sub("sp12_h_", "span12_horz_", n)
            return n
        if m and direction in ("r", "R"):
            n = sp12h_normalize(netname, "r")
            if n is not None:
                if direction == "r" or is_ultra:
                    n = re.sub("_r_", "_l_", n)
                    n = sp12h_normalize(n)
                else:
                    n = re.sub("_r_", "_", n)
                    n = re.sub("sp12_h_", "span12_horz_", n)
            return n
        m = re.match("sp12_v_[tb]_(\d+)$", netname)
        if m and direction in ("t", "T"):
            n = sp12v_normalize(netname, "t")
            if n is not None:
                if direction == "t":
                    n = re.sub("_t_", "_b_", n)
                    n = sp12v_normalize(n)
                elif direction == "T" and pos in ("l", "r"):
                    # Ultra corner: keep the sp12 name unchanged.
                    pass
                else:
                    n = re.sub("_t_", "_", n)
                    n = re.sub("sp12_v_", "span12_vert_", n)
            return n
        if m and direction in ("b", "B"):
            n = sp12v_normalize(netname, "b")
            if n is not None:
                if direction == "b":
                    n = re.sub("_b_", "_t_", n)
                    n = sp12v_normalize(n)
                elif direction == "B" and pos in ("l", "r"):
                    pass
                else:
                    n = re.sub("_b_", "_", n)
                    n = re.sub("sp12_v_", "span12_vert_", n)
            return n
    # Non-ultra left/right border tiles: span4_vert nets shift by 4 per tile.
    if (pos in ("l", "r" )) and (not is_ultra):
        m = re.match("span4_vert_([bt])_(\d+)$", netname)
        if m:
            case, idx = direction + m.group(1), int(m.group(2))
            if case == "tt":
                return "span4_vert_b_%d" % idx
            if case == "tb" and idx >= 4:
                return "span4_vert_b_%d" % (idx-4)
            if case == "bb" and idx < 12:
                return "span4_vert_b_%d" % (idx+4)
            if case == "bb" and idx >= 12:
                return "span4_vert_t_%d" % idx
    # Top/bottom border tiles: span4_horz nets shift by 4 per tile.
    if pos in ("t", "b" ):
        m = re.match("span4_horz_([rl])_(\d+)$", netname)
        if m:
            case, idx = direction + m.group(1), int(m.group(2))
            if direction == "L":
                return ultra_span4_horz_normalize(netname, "l")
            elif direction == "R":
                return ultra_span4_horz_normalize(netname, "r")
            if case == "ll":
                return "span4_horz_r_%d" % idx
            if case == "lr" and idx >= 4:
                return "span4_horz_r_%d" % (idx-4)
            if case == "rr" and idx < 12:
                return "span4_horz_r_%d" % (idx+4)
            if case == "rr" and idx >= 12:
                return "span4_horz_l_%d" % idx
    # Transitions from a border tile back into the fabric: rewrite the
    # plain span* name into the interior sp* form.
    if pos == "l" and direction == "r" and (not is_ultra):
        m = re.match("span4_horz_(\d+)$", netname)
        if m: return sp4h_normalize("sp4_h_l_%s" % m.group(1))
        m = re.match("span12_horz_(\d+)$", netname)
        if m: return sp12h_normalize("sp12_h_l_%s" % m.group(1))
    if pos == "r" and direction == "l" and (not is_ultra):
        m = re.match("span4_horz_(\d+)$", netname)
        if m: return sp4h_normalize("sp4_h_r_%s" % m.group(1))
        m = re.match("span12_horz_(\d+)$", netname)
        if m: return sp12h_normalize("sp12_h_r_%s" % m.group(1))
    if pos == "t" and direction == "b":
        m = re.match("span4_vert_(\d+)$", netname)
        if m: return sp4v_normalize("sp4_v_t_%s" % m.group(1))
        m = re.match("span12_vert_(\d+)$", netname)
        if m: return sp12v_normalize("sp12_v_t_%s" % m.group(1))
    if pos == "b" and direction == "t":
        m = re.match("span4_vert_(\d+)$", netname)
        if m: return sp4v_normalize("sp4_v_b_%s" % m.group(1))
        m = re.match("span12_vert_(\d+)$", netname)
        if m: return sp12v_normalize("sp12_v_b_%s" % m.group(1))
    return None
def get_lutff_bits(tile, index):
    """Return the 20 raw config bits of LUT/FF unit `index` in a logic tile.

    Each unit occupies two consecutive tile rows (2*index and 2*index+1),
    columns 36..45; unset positions remain "-"."""
    bits = list("-" * 20)
    for rownum, row in enumerate(tile):
        if rownum // 2 != index:
            continue
        offset = 10 * (rownum % 2)
        for col in range(36, 46):
            bits[(col - 36) + offset] = row[col]
    return bits
def get_lutff_lut_bits(tile, index):
    """Return the 16 LUT init bits of LUT/FF unit `index`, reordered from
    raw bit positions into truth-table order."""
    TT_ORDER = (4, 14, 15, 5, 6, 16, 17, 7, 3, 13, 12, 2, 1, 11, 10, 0)
    raw = get_lutff_bits(tile, index)
    return [raw[pos] for pos in TT_ORDER]
def get_lutff_seq_bits(tile, index):
    """Return the 4 sequential-logic config bits of LUT/FF unit `index`
    (raw bit positions 8, 9, 18, 19)."""
    raw = get_lutff_bits(tile, index)
    return [raw[pos] for pos in (8, 9, 18, 19)]
def get_carry_cascade_bit(tile):
    """Return the carry-cascade config bit of a logic tile (row 1, column 49)."""
    row = tile[1]
    return row[49]
def get_carry_bit(tile):
    """Return the carry-enable config bit of a logic tile (row 1, column 50)."""
    row = tile[1]
    return row[50]
def get_negclk_bit(tile):
    """Return the negated-clock config bit of a tile (row 0, column 0)."""
    row = tile[0]
    return row[0]
def key_netname(netname):
    """Sort key for net names: zero-pad every embedded number to 9 digits
    so that lexicographic order matches numeric order."""
    return re.sub(r"\d+", lambda num: format(int(num.group(0)), "09d"), netname)
def run_checks_neigh():
    """Consistency check for the neighbour finder: the relation computed
    by follow_net() must be symmetric — s2 in follow_net(s1) implies
    s1 in follow_net(s2).  Violations are printed with both segments'
    neighbour lists."""
    print("Running consistency checks on neighbour finder..")
    ic = iceconfig()
    # ic.setup_empty_1k()
    ic.setup_empty_5k()
    # ic.setup_empty_8k()
    # ic.setup_empty_384()

    all_segments = set()

    def add_segments(idx, db):
        # Collect every net appearing as source or destination of a
        # routing/buffer switch in this tile.
        for entry in db:
            if entry[1] in ("routing", "buffer"):
                if not ic.tile_has_net(idx[0], idx[1], entry[2]): continue
                if not ic.tile_has_net(idx[0], idx[1], entry[3]): continue
                all_segments.add((idx[0], idx[1], entry[2]))
                all_segments.add((idx[0], idx[1], entry[3]))

    for x in range(ic.max_x+1):
        # BUG FIX: the y loop used range(ic.max_x+1); the tile grid is not
        # square on all devices, so rows were missed (or scanned past the
        # top edge).  It must iterate up to ic.max_y.
        for y in range(ic.max_y+1):
            # Skip the corners.
            if x in (0, ic.max_x) and y in (0, ic.max_y):
                continue
            add_segments((x, y), ic.tile_db(x, y))
            if (x, y) in ic.logic_tiles:
                all_segments.add((x, y, "lutff_7/cout"))

    for s1 in all_segments:
        for s2 in ic.follow_net(s1):
            # if s1[1] > 4: continue
            if s1 not in ic.follow_net(s2):
                print("ERROR: %s -> %s, but not vice versa!" % (s1, s2))
                print("Neighbours of %s:" % (s1,))
                for s in ic.follow_net(s1):
                    print(" ", s)
                print("Neighbours of %s:" % (s2,))
                for s in ic.follow_net(s2):
                    print(" ", s)
                print()
def run_checks():
    """Run all database/neighbour consistency self-checks."""
    run_checks_neigh()
def parse_db(text, device="1k"):
    """Parse a tab-separated config-bit database into a list of entries.

    Each entry is [bits, kind, args...] where `bits` is the comma-split
    list from the first column.  Lines carrying a device-specific global
    network prefix ("1k_", "8k_", "384_" before "glb_netwk_") are kept
    only for the matching device — "8k" lines also apply to "5k", whose
    global network is identical — and the prefix is stripped.
    """
    db = list()
    for rawline in text.split("\n"):
        line = rawline
        skip = False
        # Device prefixes are tested in a fixed order: 1k, then 8k, then 384.
        for tag, devices in (("1k", ("1k",)),
                             ("8k", ("8k", "5k")),
                             ("384", ("384",))):
            stripped = rawline.replace(tag + "_glb_netwk_", "glb_netwk_")
            if stripped != rawline:
                if device in devices:
                    line = stripped
                else:
                    skip = True
                break
        if skip:
            continue
        fields = line.split("\t")
        if len(fields) == 0 or fields[0] == "":
            continue
        fields[0] = fields[0].split(",")
        db.append(fields)
    return db
# Per-device map of extra (non-tile) config bits, keyed by the raw
# (bank, x, y) bit address; each value names a padin -> glb_netwk
# connection and its network number.
extra_bits_db = {
    "1k": {
        (0, 330, 142): ("padin_glb_netwk", "0"),
        (0, 331, 142): ("padin_glb_netwk", "1"),
        (1, 330, 143): ("padin_glb_netwk", "2"),
        (1, 331, 143): ("padin_glb_netwk", "3"), # (1 3) (331 144) (331 144) routing T_0_0.padin_3 <X> T_0_0.glb_netwk_3
        (1, 330, 142): ("padin_glb_netwk", "4"),
        (1, 331, 142): ("padin_glb_netwk", "5"),
        (0, 330, 143): ("padin_glb_netwk", "6"), # (0 0) (330 143) (330 143) routing T_0_0.padin_6 <X> T_0_0.glb_netwk_6
        (0, 331, 143): ("padin_glb_netwk", "7"),
    },
    "5k": {
        (0, 690, 334): ("padin_glb_netwk", "0"), # check
        (0, 691, 334): ("padin_glb_netwk", "1"), # good
        (1, 690, 175): ("padin_glb_netwk", "2"), # good
        (1, 691, 175): ("padin_glb_netwk", "3"), # check
        (1, 690, 174): ("padin_glb_netwk", "4"), # good (INTOSC only)
        (1, 691, 174): ("padin_glb_netwk", "5"), # good (INTOSC only)
        (0, 690, 335): ("padin_glb_netwk", "6"), # check
        (0, 691, 335): ("padin_glb_netwk", "7"), # good
    },
    "8k": {
        (0, 870, 270): ("padin_glb_netwk", "0"),
        (0, 871, 270): ("padin_glb_netwk", "1"),
        (1, 870, 271): ("padin_glb_netwk", "2"),
        (1, 871, 271): ("padin_glb_netwk", "3"),
        (1, 870, 270): ("padin_glb_netwk", "4"),
        (1, 871, 270): ("padin_glb_netwk", "5"),
        (0, 870, 271): ("padin_glb_netwk", "6"),
        (0, 871, 271): ("padin_glb_netwk", "7"),
    },
    "384": {
        (0, 180, 78): ("padin_glb_netwk", "0"),
        (0, 181, 78): ("padin_glb_netwk", "1"),
        (1, 180, 79): ("padin_glb_netwk", "2"),
        (1, 181, 79): ("padin_glb_netwk", "3"),
        (1, 180, 78): ("padin_glb_netwk", "4"),
        (1, 181, 78): ("padin_glb_netwk", "5"),
        (0, 180, 79): ("padin_glb_netwk", "6"),
        (0, 181, 79): ("padin_glb_netwk", "7"),
    }
}

# Per-device list of (x, y, n) tiles whose 'fabout' net can drive global
# network glb_netwk_<n> (used by group_segments when connect_gb is set).
gbufin_db = {
    "1k": [
        (13, 8, 7),
        ( 0, 8, 6),
        ( 7, 17, 1),
        ( 7, 0, 0),
        ( 0, 9, 3),
        (13, 9, 2),
        ( 6, 0, 5),
        ( 6, 17, 4),
    ],
    "5k": [
        ( 6, 0, 6), #checked
        (12, 0, 5), #checked
        (13, 0, 0), #checked
        (19, 0, 7), #checked
        ( 6, 31, 3), #checked
        (12, 31, 4), #checked
        (13, 31, 1), #checked
        (19, 31, 2), #checked
    ],
    "8k": [
        (33, 16, 7),
        ( 0, 16, 6),
        (17, 33, 1),
        (17, 0, 0),
        ( 0, 17, 3),
        (33, 17, 2),
        (16, 0, 5),
        (16, 33, 4),
    ],
    "384": [
        ( 7, 4, 7),
        ( 0, 4, 6),
        ( 4, 9, 1),
        ( 4, 0, 0),
        ( 0, 5, 3),
        ( 7, 5, 2),
        ( 3, 0, 5),
        ( 3, 9, 4),
    ]
}

# To figure these out:
#   1. Copy io_latched.sh and convert it for your pinout (like io_latched_5k.sh).
#   2. Run it. It will create an io_latched_<device>.work directory with a bunch of files.
#   3. Grep the *.ve files in that directory for "'fabout')". The coordinates
#      before it are where the io latches are.
#
# Note: This may not work if your icepack configuration of cell sizes is incorrect because
# icebox_vlog.py won't correctly interpret the meaning of particular bits.
#
# Per-device list of (x, y) tiles whose 'fabout' net drives the
# io_global/latch nets of the IO cells along the same edge.
iolatch_db = {
    "1k": [
        ( 0, 7),
        (13, 10),
        ( 5, 0),
        ( 8, 17),
    ],
    "5k": [
        (14, 0),
        (14, 31),
    ],
    "8k": [
        ( 0, 15),
        (33, 18),
        (18, 0),
        (15, 33),
    ],
    "384": [
        ( 0, 3), #384?
        ( 7, 5), #384?
        ( 2, 0), #384?
        ( 5, 9), #384?
    ],
}

# The x, y cell locations of the WARMBOOT controls. Run tests/sb_warmboot.v
# through icecube.sh to determine these values.
warmbootinfo_db = {
    "1k": {
        "BOOT": ( 12, 0, "fabout" ),
        "S0":   ( 13, 1, "fabout" ),
        "S1":   ( 13, 2, "fabout" ),
    },
    "5k": {
        # These are the right locations but may be the wrong order.
        "BOOT": ( 22, 0, "fabout" ),
        "S0":   ( 23, 0, "fabout" ),
        "S1":   ( 24, 0, "fabout" ),
    },
    "8k": {
        "BOOT": ( 31, 0, "fabout" ),
        "S0":   ( 33, 1, "fabout" ),
        "S1":   ( 33, 2, "fabout" ),
    },
    "384": {
        "BOOT": ( 6, 0, "fabout" ), #384?
        "S0":   ( 7, 1, "fabout" ),
        "S1":   ( 7, 2, "fabout" ),
    }
}

# Device/package combinations without (some) PLLs; the values appear to
# name the pllinfo_db entries that are absent on that package — TODO
# confirm against the consumers of this table.
noplls_db = {
    "1k-swg16tr": [ "1k" ],
    "1k-cm36": [ "1k" ],
    "1k-cm49": [ "1k" ],
    "8k-cm81": [ "8k_1" ],
    "8k-cm81:4k": [ "8k_1" ],
    "1k-qn48": [ "1k" ],
    "1k-cb81": [ "1k" ],
    "1k-cb121": [ "1k" ],
    "1k-vq100": [ "1k" ],
    "384-qn32": [ "384" ],
}
pllinfo_db = {
"1k": {
"LOC" : (6, 0),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 0, 3, "PLLCONFIG_5"),
"PLLTYPE_1": ( 0, 5, "PLLCONFIG_1"),
"PLLTYPE_2": ( 0, 5, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 0, 5, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 0, 2, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 0, 3, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 0, 4, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 0, 4, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 0, 3, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 0, 3, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 0, 3, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 0, 3, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 0, 3, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 0, 3, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 0, 4, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 0, 4, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 0, 4, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 0, 4, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 0, 4, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 0, 4, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 0, 4, "PLLCONFIG_8"),
"DIVR_0": ( 0, 1, "PLLCONFIG_1"),
"DIVR_1": ( 0, 1, "PLLCONFIG_2"),
"DIVR_2": ( 0, 1, "PLLCONFIG_3"),
"DIVR_3": ( 0, 1, "PLLCONFIG_4"),
"DIVF_0": ( 0, 1, "PLLCONFIG_5"),
"DIVF_1": ( 0, 1, "PLLCONFIG_6"),
"DIVF_2": ( 0, 1, "PLLCONFIG_7"),
"DIVF_3": ( 0, 1, "PLLCONFIG_8"),
"DIVF_4": ( 0, 1, "PLLCONFIG_9"),
"DIVF_5": ( 0, 2, "PLLCONFIG_1"),
"DIVF_6": ( 0, 2, "PLLCONFIG_2"),
"DIVQ_0": ( 0, 2, "PLLCONFIG_3"),
"DIVQ_1": ( 0, 2, "PLLCONFIG_4"),
"DIVQ_2": ( 0, 2, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 0, 2, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 0, 2, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 0, 2, "PLLCONFIG_8"),
"TEST_MODE": ( 0, 3, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 6, 0, 1),
"PLLOUT_B": ( 7, 0, 0),
"REFERENCECLK": ( 0, 1, "fabout"),
"EXTFEEDBACK": ( 0, 2, "fabout"),
"DYNAMICDELAY_0": ( 0, 4, "fabout"),
"DYNAMICDELAY_1": ( 0, 5, "fabout"),
"DYNAMICDELAY_2": ( 0, 6, "fabout"),
"DYNAMICDELAY_3": ( 0, 10, "fabout"),
"DYNAMICDELAY_4": ( 0, 11, "fabout"),
"DYNAMICDELAY_5": ( 0, 12, "fabout"),
"DYNAMICDELAY_6": ( 0, 13, "fabout"),
"DYNAMICDELAY_7": ( 0, 14, "fabout"),
"LOCK": ( 1, 1, "neigh_op_bnl_1"),
"BYPASS": ( 1, 0, "fabout"),
"RESETB": ( 2, 0, "fabout"),
"LATCHINPUTVALUE": ( 5, 0, "fabout"),
"SDO": (12, 1, "neigh_op_bnr_3"),
"SDI": ( 4, 0, "fabout"),
"SCLK": ( 3, 0, "fabout"),
},
"5k": {
"LOC" : (12, 31),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": (12, 31, "PLLCONFIG_5"),
"PLLTYPE_1": (14, 31, "PLLCONFIG_1"),
"PLLTYPE_2": (14, 31, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": (14, 31, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": (11, 31, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": (12, 31, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": (13, 31, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": (13, 31, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": (12, 31, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": (12, 31, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": (12, 31, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": (12, 31, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": (12, 31, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": (12, 31, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": (13, 31, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": (13, 31, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": (13, 31, "PLLCONFIG_3"),
"FDA_RELATIVE_0": (13, 31, "PLLCONFIG_5"),
"FDA_RELATIVE_1": (13, 31, "PLLCONFIG_6"),
"FDA_RELATIVE_2": (13, 31, "PLLCONFIG_7"),
"FDA_RELATIVE_3": (13, 31, "PLLCONFIG_8"),
"DIVR_0": (10, 31, "PLLCONFIG_1"),
"DIVR_1": (10, 31, "PLLCONFIG_2"),
"DIVR_2": (10, 31, "PLLCONFIG_3"),
"DIVR_3": (10, 31, "PLLCONFIG_4"),
"DIVF_0": (10, 31, "PLLCONFIG_5"),
"DIVF_1": (10, 31, "PLLCONFIG_6"),
"DIVF_2": (10, 31, "PLLCONFIG_7"),
"DIVF_3": (10, 31, "PLLCONFIG_8"),
"DIVF_4": (10, 31, "PLLCONFIG_9"),
"DIVF_5": (11, 31, "PLLCONFIG_1"),
"DIVF_6": (11, 31, "PLLCONFIG_2"),
"DIVQ_0": (11, 31, "PLLCONFIG_3"),
"DIVQ_1": (11, 31, "PLLCONFIG_4"),
"DIVQ_2": (11, 31, "PLLCONFIG_5"),
"FILTER_RANGE_0": (11, 31, "PLLCONFIG_6"),
"FILTER_RANGE_1": (11, 31, "PLLCONFIG_7"),
"FILTER_RANGE_2": (11, 31, "PLLCONFIG_8"),
"TEST_MODE": (12, 31, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 12, 31, 1),
"PLLOUT_B": ( 13, 31, 0),
"REFERENCECLK": ( 10, 31, "fabout"),
"EXTFEEDBACK": ( 11, 31, "fabout"),
"DYNAMICDELAY_0": ( 1, 31, "fabout"),
"DYNAMICDELAY_1": ( 2, 31, "fabout"),
"DYNAMICDELAY_2": ( 3, 31, "fabout"),
"DYNAMICDELAY_3": ( 4, 31, "fabout"),
"DYNAMICDELAY_4": ( 5, 31, "fabout"),
"DYNAMICDELAY_5": ( 7, 31, "fabout"),
"DYNAMICDELAY_6": ( 8, 31, "fabout"),
"DYNAMICDELAY_7": ( 9, 31, "fabout"),
"LOCK": ( 1, 30, "neigh_op_tnl_1"), #check?
"BYPASS": ( 15, 31, "fabout"),
"RESETB": ( 16, 31, "fabout"),
"LATCHINPUTVALUE": ( 14, 31, "fabout"),
"SDO": ( 24, 30, "neigh_op_tnr_1"), #check?
"SDI": ( 18, 31, "fabout"),
"SCLK": ( 17, 31, "fabout"),
},
"8k_0": {
"LOC" : (16, 0),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 16, 0, "PLLCONFIG_5"),
"PLLTYPE_1": ( 18, 0, "PLLCONFIG_1"),
"PLLTYPE_2": ( 18, 0, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 18, 0, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 15, 0, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 16, 0, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 17, 0, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 17, 0, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 16, 0, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 16, 0, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 16, 0, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 16, 0, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 16, 0, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 16, 0, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 17, 0, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 17, 0, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 17, 0, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 17, 0, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 17, 0, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 17, 0, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 17, 0, "PLLCONFIG_8"),
"DIVR_0": ( 14, 0, "PLLCONFIG_1"),
"DIVR_1": ( 14, 0, "PLLCONFIG_2"),
"DIVR_2": ( 14, 0, "PLLCONFIG_3"),
"DIVR_3": ( 14, 0, "PLLCONFIG_4"),
"DIVF_0": ( 14, 0, "PLLCONFIG_5"),
"DIVF_1": ( 14, 0, "PLLCONFIG_6"),
"DIVF_2": ( 14, 0, "PLLCONFIG_7"),
"DIVF_3": ( 14, 0, "PLLCONFIG_8"),
"DIVF_4": ( 14, 0, "PLLCONFIG_9"),
"DIVF_5": ( 15, 0, "PLLCONFIG_1"),
"DIVF_6": ( 15, 0, "PLLCONFIG_2"),
"DIVQ_0": ( 15, 0, "PLLCONFIG_3"),
"DIVQ_1": ( 15, 0, "PLLCONFIG_4"),
"DIVQ_2": ( 15, 0, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 15, 0, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 15, 0, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 15, 0, "PLLCONFIG_8"),
"TEST_MODE": ( 16, 0, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 16, 0, 1),
"PLLOUT_B": ( 17, 0, 0),
"REFERENCECLK": ( 13, 0, "fabout"),
"EXTFEEDBACK": ( 14, 0, "fabout"),
"DYNAMICDELAY_0": ( 5, 0, "fabout"),
"DYNAMICDELAY_1": ( 6, 0, "fabout"),
"DYNAMICDELAY_2": ( 7, 0, "fabout"),
"DYNAMICDELAY_3": ( 8, 0, "fabout"),
"DYNAMICDELAY_4": ( 9, 0, "fabout"),
"DYNAMICDELAY_5": ( 10, 0, "fabout"),
"DYNAMICDELAY_6": ( 11, 0, "fabout"),
"DYNAMICDELAY_7": ( 12, 0, "fabout"),
"LOCK": ( 1, 1, "neigh_op_bnl_1"),
"BYPASS": ( 19, 0, "fabout"),
"RESETB": ( 20, 0, "fabout"),
"LATCHINPUTVALUE": ( 15, 0, "fabout"),
"SDO": ( 32, 1, "neigh_op_bnr_3"),
"SDI": ( 22, 0, "fabout"),
"SCLK": ( 21, 0, "fabout"),
},
"8k_1": {
"LOC" : (16, 33),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 16, 33, "PLLCONFIG_5"),
"PLLTYPE_1": ( 18, 33, "PLLCONFIG_1"),
"PLLTYPE_2": ( 18, 33, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 18, 33, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 15, 33, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 16, 33, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 17, 33, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 17, 33, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 16, 33, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 16, 33, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 16, 33, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 16, 33, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 16, 33, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 16, 33, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 17, 33, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 17, 33, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 17, 33, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 17, 33, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 17, 33, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 17, 33, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 17, 33, "PLLCONFIG_8"),
"DIVR_0": ( 14, 33, "PLLCONFIG_1"),
"DIVR_1": ( 14, 33, "PLLCONFIG_2"),
"DIVR_2": ( 14, 33, "PLLCONFIG_3"),
"DIVR_3": ( 14, 33, "PLLCONFIG_4"),
"DIVF_0": ( 14, 33, "PLLCONFIG_5"),
"DIVF_1": ( 14, 33, "PLLCONFIG_6"),
"DIVF_2": ( 14, 33, "PLLCONFIG_7"),
"DIVF_3": ( 14, 33, "PLLCONFIG_8"),
"DIVF_4": ( 14, 33, "PLLCONFIG_9"),
"DIVF_5": ( 15, 33, "PLLCONFIG_1"),
"DIVF_6": ( 15, 33, "PLLCONFIG_2"),
"DIVQ_0": ( 15, 33, "PLLCONFIG_3"),
"DIVQ_1": ( 15, 33, "PLLCONFIG_4"),
"DIVQ_2": ( 15, 33, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 15, 33, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 15, 33, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 15, 33, "PLLCONFIG_8"),
"TEST_MODE": ( 16, 33, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 16, 33, 1),
"PLLOUT_B": ( 17, 33, 0),
"REFERENCECLK": ( 13, 33, "fabout"),
"EXTFEEDBACK": ( 14, 33, "fabout"),
"DYNAMICDELAY_0": ( 5, 33, "fabout"),
"DYNAMICDELAY_1": ( 6, 33, "fabout"),
"DYNAMICDELAY_2": ( 7, 33, "fabout"),
"DYNAMICDELAY_3": ( 8, 33, "fabout"),
"DYNAMICDELAY_4": ( 9, 33, "fabout"),
"DYNAMICDELAY_5": ( 10, 33, "fabout"),
"DYNAMICDELAY_6": ( 11, 33, "fabout"),
"DYNAMICDELAY_7": ( 12, 33, "fabout"),
"LOCK": ( 1, 32, "neigh_op_tnl_1"),
"BYPASS": ( 19, 33, "fabout"),
"RESETB": ( 20, 33, "fabout"),
"LATCHINPUTVALUE": ( 15, 33, "fabout"),
"SDO": ( 32, 32, "neigh_op_tnr_1"),
"SDI": ( 22, 33, "fabout"),
"SCLK": ( 21, 33, "fabout"),
},
}
# Maps a device type ("1k", "5k", "8k", "384") to the eight PIO pad locations
# that can drive the global networks.  List index i corresponds to
# glb_netwk_i (explicit in the "1k" comments; presumably the same ordering
# for the other devices -- confirm against the bitstream docs).  Each entry
# is an (x, y, block) tile coordinate.
padin_pio_db = {
"1k": [
(13, 8, 1), # glb_netwk_0
( 0, 8, 1), # glb_netwk_1
( 7, 17, 0), # glb_netwk_2
( 7, 0, 0), # glb_netwk_3
( 0, 9, 0), # glb_netwk_4
(13, 9, 0), # glb_netwk_5
( 6, 0, 1), # glb_netwk_6
( 6, 17, 1), # glb_netwk_7
],
"5k": [
(19, 0, 1), #0 fixed
( 6, 0, 1), #1 fixed
(13, 31, 0), #2 fixed
(13, 0, 0), #3 fixed
(19, 31, 0), #These two are questionable, but keep the order correct
( 6, 31, 0), #They may need to be fixed if other package options are added.
(12, 0, 1), #6 fixed
(12, 31, 1), #7 fixed
],
"8k": [
(33, 16, 1),
( 0, 16, 1),
(17, 33, 0),
(17, 0, 0),
( 0, 17, 0),
(33, 17, 0),
(16, 0, 1),
(16, 33, 1),
],
"384": [
( 7, 4, 1),
( 0, 4, 1),
( 4, 9, 0),
( 4, 0, 0), #QFN32: no pin?!
( 0, 5, 0),
( 7, 5, 0),
( 3, 0, 1), #QFN32: no pin?!
( 3, 9, 1),
]
}
# Maps a device type to a list of 6-tuples pairing each IO block with its
# "IeRen" block (per the "1k" comment below: IO-block (X, Y, Z) <->
# IeRen-block (X, Y, Z)).  "IeRen" presumably refers to the input-enable /
# pull-up-resistor-enable configuration bits for the pin -- confirm against
# the bitstream documentation.
# NOTE(review): the mapping is NOT always the identity -- e.g. the "1k"
# entries for (6,0)/(7,0) and (9,17)/(10,17) cross over, and (13,11)/(13,12)
# map to a different Y -- so do not regenerate this table programmatically.
ieren_db = {
"1k": [
# IO-block (X, Y, Z) <-> IeRen-block (X, Y, Z)
( 0, 2, 0, 0, 2, 1),
( 0, 2, 1, 0, 2, 0),
( 0, 3, 0, 0, 3, 1),
( 0, 3, 1, 0, 3, 0),
( 0, 4, 0, 0, 4, 1),
( 0, 4, 1, 0, 4, 0),
( 0, 5, 0, 0, 5, 1),
( 0, 5, 1, 0, 5, 0),
( 0, 6, 0, 0, 6, 1),
( 0, 6, 1, 0, 6, 0),
( 0, 8, 0, 0, 8, 1),
( 0, 8, 1, 0, 8, 0),
( 0, 9, 0, 0, 9, 1),
( 0, 9, 1, 0, 9, 0),
( 0, 10, 0, 0, 10, 1),
( 0, 10, 1, 0, 10, 0),
( 0, 11, 0, 0, 11, 1),
( 0, 11, 1, 0, 11, 0),
( 0, 12, 0, 0, 12, 1),
( 0, 12, 1, 0, 12, 0),
( 0, 13, 0, 0, 13, 1),
( 0, 13, 1, 0, 13, 0),
( 0, 14, 0, 0, 14, 1),
( 0, 14, 1, 0, 14, 0),
( 1, 0, 0, 1, 0, 0),
( 1, 0, 1, 1, 0, 1),
( 1, 17, 0, 1, 17, 0),
( 1, 17, 1, 1, 17, 1),
( 2, 0, 0, 2, 0, 0),
( 2, 0, 1, 2, 0, 1),
( 2, 17, 0, 2, 17, 0),
( 2, 17, 1, 2, 17, 1),
( 3, 0, 0, 3, 0, 0),
( 3, 0, 1, 3, 0, 1),
( 3, 17, 0, 3, 17, 0),
( 3, 17, 1, 3, 17, 1),
( 4, 0, 0, 4, 0, 0),
( 4, 0, 1, 4, 0, 1),
( 4, 17, 0, 4, 17, 0),
( 4, 17, 1, 4, 17, 1),
( 5, 0, 0, 5, 0, 0),
( 5, 0, 1, 5, 0, 1),
( 5, 17, 0, 5, 17, 0),
( 5, 17, 1, 5, 17, 1),
# cross-over: IO (6,0)/(7,0) pair with IeRen blocks in swapped tiles
( 6, 0, 0, 7, 0, 0),
( 6, 0, 1, 6, 0, 0),
( 6, 17, 0, 6, 17, 0),
( 6, 17, 1, 6, 17, 1),
( 7, 0, 0, 6, 0, 1),
( 7, 0, 1, 7, 0, 1),
( 7, 17, 0, 7, 17, 0),
( 7, 17, 1, 7, 17, 1),
( 8, 0, 0, 8, 0, 0),
( 8, 0, 1, 8, 0, 1),
( 8, 17, 0, 8, 17, 0),
( 8, 17, 1, 8, 17, 1),
( 9, 0, 0, 9, 0, 0),
( 9, 0, 1, 9, 0, 1),
# cross-over: IO (9,17)/(10,17) swap IeRen tiles
( 9, 17, 0, 10, 17, 0),
( 9, 17, 1, 10, 17, 1),
(10, 0, 0, 10, 0, 0),
(10, 0, 1, 10, 0, 1),
(10, 17, 0, 9, 17, 0),
(10, 17, 1, 9, 17, 1),
(11, 0, 0, 11, 0, 0),
(11, 0, 1, 11, 0, 1),
(11, 17, 0, 11, 17, 0),
(11, 17, 1, 11, 17, 1),
(12, 0, 0, 12, 0, 0),
(12, 0, 1, 12, 0, 1),
(12, 17, 0, 12, 17, 0),
(12, 17, 1, 12, 17, 1),
(13, 1, 0, 13, 1, 0),
(13, 1, 1, 13, 1, 1),
(13, 2, 0, 13, 2, 0),
(13, 2, 1, 13, 2, 1),
(13, 3, 1, 13, 3, 1),
(13, 4, 0, 13, 4, 0),
(13, 4, 1, 13, 4, 1),
(13, 6, 0, 13, 6, 0),
(13, 6, 1, 13, 6, 1),
(13, 7, 0, 13, 7, 0),
(13, 7, 1, 13, 7, 1),
(13, 8, 0, 13, 8, 0),
(13, 8, 1, 13, 8, 1),
(13, 9, 0, 13, 9, 0),
(13, 9, 1, 13, 9, 1),
# shifted: IO rows 11/12 use IeRen rows 10/11 on the right edge
(13, 11, 0, 13, 10, 0),
(13, 11, 1, 13, 10, 1),
(13, 12, 0, 13, 11, 0),
(13, 12, 1, 13, 11, 1),
(13, 13, 0, 13, 13, 0),
(13, 13, 1, 13, 13, 1),
(13, 14, 0, 13, 14, 0),
(13, 14, 1, 13, 14, 1),
(13, 15, 0, 13, 15, 0),
(13, 15, 1, 13, 15, 1),
],
# 8k: mapping appears to be the identity for every listed IO block
"8k": [
( 0, 3, 0, 0, 3, 0),
( 0, 3, 1, 0, 3, 1),
( 0, 4, 0, 0, 4, 0),
( 0, 4, 1, 0, 4, 1),
( 0, 5, 0, 0, 5, 0),
( 0, 5, 1, 0, 5, 1),
( 0, 6, 0, 0, 6, 0),
( 0, 6, 1, 0, 6, 1),
( 0, 7, 0, 0, 7, 0),
( 0, 7, 1, 0, 7, 1),
( 0, 8, 0, 0, 8, 0),
( 0, 8, 1, 0, 8, 1),
( 0, 9, 0, 0, 9, 0),
( 0, 9, 1, 0, 9, 1),
( 0, 10, 0, 0, 10, 0),
( 0, 10, 1, 0, 10, 1),
( 0, 11, 0, 0, 11, 0),
( 0, 11, 1, 0, 11, 1),
( 0, 12, 0, 0, 12, 0),
( 0, 12, 1, 0, 12, 1),
( 0, 13, 0, 0, 13, 0),
( 0, 13, 1, 0, 13, 1),
( 0, 14, 0, 0, 14, 0),
( 0, 14, 1, 0, 14, 1),
( 0, 16, 0, 0, 16, 0),
( 0, 16, 1, 0, 16, 1),
( 0, 17, 0, 0, 17, 0),
( 0, 17, 1, 0, 17, 1),
( 0, 18, 0, 0, 18, 0),
( 0, 18, 1, 0, 18, 1),
( 0, 19, 0, 0, 19, 0),
( 0, 19, 1, 0, 19, 1),
( 0, 20, 0, 0, 20, 0),
( 0, 20, 1, 0, 20, 1),
( 0, 21, 0, 0, 21, 0),
( 0, 21, 1, 0, 21, 1),
( 0, 22, 0, 0, 22, 0),
( 0, 22, 1, 0, 22, 1),
( 0, 23, 0, 0, 23, 0),
( 0, 23, 1, 0, 23, 1),
( 0, 24, 0, 0, 24, 0),
( 0, 24, 1, 0, 24, 1),
( 0, 25, 0, 0, 25, 0),
( 0, 25, 1, 0, 25, 1),
( 0, 27, 0, 0, 27, 0),
( 0, 27, 1, 0, 27, 1),
( 0, 28, 0, 0, 28, 0),
( 0, 28, 1, 0, 28, 1),
( 0, 30, 0, 0, 30, 0),
( 0, 30, 1, 0, 30, 1),
( 0, 31, 0, 0, 31, 0),
( 0, 31, 1, 0, 31, 1),
( 1, 33, 0, 1, 33, 0),
( 1, 33, 1, 1, 33, 1),
( 2, 0, 0, 2, 0, 0),
( 2, 0, 1, 2, 0, 1),
( 2, 33, 0, 2, 33, 0),
( 2, 33, 1, 2, 33, 1),
( 3, 0, 0, 3, 0, 0),
( 3, 0, 1, 3, 0, 1),
( 3, 33, 0, 3, 33, 0),
( 3, 33, 1, 3, 33, 1),
( 4, 0, 0, 4, 0, 0),
( 4, 0, 1, 4, 0, 1),
( 4, 33, 0, 4, 33, 0),
( 4, 33, 1, 4, 33, 1),
( 5, 0, 0, 5, 0, 0),
( 5, 0, 1, 5, 0, 1),
( 5, 33, 0, 5, 33, 0),
( 5, 33, 1, 5, 33, 1),
( 6, 0, 0, 6, 0, 0),
( 6, 0, 1, 6, 0, 1),
( 6, 33, 0, 6, 33, 0),
( 6, 33, 1, 6, 33, 1),
( 7, 0, 0, 7, 0, 0),
( 7, 0, 1, 7, 0, 1),
( 7, 33, 0, 7, 33, 0),
( 7, 33, 1, 7, 33, 1),
( 8, 0, 0, 8, 0, 0),
( 8, 0, 1, 8, 0, 1),
( 8, 33, 0, 8, 33, 0),
( 8, 33, 1, 8, 33, 1),
( 9, 0, 0, 9, 0, 0),
( 9, 0, 1, 9, 0, 1),
( 9, 33, 0, 9, 33, 0),
( 9, 33, 1, 9, 33, 1),
(10, 0, 0, 10, 0, 0),
(10, 0, 1, 10, 0, 1),
(10, 33, 0, 10, 33, 0),
(10, 33, 1, 10, 33, 1),
(11, 0, 0, 11, 0, 0),
(11, 0, 1, 11, 0, 1),
(11, 33, 0, 11, 33, 0),
(11, 33, 1, 11, 33, 1),
(12, 0, 0, 12, 0, 0),
(12, 0, 1, 12, 0, 1),
(12, 33, 0, 12, 33, 0),
(13, 0, 0, 13, 0, 0),
(13, 0, 1, 13, 0, 1),
(13, 33, 0, 13, 33, 0),
(13, 33, 1, 13, 33, 1),
(14, 0, 0, 14, 0, 0),
(14, 0, 1, 14, 0, 1),
(14, 33, 0, 14, 33, 0),
(14, 33, 1, 14, 33, 1),
(15, 0, 0, 15, 0, 0),
(15, 0, 1, 15, 0, 1),
(16, 0, 0, 16, 0, 0),
(16, 0, 1, 16, 0, 1),
(16, 33, 0, 16, 33, 0),
(16, 33, 1, 16, 33, 1),
(17, 0, 0, 17, 0, 0),
(17, 0, 1, 17, 0, 1),
(17, 33, 0, 17, 33, 0),
(17, 33, 1, 17, 33, 1),
(18, 33, 0, 18, 33, 0),
(18, 33, 1, 18, 33, 1),
(19, 0, 0, 19, 0, 0),
(19, 0, 1, 19, 0, 1),
(19, 33, 0, 19, 33, 0),
(19, 33, 1, 19, 33, 1),
(20, 0, 0, 20, 0, 0),
(20, 0, 1, 20, 0, 1),
(20, 33, 0, 20, 33, 0),
(20, 33, 1, 20, 33, 1),
(21, 0, 0, 21, 0, 0),
(21, 0, 1, 21, 0, 1),
(21, 33, 0, 21, 33, 0),
(21, 33, 1, 21, 33, 1),
(22, 0, 0, 22, 0, 0),
(22, 0, 1, 22, 0, 1),
(22, 33, 0, 22, 33, 0),
(22, 33, 1, 22, 33, 1),
(23, 0, 0, 23, 0, 0),
(23, 0, 1, 23, 0, 1),
(23, 33, 0, 23, 33, 0),
(23, 33, 1, 23, 33, 1),
(24, 0, 0, 24, 0, 0),
(24, 0, 1, 24, 0, 1),
(24, 33, 0, 24, 33, 0),
(24, 33, 1, 24, 33, 1),
(25, 0, 0, 25, 0, 0),
(25, 33, 0, 25, 33, 0),
(25, 33, 1, 25, 33, 1),
(26, 0, 0, 26, 0, 0),
(26, 0, 1, 26, 0, 1),
(26, 33, 0, 26, 33, 0),
(26, 33, 1, 26, 33, 1),
(27, 0, 0, 27, 0, 0),
(27, 0, 1, 27, 0, 1),
(27, 33, 0, 27, 33, 0),
(27, 33, 1, 27, 33, 1),
(28, 0, 0, 28, 0, 0),
(28, 33, 1, 28, 33, 1),
(29, 0, 0, 29, 0, 0),
(29, 0, 1, 29, 0, 1),
(29, 33, 0, 29, 33, 0),
(29, 33, 1, 29, 33, 1),
(30, 0, 0, 30, 0, 0),
(30, 0, 1, 30, 0, 1),
(30, 33, 0, 30, 33, 0),
(30, 33, 1, 30, 33, 1),
(31, 0, 0, 31, 0, 0),
(31, 0, 1, 31, 0, 1),
(31, 33, 0, 31, 33, 0),
(31, 33, 1, 31, 33, 1),
(33, 1, 0, 33, 1, 0),
(33, 1, 1, 33, 1, 1),
(33, 2, 0, 33, 2, 0),
(33, 2, 1, 33, 2, 1),
(33, 3, 0, 33, 3, 0),
(33, 3, 1, 33, 3, 1),
(33, 4, 0, 33, 4, 0),
(33, 4, 1, 33, 4, 1),
(33, 5, 0, 33, 5, 0),
(33, 5, 1, 33, 5, 1),
(33, 6, 0, 33, 6, 0),
(33, 6, 1, 33, 6, 1),
(33, 7, 0, 33, 7, 0),
(33, 7, 1, 33, 7, 1),
(33, 8, 0, 33, 8, 0),
(33, 9, 0, 33, 9, 0),
(33, 9, 1, 33, 9, 1),
(33, 10, 0, 33, 10, 0),
(33, 10, 1, 33, 10, 1),
(33, 11, 0, 33, 11, 0),
(33, 11, 1, 33, 11, 1),
(33, 12, 0, 33, 12, 0),
(33, 13, 0, 33, 13, 0),
(33, 13, 1, 33, 13, 1),
(33, 14, 0, 33, 14, 0),
(33, 14, 1, 33, 14, 1),
(33, 15, 0, 33, 15, 0),
(33, 15, 1, 33, 15, 1),
(33, 16, 0, 33, 16, 0),
(33, 16, 1, 33, 16, 1),
(33, 17, 0, 33, 17, 0),
(33, 17, 1, 33, 17, 1),
(33, 19, 0, 33, 19, 0),
(33, 19, 1, 33, 19, 1),
(33, 20, 0, 33, 20, 0),
(33, 20, 1, 33, 20, 1),
(33, 21, 0, 33, 21, 0),
(33, 21, 1, 33, 21, 1),
(33, 22, 0, 33, 22, 0),
(33, 22, 1, 33, 22, 1),
(33, 23, 0, 33, 23, 0),
(33, 23, 1, 33, 23, 1),
(33, 24, 0, 33, 24, 0),
(33, 24, 1, 33, 24, 1),
(33, 25, 0, 33, 25, 0),
(33, 25, 1, 33, 25, 1),
(33, 26, 0, 33, 26, 0),
(33, 26, 1, 33, 26, 1),
(33, 27, 0, 33, 27, 0),
(33, 27, 1, 33, 27, 1),
(33, 28, 0, 33, 28, 0),
(33, 28, 1, 33, 28, 1),
(33, 29, 1, 33, 29, 1),
(33, 30, 0, 33, 30, 0),
(33, 30, 1, 33, 30, 1),
(33, 31, 0, 33, 31, 0),
],
# 384: IeRen Z bit is inverted relative to the IO block's Z bit
"384": [
( 0, 1, 0, 0, 1, 1),
( 0, 1, 1, 0, 1, 0),
( 0, 2, 0, 0, 2, 1),
( 0, 2, 1, 0, 2, 0),
( 0, 4, 0, 0, 4, 1),
( 0, 4, 1, 0, 4, 0),
( 0, 5, 0, 0, 5, 1),
( 0, 5, 1, 0, 5, 0),
( 0, 6, 0, 0, 6, 1),
( 0, 6, 1, 0, 6, 0),
( 0, 7, 0, 0, 7, 1),
( 0, 7, 1, 0, 7, 0),
( 2, 9, 0, 2, 9, 1),
( 2, 9, 1, 2, 9, 0),
( 3, 0, 0, 3, 0, 1),
( 3, 0, 1, 3, 0, 0),
( 3, 9, 0, 3, 9, 1),
( 3, 9, 1, 3, 9, 0),
( 4, 0, 0, 4, 0, 1),
( 4, 0, 1, 4, 0, 0),
( 4, 9, 0, 4, 9, 1),
( 4, 9, 1, 4, 9, 0),
( 5, 0, 0, 5, 0, 1),
( 5, 0, 1, 5, 0, 0),
( 5, 9, 0, 5, 9, 1),
( 5, 9, 1, 5, 9, 0),
( 6, 0, 0, 6, 0, 1),
( 6, 0, 1, 6, 0, 0),
( 6, 9, 0, 6, 9, 1),
( 6, 9, 1, 6, 9, 0),
( 7, 3, 1, 7, 3, 0),
( 7, 4, 0, 7, 4, 1),
( 7, 4, 1, 7, 4, 0),
( 7, 5, 0, 7, 5, 1),
( 7, 5, 1, 7, 5, 0),
( 7, 6, 0, 7, 6, 1),
( 7, 6, 1, 7, 6, 0),
],
# 5k: mostly Z-inverted like "384", but entries are not sorted and the
# mapping is irregular in places -- treat as authoritative as-is.
"5k": [
( 8, 0, 0, 8, 0, 1),
( 9, 0, 1, 9, 0, 0),
( 9, 0, 0, 9, 0, 1),
(13, 0, 1, 13, 0, 0),
(15, 0, 0, 15, 0, 1),
(16, 0, 0, 16, 0, 1),
(17, 0, 0, 17, 0, 1),
(18, 0, 0, 18, 0, 1),
(19, 0, 0, 19, 0, 1),
(23, 0, 0, 23, 0, 1),
(24, 0, 0, 24, 0, 1),
(24, 0, 1, 24, 0, 0),
(23, 0, 1, 23, 0, 0),
(22, 0, 1, 22, 0, 0),
(21, 0, 1, 21, 0, 0),
(19, 0, 1, 19, 0, 0),
(18, 0, 1, 18, 0, 0),
(19, 31, 0, 19, 31, 1),
(19, 31, 1, 19, 31, 0),
(18, 31, 0, 18, 31, 1),
(18, 31, 1, 18, 31, 0),
(17, 31, 0, 17, 31, 1),
(16, 31, 1, 16, 31, 0),
(16, 31, 0, 16, 31, 1),
(13, 31, 1, 13, 31, 0),
(12, 31, 1, 12, 31, 0),
( 9, 31, 1, 9, 31, 0),
(13, 31, 0, 13, 31, 1),
( 4, 31, 0, 4, 31, 1),
( 5, 31, 0, 5, 31, 1),
( 6, 31, 0, 6, 31, 1),
( 8, 31, 1, 8, 31, 0),
( 8, 31, 0, 8, 31, 1),
( 9, 31, 0, 9, 31, 1),
( 6, 0, 1, 6, 0, 0),
( 7, 0, 1, 7, 0, 0),
( 5, 0, 0, 5, 0, 1),
( 6, 0, 0, 6, 0, 1),
( 7, 0, 0, 7, 0, 1)
]
}
# This dictionary maps package variants to a table of pin names and their
# corresponding grid location (x, y, block). This is most easily found through
# the package view in iCEcube2 by hovering the mouse over each pin.
pinloc_db = {
"1k-swg16tr": [
( "A2", 6, 17, 1),
( "A4", 2, 17, 0),
( "B1", 11, 17, 1),
( "B2", 0, 8, 1),
( "B3", 0, 9, 0),
( "C1", 12, 0, 0),
( "C2", 11, 0, 1),
( "C3", 11, 0, 0),
( "D1", 12, 0, 1),
( "D3", 6, 0, 1),
],
"1k-cm36": [
( "A1", 0, 13, 0),
( "A2", 4, 17, 1),
( "A3", 7, 17, 0),
( "B1", 0, 13, 1),
( "B3", 6, 17, 1),
( "B4", 13, 9, 0),
( "B5", 13, 11, 0),
( "B6", 13, 11, 1),
( "C1", 0, 9, 0),
( "C2", 0, 9, 1),
( "C3", 4, 17, 0),
( "C5", 13, 8, 1),
( "C6", 13, 12, 0),
( "D1", 0, 8, 1),
( "D5", 12, 0, 1),
( "D6", 13, 6, 0),
( "E1", 0, 8, 0),
( "E2", 6, 0, 0),
( "E3", 10, 0, 0),
( "E4", 11, 0, 0),
( "E5", 12, 0, 0),
( "E6", 13, 4, 1),
( "F2", 6, 0, 1),
( "F3", 10, 0, 1),
( "F5", 11, 0, 1),
],
"1k-cm49": [
( "A1", 0, 11, 1),
( "A2", 3, 17, 1),
( "A3", 8, 17, 1),
( "A4", 8, 17, 0),
( "A5", 9, 17, 1),
( "A6", 10, 17, 0),
( "A7", 9, 17, 0),
( "B1", 0, 11, 0),
( "B2", 0, 13, 0),
( "B3", 4, 17, 0),
( "B4", 6, 17, 1),
( "C1", 0, 5, 0),
( "C2", 0, 13, 1),
( "C4", 7, 17, 0),
( "C5", 13, 12, 0),
( "C6", 13, 11, 1),
( "C7", 13, 11, 0),
( "D1", 0, 5, 1),
( "D2", 0, 9, 0),
( "D3", 0, 9, 1),
( "D4", 4, 17, 1),
( "D6", 13, 8, 1),
( "D7", 13, 9, 0),
( "E2", 0, 8, 1),
( "E6", 12, 0, 1),
( "E7", 13, 4, 1),
( "F2", 0, 8, 0),
( "F3", 6, 0, 0),
( "F4", 10, 0, 0),
( "F5", 11, 0, 0),
( "F6", 12, 0, 0),
( "F7", 13, 6, 0),
( "G3", 6, 0, 1),
( "G4", 10, 0, 1),
( "G6", 11, 0, 1),
],
"1k-cm81": [
( "A1", 1, 17, 1),
( "A2", 4, 17, 0),
( "A3", 5, 17, 0),
( "A4", 6, 17, 0),
( "A6", 8, 17, 1),
( "A7", 9, 17, 0),
( "A8", 10, 17, 0),
( "A9", 13, 14, 1),
( "B1", 0, 13, 0),
( "B2", 0, 14, 0),
( "B3", 2, 17, 1),
( "B4", 4, 17, 1),
( "B5", 8, 17, 0),
( "B6", 9, 17, 1),
( "B7", 10, 17, 1),
( "B8", 11, 17, 0),
( "B9", 13, 11, 1),
( "C1", 0, 13, 1),
( "C2", 0, 14, 1),
( "C3", 0, 12, 1),
( "C4", 6, 17, 1),
( "C5", 7, 17, 0),
( "C9", 13, 12, 0),
( "D1", 0, 11, 1),
( "D2", 0, 12, 0),
( "D3", 0, 9, 0),
( "D5", 3, 17, 1),
( "D6", 13, 6, 0),
( "D7", 13, 7, 0),
( "D8", 13, 9, 0),
( "D9", 13, 11, 0),
( "E1", 0, 10, 1),
( "E2", 0, 10, 0),
( "E3", 0, 8, 1),
( "E4", 0, 11, 0),
( "E5", 5, 17, 1),
( "E7", 13, 6, 1),
( "E8", 13, 8, 1),
( "F1", 0, 8, 0),
( "F3", 0, 9, 1),
( "F7", 12, 0, 1),
( "F8", 13, 4, 0),
( "G1", 0, 5, 1),
( "G3", 0, 5, 0),
( "G4", 6, 0, 0),
( "G5", 10, 0, 0),
( "G6", 11, 0, 0),
( "G7", 12, 0, 0),
( "G8", 13, 4, 1),
( "G9", 13, 2, 1),
( "H1", 2, 0, 0),
( "H4", 6, 0, 1),
( "H5", 10, 0, 1),
( "H7", 11, 0, 1),
( "H9", 13, 2, 0),
( "J1", 3, 0, 0),
( "J2", 2, 0, 1),
( "J3", 3, 0, 1),
( "J4", 5, 0, 0),
( "J6", 7, 0, 0),
( "J7", 9, 0, 1),
( "J8", 13, 1, 0),
( "J9", 13, 1, 1),
],
"1k-cm121": [
( "A1", 0, 14, 0),
( "A2", 2, 17, 1),
( "A3", 3, 17, 0),
( "A5", 5, 17, 1),
( "A7", 8, 17, 0),
( "A8", 10, 17, 1),
( "A9", 11, 17, 0),
("A10", 12, 17, 0),
("A11", 13, 15, 0),
( "B1", 0, 13, 0),
( "B2", 1, 17, 1),
( "B3", 2, 17, 0),
( "B4", 3, 17, 1),
( "B5", 4, 17, 1),
( "B7", 9, 17, 0),
( "B8", 11, 17, 1),
( "B9", 12, 17, 1),
("B10", 13, 15, 1),
("B11", 13, 14, 1),
( "C1", 0, 12, 0),
( "C2", 0, 13, 1),
( "C3", 0, 14, 1),
( "C4", 1, 17, 0),
( "C5", 4, 17, 0),
( "C6", 7, 17, 1),
( "C7", 8, 17, 1),
( "C8", 9, 17, 1),
( "C9", 10, 17, 0),
("C10", 13, 14, 0),
("C11", 13, 13, 1),
( "D1", 0, 11, 0),
( "D2", 0, 12, 1),
( "D3", 0, 11, 1),
( "D4", 0, 10, 1),
( "D5", 6, 17, 1),
( "D6", 7, 17, 0),
("D10", 13, 12, 1),
("D11", 13, 11, 1),
( "E2", 0, 10, 0),
( "E3", 0, 9, 1),
( "E4", 0, 9, 0),
( "E6", 5, 17, 0),
( "E7", 13, 12, 0),
( "E8", 13, 13, 0),
( "E9", 13, 9, 0),
("E10", 13, 9, 1),
( "F2", 0, 6, 0),
( "F3", 0, 5, 0),
( "F4", 0, 8, 1),
( "F5", 0, 8, 0),
( "F6", 6, 17, 0),
( "F8", 13, 11, 0),
( "F9", 13, 8, 1),
("F11", 13, 7, 1),
( "G2", 0, 5, 1),
( "G4", 0, 3, 0),
( "G8", 12, 0, 1),
( "G9", 13, 8, 0),
("G11", 13, 7, 0),
( "H1", 0, 6, 1),
( "H2", 0, 4, 1),
( "H4", 0, 2, 0),
( "H5", 6, 0, 0),
( "H6", 10, 0, 0),
( "H7", 11, 0, 0),
( "H8", 12, 0, 0),
( "H9", 13, 6, 1),
("H10", 13, 2, 1),
("H11", 13, 4, 1),
( "J1", 0, 4, 0),
( "J2", 1, 0, 1),
( "J5", 6, 0, 1),
( "J6", 10, 0, 1),
( "J8", 11, 0, 1),
("J10", 13, 2, 0),
("J11", 13, 6, 0),
( "K1", 0, 3, 1),
( "K2", 2, 0, 0),
( "K3", 2, 0, 1),
( "K4", 4, 0, 0),
( "K5", 5, 0, 0),
( "K7", 7, 0, 1),
( "K8", 9, 0, 0),
( "K9", 13, 1, 0),
("K10", 13, 1, 1),
("K11", 13, 3, 1),
( "L1", 0, 2, 1),
( "L2", 3, 0, 0),
( "L3", 3, 0, 1),
( "L4", 4, 0, 1),
( "L5", 7, 0, 0),
( "L7", 8, 0, 0),
( "L9", 8, 0, 1),
("L10", 9, 0, 1),
("L11", 13, 4, 0),
],
"1k-cb81": [
( "A2", 2, 17, 1),
( "A3", 3, 17, 1),
( "A4", 6, 17, 1),
( "A7", 11, 17, 0),
( "A8", 12, 17, 1),
( "B1", 0, 13, 1),
( "B2", 0, 14, 0),
( "B3", 0, 13, 0),
( "B4", 5, 17, 1),
( "B5", 8, 17, 1),
( "B6", 9, 17, 1),
( "B7", 11, 17, 1),
( "B8", 12, 17, 0),
( "C1", 0, 12, 0),
( "C2", 0, 10, 0),
( "C3", 0, 14, 1),
( "C4", 1, 17, 1),
( "C5", 8, 17, 0),
( "C6", 10, 17, 0),
( "C7", 13, 15, 0),
( "C8", 13, 15, 1),
( "C9", 13, 14, 1),
( "D1", 0, 9, 0),
( "D2", 0, 10, 1),
( "D3", 0, 12, 1),
( "D4", 5, 17, 0),
( "D5", 4, 17, 0),
( "D6", 7, 17, 0),
( "D7", 13, 13, 0),
( "D8", 13, 13, 1),
( "E1", 0, 8, 1),
( "E2", 0, 8, 0),
( "E3", 0, 9, 1),
( "E6", 10, 17, 1),
( "E7", 13, 12, 0),
( "E8", 13, 11, 0),
( "E9", 13, 11, 1),
( "F2", 0, 6, 1),
( "F3", 0, 6, 0),
( "F6", 13, 8, 0),
( "F7", 13, 9, 0),
( "F8", 13, 8, 1),
( "F9", 13, 7, 1),
( "G1", 0, 4, 1),
( "G2", 0, 2, 1),
( "G3", 3, 0, 1),
( "G4", 4, 0, 0),
( "G5", 10, 0, 0),
( "G6", 13, 4, 0),
( "G7", 13, 4, 1),
( "G8", 13, 6, 1),
( "G9", 13, 7, 0),
( "H2", 0, 4, 0),
( "H3", 2, 0, 1),
( "H4", 6, 0, 0),
( "H5", 10, 0, 1),
( "H7", 11, 0, 0),
( "H8", 12, 0, 1),
( "J2", 2, 0, 0),
( "J3", 6, 0, 1),
( "J7", 11, 0, 1),
( "J8", 12, 0, 0),
],
"1k-cb121": [
( "A2", 1, 17, 1),
( "A3", 2, 17, 0),
( "A4", 4, 17, 0),
( "A5", 3, 17, 1),
( "A6", 4, 17, 1),
( "A8", 10, 17, 0),
("A10", 12, 17, 1),
("A11", 13, 15, 0),
( "B1", 0, 14, 0),
( "B3", 1, 17, 0),
( "B4", 2, 17, 1),
( "B5", 3, 17, 0),
( "B8", 10, 17, 1),
( "B9", 12, 17, 0),
("B11", 13, 15, 1),
( "C1", 0, 14, 1),
( "C2", 0, 11, 1),
( "C3", 0, 13, 1),
( "C4", 0, 13, 0),
( "C5", 5, 17, 0),
( "C6", 7, 17, 0),
( "C7", 8, 17, 1),
( "C8", 11, 17, 0),
( "C9", 11, 17, 1),
("C11", 13, 14, 1),
( "D1", 0, 10, 1),
( "D2", 0, 11, 0),
( "D3", 0, 9, 0),
( "D4", 0, 12, 0),
( "D5", 5, 17, 1),
( "D6", 6, 17, 1),
( "D7", 8, 17, 0),
( "D8", 13, 12, 0),
( "D9", 13, 13, 0),
("D10", 13, 13, 1),
("D11", 13, 14, 0),
( "E2", 0, 10, 0),
( "E3", 0, 9, 1),
( "E4", 0, 12, 1),
( "E5", 6, 17, 0),
( "E6", 7, 17, 1),
( "E7", 9, 17, 0),
( "E8", 13, 11, 0),
( "E9", 13, 11, 1),
("E11", 13, 12, 1),
( "F2", 0, 6, 1),
( "F3", 0, 5, 1),
( "F4", 0, 8, 1),
( "F7", 9, 17, 1),
( "F8", 13, 8, 1),
( "F9", 13, 9, 0),
("F10", 13, 9, 1),
( "G1", 0, 6, 0),
( "G3", 0, 5, 0),
( "G4", 0, 8, 0),
( "G7", 13, 6, 1),
( "G8", 13, 7, 0),
( "G9", 13, 7, 1),
("G10", 13, 8, 0),
( "H1", 0, 3, 1),
( "H2", 0, 4, 1),
( "H3", 0, 4, 0),
( "H4", 4, 0, 0),
( "H5", 4, 0, 1),
( "H6", 10, 0, 0),
( "H7", 13, 4, 1),
( "H8", 13, 6, 0),
( "H9", 13, 4, 0),
("H10", 13, 3, 1),
("H11", 9, 0, 1),
( "J1", 0, 3, 0),
( "J2", 0, 2, 0),
( "J3", 0, 2, 1),
( "J4", 2, 0, 1),
( "J5", 3, 0, 0),
( "J6", 10, 0, 1),
( "J8", 11, 0, 0),
( "J9", 12, 0, 1),
("J11", 8, 0, 1),
( "K3", 1, 0, 0),
( "K4", 1, 0, 1),
( "K8", 11, 0, 1),
( "K9", 12, 0, 0),
("K11", 9, 0, 0),
( "L2", 2, 0, 0),
( "L3", 3, 0, 1),
( "L4", 5, 0, 0),
( "L5", 5, 0, 1),
( "L8", 7, 0, 0),
( "L9", 6, 0, 1),
("L10", 7, 0, 1),
("L11", 8, 0, 0),
],
"1k-cb132": [
( "A1", 1, 17, 1),
( "A2", 2, 17, 1),
( "A4", 4, 17, 0),
( "A5", 4, 17, 1),
( "A6", 6, 17, 1),
( "A7", 7, 17, 0),
("A10", 10, 17, 0),
("A12", 12, 17, 0),
( "B1", 0, 14, 1),
("B14", 13, 15, 0),
( "C1", 0, 14, 0),
( "C3", 0, 13, 1),
( "C4", 1, 17, 0),
( "C5", 3, 17, 0),
( "C6", 5, 17, 0),
( "C7", 6, 17, 0),
( "C8", 8, 17, 0),
( "C9", 9, 17, 0),
("C10", 11, 17, 0),
("C11", 11, 17, 1),
("C12", 12, 17, 1),
("C14", 13, 14, 0),
( "D1", 0, 11, 1),
( "D3", 0, 13, 0),
( "D4", 0, 12, 1),
( "D5", 2, 17, 0),
( "D6", 3, 17, 1),
( "D7", 5, 17, 1),
( "D8", 7, 17, 1),
( "D9", 8, 17, 1),
("D10", 9, 17, 1),
("D11", 10, 17, 1),
("D12", 13, 15, 1),
("D14", 13, 13, 1),
( "E1", 0, 11, 0),
( "E4", 0, 12, 0),
("E11", 13, 14, 1),
("E12", 13, 13, 0),
("E14", 13, 12, 0),
( "F3", 0, 10, 0),
( "F4", 0, 10, 1),
("F11", 13, 12, 1),
("F12", 13, 11, 1),
("F14", 13, 8, 1),
( "G1", 0, 8, 1),
( "G3", 0, 8, 0),
( "G4", 0, 6, 1),
("G11", 13, 11, 0),
("G12", 13, 9, 1),
("G14", 13, 9, 0),
( "H1", 0, 9, 0),
( "H3", 0, 9, 1),
( "H4", 0, 6, 0),
("H11", 13, 8, 0),
("H12", 13, 7, 1),
( "J1", 0, 5, 1),
( "J3", 0, 5, 0),
("J11", 13, 7, 0),
("J12", 13, 6, 1),
( "K3", 0, 3, 0),
( "K4", 0, 3, 1),
("K11", 13, 4, 1),
("K12", 13, 4, 0),
("K14", 13, 6, 0),
( "L1", 0, 2, 0),
( "L4", 1, 0, 1),
( "L5", 3, 0, 1),
( "L6", 4, 0, 1),
( "L7", 8, 0, 0),
( "L8", 9, 0, 0),
( "L9", 10, 0, 0),
("L12", 13, 2, 0),
("L14", 13, 3, 1),
( "M1", 0, 2, 1),
( "M3", 1, 0, 0),
( "M4", 3, 0, 0),
( "M6", 5, 0, 1),
( "M7", 6, 0, 0),
( "M8", 8, 0, 1),
( "M9", 9, 0, 1),
("M11", 11, 0, 0),
("M12", 13, 1, 0),
("N14", 13, 2, 1),
( "P2", 2, 0, 0),
( "P3", 2, 0, 1),
( "P4", 4, 0, 0),
( "P5", 5, 0, 0),
( "P7", 6, 0, 1),
( "P8", 7, 0, 0),
( "P9", 7, 0, 1),
("P10", 10, 0, 1),
("P11", 11, 0, 1),
("P12", 12, 0, 0),
("P13", 12, 0, 1),
("P14", 13, 1, 1),
],
"1k-qn84": [
( "A1", 0, 14, 0),
( "A2", 0, 13, 0),
( "A3", 0, 12, 0),
( "A4", 0, 11, 0),
( "A5", 0, 10, 0),
( "A8", 0, 9, 0),
( "A9", 0, 8, 1),
("A10", 0, 5, 1),
("A11", 0, 4, 0),
("A12", 0, 2, 0),
("A13", 4, 0, 0),
("A14", 6, 0, 1),
("A16", 6, 0, 0),
("A19", 9, 0, 1),
("A20", 10, 0, 1),
("A22", 11, 0, 1),
("A23", 12, 0, 0),
("A25", 13, 4, 0),
("A26", 13, 6, 0),
("A27", 13, 7, 1),
("A29", 13, 8, 1),
("A31", 13, 11, 1),
("A32", 13, 12, 1),
("A33", 13, 13, 1),
("A34", 13, 14, 0),
("A35", 13, 15, 0),
("A38", 11, 17, 0),
("A39", 10, 17, 0),
("A40", 9, 17, 0),
("A41", 8, 17, 0),
("A43", 7, 17, 0),
("A44", 6, 17, 0),
("A45", 5, 17, 0),
("A46", 4, 17, 0),
("A47", 3, 17, 0),
("A48", 1, 17, 1),
( "B1", 0, 13, 1),
( "B2", 0, 12, 1),
( "B3", 0, 11, 1),
( "B4", 0, 10, 1),
( "B5", 0, 9, 1),
( "B7", 0, 8, 0),
( "B8", 0, 5, 0),
( "B9", 0, 3, 0),
("B10", 5, 0, 0),
("B11", 5, 0, 1),
("B12", 7, 0, 0),
("B13", 8, 0, 0),
("B14", 9, 0, 0),
("B15", 10, 0, 0),
("B17", 11, 0, 0),
("B18", 12, 0, 1),
("B19", 13, 3, 1),
("B20", 13, 6, 1),
("B21", 13, 7, 0),
("B22", 13, 9, 0),
("B23", 13, 11, 0),
("B24", 13, 12, 0),
("B26", 13, 14, 1),
("B27", 13, 15, 1),
("B29", 10, 17, 1),
("B30", 9, 17, 1),
("B31", 8, 17, 1),
("B32", 6, 17, 1),
("B34", 4, 17, 1),
("B35", 3, 17, 1),
("B36", 2, 17, 1),
],
"1k-tq144": [
( "1", 0, 14, 1),
( "2", 0, 14, 0),
( "3", 0, 13, 1),
( "4", 0, 13, 0),
( "7", 0, 12, 1),
( "8", 0, 12, 0),
( "9", 0, 11, 1),
( "10", 0, 11, 0),
( "11", 0, 10, 1),
( "12", 0, 10, 0),
( "19", 0, 9, 1),
( "20", 0, 9, 0),
( "21", 0, 8, 1),
( "22", 0, 8, 0),
( "23", 0, 6, 1),
( "24", 0, 6, 0),
( "25", 0, 5, 1),
( "26", 0, 5, 0),
( "28", 0, 4, 1),
( "29", 0, 4, 0),
( "31", 0, 3, 1),
( "32", 0, 3, 0),
( "33", 0, 2, 1),
( "34", 0, 2, 0),
( "37", 1, 0, 0),
( "38", 1, 0, 1),
( "39", 2, 0, 0),
( "41", 2, 0, 1),
( "42", 3, 0, 0),
( "43", 3, 0, 1),
( "44", 4, 0, 0),
( "45", 4, 0, 1),
( "47", 5, 0, 0),
( "48", 5, 0, 1),
( "49", 6, 0, 1),
( "50", 7, 0, 0),
( "52", 6, 0, 0),
( "56", 7, 0, 1),
( "58", 8, 0, 0),
( "60", 8, 0, 1),
( "61", 9, 0, 0),
( "62", 9, 0, 1),
( "63", 10, 0, 0),
( "64", 10, 0, 1),
( "67", 11, 0, 0),
( "68", 11, 0, 1),
( "70", 12, 0, 0),
( "71", 12, 0, 1),
( "73", 13, 1, 0),
( "74", 13, 1, 1),
( "75", 13, 2, 0),
( "76", 13, 2, 1),
( "78", 13, 3, 1),
( "79", 13, 4, 0),
( "80", 13, 4, 1),
( "81", 13, 6, 0),
( "87", 13, 6, 1),
( "88", 13, 7, 0),
( "90", 13, 7, 1),
( "91", 13, 8, 0),
( "93", 13, 8, 1),
( "94", 13, 9, 0),
( "95", 13, 9, 1),
( "96", 13, 11, 0),
( "97", 13, 11, 1),
( "98", 13, 12, 0),
( "99", 13, 12, 1),
("101", 13, 13, 0),
("102", 13, 13, 1),
("104", 13, 14, 0),
("105", 13, 14, 1),
("106", 13, 15, 0),
("107", 13, 15, 1),
("112", 12, 17, 1),
("113", 12, 17, 0),
("114", 11, 17, 1),
("115", 11, 17, 0),
("116", 10, 17, 1),
("117", 10, 17, 0),
("118", 9, 17, 1),
("119", 9, 17, 0),
("120", 8, 17, 1),
("121", 8, 17, 0),
("122", 7, 17, 1),
("128", 7, 17, 0),
("129", 6, 17, 1),
("134", 5, 17, 1),
("135", 5, 17, 0),
("136", 4, 17, 1),
("137", 4, 17, 0),
("138", 3, 17, 1),
("139", 3, 17, 0),
("141", 2, 17, 1),
("142", 2, 17, 0),
("143", 1, 17, 1),
("144", 1, 17, 0),
],
"1k-vq100": [
( "1", 0, 14, 1),
( "2", 0, 14, 0),
( "3", 0, 13, 1),
( "4", 0, 13, 0),
( "7", 0, 12, 1),
( "8", 0, 12, 0),
( "9", 0, 10, 1),
( "10", 0, 10, 0),
( "12", 0, 9, 1),
( "13", 0, 9, 0),
( "15", 0, 8, 1),
( "16", 0, 8, 0),
( "18", 0, 6, 1),
( "19", 0, 6, 0),
( "20", 0, 4, 1),
( "21", 0, 4, 0),
( "24", 0, 2, 1),
( "25", 0, 2, 0),
( "26", 2, 0, 0),
( "27", 2, 0, 1),
( "28", 3, 0, 0),
( "29", 3, 0, 1),
( "30", 4, 0, 0),
( "33", 6, 0, 1),
( "34", 7, 0, 0),
( "36", 6, 0, 0),
( "37", 7, 0, 1),
( "40", 9, 0, 1),
( "41", 10, 0, 0),
( "42", 10, 0, 1),
( "45", 11, 0, 0),
( "46", 11, 0, 1),
( "48", 12, 0, 0),
( "49", 12, 0, 1),
( "51", 13, 3, 1),
( "52", 13, 4, 0),
( "53", 13, 4, 1),
( "54", 13, 6, 0),
( "56", 13, 6, 1),
( "57", 13, 7, 0),
( "59", 13, 7, 1),
( "60", 13, 8, 0),
( "62", 13, 8, 1),
( "63", 13, 9, 0),
( "64", 13, 11, 0),
( "65", 13, 11, 1),
( "66", 13, 12, 0),
( "68", 13, 13, 0),
( "69", 13, 13, 1),
( "71", 13, 14, 0),
( "72", 13, 14, 1),
( "73", 13, 15, 0),
( "74", 13, 15, 1),
( "78", 12, 17, 1),
( "79", 12, 17, 0),
( "80", 11, 17, 1),
( "81", 10, 17, 1),
( "82", 10, 17, 0),
( "83", 9, 17, 1),
( "85", 9, 17, 0),
( "86", 8, 17, 1),
( "87", 8, 17, 0),
( "89", 7, 17, 0),
( "90", 6, 17, 1),
( "91", 6, 17, 0),
( "93", 5, 17, 1),
( "94", 5, 17, 0),
( "95", 4, 17, 1),
( "96", 4, 17, 0),
( "97", 3, 17, 1),
( "99", 2, 17, 1),
("100", 1, 17, 1),
],
"8k-cb132:4k": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 0),
( "A3", 3, 33, 1),
( "A4", 5, 33, 0),
( "A5", 10, 33, 1),
( "A6", 16, 33, 1),
( "A7", 17, 33, 0),
("A10", 25, 33, 0),
("A11", 26, 33, 0),
("A12", 30, 33, 1),
( "B1", 0, 30, 1),
("B14", 33, 28, 0),
( "C1", 0, 30, 0),
( "C3", 0, 27, 1),
( "C4", 4, 33, 0),
( "C5", 8, 33, 1),
( "C6", 11, 33, 1),
( "C7", 14, 33, 1),
( "C9", 20, 33, 1),
("C10", 22, 33, 1),
("C11", 28, 33, 1),
("C12", 29, 33, 1),
("C14", 33, 24, 1),
( "D1", 0, 25, 1),
( "D3", 0, 27, 0),
( "D4", 0, 22, 1),
( "D5", 9, 33, 0),
( "D6", 11, 33, 0),
( "D7", 13, 33, 1),
( "D9", 21, 33, 1),
("D10", 27, 33, 0),
("D11", 26, 33, 1),
("D12", 33, 27, 1),
("D14", 33, 23, 1),
( "E1", 0, 25, 0),
( "E4", 0, 22, 0),
("E11", 33, 20, 1),
("E12", 33, 21, 0),
("E14", 33, 21, 1),
( "F3", 0, 21, 0),
( "F4", 0, 21, 1),
("F11", 33, 19, 1),
("F12", 33, 15, 0),
("F14", 33, 16, 1),
( "G1", 0, 17, 0),
( "G3", 0, 17, 1),
( "G4", 0, 20, 0),
("G11", 33, 14, 1),
("G12", 33, 11, 0),
("G14", 33, 17, 0),
( "H1", 0, 16, 1),
( "H3", 0, 16, 0),
( "H4", 0, 20, 1),
("H11", 33, 10, 1),
("H12", 33, 6, 1),
( "J1", 0, 18, 0),
( "J3", 0, 18, 1),
("J11", 33, 6, 0),
("J12", 33, 5, 1),
( "K3", 0, 11, 1),
( "K4", 0, 11, 0),
("K11", 33, 4, 1),
("K12", 33, 4, 0),
("K14", 33, 5, 0),
( "L1", 0, 6, 1),
( "L4", 12, 0, 0),
( "L5", 11, 0, 1),
( "L6", 15, 0, 0),
( "L8", 20, 0, 1),
( "L9", 29, 0, 0),
("L12", 33, 2, 0),
("L14", 33, 3, 1),
( "M1", 0, 6, 0),
( "M3", 8, 0, 0),
( "M4", 7, 0, 1),
( "M6", 14, 0, 1),
( "M7", 15, 0, 1),
( "M9", 22, 0, 1),
("M11", 30, 0, 0),
("M12", 33, 1, 0),
( "N1", 0, 4, 1),
("N14", 33, 2, 1),
( "P1", 0, 4, 0),
( "P2", 4, 0, 0),
( "P3", 5, 0, 1),
( "P4", 12, 0, 1),
( "P5", 13, 0, 0),
( "P7", 16, 0, 1),
( "P8", 17, 0, 0),
( "P9", 21, 0, 1),
("P10", 29, 0, 1),
("P11", 30, 0, 1),
("P12", 31, 0, 0),
("P13", 31, 0, 1),
("P14", 33, 1, 1),
],
"8k-tq144:4k": [
( "1", 0, 30, 1),
( "2", 0, 30, 0),
( "3", 0, 28, 1),
( "4", 0, 28, 0),
( "7", 0, 27, 1),
( "8", 0, 27, 0),
( "9", 0, 25, 1),
( "10", 0, 25, 0),
( "11", 0, 22, 1),
( "12", 0, 22, 0),
( "15", 0, 20, 1),
( "16", 0, 20, 0),
( "17", 0, 18, 1),
( "18", 0, 18, 0),
( "19", 0, 17, 1),
( "20", 0, 17, 0),
( "21", 0, 16, 1),
( "22", 0, 16, 0),
( "23", 0, 12, 1),
( "24", 0, 12, 0),
( "25", 0, 11, 1),
( "26", 0, 11, 0),
( "28", 0, 6, 1),
( "29", 0, 6, 0),
( "31", 0, 5, 1),
( "32", 0, 5, 0),
( "33", 0, 4, 1),
( "34", 0, 4, 0),
( "37", 4, 0, 0),
( "38", 4, 0, 1),
( "39", 6, 0, 1),
( "41", 7, 0, 1),
( "42", 8, 0, 0),
( "43", 11, 0, 1),
( "44", 12, 0, 0),
( "45", 12, 0, 1),
( "47", 15, 0, 1),
( "48", 16, 0, 0),
( "49", 16, 0, 1),
( "52", 17, 0, 0),
( "55", 22, 0, 1),
( "56", 24, 0, 0),
( "60", 24, 0, 1),
( "61", 25, 0, 0),
( "62", 28, 0, 0),
( "63", 29, 0, 0),
( "64", 29, 0, 1),
( "67", 30, 0, 0),
( "68", 30, 0, 1),
( "70", 31, 0, 0),
( "71", 31, 0, 1),
( "73", 33, 1, 0),
( "74", 33, 1, 1),
( "75", 33, 2, 0),
( "76", 33, 2, 1),
( "78", 33, 3, 1),
( "79", 33, 4, 0),
( "80", 33, 4, 1),
( "81", 33, 5, 0),
( "82", 33, 5, 1),
( "83", 33, 6, 0),
( "84", 33, 6, 1),
( "85", 33, 10, 1),
( "87", 33, 14, 1),
( "88", 33, 15, 0),
( "90", 33, 15, 1),
( "91", 33, 16, 0),
( "93", 33, 16, 1),
( "94", 33, 17, 0),
( "95", 33, 19, 1),
( "96", 33, 20, 1),
( "97", 33, 21, 0),
( "98", 33, 21, 1),
( "99", 33, 23, 1),
("101", 33, 27, 1),
("102", 33, 28, 0),
("104", 33, 29, 1),
("105", 33, 30, 0),
("106", 33, 30, 1),
("107", 33, 31, 0),
("110", 31, 33, 1),
("112", 31, 33, 0),
("113", 30, 33, 1),
("114", 30, 33, 0),
("115", 29, 33, 1),
("116", 29, 33, 0),
("117", 28, 33, 1),
("118", 27, 33, 0),
("119", 26, 33, 1),
("120", 26, 33, 0),
("121", 25, 33, 0),
("122", 20, 33, 1),
("124", 20, 33, 0),
("125", 19, 33, 1),
("128", 17, 33, 0),
("129", 16, 33, 1),
("130", 11, 33, 1),
("134", 8, 33, 1),
("135", 8, 33, 0),
("136", 7, 33, 1),
("137", 7, 33, 0),
("138", 6, 33, 1),
("139", 6, 33, 0),
("141", 5, 33, 0),
("142", 4, 33, 1),
("143", 4, 33, 0),
("144", 3, 33, 1),
],
"8k-cm81:4k": [
( "A1", 2, 33, 1),
( "A2", 4, 33, 0),
( "A3", 6, 33, 0),
( "A4", 10, 33, 1),
( "A6", 23, 33, 0),
( "A7", 27, 33, 0),
( "A8", 28, 33, 1),
( "A9", 33, 4, 1),
( "B1", 0, 28, 1),
( "B2", 0, 30, 0),
( "B3", 5, 33, 1),
( "B4", 9, 33, 0),
( "B5", 21, 33, 1),
( "B6", 24, 33, 0),
( "B7", 25, 33, 1),
( "B8", 30, 33, 1),
( "B9", 33, 6, 1),
( "C1", 0, 28, 0),
( "C2", 0, 30, 1),
( "C3", 0, 23, 0),
( "C4", 16, 33, 1),
( "C5", 17, 33, 0),
( "C9", 33, 21, 1),
( "D1", 0, 20, 1),
( "D2", 0, 23, 1),
( "D3", 0, 17, 0),
( "D5", 8, 33, 1),
( "D6", 33, 4, 0),
( "D7", 33, 5, 0),
( "D8", 33, 17, 0),
( "D9", 33, 6, 0),
( "E1", 0, 20, 0),
( "E2", 0, 17, 1),
( "E3", 0, 16, 1),
( "E4", 0, 16, 0),
( "E5", 7, 33, 1),
( "E7", 33, 5, 1),
( "E8", 33, 16, 1),
( "F1", 0, 7, 1),
( "F3", 0, 7, 0),
( "F7", 31, 0, 1),
( "F8", 33, 3, 0),
( "G1", 0, 5, 0),
( "G2", 0, 3, 1),
( "G3", 0, 5, 1),
( "G4", 16, 0, 1),
( "G5", 29, 0, 0),
( "G6", 30, 0, 0),
( "G7", 31, 0, 0),
( "G8", 33, 3, 1),
( "G9", 33, 2, 1),
( "H1", 3, 0, 0),
( "H2", 0, 3, 0),
( "H4", 17, 0, 0),
( "H5", 29, 0, 1),
( "H7", 30, 0, 1),
( "H9", 33, 2, 0),
( "J1", 3, 0, 1),
( "J2", 4, 0, 0),
( "J3", 4, 0, 1),
( "J4", 11, 0, 0),
( "J8", 33, 1, 0),
( "J9", 33, 1, 1),
],
"8k-cm121:4k": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 1),
( "A3", 3, 33, 0),
( "A4", 9, 33, 0),
( "A5", 11, 33, 0),
( "A6", 11, 33, 1),
( "A7", 19, 33, 1),
( "A8", 20, 33, 1),
( "A9", 26, 33, 1),
("A10", 30, 33, 1),
("A11", 31, 33, 1),
( "B1", 0, 30, 1),
( "B2", 0, 30, 0),
( "B3", 4, 33, 0),
( "B4", 5, 33, 0),
( "B5", 10, 33, 1),
( "B6", 16, 33, 1),
( "B7", 17, 33, 0),
( "B8", 27, 33, 0),
( "B9", 28, 33, 1),
("B11", 33, 28, 0),
( "C1", 0, 25, 0),
( "C2", 0, 25, 1),
( "C3", 0, 27, 0),
( "C4", 0, 27, 1),
( "C7", 20, 33, 0),
( "C8", 26, 33, 0),
( "C9", 29, 33, 1),
("C11", 33, 27, 1),
( "D1", 0, 22, 0),
( "D2", 0, 21, 1),
( "D3", 0, 21, 0),
( "D5", 8, 33, 1),
( "D7", 25, 33, 0),
( "D9", 33, 21, 0),
("D10", 33, 24, 1),
("D11", 33, 23, 1),
( "E1", 0, 22, 1),
( "E2", 0, 20, 1),
( "E3", 0, 20, 0),
( "E8", 33, 20, 1),
( "E9", 33, 19, 1),
("E10", 33, 17, 0),
("E11", 33, 21, 1),
( "F1", 0, 18, 1),
( "F2", 0, 18, 0),
( "F3", 0, 17, 0),
( "F4", 0, 17, 1),
( "F9", 33, 15, 0),
("F10", 33, 14, 1),
("F11", 33, 16, 1),
( "G1", 0, 16, 1),
( "G2", 0, 16, 0),
( "G3", 0, 12, 1),
( "G8", 33, 5, 1),
( "G9", 33, 10, 1),
("G10", 33, 6, 1),
("G11", 33, 11, 0),
( "H1", 0, 11, 1),
( "H2", 0, 11, 0),
( "H3", 0, 12, 0),
( "H7", 20, 0, 1),
( "H9", 29, 0, 1),
("H10", 33, 4, 1),
("H11", 33, 6, 0),
( "J1", 0, 6, 1),
( "J2", 0, 4, 0),
( "J3", 4, 0, 1),
( "J4", 8, 0, 0),
( "J5", 15, 0, 0),
( "J7", 20, 0, 0),
( "J8", 22, 0, 1),
( "J9", 30, 0, 1),
("J10", 33, 5, 0),
("J11", 33, 3, 1),
( "K1", 0, 6, 0),
( "K2", 0, 4, 1),
( "K3", 7, 0, 1),
( "K4", 12, 0, 1),
( "K5", 15, 0, 1),
( "K6", 17, 0, 0),
( "K7", 21, 0, 1),
( "K9", 30, 0, 0),
("K10", 31, 0, 1),
("K11", 33, 4, 0),
( "L1", 4, 0, 0),
( "L2", 6, 0, 1),
( "L3", 11, 0, 1),
( "L4", 12, 0, 0),
( "L5", 16, 0, 1),
( "L7", 24, 0, 0),
( "L8", 29, 0, 0),
("L10", 31, 0, 0),
],
"8k-cm225:4k": [
( "A1", 1, 33, 1),
( "A2", 3, 33, 1),
( "A5", 6, 33, 1),
( "A6", 11, 33, 0),
( "A7", 12, 33, 0),
( "A8", 17, 33, 1),
( "A9", 18, 33, 1),
("A11", 23, 33, 1),
("A15", 31, 33, 0),
( "B2", 2, 33, 1),
( "B3", 4, 33, 1),
( "B4", 5, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 0),
( "B7", 14, 33, 0),
( "B8", 19, 33, 1),
( "B9", 18, 33, 0),
("B10", 22, 33, 0),
("B11", 23, 33, 0),
("B12", 25, 33, 1),
("B13", 27, 33, 1),
("B14", 31, 33, 1),
("B15", 33, 31, 0),
( "C1", 0, 28, 0),
( "C3", 2, 33, 0),
( "C4", 3, 33, 0),
( "C5", 5, 33, 0),
( "C6", 13, 33, 0),
( "C7", 11, 33, 1),
( "C8", 19, 33, 0),
( "C9", 17, 33, 0),
("C10", 20, 33, 0),
("C11", 24, 33, 1),
("C12", 30, 33, 1),
("C13", 30, 33, 0),
("C14", 33, 30, 0),
( "D1", 0, 25, 0),
( "D2", 0, 24, 1),
( "D3", 0, 27, 0),
( "D4", 0, 30, 0),
( "D5", 4, 33, 0),
( "D6", 9, 33, 0),
( "D7", 10, 33, 1),
( "D8", 16, 33, 1),
( "D9", 26, 33, 1),
("D10", 25, 33, 0),
("D11", 28, 33, 1),
("D13", 33, 27, 1),
("D14", 33, 25, 0),
("D15", 33, 27, 0),
( "E2", 0, 24, 0),
( "E3", 0, 28, 1),
( "E4", 0, 30, 1),
( "E5", 0, 27, 1),
( "E6", 0, 25, 1),
( "E9", 26, 33, 0),
("E10", 27, 33, 0),
("E11", 29, 33, 1),
("E13", 33, 28, 0),
("E14", 33, 24, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 21, 1),
( "F4", 0, 22, 0),
( "F5", 0, 22, 1),
( "F7", 8, 33, 1),
( "F9", 20, 33, 1),
("F11", 33, 24, 1),
("F12", 33, 23, 1),
("F13", 33, 23, 0),
("F14", 33, 21, 0),
("F15", 33, 22, 0),
( "G2", 0, 20, 1),
( "G4", 0, 17, 0),
( "G5", 0, 18, 1),
("G10", 33, 20, 1),
("G11", 33, 19, 1),
("G12", 33, 21, 1),
("G13", 33, 17, 0),
("G14", 33, 20, 0),
("G15", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 14, 1),
( "H4", 0, 13, 1),
( "H5", 0, 16, 1),
( "H6", 0, 17, 1),
("H11", 33, 14, 1),
("H12", 33, 16, 1),
("H13", 33, 15, 1),
("H14", 33, 15, 0),
( "J1", 0, 13, 0),
( "J2", 0, 12, 0),
( "J3", 0, 14, 0),
( "J4", 0, 11, 1),
( "J5", 0, 12, 1),
("J10", 33, 5, 1),
("J11", 33, 10, 1),
("J12", 33, 6, 1),
("J14", 33, 14, 0),
("J15", 33, 13, 0),
( "K1", 0, 11, 0),
( "K4", 0, 4, 0),
( "K5", 0, 6, 1),
( "K9", 20, 0, 1),
("K11", 29, 0, 0),
("K12", 33, 4, 1),
("K13", 33, 5, 0),
("K15", 33, 9, 0),
( "L3", 0, 7, 1),
( "L4", 0, 3, 0),
( "L5", 4, 0, 0),
( "L6", 7, 0, 0),
( "L7", 12, 0, 0),
( "L9", 17, 0, 0),
("L10", 21, 0, 1),
("L11", 30, 0, 1),
("L12", 33, 3, 1),
("L13", 33, 6, 0),
( "M1", 0, 7, 0),
( "M2", 0, 6, 0),
( "M3", 0, 5, 0),
( "M4", 0, 3, 1),
( "M5", 6, 0, 0),
( "M6", 8, 0, 0),
( "M7", 13, 0, 1),
( "M8", 15, 0, 0),
( "M9", 19, 0, 1),
("M11", 30, 0, 0),
("M12", 31, 0, 1),
("M13", 33, 4, 0),
("M15", 33, 3, 0),
( "N2", 0, 5, 1),
( "N3", 2, 0, 0),
( "N4", 3, 0, 0),
( "N5", 9, 0, 1),
( "N6", 12, 0, 1),
( "N7", 16, 0, 1),
( "N9", 20, 0, 0),
("N10", 22, 0, 1),
("N12", 31, 0, 0),
( "P1", 0, 4, 1),
( "P2", 2, 0, 1),
( "P4", 7, 0, 1),
( "P5", 10, 0, 1),
( "P6", 14, 0, 1),
( "P7", 17, 0, 1),
( "P8", 19, 0, 0),
( "P9", 22, 0, 0),
("P10", 23, 0, 0),
("P11", 25, 0, 0),
("P12", 29, 0, 1),
("P13", 27, 0, 0),
("P14", 33, 2, 1),
("P15", 33, 1, 1),
( "R1", 3, 0, 1),
( "R2", 4, 0, 1),
( "R3", 6, 0, 1),
( "R4", 8, 0, 1),
( "R5", 11, 0, 1),
( "R6", 15, 0, 1),
( "R9", 21, 0, 0),
("R10", 24, 0, 0),
("R11", 26, 0, 0),
("R12", 28, 0, 0),
("R14", 33, 2, 0),
("R15", 33, 1, 0),
],
"8k-cm81": [
( "A1", 2, 33, 1),
( "A2", 4, 33, 0),
( "A3", 6, 33, 0),
( "A4", 10, 33, 1),
( "A6", 23, 33, 0),
( "A7", 27, 33, 0),
( "A8", 28, 33, 1),
( "A9", 33, 4, 1),
( "B1", 0, 28, 1),
( "B2", 0, 30, 0),
( "B3", 5, 33, 1),
( "B4", 9, 33, 0),
( "B5", 21, 33, 1),
( "B6", 24, 33, 0),
( "B7", 25, 33, 1),
( "B8", 30, 33, 1),
( "B9", 33, 6, 1),
( "C1", 0, 28, 0),
( "C2", 0, 30, 1),
( "C3", 0, 23, 0),
( "C4", 16, 33, 1),
( "C5", 17, 33, 0),
( "C9", 33, 21, 1),
( "D1", 0, 20, 1),
( "D2", 0, 23, 1),
( "D3", 0, 17, 0),
( "D5", 8, 33, 1),
( "D6", 33, 4, 0),
( "D7", 33, 5, 0),
( "D8", 33, 17, 0),
( "D9", 33, 6, 0),
( "E1", 0, 20, 0),
( "E2", 0, 17, 1),
( "E3", 0, 16, 1),
( "E4", 0, 16, 0),
( "E5", 7, 33, 1),
( "E7", 33, 5, 1),
( "E8", 33, 16, 1),
( "F1", 0, 7, 1),
( "F3", 0, 7, 0),
( "F7", 31, 0, 1),
( "F8", 33, 3, 0),
( "G1", 0, 5, 0),
( "G2", 0, 3, 1),
( "G3", 0, 5, 1),
( "G4", 16, 0, 1),
( "G5", 29, 0, 0),
( "G6", 30, 0, 0),
( "G7", 31, 0, 0),
( "G8", 33, 3, 1),
( "G9", 33, 2, 1),
( "H1", 3, 0, 0),
( "H2", 0, 3, 0),
( "H4", 17, 0, 0),
( "H5", 29, 0, 1),
( "H7", 30, 0, 1),
( "H9", 33, 2, 0),
( "J1", 3, 0, 1),
( "J2", 4, 0, 0),
( "J3", 4, 0, 1),
( "J4", 11, 0, 0),
( "J8", 33, 1, 0),
( "J9", 33, 1, 1),
],
"8k-cm121": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 1),
( "A3", 3, 33, 0),
( "A4", 9, 33, 0),
( "A5", 11, 33, 0),
( "A6", 11, 33, 1),
( "A7", 19, 33, 1),
( "A8", 20, 33, 1),
( "A9", 26, 33, 1),
("A10", 30, 33, 1),
("A11", 31, 33, 1),
( "B1", 0, 30, 1),
( "B2", 0, 30, 0),
( "B3", 4, 33, 0),
( "B4", 5, 33, 0),
( "B5", 10, 33, 1),
( "B6", 16, 33, 1),
( "B7", 17, 33, 0),
( "B8", 27, 33, 0),
( "B9", 28, 33, 1),
("B11", 33, 28, 0),
( "C1", 0, 25, 0),
( "C2", 0, 25, 1),
( "C3", 0, 27, 0),
( "C4", 0, 27, 1),
( "C7", 20, 33, 0),
( "C8", 26, 33, 0),
( "C9", 29, 33, 1),
("C11", 33, 27, 1),
( "D1", 0, 22, 0),
( "D2", 0, 21, 1),
( "D3", 0, 21, 0),
( "D5", 8, 33, 1),
( "D7", 25, 33, 0),
( "D9", 33, 21, 0),
("D10", 33, 24, 1),
("D11", 33, 23, 1),
( "E1", 0, 22, 1),
( "E2", 0, 20, 1),
( "E3", 0, 20, 0),
( "E8", 33, 20, 1),
( "E9", 33, 19, 1),
("E10", 33, 17, 0),
("E11", 33, 21, 1),
( "F1", 0, 18, 1),
( "F2", 0, 18, 0),
( "F3", 0, 17, 0),
( "F4", 0, 17, 1),
( "F9", 33, 15, 0),
("F10", 33, 14, 1),
("F11", 33, 16, 1),
( "G1", 0, 16, 1),
( "G2", 0, 16, 0),
( "G3", 0, 12, 1),
( "G8", 33, 5, 1),
( "G9", 33, 10, 1),
("G10", 33, 6, 1),
("G11", 33, 11, 0),
( "H1", 0, 11, 1),
( "H2", 0, 11, 0),
( "H3", 0, 12, 0),
( "H7", 20, 0, 1),
( "H9", 29, 0, 1),
("H10", 33, 4, 1),
("H11", 33, 6, 0),
( "J1", 0, 6, 1),
( "J2", 0, 4, 0),
( "J3", 4, 0, 1),
( "J4", 8, 0, 0),
( "J5", 15, 0, 0),
( "J7", 20, 0, 0),
( "J8", 22, 0, 1),
( "J9", 30, 0, 1),
("J10", 33, 5, 0),
("J11", 33, 3, 1),
( "K1", 0, 6, 0),
( "K2", 0, 4, 1),
( "K3", 7, 0, 1),
( "K4", 12, 0, 1),
( "K5", 15, 0, 1),
( "K6", 17, 0, 0),
( "K7", 21, 0, 1),
( "K9", 30, 0, 0),
("K10", 31, 0, 1),
("K11", 33, 4, 0),
( "L1", 4, 0, 0),
( "L2", 6, 0, 1),
( "L3", 11, 0, 1),
( "L4", 12, 0, 0),
( "L5", 16, 0, 1),
( "L7", 24, 0, 0),
( "L8", 29, 0, 0),
("L10", 31, 0, 0),
],
"8k-cm225": [
( "A1", 1, 33, 1),
( "A2", 3, 33, 1),
( "A5", 6, 33, 1),
( "A6", 11, 33, 0),
( "A7", 12, 33, 0),
( "A8", 17, 33, 1),
( "A9", 18, 33, 1),
("A10", 21, 33, 0),
("A11", 23, 33, 1),
("A15", 31, 33, 0),
( "B1", 0, 31, 0),
( "B2", 2, 33, 1),
( "B3", 4, 33, 1),
( "B4", 5, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 0),
( "B7", 14, 33, 0),
( "B8", 19, 33, 1),
( "B9", 18, 33, 0),
("B10", 22, 33, 0),
("B11", 23, 33, 0),
("B12", 25, 33, 1),
("B13", 27, 33, 1),
("B14", 31, 33, 1),
("B15", 33, 31, 0),
( "C1", 0, 28, 0),
( "C2", 0, 31, 1),
( "C3", 2, 33, 0),
( "C4", 3, 33, 0),
( "C5", 5, 33, 0),
( "C6", 13, 33, 0),
( "C7", 11, 33, 1),
( "C8", 19, 33, 0),
( "C9", 17, 33, 0),
("C10", 20, 33, 0),
("C11", 24, 33, 1),
("C12", 30, 33, 1),
("C13", 30, 33, 0),
("C14", 33, 30, 0),
( "D1", 0, 25, 0),
( "D2", 0, 24, 1),
( "D3", 0, 27, 0),
( "D4", 0, 30, 0),
( "D5", 4, 33, 0),
( "D6", 9, 33, 0),
( "D7", 10, 33, 1),
( "D8", 16, 33, 1),
( "D9", 26, 33, 1),
("D10", 25, 33, 0),
("D11", 28, 33, 1),
("D13", 33, 27, 1),
("D14", 33, 25, 0),
("D15", 33, 27, 0),
( "E2", 0, 24, 0),
( "E3", 0, 28, 1),
( "E4", 0, 30, 1),
( "E5", 0, 27, 1),
( "E6", 0, 25, 1),
( "E9", 26, 33, 0),
("E10", 27, 33, 0),
("E11", 29, 33, 1),
("E13", 33, 28, 0),
("E14", 33, 24, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 21, 1),
( "F4", 0, 22, 0),
( "F5", 0, 22, 1),
( "F7", 8, 33, 1),
( "F9", 20, 33, 1),
("F11", 33, 24, 1),
("F12", 33, 23, 1),
("F13", 33, 23, 0),
("F14", 33, 21, 0),
("F15", 33, 22, 0),
( "G1", 0, 19, 0),
( "G2", 0, 20, 1),
( "G3", 0, 19, 1),
( "G4", 0, 17, 0),
( "G5", 0, 18, 1),
("G10", 33, 20, 1),
("G11", 33, 19, 1),
("G12", 33, 21, 1),
("G13", 33, 17, 0),
("G14", 33, 20, 0),
("G15", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 14, 1),
( "H4", 0, 13, 1),
( "H5", 0, 16, 1),
( "H6", 0, 17, 1),
("H11", 33, 14, 1),
("H12", 33, 16, 1),
("H13", 33, 15, 1),
("H14", 33, 15, 0),
( "J1", 0, 13, 0),
( "J2", 0, 12, 0),
( "J3", 0, 14, 0),
( "J4", 0, 11, 1),
( "J5", 0, 12, 1),
("J10", 33, 5, 1),
("J11", 33, 10, 1),
("J12", 33, 6, 1),
("J13", 33, 11, 0),
("J14", 33, 14, 0),
("J15", 33, 13, 0),
( "K1", 0, 11, 0),
( "K3", 0, 9, 1),
( "K4", 0, 4, 0),
( "K5", 0, 6, 1),
( "K9", 20, 0, 1),
("K11", 29, 0, 0),
("K12", 33, 4, 1),
("K13", 33, 5, 0),
("K14", 33, 12, 0),
("K15", 33, 9, 0),
( "L1", 0, 9, 0),
( "L3", 0, 7, 1),
( "L4", 0, 3, 0),
( "L5", 4, 0, 0),
( "L6", 7, 0, 0),
( "L7", 12, 0, 0),
( "L9", 17, 0, 0),
("L10", 21, 0, 1),
("L11", 30, 0, 1),
("L12", 33, 3, 1),
("L13", 33, 6, 0),
("L14", 33, 7, 0),
( "M1", 0, 7, 0),
( "M2", 0, 6, 0),
( "M3", 0, 5, 0),
( "M4", 0, 3, 1),
( "M5", 6, 0, 0),
( "M6", 8, 0, 0),
( "M7", 13, 0, 1),
( "M8", 15, 0, 0),
( "M9", 19, 0, 1),
("M11", 30, 0, 0),
("M12", 31, 0, 1),
("M13", 33, 4, 0),
("M14", 33, 8, 0),
("M15", 33, 3, 0),
( "N2", 0, 5, 1),
( "N3", 2, 0, 0),
( "N4", 3, 0, 0),
( "N5", 9, 0, 1),
( "N6", 12, 0, 1),
( "N7", 16, 0, 1),
( "N9", 20, 0, 0),
("N10", 22, 0, 1),
("N12", 31, 0, 0),
( "P1", 0, 4, 1),
( "P2", 2, 0, 1),
( "P4", 7, 0, 1),
( "P5", 10, 0, 1),
( "P6", 14, 0, 1),
( "P7", 17, 0, 1),
( "P8", 19, 0, 0),
( "P9", 22, 0, 0),
("P10", 23, 0, 0),
("P11", 25, 0, 0),
("P12", 29, 0, 1),
("P13", 27, 0, 0),
("P14", 33, 2, 1),
("P15", 33, 1, 1),
( "R1", 3, 0, 1),
( "R2", 4, 0, 1),
( "R3", 6, 0, 1),
( "R4", 8, 0, 1),
( "R5", 11, 0, 1),
( "R6", 15, 0, 1),
( "R9", 21, 0, 0),
("R10", 24, 0, 0),
("R11", 26, 0, 0),
("R12", 28, 0, 0),
("R14", 33, 2, 0),
("R15", 33, 1, 0),
],
"8k-cb132": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 0),
( "A3", 3, 33, 1),
( "A4", 5, 33, 0),
( "A5", 10, 33, 1),
( "A6", 16, 33, 1),
( "A7", 17, 33, 0),
("A10", 25, 33, 0),
("A11", 26, 33, 0),
("A12", 30, 33, 1),
( "B1", 0, 30, 1),
("B14", 33, 28, 0),
( "C1", 0, 30, 0),
( "C3", 0, 27, 1),
( "C4", 4, 33, 0),
( "C5", 8, 33, 1),
( "C6", 11, 33, 1),
( "C7", 14, 33, 1),
( "C9", 20, 33, 1),
("C10", 22, 33, 1),
("C11", 28, 33, 1),
("C12", 29, 33, 1),
("C14", 33, 24, 1),
( "D1", 0, 25, 1),
( "D3", 0, 27, 0),
( "D4", 0, 22, 1),
( "D5", 9, 33, 0),
( "D6", 11, 33, 0),
( "D7", 13, 33, 1),
( "D9", 21, 33, 1),
("D10", 27, 33, 0),
("D11", 26, 33, 1),
("D12", 33, 27, 1),
("D14", 33, 23, 1),
( "E1", 0, 25, 0),
( "E4", 0, 22, 0),
("E11", 33, 20, 1),
("E12", 33, 21, 0),
("E14", 33, 21, 1),
( "F3", 0, 21, 0),
( "F4", 0, 21, 1),
("F11", 33, 19, 1),
("F12", 33, 15, 0),
("F14", 33, 16, 1),
( "G1", 0, 17, 0),
( "G3", 0, 17, 1),
( "G4", 0, 20, 0),
("G11", 33, 14, 1),
("G12", 33, 11, 0),
("G14", 33, 17, 0),
( "H1", 0, 16, 1),
( "H3", 0, 16, 0),
( "H4", 0, 20, 1),
("H11", 33, 10, 1),
("H12", 33, 6, 1),
( "J1", 0, 18, 0),
( "J3", 0, 18, 1),
("J11", 33, 6, 0),
("J12", 33, 5, 1),
( "K3", 0, 11, 1),
( "K4", 0, 11, 0),
("K11", 33, 4, 1),
("K12", 33, 4, 0),
("K14", 33, 5, 0),
( "L1", 0, 6, 1),
( "L4", 12, 0, 0),
( "L5", 11, 0, 1),
( "L6", 15, 0, 0),
( "L8", 20, 0, 1),
( "L9", 29, 0, 0),
("L12", 33, 2, 0),
("L14", 33, 3, 1),
( "M1", 0, 6, 0),
( "M3", 8, 0, 0),
( "M4", 7, 0, 1),
( "M6", 14, 0, 1),
( "M7", 15, 0, 1),
( "M9", 22, 0, 1),
("M11", 30, 0, 0),
("M12", 33, 1, 0),
( "N1", 0, 4, 1),
("N14", 33, 2, 1),
( "P1", 0, 4, 0),
( "P2", 4, 0, 0),
( "P3", 5, 0, 1),
( "P4", 12, 0, 1),
( "P5", 13, 0, 0),
( "P7", 16, 0, 1),
( "P8", 17, 0, 0),
( "P9", 21, 0, 1),
("P10", 29, 0, 1),
("P11", 30, 0, 1),
("P12", 31, 0, 0),
("P13", 31, 0, 1),
("P14", 33, 1, 1),
],
"8k-ct256": [
( "A1", 4, 33, 1),
( "A2", 5, 33, 1),
( "A5", 8, 33, 0),
( "A6", 9, 33, 0),
( "A7", 12, 33, 0),
( "A9", 18, 33, 1),
("A10", 22, 33, 1),
("A11", 22, 33, 0),
("A15", 27, 33, 0),
("A16", 27, 33, 1),
( "B1", 0, 30, 0),
( "B2", 0, 31, 0),
( "B3", 3, 33, 0),
( "B4", 6, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 1),
( "B7", 11, 33, 0),
( "B8", 13, 33, 0),
( "B9", 16, 33, 0),
("B10", 24, 33, 0),
("B11", 23, 33, 1),
("B12", 24, 33, 1),
("B13", 26, 33, 1),
("B14", 30, 33, 0),
("B15", 31, 33, 0),
("B16", 33, 30, 0),
( "C1", 0, 28, 1),
( "C2", 0, 28, 0),
( "C3", 1, 33, 0),
( "C4", 3, 33, 1),
( "C5", 4, 33, 0),
( "C6", 10, 33, 0),
( "C7", 11, 33, 1),
( "C8", 17, 33, 0),
( "C9", 20, 33, 0),
("C10", 23, 33, 0),
("C11", 25, 33, 1),
("C12", 29, 33, 1),
("C13", 28, 33, 1),
("C14", 31, 33, 1),
("C16", 33, 28, 0),
( "D1", 0, 25, 0),
( "D2", 0, 27, 0),
( "D3", 1, 33, 1),
( "D4", 2, 33, 1),
( "D5", 5, 33, 0),
( "D6", 8, 33, 1),
( "D7", 9, 33, 1),
( "D8", 14, 33, 1),
( "D9", 19, 33, 0),
("D10", 20, 33, 1),
("D11", 25, 33, 0),
("D13", 30, 33, 1),
("D14", 33, 31, 0),
("D15", 33, 26, 0),
("D16", 33, 24, 0),
( "E2", 0, 23, 0),
( "E3", 0, 24, 0),
( "E4", 0, 31, 1),
( "E5", 2, 33, 0),
( "E6", 7, 33, 0),
( "E9", 19, 33, 1),
("E10", 26, 33, 0),
("E11", 29, 33, 0),
("E13", 33, 30, 1),
("E14", 33, 27, 1),
("E16", 33, 23, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 22, 0),
( "F4", 0, 27, 1),
( "F5", 0, 30, 1),
( "F7", 16, 33, 1),
( "F9", 17, 33, 1),
("F11", 33, 26, 1),
("F12", 33, 25, 1),
("F13", 33, 28, 1),
("F14", 33, 25, 0),
("F15", 33, 22, 0),
("F16", 33, 21, 0),
( "G1", 0, 17, 0),
( "G2", 0, 19, 0),
( "G3", 0, 22, 1),
( "G4", 0, 24, 1),
( "G5", 0, 25, 1),
("G10", 33, 20, 1),
("G11", 33, 21, 1),
("G12", 33, 24, 1),
("G13", 33, 23, 1),
("G14", 33, 22, 1),
("G15", 33, 20, 0),
("G16", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 21, 1),
( "H4", 0, 19, 1),
( "H5", 0, 23, 1),
( "H6", 0, 20, 1),
("H11", 33, 16, 1),
("H12", 33, 19, 1),
("H13", 33, 16, 0),
("H14", 33, 17, 1),
("H16", 33, 17, 0),
( "J1", 0, 14, 0),
( "J2", 0, 14, 1),
( "J3", 0, 16, 1),
( "J4", 0, 18, 1),
( "J5", 0, 17, 1),
("J10", 33, 7, 1),
("J11", 33, 9, 1),
("J12", 33, 14, 1),
("J13", 33, 15, 0),
("J14", 33, 13, 1),
("J15", 33, 11, 1),
("J16", 33, 15, 1),
( "K1", 0, 13, 1),
( "K3", 0, 13, 0),
( "K4", 0, 11, 1),
( "K5", 0, 9, 1),
( "K9", 17, 0, 0),
("K11", 29, 0, 0),
("K12", 33, 6, 1),
("K13", 33, 10, 1),
("K14", 33, 11, 0),
("K15", 33, 12, 0),
("K16", 33, 13, 0),
( "L1", 0, 12, 0),
( "L3", 0, 10, 0),
( "L4", 0, 12, 1),
( "L5", 0, 6, 1),
( "L6", 0, 10, 1),
( "L7", 0, 8, 1),
( "L9", 13, 0, 0),
("L10", 19, 0, 1),
("L11", 26, 0, 1),
("L12", 33, 4, 1),
("L13", 33, 5, 1),
("L14", 33, 6, 0),
("L16", 33, 10, 0),
( "M1", 0, 11, 0),
( "M2", 0, 9, 0),
( "M3", 0, 7, 0),
( "M4", 0, 5, 0),
( "M5", 0, 4, 0),
( "M6", 0, 7, 1),
( "M7", 8, 0, 0),
( "M8", 10, 0, 0),
( "M9", 16, 0, 0),
("M11", 23, 0, 1),
("M12", 27, 0, 1),
("M13", 33, 3, 1),
("M14", 33, 4, 0),
("M15", 33, 8, 0),
("M16", 33, 7, 0),
( "N2", 0, 8, 0),
( "N3", 0, 6, 0),
( "N4", 0, 3, 0),
( "N5", 4, 0, 0),
( "N6", 2, 0, 0),
( "N7", 9, 0, 0),
( "N9", 15, 0, 0),
("N10", 20, 0, 1),
("N12", 26, 0, 0),
("N16", 33, 5, 0),
( "P1", 0, 5, 1),
( "P2", 0, 4, 1),
( "P4", 3, 0, 0),
( "P5", 5, 0, 0),
( "P6", 9, 0, 1),
( "P7", 14, 0, 1),
( "P8", 12, 0, 0),
( "P9", 17, 0, 1),
("P10", 20, 0, 0),
("P11", 30, 0, 1),
("P12", 30, 0, 0),
("P13", 29, 0, 1),
("P14", 33, 2, 0),
("P15", 33, 2, 1),
("P16", 33, 3, 0),
( "R1", 0, 3, 1),
( "R2", 3, 0, 1),
( "R3", 5, 0, 1),
( "R4", 7, 0, 1),
( "R5", 6, 0, 0),
( "R6", 11, 0, 1),
( "R9", 16, 0, 1),
("R10", 19, 0, 0),
("R11", 31, 0, 0),
("R12", 31, 0, 1),
("R14", 33, 1, 0),
("R15", 33, 1, 1),
("R16", 28, 0, 0),
( "T1", 2, 0, 1),
( "T2", 4, 0, 1),
( "T3", 6, 0, 1),
( "T5", 10, 0, 1),
( "T6", 12, 0, 1),
( "T7", 13, 0, 1),
( "T8", 14, 0, 0),
( "T9", 15, 0, 1),
("T10", 21, 0, 0),
("T11", 21, 0, 1),
("T13", 24, 0, 0),
("T14", 23, 0, 0),
("T15", 22, 0, 1),
("T16", 27, 0, 0),
],
"384-qn32": [
( "1", 0, 7, 0),
( "2", 0, 7, 1),
( "5", 0, 5, 1),
( "6", 0, 5, 0),
( "7", 0, 4, 0),
( "8", 0, 4, 1),
( "12", 5, 0, 0),
( "13", 5, 0, 1),
( "14", 6, 0, 1),
( "15", 6, 0, 0),
( "18", 7, 4, 0),
( "19", 7, 4, 1),
( "20", 7, 5, 0),
( "22", 7, 6, 0),
( "23", 7, 6, 1),
( "26", 6, 9, 0),
( "27", 5, 9, 0),
( "29", 4, 9, 0),
( "30", 3, 9, 1),
( "31", 2, 9, 0),
( "32", 2, 9, 1),
],
"384-cm36": [
( "A1", 0, 7, 0),
( "A2", 2, 9, 1),
( "A3", 3, 9, 1),
( "B1", 0, 7, 1),
( "B3", 4, 9, 0),
( "B4", 7, 5, 0),
( "B5", 7, 5, 1),
( "B6", 7, 6, 0),
( "C1", 0, 5, 0),
( "C2", 0, 5, 1),
( "C3", 2, 9, 0),
( "C5", 7, 4, 1),
( "C6", 7, 6, 1),
( "D1", 0, 4, 1),
( "D5", 6, 0, 1),
( "D6", 7, 4, 0),
( "E1", 0, 4, 0),
( "E2", 3, 0, 1),
( "E3", 4, 0, 0),
( "E4", 5, 0, 0),
( "E5", 6, 0, 0),
( "E6", 7, 3, 1),
( "F2", 3, 0, 0),
( "F3", 4, 0, 1),
( "F5", 5, 0, 1),
],
"384-cm49": [
( "A1", 0, 7, 1),
( "A2", 2, 9, 1),
( "A3", 3, 9, 0),
( "A4", 4, 9, 1),
( "A5", 5, 9, 0),
( "A6", 6, 9, 0),
( "A7", 6, 9, 1),
( "B1", 0, 7, 0),
( "B2", 0, 6, 0),
( "B3", 2, 9, 0),
( "B4", 4, 9, 0),
( "C1", 0, 5, 1),
( "C2", 0, 6, 1),
( "C4", 3, 9, 1),
( "C5", 7, 6, 1),
( "C6", 7, 5, 1),
( "C7", 7, 6, 0),
( "D1", 0, 4, 0),
( "D2", 0, 5, 0),
( "D3", 0, 2, 0),
( "D4", 5, 9, 1),
( "D6", 7, 4, 1),
( "D7", 7, 5, 0),
( "E2", 0, 4, 1),
( "E6", 6, 0, 1),
( "E7", 7, 4, 0),
( "F1", 0, 2, 1),
( "F2", 0, 1, 0),
( "F3", 3, 0, 1),
( "F4", 4, 0, 0),
( "F5", 5, 0, 0),
( "F6", 6, 0, 0),
( "F7", 7, 3, 1),
( "G1", 0, 1, 1),
( "G3", 3, 0, 0),
( "G4", 4, 0, 1),
( "G6", 5, 0, 1),
],
"5k-sg48": [
( "2", 8, 0, 0),
( "3", 9, 0, 1),
( "4", 9, 0, 0),
( "6", 13, 0, 1),
( "9", 15, 0, 0),
( "10", 16, 0, 0),
( "11", 17, 0, 0),
( "12", 18, 0, 0),
( "13", 19, 0, 0),
( "14", 23, 0, 0),
( "15", 24, 0, 0),
( "16", 24, 0, 1),
( "17", 23, 0, 1),
( "18", 22, 0, 1),
( "19", 21, 0, 1),
( "20", 19, 0, 1),
( "21", 18, 0, 1),
( "23", 19, 31, 0),
( "25", 19, 31, 1),
( "26", 18, 31, 0),
( "27", 18, 31, 1),
( "28", 17, 31, 0),
( "31", 16, 31, 1),
( "32", 16, 31, 0),
( "34", 13, 31, 1),
( "35", 12, 31, 1),
( "36", 9, 31, 1),
( "37", 13, 31, 0),
( "38", 8, 31, 1),
( "39", 4, 31, 0),
( "40", 5, 31, 0),
( "41", 6, 31, 0),
( "42", 8, 31, 0),
( "43", 9, 31, 0),
( "44", 6, 0, 1),
( "45", 7, 0, 1),
( "46", 5, 0, 0),
( "47", 6, 0, 0),
( "48", 7, 0, 0),
],
}
# This database contains the locations of the configuration bits of the DSP tiles.
# The standard configuration is stored under the key "default". To override it
# for a particular DSP on a particular device, use the key "{device}_{x}_{y}",
# where {x} and {y} are the location of that DSP's DSP0 tile (NOT the tile the
# cbit is in). The x and y offsets in each entry are relative to the DSP0 tile.
# Configuration-bit locations for DSP tiles.
# Each value is an (x_offset, y_offset, cbit_name) tuple; the offsets are
# relative to the DSP's DSP0 tile (see the comment above this database).
dsp_config_db = {
# Standard bit layout used for every DSP unless overridden by a
# device-specific key below.
"default" : {
"C_REG": (0, 0, "CBIT_0"),
"A_REG": (0, 0, "CBIT_1"),
"B_REG": (0, 0, "CBIT_2"),
"D_REG": (0, 0, "CBIT_3"),
"TOP_8x8_MULT_REG": (0, 0, "CBIT_4"),
"BOT_8x8_MULT_REG": (0, 0, "CBIT_5"),
"PIPELINE_16x16_MULT_REG1": (0, 0, "CBIT_6"),
"PIPELINE_16x16_MULT_REG2": (0, 0, "CBIT_7"),
"TOPOUTPUT_SELECT_0": (0, 1, "CBIT_0"),
"TOPOUTPUT_SELECT_1": (0, 1, "CBIT_1"),
"TOPADDSUB_LOWERINPUT_0": (0, 1, "CBIT_2"),
"TOPADDSUB_LOWERINPUT_1": (0, 1, "CBIT_3"),
"TOPADDSUB_UPPERINPUT": (0, 1, "CBIT_4"),
"TOPADDSUB_CARRYSELECT_0": (0, 1, "CBIT_5"),
"TOPADDSUB_CARRYSELECT_1": (0, 1, "CBIT_6"),
"BOTOUTPUT_SELECT_0": (0, 1, "CBIT_7"),
"BOTOUTPUT_SELECT_1": (0, 2, "CBIT_0"),
"BOTADDSUB_LOWERINPUT_0": (0, 2, "CBIT_1"),
"BOTADDSUB_LOWERINPUT_1": (0, 2, "CBIT_2"),
"BOTADDSUB_UPPERINPUT": (0, 2, "CBIT_3"),
"BOTADDSUB_CARRYSELECT_0": (0, 2, "CBIT_4"),
"BOTADDSUB_CARRYSELECT_1": (0, 2, "CBIT_5"),
"MODE_8x8": (0, 2, "CBIT_6"),
"A_SIGNED": (0, 2, "CBIT_7"),
"B_SIGNED": (0, 3, "CBIT_0")
},
# Override for the DSP whose DSP0 tile sits at (0, 15) on the "5k"
# device; apparently only the bits that differ from "default" are
# listed here — TODO confirm against the consumer of this table.
"5k_0_15": {
"TOPOUTPUT_SELECT_1": (0, 4, "CBIT_3"),
"TOPADDSUB_LOWERINPUT_0": (0, 4, "CBIT_4"),
"TOPADDSUB_LOWERINPUT_1": (0, 4, "CBIT_5"),
"TOPADDSUB_UPPERINPUT": (0, 4, "CBIT_6")
}
}
# SPRAM data for UltraPlus devices; use icefuzz/tests/fuzz_spram.py
# to regenerate this database.
# SPRAM pin-to-net mapping, keyed by device name ("5k") and then by what
# looks like the SPRAM cell's (x, y, z) location — TODO confirm the key
# format against the generator (icefuzz/tests/fuzz_spram.py).
# Each inner value maps an SPRAM port/config name to an (x, y, net_name)
# tuple naming the fabric net (or CBIT) that carries that signal.
spram_db = {
"5k" : {
(0, 0, 1): {
"ADDRESS_0": (0, 2, "lutff_0/in_1"),
"ADDRESS_10": (0, 2, "lutff_2/in_0"),
"ADDRESS_11": (0, 2, "lutff_3/in_0"),
"ADDRESS_12": (0, 2, "lutff_4/in_0"),
"ADDRESS_13": (0, 2, "lutff_5/in_0"),
"ADDRESS_1": (0, 2, "lutff_1/in_1"),
"ADDRESS_2": (0, 2, "lutff_2/in_1"),
"ADDRESS_3": (0, 2, "lutff_3/in_1"),
"ADDRESS_4": (0, 2, "lutff_4/in_1"),
"ADDRESS_5": (0, 2, "lutff_5/in_1"),
"ADDRESS_6": (0, 2, "lutff_6/in_1"),
"ADDRESS_7": (0, 2, "lutff_7/in_1"),
"ADDRESS_8": (0, 2, "lutff_0/in_0"),
"ADDRESS_9": (0, 2, "lutff_1/in_0"),
"CHIPSELECT": (0, 3, "lutff_6/in_1"),
"CLOCK": (0, 1, "clk"),
"DATAIN_0": (0, 1, "lutff_0/in_3"),
"DATAIN_10": (0, 1, "lutff_2/in_1"),
"DATAIN_11": (0, 1, "lutff_3/in_1"),
"DATAIN_12": (0, 1, "lutff_4/in_1"),
"DATAIN_13": (0, 1, "lutff_5/in_1"),
"DATAIN_14": (0, 1, "lutff_6/in_1"),
"DATAIN_15": (0, 1, "lutff_7/in_1"),
"DATAIN_1": (0, 1, "lutff_1/in_3"),
"DATAIN_2": (0, 1, "lutff_2/in_3"),
"DATAIN_3": (0, 1, "lutff_3/in_3"),
"DATAIN_4": (0, 1, "lutff_4/in_3"),
"DATAIN_5": (0, 1, "lutff_5/in_3"),
"DATAIN_6": (0, 1, "lutff_6/in_3"),
"DATAIN_7": (0, 1, "lutff_7/in_3"),
"DATAIN_8": (0, 1, "lutff_0/in_1"),
"DATAIN_9": (0, 1, "lutff_1/in_1"),
"DATAOUT_0": (0, 1, "slf_op_0"),
"DATAOUT_10": (0, 2, "slf_op_2"),
"DATAOUT_11": (0, 2, "slf_op_3"),
"DATAOUT_12": (0, 2, "slf_op_4"),
"DATAOUT_13": (0, 2, "slf_op_5"),
"DATAOUT_14": (0, 2, "slf_op_6"),
"DATAOUT_15": (0, 2, "slf_op_7"),
"DATAOUT_1": (0, 1, "slf_op_1"),
"DATAOUT_2": (0, 1, "slf_op_2"),
"DATAOUT_3": (0, 1, "slf_op_3"),
"DATAOUT_4": (0, 1, "slf_op_4"),
"DATAOUT_5": (0, 1, "slf_op_5"),
"DATAOUT_6": (0, 1, "slf_op_6"),
"DATAOUT_7": (0, 1, "slf_op_7"),
"DATAOUT_8": (0, 2, "slf_op_0"),
"DATAOUT_9": (0, 2, "slf_op_1"),
"MASKWREN_0": (0, 3, "lutff_0/in_0"),
"MASKWREN_1": (0, 3, "lutff_1/in_0"),
"MASKWREN_2": (0, 3, "lutff_2/in_0"),
"MASKWREN_3": (0, 3, "lutff_3/in_0"),
"POWEROFF": (0, 4, "lutff_4/in_3"),
"SLEEP": (0, 4, "lutff_2/in_3"),
"SPRAM_EN": (0, 1, "CBIT_0"),
"STANDBY": (0, 4, "lutff_0/in_3"),
"WREN": (0, 3, "lutff_4/in_1"),
},
(0, 0, 2): {
"ADDRESS_0": (0, 2, "lutff_6/in_0"),
"ADDRESS_10": (0, 3, "lutff_0/in_1"),
"ADDRESS_11": (0, 3, "lutff_1/in_1"),
"ADDRESS_12": (0, 3, "lutff_2/in_1"),
"ADDRESS_13": (0, 3, "lutff_3/in_1"),
"ADDRESS_1": (0, 2, "lutff_7/in_0"),
"ADDRESS_2": (0, 3, "lutff_0/in_3"),
"ADDRESS_3": (0, 3, "lutff_1/in_3"),
"ADDRESS_4": (0, 3, "lutff_2/in_3"),
"ADDRESS_5": (0, 3, "lutff_3/in_3"),
"ADDRESS_6": (0, 3, "lutff_4/in_3"),
"ADDRESS_7": (0, 3, "lutff_5/in_3"),
"ADDRESS_8": (0, 3, "lutff_6/in_3"),
"ADDRESS_9": (0, 3, "lutff_7/in_3"),
"CHIPSELECT": (0, 3, "lutff_7/in_1"),
"CLOCK": (0, 2, "clk"),
"DATAIN_0": (0, 1, "lutff_0/in_0"),
"DATAIN_10": (0, 2, "lutff_2/in_3"),
"DATAIN_11": (0, 2, "lutff_3/in_3"),
"DATAIN_12": (0, 2, "lutff_4/in_3"),
"DATAIN_13": (0, 2, "lutff_5/in_3"),
"DATAIN_14": (0, 2, "lutff_6/in_3"),
"DATAIN_15": (0, 2, "lutff_7/in_3"),
"DATAIN_1": (0, 1, "lutff_1/in_0"),
"DATAIN_2": (0, 1, "lutff_2/in_0"),
"DATAIN_3": (0, 1, "lutff_3/in_0"),
"DATAIN_4": (0, 1, "lutff_4/in_0"),
"DATAIN_5": (0, 1, "lutff_5/in_0"),
"DATAIN_6": (0, 1, "lutff_6/in_0"),
"DATAIN_7": (0, 1, "lutff_7/in_0"),
"DATAIN_8": (0, 2, "lutff_0/in_3"),
"DATAIN_9": (0, 2, "lutff_1/in_3"),
"DATAOUT_0": (0, 3, "slf_op_0"),
"DATAOUT_10": (0, 4, "slf_op_2"),
"DATAOUT_11": (0, 4, "slf_op_3"),
"DATAOUT_12": (0, 4, "slf_op_4"),
"DATAOUT_13": (0, 4, "slf_op_5"),
"DATAOUT_14": (0, 4, "slf_op_6"),
"DATAOUT_15": (0, 4, "slf_op_7"),
"DATAOUT_1": (0, 3, "slf_op_1"),
"DATAOUT_2": (0, 3, "slf_op_2"),
"DATAOUT_3": (0, 3, "slf_op_3"),
"DATAOUT_4": (0, 3, "slf_op_4"),
"DATAOUT_5": (0, 3, "slf_op_5"),
"DATAOUT_6": (0, 3, "slf_op_6"),
"DATAOUT_7": (0, 3, "slf_op_7"),
"DATAOUT_8": (0, 4, "slf_op_0"),
"DATAOUT_9": (0, 4, "slf_op_1"),
"MASKWREN_0": (0, 3, "lutff_4/in_0"),
"MASKWREN_1": (0, 3, "lutff_5/in_0"),
"MASKWREN_2": (0, 3, "lutff_6/in_0"),
"MASKWREN_3": (0, 3, "lutff_7/in_0"),
"POWEROFF": (0, 4, "lutff_5/in_3"),
"SLEEP": (0, 4, "lutff_3/in_3"),
"SPRAM_EN": (0, 1, "CBIT_1"),
"STANDBY": (0, 4, "lutff_1/in_3"),
"WREN": (0, 3, "lutff_5/in_1"),
},
# The two entries below mirror the (0, 0, *) entries above with x = 25
# (the right-hand SPRAM column) instead of x = 0.
(25, 0, 3): {
"ADDRESS_0": (25, 2, "lutff_0/in_1"),
"ADDRESS_10": (25, 2, "lutff_2/in_0"),
"ADDRESS_11": (25, 2, "lutff_3/in_0"),
"ADDRESS_12": (25, 2, "lutff_4/in_0"),
"ADDRESS_13": (25, 2, "lutff_5/in_0"),
"ADDRESS_1": (25, 2, "lutff_1/in_1"),
"ADDRESS_2": (25, 2, "lutff_2/in_1"),
"ADDRESS_3": (25, 2, "lutff_3/in_1"),
"ADDRESS_4": (25, 2, "lutff_4/in_1"),
"ADDRESS_5": (25, 2, "lutff_5/in_1"),
"ADDRESS_6": (25, 2, "lutff_6/in_1"),
"ADDRESS_7": (25, 2, "lutff_7/in_1"),
"ADDRESS_8": (25, 2, "lutff_0/in_0"),
"ADDRESS_9": (25, 2, "lutff_1/in_0"),
"CHIPSELECT": (25, 3, "lutff_6/in_1"),
"CLOCK": (25, 1, "clk"),
"DATAIN_0": (25, 1, "lutff_0/in_3"),
"DATAIN_10": (25, 1, "lutff_2/in_1"),
"DATAIN_11": (25, 1, "lutff_3/in_1"),
"DATAIN_12": (25, 1, "lutff_4/in_1"),
"DATAIN_13": (25, 1, "lutff_5/in_1"),
"DATAIN_14": (25, 1, "lutff_6/in_1"),
"DATAIN_15": (25, 1, "lutff_7/in_1"),
"DATAIN_1": (25, 1, "lutff_1/in_3"),
"DATAIN_2": (25, 1, "lutff_2/in_3"),
"DATAIN_3": (25, 1, "lutff_3/in_3"),
"DATAIN_4": (25, 1, "lutff_4/in_3"),
"DATAIN_5": (25, 1, "lutff_5/in_3"),
"DATAIN_6": (25, 1, "lutff_6/in_3"),
"DATAIN_7": (25, 1, "lutff_7/in_3"),
"DATAIN_8": (25, 1, "lutff_0/in_1"),
"DATAIN_9": (25, 1, "lutff_1/in_1"),
"DATAOUT_0": (25, 1, "slf_op_0"),
"DATAOUT_10": (25, 2, "slf_op_2"),
"DATAOUT_11": (25, 2, "slf_op_3"),
"DATAOUT_12": (25, 2, "slf_op_4"),
"DATAOUT_13": (25, 2, "slf_op_5"),
"DATAOUT_14": (25, 2, "slf_op_6"),
"DATAOUT_15": (25, 2, "slf_op_7"),
"DATAOUT_1": (25, 1, "slf_op_1"),
"DATAOUT_2": (25, 1, "slf_op_2"),
"DATAOUT_3": (25, 1, "slf_op_3"),
"DATAOUT_4": (25, 1, "slf_op_4"),
"DATAOUT_5": (25, 1, "slf_op_5"),
"DATAOUT_6": (25, 1, "slf_op_6"),
"DATAOUT_7": (25, 1, "slf_op_7"),
"DATAOUT_8": (25, 2, "slf_op_0"),
"DATAOUT_9": (25, 2, "slf_op_1"),
"MASKWREN_0": (25, 3, "lutff_0/in_0"),
"MASKWREN_1": (25, 3, "lutff_1/in_0"),
"MASKWREN_2": (25, 3, "lutff_2/in_0"),
"MASKWREN_3": (25, 3, "lutff_3/in_0"),
"POWEROFF": (25, 4, "lutff_4/in_3"),
"SLEEP": (25, 4, "lutff_2/in_3"),
"SPRAM_EN": (25, 1, "CBIT_0"),
"STANDBY": (25, 4, "lutff_0/in_3"),
"WREN": (25, 3, "lutff_4/in_1"),
},
(25, 0, 4): {
"ADDRESS_0": (25, 2, "lutff_6/in_0"),
"ADDRESS_10": (25, 3, "lutff_0/in_1"),
"ADDRESS_11": (25, 3, "lutff_1/in_1"),
"ADDRESS_12": (25, 3, "lutff_2/in_1"),
"ADDRESS_13": (25, 3, "lutff_3/in_1"),
"ADDRESS_1": (25, 2, "lutff_7/in_0"),
"ADDRESS_2": (25, 3, "lutff_0/in_3"),
"ADDRESS_3": (25, 3, "lutff_1/in_3"),
"ADDRESS_4": (25, 3, "lutff_2/in_3"),
"ADDRESS_5": (25, 3, "lutff_3/in_3"),
"ADDRESS_6": (25, 3, "lutff_4/in_3"),
"ADDRESS_7": (25, 3, "lutff_5/in_3"),
"ADDRESS_8": (25, 3, "lutff_6/in_3"),
"ADDRESS_9": (25, 3, "lutff_7/in_3"),
"CHIPSELECT": (25, 3, "lutff_7/in_1"),
"CLOCK": (25, 2, "clk"),
"DATAIN_0": (25, 1, "lutff_0/in_0"),
"DATAIN_10": (25, 2, "lutff_2/in_3"),
"DATAIN_11": (25, 2, "lutff_3/in_3"),
"DATAIN_12": (25, 2, "lutff_4/in_3"),
"DATAIN_13": (25, 2, "lutff_5/in_3"),
"DATAIN_14": (25, 2, "lutff_6/in_3"),
"DATAIN_15": (25, 2, "lutff_7/in_3"),
"DATAIN_1": (25, 1, "lutff_1/in_0"),
"DATAIN_2": (25, 1, "lutff_2/in_0"),
"DATAIN_3": (25, 1, "lutff_3/in_0"),
"DATAIN_4": (25, 1, "lutff_4/in_0"),
"DATAIN_5": (25, 1, "lutff_5/in_0"),
"DATAIN_6": (25, 1, "lutff_6/in_0"),
"DATAIN_7": (25, 1, "lutff_7/in_0"),
"DATAIN_8": (25, 2, "lutff_0/in_3"),
"DATAIN_9": (25, 2, "lutff_1/in_3"),
"DATAOUT_0": (25, 3, "slf_op_0"),
"DATAOUT_10": (25, 4, "slf_op_2"),
"DATAOUT_11": (25, 4, "slf_op_3"),
"DATAOUT_12": (25, 4, "slf_op_4"),
"DATAOUT_13": (25, 4, "slf_op_5"),
"DATAOUT_14": (25, 4, "slf_op_6"),
"DATAOUT_15": (25, 4, "slf_op_7"),
"DATAOUT_1": (25, 3, "slf_op_1"),
"DATAOUT_2": (25, 3, "slf_op_2"),
"DATAOUT_3": (25, 3, "slf_op_3"),
"DATAOUT_4": (25, 3, "slf_op_4"),
"DATAOUT_5": (25, 3, "slf_op_5"),
"DATAOUT_6": (25, 3, "slf_op_6"),
"DATAOUT_7": (25, 3, "slf_op_7"),
"DATAOUT_8": (25, 4, "slf_op_0"),
"DATAOUT_9": (25, 4, "slf_op_1"),
"MASKWREN_0": (25, 3, "lutff_4/in_0"),
"MASKWREN_1": (25, 3, "lutff_5/in_0"),
"MASKWREN_2": (25, 3, "lutff_6/in_0"),
"MASKWREN_3": (25, 3, "lutff_7/in_0"),
"POWEROFF": (25, 4, "lutff_5/in_3"),
"SLEEP": (25, 4, "lutff_3/in_3"),
"SPRAM_EN": (25, 1, "CBIT_1"),
"STANDBY": (25, 4, "lutff_1/in_3"),
"WREN": (25, 3, "lutff_5/in_1"),
}
}
}
# This database contains the data for extra cells that are not covered
# by any of the databases above.
extra_cells_db = {
"5k" : {
("HFOSC", (0, 31, 1)) : {
"CLKHFPU": (0, 29, "lutff_0/in_1"),
"CLKHFEN": (0, 29, "lutff_7/in_3"),
"CLKHF": (0, 29, "glb_netwk_4"),
"CLKHF_FABRIC": (0, 28, "slf_op_7"),
"TRIM0": (25, 28, "lutff_4/in_0"),
"TRIM1": (25, 28, "lutff_5/in_0"),
"TRIM2": (25, 28, "lutff_6/in_0"),
"TRIM3": (25, 28, "lutff_7/in_0"),
"TRIM4": (25, 29, "lutff_0/in_3"),
"TRIM5": (25, 29, "lutff_1/in_3"),
"TRIM6": (25, 29, "lutff_2/in_3"),
"TRIM7": (25, 29, "lutff_3/in_3"),
"TRIM8": (25, 29, "lutff_4/in_3"),
"TRIM9": (25, 29, "lutff_5/in_3"),
"CLKHF_DIV_1": (0, 16, "CBIT_4"),
"CLKHF_DIV_0": (0, 16, "CBIT_3"),
"TRIM_EN": (0, 16, "CBIT_5")
},
("LFOSC", (25, 31, 1)) : {
"CLKLFPU": (25, 29, "lutff_0/in_1"),
"CLKLFEN": (25, 29, "lutff_7/in_3"),
"CLKLF": (25, 29, "glb_netwk_5"),
"CLKLF_FABRIC": (25, 29, "slf_op_0")
},
("RGBA_DRV", (0, 30, 0)) : {
"CURREN": (25, 29, "lutff_6/in_3"),
"RGBLEDEN": (0, 30, "lutff_1/in_1"),
"RGB0PWM": (0, 30, "lutff_2/in_1"),
"RGB1PWM": (0, 30, "lutff_3/in_1"),
"RGB2PWM": (0, 30, "lutff_4/in_1"),
"RGBA_DRV_EN": (0, 28, "CBIT_5"),
"RGB0_CURRENT_0": (0, 28, "CBIT_6"),
"RGB0_CURRENT_1": (0, 28, "CBIT_7"),
"RGB0_CURRENT_2": (0, 29, "CBIT_0"),
"RGB0_CURRENT_3": (0, 29, "CBIT_1"),
"RGB0_CURRENT_4": (0, 29, "CBIT_2"),
"RGB0_CURRENT_5": (0, 29, "CBIT_3"),
"RGB1_CURRENT_0": (0, 29, "CBIT_4"),
"RGB1_CURRENT_1": (0, 29, "CBIT_5"),
"RGB1_CURRENT_2": (0, 29, "CBIT_6"),
"RGB1_CURRENT_3": (0, 29, "CBIT_7"),
"RGB1_CURRENT_4": (0, 30, "CBIT_0"),
"RGB1_CURRENT_5": (0, 30, "CBIT_1"),
"RGB2_CURRENT_0": (0, 30, "CBIT_2"),
"RGB2_CURRENT_1": (0, 30, "CBIT_3"),
"RGB2_CURRENT_2": (0, 30, "CBIT_4"),
"RGB2_CURRENT_3": (0, 30, "CBIT_5"),
"RGB2_CURRENT_4": (0, 30, "CBIT_6"),
"RGB2_CURRENT_5": (0, 30, "CBIT_7"),
"CURRENT_MODE": (0, 28, "CBIT_4"),
},
("I2C", (0, 31, 0)): {
"I2CIRQ": (0, 30, "slf_op_7"),
"I2CWKUP": (0, 29, "slf_op_5"),
"I2C_ENABLE_0": (13, 31, "cbit2usealt_in_0"),
"I2C_ENABLE_1": (12, 31, "cbit2usealt_in_1"),
"SBACKO": (0, 30, "slf_op_6"),
"SBADRI0": (0, 30, "lutff_1/in_0"),
"SBADRI1": (0, 30, "lutff_2/in_0"),
"SBADRI2": (0, 30, "lutff_3/in_0"),
"SBADRI3": (0, 30, "lutff_4/in_0"),
"SBADRI4": (0, 30, "lutff_5/in_0"),
"SBADRI5": (0, 30, "lutff_6/in_0"),
"SBADRI6": (0, 30, "lutff_7/in_0"),
"SBADRI7": (0, 29, "lutff_2/in_0"),
"SBCLKI": (0, 30, "clk"),
"SBDATI0": (0, 29, "lutff_5/in_0"),
"SBDATI1": (0, 29, "lutff_6/in_0"),
"SBDATI2": (0, 29, "lutff_7/in_0"),
"SBDATI3": (0, 30, "lutff_0/in_3"),
"SBDATI4": (0, 30, "lutff_5/in_1"),
"SBDATI5": (0, 30, "lutff_6/in_1"),
"SBDATI6": (0, 30, "lutff_7/in_1"),
"SBDATI7": (0, 30, "lutff_0/in_0"),
"SBDATO0": (0, 29, "slf_op_6"),
"SBDATO1": (0, 29, "slf_op_7"),
"SBDATO2": (0, 30, "slf_op_0"),
"SBDATO3": (0, 30, "slf_op_1"),
"SBDATO4": (0, 30, "slf_op_2"),
"SBDATO5": (0, 30, "slf_op_3"),
"SBDATO6": (0, 30, "slf_op_4"),
"SBDATO7": (0, 30, "slf_op_5"),
"SBRWI": (0, 29, "lutff_4/in_0"),
"SBSTBI": (0, 29, "lutff_3/in_0"),
"SCLI": (0, 29, "lutff_2/in_1"),
"SCLO": (0, 29, "slf_op_3"),
"SCLOE": (0, 29, "slf_op_4"),
"SDAI": (0, 29, "lutff_1/in_1"),
"SDAO": (0, 29, "slf_op_1"),
"SDAOE": (0, 29, "slf_op_2"),
"SDA_INPUT_DELAYED": (12, 31, "SDA_input_delay"),
"SDA_OUTPUT_DELAYED": (12, 31, "SDA_output_delay"),
},
("I2C", (25, 31, 0)): {
"I2CIRQ": (25, 30, "slf_op_7"),
"I2CWKUP": (25, 29, "slf_op_5"),
"I2C_ENABLE_0": (19, 31, "cbit2usealt_in_0"),
"I2C_ENABLE_1": (19, 31, "cbit2usealt_in_1"),
"SBACKO": (25, 30, "slf_op_6"),
"SBADRI0": (25, 30, "lutff_1/in_0"),
"SBADRI1": (25, 30, "lutff_2/in_0"),
"SBADRI2": (25, 30, "lutff_3/in_0"),
"SBADRI3": (25, 30, "lutff_4/in_0"),
"SBADRI4": (25, 30, "lutff_5/in_0"),
"SBADRI5": (25, 30, "lutff_6/in_0"),
"SBADRI6": (25, 30, "lutff_7/in_0"),
"SBADRI7": (25, 29, "lutff_2/in_0"),
"SBCLKI": (25, 30, "clk"),
"SBDATI0": (25, 29, "lutff_5/in_0"),
"SBDATI1": (25, 29, "lutff_6/in_0"),
"SBDATI2": (25, 29, "lutff_7/in_0"),
"SBDATI3": (25, 30, "lutff_0/in_3"),
"SBDATI4": (25, 30, "lutff_5/in_1"),
"SBDATI5": (25, 30, "lutff_6/in_1"),
"SBDATI6": (25, 30, "lutff_7/in_1"),
"SBDATI7": (25, 30, "lutff_0/in_0"),
"SBDATO0": (25, 29, "slf_op_6"),
"SBDATO1": (25, 29, "slf_op_7"),
"SBDATO2": (25, 30, "slf_op_0"),
"SBDATO3": (25, 30, "slf_op_1"),
"SBDATO4": (25, 30, "slf_op_2"),
"SBDATO5": (25, 30, "slf_op_3"),
"SBDATO6": (25, 30, "slf_op_4"),
"SBDATO7": (25, 30, "slf_op_5"),
"SBRWI": (25, 29, "lutff_4/in_0"),
"SBSTBI": (25, 29, "lutff_3/in_0"),
"SCLI": (25, 29, "lutff_2/in_1"),
"SCLO": (25, 29, "slf_op_3"),
"SCLOE": (25, 29, "slf_op_4"),
"SDAI": (25, 29, "lutff_1/in_1"),
"SDAO": (25, 29, "slf_op_1"),
"SDAOE": (25, 29, "slf_op_2"),
"SDA_INPUT_DELAYED": (19, 31, "SDA_input_delay"),
"SDA_OUTPUT_DELAYED": (19, 31, "SDA_output_delay"),
},
("SPI", (0, 0, 0)): {
"MCSNO0": (0, 21, "slf_op_2"),
"MCSNO1": (0, 21, "slf_op_4"),
"MCSNO2": (0, 21, "slf_op_7"),
"MCSNO3": (0, 22, "slf_op_1"),
"MCSNOE0": (0, 21, "slf_op_3"),
"MCSNOE1": (0, 21, "slf_op_5"),
"MCSNOE2": (0, 22, "slf_op_0"),
"MCSNOE3": (0, 22, "slf_op_2"),
"MI": (0, 22, "lutff_0/in_1"),
"MO": (0, 20, "slf_op_6"),
"MOE": (0, 20, "slf_op_7"),
"SBACKO": (0, 20, "slf_op_1"),
"SBADRI0": (0, 19, "lutff_1/in_1"),
"SBADRI1": (0, 19, "lutff_2/in_1"),
"SBADRI2": (0, 20, "lutff_0/in_3"),
"SBADRI3": (0, 20, "lutff_1/in_3"),
"SBADRI4": (0, 20, "lutff_2/in_3"),
"SBADRI5": (0, 20, "lutff_3/in_3"),
"SBADRI6": (0, 20, "lutff_4/in_3"),
"SBADRI7": (0, 20, "lutff_5/in_3"),
"SBCLKI": (0, 20, "clk"),
"SBDATI0": (0, 19, "lutff_1/in_3"),
"SBDATI1": (0, 19, "lutff_2/in_3"),
"SBDATI2": (0, 19, "lutff_3/in_3"),
"SBDATI3": (0, 19, "lutff_4/in_3"),
"SBDATI4": (0, 19, "lutff_5/in_3"),
"SBDATI5": (0, 19, "lutff_6/in_3"),
"SBDATI6": (0, 19, "lutff_7/in_3"),
"SBDATI7": (0, 19, "lutff_0/in_1"),
"SBDATO0": (0, 19, "slf_op_1"),
"SBDATO1": (0, 19, "slf_op_2"),
"SBDATO2": (0, 19, "slf_op_3"),
"SBDATO3": (0, 19, "slf_op_4"),
"SBDATO4": (0, 19, "slf_op_5"),
"SBDATO5": (0, 19, "slf_op_6"),
"SBDATO6": (0, 19, "slf_op_7"),
"SBDATO7": (0, 20, "slf_op_0"),
"SBRWI": (0, 19, "lutff_0/in_3"),
"SBSTBI": (0, 20, "lutff_6/in_3"),
"SCKI": (0, 22, "lutff_1/in_1"),
"SCKO": (0, 21, "slf_op_0"),
"SCKOE": (0, 21, "slf_op_1"),
"SCSNI": (0, 22, "lutff_2/in_1"),
"SI": (0, 22, "lutff_7/in_3"),
"SO": (0, 20, "slf_op_4"),
"SOE": (0, 20, "slf_op_5"),
"SPIIRQ": (0, 20, "slf_op_2"),
"SPIWKUP": (0, 20, "slf_op_3"),
"SPI_ENABLE_0": (7, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_1": (7, 0, "cbit2usealt_in_1"),
"SPI_ENABLE_2": (6, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_3": (6, 0, "cbit2usealt_in_1"),
},
("SPI", (25, 0, 1)): {
"MCSNO0": (25, 21, "slf_op_2"),
"MCSNO1": (25, 21, "slf_op_4"),
"MCSNO2": (25, 21, "slf_op_7"),
"MCSNO3": (25, 22, "slf_op_1"),
"MCSNOE0": (25, 21, "slf_op_3"),
"MCSNOE1": (25, 21, "slf_op_5"),
"MCSNOE2": (25, 22, "slf_op_0"),
"MCSNOE3": (25, 22, "slf_op_2"),
"MI": (25, 22, "lutff_0/in_1"),
"MO": (25, 20, "slf_op_6"),
"MOE": (25, 20, "slf_op_7"),
"SBACKO": (25, 20, "slf_op_1"),
"SBADRI0": (25, 19, "lutff_1/in_1"),
"SBADRI1": (25, 19, "lutff_2/in_1"),
"SBADRI2": (25, 20, "lutff_0/in_3"),
"SBADRI3": (25, 20, "lutff_1/in_3"),
"SBADRI4": (25, 20, "lutff_2/in_3"),
"SBADRI5": (25, 20, "lutff_3/in_3"),
"SBADRI6": (25, 20, "lutff_4/in_3"),
"SBADRI7": (25, 20, "lutff_5/in_3"),
"SBCLKI": (25, 20, "clk"),
"SBDATI0": (25, 19, "lutff_1/in_3"),
"SBDATI1": (25, 19, "lutff_2/in_3"),
"SBDATI2": (25, 19, "lutff_3/in_3"),
"SBDATI3": (25, 19, "lutff_4/in_3"),
"SBDATI4": (25, 19, "lutff_5/in_3"),
"SBDATI5": (25, 19, "lutff_6/in_3"),
"SBDATI6": (25, 19, "lutff_7/in_3"),
"SBDATI7": (25, 19, "lutff_0/in_1"),
"SBDATO0": (25, 19, "slf_op_1"),
"SBDATO1": (25, 19, "slf_op_2"),
"SBDATO2": (25, 19, "slf_op_3"),
"SBDATO3": (25, 19, "slf_op_4"),
"SBDATO4": (25, 19, "slf_op_5"),
"SBDATO5": (25, 19, "slf_op_6"),
"SBDATO6": (25, 19, "slf_op_7"),
"SBDATO7": (25, 20, "slf_op_0"),
"SBRWI": (25, 19, "lutff_0/in_3"),
"SBSTBI": (25, 20, "lutff_6/in_3"),
"SCKI": (25, 22, "lutff_1/in_1"),
"SCKO": (25, 21, "slf_op_0"),
"SCKOE": (25, 21, "slf_op_1"),
"SCSNI": (25, 22, "lutff_2/in_1"),
"SI": (25, 22, "lutff_7/in_3"),
"SO": (25, 20, "slf_op_4"),
"SOE": (25, 20, "slf_op_5"),
"SPIIRQ": (25, 20, "slf_op_2"),
"SPIWKUP": (25, 20, "slf_op_3"),
"SPI_ENABLE_0": (23, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_1": (24, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_2": (23, 0, "cbit2usealt_in_1"),
"SPI_ENABLE_3": (24, 0, "cbit2usealt_in_1"),
},
("LEDDA_IP", (0, 31, 2)): {
"LEDDADDR0": (0, 28, "lutff_4/in_0"),
"LEDDADDR1": (0, 28, "lutff_5/in_0"),
"LEDDADDR2": (0, 28, "lutff_6/in_0"),
"LEDDADDR3": (0, 28, "lutff_7/in_0"),
"LEDDCLK": (0, 29, "clk"),
"LEDDCS": (0, 28, "lutff_2/in_0"),
"LEDDDAT0": (0, 28, "lutff_2/in_1"),
"LEDDDAT1": (0, 28, "lutff_3/in_1"),
"LEDDDAT2": (0, 28, "lutff_4/in_1"),
"LEDDDAT3": (0, 28, "lutff_5/in_1"),
"LEDDDAT4": (0, 28, "lutff_6/in_1"),
"LEDDDAT5": (0, 28, "lutff_7/in_1"),
"LEDDDAT6": (0, 28, "lutff_0/in_0"),
"LEDDDAT7": (0, 28, "lutff_1/in_0"),
"LEDDDEN": (0, 28, "lutff_1/in_1"),
"LEDDEXE": (0, 28, "lutff_0/in_1"),
"LEDDON": (0, 29, "slf_op_0"),
"PWMOUT0": (0, 28, "slf_op_4"),
"PWMOUT1": (0, 28, "slf_op_5"),
"PWMOUT2": (0, 28, "slf_op_6"),
},
}
}
# Parse the raw chipdb text blobs into per-tile-type config databases.
# Each db is a list of entries: [bit_pattern_list, entry_type, *args].
iotile_full_db = parse_db(iceboxdb.database_io_txt)
logictile_db = parse_db(iceboxdb.database_logic_txt, "1k")
logictile_5k_db = parse_db(iceboxdb.database_logic_txt, "5k")
logictile_8k_db = parse_db(iceboxdb.database_logic_txt, "8k")
logictile_384_db = parse_db(iceboxdb.database_logic_txt, "384")
rambtile_db = parse_db(iceboxdb.database_ramb_txt, "1k")
ramttile_db = parse_db(iceboxdb.database_ramt_txt, "1k")
rambtile_5k_db = parse_db(iceboxdb.database_ramb_5k_txt, "5k")
ramttile_5k_db = parse_db(iceboxdb.database_ramt_5k_txt, "5k")
rambtile_8k_db = parse_db(iceboxdb.database_ramb_8k_txt, "8k")
ramttile_8k_db = parse_db(iceboxdb.database_ramt_8k_txt, "8k")
ipcon_5k_db = parse_db(iceboxdb.database_ipcon_5k_txt, "5k")
dsp0_5k_db = parse_db(iceboxdb.database_dsp0_5k_txt, "5k")
dsp1_5k_db = parse_db(iceboxdb.database_dsp1_5k_txt, "5k")
#This bit doesn't exist in DB because icecube won't ever set it,
#but it exists
dsp1_5k_db.append([["B4[7]"], "IpConfig", "CBIT_5"])
dsp2_5k_db = parse_db(iceboxdb.database_dsp2_5k_txt, "5k")
dsp3_5k_db = parse_db(iceboxdb.database_dsp3_5k_txt, "5k")
#Add missing LC_ bits to DSP and IPCon databases
# Copy the bit patterns (entry[0]) for every LC_* entry from the 5k logic
# tile db into the DSP/IPCon dbs, which list the entries without bits.
for db_to_fix in [ipcon_5k_db, dsp0_5k_db, dsp1_5k_db, dsp2_5k_db, dsp3_5k_db]:
    for entry in db_to_fix:
        if len(entry) >= 2 and entry[1].startswith("LC_"):
            for lentry in logictile_5k_db:
                if len(lentry) >= 2 and lentry[1] == entry[1]:
                    entry[0] = lentry[0]
# Split the combined IO database into one database per edge.  "buffer"
# entries whose source net starts with "IO_<edge>." belong to exactly one
# edge (with that prefix stripped from a copied entry); every other entry
# is shared, as the same object, by all four edge databases.
iotile_l_db = list()
iotile_r_db = list()
iotile_t_db = list()
iotile_b_db = list()
_iotile_edge_dbs = {
    "IO_L.": iotile_l_db,
    "IO_R.": iotile_r_db,
    "IO_T.": iotile_t_db,
    "IO_B.": iotile_b_db,
}
for entry in iotile_full_db:
    edge_db = None
    if entry[1] == "buffer":
        edge_db = _iotile_edge_dbs.get(entry[2][:5])
    if edge_db is None:
        iotile_l_db.append(entry)
        iotile_r_db.append(entry)
        iotile_t_db.append(entry)
        iotile_b_db.append(entry)
    else:
        new_entry = entry[:]
        new_entry[2] = new_entry[2][5:]
        edge_db.append(new_entry)
# Every logic-tile database gets the two carry-chain entries that the
# vendor chipdb does not list explicitly.
for _logic_db in (logictile_db, logictile_5k_db, logictile_8k_db, logictile_384_db):
    _logic_db.append([["B1[49]"], "buffer", "carry_in", "carry_in_mux"])
    _logic_db.append([["B1[50]"], "CarryInSet"])
# The 5k series has a couple of extra IO configuration bits. Add them in to a copy of the db here
def _iotile_5k_extra_entries():
    """Return fresh extra IO entries for the 5k, so the top and bottom
    databases never share list objects."""
    return [
        [["B14[15]"], "IoCtrl", "padeb_test_1"],
        [["B15[14]"], "IoCtrl", "padeb_test_0"],
        [["B7[10]"], "IoCtrl", "cf_bit_32"],
        [["B6[10]"], "IoCtrl", "cf_bit_33"],
        [["B7[15]"], "IoCtrl", "cf_bit_34"],
        [["B6[15]"], "IoCtrl", "cf_bit_35"],
        [["B13[10]"], "IoCtrl", "cf_bit_36"],
        [["B12[10]"], "IoCtrl", "cf_bit_37"],
        [["B13[15]"], "IoCtrl", "cf_bit_38"],
        [["B12[15]"], "IoCtrl", "cf_bit_39"],
        [["B10[3]"], "IpConfig", "cbit2usealt_in_0"],
        [["B12[2]"], "IpConfig", "cbit2usealt_in_1"],
        [["B12[3]"], "IpConfig", "SDA_input_delay"],
        [["B15[3]"], "IpConfig", "SDA_output_delay"],
    ]
iotile_t_5k_db = list(iotile_t_db) + _iotile_5k_extra_entries()
iotile_b_5k_db = list(iotile_b_db) + _iotile_5k_extra_entries()
# Normalize all net names in every database to their canonical form, then
# deduplicate the entries within each database (sorted by a stable key).
for db in [iotile_l_db, iotile_r_db, iotile_t_db, iotile_b_db, iotile_t_5k_db, iotile_b_5k_db, logictile_db, logictile_5k_db, logictile_8k_db, logictile_384_db, rambtile_db, ramttile_db, rambtile_5k_db, ramttile_5k_db, rambtile_8k_db, ramttile_8k_db, dsp0_5k_db, dsp1_5k_db, dsp2_5k_db, dsp3_5k_db, ipcon_5k_db]:
    for entry in db:
        if entry[1] in ("buffer", "routing"):
            # NOTE(review): `db == rambtile_db` etc. are list *equality*
            # tests, not identity — this relies on the databases having
            # pairwise-distinct contents; confirm before reusing.
            entry[2] = netname_normalize(entry[2],
                            ramb=(db == rambtile_db),
                            ramt=(db == ramttile_db),
                            ramb_8k=(db in (rambtile_8k_db, rambtile_5k_db)),
                            ramt_8k=(db in (ramttile_8k_db, ramttile_5k_db)))
            entry[3] = netname_normalize(entry[3],
                            ramb=(db == rambtile_db),
                            ramt=(db == ramttile_db),
                            ramb_8k=(db in (rambtile_8k_db, rambtile_5k_db)),
                            ramt_8k=(db in (ramttile_8k_db, ramttile_5k_db)))
    # Drain the list into a dict keyed on the full entry text (the key
    # includes str(entry), so entries differing only in bit pattern stay
    # distinct), then refill it in sorted key order.
    unique_entries = dict()
    while db:
        entry = db.pop()
        key = " ".join(entry[1:]) + str(entry)
        unique_entries[key] = entry
    for key in sorted(unique_entries):
        db.append(unique_entries[key])
# Run the database self-consistency checks when executed as a script.
if __name__ == "__main__":
    run_checks()
# Add pinout for 5k UWG30 package
#!/usr/bin/env python3
#
# Copyright (C) 2015 Clifford Wolf <clifford@clifford.at>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
import iceboxdb
import re, sys
class iceconfig:
    def __init__(self):
        """Create a configuration object with no device selected."""
        self.clear()
def clear(self):
self.max_x = 0
self.max_y = 0
self.device = ""
self.warmboot = True
self.logic_tiles = dict()
self.io_tiles = dict()
self.ramb_tiles = dict()
self.ramt_tiles = dict()
self.dsp_tiles = [dict() for i in range(4)]
self.ipcon_tiles = dict()
self.ram_data = dict()
self.extra_bits = set()
self.symbols = dict()
def setup_empty_384(self):
self.clear()
self.device = "384"
self.max_x = 7
self.max_y = 9
for x in range(1, self.max_x):
for y in range(1, self.max_y):
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
def setup_empty_1k(self):
self.clear()
self.device = "1k"
self.max_x = 13
self.max_y = 17
for x in range(1, self.max_x):
for y in range(1, self.max_y):
if x in (3, 10):
if y % 2 == 1:
self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
    def setup_empty_5k(self):
        """Set up a blank iCE40 UltraPlus 5k grid: 25x31 tiles, BRAM in
        columns 6/19; left/right edges hold DSP and IPConnect tiles
        instead of IO (IO exists only on the top and bottom edges)."""
        self.clear()
        self.device = "5k"
        self.max_x = 25
        self.max_y = 31
        for x in range(1, self.max_x):
            for y in range(1, self.max_y):
                if x in (6, 19):
                    # RAM columns alternate bottom (odd y) / top (even y).
                    if y % 2 == 1:
                        self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
                    else:
                        self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
                else:
                    self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
        # IO only on the bottom and top edges.
        for x in range(1, self.max_x):
            self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
            self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
        # Left/right edges: each DSP occupies four vertically stacked
        # sub-tiles (DSP0..DSP3); everything else is IPConnect.
        for x in [0, self.max_x]:
            for y in range(1, self.max_y):
                if y in [5, 10, 15, 23]:
                    self.dsp_tiles[0][(x, y)] = ["0" * 54 for i in range(16)]
                elif y in [6, 11, 16, 24]:
                    self.dsp_tiles[1][(x, y)] = ["0" * 54 for i in range(16)]
                elif y in [7, 12, 17, 25]:
                    self.dsp_tiles[2][(x, y)] = ["0" * 54 for i in range(16)]
                elif y in [8, 13, 18, 26]:
                    self.dsp_tiles[3][(x, y)] = ["0" * 54 for i in range(16)]
                else:
                    self.ipcon_tiles[(x, y)] = ["0" * 54 for i in range(16)]
def setup_empty_8k(self):
self.clear()
self.device = "8k"
self.max_x = 33
self.max_y = 33
for x in range(1, self.max_x):
for y in range(1, self.max_y):
if x in (8, 25):
if y % 2 == 1:
self.ramb_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.ramt_tiles[(x, y)] = ["0" * 42 for i in range(16)]
else:
self.logic_tiles[(x, y)] = ["0" * 54 for i in range(16)]
for x in range(1, self.max_x):
self.io_tiles[(x, 0)] = ["0" * 18 for i in range(16)]
self.io_tiles[(x, self.max_y)] = ["0" * 18 for i in range(16)]
for y in range(1, self.max_y):
self.io_tiles[(0, y)] = ["0" * 18 for i in range(16)]
self.io_tiles[(self.max_x, y)] = ["0" * 18 for i in range(16)]
def lookup_extra_bit(self, bit):
assert self.device in extra_bits_db
if bit in extra_bits_db[self.device]:
return extra_bits_db[self.device][bit]
return ("UNKNOWN_FUNCTION",)
def tile(self, x, y):
if (x, y) in self.io_tiles: return self.io_tiles[(x, y)]
if (x, y) in self.logic_tiles: return self.logic_tiles[(x, y)]
if (x, y) in self.ramb_tiles: return self.ramb_tiles[(x, y)]
if (x, y) in self.ramt_tiles: return self.ramt_tiles[(x, y)]
for i in range(4):
if (x, y) in self.dsp_tiles[i]: return self.dsp_tiles[i][(x, y)]
if (x, y) in self.ipcon_tiles: return self.ipcon_tiles[(x, y)]
return None
def pinloc_db(self):
if self.device == "384": return pinloc_db["384-qn32"]
if self.device == "1k": return pinloc_db["1k-tq144"]
if self.device == "5k": return pinloc_db["5k-sg48"]
if self.device == "8k": return pinloc_db["8k-ct256"]
assert False
    def gbufin_db(self):
        """Return this device's entry in the module-level gbufin_db table."""
        return gbufin_db[self.device]
    def iolatch_db(self):
        """Return this device's entry in the module-level iolatch_db table."""
        return iolatch_db[self.device]
    def padin_pio_db(self):
        """Return this device's entry in the module-level padin_pio_db table."""
        return padin_pio_db[self.device]
    def extra_bits_db(self):
        """Return this device's entry in the module-level extra_bits_db
        table (the global dict, not this same-named method)."""
        return extra_bits_db[self.device]
    def ieren_db(self):
        """Return this device's entry in the module-level ieren_db table."""
        return ieren_db[self.device]
def pll_list(self):
if self.device == "1k":
return ["1k"]
if self.device == "5k":
return ["5k"]
if self.device == "8k":
return ["8k_0", "8k_1"]
if self.device == "384":
return [ ]
assert False
# Return true if device is Ultra/UltraPlus series, i.e. has
# IpConnect/DSP at the sides instead of IO
def is_ultra(self):
return self.device in ["5k"]
    def colbuf_db(self):
        """Return the column-buffer table for this device as a list of
        (src_x, src_y, dst_x, dst_y) tuples: the tile holding the ColBufCtrl
        bit (src) for each driven tile (dst).  The y band boundaries are
        device-specific constants."""
        if self.device == "1k":
            entries = list()
            for x in range(self.max_x+1):
                for y in range(self.max_y+1):
                    src_y = None
                    if 0 <= y <= 4: src_y = 4
                    if 5 <= y <= 8: src_y = 5
                    if 9 <= y <= 12: src_y = 12
                    if 13 <= y <= 17: src_y = 13
                    # RAM columns (x = 3, 10) use shifted source rows.
                    if x in [3, 10] and src_y == 4: src_y = 3
                    if x in [3, 10] and src_y == 12: src_y = 11
                    entries.append((x, src_y, x, y))
            return entries
        if self.device == "8k":
            entries = list()
            for x in range(self.max_x+1):
                for y in range(self.max_y+1):
                    src_y = None
                    if 0 <= y <= 8: src_y = 8
                    if 9 <= y <= 16: src_y = 9
                    if 17 <= y <= 24: src_y = 24
                    if 25 <= y <= 33: src_y = 25
                    entries.append((x, src_y, x, y))
            return entries
        if self.device == "5k": #Interesting, seems the 5k has more colbufs?
            entries = list()
            for x in range(self.max_x+1):
                for y in range(self.max_y+1):
                    src_y = None
                    if 0 <= y <= 4: src_y = 4
                    if 5 <= y <= 10: src_y = 5
                    if 11 <= y <= 14: src_y = 14
                    if 15 <= y <= 20: src_y = 15
                    if 21 <= y <= 26: src_y = 26
                    if 27 <= y <= 31: src_y = 27
                    entries.append((x, src_y, x, y))
            return entries
        if self.device == "384":
            entries = list()
            for x in range(self.max_x+1):
                for y in range(self.max_y+1):
                    src_y = None #Is ColBufCtrl relevant?
                    if 0 <= y <= 2: src_y = 2 #384?
                    if 3 <= y <= 4: src_y = 3 #384?
                    if 5 <= y <= 6: src_y = 6 #384?
                    if 7 <= y <= 9: src_y = 7 #384?
                    entries.append((x, src_y, x, y))
            return entries
        assert False
    # Return a map between HDL name and routing net and location for a given DSP cell
    def get_dsp_nets_db(self, x, y):
        """Return {port_name: (x, y, netname)} for the DSP cell whose DSP0
        sub-tile is at (x, y); ports are spread over the four vertically
        stacked sub-tiles (y .. y+3) plus row y+4 for the carry out."""
        assert ((x, y) in self.dsp_tiles[0])
        # Control signals
        nets = {
            "CLK": (x, y+2, "lutff_global/clk"),
            "CE": (x, y+2, "lutff_global/cen"),
            "IRSTTOP": (x, y+1, "lutff_global/s_r"),
            "IRSTBOT": (x, y+0, "lutff_global/s_r"),
            "ORSTTOP": (x, y+3, "lutff_global/s_r"),
            "ORSTBOT": (x, y+2, "lutff_global/s_r"),
            "AHOLD": (x, y+2, "lutff_0/in_0"),
            "BHOLD": (x, y+1, "lutff_0/in_0"),
            "CHOLD": (x, y+3, "lutff_0/in_0"),
            "DHOLD": (x, y+0, "lutff_0/in_0"),
            "OHOLDTOP": (x, y+3, "lutff_1/in_0"),
            "OHOLDBOT": (x, y+0, "lutff_1/in_0"),
            "ADDSUBTOP": (x, y+3, "lutff_3/in_0"),
            "ADDSUBBOT": (x, y+0, "lutff_3/in_0"),
            "OLOADTOP": (x, y+3, "lutff_2/in_0"),
            "OLOADBOT": (x, y+0, "lutff_2/in_0"),
            "CI": (x, y+0, "lutff_4/in_0"),
            "CO": (x, y+4, "slf_op_0")
        }
        #Data ports
        # Each 16-bit input bus uses the in_3 LUT inputs of one sub-tile
        # for bits 0-7 and the in_1 inputs for bits 8-15.
        for i in range(8):
            nets["C_%d" % i] = (x, y+3, "lutff_%d/in_3" % i)
            nets["C_%d" % (i+8)] = (x, y+3, "lutff_%d/in_1" % i)
            nets["A_%d" % i] = (x, y+2, "lutff_%d/in_3" % i)
            nets["A_%d" % (i+8)] = (x, y+2, "lutff_%d/in_1" % i)
            nets["B_%d" % i] = (x, y+1, "lutff_%d/in_3" % i)
            nets["B_%d" % (i+8)] = (x, y+1, "lutff_%d/in_1" % i)
            nets["D_%d" % i] = (x, y+0, "lutff_%d/in_3" % i)
            nets["D_%d" % (i+8)] = (x, y+0, "lutff_%d/in_1" % i)
        # 32-bit output: 8 bits per sub-tile, bottom to top.
        for i in range(32):
            nets["O_%d" % i] = (x, y+(i//8), "mult/O_%d" % i)
        return nets
# Return the location of configuration bits for a given DSP cell
def get_dsp_config_db(self, x, y):
assert ((x, y) in self.dsp_tiles[0])
override = { }
if (("%s_%d_%d" % (self.device, x, y)) in dsp_config_db):
override = dsp_config_db["%s_%d_%d" % (self.device, x, y)]
default_db = dsp_config_db["default"]
merged = { }
for cfgkey in default_db:
cx, cy, cbit = default_db[cfgkey]
if cfgkey in override:
cx, cy, cbit = override[cfgkey]
merged[cfgkey] = (x + cx, y + cy, cbit)
return merged
    def tile_db(self, x, y):
        """Return the config-bit database that applies to tile (x, y);
        asserts if the position matches no known tile type."""
        # Only these devices have IO on the left and right sides.
        if self.device in ["384", "1k", "8k"]:
            if x == 0: return iotile_l_db
            if x == self.max_x: return iotile_r_db
        # The 5k needs an IO db including the extra bits
        if self.device == "5k":
            if y == 0: return iotile_b_5k_db
            if y == self.max_y: return iotile_t_5k_db
        else:
            if y == 0: return iotile_b_db
            if y == self.max_y: return iotile_t_db
        # Interior tiles: pick the device-specific database by tile kind.
        if self.device == "1k":
            if (x, y) in self.logic_tiles: return logictile_db
            if (x, y) in self.ramb_tiles: return rambtile_db
            if (x, y) in self.ramt_tiles: return ramttile_db
        elif self.device == "5k":
            if (x, y) in self.logic_tiles: return logictile_5k_db
            if (x, y) in self.ramb_tiles: return rambtile_5k_db
            if (x, y) in self.ramt_tiles: return ramttile_5k_db
            if (x, y) in self.ipcon_tiles: return ipcon_5k_db
            if (x, y) in self.dsp_tiles[0]: return dsp0_5k_db
            if (x, y) in self.dsp_tiles[1]: return dsp1_5k_db
            if (x, y) in self.dsp_tiles[2]: return dsp2_5k_db
            if (x, y) in self.dsp_tiles[3]: return dsp3_5k_db
        elif self.device == "8k":
            if (x, y) in self.logic_tiles: return logictile_8k_db
            if (x, y) in self.ramb_tiles: return rambtile_8k_db
            if (x, y) in self.ramt_tiles: return ramttile_8k_db
        elif self.device == "384":
            if (x, y) in self.logic_tiles: return logictile_384_db
        print("Tile type unknown at (%d, %d)" % (x, y))
        assert False
def tile_type(self, x, y):
if x == 0 and (not self.is_ultra()): return "IO"
if y == 0: return "IO"
if x == self.max_x and (not self.is_ultra()): return "IO"
if y == self.max_y: return "IO"
if (x, y) in self.ramb_tiles: return "RAMB"
if (x, y) in self.ramt_tiles: return "RAMT"
if (x, y) in self.logic_tiles: return "LOGIC"
if (x == 0 or x == self.max_x) and self.is_ultra():
if y in [5, 10, 15, 23]:
return "DSP0"
elif y in [6, 11, 16, 24]:
return "DSP1"
elif y in [7, 12, 17, 25]:
return "DSP2"
elif y in [8, 13, 18, 26]:
return "DSP3"
else:
return "IPCON"
assert False
def tile_pos(self, x, y):
if x == 0 and 0 < y < self.max_y: return "l"
if y == 0 and 0 < x < self.max_x: return "b"
if x == self.max_x and 0 < y < self.max_y: return "r"
if y == self.max_y and 0 < x < self.max_x: return "t"
if 0 < x < self.max_x and 0 < y < self.max_y: return "x"
return None
def tile_has_entry(self, x, y, entry):
if entry[1] in ("routing", "buffer"):
return self.tile_has_net(x, y, entry[2]) and self.tile_has_net(x, y, entry[3])
return True
    def tile_has_net(self, x, y, netname):
        """Return True if tile (x, y) carries the net `netname`.

        "logic_op_*" nets only exist on edge IO tiles facing the interior
        (direction names are from the neighbouring logic tile's point of
        view); everything else is delegated to pos_has_net()."""
        if netname.startswith("logic_op_"):
            if netname.startswith("logic_op_bot_"):
                if y == self.max_y and 0 < x < self.max_x: return True
            if netname.startswith("logic_op_bnl_"):
                # Left/right IO edges exist only on non-UltraPlus parts.
                if x == self.max_x and 1 < y < self.max_y and (not self.is_ultra()): return True
                if y == self.max_y and 1 < x < self.max_x: return True
            if netname.startswith("logic_op_bnr_"):
                if x == 0 and 1 < y < self.max_y and (not self.is_ultra()): return True
                if y == self.max_y and 0 < x < self.max_x-1: return True
            if netname.startswith("logic_op_top_"):
                if y == 0 and 0 < x < self.max_x: return True
            if netname.startswith("logic_op_tnl_"):
                if x == self.max_x and 0 < y < self.max_y-1 and (not self.is_ultra()): return True
                if y == 0 and 1 < x < self.max_x: return True
            if netname.startswith("logic_op_tnr_"):
                if x == 0 and 0 < y < self.max_y-1 and (not self.is_ultra()): return True
                if y == 0 and 0 < x < self.max_x-1: return True
            if netname.startswith("logic_op_lft_"):
                if x == self.max_x and (not self.is_ultra()): return True
            if netname.startswith("logic_op_rgt_"):
                if x == 0 and (not self.is_ultra()): return True
            return False
        # All other nets: position must be on the grid, then defer to the
        # generic per-position net table.
        if not 0 <= x <= self.max_x: return False
        if not 0 <= y <= self.max_y: return False
        return pos_has_net(self.tile_pos(x, y), netname)
    def tile_follow_net(self, x, y, direction, netname):
        """Translate `netname` of tile (x, y) into the name it has in the
        neighbouring tile in `direction` ('l'/'r'/'t'/'b').

        Interior tiles adjacent to an edge use the "x" position rules with
        an upper-case edge code so pos_follow_net() can apply the edge
        transition; on UltraPlus, edge tiles next to a corner are passed
        through with their own position code (corners are special there)."""
        if x == 1 and y not in (0, self.max_y) and direction == 'l': return pos_follow_net("x", "L", netname, self.is_ultra())
        if y == 1 and x not in (0, self.max_x) and direction == 'b': return pos_follow_net("x", "B", netname, self.is_ultra())
        if x == self.max_x-1 and y not in (0, self.max_y) and direction == 'r': return pos_follow_net("x", "R", netname, self.is_ultra())
        if y == self.max_y-1 and x not in (0, self.max_x) and direction == 't': return pos_follow_net("x", "T", netname, self.is_ultra())
        if self.is_ultra(): # Pass through corner positions as they must be handled differently
            if y == 1 and x in (0, self.max_x) and direction == 'b': return pos_follow_net(self.tile_pos(x, y), "B", netname, self.is_ultra())
            if y == self.max_y-1 and x in (0, self.max_x) and direction == 't': return pos_follow_net(self.tile_pos(x, y), "T", netname, self.is_ultra())
            if x == 1 and y in (0, self.max_y) and direction == 'l': return pos_follow_net(self.tile_pos(x, y), "L", netname, self.is_ultra())
            if x == self.max_x-1 and y in (0, self.max_y) and direction == 'r': return pos_follow_net(self.tile_pos(x, y), "R", netname, self.is_ultra())
        return pos_follow_net(self.tile_pos(x, y), direction, netname, self.is_ultra())
    def follow_funcnet(self, x, y, func):
        """Return the set of (x, y, netname) positions where function net
        number `func` of tile (x, y) appears in the eight neighbouring
        tiles (interior neighbours as "neigh_op_*", edge IO neighbours as
        "logic_op_*")."""
        neighbours = set()
        def do_direction(name, nx, ny):
            # Interior neighbour (on UltraPlus the left/right columns also
            # count as interior-style for neigh_op nets).
            if (0 < nx < self.max_x or self.is_ultra()) and 0 < ny < self.max_y:
                neighbours.add((nx, ny, "neigh_op_%s_%d" % (name, func)))
            # Left/right IO edge neighbour (non-UltraPlus only).
            if nx in (0, self.max_x) and 0 < ny < self.max_y and nx != x and (not self.is_ultra()):
                neighbours.add((nx, ny, "logic_op_%s_%d" % (name, func)))
            # Top/bottom IO edge neighbour.
            if ny in (0, self.max_y) and 0 < nx < self.max_x and ny != y:
                neighbours.add((nx, ny, "logic_op_%s_%d" % (name, func)))
        # Direction names are from the neighbour's point of view.
        do_direction("bot", x, y+1)
        do_direction("bnl", x+1, y+1)
        do_direction("bnr", x-1, y+1)
        do_direction("top", x, y-1)
        do_direction("tnl", x+1, y-1)
        do_direction("tnr", x-1, y-1)
        do_direction("lft", x+1, y  )
        do_direction("rgt", x-1, y  )
        return neighbours
    def lookup_funcnet(self, nx, ny, x, y, func):
        """Resolve function net `func`, as seen from tile (x, y), to the
        driving net in neighbour tile (nx, ny); returns (nx, ny, netname)
        or None if the neighbour does not drive it."""
        npos = self.tile_pos(nx, ny)
        pos = self.tile_pos(x, y)
        if npos is not None and pos is not None:
            if npos == "x":
                # Interior neighbour: LUT output, DSP product bit, or BRAM
                # read-data bit (bit order is reversed on 5k/8k).
                if (nx, ny) in self.logic_tiles:
                    return (nx, ny, "lutff_%d/out" % func)
                for i in range(4):
                    if (nx, ny) in self.dsp_tiles[i]: #TODO: check this
                        return (nx, ny, "mult/O_%d" % (i * 8 + func))
                if (nx, ny) in self.ramb_tiles:
                    if self.device == "1k":
                        return (nx, ny, "ram/RDATA_%d" % func)
                    elif self.device == "5k":
                        return (nx, ny, "ram/RDATA_%d" % (15-func))
                    elif self.device == "8k":
                        return (nx, ny, "ram/RDATA_%d" % (15-func))
                    else:
                        assert False
                if (nx, ny) in self.ramt_tiles:
                    if self.device == "1k":
                        return (nx, ny, "ram/RDATA_%d" % (8+func))
                    elif self.device == "5k":
                        return (nx, ny, "ram/RDATA_%d" % (7-func))
                    elif self.device == "8k":
                        return (nx, ny, "ram/RDATA_%d" % (7-func))
                    else:
                        assert False
            elif pos == "x" and ((npos in ("t", "b")) or ((not self.is_ultra()) and (npos in ("l", "r")))):
                # IO edge neighbour: the eight function nets fold onto the
                # four pad data-in signals.
                if func in (0, 4): return (nx, ny, "io_0/D_IN_0")
                if func in (1, 5): return (nx, ny, "io_0/D_IN_1")
                if func in (2, 6): return (nx, ny, "io_1/D_IN_0")
                if func in (3, 7): return (nx, ny, "io_1/D_IN_1")
        return None
    def rlookup_funcnet(self, x, y, netname):
        """Reverse of lookup_funcnet(): given a driving net in tile (x, y),
        return the set of neighbour (x, y, netname) function nets it fans
        out to."""
        funcnets = set()
        # IO pad inputs fold two function-net numbers each; only interior
        # ("x") neighbours can consume them.
        if netname == "io_0/D_IN_0":
            for net in self.follow_funcnet(x, y, 0) | self.follow_funcnet(x, y, 4):
                if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
        if netname == "io_0/D_IN_1":
            for net in self.follow_funcnet(x, y, 1) | self.follow_funcnet(x, y, 5):
                if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
        if netname == "io_1/D_IN_0":
            for net in self.follow_funcnet(x, y, 2) | self.follow_funcnet(x, y, 6):
                if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
        if netname == "io_1/D_IN_1":
            for net in self.follow_funcnet(x, y, 3) | self.follow_funcnet(x, y, 7):
                if self.tile_pos(net[0], net[1]) == "x": funcnets.add(net)
        match = re.match(r"lutff_(\d+)/out", netname)
        if match:
            funcnets |= self.follow_funcnet(x, y, int(match.group(1)))
        # BRAM read-data: bit order is reversed on 5k/8k (mirrors
        # lookup_funcnet()).
        match = re.match(r"ram/RDATA_(\d+)", netname)
        if match:
            if self.device == "1k":
                funcnets |= self.follow_funcnet(x, y, int(match.group(1)) % 8)
            elif self.device == "5k":
                funcnets |= self.follow_funcnet(x, y, 7 - int(match.group(1)) % 8)
            elif self.device == "8k":
                funcnets |= self.follow_funcnet(x, y, 7 - int(match.group(1)) % 8)
            else:
                assert False
        return funcnets
def ultraplus_follow_corner(self, corner, direction, netname):
m = re.match("span4_(horz|vert)_([lrtb])_(\d+)$", netname)
if not m:
return None
cur_edge = m.group(2)
cur_index = int(m.group(3))
if direction not in corner:
return None
if direction != cur_edge:
return None
h_idx, v_idx = self.ultraplus_trace_corner_idx(corner, cur_index)
if h_idx is None and (direction == "b" or direction == "t"):
return None
if v_idx is None and (direction == "l" or direction == "r"):
return None
if corner == "bl" and direction == "l":
return (0, 1, sp4v_normalize("sp4_v_b_%d" % v_idx))
if corner == "bl" and direction == "b":
return (1, 0, ultra_span4_horz_normalize("span4_horz_l_%d" % h_idx))
if corner == "br" and direction == "r":
return (self.max_x, 1, sp4v_normalize("sp4_v_b_%d" % v_idx))
if corner == "br" and direction == "b":
return (self.max_x-1, 0, ultra_span4_horz_normalize("span4_horz_r_%d" % h_idx))
if corner == "tl" and direction == "l":
return (0, self.max_y-1, sp4v_normalize("sp4_v_t_%d" % v_idx))
if corner == "tl" and direction == "t":
return (1, self.max_y, ultra_span4_horz_normalize("span4_horz_l_%d" % h_idx))
if corner == "tr" and direction == "r":
return (self.max_x, self.max_y-1, sp4v_normalize("sp4_v_t_%d" % v_idx))
if corner == "tr" and direction == "t":
return (self.max_x-1, self.max_y, ultra_span4_horz_normalize("span4_horz_r_%d" % h_idx))
assert False
#UltraPlus corner routing: given the corner name and net index,
#return a tuple containing H and V indexes, or none if NA
def ultraplus_trace_corner_idx(self, corner, idx):
h_idx = None
v_idx = None
if corner == "bl" or corner == "br":
if idx < 16:
v_idx = idx + 32
if idx >= 32 and idx < 48:
h_idx = idx - 32
elif corner == "tl" or corner == "tr":
if idx >= 0 and idx < 16:
v_idx = idx
h_idx = idx
return (h_idx, v_idx)
def get_corner(self, x, y):
corner = ""
if y == 0:
corner += "b"
elif y == self.max_y:
corner += "t"
else:
corner += "x"
if x == 0:
corner += "l"
elif x == self.max_x:
corner += "r"
else:
corner += "x"
return corner
def follow_net(self, netspec):
    """Return the set of (x, y, netname) segments directly connected to
    `netspec`.

    Combines the same-tile function-net aliases (rlookup_funcnet) with
    the continuations of the net into other tiles: carry chain, global
    networks, span-wire aliases, and the four edge directions (with
    special corner handling).
    """
    x, y, netname = netspec
    neighbours = self.rlookup_funcnet(x, y, netname)
    #print(netspec)
    #print('\t', neighbours)
    # Carry chain: this tile's carry_in is lutff_7/cout of the tile below,
    # and vice versa.
    if netname == "carry_in" and y > 1:
        neighbours.add((x, y-1, "lutff_7/cout"))
    if netname == "lutff_7/cout" and y+1 < self.max_y:
        neighbours.add((x, y+1, "carry_in"))
    # A global network is present in every valid tile of the die.
    if netname.startswith("glb_netwk_"):
        for nx in range(self.max_x+1):
            for ny in range(self.max_y+1):
                if self.tile_pos(nx, ny) is not None:
                    neighbours.add((nx, ny, netname))
    # sp4_r_v_b_* is this tile's alias for the sp4_v_b_* wire of the tile
    # to its right.
    match = re.match(r"sp4_r_v_b_(\d+)", netname)
    if match and ((0 < x < self.max_x-1) or (self.is_ultra() and (x < self.max_x))):
        neighbours.add((x+1, y, sp4v_normalize("sp4_v_b_" + match.group(1))))
    #print('\tafter r_v_b', neighbours)
    # ...and conversely: a vertical span4 is visible as sp4_r_v_b in the
    # tile to the left.
    match = re.match(r"sp4_v_[bt]_(\d+)", netname)
    if match and (1 < x < self.max_x or (self.is_ultra() and (x > 0))):
        n = sp4v_normalize(netname, "b")
        if n is not None:
            n = n.replace("sp4_", "sp4_r_")
            neighbours.add((x-1, y, n))
    #print('\tafter v_[bt]', neighbours)
    # logic_op_* / neigh_op_* nets name a function net in one of the
    # eight surrounding tiles.
    match = re.match(r"(logic|neigh)_op_(...)_(\d+)", netname)
    if match:
        if match.group(2) == "bot": nx, ny = (x, y-1)
        if match.group(2) == "bnl": nx, ny = (x-1, y-1)
        if match.group(2) == "bnr": nx, ny = (x+1, y-1)
        if match.group(2) == "top": nx, ny = (x, y+1)
        if match.group(2) == "tnl": nx, ny = (x-1, y+1)
        if match.group(2) == "tnr": nx, ny = (x+1, y+1)
        if match.group(2) == "lft": nx, ny = (x-1, y )
        if match.group(2) == "rgt": nx, ny = (x+1, y )
        n = self.lookup_funcnet(nx, ny, x, y, int(match.group(3)))
        if n is not None:
            neighbours.add(n)
    # Follow the net across each tile edge; landing in a corner tile
    # requires remapping (UltraPlus has dedicated corner routing).
    for direction in ["l", "r", "t", "b"]:
        n = self.tile_follow_net(x, y, direction, netname)
        if n is not None:
            if direction == "l": s = (x-1, y, n)
            if direction == "r": s = (x+1, y, n)
            if direction == "t": s = (x, y+1, n)
            if direction == "b": s = (x, y-1, n)
            if s[0] in (0, self.max_x) and s[1] in (0, self.max_y):
                if self.is_ultra():
                    s = self.ultraplus_follow_corner(self.get_corner(s[0], s[1]), direction, n)
                    if s is None:
                        continue
                elif re.match("span4_(vert|horz)_[lrtb]_\d+$", n) and not self.is_ultra():
                    m = re.match("span4_(vert|horz)_([lrtb])_\d+$", n)
                    # Edge wires wrap around the corner: a horizontal edge
                    # span continues as the matching vertical edge span.
                    vert_net = n.replace("_l_", "_t_").replace("_r_", "_b_").replace("_horz_", "_vert_")
                    horz_net = n.replace("_t_", "_l_").replace("_b_", "_r_").replace("_vert_", "_horz_")
                    if s[0] == 0 and s[1] == 0:
                        if direction == "l": s = (0, 1, vert_net)
                        if direction == "b": s = (1, 0, horz_net)
                    if s[0] == self.max_x and s[1] == self.max_y:
                        if direction == "r": s = (self.max_x, self.max_y-1, vert_net)
                        if direction == "t": s = (self.max_x-1, self.max_y, horz_net)
                    # The remaining two corners are remapped from the
                    # original (un-followed) net name.
                    vert_net = netname.replace("_l_", "_t_").replace("_r_", "_b_").replace("_horz_", "_vert_")
                    horz_net = netname.replace("_t_", "_l_").replace("_b_", "_r_").replace("_vert_", "_horz_")
                    if s[0] == 0 and s[1] == self.max_y:
                        if direction == "l": s = (0, self.max_y-1, vert_net)
                        if direction == "t": s = (1, self.max_y, horz_net)
                    if s[0] == self.max_x and s[1] == 0:
                        if direction == "r": s = (self.max_x, 1, vert_net)
                        if direction == "b": s = (self.max_x-1, 0, horz_net)
            if self.tile_has_net(s[0], s[1], s[2]):
                neighbours.add((s[0], s[1], s[2]))
    #print('\tafter directions', neighbours)
    return neighbours
def group_segments(self, all_from_tiles=set(), extra_connections=list(), extra_segments=list(), connect_gb=True):
    """Partition the routing graph into electrically connected groups.

    Seeds the search from configured routing/buffer switches (plus the
    caller-supplied `extra_segments`/`extra_connections` and everything in
    `all_from_tiles`), then expands each seed with expand_net() and the
    recorded switch connections.  Returns a set of tuples, each tuple the
    sorted (x, y, netname) segments of one group.

    Note: the mutable default arguments are intentional here — they are
    only ever read, never mutated.
    """
    seed_segments = set()
    seen_segments = set()
    connected_segments = dict()
    grouped_segments = set()
    for seg in extra_segments:
        seed_segments.add(seg)
    for conn in extra_connections:
        s1, s2 = conn
        connected_segments.setdefault(s1, set()).add(s2)
        connected_segments.setdefault(s2, set()).add(s1)
        seed_segments.add(s1)
        seed_segments.add(s2)
    # Seed from IO tiles that have an active output pin type.
    for idx, tile in self.io_tiles.items():
        tc = tileconfig(tile)
        pintypes = [ list("000000"), list("000000") ]
        for entry in self.tile_db(idx[0], idx[1]):
            if entry[1].startswith("IOB_") and entry[2].startswith("PINTYPE_") and tc.match(entry[0]):
                pintypes[int(entry[1][-1])][int(entry[2][-1])] = "1"
        if "".join(pintypes[0][2:6]) != "0000":
            seed_segments.add((idx[0], idx[1], "io_0/D_OUT_0"))
        if "".join(pintypes[1][2:6]) != "0000":
            seed_segments.add((idx[0], idx[1], "io_1/D_OUT_0"))
    def add_seed_segments(idx, tile, db):
        # Record every configured routing/buffer switch in this tile as a
        # connection, and seed the search from its endpoints.
        tc = tileconfig(tile)
        for entry in db:
            if entry[1] in ("routing", "buffer"):
                config_match = tc.match(entry[0])
                if idx in all_from_tiles or config_match:
                    if not self.tile_has_net(idx[0], idx[1], entry[2]): continue
                    if not self.tile_has_net(idx[0], idx[1], entry[3]): continue
                    s1 = (idx[0], idx[1], entry[2])
                    s2 = (idx[0], idx[1], entry[3])
                    if config_match:
                        connected_segments.setdefault(s1, set()).add(s2)
                        connected_segments.setdefault(s2, set()).add(s1)
                    seed_segments.add(s1)
                    seed_segments.add(s2)
    for idx, tile in self.io_tiles.items():
        add_seed_segments(idx, tile, self.tile_db(idx[0], idx[1]))
    for idx, tile in self.logic_tiles.items():
        if idx in all_from_tiles:
            seed_segments.add((idx[0], idx[1], "lutff_7/cout"))
        if self.device == "1k":
            add_seed_segments(idx, tile, logictile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, logictile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, logictile_8k_db)
        elif self.device == "384":
            add_seed_segments(idx, tile, logictile_384_db)
        else:
            assert False
    for idx, tile in self.ramb_tiles.items():
        if self.device == "1k":
            add_seed_segments(idx, tile, rambtile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, rambtile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, rambtile_8k_db)
        else:
            assert False
    for idx, tile in self.ramt_tiles.items():
        if self.device == "1k":
            add_seed_segments(idx, tile, ramttile_db)
        elif self.device == "5k":
            add_seed_segments(idx, tile, ramttile_5k_db)
        elif self.device == "8k":
            add_seed_segments(idx, tile, ramttile_8k_db)
        else:
            assert False
    for idx, tile in self.dsp_tiles[0].items():
        if self.device == "5k":
            add_seed_segments(idx, tile, dsp0_5k_db)
    for idx, tile in self.dsp_tiles[1].items():
        if self.device == "5k":
            add_seed_segments(idx, tile, dsp1_5k_db)
    for idx, tile in self.dsp_tiles[2].items():
        if self.device == "5k":
            add_seed_segments(idx, tile, dsp2_5k_db)
    for idx, tile in self.dsp_tiles[3].items():
        if self.device == "5k":
            add_seed_segments(idx, tile, dsp3_5k_db)
    for idx, tile in self.ipcon_tiles.items():
        if self.device == "5k":
            add_seed_segments(idx, tile, ipcon_5k_db)
    # Pad inputs feeding global networks.
    for padin, pio in enumerate(self.padin_pio_db()):
        s1 = (pio[0], pio[1], "padin_%d" % pio[2])
        s2 = (pio[0], pio[1], "glb_netwk_%d" % padin)
        if s1 in seed_segments or (pio[0], pio[1]) in all_from_tiles:
            connected_segments.setdefault(s1, set()).add(s2)
            connected_segments.setdefault(s2, set()).add(s1)
            seed_segments.add(s1)
            seed_segments.add(s2)
    # IO latch fabout drivers and the io_global/latch nets they control.
    for entry in self.iolatch_db():
        if entry[0] == 0 or entry[0] == self.max_x:
            iocells = [(entry[0], i) for i in range(1, self.max_y)]
        if entry[1] == 0 or entry[1] == self.max_y:
            iocells = [(i, entry[1]) for i in range(1, self.max_x)]
        for cell in iocells:
            s1 = (entry[0], entry[1], "fabout")
            s2 = (cell[0], cell[1], "io_global/latch")
            if s1 in seed_segments or s2 in seed_segments or \
                    (entry[0], entry[1]) in all_from_tiles or (cell[0], cell[1]) in all_from_tiles:
                connected_segments.setdefault(s1, set()).add(s2)
                connected_segments.setdefault(s2, set()).add(s1)
                seed_segments.add(s1)
                seed_segments.add(s2)
    if connect_gb:
        for entry in self.gbufin_db():
            s1 = (entry[0], entry[1], "fabout")
            s2 = (entry[0], entry[1], "glb_netwk_%d" % entry[2])
            # Bug fix: this previously tested the stale `pio` variable left
            # over from the padin loop above instead of this entry's tile.
            if s1 in seed_segments or (entry[0], entry[1]) in all_from_tiles:
                connected_segments.setdefault(s1, set()).add(s2)
                connected_segments.setdefault(s2, set()).add(s1)
                seed_segments.add(s1)
                seed_segments.add(s2)
    # Flood-fill: pull one seed, expand it through the net graph and the
    # recorded switch connections, and emit the group.
    while seed_segments:
        queue = set()
        segments = set()
        queue.add(seed_segments.pop())
        while queue:
            next_segment = queue.pop()
            expanded = self.expand_net(next_segment)
            for s in expanded:
                if s not in segments:
                    segments.add(s)
                    if s in seen_segments:
                        # A segment in two groups means the bitmapping is
                        # inconsistent.
                        print("//", s, "has already been seen. Check your bitmapping.")
                        assert False
                    seen_segments.add(s)
                    seed_segments.discard(s)
                    if s in connected_segments:
                        for cs in connected_segments[s]:
                            if not cs in segments:
                                queue.add(cs)
        for s in segments:
            assert s not in seed_segments
        grouped_segments.add(tuple(sorted(segments)))
    return grouped_segments
def expand_net(self, netspec):
    """Return every (x, y, netname) segment reachable from `netspec`.

    Breadth-unordered flood fill over follow_net().
    """
    reached = set()
    pending = {netspec}
    while pending:
        seg = pending.pop()
        reached.add(seg)
        for nxt in self.follow_net(seg):
            if nxt not in reached:
                pending.add(nxt)
    return reached
def read_file(self, filename):
    """Parse an IceStorm .asc file into this object, replacing its state.

    Directive lines start with ".".  The 16 hex lines after a tile
    directive become that tile's data.  `expected_data_lines` is the
    parser state: > 0 means that many data rows of the current block are
    still expected, 0 means a directive is expected next, and -1 means
    we are inside a free-form section (.comment or an unrecognized
    directive) whose lines are skipped.
    """
    self.clear()
    current_data = None
    expected_data_lines = 0
    with open(filename, "r") as f:
        for linenum, linetext in enumerate(f):
            # print("DEBUG: input line %d: %s" % (linenum, linetext.strip()))
            line = linetext.strip().split()
            if len(line) == 0:
                assert expected_data_lines == 0
                continue
            if line[0][0] != ".":
                # Non-directive line: either part of a skipped section,
                # a hex data row, or junk.
                if expected_data_lines == -1:
                    continue
                if line[0][0] not in "0123456789abcdef":
                    print("Warning: ignoring data block in line %d: %s" % (linenum, linetext.strip()))
                    expected_data_lines = 0
                    continue
                assert expected_data_lines != 0
                current_data.append(line[0])
                expected_data_lines -= 1
                continue
            assert expected_data_lines <= 0
            if line[0] in (".io_tile", ".logic_tile", ".ramb_tile", ".ramt_tile", ".ram_data", ".ipcon_tile", ".dsp0_tile", ".dsp1_tile", ".dsp2_tile", ".dsp3_tile"):
                current_data = list()
                expected_data_lines = 16
                # Tile coordinates also define the extent of the die.
                self.max_x = max(self.max_x, int(line[1]))
                self.max_y = max(self.max_y, int(line[2]))
            if line[0] == ".io_tile":
                self.io_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".logic_tile":
                self.logic_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ramb_tile":
                self.ramb_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ramt_tile":
                self.ramt_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ipcon_tile":
                self.ipcon_tiles[(int(line[1]), int(line[2]))] = current_data
                continue
            match = re.match(r".dsp(\d)_tile", line[0])
            if match:
                self.dsp_tiles[int(match.group(1))][(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".ram_data":
                self.ram_data[(int(line[1]), int(line[2]))] = current_data
                continue
            if line[0] == ".extra_bit":
                self.extra_bits.add((int(line[1]), int(line[2]), int(line[3])))
                continue
            if line[0] == ".device":
                assert line[1] in ["1k", "5k", "8k", "384"]
                self.device = line[1]
                continue
            if line[0] == ".warmboot":
                assert line[1] in ["disabled", "enabled"]
                self.warmboot = line[1] == "enabled"
                continue
            if line[0] == ".sym":
                self.symbols.setdefault(int(line[1]), set()).add(line[2])
                continue
            if line[0] == ".comment":
                expected_data_lines = -1
                continue
            # Unknown directive: warn and skip its body.
            print("Warning: ignoring line %d: %s" % (linenum, linetext.strip()))
            expected_data_lines = -1
def write_file(self, filename):
    """Write this configuration as an IceStorm .asc file.

    Emits the device header, the warmboot flag (only when disabled, since
    enabled is the default), all tiles in row-major order, BRAM init data,
    and extra bits -- the inverse of read_file().
    """
    with open(filename, "w") as f:
        print(".device %s" % self.device, file=f)
        if not self.warmboot:
            print(".warmboot disabled", file=f)
        for y in range(self.max_y+1):
            for x in range(self.max_x+1):
                if self.tile_pos(x, y) is not None:
                    print(".%s_tile %d %d" % (self.tile_type(x, y).lower(), x, y), file=f)
                    for line in self.tile(x, y):
                        print(line, file=f)
        for x, y in sorted(self.ram_data):
            print(".ram_data %d %d" % (x, y), file=f)
            for line in self.ram_data[(x, y)]:
                print(line, file=f)
        for extra_bit in sorted(self.extra_bits):
            print(".extra_bit %d %d %d" % extra_bit, file=f)
class tileconfig:
    """Bit-level view of one tile's configuration.

    Each bit of the tile's rows is recorded as "B<row>[<col>]" when set
    and "!B<row>[<col>]" when clear, so database bit patterns can be
    checked with simple set membership.
    """
    def __init__(self, tile):
        self.bits = set()
        for row, line in enumerate(tile):
            for col, ch in enumerate(line):
                prefix = "" if ch == "1" else "!"
                self.bits.add("%sB%d[%d]" % (prefix, row, col))
    def match(self, pattern):
        """True if every bit named in `pattern` has the required value."""
        return all(bit in self.bits for bit in pattern)
# Index sets of span wires that exist on each tile edge.  The dead branch
# keeps the index sets as Lattice's own tools name the wires (documented
# for reference); the active branch uses IceStorm's canonical naming.
if False:
    ## Lattice span net name normalization
    valid_sp4_h_l = set([1, 2, 4, 5, 7, 9, 10, 11, 15, 16, 17, 21, 24, 34, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])
    valid_sp4_h_r = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 19, 21, 24, 25, 27, 30, 31, 33, 34, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46])
    valid_sp4_v_t = set([1, 3, 5, 9, 12, 14, 16, 17, 18, 21, 22, 23, 26, 28, 29, 30, 32, 33, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47])
    valid_sp4_v_b = set([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 18, 19, 21, 22, 23, 24, 26, 30, 33, 36, 37, 38, 42, 46, 47])
    valid_sp12_h_l = set([3, 4, 5, 12, 14, 16, 17, 18, 21, 22, 23])
    valid_sp12_h_r = set([0, 1, 2, 3, 5, 8, 9, 10, 11, 12, 13, 14, 16, 20, 23])
    valid_sp12_v_t = set([0, 1, 2, 3, 6, 9, 10, 12, 14, 21, 22, 23])
    valid_sp12_v_b = set([0, 1, 6, 7, 8, 11, 12, 14, 16, 18, 19, 20, 21, 23])
else:
    ## IceStorm span net name normalization
    # sp4 wires: 48 per direction, the last 12 terminating at l/t edges.
    # sp12 wires: 24 per direction, the last 2 terminating at l/t edges.
    valid_sp4_h_l = set(range(36, 48))
    valid_sp4_h_r = set(range(48))
    valid_sp4_v_t = set(range(36, 48))
    valid_sp4_v_b = set(range(48))
    valid_sp12_h_l = set(range(22, 24))
    valid_sp12_h_r = set(range(24))
    valid_sp12_v_t = set(range(22, 24))
    valid_sp12_v_b = set(range(24))
def sp4h_normalize(netname, edge=""):
    """Normalize a horizontal span4 net name.

    With edge == "" the name is rewritten into its canonical form for the
    tile it appears in; with edge "l"/"r" it is rewritten relative to that
    tile edge.  Returns None when the wire does not exist there, or when
    `netname` is not an sp4_h net at all.  (Previously a non-matching name
    hit `assert m`; the unreachable `if not m: return None` that followed
    showed returning None was the intended contract, and is also what the
    code already did under `python -O`.)
    """
    m = re.match(r"sp4_h_([lr])_(\d+)$", netname)
    if not m:
        return None
    cur_edge = m.group(1)
    cur_index = int(m.group(2))
    if cur_edge == edge:
        return netname
    if cur_edge == "r" and (edge == "l" or (edge == "" and cur_index not in valid_sp4_h_r)):
        # Wires 0..11 terminate here; the rest continue with the index
        # shifted by 12 and the LSB flipped (wire pairs twist per tile).
        if cur_index < 12:
            return None
        return "sp4_h_l_%d" % ((cur_index-12)^1)
    if cur_edge == "l" and (edge == "r" or (edge == "" and cur_index not in valid_sp4_h_l)):
        if cur_index >= 36:
            return None
        return "sp4_h_r_%d" % ((cur_index+12)^1)
    return netname
# "Normalization" of span4 (not just sp4) is needed during Ultra/UltraPlus
# corner tracing.
def ultra_span4_horz_normalize(netname, edge=""):
    """Normalize a span4_horz edge wire for Ultra/UltraPlus corner tracing.

    Edge wires shift by 4 per tile along the edge.  Returns None when the
    wire does not continue to the requested edge, or when `netname` is not
    a span4_horz net.  (A non-matching name previously hit `assert m`; the
    dead `if not m: return None` showed None was the intended result.)
    """
    m = re.match(r"span4_horz_([rl])_(\d+)$", netname)
    if not m:
        return None
    cur_edge = m.group(1)
    cur_index = int(m.group(2))
    if cur_edge == edge:
        return netname
    if edge == "":
        # Canonical form: low "l" indexes are named from the "r" side.
        if cur_edge == "l" and cur_index < 12:
            return "span4_horz_r_%d" % (cur_index + 4)
        else:
            return netname
    elif edge == "l" and cur_edge == "r":
        if cur_index < 4:
            return None
        else:
            cur_index -= 4
            return "span4_horz_l_%d" % cur_index
    elif edge == "r" and cur_edge == "l":
        if cur_index < 12:
            return "span4_horz_r_%d" % (cur_index + 4)
        else:
            return None
    assert False  # unreachable: all edge/cur_edge combinations handled
def sp4v_normalize(netname, edge=""):
    """Normalize a vertical span4 net name (see sp4h_normalize).

    Returns None when the wire does not exist on the requested edge, or
    when `netname` is not an sp4_v net.  (The former `assert m` contradicted
    its own dead `if not m: return None` guard; returning None is the
    intended contract and matches `-O` behavior.)
    """
    m = re.match(r"sp4_v_([bt])_(\d+)$", netname)
    if not m:
        return None
    cur_edge = m.group(1)
    cur_index = int(m.group(2))
    if cur_edge == edge:
        return netname
    if cur_edge == "b" and (edge == "t" or (edge == "" and cur_index not in valid_sp4_v_b)):
        # Indexes shift by 12 per tile, LSB flipped (pair twist).
        if cur_index < 12:
            return None
        return "sp4_v_t_%d" % ((cur_index-12)^1)
    if cur_edge == "t" and (edge == "b" or (edge == "" and cur_index not in valid_sp4_v_t)):
        if cur_index >= 36:
            return None
        return "sp4_v_b_%d" % ((cur_index+12)^1)
    return netname
def sp12h_normalize(netname, edge=""):
    """Normalize a horizontal span12 net name.

    Same scheme as sp4h_normalize but with a shift of 2 per tile and only
    24 wires per direction.  Returns None for non-existing wires and for
    names that are not sp12_h nets (the former `assert m` contradicted its
    own dead `if not m: return None` guard).
    """
    m = re.match(r"sp12_h_([lr])_(\d+)$", netname)
    if not m:
        return None
    cur_edge = m.group(1)
    cur_index = int(m.group(2))
    if cur_edge == edge:
        return netname
    if cur_edge == "r" and (edge == "l" or (edge == "" and cur_index not in valid_sp12_h_r)):
        if cur_index < 2:
            return None
        return "sp12_h_l_%d" % ((cur_index-2)^1)
    if cur_edge == "l" and (edge == "r" or (edge == "" and cur_index not in valid_sp12_h_l)):
        if cur_index >= 22:
            return None
        return "sp12_h_r_%d" % ((cur_index+2)^1)
    return netname
def sp12v_normalize(netname, edge=""):
    """Normalize a vertical span12 net name (see sp12h_normalize).

    Returns None for non-existing wires and for names that are not sp12_v
    nets (the former `assert m` contradicted its own dead `if not m:
    return None` guard).
    """
    m = re.match(r"sp12_v_([bt])_(\d+)$", netname)
    if not m:
        return None
    cur_edge = m.group(1)
    cur_index = int(m.group(2))
    if cur_edge == edge:
        return netname
    if cur_edge == "b" and (edge == "t" or (edge == "" and cur_index not in valid_sp12_v_b)):
        if cur_index < 2:
            return None
        return "sp12_v_t_%d" % ((cur_index-2)^1)
    if cur_edge == "t" and (edge == "b" or (edge == "" and cur_index not in valid_sp12_v_t)):
        if cur_index >= 22:
            return None
        return "sp12_v_b_%d" % ((cur_index+2)^1)
    return netname
def netname_normalize(netname, edge="", ramb=False, ramt=False, ramb_8k=False, ramt_8k=False):
if netname.startswith("sp4_v_"): return sp4v_normalize(netname, edge)
if netname.startswith("sp4_h_"): return sp4h_normalize(netname, edge)
if netname.startswith("sp12_v_"): return sp12v_normalize(netname, edge)
if netname.startswith("sp12_h_"): return sp12h_normalize(netname, edge)
if netname.startswith("input_2_"): netname = netname.replace("input_2_", "wire_logic_cluster/lc_") + "/in_2"
netname = netname.replace("lc_trk_", "local_")
netname = netname.replace("lc_", "lutff_")
netname = netname.replace("wire_logic_cluster/", "")
netname = netname.replace("wire_io_cluster/", "")
netname = netname.replace("wire_mult/", "")
netname = netname.replace("wire_con_box/", "")
netname = netname.replace("wire_bram/", "")
if (ramb or ramt or ramb_8k or ramt_8k) and netname.startswith("input"):
match = re.match(r"input(\d)_(\d)", netname)
idx1, idx2 = (int(match.group(1)), int(match.group(2)))
if ramb: netname="ram/WADDR_%d" % (idx1*4 + idx2)
if ramt: netname="ram/RADDR_%d" % (idx1*4 + idx2)
if ramb_8k: netname="ram/RADDR_%d" % ([7, 6, 5, 4, 3, 2, 1, 0, -1, -1, -1, -1, -1, 10, 9, 8][idx1*4 + idx2])
if ramt_8k: netname="ram/WADDR_%d" % ([7, 6, 5, 4, 3, 2, 1, 0, -1, -1, -1, -1, -1, 10, 9, 8][idx1*4 + idx2])
match = re.match(r"(...)_op_(.*)", netname)
if match and (match.group(1) != "slf"):
netname = "neigh_op_%s_%s" % (match.group(1), match.group(2))
if re.match(r"lutff_7/(cen|clk|s_r)", netname):
netname = netname.replace("lutff_7/", "lutff_global/")
if re.match(r"io_1/(cen|inclk|outclk)", netname):
netname = netname.replace("io_1/", "io_global/")
if netname == "carry_in_mux/cout":
return "carry_in_mux"
return netname
def pos_has_net(pos, netname):
    """Whether a tile at position class `pos` can carry net `netname`.

    Left/right io tiles have no vertical edge-span wires of the other
    orientation; top/bottom tiles have no horizontal ones.
    """
    forbidden = {
        ("l", "r"): (r"_vert_\d+$", r"_horz_[rl]_\d+$"),
        ("t", "b"): (r"_horz_\d+$", r"_vert_[bt]_\d+$"),
    }
    for positions, patterns in forbidden.items():
        if pos in positions and any(re.search(p, netname) for p in patterns):
            return False
    return True
def pos_follow_net(pos, direction, netname, is_ultra):
    """Map `netname` across a tile boundary.

    `pos` classifies the current tile ("x" interior, "l"/"r"/"t"/"b" edge
    io tile).  `direction` is the edge being crossed; the uppercase forms
    ("L"/"R"/"T"/"B") request Ultra/UltraPlus edge-wire naming.  Returns
    the name the adjacent tile uses for the same wire, or None if the net
    does not continue in that direction.
    """
    # Interior tiles (and, on Ultra/UltraPlus, left/right edge tiles) use
    # the sp4_*/sp12_* naming.
    if pos == "x" or ((pos in ("l", "r")) and is_ultra):
        m = re.match("sp4_h_[lr]_(\d+)$", netname)
        if m and direction in ("l", "L"):
            n = sp4h_normalize(netname, "l")
            if n is not None:
                if direction == "l" or is_ultra:
                    n = re.sub("_l_", "_r_", n)
                    n = sp4h_normalize(n)
                else:
                    # Crossing into a non-ultra edge tile: switch to the
                    # span4_horz_* edge naming.
                    n = re.sub("_l_", "_", n)
                    n = re.sub("sp4_h_", "span4_horz_", n)
                return n
        if m and direction in ("r", "R"):
            n = sp4h_normalize(netname, "r")
            if n is not None:
                if direction == "r" or is_ultra:
                    n = re.sub("_r_", "_l_", n)
                    n = sp4h_normalize(n)
                else:
                    n = re.sub("_r_", "_", n)
                    n = re.sub("sp4_h_", "span4_horz_", n)
                return n
        m = re.match("sp4_v_[tb]_(\d+)$", netname)
        if m and direction in ("t", "T"):
            n = sp4v_normalize(netname, "t")
            if n is not None:
                if is_ultra and direction == "T" and pos in ("l", "r"):
                    return re.sub("sp4_v_", "span4_vert_", n)
                elif direction == "t":
                    n = re.sub("_t_", "_b_", n)
                    n = sp4v_normalize(n)
                else:
                    n = re.sub("_t_", "_", n)
                    n = re.sub("sp4_v_", "span4_vert_", n)
                return n
        if m and direction in ("b", "B"):
            n = sp4v_normalize(netname, "b")
            if n is not None:
                if is_ultra and direction == "B" and pos in ("l", "r"):
                    return re.sub("sp4_v_", "span4_vert_", n)
                elif direction == "b":
                    n = re.sub("_b_", "_t_", n)
                    n = sp4v_normalize(n)
                else:
                    n = re.sub("_b_", "_", n)
                    n = re.sub("sp4_v_", "span4_vert_", n)
                return n
        # Same scheme for the span12 wires (shift of 2 instead of 12).
        m = re.match("sp12_h_[lr]_(\d+)$", netname)
        if m and direction in ("l", "L"):
            n = sp12h_normalize(netname, "l")
            if n is not None:
                if direction == "l" or is_ultra:
                    n = re.sub("_l_", "_r_", n)
                    n = sp12h_normalize(n)
                else:
                    n = re.sub("_l_", "_", n)
                    n = re.sub("sp12_h_", "span12_horz_", n)
                return n
        if m and direction in ("r", "R"):
            n = sp12h_normalize(netname, "r")
            if n is not None:
                if direction == "r" or is_ultra:
                    n = re.sub("_r_", "_l_", n)
                    n = sp12h_normalize(n)
                else:
                    n = re.sub("_r_", "_", n)
                    n = re.sub("sp12_h_", "span12_horz_", n)
                return n
        m = re.match("sp12_v_[tb]_(\d+)$", netname)
        if m and direction in ("t", "T"):
            n = sp12v_normalize(netname, "t")
            if n is not None:
                if direction == "t":
                    n = re.sub("_t_", "_b_", n)
                    n = sp12v_normalize(n)
                elif direction == "T" and pos in ("l", "r"):
                    # sp12 wires keep their sp12_* name on Ultra edge tiles.
                    pass
                else:
                    n = re.sub("_t_", "_", n)
                    n = re.sub("sp12_v_", "span12_vert_", n)
                return n
        if m and direction in ("b", "B"):
            n = sp12v_normalize(netname, "b")
            if n is not None:
                if direction == "b":
                    n = re.sub("_b_", "_t_", n)
                    n = sp12v_normalize(n)
                elif direction == "B" and pos in ("l", "r"):
                    pass
                else:
                    n = re.sub("_b_", "_", n)
                    n = re.sub("sp12_v_", "span12_vert_", n)
                return n
    # Non-ultra left/right io tiles: vertical span4 edge wires shift by 4
    # per tile and reflect at the end of the edge.
    if (pos in ("l", "r" )) and (not is_ultra):
        m = re.match("span4_vert_([bt])_(\d+)$", netname)
        if m:
            case, idx = direction + m.group(1), int(m.group(2))
            if case == "tt":
                return "span4_vert_b_%d" % idx
            if case == "tb" and idx >= 4:
                return "span4_vert_b_%d" % (idx-4)
            if case == "bb" and idx < 12:
                return "span4_vert_b_%d" % (idx+4)
            if case == "bb" and idx >= 12:
                return "span4_vert_t_%d" % idx
    # Top/bottom io tiles: same scheme for horizontal span4 edge wires
    # (with Ultra corner variants delegated to the normalizer).
    if pos in ("t", "b" ):
        m = re.match("span4_horz_([rl])_(\d+)$", netname)
        if m:
            case, idx = direction + m.group(1), int(m.group(2))
            if direction == "L":
                return ultra_span4_horz_normalize(netname, "l")
            elif direction == "R":
                return ultra_span4_horz_normalize(netname, "r")
            if case == "ll":
                return "span4_horz_r_%d" % idx
            if case == "lr" and idx >= 4:
                return "span4_horz_r_%d" % (idx-4)
            if case == "rr" and idx < 12:
                return "span4_horz_r_%d" % (idx+4)
            if case == "rr" and idx >= 12:
                return "span4_horz_l_%d" % idx
    # Crossing from an edge tile back into the fabric: edge wires become
    # regular span wires.
    if pos == "l" and direction == "r" and (not is_ultra):
        m = re.match("span4_horz_(\d+)$", netname)
        if m: return sp4h_normalize("sp4_h_l_%s" % m.group(1))
        m = re.match("span12_horz_(\d+)$", netname)
        if m: return sp12h_normalize("sp12_h_l_%s" % m.group(1))
    if pos == "r" and direction == "l" and (not is_ultra):
        m = re.match("span4_horz_(\d+)$", netname)
        if m: return sp4h_normalize("sp4_h_r_%s" % m.group(1))
        m = re.match("span12_horz_(\d+)$", netname)
        if m: return sp12h_normalize("sp12_h_r_%s" % m.group(1))
    if pos == "t" and direction == "b":
        m = re.match("span4_vert_(\d+)$", netname)
        if m: return sp4v_normalize("sp4_v_t_%s" % m.group(1))
        m = re.match("span12_vert_(\d+)$", netname)
        if m: return sp12v_normalize("sp12_v_t_%s" % m.group(1))
    if pos == "b" and direction == "t":
        m = re.match("span4_vert_(\d+)$", netname)
        if m: return sp4v_normalize("sp4_v_b_%s" % m.group(1))
        m = re.match("span12_vert_(\d+)$", netname)
        if m: return sp12v_normalize("sp12_v_b_%s" % m.group(1))
    return None
def get_lutff_bits(tile, index):
    """Extract the 20 raw configuration bits of LUT/FF unit `index`.

    Each unit occupies columns 36..45 of two consecutive tile rows; the
    first row supplies bit positions 0..9 and the second 10..19.
    """
    bits = list("--------------------")
    for row, line in enumerate(tile):
        for col in range(36, 46):
            if row // 2 == index:
                bits[(col - 36) + 10 * (row % 2)] = line[col]
    return bits
def get_lutff_lut_bits(tile, index):
    """Return the 16 LUT init bits of unit `index` in logical order."""
    lut_order = [4, 14, 15, 5, 6, 16, 17, 7, 3, 13, 12, 2, 1, 11, 10, 0]
    raw = get_lutff_bits(tile, index)
    return [raw[i] for i in lut_order]
def get_lutff_seq_bits(tile, index):
    """Return the 4 sequential (FF configuration) bits of unit `index`."""
    raw = get_lutff_bits(tile, index)
    return [raw[i] for i in [8, 9, 18, 19]]
def get_carry_cascade_bit(tile):
    """Carry cascade configuration bit: tile row 1, column 49."""
    return tile[1][49]
def get_carry_bit(tile):
    """Carry enable configuration bit: tile row 1, column 50."""
    return tile[1][50]
def get_negclk_bit(tile):
    """Negated-clock configuration bit: tile row 0, column 0."""
    return tile[0][0]
def key_netname(netname):
    """Sort key for net names: zero-pad every embedded number to 9 digits
    so that e.g. sp4_h_l_2 sorts before sp4_h_l_10."""
    return re.sub(r"\d+", lambda m: m.group(0).zfill(9), netname)
def run_checks_neigh():
    """Consistency-check the neighbour finder.

    follow_net() must be symmetric: s2 in follow_net(s1) implies
    s1 in follow_net(s2).  Any violation is printed with both
    neighbour sets for debugging.
    """
    print("Running consistency checks on neighbour finder..")
    ic = iceconfig()
    # ic.setup_empty_1k()
    ic.setup_empty_5k()
    # ic.setup_empty_8k()
    # ic.setup_empty_384()
    all_segments = set()
    def add_segments(idx, db):
        # Collect every net appearing as an endpoint of a routing/buffer
        # switch in this tile.
        for entry in db:
            if entry[1] in ("routing", "buffer"):
                if not ic.tile_has_net(idx[0], idx[1], entry[2]): continue
                if not ic.tile_has_net(idx[0], idx[1], entry[3]): continue
                all_segments.add((idx[0], idx[1], entry[2]))
                all_segments.add((idx[0], idx[1], entry[3]))
    for x in range(ic.max_x+1):
        # Bug fix: the y loop previously ranged over ic.max_x, which on
        # non-square devices skipped (or over-ran) rows.
        for y in range(ic.max_y+1):
            # Skip the corners.
            if x in (0, ic.max_x) and y in (0, ic.max_y):
                continue
            add_segments((x, y), ic.tile_db(x, y))
            if (x, y) in ic.logic_tiles:
                all_segments.add((x, y, "lutff_7/cout"))
    for s1 in all_segments:
        for s2 in ic.follow_net(s1):
            # if s1[1] > 4: continue
            if s1 not in ic.follow_net(s2):
                print("ERROR: %s -> %s, but not vice versa!" % (s1, s2))
                print("Neighbours of %s:" % (s1,))
                for s in ic.follow_net(s1):
                    print("    ", s)
                print("Neighbours of %s:" % (s2,))
                for s in ic.follow_net(s2):
                    print("    ", s)
                print()
def run_checks():
    """Run all icebox self-consistency checks."""
    run_checks_neigh()
def parse_db(text, device="1k"):
    """Parse a tab-separated chipdb text block into a list of entries.

    Lines tagged with a device-specific global-network prefix (e.g.
    "1k_glb_netwk_") are kept only for matching devices, with the tag
    stripped.  Each returned entry is [bit_pattern_list, field, ...],
    where the first tab-field is split on commas.
    """
    entries = list()
    # (tag, devices that keep lines carrying it); checked in this order.
    tag_rules = (
        ("1k_glb_netwk_", ("1k",)),
        ("8k_glb_netwk_", ("8k", "5k")),  # global network is shared by 8k and 5k
        ("384_glb_netwk_", ("384",)),
    )
    for rawline in text.split("\n"):
        line = rawline
        wrong_device = False
        for tag, devices in tag_rules:
            untagged = line.replace(tag, "glb_netwk_")
            if untagged != line:
                if device not in devices:
                    wrong_device = True
                line = untagged
                break
        if wrong_device:
            continue
        fields = line.split("\t")
        if len(fields) == 0 or fields[0] == "":
            continue
        fields[0] = fields[0].split(",")
        entries.append(fields)
    return entries
# Extra (non-tile) configuration bits per device: maps the extra-bit
# address triple (bank, x, y) to the (function, index) it controls.
# All currently known extra bits route a padin signal onto a global network.
extra_bits_db = {
    "1k": {
        (0, 330, 142): ("padin_glb_netwk", "0"),
        (0, 331, 142): ("padin_glb_netwk", "1"),
        (1, 330, 143): ("padin_glb_netwk", "2"),
        (1, 331, 143): ("padin_glb_netwk", "3"), # (1 3) (331 144) (331 144)  routing T_0_0.padin_3 <X> T_0_0.glb_netwk_3
        (1, 330, 142): ("padin_glb_netwk", "4"),
        (1, 331, 142): ("padin_glb_netwk", "5"),
        (0, 330, 143): ("padin_glb_netwk", "6"), # (0 0) (330 143) (330 143)  routing T_0_0.padin_6 <X> T_0_0.glb_netwk_6
        (0, 331, 143): ("padin_glb_netwk", "7"),
    },
    "5k": {
        (0, 690, 334): ("padin_glb_netwk", "0"), # check
        (0, 691, 334): ("padin_glb_netwk", "1"), # good
        (1, 690, 175): ("padin_glb_netwk", "2"), # good
        (1, 691, 175): ("padin_glb_netwk", "3"), # check
        (1, 690, 174): ("padin_glb_netwk", "4"), # good (INTOSC only)
        (1, 691, 174): ("padin_glb_netwk", "5"), # good (INTOSC only)
        (0, 690, 335): ("padin_glb_netwk", "6"), # check
        (0, 691, 335): ("padin_glb_netwk", "7"), # good
    },
    "8k": {
        (0, 870, 270): ("padin_glb_netwk", "0"),
        (0, 871, 270): ("padin_glb_netwk", "1"),
        (1, 870, 271): ("padin_glb_netwk", "2"),
        (1, 871, 271): ("padin_glb_netwk", "3"),
        (1, 870, 270): ("padin_glb_netwk", "4"),
        (1, 871, 270): ("padin_glb_netwk", "5"),
        (0, 870, 271): ("padin_glb_netwk", "6"),
        (0, 871, 271): ("padin_glb_netwk", "7"),
    },
    "384": {
        (0, 180, 78): ("padin_glb_netwk", "0"),
        (0, 181, 78): ("padin_glb_netwk", "1"),
        (1, 180, 79): ("padin_glb_netwk", "2"),
        (1, 181, 79): ("padin_glb_netwk", "3"),
        (1, 180, 78): ("padin_glb_netwk", "4"),
        (1, 181, 78): ("padin_glb_netwk", "5"),
        (0, 180, 79): ("padin_glb_netwk", "6"),
        (0, 181, 79): ("padin_glb_netwk", "7"),
    }
}
# Global buffer inputs per device: (x, y, n) triples naming the tile whose
# "fabout" signal can drive glb_netwk_<n>.
gbufin_db = {
    "1k": [
        (13,  8, 7),
        ( 0,  8, 6),
        ( 7, 17, 1),
        ( 7,  0, 0),
        ( 0,  9, 3),
        (13,  9, 2),
        ( 6,  0, 5),
        ( 6, 17, 4),
    ],
    "5k": [
        ( 6,  0, 6), #checked
        (12,  0, 5), #checked
        (13,  0, 0), #checked
        (19,  0, 7), #checked
        ( 6, 31, 3), #checked
        (12, 31, 4), #checked
        (13, 31, 1), #checked
        (19, 31, 2), #checked
    ],
    "8k": [
        (33, 16, 7),
        ( 0, 16, 6),
        (17, 33, 1),
        (17,  0, 0),
        ( 0, 17, 3),
        (33, 17, 2),
        (16,  0, 5),
        (16, 33, 4),
    ],
    "384": [
        ( 7,  4, 7),
        ( 0,  4, 6),
        ( 4,  9, 1),
        ( 4,  0, 0),
        ( 0,  5, 3),
        ( 7,  5, 2),
        ( 3,  0, 5),
        ( 3,  9, 4),
    ]
}
# To figure these out:
# 1. Copy io_latched.sh and convert it for your pinout (like io_latched_5k.sh).
# 2. Run it. It will create an io_latched_<device>.work directory with a bunch of files.
# 3. Grep the *.ve files in that directory for "'fabout')". The coordinates
#    before it are where the io latches are.
#
# Note: This may not work if your icepack configuration of cell sizes is incorrect because
# icebox_vlog.py won't correctly interpret the meaning of particular bits.
#
# Per device: the (x, y) tile whose "fabout" drives the io_global/latch
# signal for its edge of the die.
iolatch_db = {
    "1k": [
        ( 0,  7),
        (13, 10),
        ( 5,  0),
        ( 8, 17),
    ],
    "5k": [
        (14, 0),
        (14, 31),
    ],
    "8k": [
        ( 0, 15),
        (33, 18),
        (18,  0),
        (15, 33),
    ],
    "384": [
        ( 0,  3), #384?
        ( 7,  5), #384?
        ( 2,  0), #384?
        ( 5,  9), #384?
    ],
}
# The x, y cell locations of the WARMBOOT controls. Run tests/sb_warmboot.v
# through icecube.sh to determine these values.
# Per device: maps each SB_WARMBOOT port to the (x, y, netname) driving it.
warmbootinfo_db = {
    "1k": {
        "BOOT": ( 12, 0, "fabout" ),
        "S0":   ( 13, 1, "fabout" ),
        "S1":   ( 13, 2, "fabout" ),
    },
    "5k": {
        # These are the right locations but may be the wrong order.
        "BOOT": ( 22, 0, "fabout" ),
        "S0":   ( 23, 0, "fabout" ),
        "S1":   ( 24, 0, "fabout" ),
    },
    "8k": {
        "BOOT": ( 31, 0, "fabout" ),
        "S0":   ( 33, 1, "fabout" ),
        "S1":   ( 33, 2, "fabout" ),
    },
    "384": {
        "BOOT": ( 6, 0, "fabout" ),  #384?
        "S0":   ( 7, 1, "fabout" ),
        "S1":   ( 7, 2, "fabout" ),
    }
}
# Device/package combinations without a usable PLL: maps "<device>-<package>"
# to the list of PLL ids (pllinfo_db keys) that are absent in that package.
noplls_db = {
    "1k-swg16tr": [ "1k" ],
    "1k-cm36": [ "1k" ],
    "1k-cm49": [ "1k" ],
    "8k-cm81": [ "8k_1" ],
    "8k-cm81:4k": [ "8k_1" ],
    "1k-qn48": [ "1k" ],
    "1k-cb81": [ "1k" ],
    "1k-cb121": [ "1k" ],
    "1k-vq100": [ "1k" ],
    "384-qn32": [ "384" ],
}
pllinfo_db = {
"1k": {
"LOC" : (6, 0),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 0, 3, "PLLCONFIG_5"),
"PLLTYPE_1": ( 0, 5, "PLLCONFIG_1"),
"PLLTYPE_2": ( 0, 5, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 0, 5, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 0, 2, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 0, 3, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 0, 4, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 0, 4, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 0, 3, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 0, 3, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 0, 3, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 0, 3, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 0, 3, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 0, 3, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 0, 4, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 0, 4, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 0, 4, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 0, 4, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 0, 4, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 0, 4, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 0, 4, "PLLCONFIG_8"),
"DIVR_0": ( 0, 1, "PLLCONFIG_1"),
"DIVR_1": ( 0, 1, "PLLCONFIG_2"),
"DIVR_2": ( 0, 1, "PLLCONFIG_3"),
"DIVR_3": ( 0, 1, "PLLCONFIG_4"),
"DIVF_0": ( 0, 1, "PLLCONFIG_5"),
"DIVF_1": ( 0, 1, "PLLCONFIG_6"),
"DIVF_2": ( 0, 1, "PLLCONFIG_7"),
"DIVF_3": ( 0, 1, "PLLCONFIG_8"),
"DIVF_4": ( 0, 1, "PLLCONFIG_9"),
"DIVF_5": ( 0, 2, "PLLCONFIG_1"),
"DIVF_6": ( 0, 2, "PLLCONFIG_2"),
"DIVQ_0": ( 0, 2, "PLLCONFIG_3"),
"DIVQ_1": ( 0, 2, "PLLCONFIG_4"),
"DIVQ_2": ( 0, 2, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 0, 2, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 0, 2, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 0, 2, "PLLCONFIG_8"),
"TEST_MODE": ( 0, 3, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 6, 0, 1),
"PLLOUT_B": ( 7, 0, 0),
"REFERENCECLK": ( 0, 1, "fabout"),
"EXTFEEDBACK": ( 0, 2, "fabout"),
"DYNAMICDELAY_0": ( 0, 4, "fabout"),
"DYNAMICDELAY_1": ( 0, 5, "fabout"),
"DYNAMICDELAY_2": ( 0, 6, "fabout"),
"DYNAMICDELAY_3": ( 0, 10, "fabout"),
"DYNAMICDELAY_4": ( 0, 11, "fabout"),
"DYNAMICDELAY_5": ( 0, 12, "fabout"),
"DYNAMICDELAY_6": ( 0, 13, "fabout"),
"DYNAMICDELAY_7": ( 0, 14, "fabout"),
"LOCK": ( 1, 1, "neigh_op_bnl_1"),
"BYPASS": ( 1, 0, "fabout"),
"RESETB": ( 2, 0, "fabout"),
"LATCHINPUTVALUE": ( 5, 0, "fabout"),
"SDO": (12, 1, "neigh_op_bnr_3"),
"SDI": ( 4, 0, "fabout"),
"SCLK": ( 3, 0, "fabout"),
},
"5k": {
"LOC" : (12, 31),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": (12, 31, "PLLCONFIG_5"),
"PLLTYPE_1": (14, 31, "PLLCONFIG_1"),
"PLLTYPE_2": (14, 31, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": (14, 31, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": (11, 31, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": (12, 31, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": (13, 31, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": (13, 31, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": (12, 31, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": (12, 31, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": (12, 31, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": (12, 31, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": (12, 31, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": (12, 31, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": (13, 31, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": (13, 31, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": (13, 31, "PLLCONFIG_3"),
"FDA_RELATIVE_0": (13, 31, "PLLCONFIG_5"),
"FDA_RELATIVE_1": (13, 31, "PLLCONFIG_6"),
"FDA_RELATIVE_2": (13, 31, "PLLCONFIG_7"),
"FDA_RELATIVE_3": (13, 31, "PLLCONFIG_8"),
"DIVR_0": (10, 31, "PLLCONFIG_1"),
"DIVR_1": (10, 31, "PLLCONFIG_2"),
"DIVR_2": (10, 31, "PLLCONFIG_3"),
"DIVR_3": (10, 31, "PLLCONFIG_4"),
"DIVF_0": (10, 31, "PLLCONFIG_5"),
"DIVF_1": (10, 31, "PLLCONFIG_6"),
"DIVF_2": (10, 31, "PLLCONFIG_7"),
"DIVF_3": (10, 31, "PLLCONFIG_8"),
"DIVF_4": (10, 31, "PLLCONFIG_9"),
"DIVF_5": (11, 31, "PLLCONFIG_1"),
"DIVF_6": (11, 31, "PLLCONFIG_2"),
"DIVQ_0": (11, 31, "PLLCONFIG_3"),
"DIVQ_1": (11, 31, "PLLCONFIG_4"),
"DIVQ_2": (11, 31, "PLLCONFIG_5"),
"FILTER_RANGE_0": (11, 31, "PLLCONFIG_6"),
"FILTER_RANGE_1": (11, 31, "PLLCONFIG_7"),
"FILTER_RANGE_2": (11, 31, "PLLCONFIG_8"),
"TEST_MODE": (12, 31, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 12, 31, 1),
"PLLOUT_B": ( 13, 31, 0),
"REFERENCECLK": ( 10, 31, "fabout"),
"EXTFEEDBACK": ( 11, 31, "fabout"),
"DYNAMICDELAY_0": ( 1, 31, "fabout"),
"DYNAMICDELAY_1": ( 2, 31, "fabout"),
"DYNAMICDELAY_2": ( 3, 31, "fabout"),
"DYNAMICDELAY_3": ( 4, 31, "fabout"),
"DYNAMICDELAY_4": ( 5, 31, "fabout"),
"DYNAMICDELAY_5": ( 7, 31, "fabout"),
"DYNAMICDELAY_6": ( 8, 31, "fabout"),
"DYNAMICDELAY_7": ( 9, 31, "fabout"),
"LOCK": ( 1, 30, "neigh_op_tnl_1"), #check?
"BYPASS": ( 15, 31, "fabout"),
"RESETB": ( 16, 31, "fabout"),
"LATCHINPUTVALUE": ( 14, 31, "fabout"),
"SDO": ( 24, 30, "neigh_op_tnr_1"), #check?
"SDI": ( 18, 31, "fabout"),
"SCLK": ( 17, 31, "fabout"),
},
"8k_0": {
"LOC" : (16, 0),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 16, 0, "PLLCONFIG_5"),
"PLLTYPE_1": ( 18, 0, "PLLCONFIG_1"),
"PLLTYPE_2": ( 18, 0, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 18, 0, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 15, 0, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 16, 0, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 17, 0, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 17, 0, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 16, 0, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 16, 0, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 16, 0, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 16, 0, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 16, 0, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 16, 0, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 17, 0, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 17, 0, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 17, 0, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 17, 0, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 17, 0, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 17, 0, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 17, 0, "PLLCONFIG_8"),
"DIVR_0": ( 14, 0, "PLLCONFIG_1"),
"DIVR_1": ( 14, 0, "PLLCONFIG_2"),
"DIVR_2": ( 14, 0, "PLLCONFIG_3"),
"DIVR_3": ( 14, 0, "PLLCONFIG_4"),
"DIVF_0": ( 14, 0, "PLLCONFIG_5"),
"DIVF_1": ( 14, 0, "PLLCONFIG_6"),
"DIVF_2": ( 14, 0, "PLLCONFIG_7"),
"DIVF_3": ( 14, 0, "PLLCONFIG_8"),
"DIVF_4": ( 14, 0, "PLLCONFIG_9"),
"DIVF_5": ( 15, 0, "PLLCONFIG_1"),
"DIVF_6": ( 15, 0, "PLLCONFIG_2"),
"DIVQ_0": ( 15, 0, "PLLCONFIG_3"),
"DIVQ_1": ( 15, 0, "PLLCONFIG_4"),
"DIVQ_2": ( 15, 0, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 15, 0, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 15, 0, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 15, 0, "PLLCONFIG_8"),
"TEST_MODE": ( 16, 0, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 16, 0, 1),
"PLLOUT_B": ( 17, 0, 0),
"REFERENCECLK": ( 13, 0, "fabout"),
"EXTFEEDBACK": ( 14, 0, "fabout"),
"DYNAMICDELAY_0": ( 5, 0, "fabout"),
"DYNAMICDELAY_1": ( 6, 0, "fabout"),
"DYNAMICDELAY_2": ( 7, 0, "fabout"),
"DYNAMICDELAY_3": ( 8, 0, "fabout"),
"DYNAMICDELAY_4": ( 9, 0, "fabout"),
"DYNAMICDELAY_5": ( 10, 0, "fabout"),
"DYNAMICDELAY_6": ( 11, 0, "fabout"),
"DYNAMICDELAY_7": ( 12, 0, "fabout"),
"LOCK": ( 1, 1, "neigh_op_bnl_1"),
"BYPASS": ( 19, 0, "fabout"),
"RESETB": ( 20, 0, "fabout"),
"LATCHINPUTVALUE": ( 15, 0, "fabout"),
"SDO": ( 32, 1, "neigh_op_bnr_3"),
"SDI": ( 22, 0, "fabout"),
"SCLK": ( 21, 0, "fabout"),
},
"8k_1": {
"LOC" : (16, 33),
# 3'b000 = "DISABLED"
# 3'b010 = "SB_PLL40_PAD"
# 3'b100 = "SB_PLL40_2_PAD"
# 3'b110 = "SB_PLL40_2F_PAD"
# 3'b011 = "SB_PLL40_CORE"
# 3'b111 = "SB_PLL40_2F_CORE"
"PLLTYPE_0": ( 16, 33, "PLLCONFIG_5"),
"PLLTYPE_1": ( 18, 33, "PLLCONFIG_1"),
"PLLTYPE_2": ( 18, 33, "PLLCONFIG_3"),
# 3'b000 = "DELAY"
# 3'b001 = "SIMPLE"
# 3'b010 = "PHASE_AND_DELAY"
# 3'b110 = "EXTERNAL"
"FEEDBACK_PATH_0": ( 18, 33, "PLLCONFIG_5"),
"FEEDBACK_PATH_1": ( 15, 33, "PLLCONFIG_9"),
"FEEDBACK_PATH_2": ( 16, 33, "PLLCONFIG_1"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_FEEDBACK=4'b1111)
"DELAY_ADJMODE_FB": ( 17, 33, "PLLCONFIG_4"),
# 1'b0 = "FIXED"
# 1'b1 = "DYNAMIC" (also set FDA_RELATIVE=4'b1111)
"DELAY_ADJMODE_REL": ( 17, 33, "PLLCONFIG_9"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_A_0": ( 16, 33, "PLLCONFIG_6"),
"PLLOUT_SELECT_A_1": ( 16, 33, "PLLCONFIG_7"),
# 2'b00 = "GENCLK"
# 2'b01 = "GENCLK_HALF"
# 2'b10 = "SHIFTREG_90deg"
# 2'b11 = "SHIFTREG_0deg"
"PLLOUT_SELECT_B_0": ( 16, 33, "PLLCONFIG_2"),
"PLLOUT_SELECT_B_1": ( 16, 33, "PLLCONFIG_3"),
# Numeric Parameters
"SHIFTREG_DIV_MODE": ( 16, 33, "PLLCONFIG_4"),
"FDA_FEEDBACK_0": ( 16, 33, "PLLCONFIG_9"),
"FDA_FEEDBACK_1": ( 17, 33, "PLLCONFIG_1"),
"FDA_FEEDBACK_2": ( 17, 33, "PLLCONFIG_2"),
"FDA_FEEDBACK_3": ( 17, 33, "PLLCONFIG_3"),
"FDA_RELATIVE_0": ( 17, 33, "PLLCONFIG_5"),
"FDA_RELATIVE_1": ( 17, 33, "PLLCONFIG_6"),
"FDA_RELATIVE_2": ( 17, 33, "PLLCONFIG_7"),
"FDA_RELATIVE_3": ( 17, 33, "PLLCONFIG_8"),
"DIVR_0": ( 14, 33, "PLLCONFIG_1"),
"DIVR_1": ( 14, 33, "PLLCONFIG_2"),
"DIVR_2": ( 14, 33, "PLLCONFIG_3"),
"DIVR_3": ( 14, 33, "PLLCONFIG_4"),
"DIVF_0": ( 14, 33, "PLLCONFIG_5"),
"DIVF_1": ( 14, 33, "PLLCONFIG_6"),
"DIVF_2": ( 14, 33, "PLLCONFIG_7"),
"DIVF_3": ( 14, 33, "PLLCONFIG_8"),
"DIVF_4": ( 14, 33, "PLLCONFIG_9"),
"DIVF_5": ( 15, 33, "PLLCONFIG_1"),
"DIVF_6": ( 15, 33, "PLLCONFIG_2"),
"DIVQ_0": ( 15, 33, "PLLCONFIG_3"),
"DIVQ_1": ( 15, 33, "PLLCONFIG_4"),
"DIVQ_2": ( 15, 33, "PLLCONFIG_5"),
"FILTER_RANGE_0": ( 15, 33, "PLLCONFIG_6"),
"FILTER_RANGE_1": ( 15, 33, "PLLCONFIG_7"),
"FILTER_RANGE_2": ( 15, 33, "PLLCONFIG_8"),
"TEST_MODE": ( 16, 33, "PLLCONFIG_8"),
# PLL Ports
"PLLOUT_A": ( 16, 33, 1),
"PLLOUT_B": ( 17, 33, 0),
"REFERENCECLK": ( 13, 33, "fabout"),
"EXTFEEDBACK": ( 14, 33, "fabout"),
"DYNAMICDELAY_0": ( 5, 33, "fabout"),
"DYNAMICDELAY_1": ( 6, 33, "fabout"),
"DYNAMICDELAY_2": ( 7, 33, "fabout"),
"DYNAMICDELAY_3": ( 8, 33, "fabout"),
"DYNAMICDELAY_4": ( 9, 33, "fabout"),
"DYNAMICDELAY_5": ( 10, 33, "fabout"),
"DYNAMICDELAY_6": ( 11, 33, "fabout"),
"DYNAMICDELAY_7": ( 12, 33, "fabout"),
"LOCK": ( 1, 32, "neigh_op_tnl_1"),
"BYPASS": ( 19, 33, "fabout"),
"RESETB": ( 20, 33, "fabout"),
"LATCHINPUTVALUE": ( 15, 33, "fabout"),
"SDO": ( 32, 32, "neigh_op_tnr_1"),
"SDI": ( 22, 33, "fabout"),
"SCLK": ( 21, 33, "fabout"),
},
}
# Maps each chip variant to the eight IO blocks whose "padin" signal can
# drive a global network. The list index is the global network number
# (glb_netwk_0 .. glb_netwk_7); each entry is the (x, y, z) grid location
# of the driving IO block.
padin_pio_db = {
    "1k": [
        (13, 8, 1), # glb_netwk_0
        ( 0, 8, 1), # glb_netwk_1
        ( 7, 17, 0), # glb_netwk_2
        ( 7, 0, 0), # glb_netwk_3
        ( 0, 9, 0), # glb_netwk_4
        (13, 9, 0), # glb_netwk_5
        ( 6, 0, 1), # glb_netwk_6
        ( 6, 17, 1), # glb_netwk_7
    ],
    "5k": [
        (19, 0, 1), #0 fixed
        ( 6, 0, 1), #1 fixed
        (13, 31, 0), #2 fixed
        (13, 0, 0), #3 fixed
        (19, 31, 0), #These two are questionable, but keep the order correct
        ( 6, 31, 0), #They may need to be fixed if other package options are added.
        (12, 0, 1), #6 fixed
        (12, 31, 1), #7 fixed
    ],
    "8k": [
        (33, 16, 1), # glb_netwk_0
        ( 0, 16, 1), # glb_netwk_1
        (17, 33, 0), # glb_netwk_2
        (17, 0, 0), # glb_netwk_3
        ( 0, 17, 0), # glb_netwk_4
        (33, 17, 0), # glb_netwk_5
        (16, 0, 1), # glb_netwk_6
        (16, 33, 1), # glb_netwk_7
    ],
    "384": [
        ( 7, 4, 1), # glb_netwk_0
        ( 0, 4, 1), # glb_netwk_1
        ( 4, 9, 0), # glb_netwk_2
        ( 4, 0, 0), #QFN32: no pin?!
        ( 0, 5, 0), # glb_netwk_4
        ( 7, 5, 0), # glb_netwk_5
        ( 3, 0, 1), #QFN32: no pin?!
        ( 3, 9, 1), # glb_netwk_7
    ]
}
# Maps each chip variant to the correspondence between IO blocks and the
# IeRen blocks (input-enable / pull-up-enable control bits) that configure
# them. Each entry is a 6-tuple:
#     (io_x, io_y, io_z, ieren_x, ieren_y, ieren_z)
# For most locations the mapping is the identity, but on some devices the
# z index is swapped or neighboring tiles are cross-wired; IO locations
# with no entry here have no associated IeRen block.
ieren_db = {
    "1k": [
        # IO-block (X, Y, Z) <-> IeRen-block (X, Y, Z)
        ( 0, 2, 0, 0, 2, 1),
        ( 0, 2, 1, 0, 2, 0),
        ( 0, 3, 0, 0, 3, 1),
        ( 0, 3, 1, 0, 3, 0),
        ( 0, 4, 0, 0, 4, 1),
        ( 0, 4, 1, 0, 4, 0),
        ( 0, 5, 0, 0, 5, 1),
        ( 0, 5, 1, 0, 5, 0),
        ( 0, 6, 0, 0, 6, 1),
        ( 0, 6, 1, 0, 6, 0),
        ( 0, 8, 0, 0, 8, 1),
        ( 0, 8, 1, 0, 8, 0),
        ( 0, 9, 0, 0, 9, 1),
        ( 0, 9, 1, 0, 9, 0),
        ( 0, 10, 0, 0, 10, 1),
        ( 0, 10, 1, 0, 10, 0),
        ( 0, 11, 0, 0, 11, 1),
        ( 0, 11, 1, 0, 11, 0),
        ( 0, 12, 0, 0, 12, 1),
        ( 0, 12, 1, 0, 12, 0),
        ( 0, 13, 0, 0, 13, 1),
        ( 0, 13, 1, 0, 13, 0),
        ( 0, 14, 0, 0, 14, 1),
        ( 0, 14, 1, 0, 14, 0),
        ( 1, 0, 0, 1, 0, 0),
        ( 1, 0, 1, 1, 0, 1),
        ( 1, 17, 0, 1, 17, 0),
        ( 1, 17, 1, 1, 17, 1),
        ( 2, 0, 0, 2, 0, 0),
        ( 2, 0, 1, 2, 0, 1),
        ( 2, 17, 0, 2, 17, 0),
        ( 2, 17, 1, 2, 17, 1),
        ( 3, 0, 0, 3, 0, 0),
        ( 3, 0, 1, 3, 0, 1),
        ( 3, 17, 0, 3, 17, 0),
        ( 3, 17, 1, 3, 17, 1),
        ( 4, 0, 0, 4, 0, 0),
        ( 4, 0, 1, 4, 0, 1),
        ( 4, 17, 0, 4, 17, 0),
        ( 4, 17, 1, 4, 17, 1),
        ( 5, 0, 0, 5, 0, 0),
        ( 5, 0, 1, 5, 0, 1),
        ( 5, 17, 0, 5, 17, 0),
        ( 5, 17, 1, 5, 17, 1),
        # NOTE: tiles (6,0) and (7,0) are cross-wired, not identity-mapped.
        ( 6, 0, 0, 7, 0, 0),
        ( 6, 0, 1, 6, 0, 0),
        ( 6, 17, 0, 6, 17, 0),
        ( 6, 17, 1, 6, 17, 1),
        ( 7, 0, 0, 6, 0, 1),
        ( 7, 0, 1, 7, 0, 1),
        ( 7, 17, 0, 7, 17, 0),
        ( 7, 17, 1, 7, 17, 1),
        ( 8, 0, 0, 8, 0, 0),
        ( 8, 0, 1, 8, 0, 1),
        ( 8, 17, 0, 8, 17, 0),
        ( 8, 17, 1, 8, 17, 1),
        ( 9, 0, 0, 9, 0, 0),
        ( 9, 0, 1, 9, 0, 1),
        # NOTE: tiles (9,17) and (10,17) are swapped with each other.
        ( 9, 17, 0, 10, 17, 0),
        ( 9, 17, 1, 10, 17, 1),
        (10, 0, 0, 10, 0, 0),
        (10, 0, 1, 10, 0, 1),
        (10, 17, 0, 9, 17, 0),
        (10, 17, 1, 9, 17, 1),
        (11, 0, 0, 11, 0, 0),
        (11, 0, 1, 11, 0, 1),
        (11, 17, 0, 11, 17, 0),
        (11, 17, 1, 11, 17, 1),
        (12, 0, 0, 12, 0, 0),
        (12, 0, 1, 12, 0, 1),
        (12, 17, 0, 12, 17, 0),
        (12, 17, 1, 12, 17, 1),
        (13, 1, 0, 13, 1, 0),
        (13, 1, 1, 13, 1, 1),
        (13, 2, 0, 13, 2, 0),
        (13, 2, 1, 13, 2, 1),
        (13, 3, 1, 13, 3, 1),
        (13, 4, 0, 13, 4, 0),
        (13, 4, 1, 13, 4, 1),
        (13, 6, 0, 13, 6, 0),
        (13, 6, 1, 13, 6, 1),
        (13, 7, 0, 13, 7, 0),
        (13, 7, 1, 13, 7, 1),
        (13, 8, 0, 13, 8, 0),
        (13, 8, 1, 13, 8, 1),
        (13, 9, 0, 13, 9, 0),
        (13, 9, 1, 13, 9, 1),
        # NOTE: IO rows 11/12 map to IeRen rows 10/11 (shifted by one).
        (13, 11, 0, 13, 10, 0),
        (13, 11, 1, 13, 10, 1),
        (13, 12, 0, 13, 11, 0),
        (13, 12, 1, 13, 11, 1),
        (13, 13, 0, 13, 13, 0),
        (13, 13, 1, 13, 13, 1),
        (13, 14, 0, 13, 14, 0),
        (13, 14, 1, 13, 14, 1),
        (13, 15, 0, 13, 15, 0),
        (13, 15, 1, 13, 15, 1),
    ],
    # On the 8k device the mapping is the identity for every listed entry.
    "8k": [
        ( 0, 3, 0, 0, 3, 0),
        ( 0, 3, 1, 0, 3, 1),
        ( 0, 4, 0, 0, 4, 0),
        ( 0, 4, 1, 0, 4, 1),
        ( 0, 5, 0, 0, 5, 0),
        ( 0, 5, 1, 0, 5, 1),
        ( 0, 6, 0, 0, 6, 0),
        ( 0, 6, 1, 0, 6, 1),
        ( 0, 7, 0, 0, 7, 0),
        ( 0, 7, 1, 0, 7, 1),
        ( 0, 8, 0, 0, 8, 0),
        ( 0, 8, 1, 0, 8, 1),
        ( 0, 9, 0, 0, 9, 0),
        ( 0, 9, 1, 0, 9, 1),
        ( 0, 10, 0, 0, 10, 0),
        ( 0, 10, 1, 0, 10, 1),
        ( 0, 11, 0, 0, 11, 0),
        ( 0, 11, 1, 0, 11, 1),
        ( 0, 12, 0, 0, 12, 0),
        ( 0, 12, 1, 0, 12, 1),
        ( 0, 13, 0, 0, 13, 0),
        ( 0, 13, 1, 0, 13, 1),
        ( 0, 14, 0, 0, 14, 0),
        ( 0, 14, 1, 0, 14, 1),
        ( 0, 16, 0, 0, 16, 0),
        ( 0, 16, 1, 0, 16, 1),
        ( 0, 17, 0, 0, 17, 0),
        ( 0, 17, 1, 0, 17, 1),
        ( 0, 18, 0, 0, 18, 0),
        ( 0, 18, 1, 0, 18, 1),
        ( 0, 19, 0, 0, 19, 0),
        ( 0, 19, 1, 0, 19, 1),
        ( 0, 20, 0, 0, 20, 0),
        ( 0, 20, 1, 0, 20, 1),
        ( 0, 21, 0, 0, 21, 0),
        ( 0, 21, 1, 0, 21, 1),
        ( 0, 22, 0, 0, 22, 0),
        ( 0, 22, 1, 0, 22, 1),
        ( 0, 23, 0, 0, 23, 0),
        ( 0, 23, 1, 0, 23, 1),
        ( 0, 24, 0, 0, 24, 0),
        ( 0, 24, 1, 0, 24, 1),
        ( 0, 25, 0, 0, 25, 0),
        ( 0, 25, 1, 0, 25, 1),
        ( 0, 27, 0, 0, 27, 0),
        ( 0, 27, 1, 0, 27, 1),
        ( 0, 28, 0, 0, 28, 0),
        ( 0, 28, 1, 0, 28, 1),
        ( 0, 30, 0, 0, 30, 0),
        ( 0, 30, 1, 0, 30, 1),
        ( 0, 31, 0, 0, 31, 0),
        ( 0, 31, 1, 0, 31, 1),
        ( 1, 33, 0, 1, 33, 0),
        ( 1, 33, 1, 1, 33, 1),
        ( 2, 0, 0, 2, 0, 0),
        ( 2, 0, 1, 2, 0, 1),
        ( 2, 33, 0, 2, 33, 0),
        ( 2, 33, 1, 2, 33, 1),
        ( 3, 0, 0, 3, 0, 0),
        ( 3, 0, 1, 3, 0, 1),
        ( 3, 33, 0, 3, 33, 0),
        ( 3, 33, 1, 3, 33, 1),
        ( 4, 0, 0, 4, 0, 0),
        ( 4, 0, 1, 4, 0, 1),
        ( 4, 33, 0, 4, 33, 0),
        ( 4, 33, 1, 4, 33, 1),
        ( 5, 0, 0, 5, 0, 0),
        ( 5, 0, 1, 5, 0, 1),
        ( 5, 33, 0, 5, 33, 0),
        ( 5, 33, 1, 5, 33, 1),
        ( 6, 0, 0, 6, 0, 0),
        ( 6, 0, 1, 6, 0, 1),
        ( 6, 33, 0, 6, 33, 0),
        ( 6, 33, 1, 6, 33, 1),
        ( 7, 0, 0, 7, 0, 0),
        ( 7, 0, 1, 7, 0, 1),
        ( 7, 33, 0, 7, 33, 0),
        ( 7, 33, 1, 7, 33, 1),
        ( 8, 0, 0, 8, 0, 0),
        ( 8, 0, 1, 8, 0, 1),
        ( 8, 33, 0, 8, 33, 0),
        ( 8, 33, 1, 8, 33, 1),
        ( 9, 0, 0, 9, 0, 0),
        ( 9, 0, 1, 9, 0, 1),
        ( 9, 33, 0, 9, 33, 0),
        ( 9, 33, 1, 9, 33, 1),
        (10, 0, 0, 10, 0, 0),
        (10, 0, 1, 10, 0, 1),
        (10, 33, 0, 10, 33, 0),
        (10, 33, 1, 10, 33, 1),
        (11, 0, 0, 11, 0, 0),
        (11, 0, 1, 11, 0, 1),
        (11, 33, 0, 11, 33, 0),
        (11, 33, 1, 11, 33, 1),
        (12, 0, 0, 12, 0, 0),
        (12, 0, 1, 12, 0, 1),
        (12, 33, 0, 12, 33, 0),
        (13, 0, 0, 13, 0, 0),
        (13, 0, 1, 13, 0, 1),
        (13, 33, 0, 13, 33, 0),
        (13, 33, 1, 13, 33, 1),
        (14, 0, 0, 14, 0, 0),
        (14, 0, 1, 14, 0, 1),
        (14, 33, 0, 14, 33, 0),
        (14, 33, 1, 14, 33, 1),
        (15, 0, 0, 15, 0, 0),
        (15, 0, 1, 15, 0, 1),
        (16, 0, 0, 16, 0, 0),
        (16, 0, 1, 16, 0, 1),
        (16, 33, 0, 16, 33, 0),
        (16, 33, 1, 16, 33, 1),
        (17, 0, 0, 17, 0, 0),
        (17, 0, 1, 17, 0, 1),
        (17, 33, 0, 17, 33, 0),
        (17, 33, 1, 17, 33, 1),
        (18, 33, 0, 18, 33, 0),
        (18, 33, 1, 18, 33, 1),
        (19, 0, 0, 19, 0, 0),
        (19, 0, 1, 19, 0, 1),
        (19, 33, 0, 19, 33, 0),
        (19, 33, 1, 19, 33, 1),
        (20, 0, 0, 20, 0, 0),
        (20, 0, 1, 20, 0, 1),
        (20, 33, 0, 20, 33, 0),
        (20, 33, 1, 20, 33, 1),
        (21, 0, 0, 21, 0, 0),
        (21, 0, 1, 21, 0, 1),
        (21, 33, 0, 21, 33, 0),
        (21, 33, 1, 21, 33, 1),
        (22, 0, 0, 22, 0, 0),
        (22, 0, 1, 22, 0, 1),
        (22, 33, 0, 22, 33, 0),
        (22, 33, 1, 22, 33, 1),
        (23, 0, 0, 23, 0, 0),
        (23, 0, 1, 23, 0, 1),
        (23, 33, 0, 23, 33, 0),
        (23, 33, 1, 23, 33, 1),
        (24, 0, 0, 24, 0, 0),
        (24, 0, 1, 24, 0, 1),
        (24, 33, 0, 24, 33, 0),
        (24, 33, 1, 24, 33, 1),
        (25, 0, 0, 25, 0, 0),
        (25, 33, 0, 25, 33, 0),
        (25, 33, 1, 25, 33, 1),
        (26, 0, 0, 26, 0, 0),
        (26, 0, 1, 26, 0, 1),
        (26, 33, 0, 26, 33, 0),
        (26, 33, 1, 26, 33, 1),
        (27, 0, 0, 27, 0, 0),
        (27, 0, 1, 27, 0, 1),
        (27, 33, 0, 27, 33, 0),
        (27, 33, 1, 27, 33, 1),
        (28, 0, 0, 28, 0, 0),
        (28, 33, 1, 28, 33, 1),
        (29, 0, 0, 29, 0, 0),
        (29, 0, 1, 29, 0, 1),
        (29, 33, 0, 29, 33, 0),
        (29, 33, 1, 29, 33, 1),
        (30, 0, 0, 30, 0, 0),
        (30, 0, 1, 30, 0, 1),
        (30, 33, 0, 30, 33, 0),
        (30, 33, 1, 30, 33, 1),
        (31, 0, 0, 31, 0, 0),
        (31, 0, 1, 31, 0, 1),
        (31, 33, 0, 31, 33, 0),
        (31, 33, 1, 31, 33, 1),
        (33, 1, 0, 33, 1, 0),
        (33, 1, 1, 33, 1, 1),
        (33, 2, 0, 33, 2, 0),
        (33, 2, 1, 33, 2, 1),
        (33, 3, 0, 33, 3, 0),
        (33, 3, 1, 33, 3, 1),
        (33, 4, 0, 33, 4, 0),
        (33, 4, 1, 33, 4, 1),
        (33, 5, 0, 33, 5, 0),
        (33, 5, 1, 33, 5, 1),
        (33, 6, 0, 33, 6, 0),
        (33, 6, 1, 33, 6, 1),
        (33, 7, 0, 33, 7, 0),
        (33, 7, 1, 33, 7, 1),
        (33, 8, 0, 33, 8, 0),
        (33, 9, 0, 33, 9, 0),
        (33, 9, 1, 33, 9, 1),
        (33, 10, 0, 33, 10, 0),
        (33, 10, 1, 33, 10, 1),
        (33, 11, 0, 33, 11, 0),
        (33, 11, 1, 33, 11, 1),
        (33, 12, 0, 33, 12, 0),
        (33, 13, 0, 33, 13, 0),
        (33, 13, 1, 33, 13, 1),
        (33, 14, 0, 33, 14, 0),
        (33, 14, 1, 33, 14, 1),
        (33, 15, 0, 33, 15, 0),
        (33, 15, 1, 33, 15, 1),
        (33, 16, 0, 33, 16, 0),
        (33, 16, 1, 33, 16, 1),
        (33, 17, 0, 33, 17, 0),
        (33, 17, 1, 33, 17, 1),
        (33, 19, 0, 33, 19, 0),
        (33, 19, 1, 33, 19, 1),
        (33, 20, 0, 33, 20, 0),
        (33, 20, 1, 33, 20, 1),
        (33, 21, 0, 33, 21, 0),
        (33, 21, 1, 33, 21, 1),
        (33, 22, 0, 33, 22, 0),
        (33, 22, 1, 33, 22, 1),
        (33, 23, 0, 33, 23, 0),
        (33, 23, 1, 33, 23, 1),
        (33, 24, 0, 33, 24, 0),
        (33, 24, 1, 33, 24, 1),
        (33, 25, 0, 33, 25, 0),
        (33, 25, 1, 33, 25, 1),
        (33, 26, 0, 33, 26, 0),
        (33, 26, 1, 33, 26, 1),
        (33, 27, 0, 33, 27, 0),
        (33, 27, 1, 33, 27, 1),
        (33, 28, 0, 33, 28, 0),
        (33, 28, 1, 33, 28, 1),
        (33, 29, 1, 33, 29, 1),
        (33, 30, 0, 33, 30, 0),
        (33, 30, 1, 33, 30, 1),
        (33, 31, 0, 33, 31, 0),
    ],
    # On the 384 device the z index is inverted for every listed entry.
    "384": [
        ( 0, 1, 0, 0, 1, 1),
        ( 0, 1, 1, 0, 1, 0),
        ( 0, 2, 0, 0, 2, 1),
        ( 0, 2, 1, 0, 2, 0),
        ( 0, 4, 0, 0, 4, 1),
        ( 0, 4, 1, 0, 4, 0),
        ( 0, 5, 0, 0, 5, 1),
        ( 0, 5, 1, 0, 5, 0),
        ( 0, 6, 0, 0, 6, 1),
        ( 0, 6, 1, 0, 6, 0),
        ( 0, 7, 0, 0, 7, 1),
        ( 0, 7, 1, 0, 7, 0),
        ( 2, 9, 0, 2, 9, 1),
        ( 2, 9, 1, 2, 9, 0),
        ( 3, 0, 0, 3, 0, 1),
        ( 3, 0, 1, 3, 0, 0),
        ( 3, 9, 0, 3, 9, 1),
        ( 3, 9, 1, 3, 9, 0),
        ( 4, 0, 0, 4, 0, 1),
        ( 4, 0, 1, 4, 0, 0),
        ( 4, 9, 0, 4, 9, 1),
        ( 4, 9, 1, 4, 9, 0),
        ( 5, 0, 0, 5, 0, 1),
        ( 5, 0, 1, 5, 0, 0),
        ( 5, 9, 0, 5, 9, 1),
        ( 5, 9, 1, 5, 9, 0),
        ( 6, 0, 0, 6, 0, 1),
        ( 6, 0, 1, 6, 0, 0),
        ( 6, 9, 0, 6, 9, 1),
        ( 6, 9, 1, 6, 9, 0),
        ( 7, 3, 1, 7, 3, 0),
        ( 7, 4, 0, 7, 4, 1),
        ( 7, 4, 1, 7, 4, 0),
        ( 7, 5, 0, 7, 5, 1),
        ( 7, 5, 1, 7, 5, 0),
        ( 7, 6, 0, 7, 6, 1),
        ( 7, 6, 1, 7, 6, 0),
    ],
    # On the 5k device the mapping is sparse and mixes identity-position
    # entries with z-inverted ones; entries are not sorted by location.
    "5k": [
        ( 8, 0, 0, 8, 0, 1),
        ( 9, 0, 1, 9, 0, 0),
        ( 9, 0, 0, 9, 0, 1),
        (13, 0, 1, 13, 0, 0),
        (15, 0, 0, 15, 0, 1),
        (16, 0, 0, 16, 0, 1),
        (17, 0, 0, 17, 0, 1),
        (18, 0, 0, 18, 0, 1),
        (19, 0, 0, 19, 0, 1),
        (23, 0, 0, 23, 0, 1),
        (24, 0, 0, 24, 0, 1),
        (24, 0, 1, 24, 0, 0),
        (23, 0, 1, 23, 0, 0),
        (22, 0, 1, 22, 0, 0),
        (21, 0, 1, 21, 0, 0),
        (19, 0, 1, 19, 0, 0),
        (18, 0, 1, 18, 0, 0),
        (19, 31, 0, 19, 31, 1),
        (19, 31, 1, 19, 31, 0),
        (18, 31, 0, 18, 31, 1),
        (18, 31, 1, 18, 31, 0),
        (17, 31, 0, 17, 31, 1),
        (16, 31, 1, 16, 31, 0),
        (16, 31, 0, 16, 31, 1),
        (13, 31, 1, 13, 31, 0),
        (12, 31, 1, 12, 31, 0),
        ( 9, 31, 1, 9, 31, 0),
        (13, 31, 0, 13, 31, 1),
        ( 4, 31, 0, 4, 31, 1),
        ( 5, 31, 0, 5, 31, 1),
        ( 6, 31, 0, 6, 31, 1),
        ( 8, 31, 1, 8, 31, 0),
        ( 8, 31, 0, 8, 31, 1),
        ( 9, 31, 0, 9, 31, 1),
        ( 6, 0, 1, 6, 0, 0),
        ( 7, 0, 1, 7, 0, 0),
        ( 5, 0, 0, 5, 0, 1),
        ( 6, 0, 0, 6, 0, 1),
        ( 7, 0, 0, 7, 0, 1)
    ]
}
# This dictionary maps package variants to a table of pin names and their
# corresponding grid location (x, y, block). This is most easily found through
# the package view in iCEcube2 by hovering the mouse over each pin.
pinloc_db = {
"1k-swg16tr": [
( "A2", 6, 17, 1),
( "A4", 2, 17, 0),
( "B1", 11, 17, 1),
( "B2", 0, 8, 1),
( "B3", 0, 9, 0),
( "C1", 12, 0, 0),
( "C2", 11, 0, 1),
( "C3", 11, 0, 0),
( "D1", 12, 0, 1),
( "D3", 6, 0, 1),
],
"1k-cm36": [
( "A1", 0, 13, 0),
( "A2", 4, 17, 1),
( "A3", 7, 17, 0),
( "B1", 0, 13, 1),
( "B3", 6, 17, 1),
( "B4", 13, 9, 0),
( "B5", 13, 11, 0),
( "B6", 13, 11, 1),
( "C1", 0, 9, 0),
( "C2", 0, 9, 1),
( "C3", 4, 17, 0),
( "C5", 13, 8, 1),
( "C6", 13, 12, 0),
( "D1", 0, 8, 1),
( "D5", 12, 0, 1),
( "D6", 13, 6, 0),
( "E1", 0, 8, 0),
( "E2", 6, 0, 0),
( "E3", 10, 0, 0),
( "E4", 11, 0, 0),
( "E5", 12, 0, 0),
( "E6", 13, 4, 1),
( "F2", 6, 0, 1),
( "F3", 10, 0, 1),
( "F5", 11, 0, 1),
],
"1k-cm49": [
( "A1", 0, 11, 1),
( "A2", 3, 17, 1),
( "A3", 8, 17, 1),
( "A4", 8, 17, 0),
( "A5", 9, 17, 1),
( "A6", 10, 17, 0),
( "A7", 9, 17, 0),
( "B1", 0, 11, 0),
( "B2", 0, 13, 0),
( "B3", 4, 17, 0),
( "B4", 6, 17, 1),
( "C1", 0, 5, 0),
( "C2", 0, 13, 1),
( "C4", 7, 17, 0),
( "C5", 13, 12, 0),
( "C6", 13, 11, 1),
( "C7", 13, 11, 0),
( "D1", 0, 5, 1),
( "D2", 0, 9, 0),
( "D3", 0, 9, 1),
( "D4", 4, 17, 1),
( "D6", 13, 8, 1),
( "D7", 13, 9, 0),
( "E2", 0, 8, 1),
( "E6", 12, 0, 1),
( "E7", 13, 4, 1),
( "F2", 0, 8, 0),
( "F3", 6, 0, 0),
( "F4", 10, 0, 0),
( "F5", 11, 0, 0),
( "F6", 12, 0, 0),
( "F7", 13, 6, 0),
( "G3", 6, 0, 1),
( "G4", 10, 0, 1),
( "G6", 11, 0, 1),
],
"1k-cm81": [
( "A1", 1, 17, 1),
( "A2", 4, 17, 0),
( "A3", 5, 17, 0),
( "A4", 6, 17, 0),
( "A6", 8, 17, 1),
( "A7", 9, 17, 0),
( "A8", 10, 17, 0),
( "A9", 13, 14, 1),
( "B1", 0, 13, 0),
( "B2", 0, 14, 0),
( "B3", 2, 17, 1),
( "B4", 4, 17, 1),
( "B5", 8, 17, 0),
( "B6", 9, 17, 1),
( "B7", 10, 17, 1),
( "B8", 11, 17, 0),
( "B9", 13, 11, 1),
( "C1", 0, 13, 1),
( "C2", 0, 14, 1),
( "C3", 0, 12, 1),
( "C4", 6, 17, 1),
( "C5", 7, 17, 0),
( "C9", 13, 12, 0),
( "D1", 0, 11, 1),
( "D2", 0, 12, 0),
( "D3", 0, 9, 0),
( "D5", 3, 17, 1),
( "D6", 13, 6, 0),
( "D7", 13, 7, 0),
( "D8", 13, 9, 0),
( "D9", 13, 11, 0),
( "E1", 0, 10, 1),
( "E2", 0, 10, 0),
( "E3", 0, 8, 1),
( "E4", 0, 11, 0),
( "E5", 5, 17, 1),
( "E7", 13, 6, 1),
( "E8", 13, 8, 1),
( "F1", 0, 8, 0),
( "F3", 0, 9, 1),
( "F7", 12, 0, 1),
( "F8", 13, 4, 0),
( "G1", 0, 5, 1),
( "G3", 0, 5, 0),
( "G4", 6, 0, 0),
( "G5", 10, 0, 0),
( "G6", 11, 0, 0),
( "G7", 12, 0, 0),
( "G8", 13, 4, 1),
( "G9", 13, 2, 1),
( "H1", 2, 0, 0),
( "H4", 6, 0, 1),
( "H5", 10, 0, 1),
( "H7", 11, 0, 1),
( "H9", 13, 2, 0),
( "J1", 3, 0, 0),
( "J2", 2, 0, 1),
( "J3", 3, 0, 1),
( "J4", 5, 0, 0),
( "J6", 7, 0, 0),
( "J7", 9, 0, 1),
( "J8", 13, 1, 0),
( "J9", 13, 1, 1),
],
"1k-cm121": [
( "A1", 0, 14, 0),
( "A2", 2, 17, 1),
( "A3", 3, 17, 0),
( "A5", 5, 17, 1),
( "A7", 8, 17, 0),
( "A8", 10, 17, 1),
( "A9", 11, 17, 0),
("A10", 12, 17, 0),
("A11", 13, 15, 0),
( "B1", 0, 13, 0),
( "B2", 1, 17, 1),
( "B3", 2, 17, 0),
( "B4", 3, 17, 1),
( "B5", 4, 17, 1),
( "B7", 9, 17, 0),
( "B8", 11, 17, 1),
( "B9", 12, 17, 1),
("B10", 13, 15, 1),
("B11", 13, 14, 1),
( "C1", 0, 12, 0),
( "C2", 0, 13, 1),
( "C3", 0, 14, 1),
( "C4", 1, 17, 0),
( "C5", 4, 17, 0),
( "C6", 7, 17, 1),
( "C7", 8, 17, 1),
( "C8", 9, 17, 1),
( "C9", 10, 17, 0),
("C10", 13, 14, 0),
("C11", 13, 13, 1),
( "D1", 0, 11, 0),
( "D2", 0, 12, 1),
( "D3", 0, 11, 1),
( "D4", 0, 10, 1),
( "D5", 6, 17, 1),
( "D6", 7, 17, 0),
("D10", 13, 12, 1),
("D11", 13, 11, 1),
( "E2", 0, 10, 0),
( "E3", 0, 9, 1),
( "E4", 0, 9, 0),
( "E6", 5, 17, 0),
( "E7", 13, 12, 0),
( "E8", 13, 13, 0),
( "E9", 13, 9, 0),
("E10", 13, 9, 1),
( "F2", 0, 6, 0),
( "F3", 0, 5, 0),
( "F4", 0, 8, 1),
( "F5", 0, 8, 0),
( "F6", 6, 17, 0),
( "F8", 13, 11, 0),
( "F9", 13, 8, 1),
("F11", 13, 7, 1),
( "G2", 0, 5, 1),
( "G4", 0, 3, 0),
( "G8", 12, 0, 1),
( "G9", 13, 8, 0),
("G11", 13, 7, 0),
( "H1", 0, 6, 1),
( "H2", 0, 4, 1),
( "H4", 0, 2, 0),
( "H5", 6, 0, 0),
( "H6", 10, 0, 0),
( "H7", 11, 0, 0),
( "H8", 12, 0, 0),
( "H9", 13, 6, 1),
("H10", 13, 2, 1),
("H11", 13, 4, 1),
( "J1", 0, 4, 0),
( "J2", 1, 0, 1),
( "J5", 6, 0, 1),
( "J6", 10, 0, 1),
( "J8", 11, 0, 1),
("J10", 13, 2, 0),
("J11", 13, 6, 0),
( "K1", 0, 3, 1),
( "K2", 2, 0, 0),
( "K3", 2, 0, 1),
( "K4", 4, 0, 0),
( "K5", 5, 0, 0),
( "K7", 7, 0, 1),
( "K8", 9, 0, 0),
( "K9", 13, 1, 0),
("K10", 13, 1, 1),
("K11", 13, 3, 1),
( "L1", 0, 2, 1),
( "L2", 3, 0, 0),
( "L3", 3, 0, 1),
( "L4", 4, 0, 1),
( "L5", 7, 0, 0),
( "L7", 8, 0, 0),
( "L9", 8, 0, 1),
("L10", 9, 0, 1),
("L11", 13, 4, 0),
],
"1k-cb81": [
( "A2", 2, 17, 1),
( "A3", 3, 17, 1),
( "A4", 6, 17, 1),
( "A7", 11, 17, 0),
( "A8", 12, 17, 1),
( "B1", 0, 13, 1),
( "B2", 0, 14, 0),
( "B3", 0, 13, 0),
( "B4", 5, 17, 1),
( "B5", 8, 17, 1),
( "B6", 9, 17, 1),
( "B7", 11, 17, 1),
( "B8", 12, 17, 0),
( "C1", 0, 12, 0),
( "C2", 0, 10, 0),
( "C3", 0, 14, 1),
( "C4", 1, 17, 1),
( "C5", 8, 17, 0),
( "C6", 10, 17, 0),
( "C7", 13, 15, 0),
( "C8", 13, 15, 1),
( "C9", 13, 14, 1),
( "D1", 0, 9, 0),
( "D2", 0, 10, 1),
( "D3", 0, 12, 1),
( "D4", 5, 17, 0),
( "D5", 4, 17, 0),
( "D6", 7, 17, 0),
( "D7", 13, 13, 0),
( "D8", 13, 13, 1),
( "E1", 0, 8, 1),
( "E2", 0, 8, 0),
( "E3", 0, 9, 1),
( "E6", 10, 17, 1),
( "E7", 13, 12, 0),
( "E8", 13, 11, 0),
( "E9", 13, 11, 1),
( "F2", 0, 6, 1),
( "F3", 0, 6, 0),
( "F6", 13, 8, 0),
( "F7", 13, 9, 0),
( "F8", 13, 8, 1),
( "F9", 13, 7, 1),
( "G1", 0, 4, 1),
( "G2", 0, 2, 1),
( "G3", 3, 0, 1),
( "G4", 4, 0, 0),
( "G5", 10, 0, 0),
( "G6", 13, 4, 0),
( "G7", 13, 4, 1),
( "G8", 13, 6, 1),
( "G9", 13, 7, 0),
( "H2", 0, 4, 0),
( "H3", 2, 0, 1),
( "H4", 6, 0, 0),
( "H5", 10, 0, 1),
( "H7", 11, 0, 0),
( "H8", 12, 0, 1),
( "J2", 2, 0, 0),
( "J3", 6, 0, 1),
( "J7", 11, 0, 1),
( "J8", 12, 0, 0),
],
"1k-cb121": [
( "A2", 1, 17, 1),
( "A3", 2, 17, 0),
( "A4", 4, 17, 0),
( "A5", 3, 17, 1),
( "A6", 4, 17, 1),
( "A8", 10, 17, 0),
("A10", 12, 17, 1),
("A11", 13, 15, 0),
( "B1", 0, 14, 0),
( "B3", 1, 17, 0),
( "B4", 2, 17, 1),
( "B5", 3, 17, 0),
( "B8", 10, 17, 1),
( "B9", 12, 17, 0),
("B11", 13, 15, 1),
( "C1", 0, 14, 1),
( "C2", 0, 11, 1),
( "C3", 0, 13, 1),
( "C4", 0, 13, 0),
( "C5", 5, 17, 0),
( "C6", 7, 17, 0),
( "C7", 8, 17, 1),
( "C8", 11, 17, 0),
( "C9", 11, 17, 1),
("C11", 13, 14, 1),
( "D1", 0, 10, 1),
( "D2", 0, 11, 0),
( "D3", 0, 9, 0),
( "D4", 0, 12, 0),
( "D5", 5, 17, 1),
( "D6", 6, 17, 1),
( "D7", 8, 17, 0),
( "D8", 13, 12, 0),
( "D9", 13, 13, 0),
("D10", 13, 13, 1),
("D11", 13, 14, 0),
( "E2", 0, 10, 0),
( "E3", 0, 9, 1),
( "E4", 0, 12, 1),
( "E5", 6, 17, 0),
( "E6", 7, 17, 1),
( "E7", 9, 17, 0),
( "E8", 13, 11, 0),
( "E9", 13, 11, 1),
("E11", 13, 12, 1),
( "F2", 0, 6, 1),
( "F3", 0, 5, 1),
( "F4", 0, 8, 1),
( "F7", 9, 17, 1),
( "F8", 13, 8, 1),
( "F9", 13, 9, 0),
("F10", 13, 9, 1),
( "G1", 0, 6, 0),
( "G3", 0, 5, 0),
( "G4", 0, 8, 0),
( "G7", 13, 6, 1),
( "G8", 13, 7, 0),
( "G9", 13, 7, 1),
("G10", 13, 8, 0),
( "H1", 0, 3, 1),
( "H2", 0, 4, 1),
( "H3", 0, 4, 0),
( "H4", 4, 0, 0),
( "H5", 4, 0, 1),
( "H6", 10, 0, 0),
( "H7", 13, 4, 1),
( "H8", 13, 6, 0),
( "H9", 13, 4, 0),
("H10", 13, 3, 1),
("H11", 9, 0, 1),
( "J1", 0, 3, 0),
( "J2", 0, 2, 0),
( "J3", 0, 2, 1),
( "J4", 2, 0, 1),
( "J5", 3, 0, 0),
( "J6", 10, 0, 1),
( "J8", 11, 0, 0),
( "J9", 12, 0, 1),
("J11", 8, 0, 1),
( "K3", 1, 0, 0),
( "K4", 1, 0, 1),
( "K8", 11, 0, 1),
( "K9", 12, 0, 0),
("K11", 9, 0, 0),
( "L2", 2, 0, 0),
( "L3", 3, 0, 1),
( "L4", 5, 0, 0),
( "L5", 5, 0, 1),
( "L8", 7, 0, 0),
( "L9", 6, 0, 1),
("L10", 7, 0, 1),
("L11", 8, 0, 0),
],
"1k-cb132": [
( "A1", 1, 17, 1),
( "A2", 2, 17, 1),
( "A4", 4, 17, 0),
( "A5", 4, 17, 1),
( "A6", 6, 17, 1),
( "A7", 7, 17, 0),
("A10", 10, 17, 0),
("A12", 12, 17, 0),
( "B1", 0, 14, 1),
("B14", 13, 15, 0),
( "C1", 0, 14, 0),
( "C3", 0, 13, 1),
( "C4", 1, 17, 0),
( "C5", 3, 17, 0),
( "C6", 5, 17, 0),
( "C7", 6, 17, 0),
( "C8", 8, 17, 0),
( "C9", 9, 17, 0),
("C10", 11, 17, 0),
("C11", 11, 17, 1),
("C12", 12, 17, 1),
("C14", 13, 14, 0),
( "D1", 0, 11, 1),
( "D3", 0, 13, 0),
( "D4", 0, 12, 1),
( "D5", 2, 17, 0),
( "D6", 3, 17, 1),
( "D7", 5, 17, 1),
( "D8", 7, 17, 1),
( "D9", 8, 17, 1),
("D10", 9, 17, 1),
("D11", 10, 17, 1),
("D12", 13, 15, 1),
("D14", 13, 13, 1),
( "E1", 0, 11, 0),
( "E4", 0, 12, 0),
("E11", 13, 14, 1),
("E12", 13, 13, 0),
("E14", 13, 12, 0),
( "F3", 0, 10, 0),
( "F4", 0, 10, 1),
("F11", 13, 12, 1),
("F12", 13, 11, 1),
("F14", 13, 8, 1),
( "G1", 0, 8, 1),
( "G3", 0, 8, 0),
( "G4", 0, 6, 1),
("G11", 13, 11, 0),
("G12", 13, 9, 1),
("G14", 13, 9, 0),
( "H1", 0, 9, 0),
( "H3", 0, 9, 1),
( "H4", 0, 6, 0),
("H11", 13, 8, 0),
("H12", 13, 7, 1),
( "J1", 0, 5, 1),
( "J3", 0, 5, 0),
("J11", 13, 7, 0),
("J12", 13, 6, 1),
( "K3", 0, 3, 0),
( "K4", 0, 3, 1),
("K11", 13, 4, 1),
("K12", 13, 4, 0),
("K14", 13, 6, 0),
( "L1", 0, 2, 0),
( "L4", 1, 0, 1),
( "L5", 3, 0, 1),
( "L6", 4, 0, 1),
( "L7", 8, 0, 0),
( "L8", 9, 0, 0),
( "L9", 10, 0, 0),
("L12", 13, 2, 0),
("L14", 13, 3, 1),
( "M1", 0, 2, 1),
( "M3", 1, 0, 0),
( "M4", 3, 0, 0),
( "M6", 5, 0, 1),
( "M7", 6, 0, 0),
( "M8", 8, 0, 1),
( "M9", 9, 0, 1),
("M11", 11, 0, 0),
("M12", 13, 1, 0),
("N14", 13, 2, 1),
( "P2", 2, 0, 0),
( "P3", 2, 0, 1),
( "P4", 4, 0, 0),
( "P5", 5, 0, 0),
( "P7", 6, 0, 1),
( "P8", 7, 0, 0),
( "P9", 7, 0, 1),
("P10", 10, 0, 1),
("P11", 11, 0, 1),
("P12", 12, 0, 0),
("P13", 12, 0, 1),
("P14", 13, 1, 1),
],
"1k-qn84": [
( "A1", 0, 14, 0),
( "A2", 0, 13, 0),
( "A3", 0, 12, 0),
( "A4", 0, 11, 0),
( "A5", 0, 10, 0),
( "A8", 0, 9, 0),
( "A9", 0, 8, 1),
("A10", 0, 5, 1),
("A11", 0, 4, 0),
("A12", 0, 2, 0),
("A13", 4, 0, 0),
("A14", 6, 0, 1),
("A16", 6, 0, 0),
("A19", 9, 0, 1),
("A20", 10, 0, 1),
("A22", 11, 0, 1),
("A23", 12, 0, 0),
("A25", 13, 4, 0),
("A26", 13, 6, 0),
("A27", 13, 7, 1),
("A29", 13, 8, 1),
("A31", 13, 11, 1),
("A32", 13, 12, 1),
("A33", 13, 13, 1),
("A34", 13, 14, 0),
("A35", 13, 15, 0),
("A38", 11, 17, 0),
("A39", 10, 17, 0),
("A40", 9, 17, 0),
("A41", 8, 17, 0),
("A43", 7, 17, 0),
("A44", 6, 17, 0),
("A45", 5, 17, 0),
("A46", 4, 17, 0),
("A47", 3, 17, 0),
("A48", 1, 17, 1),
( "B1", 0, 13, 1),
( "B2", 0, 12, 1),
( "B3", 0, 11, 1),
( "B4", 0, 10, 1),
( "B5", 0, 9, 1),
( "B7", 0, 8, 0),
( "B8", 0, 5, 0),
( "B9", 0, 3, 0),
("B10", 5, 0, 0),
("B11", 5, 0, 1),
("B12", 7, 0, 0),
("B13", 8, 0, 0),
("B14", 9, 0, 0),
("B15", 10, 0, 0),
("B17", 11, 0, 0),
("B18", 12, 0, 1),
("B19", 13, 3, 1),
("B20", 13, 6, 1),
("B21", 13, 7, 0),
("B22", 13, 9, 0),
("B23", 13, 11, 0),
("B24", 13, 12, 0),
("B26", 13, 14, 1),
("B27", 13, 15, 1),
("B29", 10, 17, 1),
("B30", 9, 17, 1),
("B31", 8, 17, 1),
("B32", 6, 17, 1),
("B34", 4, 17, 1),
("B35", 3, 17, 1),
("B36", 2, 17, 1),
],
"1k-tq144": [
( "1", 0, 14, 1),
( "2", 0, 14, 0),
( "3", 0, 13, 1),
( "4", 0, 13, 0),
( "7", 0, 12, 1),
( "8", 0, 12, 0),
( "9", 0, 11, 1),
( "10", 0, 11, 0),
( "11", 0, 10, 1),
( "12", 0, 10, 0),
( "19", 0, 9, 1),
( "20", 0, 9, 0),
( "21", 0, 8, 1),
( "22", 0, 8, 0),
( "23", 0, 6, 1),
( "24", 0, 6, 0),
( "25", 0, 5, 1),
( "26", 0, 5, 0),
( "28", 0, 4, 1),
( "29", 0, 4, 0),
( "31", 0, 3, 1),
( "32", 0, 3, 0),
( "33", 0, 2, 1),
( "34", 0, 2, 0),
( "37", 1, 0, 0),
( "38", 1, 0, 1),
( "39", 2, 0, 0),
( "41", 2, 0, 1),
( "42", 3, 0, 0),
( "43", 3, 0, 1),
( "44", 4, 0, 0),
( "45", 4, 0, 1),
( "47", 5, 0, 0),
( "48", 5, 0, 1),
( "49", 6, 0, 1),
( "50", 7, 0, 0),
( "52", 6, 0, 0),
( "56", 7, 0, 1),
( "58", 8, 0, 0),
( "60", 8, 0, 1),
( "61", 9, 0, 0),
( "62", 9, 0, 1),
( "63", 10, 0, 0),
( "64", 10, 0, 1),
( "67", 11, 0, 0),
( "68", 11, 0, 1),
( "70", 12, 0, 0),
( "71", 12, 0, 1),
( "73", 13, 1, 0),
( "74", 13, 1, 1),
( "75", 13, 2, 0),
( "76", 13, 2, 1),
( "78", 13, 3, 1),
( "79", 13, 4, 0),
( "80", 13, 4, 1),
( "81", 13, 6, 0),
( "87", 13, 6, 1),
( "88", 13, 7, 0),
( "90", 13, 7, 1),
( "91", 13, 8, 0),
( "93", 13, 8, 1),
( "94", 13, 9, 0),
( "95", 13, 9, 1),
( "96", 13, 11, 0),
( "97", 13, 11, 1),
( "98", 13, 12, 0),
( "99", 13, 12, 1),
("101", 13, 13, 0),
("102", 13, 13, 1),
("104", 13, 14, 0),
("105", 13, 14, 1),
("106", 13, 15, 0),
("107", 13, 15, 1),
("112", 12, 17, 1),
("113", 12, 17, 0),
("114", 11, 17, 1),
("115", 11, 17, 0),
("116", 10, 17, 1),
("117", 10, 17, 0),
("118", 9, 17, 1),
("119", 9, 17, 0),
("120", 8, 17, 1),
("121", 8, 17, 0),
("122", 7, 17, 1),
("128", 7, 17, 0),
("129", 6, 17, 1),
("134", 5, 17, 1),
("135", 5, 17, 0),
("136", 4, 17, 1),
("137", 4, 17, 0),
("138", 3, 17, 1),
("139", 3, 17, 0),
("141", 2, 17, 1),
("142", 2, 17, 0),
("143", 1, 17, 1),
("144", 1, 17, 0),
],
"1k-vq100": [
( "1", 0, 14, 1),
( "2", 0, 14, 0),
( "3", 0, 13, 1),
( "4", 0, 13, 0),
( "7", 0, 12, 1),
( "8", 0, 12, 0),
( "9", 0, 10, 1),
( "10", 0, 10, 0),
( "12", 0, 9, 1),
( "13", 0, 9, 0),
( "15", 0, 8, 1),
( "16", 0, 8, 0),
( "18", 0, 6, 1),
( "19", 0, 6, 0),
( "20", 0, 4, 1),
( "21", 0, 4, 0),
( "24", 0, 2, 1),
( "25", 0, 2, 0),
( "26", 2, 0, 0),
( "27", 2, 0, 1),
( "28", 3, 0, 0),
( "29", 3, 0, 1),
( "30", 4, 0, 0),
( "33", 6, 0, 1),
( "34", 7, 0, 0),
( "36", 6, 0, 0),
( "37", 7, 0, 1),
( "40", 9, 0, 1),
( "41", 10, 0, 0),
( "42", 10, 0, 1),
( "45", 11, 0, 0),
( "46", 11, 0, 1),
( "48", 12, 0, 0),
( "49", 12, 0, 1),
( "51", 13, 3, 1),
( "52", 13, 4, 0),
( "53", 13, 4, 1),
( "54", 13, 6, 0),
( "56", 13, 6, 1),
( "57", 13, 7, 0),
( "59", 13, 7, 1),
( "60", 13, 8, 0),
( "62", 13, 8, 1),
( "63", 13, 9, 0),
( "64", 13, 11, 0),
( "65", 13, 11, 1),
( "66", 13, 12, 0),
( "68", 13, 13, 0),
( "69", 13, 13, 1),
( "71", 13, 14, 0),
( "72", 13, 14, 1),
( "73", 13, 15, 0),
( "74", 13, 15, 1),
( "78", 12, 17, 1),
( "79", 12, 17, 0),
( "80", 11, 17, 1),
( "81", 10, 17, 1),
( "82", 10, 17, 0),
( "83", 9, 17, 1),
( "85", 9, 17, 0),
( "86", 8, 17, 1),
( "87", 8, 17, 0),
( "89", 7, 17, 0),
( "90", 6, 17, 1),
( "91", 6, 17, 0),
( "93", 5, 17, 1),
( "94", 5, 17, 0),
( "95", 4, 17, 1),
( "96", 4, 17, 0),
( "97", 3, 17, 1),
( "99", 2, 17, 1),
("100", 1, 17, 1),
],
"8k-cb132:4k": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 0),
( "A3", 3, 33, 1),
( "A4", 5, 33, 0),
( "A5", 10, 33, 1),
( "A6", 16, 33, 1),
( "A7", 17, 33, 0),
("A10", 25, 33, 0),
("A11", 26, 33, 0),
("A12", 30, 33, 1),
( "B1", 0, 30, 1),
("B14", 33, 28, 0),
( "C1", 0, 30, 0),
( "C3", 0, 27, 1),
( "C4", 4, 33, 0),
( "C5", 8, 33, 1),
( "C6", 11, 33, 1),
( "C7", 14, 33, 1),
( "C9", 20, 33, 1),
("C10", 22, 33, 1),
("C11", 28, 33, 1),
("C12", 29, 33, 1),
("C14", 33, 24, 1),
( "D1", 0, 25, 1),
( "D3", 0, 27, 0),
( "D4", 0, 22, 1),
( "D5", 9, 33, 0),
( "D6", 11, 33, 0),
( "D7", 13, 33, 1),
( "D9", 21, 33, 1),
("D10", 27, 33, 0),
("D11", 26, 33, 1),
("D12", 33, 27, 1),
("D14", 33, 23, 1),
( "E1", 0, 25, 0),
( "E4", 0, 22, 0),
("E11", 33, 20, 1),
("E12", 33, 21, 0),
("E14", 33, 21, 1),
( "F3", 0, 21, 0),
( "F4", 0, 21, 1),
("F11", 33, 19, 1),
("F12", 33, 15, 0),
("F14", 33, 16, 1),
( "G1", 0, 17, 0),
( "G3", 0, 17, 1),
( "G4", 0, 20, 0),
("G11", 33, 14, 1),
("G12", 33, 11, 0),
("G14", 33, 17, 0),
( "H1", 0, 16, 1),
( "H3", 0, 16, 0),
( "H4", 0, 20, 1),
("H11", 33, 10, 1),
("H12", 33, 6, 1),
( "J1", 0, 18, 0),
( "J3", 0, 18, 1),
("J11", 33, 6, 0),
("J12", 33, 5, 1),
( "K3", 0, 11, 1),
( "K4", 0, 11, 0),
("K11", 33, 4, 1),
("K12", 33, 4, 0),
("K14", 33, 5, 0),
( "L1", 0, 6, 1),
( "L4", 12, 0, 0),
( "L5", 11, 0, 1),
( "L6", 15, 0, 0),
( "L8", 20, 0, 1),
( "L9", 29, 0, 0),
("L12", 33, 2, 0),
("L14", 33, 3, 1),
( "M1", 0, 6, 0),
( "M3", 8, 0, 0),
( "M4", 7, 0, 1),
( "M6", 14, 0, 1),
( "M7", 15, 0, 1),
( "M9", 22, 0, 1),
("M11", 30, 0, 0),
("M12", 33, 1, 0),
( "N1", 0, 4, 1),
("N14", 33, 2, 1),
( "P1", 0, 4, 0),
( "P2", 4, 0, 0),
( "P3", 5, 0, 1),
( "P4", 12, 0, 1),
( "P5", 13, 0, 0),
( "P7", 16, 0, 1),
( "P8", 17, 0, 0),
( "P9", 21, 0, 1),
("P10", 29, 0, 1),
("P11", 30, 0, 1),
("P12", 31, 0, 0),
("P13", 31, 0, 1),
("P14", 33, 1, 1),
],
"8k-tq144:4k": [
( "1", 0, 30, 1),
( "2", 0, 30, 0),
( "3", 0, 28, 1),
( "4", 0, 28, 0),
( "7", 0, 27, 1),
( "8", 0, 27, 0),
( "9", 0, 25, 1),
( "10", 0, 25, 0),
( "11", 0, 22, 1),
( "12", 0, 22, 0),
( "15", 0, 20, 1),
( "16", 0, 20, 0),
( "17", 0, 18, 1),
( "18", 0, 18, 0),
( "19", 0, 17, 1),
( "20", 0, 17, 0),
( "21", 0, 16, 1),
( "22", 0, 16, 0),
( "23", 0, 12, 1),
( "24", 0, 12, 0),
( "25", 0, 11, 1),
( "26", 0, 11, 0),
( "28", 0, 6, 1),
( "29", 0, 6, 0),
( "31", 0, 5, 1),
( "32", 0, 5, 0),
( "33", 0, 4, 1),
( "34", 0, 4, 0),
( "37", 4, 0, 0),
( "38", 4, 0, 1),
( "39", 6, 0, 1),
( "41", 7, 0, 1),
( "42", 8, 0, 0),
( "43", 11, 0, 1),
( "44", 12, 0, 0),
( "45", 12, 0, 1),
( "47", 15, 0, 1),
( "48", 16, 0, 0),
( "49", 16, 0, 1),
( "52", 17, 0, 0),
( "55", 22, 0, 1),
( "56", 24, 0, 0),
( "60", 24, 0, 1),
( "61", 25, 0, 0),
( "62", 28, 0, 0),
( "63", 29, 0, 0),
( "64", 29, 0, 1),
( "67", 30, 0, 0),
( "68", 30, 0, 1),
( "70", 31, 0, 0),
( "71", 31, 0, 1),
( "73", 33, 1, 0),
( "74", 33, 1, 1),
( "75", 33, 2, 0),
( "76", 33, 2, 1),
( "78", 33, 3, 1),
( "79", 33, 4, 0),
( "80", 33, 4, 1),
( "81", 33, 5, 0),
( "82", 33, 5, 1),
( "83", 33, 6, 0),
( "84", 33, 6, 1),
( "85", 33, 10, 1),
( "87", 33, 14, 1),
( "88", 33, 15, 0),
( "90", 33, 15, 1),
( "91", 33, 16, 0),
( "93", 33, 16, 1),
( "94", 33, 17, 0),
( "95", 33, 19, 1),
( "96", 33, 20, 1),
( "97", 33, 21, 0),
( "98", 33, 21, 1),
( "99", 33, 23, 1),
("101", 33, 27, 1),
("102", 33, 28, 0),
("104", 33, 29, 1),
("105", 33, 30, 0),
("106", 33, 30, 1),
("107", 33, 31, 0),
("110", 31, 33, 1),
("112", 31, 33, 0),
("113", 30, 33, 1),
("114", 30, 33, 0),
("115", 29, 33, 1),
("116", 29, 33, 0),
("117", 28, 33, 1),
("118", 27, 33, 0),
("119", 26, 33, 1),
("120", 26, 33, 0),
("121", 25, 33, 0),
("122", 20, 33, 1),
("124", 20, 33, 0),
("125", 19, 33, 1),
("128", 17, 33, 0),
("129", 16, 33, 1),
("130", 11, 33, 1),
("134", 8, 33, 1),
("135", 8, 33, 0),
("136", 7, 33, 1),
("137", 7, 33, 0),
("138", 6, 33, 1),
("139", 6, 33, 0),
("141", 5, 33, 0),
("142", 4, 33, 1),
("143", 4, 33, 0),
("144", 3, 33, 1),
],
"8k-cm81:4k": [
( "A1", 2, 33, 1),
( "A2", 4, 33, 0),
( "A3", 6, 33, 0),
( "A4", 10, 33, 1),
( "A6", 23, 33, 0),
( "A7", 27, 33, 0),
( "A8", 28, 33, 1),
( "A9", 33, 4, 1),
( "B1", 0, 28, 1),
( "B2", 0, 30, 0),
( "B3", 5, 33, 1),
( "B4", 9, 33, 0),
( "B5", 21, 33, 1),
( "B6", 24, 33, 0),
( "B7", 25, 33, 1),
( "B8", 30, 33, 1),
( "B9", 33, 6, 1),
( "C1", 0, 28, 0),
( "C2", 0, 30, 1),
( "C3", 0, 23, 0),
( "C4", 16, 33, 1),
( "C5", 17, 33, 0),
( "C9", 33, 21, 1),
( "D1", 0, 20, 1),
( "D2", 0, 23, 1),
( "D3", 0, 17, 0),
( "D5", 8, 33, 1),
( "D6", 33, 4, 0),
( "D7", 33, 5, 0),
( "D8", 33, 17, 0),
( "D9", 33, 6, 0),
( "E1", 0, 20, 0),
( "E2", 0, 17, 1),
( "E3", 0, 16, 1),
( "E4", 0, 16, 0),
( "E5", 7, 33, 1),
( "E7", 33, 5, 1),
( "E8", 33, 16, 1),
( "F1", 0, 7, 1),
( "F3", 0, 7, 0),
( "F7", 31, 0, 1),
( "F8", 33, 3, 0),
( "G1", 0, 5, 0),
( "G2", 0, 3, 1),
( "G3", 0, 5, 1),
( "G4", 16, 0, 1),
( "G5", 29, 0, 0),
( "G6", 30, 0, 0),
( "G7", 31, 0, 0),
( "G8", 33, 3, 1),
( "G9", 33, 2, 1),
( "H1", 3, 0, 0),
( "H2", 0, 3, 0),
( "H4", 17, 0, 0),
( "H5", 29, 0, 1),
( "H7", 30, 0, 1),
( "H9", 33, 2, 0),
( "J1", 3, 0, 1),
( "J2", 4, 0, 0),
( "J3", 4, 0, 1),
( "J4", 11, 0, 0),
( "J8", 33, 1, 0),
( "J9", 33, 1, 1),
],
"8k-cm121:4k": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 1),
( "A3", 3, 33, 0),
( "A4", 9, 33, 0),
( "A5", 11, 33, 0),
( "A6", 11, 33, 1),
( "A7", 19, 33, 1),
( "A8", 20, 33, 1),
( "A9", 26, 33, 1),
("A10", 30, 33, 1),
("A11", 31, 33, 1),
( "B1", 0, 30, 1),
( "B2", 0, 30, 0),
( "B3", 4, 33, 0),
( "B4", 5, 33, 0),
( "B5", 10, 33, 1),
( "B6", 16, 33, 1),
( "B7", 17, 33, 0),
( "B8", 27, 33, 0),
( "B9", 28, 33, 1),
("B11", 33, 28, 0),
( "C1", 0, 25, 0),
( "C2", 0, 25, 1),
( "C3", 0, 27, 0),
( "C4", 0, 27, 1),
( "C7", 20, 33, 0),
( "C8", 26, 33, 0),
( "C9", 29, 33, 1),
("C11", 33, 27, 1),
( "D1", 0, 22, 0),
( "D2", 0, 21, 1),
( "D3", 0, 21, 0),
( "D5", 8, 33, 1),
( "D7", 25, 33, 0),
( "D9", 33, 21, 0),
("D10", 33, 24, 1),
("D11", 33, 23, 1),
( "E1", 0, 22, 1),
( "E2", 0, 20, 1),
( "E3", 0, 20, 0),
( "E8", 33, 20, 1),
( "E9", 33, 19, 1),
("E10", 33, 17, 0),
("E11", 33, 21, 1),
( "F1", 0, 18, 1),
( "F2", 0, 18, 0),
( "F3", 0, 17, 0),
( "F4", 0, 17, 1),
( "F9", 33, 15, 0),
("F10", 33, 14, 1),
("F11", 33, 16, 1),
( "G1", 0, 16, 1),
( "G2", 0, 16, 0),
( "G3", 0, 12, 1),
( "G8", 33, 5, 1),
( "G9", 33, 10, 1),
("G10", 33, 6, 1),
("G11", 33, 11, 0),
( "H1", 0, 11, 1),
( "H2", 0, 11, 0),
( "H3", 0, 12, 0),
( "H7", 20, 0, 1),
( "H9", 29, 0, 1),
("H10", 33, 4, 1),
("H11", 33, 6, 0),
( "J1", 0, 6, 1),
( "J2", 0, 4, 0),
( "J3", 4, 0, 1),
( "J4", 8, 0, 0),
( "J5", 15, 0, 0),
( "J7", 20, 0, 0),
( "J8", 22, 0, 1),
( "J9", 30, 0, 1),
("J10", 33, 5, 0),
("J11", 33, 3, 1),
( "K1", 0, 6, 0),
( "K2", 0, 4, 1),
( "K3", 7, 0, 1),
( "K4", 12, 0, 1),
( "K5", 15, 0, 1),
( "K6", 17, 0, 0),
( "K7", 21, 0, 1),
( "K9", 30, 0, 0),
("K10", 31, 0, 1),
("K11", 33, 4, 0),
( "L1", 4, 0, 0),
( "L2", 6, 0, 1),
( "L3", 11, 0, 1),
( "L4", 12, 0, 0),
( "L5", 16, 0, 1),
( "L7", 24, 0, 0),
( "L8", 29, 0, 0),
("L10", 31, 0, 0),
],
"8k-cm225:4k": [
( "A1", 1, 33, 1),
( "A2", 3, 33, 1),
( "A5", 6, 33, 1),
( "A6", 11, 33, 0),
( "A7", 12, 33, 0),
( "A8", 17, 33, 1),
( "A9", 18, 33, 1),
("A11", 23, 33, 1),
("A15", 31, 33, 0),
( "B2", 2, 33, 1),
( "B3", 4, 33, 1),
( "B4", 5, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 0),
( "B7", 14, 33, 0),
( "B8", 19, 33, 1),
( "B9", 18, 33, 0),
("B10", 22, 33, 0),
("B11", 23, 33, 0),
("B12", 25, 33, 1),
("B13", 27, 33, 1),
("B14", 31, 33, 1),
("B15", 33, 31, 0),
( "C1", 0, 28, 0),
( "C3", 2, 33, 0),
( "C4", 3, 33, 0),
( "C5", 5, 33, 0),
( "C6", 13, 33, 0),
( "C7", 11, 33, 1),
( "C8", 19, 33, 0),
( "C9", 17, 33, 0),
("C10", 20, 33, 0),
("C11", 24, 33, 1),
("C12", 30, 33, 1),
("C13", 30, 33, 0),
("C14", 33, 30, 0),
( "D1", 0, 25, 0),
( "D2", 0, 24, 1),
( "D3", 0, 27, 0),
( "D4", 0, 30, 0),
( "D5", 4, 33, 0),
( "D6", 9, 33, 0),
( "D7", 10, 33, 1),
( "D8", 16, 33, 1),
( "D9", 26, 33, 1),
("D10", 25, 33, 0),
("D11", 28, 33, 1),
("D13", 33, 27, 1),
("D14", 33, 25, 0),
("D15", 33, 27, 0),
( "E2", 0, 24, 0),
( "E3", 0, 28, 1),
( "E4", 0, 30, 1),
( "E5", 0, 27, 1),
( "E6", 0, 25, 1),
( "E9", 26, 33, 0),
("E10", 27, 33, 0),
("E11", 29, 33, 1),
("E13", 33, 28, 0),
("E14", 33, 24, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 21, 1),
( "F4", 0, 22, 0),
( "F5", 0, 22, 1),
( "F7", 8, 33, 1),
( "F9", 20, 33, 1),
("F11", 33, 24, 1),
("F12", 33, 23, 1),
("F13", 33, 23, 0),
("F14", 33, 21, 0),
("F15", 33, 22, 0),
( "G2", 0, 20, 1),
( "G4", 0, 17, 0),
( "G5", 0, 18, 1),
("G10", 33, 20, 1),
("G11", 33, 19, 1),
("G12", 33, 21, 1),
("G13", 33, 17, 0),
("G14", 33, 20, 0),
("G15", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 14, 1),
( "H4", 0, 13, 1),
( "H5", 0, 16, 1),
( "H6", 0, 17, 1),
("H11", 33, 14, 1),
("H12", 33, 16, 1),
("H13", 33, 15, 1),
("H14", 33, 15, 0),
( "J1", 0, 13, 0),
( "J2", 0, 12, 0),
( "J3", 0, 14, 0),
( "J4", 0, 11, 1),
( "J5", 0, 12, 1),
("J10", 33, 5, 1),
("J11", 33, 10, 1),
("J12", 33, 6, 1),
("J14", 33, 14, 0),
("J15", 33, 13, 0),
( "K1", 0, 11, 0),
( "K4", 0, 4, 0),
( "K5", 0, 6, 1),
( "K9", 20, 0, 1),
("K11", 29, 0, 0),
("K12", 33, 4, 1),
("K13", 33, 5, 0),
("K15", 33, 9, 0),
( "L3", 0, 7, 1),
( "L4", 0, 3, 0),
( "L5", 4, 0, 0),
( "L6", 7, 0, 0),
( "L7", 12, 0, 0),
( "L9", 17, 0, 0),
("L10", 21, 0, 1),
("L11", 30, 0, 1),
("L12", 33, 3, 1),
("L13", 33, 6, 0),
( "M1", 0, 7, 0),
( "M2", 0, 6, 0),
( "M3", 0, 5, 0),
( "M4", 0, 3, 1),
( "M5", 6, 0, 0),
( "M6", 8, 0, 0),
( "M7", 13, 0, 1),
( "M8", 15, 0, 0),
( "M9", 19, 0, 1),
("M11", 30, 0, 0),
("M12", 31, 0, 1),
("M13", 33, 4, 0),
("M15", 33, 3, 0),
( "N2", 0, 5, 1),
( "N3", 2, 0, 0),
( "N4", 3, 0, 0),
( "N5", 9, 0, 1),
( "N6", 12, 0, 1),
( "N7", 16, 0, 1),
( "N9", 20, 0, 0),
("N10", 22, 0, 1),
("N12", 31, 0, 0),
( "P1", 0, 4, 1),
( "P2", 2, 0, 1),
( "P4", 7, 0, 1),
( "P5", 10, 0, 1),
( "P6", 14, 0, 1),
( "P7", 17, 0, 1),
( "P8", 19, 0, 0),
( "P9", 22, 0, 0),
("P10", 23, 0, 0),
("P11", 25, 0, 0),
("P12", 29, 0, 1),
("P13", 27, 0, 0),
("P14", 33, 2, 1),
("P15", 33, 1, 1),
( "R1", 3, 0, 1),
( "R2", 4, 0, 1),
( "R3", 6, 0, 1),
( "R4", 8, 0, 1),
( "R5", 11, 0, 1),
( "R6", 15, 0, 1),
( "R9", 21, 0, 0),
("R10", 24, 0, 0),
("R11", 26, 0, 0),
("R12", 28, 0, 0),
("R14", 33, 2, 0),
("R15", 33, 1, 0),
],
"8k-cm81": [
( "A1", 2, 33, 1),
( "A2", 4, 33, 0),
( "A3", 6, 33, 0),
( "A4", 10, 33, 1),
( "A6", 23, 33, 0),
( "A7", 27, 33, 0),
( "A8", 28, 33, 1),
( "A9", 33, 4, 1),
( "B1", 0, 28, 1),
( "B2", 0, 30, 0),
( "B3", 5, 33, 1),
( "B4", 9, 33, 0),
( "B5", 21, 33, 1),
( "B6", 24, 33, 0),
( "B7", 25, 33, 1),
( "B8", 30, 33, 1),
( "B9", 33, 6, 1),
( "C1", 0, 28, 0),
( "C2", 0, 30, 1),
( "C3", 0, 23, 0),
( "C4", 16, 33, 1),
( "C5", 17, 33, 0),
( "C9", 33, 21, 1),
( "D1", 0, 20, 1),
( "D2", 0, 23, 1),
( "D3", 0, 17, 0),
( "D5", 8, 33, 1),
( "D6", 33, 4, 0),
( "D7", 33, 5, 0),
( "D8", 33, 17, 0),
( "D9", 33, 6, 0),
( "E1", 0, 20, 0),
( "E2", 0, 17, 1),
( "E3", 0, 16, 1),
( "E4", 0, 16, 0),
( "E5", 7, 33, 1),
( "E7", 33, 5, 1),
( "E8", 33, 16, 1),
( "F1", 0, 7, 1),
( "F3", 0, 7, 0),
( "F7", 31, 0, 1),
( "F8", 33, 3, 0),
( "G1", 0, 5, 0),
( "G2", 0, 3, 1),
( "G3", 0, 5, 1),
( "G4", 16, 0, 1),
( "G5", 29, 0, 0),
( "G6", 30, 0, 0),
( "G7", 31, 0, 0),
( "G8", 33, 3, 1),
( "G9", 33, 2, 1),
( "H1", 3, 0, 0),
( "H2", 0, 3, 0),
( "H4", 17, 0, 0),
( "H5", 29, 0, 1),
( "H7", 30, 0, 1),
( "H9", 33, 2, 0),
( "J1", 3, 0, 1),
( "J2", 4, 0, 0),
( "J3", 4, 0, 1),
( "J4", 11, 0, 0),
( "J8", 33, 1, 0),
( "J9", 33, 1, 1),
],
"8k-cm121": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 1),
( "A3", 3, 33, 0),
( "A4", 9, 33, 0),
( "A5", 11, 33, 0),
( "A6", 11, 33, 1),
( "A7", 19, 33, 1),
( "A8", 20, 33, 1),
( "A9", 26, 33, 1),
("A10", 30, 33, 1),
("A11", 31, 33, 1),
( "B1", 0, 30, 1),
( "B2", 0, 30, 0),
( "B3", 4, 33, 0),
( "B4", 5, 33, 0),
( "B5", 10, 33, 1),
( "B6", 16, 33, 1),
( "B7", 17, 33, 0),
( "B8", 27, 33, 0),
( "B9", 28, 33, 1),
("B11", 33, 28, 0),
( "C1", 0, 25, 0),
( "C2", 0, 25, 1),
( "C3", 0, 27, 0),
( "C4", 0, 27, 1),
( "C7", 20, 33, 0),
( "C8", 26, 33, 0),
( "C9", 29, 33, 1),
("C11", 33, 27, 1),
( "D1", 0, 22, 0),
( "D2", 0, 21, 1),
( "D3", 0, 21, 0),
( "D5", 8, 33, 1),
( "D7", 25, 33, 0),
( "D9", 33, 21, 0),
("D10", 33, 24, 1),
("D11", 33, 23, 1),
( "E1", 0, 22, 1),
( "E2", 0, 20, 1),
( "E3", 0, 20, 0),
( "E8", 33, 20, 1),
( "E9", 33, 19, 1),
("E10", 33, 17, 0),
("E11", 33, 21, 1),
( "F1", 0, 18, 1),
( "F2", 0, 18, 0),
( "F3", 0, 17, 0),
( "F4", 0, 17, 1),
( "F9", 33, 15, 0),
("F10", 33, 14, 1),
("F11", 33, 16, 1),
( "G1", 0, 16, 1),
( "G2", 0, 16, 0),
( "G3", 0, 12, 1),
( "G8", 33, 5, 1),
( "G9", 33, 10, 1),
("G10", 33, 6, 1),
("G11", 33, 11, 0),
( "H1", 0, 11, 1),
( "H2", 0, 11, 0),
( "H3", 0, 12, 0),
( "H7", 20, 0, 1),
( "H9", 29, 0, 1),
("H10", 33, 4, 1),
("H11", 33, 6, 0),
( "J1", 0, 6, 1),
( "J2", 0, 4, 0),
( "J3", 4, 0, 1),
( "J4", 8, 0, 0),
( "J5", 15, 0, 0),
( "J7", 20, 0, 0),
( "J8", 22, 0, 1),
( "J9", 30, 0, 1),
("J10", 33, 5, 0),
("J11", 33, 3, 1),
( "K1", 0, 6, 0),
( "K2", 0, 4, 1),
( "K3", 7, 0, 1),
( "K4", 12, 0, 1),
( "K5", 15, 0, 1),
( "K6", 17, 0, 0),
( "K7", 21, 0, 1),
( "K9", 30, 0, 0),
("K10", 31, 0, 1),
("K11", 33, 4, 0),
( "L1", 4, 0, 0),
( "L2", 6, 0, 1),
( "L3", 11, 0, 1),
( "L4", 12, 0, 0),
( "L5", 16, 0, 1),
( "L7", 24, 0, 0),
( "L8", 29, 0, 0),
("L10", 31, 0, 0),
],
"8k-cm225": [
( "A1", 1, 33, 1),
( "A2", 3, 33, 1),
( "A5", 6, 33, 1),
( "A6", 11, 33, 0),
( "A7", 12, 33, 0),
( "A8", 17, 33, 1),
( "A9", 18, 33, 1),
("A10", 21, 33, 0),
("A11", 23, 33, 1),
("A15", 31, 33, 0),
( "B1", 0, 31, 0),
( "B2", 2, 33, 1),
( "B3", 4, 33, 1),
( "B4", 5, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 0),
( "B7", 14, 33, 0),
( "B8", 19, 33, 1),
( "B9", 18, 33, 0),
("B10", 22, 33, 0),
("B11", 23, 33, 0),
("B12", 25, 33, 1),
("B13", 27, 33, 1),
("B14", 31, 33, 1),
("B15", 33, 31, 0),
( "C1", 0, 28, 0),
( "C2", 0, 31, 1),
( "C3", 2, 33, 0),
( "C4", 3, 33, 0),
( "C5", 5, 33, 0),
( "C6", 13, 33, 0),
( "C7", 11, 33, 1),
( "C8", 19, 33, 0),
( "C9", 17, 33, 0),
("C10", 20, 33, 0),
("C11", 24, 33, 1),
("C12", 30, 33, 1),
("C13", 30, 33, 0),
("C14", 33, 30, 0),
( "D1", 0, 25, 0),
( "D2", 0, 24, 1),
( "D3", 0, 27, 0),
( "D4", 0, 30, 0),
( "D5", 4, 33, 0),
( "D6", 9, 33, 0),
( "D7", 10, 33, 1),
( "D8", 16, 33, 1),
( "D9", 26, 33, 1),
("D10", 25, 33, 0),
("D11", 28, 33, 1),
("D13", 33, 27, 1),
("D14", 33, 25, 0),
("D15", 33, 27, 0),
( "E2", 0, 24, 0),
( "E3", 0, 28, 1),
( "E4", 0, 30, 1),
( "E5", 0, 27, 1),
( "E6", 0, 25, 1),
( "E9", 26, 33, 0),
("E10", 27, 33, 0),
("E11", 29, 33, 1),
("E13", 33, 28, 0),
("E14", 33, 24, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 21, 1),
( "F4", 0, 22, 0),
( "F5", 0, 22, 1),
( "F7", 8, 33, 1),
( "F9", 20, 33, 1),
("F11", 33, 24, 1),
("F12", 33, 23, 1),
("F13", 33, 23, 0),
("F14", 33, 21, 0),
("F15", 33, 22, 0),
( "G1", 0, 19, 0),
( "G2", 0, 20, 1),
( "G3", 0, 19, 1),
( "G4", 0, 17, 0),
( "G5", 0, 18, 1),
("G10", 33, 20, 1),
("G11", 33, 19, 1),
("G12", 33, 21, 1),
("G13", 33, 17, 0),
("G14", 33, 20, 0),
("G15", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 14, 1),
( "H4", 0, 13, 1),
( "H5", 0, 16, 1),
( "H6", 0, 17, 1),
("H11", 33, 14, 1),
("H12", 33, 16, 1),
("H13", 33, 15, 1),
("H14", 33, 15, 0),
( "J1", 0, 13, 0),
( "J2", 0, 12, 0),
( "J3", 0, 14, 0),
( "J4", 0, 11, 1),
( "J5", 0, 12, 1),
("J10", 33, 5, 1),
("J11", 33, 10, 1),
("J12", 33, 6, 1),
("J13", 33, 11, 0),
("J14", 33, 14, 0),
("J15", 33, 13, 0),
( "K1", 0, 11, 0),
( "K3", 0, 9, 1),
( "K4", 0, 4, 0),
( "K5", 0, 6, 1),
( "K9", 20, 0, 1),
("K11", 29, 0, 0),
("K12", 33, 4, 1),
("K13", 33, 5, 0),
("K14", 33, 12, 0),
("K15", 33, 9, 0),
( "L1", 0, 9, 0),
( "L3", 0, 7, 1),
( "L4", 0, 3, 0),
( "L5", 4, 0, 0),
( "L6", 7, 0, 0),
( "L7", 12, 0, 0),
( "L9", 17, 0, 0),
("L10", 21, 0, 1),
("L11", 30, 0, 1),
("L12", 33, 3, 1),
("L13", 33, 6, 0),
("L14", 33, 7, 0),
( "M1", 0, 7, 0),
( "M2", 0, 6, 0),
( "M3", 0, 5, 0),
( "M4", 0, 3, 1),
( "M5", 6, 0, 0),
( "M6", 8, 0, 0),
( "M7", 13, 0, 1),
( "M8", 15, 0, 0),
( "M9", 19, 0, 1),
("M11", 30, 0, 0),
("M12", 31, 0, 1),
("M13", 33, 4, 0),
("M14", 33, 8, 0),
("M15", 33, 3, 0),
( "N2", 0, 5, 1),
( "N3", 2, 0, 0),
( "N4", 3, 0, 0),
( "N5", 9, 0, 1),
( "N6", 12, 0, 1),
( "N7", 16, 0, 1),
( "N9", 20, 0, 0),
("N10", 22, 0, 1),
("N12", 31, 0, 0),
( "P1", 0, 4, 1),
( "P2", 2, 0, 1),
( "P4", 7, 0, 1),
( "P5", 10, 0, 1),
( "P6", 14, 0, 1),
( "P7", 17, 0, 1),
( "P8", 19, 0, 0),
( "P9", 22, 0, 0),
("P10", 23, 0, 0),
("P11", 25, 0, 0),
("P12", 29, 0, 1),
("P13", 27, 0, 0),
("P14", 33, 2, 1),
("P15", 33, 1, 1),
( "R1", 3, 0, 1),
( "R2", 4, 0, 1),
( "R3", 6, 0, 1),
( "R4", 8, 0, 1),
( "R5", 11, 0, 1),
( "R6", 15, 0, 1),
( "R9", 21, 0, 0),
("R10", 24, 0, 0),
("R11", 26, 0, 0),
("R12", 28, 0, 0),
("R14", 33, 2, 0),
("R15", 33, 1, 0),
],
"8k-cb132": [
( "A1", 2, 33, 0),
( "A2", 3, 33, 0),
( "A3", 3, 33, 1),
( "A4", 5, 33, 0),
( "A5", 10, 33, 1),
( "A6", 16, 33, 1),
( "A7", 17, 33, 0),
("A10", 25, 33, 0),
("A11", 26, 33, 0),
("A12", 30, 33, 1),
( "B1", 0, 30, 1),
("B14", 33, 28, 0),
( "C1", 0, 30, 0),
( "C3", 0, 27, 1),
( "C4", 4, 33, 0),
( "C5", 8, 33, 1),
( "C6", 11, 33, 1),
( "C7", 14, 33, 1),
( "C9", 20, 33, 1),
("C10", 22, 33, 1),
("C11", 28, 33, 1),
("C12", 29, 33, 1),
("C14", 33, 24, 1),
( "D1", 0, 25, 1),
( "D3", 0, 27, 0),
( "D4", 0, 22, 1),
( "D5", 9, 33, 0),
( "D6", 11, 33, 0),
( "D7", 13, 33, 1),
( "D9", 21, 33, 1),
("D10", 27, 33, 0),
("D11", 26, 33, 1),
("D12", 33, 27, 1),
("D14", 33, 23, 1),
( "E1", 0, 25, 0),
( "E4", 0, 22, 0),
("E11", 33, 20, 1),
("E12", 33, 21, 0),
("E14", 33, 21, 1),
( "F3", 0, 21, 0),
( "F4", 0, 21, 1),
("F11", 33, 19, 1),
("F12", 33, 15, 0),
("F14", 33, 16, 1),
( "G1", 0, 17, 0),
( "G3", 0, 17, 1),
( "G4", 0, 20, 0),
("G11", 33, 14, 1),
("G12", 33, 11, 0),
("G14", 33, 17, 0),
( "H1", 0, 16, 1),
( "H3", 0, 16, 0),
( "H4", 0, 20, 1),
("H11", 33, 10, 1),
("H12", 33, 6, 1),
( "J1", 0, 18, 0),
( "J3", 0, 18, 1),
("J11", 33, 6, 0),
("J12", 33, 5, 1),
( "K3", 0, 11, 1),
( "K4", 0, 11, 0),
("K11", 33, 4, 1),
("K12", 33, 4, 0),
("K14", 33, 5, 0),
( "L1", 0, 6, 1),
( "L4", 12, 0, 0),
( "L5", 11, 0, 1),
( "L6", 15, 0, 0),
( "L8", 20, 0, 1),
( "L9", 29, 0, 0),
("L12", 33, 2, 0),
("L14", 33, 3, 1),
( "M1", 0, 6, 0),
( "M3", 8, 0, 0),
( "M4", 7, 0, 1),
( "M6", 14, 0, 1),
( "M7", 15, 0, 1),
( "M9", 22, 0, 1),
("M11", 30, 0, 0),
("M12", 33, 1, 0),
( "N1", 0, 4, 1),
("N14", 33, 2, 1),
( "P1", 0, 4, 0),
( "P2", 4, 0, 0),
( "P3", 5, 0, 1),
( "P4", 12, 0, 1),
( "P5", 13, 0, 0),
( "P7", 16, 0, 1),
( "P8", 17, 0, 0),
( "P9", 21, 0, 1),
("P10", 29, 0, 1),
("P11", 30, 0, 1),
("P12", 31, 0, 0),
("P13", 31, 0, 1),
("P14", 33, 1, 1),
],
"8k-ct256": [
( "A1", 4, 33, 1),
( "A2", 5, 33, 1),
( "A5", 8, 33, 0),
( "A6", 9, 33, 0),
( "A7", 12, 33, 0),
( "A9", 18, 33, 1),
("A10", 22, 33, 1),
("A11", 22, 33, 0),
("A15", 27, 33, 0),
("A16", 27, 33, 1),
( "B1", 0, 30, 0),
( "B2", 0, 31, 0),
( "B3", 3, 33, 0),
( "B4", 6, 33, 1),
( "B5", 7, 33, 1),
( "B6", 10, 33, 1),
( "B7", 11, 33, 0),
( "B8", 13, 33, 0),
( "B9", 16, 33, 0),
("B10", 24, 33, 0),
("B11", 23, 33, 1),
("B12", 24, 33, 1),
("B13", 26, 33, 1),
("B14", 30, 33, 0),
("B15", 31, 33, 0),
("B16", 33, 30, 0),
( "C1", 0, 28, 1),
( "C2", 0, 28, 0),
( "C3", 1, 33, 0),
( "C4", 3, 33, 1),
( "C5", 4, 33, 0),
( "C6", 10, 33, 0),
( "C7", 11, 33, 1),
( "C8", 17, 33, 0),
( "C9", 20, 33, 0),
("C10", 23, 33, 0),
("C11", 25, 33, 1),
("C12", 29, 33, 1),
("C13", 28, 33, 1),
("C14", 31, 33, 1),
("C16", 33, 28, 0),
( "D1", 0, 25, 0),
( "D2", 0, 27, 0),
( "D3", 1, 33, 1),
( "D4", 2, 33, 1),
( "D5", 5, 33, 0),
( "D6", 8, 33, 1),
( "D7", 9, 33, 1),
( "D8", 14, 33, 1),
( "D9", 19, 33, 0),
("D10", 20, 33, 1),
("D11", 25, 33, 0),
("D13", 30, 33, 1),
("D14", 33, 31, 0),
("D15", 33, 26, 0),
("D16", 33, 24, 0),
( "E2", 0, 23, 0),
( "E3", 0, 24, 0),
( "E4", 0, 31, 1),
( "E5", 2, 33, 0),
( "E6", 7, 33, 0),
( "E9", 19, 33, 1),
("E10", 26, 33, 0),
("E11", 29, 33, 0),
("E13", 33, 30, 1),
("E14", 33, 27, 1),
("E16", 33, 23, 0),
( "F1", 0, 20, 0),
( "F2", 0, 21, 0),
( "F3", 0, 22, 0),
( "F4", 0, 27, 1),
( "F5", 0, 30, 1),
( "F7", 16, 33, 1),
( "F9", 17, 33, 1),
("F11", 33, 26, 1),
("F12", 33, 25, 1),
("F13", 33, 28, 1),
("F14", 33, 25, 0),
("F15", 33, 22, 0),
("F16", 33, 21, 0),
( "G1", 0, 17, 0),
( "G2", 0, 19, 0),
( "G3", 0, 22, 1),
( "G4", 0, 24, 1),
( "G5", 0, 25, 1),
("G10", 33, 20, 1),
("G11", 33, 21, 1),
("G12", 33, 24, 1),
("G13", 33, 23, 1),
("G14", 33, 22, 1),
("G15", 33, 20, 0),
("G16", 33, 19, 0),
( "H1", 0, 16, 0),
( "H2", 0, 18, 0),
( "H3", 0, 21, 1),
( "H4", 0, 19, 1),
( "H5", 0, 23, 1),
( "H6", 0, 20, 1),
("H11", 33, 16, 1),
("H12", 33, 19, 1),
("H13", 33, 16, 0),
("H14", 33, 17, 1),
("H16", 33, 17, 0),
( "J1", 0, 14, 0),
( "J2", 0, 14, 1),
( "J3", 0, 16, 1),
( "J4", 0, 18, 1),
( "J5", 0, 17, 1),
("J10", 33, 7, 1),
("J11", 33, 9, 1),
("J12", 33, 14, 1),
("J13", 33, 15, 0),
("J14", 33, 13, 1),
("J15", 33, 11, 1),
("J16", 33, 15, 1),
( "K1", 0, 13, 1),
( "K3", 0, 13, 0),
( "K4", 0, 11, 1),
( "K5", 0, 9, 1),
( "K9", 17, 0, 0),
("K11", 29, 0, 0),
("K12", 33, 6, 1),
("K13", 33, 10, 1),
("K14", 33, 11, 0),
("K15", 33, 12, 0),
("K16", 33, 13, 0),
( "L1", 0, 12, 0),
( "L3", 0, 10, 0),
( "L4", 0, 12, 1),
( "L5", 0, 6, 1),
( "L6", 0, 10, 1),
( "L7", 0, 8, 1),
( "L9", 13, 0, 0),
("L10", 19, 0, 1),
("L11", 26, 0, 1),
("L12", 33, 4, 1),
("L13", 33, 5, 1),
("L14", 33, 6, 0),
("L16", 33, 10, 0),
( "M1", 0, 11, 0),
( "M2", 0, 9, 0),
( "M3", 0, 7, 0),
( "M4", 0, 5, 0),
( "M5", 0, 4, 0),
( "M6", 0, 7, 1),
( "M7", 8, 0, 0),
( "M8", 10, 0, 0),
( "M9", 16, 0, 0),
("M11", 23, 0, 1),
("M12", 27, 0, 1),
("M13", 33, 3, 1),
("M14", 33, 4, 0),
("M15", 33, 8, 0),
("M16", 33, 7, 0),
( "N2", 0, 8, 0),
( "N3", 0, 6, 0),
( "N4", 0, 3, 0),
( "N5", 4, 0, 0),
( "N6", 2, 0, 0),
( "N7", 9, 0, 0),
( "N9", 15, 0, 0),
("N10", 20, 0, 1),
("N12", 26, 0, 0),
("N16", 33, 5, 0),
( "P1", 0, 5, 1),
( "P2", 0, 4, 1),
( "P4", 3, 0, 0),
( "P5", 5, 0, 0),
( "P6", 9, 0, 1),
( "P7", 14, 0, 1),
( "P8", 12, 0, 0),
( "P9", 17, 0, 1),
("P10", 20, 0, 0),
("P11", 30, 0, 1),
("P12", 30, 0, 0),
("P13", 29, 0, 1),
("P14", 33, 2, 0),
("P15", 33, 2, 1),
("P16", 33, 3, 0),
( "R1", 0, 3, 1),
( "R2", 3, 0, 1),
( "R3", 5, 0, 1),
( "R4", 7, 0, 1),
( "R5", 6, 0, 0),
( "R6", 11, 0, 1),
( "R9", 16, 0, 1),
("R10", 19, 0, 0),
("R11", 31, 0, 0),
("R12", 31, 0, 1),
("R14", 33, 1, 0),
("R15", 33, 1, 1),
("R16", 28, 0, 0),
( "T1", 2, 0, 1),
( "T2", 4, 0, 1),
( "T3", 6, 0, 1),
( "T5", 10, 0, 1),
( "T6", 12, 0, 1),
( "T7", 13, 0, 1),
( "T8", 14, 0, 0),
( "T9", 15, 0, 1),
("T10", 21, 0, 0),
("T11", 21, 0, 1),
("T13", 24, 0, 0),
("T14", 23, 0, 0),
("T15", 22, 0, 1),
("T16", 27, 0, 0),
],
"384-qn32": [
( "1", 0, 7, 0),
( "2", 0, 7, 1),
( "5", 0, 5, 1),
( "6", 0, 5, 0),
( "7", 0, 4, 0),
( "8", 0, 4, 1),
( "12", 5, 0, 0),
( "13", 5, 0, 1),
( "14", 6, 0, 1),
( "15", 6, 0, 0),
( "18", 7, 4, 0),
( "19", 7, 4, 1),
( "20", 7, 5, 0),
( "22", 7, 6, 0),
( "23", 7, 6, 1),
( "26", 6, 9, 0),
( "27", 5, 9, 0),
( "29", 4, 9, 0),
( "30", 3, 9, 1),
( "31", 2, 9, 0),
( "32", 2, 9, 1),
],
"384-cm36": [
( "A1", 0, 7, 0),
( "A2", 2, 9, 1),
( "A3", 3, 9, 1),
( "B1", 0, 7, 1),
( "B3", 4, 9, 0),
( "B4", 7, 5, 0),
( "B5", 7, 5, 1),
( "B6", 7, 6, 0),
( "C1", 0, 5, 0),
( "C2", 0, 5, 1),
( "C3", 2, 9, 0),
( "C5", 7, 4, 1),
( "C6", 7, 6, 1),
( "D1", 0, 4, 1),
( "D5", 6, 0, 1),
( "D6", 7, 4, 0),
( "E1", 0, 4, 0),
( "E2", 3, 0, 1),
( "E3", 4, 0, 0),
( "E4", 5, 0, 0),
( "E5", 6, 0, 0),
( "E6", 7, 3, 1),
( "F2", 3, 0, 0),
( "F3", 4, 0, 1),
( "F5", 5, 0, 1),
],
"384-cm49": [
( "A1", 0, 7, 1),
( "A2", 2, 9, 1),
( "A3", 3, 9, 0),
( "A4", 4, 9, 1),
( "A5", 5, 9, 0),
( "A6", 6, 9, 0),
( "A7", 6, 9, 1),
( "B1", 0, 7, 0),
( "B2", 0, 6, 0),
( "B3", 2, 9, 0),
( "B4", 4, 9, 0),
( "C1", 0, 5, 1),
( "C2", 0, 6, 1),
( "C4", 3, 9, 1),
( "C5", 7, 6, 1),
( "C6", 7, 5, 1),
( "C7", 7, 6, 0),
( "D1", 0, 4, 0),
( "D2", 0, 5, 0),
( "D3", 0, 2, 0),
( "D4", 5, 9, 1),
( "D6", 7, 4, 1),
( "D7", 7, 5, 0),
( "E2", 0, 4, 1),
( "E6", 6, 0, 1),
( "E7", 7, 4, 0),
( "F1", 0, 2, 1),
( "F2", 0, 1, 0),
( "F3", 3, 0, 1),
( "F4", 4, 0, 0),
( "F5", 5, 0, 0),
( "F6", 6, 0, 0),
( "F7", 7, 3, 1),
( "G1", 0, 1, 1),
( "G3", 3, 0, 0),
( "G4", 4, 0, 1),
( "G6", 5, 0, 1),
],
"5k-sg48": [
( "2", 8, 0, 0),
( "3", 9, 0, 1),
( "4", 9, 0, 0),
( "6", 13, 0, 1),
( "9", 15, 0, 0),
( "10", 16, 0, 0),
( "11", 17, 0, 0),
( "12", 18, 0, 0),
( "13", 19, 0, 0),
( "14", 23, 0, 0),
( "15", 24, 0, 0),
( "16", 24, 0, 1),
( "17", 23, 0, 1),
( "18", 22, 0, 1),
( "19", 21, 0, 1),
( "20", 19, 0, 1),
( "21", 18, 0, 1),
( "23", 19, 31, 0),
( "25", 19, 31, 1),
( "26", 18, 31, 0),
( "27", 18, 31, 1),
( "28", 17, 31, 0),
( "31", 16, 31, 1),
( "32", 16, 31, 0),
( "34", 13, 31, 1),
( "35", 12, 31, 1),
( "36", 9, 31, 1),
( "37", 13, 31, 0),
( "38", 8, 31, 1),
( "39", 4, 31, 0),
( "40", 5, 31, 0),
( "41", 6, 31, 0),
( "42", 8, 31, 0),
( "43", 9, 31, 0),
( "44", 6, 0, 1),
( "45", 7, 0, 1),
( "46", 5, 0, 0),
( "47", 6, 0, 0),
( "48", 7, 0, 0),
],
"5k-uwg30": [
( "A1", 19, 31, 1),
( "A2", 19, 31, 0),
( "A4", 12, 31, 0),
( "A5", 4, 31, 0),
( "B1", 19, 0, 0),
( "B3", 12, 31, 1),
( "B5", 5, 31, 0),
( "C1", 24, 0, 1),
( "C3", 12, 0, 0),
( "C5", 6, 31, 0),
( "D1", 24, 0, 0),
( "D3", 13, 0, 0),
( "D5", 6, 0, 0),
( "E1", 23, 0, 1),
( "E3", 13, 0, 1),
( "E4", 9, 0, 1),
( "E5", 5, 0, 0),
( "F1", 23, 0, 0),
( "F2", 19, 0, 1),
( "F4", 12, 0, 1),
( "F5", 6, 0, 1),
]
}
# This database contains the locations of the configuration bits of the DSP tiles.
# The standard configuration is stored under the key "default". To override it for
# a particular DSP on a particular device, use the key "{device}_{x}_{y}", where
# {x} and {y} are the absolute location of that DSP's DSP0 tile (NOT the tile the
# cbit is in). The (x, y) offsets stored in each entry are relative to that DSP0 tile.
# In the default layout the 25 DSP configuration fields occupy consecutive
# configuration bits, eight per tile row: field number i lives at tile
# y-offset i // 8 in bit "CBIT_{i % 8}".  Listing the field names once, in
# order, and deriving the positions keeps the table in lockstep with that
# pattern.
_DSP_DEFAULT_FIELDS = (
    "C_REG", "A_REG", "B_REG", "D_REG",
    "TOP_8x8_MULT_REG", "BOT_8x8_MULT_REG",
    "PIPELINE_16x16_MULT_REG1", "PIPELINE_16x16_MULT_REG2",
    "TOPOUTPUT_SELECT_0", "TOPOUTPUT_SELECT_1",
    "TOPADDSUB_LOWERINPUT_0", "TOPADDSUB_LOWERINPUT_1",
    "TOPADDSUB_UPPERINPUT",
    "TOPADDSUB_CARRYSELECT_0", "TOPADDSUB_CARRYSELECT_1",
    "BOTOUTPUT_SELECT_0", "BOTOUTPUT_SELECT_1",
    "BOTADDSUB_LOWERINPUT_0", "BOTADDSUB_LOWERINPUT_1",
    "BOTADDSUB_UPPERINPUT",
    "BOTADDSUB_CARRYSELECT_0", "BOTADDSUB_CARRYSELECT_1",
    "MODE_8x8", "A_SIGNED", "B_SIGNED",
)

# DSP configuration-bit database.  Each inner dict maps a DSP parameter name
# to a tuple (x_offset, y_offset, cbit_name); the offsets are relative to
# the DSP0 tile of the DSP in question.
dsp_config_db = {
    "default": {
        field: (0, index // 8, "CBIT_%d" % (index % 8))
        for index, field in enumerate(_DSP_DEFAULT_FIELDS)
    },
    # Device-specific override for the 5k DSP whose DSP0 tile sits at
    # (0, 15): these four bits move to the tile at y-offset 4.
    "5k_0_15": {
        "TOPOUTPUT_SELECT_1": (0, 4, "CBIT_3"),
        "TOPADDSUB_LOWERINPUT_0": (0, 4, "CBIT_4"),
        "TOPADDSUB_LOWERINPUT_1": (0, 4, "CBIT_5"),
        "TOPADDSUB_UPPERINPUT": (0, 4, "CBIT_6"),
    },
}
# SPRAM data for UltraPlus devices; use icefuzz/tests/fuzz_spram.py
# to regenerate this table.
spram_db = {
"5k" : {
(0, 0, 1): {
"ADDRESS_0": (0, 2, "lutff_0/in_1"),
"ADDRESS_10": (0, 2, "lutff_2/in_0"),
"ADDRESS_11": (0, 2, "lutff_3/in_0"),
"ADDRESS_12": (0, 2, "lutff_4/in_0"),
"ADDRESS_13": (0, 2, "lutff_5/in_0"),
"ADDRESS_1": (0, 2, "lutff_1/in_1"),
"ADDRESS_2": (0, 2, "lutff_2/in_1"),
"ADDRESS_3": (0, 2, "lutff_3/in_1"),
"ADDRESS_4": (0, 2, "lutff_4/in_1"),
"ADDRESS_5": (0, 2, "lutff_5/in_1"),
"ADDRESS_6": (0, 2, "lutff_6/in_1"),
"ADDRESS_7": (0, 2, "lutff_7/in_1"),
"ADDRESS_8": (0, 2, "lutff_0/in_0"),
"ADDRESS_9": (0, 2, "lutff_1/in_0"),
"CHIPSELECT": (0, 3, "lutff_6/in_1"),
"CLOCK": (0, 1, "clk"),
"DATAIN_0": (0, 1, "lutff_0/in_3"),
"DATAIN_10": (0, 1, "lutff_2/in_1"),
"DATAIN_11": (0, 1, "lutff_3/in_1"),
"DATAIN_12": (0, 1, "lutff_4/in_1"),
"DATAIN_13": (0, 1, "lutff_5/in_1"),
"DATAIN_14": (0, 1, "lutff_6/in_1"),
"DATAIN_15": (0, 1, "lutff_7/in_1"),
"DATAIN_1": (0, 1, "lutff_1/in_3"),
"DATAIN_2": (0, 1, "lutff_2/in_3"),
"DATAIN_3": (0, 1, "lutff_3/in_3"),
"DATAIN_4": (0, 1, "lutff_4/in_3"),
"DATAIN_5": (0, 1, "lutff_5/in_3"),
"DATAIN_6": (0, 1, "lutff_6/in_3"),
"DATAIN_7": (0, 1, "lutff_7/in_3"),
"DATAIN_8": (0, 1, "lutff_0/in_1"),
"DATAIN_9": (0, 1, "lutff_1/in_1"),
"DATAOUT_0": (0, 1, "slf_op_0"),
"DATAOUT_10": (0, 2, "slf_op_2"),
"DATAOUT_11": (0, 2, "slf_op_3"),
"DATAOUT_12": (0, 2, "slf_op_4"),
"DATAOUT_13": (0, 2, "slf_op_5"),
"DATAOUT_14": (0, 2, "slf_op_6"),
"DATAOUT_15": (0, 2, "slf_op_7"),
"DATAOUT_1": (0, 1, "slf_op_1"),
"DATAOUT_2": (0, 1, "slf_op_2"),
"DATAOUT_3": (0, 1, "slf_op_3"),
"DATAOUT_4": (0, 1, "slf_op_4"),
"DATAOUT_5": (0, 1, "slf_op_5"),
"DATAOUT_6": (0, 1, "slf_op_6"),
"DATAOUT_7": (0, 1, "slf_op_7"),
"DATAOUT_8": (0, 2, "slf_op_0"),
"DATAOUT_9": (0, 2, "slf_op_1"),
"MASKWREN_0": (0, 3, "lutff_0/in_0"),
"MASKWREN_1": (0, 3, "lutff_1/in_0"),
"MASKWREN_2": (0, 3, "lutff_2/in_0"),
"MASKWREN_3": (0, 3, "lutff_3/in_0"),
"POWEROFF": (0, 4, "lutff_4/in_3"),
"SLEEP": (0, 4, "lutff_2/in_3"),
"SPRAM_EN": (0, 1, "CBIT_0"),
"STANDBY": (0, 4, "lutff_0/in_3"),
"WREN": (0, 3, "lutff_4/in_1"),
},
(0, 0, 2): {
"ADDRESS_0": (0, 2, "lutff_6/in_0"),
"ADDRESS_10": (0, 3, "lutff_0/in_1"),
"ADDRESS_11": (0, 3, "lutff_1/in_1"),
"ADDRESS_12": (0, 3, "lutff_2/in_1"),
"ADDRESS_13": (0, 3, "lutff_3/in_1"),
"ADDRESS_1": (0, 2, "lutff_7/in_0"),
"ADDRESS_2": (0, 3, "lutff_0/in_3"),
"ADDRESS_3": (0, 3, "lutff_1/in_3"),
"ADDRESS_4": (0, 3, "lutff_2/in_3"),
"ADDRESS_5": (0, 3, "lutff_3/in_3"),
"ADDRESS_6": (0, 3, "lutff_4/in_3"),
"ADDRESS_7": (0, 3, "lutff_5/in_3"),
"ADDRESS_8": (0, 3, "lutff_6/in_3"),
"ADDRESS_9": (0, 3, "lutff_7/in_3"),
"CHIPSELECT": (0, 3, "lutff_7/in_1"),
"CLOCK": (0, 2, "clk"),
"DATAIN_0": (0, 1, "lutff_0/in_0"),
"DATAIN_10": (0, 2, "lutff_2/in_3"),
"DATAIN_11": (0, 2, "lutff_3/in_3"),
"DATAIN_12": (0, 2, "lutff_4/in_3"),
"DATAIN_13": (0, 2, "lutff_5/in_3"),
"DATAIN_14": (0, 2, "lutff_6/in_3"),
"DATAIN_15": (0, 2, "lutff_7/in_3"),
"DATAIN_1": (0, 1, "lutff_1/in_0"),
"DATAIN_2": (0, 1, "lutff_2/in_0"),
"DATAIN_3": (0, 1, "lutff_3/in_0"),
"DATAIN_4": (0, 1, "lutff_4/in_0"),
"DATAIN_5": (0, 1, "lutff_5/in_0"),
"DATAIN_6": (0, 1, "lutff_6/in_0"),
"DATAIN_7": (0, 1, "lutff_7/in_0"),
"DATAIN_8": (0, 2, "lutff_0/in_3"),
"DATAIN_9": (0, 2, "lutff_1/in_3"),
"DATAOUT_0": (0, 3, "slf_op_0"),
"DATAOUT_10": (0, 4, "slf_op_2"),
"DATAOUT_11": (0, 4, "slf_op_3"),
"DATAOUT_12": (0, 4, "slf_op_4"),
"DATAOUT_13": (0, 4, "slf_op_5"),
"DATAOUT_14": (0, 4, "slf_op_6"),
"DATAOUT_15": (0, 4, "slf_op_7"),
"DATAOUT_1": (0, 3, "slf_op_1"),
"DATAOUT_2": (0, 3, "slf_op_2"),
"DATAOUT_3": (0, 3, "slf_op_3"),
"DATAOUT_4": (0, 3, "slf_op_4"),
"DATAOUT_5": (0, 3, "slf_op_5"),
"DATAOUT_6": (0, 3, "slf_op_6"),
"DATAOUT_7": (0, 3, "slf_op_7"),
"DATAOUT_8": (0, 4, "slf_op_0"),
"DATAOUT_9": (0, 4, "slf_op_1"),
"MASKWREN_0": (0, 3, "lutff_4/in_0"),
"MASKWREN_1": (0, 3, "lutff_5/in_0"),
"MASKWREN_2": (0, 3, "lutff_6/in_0"),
"MASKWREN_3": (0, 3, "lutff_7/in_0"),
"POWEROFF": (0, 4, "lutff_5/in_3"),
"SLEEP": (0, 4, "lutff_3/in_3"),
"SPRAM_EN": (0, 1, "CBIT_1"),
"STANDBY": (0, 4, "lutff_1/in_3"),
"WREN": (0, 3, "lutff_5/in_1"),
},
(25, 0, 3): {
"ADDRESS_0": (25, 2, "lutff_0/in_1"),
"ADDRESS_10": (25, 2, "lutff_2/in_0"),
"ADDRESS_11": (25, 2, "lutff_3/in_0"),
"ADDRESS_12": (25, 2, "lutff_4/in_0"),
"ADDRESS_13": (25, 2, "lutff_5/in_0"),
"ADDRESS_1": (25, 2, "lutff_1/in_1"),
"ADDRESS_2": (25, 2, "lutff_2/in_1"),
"ADDRESS_3": (25, 2, "lutff_3/in_1"),
"ADDRESS_4": (25, 2, "lutff_4/in_1"),
"ADDRESS_5": (25, 2, "lutff_5/in_1"),
"ADDRESS_6": (25, 2, "lutff_6/in_1"),
"ADDRESS_7": (25, 2, "lutff_7/in_1"),
"ADDRESS_8": (25, 2, "lutff_0/in_0"),
"ADDRESS_9": (25, 2, "lutff_1/in_0"),
"CHIPSELECT": (25, 3, "lutff_6/in_1"),
"CLOCK": (25, 1, "clk"),
"DATAIN_0": (25, 1, "lutff_0/in_3"),
"DATAIN_10": (25, 1, "lutff_2/in_1"),
"DATAIN_11": (25, 1, "lutff_3/in_1"),
"DATAIN_12": (25, 1, "lutff_4/in_1"),
"DATAIN_13": (25, 1, "lutff_5/in_1"),
"DATAIN_14": (25, 1, "lutff_6/in_1"),
"DATAIN_15": (25, 1, "lutff_7/in_1"),
"DATAIN_1": (25, 1, "lutff_1/in_3"),
"DATAIN_2": (25, 1, "lutff_2/in_3"),
"DATAIN_3": (25, 1, "lutff_3/in_3"),
"DATAIN_4": (25, 1, "lutff_4/in_3"),
"DATAIN_5": (25, 1, "lutff_5/in_3"),
"DATAIN_6": (25, 1, "lutff_6/in_3"),
"DATAIN_7": (25, 1, "lutff_7/in_3"),
"DATAIN_8": (25, 1, "lutff_0/in_1"),
"DATAIN_9": (25, 1, "lutff_1/in_1"),
"DATAOUT_0": (25, 1, "slf_op_0"),
"DATAOUT_10": (25, 2, "slf_op_2"),
"DATAOUT_11": (25, 2, "slf_op_3"),
"DATAOUT_12": (25, 2, "slf_op_4"),
"DATAOUT_13": (25, 2, "slf_op_5"),
"DATAOUT_14": (25, 2, "slf_op_6"),
"DATAOUT_15": (25, 2, "slf_op_7"),
"DATAOUT_1": (25, 1, "slf_op_1"),
"DATAOUT_2": (25, 1, "slf_op_2"),
"DATAOUT_3": (25, 1, "slf_op_3"),
"DATAOUT_4": (25, 1, "slf_op_4"),
"DATAOUT_5": (25, 1, "slf_op_5"),
"DATAOUT_6": (25, 1, "slf_op_6"),
"DATAOUT_7": (25, 1, "slf_op_7"),
"DATAOUT_8": (25, 2, "slf_op_0"),
"DATAOUT_9": (25, 2, "slf_op_1"),
"MASKWREN_0": (25, 3, "lutff_0/in_0"),
"MASKWREN_1": (25, 3, "lutff_1/in_0"),
"MASKWREN_2": (25, 3, "lutff_2/in_0"),
"MASKWREN_3": (25, 3, "lutff_3/in_0"),
"POWEROFF": (25, 4, "lutff_4/in_3"),
"SLEEP": (25, 4, "lutff_2/in_3"),
"SPRAM_EN": (25, 1, "CBIT_0"),
"STANDBY": (25, 4, "lutff_0/in_3"),
"WREN": (25, 3, "lutff_4/in_1"),
},
(25, 0, 4): {
"ADDRESS_0": (25, 2, "lutff_6/in_0"),
"ADDRESS_10": (25, 3, "lutff_0/in_1"),
"ADDRESS_11": (25, 3, "lutff_1/in_1"),
"ADDRESS_12": (25, 3, "lutff_2/in_1"),
"ADDRESS_13": (25, 3, "lutff_3/in_1"),
"ADDRESS_1": (25, 2, "lutff_7/in_0"),
"ADDRESS_2": (25, 3, "lutff_0/in_3"),
"ADDRESS_3": (25, 3, "lutff_1/in_3"),
"ADDRESS_4": (25, 3, "lutff_2/in_3"),
"ADDRESS_5": (25, 3, "lutff_3/in_3"),
"ADDRESS_6": (25, 3, "lutff_4/in_3"),
"ADDRESS_7": (25, 3, "lutff_5/in_3"),
"ADDRESS_8": (25, 3, "lutff_6/in_3"),
"ADDRESS_9": (25, 3, "lutff_7/in_3"),
"CHIPSELECT": (25, 3, "lutff_7/in_1"),
"CLOCK": (25, 2, "clk"),
"DATAIN_0": (25, 1, "lutff_0/in_0"),
"DATAIN_10": (25, 2, "lutff_2/in_3"),
"DATAIN_11": (25, 2, "lutff_3/in_3"),
"DATAIN_12": (25, 2, "lutff_4/in_3"),
"DATAIN_13": (25, 2, "lutff_5/in_3"),
"DATAIN_14": (25, 2, "lutff_6/in_3"),
"DATAIN_15": (25, 2, "lutff_7/in_3"),
"DATAIN_1": (25, 1, "lutff_1/in_0"),
"DATAIN_2": (25, 1, "lutff_2/in_0"),
"DATAIN_3": (25, 1, "lutff_3/in_0"),
"DATAIN_4": (25, 1, "lutff_4/in_0"),
"DATAIN_5": (25, 1, "lutff_5/in_0"),
"DATAIN_6": (25, 1, "lutff_6/in_0"),
"DATAIN_7": (25, 1, "lutff_7/in_0"),
"DATAIN_8": (25, 2, "lutff_0/in_3"),
"DATAIN_9": (25, 2, "lutff_1/in_3"),
"DATAOUT_0": (25, 3, "slf_op_0"),
"DATAOUT_10": (25, 4, "slf_op_2"),
"DATAOUT_11": (25, 4, "slf_op_3"),
"DATAOUT_12": (25, 4, "slf_op_4"),
"DATAOUT_13": (25, 4, "slf_op_5"),
"DATAOUT_14": (25, 4, "slf_op_6"),
"DATAOUT_15": (25, 4, "slf_op_7"),
"DATAOUT_1": (25, 3, "slf_op_1"),
"DATAOUT_2": (25, 3, "slf_op_2"),
"DATAOUT_3": (25, 3, "slf_op_3"),
"DATAOUT_4": (25, 3, "slf_op_4"),
"DATAOUT_5": (25, 3, "slf_op_5"),
"DATAOUT_6": (25, 3, "slf_op_6"),
"DATAOUT_7": (25, 3, "slf_op_7"),
"DATAOUT_8": (25, 4, "slf_op_0"),
"DATAOUT_9": (25, 4, "slf_op_1"),
"MASKWREN_0": (25, 3, "lutff_4/in_0"),
"MASKWREN_1": (25, 3, "lutff_5/in_0"),
"MASKWREN_2": (25, 3, "lutff_6/in_0"),
"MASKWREN_3": (25, 3, "lutff_7/in_0"),
"POWEROFF": (25, 4, "lutff_5/in_3"),
"SLEEP": (25, 4, "lutff_3/in_3"),
"SPRAM_EN": (25, 1, "CBIT_1"),
"STANDBY": (25, 4, "lutff_1/in_3"),
"WREN": (25, 3, "lutff_5/in_1"),
}
}
}
# This contains the data for extra cells not included
# in any previous databases
extra_cells_db = {
"5k" : {
("HFOSC", (0, 31, 1)) : {
"CLKHFPU": (0, 29, "lutff_0/in_1"),
"CLKHFEN": (0, 29, "lutff_7/in_3"),
"CLKHF": (0, 29, "glb_netwk_4"),
"CLKHF_FABRIC": (0, 28, "slf_op_7"),
"TRIM0": (25, 28, "lutff_4/in_0"),
"TRIM1": (25, 28, "lutff_5/in_0"),
"TRIM2": (25, 28, "lutff_6/in_0"),
"TRIM3": (25, 28, "lutff_7/in_0"),
"TRIM4": (25, 29, "lutff_0/in_3"),
"TRIM5": (25, 29, "lutff_1/in_3"),
"TRIM6": (25, 29, "lutff_2/in_3"),
"TRIM7": (25, 29, "lutff_3/in_3"),
"TRIM8": (25, 29, "lutff_4/in_3"),
"TRIM9": (25, 29, "lutff_5/in_3"),
"CLKHF_DIV_1": (0, 16, "CBIT_4"),
"CLKHF_DIV_0": (0, 16, "CBIT_3"),
"TRIM_EN": (0, 16, "CBIT_5")
},
("LFOSC", (25, 31, 1)) : {
"CLKLFPU": (25, 29, "lutff_0/in_1"),
"CLKLFEN": (25, 29, "lutff_7/in_3"),
"CLKLF": (25, 29, "glb_netwk_5"),
"CLKLF_FABRIC": (25, 29, "slf_op_0")
},
("RGBA_DRV", (0, 30, 0)) : {
"CURREN": (25, 29, "lutff_6/in_3"),
"RGBLEDEN": (0, 30, "lutff_1/in_1"),
"RGB0PWM": (0, 30, "lutff_2/in_1"),
"RGB1PWM": (0, 30, "lutff_3/in_1"),
"RGB2PWM": (0, 30, "lutff_4/in_1"),
"RGBA_DRV_EN": (0, 28, "CBIT_5"),
"RGB0_CURRENT_0": (0, 28, "CBIT_6"),
"RGB0_CURRENT_1": (0, 28, "CBIT_7"),
"RGB0_CURRENT_2": (0, 29, "CBIT_0"),
"RGB0_CURRENT_3": (0, 29, "CBIT_1"),
"RGB0_CURRENT_4": (0, 29, "CBIT_2"),
"RGB0_CURRENT_5": (0, 29, "CBIT_3"),
"RGB1_CURRENT_0": (0, 29, "CBIT_4"),
"RGB1_CURRENT_1": (0, 29, "CBIT_5"),
"RGB1_CURRENT_2": (0, 29, "CBIT_6"),
"RGB1_CURRENT_3": (0, 29, "CBIT_7"),
"RGB1_CURRENT_4": (0, 30, "CBIT_0"),
"RGB1_CURRENT_5": (0, 30, "CBIT_1"),
"RGB2_CURRENT_0": (0, 30, "CBIT_2"),
"RGB2_CURRENT_1": (0, 30, "CBIT_3"),
"RGB2_CURRENT_2": (0, 30, "CBIT_4"),
"RGB2_CURRENT_3": (0, 30, "CBIT_5"),
"RGB2_CURRENT_4": (0, 30, "CBIT_6"),
"RGB2_CURRENT_5": (0, 30, "CBIT_7"),
"CURRENT_MODE": (0, 28, "CBIT_4"),
},
("I2C", (0, 31, 0)): {
"I2CIRQ": (0, 30, "slf_op_7"),
"I2CWKUP": (0, 29, "slf_op_5"),
"I2C_ENABLE_0": (13, 31, "cbit2usealt_in_0"),
"I2C_ENABLE_1": (12, 31, "cbit2usealt_in_1"),
"SBACKO": (0, 30, "slf_op_6"),
"SBADRI0": (0, 30, "lutff_1/in_0"),
"SBADRI1": (0, 30, "lutff_2/in_0"),
"SBADRI2": (0, 30, "lutff_3/in_0"),
"SBADRI3": (0, 30, "lutff_4/in_0"),
"SBADRI4": (0, 30, "lutff_5/in_0"),
"SBADRI5": (0, 30, "lutff_6/in_0"),
"SBADRI6": (0, 30, "lutff_7/in_0"),
"SBADRI7": (0, 29, "lutff_2/in_0"),
"SBCLKI": (0, 30, "clk"),
"SBDATI0": (0, 29, "lutff_5/in_0"),
"SBDATI1": (0, 29, "lutff_6/in_0"),
"SBDATI2": (0, 29, "lutff_7/in_0"),
"SBDATI3": (0, 30, "lutff_0/in_3"),
"SBDATI4": (0, 30, "lutff_5/in_1"),
"SBDATI5": (0, 30, "lutff_6/in_1"),
"SBDATI6": (0, 30, "lutff_7/in_1"),
"SBDATI7": (0, 30, "lutff_0/in_0"),
"SBDATO0": (0, 29, "slf_op_6"),
"SBDATO1": (0, 29, "slf_op_7"),
"SBDATO2": (0, 30, "slf_op_0"),
"SBDATO3": (0, 30, "slf_op_1"),
"SBDATO4": (0, 30, "slf_op_2"),
"SBDATO5": (0, 30, "slf_op_3"),
"SBDATO6": (0, 30, "slf_op_4"),
"SBDATO7": (0, 30, "slf_op_5"),
"SBRWI": (0, 29, "lutff_4/in_0"),
"SBSTBI": (0, 29, "lutff_3/in_0"),
"SCLI": (0, 29, "lutff_2/in_1"),
"SCLO": (0, 29, "slf_op_3"),
"SCLOE": (0, 29, "slf_op_4"),
"SDAI": (0, 29, "lutff_1/in_1"),
"SDAO": (0, 29, "slf_op_1"),
"SDAOE": (0, 29, "slf_op_2"),
"SDA_INPUT_DELAYED": (12, 31, "SDA_input_delay"),
"SDA_OUTPUT_DELAYED": (12, 31, "SDA_output_delay"),
},
("I2C", (25, 31, 0)): {
"I2CIRQ": (25, 30, "slf_op_7"),
"I2CWKUP": (25, 29, "slf_op_5"),
"I2C_ENABLE_0": (19, 31, "cbit2usealt_in_0"),
"I2C_ENABLE_1": (19, 31, "cbit2usealt_in_1"),
"SBACKO": (25, 30, "slf_op_6"),
"SBADRI0": (25, 30, "lutff_1/in_0"),
"SBADRI1": (25, 30, "lutff_2/in_0"),
"SBADRI2": (25, 30, "lutff_3/in_0"),
"SBADRI3": (25, 30, "lutff_4/in_0"),
"SBADRI4": (25, 30, "lutff_5/in_0"),
"SBADRI5": (25, 30, "lutff_6/in_0"),
"SBADRI6": (25, 30, "lutff_7/in_0"),
"SBADRI7": (25, 29, "lutff_2/in_0"),
"SBCLKI": (25, 30, "clk"),
"SBDATI0": (25, 29, "lutff_5/in_0"),
"SBDATI1": (25, 29, "lutff_6/in_0"),
"SBDATI2": (25, 29, "lutff_7/in_0"),
"SBDATI3": (25, 30, "lutff_0/in_3"),
"SBDATI4": (25, 30, "lutff_5/in_1"),
"SBDATI5": (25, 30, "lutff_6/in_1"),
"SBDATI6": (25, 30, "lutff_7/in_1"),
"SBDATI7": (25, 30, "lutff_0/in_0"),
"SBDATO0": (25, 29, "slf_op_6"),
"SBDATO1": (25, 29, "slf_op_7"),
"SBDATO2": (25, 30, "slf_op_0"),
"SBDATO3": (25, 30, "slf_op_1"),
"SBDATO4": (25, 30, "slf_op_2"),
"SBDATO5": (25, 30, "slf_op_3"),
"SBDATO6": (25, 30, "slf_op_4"),
"SBDATO7": (25, 30, "slf_op_5"),
"SBRWI": (25, 29, "lutff_4/in_0"),
"SBSTBI": (25, 29, "lutff_3/in_0"),
"SCLI": (25, 29, "lutff_2/in_1"),
"SCLO": (25, 29, "slf_op_3"),
"SCLOE": (25, 29, "slf_op_4"),
"SDAI": (25, 29, "lutff_1/in_1"),
"SDAO": (25, 29, "slf_op_1"),
"SDAOE": (25, 29, "slf_op_2"),
"SDA_INPUT_DELAYED": (19, 31, "SDA_input_delay"),
"SDA_OUTPUT_DELAYED": (19, 31, "SDA_output_delay"),
},
("SPI", (0, 0, 0)): {
"MCSNO0": (0, 21, "slf_op_2"),
"MCSNO1": (0, 21, "slf_op_4"),
"MCSNO2": (0, 21, "slf_op_7"),
"MCSNO3": (0, 22, "slf_op_1"),
"MCSNOE0": (0, 21, "slf_op_3"),
"MCSNOE1": (0, 21, "slf_op_5"),
"MCSNOE2": (0, 22, "slf_op_0"),
"MCSNOE3": (0, 22, "slf_op_2"),
"MI": (0, 22, "lutff_0/in_1"),
"MO": (0, 20, "slf_op_6"),
"MOE": (0, 20, "slf_op_7"),
"SBACKO": (0, 20, "slf_op_1"),
"SBADRI0": (0, 19, "lutff_1/in_1"),
"SBADRI1": (0, 19, "lutff_2/in_1"),
"SBADRI2": (0, 20, "lutff_0/in_3"),
"SBADRI3": (0, 20, "lutff_1/in_3"),
"SBADRI4": (0, 20, "lutff_2/in_3"),
"SBADRI5": (0, 20, "lutff_3/in_3"),
"SBADRI6": (0, 20, "lutff_4/in_3"),
"SBADRI7": (0, 20, "lutff_5/in_3"),
"SBCLKI": (0, 20, "clk"),
"SBDATI0": (0, 19, "lutff_1/in_3"),
"SBDATI1": (0, 19, "lutff_2/in_3"),
"SBDATI2": (0, 19, "lutff_3/in_3"),
"SBDATI3": (0, 19, "lutff_4/in_3"),
"SBDATI4": (0, 19, "lutff_5/in_3"),
"SBDATI5": (0, 19, "lutff_6/in_3"),
"SBDATI6": (0, 19, "lutff_7/in_3"),
"SBDATI7": (0, 19, "lutff_0/in_1"),
"SBDATO0": (0, 19, "slf_op_1"),
"SBDATO1": (0, 19, "slf_op_2"),
"SBDATO2": (0, 19, "slf_op_3"),
"SBDATO3": (0, 19, "slf_op_4"),
"SBDATO4": (0, 19, "slf_op_5"),
"SBDATO5": (0, 19, "slf_op_6"),
"SBDATO6": (0, 19, "slf_op_7"),
"SBDATO7": (0, 20, "slf_op_0"),
"SBRWI": (0, 19, "lutff_0/in_3"),
"SBSTBI": (0, 20, "lutff_6/in_3"),
"SCKI": (0, 22, "lutff_1/in_1"),
"SCKO": (0, 21, "slf_op_0"),
"SCKOE": (0, 21, "slf_op_1"),
"SCSNI": (0, 22, "lutff_2/in_1"),
"SI": (0, 22, "lutff_7/in_3"),
"SO": (0, 20, "slf_op_4"),
"SOE": (0, 20, "slf_op_5"),
"SPIIRQ": (0, 20, "slf_op_2"),
"SPIWKUP": (0, 20, "slf_op_3"),
"SPI_ENABLE_0": (7, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_1": (7, 0, "cbit2usealt_in_1"),
"SPI_ENABLE_2": (6, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_3": (6, 0, "cbit2usealt_in_1"),
},
("SPI", (25, 0, 1)): {
"MCSNO0": (25, 21, "slf_op_2"),
"MCSNO1": (25, 21, "slf_op_4"),
"MCSNO2": (25, 21, "slf_op_7"),
"MCSNO3": (25, 22, "slf_op_1"),
"MCSNOE0": (25, 21, "slf_op_3"),
"MCSNOE1": (25, 21, "slf_op_5"),
"MCSNOE2": (25, 22, "slf_op_0"),
"MCSNOE3": (25, 22, "slf_op_2"),
"MI": (25, 22, "lutff_0/in_1"),
"MO": (25, 20, "slf_op_6"),
"MOE": (25, 20, "slf_op_7"),
"SBACKO": (25, 20, "slf_op_1"),
"SBADRI0": (25, 19, "lutff_1/in_1"),
"SBADRI1": (25, 19, "lutff_2/in_1"),
"SBADRI2": (25, 20, "lutff_0/in_3"),
"SBADRI3": (25, 20, "lutff_1/in_3"),
"SBADRI4": (25, 20, "lutff_2/in_3"),
"SBADRI5": (25, 20, "lutff_3/in_3"),
"SBADRI6": (25, 20, "lutff_4/in_3"),
"SBADRI7": (25, 20, "lutff_5/in_3"),
"SBCLKI": (25, 20, "clk"),
"SBDATI0": (25, 19, "lutff_1/in_3"),
"SBDATI1": (25, 19, "lutff_2/in_3"),
"SBDATI2": (25, 19, "lutff_3/in_3"),
"SBDATI3": (25, 19, "lutff_4/in_3"),
"SBDATI4": (25, 19, "lutff_5/in_3"),
"SBDATI5": (25, 19, "lutff_6/in_3"),
"SBDATI6": (25, 19, "lutff_7/in_3"),
"SBDATI7": (25, 19, "lutff_0/in_1"),
"SBDATO0": (25, 19, "slf_op_1"),
"SBDATO1": (25, 19, "slf_op_2"),
"SBDATO2": (25, 19, "slf_op_3"),
"SBDATO3": (25, 19, "slf_op_4"),
"SBDATO4": (25, 19, "slf_op_5"),
"SBDATO5": (25, 19, "slf_op_6"),
"SBDATO6": (25, 19, "slf_op_7"),
"SBDATO7": (25, 20, "slf_op_0"),
"SBRWI": (25, 19, "lutff_0/in_3"),
"SBSTBI": (25, 20, "lutff_6/in_3"),
"SCKI": (25, 22, "lutff_1/in_1"),
"SCKO": (25, 21, "slf_op_0"),
"SCKOE": (25, 21, "slf_op_1"),
"SCSNI": (25, 22, "lutff_2/in_1"),
"SI": (25, 22, "lutff_7/in_3"),
"SO": (25, 20, "slf_op_4"),
"SOE": (25, 20, "slf_op_5"),
"SPIIRQ": (25, 20, "slf_op_2"),
"SPIWKUP": (25, 20, "slf_op_3"),
"SPI_ENABLE_0": (23, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_1": (24, 0, "cbit2usealt_in_0"),
"SPI_ENABLE_2": (23, 0, "cbit2usealt_in_1"),
"SPI_ENABLE_3": (24, 0, "cbit2usealt_in_1"),
},
("LEDDA_IP", (0, 31, 2)): {
"LEDDADDR0": (0, 28, "lutff_4/in_0"),
"LEDDADDR1": (0, 28, "lutff_5/in_0"),
"LEDDADDR2": (0, 28, "lutff_6/in_0"),
"LEDDADDR3": (0, 28, "lutff_7/in_0"),
"LEDDCLK": (0, 29, "clk"),
"LEDDCS": (0, 28, "lutff_2/in_0"),
"LEDDDAT0": (0, 28, "lutff_2/in_1"),
"LEDDDAT1": (0, 28, "lutff_3/in_1"),
"LEDDDAT2": (0, 28, "lutff_4/in_1"),
"LEDDDAT3": (0, 28, "lutff_5/in_1"),
"LEDDDAT4": (0, 28, "lutff_6/in_1"),
"LEDDDAT5": (0, 28, "lutff_7/in_1"),
"LEDDDAT6": (0, 28, "lutff_0/in_0"),
"LEDDDAT7": (0, 28, "lutff_1/in_0"),
"LEDDDEN": (0, 28, "lutff_1/in_1"),
"LEDDEXE": (0, 28, "lutff_0/in_1"),
"LEDDON": (0, 29, "slf_op_0"),
"PWMOUT0": (0, 28, "slf_op_4"),
"PWMOUT1": (0, 28, "slf_op_5"),
"PWMOUT2": (0, 28, "slf_op_6"),
},
}
}
# Parse the textual chip databases (shipped in iceboxdb) into per-tile,
# per-device bit databases. The device string selects device-specific rows.
iotile_full_db = parse_db(iceboxdb.database_io_txt)
logictile_db = parse_db(iceboxdb.database_logic_txt, "1k")
logictile_5k_db = parse_db(iceboxdb.database_logic_txt, "5k")
logictile_8k_db = parse_db(iceboxdb.database_logic_txt, "8k")
logictile_384_db = parse_db(iceboxdb.database_logic_txt, "384")
rambtile_db = parse_db(iceboxdb.database_ramb_txt, "1k")
ramttile_db = parse_db(iceboxdb.database_ramt_txt, "1k")
rambtile_5k_db = parse_db(iceboxdb.database_ramb_5k_txt, "5k")
ramttile_5k_db = parse_db(iceboxdb.database_ramt_5k_txt, "5k")
rambtile_8k_db = parse_db(iceboxdb.database_ramb_8k_txt, "8k")
ramttile_8k_db = parse_db(iceboxdb.database_ramt_8k_txt, "8k")
ipcon_5k_db = parse_db(iceboxdb.database_ipcon_5k_txt, "5k")
dsp0_5k_db = parse_db(iceboxdb.database_dsp0_5k_txt, "5k")
dsp1_5k_db = parse_db(iceboxdb.database_dsp1_5k_txt, "5k")
# This bit doesn't exist in DB because icecube won't ever set it,
# but it exists
dsp1_5k_db.append([["B4[7]"], "IpConfig", "CBIT_5"])
dsp2_5k_db = parse_db(iceboxdb.database_dsp2_5k_txt, "5k")
dsp3_5k_db = parse_db(iceboxdb.database_dsp3_5k_txt, "5k")
# Add missing LC_ bits to DSP and IPCon databases: copy the bit pattern
# (entry[0]) from the identically-named LC_ entry of the 5k logic tile db.
for db_to_fix in [ipcon_5k_db, dsp0_5k_db, dsp1_5k_db, dsp2_5k_db, dsp3_5k_db]:
    for entry in db_to_fix:
        if len(entry) >= 2 and entry[1].startswith("LC_"):
            for lentry in logictile_5k_db:
                if len(lentry) >= 2 and lentry[1] == entry[1]:
                    entry[0] = lentry[0]
iotile_l_db = list()
iotile_r_db = list()
iotile_t_db = list()
iotile_b_db = list()
# Split the full IO database into the four edge-specific databases.
# An "IO_<side>." buffer entry goes (with the prefix stripped) only to
# that side's db; every other entry is shared by all four sides.
_io_side_dbs = {
    "IO_L.": iotile_l_db,
    "IO_R.": iotile_r_db,
    "IO_T.": iotile_t_db,
    "IO_B.": iotile_b_db,
}
for entry in iotile_full_db:
    side_db = None
    if entry[1] == "buffer":
        side_db = _io_side_dbs.get(entry[2][:5])
    if side_db is not None:
        stripped = entry[:]
        stripped[2] = stripped[2][5:]
        side_db.append(stripped)
    else:
        for dest in _io_side_dbs.values():
            dest.append(entry)
# Every logic-tile variant gets the same two synthetic entries: the
# carry-in mux buffer and its constant-one ("CarryInSet") config bit.
for _logic_db in (logictile_db, logictile_5k_db, logictile_8k_db,
                  logictile_384_db):
    _logic_db.append([["B1[49]"], "buffer", "carry_in", "carry_in_mux"])
    _logic_db.append([["B1[50]"], "CarryInSet"])
# The 5k series has a couple of extra IO configuration bits. Add them into
# copies of the top/bottom IO databases here.
_extra_5k_io_entries = [
    (["B14[15]"], "IoCtrl", "padeb_test_1"),
    (["B15[14]"], "IoCtrl", "padeb_test_0"),
    (["B7[10]"], "IoCtrl", "cf_bit_32"),
    (["B6[10]"], "IoCtrl", "cf_bit_33"),
    (["B7[15]"], "IoCtrl", "cf_bit_34"),
    (["B6[15]"], "IoCtrl", "cf_bit_35"),
    (["B13[10]"], "IoCtrl", "cf_bit_36"),
    (["B12[10]"], "IoCtrl", "cf_bit_37"),
    (["B13[15]"], "IoCtrl", "cf_bit_38"),
    (["B12[15]"], "IoCtrl", "cf_bit_39"),
    (["B10[3]"], "IpConfig", "cbit2usealt_in_0"),
    (["B12[2]"], "IpConfig", "cbit2usealt_in_1"),
    (["B12[3]"], "IpConfig", "SDA_input_delay"),
    (["B15[3]"], "IpConfig", "SDA_output_delay"),
]
iotile_t_5k_db = list(iotile_t_db)
iotile_b_5k_db = list(iotile_b_db)
for _bits, _group, _name in _extra_5k_io_entries:
    # Append fresh list objects so the two databases never share mutable
    # entries (matching the original per-db literals).
    iotile_t_5k_db.append([list(_bits), _group, _name])
    iotile_b_5k_db.append([list(_bits), _group, _name])
# Normalize net names in every tile database, then deduplicate and sort
# the entries so the generated databases are stable across runs.
for db in [iotile_l_db, iotile_r_db, iotile_t_db, iotile_b_db, iotile_t_5k_db, iotile_b_5k_db, logictile_db, logictile_5k_db, logictile_8k_db, logictile_384_db, rambtile_db, ramttile_db, rambtile_5k_db, ramttile_5k_db, rambtile_8k_db, ramttile_8k_db, dsp0_5k_db, dsp1_5k_db, dsp2_5k_db, dsp3_5k_db, ipcon_5k_db]:
    for entry in db:
        if entry[1] in ("buffer", "routing"):
            # entry[2]/entry[3] are the source and destination net names;
            # RAM-tile databases use RAM-specific aliases, selected by
            # which db object we are currently processing.
            entry[2] = netname_normalize(entry[2],
                                         ramb=(db == rambtile_db),
                                         ramt=(db == ramttile_db),
                                         ramb_8k=(db in (rambtile_8k_db, rambtile_5k_db)),
                                         ramt_8k=(db in (ramttile_8k_db, ramttile_5k_db)))
            entry[3] = netname_normalize(entry[3],
                                         ramb=(db == rambtile_db),
                                         ramt=(db == ramttile_db),
                                         ramb_8k=(db in (rambtile_8k_db, rambtile_5k_db)),
                                         ramt_8k=(db in (ramttile_8k_db, ramttile_5k_db)))
    # Drain the db into a dict keyed on the entry's fields (later
    # duplicates win), then rebuild it sorted by key.
    unique_entries = dict()
    while db:
        entry = db.pop()
        key = " ".join(entry[1:]) + str(entry)
        unique_entries[key] = entry
    for key in sorted(unique_entries):
        db.append(unique_entries[key])
if __name__ == "__main__":
    run_checks()
|
from cantools.web import respond, succeed, fail, cgi_get, getcache
from cantools.db import get, get_schema, get_page, edit
from cantools import config
def response():
    """Dispatch authenticated admin requests: db CRUD, memcache dump,
    and pubsub connection info."""
    action = cgi_get("action", choices=["db", "memcache", "pubsub"])
    # Every admin action is password-gated.
    if cgi_get("pw") != config.admin.pw:
        fail("wrong")
    if action == "db":
        key = cgi_get("key", required=False)
        if key:
            # "edit"/"delete" are pseudo-keys whose payload rides in
            # "data"; any other key is fetched and returned directly.
            if key == "edit":
                entity = edit(cgi_get("data"))
                succeed({"key": entity.id(), "label": entity.label()})
            elif key == "delete":
                get(cgi_get("data")).rm()
                succeed()
            succeed(get(key).data())
        import model  # load up all models
        model_name = cgi_get("modelName", required=False)
        if model_name:
            succeed(get_page(model_name, cgi_get("limit"), cgi_get("offset")))
        succeed(get_schema())
    elif action == "memcache":
        succeed(getcache())
    elif action == "pubsub":
        succeed({"host": config.pubsub.host, "port": config.pubsub.port})
respond(response)
/admin db page getter: support order and filter args
from cantools.web import respond, succeed, fail, cgi_get, getcache
from cantools.db import get, get_schema, get_page, edit
from cantools import config
def response():
    """Dispatch authenticated admin requests: db CRUD/paging (with order
    and filter support), memcache dump, and pubsub connection info."""
    action = cgi_get("action", choices=["db", "memcache", "pubsub"])
    # Every admin action is password-gated.
    if cgi_get("pw") != config.admin.pw:
        fail("wrong")
    if action == "db":
        key = cgi_get("key", required=False)
        if key:
            # "edit"/"delete" are pseudo-keys whose payload rides in
            # "data"; any other key is fetched and returned directly.
            if key == "edit":
                entity = edit(cgi_get("data"))
                succeed({"key": entity.id(), "label": entity.label()})
            elif key == "delete":
                get(cgi_get("data")).rm()
                succeed()
            succeed(get(key).data())
        import model  # load up all models
        model_name = cgi_get("modelName", required=False)
        if model_name:
            # Paged listing with optional ordering and filtering.
            succeed(get_page(model_name, cgi_get("limit"), cgi_get("offset"),
                             cgi_get("order", default="index"),
                             cgi_get("filters", default=[])))
        succeed(get_schema())
    elif action == "memcache":
        succeed(getcache())
    elif action == "pubsub":
        succeed({"host": config.pubsub.host, "port": config.pubsub.port})
respond(response)
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
directed_hausdorff -- directed Hausdorff distance between arrays
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
jensenshannon -- the Jensen-Shannon distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
wminkowski -- (deprecated) weighted Minkowski distance; use `minkowski` with `w` instead.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulsinski -- the Kulsinski distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
# Public names exported by ``from scipy.spatial.distance import *``.
__all__ = [
    'braycurtis',
    'canberra',
    'cdist',
    'chebyshev',
    'cityblock',
    'correlation',
    'cosine',
    'dice',
    'directed_hausdorff',
    'euclidean',
    'hamming',
    'is_valid_dm',
    'is_valid_y',
    'jaccard',
    'jensenshannon',
    'kulsinski',
    'mahalanobis',
    'matching',
    'minkowski',
    'num_obs_dm',
    'num_obs_y',
    'pdist',
    'rogerstanimoto',
    'russellrao',
    'seuclidean',
    'sokalmichener',
    'sokalsneath',
    'sqeuclidean',
    'squareform',
    'wminkowski',
    'yule'
]
import warnings
import numpy as np
from functools import partial
from collections import namedtuple
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
from . import _distance_wrap
from . import _hausdorff
from ..linalg import norm
from ..special import rel_entr
def _args_to_kwargs_xdist(args, kwargs, metric, func_name):
"""
Convert legacy positional arguments to keyword arguments for pdist/cdist.
"""
if not args:
return kwargs
if (callable(metric) and metric not in [
braycurtis, canberra, chebyshev, cityblock, correlation, cosine,
dice, euclidean, hamming, jaccard, jensenshannon, kulsinski,
mahalanobis, matching, minkowski, rogerstanimoto, russellrao,
seuclidean, sokalmichener, sokalsneath, sqeuclidean, yule,
wminkowski]):
raise TypeError('When using a custom metric arguments must be passed'
'as keyword (i.e., ARGNAME=ARGVALUE)')
if func_name == 'pdist':
old_arg_names = ['p', 'w', 'V', 'VI']
else:
old_arg_names = ['p', 'V', 'VI', 'w']
num_args = len(args)
warnings.warn('%d metric parameters have been passed as positional.'
'This will raise an error in a future version.'
'Please pass arguments as keywords(i.e., ARGNAME=ARGVALUE)'
% num_args, DeprecationWarning)
if num_args > 4:
raise ValueError('Deprecated %s signature accepts only 4'
'positional arguments (%s), %d given.'
% (func_name, ', '.join(old_arg_names), num_args))
for old_arg, arg in zip(old_arg_names, args):
if old_arg in kwargs:
raise TypeError('%s() got multiple values for argument %s'
% (func_name, old_arg))
kwargs[old_arg] = arg
return kwargs
def _copy_array_if_base_present(a):
"""Copy the array if its base points to a parent array."""
if a.base is not None:
return a.copy()
return a
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
    # Correlation distance is cosine distance on mean-centered rows,
    # so center each observation and reuse the cosine C kernel.
    centered_a = XA - XA.mean(axis=1, keepdims=True)
    centered_b = XB - XB.mean(axis=1, keepdims=True)
    _distance_wrap.cdist_cosine_double_wrap(centered_a, centered_b, dm, **kwargs)
def _correlation_pdist_wrap(X, dm, **kwargs):
    # Correlation distance is cosine distance on mean-centered rows.
    centered = X - X.mean(axis=1, keepdims=True)
    _distance_wrap.pdist_cosine_double_wrap(centered, dm, **kwargs)
def _convert_to_type(X, out_type):
return np.ascontiguousarray(X, dtype=out_type)
def _filter_deprecated_kwargs(kwargs, args_blacklist):
# Filtering out old default keywords
for k in args_blacklist:
if k in kwargs:
del kwargs[k]
warnings.warn('Got unexpected kwarg %s. This will raise an error'
' in a future version.' % k, DeprecationWarning)
def _nbool_correspond_all(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
return (nft, ntf)
def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs):
    """Coerce cdist inputs to a metric-supported dtype and validate kwargs."""
    if metric_name is None:
        return XA, XB, None, kwargs
    # Keep XA's dtype when the metric supports it; otherwise fall back to
    # the metric's preferred (first-listed) type.
    supported = _METRICS[metric_name].types
    typ = supported[supported.index(XA.dtype)] if XA.dtype in supported else supported[0]
    XA = _convert_to_type(XA, out_type=typ)
    XB = _convert_to_type(XB, out_type=typ)
    validator = _METRICS[metric_name].validator
    if validator:
        kwargs = validator(np.vstack([XA, XB]), mA + mB, n, **kwargs)
    return XA, XB, typ, kwargs
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
VI = kwargs.pop('VI', None)
if VI is None:
if m <= n:
# There are fewer observations than the dimension of
# the observations.
raise ValueError("The number of observations (%d) is too "
"small; the covariance matrix is "
"singular. For observations with %d "
"dimensions, at least %d observations "
"are required." % (m, n, n + 1))
CV = np.atleast_2d(np.cov(X.astype(np.double).T))
VI = np.linalg.inv(CV).T.copy()
kwargs["VI"] = _convert_to_double(VI)
return kwargs
def _validate_minkowski_kwargs(X, m, n, **kwargs):
if 'p' not in kwargs:
kwargs['p'] = 2.
return kwargs
def _validate_pdist_input(X, m, n, metric_name, **kwargs):
    """Coerce pdist input to a metric-supported dtype and validate kwargs."""
    if metric_name is None:
        return X, None, kwargs
    # Keep X's dtype when the metric supports it; otherwise fall back to
    # the metric's preferred (first-listed) type.
    supported = _METRICS[metric_name].types
    typ = supported[supported.index(X.dtype)] if X.dtype in supported else supported[0]
    X = _convert_to_type(X, out_type=typ)
    validator = _METRICS[metric_name].validator
    if validator:
        kwargs = validator(X, m, n, **kwargs)
    return X, typ, kwargs
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
V = kwargs.pop('V', None)
if V is None:
V = np.var(X.astype(np.double), axis=0, ddof=1)
else:
V = np.asarray(V, order='c')
if V.dtype != np.double:
raise TypeError('Variance vector V must contain doubles.')
if len(V.shape) != 1:
raise ValueError('Variance vector V must '
'be one-dimensional.')
if V.shape[0] != n:
raise ValueError('Variance vector V must be of the same '
'dimension as the vectors on which the distances '
'are computed.')
kwargs['V'] = _convert_to_double(V)
return kwargs
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def _validate_weights(w, dtype=np.double):
    """Validate *w* as a non-negative 1-D weight vector of *dtype*."""
    w = _validate_vector(w, dtype=dtype)
    if (w < 0).any():
        raise ValueError("Input weights should be all non-negative")
    return w
def _validate_wminkowski_kwargs(X, m, n, **kwargs):
w = kwargs.pop('w', None)
if w is None:
raise ValueError('weighted minkowski requires a weight '
'vector `w` to be given.')
kwargs['w'] = _validate_weights(w)
if 'p' not in kwargs:
kwargs['p'] = 2.
return kwargs
def directed_hausdorff(u, v, seed=0):
    """
    Compute the directed Hausdorff distance between two N-D arrays.

    Distances between pairs are calculated using a Euclidean metric.

    Parameters
    ----------
    u : (M,N) ndarray
        Input array.
    v : (O,N) ndarray
        Input array.
    seed : int or None
        Local `np.random.RandomState` seed. Default is 0, a random shuffling of
        u and v that guarantees reproducibility.

    Returns
    -------
    d : double
        The directed Hausdorff distance between arrays `u` and `v`,
    index_1 : int
        index of point contributing to Hausdorff pair in `u`
    index_2 : int
        index of point contributing to Hausdorff pair in `v`

    Notes
    -----
    Uses the early break technique and the random sampling approach
    described by [1]_. Although worst-case performance is ``O(m * o)``
    (as with the brute force algorithm), this is unlikely in practice
    as the input data would have to require the algorithm to explore
    every single point interaction, and after the algorithm shuffles
    the input points at that. The best case performance is O(m), which
    is satisfied by selecting an inner loop distance that is less than
    cmax and leads to an early break as often as possible. The authors
    have formally shown that the average runtime is closer to O(m).

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
           calculating the exact Hausdorff distance." IEEE Transactions On
           Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
           2015.

    See Also
    --------
    scipy.spatial.procrustes : Another similarity test for two data sets

    Examples
    --------
    Find the directed Hausdorff distance between two 2-D arrays of
    coordinates:

    >>> from scipy.spatial.distance import directed_hausdorff
    >>> u = np.array([(1.0, 0.0),
    ...               (0.0, 1.0),
    ...               (-1.0, 0.0),
    ...               (0.0, -1.0)])
    >>> v = np.array([(2.0, 0.0),
    ...               (0.0, 2.0),
    ...               (-2.0, 0.0),
    ...               (0.0, -4.0)])

    >>> directed_hausdorff(u, v)[0]
    2.23606797749979
    >>> directed_hausdorff(v, u)[0]
    3.0

    Find the general (symmetric) Hausdorff distance between two 2-D
    arrays of coordinates:

    >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
    3.0

    Find the indices of the points that generate the Hausdorff distance
    (the Hausdorff pair):

    >>> directed_hausdorff(v, u)[1:]
    (3, 3)

    """
    # The compiled core requires C-contiguous float64 arrays.
    u = np.asarray(u, dtype=np.float64, order='c')
    v = np.asarray(v, dtype=np.float64, order='c')
    # result is the (distance, index_in_u, index_in_v) triple described above.
    result = _hausdorff.directed_hausdorff(u, v, seed)
    return result
def minkowski(u, v, p=2, w=None):
    """
    Compute the Minkowski distance between two 1-D arrays.

    The Minkowski distance between 1-D arrays `u` and `v`,
    is defined as

    .. math::

       {||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}.

    When weights `w` are supplied, this becomes

    .. math::

       \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : int
        The order of the norm of the difference :math:`{||u-v||}_p`.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    minkowski : double
        The Minkowski distance between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If `p` is less than 1.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
    1.2599210498948732
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)
    1.0
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)
    1.0
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if p < 1:
        raise ValueError("p must be at least 1")
    u_v = u - v
    if w is not None:
        w = _validate_weights(w)
        # Fold the weights in as w**(1/p) so that
        # norm(root_w * (u - v), ord=p) == (sum w_i |u_i - v_i|^p)**(1/p).
        # Fixed: the original used `if p == 1: ... if p == 2: ... else: ...`
        # (missing elif), making the p == 1 assignment dead code that was
        # immediately overwritten by np.power(w, 1.0).
        if p == 1:
            root_w = w
        elif p == 2:
            # better precision and speed
            root_w = np.sqrt(w)
        else:
            root_w = np.power(w, 1 / p)
        u_v = root_w * u_v
    dist = norm(u_v, ord=p)
    return dist
# `minkowski` gained weights in scipy 1.0. Once we're at, say, version 1.3,
# deprecate `wminkowski`. This is not done all at once because it would be
# annoying for downstream libraries that used `wminkowski` and support
# multiple scipy versions.
def wminkowski(u, v, p, w):
    """
    Compute the weighted Minkowski distance between two 1-D arrays.

    The weighted Minkowski distance between `u` and `v`, defined as

    .. math::

       \\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : int
        The order of the norm of the difference :math:`{||u-v||}_p`.
    w : (N,) array_like
        The weight vector.

    Returns
    -------
    wminkowski : double
        The weighted Minkowski distance between vectors `u` and `v`.

    Notes
    -----
    `wminkowski` is DEPRECATED. It implements a definition where weights
    are powered. It is recommended to use the weighted version of `minkowski`
    instead. This function will be removed in a future version of scipy.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3))
    2.0
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3))
    1.4142135623730951
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3))
    1.2599210498948732
    """
    validated = _validate_weights(w)
    # `minkowski` takes the p-th root of each weight internally, so feeding
    # it w**p reproduces the legacy |w_i (u_i - v_i)| definition exactly.
    return minkowski(u, v, p=p, w=validated ** p)
def euclidean(u, v, w=None):
    """
    Computes the Euclidean distance between two 1-D arrays.

    The Euclidean distance is the 2-norm of ``u - v``, optionally
    with a per-component weight applied:

    .. math::

       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    euclidean : double
        The Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951
    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    # Euclidean distance is simply the Minkowski distance with p fixed at 2.
    dist = minkowski(u, v, p=2, w=w)
    return dist
def sqeuclidean(u, v, w=None):
    """
    Compute the squared Euclidean distance between two 1-D arrays.

    The squared Euclidean distance between `u` and `v` is defined as

    .. math::

       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
    1.0
    """
    def _target_dtype(x):
        # Inputs already holding floats keep their dtype; everything else
        # is promoted to float64 for numerical stability.
        if hasattr(x, "dtype") and np.issubdtype(x.dtype, np.inexact):
            return None
        return np.float64

    u = _validate_vector(u, dtype=_target_dtype(u))
    v = _validate_vector(v, dtype=_target_dtype(v))
    diff = u - v
    weighted_diff = diff  # weights must be applied to one factor only
    if w is not None:
        w = _validate_weights(w)
        weighted_diff = w * diff
    return np.dot(diff, weighted_diff)
def correlation(u, v, w=None, centered=True):
    """
    Compute the correlation distance between two 1-D arrays.

    The correlation distance between `u` and `v`, is
    defined as

    .. math::

        1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                  {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}

    where :math:`\\bar{u}` is the mean of the elements of `u`
    and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0
    centered : bool, optional
        If True (default), subtract the (weighted) mean from each vector
        before computing the correlation.

    Returns
    -------
    correlation : double
        The correlation distance between 1-D array `u` and `v`.
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if centered:
        # np.average with weights=None is a plain mean.
        u = u - np.average(u, weights=w)
        v = v - np.average(v, weights=w)
    uv = np.average(u * v, weights=w)
    uu = np.average(np.square(u), weights=w)
    vv = np.average(np.square(v), weights=w)
    return 1.0 - uv / np.sqrt(uu * vv)
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    The Cosine distance between `u` and `v`, is defined as

    .. math::

        1 - \\frac{u \\cdot v}
                  {||u||_2 ||v||_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and
    :math:`v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([100, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254
    """
    # Cosine distance is also known as 'uncentered correlation' or
    # 'reflective correlation': correlation without mean-subtraction.
    dist = correlation(u, v, w=w, centered=False)
    return dist
def hamming(u, v, w=None):
    """
    Compute the Hamming distance between two 1-D arrays.

    The Hamming distance between 1-D arrays `u` and `v`, is simply the
    proportion of disagreeing components in `u` and `v`. If `u` and `v` are
    boolean vectors, the Hamming distance is

    .. math::

       \\frac{c_{01} + c_{10}}{n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    hamming : double
        The Hamming distance between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If `u` and `v` do not have the same shape.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0], [0, 1, 0])
    0.66666666666666663
    >>> distance.hamming([1, 0, 0], [1, 1, 0])
    0.33333333333333331
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.shape != v.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    mismatches = u != v
    if w is None:
        return np.average(mismatches)
    return np.average(mismatches, weights=_validate_weights(w))
def jaccard(u, v, w=None):
    """
    Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.

    The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{c_{TF} + c_{FT}}
            {c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    jaccard : double
        The Jaccard distance between vectors `u` and `v`.

    Notes
    -----
    If both `u` and `v` are entirely zero, the dissimilarity is defined
    as 0.0 rather than the indeterminate 0/0.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.jaccard([1, 0, 0], [1, 2, 0])
    0.5
    >>> distance.jaccard([1, 0, 0], [1, 1, 1])
    0.66666666666666663
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    nonzero = np.bitwise_or(u != 0, v != 0)
    unequal_nonzero = np.bitwise_and((u != v), nonzero)
    if w is not None:
        w = _validate_weights(w)
        nonzero = w * nonzero
        unequal_nonzero = w * unequal_nonzero
    a = np.double(unequal_nonzero.sum())
    b = np.double(nonzero.sum())
    # BUGFIX: guard the 0/0 case (both vectors all-zero); previously this
    # divided by zero and produced nan with a runtime warning.
    return (a / b) if b != 0 else 0.0
def kulsinski(u, v, w=None):
    """
    Compute the Kulsinski dissimilarity between two boolean 1-D arrays.

    The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

         \\frac{c_{TF} + c_{FT} - c_{TT} + n}
              {c_{FT} + c_{TF} + n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    kulsinski : double
        The Kulsinski distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.kulsinski([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.kulsinski([1, 0, 0], [1, 1, 0])
    0.75
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # n is the (weighted) vector length.
    if w is None:
        n = float(len(u))
    else:
        w = _validate_weights(w)
        n = w.sum()
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    numerator = ntf + nft - ntt + n
    denominator = ntf + nft + n
    return numerator / denominator
def seuclidean(u, v, V):
    """
    Return the standardized Euclidean distance between two 1-D arrays.

    Each squared component difference is divided by the corresponding
    variance in `V` before summing.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    V : (N,) array_like
        `V` is an 1-D array of component variances. It is usually computed
        among a larger collection vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between vectors `u` and `v`.

    Raises
    ------
    TypeError
        If `V` is not 1-D with the same length as `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
    4.4721359549995796
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
    3.3166247903553998
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    V = _validate_vector(V, dtype=np.float64)
    if not (V.shape[0] == u.shape[0] == v.shape[0]):
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    # Standardization is a weighted Euclidean distance with weights 1/V.
    return euclidean(u, v, w=1/V)
def cityblock(u, v, w=None):
    """
    Compute the City Block (Manhattan) distance.

    Computes the Manhattan distance between two 1-D arrays `u` and `v`,
    which is defined as

    .. math::

       \\sum_i {\\left| u_i - v_i \\right|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cityblock : double
        The City Block (Manhattan) distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2
    >>> distance.cityblock([1, 0, 0], [0, 2, 0])
    3
    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
    1
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is None:
        return abs_diff.sum()
    return (_validate_weights(w) * abs_diff).sum()
def mahalanobis(u, v, VI):
    """
    Compute the Mahalanobis distance between two 1-D arrays.

    The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as

    .. math::

       \\sqrt{ (u-v) V^{-1} (u-v)^T }

    where ``V`` is the covariance matrix. Note that the argument `VI`
    is the inverse of ``V``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    VI : ndarray
        The inverse of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
    1.7320508075688772
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    VI = np.atleast_2d(VI)
    diff = u - v
    # Quadratic form diff . VI . diff; keep left-to-right evaluation order
    # so the floating-point result matches the established behavior.
    quad_form = np.dot(np.dot(diff, VI), diff)
    return np.sqrt(quad_form)
def chebyshev(u, v, w=None):
    """
    Compute the Chebyshev distance.

    Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
    which is defined as

    .. math::

       \\max_i {|u_i-v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input vector.
    v : (N,) array_like
        Input vector.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0. Components whose weight
        is not positive are excluded from the maximum; the weight
        magnitudes themselves do not scale the result.

    Returns
    -------
    chebyshev : double
        The Chebyshev distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
    1
    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
    1
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        active = w > 0
        # Only slice when some component is actually masked out.
        if not active.all():
            u = u[active]
            v = v[active]
    return max(abs(u - v))
def braycurtis(u, v, w=None):
    """
    Compute the Bray-Curtis distance between two 1-D arrays.

    Bray-Curtis distance is defined as

    .. math::

       \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}

    The Bray-Curtis distance is in the range [0, 1] if all coordinates are
    positive, and is undefined if the inputs are of length zero.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
    0.33333333333333331
    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    numer = abs(u - v)
    denom = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numer = w * numer
        denom = w * denom
    return numer.sum() / denom.sum()
def canberra(u, v, w=None):
    """
    Compute the Canberra distance between two 1-D arrays.

    The Canberra distance is defined as

    .. math::

         d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                              {|u_i|+|v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    canberra : double
        The Canberra distance between vectors `u` and `v`.

    Notes
    -----
    When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
    used in the calculation.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.canberra([1, 1, 0], [0, 1, 0])
    1.0
    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    if w is not None:
        w = _validate_weights(w)
    # np.errstate replaces the manual seterr/try/finally dance: invalid
    # (0/0 -> nan) warnings are suppressed inside the block and the previous
    # error state is restored on exit, even if an exception is raised.
    with np.errstate(invalid='ignore'):
        abs_uv = abs(u - v)
        abs_u = abs(u)
        abs_v = abs(v)
        d = abs_uv / (abs_u + abs_v)
        if w is not None:
            d = w * d
        # nansum implements the 0/0 == 0 convention documented above.
        d = np.nansum(d)
    return d
def jensenshannon(p, q, base=None):
"""
Compute the Jensen-Shannon distance (metric) between
two 1-D probability arrays. This is a the square root
of the Jensen-Shannon divergence.
The Jensen-Shannon distance between two probability
vectors `p` and `q` is defined as,
.. math::
\\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}
where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
and :math:`D` is the Kullback-Leibler divergence.
This routine will normalize `p` and `q` if they don't sum to 1.0.
Parameters
----------
p : (N,) array_like
left probability vector
q : (N,) array_like
right probability vector
base : double, optional
the base of the logarithm used to compute the output
if not given, then the routine uses the default base of
scipy.stats.entropy.
Returns
-------
js : double
The Jensen-Shannon distance between `p` and `q`
Examples
--------
>>> from scipy.spatial import distance
>>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
0.8325546111576977
>>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
0.46450140402245893
>>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
0.0
"""
p = np.asarray(p)
q = np.asarray(q)
p = p / np.sum(p, axis=0)
q = q / np.sum(q, axis=0)
m = (p + q) / 2.0
left = rel_entr(p, m)
right = rel_entr(q, m)
js = np.sum(left, axis=0) + np.sum(right, axis=0)
if base is not None:
js /= np.log(base)
return np.sqrt(js / 2.0)
def yule(u, v, w=None):
    """
    Compute the Yule dissimilarity between two boolean 1-D arrays.

    The Yule dissimilarity is defined as

    .. math::

         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.yule([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.yule([1, 1, 0], [0, 1, 0])
    0.0
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    # Counts of FF, FT, TF and TT agreements between the two vectors.
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    return float(2.0 * ntf * nft / np.array(ntt * nff + ntf * nft))
@np.deprecate(message="spatial.distance.matching is deprecated in scipy 1.0.0; "
                      "use spatial.distance.hamming instead.")
def matching(u, v, w=None):
    """
    Compute the Hamming distance between two boolean 1-D arrays.

    This is a deprecated synonym for :func:`hamming`; it forwards all
    arguments unchanged.
    """
    return hamming(u, v, w)
def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) ndarray, bool
        Input 1-D array.
    v : (N,) ndarray, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        # Pure boolean fast path.
        ntt = (u & v).sum()
    else:
        # Promote to a common integer-compatible dtype before multiplying.
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        ntt = (u * v).sum() if w is None else (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
def rogerstanimoto(u, v, w=None):
    """
    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v`, is defined as

    .. math::
       \\frac{R}
            {c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors
        `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    R = 2.0 * (ntf + nft)
    return float(R) / float(ntt + nff + R)
def russellrao(u, v, w=None):
    """
    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.

    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
    `v`, is defined as

    .. math::

      \\frac{n - c_{TT}}
           {n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.russellrao([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.russellrao([1, 0, 0], [1, 1, 0])
    0.6666666666666666
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is None:
        n = float(len(u))
        # Boolean fast path avoids the multiply.
        ntt = (u & v).sum() if u.dtype == v.dtype == bool else (u * v).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        n = w.sum()
    return float(n - ntt) / n
def sokalmichener(u, v, w=None):
    """
    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
    0.5
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
        nff = (~u & ~v).sum()
    elif w is None:
        ntt = (u * v).sum()
        nff = ((1.0 - u) * (1.0 - v)).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        nff = ((1.0 - u) * (1.0 - v) * w).sum()
    # BUGFIX: forward the weights here too. Previously _nbool_correspond_ft_tf
    # was called without w, so ntf/nft were unweighted while ntt/nff were
    # weighted — inconsistent with dice() and sokalsneath() in this module.
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v, w=None):
    """
    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If both vectors are entirely false.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
    0.66666666666666663
    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
    elif w is None:
        ntt = (u * v).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    R = 2.0 * (ntf + nft)
    denom = np.array(ntt + R)
    if not denom.any():
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(R) / denom
# dtype coercers used by pdist/cdist to normalize inputs for the metric's
# underlying implementation.
_convert_to_double = partial(_convert_to_type, out_type=np.double)
_convert_to_bool = partial(_convert_to_type, out_type=bool)
# adding python-only wrappers to _distance_wrap module
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
# Registry of implemented metrics:
# Dictionary with the following structure:
# {
#  metric_name : MetricInfo(aka, types=[double], validator=None)
# }
#
# Where:
# `metric_name` must be equal to python metric name
#
# MetricInfo is a named tuple with fields:
#  'aka' : [list of aliases],
#
#  'validator': f(X, m, n, **kwargs)    # function that check kwargs and
#                                       # computes default values.
#
#  'types': [list of supported types],  # X (pdist) and XA (cdist) are used to
#                                       # choose the type. if there is no match
#                                       # the first type is used. Default double
# }
MetricInfo = namedtuple("MetricInfo", 'aka types validator ')
# Only `aka` is required; `types` defaults to ['double'] and `validator`
# to None (set via __defaults__, which applies to the last two fields).
MetricInfo.__new__.__defaults__ = (['double'], None)
# Canonical metric name -> MetricInfo (aliases, accepted dtypes, optional
# kwargs validator). See the registry description above MetricInfo.
_METRICS = {
    'braycurtis': MetricInfo(aka=['braycurtis']),
    'canberra': MetricInfo(aka=['canberra']),
    'chebyshev': MetricInfo(aka=['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']),
    'cityblock': MetricInfo(aka=['cityblock', 'cblock', 'cb', 'c']),
    'correlation': MetricInfo(aka=['correlation', 'co']),
    'cosine': MetricInfo(aka=['cosine', 'cos']),
    'dice': MetricInfo(aka=['dice'], types=['bool']),
    'euclidean': MetricInfo(aka=['euclidean', 'euclid', 'eu', 'e']),
    'hamming': MetricInfo(aka=['matching', 'hamming', 'hamm', 'ha', 'h'],
                          types=['double', 'bool']),
    'jaccard': MetricInfo(aka=['jaccard', 'jacc', 'ja', 'j'],
                          types=['double', 'bool']),
    'jensenshannon': MetricInfo(aka=['jensenshannon', 'js'],
                                types=['double']),
    'kulsinski': MetricInfo(aka=['kulsinski'], types=['bool']),
    'mahalanobis': MetricInfo(aka=['mahalanobis', 'mahal', 'mah'],
                              validator=_validate_mahalanobis_kwargs),
    'minkowski': MetricInfo(aka=['minkowski', 'mi', 'm', 'pnorm'],
                            validator=_validate_minkowski_kwargs),
    'rogerstanimoto': MetricInfo(aka=['rogerstanimoto'], types=['bool']),
    'russellrao': MetricInfo(aka=['russellrao'], types=['bool']),
    'seuclidean': MetricInfo(aka=['seuclidean', 'se', 's'],
                             validator=_validate_seuclidean_kwargs),
    'sokalmichener': MetricInfo(aka=['sokalmichener'], types=['bool']),
    'sokalsneath': MetricInfo(aka=['sokalsneath'], types=['bool']),
    'sqeuclidean': MetricInfo(aka=['sqeuclidean', 'sqe', 'sqeuclid']),
    'wminkowski': MetricInfo(aka=['wminkowski', 'wmi', 'wm', 'wpnorm'],
                             validator=_validate_wminkowski_kwargs),
    'yule': MetricInfo(aka=['yule'], types=['bool']),
}
# Map every alias (e.g. 'euclid', 'e') back to its canonical metric name.
# Dict comprehension replaces the older dict(genexpr) idiom (same result).
_METRIC_ALIAS = {alias: name
                 for name, info in _METRICS.items()
                 for alias in info.aka}
# Iterating a dict yields its keys directly; .keys() was redundant.
_METRICS_NAMES = list(_METRICS)
# 'test_<name>' maps to the pure-Python implementation of each metric.
_TEST_METRICS = {'test_' + name: globals()[name] for name in _METRICS}
def _select_weighted_metric(mstr, kwargs, out):
    """Reroute a weighted metric call to its pure-Python implementation.

    Returns a (metric-name, kwargs) pair. When weights are supplied for a
    metric whose C version cannot handle them, the name is rewritten to its
    'test_' variant and `out` is threaded through kwargs. Raises ValueError
    for metrics that are incompatible with weights altogether.
    """
    kwargs = dict(kwargs)
    # Passing w=None explicitly is equivalent to not passing w at all.
    if "w" in kwargs and kwargs["w"] is None:
        del kwargs["w"]
    handles_weights = (mstr.startswith("test_")
                       or mstr in _METRICS['wminkowski'].aka)
    if not handles_weights and "w" in kwargs:
        if (mstr in _METRICS['seuclidean'].aka or
                mstr in _METRICS['mahalanobis'].aka):
            raise ValueError("metric %s incompatible with weights" % mstr)
        # XXX: C-versions do not support weights
        # need to use python version for weighting
        kwargs['out'] = out
        mstr = "test_%s" % mstr
    return mstr, kwargs
def pdist(X, metric='euclidean', *args, **kwargs):
"""
Pairwise distances between observations in n-dimensional space.
See Notes for common calling conventions.
Parameters
----------
X : ndarray
An m by n array of m original observations in an
n-dimensional space.
metric : str or function, optional
The distance metric to use. The distance function can
be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',
'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
*args : tuple. Deprecated.
Additional arguments should be passed as keyword arguments
**kwargs : dict, optional
Extra arguments to `metric`: refer to each metric documentation for a
list of all possible arguments.
Some possible arguments:
p : scalar
The p-norm to apply for Minkowski, weighted and unweighted.
Default: 2.
w : ndarray
The weight vector for metrics that support weights (e.g., Minkowski).
V : ndarray
The variance vector for standardized Euclidean.
Default: var(X, axis=0, ddof=1)
VI : ndarray
The inverse of the covariance matrix for Mahalanobis.
Default: inv(cov(X.T)).T
out : ndarray.
The output array
If not None, condensed distance matrix Y is stored in this array.
Note: metric independent, it will become a regular keyword arg in a
future scipy version
Returns
-------
Y : ndarray
Returns a condensed distance matrix Y. For
each :math:`i` and :math:`j` (where :math:`i<j<m`),where m is the number
of original observations. The metric ``dist(u=X[i], v=X[j])``
is computed and stored in entry ``ij``.
See Also
--------
squareform : converts between condensed distance matrices and
square distance matrices.
Notes
-----
See ``squareform`` for information on how to calculate the index of
this entry or to convert the condensed distance matrix to a
redundant square matrix.
The following are common calling conventions.
1. ``Y = pdist(X, 'euclidean')``
Computes the distance between m points using Euclidean distance
(2-norm) as the distance metric between the points. The points
are arranged as m n-dimensional row vectors in the matrix X.
2. ``Y = pdist(X, 'minkowski', p=2.)``
Computes the distances using the Minkowski distance
:math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.
3. ``Y = pdist(X, 'cityblock')``
Computes the city block or Manhattan distance between the
points.
4. ``Y = pdist(X, 'seuclidean', V=None)``
Computes the standardized Euclidean distance. The standardized
Euclidean distance between two n-vectors ``u`` and ``v`` is
.. math::
\\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}
V is the variance vector; V[i] is the variance computed over all
the i'th components of the points. If not passed, it is
automatically computed.
5. ``Y = pdist(X, 'sqeuclidean')``
Computes the squared Euclidean distance :math:`||u-v||_2^2` between
the vectors.
6. ``Y = pdist(X, 'cosine')``
Computes the cosine distance between vectors u and v,
.. math::
1 - \\frac{u \\cdot v}
{{||u||}_2 {||v||}_2}
where :math:`||*||_2` is the 2-norm of its argument ``*``, and
:math:`u \\cdot v` is the dot product of ``u`` and ``v``.
7. ``Y = pdist(X, 'correlation')``
Computes the correlation distance between vectors u and v. This is
.. math::
1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
{{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}
where :math:`\\bar{v}` is the mean of the elements of vector v,
and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.
8. ``Y = pdist(X, 'hamming')``
Computes the normalized Hamming distance, or the proportion of
those vector elements between two n-vectors ``u`` and ``v``
which disagree. To save memory, the matrix ``X`` can be of type
boolean.
9. ``Y = pdist(X, 'jaccard')``
Computes the Jaccard distance between the points. Given two
vectors, ``u`` and ``v``, the Jaccard distance is the
proportion of those elements ``u[i]`` and ``v[i]`` that
disagree.
10. ``Y = pdist(X, 'chebyshev')``
Computes the Chebyshev distance between the points. The
Chebyshev distance between two n-vectors ``u`` and ``v`` is the
maximum norm-1 distance between their respective elements. More
precisely, the distance is given by
.. math::
d(u,v) = \\max_i {|u_i-v_i|}
11. ``Y = pdist(X, 'canberra')``
Computes the Canberra distance between the points. The
Canberra distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\sum_i \\frac{|u_i-v_i|}
{|u_i|+|v_i|}
12. ``Y = pdist(X, 'braycurtis')``
Computes the Bray-Curtis distance between the points. The
Bray-Curtis distance between two points ``u`` and ``v`` is
.. math::
d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
{\\sum_i {|u_i+v_i|}}
13. ``Y = pdist(X, 'mahalanobis', VI=None)``
Computes the Mahalanobis distance between the points. The
Mahalanobis distance between two points ``u`` and ``v`` is
:math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
variable) is the inverse covariance. If ``VI`` is not None,
``VI`` will be used as the inverse covariance matrix.
14. ``Y = pdist(X, 'yule')``
Computes the Yule distance between each pair of boolean
vectors. (see yule function documentation)
15. ``Y = pdist(X, 'matching')``
Synonym for 'hamming'.
16. ``Y = pdist(X, 'dice')``
Computes the Dice distance between each pair of boolean
vectors. (see dice function documentation)
17. ``Y = pdist(X, 'kulsinski')``
Computes the Kulsinski distance between each pair of
boolean vectors. (see kulsinski function documentation)
18. ``Y = pdist(X, 'rogerstanimoto')``
Computes the Rogers-Tanimoto distance between each pair of
boolean vectors. (see rogerstanimoto function documentation)
19. ``Y = pdist(X, 'russellrao')``
Computes the Russell-Rao distance between each pair of
boolean vectors. (see russellrao function documentation)
20. ``Y = pdist(X, 'sokalmichener')``
Computes the Sokal-Michener distance between each pair of
boolean vectors. (see sokalmichener function documentation)
21. ``Y = pdist(X, 'sokalsneath')``
Computes the Sokal-Sneath distance between each pair of
boolean vectors. (see sokalsneath function documentation)
22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``
Computes the weighted Minkowski distance between each pair of
vectors. (see wminkowski function documentation)
23. ``Y = pdist(X, f)``
Computes the distance between all pairs of vectors in X
using the user supplied 2-arity function f. For example,
Euclidean distance between the vectors could be computed
as follows::
dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))
Note that you should avoid passing a reference to one of
the distance functions defined in this library. For example,::
dm = pdist(X, sokalsneath)
would calculate the pair-wise distances between the vectors in
X using the Python function sokalsneath. This would result in
sokalsneath being called :math:`{n \\choose 2}` times, which
is inefficient. Instead, the optimized C version is more
efficient, and we call it using the following syntax.::
dm = pdist(X, 'sokalsneath')
"""
# You can also call this as:
# Y = pdist(X, 'test_abc')
# where 'abc' is the metric being tested. This computes the distance
# between all pairs of vectors in X using the distance metric 'abc' but
# with a more succinct, verifiable, but less efficient implementation.
X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
check_finite=False)
kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
X = np.asarray(X, order='c')
s = X.shape
if len(s) != 2:
raise ValueError('A 2-dimensional array must be passed.')
m, n = s
out = kwargs.pop("out", None)
if out is None:
dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
else:
if out.shape != (m * (m - 1) // 2,):
raise ValueError("output array has incorrect shape.")
if not out.flags.c_contiguous:
raise ValueError("Output array must be C-contiguous.")
if out.dtype != np.double:
raise ValueError("Output array must be double type.")
dm = out
# compute blacklist for deprecated kwargs
if(metric in _METRICS['jensenshannon'].aka
or metric == 'test_jensenshannon' or metric == jensenshannon):
kwargs_blacklist = ["p", "w", "V", "VI"]
elif(metric in _METRICS['minkowski'].aka
or metric in _METRICS['wminkowski'].aka
or metric in ['test_minkowski', 'test_wminkowski']
or metric in [minkowski, wminkowski]):
kwargs_blacklist = ["V", "VI"]
elif(metric in _METRICS['seuclidean'].aka or
metric == 'test_seuclidean' or metric == seuclidean):
kwargs_blacklist = ["p", "w", "VI"]
elif(metric in _METRICS['mahalanobis'].aka
or metric == 'test_mahalanobis' or metric == mahalanobis):
kwargs_blacklist = ["p", "w", "V"]
else:
kwargs_blacklist = ["p", "V", "VI"]
_filter_deprecated_kwargs(kwargs, kwargs_blacklist)
if callable(metric):
mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
k = 0
for i in xrange(0, m - 1):
for j in xrange(i + 1, m):
dm[k] = metric(X[i], X[j], **kwargs)
k = k + 1
elif isinstance(metric, string_types):
mstr = metric.lower()
mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
metric_name = _METRIC_ALIAS.get(mstr, None)
if metric_name is not None:
X, typ, kwargs = _validate_pdist_input(X, m, n,
metric_name, **kwargs)
# get pdist wrapper
pdist_fn = getattr(_distance_wrap,
"pdist_%s_%s_wrap" % (metric_name, typ))
pdist_fn(X, dm, **kwargs)
return dm
elif mstr in ['old_cosine', 'old_cos']:
warnings.warn('"old_cosine" is deprecated and will be removed in '
'a future version. Use "cosine" instead.',
DeprecationWarning)
X = _convert_to_double(X)
norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
np.sqrt(norms, out=norms)
nV = norms.reshape(m, 1)
# The numerator u * v
nm = np.dot(X, X.T)
# The denom. ||u||*||v||
de = np.dot(nV, nV.T)
dm = 1.0 - (nm / de)
dm[xrange(0, m), xrange(0, m)] = 0.0
dm = squareform(dm)
elif mstr.startswith("test_"):
if mstr in _TEST_METRICS:
dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
else:
raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
else:
raise ValueError('Unknown Distance Metric: %s' % mstr)
else:
raise TypeError('2nd argument metric must be a string identifier '
'or a function.')
return dm
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : ndarray
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored anyway so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. v = squareform(X)

       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
       :math:`{n \\choose 2}`) sized vector v.

       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
       between points i and j. If X is non-square or asymmetric, an error
       is returned.

    2. X = squareform(v)

       Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
       distances as described, ``X = squareform(v)`` returns a d by d distance
       matrix X. The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
       diagonal elements are zero.

    In Scipy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.
    """
    # A contiguous buffer is required by the C conversion routines below.
    X = np.ascontiguousarray(X)

    s = X.shape

    # Only the dimensionality is forced; the actual conversion direction
    # is still decided by len(s) below.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")

    # X = squareform(v)
    if len(s) == 1:
        if s[0] == 0:
            return np.zeros((1, 1), dtype=X.dtype)

        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))

        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')

        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)

        # Return the distance matrix.
        return M
    elif len(s) == 2:
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')

        # One-side of the dimensions is set here.
        d = s[0]

        if d <= 1:
            return np.array([], dtype=X.dtype)

        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)

        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)

        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : ndarray
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.
    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks: perfect symmetry and an exactly-zero diagonal.
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (np.diagonal(D) == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerant checks: entries may differ from their transpose, and
            # the diagonal may differ from zero, by at most `tol`.
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    raise ValueError('Distance matrix must be symmetric within'
                                     ' tolerance %5.5f.' % tol)
            if not (np.diagonal(D) <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
                                      ' close to zero within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    # Bug fix: this branch previously reused the named format
                    # string ('%s' + '%5.5f') with only `tol` supplied, which
                    # raised TypeError instead of the intended ValueError.
                    raise ValueError('Distance matrix diagonal must be close '
                                     'to zero within tolerance %5.5f.' % tol)
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Return True if the input array is a valid condensed distance matrix.

    Condensed distance matrices must be 1-dimensional numpy arrays.
    Their length must be a binomial coefficient :math:`{n \\choose 2}`
    for some positive integer n.

    Parameters
    ----------
    y : ndarray
        The condensed distance matrix.
    warning : bool, optional
        Invokes a warning if the variable passed is not a valid
        condensed distance matrix. The warning message explains why
        the distance matrix is not valid. `name` is used when
        referencing the offending variable.
    throw : bool, optional
        Throws an exception if the variable passed is not a valid
        condensed distance matrix.
    name : str, optional
        Used when referencing the offending variable in the
        warning or exception message.

    Returns
    -------
    valid : bool
        True if the input array is a valid condensed distance matrix.
    """
    y = np.asarray(y, order='c')
    valid = True
    try:
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).')
                                 % name)
            else:
                raise ValueError('Condensed distance matrix must have shape=1 '
                                 '(i.e. be one-dimensional).')
        n = y.shape[0]
        # n must equal d*(d-1)/2 for some integer d >= 2.
        d = int(np.ceil(np.sqrt(n * 2)))
        if (d * (d - 1) / 2) != n:
            if name:
                # Bug fix: the concatenated literals previously produced
                # "i.e.there" (missing space) in this message.
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, '
                                  'i.e. there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def num_obs_dm(d):
    """
    Return the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : ndarray
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.
    """
    mat = np.asarray(d, order='c')
    # tol=np.inf: only the 2-D square shape is enforced here; symmetry and
    # the zero diagonal are irrelevant for counting observations.
    is_valid_dm(mat, tol=np.inf, throw=True, name='d')
    return mat.shape[0]
def num_obs_y(Y):
    """
    Return the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : ndarray
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.
    """
    Y = np.asarray(Y, order='c')
    is_valid_y(Y, throw=True, name='Y')
    num_entries = Y.shape[0]
    if num_entries == 0:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert k = n*(n-1)/2 to recover the observation count n.
    n_obs = int(np.ceil(np.sqrt(num_entries * 2)))
    if n_obs * (n_obs - 1) / 2 != num_entries:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return n_obs
def cdist(XA, XB, metric='euclidean', *args, **kwargs):
    """
    Compute distance between each pair of the two collections of inputs.

    See Notes for common calling conventions.

    Parameters
    ----------
    XA : ndarray
        An :math:`m_A` by :math:`n` array of :math:`m_A`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    XB : ndarray
        An :math:`m_B` by :math:`n` array of :math:`m_B`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    metric : str or callable, optional
        The distance metric to use. If a string, the distance function can be
        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
        'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',
        'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
        'sqeuclidean', 'wminkowski', 'yule'.
    *args : tuple. Deprecated.
        Additional arguments should be passed as keyword arguments
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
            The p-norm to apply for Minkowski, weighted and unweighted.
            Default: 2.

        w : ndarray
            The weight vector for metrics that support weights
            (e.g., Minkowski).

        V : ndarray
            The variance vector for standardized Euclidean.
            Default: var(vstack([XA, XB]), axis=0, ddof=1)

        VI : ndarray
            The inverse of the covariance matrix for Mahalanobis.
            Default: inv(cov(vstack([XA, XB].T))).T

        out : ndarray
            The output array
            If not None, the distance matrix Y is stored in this array.
            Note: metric independent, it will become a regular keyword arg in a
            future scipy version

    Returns
    -------
    Y : ndarray
        A :math:`m_A` by :math:`m_B` distance matrix is returned.
        For each :math:`i` and :math:`j`, the metric
        ``dist(u=XA[i], v=XB[j])`` is computed and stored in the
        :math:`ij` th entry.

    Raises
    ------
    ValueError
        An exception is thrown if `XA` and `XB` do not have
        the same number of columns.

    Notes
    -----
    The following are common calling conventions:

    1. ``Y = cdist(XA, XB, 'euclidean')``

       Computes the distance between :math:`m` points using
       Euclidean distance (2-norm) as the distance metric between the
       points. The points are arranged as :math:`m`
       :math:`n`-dimensional row vectors in the matrix X.

    2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.

    3. ``Y = cdist(XA, XB, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.

       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = cdist(XA, XB, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`||u-v||_2^2` between
       the vectors.

    6. ``Y = cdist(XA, XB, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{||u||}_2 {||v||}_2}

       where :math:`||*||_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    7. ``Y = cdist(XA, XB, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    8. ``Y = cdist(XA, XB, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = cdist(XA, XB, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree where at least one of them is non-zero.

    10. ``Y = cdist(XA, XB, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}.

    11. ``Y = cdist(XA, XB, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                                 {|u_i|+|v_i|}.

    12. ``Y = cdist(XA, XB, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
                          {\\sum_i (|u_i+v_i|)}

    13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    14. ``Y = cdist(XA, XB, 'yule')``

        Computes the Yule distance between the boolean
        vectors. (see `yule` function documentation)

    15. ``Y = cdist(XA, XB, 'matching')``

        Synonym for 'hamming'.

    16. ``Y = cdist(XA, XB, 'dice')``

        Computes the Dice distance between the boolean vectors. (see
        `dice` function documentation)

    17. ``Y = cdist(XA, XB, 'kulsinski')``

        Computes the Kulsinski distance between the boolean
        vectors. (see `kulsinski` function documentation)

    18. ``Y = cdist(XA, XB, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between the boolean
        vectors. (see `rogerstanimoto` function documentation)

    19. ``Y = cdist(XA, XB, 'russellrao')``

        Computes the Russell-Rao distance between the boolean
        vectors. (see `russellrao` function documentation)

    20. ``Y = cdist(XA, XB, 'sokalmichener')``

        Computes the Sokal-Michener distance between the boolean
        vectors. (see `sokalmichener` function documentation)

    21. ``Y = cdist(XA, XB, 'sokalsneath')``

        Computes the Sokal-Sneath distance between the vectors. (see
        `sokalsneath` function documentation)

    22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)``

        Computes the weighted Minkowski distance between the
        vectors. (see `wminkowski` function documentation)

    23. ``Y = cdist(XA, XB, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = cdist(XA, XB, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function `sokalsneath`. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax::

          dm = cdist(XA, XB, 'sokalsneath')

    Examples
    --------
    Find the Euclidean distances between four 2-D coordinates:

    >>> from scipy.spatial import distance
    >>> coords = [(35.0456, -85.2672),
    ...           (35.1174, -89.9711),
    ...           (35.9728, -83.9422),
    ...           (36.1667, -86.7833)]
    >>> distance.cdist(coords, coords, 'euclidean')
    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
           [ 4.7044,  0.    ,  6.0893,  3.3561],
           [ 1.6172,  6.0893,  0.    ,  2.8477],
           [ 1.8856,  3.3561,  2.8477,  0.    ]])

    Find the Manhattan distance from a 3-D point to the corners of the unit
    cube:

    >>> a = np.array([[0, 0, 0],
    ...               [0, 0, 1],
    ...               [0, 1, 0],
    ...               [0, 1, 1],
    ...               [1, 0, 0],
    ...               [1, 0, 1],
    ...               [1, 1, 0],
    ...               [1, 1, 1]])
    >>> b = np.array([[ 0.1,  0.2,  0.4]])
    >>> distance.cdist(a, b, 'cityblock')
    array([[ 0.7],
           [ 0.9],
           [ 1.3],
           [ 1.5],
           [ 1.5],
           [ 1.7],
           [ 2.1],
           [ 2.3]])
    """
    # You can also call this as:
    #     Y = cdist(XA, XB, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in XA and XB using the distance metric 'abc'
    # but with a more succinct, verifiable, but less efficient implementation.
    kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "cdist")

    XA = np.asarray(XA, order='c')
    XB = np.asarray(XB, order='c')

    s = XA.shape
    sB = XB.shape

    if len(s) != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if len(sB) != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    if s[1] != sB[1]:
        raise ValueError('XA and XB must have the same number of columns '
                         '(i.e. feature dimension.)')

    mA = s[0]
    mB = sB[0]
    n = s[1]

    out = kwargs.pop("out", None)
    if out is None:
        dm = np.empty((mA, mB), dtype=np.double)
    else:
        if out.shape != (mA, mB):
            raise ValueError("Output array has incorrect shape.")
        if not out.flags.c_contiguous:
            raise ValueError("Output array must be C-contiguous.")
        if out.dtype != np.double:
            raise ValueError("Output array must be double type.")
        dm = out

    # compute blacklist for deprecated kwargs
    # The jensenshannon branch mirrors pdist: none of the legacy positional
    # kwargs apply to it, so all four are filtered.
    if(metric in _METRICS['jensenshannon'].aka
       or metric == 'test_jensenshannon' or metric == jensenshannon):
        kwargs_blacklist = ["p", "w", "V", "VI"]
    elif(metric in _METRICS['minkowski'].aka or
         metric in _METRICS['wminkowski'].aka or
         metric in ['test_minkowski', 'test_wminkowski'] or
         metric in [minkowski, wminkowski]):
        kwargs_blacklist = ["V", "VI"]
    elif(metric in _METRICS['seuclidean'].aka or
         metric == 'test_seuclidean' or metric == seuclidean):
        kwargs_blacklist = ["p", "w", "VI"]
    elif(metric in _METRICS['mahalanobis'].aka or
         metric == 'test_mahalanobis' or metric == mahalanobis):
        kwargs_blacklist = ["p", "w", "V"]
    else:
        kwargs_blacklist = ["p", "V", "VI"]
    _filter_deprecated_kwargs(kwargs, kwargs_blacklist)

    if callable(metric):
        # Slow path: call the user metric once per (i, j) pair.
        mstr = getattr(metric, '__name__', 'Unknown')
        metric_name = _METRIC_ALIAS.get(mstr, None)
        XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
                                                    metric_name, **kwargs)
        for i in xrange(0, mA):
            for j in xrange(0, mB):
                dm[i, j] = metric(XA[i], XB[j], **kwargs)
    elif isinstance(metric, string_types):
        mstr = metric.lower()
        mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
        metric_name = _METRIC_ALIAS.get(mstr, None)
        if metric_name is not None:
            XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
                                                        metric_name, **kwargs)
            # get cdist wrapper (optimized C implementation)
            cdist_fn = getattr(_distance_wrap,
                               "cdist_%s_%s_wrap" % (metric_name, typ))
            cdist_fn(XA, XB, dm, **kwargs)
            return dm
        elif mstr.startswith("test_"):
            if mstr in _TEST_METRICS:
                dm = cdist(XA, XB, _TEST_METRICS[mstr], **kwargs)
            else:
                raise ValueError('Unknown "Test" Distance Metric: %s' % mstr[5:])
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
    return dm
whoops
"""
=====================================================
Distance computations (:mod:`scipy.spatial.distance`)
=====================================================
.. sectionauthor:: Damian Eads
Function Reference
------------------
Distance matrix computation from a collection of raw observation vectors
stored in a rectangular array.
.. autosummary::
:toctree: generated/
pdist -- pairwise distances between observation vectors.
cdist -- distances between two collections of observation vectors
squareform -- convert distance matrix to a condensed one and vice versa
directed_hausdorff -- directed Hausdorff distance between arrays
Predicates for checking the validity of distance matrices, both
condensed and redundant. Also contained in this module are functions
for computing the number of observations in a distance matrix.
.. autosummary::
:toctree: generated/
is_valid_dm -- checks for a valid distance matrix
is_valid_y -- checks for a valid condensed distance matrix
num_obs_dm -- # of observations in a distance matrix
num_obs_y -- # of observations in a condensed distance matrix
Distance functions between two numeric vectors ``u`` and ``v``. Computing
distances over a large collection of vectors is inefficient for these
functions. Use ``pdist`` for this purpose.
.. autosummary::
:toctree: generated/
braycurtis -- the Bray-Curtis distance.
canberra -- the Canberra distance.
chebyshev -- the Chebyshev distance.
cityblock -- the Manhattan distance.
correlation -- the Correlation distance.
cosine -- the Cosine distance.
euclidean -- the Euclidean distance.
jensenshannon -- the Jensen-Shannon distance.
mahalanobis -- the Mahalanobis distance.
minkowski -- the Minkowski distance.
seuclidean -- the normalized Euclidean distance.
sqeuclidean -- the squared Euclidean distance.
wminkowski -- (deprecated) alias of `minkowski`.
Distance functions between two boolean vectors (representing sets) ``u`` and
``v``. As in the case of numerical vectors, ``pdist`` is more efficient for
computing the distances between all pairs.
.. autosummary::
:toctree: generated/
dice -- the Dice dissimilarity.
hamming -- the Hamming distance.
jaccard -- the Jaccard distance.
kulsinski -- the Kulsinski distance.
rogerstanimoto -- the Rogers-Tanimoto dissimilarity.
russellrao -- the Russell-Rao dissimilarity.
sokalmichener -- the Sokal-Michener dissimilarity.
sokalsneath -- the Sokal-Sneath dissimilarity.
yule -- the Yule dissimilarity.
:func:`hamming` also operates over discrete numerical vectors.
"""
# Copyright (C) Damian Eads, 2007-2008. New BSD License.
from __future__ import division, print_function, absolute_import
# Public names exported by ``from scipy.spatial.distance import *``.
__all__ = [
    'braycurtis',
    'canberra',
    'cdist',
    'chebyshev',
    'cityblock',
    'correlation',
    'cosine',
    'dice',
    'directed_hausdorff',
    'euclidean',
    'hamming',
    'is_valid_dm',
    'is_valid_y',
    'jaccard',
    'jensenshannon',
    'kulsinski',
    'mahalanobis',
    'matching',
    'minkowski',
    'num_obs_dm',
    'num_obs_y',
    'pdist',
    'rogerstanimoto',
    'russellrao',
    'seuclidean',
    'sokalmichener',
    'sokalsneath',
    'sqeuclidean',
    'squareform',
    'wminkowski',
    'yule'
]
import warnings
import numpy as np
from functools import partial
from collections import namedtuple
from scipy._lib.six import callable, string_types
from scipy._lib.six import xrange
from scipy._lib._util import _asarray_validated
from . import _distance_wrap
from . import _hausdorff
from ..linalg import norm
from ..special import rel_entr
def _args_to_kwargs_xdist(args, kwargs, metric, func_name):
"""
Convert legacy positional arguments to keyword arguments for pdist/cdist.
"""
if not args:
return kwargs
if (callable(metric) and metric not in [
braycurtis, canberra, chebyshev, cityblock, correlation, cosine,
dice, euclidean, hamming, jaccard, jensenshannon, kulsinski,
mahalanobis, matching, minkowski, rogerstanimoto, russellrao,
seuclidean, sokalmichener, sokalsneath, sqeuclidean, yule,
wminkowski]):
raise TypeError('When using a custom metric arguments must be passed'
'as keyword (i.e., ARGNAME=ARGVALUE)')
if func_name == 'pdist':
old_arg_names = ['p', 'w', 'V', 'VI']
else:
old_arg_names = ['p', 'V', 'VI', 'w']
num_args = len(args)
warnings.warn('%d metric parameters have been passed as positional.'
'This will raise an error in a future version.'
'Please pass arguments as keywords(i.e., ARGNAME=ARGVALUE)'
% num_args, DeprecationWarning)
if num_args > 4:
raise ValueError('Deprecated %s signature accepts only 4'
'positional arguments (%s), %d given.'
% (func_name, ', '.join(old_arg_names), num_args))
for old_arg, arg in zip(old_arg_names, args):
if old_arg in kwargs:
raise TypeError('%s() got multiple values for argument %s'
% (func_name, old_arg))
kwargs[old_arg] = arg
return kwargs
def _copy_array_if_base_present(a):
"""Copy the array if its base points to a parent array."""
if a.base is not None:
return a.copy()
return a
def _correlation_cdist_wrap(XA, XB, dm, **kwargs):
    """Fill ``dm`` with correlation distances between rows of XA and XB.

    Correlation distance is cosine distance of the mean-centered rows, so
    this delegates to the C cosine wrapper after centering.
    """
    centered_a = XA - XA.mean(axis=1, keepdims=True)
    centered_b = XB - XB.mean(axis=1, keepdims=True)
    _distance_wrap.cdist_cosine_double_wrap(centered_a, centered_b, dm,
                                            **kwargs)
def _correlation_pdist_wrap(X, dm, **kwargs):
    """Fill ``dm`` with pairwise correlation distances between rows of X.

    Delegates to the C cosine wrapper after mean-centering each row.
    """
    centered = X - X.mean(axis=1, keepdims=True)
    _distance_wrap.pdist_cosine_double_wrap(centered, dm, **kwargs)
def _convert_to_type(X, out_type):
return np.ascontiguousarray(X, dtype=out_type)
def _filter_deprecated_kwargs(kwargs, args_blacklist):
# Filtering out old default keywords
for k in args_blacklist:
if k in kwargs:
del kwargs[k]
warnings.warn('Got unexpected kwarg %s. This will raise an error'
' in a future version.' % k, DeprecationWarning)
def _nbool_correspond_all(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nff = (not_u & not_v).sum()
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
ntt = (u & v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nff = (not_u * not_v).sum()
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
ntt = (u * v).sum()
return (nff, nft, ntf, ntt)
def _nbool_correspond_ft_tf(u, v, w=None):
if u.dtype == v.dtype == bool and w is None:
not_u = ~u
not_v = ~v
nft = (not_u & v).sum()
ntf = (u & not_v).sum()
else:
dtype = np.find_common_type([int], [u.dtype, v.dtype])
u = u.astype(dtype)
v = v.astype(dtype)
not_u = 1.0 - u
not_v = 1.0 - v
if w is not None:
not_u = w * not_u
u = w * u
nft = (not_u * v).sum()
ntf = (u * not_v).sum()
return (nft, ntf)
def _validate_cdist_input(XA, XB, mA, mB, n, metric_name, **kwargs):
if metric_name is not None:
# get supported types
types = _METRICS[metric_name].types
# choose best type
typ = types[types.index(XA.dtype)] if XA.dtype in types else types[0]
# validate data
XA = _convert_to_type(XA, out_type=typ)
XB = _convert_to_type(XB, out_type=typ)
# validate kwargs
_validate_kwargs = _METRICS[metric_name].validator
if _validate_kwargs:
kwargs = _validate_kwargs(np.vstack([XA, XB]), mA + mB, n, **kwargs)
else:
typ = None
return XA, XB, typ, kwargs
def _validate_mahalanobis_kwargs(X, m, n, **kwargs):
    """Supply the inverse covariance ``VI`` when the caller omitted it."""
    VI = kwargs.pop('VI', None)
    if VI is None:
        if m <= n:
            # With m <= n observations in n dimensions the sample
            # covariance matrix is singular and cannot be inverted.
            raise ValueError("The number of observations (%d) is too "
                             "small; the covariance matrix is "
                             "singular. For observations with %d "
                             "dimensions, at least %d observations "
                             "are required." % (m, n, n + 1))
        sample_cov = np.atleast_2d(np.cov(X.astype(np.double).T))
        VI = np.linalg.inv(sample_cov).T.copy()
    kwargs["VI"] = _convert_to_double(VI)
    return kwargs
def _validate_minkowski_kwargs(X, m, n, **kwargs):
if 'p' not in kwargs:
kwargs['p'] = 2.
return kwargs
def _validate_pdist_input(X, m, n, metric_name, **kwargs):
if metric_name is not None:
# get supported types
types = _METRICS[metric_name].types
# choose best type
typ = types[types.index(X.dtype)] if X.dtype in types else types[0]
# validate data
X = _convert_to_type(X, out_type=typ)
# validate kwargs
_validate_kwargs = _METRICS[metric_name].validator
if _validate_kwargs:
kwargs = _validate_kwargs(X, m, n, **kwargs)
else:
typ = None
return X, typ, kwargs
def _validate_seuclidean_kwargs(X, m, n, **kwargs):
    """Check or compute the variance vector ``V`` for 'seuclidean'."""
    V = kwargs.pop('V', None)
    if V is None:
        # Default: per-component sample variance over the observations.
        V = np.var(X.astype(np.double), axis=0, ddof=1)
    else:
        V = np.asarray(V, order='c')
        if V.dtype != np.double:
            raise TypeError('Variance vector V must contain doubles.')
        if len(V.shape) != 1:
            raise ValueError('Variance vector V must '
                             'be one-dimensional.')
        if V.shape[0] != n:
            raise ValueError('Variance vector V must be of the same '
                             'dimension as the vectors on which the distances '
                             'are computed.')
    kwargs['V'] = _convert_to_double(V)
    return kwargs
def _validate_vector(u, dtype=None):
# XXX Is order='c' really necessary?
u = np.asarray(u, dtype=dtype, order='c').squeeze()
# Ensure values such as u=1 and u=[1] still return 1-D arrays.
u = np.atleast_1d(u)
if u.ndim > 1:
raise ValueError("Input vector should be 1-D.")
return u
def _validate_weights(w, dtype=np.double):
    """Validate a weight vector: 1-D, cast to ``dtype``, all non-negative."""
    w = _validate_vector(w, dtype=dtype)
    if (w < 0).any():
        raise ValueError("Input weights should be all non-negative")
    return w
def _validate_wminkowski_kwargs(X, m, n, **kwargs):
    """Require and validate the weight vector `w`; default order `p` to 2."""
    w = kwargs.pop('w', None)
    if w is None:
        # Unlike plain minkowski, the weighted variant has no default weights.
        raise ValueError('weighted minkowski requires a weight '
                         'vector `w` to be given.')
    kwargs['w'] = _validate_weights(w)
    kwargs.setdefault('p', 2.)
    return kwargs
def directed_hausdorff(u, v, seed=0):
    """
    Compute the directed Hausdorff distance between two N-D arrays.

    Pairwise point distances are measured with the Euclidean metric.

    Parameters
    ----------
    u : (M,N) ndarray
        Input array.
    v : (O,N) ndarray
        Input array.
    seed : int or None
        Local `np.random.RandomState` seed. Default is 0, a random shuffling of
        u and v that guarantees reproducibility.

    Returns
    -------
    d : double
        The directed Hausdorff distance between arrays `u` and `v`,
    index_1 : int
        index of point contributing to Hausdorff pair in `u`
    index_2 : int
        index of point contributing to Hausdorff pair in `v`

    Notes
    -----
    Uses the early break technique and the random sampling approach
    described by [1]_. Although worst-case performance is ``O(m * o)``
    (as with the brute force algorithm), this is unlikely in practice
    as the input data would have to require the algorithm to explore
    every single point interaction, and after the algorithm shuffles
    the input points at that. The best case performance is O(m), which
    is satisfied by selecting an inner loop distance that is less than
    cmax and leads to an early break as often as possible. The authors
    have formally shown that the average runtime is closer to O(m).

    .. versionadded:: 0.19.0

    References
    ----------
    .. [1] A. A. Taha and A. Hanbury, "An efficient algorithm for
           calculating the exact Hausdorff distance." IEEE Transactions On
           Pattern Analysis And Machine Intelligence, vol. 37 pp. 2153-63,
           2015.

    See Also
    --------
    scipy.spatial.procrustes : Another similarity test for two data sets

    Examples
    --------
    Find the directed Hausdorff distance between two 2-D arrays of
    coordinates:

    >>> from scipy.spatial.distance import directed_hausdorff
    >>> u = np.array([(1.0, 0.0),
    ...               (0.0, 1.0),
    ...               (-1.0, 0.0),
    ...               (0.0, -1.0)])
    >>> v = np.array([(2.0, 0.0),
    ...               (0.0, 2.0),
    ...               (-2.0, 0.0),
    ...               (0.0, -4.0)])
    >>> directed_hausdorff(u, v)[0]
    2.23606797749979
    >>> directed_hausdorff(v, u)[0]
    3.0

    Find the general (symmetric) Hausdorff distance between two 2-D
    arrays of coordinates:

    >>> max(directed_hausdorff(u, v)[0], directed_hausdorff(v, u)[0])
    3.0

    Find the indices of the points that generate the Hausdorff distance
    (the Hausdorff pair):

    >>> directed_hausdorff(v, u)[1:]
    (3, 3)
    """
    # The Cython kernel expects C-contiguous float64 arrays.
    u = np.asarray(u, dtype=np.float64, order='c')
    v = np.asarray(v, dtype=np.float64, order='c')
    return _hausdorff.directed_hausdorff(u, v, seed)
def minkowski(u, v, p=2, w=None):
    """
    Compute the Minkowski distance between two 1-D arrays.

    The Minkowski distance between 1-D arrays `u` and `v`,
    is defined as

    .. math::

       {||u-v||}_p = (\\sum{|u_i - v_i|^p})^{1/p}

    and, when a weight vector ``w`` is given, as

    .. math::

       \\left(\\sum{w_i(|(u_i - v_i)|^p)}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : int
        The order of the norm of the difference :math:`{||u-v||}_p`.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    minkowski : double
        The Minkowski distance between vectors `u` and `v`.

    Raises
    ------
    ValueError
        If ``p < 1``.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 1)
    2.0
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 2)
    1.4142135623730951
    >>> distance.minkowski([1, 0, 0], [0, 1, 0], 3)
    1.2599210498948732
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 1)
    1.0
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 2)
    1.0
    >>> distance.minkowski([1, 1, 0], [0, 1, 0], 3)
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if p < 1:
        raise ValueError("p must be at least 1")
    u_v = u - v
    if w is not None:
        w = _validate_weights(w)
        # Weights enter the sum as w_i * |d_i|^p, i.e. as w_i^(1/p) inside
        # the norm.  BUG FIX: the p == 1 branch was a plain `if` followed by
        # an `if p == 2 ... else ...`, so its assignment was dead and always
        # overwritten by np.power(w, 1.0); use a proper if/elif/else chain.
        if p == 1:
            root_w = w
        elif p == 2:
            # better precision and speed
            root_w = np.sqrt(w)
        else:
            root_w = np.power(w, 1/p)
        u_v = root_w * u_v
    dist = norm(u_v, ord=p)
    return dist
# `minkowski` gained weights in scipy 1.0. Once we're at say version 1.3,
# deprecate `wminkowski`. Not done at once because it would be annoying for
# downstream libraries that used `wminkowski` and support multiple scipy
# versions.
def wminkowski(u, v, p, w):
    """
    Compute the weighted Minkowski distance between two 1-D arrays.

    The weighted Minkowski distance between `u` and `v`, defined as

    .. math::

       \\left(\\sum{(|w_i (u_i - v_i)|^p)}\\right)^{1/p}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    p : int
        The order of the norm of the difference :math:`{||u-v||}_p`.
    w : (N,) array_like
        The weight vector.

    Returns
    -------
    wminkowski : double
        The weighted Minkowski distance between vectors `u` and `v`.

    Notes
    -----
    `wminkowski` is DEPRECATED. It implements a definition where weights
    are powered. It is recommended to use the weighted version of `minkowski`
    instead. This function will be removed in a future version of scipy.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 1, np.ones(3))
    2.0
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 2, np.ones(3))
    1.4142135623730951
    >>> distance.wminkowski([1, 0, 0], [0, 1, 0], 3, np.ones(3))
    1.2599210498948732
    >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 1, np.ones(3))
    1.0
    >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 2, np.ones(3))
    1.0
    >>> distance.wminkowski([1, 1, 0], [0, 1, 0], 3, np.ones(3))
    1.0

    """
    w = _validate_weights(w)
    # |w_i * d_i|^p == (w_i^p) * |d_i|^p, so delegate to the new-style
    # weighted minkowski with the weights raised to the p-th power.
    return minkowski(u, v, p=p, w=w ** p)
def euclidean(u, v, w=None):
    """
    Computes the Euclidean distance between two 1-D arrays.

    The Euclidean distance between 1-D arrays `u` and `v`, is defined as

    .. math::

       {||u-v||}_2

       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)^{1/2}

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    euclidean : double
        The Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.euclidean([1, 0, 0], [0, 1, 0])
    1.4142135623730951
    >>> distance.euclidean([1, 1, 0], [0, 1, 0])
    1.0

    """
    # The Euclidean distance is just the p=2 Minkowski distance.
    return minkowski(u, v, p=2, w=w)
def sqeuclidean(u, v, w=None):
    """
    Compute the squared Euclidean distance between two 1-D arrays.

    The squared Euclidean distance between `u` and `v` is defined as

    .. math::

       {||u-v||}_2^2

       \\left(\\sum{(w_i |(u_i - v_i)|^2)}\\right)

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sqeuclidean : double
        The squared Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sqeuclidean([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.sqeuclidean([1, 1, 0], [0, 1, 0])
    1.0

    """
    def _stable_dtype(a):
        # Preserve an existing inexact (float/complex) dtype; convert
        # everything else to np.float64 for stability.
        if hasattr(a, "dtype") and np.issubdtype(a.dtype, np.inexact):
            return None
        return np.float64

    u = _validate_vector(u, dtype=_stable_dtype(u))
    v = _validate_vector(v, dtype=_stable_dtype(v))
    diff = u - v
    if w is None:
        weighted_diff = diff
    else:
        # Apply the weights to only one factor of the dot product so each
        # weight enters the sum exactly once.
        weighted_diff = _validate_weights(w) * diff
    return np.dot(diff, weighted_diff)
def correlation(u, v, w=None, centered=True):
    """
    Compute the correlation distance between two 1-D arrays.

    The correlation distance between `u` and `v`, is
    defined as

    .. math::

       1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                 {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}

    where :math:`\\bar{u}` is the mean of the elements of `u`
    and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    correlation : double
        The correlation distance between 1-D array `u` and `v`.

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if centered:
        # Subtract the (weighted) means; skipped by `cosine`, which calls
        # this with centered=False.
        u = u - np.average(u, weights=w)
        v = v - np.average(v, weights=w)
    uv = np.average(u * v, weights=w)
    uu = np.average(np.square(u), weights=w)
    vv = np.average(np.square(v), weights=w)
    return 1.0 - uv / np.sqrt(uu * vv)
def cosine(u, v, w=None):
    """
    Compute the Cosine distance between 1-D arrays.

    The Cosine distance between `u` and `v`, is defined as

    .. math::

       1 - \\frac{u \\cdot v}
                 {||u||_2 ||v||_2}.

    where :math:`u \\cdot v` is the dot product of :math:`u` and
    :math:`v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cosine : double
        The Cosine distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cosine([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([100, 0, 0], [0, 1, 0])
    1.0
    >>> distance.cosine([1, 1, 0], [0, 1, 0])
    0.29289321881345254

    """
    # Cosine distance is also known as 'uncentered correlation' or
    # 'reflective correlation', hence centered=False below.
    return correlation(u, v, w=w, centered=False)
def hamming(u, v, w=None):
    """
    Compute the Hamming distance between two 1-D arrays.

    The Hamming distance between 1-D arrays `u` and `v`, is simply the
    proportion of disagreeing components in `u` and `v`. If `u` and `v` are
    boolean vectors, the Hamming distance is

    .. math::

       \\frac{c_{01} + c_{10}}{n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    hamming : double
        The Hamming distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.hamming([1, 0, 0], [0, 1, 0])
    0.66666666666666663
    >>> distance.hamming([1, 0, 0], [1, 1, 0])
    0.33333333333333331
    >>> distance.hamming([1, 0, 0], [2, 0, 0])
    0.33333333333333331
    >>> distance.hamming([1, 0, 0], [3, 0, 0])
    0.33333333333333331

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.shape != v.shape:
        raise ValueError('The 1d arrays must have equal lengths.')
    if w is not None:
        w = _validate_weights(w)
    # Weighted fraction of positions where the vectors disagree.
    return np.average(u != v, weights=w)
def jaccard(u, v, w=None):
    """
    Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays.

    The Jaccard-Needham dissimilarity between 1-D boolean arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{c_{TF} + c_{FT}}
            {c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    jaccard : double
        The Jaccard distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jaccard([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.jaccard([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.jaccard([1, 0, 0], [1, 2, 0])
    0.5
    >>> distance.jaccard([1, 0, 0], [1, 1, 1])
    0.66666666666666663

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # Positions where at least one vector is nonzero (the "union"), and
    # positions in the union where the vectors disagree.
    either_nonzero = (u != 0) | (v != 0)
    disagree = (u != v) & either_nonzero
    if w is not None:
        w = _validate_weights(w)
        either_nonzero = w * either_nonzero
        disagree = w * disagree
    return np.double(disagree.sum()) / np.double(either_nonzero.sum())
def kulsinski(u, v, w=None):
    """
    Compute the Kulsinski dissimilarity between two boolean 1-D arrays.

    The Kulsinski dissimilarity between two boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

         \\frac{c_{TF} + c_{FT} - c_{TT} + n}
              {c_{FT} + c_{TF} + n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    kulsinski : double
        The Kulsinski distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.kulsinski([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.kulsinski([1, 0, 0], [1, 1, 0])
    0.75
    >>> distance.kulsinski([1, 0, 0], [2, 1, 0])
    0.33333333333333331
    >>> distance.kulsinski([1, 0, 0], [3, 1, 0])
    -0.5

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    # `n` is the vector length, or the total weight when weights are given.
    if w is None:
        n = float(len(u))
    else:
        w = _validate_weights(w)
        n = w.sum()
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    return (ntf + nft - ntt + n) / (ntf + nft + n)
def seuclidean(u, v, V):
    """
    Return the standardized Euclidean distance between two 1-D arrays.

    The standardized Euclidean distance between `u` and `v`.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    V : (N,) array_like
        `V` is an 1-D array of component variances. It is usually computed
        among a larger collection vectors.

    Returns
    -------
    seuclidean : double
        The standardized Euclidean distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [0.1, 0.1, 0.1])
    4.4721359549995796
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [1, 0.1, 0.1])
    3.3166247903553998
    >>> distance.seuclidean([1, 0, 0], [0, 1, 0], [10, 0.1, 0.1])
    3.1780497164141406

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    V = _validate_vector(V, dtype=np.float64)
    if not (V.shape[0] == u.shape[0] == v.shape[0]):
        raise TypeError('V must be a 1-D array of the same dimension '
                        'as u and v.')
    # Standardizing by the variances is a Euclidean distance with
    # per-component weights 1/V.
    return euclidean(u, v, w=1/V)
def cityblock(u, v, w=None):
    """
    Compute the City Block (Manhattan) distance.

    Computes the Manhattan distance between two 1-D arrays `u` and `v`,
    which is defined as

    .. math::

       \\sum_i {\\left| u_i - v_i \\right|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    cityblock : double
        The City Block (Manhattan) distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.cityblock([1, 0, 0], [0, 1, 0])
    2
    >>> distance.cityblock([1, 0, 0], [0, 2, 0])
    3
    >>> distance.cityblock([1, 0, 0], [1, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    abs_diff = abs(u - v)
    if w is not None:
        abs_diff = _validate_weights(w) * abs_diff
    return abs_diff.sum()
def mahalanobis(u, v, VI):
    """
    Compute the Mahalanobis distance between two 1-D arrays.

    The Mahalanobis distance between 1-D arrays `u` and `v`, is defined as

    .. math::

       \\sqrt{ (u-v) V^{-1} (u-v)^T }

    where ``V`` is the covariance matrix. Note that the argument `VI`
    is the inverse of ``V``.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    VI : ndarray
        The inverse of the covariance matrix.

    Returns
    -------
    mahalanobis : double
        The Mahalanobis distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> iv = [[1, 0.5, 0.5], [0.5, 1, 0.5], [0.5, 0.5, 1]]
    >>> distance.mahalanobis([1, 0, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([0, 2, 0], [0, 1, 0], iv)
    1.0
    >>> distance.mahalanobis([2, 0, 0], [0, 1, 0], iv)
    1.7320508075688772

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    VI = np.atleast_2d(VI)
    diff = u - v
    # Quadratic form diff @ VI @ diff, then take its square root.
    return np.sqrt(np.dot(np.dot(diff, VI), diff))
def chebyshev(u, v, w=None):
    """
    Compute the Chebyshev distance.

    Computes the Chebyshev distance between two 1-D arrays `u` and `v`,
    which is defined as

    .. math::

       \\max_i {|u_i-v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input vector.
    v : (N,) array_like
        Input vector.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    chebyshev : double
        The Chebyshev distance between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.chebyshev([1, 0, 0], [0, 1, 0])
    1
    >>> distance.chebyshev([1, 1, 0], [0, 1, 0])
    1

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
        # Components with zero weight are excluded from the max entirely.
        nonzero_weight = w > 0
        if nonzero_weight.sum() < w.size:
            u = u[nonzero_weight]
            v = v[nonzero_weight]
    return max(abs(u - v))
def braycurtis(u, v, w=None):
    """
    Compute the Bray-Curtis distance between two 1-D arrays.

    Bray-Curtis distance is defined as

    .. math::

       \\sum{|u_i-v_i|} / \\sum{|u_i+v_i|}

    The Bray-Curtis distance is in the range [0, 1] if all coordinates are
    positive, and is undefined if the inputs are of length zero.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    braycurtis : double
        The Bray-Curtis distance between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.braycurtis([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.braycurtis([1, 1, 0], [0, 1, 0])
    0.33333333333333331

    """
    u = _validate_vector(u)
    # Force float so the division below is never integer division.
    v = _validate_vector(v, dtype=np.float64)
    numerator = abs(u - v)
    denominator = abs(u + v)
    if w is not None:
        w = _validate_weights(w)
        numerator = w * numerator
        denominator = w * denominator
    return numerator.sum() / denominator.sum()
def canberra(u, v, w=None):
    """
    Compute the Canberra distance between two 1-D arrays.

    The Canberra distance is defined as

    .. math::

         d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                              {|u_i|+|v_i|}.

    Parameters
    ----------
    u : (N,) array_like
        Input array.
    v : (N,) array_like
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    canberra : double
        The Canberra distance between vectors `u` and `v`.

    Notes
    -----
    When `u[i]` and `v[i]` are 0 for given i, then the fraction 0/0 = 0 is
    used in the calculation.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.canberra([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.canberra([1, 1, 0], [0, 1, 0])
    1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v, dtype=np.float64)
    if w is not None:
        w = _validate_weights(w)
    # 0/0 terms produce NaN under 'invalid'='ignore'; nansum then treats
    # them as 0, implementing the 0/0 = 0 convention from the Notes.
    with np.errstate(invalid='ignore'):
        d = abs(u - v) / (abs(u) + abs(v))
        if w is not None:
            d = w * d
        d = np.nansum(d)
    return d
def jensenshannon(p, q, base=None):
    """
    Compute the Jensen-Shannon distance (metric) between
    two 1-D probability arrays. This is the square root
    of the Jensen-Shannon divergence.

    The Jensen-Shannon distance between two probability
    vectors `p` and `q` is defined as,

    .. math::

       \\sqrt{\\frac{D(p \\parallel m) + D(q \\parallel m)}{2}}

    where :math:`m` is the pointwise mean of :math:`p` and :math:`q`
    and :math:`D` is the Kullback-Leibler divergence.

    This routine will normalize `p` and `q` if they don't sum to 1.0.

    Parameters
    ----------
    p : (N,) array_like
        left probability vector
    q : (N,) array_like
        right probability vector
    base : double, optional
        the base of the logarithm used to compute the output
        if not given, then the routine uses the default base of
        scipy.stats.entropy.

    Returns
    -------
    js : double
        The Jensen-Shannon distance between `p` and `q`

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [0.0, 1.0, 0.0], 2.0)
    1.0
    >>> distance.jensenshannon([1.0, 0.0], [0.5, 0.5])
    0.46450140402245893
    >>> distance.jensenshannon([1.0, 0.0, 0.0], [1.0, 0.0, 0.0])
    0.0

    """
    # Normalize both inputs to probability vectors.
    p = np.asarray(p)
    q = np.asarray(q)
    p = p / p.sum(axis=0)
    q = q / q.sum(axis=0)
    m = (p + q) / 2.0
    # Sum of the two KL divergences against the pointwise mean.
    js = np.sum(rel_entr(p, m), axis=0) + np.sum(rel_entr(q, m), axis=0)
    if base is not None:
        # rel_entr uses natural log; rescale to the requested base.
        js /= np.log(base)
    return np.sqrt(js / 2.0)
def yule(u, v, w=None):
    """
    Compute the Yule dissimilarity between two boolean 1-D arrays.

    The Yule dissimilarity is defined as

    .. math::

         \\frac{R}{c_{TT} * c_{FF} + \\frac{R}{2}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2.0 * c_{TF} * c_{FT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    yule : double
        The Yule dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.yule([1, 0, 0], [0, 1, 0])
    2.0
    >>> distance.yule([1, 1, 0], [0, 1, 0])
    0.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    numerator = 2.0 * ntf * nft
    denominator = np.array(ntt * nff + ntf * nft)
    return float(numerator / denominator)
@np.deprecate(message="spatial.distance.matching is deprecated in scipy 1.0.0; "
                      "use spatial.distance.hamming instead.")
def matching(u, v, w=None):
    """
    Compute the Hamming distance between two boolean 1-D arrays.

    Deprecated alias kept for backward compatibility; use :func:`hamming`.
    """
    return hamming(u, v, w=w)
def dice(u, v, w=None):
    """
    Compute the Dice dissimilarity between two boolean 1-D arrays.

    The Dice dissimilarity between `u` and `v`, is

    .. math::

         \\frac{c_{TF} + c_{FT}}
              {2c_{TT} + c_{FT} + c_{TF}}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) ndarray, bool
        Input 1-D array.
    v : (N,) ndarray, bool
        Input 1-D array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    dice : double
        The Dice dissimilarity between 1-D arrays `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.dice([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.dice([1, 0, 0], [1, 1, 0])
    0.3333333333333333
    >>> distance.dice([1, 0, 0], [2, 0, 0])
    -0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    if u.dtype == v.dtype == bool and w is None:
        # Fast path: plain boolean vectors.
        ntt = (u & v).sum()
    else:
        dtype = np.find_common_type([int], [u.dtype, v.dtype])
        u = u.astype(dtype)
        v = v.astype(dtype)
        ntt = (u * v).sum() if w is None else (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    return float((ntf + nft) / np.array(2.0 * ntt + ntf + nft))
def rogerstanimoto(u, v, w=None):
    """
    Compute the Rogers-Tanimoto dissimilarity between two boolean 1-D arrays.

    The Rogers-Tanimoto dissimilarity between two boolean 1-D arrays
    `u` and `v`, is defined as

    .. math::
       \\frac{R}
            {c_{TT} + c_{FF} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    rogerstanimoto : double
        The Rogers-Tanimoto dissimilarity between vectors
        `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.rogerstanimoto([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.rogerstanimoto([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.rogerstanimoto([1, 0, 0], [2, 0, 0])
    -1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is not None:
        w = _validate_weights(w)
    nff, nft, ntf, ntt = _nbool_correspond_all(u, v, w=w)
    R = 2.0 * (ntf + nft)
    return float(R) / float(ntt + nff + R)
def russellrao(u, v, w=None):
    """
    Compute the Russell-Rao dissimilarity between two boolean 1-D arrays.

    The Russell-Rao dissimilarity between two boolean 1-D arrays, `u` and
    `v`, is defined as

    .. math::

      \\frac{n - c_{TT}}
           {n}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    russellrao : double
        The Russell-Rao dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.russellrao([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.russellrao([1, 0, 0], [1, 1, 0])
    0.6666666666666666
    >>> distance.russellrao([1, 0, 0], [2, 0, 0])
    0.3333333333333333

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is None:
        n = float(len(u))
        # Boolean fast path; otherwise count via elementwise product.
        ntt = (u & v).sum() if u.dtype == v.dtype == bool else (u * v).sum()
    else:
        w = _validate_weights(w)
        n = w.sum()
        ntt = (u * v * w).sum()
    return float(n - ntt) / n
def sokalmichener(u, v, w=None):
    """
    Compute the Sokal-Michener dissimilarity between two boolean 1-D arrays.

    The Sokal-Michener dissimilarity between boolean 1-D arrays `u` and `v`,
    is defined as

    .. math::

       \\frac{R}
            {S + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n`, :math:`R = 2 * (c_{TF} + c_{FT})` and
    :math:`S = c_{FF} + c_{TT}`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalmichener : double
        The Sokal-Michener dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalmichener([1, 0, 0], [0, 1, 0])
    0.8
    >>> distance.sokalmichener([1, 0, 0], [1, 1, 0])
    0.5
    >>> distance.sokalmichener([1, 0, 0], [2, 0, 0])
    -1.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if u.dtype == v.dtype == bool and w is None:
        ntt = (u & v).sum()
        nff = (~u & ~v).sum()
    elif w is None:
        ntt = (u * v).sum()
        nff = ((1.0 - u) * (1.0 - v)).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
        nff = ((1.0 - u) * (1.0 - v) * w).sum()
    # BUG FIX: pass the weights through to the TF/FT counts as well
    # (consistent with `dice` and `sokalsneath`); previously weights were
    # applied to ntt/nff but silently ignored for nft/ntf.
    (nft, ntf) = _nbool_correspond_ft_tf(u, v, w=w)
    return float(2.0 * (ntf + nft)) / float(ntt + nff + 2.0 * (ntf + nft))
def sokalsneath(u, v, w=None):
    """
    Compute the Sokal-Sneath dissimilarity between two boolean 1-D arrays.

    The Sokal-Sneath dissimilarity between `u` and `v`,

    .. math::

       \\frac{R}
            {c_{TT} + R}

    where :math:`c_{ij}` is the number of occurrences of
    :math:`\\mathtt{u[k]} = i` and :math:`\\mathtt{v[k]} = j` for
    :math:`k < n` and :math:`R = 2(c_{TF} + c_{FT})`.

    Parameters
    ----------
    u : (N,) array_like, bool
        Input array.
    v : (N,) array_like, bool
        Input array.
    w : (N,) array_like, optional
        The weights for each value in `u` and `v`. Default is None,
        which gives each value a weight of 1.0

    Returns
    -------
    sokalsneath : double
        The Sokal-Sneath dissimilarity between vectors `u` and `v`.

    Examples
    --------
    >>> from scipy.spatial import distance
    >>> distance.sokalsneath([1, 0, 0], [0, 1, 0])
    1.0
    >>> distance.sokalsneath([1, 0, 0], [1, 1, 0])
    0.66666666666666663
    >>> distance.sokalsneath([1, 0, 0], [2, 1, 0])
    0.0
    >>> distance.sokalsneath([1, 0, 0], [3, 1, 0])
    -2.0

    """
    u = _validate_vector(u)
    v = _validate_vector(v)
    if w is None:
        # Boolean fast path; otherwise count via elementwise product.
        ntt = (u & v).sum() if u.dtype == v.dtype == bool else (u * v).sum()
    else:
        w = _validate_weights(w)
        ntt = (u * v * w).sum()
    nft, ntf = _nbool_correspond_ft_tf(u, v, w=w)
    denom = np.array(ntt + 2.0 * (ntf + nft))
    if not denom.any():
        raise ValueError('Sokal-Sneath dissimilarity is not defined for '
                         'vectors that are entirely false.')
    return float(2.0 * (ntf + nft)) / denom
# Shorthand converters for the two dtypes the C implementations accept.
_convert_to_double = partial(_convert_to_type, out_type=np.double)
_convert_to_bool = partial(_convert_to_type, out_type=bool)
# adding python-only wrappers to _distance_wrap module
_distance_wrap.pdist_correlation_double_wrap = _correlation_pdist_wrap
_distance_wrap.cdist_correlation_double_wrap = _correlation_cdist_wrap
# Registry of implemented metrics:
# Dictionary with the following structure:
# {
#  metric_name : MetricInfo(aka, types=[double], validator=None)
# }
#
# Where:
# `metric_name` must be equal to python metric name
#
# MetricInfo is a named tuple with fields:
#  'aka' : [list of aliases],
#
#  'validator': f(X, m, n, **kwargs)    # function that check kwargs and
#                                       # computes default values.
#
#  'types': [list of supported types],  # X (pdist) and XA (cdist) are used to
#                                       # choose the type. if there is no match
#                                       # the first type is used. Default double
# }
MetricInfo = namedtuple("MetricInfo", 'aka types validator ')
# namedtuple defaults bind to the *last* fields: types=['double'], validator=None.
MetricInfo.__new__.__defaults__ = (['double'], None)
_METRICS = {
    'braycurtis': MetricInfo(aka=['braycurtis']),
    'canberra': MetricInfo(aka=['canberra']),
    'chebyshev': MetricInfo(aka=['chebychev', 'chebyshev', 'cheby', 'cheb', 'ch']),
    'cityblock': MetricInfo(aka=['cityblock', 'cblock', 'cb', 'c']),
    'correlation': MetricInfo(aka=['correlation', 'co']),
    'cosine': MetricInfo(aka=['cosine', 'cos']),
    'dice': MetricInfo(aka=['dice'], types=['bool']),
    'euclidean': MetricInfo(aka=['euclidean', 'euclid', 'eu', 'e']),
    'hamming': MetricInfo(aka=['matching', 'hamming', 'hamm', 'ha', 'h'],
                          types=['double', 'bool']),
    'jaccard': MetricInfo(aka=['jaccard', 'jacc', 'ja', 'j'],
                          types=['double', 'bool']),
    'jensenshannon': MetricInfo(aka=['jensenshannon', 'js'],
                                types=['double']),
    'kulsinski': MetricInfo(aka=['kulsinski'], types=['bool']),
    'mahalanobis': MetricInfo(aka=['mahalanobis', 'mahal', 'mah'],
                              validator=_validate_mahalanobis_kwargs),
    'minkowski': MetricInfo(aka=['minkowski', 'mi', 'm', 'pnorm'],
                            validator=_validate_minkowski_kwargs),
    'rogerstanimoto': MetricInfo(aka=['rogerstanimoto'], types=['bool']),
    'russellrao': MetricInfo(aka=['russellrao'], types=['bool']),
    'seuclidean': MetricInfo(aka=['seuclidean', 'se', 's'],
                             validator=_validate_seuclidean_kwargs),
    'sokalmichener': MetricInfo(aka=['sokalmichener'], types=['bool']),
    'sokalsneath': MetricInfo(aka=['sokalsneath'], types=['bool']),
    'sqeuclidean': MetricInfo(aka=['sqeuclidean', 'sqe', 'sqeuclid']),
    'wminkowski': MetricInfo(aka=['wminkowski', 'wmi', 'wm', 'wpnorm'],
                             validator=_validate_wminkowski_kwargs),
    'yule': MetricInfo(aka=['yule'], types=['bool']),
}
# Map every alias (e.g. 'euclid', 'e') back to its canonical metric name.
_METRIC_ALIAS = dict((alias, name)
                     for name, info in _METRICS.items()
                     for alias in info.aka)
_METRICS_NAMES = list(_METRICS.keys())
# 'test_<name>' entries expose the pure-Python implementations; used when the
# C versions cannot handle a request (e.g. weighted variants).
_TEST_METRICS = {'test_' + name: globals()[name] for name in _METRICS.keys()}
def _select_weighted_metric(mstr, kwargs, out):
    """Reroute weighted metric requests to the Python implementations.

    Returns a (metric-name, kwargs) pair; the kwargs dict is copied, never
    mutated in place.
    """
    kwargs = dict(kwargs)
    if "w" in kwargs and kwargs["w"] is None:
        # w=None is the same as omitting it
        del kwargs["w"]
    # 'test_*' names and wminkowski already support weights natively.
    handles_weights = mstr.startswith("test_") or mstr in _METRICS['wminkowski'].aka
    if not handles_weights and "w" in kwargs:
        if (mstr in _METRICS['seuclidean'].aka or
                mstr in _METRICS['mahalanobis'].aka):
            raise ValueError("metric %s incompatible with weights" % mstr)
        # XXX: C-versions do not support weights
        # need to use python version for weighting
        kwargs['out'] = out
        mstr = "test_%s" % mstr
    return mstr, kwargs
def pdist(X, metric='euclidean', *args, **kwargs):
    """
    Pairwise distances between observations in n-dimensional space.

    See Notes for common calling conventions.

    Parameters
    ----------
    X : ndarray
        An m by n array of m original observations in an
        n-dimensional space.
    metric : str or function, optional
        The distance metric to use. The distance function can
        be 'braycurtis', 'canberra', 'chebyshev', 'cityblock',
        'correlation', 'cosine', 'dice', 'euclidean', 'hamming',
        'jaccard', 'jensenshannon', 'kulsinski', 'mahalanobis', 'matching',
        'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
        'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule'.
    *args : tuple. Deprecated.
        Additional arguments should be passed as keyword arguments
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
            The p-norm to apply for Minkowski, weighted and unweighted.
            Default: 2.
        w : ndarray
            The weight vector for metrics that support weights (e.g.,
            Minkowski).
        V : ndarray
            The variance vector for standardized Euclidean.
            Default: var(X, axis=0, ddof=1)
        VI : ndarray
            The inverse of the covariance matrix for Mahalanobis.
            Default: inv(cov(X.T)).T
        out : ndarray.
            The output array
            If not None, condensed distance matrix Y is stored in this array.
            Note: metric independent, it will become a regular keyword arg in
            a future scipy version

    Returns
    -------
    Y : ndarray
        Returns a condensed distance matrix Y. For
        each :math:`i` and :math:`j` (where :math:`i<j<m`), where m is the
        number of original observations. The metric ``dist(u=X[i], v=X[j])``
        is computed and stored in entry ``ij``.

    See Also
    --------
    squareform : converts between condensed distance matrices and
                 square distance matrices.

    Notes
    -----
    See ``squareform`` for information on how to calculate the index of
    this entry or to convert the condensed distance matrix to a
    redundant square matrix.

    The following are common calling conventions.

    1. ``Y = pdist(X, 'euclidean')``

       Computes the distance between m points using Euclidean distance
       (2-norm) as the distance metric between the points. The points
       are arranged as m n-dimensional row vectors in the matrix X.

    2. ``Y = pdist(X, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`||u-v||_p` (p-norm) where :math:`p \\geq 1`.

    3. ``Y = pdist(X, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = pdist(X, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}

       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = pdist(X, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`||u-v||_2^2` between
       the vectors.

    6. ``Y = pdist(X, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{||u||}_2 {||v||}_2}

       where :math:`||*||_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of ``u`` and ``v``.

    7. ``Y = pdist(X, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    8. ``Y = pdist(X, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = pdist(X, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree.

    10. ``Y = pdist(X, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}

    11. ``Y = pdist(X, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                                 {|u_i|+|v_i|}

    12. ``Y = pdist(X, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\frac{\\sum_i {|u_i-v_i|}}
                          {\\sum_i {|u_i+v_i|}}

    13. ``Y = pdist(X, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    14. ``Y = pdist(X, 'yule')``

        Computes the Yule distance between each pair of boolean
        vectors. (see yule function documentation)

    15. ``Y = pdist(X, 'matching')``

        Synonym for 'hamming'.

    16. ``Y = pdist(X, 'dice')``

        Computes the Dice distance between each pair of boolean
        vectors. (see dice function documentation)

    17. ``Y = pdist(X, 'kulsinski')``

        Computes the Kulsinski distance between each pair of
        boolean vectors. (see kulsinski function documentation)

    18. ``Y = pdist(X, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between each pair of
        boolean vectors. (see rogerstanimoto function documentation)

    19. ``Y = pdist(X, 'russellrao')``

        Computes the Russell-Rao distance between each pair of
        boolean vectors. (see russellrao function documentation)

    20. ``Y = pdist(X, 'sokalmichener')``

        Computes the Sokal-Michener distance between each pair of
        boolean vectors. (see sokalmichener function documentation)

    21. ``Y = pdist(X, 'sokalsneath')``

        Computes the Sokal-Sneath distance between each pair of
        boolean vectors. (see sokalsneath function documentation)

    22. ``Y = pdist(X, 'wminkowski', p=2, w=w)``

        Computes the weighted Minkowski distance between each pair of
        vectors. (see wminkowski function documentation)

    23. ``Y = pdist(X, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = pdist(X, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = pdist(X, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function sokalsneath. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax.::

          dm = pdist(X, 'sokalsneath')
    """
    # You can also call this as:
    #     Y = pdist(X, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in X using the distance metric 'abc' but
    # with a more succinct, verifiable, but less efficient implementation.
    X = _asarray_validated(X, sparse_ok=False, objects_ok=True, mask_ok=True,
                           check_finite=False)
    # Positional extra args are deprecated; fold them into kwargs.
    kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "pdist")
    # The C wrappers below expect a C-ordered array.
    X = np.asarray(X, order='c')
    s = X.shape
    if len(s) != 2:
        raise ValueError('A 2-dimensional array must be passed.')
    m, n = s
    # "out" is handled here (metric-independent) and must not reach the
    # metric function itself.
    out = kwargs.pop("out", None)
    if out is None:
        # Condensed form stores only the strict upper triangle: m*(m-1)/2.
        dm = np.empty((m * (m - 1)) // 2, dtype=np.double)
    else:
        # Validate the user-supplied output buffer before writing into it.
        if out.shape != (m * (m - 1) // 2,):
            raise ValueError("output array has incorrect shape.")
        if not out.flags.c_contiguous:
            raise ValueError("Output array must be C-contiguous.")
        if out.dtype != np.double:
            raise ValueError("Output array must be double type.")
        dm = out
    # compute blacklist for deprecated kwargs: only the kwargs meaningful for
    # the chosen metric are allowed through; the rest are filtered with a
    # deprecation path.
    if (metric in _METRICS['jensenshannon'].aka
            or metric == 'test_jensenshannon' or metric == jensenshannon):
        kwargs_blacklist = ["p", "w", "V", "VI"]
    elif (metric in _METRICS['minkowski'].aka
          or metric in _METRICS['wminkowski'].aka
          or metric in ['test_minkowski', 'test_wminkowski']
          or metric in [minkowski, wminkowski]):
        kwargs_blacklist = ["V", "VI"]
    elif (metric in _METRICS['seuclidean'].aka or
          metric == 'test_seuclidean' or metric == seuclidean):
        kwargs_blacklist = ["p", "w", "VI"]
    elif (metric in _METRICS['mahalanobis'].aka
          or metric == 'test_mahalanobis' or metric == mahalanobis):
        kwargs_blacklist = ["p", "w", "V"]
    else:
        kwargs_blacklist = ["p", "V", "VI"]
    _filter_deprecated_kwargs(kwargs, kwargs_blacklist)
    if callable(metric):
        mstr = getattr(metric, '__name__', 'UnknownCustomMetric')
        metric_name = _METRIC_ALIAS.get(mstr, None)
        if metric_name is not None:
            X, typ, kwargs = _validate_pdist_input(X, m, n,
                                                   metric_name, **kwargs)
        # Fill the condensed matrix in (i, j) order with i < j.
        k = 0
        for i in xrange(0, m - 1):
            for j in xrange(i + 1, m):
                dm[k] = metric(X[i], X[j], **kwargs)
                k = k + 1
    elif isinstance(metric, string_types):
        mstr = metric.lower()
        # Reroute to the python implementation if weights were requested but
        # the C version cannot honor them.
        mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
        metric_name = _METRIC_ALIAS.get(mstr, None)
        if metric_name is not None:
            X, typ, kwargs = _validate_pdist_input(X, m, n,
                                                   metric_name, **kwargs)
            # get pdist wrapper
            pdist_fn = getattr(_distance_wrap,
                               "pdist_%s_%s_wrap" % (metric_name, typ))
            pdist_fn(X, dm, **kwargs)
            return dm
        elif mstr in ['old_cosine', 'old_cos']:
            warnings.warn('"old_cosine" is deprecated and will be removed in '
                          'a future version. Use "cosine" instead.',
                          DeprecationWarning)
            X = _convert_to_double(X)
            norms = np.einsum('ij,ij->i', X, X, dtype=np.double)
            np.sqrt(norms, out=norms)
            nV = norms.reshape(m, 1)
            # The numerator u * v
            nm = np.dot(X, X.T)
            # The denom. ||u||*||v||
            de = np.dot(nV, nV.T)
            dm = 1.0 - (nm / de)
            # Zero the diagonal before condensing the square matrix.
            dm[xrange(0, m), xrange(0, m)] = 0.0
            dm = squareform(dm)
        elif mstr.startswith("test_"):
            if mstr in _TEST_METRICS:
                # Recurse with the pure-python reference implementation.
                dm = pdist(X, _TEST_METRICS[mstr], **kwargs)
            else:
                raise ValueError('Unknown "Test" Distance Metric: %s' %
                                 mstr[5:])
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
    return dm
def squareform(X, force="no", checks=True):
    """
    Convert a vector-form distance vector to a square-form distance
    matrix, and vice-versa.

    Parameters
    ----------
    X : ndarray
        Either a condensed or redundant distance matrix.
    force : str, optional
        As with MATLAB(TM), if force is equal to ``'tovector'`` or
        ``'tomatrix'``, the input will be treated as a distance matrix or
        distance vector respectively.
    checks : bool, optional
        If set to False, no checks will be made for matrix
        symmetry nor zero diagonals. This is useful if it is known that
        ``X - X.T`` is small and ``diag(X)`` is close to zero.
        These values are ignored any way so they do not disrupt the
        squareform transformation.

    Returns
    -------
    Y : ndarray
        If a condensed distance matrix is passed, a redundant one is
        returned, or if a redundant one is passed, a condensed distance
        matrix is returned.

    Notes
    -----
    1. v = squareform(X)

       Given a square d-by-d symmetric distance matrix X,
       ``v = squareform(X)`` returns a ``d * (d-1) / 2`` (or
       :math:`{n \\choose 2}`) sized vector v.

       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` is the distance
       between points i and j. If X is non-square or asymmetric, an error
       is returned.

    2. X = squareform(v)

       Given a ``d*(d-1)/2`` sized v for some integer ``d >= 2`` encoding
       distances as described, ``X = squareform(v)`` returns a d by d
       distance matrix X. The ``X[i, j]`` and ``X[j, i]`` values are set to
       :math:`v[{n \\choose 2}-{n-i \\choose 2} + (j-i-1)]` and all
       diagonal elements are zero.

    In Scipy 0.19.0, ``squareform`` stopped casting all input types to
    float64, and started returning arrays of the same dtype as the input.
    """
    X = np.ascontiguousarray(X)
    s = X.shape
    # "force" only validates the dimensionality; dispatch below is by shape.
    if force.lower() == 'tomatrix':
        if len(s) != 1:
            raise ValueError("Forcing 'tomatrix' but input X is not a "
                             "distance vector.")
    elif force.lower() == 'tovector':
        if len(s) != 2:
            raise ValueError("Forcing 'tovector' but input X is not a "
                             "distance matrix.")
    # X = squareform(v)
    if len(s) == 1:
        if s[0] == 0:
            return np.zeros((1, 1), dtype=X.dtype)
        # Grab the closest value to the square root of the number
        # of elements times 2 to see if the number of elements
        # is indeed a binomial coefficient.
        d = int(np.ceil(np.sqrt(s[0] * 2)))
        # Check that v is of valid dimensions.
        if d * (d - 1) != s[0] * 2:
            raise ValueError('Incompatible vector size. It must be a binomial '
                             'coefficient n choose 2 for some integer n >= 2.')
        # Allocate memory for the distance matrix.
        M = np.zeros((d, d), dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Fill in the values of the distance matrix.
        _distance_wrap.to_squareform_from_vector_wrap(M, X)
        # Return the distance matrix.
        return M
    elif len(s) == 2:
        if s[0] != s[1]:
            raise ValueError('The matrix argument must be square.')
        if checks:
            is_valid_dm(X, throw=True, name='X')
        # One-side of the dimensions is set here.
        d = s[0]
        if d <= 1:
            return np.array([], dtype=X.dtype)
        # Create a vector.
        v = np.zeros((d * (d - 1)) // 2, dtype=X.dtype)
        # Since the C code does not support striding using strides.
        # The dimensions are used instead.
        X = _copy_array_if_base_present(X)
        # Convert the vector to squareform.
        _distance_wrap.to_vector_from_squareform_wrap(X, v)
        return v
    else:
        raise ValueError(('The first argument must be one or two dimensional '
                          'array. A %d-dimensional array is not '
                          'permitted') % len(s))
def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False):
    """
    Return True if input array is a valid distance matrix.

    Distance matrices must be 2-dimensional numpy arrays.
    They must have a zero-diagonal, and they must be symmetric.

    Parameters
    ----------
    D : ndarray
        The candidate object to test for validity.
    tol : float, optional
        The distance matrix should be symmetric. `tol` is the maximum
        difference between entries ``ij`` and ``ji`` for the distance
        metric to be considered symmetric.
    throw : bool, optional
        An exception is thrown if the distance matrix passed is not valid.
    name : str, optional
        The name of the variable to checked. This is useful if
        throw is set to True so the offending variable can be identified
        in the exception message when an exception is thrown.
    warning : bool, optional
        Instead of throwing an exception, a warning message is
        raised.

    Returns
    -------
    valid : bool
        True if the variable `D` passed is a valid distance matrix.

    Notes
    -----
    Small numerical differences in `D` and `D.T` and non-zeroness of
    the diagonal are ignored if they are within the tolerance specified
    by `tol`.
    """
    D = np.asarray(D, order='c')
    valid = True
    try:
        s = D.shape
        if len(D.shape) != 2:
            if name:
                raise ValueError(('Distance matrix \'%s\' must have shape=2 '
                                  '(i.e. be two-dimensional).') % name)
            else:
                raise ValueError('Distance matrix must have shape=2 (i.e. '
                                 'be two-dimensional).')
        if tol == 0.0:
            # Exact checks: perfect symmetry and an exactly-zero diagonal.
            if not (D == D.T).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric.') % name)
                else:
                    raise ValueError('Distance matrix must be symmetric.')
            if not (D[xrange(0, s[0]), xrange(0, s[0])] == 0).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must '
                                      'be zero.') % name)
                else:
                    raise ValueError('Distance matrix diagonal must be zero.')
        else:
            # Tolerance-based checks.
            # NOTE(review): this uses (D - D.T) <= tol without abs(), so
            # large *negative* asymmetries pass — confirm whether callers
            # rely on this before tightening it.
            if not (D - D.T <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' must be '
                                      'symmetric within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    raise ValueError('Distance matrix must be symmetric within'
                                     ' tolerance %5.5f.' % tol)
            if not (D[xrange(0, s[0]), xrange(0, s[0])] <= tol).all():
                if name:
                    raise ValueError(('Distance matrix \'%s\' diagonal must be'
                                      ' close to zero within tolerance %5.5f.')
                                     % (name, tol))
                else:
                    # BUG FIX: the message previously kept the '%s' name
                    # placeholder while supplying only `tol`, so this raise
                    # itself crashed with "not enough arguments for format
                    # string" whenever `name` was falsy.
                    raise ValueError(('Distance matrix diagonal must be'
                                      ' close to zero within tolerance '
                                      '%5.5f.') % tol)
    except Exception as e:
        # Any validation failure is funneled here; the policy flags decide
        # whether to re-raise, warn, or just report False.
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def is_valid_y(y, warning=False, throw=False, name=None):
    """
    Return True if the input array is a valid condensed distance matrix.

    Condensed distance matrices must be 1-dimensional numpy arrays.
    Their length must be a binomial coefficient :math:`{n \\choose 2}`
    for some positive integer n.

    Parameters
    ----------
    y : ndarray
        The condensed distance matrix.
    warning : bool, optional
        Invokes a warning if the variable passed is not a valid
        condensed distance matrix. The warning message explains why
        the distance matrix is not valid. `name` is used when
        referencing the offending variable.
    throw : bool, optional
        Throws an exception if the variable passed is not a valid
        condensed distance matrix.
    name : str, optional
        Used when referencing the offending variable in the
        warning or exception message.

    Returns
    -------
    valid : bool
        True if the variable `y` passed is a valid condensed distance
        matrix.
    """
    y = np.asarray(y, order='c')
    valid = True
    try:
        if len(y.shape) != 1:
            if name:
                raise ValueError(('Condensed distance matrix \'%s\' must '
                                  'have shape=1 (i.e. be one-dimensional).')
                                 % name)
            else:
                raise ValueError('Condensed distance matrix must have shape=1 '
                                 '(i.e. be one-dimensional).')
        n = y.shape[0]
        # Recover the candidate observation count and confirm that n is
        # exactly d-choose-2 for that d.
        d = int(np.ceil(np.sqrt(n * 2)))
        if (d * (d - 1) / 2) != n:
            if name:
                raise ValueError(('Length n of condensed distance matrix '
                                  '\'%s\' must be a binomial coefficient, i.e.'
                                  'there must be a k such that '
                                  '(k \\choose 2)=n)!') % name)
            else:
                raise ValueError('Length n of condensed distance matrix must '
                                 'be a binomial coefficient, i.e. there must '
                                 'be a k such that (k \\choose 2)=n)!')
    except Exception as e:
        # Validation failure: re-raise, warn, or just report False.
        if throw:
            raise
        if warning:
            warnings.warn(str(e))
        valid = False
    return valid
def num_obs_dm(d):
    """
    Return the number of original observations that correspond to a
    square, redundant distance matrix.

    Parameters
    ----------
    d : ndarray
        The target distance matrix.

    Returns
    -------
    num_obs_dm : int
        The number of observations in the redundant distance matrix.
    """
    dmat = np.asarray(d, order='c')
    # tol=np.inf relaxes the numeric symmetry/zero-diagonal checks; only the
    # structural validation (dimensionality) can still raise here.
    is_valid_dm(dmat, tol=np.inf, throw=True, name='d')
    return dmat.shape[0]
def num_obs_y(Y):
    """
    Return the number of original observations that correspond to a
    condensed distance matrix.

    Parameters
    ----------
    Y : ndarray
        Condensed distance matrix.

    Returns
    -------
    n : int
        The number of observations in the condensed distance matrix `Y`.
    """
    Y = np.asarray(Y, order='c')
    is_valid_y(Y, throw=True, name='Y')
    length = Y.shape[0]
    if length == 0:
        raise ValueError("The number of observations cannot be determined on "
                         "an empty distance matrix.")
    # Invert length = n*(n-1)/2 and verify the length really is a binomial
    # coefficient n-choose-2.
    n = int(np.ceil(np.sqrt(length * 2)))
    if n * (n - 1) / 2 != length:
        raise ValueError("Invalid condensed distance matrix passed. Must be "
                         "some k where k=(n choose 2) for some n >= 2.")
    return n
def cdist(XA, XB, metric='euclidean', *args, **kwargs):
    """
    Compute distance between each pair of the two collections of inputs.

    See Notes for common calling conventions.

    Parameters
    ----------
    XA : ndarray
        An :math:`m_A` by :math:`n` array of :math:`m_A`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    XB : ndarray
        An :math:`m_B` by :math:`n` array of :math:`m_B`
        original observations in an :math:`n`-dimensional space.
        Inputs are converted to float type.
    metric : str or callable, optional
        The distance metric to use. If a string, the distance function can be
        'braycurtis', 'canberra', 'chebyshev', 'cityblock', 'correlation',
        'cosine', 'dice', 'euclidean', 'hamming', 'jaccard', 'jensenshannon',
        'kulsinski', 'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
        'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
        'sqeuclidean', 'wminkowski', 'yule'.
    *args : tuple. Deprecated.
        Additional arguments should be passed as keyword arguments
    **kwargs : dict, optional
        Extra arguments to `metric`: refer to each metric documentation for a
        list of all possible arguments.

        Some possible arguments:

        p : scalar
            The p-norm to apply for Minkowski, weighted and unweighted.
            Default: 2.
        w : ndarray
            The weight vector for metrics that support weights (e.g.,
            Minkowski).
        V : ndarray
            The variance vector for standardized Euclidean.
            Default: var(vstack([XA, XB]), axis=0, ddof=1)
        VI : ndarray
            The inverse of the covariance matrix for Mahalanobis.
            Default: inv(cov(vstack([XA, XB].T))).T
        out : ndarray
            The output array
            If not None, the distance matrix Y is stored in this array.
            Note: metric independent, it will become a regular keyword arg in
            a future scipy version

    Returns
    -------
    Y : ndarray
        A :math:`m_A` by :math:`m_B` distance matrix is returned.
        For each :math:`i` and :math:`j`, the metric
        ``dist(u=XA[i], v=XB[j])`` is computed and stored in the
        :math:`ij` th entry.

    Raises
    ------
    ValueError
        An exception is thrown if `XA` and `XB` do not have
        the same number of columns.

    Notes
    -----
    The following are common calling conventions:

    1. ``Y = cdist(XA, XB, 'euclidean')``

       Computes the distance between :math:`m` points using
       Euclidean distance (2-norm) as the distance metric between the
       points. The points are arranged as :math:`m`
       :math:`n`-dimensional row vectors in the matrix X.

    2. ``Y = cdist(XA, XB, 'minkowski', p=2.)``

       Computes the distances using the Minkowski distance
       :math:`||u-v||_p` (:math:`p`-norm) where :math:`p \\geq 1`.

    3. ``Y = cdist(XA, XB, 'cityblock')``

       Computes the city block or Manhattan distance between the
       points.

    4. ``Y = cdist(XA, XB, 'seuclidean', V=None)``

       Computes the standardized Euclidean distance. The standardized
       Euclidean distance between two n-vectors ``u`` and ``v`` is

       .. math::

          \\sqrt{\\sum {(u_i-v_i)^2 / V[x_i]}}.

       V is the variance vector; V[i] is the variance computed over all
       the i'th components of the points. If not passed, it is
       automatically computed.

    5. ``Y = cdist(XA, XB, 'sqeuclidean')``

       Computes the squared Euclidean distance :math:`||u-v||_2^2` between
       the vectors.

    6. ``Y = cdist(XA, XB, 'cosine')``

       Computes the cosine distance between vectors u and v,

       .. math::

          1 - \\frac{u \\cdot v}
                   {{||u||}_2 {||v||}_2}

       where :math:`||*||_2` is the 2-norm of its argument ``*``, and
       :math:`u \\cdot v` is the dot product of :math:`u` and :math:`v`.

    7. ``Y = cdist(XA, XB, 'correlation')``

       Computes the correlation distance between vectors u and v. This is

       .. math::

          1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}
                   {{||(u - \\bar{u})||}_2 {||(v - \\bar{v})||}_2}

       where :math:`\\bar{v}` is the mean of the elements of vector v,
       and :math:`x \\cdot y` is the dot product of :math:`x` and :math:`y`.

    8. ``Y = cdist(XA, XB, 'hamming')``

       Computes the normalized Hamming distance, or the proportion of
       those vector elements between two n-vectors ``u`` and ``v``
       which disagree. To save memory, the matrix ``X`` can be of type
       boolean.

    9. ``Y = cdist(XA, XB, 'jaccard')``

       Computes the Jaccard distance between the points. Given two
       vectors, ``u`` and ``v``, the Jaccard distance is the
       proportion of those elements ``u[i]`` and ``v[i]`` that
       disagree where at least one of them is non-zero.

    10. ``Y = cdist(XA, XB, 'chebyshev')``

        Computes the Chebyshev distance between the points. The
        Chebyshev distance between two n-vectors ``u`` and ``v`` is the
        maximum norm-1 distance between their respective elements. More
        precisely, the distance is given by

        .. math::

           d(u,v) = \\max_i {|u_i-v_i|}.

    11. ``Y = cdist(XA, XB, 'canberra')``

        Computes the Canberra distance between the points. The
        Canberra distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\sum_i \\frac{|u_i-v_i|}
                                 {|u_i|+|v_i|}.

    12. ``Y = cdist(XA, XB, 'braycurtis')``

        Computes the Bray-Curtis distance between the points. The
        Bray-Curtis distance between two points ``u`` and ``v`` is

        .. math::

           d(u,v) = \\frac{\\sum_i (|u_i-v_i|)}
                          {\\sum_i (|u_i+v_i|)}

    13. ``Y = cdist(XA, XB, 'mahalanobis', VI=None)``

        Computes the Mahalanobis distance between the points. The
        Mahalanobis distance between two points ``u`` and ``v`` is
        :math:`\\sqrt{(u-v)(1/V)(u-v)^T}` where :math:`(1/V)` (the ``VI``
        variable) is the inverse covariance. If ``VI`` is not None,
        ``VI`` will be used as the inverse covariance matrix.

    14. ``Y = cdist(XA, XB, 'yule')``

        Computes the Yule distance between the boolean
        vectors. (see `yule` function documentation)

    15. ``Y = cdist(XA, XB, 'matching')``

        Synonym for 'hamming'.

    16. ``Y = cdist(XA, XB, 'dice')``

        Computes the Dice distance between the boolean vectors. (see
        `dice` function documentation)

    17. ``Y = cdist(XA, XB, 'kulsinski')``

        Computes the Kulsinski distance between the boolean
        vectors. (see `kulsinski` function documentation)

    18. ``Y = cdist(XA, XB, 'rogerstanimoto')``

        Computes the Rogers-Tanimoto distance between the boolean
        vectors. (see `rogerstanimoto` function documentation)

    19. ``Y = cdist(XA, XB, 'russellrao')``

        Computes the Russell-Rao distance between the boolean
        vectors. (see `russellrao` function documentation)

    20. ``Y = cdist(XA, XB, 'sokalmichener')``

        Computes the Sokal-Michener distance between the boolean
        vectors. (see `sokalmichener` function documentation)

    21. ``Y = cdist(XA, XB, 'sokalsneath')``

        Computes the Sokal-Sneath distance between the vectors. (see
        `sokalsneath` function documentation)

    22. ``Y = cdist(XA, XB, 'wminkowski', p=2., w=w)``

        Computes the weighted Minkowski distance between the
        vectors. (see `wminkowski` function documentation)

    23. ``Y = cdist(XA, XB, f)``

        Computes the distance between all pairs of vectors in X
        using the user supplied 2-arity function f. For example,
        Euclidean distance between the vectors could be computed
        as follows::

          dm = cdist(XA, XB, lambda u, v: np.sqrt(((u-v)**2).sum()))

        Note that you should avoid passing a reference to one of
        the distance functions defined in this library. For example,::

          dm = cdist(XA, XB, sokalsneath)

        would calculate the pair-wise distances between the vectors in
        X using the Python function `sokalsneath`. This would result in
        sokalsneath being called :math:`{n \\choose 2}` times, which
        is inefficient. Instead, the optimized C version is more
        efficient, and we call it using the following syntax::

          dm = cdist(XA, XB, 'sokalsneath')

    Examples
    --------
    Find the Euclidean distances between four 2-D coordinates:

    >>> from scipy.spatial import distance
    >>> coords = [(35.0456, -85.2672),
    ...           (35.1174, -89.9711),
    ...           (35.9728, -83.9422),
    ...           (36.1667, -86.7833)]
    >>> distance.cdist(coords, coords, 'euclidean')
    array([[ 0.    ,  4.7044,  1.6172,  1.8856],
           [ 4.7044,  0.    ,  6.0893,  3.3561],
           [ 1.6172,  6.0893,  0.    ,  2.8477],
           [ 1.8856,  3.3561,  2.8477,  0.    ]])

    Find the Manhattan distance from a 3-D point to the corners of the unit
    cube:

    >>> a = np.array([[0, 0, 0],
    ...               [0, 0, 1],
    ...               [0, 1, 0],
    ...               [0, 1, 1],
    ...               [1, 0, 0],
    ...               [1, 0, 1],
    ...               [1, 1, 0],
    ...               [1, 1, 1]])
    >>> b = np.array([[ 0.1,  0.2,  0.4]])
    >>> distance.cdist(a, b, 'cityblock')
    array([[ 0.7],
           [ 0.9],
           [ 1.3],
           [ 1.5],
           [ 1.5],
           [ 1.7],
           [ 2.1],
           [ 2.3]])
    """
    # You can also call this as:
    #     Y = cdist(XA, XB, 'test_abc')
    # where 'abc' is the metric being tested. This computes the distance
    # between all pairs of vectors in XA and XB using the distance metric 'abc'
    # but with a more succinct, verifiable, but less efficient implementation.
    # Positional extra args are deprecated; fold them into kwargs.
    kwargs = _args_to_kwargs_xdist(args, kwargs, metric, "cdist")
    # The C wrappers below expect C-ordered arrays.
    XA = np.asarray(XA, order='c')
    XB = np.asarray(XB, order='c')
    s = XA.shape
    sB = XB.shape
    if len(s) != 2:
        raise ValueError('XA must be a 2-dimensional array.')
    if len(sB) != 2:
        raise ValueError('XB must be a 2-dimensional array.')
    if s[1] != sB[1]:
        raise ValueError('XA and XB must have the same number of columns '
                         '(i.e. feature dimension.)')
    mA = s[0]
    mB = sB[0]
    n = s[1]
    # "out" is handled here (metric-independent) and must not reach the
    # metric function itself.
    out = kwargs.pop("out", None)
    if out is None:
        dm = np.empty((mA, mB), dtype=np.double)
    else:
        # Validate the user-supplied output buffer before writing into it.
        if out.shape != (mA, mB):
            raise ValueError("Output array has incorrect shape.")
        if not out.flags.c_contiguous:
            raise ValueError("Output array must be C-contiguous.")
        if out.dtype != np.double:
            raise ValueError("Output array must be double type.")
        dm = out
    # compute blacklist for deprecated kwargs: only kwargs meaningful for the
    # chosen metric are allowed through.
    if (metric in _METRICS['minkowski'].aka or
            metric in _METRICS['wminkowski'].aka or
            metric in ['test_minkowski', 'test_wminkowski'] or
            metric in [minkowski, wminkowski]):
        kwargs_blacklist = ["V", "VI"]
    elif (metric in _METRICS['seuclidean'].aka or
          metric == 'test_seuclidean' or metric == seuclidean):
        kwargs_blacklist = ["p", "w", "VI"]
    elif (metric in _METRICS['mahalanobis'].aka or
          metric == 'test_mahalanobis' or metric == mahalanobis):
        kwargs_blacklist = ["p", "w", "V"]
    else:
        kwargs_blacklist = ["p", "V", "VI"]
    _filter_deprecated_kwargs(kwargs, kwargs_blacklist)
    if callable(metric):
        mstr = getattr(metric, '__name__', 'Unknown')
        metric_name = _METRIC_ALIAS.get(mstr, None)
        # NOTE(review): unlike pdist, this validates even when metric_name is
        # None — presumably _validate_cdist_input tolerates that; confirm.
        XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
                                                    metric_name, **kwargs)
        # Fill the full (mA x mB) rectangular distance matrix.
        for i in xrange(0, mA):
            for j in xrange(0, mB):
                dm[i, j] = metric(XA[i], XB[j], **kwargs)
    elif isinstance(metric, string_types):
        mstr = metric.lower()
        # Reroute to the python implementation if weights were requested but
        # the C version cannot honor them.
        mstr, kwargs = _select_weighted_metric(mstr, kwargs, out)
        metric_name = _METRIC_ALIAS.get(mstr, None)
        if metric_name is not None:
            XA, XB, typ, kwargs = _validate_cdist_input(XA, XB, mA, mB, n,
                                                        metric_name, **kwargs)
            # get cdist wrapper
            cdist_fn = getattr(_distance_wrap,
                               "cdist_%s_%s_wrap" % (metric_name, typ))
            cdist_fn(XA, XB, dm, **kwargs)
            return dm
        elif mstr.startswith("test_"):
            if mstr in _TEST_METRICS:
                # Recurse with the pure-python reference implementation.
                dm = cdist(XA, XB, _TEST_METRICS[mstr], **kwargs)
            else:
                raise ValueError('Unknown "Test" Distance Metric: %s' %
                                 mstr[5:])
        else:
            raise ValueError('Unknown Distance Metric: %s' % mstr)
    else:
        raise TypeError('2nd argument metric must be a string identifier '
                        'or a function.')
    return dm
|
"""Leetcode 253. Meeting Rooms II (Premium)
Medium
URL: https://leetcode.com/problems/meeting-rooms-ii
Given an array of meeting time intervals consisting of start and end times
[[s1,e1],[s2,e2],...] (si < ei),
find the minimum number of conference rooms required.
Example1
Input: intervals = [[0,30],[5,10],[15,20]]
Output: 2
Explanation:
We need two meeting rooms
room1: (0,30)
room2: (5,10),(15,20)
Example2
Input: intervals = [[2,7]]
Output: 1
Explanation:
Only need one meeting room
"""
class SolutionMinHeap(object):
    """Min-heap solution for Meeting Rooms II.

    @param intervals: an array of meeting time intervals
    @return: the minimum number of conference rooms required
    Time complexity: O(n*logn).
    Space complexity: O(n).
    """
    def minMeetingRooms(self, intervals):
        import heapq
        if not intervals or not intervals[0]:
            return 0
        # Process meetings in order of start time.
        intervals.sort()
        # Heap of end times: one entry per currently-occupied room.
        busy_rooms = [intervals[0][1]]
        for start, end in intervals[1:]:
            # The earliest-ending room has freed up, so reuse it.
            if busy_rooms[0] <= start:
                heapq.heappop(busy_rooms)
            heapq.heappush(busy_rooms, end)
        # Every surviving heap entry is a room that was needed concurrently.
        return len(busy_rooms)
def main():
    """Smoke-test SolutionMinHeap on the docstring examples."""
    # BUG FIX: the bare `print x` statement is Python-2-only syntax; the
    # parenthesized call form behaves identically on Python 2 and 3.
    # Output: 2.
    intervals = [[0, 30], [5, 10], [15, 20]]
    print(SolutionMinHeap().minMeetingRooms(intervals))
    # Output: 1.
    intervals = [[2, 7]]
    print(SolutionMinHeap().minMeetingRooms(intervals))


if __name__ == '__main__':
    main()
Revision: clarified comments.
"""Leetcode 253. Meeting Rooms II (Premium)
Medium
URL: https://leetcode.com/problems/meeting-rooms-ii
Given an array of meeting time intervals consisting of start and end times
[[s1,e1],[s2,e2],...] (si < ei),
find the minimum number of conference rooms required.
Example1
Input: intervals = [[0,30],[5,10],[15,20]]
Output: 2
Explanation:
We need two meeting rooms
room1: (0,30)
room2: (5,10),(15,20)
Example2
Input: intervals = [[7, 10], [2, 4]]
Output: 1
Explanation:
Only need one meeting room
"""
class SolutionMinHeap(object):
    """Min-heap solution for Meeting Rooms II.

    @param intervals: an array of meeting time intervals
    @return: the minimum number of conference rooms required
    Time complexity: O(n*logn).
    Space complexity: O(n).
    """
    def minMeetingRooms(self, intervals):
        import heapq
        if not intervals or not intervals[0]:
            return 0
        # Visit the meetings ordered by start time.
        intervals.sort()
        it = iter(intervals)
        # Min-heap of end times, seeded with the first meeting's end.
        rooms = [next(it)[1]]
        for start, end in it:
            if rooms[0] <= start:
                # Earliest room is free again: hand it to this meeting
                # (pop the old end time and push the new one in one step).
                heapq.heapreplace(rooms, end)
            else:
                # No room available; open a new one.
                heapq.heappush(rooms, end)
        return len(rooms)
def main():
    """Smoke-test SolutionMinHeap with the documented examples.

    Parenthesized print works on both Python 2 and 3; the original
    `print x` statement fails to parse under Python 3.
    """
    # Expected output: 2.
    intervals = [[0, 30], [5, 10], [15, 20]]
    print(SolutionMinHeap().minMeetingRooms(intervals))
    # Expected output: 1.
    intervals = [[7, 10], [2, 4]]
    print(SolutionMinHeap().minMeetingRooms(intervals))


if __name__ == '__main__':
    main()
|
# coding: utf-8
from __future__ import unicode_literals, division, print_function
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio.qadapters import *
from pymatgen.io.abinitio.qadapters import AbstractQueueAdapter
class QadapterTest(PymatgenTest):

    def test_base(self):
        """
        Simple unit tests for Qadapter subclasses.
        A more complete coverage would require integration testing.
        """
        from collections import OrderedDict

        sub_classes = AbstractQueueAdapter.__subclasses__()

        modules = ["intel/compilerpro/13.0.1.117",
                   "fftw3/intel/3.3",
                   ]

        # Use an ordered mapping so the generated script text is deterministic:
        # plain-dict iteration order is not stable under Python 3 hash
        # randomization, which would make the pickle round-trip comparison
        # at the end of this test flaky.
        shell_env = OrderedDict(
            [("PATH", "/user/abinit-7.4.3-public/tmp_intel13/src/98_main/:/user/bin:$PATH"),
             ("LD_LIBRARY_PATH", "/NAPS/intel13/lib:$LD_LIBRARY_PATH")])

        mpi_runner = MpiRunner("mpirun")

        # Test if we can instantiate the concrete classes with the abc protocol.
        for subc in sub_classes:
            # Make sure we have registered the class in qadapter_class.
            cls = qadapter_class(subc.QTYPE)
            self.assertTrue(cls == subc)

            # Create the adapter.
            qad = cls(qparams=None, setup=None, modules=modules, shell_env=shell_env, omp_env=None,
                      pre_run=None, post_run=None, mpi_runner=mpi_runner)

            # Test the programmatic interface used to change job parameters.
            self.assertFalse(qad.has_omp)
            self.assertTrue(qad.has_mpirun)
            qad.set_mpi_ncpus(2)
            self.assertTrue(qad.mpi_ncpus == 2)

            # Test the creation of the script.
            script = qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path",
                                        stdin="STDIN", stdout="STDOUT", stderr="STDERR")

            # Test whether qad can be serialized with Pickle.
            deserialized_qads = self.serialize_with_pickle(qad, test_eq=False)

            for new_qad in deserialized_qads:
                new_script = new_qad.get_script_str("job.sh", "/launch/dir", "executable", "qout_path", "qerr_path",
                                                    stdin="STDIN", stdout="STDOUT", stderr="STDERR")
                self.assertEqual(new_script, script)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import unittest
    unittest.main()
Use OrderedDict instead of dict in test_qadapters.py because plain-dict iteration order is non-deterministic under Python 3's randomized hash algorithm.
# coding: utf-8
from __future__ import unicode_literals, division, print_function
from collections import OrderedDict
from pymatgen.util.testing import PymatgenTest
from pymatgen.io.abinitio.qadapters import *
from pymatgen.io.abinitio.qadapters import AbstractQueueAdapter
class QadapterTest(PymatgenTest):

    def test_base(self):
        """Basic sanity checks for every concrete Qadapter subclass.

        Deeper coverage would require integration testing against a real
        resource manager.
        """
        adapter_classes = AbstractQueueAdapter.__subclasses__()

        module_list = [
            "intel/compilerpro/13.0.1.117",
            "fftw3/intel/3.3",
        ]

        # Ordered mapping keeps the generated script reproducible.
        env = OrderedDict([
            ("PATH", "/user/abinit-7.4.3-public/tmp_intel13/src/98_main/:/user/bin:$PATH"),
            ("LD_LIBRARY_PATH", "/NAPS/intel13/lib:$LD_LIBRARY_PATH"),
        ])

        runner = MpiRunner("mpirun")

        for subclass in adapter_classes:
            # The registry must map each QTYPE back to its class.
            registered = qadapter_class(subclass.QTYPE)
            self.assertTrue(registered == subclass)

            # Instantiate the concrete adapter through the abc protocol.
            qad = registered(qparams=None, setup=None, modules=module_list,
                             shell_env=env, omp_env=None, pre_run=None,
                             post_run=None, mpi_runner=runner)

            # Programmatic interface for changing job parameters.
            self.assertFalse(qad.has_omp)
            self.assertTrue(qad.has_mpirun)
            qad.set_mpi_ncpus(2)
            self.assertTrue(qad.mpi_ncpus == 2)

            # Script generation.
            script = qad.get_script_str("job.sh", "/launch/dir", "executable",
                                        "qout_path", "qerr_path",
                                        stdin="STDIN", stdout="STDOUT", stderr="STDERR")

            # Round-trip through pickle and verify the script is unchanged.
            for clone in self.serialize_with_pickle(qad, test_eq=False):
                regenerated = clone.get_script_str("job.sh", "/launch/dir", "executable",
                                                   "qout_path", "qerr_path",
                                                   stdin="STDIN", stdout="STDOUT", stderr="STDERR")
                self.assertEqual(regenerated, script)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    import unittest
    unittest.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.losses import util as tf_losses_util
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

  Example subclass implementation:

  ```python
  class MeanSquaredError(Loss):

    def call(self, y_true, y_pred):
      y_pred = ops.convert_to_tensor_v2(y_pred)
      y_true = math_ops.cast(y_true, y_pred.dtype)
      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error. Please see this custom training
  [tutorial](https://www.tensorflow.org/tutorials/distribute/custom_training)
  for more details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

  ```python
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    """Initializes `Loss` class.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial]
        (https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op.
    """
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name
    # SUM_OVER_BATCH is only allowed in losses managed by `fit` or
    # CannedEstimators.
    self._allow_sum_over_batch_size = False
    self._set_name_scope()

  def _set_name_scope(self):
    """Creates a valid `name_scope` name."""
    if self.name is None:
      self._name_scope = self.__class__.__name__
    elif self.name == '<lambda>':
      self._name_scope = 'lambda'
    else:
      # E.g. '_my_loss' => 'my_loss'
      self._name_scope = self.name.strip('_')

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a
        coefficient for the loss. If a scalar is provided, then the loss is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the total loss for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
        broadcasted to this shape), then each loss element of `y_pred` is scaled
        by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
      shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
      because all loss functions reduce by 1 dimension, usually axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function strip '<>' from the name as it is not
    # accepted in scope name.
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(self._name_scope), graph_ctx:
      losses = self.call(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config dictionary for a `Loss` instance."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`

    Returns:
      Loss values with the shape `[batch_size, d0, .. dN-1]`.
    """
    # BUG FIX: the exception was previously constructed but never raised,
    # so a subclass missing `call()` would silently return None here.
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    if (not self._allow_sum_over_batch_size and
        distribution_strategy_context.has_strategy() and
        (self.reduction == losses_utils.ReductionV2.AUTO or
         self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
          'used with `tf.distribute.Strategy` outside of the built-in training '
          'loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/tutorials/distribute/custom_training'
          ' for more details.')
    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction
class LossFunctionWrapper(Loss):
  """Wraps a plain loss function in the `Loss` class."""

  def __init__(self,
               fn,
               reduction=losses_utils.ReductionV2.AUTO,
               name=None,
               **kwargs):
    """Initializes `LossFunctionWrapper` class.

    Args:
      fn: The loss function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: (Optional) name for the loss.
      **kwargs: The keyword arguments that are passed on to `fn`.
    """
    super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Invokes the wrapped loss function.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    both_tensors = tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true)
    if both_tensors:
      # Align ranks before delegating to the wrapped function.
      y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
          y_pred, y_true)
    return self.fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    # Evaluate tensor/variable kwargs to plain values so the config is
    # serializable.
    config = {
        k: K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
        for k, v in six.iteritems(self._fn_kwargs)
    }
    base_config = super(LossFunctionWrapper, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> mse = tf.keras.losses.MeanSquaredError()
  >>> mse(y_true, y_pred).numpy()
  0.5

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    """Initializes a `MeanSquaredError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'mean_squared_error'.
    """
    super(MeanSquaredError, self).__init__(
        mean_squared_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> mae = tf.keras.losses.MeanAbsoluteError()
  >>> mae(y_true, y_pred).numpy()
  0.5

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    """Initializes a `MeanAbsoluteError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'mean_absolute_error'.
    """
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Standalone usage:

  >>> y_true = [[2., 1.], [2., 3.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError()
  >>> mape(y_true, y_pred).numpy()
  50.

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    """Initializes a `MeanAbsolutePercentageError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'mean_absolute_percentage_error'.
    """
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
  >>> msle(y_true, y_pred).numpy()
  0.240

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    """Initializes a `MeanSquaredLogarithmicError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'mean_squared_logarithmic_error'.
    """
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, reduction=reduction, name=name)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss when there are only two label classes (assumed
  to be 0 and 1). Each example carries a single floating-point value per
  prediction, so both `y_pred` and `y_true` have shape `[batch_size]`.

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> bce = tf.keras.losses.BinaryCrossentropy()
  >>> bce(y_true, y_pred).numpy()
  0.815

  Usage with the `tf.keras` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    """Initializes a `BinaryCrossentropy` instance.

    Args:
      from_logits: Whether to interpret `y_pred` as a tensor of
        [logit](https://en.wikipedia.org/wiki/Logit) values. By default
        `y_pred` is assumed to contain probabilities (values in [0, 1]).
        Note: using `from_logits=True` may be more numerically stable.
      label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
        the loss is computed against a smoothed version of the true labels,
        squeezed towards 0.5; larger values mean heavier smoothing.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: (Optional) Name for the op. Defaults to 'binary_crossentropy'.
    """
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
    # Mirror the constructor argument as a public attribute.
    self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes and labels are provided in a `one_hot` representation. For integer
  labels, use `SparseCategoricalCrossentropy` instead. Both `y_pred` and
  `y_true` have shape `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> cce = tf.keras.losses.CategoricalCrossentropy()
  >>> cce(y_true, y_pred).numpy()
  1.177

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    """Initializes a `CategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, `y_pred` is assumed to encode a probability distribution.
        Note: using `from_logits=True` is more numerically stable.
      label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
        relaxing the confidence on label values; e.g. `label_smoothing=0.2`
        uses `0.1` for label `0` and `0.9` for label `1`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'categorical_crossentropy'.
    """
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes and labels are provided as integers. For `one-hot` labels, use
  `CategoricalCrossentropy` instead. `y_true` has shape `[batch_size]` (one
  integer per example) while `y_pred` has shape `[batch_size, num_classes]`.

  Standalone usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
  >>> scce(y_true, y_pred).numpy()
  1.177

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               reduction=losses_utils.ReductionV2.AUTO,
               name='sparse_categorical_crossentropy'):
    """Initializes a `SparseCategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, `y_pred` is assumed to encode a probability distribution.
        Note: using `from_logits=True` may be more numerically stable.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'sparse_categorical_crossentropy'.
    """
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = maximum(1 - y_true * y_pred, 0)`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided they will be converted to -1 or 1.

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> h = tf.keras.losses.Hinge()
  >>> h(y_true, y_pred).numpy()
  1.3

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Hinge())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='hinge'):
    """Initializes a `Hinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'hinge'.
    """
    super(Hinge, self).__init__(hinge, reduction=reduction, name=name)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = square(maximum(1 - y_true * y_pred, 0))`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided they will be converted to -1 or 1.

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> h = tf.keras.losses.SquaredHinge()
  >>> h(y_true, y_pred).numpy()
  1.86

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='squared_hinge'):
    """Initializes a `SquaredHinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`. When used with `tf.distribute.Strategy`
        outside of built-in training loops, `AUTO` and `SUM_OVER_BATCH_SIZE`
        raise an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'squared_hinge'.
    """
    super(SquaredHinge, self).__init__(
        squared_hinge, reduction=reduction, name=name)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
  """Categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `neg = maximum((1 - y_true) * y_pred)` and
  `pos = sum(y_true * y_pred)`.

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_hinge'):
    """Initializes a `CategoricalHinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
        usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
        `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
        or `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'categorical_hinge'.
    """
    super(CategoricalHinge, self).__init__(
        categorical_hinge, reduction=reduction, name=name)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
  """Poisson loss: `loss = y_pred - y_true * log(y_pred)`.

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Poisson())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
    """Initializes a `Poisson` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
        usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
        `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
        or `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'poisson'.
    """
    super(Poisson, self).__init__(poisson, reduction=reduction, name=name)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x)) / 2)`, where `x` is the error
  `y_pred - y_true`.

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.LogCosh())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
    """Initializes a `LogCosh` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
        usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
        `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
        or `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'logcosh'.
    """
    super(LogCosh, self).__init__(logcosh, reduction=reduction, name=name)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
  """Kullback-Leibler divergence: `loss = y_true * log(y_true / y_pred)`.

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.KLDivergence())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='kullback_leibler_divergence'):
    """Initializes a `KLDivergence` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
        usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
        `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
        or `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'kullback_leibler_divergence'.
    """
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, reduction=reduction, name=name)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
  """Huber loss between `y_true` and `y_pred`.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Huber())
  ```
  """

  def __init__(self,
               delta=1.0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='huber_loss'):
    """Initializes a `Huber` instance.

    Args:
      delta: A float, the point where the Huber loss function changes from
        quadratic to linear.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
        usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
        `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
        or `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'huber_loss'.
    """
    super(Huber, self).__init__(
        huber_loss, delta=delta, reduction=reduction, name=name)
@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
              'keras.losses.mean_squared_error',
              'keras.losses.mse',
              'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
  """Computes the mean squared error between labels and predictions.

  The squared distance between the inputs is averaged over the last
  dimension: `loss = mean(square(y_true - y_pred), axis=-1)`.

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  # Labels are cast to match the prediction dtype before differencing.
  targets = math_ops.cast(y_true, preds.dtype)
  return K.mean(math_ops.squared_difference(preds, targets), axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
              'keras.metrics.mae',
              'keras.metrics.MAE',
              'keras.losses.mean_absolute_error',
              'keras.losses.mae',
              'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
  """Computes the mean absolute error between labels and predictions.

  `loss = mean(abs(y_true - y_pred), axis=-1)`

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  # Labels are cast to match the prediction dtype before differencing.
  targets = math_ops.cast(y_true, preds.dtype)
  return K.mean(math_ops.abs(preds - targets), axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * mean(abs(y_true - y_pred) / y_true, axis=-1)`

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  targets = math_ops.cast(y_true, preds.dtype)
  # The denominator is clamped to epsilon to guard against division by zero.
  relative_errors = math_ops.abs(
      (targets - preds) / K.maximum(math_ops.abs(targets), K.epsilon()))
  return 100. * K.mean(relative_errors, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  targets = math_ops.cast(y_true, preds.dtype)
  # Values are clamped to epsilon before the log so non-positive inputs
  # cannot produce -inf/NaN.
  log_preds = math_ops.log(K.maximum(preds, K.epsilon()) + 1.)
  log_targets = math_ops.log(K.maximum(targets, K.epsilon()) + 1.)
  return K.mean(math_ops.squared_difference(log_preds, log_targets), axis=-1)
def _maybe_convert_labels(y_true):
  """Converts binary (0/1) labels to -1/1; other labels pass through as-is."""
  zeros = math_ops.equal(y_true, 0)
  ones = math_ops.equal(y_true, 1)
  # Conversion only happens when *every* label is 0 or 1.
  is_binary = math_ops.reduce_all(math_ops.logical_or(zeros, ones))
  # Affine map {0, 1} -> {-1, 1}.
  return smart_cond.smart_cond(
      is_binary, lambda: 2. * y_true - 1., lambda: y_true)
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or
      1. If binary (0 or 1) labels are provided they are converted to -1 or
      1. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  targets = _maybe_convert_labels(math_ops.cast(y_true, preds.dtype))
  margin = math_ops.maximum(1. - targets * preds, 0.)
  return K.mean(math_ops.square(margin), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or
      1. If binary (0 or 1) labels are provided they are converted to -1 or
      1. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  targets = _maybe_convert_labels(math_ops.cast(y_true, preds.dtype))
  return K.mean(math_ops.maximum(1. - targets * preds, 0.), axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`

  Usage:

  >>> y_true = np.random.randint(0, 3, size=(2,))
  >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> pos = np.sum(y_true * y_pred, axis=-1)
  >>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
  >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))

  Args:
    y_true: The ground truth values. `y_true` values are expected to be
      one-hot encoded class labels (e.g. produced by
      `tf.keras.utils.to_categorical`), as in the usage example above. No
      label conversion is performed.
    y_pred: The predicted values.

  Returns:
    Categorical hinge loss values.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Score assigned to the true class vs. the best-scoring wrong class.
  pos = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  neg = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., neg - pos + 1.)
def huber_loss(y_true, y_pred, delta=1.0):
  """Computes the Huber loss value.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  abs_diff = math_ops.abs(math_ops.subtract(y_pred, y_true))
  # Split |error| into a quadratic part (capped at delta) and a linear tail.
  quadratic_part = math_ops.minimum(abs_diff, delta)
  linear_part = math_ops.subtract(abs_diff, quadratic_part)
  half = ops.convert_to_tensor_v2(0.5, dtype=quadratic_part.dtype)
  per_element = math_ops.add(
      math_ops.multiply(half,
                        math_ops.multiply(quadratic_part, quadratic_part)),
      math_ops.multiply(delta, linear_part))
  return K.mean(per_element, axis=-1)
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Usage:

  >>> y_true = np.random.random(size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.logcosh(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> x = y_pred - y_true
  >>> assert np.allclose(
  ...     loss.numpy(),
  ...     np.mean(x + np.log(np.exp(-2. * x) + 1.) - np.log(2.), axis=-1),
  ...     atol=1e-5)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)

  def _logcosh(x):
    # Numerically stable form: log(cosh(x)) = x + softplus(-2x) - log(2).
    return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
                             y_pred,
                             from_logits=False,
                             label_smoothing=0):
  """Computes the categorical crossentropy loss.

  Args:
    y_true: Tensor of one-hot true targets.
    y_pred: Tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, `y_pred` is assumed to encode a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Redistribute `smoothing` probability mass uniformly over the classes.
    num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
    return y_true * (1.0 - smoothing) + (smoothing / num_classes)

  y_true = smart_cond.smart_cond(smoothing, _smooth_labels, lambda: y_true)
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  """Computes the sparse categorical crossentropy loss.

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, `y_pred` is assumed to encode a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the entropy is
      computed.

  Returns:
    Sparse categorical crossentropy loss value.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  labels = math_ops.cast(y_true, preds.dtype)
  return K.sparse_categorical_crossentropy(
      labels, preds, from_logits=from_logits, axis=axis)
@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
  """Computes the binary crossentropy loss.

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By
      default, `y_pred` is assumed to encode a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())

  def _smooth_labels():
    # Squeeze labels toward 0.5 by the smoothing amount.
    return y_true * (1.0 - smoothing) + 0.5 * smoothing

  y_true = smart_cond.smart_cond(smoothing, _smooth_labels, lambda: y_true)
  return K.mean(
      K.binary_crossentropy(y_true, y_pred, from_logits=from_logits), axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
              'keras.metrics.kld',
              'keras.metrics.KLD',
              'keras.losses.kullback_leibler_divergence',
              'keras.losses.kld',
              'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with loss.

  Raises:
    TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clip both distributions into [epsilon, 1] so the log stays finite.
  p = K.clip(y_true, K.epsilon(), 1)
  q = K.clip(y_pred, K.epsilon(), 1)
  return math_ops.reduce_sum(p * math_ops.log(p / q), axis=-1)
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
  """Computes the Poisson loss between y_true and y_pred.

  The Poisson loss is the mean of the elements of the `Tensor`
  `y_pred - y_true * log(y_pred)`.

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.

  Raises:
    InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
  """
  preds = ops.convert_to_tensor_v2(y_pred)
  targets = math_ops.cast(y_true, preds.dtype)
  # Epsilon keeps the log finite when a prediction is exactly zero.
  return K.mean(preds - targets * math_ops.log(preds + K.epsilon()), axis=-1)
@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
def cosine_similarity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a negative quantity between -1 and 0, where 0 indicates
  orthogonality and values closer to -1 indicate greater similarity. This
  makes it usable as a loss function in a setting where you try to maximize
  the proximity between predictions and targets. If either `y_true` or
  `y_pred` is a zero vector, cosine similarity will be 0 regardless of the
  proximity between predictions and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  unit_true = nn.l2_normalize(y_true, axis=axis)
  unit_pred = nn.l2_normalize(y_pred, axis=axis)
  # Negated so that minimizing the loss maximizes the similarity.
  return -math_ops.reduce_sum(unit_true * unit_pred, axis=axis)
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a negative quantity between -1 and 0, where 0 indicates
  orthogonality and values closer to -1 indicate greater similarity. This
  makes it usable as a loss function in a setting where you try to maximize
  the proximity between predictions and targets. If either `y_true` or
  `y_pred` is a zero vector, cosine similarity will be 0 regardless of the
  proximity between predictions and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
      the loss. Defaults to `AUTO`, meaning the reduction is chosen by the
      usage context (almost always `SUM_OVER_BATCH_SIZE`). When used with
      `tf.distribute.Strategy` outside of built-in training loops, `AUTO`
      or `SUM_OVER_BATCH_SIZE` raises an error; see
      https://www.tensorflow.org/tutorials/distribute/custom_training.
    name: Optional name for the op.
  """

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    super(CosineSimilarity, self).__init__(
        cosine_similarity, axis=axis, reduction=reduction, name=name)
# Aliases: each loss function is also exported under its common abbreviation
# in lower- and upper-case form (e.g. `mse`/`MSE` for mean_squared_error).
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
def is_categorical_crossentropy(loss):
  """Returns whether `loss` refers to the categorical crossentropy loss.

  Accepts a `CategoricalCrossentropy` instance, a `LossFunctionWrapper`
  around the `categorical_crossentropy` function, the function itself (or
  anything with that `__name__`), or the string identifier.
  """
  if isinstance(loss, CategoricalCrossentropy):
    return True
  if (isinstance(loss, LossFunctionWrapper) and
      loss.fn == categorical_crossentropy):
    return True
  if (hasattr(loss, '__name__') and
      loss.__name__ == 'categorical_crossentropy'):
    return True
  return loss == 'categorical_crossentropy'
@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes a loss function or `Loss` instance.

  Args:
    loss: A Keras `Loss` instance or a loss function.

  Returns:
    Loss configuration dictionary.
  """
  # Delegates to the generic Keras object serializer.
  return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Deserializes a serialized loss class/function instance.

  Args:
    name: Loss configuration.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras `Loss` instance or a loss function.
  """
  # Built-in losses are looked up in this module's globals; user-provided
  # ones come from `custom_objects`.
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
  """Retrieves a Keras loss function.

  Args:
    identifier: A loss identifier. One of None or string name of a loss
      function/class or loss configuration dictionary or a loss function or
      a loss class instance.

  Returns:
    A Keras loss function/ `Loss` class instance, or None if `identifier` is
    None.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    # Normalize the name (e.g. unicode on Python 2) before deserializing.
    return deserialize(str(identifier))
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if callable(identifier):
    return identifier
  # Fix: the original passed two arguments to ValueError, which renders the
  # message as a tuple; build a single readable message instead.
  raise ValueError(
      'Could not interpret loss function identifier: {}'.format(identifier))
# Maps loss functions to the dtype their labels should be cast to; the
# sparse crossentropy losses take integer class indices as `y_true`.
# NOTE(review): presumably consumed by training utilities elsewhere to cast
# labels before the loss call — confirm at the call sites.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
Added doc and examples for tf.keras.losses.get
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Built-in loss functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.python.distribute import distribution_strategy_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import losses_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils.generic_utils import deserialize_keras_object
from tensorflow.python.keras.utils.generic_utils import serialize_keras_object
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops.losses import losses_impl
from tensorflow.python.ops.losses import util as tf_losses_util
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls
@keras_export('keras.losses.Loss')
class Loss(object):
  """Loss base class.

  To be implemented by subclasses:
  * `call()`: Contains the logic for loss calculation using `y_true`, `y_pred`.

  Example subclass implementation:

  ```python
  class MeanSquaredError(Loss):

    def call(self, y_true, y_pred):
      y_pred = ops.convert_to_tensor_v2(y_pred)
      y_true = math_ops.cast(y_true, y_pred.dtype)
      return K.mean(math_ops.square(y_pred - y_true), axis=-1)
  ```

  When used with `tf.distribute.Strategy`, outside of built-in training loops
  such as `tf.keras` `compile` and `fit`, please use 'SUM' or 'NONE' reduction
  types, and reduce losses explicitly in your training loop. Using 'AUTO' or
  'SUM_OVER_BATCH_SIZE' will raise an error.

  Please see this custom training [tutorial]
  (https://www.tensorflow.org/tutorials/distribute/custom_training) for more
  details on this.

  You can implement 'SUM_OVER_BATCH_SIZE' using global batch size like:

  ```python
  with strategy.scope():
    loss_obj = tf.keras.losses.CategoricalCrossentropy(
        reduction=tf.keras.losses.Reduction.NONE)
    ....
    loss = (tf.reduce_sum(loss_obj(labels, predictions)) *
            (1. / global_batch_size))
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name=None):
    """Initializes `Loss` class.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        loss. Default value is `AUTO`. `AUTO` indicates that the reduction
        option will be determined by the usage context. For almost all cases
        this defaults to `SUM_OVER_BATCH_SIZE`. When used with
        `tf.distribute.Strategy`, outside of built-in training loops such as
        `tf.keras` `compile` and `fit`, using `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error. Please see this custom training [tutorial]
        (https://www.tensorflow.org/tutorials/distribute/custom_training)
        for more details.
      name: Optional name for the op.
    """
    losses_utils.ReductionV2.validate(reduction)
    self.reduction = reduction
    self.name = name
    # SUM_OVER_BATCH is only allowed in losses managed by `fit` or
    # CannedEstimators.
    self._allow_sum_over_batch_size = False
    self._set_name_scope()

  def _set_name_scope(self):
    """Creates a valid `name_scope` name."""
    if self.name is None:
      self._name_scope = self.__class__.__name__
    elif self.name == '<lambda>':
      # If we are wrapping a lambda function strip '<>' from the name as it
      # is not accepted in scope name.
      self._name_scope = 'lambda'
    else:
      # E.g. '_my_loss' => 'my_loss'
      self._name_scope = self.name.strip('_')

  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a
        coefficient for the loss. If a scalar is provided, then the loss is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the total loss for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
        broadcasted to this shape), then each loss element of `y_pred` is scaled
        by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
      shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
      because all loss functions reduce by 1 dimension, usually axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(self._name_scope), graph_ctx:
      losses = self.call(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())

  @classmethod
  def from_config(cls, config):
    """Instantiates a `Loss` from its config (output of `get_config()`).

    Args:
      config: Output of `get_config()`.

    Returns:
      A `Loss` instance.
    """
    return cls(**config)

  def get_config(self):
    """Returns the config dictionary for a `Loss` instance."""
    return {'reduction': self.reduction, 'name': self.name}

  @abc.abstractmethod
  @doc_controls.for_subclass_implementers
  def call(self, y_true, y_pred):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`, except
        sparse loss functions such as sparse categorical crossentropy where
        shape = `[batch_size, d0, .. dN-1]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`

    Returns:
      Loss values with the shape `[batch_size, d0, .. dN-1]`.
    """
    # Fix: the original built this exception but never raised it, so an
    # un-overridden `call` silently returned None.
    raise NotImplementedError('Must be implemented in subclasses.')

  def _get_reduction(self):
    """Handles `AUTO` reduction cases and returns the reduction value."""
    if (not self._allow_sum_over_batch_size and
        distribution_strategy_context.has_strategy() and
        (self.reduction == losses_utils.ReductionV2.AUTO or
         self.reduction == losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE)):
      raise ValueError(
          'Please use `tf.keras.losses.Reduction.SUM` or '
          '`tf.keras.losses.Reduction.NONE` for loss reduction when losses are '
          'used with `tf.distribute.Strategy` outside of the built-in training '
          'loops. You can implement '
          '`tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE` using global batch '
          'size like:\n```\nwith strategy.scope():\n'
          '    loss_obj = tf.keras.losses.CategoricalCrossentropy('
          'reduction=tf.keras.losses.Reduction.NONE)\n....\n'
          '    loss = tf.reduce_sum(loss_obj(labels, predictions)) * '
          '(1. / global_batch_size)\n```\nPlease see '
          'https://www.tensorflow.org/tutorials/distribute/custom_training'
          ' for more details.')
    if self.reduction == losses_utils.ReductionV2.AUTO:
      return losses_utils.ReductionV2.SUM_OVER_BATCH_SIZE
    return self.reduction
class LossFunctionWrapper(Loss):
  """Wraps a plain loss function in the `Loss` class interface."""

  def __init__(self,
               fn,
               reduction=losses_utils.ReductionV2.AUTO,
               name=None,
               **kwargs):
    """Initializes `LossFunctionWrapper` class.

    Args:
      fn: The loss function to wrap, with signature `fn(y_true, y_pred,
        **kwargs)`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: (Optional) name for the loss.
      **kwargs: Keyword arguments forwarded to `fn` on every call.
    """
    super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
    self.fn = fn
    self._fn_kwargs = kwargs

  def call(self, y_true, y_pred):
    """Invokes the wrapped loss function.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.

    Returns:
      Loss values per sample.
    """
    # Align the two tensors' ranks (squeeze/expand a trailing 1-dim) before
    # handing them to the wrapped function.
    if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
      y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(
          y_pred, y_true)
    return self.fn(y_true, y_pred, **self._fn_kwargs)

  def get_config(self):
    # Tensor-valued kwargs are evaluated to plain Python values so the
    # resulting config is serializable.
    fn_config = {
        k: K.eval(v) if tf_utils.is_tensor_or_variable(v) else v
        for k, v in six.iteritems(self._fn_kwargs)
    }
    config = super(LossFunctionWrapper, self).get_config()
    config.update(fn_config)
    return config
@keras_export('keras.losses.MeanSquaredError')
class MeanSquaredError(LossFunctionWrapper):
  """Computes the mean of squares of errors between labels and predictions.

  `loss = square(y_true - y_pred)`

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError()
  >>> mse(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mse(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mse(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mse = tf.keras.losses.MeanSquaredError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mse(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_error'):
    """Initializes `MeanSquaredError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'mean_squared_error'.
    """
    super(MeanSquaredError, self).__init__(
        mean_squared_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsoluteError')
class MeanAbsoluteError(LossFunctionWrapper):
  """Computes the mean of absolute difference between labels and predictions.

  `loss = abs(y_true - y_pred)`

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError()
  >>> mae(y_true, y_pred).numpy()
  0.5

  >>> # Calling with 'sample_weight'.
  >>> mae(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.25

  >>> # Using 'sum' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mae(y_true, y_pred).numpy()
  1.0

  >>> # Using 'none' reduction type.
  >>> mae = tf.keras.losses.MeanAbsoluteError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mae(y_true, y_pred).numpy()
  array([0.5, 0.5], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsoluteError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_error'):
    """Initializes `MeanAbsoluteError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'mean_absolute_error'.
    """
    super(MeanAbsoluteError, self).__init__(
        mean_absolute_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanAbsolutePercentageError')
class MeanAbsolutePercentageError(LossFunctionWrapper):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * abs(y_true - y_pred) / y_true`

  Usage:

  >>> y_true = [[2., 1.], [2., 3.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError()
  >>> mape(y_true, y_pred).numpy()
  50.

  >>> # Calling with 'sample_weight'.
  >>> mape(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  20.

  >>> # Using 'sum' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> mape(y_true, y_pred).numpy()
  100.

  >>> # Using 'none' reduction type.
  >>> mape = tf.keras.losses.MeanAbsolutePercentageError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> mape(y_true, y_pred).numpy()
  array([25., 75.], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanAbsolutePercentageError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_absolute_percentage_error'):
    """Initializes `MeanAbsolutePercentageError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'mean_absolute_percentage_error'.
    """
    super(MeanAbsolutePercentageError, self).__init__(
        mean_absolute_percentage_error, reduction=reduction, name=name)
@keras_export('keras.losses.MeanSquaredLogarithmicError')
class MeanSquaredLogarithmicError(LossFunctionWrapper):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = square(log(y_true + 1.) - log(y_pred + 1.))`

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [1., 0.]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError()
  >>> msle(y_true, y_pred).numpy()
  0.240

  >>> # Calling with 'sample_weight'.
  >>> msle(y_true, y_pred, sample_weight=[0.7, 0.3]).numpy()
  0.120

  >>> # Using 'sum' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> msle(y_true, y_pred).numpy()
  0.480

  >>> # Using 'none' reduction type.
  >>> msle = tf.keras.losses.MeanSquaredLogarithmicError(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> msle(y_true, y_pred).numpy()
  array([0.240, 0.240], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.MeanSquaredLogarithmicError())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='mean_squared_logarithmic_error'):
    """Initializes `MeanSquaredLogarithmicError` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'mean_squared_logarithmic_error'.
    """
    super(MeanSquaredLogarithmicError, self).__init__(
        mean_squared_logarithmic_error, reduction=reduction, name=name)
@keras_export('keras.losses.BinaryCrossentropy')
class BinaryCrossentropy(LossFunctionWrapper):
  """Computes the cross-entropy loss between true labels and predicted labels.

  Use this cross-entropy loss when there are only two label classes (assumed
  to be 0 and 1). For each example, there should be a single floating-point
  value per prediction.

  In the snippet below, each of the four predictions is a single
  floating-point value; `y_pred` and `y_true` have the same shape.

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy()
  >>> bce(y_true, y_pred).numpy()
  0.815

  >>> # Calling with 'sample_weight'.
  >>> bce(y_true, y_pred, sample_weight=[1, 0]).numpy()
  0.458

  >>> # Using 'sum' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> bce(y_true, y_pred).numpy()
  1.630

  >>> # Using 'none' reduction type.
  >>> bce = tf.keras.losses.BinaryCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> bce(y_true, y_pred).numpy()
  array([0.916 , 0.714], dtype=float32)

  Usage with the `tf.keras` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.BinaryCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='binary_crossentropy'):
    """Initializes `BinaryCrossentropy` instance.

    Args:
      from_logits: Whether to interpret `y_pred` as a tensor of
        [logit](https://en.wikipedia.org/wiki/Logit) values. By default, we
        assume that `y_pred` contains probabilities (i.e., values in [0, 1]).
        **Note - Using from_logits=True may be more numerically stable.
      label_smoothing: Float in [0, 1]. When 0, no smoothing occurs. When > 0,
        the loss is computed against a smoothed version of the true labels,
        where smoothing squeezes the labels towards 0.5. Larger values mean
        heavier smoothing.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: (Optional) Name for the op. Defaults to 'binary_crossentropy'.
    """
    super(BinaryCrossentropy, self).__init__(
        binary_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
    # Exposed so callers can inspect how `y_pred` is interpreted.
    self.from_logits = from_logits
@keras_export('keras.losses.CategoricalCrossentropy')
class CategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. Labels are expected in a `one_hot` representation; to pass labels
  as integers, use `SparseCategoricalCrossentropy` instead. There should be
  `# classes` floating point values per feature.

  In the snippet below, there are `# classes` floating point values per
  example; both `y_pred` and `y_true` have shape `[batch_size, num_classes]`.

  Usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy()
  >>> cce(y_true, y_pred).numpy()
  1.177

  >>> # Calling with 'sample_weight'.
  >>> cce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
  0.814

  >>> # Using 'sum' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> cce(y_true, y_pred).numpy()
  2.354

  >>> # Using 'none' reduction type.
  >>> cce = tf.keras.losses.CategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> cce(y_true, y_pred).numpy()
  array([0.0513, 2.303], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               label_smoothing=0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_crossentropy'):
    """Initializes `CategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, we assume that `y_pred` encodes a probability distribution.
        **Note - Using from_logits=True is more numerically stable.**
      label_smoothing: Float in [0, 1]. When > 0, label values are smoothed,
        meaning the confidence on label values are relaxed. e.g.
        `label_smoothing=0.2` means that we will use a value of `0.1` for
        label `0` and `0.9` for label `1`.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'categorical_crossentropy'.
    """
    super(CategoricalCrossentropy, self).__init__(
        categorical_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits,
        label_smoothing=label_smoothing)
@keras_export('keras.losses.SparseCategoricalCrossentropy')
class SparseCategoricalCrossentropy(LossFunctionWrapper):
  """Computes the crossentropy loss between the labels and predictions.

  Use this crossentropy loss function when there are two or more label
  classes. Labels are expected as integers; to pass labels in `one-hot`
  representation, use `CategoricalCrossentropy` instead. There should be
  `# classes` floating point values per feature for `y_pred` and a single
  floating point value per feature for `y_true`.

  In the snippet below, there is a single floating point value per example
  for `y_true` and `# classes` floating point values per example for
  `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of `y_pred`
  is `[batch_size, num_classes]`.

  Usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy()
  >>> scce(y_true, y_pred).numpy()
  1.177

  >>> # Calling with 'sample_weight'.
  >>> scce(y_true, y_pred, sample_weight=tf.constant([0.3, 0.7])).numpy()
  0.814

  >>> # Using 'sum' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> scce(y_true, y_pred).numpy()
  2.354

  >>> # Using 'none' reduction type.
  >>> scce = tf.keras.losses.SparseCategoricalCrossentropy(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> scce(y_true, y_pred).numpy()
  array([0.0513, 2.303], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SparseCategoricalCrossentropy())
  ```
  """

  def __init__(self,
               from_logits=False,
               reduction=losses_utils.ReductionV2.AUTO,
               name='sparse_categorical_crossentropy'):
    """Initializes `SparseCategoricalCrossentropy` instance.

    Args:
      from_logits: Whether `y_pred` is expected to be a logits tensor. By
        default, we assume that `y_pred` encodes a probability distribution.
        **Note - Using from_logits=True may be more numerically stable.
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to
        'sparse_categorical_crossentropy'.
    """
    super(SparseCategoricalCrossentropy, self).__init__(
        sparse_categorical_crossentropy,
        reduction=reduction,
        name=name,
        from_logits=from_logits)
@keras_export('keras.losses.Hinge')
class Hinge(LossFunctionWrapper):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = maximum(1 - y_true * y_pred, 0)`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided they will be converted to -1 or 1.

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> h = tf.keras.losses.Hinge()
  >>> h(y_true, y_pred).numpy()
  1.3

  >>> # Calling with 'sample_weight'.
  >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
  0.55

  >>> # Using 'sum' reduction type.
  >>> h = tf.keras.losses.Hinge(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> h(y_true, y_pred).numpy()
  2.6

  >>> # Using 'none' reduction type.
  >>> h = tf.keras.losses.Hinge(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> h(y_true, y_pred).numpy()
  array([1.1, 1.5], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Hinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='hinge'):
    """Initializes `Hinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'hinge'.
    """
    super(Hinge, self).__init__(hinge, reduction=reduction, name=name)
@keras_export('keras.losses.SquaredHinge')
class SquaredHinge(LossFunctionWrapper):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = square(maximum(1 - y_true * y_pred, 0))`

  `y_true` values are expected to be -1 or 1. If binary (0 or 1) labels are
  provided they will be converted to -1 or 1.

  Usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> h = tf.keras.losses.SquaredHinge()
  >>> h(y_true, y_pred).numpy()
  1.86

  >>> # Calling with 'sample_weight'.
  >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
  0.73

  >>> # Using 'sum' reduction type.
  >>> h = tf.keras.losses.SquaredHinge(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> h(y_true, y_pred).numpy()
  3.72

  >>> # Using 'none' reduction type.
  >>> h = tf.keras.losses.SquaredHinge(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> h(y_true, y_pred).numpy()
  array([1.46, 2.26], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.SquaredHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='squared_hinge'):
    """Initializes `SquaredHinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'squared_hinge'.
    """
    super(SquaredHinge, self).__init__(
        squared_hinge, reduction=reduction, name=name)
@keras_export('keras.losses.CategoricalHinge')
class CategoricalHinge(LossFunctionWrapper):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)`
  where `neg=maximum((1-y_true)*y_pred) and pos=sum(y_true*y_pred)`

  Usage:

  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> # Using 'auto'/'sum_over_batch_size' reduction type.
  >>> h = tf.keras.losses.CategoricalHinge()
  >>> h(y_true, y_pred).numpy()
  1.4

  >>> # Calling with 'sample_weight'.
  >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
  0.6

  >>> # Using 'sum' reduction type.
  >>> h = tf.keras.losses.CategoricalHinge(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> h(y_true, y_pred).numpy()
  2.8

  >>> # Using 'none' reduction type.
  >>> h = tf.keras.losses.CategoricalHinge(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> h(y_true, y_pred).numpy()
  array([1.2, 1.6], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CategoricalHinge())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='categorical_hinge'):
    """Initializes `CategoricalHinge` instance.

    Args:
      reduction: (Optional) Type of `tf.keras.losses.Reduction` to apply to
        the loss. Defaults to `AUTO`, which lets the usage context pick the
        reduction (almost always `SUM_OVER_BATCH_SIZE`). With
        `tf.distribute.Strategy` outside built-in training loops, `AUTO` or
        `SUM_OVER_BATCH_SIZE` raises an error; see
        https://www.tensorflow.org/tutorials/distribute/custom_training.
      name: Optional name for the op. Defaults to 'categorical_hinge'.
    """
    super(CategoricalHinge, self).__init__(
        categorical_hinge, reduction=reduction, name=name)
@keras_export('keras.losses.Poisson')
class Poisson(LossFunctionWrapper):
  """Computes the Poisson loss between `y_true` and `y_pred`.

  `loss = y_pred - y_true * log(y_pred)`

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [0., 0.]]
  >>> p = tf.keras.losses.Poisson()
  >>> p(y_true, y_pred).numpy()
  0.5
  >>> p(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  0.4
  >>> p = tf.keras.losses.Poisson(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> p(y_true, y_pred).numpy()
  0.999
  >>> p = tf.keras.losses.Poisson(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> p(y_true, y_pred).numpy()
  array([0.999, 0.], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Poisson())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='poisson'):
    """Creates a `Poisson` loss instance.

    Args:
      reduction: (Optional) `tf.keras.losses.Reduction` applied to the loss.
        Defaults to `AUTO`, which lets the usage context choose the reduction
        (almost always `SUM_OVER_BATCH_SIZE`). With `tf.distribute.Strategy`,
        outside of built-in training loops, `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error.
      name: Optional name for the op. Defaults to 'poisson'.
    """
    # Wrap the functional `poisson` loss defined later in this module.
    super(Poisson, self).__init__(poisson, reduction=reduction, name=name)
@keras_export('keras.losses.LogCosh')
class LogCosh(LossFunctionWrapper):
  """Computes the logarithm of the hyperbolic cosine of the prediction error.

  `logcosh = log((exp(x) + exp(-x))/2)` where x is the error `y_pred - y_true`.

  Standalone usage:

  >>> y_true = [[0., 1.], [0., 0.]]
  >>> y_pred = [[1., 1.], [0., 0.]]
  >>> l = tf.keras.losses.LogCosh()
  >>> l(y_true, y_pred).numpy()
  0.108
  >>> l(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  0.087
  >>> l = tf.keras.losses.LogCosh(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> l(y_true, y_pred).numpy()
  0.217
  >>> l = tf.keras.losses.LogCosh(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> l(y_true, y_pred).numpy()
  array([0.217, 0.], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.LogCosh())
  ```
  """

  def __init__(self, reduction=losses_utils.ReductionV2.AUTO, name='logcosh'):
    """Creates a `LogCosh` loss instance.

    Args:
      reduction: (Optional) `tf.keras.losses.Reduction` applied to the loss.
        Defaults to `AUTO`, which lets the usage context choose the reduction
        (almost always `SUM_OVER_BATCH_SIZE`). With `tf.distribute.Strategy`,
        outside of built-in training loops, `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error.
      name: Optional name for the op. Defaults to 'logcosh'.
    """
    # Wrap the functional `logcosh` loss defined later in this module.
    super(LogCosh, self).__init__(logcosh, reduction=reduction, name=name)
@keras_export('keras.losses.KLDivergence')
class KLDivergence(LossFunctionWrapper):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Standalone usage:

  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> kl = tf.keras.losses.KLDivergence()
  >>> kl(y_true, y_pred).numpy()
  0.458
  >>> kl(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  0.366
  >>> kl = tf.keras.losses.KLDivergence(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> kl(y_true, y_pred).numpy()
  0.916
  >>> kl = tf.keras.losses.KLDivergence(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> kl(y_true, y_pred).numpy()
  array([0.916, -3.08e-06], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.KLDivergence())
  ```
  """

  def __init__(self,
               reduction=losses_utils.ReductionV2.AUTO,
               name='kullback_leibler_divergence'):
    """Creates a `KLDivergence` loss instance.

    Args:
      reduction: (Optional) `tf.keras.losses.Reduction` applied to the loss.
        Defaults to `AUTO`, which lets the usage context choose the reduction
        (almost always `SUM_OVER_BATCH_SIZE`). With `tf.distribute.Strategy`,
        outside of built-in training loops, `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error.
      name: Optional name for the op. Defaults to
        'kullback_leibler_divergence'.
    """
    # Wrap the functional `kullback_leibler_divergence` loss below.
    super(KLDivergence, self).__init__(
        kullback_leibler_divergence, reduction=reduction, name=name)
@keras_export('keras.losses.Huber')
class Huber(LossFunctionWrapper):
  """Computes the Huber loss between `y_true` and `y_pred`.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Standalone usage:

  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> h = tf.keras.losses.Huber()
  >>> h(y_true, y_pred).numpy()
  0.155
  >>> h(y_true, y_pred, sample_weight=[1, 0]).numpy()
  0.09
  >>> h = tf.keras.losses.Huber(
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> h(y_true, y_pred).numpy()
  0.31
  >>> h = tf.keras.losses.Huber(
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> h(y_true, y_pred).numpy()
  array([0.18, 0.13], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.Huber())
  ```
  """

  def __init__(self,
               delta=1.0,
               reduction=losses_utils.ReductionV2.AUTO,
               name='huber_loss'):
    """Creates a `Huber` loss instance.

    Args:
      delta: A float, the point where the Huber loss function changes from a
        quadratic to linear.
      reduction: (Optional) `tf.keras.losses.Reduction` applied to the loss.
        Defaults to `AUTO`, which lets the usage context choose the reduction
        (almost always `SUM_OVER_BATCH_SIZE`). With `tf.distribute.Strategy`,
        outside of built-in training loops, `AUTO` or `SUM_OVER_BATCH_SIZE`
        will raise an error.
      name: Optional name for the op. Defaults to 'huber_loss'.
    """
    # Wrap the functional `huber_loss`; `delta` is forwarded to each call.
    super(Huber, self).__init__(
        huber_loss, reduction=reduction, name=name, delta=delta)
@keras_export('keras.metrics.mean_squared_error',
              'keras.metrics.mse',
              'keras.metrics.MSE',
              'keras.losses.mean_squared_error',
              'keras.losses.mse',
              'keras.losses.MSE')
def mean_squared_error(y_true, y_pred):
  """Computes the mean squared error between labels and predictions.

  The squared distance is averaged over the last axis:
  `loss = mean(square(y_true - y_pred), axis=-1)`

  Usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.mean_squared_error(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> assert np.array_equal(
  ...     loss.numpy(), np.mean(np.square(y_true - y_pred), axis=-1))

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  # Labels follow the prediction dtype so the subtraction is well-defined.
  y_true = math_ops.cast(y_true, y_pred.dtype)
  squared = math_ops.squared_difference(y_pred, y_true)
  return K.mean(squared, axis=-1)
@keras_export('keras.metrics.mean_absolute_error',
              'keras.metrics.mae',
              'keras.metrics.MAE',
              'keras.losses.mean_absolute_error',
              'keras.losses.mae',
              'keras.losses.MAE')
def mean_absolute_error(y_true, y_pred):
  """Computes the mean absolute error between labels and predictions.

  `loss = mean(abs(y_true - y_pred), axis=-1)`

  Usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.mean_absolute_error(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> assert np.array_equal(
  ...     loss.numpy(), np.mean(np.abs(y_true - y_pred), axis=-1))

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean absolute error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  # Labels follow the prediction dtype so the subtraction is well-defined.
  y_true = math_ops.cast(y_true, y_pred.dtype)
  abs_diff = math_ops.abs(y_pred - y_true)
  return K.mean(abs_diff, axis=-1)
@keras_export('keras.metrics.mean_absolute_percentage_error',
              'keras.metrics.mape',
              'keras.metrics.MAPE',
              'keras.losses.mean_absolute_percentage_error',
              'keras.losses.mape',
              'keras.losses.MAPE')
def mean_absolute_percentage_error(y_true, y_pred):
  """Computes the mean absolute percentage error between `y_true` and `y_pred`.

  `loss = 100 * mean(abs(y_true - y_pred) / y_true, axis=-1)`

  Usage:

  >>> y_true = np.random.random(size=(2, 3))
  >>> y_true = np.maximum(y_true, 1e-7)  # Prevent division by zero
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.mean_absolute_percentage_error(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> assert np.array_equal(
  ...     loss.numpy(),
  ...     100. * np.mean(np.abs((y_true - y_pred) / y_true), axis=-1))

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean absolute percentage error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clamp the denominator away from zero to avoid division blow-ups.
  denominator = K.maximum(math_ops.abs(y_true), K.epsilon())
  relative_error = math_ops.abs((y_true - y_pred) / denominator)
  return 100. * K.mean(relative_error, axis=-1)
@keras_export('keras.metrics.mean_squared_logarithmic_error',
              'keras.metrics.msle',
              'keras.metrics.MSLE',
              'keras.losses.mean_squared_logarithmic_error',
              'keras.losses.msle',
              'keras.losses.MSLE')
def mean_squared_logarithmic_error(y_true, y_pred):
  """Computes the mean squared logarithmic error between `y_true` and `y_pred`.

  `loss = mean(square(log(y_true + 1) - log(y_pred + 1)), axis=-1)`

  Usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.mean_squared_logarithmic_error(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> y_true = np.maximum(y_true, 1e-7)
  >>> y_pred = np.maximum(y_pred, 1e-7)
  >>> assert np.array_equal(
  ...     loss.numpy(),
  ...     np.mean(
  ...         np.square(np.log(y_true + 1.) - np.log(y_pred + 1.)), axis=-1))

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Mean squared logarithmic error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clamp inputs to epsilon before the log so non-positive values are safe.
  log_pred = math_ops.log(K.maximum(y_pred, K.epsilon()) + 1.)
  log_true = math_ops.log(K.maximum(y_true, K.epsilon()) + 1.)
  return K.mean(math_ops.squared_difference(log_pred, log_true), axis=-1)
def _maybe_convert_labels(y_true):
  """Maps binary {0, 1} labels to {-1, 1}; other labels pass through."""
  are_zeros = math_ops.equal(y_true, 0)
  are_ones = math_ops.equal(y_true, 1)
  # Only convert when every element is exactly 0 or 1.
  is_binary = math_ops.reduce_all(math_ops.logical_or(are_zeros, are_ones))
  return smart_cond.smart_cond(
      is_binary,
      lambda: 2. * y_true - 1.,  # 0 -> -1, 1 -> 1
      lambda: y_true)
@keras_export('keras.metrics.squared_hinge', 'keras.losses.squared_hinge')
def squared_hinge(y_true, y_pred):
  """Computes the squared hinge loss between `y_true` and `y_pred`.

  `loss = mean(square(maximum(1 - y_true * y_pred, 0)), axis=-1)`

  Usage:

  >>> y_true = np.random.choice([-1, 1], size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.squared_hinge(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> assert np.array_equal(
  ...     loss.numpy(),
  ...     np.mean(np.square(np.maximum(1. - y_true * y_pred, 0.)), axis=-1))

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided we will convert them to -1 or 1.
      shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Squared hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # 0/1 labels are remapped to -1/1 before the margin is computed.
  y_true = _maybe_convert_labels(y_true)
  margin = math_ops.maximum(1. - y_true * y_pred, 0.)
  return K.mean(math_ops.square(margin), axis=-1)
@keras_export('keras.metrics.hinge', 'keras.losses.hinge')
def hinge(y_true, y_pred):
  """Computes the hinge loss between `y_true` and `y_pred`.

  `loss = mean(maximum(1 - y_true * y_pred, 0), axis=-1)`

  Usage:

  >>> y_true = np.random.choice([-1, 1], size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.hinge(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> assert np.array_equal(
  ...     loss.numpy(),
  ...     np.mean(np.maximum(1. - y_true * y_pred, 0.), axis=-1))

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided they will be converted to -1 or 1.
      shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Hinge loss values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # 0/1 labels are remapped to -1/1 before the margin is computed.
  y_true = _maybe_convert_labels(y_true)
  margin = math_ops.maximum(1. - y_true * y_pred, 0.)
  return K.mean(margin, axis=-1)
@keras_export('keras.losses.categorical_hinge')
def categorical_hinge(y_true, y_pred):
  """Computes the categorical hinge loss between `y_true` and `y_pred`.

  `loss = maximum(neg - pos + 1, 0)` where
  `neg = maximum((1 - y_true) * y_pred)` and `pos = sum(y_true * y_pred)`.

  Usage:

  >>> y_true = np.random.randint(0, 3, size=(2,))
  >>> y_true = tf.keras.utils.to_categorical(y_true, num_classes=3)
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.categorical_hinge(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> pos = np.sum(y_true * y_pred, axis=-1)
  >>> neg = np.amax((1. - y_true) * y_pred, axis=-1)
  >>> assert np.array_equal(loss.numpy(), np.maximum(0., neg - pos + 1.))

  Args:
    y_true: The ground truth values. `y_true` values are expected to be -1 or 1.
      If binary (0 or 1) labels are provided they will be converted to -1 or 1.
    y_pred: The predicted values.

  Returns:
    Categorical hinge loss values.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Score of the true class(es) vs. the best score among the other classes.
  positive = math_ops.reduce_sum(y_true * y_pred, axis=-1)
  negative = math_ops.reduce_max((1. - y_true) * y_pred, axis=-1)
  return math_ops.maximum(0., negative - positive + 1.)
def huber_loss(y_true, y_pred, delta=1.0):
  """Computes the Huber loss value.

  For each value x in `error = y_true - y_pred`:

  ```
  loss = 0.5 * x^2                  if |x| <= d
  loss = 0.5 * d^2 + d * (|x| - d)  if |x| > d
  ```

  where d is `delta`. See: https://en.wikipedia.org/wiki/Huber_loss

  Args:
    y_true: tensor of true targets.
    y_pred: tensor of predicted targets.
    delta: A float, the point where the Huber loss function changes from a
      quadratic to linear.

  Returns:
    Tensor with one scalar loss entry per sample.
  """
  y_pred = math_ops.cast(y_pred, dtype=K.floatx())
  y_true = math_ops.cast(y_true, dtype=K.floatx())
  abs_error = math_ops.abs(math_ops.subtract(y_pred, y_true))
  # Split |error| into the quadratic part (capped at delta) and the
  # remaining linear part.
  quadratic = math_ops.minimum(abs_error, delta)
  linear = math_ops.subtract(abs_error, quadratic)
  half = ops.convert_to_tensor_v2(0.5, dtype=quadratic.dtype)
  quadratic_term = math_ops.multiply(
      half, math_ops.multiply(quadratic, quadratic))
  linear_term = math_ops.multiply(delta, linear)
  return K.mean(math_ops.add(quadratic_term, linear_term), axis=-1)
@keras_export('keras.losses.logcosh')
def logcosh(y_true, y_pred):
  """Logarithm of the hyperbolic cosine of the prediction error.

  `log(cosh(x))` is approximately equal to `(x ** 2) / 2` for small `x` and
  to `abs(x) - log(2)` for large `x`. This means that 'logcosh' works mostly
  like the mean squared error, but will not be so strongly affected by the
  occasional wildly incorrect prediction.

  Usage:

  >>> y_true = np.random.random(size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.logcosh(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> x = y_pred - y_true
  >>> assert np.allclose(
  ...     loss.numpy(),
  ...     np.mean(x + np.log(np.exp(-2. * x) + 1.) - np.log(2.), axis=-1),
  ...     atol=1e-5)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Logcosh error values. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)

  def _logcosh(x):
    # Numerically stable identity: log(cosh(x)) = x + softplus(-2x) - log(2).
    return x + nn.softplus(-2. * x) - math_ops.cast(math_ops.log(2.), x.dtype)

  return K.mean(_logcosh(y_pred - y_true), axis=-1)
@keras_export('keras.metrics.categorical_crossentropy',
              'keras.losses.categorical_crossentropy')
def categorical_crossentropy(y_true,
                             y_pred,
                             from_logits=False,
                             label_smoothing=0):
  """Computes the categorical crossentropy loss.

  Usage:

  >>> y_true = [[0, 1, 0], [0, 0, 1]]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.0513, 2.303], dtype=float32)

  Args:
    y_true: Tensor of one-hot true targets.
    y_pred: Tensor of predicted targets.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())

  def _apply_smoothing():
    # Spread `label_smoothing` mass uniformly over the class axis.
    num_classes = math_ops.cast(array_ops.shape(y_true)[1], y_pred.dtype)
    return y_true * (1.0 - label_smoothing) + (label_smoothing / num_classes)

  # smart_cond resolves statically when label_smoothing is a Python constant.
  y_true = smart_cond.smart_cond(label_smoothing,
                                 _apply_smoothing, lambda: y_true)
  return K.categorical_crossentropy(y_true, y_pred, from_logits=from_logits)
@keras_export('keras.metrics.sparse_categorical_crossentropy',
              'keras.losses.sparse_categorical_crossentropy')
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  """Computes the sparse categorical crossentropy loss.

  Usage:

  >>> y_true = [1, 2]
  >>> y_pred = [[0.05, 0.95, 0], [0.1, 0.8, 0.1]]
  >>> loss = tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.0513, 2.303], dtype=float32)

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the entropy is
      computed.

  Returns:
    Sparse categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # The backend op handles both the logits and probability cases.
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, axis=axis, from_logits=from_logits)
@keras_export('keras.metrics.binary_crossentropy',
              'keras.losses.binary_crossentropy')
def binary_crossentropy(y_true, y_pred, from_logits=False, label_smoothing=0):
  """Computes the binary crossentropy loss.

  Usage:

  >>> y_true = [[0, 1], [0, 0]]
  >>> y_pred = [[0.6, 0.4], [0.4, 0.6]]
  >>> loss = tf.keras.losses.binary_crossentropy(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> loss.numpy()
  array([0.916 , 0.714], dtype=float32)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    label_smoothing: Float in [0, 1]. If > `0` then smooth the labels.

  Returns:
    Binary crossentropy loss value. shape = `[batch_size, d0, .. dN-1]`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  label_smoothing = ops.convert_to_tensor_v2(label_smoothing, dtype=K.floatx())

  def _apply_smoothing():
    # Binary smoothing pulls both labels toward 0.5.
    return y_true * (1.0 - label_smoothing) + 0.5 * label_smoothing

  # smart_cond resolves statically when label_smoothing is a Python constant.
  y_true = smart_cond.smart_cond(label_smoothing,
                                 _apply_smoothing, lambda: y_true)
  per_element = K.binary_crossentropy(y_true, y_pred, from_logits=from_logits)
  return K.mean(per_element, axis=-1)
@keras_export('keras.metrics.kullback_leibler_divergence',
              'keras.metrics.kld',
              'keras.metrics.KLD',
              'keras.losses.kullback_leibler_divergence',
              'keras.losses.kld',
              'keras.losses.KLD')
def kullback_leibler_divergence(y_true, y_pred):
  """Computes Kullback-Leibler divergence loss between `y_true` and `y_pred`.

  `loss = y_true * log(y_true / y_pred)`

  See: https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence

  Usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3)).astype(np.float64)
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.kullback_leibler_divergence(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> y_true = tf.keras.backend.clip(y_true, 1e-7, 1)
  >>> y_pred = tf.keras.backend.clip(y_pred, 1e-7, 1)
  >>> assert np.array_equal(
  ...     loss.numpy(), np.sum(y_true * np.log(y_true / y_pred), axis=-1))

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.

  Returns:
    A `Tensor` with loss.

  Raises:
    TypeError: If `y_true` cannot be cast to the `y_pred.dtype`.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Clip both distributions into (epsilon, 1] so the log and the division
  # are well-defined.
  y_true = K.clip(y_true, K.epsilon(), 1)
  y_pred = K.clip(y_pred, K.epsilon(), 1)
  log_ratio = math_ops.log(y_true / y_pred)
  return math_ops.reduce_sum(y_true * log_ratio, axis=-1)
@keras_export('keras.metrics.poisson', 'keras.losses.poisson')
def poisson(y_true, y_pred):
  """Computes the Poisson loss between y_true and y_pred.

  The Poisson loss is the mean of the elements of the `Tensor`
  `y_pred - y_true * log(y_pred)`.

  Usage:

  >>> y_true = np.random.randint(0, 2, size=(2, 3))
  >>> y_pred = np.random.random(size=(2, 3))
  >>> loss = tf.keras.losses.poisson(y_true, y_pred)
  >>> assert loss.shape == (2,)
  >>> y_pred = y_pred + 1e-7
  >>> assert np.allclose(
  ...     loss.numpy(), np.mean(y_pred - y_true * np.log(y_pred), axis=-1),
  ...     atol=1e-5)

  Args:
    y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
    y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.

  Returns:
    Poisson loss value. shape = `[batch_size, d0, .. dN-1]`.

  Raises:
    InvalidArgumentError: If `y_true` and `y_pred` have incompatible shapes.
  """
  y_pred = ops.convert_to_tensor_v2(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  # Epsilon inside the log keeps zero predictions finite.
  log_pred = math_ops.log(y_pred + K.epsilon())
  return K.mean(y_pred - y_true * log_pred, axis=-1)
@keras_export(
    'keras.losses.cosine_similarity',
    v1=[
        'keras.metrics.cosine_proximity',
        'keras.metrics.cosine',
        'keras.losses.cosine_proximity',
        'keras.losses.cosine',
        'keras.losses.cosine_similarity',
    ])
def cosine_similarity(y_true, y_pred, axis=-1):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a negative quantity between -1 and 0, where 0 indicates
  orthogonality and values closer to -1 indicate greater similarity. This makes
  it usable as a loss function in a setting where you try to maximize the
  proximity between predictions and targets. If either `y_true` or `y_pred`
  is a zero vector, cosine similarity will be 0 regardless of the proximity
  between predictions and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Usage:

  >>> y_true = [[0., 1.], [1., 1.]]
  >>> y_pred = [[1., 0.], [1., 1.]]
  >>> loss = tf.keras.losses.cosine_similarity(y_true, y_pred, axis=1)
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # loss = -sum(l2_norm(y_true) . l2_norm(y_pred), axis=1)
  >>> #      = -[0. + 0., 0.5 + 0.5]
  >>> loss.numpy()
  array([-0., -0.999], dtype=float32)

  Args:
    y_true: Tensor of true targets.
    y_pred: Tensor of predicted targets.
    axis: Axis along which to determine similarity.

  Returns:
    Cosine similarity tensor.
  """
  # Unit-normalize both tensors, then take the negated dot product.
  normalized_true = nn.l2_normalize(y_true, axis=axis)
  normalized_pred = nn.l2_normalize(y_pred, axis=axis)
  return -math_ops.reduce_sum(normalized_true * normalized_pred, axis=axis)
@keras_export('keras.losses.CosineSimilarity')
class CosineSimilarity(LossFunctionWrapper):
  """Computes the cosine similarity between labels and predictions.

  Note that it is a negative quantity between -1 and 0, where 0 indicates
  orthogonality and values closer to -1 indicate greater similarity. This makes
  it usable as a loss function in a setting where you try to maximize the
  proximity between predictions and targets. If either `y_true` or `y_pred`
  is a zero vector, cosine similarity will be 0 regardless of the proximity
  between predictions and targets.

  `loss = -sum(l2_norm(y_true) * l2_norm(y_pred))`

  Standalone usage:

  >>> y_true = [[0., 1.], [1., 1.]]
  >>> y_pred = [[1., 0.], [1., 1.]]
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1)
  >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
  >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
  >>> # loss = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
  >>> #      = -((0. + 0.) + (0.5 + 0.5)) / 2
  >>> cosine_loss(y_true, y_pred).numpy()
  -0.5
  >>> cosine_loss(y_true, y_pred, sample_weight=[0.8, 0.2]).numpy()
  -0.0999
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
  ...     reduction=tf.keras.losses.Reduction.SUM)
  >>> cosine_loss(y_true, y_pred).numpy()
  -0.999
  >>> cosine_loss = tf.keras.losses.CosineSimilarity(axis=1,
  ...     reduction=tf.keras.losses.Reduction.NONE)
  >>> cosine_loss(y_true, y_pred).numpy()
  array([-0., -0.999], dtype=float32)

  Usage with the `compile` API:

  ```python
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', loss=tf.keras.losses.CosineSimilarity(axis=1))
  ```

  Args:
    axis: (Optional) Defaults to -1. The dimension along which the cosine
      similarity is computed.
    reduction: (Optional) `tf.keras.losses.Reduction` applied to the loss.
      Defaults to `AUTO`, which lets the usage context choose the reduction
      (almost all cases use `SUM_OVER_BATCH_SIZE`). With
      `tf.distribute.Strategy`, outside of built-in training loops, `AUTO` or
      `SUM_OVER_BATCH_SIZE` will raise an error.
    name: Optional name for the op.
  """

  def __init__(self,
               axis=-1,
               reduction=losses_utils.ReductionV2.AUTO,
               name='cosine_similarity'):
    # Wrap the functional `cosine_similarity`; `axis` is forwarded per call.
    super(CosineSimilarity, self).__init__(
        cosine_similarity, axis=axis, reduction=reduction, name=name)
# Aliases: lowercase and uppercase shorthand names for the loss functions
# above, so identifiers like 'mse' or 'KLD' resolve via `globals()` in
# `deserialize`/`get`.
bce = BCE = binary_crossentropy
mse = MSE = mean_squared_error
mae = MAE = mean_absolute_error
mape = MAPE = mean_absolute_percentage_error
msle = MSLE = mean_squared_logarithmic_error
kld = KLD = kullback_leibler_divergence
def is_categorical_crossentropy(loss):
  """Returns whether `loss` denotes categorical crossentropy in any form.

  Accepts a `CategoricalCrossentropy` instance, a wrapper around the
  functional loss, the bare function, or its string name.
  """
  if isinstance(loss, CategoricalCrossentropy):
    return True
  if isinstance(loss, LossFunctionWrapper) and (
      loss.fn == categorical_crossentropy):
    return True
  if hasattr(loss, '__name__') and (
      loss.__name__ == 'categorical_crossentropy'):
    return True
  return loss == 'categorical_crossentropy'
@keras_export('keras.losses.serialize')
def serialize(loss):
  """Serializes loss function or `Loss` instance.

  Arguments:
    loss: A Keras `Loss` instance or a loss function.

  Returns:
    Loss configuration dictionary.
  """
  # Thin wrapper: delegates to the generic Keras object serializer.
  return serialize_keras_object(loss)
@keras_export('keras.losses.deserialize')
def deserialize(name, custom_objects=None):
  """Deserializes a serialized loss class/function instance.

  Arguments:
    name: Loss configuration.
    custom_objects: Optional dictionary mapping names (strings) to custom
      objects (classes and functions) to be considered during deserialization.

  Returns:
    A Keras `Loss` instance or a loss function.
  """
  # `globals()` exposes every loss (and alias) defined in this module, so
  # plain string names resolve without registration.
  return deserialize_keras_object(
      name,
      module_objects=globals(),
      custom_objects=custom_objects,
      printable_module_name='loss function')
@keras_export('keras.losses.get')
def get(identifier):
  """Retrieves a Keras loss as a `function`/`Loss` class instance.

  A string identifier resolves to the function or class of that name:

  >>> loss = tf.keras.losses.get("categorical_crossentropy")
  >>> type(loss)
  function
  >>> loss = tf.keras.losses.get("CategoricalCrossentropy")
  >>> type(loss)
  tensorflow.python.keras.losses.CategoricalCrossentropy

  A dict with `class_name` and `config` builds a configured instance:

  >>> identifier = {"class_name": "CategoricalCrossentropy",
  ...               "config": {"from_logits": True}}
  >>> loss = tf.keras.losses.get(identifier)
  >>> type(loss)
  tensorflow.python.keras.losses.CategoricalCrossentropy

  Arguments:
    identifier: A loss identifier. One of None or string name of a loss
      function/class or loss configuration dictionary or a loss function or
      a loss class instance.

  Returns:
    A Keras loss as a `function`/ `Loss` class instance.

  Raises:
    ValueError: If `identifier` cannot be interpreted.
  """
  if identifier is None:
    return None
  if isinstance(identifier, six.string_types):
    # Normalize unicode/bytes names to str before lookup.
    return deserialize(str(identifier))
  if isinstance(identifier, dict):
    return deserialize(identifier)
  if callable(identifier):
    # Already a loss function or `Loss` instance; pass through.
    return identifier
  raise ValueError('Could not interpret '
                   'loss function identifier:', identifier)
# Losses whose label tensors must be integer class indices rather than
# one-hot floats; presumably consumed by training utilities to cast labels
# before invoking the loss — confirm against callers.
LABEL_DTYPES_FOR_LOSSES = {
    losses_impl.sparse_softmax_cross_entropy: 'int32',
    sparse_categorical_crossentropy: 'int32'
}
|
import pandas as pd
import models.common.enums as enums
import models.common.suppliers as mcs
import models.common.overview as mco
import models.common.personnel as mcp
import models.common.match as mcm
import models.common.events as mce
import models.club as mc
from .workflows import WorkflowBase
class MarcottiLoad(WorkflowBase):
    """
    Load transformed data into database.

    Each public method receives a pandas DataFrame of transformed rows,
    inserts the rows that do not already exist, and commits.  Rows that
    carry a ``remote_id`` also get a supplier-keyed mapping record so the
    local primary key can be looked up from the supplier's identifier.
    """

    def record_exists(self, model, **conditions):
        """Return True if at least one `model` row matches `conditions`."""
        return self.session.query(model).filter_by(**conditions).count() != 0

    def suppliers(self, data_frame):
        """Insert supplier records not already present (matched on name)."""
        supplier_records = [mcs.Suppliers(**data_row) for idx, data_row in data_frame.iterrows()
                            if not self.record_exists(mcs.Suppliers, name=data_row['name'])]
        self.session.add_all(supplier_records)
        self.session.commit()

    def years(self, data_frame):
        """Insert year records not already present (matched on yr)."""
        year_records = [mco.Years(**data_row) for idx, data_row in data_frame.iterrows()
                        if not self.record_exists(mco.Years, yr=data_row['yr'])]
        self.session.add_all(year_records)
        self.session.commit()

    def seasons(self, data_frame):
        """Insert season records (rows without 'name') or season ID mappings
        (rows with 'name')."""
        season_records = []
        map_records = []
        for idx, row in data_frame.iterrows():
            if 'name' not in row:
                if row['start_year'] == row['end_year']:
                    # Single-year season: start and end reference the same Years row.
                    yr_obj = self.session.query(mco.Years).filter_by(yr=row['start_year']).one()
                    season_records.append(mco.Seasons(start_year=yr_obj, end_year=yr_obj))
                else:
                    start_yr_obj = self.session.query(mco.Years).filter_by(yr=row['start_year']).one()
                    end_yr_obj = self.session.query(mco.Years).filter_by(yr=row['end_year']).one()
                    season_records.append(mco.Seasons(start_year=start_yr_obj, end_year=end_yr_obj))
                # NOTE(review): add_all re-runs on every season row; the session
                # tolerates duplicate adds, but this looks like it belongs after
                # the loop — confirm.
                self.session.add_all(season_records)
            else:
                if not self.record_exists(mcs.SeasonMap, remote_id=row['remote_id'], supplier_id=self.supplier_id):
                    map_records.append(mcs.SeasonMap(id=self.get_id(mco.Seasons, name=row['name']),
                                                     remote_id=row['remote_id'],
                                                     supplier_id=self.supplier_id))
        self.session.add_all(map_records)
        self.session.commit()

    def countries(self, data_frame):
        """Insert country records (matched on name) plus supplier ID mappings."""
        remote_ids = []
        country_records = []
        fields = ['name', 'code', 'confederation']
        for idx, row in data_frame.iterrows():
            # Only truthy fields are passed so model defaults apply otherwise.
            country_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mco.Countries, name=row['name']):
                country_records.append(mco.Countries(**country_dict))
                remote_ids.append(row['remote_id'])
        self.session.add_all(country_records)
        self.session.commit()
        # Commit first so the new records carry database-assigned IDs.
        map_records = [mcs.CountryMap(id=country_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, country_record in zip(remote_ids, country_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def competitions(self, data_frame):
        """Insert domestic (country_id column) or international (confederation
        column) competitions plus supplier ID mappings."""
        remote_ids = []
        comp_records = []
        for idx, row in data_frame.iterrows():
            if 'country_id' in data_frame.columns:
                fields = ['name', 'level', 'country_id']
                comp_dict = {field: row[field] for field in fields if row[field]}
                if not self.record_exists(mco.DomesticCompetitions, **comp_dict):
                    comp_records.append(mco.DomesticCompetitions(**comp_dict))
                    remote_ids.append(row['remote_id'])
            elif 'confederation' in data_frame.columns:
                fields = ['name', 'level', 'confederation']
                comp_dict = {field: row[field] for field in fields if row[field]}
                if not self.record_exists(mco.InternationalCompetitions, **comp_dict):
                    comp_records.append(mco.InternationalCompetitions(**comp_dict))
                    remote_ids.append(row['remote_id'])
        self.session.add_all(comp_records)
        self.session.commit()
        map_records = [mcs.CompetitionMap(id=comp_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, comp_record in zip(remote_ids, comp_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def clubs(self, data_frame):
        """Insert club records plus supplier ID mappings."""
        remote_ids = []
        club_records = []
        fields = ['short_name', 'name', 'country_id']
        for idx, row in data_frame.iterrows():
            club_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mc.Clubs, **club_dict):
                remote_ids.append(row['remote_id'])
                club_records.append(mc.Clubs(**club_dict))
        self.session.add_all(club_records)
        self.session.commit()
        map_records = [mc.ClubMap(id=club_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, club_record in zip(remote_ids, club_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def venues(self, data_frame):
        """Insert venue records, their initial history rows, and supplier ID
        mappings."""
        remote_ids = []
        venue_records = []
        history_dicts = []
        fields = ['name', 'city', 'region', 'latitude', 'longitude', 'altitude', 'country_id', 'timezone_id']
        history_fields = ['eff_date', 'length', 'width', 'capacity', 'seats', 'surface_id']
        for idx, row in data_frame.iterrows():
            venue_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mco.Venues, **venue_dict):
                venue_records.append(mco.Venues(**venue_dict))
                history_dicts.append({field: row[field] for field in history_fields if row[field]})
                remote_ids.append(row['remote_id'])
        self.session.add_all(venue_records)
        self.session.commit()
        history_records = [mco.VenueHistory(venue_id=venue_record.id, **history_dict)
                           for history_dict, venue_record in zip(history_dicts, venue_records)]
        self.session.add_all(history_records)
        map_records = [mcs.VenueMap(id=venue_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, venue_record in zip(remote_ids, venue_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def surfaces(self, data_frame):
        """Insert playing-surface records not already present."""
        surface_records = [mco.Surfaces(**row) for indx, row in data_frame.iterrows()
                           if not self.record_exists(mco.Surfaces, description=row['description'])]
        self.session.add_all(surface_records)
        self.session.commit()

    def timezones(self, data_frame):
        """Insert timezone records not already present."""
        tz_records = [mco.Timezones(**row) for indx, row in data_frame.iterrows()
                      if not self.record_exists(mco.Timezones, name=row['name'])]
        self.session.add_all(tz_records)
        self.session.commit()

    def players(self, data_frame):
        """Insert player records plus supplier mappings for players and,
        where given, their countries."""
        player_records = []
        remote_countryids = []
        remote_ids = []
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id', 'position_id']
        for indx, row in data_frame.iterrows():
            player_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mcp.Players, **player_dict):
                remote_ids.append(row['remote_id'])
                remote_countryids.append(row.get('remote_country_id', None))
                player_records.append(mcp.Players(**player_dict))
        self.session.add_all(player_records)
        self.session.commit()
        map_records = [mcs.PlayerMap(id=player_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, player_record in zip(remote_ids, player_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()
        # Also map the supplier's country IDs discovered on player rows.
        for remote_id, player_record in zip(remote_countryids, player_records):
            if remote_id and not self.record_exists(
                    mcs.CountryMap, remote_id=remote_id, supplier_id=self.supplier_id):
                self.session.add(mcs.CountryMap(id=player_record.country_id, remote_id=remote_id,
                                                supplier_id=self.supplier_id))
        self.session.commit()

    def managers(self, data_frame):
        """Insert manager records plus supplier ID mappings."""
        manager_records = []
        remote_ids = []
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id']
        for indx, row in data_frame.iterrows():
            manager_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mcp.Managers, **manager_dict):
                remote_ids.append(row['remote_id'])
                manager_records.append(mcp.Managers(**manager_dict))
        self.session.add_all(manager_records)
        self.session.commit()
        map_records = [mcs.ManagerMap(id=manager_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, manager_record in zip(remote_ids, manager_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def referees(self, data_frame):
        """Insert referee records plus supplier ID mappings."""
        referee_records = []
        remote_ids = []
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id']
        for indx, row in data_frame.iterrows():
            referee_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mcp.Referees, **referee_dict):
                remote_ids.append(row['remote_id'])
                referee_records.append(mcp.Referees(**referee_dict))
        self.session.add_all(referee_records)
        self.session.commit()
        map_records = [mcs.RefereeMap(id=referee_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, referee_record in zip(remote_ids, referee_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def positions(self, data_frame):
        """Insert position records, or supplier position mappings when the row
        carries a remote_id and a supplier is configured."""
        position_record = []
        for indx, row in data_frame.iterrows():
            if row['remote_id'] and self.supplier_id:
                if not self.record_exists(mcs.PositionMap, remote_id=row['remote_id'], supplier_id=self.supplier_id):
                    position_record.append(mcs.PositionMap(
                        id=self.get_id(mcp.Positions, name=row['name']),
                        remote_id=row['remote_id'], supplier_id=self.supplier_id))
            else:
                if not self.record_exists(mcp.Positions, name=row['name']):
                    position_record.append(mcp.Positions(name=row['name'], type=row['type']))
        self.session.add_all(position_record)
        self.session.commit()

    def league_matches(self, data_frame):
        """Insert league matches (wrapped in MatchConditions) plus supplier ID
        mappings."""
        match_records = []
        remote_ids = []
        fields = ['match_date', 'competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
                  'home_manager_id', 'away_manager_id', 'referee_id', 'attendance', 'matchday']
        condition_fields = ['kickoff_time', 'kickoff_temp', 'kickoff_humidity',
                            'kickoff_weather', 'halftime_weather', 'fulltime_weather']
        for idx, row in data_frame.iterrows():
            match_dict = {field: row[field] for field in fields if row[field]}
            condition_dict = {field: row[field] for field in condition_fields if field in row and row[field]}
            if not self.record_exists(mc.ClubLeagueMatches, **match_dict):
                # The match object is created inline and attached to its conditions record.
                match_records.append(mcm.MatchConditions(match=mc.ClubLeagueMatches(**match_dict), **condition_dict))
                remote_ids.append(row['remote_id'])
        self.session.add_all(match_records)
        self.session.commit()
        map_records = [mcs.MatchMap(id=match_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, match_record in zip(remote_ids, match_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def match_lineups(self, data_frame):
        """Insert club match lineup records not already present."""
        lineup_records = []
        fields = ['match_id', 'player_id', 'team_id', 'position_id', 'is_starting', 'is_captain']
        for idx, row in data_frame.iterrows():
            # `is not None` (not truthiness) so False booleans are kept.
            lineup_dict = {field: row[field] for field in fields if row[field] is not None}
            if not self.record_exists(mc.ClubMatchLineups, **lineup_dict):
                lineup_records.append(mc.ClubMatchLineups(**lineup_dict))
        self.session.add_all(lineup_records)
        self.session.commit()

    def modifiers(self, data_frame):
        """Insert event-modifier records not already present."""
        mod_records = [mce.Modifiers(**row) for indx, row in data_frame.iterrows()
                       if not self.record_exists(mce.Modifiers, type=row['type'])]
        self.session.add_all(mod_records)
        self.session.commit()

    def events(self, data_frame):
        """Insert match events (club events when team_id is present) plus
        supplier ID mappings."""
        event_records = []
        remote_ids = []
        fields = ['timestamp', 'period', 'period_secs', 'x', 'y', 'match_id', 'team_id']
        for idx, row in data_frame.iterrows():
            if idx and idx % 100 == 0:
                # Progress output (Python 2 print statement).
                print "{} events".format(idx)
            event_dict = {field: row[field] for field in fields if field in row and row[field] is not None}
            if 'team_id' not in event_dict:
                if not self.record_exists(mce.MatchEvents, **event_dict):
                    event_records.append(mce.MatchEvents(**event_dict))
                    remote_ids.append(row['remote_id'])
            else:
                if not self.record_exists(mc.ClubMatchEvents, **event_dict):
                    event_records.append(mc.ClubMatchEvents(**event_dict))
                    remote_ids.append(row['remote_id'])
        self.session.add_all(event_records)
        self.session.commit()
        map_records = [mcs.MatchEventMap(id=event_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, event_record in zip(remote_ids, event_records) if remote_id and not
                       self.record_exists(mcs.MatchEventMap, remote_id=remote_id, supplier_id=self.supplier_id)]
        self.session.add_all(map_records)
        self.session.commit()

    def actions(self, data_frame):
        """Insert match actions and link each to its modifier record."""
        action_records = []
        modifier_ids = []
        action_fields = ['event_id', 'type', 'x_end', 'y_end', 'z_end', 'is_success']
        for idx, row in data_frame.iterrows():
            if idx and idx % 100 == 0:
                print "{} actions".format(idx)
            action_dict = {field: row[field] for field in action_fields if field in row and row[field] is not None}
            if row['player_id']:
                # Resolve the player's lineup row for this match.
                action_dict['lineup_id'] = self.get_id(mcm.MatchLineups,
                                                       match_id=row['match_id'], player_id=row['player_id'])
            if row['modifier_type']:
                try:
                    modifier_id = self.get_id(mce.Modifiers,
                                              type=enums.ModifierType.from_string(row['modifier_type']))
                except ValueError as ex:
                    # Dump the offending row before propagating.
                    print row
                    raise ex
            else:
                modifier_id = None
            if not self.record_exists(mce.MatchActions, **action_dict):
                action_records.append(mce.MatchActions(**action_dict))
                modifier_ids.append(modifier_id)
        self.session.add_all(action_records)
        self.session.commit()
        modifier_records = [mce.MatchActionModifiers(action_id=action_record.id, modifier_id=modifier_id)
                            for modifier_id, action_record in zip(modifier_ids, action_records) if not
                            self.record_exists(mce.MatchActionModifiers, action_id=action_record.id,
                                               modifier_id=modifier_id)]
        self.session.add_all(modifier_records)
        self.session.commit()
Use sets in data loading patterns to ensure unique records
import pandas as pd
import models.common.enums as enums
import models.common.suppliers as mcs
import models.common.overview as mco
import models.common.personnel as mcp
import models.common.match as mcm
import models.common.events as mce
import models.club as mc
from .workflows import WorkflowBase
class MarcottiLoad(WorkflowBase):
    """
    Load transformed data into database.

    Each public method receives a pandas DataFrame of transformed rows,
    inserts the rows that do not already exist, and commits.  Rows that
    carry a ``remote_id`` also get a supplier-keyed mapping record.  The
    players/events/actions loaders first collapse the frame into a set of
    field tuples so duplicate rows are inserted only once.
    """

    def record_exists(self, model, **conditions):
        """Return True if at least one `model` row matches `conditions`."""
        return self.session.query(model).filter_by(**conditions).count() != 0

    def suppliers(self, data_frame):
        """Insert supplier records not already present (matched on name)."""
        supplier_records = [mcs.Suppliers(**data_row) for idx, data_row in data_frame.iterrows()
                            if not self.record_exists(mcs.Suppliers, name=data_row['name'])]
        self.session.add_all(supplier_records)
        self.session.commit()

    def years(self, data_frame):
        """Insert year records not already present (matched on yr)."""
        year_records = [mco.Years(**data_row) for idx, data_row in data_frame.iterrows()
                        if not self.record_exists(mco.Years, yr=data_row['yr'])]
        self.session.add_all(year_records)
        self.session.commit()

    def seasons(self, data_frame):
        """Insert season records (rows without 'name') or season ID mappings
        (rows with 'name')."""
        season_records = []
        map_records = []
        for idx, row in data_frame.iterrows():
            if 'name' not in row:
                if row['start_year'] == row['end_year']:
                    # Single-year season: start and end reference the same Years row.
                    yr_obj = self.session.query(mco.Years).filter_by(yr=row['start_year']).one()
                    season_records.append(mco.Seasons(start_year=yr_obj, end_year=yr_obj))
                else:
                    start_yr_obj = self.session.query(mco.Years).filter_by(yr=row['start_year']).one()
                    end_yr_obj = self.session.query(mco.Years).filter_by(yr=row['end_year']).one()
                    season_records.append(mco.Seasons(start_year=start_yr_obj, end_year=end_yr_obj))
                # NOTE(review): add_all re-runs on every season row; the session
                # tolerates duplicate adds, but this looks like it belongs after
                # the loop — confirm.
                self.session.add_all(season_records)
            else:
                if not self.record_exists(mcs.SeasonMap, remote_id=row['remote_id'], supplier_id=self.supplier_id):
                    map_records.append(mcs.SeasonMap(id=self.get_id(mco.Seasons, name=row['name']),
                                                     remote_id=row['remote_id'],
                                                     supplier_id=self.supplier_id))
        self.session.add_all(map_records)
        self.session.commit()

    def countries(self, data_frame):
        """Insert country records (matched on name) plus supplier ID mappings."""
        remote_ids = []
        country_records = []
        fields = ['name', 'code', 'confederation']
        for idx, row in data_frame.iterrows():
            # Only truthy fields are passed so model defaults apply otherwise.
            country_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mco.Countries, name=row['name']):
                country_records.append(mco.Countries(**country_dict))
                remote_ids.append(row['remote_id'])
        self.session.add_all(country_records)
        self.session.commit()
        # Commit first so the new records carry database-assigned IDs.
        map_records = [mcs.CountryMap(id=country_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, country_record in zip(remote_ids, country_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def competitions(self, data_frame):
        """Insert domestic (country_id column) or international (confederation
        column) competitions plus supplier ID mappings."""
        remote_ids = []
        comp_records = []
        for idx, row in data_frame.iterrows():
            if 'country_id' in data_frame.columns:
                fields = ['name', 'level', 'country_id']
                comp_dict = {field: row[field] for field in fields if row[field]}
                if not self.record_exists(mco.DomesticCompetitions, **comp_dict):
                    comp_records.append(mco.DomesticCompetitions(**comp_dict))
                    remote_ids.append(row['remote_id'])
            elif 'confederation' in data_frame.columns:
                fields = ['name', 'level', 'confederation']
                comp_dict = {field: row[field] for field in fields if row[field]}
                if not self.record_exists(mco.InternationalCompetitions, **comp_dict):
                    comp_records.append(mco.InternationalCompetitions(**comp_dict))
                    remote_ids.append(row['remote_id'])
        self.session.add_all(comp_records)
        self.session.commit()
        map_records = [mcs.CompetitionMap(id=comp_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, comp_record in zip(remote_ids, comp_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def clubs(self, data_frame):
        """Insert club records plus supplier ID mappings."""
        remote_ids = []
        club_records = []
        fields = ['short_name', 'name', 'country_id']
        for idx, row in data_frame.iterrows():
            club_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mc.Clubs, **club_dict):
                remote_ids.append(row['remote_id'])
                club_records.append(mc.Clubs(**club_dict))
        self.session.add_all(club_records)
        self.session.commit()
        map_records = [mc.ClubMap(id=club_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, club_record in zip(remote_ids, club_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def venues(self, data_frame):
        """Insert venue records, their initial history rows, and supplier ID
        mappings."""
        remote_ids = []
        venue_records = []
        history_dicts = []
        fields = ['name', 'city', 'region', 'latitude', 'longitude', 'altitude', 'country_id', 'timezone_id']
        history_fields = ['eff_date', 'length', 'width', 'capacity', 'seats', 'surface_id']
        for idx, row in data_frame.iterrows():
            venue_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mco.Venues, **venue_dict):
                venue_records.append(mco.Venues(**venue_dict))
                history_dicts.append({field: row[field] for field in history_fields if row[field]})
                remote_ids.append(row['remote_id'])
        self.session.add_all(venue_records)
        self.session.commit()
        history_records = [mco.VenueHistory(venue_id=venue_record.id, **history_dict)
                           for history_dict, venue_record in zip(history_dicts, venue_records)]
        self.session.add_all(history_records)
        map_records = [mcs.VenueMap(id=venue_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, venue_record in zip(remote_ids, venue_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def surfaces(self, data_frame):
        """Insert playing-surface records not already present."""
        surface_records = [mco.Surfaces(**row) for indx, row in data_frame.iterrows()
                           if not self.record_exists(mco.Surfaces, description=row['description'])]
        self.session.add_all(surface_records)
        self.session.commit()

    def timezones(self, data_frame):
        """Insert timezone records not already present."""
        tz_records = [mco.Timezones(**row) for indx, row in data_frame.iterrows()
                      if not self.record_exists(mco.Timezones, name=row['name'])]
        self.session.add_all(tz_records)
        self.session.commit()

    def players(self, data_frame):
        """Insert player records plus supplier mappings, de-duplicating rows
        via a set of field tuples before hitting the database."""
        player_set = set()
        player_records = []
        remote_countryids = []
        remote_ids = []
        # remote_id / remote_country_id ride along in the tuples and are
        # popped back out before the Players record is constructed.
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id', 'position_id', 'remote_id',
                  'remote_country_id']
        for _, row in data_frame.iterrows():
            player_set.add(tuple([(field, row[field]) for field in fields
                                  if field in row and row[field] is not None]))
        for elements in player_set:
            player_dict = dict(elements)
            remote_id = player_dict.pop('remote_id')
            remote_country_id = player_dict.pop('remote_country_id', None)
            if not self.record_exists(mcp.Players, **player_dict):
                remote_ids.append(remote_id)
                remote_countryids.append(remote_country_id)
                player_records.append(mcp.Players(**player_dict))
        self.session.add_all(player_records)
        self.session.commit()
        map_records = [mcs.PlayerMap(id=player_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, player_record in zip(remote_ids, player_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()
        # Also map the supplier's country IDs discovered on player rows.
        for remote_id, player_record in zip(remote_countryids, player_records):
            if remote_id and not self.record_exists(
                    mcs.CountryMap, remote_id=remote_id, supplier_id=self.supplier_id):
                self.session.add(mcs.CountryMap(id=player_record.country_id, remote_id=remote_id,
                                                supplier_id=self.supplier_id))
        self.session.commit()

    def managers(self, data_frame):
        """Insert manager records plus supplier ID mappings."""
        manager_records = []
        remote_ids = []
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id']
        for indx, row in data_frame.iterrows():
            manager_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mcp.Managers, **manager_dict):
                remote_ids.append(row['remote_id'])
                manager_records.append(mcp.Managers(**manager_dict))
        self.session.add_all(manager_records)
        self.session.commit()
        map_records = [mcs.ManagerMap(id=manager_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, manager_record in zip(remote_ids, manager_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def referees(self, data_frame):
        """Insert referee records plus supplier ID mappings."""
        referee_records = []
        remote_ids = []
        fields = ['known_first_name', 'first_name', 'middle_name', 'last_name', 'second_last_name',
                  'nick_name', 'birth_date', 'order', 'country_id']
        for indx, row in data_frame.iterrows():
            referee_dict = {field: row[field] for field in fields if row[field]}
            if not self.record_exists(mcp.Referees, **referee_dict):
                remote_ids.append(row['remote_id'])
                referee_records.append(mcp.Referees(**referee_dict))
        self.session.add_all(referee_records)
        self.session.commit()
        map_records = [mcs.RefereeMap(id=referee_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, referee_record in zip(remote_ids, referee_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def positions(self, data_frame):
        """Insert position records, or supplier position mappings when the row
        carries a remote_id and a supplier is configured."""
        position_record = []
        for indx, row in data_frame.iterrows():
            if row['remote_id'] and self.supplier_id:
                if not self.record_exists(mcs.PositionMap, remote_id=row['remote_id'], supplier_id=self.supplier_id):
                    position_record.append(mcs.PositionMap(
                        id=self.get_id(mcp.Positions, name=row['name']),
                        remote_id=row['remote_id'], supplier_id=self.supplier_id))
            else:
                if not self.record_exists(mcp.Positions, name=row['name']):
                    position_record.append(mcp.Positions(name=row['name'], type=row['type']))
        self.session.add_all(position_record)
        self.session.commit()

    def league_matches(self, data_frame):
        """Insert league matches (wrapped in MatchConditions) plus supplier ID
        mappings."""
        match_records = []
        remote_ids = []
        fields = ['match_date', 'competition_id', 'season_id', 'venue_id', 'home_team_id', 'away_team_id',
                  'home_manager_id', 'away_manager_id', 'referee_id', 'attendance', 'matchday']
        condition_fields = ['kickoff_time', 'kickoff_temp', 'kickoff_humidity',
                            'kickoff_weather', 'halftime_weather', 'fulltime_weather']
        for idx, row in data_frame.iterrows():
            match_dict = {field: row[field] for field in fields if row[field]}
            condition_dict = {field: row[field] for field in condition_fields if field in row and row[field]}
            if not self.record_exists(mc.ClubLeagueMatches, **match_dict):
                # The match object is created inline and attached to its conditions record.
                match_records.append(mcm.MatchConditions(match=mc.ClubLeagueMatches(**match_dict), **condition_dict))
                remote_ids.append(row['remote_id'])
        self.session.add_all(match_records)
        self.session.commit()
        map_records = [mcs.MatchMap(id=match_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, match_record in zip(remote_ids, match_records) if remote_id]
        self.session.add_all(map_records)
        self.session.commit()

    def match_lineups(self, data_frame):
        """Insert club match lineup records not already present."""
        lineup_records = []
        fields = ['match_id', 'player_id', 'team_id', 'position_id', 'is_starting', 'is_captain']
        for idx, row in data_frame.iterrows():
            # `is not None` (not truthiness) so False booleans are kept.
            lineup_dict = {field: row[field] for field in fields if row[field] is not None}
            if not self.record_exists(mc.ClubMatchLineups, **lineup_dict):
                lineup_records.append(mc.ClubMatchLineups(**lineup_dict))
        self.session.add_all(lineup_records)
        self.session.commit()

    def modifiers(self, data_frame):
        """Insert event-modifier records not already present."""
        mod_records = [mce.Modifiers(**row) for indx, row in data_frame.iterrows()
                       if not self.record_exists(mce.Modifiers, type=row['type'])]
        self.session.add_all(mod_records)
        self.session.commit()

    def events(self, data_frame):
        """Insert match events (club events when team_id is present) plus
        supplier ID mappings, de-duplicating rows via a set of field tuples."""
        event_set = set()
        event_records = []
        remote_ids = []
        fields = ['timestamp', 'period', 'period_secs', 'x', 'y', 'match_id', 'team_id', 'remote_id']
        for _, row in data_frame.iterrows():
            event_set.add(tuple([(field, row[field]) for field in fields
                                 if field in row and row[field] is not None]))
        # Progress output (Python 2 print statements).
        print "{} unique events".format(len(event_set))
        for indx, elements in enumerate(event_set):
            if indx and indx % 100 == 0:
                print "Processing {} events".format(indx)
            event_dict = dict(elements)
            remote_id = event_dict.pop('remote_id')
            if 'team_id' not in event_dict:
                if not self.record_exists(mce.MatchEvents, **event_dict):
                    event_records.append(mce.MatchEvents(**event_dict))
                    remote_ids.append(remote_id)
            else:
                if not self.record_exists(mc.ClubMatchEvents, **event_dict):
                    event_records.append(mc.ClubMatchEvents(**event_dict))
                    remote_ids.append(remote_id)
        self.session.add_all(event_records)
        self.session.commit()
        map_records = [mcs.MatchEventMap(id=event_record.id, remote_id=remote_id, supplier_id=self.supplier_id)
                       for remote_id, event_record in zip(remote_ids, event_records) if remote_id and not
                       self.record_exists(mcs.MatchEventMap, remote_id=remote_id, supplier_id=self.supplier_id)]
        self.session.add_all(map_records)
        self.session.commit()

    def actions(self, data_frame):
        """Insert match actions and link each to its modifier record,
        de-duplicating rows via a set of field tuples."""
        action_set = set()
        action_records = []
        modifier_ids = []
        # match_id / player_id / modifier_type ride along in the tuples and
        # are popped back out before the MatchActions record is constructed.
        action_fields = ['event_id', 'type', 'x_end', 'y_end', 'z_end',
                         'is_success', 'match_id', 'player_id', 'modifier_type']
        for _, row in data_frame.iterrows():
            action_set.add(tuple([(field, row[field]) for field in action_fields
                                  if field in row and row[field] is not None]))
        print "{} unique actions".format(len(action_set))
        for indx, elements in enumerate(action_set):
            if indx and indx % 100 == 0:
                print "Processing {} actions".format(indx)
            action_dict = dict(elements)
            match_id = action_dict.pop('match_id')
            player_id = action_dict.pop('player_id', None)
            modifier_type = action_dict.pop('modifier_type', None)
            if player_id:
                # Resolve the player's lineup row for this match.
                action_dict['lineup_id'] = self.get_id(mcm.MatchLineups, match_id=match_id, player_id=player_id)
            if modifier_type:
                try:
                    modifier_id = self.get_id(mce.Modifiers,
                                              type=enums.ModifierType.from_string(modifier_type))
                except ValueError as ex:
                    # Dump the offending tuple before propagating.
                    print elements
                    raise ex
            else:
                modifier_id = None
            if not self.record_exists(mce.MatchActions, **action_dict):
                action_records.append(mce.MatchActions(**action_dict))
                modifier_ids.append(modifier_id)
        self.session.add_all(action_records)
        self.session.commit()
        modifier_records = [mce.MatchActionModifiers(action_id=action_record.id, modifier_id=modifier_id)
                            for modifier_id, action_record in zip(modifier_ids, action_records) if not
                            self.record_exists(mce.MatchActionModifiers, action_id=action_record.id,
                                               modifier_id=modifier_id)]
        self.session.add_all(modifier_records)
        self.session.commit()
|
'''
@author: matt
'''
import cProfile
import pstats
from .maybeerror import MaybeError
from . import combinators as c
from .combinators import (good, run, Itemizer, basic, position, count, Parser, many0,
seq2R, bind, zero, put, pure, get, seq2L, updateState)
def profile(f, *args, **kwargs):
    """Run f(*args, **kwargs) under cProfile, print time-sorted stats,
    and return the pstats.Stats object.  Writes profile data to prof.txt."""
    scope = {'f': f, 'args': args, 'kwargs': kwargs}
    cProfile.runctx('f(*args, **kwargs)', {}, scope, 'prof.txt')
    report = pstats.Stats('prof.txt')
    report.sort_stats('time').print_stats()
    return report
def _action(xs):
    """Parser action over token sequence `xs`: fail on empty input, otherwise
    set the state to the remainder and yield the first token."""
    if not xs.isEmpty():
        return seq2R(put(xs.rest()), pure(xs.first()))
    return zero
# Three single-token itemizers used for head-to-head profiling:
#  - bas: plain token consumption built from _action above
#  - pos: tracks (line, column) position via combinators._bump
#  - ct:  tracks a simple integer token count
_item = bind(get, _action)
bas = Itemizer(_item)
pos = Itemizer(bind(basic.item,
                    lambda char: seq2R(updateState(lambda s: c._bump(char, s)), pure(char))))
ct = Itemizer(seq2L(basic.item, updateState(lambda x: x + 1)))
def random_nums(size=100000):
    """Return a list of `size` random integers drawn uniformly from [0, 10000].

    Uses `range` instead of the Python-2-only `xrange` so the helper runs
    under both Python 2 and 3; the local import mirrors the original's
    deferred dependency on `random`.
    """
    import random
    return [random.randint(0, 10000) for _ in range(size)]
def test_case(p1, p2, size=100000, state=None):
    """Profile many0(p1) and many0(p2) over the same random token stream,
    returning both stats objects as a pair."""
    stream = random_nums(size)
    first = profile(run, many0(p1), stream, state)
    second = profile(run, many0(p2), stream, state)
    return first, second
def test_basic(size):
    # Compare the library's basic itemizer against the local reimplementation.
    return test_case(basic.item, bas.item, size)
def test_pos(size):
    # Compare position-tracking itemizers; (1, 1) is the initial position state.
    # return test_case(position.item, pos.item, size)
    return test_case(pos.item, position.item, size, (1,1))
def test_count(size):
    # Compare count-tracking itemizers; 1 is the initial counter state.
    return test_case(ct.item, count.item, size, 1)
def test_all(size):
    # Run every profiling comparison at the given input size.
    return [f(size) for f in [test_basic, test_pos, test_count]]
# Smoke checks executed at import time (Python 2 print statements).
print run(position.item, 'abc')
print run(many0(pos.item), range(8))
fix performance tests
'''
@author: matt
'''
from __future__ import print_function
import cProfile
import pstats
from .maybeerror import MaybeError
from .combinators import (run, many0, basic, count, position)
def profile(f, *args, **kwargs):
    """Run f(*args, **kwargs) under cProfile, print time-sorted stats,
    and return the pstats.Stats object.  Writes profile data to prof.txt."""
    scope = {'f': f, 'args': args, 'kwargs': kwargs}
    cProfile.runctx('f(*args, **kwargs)', {}, scope, 'prof.txt')
    report = pstats.Stats('prof.txt')
    report.sort_stats('time').print_stats()
    return report
def random_nums(size=100000):
    """Return a list of `size` random integers drawn uniformly from [0, 10000].

    Uses `range` instead of the Python-2-only `xrange` so the helper runs
    under both Python 2 and 3; the local import mirrors the original's
    deferred dependency on `random`.
    """
    import random
    return [random.randint(0, 10000) for _ in range(size)]
def test_case(p1, p2, size=100000, state1=None, state2=None):
    """Profile many0(p1) with state1 and many0(p2) with state2 over the same
    random token stream, returning both stats objects as a pair."""
    stream = random_nums(size)
    first = profile(run, many0(p1), stream, state1)
    second = profile(run, many0(p2), stream, state2)
    return first, second
def test_basic_position(size):
    # Basic vs. position-tracking itemizer; (1, 1) is the initial position state.
    return test_case(basic.item, position.item, size, None, (1,1))
def test_basic_count(size):
    # Basic vs. count-tracking itemizer; 1 is the initial counter state.
    return test_case(basic.item, count.item, size, None, 1)
def test_count_position(size):
    # Count-tracking vs. position-tracking itemizer, each with its own state.
    return test_case(count.item, position.item, size, 1, (1,1))
def test_all(size):
    # Run every profiling comparison at the given input size.
    return [f(size) for f in [test_basic_position, test_basic_count, test_count_position]]
# Script entry point: profile all itemizer pairs on one large input.
if __name__ == "__main__":
    # print(run(position.item, 'abc'))
    # print(run(many0(position.item), range(8)))
    # for i in range(10):
    #     print(test_all(2 ** i))
    print(test_all(1000000))
|
#!/usr/bin/env python2
from __future__ import print_function
import random
from direct.showbase.ShowBase import ShowBase
import panda3d.bullet as bullet
import panda3d.core as p3d
from citygen import *
import inputmapper
import character
from player_controller import PlayerController
class GameApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
self.input_mapper = inputmapper.InputMapper('input.conf')
self.disableMouse()
wp = p3d.WindowProperties()
wp.set_cursor_hidden(True)
wp.set_mouse_mode(p3d.WindowProperties.MRelative)
self.win.requestProperties(wp)
self.physics_world = bullet.BulletWorld()
self.physics_world.set_gravity(p3d.Vec3(0, 0, -9.81))
self.taskMgr.add(self.update_physics, 'Update Physics')
phydebug = bullet.BulletDebugNode('Physics Debug')
phydebug.show_wireframe(True)
phydebug.show_bounding_boxes(True)
phydebugnp = self.render.attach_new_node(phydebug)
# Uncomment to show debug physics
# phydebugnp.show()
self.physics_world.set_debug_node(phydebug)
self.render.set_shader_auto()
light = p3d.DirectionalLight('sun')
light.set_color(p3d.VBase4(1.0, 0.94, 0.84, 1.0))
light_np = self.render.attach_new_node(light)
light_np.set_hpr(p3d.VBase3(-135.0, -45.0, 0.0))
self.render.set_light(light_np)
light = p3d.DirectionalLight('indirect')
light.set_color(p3d.VBase4(0.1784, 0.2704, 0.3244, 1.0))
light_np = self.render.attach_new_node(light)
light_np.set_hpr(p3d.VBase3(45.0, 45.0, 0.0))
self.render.set_light(light_np)
gen = CityGen()
gen.generate()
city = gen.city
self.import_city(city)
player_spawn = random.choice(city.spawn_points)
print("Spawn player at", player_spawn)
player = character.Character('player')
playernp = self.render.attach_new_node(player)
playernp.set_pos(player_spawn)
self.physics_world.attach_character(player)
self.player_controller = PlayerController(player,
playernp,
self.camera,
self.mouseWatcherNode,
self.win)
self.taskMgr.add(self.player_controller.update, 'Player Controller')
def create_mesh(self, mesh, name, material):
node = p3d.GeomNode(name)
vdata = p3d.GeomVertexData(name,
p3d.GeomVertexFormat.get_v3n3(),
p3d.GeomEnums.UH_stream)
vdata.unclean_set_num_rows(len(mesh.vertices))
vwriter = p3d.GeomVertexWriter(vdata, 'vertex')
nwriter = p3d.GeomVertexWriter(vdata, 'normal')
for vert in mesh.vertices:
vwriter.add_data3(*vert.position)
nwriter.add_data3(*vert.normal)
vwriter = None
nwriter = None
prim = p3d.GeomTriangles(p3d.GeomEnums.UH_stream)
for face in mesh.faces:
prim.add_vertices(*face)
render_state = p3d.RenderState.make_empty()
render_state = render_state.set_attrib(p3d.MaterialAttrib.make(material))
geom = p3d.Geom(vdata)
geom.add_primitive(prim)
node.add_geom(geom, render_state)
return self.render.attach_new_node(node)
def import_city(self, city):
    """Populate the scene and the physics world from a generated city.

    Creates render geometry for every building (random green-ish material),
    a matching Bullet box collider per building, the road mesh, and a ground
    collision plane.
    """
    # Candidate building colors (8-bit RGB), converted to 0..1 floats below.
    colors = []
    colors.append((112, 163, 10))
    colors.append((90, 135, 10))
    colors.append((67, 100, 10))
    building_mats = []
    for color in colors:
        mat = p3d.Material()
        mat.set_shininess(1.0)
        color = [c/255.0 for c in color]
        mat.set_diffuse(p3d.VBase4(color[0], color[1], color[2], 1.0))
        building_mats.append(mat)
    for i, building in enumerate(city.buildings):
        mesh = building.mesh
        name = str(i)
        mat = random.choice(building_mats)
        # Visible geometry, placed at the building's base position.
        np = self.create_mesh(mesh, name, mat)
        np.set_pos(p3d.VBase3(*building.position))
        # Matching physics body: a box shape sized from building.collision.
        node = bullet.BulletRigidBodyNode(name)
        node.add_shape(bullet.BulletBoxShape(p3d.Vec3(building.collision)))
        np = self.render.attach_new_node(node)
        pos = list(building.position)
        # Bullet boxes are centered, so lift by the box half-height.
        pos[2] += building.collision[2]
        np.set_pos(p3d.VBase3(*pos))
        self.physics_world.attach_rigid_body(node)
    road_mat = p3d.Material()
    road_mat.set_shininess(1.0)
    color = [c/255.0 for c in (7, 105, 105)]
    road_mat.set_diffuse(p3d.VBase4(color[0], color[1], color[2], 1.0))
    self.create_mesh(city.road_mesh, "road", road_mat)
    # Infinite ground plane (normal +Z) so nothing falls through the world.
    node = bullet.BulletRigidBodyNode('Ground')
    node.add_shape(bullet.BulletPlaneShape(p3d.Vec3(0, 0, 1), 0))
    self.render.attach_new_node(node)
    self.physics_world.attach_rigid_body(node)
# Tasks
def update_physics(self, task):
    """Per-frame task: advance the Bullet simulation by the frame delta."""
    self.physics_world.do_physics(globalClock.getDt())
    return task.cont
if __name__ == '__main__':
    # Script entry point: construct the game and hand control to Panda3D.
    GameApp().run()
Demons: add some demon portals
#!/usr/bin/env python2
from __future__ import print_function
import random
from direct.showbase.ShowBase import ShowBase
import panda3d.bullet as bullet
import panda3d.core as p3d
from citygen import *
import inputmapper
import character
from player_controller import PlayerController
class GameApp(ShowBase):
    """Main Panda3D application: window/input setup, physics, lighting,
    procedural city generation, player spawn, and demon portals."""

    def __init__(self):
        ShowBase.__init__(self)
        # Input bindings are read from a config file.
        self.input_mapper = inputmapper.InputMapper('input.conf')
        # Disable Panda3D's default camera driver; the PlayerController
        # created below moves the camera instead.
        self.disableMouse()
        # Hide the cursor and capture relative mouse motion (FPS-style look).
        wp = p3d.WindowProperties()
        wp.set_cursor_hidden(True)
        wp.set_mouse_mode(p3d.WindowProperties.MRelative)
        self.win.requestProperties(wp)
        # Bullet physics world with standard Earth gravity along -Z,
        # stepped every frame by the update_physics task.
        self.physics_world = bullet.BulletWorld()
        self.physics_world.set_gravity(p3d.Vec3(0, 0, -9.81))
        self.taskMgr.add(self.update_physics, 'Update Physics')
        phydebug = bullet.BulletDebugNode('Physics Debug')
        phydebug.show_wireframe(True)
        phydebug.show_bounding_boxes(True)
        phydebugnp = self.render.attach_new_node(phydebug)
        # Uncomment to show debug physics
        # phydebugnp.show()
        self.physics_world.set_debug_node(phydebug)
        # Auto shader generation enables per-pixel lighting for the lights below.
        self.render.set_shader_auto()
        # Key light ("sun") plus a dimmer fill light from the opposite side.
        light = p3d.DirectionalLight('sun')
        light.set_color(p3d.VBase4(1.0, 0.94, 0.84, 1.0))
        light_np = self.render.attach_new_node(light)
        light_np.set_hpr(p3d.VBase3(-135.0, -45.0, 0.0))
        self.render.set_light(light_np)
        light = p3d.DirectionalLight('indirect')
        light.set_color(p3d.VBase4(0.1784, 0.2704, 0.3244, 1.0))
        light_np = self.render.attach_new_node(light)
        light_np.set_hpr(p3d.VBase3(45.0, 45.0, 0.0))
        self.render.set_light(light_np)
        # Generate the procedural city and instantiate it into the scene.
        gen = CityGen()
        gen.generate()
        city = gen.city
        self.import_city(city)
        # Player character at a random spawn point, driven by PlayerController.
        player_spawn = random.choice(city.spawn_points)
        player = character.Character('player')
        playernp = self.render.attach_new_node(player)
        playernp.set_pos(player_spawn)
        self.physics_world.attach_character(player)
        self.player_controller = PlayerController(player,
                                                 playernp,
                                                 self.camera,
                                                 self.mouseWatcherNode,
                                                 self.win)
        self.taskMgr.add(self.player_controller.update, 'Player Controller')
        # Demon portals
        # Five distinct spawn points get a portal model instanced slightly
        # above ground level (+1 on Z).
        self.demon_portals = random.sample(city.spawn_points, 5)
        model = self.loader.loadModel("models/demon_portal.egg")
        for point in self.demon_portals:
            placeholder = self.render.attach_new_node("placeholder")
            placeholder.set_pos(point[0], point[1], point[2]+1)
            model.instance_to(placeholder)

    def create_mesh(self, mesh, name, material):
        """Build a GeomNode from *mesh* (positions + normals + triangle
        faces), apply *material*, and return its NodePath under render."""
        node = p3d.GeomNode(name)
        vdata = p3d.GeomVertexData(name,
                                   p3d.GeomVertexFormat.get_v3n3(),
                                   p3d.GeomEnums.UH_stream)
        vdata.unclean_set_num_rows(len(mesh.vertices))
        vwriter = p3d.GeomVertexWriter(vdata, 'vertex')
        nwriter = p3d.GeomVertexWriter(vdata, 'normal')
        for vert in mesh.vertices:
            vwriter.add_data3(*vert.position)
            nwriter.add_data3(*vert.normal)
        # Release writer references before handing the data to the Geom.
        vwriter = None
        nwriter = None
        prim = p3d.GeomTriangles(p3d.GeomEnums.UH_stream)
        for face in mesh.faces:
            prim.add_vertices(*face)
        render_state = p3d.RenderState.make_empty()
        render_state = render_state.set_attrib(p3d.MaterialAttrib.make(material))
        geom = p3d.Geom(vdata)
        geom.add_primitive(prim)
        node.add_geom(geom, render_state)
        return self.render.attach_new_node(node)

    def import_city(self, city):
        """Instantiate city buildings (render + Bullet box colliders), the
        road mesh, and a ground collision plane."""
        # Candidate building colors (8-bit RGB), normalized below.
        colors = []
        colors.append((112, 163, 10))
        colors.append((90, 135, 10))
        colors.append((67, 100, 10))
        building_mats = []
        for color in colors:
            mat = p3d.Material()
            mat.set_shininess(1.0)
            color = [c/255.0 for c in color]
            mat.set_diffuse(p3d.VBase4(color[0], color[1], color[2], 1.0))
            building_mats.append(mat)
        for i, building in enumerate(city.buildings):
            mesh = building.mesh
            name = str(i)
            mat = random.choice(building_mats)
            np = self.create_mesh(mesh, name, mat)
            np.set_pos(p3d.VBase3(*building.position))
            node = bullet.BulletRigidBodyNode(name)
            node.add_shape(bullet.BulletBoxShape(p3d.Vec3(building.collision)))
            np = self.render.attach_new_node(node)
            pos = list(building.position)
            # Bullet boxes are centered; lift by the half-height so the
            # collider sits on the ground.
            pos[2] += building.collision[2]
            np.set_pos(p3d.VBase3(*pos))
            self.physics_world.attach_rigid_body(node)
        road_mat = p3d.Material()
        road_mat.set_shininess(1.0)
        color = [c/255.0 for c in (7, 105, 105)]
        road_mat.set_diffuse(p3d.VBase4(color[0], color[1], color[2], 1.0))
        self.create_mesh(city.road_mesh, "road", road_mat)
        # Infinite ground plane with +Z normal.
        node = bullet.BulletRigidBodyNode('Ground')
        node.add_shape(bullet.BulletPlaneShape(p3d.Vec3(0, 0, 1), 0))
        self.render.attach_new_node(node)
        self.physics_world.attach_rigid_body(node)

    # Tasks
    def update_physics(self, task):
        """Per-frame task: step the Bullet world by the frame delta time."""
        dt = globalClock.getDt()
        self.physics_world.do_physics(dt)
        return task.cont
if __name__ == '__main__':
    # Script entry point: construct the game and hand control to Panda3D.
    GameApp().run()
|
bump version to 0.2.0
|
import copy
import json
import math
import houston.action
class State(object):
    """
    Describes the state of the system at a given moment in time, in terms of
    its internal and external variables.
    """

    @staticmethod
    def from_file(fn):
        """
        Constructs a system state from a given file, containing a JSON-based
        description of its contents.

        :param fn: the path to the state description file

        :returns: the corresponding State for that file
        """
        # Fix: removed leftover debug `print(jsn.keys())` that dumped the
        # variable names to stdout on every load.
        with open(fn, "r") as f:
            jsn = json.load(f)
        return State.from_json(jsn)

    @staticmethod
    def from_json(jsn):
        """
        Constructs a description of a state from its JSON description.
        """
        assert ('variables' in jsn)
        assert isinstance(jsn['variables'], dict)
        return State(jsn['variables'])

    def __init__(self, values):
        """
        Constructs a description of the system state.

        :param values: a dictionary describing the values of the state
            variables, indexed by their names.
        """
        # Shallow-copied so later mutation of the caller's dict does not
        # change this state.
        self.__values = copy.copy(values)

    @property
    def values(self):
        """
        Returns the values of each of the state variables as a dictionary,
        indexed by name.
        """
        return copy.copy(self.__values)

    def __getitem__(self, variable):
        """
        :see `read`
        """
        return self.read(variable)

    def read(self, variable):
        """
        Returns the value for a given state variable
        """
        # Fix: removed leftover debug `print(self.__values)` that flooded
        # stdout on every read.
        return self.__values[variable]

    def dump(self):
        """
        Prints this state to the standard output.
        """
        for variable in self.__values:
            print('Variable: {} - State: {}'.format(variable, self[variable]))

    def to_json(self):
        """
        Returns a JSON description of this state.
        """
        return {
            'variables': copy.copy(self.__values)
        }

    def __str__(self):
        return str(self.to_json())

    def __repr__(self):
        return str(self)
class StateVariable(object):
    # Base class for system state variables.  Only instantiable through the
    # InternalVariable / ExternalVariable subclasses (enforced by the assert
    # in __init__).

    def __init__(self, name, getter, noise=None):
        """
        Constructs a new state variable

        :param name: the name of this variable
        :param getter: a lambda function, used to obtain the value of this variable
        :param noise: the inherent level of noise when measuring this variable
        """
        assert (isinstance(self, InternalVariable) or isinstance(self, ExternalVariable))
        # NOTE(review): `type(noise) in [float, int]` (rather than isinstance)
        # also rejects bool — confirm that is intended before simplifying.
        assert (noise is None or type(noise) in [float, int])
        assert (noise is None or noise >= 0)
        self.__name = name
        self.__getter = getter
        self.__noise = noise

    # NOTE(review): the bare triple-quoted strings below sit BEFORE the
    # @property decorators, so they are free-floating statements rather than
    # docstrings of the properties they describe.  Left in place here to keep
    # the code byte-identical.
    """
    Returns true if there is inherent noise in the measurement of this variable.
    """
    @property
    def is_noisy(self):
        return self.__noise is not None

    """
    Returns the inherent level of noise that is to be expected when measuring
    this variable. If no noise is expected, None is returned.
    """
    @property
    def noise(self):
        return self.__noise

    """
    Returns the name of this system variable
    """
    @property
    def name(self):
        return self.__name

    def eq(self, x, y):
        """
        Determines whether two measurements of this state variable are
        considered to be equal.
        """
        if not self.is_noisy:
            return x == y

        # Noisy variables compare equal when within the noise tolerance.
        d = math.fabs(x - y)
        return d <= self.__noise

    def neq(self, x, y):
        """
        Determines whether two measurements of this state variable are not
        considered to be equal.
        """
        return not self.eq(x, y)

    def gt(self, x, y):
        # Strict comparisons intentionally ignore measurement noise.
        return x > y

    def lt(self, x, y):
        return x < y

    def leq(self, x, y):
        return not self.gt(x, y)

    def geq(self, x, y):
        return not self.lt(x, y)

    """
    Inspects the current state of this system variable
    """
    def read(self):
        return self.__getter()
class InternalVariable(StateVariable):
    """
    Internal variables describe the internal state of a given system.
    (i.e., they represent a system's knowledge of itself and its surroundings.)
    A user-provided lambda function is used to inspect the value of the state
    variable at any given time.
    """
    # No behavior of its own; exists so StateVariable.__init__ can verify it
    # is being constructed through an approved subclass.
    pass
class ExternalVariable(StateVariable):
    """
    TODO

    NOTE(review): original docstring was left as "TODO"; presumably the
    external counterpart of InternalVariable (variables measured outside the
    system) — confirm with the authors.
    """
    pass
class Environment(object):
    @staticmethod
    def fromFile(fn):
        """
        Constructs a system environment from a given file, containing a JSON-based
        description of its contents.

        :param fn: the path to the environment description file

        :returns: the corresponding Environment for that file
        """
        with open(fn, "r") as f:
            jsn = json.load(f)
        return Environment.from_json(jsn)

    @staticmethod
    def from_json(jsn):
        """
        Constructs a description of an environment from its JSON description.
        """
        assert ('constants' in jsn)
        assert isinstance(jsn['constants'], dict)
        return Environment(jsn['constants'])

    # NOTE(review): the string below is a free-floating statement, not the
    # class docstring (it does not open the class body).
    """
    Holds a description of an environment in which a mission should be conducted.
    """
    def __init__(self, values):
        """
        Constructs a description of a mission environment.

        :param values: a dictionary of environment constant values, indexed
            by the name of those constants.
        """
        self.__values = copy.copy(values)

    # NOTE(review): almost certainly intended to be __getattr__ (double
    # trailing underscores).  As written, this is a name-mangled private
    # method that nothing calls, so attribute-style access to constants does
    # not work — confirm and fix.
    def __getattr(self, variable):
        return self.read(variable)

    def read(self, variable):
        """
        Returns the value of a given environment constant.
        """
        return self.__values[variable]

    def to_json(self):
        """
        Returns this environment description as a JSON object (i.e., a dict)
        """
        return {
            'constants': copy.copy(self.__values)
        }
Removed leftover debugging print statements from State.from_file and State.read
import copy
import json
import math
import houston.action
class State(object):
    """Immutable-by-convention snapshot of the system's internal and external
    variables at a single moment in time."""

    @staticmethod
    def from_file(fn):
        """Load a State from a JSON file located at *fn*."""
        with open(fn, "r") as src:
            return State.from_json(json.load(src))

    @staticmethod
    def from_json(jsn):
        """Build a State from its JSON (dict) description."""
        assert 'variables' in jsn
        assert isinstance(jsn['variables'], dict)
        return State(jsn['variables'])

    def __init__(self, values):
        """Record a snapshot of state-variable values.

        :param values: mapping from variable name to its observed value.
        """
        # Shallow copy guards against later mutation of the caller's dict.
        self.__values = dict(values)

    @property
    def values(self):
        """A defensive copy of the variable-name -> value mapping."""
        return dict(self.__values)

    def __getitem__(self, variable):
        """Index access; equivalent to :meth:`read`."""
        return self.read(variable)

    def read(self, variable):
        """Return the recorded value of the named state variable."""
        return self.__values[variable]

    def dump(self):
        """Write every variable and its value to standard output."""
        for name in self.__values:
            print('Variable: {} - State: {}'.format(name, self[name]))

    def to_json(self):
        """Serialize this state as a JSON-compatible dict."""
        return {'variables': dict(self.__values)}

    def __str__(self):
        return str(self.to_json())

    def __repr__(self):
        return str(self)
class StateVariable(object):
    """
    A named variable of system state, read through a user-supplied getter,
    with an optional inherent measurement noise used by the comparison
    helpers.  Only instantiable via InternalVariable / ExternalVariable.
    """

    def __init__(self, name, getter, noise=None):
        """
        Constructs a new state variable

        :param name: the name of this variable
        :param getter: a lambda function, used to obtain the value of this variable
        :param noise: the inherent level of noise when measuring this variable
        """
        assert (isinstance(self, InternalVariable) or isinstance(self, ExternalVariable))
        # type(...) rather than isinstance keeps bools out (bool is an int
        # subclass); behavior preserved from the original.
        assert (noise is None or type(noise) in [float, int])
        assert (noise is None or noise >= 0)
        self.__name = name
        self.__getter = getter
        self.__noise = noise

    # Fix: the property docstrings below were previously free-floating string
    # statements placed BEFORE the @property decorators (dead statements, not
    # attached to anything); they now live inside the properties.
    @property
    def is_noisy(self):
        """
        Returns true if there is inherent noise in the measurement of this
        variable.
        """
        return self.__noise is not None

    @property
    def noise(self):
        """
        Returns the inherent level of noise that is to be expected when
        measuring this variable. If no noise is expected, None is returned.
        """
        return self.__noise

    @property
    def name(self):
        """
        Returns the name of this system variable
        """
        return self.__name

    def eq(self, x, y):
        """
        Determines whether two measurements of this state variable are
        considered to be equal.
        """
        if not self.is_noisy:
            return x == y

        # Noisy variables compare equal when within the noise tolerance.
        d = math.fabs(x - y)
        return d <= self.__noise

    def neq(self, x, y):
        """
        Determines whether two measurements of this state variable are not
        considered to be equal.
        """
        return not self.eq(x, y)

    def gt(self, x, y):
        # Strict comparisons intentionally ignore measurement noise.
        return x > y

    def lt(self, x, y):
        return x < y

    def leq(self, x, y):
        return not self.gt(x, y)

    def geq(self, x, y):
        return not self.lt(x, y)

    def read(self):
        """
        Inspects the current state of this system variable
        """
        return self.__getter()
class InternalVariable(StateVariable):
    """
    Internal variables describe the internal state of a given system.
    (i.e., they represent a system's knowledge of itself and its surroundings.)
    A user-provided lambda function is used to inspect the value of the state
    variable at any given time.
    """
    # No behavior of its own; exists so StateVariable.__init__ can verify it
    # is being constructed through an approved subclass.
    pass
class ExternalVariable(StateVariable):
    """
    TODO

    NOTE(review): original docstring was left as "TODO"; presumably the
    external counterpart of InternalVariable (variables measured outside the
    system) — confirm with the authors.
    """
    pass
class Environment(object):
    """
    Holds a description of an environment in which a mission should be
    conducted, as a table of named constants.
    """

    @staticmethod
    def fromFile(fn):
        """
        Constructs a system environment from a given file, containing a JSON-based
        description of its contents.

        :param fn: the path to the environment description file

        :returns: the corresponding Environment for that file
        """
        with open(fn, "r") as f:
            jsn = json.load(f)
        return Environment.from_json(jsn)

    @staticmethod
    def from_json(jsn):
        """
        Constructs a description of an environment from its JSON description.
        """
        assert ('constants' in jsn)
        assert isinstance(jsn['constants'], dict)
        return Environment(jsn['constants'])

    def __init__(self, values):
        """
        Constructs a description of a mission environment.

        :param values: a dictionary of environment constant values, indexed
            by the name of those constants.
        """
        self.__values = copy.copy(values)

    def __getattr__(self, variable):
        # Bug fix: this was previously `__getattr` (one underscore short),
        # which Python name-mangles into a dead private method, so
        # attribute-style access to constants never worked.  __getattr__ is
        # only invoked for attributes not found through normal lookup.  We
        # raise AttributeError (not KeyError) for unknown names so protocols
        # that probe optional attributes (copy, pickle, hasattr) still work,
        # and we go through __dict__ to avoid recursing before __init__ has
        # set the values table.
        values = self.__dict__.get('_Environment__values')
        if values is not None and variable in values:
            return values[variable]
        raise AttributeError(variable)

    def read(self, variable):
        """
        Returns the value of a given environment constant.
        """
        return self.__values[variable]

    def to_json(self):
        """
        Returns this environment description as a JSON object (i.e., a dict)
        """
        return {
            'constants': copy.copy(self.__values)
        }
|
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that handles the processing of patches to the source tree."""
import constants
import glob
import json
import logging
import os
import re
import shutil
import tempfile
import itertools
import operator
from chromite.lib import cros_build_lib as cros_lib
# The prefix of the temporary directory created to store local patches.
_TRYBOT_TEMP_PREFIX = 'trybot_patch-'
def _RunCommand(cmd, dryrun):
"""Runs the specified shell cmd if dryrun=False."""
if dryrun:
logging.info('Would have run: %s', ' '.join(cmd))
else:
cros_lib.RunCommand(cmd, error_ok=True)
class GerritException(Exception):
  # Fix: the docstring previously opened with a single quote character
  # ('"Base ..."""'), relying on implicit string concatenation; normalized
  # to conventional triple quotes.
  """Base exception, thrown for gerrit failures"""
class PatchException(GerritException):
  """Exception thrown by GetGerritPatchInfo (e.g. a change number that does
  not exist on the queried server)."""
class ApplyPatchException(Exception):
  """Exception thrown if we fail to apply a patch."""

  # Types used to denote what we failed to apply against.
  TYPE_REBASE_TO_TOT = 1
  TYPE_REBASE_TO_PATCH_INFLIGHT = 2

  def __init__(self, patch, patch_type=TYPE_REBASE_TO_TOT):
    """Record the failing patch and which rebase target failed.

    Args:
      patch: The patch that could not be applied.
      patch_type: One of the TYPE_* constants above.
    """
    super(ApplyPatchException, self).__init__()
    self.patch, self.type = patch, patch_type

  def __str__(self):
    return 'Failed to apply patch %s' % (self.patch,)
class MissingChangeIDException(Exception):
  """Raised if a patch is missing a Change-Id trailer in its commit message
  (detected while walking Gerrit dependencies)."""
  pass
class PaladinMessage(object):
  # Fix: previously declared as `class PaladinMessage():`, which is an
  # old-style class in this Python 2 codebase; now inherits object like
  # every sibling class in the module.
  """An object that is used to send messages to developers about their changes.
  """
  # URL where Paladin documentation is stored.
  _PALADIN_DOCUMENTATION_URL = ('http://www.chromium.org/developers/'
                                'tree-sheriffs/sheriff-details-chromium-os/'
                                'commit-queue-overview')

  def __init__(self, message, patch, helper):
    """Creates a message bound to a specific patch.

    Args:
      message: The message body to deliver.
      patch: The GerritPatch the message concerns.
      helper: Instance of gerrit_helper for the gerrit instance.
    """
    self.message = message
    self.patch = patch
    self.helper = helper

  def _ConstructPaladinMessage(self):
    """Adds any standard Paladin messaging to an existing message."""
    return self.message + (' Please see %s for more information.' %
                           self._PALADIN_DOCUMENTATION_URL)

  def Send(self, dryrun):
    """Sends the message to the developer via a gerrit review comment."""
    cmd = self.helper.GetGerritReviewCommand(
        ['-m', '"%s"' % self._ConstructPaladinMessage(),
         '%s,%s' % (self.patch.gerrit_number, self.patch.patch_number)])
    _RunCommand(cmd, dryrun)
class Patch(object):
  """Abstract class representing a Git Patch."""

  def __init__(self, project, tracking_branch):
    """Initialization of abstract Patch class.

    Args:
      project: The name of the project that the patch applies to.
      tracking_branch: The remote branch of the project the patch applies to.
    """
    self.project, self.tracking_branch = project, tracking_branch

  def Apply(self, buildroot, trivial):
    """Applies the patch to specified buildroot. Implement in subclasses.

    Args:
      buildroot: The buildroot.
      trivial: Only allow trivial merges when applying change.

    Raises:
      PatchException
    """
    raise NotImplementedError('Applies the patch to specified buildroot.')
class GerritPatch(Patch):
  """Object that represents a Gerrit CL."""

  _PUBLIC_URL = os.path.join(constants.GERRIT_HTTP_URL, 'gerrit/p')

  # Matches the Change-Id trailer in a commit message.
  _GIT_CHANGE_ID_RE = re.compile(r'^\s*Change-Id:\s*(\w+)\s*$', re.MULTILINE)
  # Matches CQ-DEPEND=... lines in a commit message.
  _PALADIN_DEPENDENCY_RE = re.compile(r'^\s*CQ-DEPEND=(.*)$', re.MULTILINE)
  # NOTE(review): not a raw string; '\w' survives as-is, but r'(\w+)' would be
  # the conventional spelling.
  _PALADIN_BUG_RE = re.compile('(\w+)')

  def __init__(self, patch_dict, internal):
    """Construct a GerritPatch object from Gerrit query results.

    Gerrit query JSON fields are documented at:
    http://gerrit-documentation.googlecode.com/svn/Documentation/2.2.1/json.html

    Args:
      patch_dict: A dictionary containing the parsed JSON gerrit query results.
      internal: Whether the CL is an internal CL.
    """
    super(GerritPatch, self).__init__(patch_dict['project'],
                                      patch_dict['branch'])
    self.patch_dict = patch_dict
    self.internal = internal
    # id - The CL's ChangeId
    self.id = patch_dict['id']
    # ref - The remote ref that contains the patch.
    self.ref = patch_dict['currentPatchSet']['ref']
    # revision - The CL's SHA1 hash.
    self.revision = patch_dict['currentPatchSet']['revision']
    self.patch_number = patch_dict['currentPatchSet']['number']
    self.commit = patch_dict['currentPatchSet']['revision']
    # Owner is the local part of the owner's email address.
    self.owner, _, _ = patch_dict['owner']['email'].partition('@')
    self.gerrit_number = patch_dict['number']
    self.url = patch_dict['url']
    # status - Current state of this change. Can be one of
    # ['NEW', 'SUBMITTED', 'MERGED', 'ABANDONED'].
    self.status = patch_dict['status']
    # Allows a caller to specify why we can't apply this change when we
    # handle application failures.
    self.apply_error_message = ('Please re-sync, rebase, and re-upload your '
                                'change.')

  def __getnewargs__(self):
    """Used for pickling to re-create patch object."""
    return self.patch_dict, self.internal

  def IsAlreadyMerged(self):
    """Returns whether the patch has already been merged in Gerrit."""
    return self.status == 'MERGED'

  def _GetProjectUrl(self):
    """Returns the url to the gerrit project."""
    if self.internal:
      url_prefix = constants.GERRIT_INT_SSH_URL
    else:
      url_prefix = self._PUBLIC_URL
    return os.path.join(url_prefix, self.project)

  def _RebaseOnto(self, branch, upstream, project_dir, trivial):
    """Attempts to rebase FETCH_HEAD onto branch -- while not on a branch.

    Raises:
      cros_lib.RunCommandError: If the rebase operation returns an error code.
        In this case, we still rebase --abort before returning.
    """
    try:
      git_rb = ['git', 'rebase']
      if trivial: git_rb.extend(['--strategy', 'resolve', '-X', 'trivial'])
      git_rb.extend(['--onto', branch, upstream, 'FETCH_HEAD'])
      # Run the rebase command.
      cros_lib.RunCommand(git_rb, cwd=project_dir, print_cmd=False)
    except cros_lib.RunCommandError:
      # Leave the tree clean for the caller before propagating the failure.
      cros_lib.RunCommand(['git', 'rebase', '--abort'], cwd=project_dir,
                          error_ok=True, print_cmd=False)
      raise

  def _RebasePatch(self, buildroot, project_dir, trivial):
    """Rebase patch fetched from gerrit onto constants.PATCH_BRANCH.

    When the function completes, the constants.PATCH_BRANCH branch will be
    pointing to the rebased change.

    Arguments:
      buildroot: The buildroot.
      project_dir: Directory of the project that is being patched.
      trivial: Use trivial logic that only allows trivial merges. Note:
        Requires Git >= 1.7.6 -- bug <. Bots have 1.7.6 installed.

    Raises:
      ApplyPatchException: If the patch failed to apply.
    """
    url = self._GetProjectUrl()
    upstream = _GetProjectManifestBranch(buildroot, self.project)
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    try:
      self._RebaseOnto(constants.PATCH_BRANCH, upstream, project_dir, trivial)
      cros_lib.RunCommand(['git', 'checkout', '-B', constants.PATCH_BRANCH],
                          cwd=project_dir, print_cmd=False)
    except cros_lib.RunCommandError:
      try:
        # Failed to rebase against branch, try TOT.
        self._RebaseOnto(upstream, upstream, project_dir, trivial)
      except cros_lib.RunCommandError:
        raise ApplyPatchException(
            self, patch_type=ApplyPatchException.TYPE_REBASE_TO_TOT)
      else:
        # We failed to apply to patch_branch but succeeded against TOT.
        # We should pass a different type of exception in this case.
        raise ApplyPatchException(
            self, patch_type=ApplyPatchException.TYPE_REBASE_TO_PATCH_INFLIGHT)
    finally:
      cros_lib.RunCommand(['git', 'checkout', constants.PATCH_BRANCH],
                          cwd=project_dir, print_cmd=False)

  def Apply(self, buildroot, trivial=False):
    """Implementation of Patch.Apply().

    Raises:
      ApplyPatchException: If the patch failed to apply.
    """
    logging.info('Attempting to apply change %s', self)
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    # Lazily create the patch branch on first use, tracking the manifest
    # default branch.
    if not cros_lib.DoesLocalBranchExist(project_dir, constants.PATCH_BRANCH):
      upstream = cros_lib.GetManifestDefaultBranch(buildroot)
      cros_lib.RunCommand(['git', 'checkout', '-b', constants.PATCH_BRANCH,
                           '-t', 'm/' + upstream], cwd=project_dir,
                          print_cmd=False)
    self._RebasePatch(buildroot, project_dir, trivial)

  # --------------------- Gerrit Operations --------------------------------- #

  def RemoveCommitReady(self, helper, dryrun=False):
    """Remove any commit ready bits associated with CL."""
    # Deletes the COMR approval row directly via the gerrit SQL interface.
    query = ['-c',
             '"delete from patch_set_approvals where change_id=%s'
             ' AND category_id=\'COMR\';"' % self.gerrit_number
            ]
    cmd = helper.GetGerritSqlCommand(query)
    _RunCommand(cmd, dryrun)

  def HandleCouldNotSubmit(self, helper, build_log, dryrun=False):
    """Handler that is called when Paladin can't submit a change.

    This should be rare, but if an admin overrides the commit queue and commits
    a change that conflicts with this change, it'll apply, build/validate but
    receive an error when submitting.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL to the build log for this run.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to submit your change in %s . '
           'This can happen if you submitted your change or someone else '
           'submitted a conflicting change while your change was being tested.'
           % build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleCouldNotVerify(self, helper, build_log, dryrun=False):
    """Handler for when Paladin fails to validate a change.

    This handler notifies set Verified-1 to the review forcing the developer
    to re-upload a change that works. There are many reasons why this might be
    called e.g. build or testing exception.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL to the build log where verification results could be
        found.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to verify your change in %s . '
           'If you believe this happened in error, just re-mark your commit as '
           'ready. Your change will then get automatically retried.' %
           build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleCouldNotApply(self, helper, build_log, dryrun=False):
    """Handler for when Paladin fails to apply a change.

    This handler notifies set CodeReview-2 to the review forcing the developer
    to re-upload a rebased change.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL of where to find the logs for this build.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to apply your change in %s . ' %
           build_log)
    # apply_error_message may have been customized by the caller.
    msg += self.apply_error_message
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleApplied(self, helper, build_log, dryrun=False):
    """Handler for when Paladin successfully applies a change.

    This handler notifies a developer that their change is being tried as
    part of a Paladin run defined by a build_log.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL of where to find the logs for this build.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue has picked up your change. '
           'You can follow along at %s .' % build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)

  def CommitMessage(self, buildroot):
    """Returns the commit message for the patch as a string."""
    url = self._GetProjectUrl()
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    return_obj = cros_lib.RunCommand(['git', 'show', '-s', 'FETCH_HEAD'],
                                     cwd=project_dir, redirect_stdout=True,
                                     print_cmd=False)
    return return_obj.output

  def GerritDependencies(self, buildroot):
    """Returns an ordered list of dependencies from Gerrit.

    The list of changes are in order from FETCH_HEAD back to m/master.

    Arguments:
      buildroot: The buildroot.
    Returns:
      An ordered list of Gerrit revisions that this patch depends on.
    Raises:
      MissingChangeIDException: If a dependent change is missing its ChangeID.
    """
    dependencies = []
    url = self._GetProjectUrl()
    logging.info('Checking for Gerrit dependencies for change %s', self)
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    # NUL-separated log of every commit between the manifest branch and the
    # fetched patch's parent; each entry must carry a Change-Id.
    return_obj = cros_lib.RunCommand(
        ['git', 'log', '-z', '%s..FETCH_HEAD^' %
         _GetProjectManifestBranch(buildroot, self.project)],
        cwd=project_dir, redirect_stdout=True, print_cmd=False)

    for patch_output in return_obj.output.split('\0'):
      if not patch_output: continue
      change_id_match = self._GIT_CHANGE_ID_RE.search(patch_output)
      if change_id_match:
        dependencies.append(change_id_match.group(1))
      else:
        raise MissingChangeIDException('Missing Change-Id in %s' % patch_output)

    if dependencies:
      logging.info('Found %s Gerrit dependencies for change %s', dependencies,
                   self)

    return dependencies

  def PaladinDependencies(self, buildroot):
    """Returns an ordered list of dependencies based on the Commit Message.

    Parses the Commit message for this change looking for lines that follow
    the format:

    CQ-DEPEND:change_num+ e.g.

    A commit which depends on a couple others.

    BUG=blah
    TEST=blah
    CQ-DEPEND=10001,10002
    """
    dependencies = []
    logging.info('Checking for CQ-DEPEND dependencies for change %s', self)
    commit_message = self.CommitMessage(buildroot)
    matches = self._PALADIN_DEPENDENCY_RE.findall(commit_message)
    for match in matches:
      # Each CQ-DEPEND line may carry several comma-separated change numbers.
      dependencies.extend(self._PALADIN_BUG_RE.findall(match))

    if dependencies:
      logging.info('Found %s Paladin dependencies for change %s', dependencies,
                   self)
    return dependencies

  def Submit(self, helper, dryrun=False):
    """Submits patch using Gerrit Review.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    cmd = helper.GetGerritReviewCommand(['--submit', '%s,%s' % (
        self.gerrit_number, self.patch_number)])
    _RunCommand(cmd, dryrun)

  def __str__(self):
    """Returns custom string to identify this patch."""
    return '%s:%s' % (self.owner, self.gerrit_number)

  # Define methods to use patches in sets.  We uniquely identify patches
  # by Gerrit change numbers.
  def __hash__(self):
    return hash(self.id)

  def __eq__(self, other):
    return self.id == other.id
def RemovePatchRoot(patch_root):
  """Removes the temporary directory storing patches.

  Args:
    patch_root: Path to the patch directory; must carry the trybot temp
      prefix as a safety check before recursive deletion.
  """
  # Fix: explicit check instead of `assert` — asserts are stripped under
  # `python -O`, which would silently disable this rmtree safety guard.
  # AssertionError is kept so callers see the same exception type as before.
  if not os.path.basename(patch_root).startswith(_TRYBOT_TEMP_PREFIX):
    raise AssertionError('Not a trybot patch root: %s' % patch_root)
  shutil.rmtree(patch_root)
class LocalPatch(Patch):
  """Object that represents a set of local commits that will be patched."""

  def __init__(self, project, tracking_branch, patch_dir, local_branch):
    """Construct a LocalPatch object.

    Args:
      project: Same as Patch constructor arg.
      tracking_branch: Same as Patch constructor arg.
      patch_dir: The directory where the .patch files are stored.
      local_branch: The local branch of the project that the patch came from.
    """
    Patch.__init__(self, project, tracking_branch)
    self.patch_dir = patch_dir
    self.local_branch = local_branch

  def _GetFileList(self):
    """Return a list of .patch files in sorted order."""
    file_list = glob.glob(os.path.join(self.patch_dir, '*'))
    file_list.sort()
    return file_list

  def Apply(self, buildroot, trivial=False):
    """Implementation of Patch.Apply().  Does not accept trivial option.

    Raises:
      PatchException if the patch is for the wrong tracking branch.
    """
    assert not trivial, 'Local apply not compatible with trivial set'
    manifest_branch = _GetProjectManifestBranch(buildroot, self.project)
    if self.tracking_branch != manifest_branch:
      raise PatchException('branch %s for project %s is not tracking %s'
                           % (self.local_branch, self.project,
                              manifest_branch))

    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    try:
      # `repo start` creates the patch branch; `git am --3way` then applies
      # the exported patch files on top of it.
      cros_lib.RunCommand(['repo', 'start', constants.PATCH_BRANCH, '.'],
                          cwd=project_dir)
      cros_lib.RunCommand(['git', 'am', '--3way'] + self._GetFileList(),
                          cwd=project_dir)
    except cros_lib.RunCommandError:
      raise ApplyPatchException(self)

  def __str__(self):
    """Returns custom string to identify this patch."""
    return '%s:%s' % (self.project, self.local_branch)
def _QueryGerrit(server, port, or_parameters, sort=None, options=()):
  """Freeform querying of a gerrit server

  Args:
   server: the hostname to query
   port: the port to query
   or_parameters: sequence of gerrit query chunks that are OR'd together
   sort: if given, the key in the resultant json to sort on
   options: any additional commandline options to pass to gerrit query

  Returns:
    a sequence of dictionaries from the gerrit server
  Raises:
    RunCommandException if the invocation fails, or GerritException if
    there is something wrong w/ the query parameters given
  """
  cmd = ['ssh', '-p', port, server, 'gerrit', 'query', '--format=JSON']
  cmd.extend(options)
  cmd.extend(['--', ' OR '.join(or_parameters)])
  result = cros_lib.RunCommand(cmd, redirect_stdout=True)
  # Gerrit emits one JSON object per line; the final line is a 'stats'
  # record that doubles as the error report for bad queries.
  result = map(json.loads, result.output.splitlines())

  status = result[-1]
  if 'type' not in status:
    raise GerritException('weird results from gerrit: asked %s, got %s' %
                          (or_parameters, result))

  if status['type'] != 'stats':
    raise GerritException('bad gerrit query: parameters %s, error %s' %
                          (or_parameters, status.get('message', status)))

  # Strip the stats record before returning the actual results.
  result = result[:-1]
  if sort:
    return sorted(result, key=operator.itemgetter(sort))
  return result
def _QueryGerritMultipleCurrentPatchset(queries, internal=False):
  """Query chromeos gerrit servers for the current patch for given changes.

  Args:
    queries: sequence of Change-IDs (Ic04g2ab, 6 characters to 40),
      or change numbers (12345 for example).  A change number can refer to
      the same change as a Change ID, but Change IDs given should be unique,
      and the same goes for Change Numbers.
    internal: optional boolean; if the internal servers are to be queried,
      set to True.  Defaults to False.

  Yields:
    (query, GerritPatch) tuples, one per requested query, in no guaranteed
    order.

  Raises:
    GerritException: if a query fails to match, or isn't specific enough,
      or a query is malformed.
    RunCommandException: if for whatever reason, the ssh invocation to
      gerrit fails.
  """
  if not queries:
    return
  if internal:
    server, port = constants.GERRIT_INT_HOST, constants.GERRIT_INT_PORT
  else:
    server, port = constants.GERRIT_HOST, constants.GERRIT_PORT
  # Process the queries in two separate streams; this is done so that
  # we can identify exactly which patchset returned no results; it's
  # basically impossible to do it if you query with mixed numeric/ID.
  numeric_queries = [x for x in queries if x.isdigit()]
  if numeric_queries:
    results = _QueryGerrit(server, port,
                           ['change:%s' % x for x in numeric_queries],
                           sort='number', options=('--current-patch-set',))
    # izip_longest pads the shorter sequence with None, so a missing (or
    # extra) result shows up as a mismatched pairing below.
    for query, result in itertools.izip_longest(numeric_queries, results):
      if result is None or result['number'] != query:
        raise PatchException('Change number %s not found on server %s.'
                             % (query, server))
      yield query, GerritPatch(result, internal)
  id_queries = sorted(x.lower() for x in queries if not x.isdigit())
  if not id_queries:
    return
  results = _QueryGerrit(server, port, ['change:%s' % x for x in id_queries],
                         sort='id', options=('--current-patch-set',))
  last_patch_id = None
  for query, result in itertools.izip_longest(id_queries, results):
    # Case insensitivity to ensure that if someone queries for IABC
    # and gerrit returns Iabc, we still properly match.
    result_id = result.get('id', '') if result is not None else ''
    result_id = result_id.lower()
    if result is None or (query and not result_id.startswith(query)):
      # A result still matching the previous query means one query returned
      # multiple changes: the Change-ID prefix given was ambiguous.
      if last_patch_id and result_id.startswith(last_patch_id):
        raise PatchException('While querying for change %s, we received '
                             'back multiple results. Please be more specific. Server=%s'
                             % (last_patch_id, server))
      raise PatchException('Change-ID %s not found on server %s.'
                           % (query, server))
    if query is None:
      # More results than queries: again an ambiguous prefix.
      raise PatchException('While querying for change %s, we received '
                           'back multiple results. Please be more specific. Server=%s'
                           % (last_patch_id, server))
    yield query, GerritPatch(result, internal)
    last_patch_id = query
def GetGerritPatchInfo(patches):
  """Query Gerrit server for patch information.

  Args:
    patches: a list of patch ID's to query.  Internal patches start with
      a '*'.

  Returns:
    A list of GerritPatch objects describing each patch.  Only the first
    instance of a requested patch is returned.

  Raises:
    PatchException if a patch can't be found.
  """
  internal = [x for x in patches if x.startswith('*')]
  external = [x for x in patches if not x.startswith('*')]
  parsed = {}
  if internal:
    # Query with the leading '*' stripped, but key the results with it
    # restored; since change-ids are effectively user controlled, this
    # precludes any collision between the two gerrit instances.
    stripped = [x[1:] for x in internal]
    for key, gpatch in _QueryGerritMultipleCurrentPatchset(stripped, True):
      parsed['*' + key] = gpatch
  if external:
    for key, gpatch in _QueryGerritMultipleCurrentPatchset(external):
      parsed[key] = gpatch
  # Build a unique list while keeping the first-seen ordering, so whatever
  # ordering the user is trying to enforce survives cherry-picking.
  unique, seen_ids = [], set()
  for query in patches:
    gpatch = parsed[query.lower()]
    if gpatch.id not in seen_ids:
      seen_ids.add(gpatch.id)
      unique.append(gpatch)
  return unique
def _GetRemoteTrackingBranch(project_dir, branch):
  """Get the remote tracking branch of a local branch.

  Raises:
    cros_lib.NoTrackingBranchException: if branch does not track anything.
  """
  tracking = cros_lib.GetTrackingBranch(branch, project_dir)
  return cros_lib.GetShortBranchName(*tracking)
def _GetProjectManifestBranch(buildroot, project):
  """Get the branch specified in the manifest for the project."""
  manifest_branch = cros_lib.GetProjectManifestBranch(buildroot, project)
  return cros_lib.GetShortBranchName(*manifest_branch)
def PrepareLocalPatches(patches, manifest_branch):
  """Finish validation of parameters, and save patches to a temp folder.

  Args:
    patches: A list of user-specified patches, in project:branch form.
    manifest_branch: The manifest branch of the buildroot.

  Returns:
    A list of LocalPatch objects, one per requested patch.

  Raises:
    PatchException if:
      1. The patch isn't specified in project:branch form.
      2. The project branch contains no changes.
      3. The project branch doesn't track a remote branch.
  """
  patch_info = []
  patch_root = tempfile.mkdtemp(prefix=_TRYBOT_TEMP_PREFIX)
  # enumerate() gives each patch a stable numeric subdirectory under
  # patch_root while preserving the user-specified ordering.
  for patch_id, patch in enumerate(patches):
    try:
      project, branch = patch.split(':')
    except ValueError:
      # Surface the documented PatchException rather than a raw ValueError
      # when the spec is malformed (no colon, or more than one).
      raise PatchException('Patch %r must be specified in project:branch '
                           'form.' % patch)
    project_dir = cros_lib.GetProjectDir('.', project)
    patch_dir = os.path.join(patch_root, str(patch_id))
    cmd = ['git', 'format-patch', '%s..%s' % ('m/' + manifest_branch, branch),
           '-o', patch_dir]
    cros_lib.RunCommand(cmd, redirect_stdout=True, cwd=project_dir)
    if not os.listdir(patch_dir):
      raise PatchException('No changes found in %s:%s' % (project, branch))
    # Store remote tracking branch for verification during patch stage.
    try:
      tracking_branch = _GetRemoteTrackingBranch(project_dir, branch)
    except cros_lib.NoTrackingBranchException:
      raise PatchException('%s:%s needs to track a remote branch!'
                           % (project, branch))
    patch_info.append(LocalPatch(project, tracking_branch, patch_dir, branch))
  return patch_info
CQ should only wipe CR on the patchset it tested.
For instances where someone is on the ball and spots that CQ will fail
and remove the CR from their patch, the dev may upload a fix already,
or do the necessary rebasing and level a CR on the new patchset.
However, CQ would still wipe all CR scoring; as such, wipe it only for
the patchset that has it set.
BUG=None
TEST=run_tests.sh, although it doesn't cover this.
Change-Id: I065afaaa271694de9c19d6538288a9a1bfebfe31
Reviewed-on: https://gerrit.chromium.org/gerrit/15554
Tested-by: Brian Harring <fdc2d0a2e2340b623587cee5ea8dff425d1eb1f3@chromium.org>
Reviewed-by: David James <f231dc574bbeb62604660cbc162d1b8642a0a3af@chromium.org>
Reviewed-by: Chris Sosa <959d3d631fb6f17f1b07f751372041a1edb210d1@chromium.org>
Commit-Ready: Brian Harring <fdc2d0a2e2340b623587cee5ea8dff425d1eb1f3@chromium.org>
# Copyright (c) 2011 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module that handles the processing of patches to the source tree."""
import constants
import glob
import json
import logging
import os
import re
import shutil
import tempfile
import itertools
import operator
from chromite.lib import cros_build_lib as cros_lib
# The prefix of the temporary directory created to store local patches.
_TRYBOT_TEMP_PREFIX = 'trybot_patch-'
def _RunCommand(cmd, dryrun):
"""Runs the specified shell cmd if dryrun=False."""
if dryrun:
logging.info('Would have run: %s', ' '.join(cmd))
else:
cros_lib.RunCommand(cmd, error_ok=True)
class GerritException(Exception):
  """Base exception, thrown for gerrit failures."""
class PatchException(GerritException):
  """Raised when a patch lookup fails or a patch spec/branch is invalid."""
class ApplyPatchException(Exception):
  """Raised when a patch fails to apply to the tree."""

  # Constants describing what the patch failed to rebase against.
  TYPE_REBASE_TO_TOT = 1
  TYPE_REBASE_TO_PATCH_INFLIGHT = 2

  def __init__(self, patch, patch_type=TYPE_REBASE_TO_TOT):
    super(ApplyPatchException, self).__init__()
    self.patch, self.type = patch, patch_type

  def __str__(self):
    return 'Failed to apply patch ' + str(self.patch)
class MissingChangeIDException(Exception):
  """Raised if a patch is missing a Change-ID."""
class PaladinMessage(object):
  """An object that is used to send messages to developers about their
  changes.
  """
  # Inherit from object (new-style class) for consistency with the other
  # classes in this module, e.g. Patch(object).

  # URL where Paladin documentation is stored.
  _PALADIN_DOCUMENTATION_URL = ('http://www.chromium.org/developers/'
                                'tree-sheriffs/sheriff-details-chromium-os/'
                                'commit-queue-overview')

  def __init__(self, message, patch, helper):
    """Constructor.

    Args:
      message: Message text to post to the review.
      patch: The GerritPatch the message is about.
      helper: Instance of gerrit_helper used to build the review command.
    """
    self.message = message
    self.patch = patch
    self.helper = helper

  def _ConstructPaladinMessage(self):
    """Adds any standard Paladin messaging to an existing message."""
    return self.message + (' Please see %s for more information.' %
                           self._PALADIN_DOCUMENTATION_URL)

  def Send(self, dryrun):
    """Sends the message to the developer."""
    # The message is quoted so it survives the ssh command line built by
    # the gerrit review helper.
    cmd = self.helper.GetGerritReviewCommand(
        ['-m', '"%s"' % self._ConstructPaladinMessage(),
         '%s,%s' % (self.patch.gerrit_number, self.patch.patch_number)])
    _RunCommand(cmd, dryrun)
class Patch(object):
  """Abstract class representing a Git Patch."""

  def __init__(self, project, tracking_branch):
    """Initialization of abstract Patch class.

    Args:
      project: The name of the project that the patch applies to.
      tracking_branch: The remote branch of the project the patch applies to.
    """
    self.project, self.tracking_branch = project, tracking_branch

  def Apply(self, buildroot, trivial):
    """Applies the patch to specified buildroot.  Implement in subclasses.

    Args:
      buildroot: The buildroot.
      trivial: Only allow trivial merges when applying change.

    Raises:
      PatchException
    """
    raise NotImplementedError('Applies the patch to specified buildroot.')
class GerritPatch(Patch):
  """Object that represents a Gerrit CL."""

  # Base URL used to fetch public (external) gerrit projects.
  _PUBLIC_URL = os.path.join(constants.GERRIT_HTTP_URL, 'gerrit/p')
  # Matches the Change-Id trailer line of a git commit message.
  _GIT_CHANGE_ID_RE = re.compile(r'^\s*Change-Id:\s*(\w+)\s*$', re.MULTILINE)
  # Matches a CQ-DEPEND=... line of a commit message.
  _PALADIN_DEPENDENCY_RE = re.compile(r'^\s*CQ-DEPEND=(.*)$', re.MULTILINE)
  # Splits a CQ-DEPEND value into individual change tokens.
  _PALADIN_BUG_RE = re.compile('(\w+)')

  def __init__(self, patch_dict, internal):
    """Construct a GerritPatch object from Gerrit query results.

    Gerrit query JSON fields are documented at:
    http://gerrit-documentation.googlecode.com/svn/Documentation/2.2.1/json.html

    Args:
      patch_dict: A dictionary containing the parsed JSON gerrit query results.
      internal: Whether the CL is an internal CL.
    """
    super(GerritPatch, self).__init__(patch_dict['project'],
                                      patch_dict['branch'])
    self.patch_dict = patch_dict
    self.internal = internal
    # id - The CL's ChangeId
    self.id = patch_dict['id']
    # ref - The remote ref that contains the patch.
    self.ref = patch_dict['currentPatchSet']['ref']
    # revision - The CL's SHA1 hash.
    self.revision = patch_dict['currentPatchSet']['revision']
    self.patch_number = patch_dict['currentPatchSet']['number']
    self.commit = patch_dict['currentPatchSet']['revision']
    # owner - The local part (before the '@') of the owner's email address.
    self.owner, _, _ = patch_dict['owner']['email'].partition('@')
    self.gerrit_number = patch_dict['number']
    self.url = patch_dict['url']
    # status - Current state of this change.  Can be one of
    # ['NEW', 'SUBMITTED', 'MERGED', 'ABANDONED'].
    self.status = patch_dict['status']
    # Allows a caller to specify why we can't apply this change when we
    # handle application failures.
    self.apply_error_message = ('Please re-sync, rebase, and re-upload your '
                                'change.')

  def __getnewargs__(self):
    """Used for pickling to re-create patch object."""
    return self.patch_dict, self.internal

  def IsAlreadyMerged(self):
    """Returns whether the patch has already been merged in Gerrit."""
    return self.status == 'MERGED'

  def _GetProjectUrl(self):
    """Returns the url to the gerrit project."""
    if self.internal:
      url_prefix = constants.GERRIT_INT_SSH_URL
    else:
      url_prefix = self._PUBLIC_URL
    return os.path.join(url_prefix, self.project)

  def _RebaseOnto(self, branch, upstream, project_dir, trivial):
    """Attempts to rebase FETCH_HEAD onto branch -- while not on a branch.

    Raises:
      cros_lib.RunCommandError: If the rebase operation returns an error code.
        In this case, we still rebase --abort before returning.
    """
    try:
      git_rb = ['git', 'rebase']
      if trivial: git_rb.extend(['--strategy', 'resolve', '-X', 'trivial'])
      git_rb.extend(['--onto', branch, upstream, 'FETCH_HEAD'])
      # Run the rebase command.
      cros_lib.RunCommand(git_rb, cwd=project_dir, print_cmd=False)
    except cros_lib.RunCommandError:
      # Leave the tree clean for the caller before re-raising.
      cros_lib.RunCommand(['git', 'rebase', '--abort'], cwd=project_dir,
                          error_ok=True, print_cmd=False)
      raise

  def _RebasePatch(self, buildroot, project_dir, trivial):
    """Rebase patch fetched from gerrit onto constants.PATCH_BRANCH.

    When the function completes, the constants.PATCH_BRANCH branch will be
    pointing to the rebased change.

    Arguments:
      buildroot: The buildroot.
      project_dir: Directory of the project that is being patched.
      trivial: Use trivial logic that only allows trivial merges.  Note:
        Requires Git >= 1.7.6.  Bots have 1.7.6 installed.

    Raises:
      ApplyPatchException: If the patch failed to apply.
    """
    url = self._GetProjectUrl()
    upstream = _GetProjectManifestBranch(buildroot, self.project)
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    try:
      self._RebaseOnto(constants.PATCH_BRANCH, upstream, project_dir, trivial)
      # Point PATCH_BRANCH at the freshly rebased FETCH_HEAD.
      cros_lib.RunCommand(['git', 'checkout', '-B', constants.PATCH_BRANCH],
                          cwd=project_dir, print_cmd=False)
    except cros_lib.RunCommandError:
      try:
        # Failed to rebase against branch, try TOT.
        self._RebaseOnto(upstream, upstream, project_dir, trivial)
      except cros_lib.RunCommandError:
        raise ApplyPatchException(
            self, patch_type=ApplyPatchException.TYPE_REBASE_TO_TOT)
      else:
        # We failed to apply to patch_branch but succeeded against TOT.
        # We should pass a different type of exception in this case.
        raise ApplyPatchException(
            self, patch_type=ApplyPatchException.TYPE_REBASE_TO_PATCH_INFLIGHT)
    finally:
      cros_lib.RunCommand(['git', 'checkout', constants.PATCH_BRANCH],
                          cwd=project_dir, print_cmd=False)

  def Apply(self, buildroot, trivial=False):
    """Implementation of Patch.Apply().

    Raises:
      ApplyPatchException: If the patch failed to apply.
    """
    logging.info('Attempting to apply change %s', self)
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    if not cros_lib.DoesLocalBranchExist(project_dir, constants.PATCH_BRANCH):
      # First patch applied to this project: create the working branch
      # tracking the manifest default branch.
      upstream = cros_lib.GetManifestDefaultBranch(buildroot)
      cros_lib.RunCommand(['git', 'checkout', '-b', constants.PATCH_BRANCH,
                           '-t', 'm/' + upstream], cwd=project_dir,
                          print_cmd=False)
    self._RebasePatch(buildroot, project_dir, trivial)

  # --------------------- Gerrit Operations --------------------------------- #

  def RemoveCommitReady(self, helper, dryrun=False):
    """Remove any commit ready bits associated with CL."""
    # Only the tested patchset's COMR score is cleared; a commit-ready bit
    # levelled on a newer patchset is left untouched.
    query = ['-c',
             '"DELETE FROM patch_set_approvals WHERE change_id=%s'
             " AND patch_set_id=%s "
             " AND category_id='COMR';\""
             % (self.gerrit_number, self.patch_number)
             ]
    cmd = helper.GetGerritSqlCommand(query)
    _RunCommand(cmd, dryrun)

  def HandleCouldNotSubmit(self, helper, build_log, dryrun=False):
    """Handler that is called when Paladin can't submit a change.

    This should be rare, but if an admin overrides the commit queue and commits
    a change that conflicts with this change, it'll apply, build/validate but
    receive an error when submitting.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL to the build log for this run.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to submit your change in %s . '
           'This can happen if you submitted your change or someone else '
           'submitted a conflicting change while your change was being tested.'
           % build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleCouldNotVerify(self, helper, build_log, dryrun=False):
    """Handler for when Paladin fails to validate a change.

    This handler notifies set Verified-1 to the review forcing the developer
    to re-upload a change that works.  There are many reasons why this might be
    called e.g. build or testing exception.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL to the build log where verification results could be
        found.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to verify your change in %s . '
           'If you believe this happened in error, just re-mark your commit as '
           'ready. Your change will then get automatically retried.' %
           build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleCouldNotApply(self, helper, build_log, dryrun=False):
    """Handler for when Paladin fails to apply a change.

    This handler notifies set CodeReview-2 to the review forcing the developer
    to re-upload a rebased change.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL of where to find the logs for this build.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue failed to apply your change in %s . ' %
           build_log)
    msg += self.apply_error_message
    PaladinMessage(msg, self, helper).Send(dryrun)
    self.RemoveCommitReady(helper, dryrun)

  def HandleApplied(self, helper, build_log, dryrun=False):
    """Handler for when Paladin successfully applies a change.

    This handler notifies a developer that their change is being tried as
    part of a Paladin run defined by a build_log.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      build_log: URL of where to find the logs for this build.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    msg = ('The Commit Queue has picked up your change. '
           'You can follow along at %s .' % build_log)
    PaladinMessage(msg, self, helper).Send(dryrun)

  def CommitMessage(self, buildroot):
    """Returns the commit message for the patch as a string."""
    url = self._GetProjectUrl()
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    # Fetch the patch ref so FETCH_HEAD points at this CL's commit.
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    return_obj = cros_lib.RunCommand(['git', 'show', '-s', 'FETCH_HEAD'],
                                     cwd=project_dir, redirect_stdout=True,
                                     print_cmd=False)
    return return_obj.output

  def GerritDependencies(self, buildroot):
    """Returns an ordered list of dependencies from Gerrit.

    The list of changes are in order from FETCH_HEAD back to m/master.

    Arguments:
      buildroot: The buildroot.

    Returns:
      An ordered list of Gerrit revisions that this patch depends on.

    Raises:
      MissingChangeIDException: If a dependent change is missing its ChangeID.
    """
    dependencies = []
    url = self._GetProjectUrl()
    logging.info('Checking for Gerrit dependencies for change %s', self)
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    cros_lib.RunCommand(['git', 'fetch', url, self.ref], cwd=project_dir,
                        print_cmd=False)
    # 'git log -z' separates commits with NUL bytes, so multi-line commit
    # messages can be split apart safely below.
    return_obj = cros_lib.RunCommand(
        ['git', 'log', '-z', '%s..FETCH_HEAD^' %
         _GetProjectManifestBranch(buildroot, self.project)],
        cwd=project_dir, redirect_stdout=True, print_cmd=False)
    for patch_output in return_obj.output.split('\0'):
      if not patch_output: continue
      change_id_match = self._GIT_CHANGE_ID_RE.search(patch_output)
      if change_id_match:
        dependencies.append(change_id_match.group(1))
      else:
        raise MissingChangeIDException('Missing Change-Id in %s' % patch_output)
    if dependencies:
      logging.info('Found %s Gerrit dependencies for change %s', dependencies,
                   self)
    return dependencies

  def PaladinDependencies(self, buildroot):
    """Returns an ordered list of dependencies based on the Commit Message.

    Parses the Commit message for this change looking for lines that follow
    the format:

    CQ-DEPEND:change_num+ e.g.

    A commit which depends on a couple others.

    BUG=blah
    TEST=blah
    CQ-DEPEND=10001,10002

    Args:
      buildroot: The buildroot.

    Returns:
      A list of change tokens parsed from CQ-DEPEND lines.
    """
    dependencies = []
    logging.info('Checking for CQ-DEPEND dependencies for change %s', self)
    commit_message = self.CommitMessage(buildroot)
    matches = self._PALADIN_DEPENDENCY_RE.findall(commit_message)
    for match in matches:
      dependencies.extend(self._PALADIN_BUG_RE.findall(match))
    if dependencies:
      logging.info('Found %s Paladin dependencies for change %s', dependencies,
                   self)
    return dependencies

  def Submit(self, helper, dryrun=False):
    """Submits patch using Gerrit Review.

    Args:
      helper: Instance of gerrit_helper for the gerrit instance.
      dryrun: If true, do not actually commit anything to Gerrit.
    """
    cmd = helper.GetGerritReviewCommand(['--submit', '%s,%s' % (
        self.gerrit_number, self.patch_number)])
    _RunCommand(cmd, dryrun)

  def __str__(self):
    """Returns custom string to identify this patch."""
    # Rendered as 'owner:gerrit_number'.
    return '%s:%s' % (self.owner, self.gerrit_number)

  # Define methods to use patches in sets.  We uniquely identify patches
  # by Gerrit change numbers.  (Concretely, keyed on self.id -- the
  # Change-Id.)
  def __hash__(self):
    return hash(self.id)

  def __eq__(self, other):
    # NOTE(review): __ne__ is not defined, so '!=' falls back to the
    # default under python2 -- confirm callers only rely on '=='.
    return self.id == other.id
def RemovePatchRoot(patch_root):
  """Removes the temporary directory storing patches.

  Args:
    patch_root: Directory previously created by PrepareLocalPatches.

  Raises:
    AssertionError: if patch_root does not look like a trybot patch dir.
  """
  # Guard against wiping an arbitrary directory; unlike a bare assert,
  # this check is not stripped when python runs with -O.
  if not os.path.basename(patch_root).startswith(_TRYBOT_TEMP_PREFIX):
    raise AssertionError('%s is not a trybot patch root' % patch_root)
  shutil.rmtree(patch_root)
class LocalPatch(Patch):
  """Object that represents a set of local commits that will be patched."""

  def __init__(self, project, tracking_branch, patch_dir, local_branch):
    """Construct a LocalPatch object.

    Args:
      project: Same as Patch constructor arg.
      tracking_branch: Same as Patch constructor arg.
      patch_dir: The directory where the .patch files are stored.
      local_branch: The local branch of the project that the patch came from.
    """
    Patch.__init__(self, project, tracking_branch)
    self.patch_dir = patch_dir
    self.local_branch = local_branch

  def _GetFileList(self):
    """Return a list of .patch files in sorted order."""
    # Sorted so the numbered files produced by 'git format-patch' are
    # applied in commit order.
    file_list = glob.glob(os.path.join(self.patch_dir, '*'))
    file_list.sort()
    return file_list

  def Apply(self, buildroot, trivial=False):
    """Implementation of Patch.Apply().  Does not accept trivial option.

    Args:
      buildroot: The buildroot.
      trivial: Must be False; trivial merges are not supported for local
        patches.

    Raises:
      PatchException: if the patch is for the wrong tracking branch.
      ApplyPatchException: if 'git am' fails to apply the patch files.
    """
    assert not trivial, 'Local apply not compatible with trivial set'
    manifest_branch = _GetProjectManifestBranch(buildroot, self.project)
    if self.tracking_branch != manifest_branch:
      raise PatchException('branch %s for project %s is not tracking %s'
                           % (self.local_branch, self.project,
                              manifest_branch))
    project_dir = cros_lib.GetProjectDir(buildroot, self.project)
    try:
      # Create the working branch, then apply the mailbox patches in order.
      cros_lib.RunCommand(['repo', 'start', constants.PATCH_BRANCH, '.'],
                          cwd=project_dir)
      cros_lib.RunCommand(['git', 'am', '--3way'] + self._GetFileList(),
                          cwd=project_dir)
    except cros_lib.RunCommandError:
      raise ApplyPatchException(self)

  def __str__(self):
    """Returns custom string to identify this patch."""
    # Rendered as 'project:branch'.
    return '%s:%s' % (self.project, self.local_branch)
def _QueryGerrit(server, port, or_parameters, sort=None, options=()):
  """Freeform querying of a gerrit server.

  Args:
    server: the hostname to query.
    port: the port to query.
    or_parameters: sequence of gerrit query chunks that are OR'd together.
    sort: if given, the key in the resultant json to sort on.
    options: any additional commandline options to pass to gerrit query.

  Returns:
    A list of result dictionaries from the gerrit server; the trailing
    'stats' record is stripped.

  Raises:
    RunCommandException: if the ssh invocation fails.
    GerritException: if there is something wrong w/ the query parameters
      given, or the server returns malformed output.
  """
  cmd = ['ssh', '-p', port, server, 'gerrit', 'query', '--format=JSON']
  cmd.extend(options)
  cmd.extend(['--', ' OR '.join(or_parameters)])
  result = cros_lib.RunCommand(cmd, redirect_stdout=True)
  # A list comprehension (rather than map()) keeps the result indexable on
  # both python2 and python3; map() returns a lazy iterator on python3.
  result = [json.loads(x) for x in result.output.splitlines()]
  # Gerrit terminates its output with a 'stats' record; missing or malformed
  # trailer means the query itself was rejected.
  if not result:
    raise GerritException('weird results from gerrit: asked %s, got nothing'
                          % (or_parameters,))
  status = result[-1]
  if 'type' not in status:
    raise GerritException('weird results from gerrit: asked %s, got %s' %
                          (or_parameters, result))
  if status['type'] != 'stats':
    raise GerritException('bad gerrit query: parameters %s, error %s' %
                          (or_parameters, status.get('message', status)))
  result = result[:-1]
  if sort:
    return sorted(result, key=operator.itemgetter(sort))
  return result
def _QueryGerritMultipleCurrentPatchset(queries, internal=False):
  """Query chromeos gerrit servers for the current patch for given changes.

  Args:
    queries: sequence of Change-IDs (Ic04g2ab, 6 characters to 40),
      or change numbers (12345 for example).  A change number can refer to
      the same change as a Change ID, but Change IDs given should be unique,
      and the same goes for Change Numbers.
    internal: optional boolean; if the internal servers are to be queried,
      set to True.  Defaults to False.

  Yields:
    (query, GerritPatch) tuples, one per requested query, in no guaranteed
    order.

  Raises:
    GerritException: if a query fails to match, or isn't specific enough,
      or a query is malformed.
    RunCommandException: if for whatever reason, the ssh invocation to
      gerrit fails.
  """
  if not queries:
    return
  if internal:
    server, port = constants.GERRIT_INT_HOST, constants.GERRIT_INT_PORT
  else:
    server, port = constants.GERRIT_HOST, constants.GERRIT_PORT
  # Process the queries in two separate streams; this is done so that
  # we can identify exactly which patchset returned no results; it's
  # basically impossible to do it if you query with mixed numeric/ID.
  numeric_queries = [x for x in queries if x.isdigit()]
  if numeric_queries:
    results = _QueryGerrit(server, port,
                           ['change:%s' % x for x in numeric_queries],
                           sort='number', options=('--current-patch-set',))
    # izip_longest pads the shorter sequence with None, so a missing (or
    # extra) result shows up as a mismatched pairing below.
    for query, result in itertools.izip_longest(numeric_queries, results):
      if result is None or result['number'] != query:
        raise PatchException('Change number %s not found on server %s.'
                             % (query, server))
      yield query, GerritPatch(result, internal)
  id_queries = sorted(x.lower() for x in queries if not x.isdigit())
  if not id_queries:
    return
  results = _QueryGerrit(server, port, ['change:%s' % x for x in id_queries],
                         sort='id', options=('--current-patch-set',))
  last_patch_id = None
  for query, result in itertools.izip_longest(id_queries, results):
    # Case insensitivity to ensure that if someone queries for IABC
    # and gerrit returns Iabc, we still properly match.
    result_id = result.get('id', '') if result is not None else ''
    result_id = result_id.lower()
    if result is None or (query and not result_id.startswith(query)):
      # A result still matching the previous query means one query returned
      # multiple changes: the Change-ID prefix given was ambiguous.
      if last_patch_id and result_id.startswith(last_patch_id):
        raise PatchException('While querying for change %s, we received '
                             'back multiple results. Please be more specific. Server=%s'
                             % (last_patch_id, server))
      raise PatchException('Change-ID %s not found on server %s.'
                           % (query, server))
    if query is None:
      # More results than queries: again an ambiguous prefix.
      raise PatchException('While querying for change %s, we received '
                           'back multiple results. Please be more specific. Server=%s'
                           % (last_patch_id, server))
    yield query, GerritPatch(result, internal)
    last_patch_id = query
def GetGerritPatchInfo(patches):
  """Query Gerrit server for patch information.

  Args:
    patches: a list of patch ID's to query.  Internal patches start with
      a '*'.

  Returns:
    A list of GerritPatch objects describing each patch.  Only the first
    instance of a requested patch is returned.

  Raises:
    PatchException if a patch can't be found.
  """
  internal = [x for x in patches if x.startswith('*')]
  external = [x for x in patches if not x.startswith('*')]
  parsed = {}
  if internal:
    # Query with the leading '*' stripped, but key the results with it
    # restored; since change-ids are effectively user controlled, this
    # precludes any collision between the two gerrit instances.
    stripped = [x[1:] for x in internal]
    for key, gpatch in _QueryGerritMultipleCurrentPatchset(stripped, True):
      parsed['*' + key] = gpatch
  if external:
    for key, gpatch in _QueryGerritMultipleCurrentPatchset(external):
      parsed[key] = gpatch
  # Build a unique list while keeping the first-seen ordering, so whatever
  # ordering the user is trying to enforce survives cherry-picking.
  unique, seen_ids = [], set()
  for query in patches:
    gpatch = parsed[query.lower()]
    if gpatch.id not in seen_ids:
      seen_ids.add(gpatch.id)
      unique.append(gpatch)
  return unique
def _GetRemoteTrackingBranch(project_dir, branch):
  """Get the remote tracking branch of a local branch.

  Raises:
    cros_lib.NoTrackingBranchException: if branch does not track anything.
  """
  tracking = cros_lib.GetTrackingBranch(branch, project_dir)
  return cros_lib.GetShortBranchName(*tracking)
def _GetProjectManifestBranch(buildroot, project):
  """Get the branch specified in the manifest for the project."""
  manifest_branch = cros_lib.GetProjectManifestBranch(buildroot, project)
  return cros_lib.GetShortBranchName(*manifest_branch)
def PrepareLocalPatches(patches, manifest_branch):
  """Finish validation of parameters, and save patches to a temp folder.

  Args:
    patches: A list of user-specified patches, in project:branch form.
    manifest_branch: The manifest branch of the buildroot.

  Returns:
    A list of LocalPatch objects, one per requested patch.

  Raises:
    PatchException if:
      1. The patch isn't specified in project:branch form.
      2. The project branch contains no changes.
      3. The project branch doesn't track a remote branch.
  """
  patch_info = []
  patch_root = tempfile.mkdtemp(prefix=_TRYBOT_TEMP_PREFIX)
  # enumerate() gives each patch a stable numeric subdirectory under
  # patch_root while preserving the user-specified ordering.
  for patch_id, patch in enumerate(patches):
    try:
      project, branch = patch.split(':')
    except ValueError:
      # Surface the documented PatchException rather than a raw ValueError
      # when the spec is malformed (no colon, or more than one).
      raise PatchException('Patch %r must be specified in project:branch '
                           'form.' % patch)
    project_dir = cros_lib.GetProjectDir('.', project)
    patch_dir = os.path.join(patch_root, str(patch_id))
    cmd = ['git', 'format-patch', '%s..%s' % ('m/' + manifest_branch, branch),
           '-o', patch_dir]
    cros_lib.RunCommand(cmd, redirect_stdout=True, cwd=project_dir)
    if not os.listdir(patch_dir):
      raise PatchException('No changes found in %s:%s' % (project, branch))
    # Store remote tracking branch for verification during patch stage.
    try:
      tracking_branch = _GetRemoteTrackingBranch(project_dir, branch)
    except cros_lib.NoTrackingBranchException:
      raise PatchException('%s:%s needs to track a remote branch!'
                           % (project, branch))
    patch_info.append(LocalPatch(project, tracking_branch, patch_dir, branch))
  return patch_info
|
"""
Lcapy is a Python library for linear circuit analysis. It uses SymPy
for symbolic mathematics.
Lcapy can analyse circuits described with netlists using modified nodal
analysis. See lcapy.netlist
Alternatively, Lcapy can analyse networks and circuits formed by
combining one, two, and three port networks. See lcapy.oneport
For more detailed documentation see http://lcapy.elec.canterbury.ac.nz
Copyright 2014--2019 Michael Hayes, UCECE
"""
from __future__ import absolute_import, print_function
del absolute_import, print_function
from pkg_resources import get_distribution
__version__ = get_distribution('lcapy').version
import sys
# Fail fast on unsupported interpreters rather than with an obscure
# error deeper inside the imports below.
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
    raise ImportError("Python Version 2.6 or above is required for Lcapy.")
else:  # Python 3
    pass
    # Here we can also check for specific Python 3 versions, if needed
# Remove sys so it is not exported as part of the lcapy namespace.
del sys
from sympy import init_printing
init_printing()
from .functions import *
from .symbols import *
from .circuit import *
from .oneport import *
from .twoport import *
from .schematic import *
from .expr import *
from .cexpr import *
from .fexpr import *
from .sexpr import *
from .texpr import *
from .noiseexpr import *
from .phasor import *
from .omegaexpr import *
from .super import *
from .printing import *
from .sym import *
def show_version():
    """Show versions of Lcapy, SymPy, NumPy and Python."""

    # Local imports: these names are resolved only when show_version()
    # is actually called.
    from sys import version as python_version
    from sympy import __version__ as sympy_version
    from numpy import __version__ as numpy_version

    versions = (python_version, sympy_version, numpy_version, __version__)
    print('Python: %s\nSymPy: %s\nNumPy: %s\nLcapy: %s' % versions)
Import vector and matrix
"""
Lcapy is a Python library for linear circuit analysis. It uses SymPy
for symbolic mathematics.
Lcapy can analyse circuits described with netlists using modified nodal
analysis. See lcapy.netlist
Alternatively, Lcapy can analyse networks and circuits formed by
combining one, two, and three port networks. See lcapy.oneport
For more detailed documentation see http://lcapy.elec.canterbury.ac.nz
Copyright 2014--2019 Michael Hayes, UCECE
"""
from __future__ import absolute_import, print_function
del absolute_import, print_function
from pkg_resources import get_distribution
__version__ = get_distribution('lcapy').version
import sys
# Fail fast on unsupported interpreters rather than with an obscure
# error deeper inside the imports below.
if sys.version_info[0] == 2 and sys.version_info[1] < 6:
    raise ImportError("Python Version 2.6 or above is required for Lcapy.")
else:  # Python 3
    pass
    # Here we can also check for specific Python 3 versions, if needed
# Remove sys so it is not exported as part of the lcapy namespace.
del sys
from sympy import init_printing
init_printing()
from .functions import *
from .symbols import *
from .circuit import *
from .oneport import *
from .twoport import *
from .schematic import *
from .expr import *
from .cexpr import *
from .fexpr import *
from .sexpr import *
from .texpr import *
from .noiseexpr import *
from .phasor import *
from .omegaexpr import *
from .super import *
from .printing import *
from .sym import *
from .matrix import *
from .vector import *
def show_version():
    """Show versions of Lcapy, SymPy, NumPy and Python.

    Prints one line per component to stdout; handy for bug reports.
    """
    # Imports are local so the helper adds no package import-time cost.
    from sys import version as python_version
    from sympy import __version__ as sympy_version
    from numpy import __version__ as numpy_version
    print('Python: %s\nSymPy: %s\nNumPy: %s\nLcapy: %s' %
          (python_version, sympy_version, numpy_version, __version__))
|
# Copyright 2018-2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from collections import defaultdict, OrderedDict
from deprecated import deprecated
import inspect
import re
import tarfile
import uuid
import warnings
import zipfile
from typing import Callable, Set, List, Text, Dict, Tuple, Any, Union, Optional
import kfp
from kfp.dsl import _for_loop
from kfp.compiler import _data_passing_rewriter
from .. import dsl
from ._k8s_helper import convert_k8s_obj_to_json, sanitize_k8s_name
from ._op_to_template import _op_to_template, _process_obj
from ._default_transformers import add_pod_env
from ..components.structures import InputSpec
from ..components._yaml_utils import dump_yaml
from ..dsl._metadata import _extract_pipeline_metadata
from ..dsl._ops_group import OpsGroup
from ..dsl._pipeline_param import extract_pipelineparams_from_any, PipelineParam
class Compiler(object):
"""DSL Compiler that compiles pipeline functions into workflow yaml.
Example:
How to use the compiler to construct workflow yaml::
@dsl.pipeline(
name='name',
description='description'
)
def my_pipeline(a: int = 1, b: str = "default value"):
...
Compiler().compile(my_pipeline, 'path/to/workflow.yaml')
"""
def _pipelineparam_full_name(self, param):
"""_pipelineparam_full_name converts the names of pipeline parameters
to unique names in the argo yaml
Args:
param(PipelineParam): pipeline parameter
"""
if param.op_name:
return param.op_name + '-' + param.name
return param.name
def _get_groups_for_ops(self, root_group):
"""Helper function to get belonging groups for each op.
Each pipeline has a root group. Each group has a list of operators (leaf) and groups.
This function traverse the tree and get all ancestor groups for all operators.
Returns:
A dict. Key is the operator's name. Value is a list of ancestor groups including the
op itself. The list of a given operator is sorted in a way that the farthest
group is the first and operator itself is the last.
"""
def _get_op_groups_helper(current_groups, ops_to_groups):
root_group = current_groups[-1]
for g in root_group.groups:
# Add recursive opsgroup in the ops_to_groups
# such that the i/o dependency can be propagated to the ancester opsgroups
if g.recursive_ref:
ops_to_groups[g.name] = [x.name for x in current_groups] + [g.name]
continue
current_groups.append(g)
_get_op_groups_helper(current_groups, ops_to_groups)
del current_groups[-1]
for op in root_group.ops:
ops_to_groups[op.name] = [x.name for x in current_groups] + [op.name]
ops_to_groups = {}
current_groups = [root_group]
_get_op_groups_helper(current_groups, ops_to_groups)
return ops_to_groups
#TODO: combine with the _get_groups_for_ops
def _get_groups_for_opsgroups(self, root_group):
"""Helper function to get belonging groups for each opsgroup.
Each pipeline has a root group. Each group has a list of operators (leaf) and groups.
This function traverse the tree and get all ancestor groups for all opsgroups.
Returns:
A dict. Key is the opsgroup's name. Value is a list of ancestor groups including the
opsgroup itself. The list of a given opsgroup is sorted in a way that the farthest
group is the first and opsgroup itself is the last.
"""
def _get_opsgroup_groups_helper(current_groups, opsgroups_to_groups):
root_group = current_groups[-1]
for g in root_group.groups:
# Add recursive opsgroup in the ops_to_groups
# such that the i/o dependency can be propagated to the ancester opsgroups
if g.recursive_ref:
continue
opsgroups_to_groups[g.name] = [x.name for x in current_groups] + [g.name]
current_groups.append(g)
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
del current_groups[-1]
opsgroups_to_groups = {}
current_groups = [root_group]
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
return opsgroups_to_groups
def _get_groups(self, root_group):
"""Helper function to get all groups (not including ops) in a pipeline."""
def _get_groups_helper(group):
groups = {group.name: group}
for g in group.groups:
# Skip the recursive opsgroup because no templates
# need to be generated for the recursive opsgroups.
if not g.recursive_ref:
groups.update(_get_groups_helper(g))
return groups
return _get_groups_helper(root_group)
def _get_uncommon_ancestors(self, op_groups, opsgroup_groups, op1, op2):
"""Helper function to get unique ancestors between two ops.
For example, op1's ancestor groups are [root, G1, G2, G3, op1], op2's ancestor groups are
[root, G1, G4, op2], then it returns a tuple ([G2, G3, op1], [G4, op2]).
"""
#TODO: extract a function for the following two code module
if op1.name in op_groups:
op1_groups = op_groups[op1.name]
elif op1.name in opsgroup_groups:
op1_groups = opsgroup_groups[op1.name]
else:
raise ValueError(op1.name + ' does not exist.')
if op2.name in op_groups:
op2_groups = op_groups[op2.name]
elif op2.name in opsgroup_groups:
op2_groups = opsgroup_groups[op2.name]
else:
raise ValueError(op2.name + ' does not exist.')
both_groups = [op1_groups, op2_groups]
common_groups_len = sum(1 for x in zip(*both_groups) if x==(x[0],)*len(x))
group1 = op1_groups[common_groups_len:]
group2 = op2_groups[common_groups_len:]
return (group1, group2)
def _get_condition_params_for_ops(self, root_group):
    """Collect the PipelineParams referenced in conditions guarding each op.

    Returns a defaultdict(set) mapping op (or recursive opsgroup) name to
    the set of PipelineParams appearing in enclosing condition groups.
    """
    conditions = defaultdict(set)

    def _walk(group, inherited_params):
        active_params = inherited_params
        if group.type == 'condition':
            # Copy so sibling branches are unaffected by this condition.
            active_params = list(inherited_params)
            for operand in (group.condition.operand1, group.condition.operand2):
                if isinstance(operand, dsl.PipelineParam):
                    active_params.append(operand)
        for op in group.ops:
            conditions[op.name].update(active_params)
        for child in group.groups:
            if child.recursive_ref:
                # Propagate the condition params to the recursive
                # opsgroup itself, just like for ops.
                conditions[child.name].update(active_params)
            else:
                _walk(child, active_params)

    _walk(root_group, [])
    return conditions
def _get_next_group_or_op(cls, to_visit: List, already_visited: Set):
"""Get next group or op to visit."""
if len(to_visit) == 0:
return None
next = to_visit.pop(0)
while next in already_visited:
next = to_visit.pop(0)
already_visited.add(next)
return next
def _get_for_loop_ops(self, new_root) -> Dict[Text, dsl.ParallelFor]:
    """Collect every ParallelFor opsgroup reachable from new_root.

    Performs a breadth-first walk over all subgroups/ops and returns a
    dict keyed by the ParallelFor group's name.
    """
    found = {}
    seen = set()
    pending = self._get_all_subgroups_and_ops(new_root)
    while pending:
        node = self._get_next_group_or_op(pending, seen)
        if node is None:
            break
        pending.extend(self._get_all_subgroups_and_ops(node))
        if isinstance(node, dsl.ParallelFor):
            found[node.name] = node
    return found
def _get_all_subgroups_and_ops(self, op):
"""Get all ops and groups contained within this group."""
subgroups = []
if hasattr(op, 'ops'):
subgroups.extend(op.ops)
if hasattr(op, 'groups'):
subgroups.extend(op.groups)
return subgroups
def _get_inputs_outputs(
        self,
        pipeline,
        root_group,
        op_groups,
        opsgroup_groups,
        condition_params,
        op_name_to_for_loop_op: Dict[Text, dsl.ParallelFor],
):
    """Get inputs and outputs of each group and op.

    Args:
      pipeline: the Pipeline context object holding all ops.
      root_group: the pipeline's root OpsGroup.
      op_groups: op name -> ancestor group names (see _get_groups_for_ops).
      opsgroup_groups: opsgroup name -> ancestor group names.
      condition_params: op/group name -> PipelineParams used in enclosing
        conditions (see _get_condition_params_for_ops).
      op_name_to_for_loop_op: ParallelFor group name -> group.

    Returns:
      A tuple (inputs, outputs).
      inputs and outputs are dicts with key being the group/op names and values being list of
      tuples (param_name, producing_op_name). producing_op_name is the name of the op that
      produces the param. If the param is a pipeline param (no producer op), then
      producing_op_name is None.
    """
    inputs = defaultdict(set)
    outputs = defaultdict(set)
    for op in pipeline.ops.values():
        # op's inputs and all params used in conditions for that op are both considered.
        for param in op.inputs + list(condition_params[op.name]):
            # if the value is already provided (immediate value), then no need to expose
            # it as input for its parent groups.
            if param.value:
                continue
            if param.op_name:
                # Param is produced by another op: thread it from the
                # producer's group chain down to the consumer's.
                upstream_op = pipeline.ops[param.op_name]
                upstream_groups, downstream_groups = \
                    self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, op)
                for i, group_name in enumerate(downstream_groups):
                    if i == 0:
                        # If it is the first uncommon downstream group, then the input comes from
                        # the first uncommon upstream group.
                        inputs[group_name].add((param.full_name, upstream_groups[0]))
                    else:
                        # If not the first downstream group, then the input is passed down from
                        # its ancestor groups so the upstream group is None.
                        inputs[group_name].add((param.full_name, None))
                for i, group_name in enumerate(upstream_groups):
                    if i == len(upstream_groups) - 1:
                        # If last upstream group, it is an operator and output comes from container.
                        outputs[group_name].add((param.full_name, None))
                    else:
                        # If not last upstream group, output value comes from one of its child.
                        outputs[group_name].add((param.full_name, upstream_groups[i+1]))
            else:
                # Param is a raw pipeline parameter (no producer op);
                # exit handlers receive their params differently, so skip them.
                if not op.is_exit_handler:
                    for group_name in op_groups[op.name][::-1]:
                        # if group is for loop group and param is that loop's param, then the param
                        # is created by that for loop ops_group and it shouldn't be an input to
                        # any of its parent groups.
                        inputs[group_name].add((param.full_name, None))
                        if group_name in op_name_to_for_loop_op:
                            # for example:
                            #   loop_group.loop_args.name = 'loop-item-param-99ca152e'
                            #   param.name = 'loop-item-param-99ca152e--a'
                            loop_group = op_name_to_for_loop_op[group_name]
                            if loop_group.loop_args.name in param.name:
                                break

    # Generate the input/output for recursive opsgroups
    # It propagates the recursive opsgroups IO to their ancester opsgroups
    def _get_inputs_outputs_recursive_opsgroup(group):
        #TODO: refactor the following codes with the above
        if group.recursive_ref:
            # Pair each param with a flag marking whether it came from a
            # condition expression (condition params are handled specially
            # at the last downstream group below).
            params = [(param, False) for param in group.inputs]
            params.extend([(param, True) for param in list(condition_params[group.name])])
            for param, is_condition_param in params:
                if param.value:
                    continue
                full_name = self._pipelineparam_full_name(param)
                if param.op_name:
                    upstream_op = pipeline.ops[param.op_name]
                    upstream_groups, downstream_groups = \
                        self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, group)
                    for i, g in enumerate(downstream_groups):
                        if i == 0:
                            inputs[g].add((full_name, upstream_groups[0]))
                        # There is no need to pass the condition param as argument to the downstream ops.
                        #TODO: this might also apply to ops. add a TODO here and think about it.
                        elif i == len(downstream_groups) - 1 and is_condition_param:
                            continue
                        else:
                            inputs[g].add((full_name, None))
                    for i, g in enumerate(upstream_groups):
                        if i == len(upstream_groups) - 1:
                            outputs[g].add((full_name, None))
                        else:
                            outputs[g].add((full_name, upstream_groups[i+1]))
                elif not is_condition_param:
                    for g in op_groups[group.name]:
                        inputs[g].add((full_name, None))
        for subgroup in group.groups:
            _get_inputs_outputs_recursive_opsgroup(subgroup)

    _get_inputs_outputs_recursive_opsgroup(root_group)

    # Generate the input for SubGraph along with parallelfor
    for sub_graph in opsgroup_groups:
        if sub_graph in op_name_to_for_loop_op:
            # The opsgroup list is sorted with the farthest group as the first and the opsgroup
            # itself as the last. To get the latest opsgroup which is not the opsgroup itself -2 is used.
            parent = opsgroup_groups[sub_graph][-2]
            if parent and parent.startswith('subgraph'):
                # propagate only op's pipeline param from subgraph to parallelfor
                loop_op = op_name_to_for_loop_op[sub_graph]
                pipeline_param = loop_op.loop_args.items_or_pipeline_param
                if loop_op.items_is_pipeline_param and pipeline_param.op_name:
                    param_name = '%s-%s' % (
                        sanitize_k8s_name(pipeline_param.op_name), pipeline_param.name)
                    inputs[parent].add((param_name, pipeline_param.op_name))
    return inputs, outputs
def _get_dependencies(self, pipeline, root_group, op_groups, opsgroups_groups, opsgroups, condition_params):
    """Get dependent groups and ops for all ops and groups.

    Args:
      pipeline: the Pipeline context object holding all ops.
      root_group: the pipeline's root OpsGroup.
      op_groups: op name -> ancestor group names.
      opsgroups_groups: opsgroup name -> ancestor group names.
      opsgroups: opsgroup name -> opsgroup (see _get_groups).
      condition_params: op/group name -> PipelineParams used in conditions.

    Returns:
      A dict. Key is group/op name, value is a list of dependent groups/ops.
      The dependencies are calculated in the following way: if op2 depends on op1,
      and their ancestors are [root, G1, G2, op1] and [root, G1, G3, G4, op2],
      then G3 is dependent on G2. Basically dependency only exists in the first uncommon
      ancesters in their ancesters chain. Only sibling groups/ops can have dependencies.

    Raises:
      ValueError: if a dependency name matches neither an op nor an opsgroup.
    """
    dependencies = defaultdict(set)
    for op in pipeline.ops.values():
        # An op depends on the producers of its inputs and of its
        # condition params, plus any explicitly declared dependencies.
        upstream_op_names = set()
        for param in op.inputs + list(condition_params[op.name]):
            if param.op_name:
                upstream_op_names.add(param.op_name)
        upstream_op_names |= set(op.dependent_names)

        for upstream_op_name in upstream_op_names:
            # the dependent op could be either a BaseOp or an opsgroup
            if upstream_op_name in pipeline.ops:
                upstream_op = pipeline.ops[upstream_op_name]
            elif upstream_op_name in opsgroups:
                upstream_op = opsgroups[upstream_op_name]
            else:
                raise ValueError('compiler cannot find the ' + upstream_op_name)

            # Record the dependency at the first uncommon ancestor level,
            # so only siblings depend on each other.
            upstream_groups, downstream_groups = self._get_uncommon_ancestors(op_groups, opsgroups_groups, upstream_op, op)
            dependencies[downstream_groups[0]].add(upstream_groups[0])

    # Generate dependencies based on the recursive opsgroups
    #TODO: refactor the following codes with the above
    def _get_dependency_opsgroup(group, dependencies):
        upstream_op_names = set([dependency.name for dependency in group.dependencies])
        if group.recursive_ref:
            # Recursive opsgroups also depend on the producers of their
            # inputs and condition params.
            for param in group.inputs + list(condition_params[group.name]):
                if param.op_name:
                    upstream_op_names.add(param.op_name)

        for op_name in upstream_op_names:
            if op_name in pipeline.ops:
                upstream_op = pipeline.ops[op_name]
            elif op_name in opsgroups:
                upstream_op = opsgroups[op_name]
            else:
                raise ValueError('compiler cannot find the ' + op_name)
            upstream_groups, downstream_groups = \
                self._get_uncommon_ancestors(op_groups, opsgroups_groups, upstream_op, group)
            dependencies[downstream_groups[0]].add(upstream_groups[0])

        for subgroup in group.groups:
            _get_dependency_opsgroup(subgroup, dependencies)

    _get_dependency_opsgroup(root_group, dependencies)
    return dependencies
def _resolve_value_or_reference(self, value_or_reference, potential_references):
    """Resolve a value or PipelineParam to its Argo expression string.

    Args:
      value_or_reference: value or reference to be resolved. It could be basic python types or PipelineParam
      potential_references(dict{str->str}): a dictionary of parameter names to task names
    """
    if not isinstance(value_or_reference, dsl.PipelineParam):
        # Plain python value: stringify as-is.
        return str(value_or_reference)

    parameter_name = self._pipelineparam_full_name(value_or_reference)
    matching_tasks = [t for p, t in potential_references if p == parameter_name]
    if not matching_tasks:
        return '{{inputs.parameters.%s}}' % parameter_name
    task_name = matching_tasks[0]
    if task_name is None:
        # A None task name means the parameter comes down from an ancestor
        # group rather than a sibling task, so it resolves as a group input.
        return '{{inputs.parameters.%s}}' % parameter_name
    return '{{tasks.%s.outputs.parameters.%s}}' % (task_name, parameter_name)
@staticmethod
def _resolve_task_pipeline_param(pipeline_param: PipelineParam, group_type) -> str:
    """Resolve a PipelineParam to the Argo expression that yields its value.

    Workflow-level params resolve to workflow arguments; op-produced
    params resolve to a task output, except inside a subgraph where they
    are passed down as the subgraph's own input parameter.
    """
    producer = pipeline_param.op_name
    if producer is None:
        return '{{workflow.parameters.%s}}' % pipeline_param.name
    param_name = '%s-%s' % (sanitize_k8s_name(producer), pipeline_param.name)
    if group_type == 'subgraph':
        return '{{inputs.parameters.%s}}' % (param_name)
    return '{{tasks.%s.outputs.parameters.%s}}' % (sanitize_k8s_name(producer), param_name)
def _group_to_dag_template(self, group, inputs, outputs, dependencies):
    """Generate an Argo DAG template dict for an OpsGroup.

    Args:
      group: the OpsGroup to convert.
      inputs/outputs: group/op name -> set of (param_name, producer_name)
        tuples, as built by _get_inputs_outputs.
      dependencies: group/op name -> set of dependent sibling names.

    Returns:
      A dict in Argo workflow template shape with 'name', optional
      'parallelism', 'inputs', 'outputs', and a 'dag' of tasks.
    """
    template = {'name': group.name}
    if group.parallelism != None:
        template["parallelism"] = group.parallelism

    # Generate inputs section.
    if inputs.get(group.name, None):
        template_inputs = [{'name': x[0]} for x in inputs[group.name]]
        # Sorted for deterministic yaml output.
        template_inputs.sort(key=lambda x: x['name'])
        template['inputs'] = {
            'parameters': template_inputs
        }

    # Generate outputs section.
    if outputs.get(group.name, None):
        template_outputs = []
        for param_name, dependent_name in outputs[group.name]:
            template_outputs.append({
                'name': param_name,
                'valueFrom': {
                    'parameter': '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
                }
            })
        template_outputs.sort(key=lambda x: x['name'])
        template['outputs'] = {'parameters': template_outputs}

    # Generate tasks section.
    tasks = []
    sub_groups = group.groups + group.ops
    for sub_group in sub_groups:
        is_recursive_subgroup = (isinstance(sub_group, OpsGroup) and sub_group.recursive_ref)
        # Special handling for recursive subgroup: use the existing opsgroup name
        if is_recursive_subgroup:
            task = {
                'name': sub_group.recursive_ref.name,
                'template': sub_group.recursive_ref.name,
            }
        else:
            task = {
                'name': sub_group.name,
                'template': sub_group.name,
            }
        if isinstance(sub_group, dsl.OpsGroup) and sub_group.type == 'condition':
            # Condition groups become a 'when' guard on the task.
            subgroup_inputs = inputs.get(sub_group.name, [])
            condition = sub_group.condition
            operand1_value = self._resolve_value_or_reference(condition.operand1, subgroup_inputs)
            operand2_value = self._resolve_value_or_reference(condition.operand2, subgroup_inputs)
            if condition.operator in ['==', '!=']:
                # Quote operands for string (in)equality comparison.
                operand1_value = '"' + operand1_value + '"'
                operand2_value = '"' + operand2_value + '"'
            task['when'] = '{} {} {}'.format(operand1_value, condition.operator, operand2_value)

        # Generate dependencies section for this task.
        if dependencies.get(sub_group.name, None):
            group_dependencies = list(dependencies[sub_group.name])
            group_dependencies.sort()
            task['dependencies'] = group_dependencies

        # Generate arguments section for this task.
        if inputs.get(sub_group.name, None):
            task['arguments'] = {'parameters': self.get_arguments_for_sub_group(sub_group, is_recursive_subgroup, inputs)}

        # additional task modifications for withItems and withParam
        if isinstance(sub_group, dsl.ParallelFor):
            if sub_group.items_is_pipeline_param:
                # these loop args are a 'withParam' rather than 'withItems'.
                # i.e., rather than a static list, they are either the output of another task or were input
                # as global pipeline parameters
                pipeline_param = sub_group.loop_args.items_or_pipeline_param
                withparam_value = self._resolve_task_pipeline_param(pipeline_param, group.type)
                if pipeline_param.op_name:
                    # these loop args are the output of another task
                    if 'dependencies' not in task or task['dependencies'] is None:
                        task['dependencies'] = []
                    if sanitize_k8s_name(
                            pipeline_param.op_name) not in task['dependencies'] and group.type != 'subgraph':
                        task['dependencies'].append(
                            sanitize_k8s_name(pipeline_param.op_name))
                task['withParam'] = withparam_value
            else:
                # Need to sanitize the dict keys for consistency.
                loop_tasks = sub_group.loop_args.to_list_for_task_yaml()
                nested_pipeline_params = extract_pipelineparams_from_any(loop_tasks)

                # Set dependencies in case of nested pipeline_params
                map_to_tmpl_var = {str(p): self._resolve_task_pipeline_param(p, group.type) for p in nested_pipeline_params}
                for pipeline_param in nested_pipeline_params:
                    if pipeline_param.op_name:
                        # these pipeline_param are the output of another task
                        if 'dependencies' not in task or task['dependencies'] is None:
                            task['dependencies'] = []
                        if sanitize_k8s_name(
                                pipeline_param.op_name) not in task['dependencies']:
                            task['dependencies'].append(
                                sanitize_k8s_name(pipeline_param.op_name))

                sanitized_tasks = []
                if isinstance(loop_tasks[0], dict):
                    for argument_set in loop_tasks:
                        c_dict = {}
                        for k, v in argument_set.items():
                            c_dict[sanitize_k8s_name(k, True)] = v
                        sanitized_tasks.append(c_dict)
                else:
                    sanitized_tasks = loop_tasks
                # Replace pipeline param if map_to_tmpl_var not empty
                task['withItems'] = _process_obj(sanitized_tasks, map_to_tmpl_var) if map_to_tmpl_var else sanitized_tasks

        # We will sort dependencies to have deterministic yaml and thus stable tests
        if task.get('dependencies'):
            task['dependencies'].sort()

        tasks.append(task)
    tasks.sort(key=lambda x: x['name'])
    template['dag'] = {'tasks': tasks}
    return template
def get_arguments_for_sub_group(
        self,
        sub_group: Union[OpsGroup, dsl._container_op.BaseOp],
        is_recursive_subgroup: Optional[bool],
        inputs: Dict[Text, Tuple[Text, Text]],
):
    """Build the Argo 'arguments.parameters' list for a sub-group task.

    Args:
      sub_group: the child group or op the arguments are generated for.
      is_recursive_subgroup: True when sub_group is a recursive opsgroup
        reference; argument names are then mapped via the referenced
        group's declared arguments.
    """
    arguments = []
    for param_name, dependent_name in inputs[sub_group.name]:
        if is_recursive_subgroup:
            # Find the declared argument matching this param; note the code
            # relies on Python leaking the loop variable `input_name` after
            # `break` for the lookup below.
            for input_name, input in sub_group.arguments.items():
                if param_name == self._pipelineparam_full_name(input):
                    break
            referenced_input = sub_group.recursive_ref.arguments[input_name]
            argument_name = self._pipelineparam_full_name(referenced_input)
        else:
            argument_name = param_name

        # Preparing argument. It can be pipeline input reference, task output reference or loop item (or loop item attribute
        sanitized_loop_arg_full_name = '---'
        if isinstance(sub_group, dsl.ParallelFor):
            sanitized_loop_arg_full_name = sanitize_k8s_name(self._pipelineparam_full_name(sub_group.loop_args))
        arg_ref_full_name = sanitize_k8s_name(param_name)
        # We only care about the reference to the current loop item, not the outer loops
        if isinstance(sub_group, dsl.ParallelFor) and arg_ref_full_name.startswith(sanitized_loop_arg_full_name):
            if arg_ref_full_name == sanitized_loop_arg_full_name:
                argument_value = '{{item}}'
            elif _for_loop.LoopArgumentVariable.name_is_loop_arguments_variable(param_name):
                subvar_name = _for_loop.LoopArgumentVariable.get_subvar_name(param_name)
                argument_value = '{{item.%s}}' % subvar_name
            else:
                raise ValueError("Argument seems to reference the loop item, but not the item itself and not some attribute of the item. param_name: {}, ".format(param_name))
        else:
            if dependent_name:
                # Produced by a sibling task: reference its output.
                argument_value = '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
            else:
                # Passed down from the enclosing group's inputs.
                argument_value = '{{inputs.parameters.%s}}' % param_name

        arguments.append({
            'name': argument_name,
            'value': argument_value,
        })
    # Sorted for deterministic yaml output.
    arguments.sort(key=lambda x: x['name'])
    return arguments
def _create_dag_templates(self, pipeline, op_transformers=None, op_to_templates_handler=None):
    """Create all groups and ops templates in the pipeline.

    Args:
      pipeline: Pipeline context object to get all the pipeline data from.
      op_transformers: A list of functions that are applied to all ContainerOp instances that are being processed.
      op_to_templates_handler: Handler which converts a base op into a list of argo templates.

    Returns:
      A list of Argo template dicts: one DAG template per opsgroup plus
      the container templates produced for each op.
    """
    op_to_templates_handler = op_to_templates_handler or (lambda op : [_op_to_template(op)])
    root_group = pipeline.groups[0]

    # Call the transformation functions before determining the inputs/outputs, otherwise
    # the user would not be able to use pipeline parameters in the container definition
    # (for example as pod labels) - the generated template is invalid.
    for op in pipeline.ops.values():
        for transformer in op_transformers or []:
            transformer(op)

    # Generate core data structures to prepare for argo yaml generation
    #   op_name_to_parent_groups: op name -> list of ancestor groups including the current op
    #   opsgroups: a dictionary of ospgroup.name -> opsgroup
    #   inputs, outputs: group/op names -> list of tuples (full_param_name, producing_op_name)
    #   condition_params: recursive_group/op names -> list of pipelineparam
    #   dependencies: group/op name -> list of dependent groups/ops.
    # Special Handling for the recursive opsgroup
    #   op_name_to_parent_groups also contains the recursive opsgroups
    #   condition_params from _get_condition_params_for_ops also contains the recursive opsgroups
    #   groups does not include the recursive opsgroups
    opsgroups = self._get_groups(root_group)
    op_name_to_parent_groups = self._get_groups_for_ops(root_group)
    opgroup_name_to_parent_groups = self._get_groups_for_opsgroups(root_group)
    condition_params = self._get_condition_params_for_ops(root_group)
    op_name_to_for_loop_op = self._get_for_loop_ops(root_group)
    inputs, outputs = self._get_inputs_outputs(
        pipeline,
        root_group,
        op_name_to_parent_groups,
        opgroup_name_to_parent_groups,
        condition_params,
        op_name_to_for_loop_op,
    )
    dependencies = self._get_dependencies(
        pipeline,
        root_group,
        op_name_to_parent_groups,
        opgroup_name_to_parent_groups,
        opsgroups,
        condition_params,
    )

    templates = []
    for opsgroup in opsgroups.keys():
        template = self._group_to_dag_template(opsgroups[opsgroup], inputs, outputs, dependencies)
        templates.append(template)

    for op in pipeline.ops.values():
        templates.extend(op_to_templates_handler(op))

    return templates
def _create_pipeline_workflow(self, parameter_defaults, pipeline, op_transformers=None, pipeline_conf=None):
    """Create the Argo Workflow dict for the pipeline.

    Args:
      parameter_defaults: ordered mapping of pipeline parameter name ->
        default value (None means no default).
      pipeline: the Pipeline context object.
      op_transformers: functions applied to every op before templating.
      pipeline_conf: PipelineConf carrying workflow-level settings; this
        method dereferences it unconditionally, so callers must pass one.

    Returns:
      The complete workflow as a plain dict (Argo Workflow schema).

    Raises:
      ValueError: if pipeline_conf.image_pull_policy is set to an
        unsupported value.
    """
    # Input Parameters
    input_params = []
    for name, value in parameter_defaults.items():
        param = {'name': name}
        if value is not None:
            param['value'] = value
        input_params.append(param)

    # Making the pipeline group name unique to prevent name clashes with templates
    pipeline_group = pipeline.groups[0]
    temp_pipeline_group_name = uuid.uuid4().hex
    pipeline_group.name = temp_pipeline_group_name

    # Templates
    templates = self._create_dag_templates(pipeline, op_transformers)

    # Exit Handler
    exit_handler = None
    if pipeline.groups[0].groups:
        first_group = pipeline.groups[0].groups[0]
        if first_group.type == 'exit_handler':
            exit_handler = first_group.exit_op

    # The whole pipeline workflow
    # It must be valid as a subdomain
    pipeline_name = pipeline.name or 'pipeline'

    # Workaround for pipeline name clashing with container template names
    # TODO: Make sure template names cannot clash at all (container, DAG, workflow)
    template_map = {template['name'].lower(): template for template in templates}
    from ..components._naming import _make_name_unique_by_adding_index
    pipeline_template_name = _make_name_unique_by_adding_index(pipeline_name, template_map, '-')

    # Restoring the name of the pipeline template
    pipeline_template = template_map[temp_pipeline_group_name]
    pipeline_template['name'] = pipeline_template_name

    # Sorted for deterministic yaml output.
    templates.sort(key=lambda x: x['name'])
    workflow = {
        'apiVersion': 'argoproj.io/v1alpha1',
        'kind': 'Workflow',
        'metadata': {'generateName': pipeline_template_name + '-'},
        'spec': {
            'entrypoint': pipeline_template_name,
            'templates': templates,
            'arguments': {'parameters': input_params},
            'serviceAccountName': 'pipeline-runner',
        }
    }
    # set parallelism limits at pipeline level
    if pipeline_conf.parallelism:
        workflow['spec']['parallelism'] = pipeline_conf.parallelism

    # set ttl after workflow finishes
    if pipeline_conf.ttl_seconds_after_finished >= 0:
        workflow['spec']['ttlSecondsAfterFinished'] = pipeline_conf.ttl_seconds_after_finished

    if pipeline_conf._pod_disruption_budget_min_available:
        pod_disruption_budget = {"minAvailable": pipeline_conf._pod_disruption_budget_min_available}
        workflow['spec']['podDisruptionBudget'] = pod_disruption_budget

    if len(pipeline_conf.image_pull_secrets) > 0:
        image_pull_secrets = []
        for image_pull_secret in pipeline_conf.image_pull_secrets:
            image_pull_secrets.append(convert_k8s_obj_to_json(image_pull_secret))
        workflow['spec']['imagePullSecrets'] = image_pull_secrets

    if pipeline_conf.timeout:
        workflow['spec']['activeDeadlineSeconds'] = pipeline_conf.timeout

    if exit_handler:
        workflow['spec']['onExit'] = exit_handler.name

    # This can be overwritten by the task specific
    # nodeselection, specified in the template.
    if pipeline_conf.default_pod_node_selector:
        workflow['spec']['nodeSelector'] = pipeline_conf.default_pod_node_selector

    if pipeline_conf.dns_config:
        workflow['spec']['dnsConfig'] = convert_k8s_obj_to_json(pipeline_conf.dns_config)

    if pipeline_conf.image_pull_policy != None:
        if pipeline_conf.image_pull_policy in ["Always", "Never", "IfNotPresent"]:
            # Apply as a default: task-level imagePullPolicy wins if set.
            for template in workflow["spec"]["templates"]:
                container = template.get('container', None)
                if container and "imagePullPolicy" not in container:
                    container["imagePullPolicy"] = pipeline_conf.image_pull_policy
        else:
            raise ValueError(
                'Invalid imagePullPolicy. Must be one of `Always`, `Never`, `IfNotPresent`.'
            )
    return workflow
def _validate_exit_handler(self, pipeline):
"""Makes sure there is only one global exit handler.
Note this is a temporary workaround until argo supports local exit handler.
"""
def _validate_exit_handler_helper(group, exiting_op_names, handler_exists):
if group.type == 'exit_handler':
if handler_exists or len(exiting_op_names) > 1:
raise ValueError('Only one global exit_handler is allowed and all ops need to be included.')
handler_exists = True
if group.ops:
exiting_op_names.extend([x.name for x in group.ops])
for g in group.groups:
_validate_exit_handler_helper(g, exiting_op_names, handler_exists)
return _validate_exit_handler_helper(pipeline.groups[0], [], False)
def _sanitize_and_inject_artifact(self, pipeline: dsl.Pipeline, pipeline_conf=None):
    """Sanitize operator/param names and inject pipeline artifact location.

    Mutates the pipeline's ops in place, rewriting every op name, output
    param name, dependency name and artifact key to a k8s-safe form.
    NOTE(review): only the sanitization is visible in this body; the
    artifact-location injection mentioned in the summary is presumably
    handled elsewhere (pipeline_conf is unused here) — confirm.
    """
    # Sanitize operator names and param names
    sanitized_ops = {}

    for op in pipeline.ops.values():
        sanitized_name = sanitize_k8s_name(op.name)
        op.name = sanitized_name
        # Outputs keep their producing op reference in sync.
        for param in op.outputs.values():
            param.name = sanitize_k8s_name(param.name, True)
            if param.op_name:
                param.op_name = sanitize_k8s_name(param.op_name)
        if op.output is not None and not isinstance(op.output, dsl._container_op._MultipleOutputsError):
            op.output.name = sanitize_k8s_name(op.output.name, True)
            op.output.op_name = sanitize_k8s_name(op.output.op_name)
        if op.dependent_names:
            op.dependent_names = [sanitize_k8s_name(name) for name in op.dependent_names]
        if isinstance(op, dsl.ContainerOp) and op.file_outputs is not None:
            # Keys become k8s-safe names; file paths are left untouched.
            sanitized_file_outputs = {}
            for key in op.file_outputs.keys():
                sanitized_file_outputs[sanitize_k8s_name(key, True)] = op.file_outputs[key]
            op.file_outputs = sanitized_file_outputs
        elif isinstance(op, dsl.ResourceOp) and op.attribute_outputs is not None:
            sanitized_attribute_outputs = {}
            for key in op.attribute_outputs.keys():
                sanitized_attribute_outputs[sanitize_k8s_name(key, True)] = \
                    op.attribute_outputs[key]
            op.attribute_outputs = sanitized_attribute_outputs
        if isinstance(op, dsl.ContainerOp):
            if op.input_artifact_paths:
                op.input_artifact_paths = {sanitize_k8s_name(key, True): value for key, value in op.input_artifact_paths.items()}
            if op.artifact_arguments:
                op.artifact_arguments = {sanitize_k8s_name(key, True): value for key, value in op.artifact_arguments.items()}
        sanitized_ops[sanitized_name] = op
    # Re-key the op table by the sanitized names.
    pipeline.ops = sanitized_ops
def _create_workflow(self,
                     pipeline_func: Callable,
                     pipeline_name: Text=None,
                     pipeline_description: Text=None,
                     params_list: List[dsl.PipelineParam]=None,
                     pipeline_conf: dsl.PipelineConf = None,
                     ) -> Dict[Text, Any]:
    """Internal implementation of create_workflow.

    Args:
      pipeline_func: Pipeline function where ContainerOps are invoked.
      pipeline_name: Optional override for the name recorded by @dsl.pipeline.
      pipeline_description: Optional override for the recorded description.
      params_list: Extra pipeline params to append to the pipeline inputs.
      pipeline_conf: Pipeline-level configuration; overrides the conf set
        inside the pipeline function.

    Returns:
      The compiled workflow spec as a dict.
    """
    params_list = params_list or []

    # Create the arg list with no default values and call pipeline function.
    # Assign type information to the PipelineParam
    pipeline_meta = _extract_pipeline_metadata(pipeline_func)
    # Explicit arguments win over whatever the decorator recorded.
    pipeline_meta.name = pipeline_name or pipeline_meta.name
    pipeline_meta.description = pipeline_description or pipeline_meta.description
    pipeline_name = sanitize_k8s_name(pipeline_meta.name)

    # Need to first clear the default value of dsl.PipelineParams. Otherwise, it
    # will be resolved immediately in place when being to each component.
    default_param_values = OrderedDict()

    # If the pipeline function carries an output_directory, expose it as the
    # root parameter so it can be overridden at run time.
    if getattr(pipeline_func, 'output_directory', None):
        dsl_pipeline_root = dsl.PipelineParam(
            name=dsl.ROOT_PARAMETER_NAME, value=pipeline_func.output_directory)
        pipeline_func.output_directory = dsl_pipeline_root
        params_list.append(dsl_pipeline_root)

    for param in params_list:
        default_param_values[param.name] = param.value
        param.value = None

    # Build the call arguments for the pipeline function from its signature,
    # pairing each argument with the type recorded in pipeline metadata.
    args_list = []
    kwargs_dict = dict()
    signature = inspect.signature(pipeline_func)
    for arg_name, arg in signature.parameters.items():
        arg_type = None
        for input in pipeline_meta.inputs or []:
            if arg_name == input.name:
                arg_type = input.type
                break
        param = dsl.PipelineParam(sanitize_k8s_name(arg_name, True), param_type=arg_type)
        if arg.kind == inspect.Parameter.KEYWORD_ONLY:
            kwargs_dict[arg_name] = param
        else:
            args_list.append(param)

    # Executing the pipeline function inside the Pipeline context records all
    # created ops/groups on dsl_pipeline.
    with dsl.Pipeline(pipeline_name) as dsl_pipeline:
        pipeline_func(*args_list, **kwargs_dict)

    pipeline_conf = pipeline_conf or dsl_pipeline.conf # Configuration passed to the compiler is overriding. Unfortunately, it's not trivial to detect whether the dsl_pipeline.conf was ever modified.

    self._validate_exit_handler(dsl_pipeline)
    self._sanitize_and_inject_artifact(dsl_pipeline, pipeline_conf)

    # Fill in the default values by merging two param lists.
    args_list_with_defaults = OrderedDict()
    if pipeline_meta.inputs:
        args_list_with_defaults = OrderedDict([
            (sanitize_k8s_name(input_spec.name, True), input_spec.default)
            for input_spec in pipeline_meta.inputs
        ])

    if params_list:
        # Or, if args are provided by params_list, fill in pipeline_meta.
        for k, v in default_param_values.items():
            args_list_with_defaults[k] = v

        pipeline_meta.inputs = pipeline_meta.inputs or []
        for param in params_list:
            pipeline_meta.inputs.append(
                InputSpec(
                    name=param.name,
                    type=param.param_type,
                    default=default_param_values[param.name]))

    # add_pod_env always runs first; user transformers follow.
    op_transformers = [add_pod_env]
    op_transformers.extend(pipeline_conf.op_transformers)

    workflow = self._create_pipeline_workflow(
        args_list_with_defaults,
        dsl_pipeline,
        op_transformers,
        pipeline_conf,
    )

    # Rewrite data passing so large artifacts are moved via files, not params.
    from ._data_passing_rewriter import fix_big_data_passing
    workflow = fix_big_data_passing(workflow)

    output_directory = getattr(pipeline_func, 'output_directory', None)
    workflow = _data_passing_rewriter.add_pod_name_passing(
        workflow, str(output_directory))

    if pipeline_conf and pipeline_conf.data_passing_method != None:
        workflow = pipeline_conf.data_passing_method(workflow)

    metadata = workflow.setdefault('metadata', {})
    annotations = metadata.setdefault('annotations', {})

    annotations['pipelines.kubeflow.org/kfp_sdk_version'] = kfp.__version__
    annotations['pipelines.kubeflow.org/pipeline_compilation_time'] = datetime.datetime.now().isoformat()
    annotations['pipelines.kubeflow.org/pipeline_spec'] = json.dumps(pipeline_meta.to_dict(), sort_keys=True)

    # Labels might be logged better than annotations so adding some information here as well
    labels = metadata.setdefault('labels', {})
    labels['pipelines.kubeflow.org/kfp_sdk_version'] = kfp.__version__

    return workflow
# For now (0.1.31) this function is only used by TFX's KubeflowDagRunner.
# See https://github.com/tensorflow/tfx/blob/811e4c1cc0f7903d73d151b9d4f21f79f6013d4a/tfx/orchestration/kubeflow/kubeflow_dag_runner.py#L238
@deprecated(
    version='0.1.32',
    reason='Workflow spec is not intended to be handled by user, please '
    'switch to _create_workflow')
def create_workflow(self,
                    pipeline_func: Callable,
                    pipeline_name: Text=None,
                    pipeline_description: Text=None,
                    params_list: List[dsl.PipelineParam]=None,
                    pipeline_conf: dsl.PipelineConf = None) -> Dict[Text, Any]:
    """Create a workflow spec from a pipeline function plus optional metadata.

    Pipeline params come either from the pipeline function's signature or
    from `params_list`; supplying conflicting params raises ValueError.

    Args:
      pipeline_func: Pipeline function where ContainerOps are invoked.
      pipeline_name: The name of the pipeline to compile.
      pipeline_description: The description of the pipeline.
      params_list: List of pipeline params to append to the pipeline.
      pipeline_conf: PipelineConf instance. Can specify op transforms, image
        pull secrets and other pipeline-level configuration options.
        Overrides any configuration that may be set by the pipeline.

    Returns:
      The created workflow dictionary.
    """
    # Deprecated public entry point: delegate everything to the internal
    # implementation unchanged.
    return self._create_workflow(
        pipeline_func=pipeline_func,
        pipeline_name=pipeline_name,
        pipeline_description=pipeline_description,
        params_list=params_list,
        pipeline_conf=pipeline_conf)
@deprecated(
    version='0.1.32',
    reason='Switch to _create_workflow.')
def _compile(self, pipeline_func, pipeline_conf: dsl.PipelineConf = None):
    """Compile the given pipeline function into workflow.

    Deprecated thin shim: delegates to _create_workflow with default
    name/description/params.
    """
    return self._create_workflow(pipeline_func=pipeline_func, pipeline_conf=pipeline_conf)
def compile(self, pipeline_func, package_path, type_check=True, pipeline_conf: dsl.PipelineConf = None):
    """Compile the given pipeline function into workflow yaml.

    Args:
      pipeline_func: Pipeline functions with @dsl.pipeline decorator.
      package_path: The output workflow tar.gz file path. for example,
        "~/a.tar.gz"
      type_check: Whether to enable the type check or not, default: True.
      pipeline_conf: PipelineConf instance. Can specify op transforms, image
        pull secrets and other pipeline-level configuration options. Overrides
        any configuration that may be set by the pipeline.
    """
    import kfp
    type_check_old_value = kfp.TYPE_CHECK
    # Temporarily override the global type-check flag for the duration of
    # compilation and restore the caller's setting even on failure.
    try:
        kfp.TYPE_CHECK = type_check
        self._create_and_write_workflow(
            pipeline_func=pipeline_func,
            pipeline_conf=pipeline_conf,
            package_path=package_path)
    finally:
        kfp.TYPE_CHECK = type_check_old_value
@staticmethod
def _write_workflow(workflow: Dict[Text, Any], package_path: Text = None):
    """Dump pipeline workflow into yaml spec and write out in the format specified by the user.

    Args:
      workflow: Workflow spec of the pipline, dict.
      package_path: file path to be written. If not specified, a yaml_text
        string will be returned. The extension selects the archive format.
    """
    yaml_text = dump_yaml(workflow)

    if package_path is None:
        return yaml_text

    if package_path.endswith(('.tar.gz', '.tgz')):
        from contextlib import closing
        from io import BytesIO
        with tarfile.open(package_path, "w:gz") as tar:
            with closing(BytesIO(yaml_text.encode())) as yaml_file:
                tarinfo = tarfile.TarInfo('pipeline.yaml')
                tarinfo.size = len(yaml_file.getvalue())
                tar.addfile(tarinfo, fileobj=yaml_file)
    elif package_path.endswith('.zip'):
        # 'zip_file' rather than 'zip' to avoid shadowing the builtin.
        with zipfile.ZipFile(package_path, "w") as zip_file:
            zipinfo = zipfile.ZipInfo('pipeline.yaml')
            zipinfo.compress_type = zipfile.ZIP_DEFLATED
            zip_file.writestr(zipinfo, yaml_text)
    elif package_path.endswith(('.yaml', '.yml')):
        with open(package_path, 'w') as yaml_file:
            yaml_file.write(yaml_text)
    else:
        raise ValueError(
            'The output path ' + package_path +
            ' should end with one of the following formats: '
            '[.tar.gz, .tgz, .zip, .yaml, .yml]')
def _create_and_write_workflow(
        self,
        pipeline_func: Callable,
        pipeline_name: Text=None,
        pipeline_description: Text=None,
        params_list: List[dsl.PipelineParam]=None,
        pipeline_conf: dsl.PipelineConf=None,
        package_path: Text=None
) -> None:
    """Compile the given pipeline function and dump it to specified file format."""
    workflow = self._create_workflow(
        pipeline_func=pipeline_func,
        pipeline_name=pipeline_name,
        pipeline_description=pipeline_description,
        params_list=params_list,
        pipeline_conf=pipeline_conf)
    self._write_workflow(workflow, package_path)
    # Validation runs after writing, so the package is on disk for inspection
    # even when validation raises.
    _validate_workflow(workflow)
def _validate_workflow(workflow: dict):
    """Validate a compiled workflow spec.

    Checks for unresolved PipelineParam placeholders and, when a local
    `argo` binary is available, runs `argo lint` on the spec.

    Args:
      workflow: Compiled workflow spec dict (not mutated).

    Raises:
      RuntimeError: if unresolved PipelineParams remain, or argo lint fails.
    """
    import copy
    # Deep copy before patching: the previous shallow .copy() still shared
    # the nested 'spec' structure, so the Argo lint workaround below leaked
    # empty 'value' fields into the caller's workflow.
    workflow = copy.deepcopy(workflow)
    # Working around Argo lint issue
    for argument in workflow['spec'].get('arguments', {}).get('parameters', []):
        if 'value' not in argument:
            argument['value'] = ''

    yaml_text = dump_yaml(workflow)
    if '{{pipelineparam' in yaml_text:
        raise RuntimeError(
            '''Internal compiler error: Found unresolved PipelineParam.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.'''
        )

    # Running Argo lint if available
    import shutil
    argo_path = shutil.which('argo')
    if argo_path:
        has_working_argo_lint = False
        try:
            has_working_argo_lint = _run_argo_lint('')
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # still propagate; lint availability probing stays best-effort.
        except Exception:
            warnings.warn("Cannot validate the compiled workflow. Found the argo program in PATH, but it's not usable. argo v2.4.3 should work.")

        if has_working_argo_lint:
            _run_argo_lint(yaml_text)
def _run_argo_lint(yaml_text: str):
# Running Argo lint if available
import shutil
import subprocess
argo_path = shutil.which('argo')
if argo_path:
result = subprocess.run([argo_path, 'lint', '/dev/stdin'], input=yaml_text.encode('utf-8'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode:
if re.match(
pattern=r'.+failed to resolve {{tasks\..+\.outputs\.artifacts\..+}}.+',
string=result.stderr.decode('utf-8')
):
raise RuntimeError(
'Compiler has produced Argo-incompatible workflow due to '
'unresolvable input artifact(s). Please check whether inputPath has'
' been connected to outputUri placeholder, which is not supported '
'yet. Otherwise, please create a new issue at '
'https://github.com/kubeflow/pipelines/issues attaching the '
'pipeline code and the pipeline package. Error: {}'.format(
result.stderr.decode('utf-8'))
)
raise RuntimeError(
'''Internal compiler error: Compiler has produced Argo-incompatible workflow.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.
Error: {}'''.format(result.stderr.decode('utf-8'))
)
return True
return False
chore(sdk): remove redundant method: `_pipelineparam_full_name()` (#5174)
# Copyright 2018-2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
from collections import defaultdict, OrderedDict
from deprecated import deprecated
import inspect
import re
import tarfile
import uuid
import warnings
import zipfile
from typing import Callable, Set, List, Text, Dict, Tuple, Any, Union, Optional
import kfp
from kfp.dsl import _for_loop
from kfp.compiler import _data_passing_rewriter
from .. import dsl
from ._k8s_helper import convert_k8s_obj_to_json, sanitize_k8s_name
from ._op_to_template import _op_to_template, _process_obj
from ._default_transformers import add_pod_env
from ..components.structures import InputSpec
from ..components._yaml_utils import dump_yaml
from ..dsl._metadata import _extract_pipeline_metadata
from ..dsl._ops_group import OpsGroup
from ..dsl._pipeline_param import extract_pipelineparams_from_any, PipelineParam
class Compiler(object):
"""DSL Compiler that compiles pipeline functions into workflow yaml.
Example:
How to use the compiler to construct workflow yaml::
@dsl.pipeline(
name='name',
description='description'
)
def my_pipeline(a: int = 1, b: str = "default value"):
...
Compiler().compile(my_pipeline, 'path/to/workflow.yaml')
"""
def _get_groups_for_ops(self, root_group):
"""Helper function to get belonging groups for each op.
Each pipeline has a root group. Each group has a list of operators (leaf) and groups.
This function traverse the tree and get all ancestor groups for all operators.
Returns:
A dict. Key is the operator's name. Value is a list of ancestor groups including the
op itself. The list of a given operator is sorted in a way that the farthest
group is the first and operator itself is the last.
"""
def _get_op_groups_helper(current_groups, ops_to_groups):
root_group = current_groups[-1]
for g in root_group.groups:
# Add recursive opsgroup in the ops_to_groups
# such that the i/o dependency can be propagated to the ancester opsgroups
if g.recursive_ref:
ops_to_groups[g.name] = [x.name for x in current_groups] + [g.name]
continue
current_groups.append(g)
_get_op_groups_helper(current_groups, ops_to_groups)
del current_groups[-1]
for op in root_group.ops:
ops_to_groups[op.name] = [x.name for x in current_groups] + [op.name]
ops_to_groups = {}
current_groups = [root_group]
_get_op_groups_helper(current_groups, ops_to_groups)
return ops_to_groups
#TODO: combine with the _get_groups_for_ops
def _get_groups_for_opsgroups(self, root_group):
"""Helper function to get belonging groups for each opsgroup.
Each pipeline has a root group. Each group has a list of operators (leaf) and groups.
This function traverse the tree and get all ancestor groups for all opsgroups.
Returns:
A dict. Key is the opsgroup's name. Value is a list of ancestor groups including the
opsgroup itself. The list of a given opsgroup is sorted in a way that the farthest
group is the first and opsgroup itself is the last.
"""
def _get_opsgroup_groups_helper(current_groups, opsgroups_to_groups):
root_group = current_groups[-1]
for g in root_group.groups:
# Add recursive opsgroup in the ops_to_groups
# such that the i/o dependency can be propagated to the ancester opsgroups
if g.recursive_ref:
continue
opsgroups_to_groups[g.name] = [x.name for x in current_groups] + [g.name]
current_groups.append(g)
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
del current_groups[-1]
opsgroups_to_groups = {}
current_groups = [root_group]
_get_opsgroup_groups_helper(current_groups, opsgroups_to_groups)
return opsgroups_to_groups
def _get_groups(self, root_group):
"""Helper function to get all groups (not including ops) in a pipeline."""
def _get_groups_helper(group):
groups = {group.name: group}
for g in group.groups:
# Skip the recursive opsgroup because no templates
# need to be generated for the recursive opsgroups.
if not g.recursive_ref:
groups.update(_get_groups_helper(g))
return groups
return _get_groups_helper(root_group)
def _get_uncommon_ancestors(self, op_groups, opsgroup_groups, op1, op2):
"""Helper function to get unique ancestors between two ops.
For example, op1's ancestor groups are [root, G1, G2, G3, op1], op2's ancestor groups are
[root, G1, G4, op2], then it returns a tuple ([G2, G3, op1], [G4, op2]).
"""
#TODO: extract a function for the following two code module
if op1.name in op_groups:
op1_groups = op_groups[op1.name]
elif op1.name in opsgroup_groups:
op1_groups = opsgroup_groups[op1.name]
else:
raise ValueError(op1.name + ' does not exist.')
if op2.name in op_groups:
op2_groups = op_groups[op2.name]
elif op2.name in opsgroup_groups:
op2_groups = opsgroup_groups[op2.name]
else:
raise ValueError(op2.name + ' does not exist.')
both_groups = [op1_groups, op2_groups]
common_groups_len = sum(1 for x in zip(*both_groups) if x==(x[0],)*len(x))
group1 = op1_groups[common_groups_len:]
group2 = op2_groups[common_groups_len:]
return (group1, group2)
def _get_condition_params_for_ops(self, root_group):
"""Get parameters referenced in conditions of ops."""
conditions = defaultdict(set)
def _get_condition_params_for_ops_helper(group, current_conditions_params):
new_current_conditions_params = current_conditions_params
if group.type == 'condition':
new_current_conditions_params = list(current_conditions_params)
if isinstance(group.condition.operand1, dsl.PipelineParam):
new_current_conditions_params.append(group.condition.operand1)
if isinstance(group.condition.operand2, dsl.PipelineParam):
new_current_conditions_params.append(group.condition.operand2)
for op in group.ops:
for param in new_current_conditions_params:
conditions[op.name].add(param)
for g in group.groups:
# If the subgroup is a recursive opsgroup, propagate the pipelineparams
# in the condition expression, similar to the ops.
if g.recursive_ref:
for param in new_current_conditions_params:
conditions[g.name].add(param)
else:
_get_condition_params_for_ops_helper(g, new_current_conditions_params)
_get_condition_params_for_ops_helper(root_group, [])
return conditions
def _get_next_group_or_op(cls, to_visit: List, already_visited: Set):
"""Get next group or op to visit."""
if len(to_visit) == 0:
return None
next = to_visit.pop(0)
while next in already_visited:
next = to_visit.pop(0)
already_visited.add(next)
return next
def _get_for_loop_ops(self, new_root) -> Dict[Text, dsl.ParallelFor]:
    """Collect every ParallelFor opsgroup in the tree, keyed by name."""
    pending = self._get_all_subgroups_and_ops(new_root)
    loop_ops = {}
    visited = set()
    while pending:
        node = self._get_next_group_or_op(pending, visited)
        if node is None:
            break
        # Breadth-first expansion of the node's children.
        pending.extend(self._get_all_subgroups_and_ops(node))
        if isinstance(node, dsl.ParallelFor):
            loop_ops[node.name] = node
    return loop_ops
def _get_all_subgroups_and_ops(self, op):
"""Get all ops and groups contained within this group."""
subgroups = []
if hasattr(op, 'ops'):
subgroups.extend(op.ops)
if hasattr(op, 'groups'):
subgroups.extend(op.groups)
return subgroups
def _get_inputs_outputs(
        self,
        pipeline,
        root_group,
        op_groups,
        opsgroup_groups,
        condition_params,
        op_name_to_for_loop_op: Dict[Text, dsl.ParallelFor],
):
    """Get inputs and outputs of each group and op.

    Args:
      pipeline: Pipeline context holding all ops.
      root_group: root OpsGroup of the pipeline.
      op_groups: dict of op name -> ancestor group names (incl. the op).
      opsgroup_groups: dict of opsgroup name -> ancestor group names.
      condition_params: dict of op/group name -> params used in enclosing
        conditions.
      op_name_to_for_loop_op: dict of group name -> ParallelFor opsgroup.

    Returns:
      A tuple (inputs, outputs).
      inputs and outputs are dicts with key being the group/op names and values being list of
      tuples (param_name, producing_op_name). producing_op_name is the name of the op that
      produces the param. If the param is a pipeline param (no producer op), then
      producing_op_name is None.
    """
    inputs = defaultdict(set)
    outputs = defaultdict(set)
    for op in pipeline.ops.values():
        # op's inputs and all params used in conditions for that op are both considered.
        for param in op.inputs + list(condition_params[op.name]):
            # if the value is already provided (immediate value), then no need to expose
            # it as input for its parent groups.
            if param.value:
                continue
            if param.op_name:
                # Param produced by an upstream op: thread it through every
                # group between the producer and this consumer.
                upstream_op = pipeline.ops[param.op_name]
                upstream_groups, downstream_groups = \
                    self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, op)
                for i, group_name in enumerate(downstream_groups):
                    if i == 0:
                        # If it is the first uncommon downstream group, then the input comes from
                        # the first uncommon upstream group.
                        inputs[group_name].add((param.full_name, upstream_groups[0]))
                    else:
                        # If not the first downstream group, then the input is passed down from
                        # its ancestor groups so the upstream group is None.
                        inputs[group_name].add((param.full_name, None))
                for i, group_name in enumerate(upstream_groups):
                    if i == len(upstream_groups) - 1:
                        # If last upstream group, it is an operator and output comes from container.
                        outputs[group_name].add((param.full_name, None))
                    else:
                        # If not last upstream group, output value comes from one of its child.
                        outputs[group_name].add((param.full_name, upstream_groups[i+1]))
            else:
                if not op.is_exit_handler:
                    # Pipeline-level param (no producer op): every ancestor
                    # group receives it as an input, walking from the op
                    # upwards toward the root.
                    for group_name in op_groups[op.name][::-1]:
                        # if group is for loop group and param is that loop's param, then the param
                        # is created by that for loop ops_group and it shouldn't be an input to
                        # any of its parent groups.
                        inputs[group_name].add((param.full_name, None))
                        if group_name in op_name_to_for_loop_op:
                            # for example:
                            #   loop_group.loop_args.name = 'loop-item-param-99ca152e'
                            #   param.name = 'loop-item-param-99ca152e--a'
                            loop_group = op_name_to_for_loop_op[group_name]
                            if loop_group.loop_args.name in param.name:
                                break

    # Generate the input/output for recursive opsgroups
    # It propagates the recursive opsgroups IO to their ancester opsgroups
    def _get_inputs_outputs_recursive_opsgroup(group):
        #TODO: refactor the following codes with the above
        if group.recursive_ref:
            params = [(param, False) for param in group.inputs]
            params.extend([(param, True) for param in list(condition_params[group.name])])
            for param, is_condition_param in params:
                if param.value:
                    continue
                full_name = param.full_name
                if param.op_name:
                    upstream_op = pipeline.ops[param.op_name]
                    upstream_groups, downstream_groups = \
                        self._get_uncommon_ancestors(op_groups, opsgroup_groups, upstream_op, group)
                    for i, g in enumerate(downstream_groups):
                        if i == 0:
                            inputs[g].add((full_name, upstream_groups[0]))
                        # There is no need to pass the condition param as argument to the downstream ops.
                        #TODO: this might also apply to ops. add a TODO here and think about it.
                        elif i == len(downstream_groups) - 1 and is_condition_param:
                            continue
                        else:
                            inputs[g].add((full_name, None))
                    for i, g in enumerate(upstream_groups):
                        if i == len(upstream_groups) - 1:
                            outputs[g].add((full_name, None))
                        else:
                            outputs[g].add((full_name, upstream_groups[i+1]))
                elif not is_condition_param:
                    for g in op_groups[group.name]:
                        inputs[g].add((full_name, None))
        for subgroup in group.groups:
            _get_inputs_outputs_recursive_opsgroup(subgroup)

    _get_inputs_outputs_recursive_opsgroup(root_group)

    # Generate the input for SubGraph along with parallelfor
    for sub_graph in opsgroup_groups:
        if sub_graph in op_name_to_for_loop_op:
            # The opsgroup list is sorted with the farthest group as the first and the opsgroup
            # itself as the last. To get the latest opsgroup which is not the opsgroup itself -2 is used.
            parent = opsgroup_groups[sub_graph][-2]
            if parent and parent.startswith('subgraph'):
                # propagate only op's pipeline param from subgraph to parallelfor
                loop_op = op_name_to_for_loop_op[sub_graph]
                pipeline_param = loop_op.loop_args.items_or_pipeline_param
                if loop_op.items_is_pipeline_param and pipeline_param.op_name:
                    param_name = '%s-%s' % (
                        sanitize_k8s_name(pipeline_param.op_name), pipeline_param.name)
                    inputs[parent].add((param_name, pipeline_param.op_name))
    return inputs, outputs
def _get_dependencies(self, pipeline, root_group, op_groups, opsgroups_groups, opsgroups, condition_params):
    """Get dependent groups and ops for all ops and groups.

    Args:
      pipeline: Pipeline context holding all ops.
      root_group: root OpsGroup of the pipeline.
      op_groups: dict of op name -> ancestor group names.
      opsgroups_groups: dict of opsgroup name -> ancestor group names.
      opsgroups: dict of opsgroup name -> opsgroup.
      condition_params: dict of op/group name -> params used in enclosing
        conditions.

    Returns:
      A dict. Key is group/op name, value is a list of dependent groups/ops.
      The dependencies are calculated in the following way: if op2 depends on op1,
      and their ancestors are [root, G1, G2, op1] and [root, G1, G3, G4, op2],
      then G3 is dependent on G2. Basically dependency only exists in the first uncommon
      ancesters in their ancesters chain. Only sibling groups/ops can have dependencies.
    """
    dependencies = defaultdict(set)
    for op in pipeline.ops.values():
        # Upstream ops come from input params, condition params, and explicit
        # .after() dependencies (dependent_names).
        upstream_op_names = set()
        for param in op.inputs + list(condition_params[op.name]):
            if param.op_name:
                upstream_op_names.add(param.op_name)
        upstream_op_names |= set(op.dependent_names)

        for upstream_op_name in upstream_op_names:
            # the dependent op could be either a BaseOp or an opsgroup
            if upstream_op_name in pipeline.ops:
                upstream_op = pipeline.ops[upstream_op_name]
            elif upstream_op_name in opsgroups:
                upstream_op = opsgroups[upstream_op_name]
            else:
                raise ValueError('compiler cannot find the ' + upstream_op_name)

            # Record the dependency on the first uncommon ancestors, i.e.
            # between sibling groups/ops.
            upstream_groups, downstream_groups = self._get_uncommon_ancestors(op_groups, opsgroups_groups, upstream_op, op)
            dependencies[downstream_groups[0]].add(upstream_groups[0])

    # Generate dependencies based on the recursive opsgroups
    #TODO: refactor the following codes with the above
    def _get_dependency_opsgroup(group, dependencies):
        upstream_op_names = set([dependency.name for dependency in group.dependencies])
        if group.recursive_ref:
            for param in group.inputs + list(condition_params[group.name]):
                if param.op_name:
                    upstream_op_names.add(param.op_name)

        for op_name in upstream_op_names:
            if op_name in pipeline.ops:
                upstream_op = pipeline.ops[op_name]
            elif op_name in opsgroups:
                upstream_op = opsgroups[op_name]
            else:
                raise ValueError('compiler cannot find the ' + op_name)
            upstream_groups, downstream_groups = \
                self._get_uncommon_ancestors(op_groups, opsgroups_groups, upstream_op, group)
            dependencies[downstream_groups[0]].add(upstream_groups[0])

        for subgroup in group.groups:
            _get_dependency_opsgroup(subgroup, dependencies)

    _get_dependency_opsgroup(root_group, dependencies)

    return dependencies
def _resolve_value_or_reference(self, value_or_reference, potential_references):
    """Resolve a value or PipelineParam into an Argo placeholder string.

    Args:
      value_or_reference: value or reference to be resolved. It could be basic
        python types or PipelineParam.
      potential_references(dict{str->str}): a dictionary of parameter names to
        task names.
    """
    if not isinstance(value_or_reference, dsl.PipelineParam):
        # Plain value: stringify as-is.
        return str(value_or_reference)

    parameter_name = value_or_reference.full_name
    matching_tasks = [
        task_name
        for param_name, task_name in potential_references
        if param_name == parameter_name
    ]
    # A None task name means the param was handed down from a distant
    # ancestor rather than produced by a sibling task; like an unmatched
    # param, it resolves to an input parameter of the current group.
    if not matching_tasks or matching_tasks[0] is None:
        return '{{inputs.parameters.%s}}' % parameter_name
    return '{{tasks.%s.outputs.parameters.%s}}' % (matching_tasks[0], parameter_name)
@staticmethod
def _resolve_task_pipeline_param(pipeline_param: PipelineParam, group_type) -> str:
    """Render an Argo placeholder for a task-level PipelineParam."""
    op_name = pipeline_param.op_name
    if op_name is None:
        # No producing op: the value comes from a global workflow parameter.
        return '{{workflow.parameters.%s}}' % pipeline_param.name
    sanitized_op = sanitize_k8s_name(op_name)
    qualified_name = '%s-%s' % (sanitized_op, pipeline_param.name)
    if group_type == 'subgraph':
        # Inside a subgraph the value is threaded in as a group input.
        return '{{inputs.parameters.%s}}' % (qualified_name)
    return '{{tasks.%s.outputs.parameters.%s}}' % (sanitized_op, qualified_name)
def _group_to_dag_template(self, group, inputs, outputs, dependencies):
    """Generate an Argo DAG template given an OpsGroup.

    inputs, outputs, dependencies are all helper dicts keyed by group/op name
    (as produced by _get_inputs_outputs and _get_dependencies).
    """
    template = {'name': group.name}
    if group.parallelism != None:
        template["parallelism"] = group.parallelism

    # Generate inputs section.
    if inputs.get(group.name, None):
        template_inputs = [{'name': x[0]} for x in inputs[group.name]]
        # Sorted for deterministic yaml output.
        template_inputs.sort(key=lambda x: x['name'])
        template['inputs'] = {
            'parameters': template_inputs
        }

    # Generate outputs section.
    if outputs.get(group.name, None):
        template_outputs = []
        for param_name, dependent_name in outputs[group.name]:
            template_outputs.append({
                'name': param_name,
                'valueFrom': {
                    'parameter': '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
                }
            })
        template_outputs.sort(key=lambda x: x['name'])
        template['outputs'] = {'parameters': template_outputs}

    # Generate tasks section.
    tasks = []
    sub_groups = group.groups + group.ops
    for sub_group in sub_groups:
        is_recursive_subgroup = (isinstance(sub_group, OpsGroup) and sub_group.recursive_ref)
        # Special handling for recursive subgroup: use the existing opsgroup name
        if is_recursive_subgroup:
            task = {
                'name': sub_group.recursive_ref.name,
                'template': sub_group.recursive_ref.name,
            }
        else:
            task = {
                'name': sub_group.name,
                'template': sub_group.name,
            }
        if isinstance(sub_group, dsl.OpsGroup) and sub_group.type == 'condition':
            # Condition groups become Argo 'when' expressions on the task.
            subgroup_inputs = inputs.get(sub_group.name, [])
            condition = sub_group.condition
            operand1_value = self._resolve_value_or_reference(condition.operand1, subgroup_inputs)
            operand2_value = self._resolve_value_or_reference(condition.operand2, subgroup_inputs)
            # Equality comparisons are quoted so Argo compares strings, not
            # bare tokens.
            if condition.operator in ['==', '!=']:
                operand1_value = '"' + operand1_value + '"'
                operand2_value = '"' + operand2_value + '"'
            task['when'] = '{} {} {}'.format(operand1_value, condition.operator, operand2_value)

        # Generate dependencies section for this task.
        if dependencies.get(sub_group.name, None):
            group_dependencies = list(dependencies[sub_group.name])
            group_dependencies.sort()
            task['dependencies'] = group_dependencies

        # Generate arguments section for this task.
        if inputs.get(sub_group.name, None):
            task['arguments'] = {'parameters': self.get_arguments_for_sub_group(sub_group, is_recursive_subgroup, inputs)}

        # additional task modifications for withItems and withParam
        if isinstance(sub_group, dsl.ParallelFor):
            if sub_group.items_is_pipeline_param:
                # these loop args are a 'withParam' rather than 'withItems'.
                # i.e., rather than a static list, they are either the output of another task or were input
                # as global pipeline parameters
                pipeline_param = sub_group.loop_args.items_or_pipeline_param
                withparam_value = self._resolve_task_pipeline_param(pipeline_param, group.type)
                if pipeline_param.op_name:
                    # these loop args are the output of another task
                    if 'dependencies' not in task or task['dependencies'] is None:
                        task['dependencies'] = []
                    if sanitize_k8s_name(
                            pipeline_param.op_name) not in task['dependencies'] and group.type != 'subgraph':
                        task['dependencies'].append(
                            sanitize_k8s_name(pipeline_param.op_name))

                task['withParam'] = withparam_value
            else:
                # Need to sanitize the dict keys for consistency.
                loop_tasks = sub_group.loop_args.to_list_for_task_yaml()
                nested_pipeline_params = extract_pipelineparams_from_any(loop_tasks)

                # Set dependencies in case of nested pipeline_params
                map_to_tmpl_var = {str(p): self._resolve_task_pipeline_param(p, group.type) for p in nested_pipeline_params}
                for pipeline_param in nested_pipeline_params:
                    if pipeline_param.op_name:
                        # these pipeline_param are the output of another task
                        if 'dependencies' not in task or task['dependencies'] is None:
                            task['dependencies'] = []
                        if sanitize_k8s_name(
                                pipeline_param.op_name) not in task['dependencies']:
                            task['dependencies'].append(
                                sanitize_k8s_name(pipeline_param.op_name))

                sanitized_tasks = []
                if isinstance(loop_tasks[0], dict):
                    for argument_set in loop_tasks:
                        c_dict = {}
                        for k, v in argument_set.items():
                            c_dict[sanitize_k8s_name(k, True)] = v
                        sanitized_tasks.append(c_dict)
                else:
                    sanitized_tasks = loop_tasks
                # Replace pipeline param if map_to_tmpl_var not empty
                task['withItems'] = _process_obj(sanitized_tasks, map_to_tmpl_var) if map_to_tmpl_var else sanitized_tasks

        # We will sort dependencies to have determinitc yaml and thus stable tests
        if task.get('dependencies'):
            task['dependencies'].sort()

        tasks.append(task)
    tasks.sort(key=lambda x: x['name'])
    template['dag'] = {'tasks': tasks}
    return template
def get_arguments_for_sub_group(
        self,
        sub_group: Union[OpsGroup, dsl._container_op.BaseOp],
        is_recursive_subgroup: Optional[bool],
        inputs: Dict[Text, Tuple[Text, Text]],
):
    """Build the Argo 'arguments.parameters' list for one DAG task.

    Args:
      sub_group: the op or opsgroup being invoked as a task.
      is_recursive_subgroup: whether sub_group is a recursive opsgroup, in
        which case argument names are remapped via recursive_ref.arguments.
      inputs: dict of group/op name -> set of (param_name, producing_op_name).

    Returns:
      A list of {'name': ..., 'value': ...} dicts, sorted by name.
    """
    arguments = []
    for param_name, dependent_name in inputs[sub_group.name]:
        if is_recursive_subgroup:
            # Find the local argument matching this param; note this relies on
            # the for-loop variable 'input_name' leaking past the break.
            for input_name, input in sub_group.arguments.items():
                if param_name == input.full_name:
                    break
            referenced_input = sub_group.recursive_ref.arguments[input_name]
            argument_name = referenced_input.full_name
        else:
            argument_name = param_name

        # Preparing argument. It can be pipeline input reference, task output reference or loop item (or loop item attribute
        # '---' is a placeholder that can never match a sanitized k8s name,
        # so the startswith check below is a no-op for non-loop groups.
        sanitized_loop_arg_full_name = '---'
        if isinstance(sub_group, dsl.ParallelFor):
            sanitized_loop_arg_full_name = sanitize_k8s_name(sub_group.loop_args.full_name)
        arg_ref_full_name = sanitize_k8s_name(param_name)
        # We only care about the reference to the current loop item, not the outer loops
        if isinstance(sub_group, dsl.ParallelFor) and arg_ref_full_name.startswith(sanitized_loop_arg_full_name):
            if arg_ref_full_name == sanitized_loop_arg_full_name:
                # The loop item itself.
                argument_value = '{{item}}'
            elif _for_loop.LoopArgumentVariable.name_is_loop_arguments_variable(param_name):
                # An attribute of the loop item.
                subvar_name = _for_loop.LoopArgumentVariable.get_subvar_name(param_name)
                argument_value = '{{item.%s}}' % subvar_name
            else:
                raise ValueError("Argument seems to reference the loop item, but not the item itself and not some attribute of the item. param_name: {}, ".format(param_name))
        else:
            if dependent_name:
                argument_value = '{{tasks.%s.outputs.parameters.%s}}' % (dependent_name, param_name)
            else:
                argument_value = '{{inputs.parameters.%s}}' % param_name

        arguments.append({
            'name': argument_name,
            'value': argument_value,
        })

    arguments.sort(key=lambda x: x['name'])

    return arguments
def _create_dag_templates(self, pipeline, op_transformers=None, op_to_templates_handler=None):
    """Create all groups and ops templates in the pipeline.

    Args:
        pipeline: Pipeline context object to get all the pipeline data from.
        op_transformers: A list of functions that are applied to all
            ContainerOp instances that are being processed.
        op_to_templates_handler: Handler which converts a base op into a
            list of argo templates.

    Returns:
        A list of Argo template dicts: one DAG template per ops group plus
        the templates produced for every op.
    """
    op_to_templates_handler = op_to_templates_handler or (lambda op : [_op_to_template(op)])
    root_group = pipeline.groups[0]
    # Call the transformation functions before determining the inputs/outputs, otherwise
    # the user would not be able to use pipeline parameters in the container definition
    # (for example as pod labels) - the generated template is invalid.
    for op in pipeline.ops.values():
        for transformer in op_transformers or []:
            transformer(op)
    # Generate core data structures to prepare for argo yaml generation
    # op_name_to_parent_groups: op name -> list of ancestor groups including the current op
    # opsgroups: a dictionary of opsgroup.name -> opsgroup
    # inputs, outputs: group/op names -> list of tuples (full_param_name, producing_op_name)
    # condition_params: recursive_group/op names -> list of pipelineparam
    # dependencies: group/op name -> list of dependent groups/ops.
    # Special handling for the recursive opsgroup:
    # op_name_to_parent_groups also contains the recursive opsgroups
    # condition_params from _get_condition_params_for_ops also contains the recursive opsgroups
    # groups does not include the recursive opsgroups
    opsgroups = self._get_groups(root_group)
    op_name_to_parent_groups = self._get_groups_for_ops(root_group)
    opgroup_name_to_parent_groups = self._get_groups_for_opsgroups(root_group)
    condition_params = self._get_condition_params_for_ops(root_group)
    op_name_to_for_loop_op = self._get_for_loop_ops(root_group)
    inputs, outputs = self._get_inputs_outputs(
        pipeline,
        root_group,
        op_name_to_parent_groups,
        opgroup_name_to_parent_groups,
        condition_params,
        op_name_to_for_loop_op,
    )
    dependencies = self._get_dependencies(
        pipeline,
        root_group,
        op_name_to_parent_groups,
        opgroup_name_to_parent_groups,
        opsgroups,
        condition_params,
    )
    # One DAG template per group, then the op templates themselves.
    templates = []
    for opsgroup in opsgroups.keys():
        template = self._group_to_dag_template(opsgroups[opsgroup], inputs, outputs, dependencies)
        templates.append(template)
    for op in pipeline.ops.values():
        templates.extend(op_to_templates_handler(op))
    return templates
def _create_pipeline_workflow(self, parameter_defaults, pipeline, op_transformers=None, pipeline_conf=None):
    """Create the Argo Workflow spec for the pipeline.

    Args:
        parameter_defaults: Mapping of pipeline input name -> default value
            (``None`` means "no default").
        pipeline: The dsl.Pipeline object holding the ops and groups.
        op_transformers: Functions applied to every op before templating.
        pipeline_conf: PipelineConf with pipeline-level settings.
            NOTE(review): the attribute accesses below assume it is not
            None — callers always pass one; confirm before reusing.

    Returns:
        The Argo Workflow spec as a dict.

    Raises:
        ValueError: If ``pipeline_conf.image_pull_policy`` is not one of
            ``Always``, ``Never``, ``IfNotPresent``.
    """
    # Input parameters: omit the 'value' key entirely when there is no default.
    input_params = []
    for name, value in parameter_defaults.items():
        param = {'name': name}
        if value is not None:
            param['value'] = value
        input_params.append(param)
    # Making the pipeline group name unique to prevent name clashes with templates
    pipeline_group = pipeline.groups[0]
    temp_pipeline_group_name = uuid.uuid4().hex
    pipeline_group.name = temp_pipeline_group_name
    # Templates
    templates = self._create_dag_templates(pipeline, op_transformers)
    # Exit handler: only a single global one (the first child group) is supported.
    exit_handler = None
    if pipeline.groups[0].groups:
        first_group = pipeline.groups[0].groups[0]
        if first_group.type == 'exit_handler':
            exit_handler = first_group.exit_op
    # The whole pipeline workflow name must be valid as a k8s subdomain.
    pipeline_name = pipeline.name or 'pipeline'
    # Workaround for pipeline name clashing with container template names
    # TODO: Make sure template names cannot clash at all (container, DAG, workflow)
    template_map = {template['name'].lower(): template for template in templates}
    from ..components._naming import _make_name_unique_by_adding_index
    pipeline_template_name = _make_name_unique_by_adding_index(pipeline_name, template_map, '-')
    # Restoring the name of the pipeline template
    pipeline_template = template_map[temp_pipeline_group_name]
    pipeline_template['name'] = pipeline_template_name
    # Sort for deterministic YAML output.
    templates.sort(key=lambda x: x['name'])
    workflow = {
        'apiVersion': 'argoproj.io/v1alpha1',
        'kind': 'Workflow',
        'metadata': {'generateName': pipeline_template_name + '-'},
        'spec': {
            'entrypoint': pipeline_template_name,
            'templates': templates,
            'arguments': {'parameters': input_params},
            'serviceAccountName': 'pipeline-runner',
        }
    }
    # set parallelism limits at pipeline level
    if pipeline_conf.parallelism:
        workflow['spec']['parallelism'] = pipeline_conf.parallelism
    # set ttl after workflow finishes
    if pipeline_conf.ttl_seconds_after_finished >= 0:
        workflow['spec']['ttlSecondsAfterFinished'] = pipeline_conf.ttl_seconds_after_finished
    if pipeline_conf._pod_disruption_budget_min_available:
        pod_disruption_budget = {"minAvailable": pipeline_conf._pod_disruption_budget_min_available}
        workflow['spec']['podDisruptionBudget'] = pod_disruption_budget
    if len(pipeline_conf.image_pull_secrets) > 0:
        image_pull_secrets = []
        for image_pull_secret in pipeline_conf.image_pull_secrets:
            image_pull_secrets.append(convert_k8s_obj_to_json(image_pull_secret))
        workflow['spec']['imagePullSecrets'] = image_pull_secrets
    if pipeline_conf.timeout:
        workflow['spec']['activeDeadlineSeconds'] = pipeline_conf.timeout
    if exit_handler:
        workflow['spec']['onExit'] = exit_handler.name
    # This can be overwritten by the task specific
    # nodeselection, specified in the template.
    if pipeline_conf.default_pod_node_selector:
        workflow['spec']['nodeSelector'] = pipeline_conf.default_pod_node_selector
    if pipeline_conf.dns_config:
        workflow['spec']['dnsConfig'] = convert_k8s_obj_to_json(pipeline_conf.dns_config)
    # Fixed: compare to None with `is not` (was `!= None`, PEP 8 E711).
    if pipeline_conf.image_pull_policy is not None:
        if pipeline_conf.image_pull_policy in ["Always", "Never", "IfNotPresent"]:
            # Apply the default policy only where a template doesn't set its own.
            for template in workflow["spec"]["templates"]:
                container = template.get('container', None)
                if container and "imagePullPolicy" not in container:
                    container["imagePullPolicy"] = pipeline_conf.image_pull_policy
        else:
            raise ValueError(
                'Invalid imagePullPolicy. Must be one of `Always`, `Never`, `IfNotPresent`.'
            )
    return workflow
def _validate_exit_handler(self, pipeline):
    """Makes sure there is only one global exit handler.

    Note this is a temporary workaround until argo supports local exit handler.

    Raises:
        ValueError: if more than one exit handler is found, or if ops exist
            outside the (single) exit handler group.
    """
    def _validate_exit_handler_helper(group, exiting_op_names, handler_exists):
        if group.type == 'exit_handler':
            # An exit handler is invalid if another one was already seen on
            # this path, or if any op was collected outside of it.
            if handler_exists or len(exiting_op_names) > 1:
                raise ValueError('Only one global exit_handler is allowed and all ops need to be included.')
            handler_exists = True
        if group.ops:
            # exiting_op_names is a shared list, so ops accumulate across the
            # whole traversal (unlike the boolean flag below).
            exiting_op_names.extend([x.name for x in group.ops])
        for g in group.groups:
            # NOTE(review): `handler_exists` is passed by value, so a flag set
            # inside one child's frame does not propagate to its siblings —
            # two sibling exit handlers may escape detection. Confirm whether
            # that case is prevented elsewhere.
            _validate_exit_handler_helper(g, exiting_op_names, handler_exists)
    return _validate_exit_handler_helper(pipeline.groups[0], [], False)
def _sanitize_and_inject_artifact(self, pipeline: dsl.Pipeline, pipeline_conf=None):
    """Sanitize operator/param names and inject pipeline artifact location.

    Rewrites every op name, output parameter name, file/attribute output key
    and artifact key through ``sanitize_k8s_name`` so the generated Argo spec
    uses valid Kubernetes identifiers.

    NOTE(review): despite the docstring, no artifact-location injection is
    visible in this body — confirm whether it happens elsewhere or the
    docstring is stale.
    """
    # Sanitize operator names and param names
    sanitized_ops = {}
    for op in pipeline.ops.values():
        sanitized_name = sanitize_k8s_name(op.name)
        op.name = sanitized_name
        for param in op.outputs.values():
            param.name = sanitize_k8s_name(param.name, True)
            if param.op_name:
                param.op_name = sanitize_k8s_name(param.op_name)
        # `op.output` raises _MultipleOutputsError when ambiguous; only
        # sanitize it when it is a single concrete output.
        if op.output is not None and not isinstance(op.output, dsl._container_op._MultipleOutputsError):
            op.output.name = sanitize_k8s_name(op.output.name, True)
            op.output.op_name = sanitize_k8s_name(op.output.op_name)
        if op.dependent_names:
            op.dependent_names = [sanitize_k8s_name(name) for name in op.dependent_names]
        if isinstance(op, dsl.ContainerOp) and op.file_outputs is not None:
            sanitized_file_outputs = {}
            for key in op.file_outputs.keys():
                sanitized_file_outputs[sanitize_k8s_name(key, True)] = op.file_outputs[key]
            op.file_outputs = sanitized_file_outputs
        elif isinstance(op, dsl.ResourceOp) and op.attribute_outputs is not None:
            sanitized_attribute_outputs = {}
            for key in op.attribute_outputs.keys():
                sanitized_attribute_outputs[sanitize_k8s_name(key, True)] = \
                    op.attribute_outputs[key]
            op.attribute_outputs = sanitized_attribute_outputs
        if isinstance(op, dsl.ContainerOp):
            if op.input_artifact_paths:
                op.input_artifact_paths = {sanitize_k8s_name(key, True): value for key, value in op.input_artifact_paths.items()}
            if op.artifact_arguments:
                op.artifact_arguments = {sanitize_k8s_name(key, True): value for key, value in op.artifact_arguments.items()}
        sanitized_ops[sanitized_name] = op
    # Re-key the op dict by the sanitized names.
    pipeline.ops = sanitized_ops
def _create_workflow(self,
                     pipeline_func: Callable,
                     pipeline_name: Text=None,
                     pipeline_description: Text=None,
                     params_list: List[dsl.PipelineParam]=None,
                     pipeline_conf: dsl.PipelineConf = None,
                     ) -> Dict[Text, Any]:
    """Internal implementation of create_workflow.

    Runs ``pipeline_func`` under a dsl.Pipeline context with placeholder
    PipelineParams, sanitizes the resulting ops, builds the Argo workflow
    spec, applies the data-passing rewriters and attaches KFP metadata
    annotations/labels.

    Returns:
        The Argo Workflow spec as a dict.
    """
    params_list = params_list or []
    # Create the arg list with no default values and call pipeline function.
    # Assign type information to the PipelineParam
    pipeline_meta = _extract_pipeline_metadata(pipeline_func)
    pipeline_meta.name = pipeline_name or pipeline_meta.name
    pipeline_meta.description = pipeline_description or pipeline_meta.description
    pipeline_name = sanitize_k8s_name(pipeline_meta.name)
    # Need to first clear the default value of dsl.PipelineParams. Otherwise, it
    # will be resolved immediately in place when being passed to each component.
    default_param_values = OrderedDict()
    if getattr(pipeline_func, 'output_directory', None):
        # Expose the output directory as an extra pipeline root parameter.
        dsl_pipeline_root = dsl.PipelineParam(
            name=dsl.ROOT_PARAMETER_NAME, value=pipeline_func.output_directory)
        pipeline_func.output_directory = dsl_pipeline_root
        params_list.append(dsl_pipeline_root)
    for param in params_list:
        default_param_values[param.name] = param.value
        param.value = None
    # Build positional and keyword-only placeholder params mirroring the
    # pipeline function's signature.
    args_list = []
    kwargs_dict = dict()
    signature = inspect.signature(pipeline_func)
    for arg_name, arg in signature.parameters.items():
        arg_type = None
        for input in pipeline_meta.inputs or []:
            if arg_name == input.name:
                arg_type = input.type
                break
        param = dsl.PipelineParam(sanitize_k8s_name(arg_name, True), param_type=arg_type)
        if arg.kind == inspect.Parameter.KEYWORD_ONLY:
            kwargs_dict[arg_name] = param
        else:
            args_list.append(param)
    with dsl.Pipeline(pipeline_name) as dsl_pipeline:
        pipeline_func(*args_list, **kwargs_dict)
    pipeline_conf = pipeline_conf or dsl_pipeline.conf # Configuration passed to the compiler is overriding. Unfortunately, it's not trivial to detect whether the dsl_pipeline.conf was ever modified.
    self._validate_exit_handler(dsl_pipeline)
    self._sanitize_and_inject_artifact(dsl_pipeline, pipeline_conf)
    # Fill in the default values by merging two param lists.
    args_list_with_defaults = OrderedDict()
    if pipeline_meta.inputs:
        args_list_with_defaults = OrderedDict([
            (sanitize_k8s_name(input_spec.name, True), input_spec.default)
            for input_spec in pipeline_meta.inputs
        ])
    if params_list:
        # Or, if args are provided by params_list, fill in pipeline_meta.
        for k, v in default_param_values.items():
            args_list_with_defaults[k] = v
        pipeline_meta.inputs = pipeline_meta.inputs or []
        for param in params_list:
            pipeline_meta.inputs.append(
                InputSpec(
                    name=param.name,
                    type=param.param_type,
                    default=default_param_values[param.name]))
    op_transformers = [add_pod_env]
    op_transformers.extend(pipeline_conf.op_transformers)
    workflow = self._create_pipeline_workflow(
        args_list_with_defaults,
        dsl_pipeline,
        op_transformers,
        pipeline_conf,
    )
    # Post-process the workflow for big-data and pod-name passing.
    from ._data_passing_rewriter import fix_big_data_passing
    workflow = fix_big_data_passing(workflow)
    output_directory = getattr(pipeline_func, 'output_directory', None)
    workflow = _data_passing_rewriter.add_pod_name_passing(
        workflow, str(output_directory))
    if pipeline_conf and pipeline_conf.data_passing_method != None:
        workflow = pipeline_conf.data_passing_method(workflow)
    # Attach KFP SDK metadata so the backend/UI can identify the pipeline.
    metadata = workflow.setdefault('metadata', {})
    annotations = metadata.setdefault('annotations', {})
    annotations['pipelines.kubeflow.org/kfp_sdk_version'] = kfp.__version__
    annotations['pipelines.kubeflow.org/pipeline_compilation_time'] = datetime.datetime.now().isoformat()
    annotations['pipelines.kubeflow.org/pipeline_spec'] = json.dumps(pipeline_meta.to_dict(), sort_keys=True)
    # Labels might be logged better than annotations so adding some information here as well
    labels = metadata.setdefault('labels', {})
    labels['pipelines.kubeflow.org/kfp_sdk_version'] = kfp.__version__
    return workflow
# For now (0.1.31) this function is only used by TFX's KubeflowDagRunner.
# See https://github.com/tensorflow/tfx/blob/811e4c1cc0f7903d73d151b9d4f21f79f6013d4a/tfx/orchestration/kubeflow/kubeflow_dag_runner.py#L238
@deprecated(
    version='0.1.32',
    reason='Workflow spec is not intended to be handled by user, please '
           'switch to _create_workflow')
def create_workflow(self,
                    pipeline_func: Callable,
                    pipeline_name: Text=None,
                    pipeline_description: Text=None,
                    params_list: List[dsl.PipelineParam]=None,
                    pipeline_conf: dsl.PipelineConf = None) -> Dict[Text, Any]:
    """Create workflow spec from pipeline function and specified pipeline
    params/metadata. Currently, the pipeline params are either specified in
    the signature of the pipeline function or by passing a list of
    dsl.PipelineParam. Conflict will cause ValueError.

    Deprecated: thin public wrapper around :meth:`_create_workflow`.

    Args:
        pipeline_func: Pipeline function where ContainerOps are invoked.
        pipeline_name: The name of the pipeline to compile.
        pipeline_description: The description of the pipeline.
        params_list: List of pipeline params to append to the pipeline.
        pipeline_conf: PipelineConf instance. Can specify op transforms, image pull secrets and other pipeline-level configuration options. Overrides any configuration that may be set by the pipeline.

    Returns:
        The created workflow dictionary.
    """
    return self._create_workflow(pipeline_func, pipeline_name, pipeline_description, params_list, pipeline_conf)
@deprecated(
    version='0.1.32',
    reason='Switch to _create_workflow.')
def _compile(self, pipeline_func, pipeline_conf: dsl.PipelineConf = None):
    """Compile the given pipeline function into workflow.

    Deprecated alias for :meth:`_create_workflow` kept for backward
    compatibility.
    """
    return self._create_workflow(pipeline_func=pipeline_func, pipeline_conf=pipeline_conf)
def compile(self, pipeline_func, package_path, type_check=True, pipeline_conf: dsl.PipelineConf = None):
    """Compile the given pipeline function into workflow yaml.

    Args:
        pipeline_func: Pipeline functions with @dsl.pipeline decorator.
        package_path: The output workflow tar.gz file path. for example,
            "~/a.tar.gz"
        type_check: Whether to enable the type check or not, default: True.
        pipeline_conf: PipelineConf instance. Can specify op transforms, image
            pull secrets and other pipeline-level configuration options. Overrides
            any configuration that may be set by the pipeline.
    """
    import kfp
    # Temporarily override the global type-check flag for the duration of
    # this compilation; always restore the previous value.
    type_check_old_value = kfp.TYPE_CHECK
    try:
        kfp.TYPE_CHECK = type_check
        self._create_and_write_workflow(
            pipeline_func=pipeline_func,
            pipeline_conf=pipeline_conf,
            package_path=package_path)
    finally:
        kfp.TYPE_CHECK = type_check_old_value
@staticmethod
def _write_workflow(workflow: Dict[Text, Any], package_path: Text = None):
    """Dump pipeline workflow into yaml spec and write out in the format specified by the user.

    Args:
        workflow: Workflow spec of the pipeline, dict.
        package_path: file path to be written. If not specified, a yaml_text
            string will be returned. The extension selects the format:
            ``.tar.gz``/``.tgz`` (gzipped tar), ``.zip``, or ``.yaml``/``.yml``.

    Returns:
        The YAML text when ``package_path`` is None, otherwise None.

    Raises:
        ValueError: if ``package_path`` does not end with a supported extension.
    """
    yaml_text = dump_yaml(workflow)
    if package_path is None:
        return yaml_text
    # endswith accepts a tuple of suffixes — one call per format family.
    if package_path.endswith(('.tar.gz', '.tgz')):
        from contextlib import closing
        from io import BytesIO
        with tarfile.open(package_path, "w:gz") as tar:
            with closing(BytesIO(yaml_text.encode())) as yaml_file:
                tarinfo = tarfile.TarInfo('pipeline.yaml')
                tarinfo.size = len(yaml_file.getvalue())
                tar.addfile(tarinfo, fileobj=yaml_file)
    elif package_path.endswith('.zip'):
        # Renamed from `zip` to avoid shadowing the builtin.
        with zipfile.ZipFile(package_path, "w") as zip_file:
            zipinfo = zipfile.ZipInfo('pipeline.yaml')
            zipinfo.compress_type = zipfile.ZIP_DEFLATED
            zip_file.writestr(zipinfo, yaml_text)
    elif package_path.endswith(('.yaml', '.yml')):
        with open(package_path, 'w') as yaml_file:
            yaml_file.write(yaml_text)
    else:
        # Fixed grammar in the user-facing message ("should ends" -> "should end").
        raise ValueError(
            'The output path '+ package_path +
            ' should end with one of the following formats: '
            '[.tar.gz, .tgz, .zip, .yaml, .yml]')
def _create_and_write_workflow(
    self,
    pipeline_func: Callable,
    pipeline_name: Text=None,
    pipeline_description: Text=None,
    params_list: List[dsl.PipelineParam]=None,
    pipeline_conf: dsl.PipelineConf=None,
    package_path: Text=None
) -> None:
    """Compile the given pipeline function and dump it to specified file format.

    NOTE(review): the package is written to disk *before* validation runs,
    so an invalid workflow still produces an output file — confirm this
    ordering is intentional.
    """
    workflow = self._create_workflow(
        pipeline_func,
        pipeline_name,
        pipeline_description,
        params_list,
        pipeline_conf)
    self._write_workflow(workflow, package_path)
    _validate_workflow(workflow)
def _validate_workflow(workflow: dict):
    """Sanity-check a compiled workflow and lint it with ``argo`` if available.

    Args:
        workflow: Compiled Argo workflow spec. Not modified by this function.

    Raises:
        RuntimeError: if an unresolved PipelineParam placeholder is found in
            the serialized spec (raised here or by ``_run_argo_lint``).
    """
    import copy
    # Deep copy: the Argo-lint workaround below mutates *nested* parameter
    # dicts, so the original shallow `.copy()` leaked those edits into the
    # caller's workflow object.
    workflow = copy.deepcopy(workflow)
    # Working around Argo lint issue: parameters without a value fail linting.
    for argument in workflow['spec'].get('arguments', {}).get('parameters', []):
        if 'value' not in argument:
            argument['value'] = ''
    yaml_text = dump_yaml(workflow)
    if '{{pipelineparam' in yaml_text:
        raise RuntimeError(
            '''Internal compiler error: Found unresolved PipelineParam.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.'''
        )
    # Running Argo lint if available
    import shutil
    argo_path = shutil.which('argo')
    if argo_path:
        # Probe with an empty document first so a broken argo binary only
        # produces a warning instead of failing compilation.
        has_working_argo_lint = False
        try:
            has_working_argo_lint = _run_argo_lint('')
        except Exception:  # narrowed from a bare `except:` (kept best-effort)
            warnings.warn("Cannot validate the compiled workflow. Found the argo program in PATH, but it's not usable. argo v2.4.3 should work.")
        if has_working_argo_lint:
            _run_argo_lint(yaml_text)
def _run_argo_lint(yaml_text: str):
    """Lint *yaml_text* with the ``argo`` CLI when it is on PATH.

    Args:
        yaml_text: Serialized workflow spec fed to ``argo lint`` on stdin.

    Returns:
        True if ``argo`` was found and linting passed, False if ``argo`` is
        not installed.

    Raises:
        RuntimeError: if ``argo lint`` reports errors.
    """
    # Running Argo lint if available
    import shutil
    import subprocess
    argo_path = shutil.which('argo')
    if argo_path:
        # '/dev/stdin' lets argo read the spec from the piped input.
        result = subprocess.run([argo_path, 'lint', '/dev/stdin'], input=yaml_text.encode('utf-8'), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if result.returncode:
            # Recognize the known unresolved-artifact failure mode and give a
            # more actionable message for it.
            if re.match(
                pattern=r'.+failed to resolve {{tasks\..+\.outputs\.artifacts\..+}}.+',
                string=result.stderr.decode('utf-8')
            ):
                raise RuntimeError(
                    'Compiler has produced Argo-incompatible workflow due to '
                    'unresolvable input artifact(s). Please check whether inputPath has'
                    ' been connected to outputUri placeholder, which is not supported '
                    'yet. Otherwise, please create a new issue at '
                    'https://github.com/kubeflow/pipelines/issues attaching the '
                    'pipeline code and the pipeline package. Error: {}'.format(
                        result.stderr.decode('utf-8'))
                )
            raise RuntimeError(
                '''Internal compiler error: Compiler has produced Argo-incompatible workflow.
Please create a new issue at https://github.com/kubeflow/pipelines/issues attaching the pipeline code and the pipeline package.
Error: {}'''.format(result.stderr.decode('utf-8'))
            )
        return True
    return False
|
from collections import Counter
import random
import numpy as np
from autosklearn.constants import *
from autosklearn.ensembles.abstract_ensemble import AbstractEnsemble
from autosklearn.metrics import calculate_score
from autosklearn.metrics import Scorer
class EnsembleSelection(AbstractEnsemble):
    """Greedy ensemble selection (Rich Caruana et al.).

    Repeatedly adds (with replacement) the model whose inclusion minimizes
    the ensemble *loss* ``1 - score`` on the validation labels, then derives
    per-model weights from the selection counts.

    Fixed (#438): candidate evaluation now minimizes the loss
    ``1 - calculate_score(...)`` (``nanmin``/``nanargmin``) instead of
    maximizing the raw score, for consistency with the rest of the loss
    minimization in auto-sklearn; ``trajectory_`` therefore stores losses.
    """

    def __init__(self, ensemble_size, task_type, metric,
                 sorted_initialization=False, bagging=False, mode='fast'):
        # ensemble_size: number of (non-unique) members to select.
        # task_type: one of autosklearn's TASK_TYPES constants.
        # metric: a Scorer used to evaluate candidate ensembles.
        # mode: 'fast' (incremental mean) or 'slow' (recompute mean each step).
        self.ensemble_size = ensemble_size
        self.task_type = task_type
        self.metric = metric
        self.sorted_initialization = sorted_initialization
        self.bagging = bagging
        self.mode = mode

    def fit(self, predictions, labels, identifiers):
        """Validate hyperparameters, run the selection and compute weights."""
        self.ensemble_size = int(self.ensemble_size)
        if self.ensemble_size < 1:
            raise ValueError('Ensemble size cannot be less than one!')
        if self.task_type not in TASK_TYPES:
            raise ValueError('Unknown task type %s.' % self.task_type)
        if not isinstance(self.metric, Scorer):
            raise ValueError('Metric must be of type scorer')
        if self.mode not in ('fast', 'slow'):
            raise ValueError('Unknown mode %s' % self.mode)
        if self.bagging:
            self._bagging(predictions, labels)
        else:
            self._fit(predictions, labels)
        self._calculate_weights()
        self.identifiers_ = identifiers
        return self

    def _fit(self, predictions, labels):
        """Dispatch to the fast or slow selection implementation."""
        if self.mode == 'fast':
            self._fast(predictions, labels)
        else:
            self._slow(predictions, labels)
        return self

    def _fast(self, predictions, labels):
        """Fast version of Rich Caruana's ensemble selection method.

        Maintains the running ensemble mean incrementally instead of
        recomputing it for every candidate.
        """
        self.num_input_models_ = len(predictions)
        ensemble = []
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        if self.sorted_initialization:
            n_best = 20
            indices = self._sorted_initialization(predictions, labels, n_best)
            for idx in indices:
                ensemble.append(predictions[idx])
                order.append(idx)
                ensemble_ = np.array(ensemble).mean(axis=0)
                ensemble_performance = calculate_score(
                    labels, ensemble_, self.task_type, self.metric,
                    ensemble_.shape[1])
                trajectory.append(ensemble_performance)
            ensemble_size -= n_best
        for i in range(ensemble_size):
            scores = np.zeros((len(predictions)))
            s = len(ensemble)
            if s == 0:
                weighted_ensemble_prediction = np.zeros(predictions[0].shape)
            else:
                # Pre-scale the current mean so each candidate only needs one
                # weighted addition instead of a full re-average.
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                weighted_ensemble_prediction = (s / float(s + 1)) * \
                    ensemble_prediction
            fant_ensemble_prediction = np.zeros(weighted_ensemble_prediction.shape)
            for j, pred in enumerate(predictions):
                # TODO: this could potentially be vectorized! - let's profile
                # the script first!
                fant_ensemble_prediction[:,:] = weighted_ensemble_prediction + \
                    (1. / float(s + 1)) * pred
                # FIX #438: minimize the loss (1 - score) for consistency.
                scores[j] = 1 - calculate_score(
                    solution=labels,
                    prediction=fant_ensemble_prediction,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
            # Break ties between equally good candidates at random.
            all_best = np.argwhere(scores == np.nanmin(scores)).flatten()
            best = np.random.choice(all_best)
            ensemble.append(predictions[best])
            trajectory.append(scores[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = order
        self.trajectory_ = trajectory
        self.train_score_ = trajectory[-1]

    def _slow(self, predictions, labels):
        """Rich Caruana's ensemble selection method (straightforward version)."""
        self.num_input_models_ = len(predictions)
        ensemble = []
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        if self.sorted_initialization:
            n_best = 20
            indices = self._sorted_initialization(predictions, labels, n_best)
            for idx in indices:
                ensemble.append(predictions[idx])
                order.append(idx)
                ensemble_ = np.array(ensemble).mean(axis=0)
                ensemble_performance = calculate_score(
                    solution=labels,
                    prediction=ensemble_,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
                trajectory.append(ensemble_performance)
            ensemble_size -= n_best
        for i in range(ensemble_size):
            scores = np.zeros([predictions.shape[0]])
            for j, pred in enumerate(predictions):
                # Tentatively add the candidate, score, then remove it.
                ensemble.append(pred)
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                # FIX #438: minimize the loss (1 - score) for consistency.
                scores[j] = 1 - calculate_score(
                    solution=labels,
                    prediction=ensemble_prediction,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
                ensemble.pop()
            best = np.nanargmin(scores)
            ensemble.append(predictions[best])
            trajectory.append(scores[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = np.array(order)
        self.trajectory_ = np.array(trajectory)
        self.train_score_ = trajectory[-1]

    def _calculate_weights(self):
        """Turn selection counts into normalized per-model weights."""
        ensemble_members = Counter(self.indices_).most_common()
        weights = np.zeros((self.num_input_models_,), dtype=float)
        for ensemble_member in ensemble_members:
            weight = float(ensemble_member[1]) / self.ensemble_size
            weights[ensemble_member[0]] = weight
        if np.sum(weights) < 1:
            weights = weights / np.sum(weights)
        self.weights_ = weights

    def _sorted_initialization(self, predictions, labels, n_best):
        """Return indices of the n_best models by individual score."""
        perf = np.zeros([predictions.shape[0]])
        for idx, prediction in enumerate(predictions):
            perf[idx] = calculate_score(labels, prediction, self.task_type,
                                        self.metric, predictions.shape[1])
        indices = np.argsort(perf)[perf.shape[0] - n_best:]
        return indices

    def _bagging(self, predictions, labels, fraction=0.5, n_bags=20):
        """Rich Caruana's ensemble selection method with bagging.

        Deliberately disabled: raises immediately (see message).
        """
        raise ValueError('Bagging might not work with class-based interface!')
        n_models = predictions.shape[0]
        bag_size = int(n_models * fraction)
        order_of_each_bag = []
        for j in range(n_bags):
            # Bagging a set of models
            indices = sorted(random.sample(range(0, n_models), bag_size))
            bag = predictions[indices, :, :]
            order, _ = self._fit(bag, labels)
            order_of_each_bag.append(order)
        return np.array(order_of_each_bag)

    def predict(self, predictions):
        """Return the weighted sum of the member predictions.

        NOTE: mutates `predictions` in place (scales each entry by its weight).
        """
        for i, weight in enumerate(self.weights_):
            predictions[i] *= weight
        return np.sum(predictions, axis=0)

    def __str__(self):
        # Trajectory entries are losses after the #438 fix.
        return 'Ensemble Selection:\n\tTrajectory: %s\n\tMembers: %s' \
               '\n\tWeights: %s\n\tIdentifiers: %s' % \
               (' '.join(['%d: %5f' % (idx, performance)
                          for idx, performance in enumerate(self.trajectory_)]),
                self.indices_, self.weights_,
                ' '.join([str(identifier) for idx, identifier in
                          enumerate(self.identifiers_)
                          if self.weights_[idx] > 0]))

    def get_models_with_weights(self, models):
        """Return (weight, model) pairs for members with non-zero weight."""
        output = []
        for i, weight in enumerate(self.weights_):
            identifier = self.identifiers_[i]
            model = models[identifier]
            if weight > 0.0:
                output.append((weight, model))
        output.sort(reverse=True, key=lambda t: t[0])
        return output

    def get_model_identifiers(self):
        """Return the identifiers passed to fit()."""
        return self.identifiers_

    def get_validation_performance(self):
        """Return the final trajectory entry (a loss after the #438 fix)."""
        return self.trajectory_[-1]
FIX #438 minimize ensemble loss for consistency
from collections import Counter
import random
import numpy as np
from autosklearn.constants import *
from autosklearn.ensembles.abstract_ensemble import AbstractEnsemble
from autosklearn.metrics import calculate_score
from autosklearn.metrics import Scorer
class EnsembleSelection(AbstractEnsemble):
    """Greedy ensemble selection (Rich Caruana et al.).

    Repeatedly adds (with replacement) the model whose inclusion minimizes
    the ensemble loss ``1 - score`` on the validation labels, then derives
    per-model weights from the selection counts. ``trajectory_`` stores the
    per-step losses.
    """

    def __init__(self, ensemble_size, task_type, metric,
                 sorted_initialization=False, bagging=False, mode='fast'):
        # ensemble_size: number of (non-unique) members to select.
        # task_type: one of autosklearn's TASK_TYPES constants.
        # metric: a Scorer used to evaluate candidate ensembles.
        # mode: 'fast' (incremental mean) or 'slow' (recompute mean each step).
        self.ensemble_size = ensemble_size
        self.task_type = task_type
        self.metric = metric
        self.sorted_initialization = sorted_initialization
        self.bagging = bagging
        self.mode = mode

    def fit(self, predictions, labels, identifiers):
        """Validate hyperparameters, run the selection and compute weights."""
        self.ensemble_size = int(self.ensemble_size)
        if self.ensemble_size < 1:
            raise ValueError('Ensemble size cannot be less than one!')
        if not self.task_type in TASK_TYPES:
            raise ValueError('Unknown task type %s.' % self.task_type)
        if not isinstance(self.metric, Scorer):
            raise ValueError('Metric must be of type scorer')
        if self.mode not in ('fast', 'slow'):
            raise ValueError('Unknown mode %s' % self.mode)
        if self.bagging:
            self._bagging(predictions, labels)
        else:
            self._fit(predictions, labels)
        self._calculate_weights()
        self.identifiers_ = identifiers
        return self

    def _fit(self, predictions, labels):
        """Dispatch to the fast or slow selection implementation."""
        if self.mode == 'fast':
            self._fast(predictions, labels)
        else:
            self._slow(predictions, labels)
        return self

    def _fast(self, predictions, labels):
        """Fast version of Rich Caruana's ensemble selection method.

        Maintains the running ensemble mean incrementally instead of
        recomputing it for every candidate.
        """
        self.num_input_models_ = len(predictions)
        ensemble = []
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        if self.sorted_initialization:
            n_best = 20
            indices = self._sorted_initialization(predictions, labels, n_best)
            for idx in indices:
                ensemble.append(predictions[idx])
                order.append(idx)
                ensemble_ = np.array(ensemble).mean(axis=0)
                # NOTE(review): this appends a raw *score* while the greedy
                # loop below appends *losses* (1 - score); the trajectory is
                # mixed when sorted_initialization is on — confirm intended.
                ensemble_performance = calculate_score(
                    labels, ensemble_, self.task_type, self.metric,
                    ensemble_.shape[1])
                trajectory.append(ensemble_performance)
            ensemble_size -= n_best
        for i in range(ensemble_size):
            scores = np.zeros((len(predictions)))
            s = len(ensemble)
            if s == 0:
                weighted_ensemble_prediction = np.zeros(predictions[0].shape)
            else:
                # Pre-scale the current mean so each candidate only needs one
                # weighted addition instead of a full re-average.
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                weighted_ensemble_prediction = (s / float(s + 1)) * \
                    ensemble_prediction
            fant_ensemble_prediction = np.zeros(weighted_ensemble_prediction.shape)
            for j, pred in enumerate(predictions):
                # TODO: this could potentially be vectorized! - let's profile
                # the script first!
                fant_ensemble_prediction[:,:] = weighted_ensemble_prediction + \
                    (1. / float(s + 1)) * pred
                # Loss = 1 - score: lower is better.
                scores[j] = 1 - calculate_score(
                    solution=labels,
                    prediction=fant_ensemble_prediction,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
            # Break ties between equally good candidates at random.
            all_best = np.argwhere(scores == np.nanmin(scores)).flatten()
            best = np.random.choice(all_best)
            ensemble.append(predictions[best])
            trajectory.append(scores[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = order
        self.trajectory_ = trajectory
        self.train_score_ = trajectory[-1]

    def _slow(self, predictions, labels):
        """Rich Caruana's ensemble selection method."""
        self.num_input_models_ = len(predictions)
        ensemble = []
        trajectory = []
        order = []
        ensemble_size = self.ensemble_size
        if self.sorted_initialization:
            n_best = 20
            indices = self._sorted_initialization(predictions, labels, n_best)
            for idx in indices:
                ensemble.append(predictions[idx])
                order.append(idx)
                ensemble_ = np.array(ensemble).mean(axis=0)
                # NOTE(review): raw score vs. losses below — see _fast.
                ensemble_performance = calculate_score(
                    solution=labels,
                    prediction=ensemble_,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
                trajectory.append(ensemble_performance)
            ensemble_size -= n_best
        for i in range(ensemble_size):
            # NOTE(review): uses predictions.shape[0] here while _fast uses
            # len(predictions) — presumably predictions is an ndarray on this
            # path; confirm against callers.
            scores = np.zeros([predictions.shape[0]])
            for j, pred in enumerate(predictions):
                # Tentatively add the candidate, score, then remove it.
                ensemble.append(pred)
                ensemble_prediction = np.mean(np.array(ensemble), axis=0)
                # Loss = 1 - score: lower is better.
                scores[j] = 1 - calculate_score(
                    solution=labels,
                    prediction=ensemble_prediction,
                    task_type=self.task_type,
                    metric=self.metric,
                    all_scoring_functions=False)
                ensemble.pop()
            best = np.nanargmin(scores)
            ensemble.append(predictions[best])
            trajectory.append(scores[best])
            order.append(best)
            # Handle special case
            if len(predictions) == 1:
                break
        self.indices_ = np.array(order)
        self.trajectory_ = np.array(trajectory)
        self.train_score_ = trajectory[-1]

    def _calculate_weights(self):
        """Turn selection counts into normalized per-model weights."""
        ensemble_members = Counter(self.indices_).most_common()
        weights = np.zeros((self.num_input_models_,), dtype=float)
        for ensemble_member in ensemble_members:
            weight = float(ensemble_member[1]) / self.ensemble_size
            weights[ensemble_member[0]] = weight
        # Renormalize when fewer than ensemble_size members were selected.
        if np.sum(weights) < 1:
            weights = weights / np.sum(weights)
        self.weights_ = weights

    def _sorted_initialization(self, predictions, labels, n_best):
        """Return indices of the n_best models by individual score."""
        perf = np.zeros([predictions.shape[0]])
        for idx, prediction in enumerate(predictions):
            perf[idx] = calculate_score(labels, prediction, self.task_type,
                                        self.metric, predictions.shape[1])
        indices = np.argsort(perf)[perf.shape[0] - n_best:]
        return indices

    def _bagging(self, predictions, labels, fraction=0.5, n_bags=20):
        """Rich Caruana's ensemble selection method with bagging.

        Deliberately disabled: raises immediately, the code below is
        unreachable.
        """
        raise ValueError('Bagging might not work with class-based interface!')
        n_models = predictions.shape[0]
        bag_size = int(n_models * fraction)
        order_of_each_bag = []
        for j in range(n_bags):
            # Bagging a set of models
            indices = sorted(random.sample(range(0, n_models), bag_size))
            bag = predictions[indices, :, :]
            order, _ = self._fit(bag, labels)
            order_of_each_bag.append(order)
        return np.array(order_of_each_bag)

    def predict(self, predictions):
        """Return the weighted sum of the member predictions.

        NOTE: mutates `predictions` in place (scales each entry by its weight).
        """
        for i, weight in enumerate(self.weights_):
            predictions[i] *= weight
        return np.sum(predictions, axis=0)

    def __str__(self):
        # Trajectory entries are the per-step losses.
        return 'Ensemble Selection:\n\tTrajectory: %s\n\tMembers: %s' \
               '\n\tWeights: %s\n\tIdentifiers: %s' % \
               (' '.join(['%d: %5f' % (idx, performance)
                          for idx, performance in enumerate(self.trajectory_)]),
                self.indices_, self.weights_,
                ' '.join([str(identifier) for idx, identifier in
                          enumerate(self.identifiers_)
                          if self.weights_[idx] > 0]))

    def get_models_with_weights(self, models):
        """Return (weight, model) pairs for members with non-zero weight."""
        output = []
        for i, weight in enumerate(self.weights_):
            identifier = self.identifiers_[i]
            model = models[identifier]
            if weight > 0.0:
                output.append((weight, model))
        output.sort(reverse=True, key=lambda t: t[0])
        return output

    def get_model_identifiers(self):
        """Return the identifiers passed to fit()."""
        return self.identifiers_

    def get_validation_performance(self):
        """Return the final trajectory entry (a loss; lower is better)."""
        return self.trajectory_[-1]
import anyjson
import urllib
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.utils.translation import ugettext_lazy as _
from oauth2client.contrib.django_orm import Storage
from rest_framework.parsers import FormParser
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.views import APIView
from lily.contacts.models import Contact
from lily.deals.models import Deal
from lily.utils.functions import send_get_request
from lily.utils.api.permissions import IsAccountAdmin, IsFeatureAvailable
from .serializers import DocumentSerializer
from ..credentials import get_access_token, get_credentials, put_credentials, LilyOAuthCredentials
from ..models import Document, IntegrationCredentials, IntegrationDetails, IntegrationType
from ..tasks import import_moneybird_contacts
class DocumentDetails(APIView):
    """Read-only endpoint exposing a PandaDoc document plus its linked deal."""
    serializer = DocumentSerializer
    parser_classes = (JSONParser, FormParser)

    def get(self, request, document_id, format=None):
        """
        Get details of the given document.

        Returns ``{'document': {}}`` when the document is unknown; otherwise
        the serialized document enriched with deal and assignee info.
        """
        document = {}

        try:
            # NOTE(review): reads self.kwargs rather than the document_id
            # parameter; both presumably hold the same URL kwarg — confirm.
            document = Document.objects.get(document_id=self.kwargs['document_id'])
        except Document.DoesNotExist:
            # Unknown document: fall through and return the empty dict.
            pass
        else:
            deal = Deal.objects.get(pk=document.deal_id)
            document = DocumentSerializer(document).data
            # Enrich the serialized document with deal and assignee details.
            document.update({
                'deal': {
                    'id': deal.id,
                    'status': deal.status_id,
                    'modified': deal.modified,
                },
                'assigned_to': {
                    'id': deal.assigned_to.id,
                    'full_name': deal.assigned_to.full_name,
                } if deal.assigned_to else None,
            })

        return Response({'document': document})
class PandaDocList(APIView):
    """List and create PandaDoc documents linked to a contact."""
    # Bug fix: ``(IsFeatureAvailable)`` is just the class in parentheses, not
    # a tuple, so DRF crashed when iterating permission_classes; a one-element
    # tuple is what was intended.
    permission_classes = (IsFeatureAvailable, )
    serializer = DocumentSerializer
    parser_classes = (JSONParser, FormParser)

    def get(self, request, contact_id, format=None):
        """
        List all PandaDoc documents.

        Fetches live details from the PandaDoc API for each stored document;
        documents that no longer exist remotely are deleted locally.
        """
        documents = Document.objects.filter(contact=self.kwargs['contact_id'])
        temp_documents = []
        credentials = get_credentials('pandadoc')

        for document in documents:
            url = 'https://api.pandadoc.com/public/v1/documents/%s/details' % document.document_id
            response = send_get_request(url, credentials)
            data = response.json()

            if data.get('id'):
                temp_documents.append(data)
            else:
                # No details could be retrieved, so it's probably been deleted in PandaDoc.
                document.delete()

        return Response({'documents': temp_documents})

    def post(self, request, contact_id):
        """Link an existing PandaDoc document to the given contact and deal."""
        contact = Contact.objects.get(pk=contact_id)
        deal = Deal.objects.get(pk=request.POST.get('deal_id'))
        document_id = request.POST.get('document_id')
        document = Document.objects.create(contact=contact, deal=deal, document_id=document_id)
        document = DocumentSerializer(document).data

        return Response({'document': document})
class MoneybirdContactImport(APIView):
    """Kick off an asynchronous import of Moneybird contacts for a tenant."""
    permission_classes = (IsAccountAdmin, IsFeatureAvailable)

    def post(self, request):
        """Store the auto-sync preference and start the import task."""
        credentials = get_credentials('moneybird')

        if not credentials:
            errors = {
                'no_credentials': [_('No Moneybird credentials found')]
            }
            return HttpResponseBadRequest(anyjson.serialize(errors), content_type='application/json')

        # Remember whether contacts should stay in sync automatically.
        credentials.integration_context.update({
            'auto_sync': self.request.data.get('auto_sync'),
        })

        put_credentials('moneybird', credentials)

        # Run the actual import in the background for the current tenant.
        import_moneybird_contacts.apply_async(args=(self.request.user.tenant.id,))

        return Response({'import_started': True})
class EstimatesList(APIView):
    """Read-only endpoint listing Moneybird estimates."""

    def get(self, request, contact_id, format=None):
        """
        List all Moneybird estimates.
        """
        credentials = get_credentials('moneybird')
        administration_id = credentials.integration_context.get('administration_id')
        url = 'https://moneybird.com/api/v2/%s/estimates' % administration_id
        data = send_get_request(url, credentials).json()

        return Response({'estimates': data})
class IntegrationAuth(APIView):
    """Handle the OAuth2 flow for third-party integrations."""
    parser_classes = (JSONParser, FormParser)
    permission_classes = (IsAccountAdmin, IsFeatureAvailable)

    def post(self, request, integration_type):
        """
        Get the authentication URL for the given integration type.

        Validates the posted client credentials, stores them, and returns
        the provider's authorization URL as JSON.
        """
        client_id = request.POST.get('client_id')
        client_secret = request.POST.get('client_secret')
        integration_context = request.POST.get('integration_context')

        if integration_context:
            integration_context = anyjson.loads(integration_context)

        errors = {}

        if not client_id:
            errors.update({
                'client_id': ['Please enter a valid client ID'],
            })

        if not client_secret:
            errors.update({
                'client_secret': ['Please enter a valid client secret'],
            })

        if errors:
            return HttpResponseBadRequest(anyjson.serialize(errors), content_type='application/json')

        integration_type = IntegrationType.objects.get(name__iexact=integration_type)
        # The provider redirects back to this same endpoint; the GET handler
        # below then exchanges the authorization code it receives.
        redirect_uri = request.build_absolute_uri()

        params = {
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': redirect_uri,
            'scope': integration_type.scope,
            'response_type': 'code',
        }

        details, created = IntegrationDetails.objects.get_or_create(type=integration_type)

        storage = Storage(IntegrationCredentials, 'details', details, 'credentials')
        # Persist the client credentials so the GET handler can complete the
        # token exchange later.
        credentials = LilyOAuthCredentials(
            client_id=client_id,
            client_secret=client_secret,
            redirect_uri=redirect_uri,
            integration_context=integration_context,
        )

        storage.put(credentials)

        auth_url = integration_type.auth_url + urllib.urlencode(params)
        response = anyjson.serialize({'url': auth_url})

        return HttpResponse(response, content_type='application/json')

    def get(self, request, integration_type, format=None):
        """
        Exchange an authorization code for an access token for the given integration type.
        """
        code = str(request.GET.get('code'))
        error = request.GET.get('error')

        if error:
            # The user denied access or the provider reported an error.
            messages.error(
                self.request._request,  # add_message needs an HttpRequest object
                _('Sorry, Please authorize Lily to use the integration.')
            )
            return HttpResponseRedirect('/#/preferences/admin/integrations/%s' % integration_type)

        credentials = get_credentials(integration_type)

        if not credentials:
            response = anyjson.serialize({'error': 'No credentials found. Please enter your credentials again'})
            return HttpResponse(response, content_type='application/json')

        # Trade the authorization code for an access token and store it.
        get_access_token(credentials, integration_type, code)

        messages.success(
            self.request._request,  # add_message needs an HttpRequest object
            _('Your credentials have been saved.')
        )

        return HttpResponseRedirect('/#/preferences/admin/integrations')
Fixed permissions not being iterable
import anyjson
import urllib
from django.contrib import messages
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseBadRequest
from django.utils.translation import ugettext_lazy as _
from oauth2client.contrib.django_orm import Storage
from rest_framework.parsers import FormParser
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.views import APIView
from lily.contacts.models import Contact
from lily.deals.models import Deal
from lily.utils.functions import send_get_request
from lily.utils.api.permissions import IsAccountAdmin, IsFeatureAvailable
from .serializers import DocumentSerializer
from ..credentials import get_access_token, get_credentials, put_credentials, LilyOAuthCredentials
from ..models import Document, IntegrationCredentials, IntegrationDetails, IntegrationType
from ..tasks import import_moneybird_contacts
class DocumentDetails(APIView):
    """Read-only endpoint exposing a PandaDoc document plus its linked deal."""
    serializer = DocumentSerializer
    parser_classes = (JSONParser, FormParser)

    def get(self, request, document_id, format=None):
        """
        Get details of the given document.

        Returns ``{'document': {}}`` when the document is unknown; otherwise
        the serialized document enriched with deal and assignee info.
        """
        document = {}

        try:
            # NOTE(review): reads self.kwargs rather than the document_id
            # parameter; both presumably hold the same URL kwarg — confirm.
            document = Document.objects.get(document_id=self.kwargs['document_id'])
        except Document.DoesNotExist:
            # Unknown document: fall through and return the empty dict.
            pass
        else:
            deal = Deal.objects.get(pk=document.deal_id)
            document = DocumentSerializer(document).data
            # Enrich the serialized document with deal and assignee details.
            document.update({
                'deal': {
                    'id': deal.id,
                    'status': deal.status_id,
                    'modified': deal.modified,
                },
                'assigned_to': {
                    'id': deal.assigned_to.id,
                    'full_name': deal.assigned_to.full_name,
                } if deal.assigned_to else None,
            })

        return Response({'document': document})
class PandaDocList(APIView):
    """List and create PandaDoc documents linked to a contact."""
    # Single-element tuple: DRF iterates over permission_classes.
    permission_classes = (IsFeatureAvailable, )
    serializer = DocumentSerializer
    parser_classes = (JSONParser, FormParser)

    def get(self, request, contact_id, format=None):
        """
        List all PandaDoc documents.

        Fetches live details from the PandaDoc API for each stored document;
        documents that no longer exist remotely are deleted locally.
        """
        documents = Document.objects.filter(contact=self.kwargs['contact_id'])
        temp_documents = []
        credentials = get_credentials('pandadoc')

        for document in documents:
            url = 'https://api.pandadoc.com/public/v1/documents/%s/details' % document.document_id
            response = send_get_request(url, credentials)
            data = response.json()

            if data.get('id'):
                temp_documents.append(data)
            else:
                # No details could be retrieved, so it's probably been deleted in PandaDoc.
                document.delete()

        return Response({'documents': temp_documents})

    def post(self, request, contact_id):
        """Link an existing PandaDoc document to the given contact and deal."""
        contact = Contact.objects.get(pk=contact_id)
        deal = Deal.objects.get(pk=request.POST.get('deal_id'))
        document_id = request.POST.get('document_id')
        document = Document.objects.create(contact=contact, deal=deal, document_id=document_id)
        document = DocumentSerializer(document).data

        return Response({'document': document})
class MoneybirdContactImport(APIView):
    """Kick off an asynchronous import of Moneybird contacts for a tenant."""
    permission_classes = (IsAccountAdmin, IsFeatureAvailable)

    def post(self, request):
        """Store the auto-sync preference and start the import task."""
        credentials = get_credentials('moneybird')

        if not credentials:
            errors = {
                'no_credentials': [_('No Moneybird credentials found')]
            }
            return HttpResponseBadRequest(anyjson.serialize(errors), content_type='application/json')

        # Remember whether contacts should stay in sync automatically.
        credentials.integration_context.update({
            'auto_sync': self.request.data.get('auto_sync'),
        })

        put_credentials('moneybird', credentials)

        # Run the actual import in the background for the current tenant.
        import_moneybird_contacts.apply_async(args=(self.request.user.tenant.id,))

        return Response({'import_started': True})
class EstimatesList(APIView):
    """Read-only endpoint listing Moneybird estimates."""

    def get(self, request, contact_id, format=None):
        """
        List all Moneybird estimates.
        """
        credentials = get_credentials('moneybird')
        # The administration id scopes the Moneybird API to one bookkeeping.
        url = 'https://moneybird.com/api/v2/%s/estimates' % credentials.integration_context.get('administration_id')
        response = send_get_request(url, credentials)
        data = response.json()

        return Response({'estimates': data})
class IntegrationAuth(APIView):
    """Handle the OAuth2 flow for third-party integrations."""
    parser_classes = (JSONParser, FormParser)
    permission_classes = (IsAccountAdmin, IsFeatureAvailable)

    def post(self, request, integration_type):
        """
        Get the authentication URL for the given integration type.

        Validates the posted client credentials, stores them, and returns
        the provider's authorization URL as JSON.
        """
        client_id = request.POST.get('client_id')
        client_secret = request.POST.get('client_secret')
        integration_context = request.POST.get('integration_context')

        if integration_context:
            integration_context = anyjson.loads(integration_context)

        errors = {}

        if not client_id:
            errors.update({
                'client_id': ['Please enter a valid client ID'],
            })

        if not client_secret:
            errors.update({
                'client_secret': ['Please enter a valid client secret'],
            })

        if errors:
            return HttpResponseBadRequest(anyjson.serialize(errors), content_type='application/json')

        integration_type = IntegrationType.objects.get(name__iexact=integration_type)
        # The provider redirects back to this same endpoint; the GET handler
        # below then exchanges the authorization code it receives.
        redirect_uri = request.build_absolute_uri()

        params = {
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': redirect_uri,
            'scope': integration_type.scope,
            'response_type': 'code',
        }

        details, created = IntegrationDetails.objects.get_or_create(type=integration_type)

        storage = Storage(IntegrationCredentials, 'details', details, 'credentials')
        # Persist the client credentials so the GET handler can complete the
        # token exchange later.
        credentials = LilyOAuthCredentials(
            client_id=client_id,
            client_secret=client_secret,
            redirect_uri=redirect_uri,
            integration_context=integration_context,
        )

        storage.put(credentials)

        auth_url = integration_type.auth_url + urllib.urlencode(params)
        response = anyjson.serialize({'url': auth_url})

        return HttpResponse(response, content_type='application/json')

    def get(self, request, integration_type, format=None):
        """
        Exchange an authorization code for an access token for the given integration type.
        """
        code = str(request.GET.get('code'))
        error = request.GET.get('error')

        if error:
            # The user denied access or the provider reported an error.
            messages.error(
                self.request._request,  # add_message needs an HttpRequest object
                _('Sorry, Please authorize Lily to use the integration.')
            )
            return HttpResponseRedirect('/#/preferences/admin/integrations/%s' % integration_type)

        credentials = get_credentials(integration_type)

        if not credentials:
            response = anyjson.serialize({'error': 'No credentials found. Please enter your credentials again'})
            return HttpResponse(response, content_type='application/json')

        # Trade the authorization code for an access token and store it.
        get_access_token(credentials, integration_type, code)

        messages.success(
            self.request._request,  # add_message needs an HttpRequest object
            _('Your credentials have been saved.')
        )

        return HttpResponseRedirect('/#/preferences/admin/integrations')
|
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import mock
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from textclassifier.validators import ClassifierValidator
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.constants import (
PRIVATE,
PROTECTED,
PUBLIC,
REPO_TYPE_GIT,
REPO_TYPE_HG,
)
from readthedocs.projects.exceptions import ProjectSpamError
from readthedocs.projects.forms import (
ProjectAdvancedForm,
ProjectBasicsForm,
ProjectExtraForm,
TranslationForm,
UpdateProjectForm,
)
from readthedocs.projects.models import Project
class TestProjectForms(TestCase):
    """Tests for the project import/basics/extra forms."""

    @mock.patch.object(ClassifierValidator, '__call__')
    def test_form_spam(self, mocked_validator):
        """Form description field fails spam validation."""
        mocked_validator.side_effect = ProjectSpamError

        data = {
            'description': 'foo',
            'documentation_type': 'sphinx',
            'language': 'en',
        }
        form = ProjectExtraForm(data)
        with self.assertRaises(ProjectSpamError):
            form.is_valid()

    def test_import_repo_url(self):
        """Validate different type of repository URLs on importing a Project."""
        common_urls = [
            # Invalid
            ('./path/to/relative/folder', False),
            ('../../path/to/relative/folder', False),
            ('../../path/to/@/folder', False),
            ('/path/to/local/folder', False),
            ('/path/to/@/folder', False),
            ('file:///path/to/local/folder', False),
            ('file:///path/to/@/folder', False),
            ('github.com/humitos/foo', False),
            ('https://github.com/|/foo', False),
            ('git://github.com/&&/foo', False),
            # Valid
            ('git://github.com/humitos/foo', True),
            ('http://github.com/humitos/foo', True),
            ('https://github.com/humitos/foo', True),
            ('http://gitlab.com/humitos/foo', True),
            ('http://bitbucket.com/humitos/foo', True),
            ('ftp://ftpserver.com/humitos/foo', True),
            ('ftps://ftpserver.com/humitos/foo', True),
            ('lp:zaraza', True),
        ]

        # SSH-style URLs are only allowed when private repos are enabled.
        public_urls = [
            ('git@github.com:humitos/foo', False),
            ('ssh://git@github.com/humitos/foo', False),
            ('ssh+git://github.com/humitos/foo', False),
            ('strangeuser@bitbucket.org:strangeuser/readthedocs.git', False),
            ('user@one-ssh.domain.com:22/_ssh/docs', False),
        ] + common_urls

        private_urls = [
            ('git@github.com:humitos/foo', True),
            ('ssh://git@github.com/humitos/foo', True),
            ('ssh+git://github.com/humitos/foo', True),
            ('strangeuser@bitbucket.org:strangeuser/readthedocs.git', True),
            ('user@one-ssh.domain.com:22/_ssh/docs', True),
        ] + common_urls

        with override_settings(ALLOW_PRIVATE_REPOS=False):
            for url, valid in public_urls:
                initial = {
                    'name': 'foo',
                    'repo_type': 'git',
                    'repo': url,
                }
                form = ProjectBasicsForm(initial)
                self.assertEqual(form.is_valid(), valid, msg=url)

        with override_settings(ALLOW_PRIVATE_REPOS=True):
            for url, valid in private_urls:
                initial = {
                    'name': 'foo',
                    'repo_type': 'git',
                    'repo': url,
                }
                form = ProjectBasicsForm(initial)
                self.assertEqual(form.is_valid(), valid, msg=url)

    def test_empty_slug(self):
        """A name that slugifies to an empty string must be rejected."""
        initial = {
            'name': "''",
            'repo_type': 'git',
            'repo': 'https://github.com/user/repository',
        }
        form = ProjectBasicsForm(initial)
        self.assertFalse(form.is_valid())
        self.assertIn('name', form.errors)

    def test_changing_vcs_should_change_latest(self):
        """When changing the project's VCS, latest should be changed too."""
        project = get(Project, repo_type=REPO_TYPE_HG, default_branch=None)
        latest = project.versions.get(slug=LATEST)
        self.assertEqual(latest.identifier, 'default')

        form = ProjectBasicsForm(
            {
                'repo': 'http://github.com/test/test',
                'name': 'name',
                'repo_type': REPO_TYPE_GIT,
            },
            instance=project,
        )
        self.assertTrue(form.is_valid())
        form.save()
        latest.refresh_from_db()
        # hg's 'default' becomes git's 'master'.
        self.assertEqual(latest.identifier, 'master')

    def test_changing_vcs_should_not_change_latest_is_not_none(self):
        """
        When changing the project's VCS,
        we should respect the custom default branch.
        """
        project = get(Project, repo_type=REPO_TYPE_HG, default_branch='custom')
        latest = project.versions.get(slug=LATEST)
        self.assertEqual(latest.identifier, 'custom')

        form = ProjectBasicsForm(
            {
                'repo': 'http://github.com/test/test',
                'name': 'name',
                'repo_type': REPO_TYPE_GIT,
            },
            instance=project,
        )
        self.assertTrue(form.is_valid())
        form.save()
        latest.refresh_from_db()
        self.assertEqual(latest.identifier, 'custom')
class TestProjectAdvancedForm(TestCase):
    """Tests for the version choices offered by the advanced project form."""

    def setUp(self):
        # One project with a mix of active/inactive versions across all
        # privacy levels, to exercise the choice filtering below.
        self.project = get(Project)
        get(
            Version,
            project=self.project,
            slug='public-1',
            active=True,
            privacy_level=PUBLIC,
        )
        get(
            Version,
            project=self.project,
            slug='public-2',
            active=True,
            privacy_level=PUBLIC,
        )
        get(
            Version,
            project=self.project,
            slug='public-3',
            active=False,
            privacy_level=PROTECTED,
        )
        get(
            Version,
            project=self.project,
            slug='private',
            active=True,
            privacy_level=PRIVATE,
        )
        get(
            Version,
            project=self.project,
            slug='protected',
            active=True,
            privacy_level=PROTECTED,
        )

    def test_list_only_active_versions_on_default_version(self):
        """Inactive versions (public-3) must not be selectable as default."""
        form = ProjectAdvancedForm(instance=self.project)
        # This version is created automatically by the project on save
        self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
        self.assertEqual(
            set(
                slug
                for slug, _ in form.fields['default_version'].widget.choices
            ),
            {'latest', 'public-1', 'public-2', 'private', 'protected'},
        )

    def test_list_all_versions_on_default_branch(self):
        """The default-branch field offers every version, active or not."""
        form = ProjectAdvancedForm(instance=self.project)
        # This version is created automatically by the project on save
        self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
        self.assertEqual(
            set(
                slug
                for slug, _ in form.fields['default_branch'].widget.choices
            ),
            {
                None, 'latest', 'public-1', 'public-2',
                'public-3', 'protected', 'private'
            },
        )
class TestTranslationForms(TestCase):
    """Tests for linking projects as translations of each other."""

    def setUp(self):
        # user_a owns five projects in different languages; user_b owns two
        # more; project_s_fr is shared by both users.
        self.user_a = get(User)
        self.project_a_es = self.get_project(lang='es', users=[self.user_a])
        self.project_b_en = self.get_project(lang='en', users=[self.user_a])
        self.project_c_br = self.get_project(lang='br', users=[self.user_a])
        self.project_d_ar = self.get_project(lang='ar', users=[self.user_a])
        self.project_e_en = self.get_project(lang='en', users=[self.user_a])

        self.user_b = get(User)
        self.project_f_ar = self.get_project(lang='ar', users=[self.user_b])
        self.project_g_ga = self.get_project(lang='ga', users=[self.user_b])

        self.project_s_fr = self.get_project(
            lang='fr',
            users=[self.user_b, self.user_a]
        )

    def get_project(self, lang, users, **kwargs):
        """Build a standalone project fixture in the given language."""
        return get(
            Project, language=lang, users=users,
            main_language_project=None, **kwargs
        )

    def test_list_only_owner_projects(self):
        """The project choices only contain projects owned by the user."""
        form = TranslationForm(
            {'project': self.project_b_en.slug},
            parent=self.project_a_es,
            user=self.user_a,
        )
        self.assertTrue(form.is_valid())
        expected_projects = [
            self.project_b_en,
            self.project_c_br,
            self.project_d_ar,
            self.project_e_en,
            self.project_s_fr,
        ]
        self.assertEqual(
            {proj_slug for proj_slug, _ in form.fields['project'].choices},
            {project.slug for project in expected_projects}
        )

        form = TranslationForm(
            {'project': self.project_g_ga.slug},
            parent=self.project_f_ar,
            user=self.user_b,
        )
        self.assertTrue(form.is_valid())
        expected_projects = [
            self.project_g_ga,
            self.project_s_fr,
        ]
        self.assertEqual(
            {proj_slug for proj_slug, _ in form.fields['project'].choices},
            {project.slug for project in expected_projects}
        )

    def test_excludes_existing_translations(self):
        """Projects already linked as translations are not offered again."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.translations.add(self.project_c_br)
        self.project_a_es.save()

        form = TranslationForm(
            {'project': self.project_d_ar.slug},
            parent=self.project_a_es,
            user=self.user_a,
        )
        self.assertTrue(form.is_valid())
        expected_projects = [
            self.project_d_ar,
            self.project_e_en,
            self.project_s_fr,
        ]
        self.assertEqual(
            {proj_slug for proj_slug, _ in form.fields['project'].choices},
            {project.slug for project in expected_projects}
        )

    def test_user_cant_add_other_user_project(self):
        """Another user's project can not be chosen as a translation."""
        form = TranslationForm(
            {'project': self.project_f_ar.slug},
            parent=self.project_b_en,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'Select a valid choice',
            ''.join(form.errors['project'])
        )
        self.assertNotIn(
            self.project_f_ar,
            [proj_slug for proj_slug, _ in form.fields['project'].choices]
        )

    def test_user_cant_add_project_with_same_lang(self):
        """A translation must not share the parent project's language."""
        form = TranslationForm(
            {'project': self.project_b_en.slug},
            parent=self.project_e_en,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'Both projects can not have the same language (English).',
            ''.join(form.errors['project'])
        )

    def test_user_cant_add_project_with_same_lang_of_other_translation(self):
        """Two translations of one parent can not share a language."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.save()

        form = TranslationForm(
            {'project': self.project_e_en.slug},
            parent=self.project_a_es,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'This project already has a translation for English.',
            ''.join(form.errors['project'])
        )

    def test_no_nesting_translation(self):
        """A project that is already a translation can't get translations."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.save()

        form = TranslationForm(
            {'project': self.project_b_en.slug},
            parent=self.project_c_br,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'Select a valid choice',
            ''.join(form.errors['project'])
        )

    def test_no_nesting_translation_case_2(self):
        """A project with translations can't itself become a translation."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.save()

        form = TranslationForm(
            {'project': self.project_a_es.slug},
            parent=self.project_c_br,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'A project with existing translations can not',
            ''.join(form.errors['project'])
        )

    def test_not_already_translation(self):
        """A translation can not serve as the parent of another project."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.save()

        form = TranslationForm(
            {'project': self.project_c_br.slug},
            parent=self.project_b_en,
            user=self.user_a,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'is already a translation',
            ''.join(form.errors['project'])
        )

    def test_cant_change_language_to_translation_lang(self):
        """Changing a language to one already used by a sibling must fail."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.translations.add(self.project_c_br)
        self.project_a_es.save()

        # Parent project tries to change lang
        form = UpdateProjectForm(
            {
                'documentation_type': 'sphinx',
                'language': 'en',
            },
            instance=self.project_a_es
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'There is already a "en" translation',
            ''.join(form.errors['language'])
        )

        # Translation tries to change lang
        form = UpdateProjectForm(
            {
                'documentation_type': 'sphinx',
                'language': 'es',
            },
            instance=self.project_b_en
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'There is already a "es" translation',
            ''.join(form.errors['language'])
        )

        # Translation tries to change lang
        # to the same as its sibling
        form = UpdateProjectForm(
            {
                'documentation_type': 'sphinx',
                'language': 'br',
            },
            instance=self.project_b_en
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'There is already a "br" translation',
            ''.join(form.errors['language'])
        )

    def test_can_change_language_to_self_lang(self):
        """Re-submitting a project's current language is always allowed."""
        self.project_a_es.translations.add(self.project_b_en)
        self.project_a_es.translations.add(self.project_c_br)
        self.project_a_es.save()

        # Parent project tries to change lang
        form = UpdateProjectForm(
            {
                'repo': 'https://github.com/test/test',
                'repo_type': self.project_a_es.repo_type,
                'name': self.project_a_es.name,
                'documentation_type': 'sphinx',
                'language': 'es',
            },
            instance=self.project_a_es
        )
        self.assertTrue(form.is_valid())

        # Translation tries to change lang
        form = UpdateProjectForm(
            {
                'repo': 'https://github.com/test/test',
                'repo_type': self.project_b_en.repo_type,
                'name': self.project_b_en.name,
                'documentation_type': 'sphinx',
                'language': 'en',
            },
            instance=self.project_b_en
        )
        self.assertTrue(form.is_valid())
Add tests
# -*- coding: utf-8 -*-
from __future__ import (
absolute_import,
division,
print_function,
unicode_literals,
)
import mock
from django.contrib.auth.models import User
from django.test import TestCase
from django.test.utils import override_settings
from django_dynamic_fixture import get
from textclassifier.validators import ClassifierValidator
from django.core.exceptions import ValidationError
from readthedocs.builds.constants import LATEST
from readthedocs.builds.models import Version
from readthedocs.projects.constants import (
PRIVATE,
PROTECTED,
PUBLIC,
REPO_TYPE_GIT,
REPO_TYPE_HG,
)
from readthedocs.projects.exceptions import ProjectSpamError
from readthedocs.projects.forms import (
ProjectAdvancedForm,
ProjectBasicsForm,
ProjectExtraForm,
TranslationForm,
UpdateProjectForm,
WebHookForm,
EmailHookForm
)
from readthedocs.projects.models import Project
class TestProjectForms(TestCase):
    """Tests for the project import/basics/extra forms."""

    @mock.patch.object(ClassifierValidator, '__call__')
    def test_form_spam(self, mocked_validator):
        """Form description field fails spam validation."""
        mocked_validator.side_effect = ProjectSpamError

        data = {
            'description': 'foo',
            'documentation_type': 'sphinx',
            'language': 'en',
        }
        form = ProjectExtraForm(data)
        with self.assertRaises(ProjectSpamError):
            form.is_valid()

    def test_import_repo_url(self):
        """Validate different type of repository URLs on importing a Project."""
        common_urls = [
            # Invalid
            ('./path/to/relative/folder', False),
            ('../../path/to/relative/folder', False),
            ('../../path/to/@/folder', False),
            ('/path/to/local/folder', False),
            ('/path/to/@/folder', False),
            ('file:///path/to/local/folder', False),
            ('file:///path/to/@/folder', False),
            ('github.com/humitos/foo', False),
            ('https://github.com/|/foo', False),
            ('git://github.com/&&/foo', False),
            # Valid
            ('git://github.com/humitos/foo', True),
            ('http://github.com/humitos/foo', True),
            ('https://github.com/humitos/foo', True),
            ('http://gitlab.com/humitos/foo', True),
            ('http://bitbucket.com/humitos/foo', True),
            ('ftp://ftpserver.com/humitos/foo', True),
            ('ftps://ftpserver.com/humitos/foo', True),
            ('lp:zaraza', True),
        ]

        # SSH-style URLs are only allowed when private repos are enabled.
        public_urls = [
            ('git@github.com:humitos/foo', False),
            ('ssh://git@github.com/humitos/foo', False),
            ('ssh+git://github.com/humitos/foo', False),
            ('strangeuser@bitbucket.org:strangeuser/readthedocs.git', False),
            ('user@one-ssh.domain.com:22/_ssh/docs', False),
        ] + common_urls

        private_urls = [
            ('git@github.com:humitos/foo', True),
            ('ssh://git@github.com/humitos/foo', True),
            ('ssh+git://github.com/humitos/foo', True),
            ('strangeuser@bitbucket.org:strangeuser/readthedocs.git', True),
            ('user@one-ssh.domain.com:22/_ssh/docs', True),
        ] + common_urls

        with override_settings(ALLOW_PRIVATE_REPOS=False):
            for url, valid in public_urls:
                initial = {
                    'name': 'foo',
                    'repo_type': 'git',
                    'repo': url,
                }
                form = ProjectBasicsForm(initial)
                self.assertEqual(form.is_valid(), valid, msg=url)

        with override_settings(ALLOW_PRIVATE_REPOS=True):
            for url, valid in private_urls:
                initial = {
                    'name': 'foo',
                    'repo_type': 'git',
                    'repo': url,
                }
                form = ProjectBasicsForm(initial)
                self.assertEqual(form.is_valid(), valid, msg=url)

    def test_empty_slug(self):
        """A name that slugifies to an empty string must be rejected."""
        initial = {
            'name': "''",
            'repo_type': 'git',
            'repo': 'https://github.com/user/repository',
        }
        form = ProjectBasicsForm(initial)
        self.assertFalse(form.is_valid())
        self.assertIn('name', form.errors)

    def test_changing_vcs_should_change_latest(self):
        """When changing the project's VCS, latest should be changed too."""
        project = get(Project, repo_type=REPO_TYPE_HG, default_branch=None)
        latest = project.versions.get(slug=LATEST)
        self.assertEqual(latest.identifier, 'default')

        form = ProjectBasicsForm(
            {
                'repo': 'http://github.com/test/test',
                'name': 'name',
                'repo_type': REPO_TYPE_GIT,
            },
            instance=project,
        )
        self.assertTrue(form.is_valid())
        form.save()
        latest.refresh_from_db()
        # hg's 'default' becomes git's 'master'.
        self.assertEqual(latest.identifier, 'master')

    def test_changing_vcs_should_not_change_latest_is_not_none(self):
        """
        When changing the project's VCS,
        we should respect the custom default branch.
        """
        project = get(Project, repo_type=REPO_TYPE_HG, default_branch='custom')
        latest = project.versions.get(slug=LATEST)
        self.assertEqual(latest.identifier, 'custom')

        form = ProjectBasicsForm(
            {
                'repo': 'http://github.com/test/test',
                'name': 'name',
                'repo_type': REPO_TYPE_GIT,
            },
            instance=project,
        )
        self.assertTrue(form.is_valid())
        form.save()
        latest.refresh_from_db()
        self.assertEqual(latest.identifier, 'custom')
class TestProjectAdvancedForm(TestCase):
    """Tests for the version choices offered by the advanced project form."""

    def setUp(self):
        # One project with a mix of active/inactive versions across all
        # privacy levels, to exercise the choice filtering below.
        self.project = get(Project)
        get(
            Version,
            project=self.project,
            slug='public-1',
            active=True,
            privacy_level=PUBLIC,
        )
        get(
            Version,
            project=self.project,
            slug='public-2',
            active=True,
            privacy_level=PUBLIC,
        )
        get(
            Version,
            project=self.project,
            slug='public-3',
            active=False,
            privacy_level=PROTECTED,
        )
        get(
            Version,
            project=self.project,
            slug='private',
            active=True,
            privacy_level=PRIVATE,
        )
        get(
            Version,
            project=self.project,
            slug='protected',
            active=True,
            privacy_level=PROTECTED,
        )

    def test_list_only_active_versions_on_default_version(self):
        """Inactive versions (public-3) must not be selectable as default."""
        form = ProjectAdvancedForm(instance=self.project)
        # This version is created automatically by the project on save
        self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
        self.assertEqual(
            set(
                slug
                for slug, _ in form.fields['default_version'].widget.choices
            ),
            {'latest', 'public-1', 'public-2', 'private', 'protected'},
        )

    def test_list_all_versions_on_default_branch(self):
        """The default-branch field offers every version, active or not."""
        form = ProjectAdvancedForm(instance=self.project)
        # This version is created automatically by the project on save
        self.assertTrue(self.project.versions.filter(slug=LATEST).exists())
        self.assertEqual(
            set(
                slug
                for slug, _ in form.fields['default_branch'].widget.choices
            ),
            {
                None, 'latest', 'public-1', 'public-2',
                'public-3', 'protected', 'private'
            },
        )
class TestTranslationForms(TestCase):
def setUp(self):
self.user_a = get(User)
self.project_a_es = self.get_project(lang='es', users=[self.user_a])
self.project_b_en = self.get_project(lang='en', users=[self.user_a])
self.project_c_br = self.get_project(lang='br', users=[self.user_a])
self.project_d_ar = self.get_project(lang='ar', users=[self.user_a])
self.project_e_en = self.get_project(lang='en', users=[self.user_a])
self.user_b = get(User)
self.project_f_ar = self.get_project(lang='ar', users=[self.user_b])
self.project_g_ga = self.get_project(lang='ga', users=[self.user_b])
self.project_s_fr = self.get_project(
lang='fr',
users=[self.user_b, self.user_a]
)
def get_project(self, lang, users, **kwargs):
return get(
Project, language=lang, users=users,
main_language_project=None, **kwargs
)
def test_list_only_owner_projects(self):
form = TranslationForm(
{'project': self.project_b_en.slug},
parent=self.project_a_es,
user=self.user_a,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_b_en,
self.project_c_br,
self.project_d_ar,
self.project_e_en,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects}
)
form = TranslationForm(
{'project': self.project_g_ga.slug},
parent=self.project_f_ar,
user=self.user_b,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_g_ga,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects}
)
def test_excludes_existing_translations(self):
self.project_a_es.translations.add(self.project_b_en)
self.project_a_es.translations.add(self.project_c_br)
self.project_a_es.save()
form = TranslationForm(
{'project': self.project_d_ar.slug},
parent=self.project_a_es,
user=self.user_a,
)
self.assertTrue(form.is_valid())
expected_projects = [
self.project_d_ar,
self.project_e_en,
self.project_s_fr,
]
self.assertEqual(
{proj_slug for proj_slug, _ in form.fields['project'].choices},
{project.slug for project in expected_projects}
)
def test_user_cant_add_other_user_project(self):
form = TranslationForm(
{'project': self.project_f_ar.slug},
parent=self.project_b_en,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'Select a valid choice',
''.join(form.errors['project'])
)
self.assertNotIn(
self.project_f_ar,
[proj_slug for proj_slug, _ in form.fields['project'].choices]
)
def test_user_cant_add_project_with_same_lang(self):
form = TranslationForm(
{'project': self.project_b_en.slug},
parent=self.project_e_en,
user=self.user_a,
)
self.assertFalse(form.is_valid())
self.assertIn(
'Both projects can not have the same language (English).',
''.join(form.errors['project'])
)
def test_user_cant_add_project_with_same_lang_of_other_translation(self):
    """A translation's language may not duplicate an existing sibling's."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.save()
    form = TranslationForm(
        {'project': self.project_e_en.slug},
        parent=self.project_a_es,
        user=self.user_a,
    )
    self.assertFalse(form.is_valid())
    error_text = ''.join(form.errors['project'])
    self.assertIn('This project already has a translation for English.',
                  error_text)
def test_no_nesting_translation(self):
    """A project that is already a translation can not be attached again."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.save()
    # project_b_en already belongs to project_a_es, so it is not even
    # offered as a choice for project_c_br.
    form = TranslationForm(
        {'project': self.project_b_en.slug},
        parent=self.project_c_br,
        user=self.user_a,
    )
    self.assertFalse(form.is_valid())
    self.assertIn('Select a valid choice', ''.join(form.errors['project']))
def test_no_nesting_translation_case_2(self):
    """A project that already has translations can not become a translation."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.save()
    form = TranslationForm(
        {'project': self.project_a_es.slug},
        parent=self.project_c_br,
        user=self.user_a,
    )
    self.assertFalse(form.is_valid())
    error_text = ''.join(form.errors['project'])
    self.assertIn('A project with existing translations can not', error_text)
def test_not_already_translation(self):
    """A project can not gain translations while being one itself."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.save()
    # project_b_en is a translation of project_a_es, so it can not act
    # as a parent for project_c_br.
    form = TranslationForm(
        {'project': self.project_c_br.slug},
        parent=self.project_b_en,
        user=self.user_a,
    )
    self.assertFalse(form.is_valid())
    self.assertIn('is already a translation', ''.join(form.errors['project']))
def test_cant_change_language_to_translation_lang(self):
    """Changing a project's language must not collide with its translation graph."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.translations.add(self.project_c_br)
    self.project_a_es.save()
    # (instance, target language) pairs that must all be rejected:
    # parent -> a translation's lang, translation -> the parent's lang,
    # translation -> a sibling translation's lang.
    cases = [
        (self.project_a_es, 'en'),
        (self.project_b_en, 'es'),
        (self.project_b_en, 'br'),
    ]
    for project, language in cases:
        form = UpdateProjectForm(
            {
                'documentation_type': 'sphinx',
                'language': language,
            },
            instance=project,
        )
        self.assertFalse(form.is_valid())
        self.assertIn(
            'There is already a "{}" translation'.format(language),
            ''.join(form.errors['language'])
        )
def test_can_change_language_to_self_lang(self):
    """Re-submitting a project's own language is always accepted."""
    self.project_a_es.translations.add(self.project_b_en)
    self.project_a_es.translations.add(self.project_c_br)
    self.project_a_es.save()
    # Both the parent and a translation keep their current language.
    for project, language in ((self.project_a_es, 'es'),
                              (self.project_b_en, 'en')):
        form = UpdateProjectForm(
            {
                'repo': 'https://github.com/test/test',
                'repo_type': project.repo_type,
                'name': project.name,
                'documentation_type': 'sphinx',
                'language': language,
            },
            instance=project,
        )
        self.assertTrue(form.is_valid())
class TestNotificationForm(TestCase):

    """Validation of the webhook and email notification forms."""

    def setUp(self):
        self.project = get(Project)

    def test_webhookform(self):
        """A valid URL creates exactly one webhook notification."""
        self.assertEqual(self.project.webhook_notifications.all().count(), 0)
        form = WebHookForm(data={'url': 'http://www.example.com/'},
                           project=self.project)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(self.project.webhook_notifications.all().count(), 1)

    def test_wrong_inputs_in_webhookform(self):
        """Empty or malformed URLs are rejected and create nothing."""
        self.assertEqual(self.project.webhook_notifications.all().count(), 0)
        for url, message in (('', 'This field is required.'),
                             ('wrong-url', 'Enter a valid URL.')):
            form = WebHookForm(data={'url': url}, project=self.project)
            self.assertFalse(form.is_valid())
            self.assertDictEqual(form.errors, {'url': [message]})
            self.assertEqual(
                self.project.webhook_notifications.all().count(), 0)

    def test_emailhookform(self):
        """A valid address creates exactly one email notification."""
        self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
        form = EmailHookForm(data={'email': 'test@email.com'},
                             project=self.project)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(self.project.emailhook_notifications.all().count(), 1)

    def test_wrong_inputs_in_emailhookform(self):
        """Malformed or empty addresses are rejected and create nothing."""
        self.assertEqual(self.project.emailhook_notifications.all().count(), 0)
        for email, message in (('wrong_email@', 'Enter a valid email address.'),
                               ('', 'This field is required.')):
            form = EmailHookForm(data={'email': email}, project=self.project)
            self.assertFalse(form.is_valid())
            self.assertDictEqual(form.errors, {'email': [message]})
            self.assertEqual(
                self.project.emailhook_notifications.all().count(), 0)
|
# Imports
import time
import cv2
import dlib
import playsound
import imutils
from imutils import face_utils
from imutils.video import VideoStream
import numpy as np
from scipy.spatial import distance
from threading import Thread
# Assignments
drowsy_frame_count = 0  # consecutive frames with EAR below the drowsiness threshold
wake_up = False  # True while the alarm thread should keep sounding
# Thresholds setter
def set_thresholds(eye_aspect_ratio, frame_threshold):
    # Store the detection thresholds as module globals read by
    # start_monitoring() on every frame:
    #   eye_aspect_ratio: EAR below which the eyes count as closed
    #   frame_threshold: consecutive "closed" frames required before alarming
    global drowsy_ear_threshold, drowsy_frame_threshold
    drowsy_ear_threshold = eye_aspect_ratio
    drowsy_frame_threshold = frame_threshold
# Alert driver on being drowsy
def alert_driver():
    # Runs on a background thread: loop the alarm sound for as long as the
    # monitoring loop keeps the global wake_up flag set.
    print "\tDrowsiness detected, waking the driver up.."
    while wake_up:
        playsound.playsound("audio/alert.mp3")
    print "\tDriver is now awake again!"
# Update driver status on screen
def update_driver_status(current_frame, status):
    # Draw the driver's status text ("Awake"/"Drowsy") next to the
    # "Driver status:" label rendered by the monitoring loop.
    # Fix: the original call ended with a dangling comma, silently omitting
    # the thickness argument (default 1); pass 2 explicitly to match the
    # companion cv2.putText calls in start_monitoring().
    cv2.putText(current_frame, status, (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 76, 0), 2)
# Compute eye's Eye Aspect Ratio (EAR)
def compute_ear(driver_eye):
    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|) over the six eye landmarks;
    # the ratio collapses towards zero as the eye closes.
    # Vertical eyelid distances - landmark pairs (2,6) and (3,5).
    upper_lower_1 = distance.euclidean(driver_eye[1], driver_eye[5])
    upper_lower_2 = distance.euclidean(driver_eye[2], driver_eye[4])
    # Horizontal eye-corner distance - landmark pair (1,4).
    corner_to_corner = distance.euclidean(driver_eye[0], driver_eye[3])
    return (upper_lower_1 + upper_lower_2) / (2.0 * corner_to_corner)
# Create frontal face detector
def create_detector():
    # Build dlib's frontal face detector once and expose it as a module
    # global consumed by start_monitoring().
    global detector
    detector = dlib.get_frontal_face_detector()
    print "=> Frontal face detector has been created successfully.\n"
# Load facial shape predictor
def load_predictor(predictor_path=None):
global predictor
# Default predictor loaded unless parameter passed
if predictor_path is None:
predictor_path = "predictors/face.dat"
try:
predictor = dlib.shape_predictor(predictor_path)
except:
print "=> Facial shape predictor creation failed"
exit()
print "=> Facial shape predictor has been loaded successfully.\n"
# Visualize eye
def visualize_eye(frame, left_eye, right_eye):
    # Outline each eye with the convex hull of its six landmark points.
    for eye_points in (left_eye, right_eye):
        hull = cv2.convexHull(eye_points)
        cv2.drawContours(frame, [hull], -1, (0, 255, 0), 1)
# Fetch eyes landmarks
def fetch_eye_landmarks():
global left_eye_start, left_eye_end, right_eye_start, right_eye_end
(left_eye_start, left_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"] # Left eye indexes
(right_eye_start, right_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"] # Right eye indexes
print "=> Eye landmarking indexes haveindexes been loaded successfully.\n\
Left eye start index = {}\n\tLeft eye end index = {}\n\tRight eye start index = {}\n\tRight eye end index = {}\n"\
.format(left_eye_start, left_eye_end, right_eye_start, right_eye_end)
# Dynamically compute drowsiness parameters | Optimization
def compute_drowsy_ear(spectacles=True):
    # Pick the EAR cut-off below which the driver counts as drowsy.
    # A lower cut-off is used when the driver wears spectacles.
    # (Automated spectacles detection is still to be implemented.)
    return 0.20 if spectacles else 0.25
# Start webcam monitoring
def start_monitoring():
print "=> Real-time monitoring has started..\n"
video_stream = VideoStream(0).start()
time.sleep(1.0)
global drowsy_frame_count, drowsy_ear_threshold, drowsy_frame_threshold, wake_up
detected_ear = 0
# Monitor indefinitely
while True:
# Pre-process frame
frame = imutils.resize(video_stream.read(), width=600)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect face(s) found in frame
detected_facial_areas = detector(gray_frame, 0)
cv2.putText(frame, "Driver status: ", (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 76, 0), 2)
# Detection starts here
for facial_area in detected_facial_areas:
# Predict facial features in the facial area
facial_shape = face_utils.shape_to_np(predictor(gray_frame, facial_area))
# Slice the eyes and compute their EAR (eye aspect ratio) values
detected_left_eye = facial_shape[left_eye_start:left_eye_end]
detected_right_eye = facial_shape[right_eye_start:right_eye_end]
left_eye_ear = compute_ear(detected_left_eye)
right_eye_ear = compute_ear(detected_right_eye)
# Compute the average EAR of the detected eyes
detected_ear = (left_eye_ear + right_eye_ear)/2.0
# Visualize the detected features
visualize_eye(frame, detected_left_eye,detected_right_eye)
# Check for drowsiness
if detected_ear < drowsy_ear_threshold: # Drowsy
# Count drowsy frames
drowsy_frame_count += 1
# Check if threshold exceeded
if drowsy_frame_count >= drowsy_frame_threshold:
if not wake_up:
wake_up = True
alarm_thread = Thread(target=alert_driver)
alarm_thread.deamon = True
alarm_thread.start()
# Display drowsiness alert
update_driver_status(frame, "Drowsy")
else: # Not drowsy
drowsy_frame_count = 0
wake_up = False
update_driver_status(frame, "Awake")
# Display EAR value (for tuning purposes)
cv2.putText(frame, "EAR value : {:.4f}".format(detected_ear), (25, 170), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (25, 111, 130), 2)
# Display output
cv2.imshow("Drowsiness Detector", frame)
# Exiting using ESC
pressed_key = cv2.waitKey(1) & 0xFF
if pressed_key == 27:
break
# Exit
cv2.destroyAllWindows()
video_stream.stop()
# Run
if __name__ == "__main__":
    # Normal EAR relative to the driver's posture, eye shape and eye spectacles detection
    drowsy_ear = compute_drowsy_ear(spectacles=True)
    # Greater EAR or lesser frame threshold = Greater sensitivity
    set_thresholds(eye_aspect_ratio = drowsy_ear, frame_threshold = 25)
    create_detector()  # Detector
    load_predictor(predictor_path="predictors/face.dat")  # Predictor
    fetch_eye_landmarks()  # Eye landmarks
    start_monitoring()  # Start monitoring
Update drowsiness thresholds: raise the EAR cut-offs (0.20 → 0.22 with spectacles, 0.25 → 0.28 without) and lower the consecutive-frame threshold from 25 to 20 for greater sensitivity.
# Imports
import time
import cv2
import dlib
import playsound
import imutils
from imutils import face_utils
from imutils.video import VideoStream
import numpy as np
from scipy.spatial import distance
from threading import Thread
# Assignments
drowsy_frame_count = 0  # consecutive frames with EAR below the drowsiness threshold
wake_up = False  # True while the alarm thread should keep sounding
# Thresholds setter
def set_thresholds(eye_aspect_ratio, frame_threshold):
    # Store the detection thresholds as module globals read by
    # start_monitoring() on every frame:
    #   eye_aspect_ratio: EAR below which the eyes count as closed
    #   frame_threshold: consecutive "closed" frames required before alarming
    global drowsy_ear_threshold, drowsy_frame_threshold
    drowsy_ear_threshold = eye_aspect_ratio
    drowsy_frame_threshold = frame_threshold
# Alert driver on being drowsy
def alert_driver():
    # Runs on a background thread: loop the alarm sound for as long as the
    # monitoring loop keeps the global wake_up flag set.
    print "\tDrowsiness detected, waking the driver up.."
    while wake_up:
        playsound.playsound("audio/alert.mp3")
    print "\tDriver is now awake again!"
# Update driver status on screen
def update_driver_status(current_frame, status):
    # Draw the driver's status text ("Awake"/"Drowsy") next to the
    # "Driver status:" label rendered by the monitoring loop.
    # Fix: the original call ended with a dangling comma, silently omitting
    # the thickness argument (default 1); pass 2 explicitly to match the
    # companion cv2.putText calls in start_monitoring().
    cv2.putText(current_frame, status, (60, 130), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 76, 0), 2)
# Compute eye's Eye Aspect Ratio (EAR)
def compute_ear(driver_eye):
    # EAR = (|p2-p6| + |p3-p5|) / (2 * |p1-p4|) over the six eye landmarks;
    # the ratio collapses towards zero as the eye closes.
    # Vertical eyelid distances - landmark pairs (2,6) and (3,5).
    upper_lower_1 = distance.euclidean(driver_eye[1], driver_eye[5])
    upper_lower_2 = distance.euclidean(driver_eye[2], driver_eye[4])
    # Horizontal eye-corner distance - landmark pair (1,4).
    corner_to_corner = distance.euclidean(driver_eye[0], driver_eye[3])
    return (upper_lower_1 + upper_lower_2) / (2.0 * corner_to_corner)
# Create frontal face detector
def create_detector():
    # Build dlib's frontal face detector once and expose it as a module
    # global consumed by start_monitoring().
    global detector
    detector = dlib.get_frontal_face_detector()
    print "=> Frontal face detector has been created successfully.\n"
# Load facial shape predictor
def load_predictor(predictor_path=None):
global predictor
# Default predictor loaded unless parameter passed
if predictor_path is None:
predictor_path = "predictors/face.dat"
try:
predictor = dlib.shape_predictor(predictor_path)
except:
print "=> Facial shape predictor creation failed"
exit()
print "=> Facial shape predictor has been loaded successfully.\n"
# Visualize eye
def visualize_eye(frame, left_eye, right_eye):
    # Outline each eye with the convex hull of its six landmark points.
    for eye_points in (left_eye, right_eye):
        hull = cv2.convexHull(eye_points)
        cv2.drawContours(frame, [hull], -1, (0, 255, 0), 1)
# Fetch eyes landmarks
def fetch_eye_landmarks():
global left_eye_start, left_eye_end, right_eye_start, right_eye_end
(left_eye_start, left_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["left_eye"] # Left eye indexes
(right_eye_start, right_eye_end) = face_utils.FACIAL_LANDMARKS_IDXS["right_eye"] # Right eye indexes
print "=> Eye landmarking indexes haveindexes been loaded successfully.\n\
Left eye start index = {}\n\tLeft eye end index = {}\n\tRight eye start index = {}\n\tRight eye end index = {}\n"\
.format(left_eye_start, left_eye_end, right_eye_start, right_eye_end)
# Dynamically compute drowsiness parameters | Optimization
def compute_drowsy_ear(spectacles=True):
    # Pick the EAR cut-off below which the driver counts as drowsy.
    # A lower cut-off is used when the driver wears spectacles.
    # (Automated spectacles detection is still to be implemented.)
    return 0.22 if spectacles else 0.28
# Start webcam monitoring
def start_monitoring():
print "=> Real-time monitoring has started..\n"
video_stream = VideoStream(0).start()
time.sleep(1.0)
global drowsy_frame_count, drowsy_ear_threshold, drowsy_frame_threshold, wake_up
detected_ear = 0
# Monitor indefinitely
while True:
# Pre-process frame
frame = imutils.resize(video_stream.read(), width=600)
gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Detect face(s) found in frame
detected_facial_areas = detector(gray_frame, 0)
cv2.putText(frame, "Driver status: ", (20, 100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (153, 76, 0), 2)
# Detection starts here
for facial_area in detected_facial_areas:
# Predict facial features in the facial area
facial_shape = face_utils.shape_to_np(predictor(gray_frame, facial_area))
# Slice the eyes and compute their EAR (eye aspect ratio) values
detected_left_eye = facial_shape[left_eye_start:left_eye_end]
detected_right_eye = facial_shape[right_eye_start:right_eye_end]
left_eye_ear = compute_ear(detected_left_eye)
right_eye_ear = compute_ear(detected_right_eye)
# Compute the average EAR of the detected eyes
detected_ear = (left_eye_ear + right_eye_ear)/2.0
# Visualize the detected features
visualize_eye(frame, detected_left_eye,detected_right_eye)
# Check for drowsiness
if detected_ear < drowsy_ear_threshold: # Drowsy
# Count drowsy frames
drowsy_frame_count += 1
# Check if threshold exceeded
if drowsy_frame_count >= drowsy_frame_threshold:
if not wake_up:
wake_up = True
alarm_thread = Thread(target=alert_driver)
alarm_thread.deamon = True
alarm_thread.start()
# Display drowsiness alert
update_driver_status(frame, "Drowsy")
else: # Not drowsy
drowsy_frame_count = 0
wake_up = False
update_driver_status(frame, "Awake")
# Display EAR value (for tuning purposes)
cv2.putText(frame, "EAR value : {:.4f}".format(detected_ear), (25, 170), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (25, 111, 130), 2)
# Display output
cv2.imshow("Drowsiness Detector", frame)
# Exiting using ESC
pressed_key = cv2.waitKey(1) & 0xFF
if pressed_key == 27:
break
# Exit
cv2.destroyAllWindows()
video_stream.stop()
# Run
if __name__ == "__main__":
    # Normal EAR relative to the driver's posture, eye shape and eye spectacles detection
    drowsy_ear = compute_drowsy_ear(spectacles=True)
    # Greater EAR or lesser frame threshold = Greater sensitivity
    set_thresholds(eye_aspect_ratio = drowsy_ear, frame_threshold = 20)
    create_detector()  # Detector
    load_predictor(predictor_path="predictors/face.dat")  # Predictor
    fetch_eye_landmarks()  # Eye landmarks
    start_monitoring()  # Start monitoring
from collections import namedtuple
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
# Result container returned by `BinomTestResult.proportion_ci`.
ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high'])
class BinomTestResult:
    """
    Result of `scipy.stats.binomtest`.

    Attributes
    ----------
    k : int
        The number of successes (copied from `binomtest` input).
    n : int
        The number of trials (copied from `binomtest` input).
    alternative : str
        Indicates the alternative hypothesis specified in the input
        to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
        or ``'less'``.
    pvalue : float
        The p-value of the hypothesis test.
    proportion_estimate : float
        The estimate of the proportion of successes.
    """
    def __init__(self, k, n, alternative, pvalue, proportion_estimate):
        # Plain record of the test inputs and results; `proportion_ci`
        # derives confidence intervals on demand.
        self.k = k
        self.n = n
        self.alternative = alternative
        self.proportion_estimate = proportion_estimate
        self.pvalue = pvalue

    def __repr__(self):
        parts = (f"k={self.k}",
                 f"n={self.n}",
                 f"alternative={self.alternative!r}",
                 f"proportion_estimate={self.proportion_estimate}",
                 f"pvalue={self.pvalue}")
        return "BinomTestResult(" + ", ".join(parts) + ")"

    def proportion_ci(self, confidence_level=0.95, method='exact'):
        """
        Compute the confidence interval for the estimated proportion.

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated proportion. Default is 0.95.
        method : {'exact', 'wilson', 'wilsoncc'}, optional
            Selects the method used to compute the confidence interval
            for the estimate of the proportion:

            'exact' :
                Use the Clopper-Pearson exact method [1]_.
            'wilson' :
                Wilson's method, without continuity correction ([2]_, [3]_).
            'wilsoncc' :
                Wilson's method, with continuity correction ([2]_, [3]_).

            Default is ``'exact'``.

        Returns
        -------
        ci : namedtuple with fields ``low`` and ``high``
            Contains the lower and upper bounds of the confidence interval.

        References
        ----------
        .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
               fiducial limits illustrated in the case of the binomial,
               Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
        .. [2] E. B. Wilson, Probable inference, the law of succession, and
               statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
               (1927).
        .. [3] Robert G. Newcombe, Two-sided confidence intervals for the
               single proportion: comparison of seven methods, Statistics
               in Medicine, 17, pp 857-872 (1998).

        Examples
        --------
        >>> from scipy.stats import binomtest
        >>> result = binomtest(k=7, n=50, p=0.1)
        >>> result.proportion_estimate
        0.14
        >>> result.proportion_ci()
        ConfidenceInterval(low=0.05819170033997341, high=0.2673960024970084)
        """
        if method not in ('exact', 'wilson', 'wilsoncc'):
            raise ValueError("method must be one of 'exact', 'wilson' or "
                             "'wilsoncc'.")
        if not (0 <= confidence_level <= 1):
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')
        if method == 'exact':
            bounds = _binom_exact_conf_int(self.k, self.n, confidence_level,
                                           self.alternative)
        else:
            # method is 'wilson' or 'wilsoncc'
            bounds = _binom_wilson_conf_int(self.k, self.n, confidence_level,
                                            self.alternative,
                                            correction=(method == 'wilsoncc'))
        return ConfidenceInterval(*bounds)
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
Compute the estimate and confidence interval for the binomial test.
Returns proportion, prop_low, prop_high
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = brentq(lambda p: binom.sf(k-1, n, p) - alpha, 0, 1)
if k == n:
phigh = 1.0
else:
phigh = brentq(lambda p: binom.cdf(k, n, p) - alpha, 0, 1)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = brentq(lambda p: binom.cdf(k, n, p) - alpha, 0, 1)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = brentq(lambda p: binom.sf(k-1, n, p) - alpha, 0, 1)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
t = 1 + z**2/n
r = (p + z**2/(2*n)) / t
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = ((z * sqrt(z**2 - 1/n + 4*n*p*(1 - p) + (4*p - 2)) + 1) /
(2*n*t))
lo = r - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = ((z * sqrt(z**2 - 1/n + 4*n*p*(1 - p) - (4*p - 2)) + 1) /
(2*n*t))
hi = r + dhi
else:
d = z/t * sqrt(p*(1-p)/n + (z/(2*n))**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = r - d
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = r + d
return lo, hi
def binomtest(k, n, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    The binomial test [1]_ is a test of the null hypothesis that the
    probability of success in a Bernoulli experiment is `p`.

    Details of the test can be found in many texts on statistics, such
    as section 24.5 of [2]_.

    Parameters
    ----------
    k : int
        The number of successes.
    n : int
        The number of trials.
    p : float, optional
        The hypothesized probability of success, i.e. the expected
        proportion of successes.  The value must be in the interval
        ``0 <= p <= 1``. The default value is ``p = 0.5``.
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default value is
        'two-sided'.

    Returns
    -------
    result : `BinomTestResult` instance
        The return value is an object with the following attributes:

        k : int
            The number of successes (copied from `binomtest` input).
        n : int
            The number of trials (copied from `binomtest` input).
        alternative : str
            Indicates the alternative hypothesis specified in the input
            to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
            or ``'less'``.
        pvalue : float
            The p-value of the hypothesis test.
        proportion_estimate : float
            The estimate of the proportion of successes.

        The object has the following methods:

        proportion_ci(confidence_level=0.95, method='exact') :
            Compute the confidence interval for ``proportion_estimate``.

    References
    ----------
    .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
    .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
           Prentice Hall, Upper Saddle River, New Jersey USA (2010)

    Examples
    --------
    >>> from scipy.stats import binomtest

    A car manufacturer claims that no more than 10% of their cars are unsafe.
    15 cars are inspected for safety, 3 were found to be unsafe. Test the
    manufacturer's claim:

    >>> result = binomtest(3, n=15, p=0.1, alternative='greater')
    >>> result.pvalue
    0.18406106910639114

    The null hypothesis cannot be rejected at the 5% level of significance
    because the returned p-value is greater than the critical value of 5%.

    The estimated proportion is simply ``3/15``:

    >>> result.proportion_estimate
    0.2

    We can use the `proportion_ci()` method of the result to compute the
    confidence interval of the estimate:

    >>> result.proportion_ci(confidence_level=0.95)
    ConfidenceInterval(low=0.056846867590246826, high=1.0)
    """
    k = _validate_int(k, 'k', minimum=0)
    n = _validate_int(n, 'n', minimum=1)
    if k > n:
        raise ValueError('k must not be greater than n.')
    if not (0 <= p <= 1):
        raise ValueError("p must be in range [0,1]")
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized; \n"
                         "must be 'two-sided', 'less' or 'greater'")
    if alternative == 'less':
        pval = binom.cdf(k, n, p)
    elif alternative == 'greater':
        pval = binom.sf(k-1, n, p)
    else:
        # alternative is 'two-sided'
        # The two-sided p-value sums P(X = i) over every outcome i whose
        # probability does not exceed that of the observed k; `rerr` is a
        # small relative tolerance absorbing floating-point noise in the
        # pmf comparisons.
        d = binom.pmf(k, n, p)
        rerr = 1 + 1e-7
        if k == p * n:
            # special case as shortcut, would also be handled by `else` below
            pval = 1.
        elif k < p * n:
            # Observed count below the mean: count the upper-tail outcomes
            # no more probable than k, then add both tail masses.
            i = np.arange(np.ceil(p * n), n+1)
            y = np.sum(binom.pmf(i, n, p) <= d*rerr, axis=0)
            pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
        else:
            # Observed count above the mean: mirror image of the case above.
            i = np.arange(np.floor(p*n) + 1)
            y = np.sum(binom.pmf(i, n, p) <= d*rerr, axis=0)
            pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
        pval = min(1.0, pval)  # guard against rounding pushing past 1
    result = BinomTestResult(k=k, n=n, alternative=alternative,
                             proportion_estimate=k/n, pvalue=pval)
    return result
Clarify the docstring of `p` in `binomtest`: describe it as the hypothesized probability of success, i.e. the expected proportion of successes.
from collections import namedtuple
from math import sqrt
import numpy as np
from scipy._lib._util import _validate_int
from scipy.optimize import brentq
from scipy.special import ndtri
from ._discrete_distns import binom
# Result container returned by `BinomTestResult.proportion_ci`.
ConfidenceInterval = namedtuple('ConfidenceInterval', ['low', 'high'])
class BinomTestResult:
    """
    Result of `scipy.stats.binomtest`.

    Attributes
    ----------
    k : int
        The number of successes (copied from `binomtest` input).
    n : int
        The number of trials (copied from `binomtest` input).
    alternative : str
        Indicates the alternative hypothesis specified in the input
        to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
        or ``'less'``.
    pvalue : float
        The p-value of the hypothesis test.
    proportion_estimate : float
        The estimate of the proportion of successes.
    """
    def __init__(self, k, n, alternative, pvalue, proportion_estimate):
        # Plain record of the test inputs and results; `proportion_ci`
        # derives confidence intervals on demand.
        self.k = k
        self.n = n
        self.alternative = alternative
        self.proportion_estimate = proportion_estimate
        self.pvalue = pvalue

    def __repr__(self):
        parts = (f"k={self.k}",
                 f"n={self.n}",
                 f"alternative={self.alternative!r}",
                 f"proportion_estimate={self.proportion_estimate}",
                 f"pvalue={self.pvalue}")
        return "BinomTestResult(" + ", ".join(parts) + ")"

    def proportion_ci(self, confidence_level=0.95, method='exact'):
        """
        Compute the confidence interval for the estimated proportion.

        Parameters
        ----------
        confidence_level : float, optional
            Confidence level for the computed confidence interval
            of the estimated proportion. Default is 0.95.
        method : {'exact', 'wilson', 'wilsoncc'}, optional
            Selects the method used to compute the confidence interval
            for the estimate of the proportion:

            'exact' :
                Use the Clopper-Pearson exact method [1]_.
            'wilson' :
                Wilson's method, without continuity correction ([2]_, [3]_).
            'wilsoncc' :
                Wilson's method, with continuity correction ([2]_, [3]_).

            Default is ``'exact'``.

        Returns
        -------
        ci : namedtuple with fields ``low`` and ``high``
            Contains the lower and upper bounds of the confidence interval.

        References
        ----------
        .. [1] C. J. Clopper and E. S. Pearson, The use of confidence or
               fiducial limits illustrated in the case of the binomial,
               Biometrika, Vol. 26, No. 4, pp 404-413 (Dec. 1934).
        .. [2] E. B. Wilson, Probable inference, the law of succession, and
               statistical inference, J. Amer. Stat. Assoc., 22, pp 209-212
               (1927).
        .. [3] Robert G. Newcombe, Two-sided confidence intervals for the
               single proportion: comparison of seven methods, Statistics
               in Medicine, 17, pp 857-872 (1998).

        Examples
        --------
        >>> from scipy.stats import binomtest
        >>> result = binomtest(k=7, n=50, p=0.1)
        >>> result.proportion_estimate
        0.14
        >>> result.proportion_ci()
        ConfidenceInterval(low=0.05819170033997341, high=0.2673960024970084)
        """
        if method not in ('exact', 'wilson', 'wilsoncc'):
            raise ValueError("method must be one of 'exact', 'wilson' or "
                             "'wilsoncc'.")
        if not (0 <= confidence_level <= 1):
            raise ValueError('confidence_level must be in the interval '
                             '[0, 1].')
        if method == 'exact':
            bounds = _binom_exact_conf_int(self.k, self.n, confidence_level,
                                           self.alternative)
        else:
            # method is 'wilson' or 'wilsoncc'
            bounds = _binom_wilson_conf_int(self.k, self.n, confidence_level,
                                            self.alternative,
                                            correction=(method == 'wilsoncc'))
        return ConfidenceInterval(*bounds)
def _binom_exact_conf_int(k, n, confidence_level, alternative):
"""
Compute the estimate and confidence interval for the binomial test.
Returns proportion, prop_low, prop_high
"""
if alternative == 'two-sided':
alpha = (1 - confidence_level) / 2
if k == 0:
plow = 0.0
else:
plow = brentq(lambda p: binom.sf(k-1, n, p) - alpha, 0, 1)
if k == n:
phigh = 1.0
else:
phigh = brentq(lambda p: binom.cdf(k, n, p) - alpha, 0, 1)
elif alternative == 'less':
alpha = 1 - confidence_level
plow = 0.0
if k == n:
phigh = 1.0
else:
phigh = brentq(lambda p: binom.cdf(k, n, p) - alpha, 0, 1)
elif alternative == 'greater':
alpha = 1 - confidence_level
if k == 0:
plow = 0.0
else:
plow = brentq(lambda p: binom.sf(k-1, n, p) - alpha, 0, 1)
phigh = 1.0
return plow, phigh
def _binom_wilson_conf_int(k, n, confidence_level, alternative, correction):
# This function assumes that the arguments have already been validated.
# In particular, `alternative` must be one of 'two-sided', 'less' or
# 'greater'.
p = k / n
if alternative == 'two-sided':
z = ndtri(0.5 + 0.5*confidence_level)
else:
z = ndtri(confidence_level)
t = 1 + z**2/n
r = (p + z**2/(2*n)) / t
if correction:
if alternative == 'less' or k == 0:
lo = 0.0
else:
dlo = ((z * sqrt(z**2 - 1/n + 4*n*p*(1 - p) + (4*p - 2)) + 1) /
(2*n*t))
lo = r - dlo
if alternative == 'greater' or k == n:
hi = 1.0
else:
dhi = ((z * sqrt(z**2 - 1/n + 4*n*p*(1 - p) - (4*p - 2)) + 1) /
(2*n*t))
hi = r + dhi
else:
d = z/t * sqrt(p*(1-p)/n + (z/(2*n))**2)
if alternative == 'less' or k == 0:
lo = 0.0
else:
lo = r - d
if alternative == 'greater' or k == n:
hi = 1.0
else:
hi = r + d
return lo, hi
def binomtest(k, n, p=0.5, alternative='two-sided'):
    """
    Perform a test that the probability of success is p.

    The binomial test [1]_ is a test of the null hypothesis that the
    probability of success in a Bernoulli experiment is `p`.

    Details of the test can be found in many texts on statistics, such
    as section 24.5 of [2]_.

    Parameters
    ----------
    k : int
        The number of successes.
    n : int
        The number of trials.
    p : float, optional
        The hypothesized probability of success, i.e. the expected
        proportion of successes.  The value must be in the interval
        ``0 <= p <= 1``. The default value is ``p = 0.5``.
    alternative : {'two-sided', 'greater', 'less'}, optional
        Indicates the alternative hypothesis. The default value is
        'two-sided'.

    Returns
    -------
    result : `BinomTestResult` instance
        The return value is an object with the following attributes:

        k : int
            The number of successes (copied from `binomtest` input).
        n : int
            The number of trials (copied from `binomtest` input).
        alternative : str
            Indicates the alternative hypothesis specified in the input
            to `binomtest`. It will be one of ``'two-sided'``, ``'greater'``,
            or ``'less'``.
        pvalue : float
            The p-value of the hypothesis test.
        proportion_estimate : float
            The estimate of the proportion of successes.

        The object has the following methods:

        proportion_ci(confidence_level=0.95, method='exact') :
            Compute the confidence interval for ``proportion_estimate``.

    References
    ----------
    .. [1] Binomial test, https://en.wikipedia.org/wiki/Binomial_test
    .. [2] Jerrold H. Zar, Biostatistical Analysis (fifth edition),
           Prentice Hall, Upper Saddle River, New Jersey USA (2010)

    Examples
    --------
    >>> from scipy.stats import binomtest

    A car manufacturer claims that no more than 10% of their cars are unsafe.
    15 cars are inspected for safety, 3 were found to be unsafe. Test the
    manufacturer's claim:

    >>> result = binomtest(3, n=15, p=0.1, alternative='greater')
    >>> result.pvalue
    0.18406106910639114

    The null hypothesis cannot be rejected at the 5% level of significance
    because the returned p-value is greater than the critical value of 5%.

    The estimated proportion is simply ``3/15``:

    >>> result.proportion_estimate
    0.2

    We can use the `proportion_ci()` method of the result to compute the
    confidence interval of the estimate:

    >>> result.proportion_ci(confidence_level=0.95)
    ConfidenceInterval(low=0.056846867590246826, high=1.0)
    """
    k = _validate_int(k, 'k', minimum=0)
    n = _validate_int(n, 'n', minimum=1)
    if k > n:
        raise ValueError('k must not be greater than n.')
    if not (0 <= p <= 1):
        raise ValueError("p must be in range [0,1]")
    if alternative not in ('two-sided', 'less', 'greater'):
        raise ValueError("alternative not recognized; \n"
                         "must be 'two-sided', 'less' or 'greater'")
    if alternative == 'less':
        pval = binom.cdf(k, n, p)
    elif alternative == 'greater':
        pval = binom.sf(k-1, n, p)
    else:
        # alternative is 'two-sided': sum P(X = i) over every outcome i that
        # is no more probable than the observed k, using a small relative
        # tolerance `rerr` to absorb floating-point noise in the comparisons.
        d = binom.pmf(k, n, p)
        rerr = 1 + 1e-7
        expectation = p * n
        if k == expectation:
            # special case as shortcut, would also be handled by `else` below
            pval = 1.
        elif k < expectation:
            # Observed count below the mean: count the upper-tail outcomes
            # no more probable than k, then add both tail masses.
            i = np.arange(np.ceil(expectation), n+1)
            y = np.sum(binom.pmf(i, n, p) <= d*rerr, axis=0)
            pval = binom.cdf(k, n, p) + binom.sf(n - y, n, p)
        else:
            # Observed count above the mean: mirror image of the case above.
            i = np.arange(np.floor(expectation) + 1)
            y = np.sum(binom.pmf(i, n, p) <= d*rerr, axis=0)
            pval = binom.cdf(y-1, n, p) + binom.sf(k-1, n, p)
        pval = min(1.0, pval)  # guard against rounding pushing past 1
    return BinomTestResult(k=k, n=n, alternative=alternative,
                           proportion_estimate=k/n, pvalue=pval)
|
from contentbase import (
AuditFailure,
audit_checker,
)
from .conditions import (
rfa,
)
import datetime
# Statuses considered "current" vs. retired; used by audits in this module.
current_statuses = ['released', 'in progress']
not_current_statuses = ['revoked', 'obsolete', 'deleted']
# File formats that hold raw (unprocessed) sequencer/array output; such
# files are expected to carry platform information.
raw_data_formats = [
    'fastq',
    'csfasta',
    'csqual',
    'rcc',
    'idat',
    'CEL',
]
# Assays whose sequencing is inherently paired-end.
paired_end_assays = [
    'RNA-PET',
    'ChIA-PET',
    'DNA-PET',
]
# ChIP-seq targets (histone marks) that produce broad peaks and therefore
# are held to the higher "broad" read-depth threshold in audit_file_read_depth.
broadPeaksTargets = [
    'H3K4me1-mouse',
    'H3K36me3-mouse',
    'H3K79me2-mouse',
    'H3K27me3-mouse',
    'H3K9me1-mouse',
    'H3K9me3-mouse',
    'H3K4me1-human',
    'H3K36me3-human',
    'H3K79me2-human',
    'H3K27me3-human',
    'H3K9me1-human',
    'H3K9me3-human',
    'H3F3A-human',
    'H4K20me1-human',
    'H3K79me3-human',
    'H3K79me3-mouse',
]
@audit_checker('file', frame=['replicate', 'replicate.experiment',
                              'derived_from', 'derived_from.replicate',
                              'derived_from.replicate.experiment'])
def audit_file_biological_replicate_number_match(value, system):
    """Flag a file whose (biological, technical) replicate numbers disagree
    with a same-experiment file it was derived from."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'replicate' not in value:
        return
    if 'derived_from' not in value or len(value['derived_from']) == 0:
        return

    replicate = value['replicate']
    file_replicate = (replicate['biological_replicate_number'],
                      replicate['technical_replicate_number'])
    file_exp_accession = replicate['experiment']['accession']

    for parent in value['derived_from']:
        if 'replicate' not in parent:
            continue
        # Control files may belong to a different experiment; skip them.
        if parent['replicate']['experiment']['accession'] != file_exp_accession:
            continue
        parent_replicate = (parent['replicate']['biological_replicate_number'],
                            parent['replicate']['technical_replicate_number'])
        if file_replicate != parent_replicate:
            detail = 'Biological replicate number of the file {} '.format(value['@id']) + \
                     'is {}'.format(file_replicate) + \
                     ', it is inconsistent with the biological replicate number ' + \
                     '{} of the file {} it was derived from'.format(parent_replicate,
                                                                    parent['@id'])
            raise AuditFailure('inconsistent biological replicate number',
                               detail, level='ERROR')
@audit_checker('file', frame=['replicate', 'dataset', 'replicate.experiment'])
def audit_file_replicate_match(value, system):
    """A file's replicate must point at the same experiment as the file's
    dataset; the two tend to drift apart when objects are replaced."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'replicate' not in value:
        return
    # Compare by uuid; mismatch means the replicate lives in another experiment.
    if value['replicate']['experiment']['uuid'] == value['dataset']['uuid']:
        return
    detail = 'File {} has a replicate {} in experiment {}'.format(
        value['@id'],
        value['replicate']['@id'],
        value['replicate']['experiment']['@id'])
    raise AuditFailure('mismatched replicate', detail, level='ERROR')
@audit_checker('file', frame='object', condition=rfa('ENCODE3', 'modERN', 'ENCODE2', 'ENCODE2-Mouse'))
def audit_file_platform(value, system):
    """Raw data files must specify the platform they came from; this should
    eventually be enforced by the schema."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] not in raw_data_formats:
        return
    if 'platform' in value:
        return
    detail = 'Raw data file {} missing platform information'.format(value['@id'])
    raise AuditFailure('missing platform', detail, level='NOT_COMPLIANT')
@audit_checker('file', frame='object', condition=rfa('ENCODE3', 'modERN',
                                                     'ENCODE2', 'ENCODE2-Mouse'))
def audit_file_read_length(value, system):
    """Reads files must carry a read_length; lengths under 50bp are flagged,
    with severity depending on whether the file predates 2015-06-30."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['output_type'] != 'reads':
        return
    if 'read_length' not in value:
        detail = 'Reads file {} missing read_length'.format(value['@id'])
        yield AuditFailure('missing read_length', detail, level='DCC_ACTION')
        return

    read_length = value['read_length']
    if read_length >= 50:
        return

    # date_created starts with an ISO date (YYYY-MM-DD...).
    year, month, day = (int(part) for part in value['date_created'][:10].split('-'))
    file_date_creation = datetime.date(year, month, day)
    threshold_date = datetime.date(2015, 6, 30)

    detail = 'Fastq file {} '.format(value['@id']) + \
             'has read length of {}bp.'.format(read_length) + \
             ' It is not compliant with ENCODE3 standards.' + \
             'According to ENCODE3 standards files submitted after 2015-6-30 ' + \
             'should be at least 50bp long.'
    # Pre-threshold files get a gentler warning than post-threshold ones.
    if file_date_creation < threshold_date:
        yield AuditFailure('insufficient read length', detail, level='WARNING')
    else:
        yield AuditFailure('insufficient read length', detail, level='NOT_COMPLIANT')
@audit_checker('file',
               frame=['dataset', 'dataset.target', 'controlled_by',
                      'controlled_by.dataset'],
               condition=rfa('ENCODE2', 'ENCODE2-Mouse', 'ENCODE3', 'modERN'))
def audit_file_controlled_by(value, system):
    '''
    A fastq in a ChIP-seq experiment should have a controlled_by
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['dataset'].get('assay_term_name') not in ['ChIP-seq', 'RAMPAGE', 'CAGE', 'shRNA knockdown followed by RNA-seq']:
        return
    # Control experiments themselves do not need a control.
    if 'target' in value['dataset'] and 'control' in value['dataset']['target'].get('investigated_as', []):
        return
    # Normalize so the emptiness check below works for a missing key too.
    if 'controlled_by' not in value:
        value['controlled_by'] = []
    if (value['controlled_by'] == []) and (value['file_format'] in ['fastq']):
        detail = 'Fastq file {} from {} requires controlled_by'.format(
            value['@id'],
            value['dataset']['assay_term_name']
        )
        yield AuditFailure('missing controlled_by', detail, level='NOT_COMPLIANT')
        return
    possible_controls = value['dataset'].get('possible_controls')
    biosample = value['dataset'].get('biosample_term_id')
    run_type = value.get('run_type', None)
    read_length = value.get('read_length', None)
    if value['controlled_by']:
        for ff in value['controlled_by']:
            control_bs = ff['dataset'].get('biosample_term_id')
            control_run = ff.get('run_type', None)
            control_length = ff.get('read_length', None)
            # Hard mismatches (biosample / format / possible_controls) stop
            # the audit at the first offending control file.
            if control_bs != biosample:
                detail = 'File {} has a controlled_by file {} with conflicting biosample {}'.format(
                    value['@id'],
                    ff['@id'],
                    control_bs)
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            if ff['file_format'] != value['file_format']:
                detail = 'File {} with file_format {} has a controlled_by file {} with file_format {}'.format(
                    value['@id'],
                    value['file_format'],
                    ff['@id'],
                    ff['file_format']
                )
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            if (possible_controls is None) or (ff['dataset']['@id'] not in possible_controls):
                detail = 'File {} has a controlled_by file {} with a dataset {} that is not in possible_controls'.format(
                    value['@id'],
                    ff['@id'],
                    ff['dataset']['@id']
                )
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            # run_type / read_length checks only apply when both sides have
            # the property; otherwise move on to the next control file.
            if (run_type is None) or (control_run is None):
                continue
            if (read_length is None) or (control_length is None):
                continue
            if run_type != control_run:
                detail = 'File {} is {} but its control file {} is {}'.format(
                    value['@id'],
                    run_type,
                    ff['@id'],
                    control_run
                )
                yield AuditFailure('mismatched controlled_by run_type', detail, level='WARNING')
            if read_length != control_length:
                detail = 'File {} is {} but its control file {} is {}'.format(
                    value['@id'],
                    value['read_length'],
                    ff['@id'],
                    ff['read_length']
                )
                yield AuditFailure('mismatched controlled_by read length', detail, level='WARNING')
                return
@audit_checker('file', frame='object', condition=rfa('modERN', 'GGR'))
def audit_file_flowcells(value, system):
    """Fastq files ought to record their flowcell details (modERN/GGR only)."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] not in ['fastq']:
        return
    # Present and non-empty is fine; anything else gets flagged.
    if 'flowcell_details' in value and value['flowcell_details'] != []:
        return
    detail = 'Fastq file {} is missing flowcell_details'.format(value['@id'])
    raise AuditFailure('missing flowcell_details', detail, level='WARNING')
@audit_checker('file', frame='object',)
def audit_run_type(value, system):
    """Fastq files must declare a run_type; this was intended to be a schema
    dependency but never became one."""
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] not in ['fastq']:
        return
    if 'run_type' in value:
        return
    detail = 'File {} has file_format {}. It requires a value for run_type'.format(
        value['@id'],
        value['file_format'])
    raise AuditFailure('missing run_type', detail, level='NOT_COMPLIANT')
@audit_checker('file', frame=['paired_with'],)
def audit_paired_with(value, system):
    '''
    A file with a paired_end needs a paired_with.
    Should be handled in the schema.
    A paired_with should be the same replicate
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'paired_end' not in value:
        return
    if 'paired_with' not in value:
        detail = 'File {} has paired_end = {}. It requires a paired file'.format(
            value['@id'],
            value['paired_end'])
        raise AuditFailure('missing paired_with', detail, level='NOT_COMPLIANT')
    # If the mate carries no replicate there is nothing left to compare.
    if 'replicate' not in value['paired_with']:
        return
    if 'replicate' not in value:
        detail = 'File {} has paired_end = {}. It requires a replicate'.format(
            value['@id'],
            value['paired_end'])
        raise AuditFailure('missing replicate', detail, level='DCC_ACTION')
    if value['replicate'] != value['paired_with']['replicate']:
        detail = 'File {} has replicate {}. It is paired_with file {} with replicate {}'.format(
            value['@id'],
            value.get('replicate'),
            value['paired_with']['@id'],
            value['paired_with'].get('replicate'))
        raise AuditFailure('mismatched paired_with', detail, level='ERROR')
    if value['paired_end'] == '1':
        # paired_with is a reverse link: collect every paired_end=2 file that
        # points back at this read-1 file; at most one is allowed.
        context = system['context']
        paired_with = context.get_rev_links('paired_with')
        if len(paired_with) > 1:
            detail = 'Paired end 1 file {} paired_with by multiple paired end 2 files: {!r}'.format(
                value['@id'],
                paired_with,
            )
            raise AuditFailure('multiple paired_with', detail, level='ERROR')
@audit_checker('file', frame=['step_run',
                              'dataset'], condition=rfa('modERN'))
def audit_modERN_ChIP_pipeline_steps(value, system):
    '''
    Check that modERN ChIP-seq (OBI:0000716) processed files are linked to the
    expected pipeline step_run for their file_format / output_type.  Yields a
    WARNING AuditFailure for each file linked to the wrong step_run.
    '''
    expt = value['dataset']
    if 'Experiment' not in expt['@type']:
        return
    if expt['assay_term_id'] != 'OBI:0000716':
        return
    # Raw fastqs are not pipeline products, so they have no step_run to check.
    if value['file_format'] == 'fastq':
        return
    if 'step_run' not in value:
        detail = 'File {} is missing a step_run'.format(value['@id'])
        yield AuditFailure('missing step_run', detail, level='WARNING')
        return
    if (value['file_format'] != 'fastq') and ('derived_from' not in value):
        detail = 'File {} is missing its derived_from'.format(value['@id'])
        yield AuditFailure('missing derived_from', detail, level='WARNING')
    step = value['step_run']
    if (value['file_format'] == 'bam') and step['aliases'][0] != 'modern:chip-seq-bwa-alignment-step-run-v-1-virtual':
        detail = 'Bam {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
        yield AuditFailure('wrong step_run ChIP-seq bam', detail, level='WARNING')
    # BUG FIX: the original conditions were written as
    #     if not ((alias != X) or (alias != Y)):
    # which is always False whenever X != Y, so these four audits could never
    # fire.  The intent is to flag an alias that matches NEITHER accepted value.
    if (value['output_type'] == 'normalized signal of all reads'):
        if step['aliases'][0] not in ('modern:chip-seq-unique-read-signal-generation-step-run-v-1-virtual',
                                      'modern:chip-seq-replicate-pooled-unique-read-signal-generation-step-run-v-1-virtual'):
            detail = 'Normalized signal of all reads {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for unique signal', detail, level='WARNING')
    if (value['output_type']) == 'read-depth normalized signal':
        if step['aliases'][0] not in ('modern:chip-seq-read-depth-normalized-signal-generation-step-run-v-1-virtual',
                                      'modern:chip-seq-replicate-pooled-read-depth-normalized-signal-generation-step-run-v-1-virtual'):
            detail = 'Read depth normalized signal {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for depth signal', detail, level='WARNING')
    if (value['output_type']) == 'control normalized signal':
        if step['aliases'][0] not in ('modern:chip-seq-control-normalized-signal-generation-step-run-v-1-virtual',
                                      'modern:chip-seq-replicate-pooled-control-normalized-signal-generation-step-run-v-1-virtual'):
            detail = 'Control normalized signal {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for control signal', detail, level='WARNING')
    if (value['file_format'] == 'bigBed'):
        if step['aliases'][0] not in ('modern:chip-seq-peaks-to-bigbed-step-run-v-1-virtual',
                                      'modern:chip-seq-optimal-idr-thresholded-peaks-to-bigbed-step-run-v-1-virtual'):
            detail = 'bigBed {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for bigBed peaks', detail, level='WARNING')
    if (value['output_type'] == 'peaks') and (value['file_format'] == 'bed'):
        if (value['file_format_type'] == 'narrowPeak') and (step['aliases'][0] != 'modern:chip-seq-spp-peak-calling-step-run-v-1-virtual'):
            detail = 'Peaks {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for peaks', detail, level='WARNING')
    if (value['output_type'] == 'optimal idr thresholded peaks') and (value['file_format'] == 'bed'):
        if (value['file_format_type'] == 'narrowPeak') and (step['aliases'][0] != 'modern:chip-seq-optimal-idr-step-run-v-1-virtual'):
            detail = 'Optimal IDR thresholded peaks {} is linked to the wrong step_run: {}'.format(value['@id'], step['aliases'][0])
            yield AuditFailure('wrong step_run for IDR peaks', detail, level='WARNING')
@audit_checker('file', frame='object')
def audit_file_size(value, system):
    """Every file past the upload stage must have a recorded file_size."""
    if value['status'] in ['deleted', 'replaced', 'uploading', 'revoked']:
        return
    if 'file_size' in value:
        return
    detail = 'File {} requires a value for file_size'.format(value['@id'])
    raise AuditFailure('missing file_size', detail, level='DCC_ACTION')
@audit_checker('file', frame=['file_format_specifications'],)
def audit_file_format_specifications(value, system):
    """Documents linked via file_format_specifications must actually have
    document_type "file format specification"."""
    for doc in value.get('file_format_specifications', []):
        if doc['document_type'] == "file format specification":
            continue
        detail = 'File {} has document {} not of type file format specification'.format(
            value['@id'],
            doc['@id']
        )
        raise AuditFailure('wrong document_type', detail, level='ERROR')
@audit_checker('file', frame='object')
def audit_file_paired_ended_run_type(value, system):
    '''
    Audit to catch those files that were upgraded to have run_type = paired ended
    resulting from its migration out of replicate but lack the paired_end property
    to specify which read it is. This audit will also catch the case where run_type
    = paired-ended but there is no paired_end = 2 due to registeration error.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked', 'upload failed']:
        return
    if value['file_format'] not in ['fastq', 'fasta', 'csfasta']:
        return
    if (value['output_type'] == 'reads') and (value.get('run_type') == 'paired-ended'):
        if 'paired_end' not in value:
            detail = 'File {} has a paired-ended run_type '.format(value['@id']) + \
                'but is missing its paired_end value'
            raise AuditFailure('missing paired_end', detail, level='ERROR')
        # BUG FIX: paired_end is stored as the string '1'/'2' (see the
        # `value['paired_end'] == '1'` test in audit_paired_with); the
        # original integer comparison (== 1) could never match, so the
        # missing-mate audit never fired.
        if (value['paired_end'] == '1') and 'paired_with' not in value:
            detail = 'File {} has a paired-ended '.format(value['@id']) + \
                'run_type but is missing a paired_end=2 mate'
            raise AuditFailure('missing mate pair', detail, level='DCC_ACTION')
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset',
                              'dataset.target',
                              'derived_from'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_read_depth(value, system):
    '''
    An alignment file from the ENCODE Processing Pipeline should have read depth
    in accordance with the criteria
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] != 'bam':
        return
    if value['output_type'] == 'transcriptome alignments':
        return
    # Only files produced by the ENCODE uniform processing pipeline are audited.
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    # Provenance sanity checks; each missing link is a DCC_ACTION and stops here.
    if 'analysis_step_version' not in value:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
            'no analysis step version'
        yield AuditFailure('missing analysis step version', detail, level='DCC_ACTION')
        return
    if 'analysis_step' not in value['analysis_step_version']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
            'no analysis step in {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing analysis step', detail, level='DCC_ACTION')
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
            'no pipelines in {}'.format(value['analysis_step_version']['analysis_step']['@id'])
        yield AuditFailure('missing pipelines in analysis step', detail, level='DCC_ACTION')
        return
    if 'software_versions' not in value['analysis_step_version']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
            'no software_versions in {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing software versions', detail, level='DCC_ACTION')
        return
    if value['analysis_step_version']['software_versions'] == []:
        detail = 'ENCODE Processed alignment file {} has no '.format(value['@id']) + \
            'softwares listed in software_versions,' + \
            ' under {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing software', detail, level='DCC_ACTION')
        return
    '''
    excluding bam files from TopHat
    '''
    for record in value['analysis_step_version']['software_versions']:
        if record['software']['title'] == 'TopHat':
            return
    quality_metrics = value.get('quality_metrics')
    if (quality_metrics is None) or (quality_metrics == []):
        detail = 'ENCODE Processed alignment file {} has no quality_metrics'.format(
            value['@id'])
        yield AuditFailure('missing quality metrics', detail, level='DCC_ACTION')
        return
    read_depth = 0
    derived_from_files = value.get('derived_from')
    if (derived_from_files is None) or (derived_from_files == []):
        detail = 'ENCODE Processed alignment file {} has no derived_from files'.format(
            value['@id'])
        yield AuditFailure('missing derived_from files', detail, level='DCC_ACTION')
        return
    # Determine from the source fastqs whether the run was single- or
    # paired-ended; a paired-ended "total" metric counts both mates, so it is
    # halved when converted to read depth below.
    paring_status_detected = False
    for derived_from_file in derived_from_files:
        if 'file_type' in derived_from_file and derived_from_file['file_type'] == 'fastq' and \
           'run_type' in derived_from_file:
            if derived_from_file['run_type'] == 'single-ended':
                paired_ended_status = False
                paring_status_detected = True
                break
            else:
                if derived_from_file['run_type'] == 'paired-ended':
                    paired_ended_status = True
                    paring_status_detected = True
                    break
    if paring_status_detected is False:
        detail = 'ENCODE Processed alignment file {} has no run_type in derived_from files'.format(
            value['@id'])
        yield AuditFailure('missing run_type in derived_from files', detail, level='DCC_ACTION')
        return
    # Extract read depth from the first usable quality metric: prefer the STAR
    # 'Uniquely mapped reads number'; otherwise fall back to a 'total' count.
    for metric in quality_metrics:
        if 'Uniquely mapped reads number' in metric:  # start_quality_metric.json
            read_depth = metric['Uniquely mapped reads number']
            break  # continue
        else:
            if "total" in metric:
                if paired_ended_status is False:
                    read_depth = metric['total']
                else:
                    read_depth = int(metric['total']/2)
                break
    if read_depth == 0:
        detail = 'ENCODE Processed alignment file {} has no uniquely mapped reads number'.format(
            value['@id'])
        yield AuditFailure('missing read depth', detail, level='DCC_ACTION')
        return
    # 'empty' acts as a sentinel for "not applicable / not present".
    special_assay_name = 'empty'
    target_name = 'empty'
    if 'dataset' in value:
        if (value['dataset']['assay_term_name'] == 'shRNA knockdown followed by RNA-seq') or \
           (value['dataset']['assay_term_name'] == 'single cell isolation followed by RNA-seq'):
            special_assay_name = value['dataset']['assay_term_name']
        if 'target' in value['dataset']:
            target_name = value['dataset']['target']['name']
    # Read-depth thresholds, keyed by pipeline / special assay / histone-mark
    # breadth (narrow vs broad peaks).
    pipeline_titles = [
        'Small RNA-seq single-end pipeline',
        'RNA-seq of long RNAs (paired-end, stranded)',
        'RNA-seq of long RNAs (single-end, unstranded)',
        'RAMPAGE (paired-end, stranded)',
        'Histone ChIP-seq'
    ]
    read_depths_special = {
        'shRNA knockdown followed by RNA-seq': 10000000,
        'single cell isolation followed by RNA-seq': 5000000
    }
    read_depths = {
        'Small RNA-seq single-end pipeline': 30000000,
        'RNA-seq of long RNAs (paired-end, stranded)': 30000000,
        'RNA-seq of long RNAs (single-end, unstranded)': 30000000,
        'RAMPAGE (paired-end, stranded)': 25000000
    }
    marks = {
        'narrow': 20000000,
        'broad': 45000000
    }
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] not in pipeline_titles:
            return
        if pipeline['title'] == 'Histone ChIP-seq':  # do the chipseq narrow broad ENCODE3
            # Control files are checked against both thresholds since the
            # control may serve either narrow- or broad-mark experiments.
            if target_name in ['Control-human', 'Control-mouse']:
                if read_depth < marks['broad']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. It can not be used as a control ' + \
                        'in experiments studying broad histone marks, which ' + \
                        'require {} uniquely mapped reads.'.format(marks['broad'])
                    yield AuditFailure('insufficient read depth', detail, level='WARNING')
                if read_depth < marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. It can not be used as a control, ' + \
                        'due to insufficient read depth, narrow histone marks assays ' + \
                        'require {} uniquely mapped reads.'.format(marks['narrow'])
                    yield AuditFailure('insufficient read depth',
                                       detail, level='NOT_COMPLIANT')
                return
            if target_name == 'empty':
                detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                    'belongs to ChIP-seq experiment {} '.format(value['dataset']['@id']) + \
                    'with no target specified.'
                yield AuditFailure('ChIP-seq missing target', detail, level='ERROR')
                return
            if target_name in broadPeaksTargets:
                if read_depth < marks['broad']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. Replicates for ChIP-seq ' + \
                        'assay and target {} require '.format(target_name) + \
                        '{}'.format(marks['broad'])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
            else:
                # Narrow marks: borderline (within 5M above the minimum) is a
                # WARNING; below the minimum is NOT_COMPLIANT.
                if read_depth < (marks['narrow']+5000000) and read_depth > marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. ' + \
                        'The recommended numer of uniquely mapped reads for ChIP-seq assay ' + \
                        'and target {} would be '.format(target_name) + \
                        '{}'.format(marks['narrow']+5000000)
                    yield AuditFailure('insufficient read depth', detail, level='WARNING')
                    return
                if read_depth < marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. Replicates for ChIP-seq assay ' + \
                        'and target {} require '.format(target_name) + \
                        '{}'.format(marks['narrow'])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
        else:
            if special_assay_name != 'empty':  # either shRNA or single cell
                if read_depth < read_depths_special[special_assay_name]:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                        'uniquely mapped reads. Replicates for this assay ' + \
                        '{} require '.format(pipeline['title']) + \
                        '{}'.format(read_depths_special[special_assay_name])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
            else:
                if (read_depth < read_depths[pipeline['title']]):
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'], read_depth) + \
                        'uniquely mapped reads. Replicates for this ' + \
                        'assay {} require {}'.format(pipeline['title'],
                                                     read_depths[pipeline['title']])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_chip_seq_library_complexity(value, system):
    '''
    An alignment file from the ENCODE ChIP-seq processing pipeline
    should have minimal library complexity in accordance with the criteria
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] != 'bam':
        return
    if value['output_type'] == 'transcriptome alignments':
        return
    # Only files from the ENCODE uniform processing pipeline are audited;
    # silently skip files without the provenance needed for the check.
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    if ('quality_metrics' not in value) or (value.get('quality_metrics') == []):
        return
    if 'analysis_step_version' not in value:
        return
    if 'analysis_step' not in value['analysis_step_version']:
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        return
    # Explanatory text appended to every failure detail, one per statistic.
    nrf_end_of_detail = "Non redundant fraction (NRF, Number of reads after " + \
        "removing duplicates / Total number of reads). 0.0-0.7 is very " + \
        "poor complexity, 0.7-0.8 is poor complexity, 0.8-0.9 moderate " + \
        "complexity, and >0.9 high complexity. NRF >0.9 is recommended, " + \
        "but >0.8 is acceptable"
    pbc1_end_of_detail = "PCR Bottlenecking coefficient 1 (PBC1, Number of genomic " + \
        "locations where exactly one read maps uniquely/Number of " + \
        "distinct genomic locations to which some read maps uniquely). " + \
        "0 - 0.5 is severe bottlenecking, 0.5 - 0.8 is moderate " + \
        "bottlenecking, 0.8 - 0.9 is mild bottlenecking, and > 0.9 is " + \
        "no bottlenecking. PBC1 >0.9 is recommended, but >0.8 is acceptable"
    pbc2_end_of_detail = "PCR Bottlenecking coefficient 2 (PBC2, Number of genomic locations " + \
        "where only one read maps uniquely/Number of genomic locations where " + \
        "2 reads map uniquely). 0 - 1 is severe bottlenecking, 1 - 3 is " + \
        "moderate bottlenecking, 3 -10 is mild bottlenecking, > 10 is no " + \
        "bottlenecking. PBC2 >10 is recommended, but >3 is acceptable"
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] == 'Histone ChIP-seq':
            quality_metrics = value.get('quality_metrics')
            # Each statistic has an "acceptable" threshold (NOT_COMPLIANT
            # below it) and a "recommended" threshold (WARNING below it).
            for metric in quality_metrics:
                if 'NRF' in metric:
                    NRF_value = float(metric['NRF'])
                    if NRF_value < 0.8:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                            'was generated from a library with NRF value of {}'.format(NRF_value) + \
                            '. '+nrf_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if NRF_value <= 0.9:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                'was generated from a library with NRF value of {}'.format(NRF_value) + \
                                '. '+nrf_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
                if 'PBC1' in metric:
                    PBC1_value = float(metric['PBC1'])
                    if PBC1_value < 0.8:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                            'was generated from a library with PBC1 value of {}'.format(PBC1_value) + \
                            '. '+pbc1_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if PBC1_value <= 0.9:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                'was generated from a library with PBC1 value of {}'.format(PBC1_value) + \
                                '. '+pbc1_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
                if 'PBC2' in metric:
                    PBC2_value = float(metric['PBC2'])
                    if PBC2_value < 3:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                            'was generated from a library with PBC2 value of {}'.format(PBC2_value) + \
                            '. '+pbc2_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if PBC2_value <= 10:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                'was generated from a library with PBC2 value of {}'.format(PBC2_value) + \
                                '. '+pbc2_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
            return
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_mad_qc_spearman_correlation(value, system):
    '''
    A gene quantification file from the ENCODE Processing Pipeline should have a mad QC
    in accordance with the criteria
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['output_type'] != 'gene quantifications':
        return
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    # Provenance sanity checks; each missing link is a DCC_ACTION and stops here.
    if 'analysis_step_version' not in value:
        detail = 'ENCODE Processed gene quantification file {} has no analysis step version'.format(
            value['@id'])
        yield AuditFailure('missing analysis step version', detail, level='DCC_ACTION')
        return
    if 'analysis_step' not in value['analysis_step_version']:
        detail = 'ENCODE Processed gene quantification file {} has no analysis step in {}'.format(
            value['@id'],
            value['analysis_step_version']['@id'])
        yield AuditFailure('missing analysis step', detail, level='DCC_ACTION')
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        detail = 'ENCODE Processed gene quantification file {} has no pipelines in {}'.format(
            value['@id'],
            value['analysis_step_version']['analysis_step']['@id'])
        yield AuditFailure('missing pipelines in analysis step', detail, level='DCC_ACTION')
        return
    quality_metrics = value.get('quality_metrics')
    if (quality_metrics is None) or (quality_metrics == []):
        detail = 'ENCODE Processed gene quantification file {} has no quality_metrics'.format(
            value['@id'])
        yield AuditFailure('missing quality metrics', detail, level='DCC_ACTION')
        return
    # Take the first quality metric that reports a Spearman correlation.
    spearman_correlation = False
    for metric in quality_metrics:
        if 'Spearman correlation' in metric:
            spearman_correlation = metric['Spearman correlation']
            break
    if spearman_correlation is False:
        detail = 'ENCODE Processed gene quantification file {} '.format(value['@id']) + \
            'has no MAD quality metric'
        yield AuditFailure('missing Spearman correlation', detail, level='DCC_ACTION')
        return
    spearman_pipelines = ['RAMPAGE (paired-end, stranded)',
                          'Small RNA-seq single-end pipeline',
                          'RNA-seq of long RNAs (single-end, unstranded)',
                          'RNA-seq of long RNAs (paired-end, stranded)']
    # Anisogenic replicates are held to a lower correlation bar (0.8 vs 0.9).
    experiment_replication_type = 'isogenic'
    if 'dataset' in value:
        if 'replication_type' in value['dataset']:
            if value['dataset']['replication_type'] in ['anisogenic',
                                                        'anisogenic, sex-matched and age-matched',
                                                        'anisogenic, age-matched',
                                                        'anisogenic, sex-matched']:
                experiment_replication_type = 'anisogenic'
                required_value = 0.8
            else:
                required_value = 0.9
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] in spearman_pipelines:
            if spearman_correlation < required_value:
                # NOTE(review): the offset 0.0713512755834 defining the
                # borderline band looks empirically chosen — source unclear
                # from this file; confirm against the audit documentation.
                border_value = (required_value - 0.0713512755834)
                detail = 'ENCODE processed gene quantification file {} '.format(value['@id']) + \
                    'has Spearman correlaton of {} '.format(spearman_correlation) + \
                    ', for gene quantification files from an {}'.format(experiment_replication_type) + \
                    ' assay in the {} '.format(pipeline['title']) + \
                    'pipeline the preferred value is > {}, '.format(required_value) + \
                    'a borderline is between {} and {}'.format(required_value, border_value)
                if spearman_correlation > border_value:
                    yield AuditFailure('borderline spearman correlation', detail,
                                       level='WARNING')
                    return
                else:
                    yield AuditFailure('poor spearman correlation', detail,
                                       level='NOT_COMPLIANT')
                    return
Adding ENCODE to the audit conditions and fixing the wording of the error message.
from contentbase import (
AuditFailure,
audit_checker,
)
from .conditions import (
rfa,
)
import datetime
# Statuses considered "current" vs. retired; used by audits in this module.
current_statuses = ['released', 'in progress']
not_current_statuses = ['revoked', 'obsolete', 'deleted']
# File formats that hold raw (unprocessed) sequencer/array output; such
# files are expected to carry platform information.
raw_data_formats = [
    'fastq',
    'csfasta',
    'csqual',
    'rcc',
    'idat',
    'CEL',
]
# Assays whose sequencing is inherently paired-end.
paired_end_assays = [
    'RNA-PET',
    'ChIA-PET',
    'DNA-PET',
]
# ChIP-seq targets (histone marks) that produce broad peaks and therefore
# are held to the higher "broad" read-depth threshold.
broadPeaksTargets = [
    'H3K4me1-mouse',
    'H3K36me3-mouse',
    'H3K79me2-mouse',
    'H3K27me3-mouse',
    'H3K9me1-mouse',
    'H3K9me3-mouse',
    'H3K4me1-human',
    'H3K36me3-human',
    'H3K79me2-human',
    'H3K27me3-human',
    'H3K9me1-human',
    'H3K9me3-human',
    'H3F3A-human',
    'H4K20me1-human',
    'H3K79me3-human',
    'H3K79me3-mouse',
]
@audit_checker('file', frame=['replicate', 'replicate.experiment',
                              'derived_from', 'derived_from.replicate',
                              'derived_from.replicate.experiment'])
def audit_file_biological_replicate_number_match(value, system):
    '''
    A file must carry the same (biological, technical) replicate numbers
    as any file it was derived from within the same experiment.
    Controls from other experiments are ignored.
    '''
    # Retired files are not audited.
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'replicate' not in value:
        return
    if 'derived_from' not in value or len(value['derived_from']) == 0:
        return

    own_replicate = (value['replicate']['biological_replicate_number'],
                     value['replicate']['technical_replicate_number'])
    own_experiment = value['replicate']['experiment']['accession']

    for parent in value['derived_from']:
        if 'replicate' not in parent:
            continue
        # excluding control files from different experiments
        if parent['replicate']['experiment']['accession'] != own_experiment:
            continue
        parent_replicate = (parent['replicate']['biological_replicate_number'],
                            parent['replicate']['technical_replicate_number'])
        if own_replicate != parent_replicate:
            detail = ('Biological replicate number of the file {} is {}'
                      ', it is inconsistent with the biological replicate number '
                      '{} of the file {} it was derived from').format(
                          value['@id'], own_replicate,
                          parent_replicate, parent['@id'])
            raise AuditFailure('inconsistent biological replicate number',
                               detail, level='ERROR')
@audit_checker('file', frame=['replicate', 'dataset', 'replicate.experiment'])
def audit_file_replicate_match(value, system):
    '''
    A file's replicate should belong to the same experiment that the file
    does. These tend to get confused when replacing objects.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'replicate' not in value:
        return
    replicate = value['replicate']
    # The replicate's experiment and the file's dataset must be the same object.
    if replicate['experiment']['uuid'] == value['dataset']['uuid']:
        return
    detail = 'File {} has a replicate {} in experiment {}'.format(
        value['@id'],
        replicate['@id'],
        replicate['experiment']['@id'])
    raise AuditFailure('mismatched replicate', detail, level='ERROR')
@audit_checker('file', frame='object', condition=rfa('ENCODE3', 'modERN', 'ENCODE2', 'ENCODE2-Mouse'))
def audit_file_platform(value, system):
    '''
    A raw data file should have a platform specified.
    Should be in the schema.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    # Only raw-data formats are required to name their platform.
    if value['file_format'] not in raw_data_formats:
        return
    if 'platform' in value:
        return
    detail = 'Raw data file {} missing platform information'.format(value['@id'])
    raise AuditFailure('missing platform', detail, level='NOT_COMPLIANT')
@audit_checker('file', frame='object', condition=rfa('ENCODE3', 'modERN', 'ENCODE',
                                                     'ENCODE2', 'ENCODE2-Mouse'))
def audit_file_read_length(value, system):
    '''
    Reads files should have a read_length; files created on or after
    2015-06-30 must be at least 50bp (NOT_COMPLIANT), earlier files
    only get a WARNING.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['output_type'] != 'reads':
        return
    if 'read_length' not in value:
        detail = 'Reads file {} missing read_length'.format(value['@id'])
        yield AuditFailure('missing read_length', detail, level='DCC_ACTION')
        return
    # date_created starts with an ISO date, e.g. "2015-06-30T...".
    year, month, day = (int(part)
                        for part in value['date_created'][:10].split('-'))
    # Unpadded rendering (e.g. "2015-6-3") matches the historical message text.
    created_date = '{}-{}-{}'.format(year, month, day)
    file_date_creation = datetime.date(year, month, day)
    threshold_date = datetime.date(2015, 6, 30)
    read_length = value['read_length']
    if read_length >= 50:
        return
    detail = 'Fastq file {} '.format(value['@id']) + \
             'that was created on {} '.format(created_date) + \
             'has read length of {}bp.'.format(read_length) + \
             ' It is not compliant with ENCODE3 standards.' + \
             ' According to ENCODE3 standards files submitted after 2015-6-30 ' + \
             'should be at least 50bp long.'
    # Older files are merely warned; newer ones are non-compliant.
    level = 'WARNING' if file_date_creation < threshold_date else 'NOT_COMPLIANT'
    yield AuditFailure('insufficient read length', detail, level=level)
@audit_checker('file',
               frame=['dataset', 'dataset.target', 'controlled_by',
                      'controlled_by.dataset'],
               condition=rfa('ENCODE2', 'ENCODE2-Mouse', 'ENCODE3', 'modERN'))
def audit_file_controlled_by(value, system):
    '''
    A fastq in a ChIP-seq experiment should have a controlled_by.
    Each control file is also checked for a consistent biosample,
    file_format, dataset membership, run_type and read_length.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['dataset'].get('assay_term_name') not in ['ChIP-seq', 'RAMPAGE', 'CAGE', 'shRNA knockdown followed by RNA-seq']:
        return
    # Control experiments themselves do not need controls.
    if 'target' in value['dataset'] and 'control' in value['dataset']['target'].get('investigated_as', []):
        return
    # NOTE(review): mutates the input dict so the checks below can assume
    # the key exists — confirm this side effect is intended.
    if 'controlled_by' not in value:
        value['controlled_by'] = []
    if (value['controlled_by'] == []) and (value['file_format'] in ['fastq']):
        detail = 'Fastq file {} from {} requires controlled_by'.format(
            value['@id'],
            value['dataset']['assay_term_name']
        )
        yield AuditFailure('missing controlled_by', detail, level='NOT_COMPLIANT')
        return
    possible_controls = value['dataset'].get('possible_controls')
    biosample = value['dataset'].get('biosample_term_id')
    run_type = value.get('run_type', None)
    read_length = value.get('read_length', None)
    if value['controlled_by']:
        for ff in value['controlled_by']:
            control_bs = ff['dataset'].get('biosample_term_id')
            control_run = ff.get('run_type', None)
            control_length = ff.get('read_length', None)
            # Control must come from the same biosample type.
            if control_bs != biosample:
                detail = 'File {} has a controlled_by file {} with conflicting biosample {}'.format(
                    value['@id'],
                    ff['@id'],
                    control_bs)
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            # Control must be the same file format.
            if ff['file_format'] != value['file_format']:
                detail = 'File {} with file_format {} has a controlled_by file {} with file_format {}'.format(
                    value['@id'],
                    value['file_format'],
                    ff['@id'],
                    ff['file_format']
                )
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            # Control's dataset must be registered as a possible control.
            if (possible_controls is None) or (ff['dataset']['@id'] not in possible_controls):
                detail = 'File {} has a controlled_by file {} with a dataset {} that is not in possible_controls'.format(
                    value['@id'],
                    ff['@id'],
                    ff['dataset']['@id']
                )
                yield AuditFailure('mismatched controlled_by', detail, level='ERROR')
                return
            # run_type / read_length comparisons are skipped when either
            # side lacks the property.
            if (run_type is None) or (control_run is None):
                continue
            if (read_length is None) or (control_length is None):
                continue
            if run_type != control_run:
                detail = 'File {} is {} but its control file {} is {}'.format(
                    value['@id'],
                    run_type,
                    ff['@id'],
                    control_run
                )
                yield AuditFailure('mismatched controlled_by run_type', detail, level='WARNING')
            if read_length != control_length:
                detail = 'File {} is {} but its control file {} is {}'.format(
                    value['@id'],
                    value['read_length'],
                    ff['@id'],
                    ff['read_length']
                )
                yield AuditFailure('mismatched controlled_by read length', detail, level='WARNING')
                return
@audit_checker('file', frame='object', condition=rfa('modERN', 'GGR'))
def audit_file_flowcells(value, system):
    '''
    A fastq file could have its flowcell details.
    Don't bother to check anything but ENCODE3
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] not in ['fastq']:
        return
    # Present and non-empty: nothing to report.
    if 'flowcell_details' in value and value['flowcell_details'] != []:
        return
    detail = 'Fastq file {} is missing flowcell_details'.format(value['@id'])
    raise AuditFailure('missing flowcell_details', detail, level='WARNING')
@audit_checker('file', frame='object',)
def audit_run_type(value, system):
    '''
    A fastq file or a fasta file need to specify run_type.
    This was attempted to be a dependency and didn't happen.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] not in ['fastq']:
        return
    if 'run_type' in value:
        return
    detail = 'File {} has file_format {}. It requires a value for run_type'.format(
        value['@id'],
        value['file_format'])
    raise AuditFailure('missing run_type', detail, level='NOT_COMPLIANT')
@audit_checker('file', frame=['paired_with'],)
def audit_paired_with(value, system):
    '''
    A file with a paired_end needs a paired_with.
    Should be handled in the schema.
    A paired_with should be the same replicate
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if 'paired_end' not in value:
        return
    if 'paired_with' not in value:
        detail = 'File {} has paired_end = {}. It requires a paired file'.format(
            value['@id'],
            value['paired_end'])
        raise AuditFailure('missing paired_with', detail, level='NOT_COMPLIANT')
    # If the mate has no replicate there is nothing further to compare.
    if 'replicate' not in value['paired_with']:
        return
    if 'replicate' not in value:
        detail = 'File {} has paired_end = {}. It requires a replicate'.format(
            value['@id'],
            value['paired_end'])
        raise AuditFailure('missing replicate', detail, level='DCC_ACTION')
    # Both mates must belong to the same replicate.
    if value['replicate'] != value['paired_with']['replicate']:
        detail = 'File {} has replicate {}. It is paired_with file {} with replicate {}'.format(
            value['@id'],
            value.get('replicate'),
            value['paired_with']['@id'],
            value['paired_with'].get('replicate'))
        raise AuditFailure('mismatched paired_with', detail, level='ERROR')
    # paired_end is stored as a string; a read-1 file may be claimed by at
    # most one read-2 file via the paired_with reverse links.
    if value['paired_end'] == '1':
        context = system['context']
        paired_with = context.get_rev_links('paired_with')
        if len(paired_with) > 1:
            detail = 'Paired end 1 file {} paired_with by multiple paired end 2 files: {!r}'.format(
                value['@id'],
                paired_with,
            )
            raise AuditFailure('multiple paired_with', detail, level='ERROR')
@audit_checker('file', frame=['step_run',
                              'dataset'], condition=rfa('modERN'))
def audit_modERN_ChIP_pipeline_steps(value, system):
    '''
    Verify that modERN ChIP-seq processed files are linked to the expected
    (virtual) pipeline step_run for their file_format / output_type.
    '''
    expt = value['dataset']
    if 'Experiment' not in expt['@type']:
        return
    # OBI:0000716 identifies the ChIP-seq assay.
    if expt['assay_term_id'] != 'OBI:0000716':
        return
    # Raw fastqs are not pipeline products.
    if value['file_format'] == 'fastq':
        return
    if 'step_run' not in value:
        detail = 'File {} is missing a step_run'.format(value['@id'])
        yield AuditFailure('missing step_run', detail, level='WARNING')
        return
    if (value['file_format'] != 'fastq') and ('derived_from' not in value):
        detail = 'File {} is missing its derived_from'.format(value['@id'])
        yield AuditFailure('missing derived_from', detail, level='WARNING')
    step = value['step_run']
    step_alias = step['aliases'][0]
    if (value['file_format'] == 'bam') and step_alias != 'modern:chip-seq-bwa-alignment-step-run-v-1-virtual':
        detail = 'Bam {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
        yield AuditFailure('wrong step_run ChIP-seq bam', detail, level='WARNING')
    # BUG FIX: the original used `not ((alias != A) or (alias != B))`, which is
    # always False whenever A != B, so none of the two-alias audits below could
    # ever fire.  The intended condition is "alias is neither accepted step run",
    # i.e. `alias not in (A, B)`.
    if (value['output_type'] == 'normalized signal of all reads'):
        if step_alias not in ('modern:chip-seq-unique-read-signal-generation-step-run-v-1-virtual',
                              'modern:chip-seq-replicate-pooled-unique-read-signal-generation-step-run-v-1-virtual'):
            detail = 'Normalized signal of all reads {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for unique signal', detail, level='WARNING')
    if (value['output_type']) == 'read-depth normalized signal':
        if step_alias not in ('modern:chip-seq-read-depth-normalized-signal-generation-step-run-v-1-virtual',
                              'modern:chip-seq-replicate-pooled-read-depth-normalized-signal-generation-step-run-v-1-virtual'):
            detail = 'Read depth normalized signal {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for depth signal', detail, level='WARNING')
    if (value['output_type']) == 'control normalized signal':
        if step_alias not in ('modern:chip-seq-control-normalized-signal-generation-step-run-v-1-virtual',
                              'modern:chip-seq-replicate-pooled-control-normalized-signal-generation-step-run-v-1-virtual'):
            detail = 'Control normalized signal {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for control signal', detail, level='WARNING')
    if (value['file_format'] == 'bigBed'):
        if step_alias not in ('modern:chip-seq-peaks-to-bigbed-step-run-v-1-virtual',
                              'modern:chip-seq-optimal-idr-thresholded-peaks-to-bigbed-step-run-v-1-virtual'):
            detail = 'bigBed {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for bigBed peaks', detail, level='WARNING')
    if (value['output_type'] == 'peaks') and (value['file_format'] == 'bed'):
        if (value['file_format_type'] == 'narrowPeak') and (step_alias != 'modern:chip-seq-spp-peak-calling-step-run-v-1-virtual'):
            detail = 'Peaks {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for peaks', detail, level='WARNING')
    if (value['output_type'] == 'optimal idr thresholded peaks') and (value['file_format'] == 'bed'):
        if (value['file_format_type'] == 'narrowPeak') and (step_alias != 'modern:chip-seq-optimal-idr-step-run-v-1-virtual'):
            detail = 'Optimal IDR thresholded peaks {} is linked to the wrong step_run: {}'.format(value['@id'], step_alias)
            yield AuditFailure('wrong step_run for IDR peaks', detail, level='WARNING')
@audit_checker('file', frame='object')
def audit_file_size(value, system):
    '''Every current, fully-uploaded file must report a file_size.'''
    if value['status'] in ['deleted', 'replaced', 'uploading', 'revoked']:
        return
    if 'file_size' in value:
        return
    detail = 'File {} requires a value for file_size'.format(value['@id'])
    raise AuditFailure('missing file_size', detail, level='DCC_ACTION')
@audit_checker('file', frame=['file_format_specifications'],)
def audit_file_format_specifications(value, system):
    '''Every linked specification document must have the right document_type.'''
    for doc in value.get('file_format_specifications', []):
        if doc['document_type'] == "file format specification":
            continue
        detail = 'File {} has document {} not of type file format specification'.format(
            value['@id'],
            doc['@id']
        )
        raise AuditFailure('wrong document_type', detail, level='ERROR')
@audit_checker('file', frame='object')
def audit_file_paired_ended_run_type(value, system):
    '''
    Audit to catch those files that were upgraded to have run_type = paired ended
    resulting from its migration out of replicate but lack the paired_end property
    to specify which read it is. This audit will also catch the case where run_type
    = paired-ended but there is no paired_end = 2 due to registeration error.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked', 'upload failed']:
        return
    if value['file_format'] not in ['fastq', 'fasta', 'csfasta']:
        return
    if (value['output_type'] == 'reads') and (value.get('run_type') == 'paired-ended'):
        if 'paired_end' not in value:
            detail = 'File {} has a paired-ended run_type '.format(value['@id']) + \
                     'but is missing its paired_end value'
            raise AuditFailure('missing paired_end', detail, level='ERROR')
        # BUG FIX: paired_end is stored as the string '1' (audit_paired_with
        # compares against '1'); the old integer comparison (== 1) could never
        # match, so the missing-mate audit never fired.  Accept both spellings
        # for safety.
        if (value['paired_end'] in ('1', 1)) and 'paired_with' not in value:
            detail = 'File {} has a paired-ended '.format(value['@id']) + \
                     'run_type but is missing a paired_end=2 mate'
            raise AuditFailure('missing mate pair', detail, level='DCC_ACTION')
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset',
                              'dataset.target',
                              'derived_from'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_read_depth(value, system):
    '''
    An alignment file from the ENCODE Processing Pipeline should have read depth
    in accordance with the criteria
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] != 'bam':
        return
    if value['output_type'] == 'transcriptome alignments':
        return
    # Only files produced by the ENCODE uniform processing pipeline.
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    # The chain analysis_step_version -> analysis_step -> pipelines must be
    # intact to decide which depth threshold applies.
    if 'analysis_step_version' not in value:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
                 'no analysis step version'
        yield AuditFailure('missing analysis step version', detail, level='DCC_ACTION')
        return
    if 'analysis_step' not in value['analysis_step_version']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
                 'no analysis step in {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing analysis step', detail, level='DCC_ACTION')
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
                 'no pipelines in {}'.format(value['analysis_step_version']['analysis_step']['@id'])
        yield AuditFailure('missing pipelines in analysis step', detail, level='DCC_ACTION')
        return
    if 'software_versions' not in value['analysis_step_version']:
        detail = 'ENCODE Processed alignment file {} has '.format(value['@id']) + \
                 'no software_versions in {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing software versions', detail, level='DCC_ACTION')
        return
    if value['analysis_step_version']['software_versions'] == []:
        detail = 'ENCODE Processed alignment file {} has no '.format(value['@id']) + \
                 'softwares listed in software_versions,' + \
                 ' under {}'.format(value['analysis_step_version']['@id'])
        yield AuditFailure('missing software', detail, level='DCC_ACTION')
        return
    # Excluding bam files produced by TopHat.
    for record in value['analysis_step_version']['software_versions']:
        if record['software']['title'] == 'TopHat':
            return
    quality_metrics = value.get('quality_metrics')
    if (quality_metrics is None) or (quality_metrics == []):
        detail = 'ENCODE Processed alignment file {} has no quality_metrics'.format(
            value['@id'])
        yield AuditFailure('missing quality metrics', detail, level='DCC_ACTION')
        return
    read_depth = 0
    derived_from_files = value.get('derived_from')
    if (derived_from_files is None) or (derived_from_files == []):
        detail = 'ENCODE Processed alignment file {} has no derived_from files'.format(
            value['@id'])
        yield AuditFailure('missing derived_from files', detail, level='DCC_ACTION')
        return
    # Determine single- vs paired-ended from the source fastq; paired-ended
    # halves the reported total below.
    paring_status_detected = False
    for derived_from_file in derived_from_files:
        # NOTE(review): checks the 'file_type' key — confirm against the file
        # schema (other audits here use 'file_format').
        if 'file_type' in derived_from_file and derived_from_file['file_type'] == 'fastq' and \
           'run_type' in derived_from_file:
            if derived_from_file['run_type'] == 'single-ended':
                paired_ended_status = False
                paring_status_detected = True
                break
            else:
                if derived_from_file['run_type'] == 'paired-ended':
                    paired_ended_status = True
                    paring_status_detected = True
                    break
    if paring_status_detected is False:
        detail = 'ENCODE Processed alignment file {} has no run_type in derived_from files'.format(
            value['@id'])
        yield AuditFailure('missing run_type in derived_from files', detail, level='DCC_ACTION')
        return
    # Extract the read depth: prefer the STAR-style unique-reads metric,
    # otherwise fall back to a 'total' count (halved for paired-ended).
    for metric in quality_metrics:
        if 'Uniquely mapped reads number' in metric:  # STAR quality metric
            read_depth = metric['Uniquely mapped reads number']
            break
        else:
            if "total" in metric:
                if paired_ended_status is False:
                    read_depth = metric['total']
                else:
                    read_depth = int(metric['total']/2)
                break
    if read_depth == 0:
        detail = 'ENCODE Processed alignment file {} has no uniquely mapped reads number'.format(
            value['@id'])
        yield AuditFailure('missing read depth', detail, level='DCC_ACTION')
        return
    # Some assays have their own (lower) depth requirements.
    special_assay_name = 'empty'
    target_name = 'empty'
    if 'dataset' in value:
        if (value['dataset']['assay_term_name'] == 'shRNA knockdown followed by RNA-seq') or \
           (value['dataset']['assay_term_name'] == 'single cell isolation followed by RNA-seq'):
            special_assay_name = value['dataset']['assay_term_name']
        if 'target' in value['dataset']:
            target_name = value['dataset']['target']['name']
    # Pipelines this audit knows thresholds for.
    pipeline_titles = [
        'Small RNA-seq single-end pipeline',
        'RNA-seq of long RNAs (paired-end, stranded)',
        'RNA-seq of long RNAs (single-end, unstranded)',
        'RAMPAGE (paired-end, stranded)',
        'Histone ChIP-seq'
    ]
    read_depths_special = {
        'shRNA knockdown followed by RNA-seq': 10000000,
        'single cell isolation followed by RNA-seq': 5000000
    }
    read_depths = {
        'Small RNA-seq single-end pipeline': 30000000,
        'RNA-seq of long RNAs (paired-end, stranded)': 30000000,
        'RNA-seq of long RNAs (single-end, unstranded)': 30000000,
        'RAMPAGE (paired-end, stranded)': 25000000
    }
    # ChIP-seq thresholds: narrow vs broad histone marks.
    marks = {
        'narrow': 20000000,
        'broad': 45000000
    }
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] not in pipeline_titles:
            return
        if pipeline['title'] == 'Histone ChIP-seq':  # do the chipseq narrow broad ENCODE3
            # Controls are judged against both thresholds.
            if target_name in ['Control-human', 'Control-mouse']:
                if read_depth < marks['broad']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. It can not be used as a control ' + \
                             'in experiments studying broad histone marks, which ' + \
                             'require {} uniquely mapped reads.'.format(marks['broad'])
                    yield AuditFailure('insufficient read depth', detail, level='WARNING')
                if read_depth < marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. It can not be used as a control, ' + \
                             'due to insufficient read depth, narrow histone marks assays ' + \
                             'require {} uniquely mapped reads.'.format(marks['narrow'])
                    yield AuditFailure('insufficient read depth',
                                       detail, level='NOT_COMPLIANT')
                return
            if target_name == 'empty':
                detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                         'belongs to ChIP-seq experiment {} '.format(value['dataset']['@id']) + \
                         'with no target specified.'
                yield AuditFailure('ChIP-seq missing target', detail, level='ERROR')
                return
            if target_name in broadPeaksTargets:
                if read_depth < marks['broad']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. Replicates for ChIP-seq ' + \
                             'assay and target {} require '.format(target_name) + \
                             '{}'.format(marks['broad'])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
            else:
                # Slightly-under-recommended depth only warns.
                if read_depth < (marks['narrow']+5000000) and read_depth > marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. ' + \
                             'The recommended numer of uniquely mapped reads for ChIP-seq assay ' + \
                             'and target {} would be '.format(target_name) + \
                             '{}'.format(marks['narrow']+5000000)
                    yield AuditFailure('insufficient read depth', detail, level='WARNING')
                    return
                if read_depth < marks['narrow']:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. Replicates for ChIP-seq assay ' + \
                             'and target {} require '.format(target_name) + \
                             '{}'.format(marks['narrow'])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
        else:
            if special_assay_name != 'empty':  # either shRNA or single cell
                if read_depth < read_depths_special[special_assay_name]:
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'],
                                                                                 read_depth) + \
                             'uniquely mapped reads. Replicates for this assay ' + \
                             '{} require '.format(pipeline['title']) + \
                             '{}'.format(read_depths_special[special_assay_name])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
            else:
                if (read_depth < read_depths[pipeline['title']]):
                    detail = 'ENCODE Processed alignment file {} has {} '.format(value['@id'], read_depth) + \
                             'uniquely mapped reads. Replicates for this ' + \
                             'assay {} require {}'.format(pipeline['title'],
                                                          read_depths[pipeline['title']])
                    yield AuditFailure('insufficient read depth', detail, level='NOT_COMPLIANT')
                    return
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_chip_seq_library_complexity(value, system):
    '''
    An alignment file from the ENCODE ChIP-seq processing pipeline
    should have minimal library complexity in accordance with the criteria:
    NRF > 0.8, PBC1 > 0.8, PBC2 > 3 (below these is NOT_COMPLIANT;
    below the recommended 0.9 / 0.9 / 10 is a WARNING).
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['file_format'] != 'bam':
        return
    if value['output_type'] == 'transcriptome alignments':
        return
    # Only files produced by the ENCODE uniform processing pipeline.
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    # Silently skip when the information needed to decide is missing.
    if ('quality_metrics' not in value) or (value.get('quality_metrics') == []):
        return
    if 'analysis_step_version' not in value:
        return
    if 'analysis_step' not in value['analysis_step_version']:
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        return
    # Boilerplate explanations appended to every failure detail.
    nrf_end_of_detail = "Non redundant fraction (NRF, Number of reads after " + \
                        "removing duplicates / Total number of reads). 0.0-0.7 is very " + \
                        "poor complexity, 0.7-0.8 is poor complexity, 0.8-0.9 moderate " + \
                        "complexity, and >0.9 high complexity. NRF >0.9 is recommended, " + \
                        "but >0.8 is acceptable"
    pbc1_end_of_detail = "PCR Bottlenecking coefficient 1 (PBC1, Number of genomic " + \
                         "locations where exactly one read maps uniquely/Number of " + \
                         "distinct genomic locations to which some read maps uniquely). " + \
                         "0 - 0.5 is severe bottlenecking, 0.5 - 0.8 is moderate " + \
                         "bottlenecking, 0.8 - 0.9 is mild bottlenecking, and > 0.9 is " + \
                         "no bottlenecking. PBC1 >0.9 is recommended, but >0.8 is acceptable"
    pbc2_end_of_detail = "PCR Bottlenecking coefficient 2 (PBC2, Number of genomic locations " + \
                         "where only one read maps uniquely/Number of genomic locations where " + \
                         "2 reads map uniquely). 0 - 1 is severe bottlenecking, 1 - 3 is " + \
                         "moderate bottlenecking, 3 -10 is mild bottlenecking, > 10 is no " + \
                         "bottlenecking. PBC2 >10 is recommended, but >3 is acceptable"
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] == 'Histone ChIP-seq':
            quality_metrics = value.get('quality_metrics')
            for metric in quality_metrics:
                if 'NRF' in metric:
                    NRF_value = float(metric['NRF'])
                    if NRF_value < 0.8:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                 'was generated from a library with NRF value of {}'.format(NRF_value) + \
                                 '. '+nrf_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if NRF_value <= 0.9:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                     'was generated from a library with NRF value of {}'.format(NRF_value) + \
                                     '. '+nrf_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
                if 'PBC1' in metric:
                    PBC1_value = float(metric['PBC1'])
                    if PBC1_value < 0.8:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                 'was generated from a library with PBC1 value of {}'.format(PBC1_value) + \
                                 '. '+pbc1_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if PBC1_value <= 0.9:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                     'was generated from a library with PBC1 value of {}'.format(PBC1_value) + \
                                     '. '+pbc1_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
                if 'PBC2' in metric:
                    PBC2_value = float(metric['PBC2'])
                    if PBC2_value < 3:
                        detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                 'was generated from a library with PBC2 value of {}'.format(PBC2_value) + \
                                 '. '+pbc2_end_of_detail
                        yield AuditFailure('insufficient library complexity', detail,
                                           level='NOT_COMPLIANT')
                    else:
                        if PBC2_value <= 10:
                            detail = 'ENCODE Processed alignment file {} '.format(value['@id']) + \
                                     'was generated from a library with PBC2 value of {}'.format(PBC2_value) + \
                                     '. '+pbc2_end_of_detail
                            yield AuditFailure('low library complexity', detail,
                                               level='WARNING')
    return
@audit_checker('file', frame=['quality_metrics',
                              'analysis_step_version',
                              'analysis_step_version.analysis_step',
                              'analysis_step_version.analysis_step.pipelines',
                              'analysis_step_version.software_versions',
                              'analysis_step_version.software_versions.software',
                              'dataset'],
               condition=rfa('ENCODE3', 'ENCODE'))
def audit_file_mad_qc_spearman_correlation(value, system):
    '''
    A gene quantification file from the ENCODE Processing Pipeline should have
    a MAD QC Spearman correlation in accordance with the criteria:
    > 0.9 for isogenic replicates, > 0.8 for anisogenic replicates.
    '''
    if value['status'] in ['deleted', 'replaced', 'revoked']:
        return
    if value['output_type'] != 'gene quantifications':
        return
    # Only files produced by the ENCODE uniform processing pipeline.
    if value['lab'] != '/labs/encode-processing-pipeline/':
        return
    if 'analysis_step_version' not in value:
        detail = 'ENCODE Processed gene quantification file {} has no analysis step version'.format(
            value['@id'])
        yield AuditFailure('missing analysis step version', detail, level='DCC_ACTION')
        return
    if 'analysis_step' not in value['analysis_step_version']:
        detail = 'ENCODE Processed gene quantification file {} has no analysis step in {}'.format(
            value['@id'],
            value['analysis_step_version']['@id'])
        yield AuditFailure('missing analysis step', detail, level='DCC_ACTION')
        return
    if 'pipelines' not in value['analysis_step_version']['analysis_step']:
        detail = 'ENCODE Processed gene quantification file {} has no pipelines in {}'.format(
            value['@id'],
            value['analysis_step_version']['analysis_step']['@id'])
        yield AuditFailure('missing pipelines in analysis step', detail, level='DCC_ACTION')
        return
    quality_metrics = value.get('quality_metrics')
    if (quality_metrics is None) or (quality_metrics == []):
        detail = 'ENCODE Processed gene quantification file {} has no quality_metrics'.format(
            value['@id'])
        yield AuditFailure('missing quality metrics', detail, level='DCC_ACTION')
        return
    # The Spearman correlation comes from the MAD QC metric object.
    spearman_correlation = False
    for metric in quality_metrics:
        if 'Spearman correlation' in metric:
            spearman_correlation = metric['Spearman correlation']
            break
    if spearman_correlation is False:
        detail = 'ENCODE Processed gene quantification file {} '.format(value['@id']) + \
                 'has no MAD quality metric'
        yield AuditFailure('missing Spearman correlation', detail, level='DCC_ACTION')
        return
    # Pipelines whose gene quantifications are subject to this check.
    spearman_pipelines = ['RAMPAGE (paired-end, stranded)',
                          'Small RNA-seq single-end pipeline',
                          'RNA-seq of long RNAs (single-end, unstranded)',
                          'RNA-seq of long RNAs (paired-end, stranded)']
    # BUG FIX: required_value was previously assigned only when the dataset
    # carried a replication_type, raising NameError otherwise.  Default to the
    # isogenic threshold (0.9) and relax to 0.8 for anisogenic experiments.
    experiment_replication_type = 'isogenic'
    required_value = 0.9
    if 'dataset' in value and 'replication_type' in value['dataset']:
        if value['dataset']['replication_type'] in ['anisogenic',
                                                    'anisogenic, sex-matched and age-matched',
                                                    'anisogenic, age-matched',
                                                    'anisogenic, sex-matched']:
            experiment_replication_type = 'anisogenic'
            required_value = 0.8
    for pipeline in value['analysis_step_version']['analysis_step']['pipelines']:
        if pipeline['title'] in spearman_pipelines:
            if spearman_correlation < required_value:
                # Values within ~0.07 below the requirement are borderline
                # (WARNING); anything lower is NOT_COMPLIANT.
                # NOTE(review): the magic offset is inherited — confirm its origin.
                border_value = (required_value - 0.0713512755834)
                # BUG FIX: "correlaton" typo corrected, and the borderline
                # range now lists its lower bound first.
                detail = 'ENCODE processed gene quantification file {} '.format(value['@id']) + \
                         'has Spearman correlation of {} '.format(spearman_correlation) + \
                         ', for gene quantification files from an {}'.format(experiment_replication_type) + \
                         ' assay in the {} '.format(pipeline['title']) + \
                         'pipeline the preferred value is > {}, '.format(required_value) + \
                         'a borderline is between {} and {}'.format(border_value, required_value)
                if spearman_correlation > border_value:
                    yield AuditFailure('borderline spearman correlation', detail,
                                       level='WARNING')
                    return
                else:
                    yield AuditFailure('poor spearman correlation', detail,
                                       level='NOT_COMPLIANT')
                    return
|
from django.db import models
from captcha.conf import settings as captcha_settings
import datetime, sha
class CaptchaStore(models.Model):
    """
    Stores one captcha challenge/response pair, keyed by a SHA-1 hashkey,
    together with its expiration time.
    """
    # Text rendered to the user.
    challenge = models.CharField(blank=False, max_length=32)
    # Expected answer; stored lowercase for case-insensitive comparison.
    response = models.CharField(blank=False, max_length=32)
    # SHA-1 hex digest of challenge+response (40 chars), used as the lookup key.
    hashkey = models.CharField(blank=False, max_length=40, unique=True)
    expiration = models.DateTimeField(blank=False)

    def save(self, force_insert=False, force_update=False):
        """Fill in expiration/hashkey defaults before persisting."""
        self.response = self.response.lower()
        if not self.expiration:
            self.expiration = datetime.datetime.now() + datetime.timedelta(minutes=int(captcha_settings.CAPTCHA_TIMEOUT))
        if not self.hashkey:
            # BUG FIX: the `sha` module is deprecated since Python 2.5 and
            # emits a DeprecationWarning on 2.6; hashlib.sha1 produces the
            # identical hex digest.
            import hashlib
            self.hashkey = hashlib.sha1(str(self.challenge) + str(self.response)).hexdigest()
        super(CaptchaStore, self).save(force_insert=force_insert, force_update=force_update)

    def __unicode__(self):
        return self.challenge

    @classmethod
    def remove_expired(cls):
        """Delete every stored captcha whose expiration has passed."""
        cls.objects.filter(expiration__lte=datetime.datetime.now()).delete()
fixes Issue #7 - DeprecationWarning in Python 2.6
from django.db import models
from captcha.conf import settings as captcha_settings
import datetime, hashlib
class CaptchaStore(models.Model):
    """Stores one generated captcha challenge/response pair.

    `hashkey` uniquely identifies the captcha; `expiration` bounds its
    lifetime (CAPTCHA_TIMEOUT minutes from creation).
    """
    challenge = models.CharField(blank=False, max_length=32)
    response = models.CharField(blank=False, max_length=32)
    hashkey = models.CharField(blank=False, max_length=40,unique=True)
    expiration = models.DateTimeField(blank=False)
    def save(self,force_insert=False,force_update=False):
        # Responses are matched case-insensitively, so normalise on write.
        self.response = self.response.lower()
        if not self.expiration:
            self.expiration = datetime.datetime.now() + datetime.timedelta(minutes= int(captcha_settings.CAPTCHA_TIMEOUT))
        if not self.hashkey:
            # BUG FIX: hashlib.new('sha') relies on a provider alias that is
            # not guaranteed (and is gone in Python 3); hashlib.sha1 is the
            # explicit, always-available constructor with the same digest.
            self.hashkey = hashlib.sha1(str(self.challenge) + str(self.response)).hexdigest()
        super(CaptchaStore,self).save(force_insert=force_insert,force_update=force_update)
    def __unicode__(self):
        return self.challenge
    @classmethod
    def remove_expired(cls):
        # Prune every captcha whose expiration timestamp has passed.
        cls.objects.filter(expiration__lte=datetime.datetime.now()).delete()
|
from website.app import init_app
# Bootstrap the OSF application so the MODM models below are usable.
init_app()
# MIGRATE NODES
from website.models import Node as MODMNode
from website.models import Tag as MODMTag
from modularodm import Q
from osf_models.models import Node, User, Tag, Guid, Contributor
import pytz
from datetime import datetime
# Node-valued foreign-key fields found on MODM objects.
fk_node_fields = [
    'forked_from',
    'registered_from',
    'root',
    'parent_node',
    'template_node'
]
# Node-valued many-to-many fields.
m2m_node_fields = [
    'nodes',
]
# User-valued foreign-key fields.
fk_user_fields = [
    'registered_user',
    'creator',
    'merged_by'
]
# User-valued many-to-many fields.
m2m_user_fields = [
    'permissions',
    'recently_added',
    'users_watching_node',
    'contributors'
]
# Tag-valued many-to-many fields; 'system_tags' become Tag(system=True).
m2m_tag_fields = [
    'tags',
    'system_tags'
]
# Keys stripped from to_storage() output before Node.objects.create().
node_key_blacklist = [
    '__backrefs',
    '_version',
    'expanded',
    # foreign keys not yet implemented
    'logs',
    'primary_institution',
    'registration_approval',
    'alternative_citations',
    'registered_schema',
    'affiliated_institutions',
    'retraction',
    'embargo',
    'node_license',
] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Keys stripped before User.objects.create().
user_key_blacklist = ['__backrefs', '_version', 'affiliated_institutions', 'watched',
                      'external_accounts', ] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Keys stripped before Tag.objects.create().
tag_key_blacklist = ['_version', '__backrefs', ] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Global migration counters, reported by main().
nodes = 0
tags = 0
users = 0
def process_node_fk_fields(modm_object):
    """Resolve node-valued foreign-key fields of *modm_object* to Django
    Node objects.

    Returns a dict (field name -> Node or None) suitable for merging into
    the creation kwargs.
    """
    fk_nodes = {}
    for fk_node_field in fk_node_fields:
        value = getattr(modm_object, fk_node_field, None)
        if value is not None:
            # NOTE(review): only 'root' and 'forked_from' are resolved here;
            # 'registered_from', 'parent_node' and 'template_node' fall
            # through to the basestring cleanup in get_or_create_node --
            # confirm that is intended.
            if fk_node_field in ['root', 'forked_from', ] and value != modm_object:
                node = get_or_create_node(value)
                if node is not None:
                    fk_nodes[fk_node_field] = node
                else:
                    # Unresolvable reference: explicitly record None.
                    fk_nodes[fk_node_field] = None
    return fk_nodes
def process_node_m2m_fields(modm_object):
    """Resolve node-valued many-to-many fields of *modm_object* to Django
    Node objects.

    Returns a dict mapping field name -> list of Node objects; fields that
    are absent or resolve to nothing are omitted.
    """
    m2m_nodes = {}
    for m2m_node_field in m2m_node_fields:
        value = getattr(modm_object, m2m_node_field, None)
        if value is None:
            continue
        # A single reference is treated like a one-element list.
        references = value if isinstance(value, list) else [value]
        for reference in references:
            # BUG FIX: the original only called get_or_create_node once the
            # field already had an entry, and referenced `node` before
            # assignment (NameError) when creating the first entry.
            node = get_or_create_node(reference)
            if node is not None:
                m2m_nodes.setdefault(m2m_node_field, []).append(node)
    return m2m_nodes
def process_user_fk_fields(modm_object):
    """Resolve user-valued foreign-key fields of *modm_object* to Django
    User objects; absent or unresolvable references are omitted."""
    resolved = {}
    for field_name in fk_user_fields:
        referenced_user = getattr(modm_object, field_name, None)
        if referenced_user is None:
            continue
        migrated_user = get_or_create_user(referenced_user)
        if migrated_user is not None:
            resolved[field_name] = migrated_user
    return resolved
def process_user_m2m_fields(modm_object):
    """Resolve user-valued many-to-many fields of *modm_object* to Django
    User objects.

    Returns a dict mapping field name -> list of User objects; fields that
    are absent, not list-valued, or resolve to nothing are omitted.
    """
    m2m_users = {}
    for m2m_user_field in m2m_user_fields:
        value = getattr(modm_object, m2m_user_field, None)
        if not isinstance(value, list):
            continue
        for reference in value:
            # BUG FIX: the original only called get_or_create_user once the
            # field already had an entry, and referenced `user` before
            # assignment (NameError) when creating the first entry.
            user = get_or_create_user(reference)
            if user is not None:
                m2m_users.setdefault(m2m_user_field, []).append(user)
    return m2m_users
def process_tag_m2m_fields(modm_object):
    """Resolve tag-valued many-to-many fields of *modm_object* to Tag
    objects.

    Entries found under 'system_tags' are created with system=True. Returns
    a dict mapping field name -> list of Tag objects; fields that are
    absent, not list-valued, or resolve to nothing are omitted.
    """
    collected = {}
    for field_name in m2m_tag_fields:
        raw_tags = getattr(modm_object, field_name, None)
        if not isinstance(raw_tags, list):
            continue
        is_system = field_name == 'system_tags'
        for raw_tag in raw_tags:
            tag = get_or_create_tag(raw_tag, is_system)
            if tag is None:
                continue
            collected.setdefault(field_name, []).append(tag)
    return collected
def set_m2m_fields(obj, fields):
    """Attach many-to-many related objects to *obj* and save it.

    *fields* maps m2m attribute names on *obj* to lists of related objects;
    each list is attached through the related manager's add(*objs).
    """
    # The parameter was named `object`, shadowing the builtin; .items()
    # behaves identically to .iteritems() here and also runs on Python 3.
    for field_name, related_objects in fields.items():
        manager = getattr(obj, field_name)
        manager.add(*related_objects)
    obj.save()
def get_or_create_user(modm_user):
    """Return the Django User for *modm_user*, creating it on first sight.

    Lookup is by MODM guid; creation copies to_storage() minus the
    user_key_blacklist, resolves FK/M2M references recursively, and
    localises naive datetimes to UTC.
    """
    try:
        user = User.objects.get(_guid__guid=modm_user._id)
    except User.DoesNotExist:
        # Resolve related objects first (may recurse into node/user/tag creation).
        user_fk_nodes = process_node_fk_fields(modm_user)
        user_m2m_nodes = process_node_m2m_fields(modm_user)
        user_fk_users = process_user_fk_fields(modm_user)
        user_m2m_users = process_user_m2m_fields(modm_user)
        user_m2m_tags = process_tag_m2m_fields(modm_user)
        user_fields = {}
        # NOTE(review): raises Guid.DoesNotExist if the guid is missing --
        # unlike get_or_create_node this path is not guarded; confirm intended.
        user_fields['_guid'] = Guid.objects.get(guid=modm_user._id)
        user_fields.update(modm_user.to_storage())
        user_fields.update(user_fk_nodes)
        user_fields.update(user_fk_users)
        # Drop None values: NOT NULL columns reject them.
        user_fields = {k: v for k, v in user_fields.iteritems() if v is not None}
        for k, v in user_fields.iteritems():
            if isinstance(v, datetime):
                # MODM datetimes are naive; stamp them as UTC.
                user_fields[k] = pytz.utc.localize(v)
        user = User.objects.create(**{key: user_fields[key] for key in user_fields if key not in user_key_blacklist})
        global users
        users += 1
        # M2M rows can only be attached once the user row exists.
        set_m2m_fields(user, user_m2m_nodes)
        set_m2m_fields(user, user_m2m_users)
        set_m2m_fields(user, user_m2m_tags)
    return user
def get_or_create_tag(modm_tag, system=False):
if isinstance(modm_tag, unicode):
try:
tag = Tag.objects.get(_id=modm_tag, system=system)
except Tag.DoesNotExist:
tag = Tag.objects.create(lower=modm_tag.lower(), _id=modm_tag, system=system)
else:
if system is True:
# this should never happen.
print 'Encountered `unicode` tag that was not a system_tag {}'.format(modm_tag._id)
try:
tag = Tag.objects.get(_id=modm_tag._id, system=system)
except Tag.DoesNotExist:
tag_fields = modm_tag.to_storage()
cleaned_tag = {key: tag_fields[key] for key in tag_fields if key not in tag_key_blacklist}
cleaned_tag['system'] = system
tag = Tag.objects.create(**cleaned_tag)
global tags
tags += 1
return tag
def set_contributors(node, modm_node):
    """Create Contributor rows linking *node* to each MODM contributor.

    Visibility comes from visible_contributor_ids; admin/read/write flags
    come from the MODM node's permissions map.
    """
    for modm_contributor in modm_node.contributors:
        try:
            user = User.objects.get(_guid__guid=modm_contributor._id)
        except User.DoesNotExist:
            user = get_or_create_user(modm_contributor)
        visible = modm_contributor._id in modm_node.visible_contributor_ids
        admin = 'admin' in modm_node.permissions[modm_contributor._id]
        read = 'read' in modm_node.permissions[modm_contributor._id]
        write = 'write' in modm_node.permissions[modm_contributor._id]
        try:
            # get_or_create returns an (object, created) tuple.
            contributor = Contributor.objects.get_or_create(user=user, visible=visible, admin=admin, read=read, write=write,
                                                            node=node)
        except BaseException as ex:
            # Deliberate catch-all: the migration is best-effort; log and continue.
            print 'Caught exception creating contributor for node {} and user {}: {}'.format(node._id,
                                                                                             modm_contributor._id, ex)
def get_or_create_node(modm_node):
if modm_node is None:
return None
try:
# try and get the node
node = Node.objects.get(_guid__guid=modm_node._id)
except Node.DoesNotExist:
# if it doesn't exist, check to see if the guid does
try:
guid = Guid.objects.get(guid=modm_node._id)
except Guid.DoesNotExist:
# fail if the guid doesn't exist
print 'GUID {} DoesNotExist'.format(modm_node._id)
else:
fk_nodes = process_node_fk_fields(modm_node)
m2m_nodes = process_node_m2m_fields(modm_node)
fk_users = process_user_fk_fields(modm_node)
m2m_users = process_user_m2m_fields(modm_node)
m2m_tags = process_tag_m2m_fields(modm_node)
node_fields = {}
node_fields['_guid'] = guid
node_fields.update(modm_node.to_storage())
node_fields.update(fk_nodes)
node_fields.update(fk_users)
cleaned_node = {key: node_fields[key] for key in node_fields if key not in node_key_blacklist}
for k, v in cleaned_node.iteritems():
if isinstance(v, datetime):
cleaned_node[k] = pytz.utc.localize(v)
# this shouldn't need to be here, not sure why it has to be
cleaned_node['is_collection'] = cleaned_node.pop('is_folder')
cleaned_node['is_bookmark_collection'] = cleaned_node.pop('is_dashboard')
# remove empty fields, sql angry, sql smash
cleaned_node = {k: v for k, v in cleaned_node.iteritems() if v is not None}
for fk_field in fk_node_fields + fk_user_fields:
if fk_field in cleaned_node.keys() and isinstance(cleaned_node[fk_field], basestring):
bad = cleaned_node.pop(fk_field)
print 'Removed {} {} from node {} because it no longer exists.'.format(fk_field, bad, guid.guid)
node = Node.objects.create(**cleaned_node)
global nodes
nodes += 1
set_m2m_fields(node, m2m_nodes)
set_m2m_fields(node, m2m_users)
set_m2m_fields(node, m2m_tags)
set_contributors(node, modm_node)
return node
def main():
    """Migrate every MODM node (plus referenced users/tags) into Postgres."""
    modm_nodes = MODMNode.find()
    total = len(modm_nodes)
    count = 0
    print 'Doing {} Nodes...'.format(total)
    for modm_node in modm_nodes:
        # Called for its side effects; the return value is unused.
        noooood = get_or_create_node(modm_node)
        count += 1
        if count % 1000 == 0:
            # Progress marker every 1000 nodes.
            print count
    # Summary: created-object counters vs. the source/processed counts.
    print 'Nodes: {}, Users: {}, Tags: {}'.format(nodes, users, tags)
    print 'MODM: {}'.format(total)
    print 'PG: {}'.format(count)
More error catching
from website.app import init_app
# Bootstrap the OSF application so the MODM models below are usable.
init_app()
# MIGRATE NODES
from website.models import Node as MODMNode
from website.models import Tag as MODMTag
from modularodm import Q
from osf_models.models import Node, User, Tag, Guid, Contributor
import pytz
from datetime import datetime
# Node-valued foreign-key fields found on MODM objects.
fk_node_fields = [
    'forked_from',
    'registered_from',
    'root',
    'parent_node',
    'template_node'
]
# Node-valued many-to-many fields.
m2m_node_fields = [
    'nodes',
]
# User-valued foreign-key fields.
fk_user_fields = [
    'registered_user',
    'creator',
    'merged_by'
]
# User-valued many-to-many fields.
m2m_user_fields = [
    'permissions',
    'recently_added',
    'users_watching_node',
    'contributors'
]
# Tag-valued many-to-many fields; 'system_tags' become Tag(system=True).
m2m_tag_fields = [
    'tags',
    'system_tags'
]
# Keys stripped from to_storage() output before Node.objects.create().
node_key_blacklist = [
    '__backrefs',
    '_version',
    'expanded',
    # foreign keys not yet implemented
    'logs',
    'primary_institution',
    '_primary_institution',
    'institution_email_domains',
    'institution_domains',
    'registration_approval',
    'alternative_citations',
    'registered_schema',
    'affiliated_institutions',
    '_affiliated_institutions',
    'retraction',
    'embargo',
    'node_license',
] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Keys stripped before User.objects.create().
user_key_blacklist = ['__backrefs', '_version', 'affiliated_institutions', '_affiliated_institutions','watched',
                      'external_accounts', ] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Keys stripped before Tag.objects.create().
tag_key_blacklist = ['_version', '__backrefs', ] + m2m_node_fields + m2m_user_fields + m2m_tag_fields
# Global migration counters, reported by main().
nodes = 0
tags = 0
users = 0
def process_node_fk_fields(modm_object):
    """Resolve node-valued foreign-key fields of *modm_object* to Django
    Node objects.

    Returns a dict (field name -> Node or None) suitable for merging into
    the creation kwargs.
    """
    fk_nodes = {}
    for fk_node_field in fk_node_fields:
        value = getattr(modm_object, fk_node_field, None)
        if value is not None:
            # NOTE(review): only 'root' and 'forked_from' are resolved here;
            # 'registered_from', 'parent_node' and 'template_node' fall
            # through to the basestring cleanup in get_or_create_node --
            # confirm that is intended.
            if fk_node_field in ['root', 'forked_from', ] and value != modm_object:
                node = get_or_create_node(value)
                if node is not None:
                    fk_nodes[fk_node_field] = node
                else:
                    # Unresolvable reference: explicitly record None.
                    fk_nodes[fk_node_field] = None
    return fk_nodes
def process_node_m2m_fields(modm_object):
    """Resolve node-valued many-to-many fields of *modm_object* to Django
    Node objects.

    Returns a dict mapping field name -> list of Node objects; fields that
    are absent or resolve to nothing are omitted.
    """
    m2m_nodes = {}
    for m2m_node_field in m2m_node_fields:
        value = getattr(modm_object, m2m_node_field, None)
        if value is None:
            continue
        # A single reference is treated like a one-element list.
        references = value if isinstance(value, list) else [value]
        for reference in references:
            # BUG FIX: the original only called get_or_create_node once the
            # field already had an entry, and referenced `node` before
            # assignment (NameError) when creating the first entry.
            node = get_or_create_node(reference)
            if node is not None:
                m2m_nodes.setdefault(m2m_node_field, []).append(node)
    return m2m_nodes
def process_user_fk_fields(modm_object):
    """Resolve user-valued foreign-key fields of *modm_object* to Django
    User objects; absent or unresolvable references are omitted."""
    resolved = {}
    for field_name in fk_user_fields:
        referenced_user = getattr(modm_object, field_name, None)
        if referenced_user is None:
            continue
        migrated_user = get_or_create_user(referenced_user)
        if migrated_user is not None:
            resolved[field_name] = migrated_user
    return resolved
def process_user_m2m_fields(modm_object):
    """Resolve user-valued many-to-many fields of *modm_object* to Django
    User objects.

    Returns a dict mapping field name -> list of User objects; fields that
    are absent, not list-valued, or resolve to nothing are omitted.
    """
    m2m_users = {}
    for m2m_user_field in m2m_user_fields:
        value = getattr(modm_object, m2m_user_field, None)
        if not isinstance(value, list):
            continue
        for reference in value:
            # BUG FIX: the original only called get_or_create_user once the
            # field already had an entry, and referenced `user` before
            # assignment (NameError) when creating the first entry.
            user = get_or_create_user(reference)
            if user is not None:
                m2m_users.setdefault(m2m_user_field, []).append(user)
    return m2m_users
def process_tag_m2m_fields(modm_object):
    """Resolve tag-valued many-to-many fields of *modm_object* to Tag
    objects.

    Entries found under 'system_tags' are created with system=True. Returns
    a dict mapping field name -> list of Tag objects; fields that are
    absent, not list-valued, or resolve to nothing are omitted.
    """
    collected = {}
    for field_name in m2m_tag_fields:
        raw_tags = getattr(modm_object, field_name, None)
        if not isinstance(raw_tags, list):
            continue
        is_system = field_name == 'system_tags'
        for raw_tag in raw_tags:
            tag = get_or_create_tag(raw_tag, is_system)
            if tag is None:
                continue
            collected.setdefault(field_name, []).append(tag)
    return collected
def set_m2m_fields(obj, fields):
    """Attach many-to-many related objects to *obj* and save it.

    *fields* maps m2m attribute names on *obj* to lists of related objects;
    each list is attached through the related manager's add(*objs).
    """
    # The parameter was named `object`, shadowing the builtin; .items()
    # behaves identically to .iteritems() here and also runs on Python 3.
    for field_name, related_objects in fields.items():
        manager = getattr(obj, field_name)
        manager.add(*related_objects)
    obj.save()
def get_or_create_user(modm_user):
    """Return the Django User for *modm_user*, creating it on first sight.

    Lookup is by MODM guid; creation copies to_storage() minus the
    user_key_blacklist, resolves FK/M2M references recursively, and
    localises naive datetimes to UTC.
    """
    try:
        user = User.objects.get(_guid__guid=modm_user._id)
    except User.DoesNotExist:
        # Resolve related objects first (may recurse into node/user/tag creation).
        user_fk_nodes = process_node_fk_fields(modm_user)
        user_m2m_nodes = process_node_m2m_fields(modm_user)
        user_fk_users = process_user_fk_fields(modm_user)
        user_m2m_users = process_user_m2m_fields(modm_user)
        user_m2m_tags = process_tag_m2m_fields(modm_user)
        user_fields = {}
        # NOTE(review): raises Guid.DoesNotExist if the guid is missing --
        # unlike get_or_create_node this path is not guarded; confirm intended.
        user_fields['_guid'] = Guid.objects.get(guid=modm_user._id)
        user_fields.update(modm_user.to_storage())
        user_fields.update(user_fk_nodes)
        user_fields.update(user_fk_users)
        # Drop None values: NOT NULL columns reject them.
        user_fields = {k: v for k, v in user_fields.iteritems() if v is not None}
        for k, v in user_fields.iteritems():
            if isinstance(v, datetime):
                # MODM datetimes are naive; stamp them as UTC.
                user_fields[k] = pytz.utc.localize(v)
        user = User.objects.create(**{key: user_fields[key] for key in user_fields if key not in user_key_blacklist})
        global users
        users += 1
        # M2M rows can only be attached once the user row exists.
        set_m2m_fields(user, user_m2m_nodes)
        set_m2m_fields(user, user_m2m_users)
        set_m2m_fields(user, user_m2m_tags)
    return user
def get_or_create_tag(modm_tag, system=False):
    """Return the Tag for *modm_tag* (a MODM Tag object or a bare unicode
    name), creating it if necessary; *system* marks system tags.

    Returns None for empty/missing tag values.
    """
    # Empty strings / None mean "no tag".
    if not modm_tag:
        return None
    if isinstance(modm_tag, unicode):
        # Bare string tag: only the name is known.
        try:
            tag = Tag.objects.get(_id=modm_tag, system=system)
        except Tag.DoesNotExist:
            tag = Tag.objects.create(lower=modm_tag.lower(), _id=modm_tag, system=system)
    else:
        if system is True:
            # this should never happen.
            print 'Encountered `unicode` tag that was not a system_tag {}'.format(modm_tag._id)
        try:
            tag = Tag.objects.get(_id=modm_tag._id, system=system)
        except Tag.DoesNotExist:
            # Copy storage fields minus blacklisted migration bookkeeping.
            tag_fields = modm_tag.to_storage()
            cleaned_tag = {key: tag_fields[key] for key in tag_fields if key not in tag_key_blacklist}
            cleaned_tag['system'] = system
            tag = Tag.objects.create(**cleaned_tag)
            global tags
            tags += 1
    return tag
def set_contributors(node, modm_node):
    """Create Contributor rows linking *node* to each MODM contributor.

    Visibility comes from visible_contributor_ids; admin/read/write flags
    come from the MODM node's permissions map.
    """
    for modm_contributor in modm_node.contributors:
        try:
            user = User.objects.get(_guid__guid=modm_contributor._id)
        except User.DoesNotExist:
            user = get_or_create_user(modm_contributor)
        visible = modm_contributor._id in modm_node.visible_contributor_ids
        admin = 'admin' in modm_node.permissions[modm_contributor._id]
        read = 'read' in modm_node.permissions[modm_contributor._id]
        write = 'write' in modm_node.permissions[modm_contributor._id]
        try:
            # get_or_create returns an (object, created) tuple.
            contributor = Contributor.objects.get_or_create(user=user, visible=visible, admin=admin, read=read, write=write,
                                                            node=node)
        except BaseException as ex:
            # Deliberate catch-all: the migration is best-effort; log and continue.
            print 'Caught exception creating contributor for node {} and user {}: {}'.format(node._id,
                                                                                             modm_contributor._id, ex)
def get_or_create_node(modm_node):
if modm_node is None:
return None
try:
# try and get the node
node = Node.objects.get(_guid__guid=modm_node._id)
except Node.DoesNotExist:
# if it doesn't exist, check to see if the guid does
try:
guid = Guid.objects.get(guid=modm_node._id)
except Guid.DoesNotExist:
# fail if the guid doesn't exist
print 'GUID {} DoesNotExist'.format(modm_node._id)
else:
fk_nodes = process_node_fk_fields(modm_node)
m2m_nodes = process_node_m2m_fields(modm_node)
fk_users = process_user_fk_fields(modm_node)
m2m_users = process_user_m2m_fields(modm_node)
m2m_tags = process_tag_m2m_fields(modm_node)
node_fields = {}
node_fields['_guid'] = guid
node_fields.update(modm_node.to_storage())
node_fields.update(fk_nodes)
node_fields.update(fk_users)
cleaned_node = {key: node_fields[key] for key in node_fields if key not in node_key_blacklist}
for k, v in cleaned_node.iteritems():
if isinstance(v, datetime):
cleaned_node[k] = pytz.utc.localize(v)
# this shouldn't need to be here, not sure why it has to be
if 'is_folder' in cleaned_node:
cleaned_node['is_collection'] = cleaned_node.pop('is_folder')
if 'is_dashboard' in cleaned_node:
cleaned_node['is_bookmark_collection'] = cleaned_node.pop('is_dashboard')
# remove empty fields, sql angry, sql smash
cleaned_node = {k: v for k, v in cleaned_node.iteritems() if v is not None}
for fk_field in fk_node_fields + fk_user_fields:
if fk_field in cleaned_node.keys() and isinstance(cleaned_node[fk_field], basestring):
bad = cleaned_node.pop(fk_field)
print 'Removed {} {} from node {} because it no longer exists.'.format(fk_field, bad, guid.guid)
node = Node.objects.create(**cleaned_node)
global nodes
nodes += 1
set_m2m_fields(node, m2m_nodes)
set_m2m_fields(node, m2m_users)
set_m2m_fields(node, m2m_tags)
set_contributors(node, modm_node)
return node
def main():
    """Migrate every MODM node (plus referenced users/tags) into Postgres."""
    modm_nodes = MODMNode.find()
    total = len(modm_nodes)
    count = 0
    print 'Doing {} Nodes...'.format(total)
    for modm_node in modm_nodes:
        # Called for its side effects; the return value is unused.
        noooood = get_or_create_node(modm_node)
        count += 1
        if count % 1000 == 0:
            # Progress marker every 1000 nodes.
            print count
    # Summary: created-object counters vs. the source/processed counts.
    print 'Nodes: {}, Users: {}, Tags: {}'.format(nodes, users, tags)
    print 'MODM: {}'.format(total)
    print 'PG: {}'.format(count)
|
import math, time
import unittest
import numpy as N
""" This module extends the built-in unittest capabilities to facilitate
performing floating point comparisons on scalars and numpy arrays. It also
provides functions that automate building a test suite from all tests
present in the module, and running the tests in standard or debug mode.
To use this module, import it along with unittest [QUESTION: should this
module import everything from unittest into its namespace to make life
even easier?]. Subclass test cases from testutil.FPTestCase instead of
unittest.TestCase. Call testall or debug from this module:
import testutil
class FileTestCase(testutil.FPTestCase):
def setUp(self):
assorted_test_setup
def testone(self):
self.assertEqual(1,2)
def testtwo(self):
self.assertApproxNumpy(arr1,arr2,accuracy=1e-6)
def tearDown(self):
assorted_test_teardown
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
To run the tests in normal mode from the shell, then do the following:
python my_module.py
It will run all tests, success or failure, and print a summary of the results.
To run the tests in debug mode from the shell, do the following:
python -i my_module.py debug
>>> import pdb
>>> pdb.pm()
In debug mode, it will run until it encounters the first failed test, then
stop. Thus if you run with the -i switch, you can then import pdb and
proceed with the usual debugger commands.
If you prefer to run your tests from within the python interpreter,
you may import this module and call its testall() and debug() functions
explicitly. The modules you are testing must be visible in your sys.path.
>>>import testutil as U
>>> U.testall('ui_test')
"""
class LogTestCase(unittest.TestCase):
"""Override the .run() method to do some logging"""
def run(self, result=None):
if result is None: result = self.defaultTestResult()
result.startTest(self)
testMethod = getattr(self, self._testMethodName)
try:
try:
self.setUp()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
self.log('E')
return
ok = False
try:
testMethod()
ok = True
self.log("P")
except self.failureException:
result.addFailure(self, self._exc_info())
self.log("F")
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
self.log("E")
try:
self.tearDown()
except KeyboardInterrupt:
raise
except:
result.addError(self, self._exc_info())
ok = False
if ok: result.addSuccess(self)
finally:
result.stopTest(self)
def log(self,status,name=None):
"""Creates a log file containing the test name, status,and timestamp,
as well as any attributes in the tda and tra dictionaries if present.
Does not yet support fancy separating of multi-line items."""
if name is None:
try:
name=self.name
except AttributeError:
name=self.id()
try:
f=open(name+'.log','w')
except IOError, e:
print "Error opening log file: %s"%e.strerror
print "***No Logging Performed***"
return
f.write("%s:: Name=%s\n"%(name,name))
f.write("%s:: Status=%s\n"%(name,status))
f.write("%s:: Time=%s\n"%(name,time.asctime()))
try:
for k in self.tda:
f.write("%s:: tda_%s=%s\n"%(name,str(k),str(self.tda[k])))
except AttributeError:
pass
try:
for k in self.tra:
f.write("%s:: tra_%s=%s\n"%(name,str(k),str(self.tra[k])))
except AttributeError:
pass
if status == 'E':
f.write("%s:: ra_Trace=%s\n"%(name,str(self._exc_info())))
f.write("END\n")
f.close()
class FPTestCase(unittest.TestCase):
    ''' Base class to hold some functionality related to floating-point
    precision and array comparisons'''
    def assertApproxFP(self, testvalue, expected, accuracy=1.0e-5):
        ''' Assert relative difference |(test-expected)/expected| <= accuracy. '''
        result = math.fabs((testvalue - expected) / expected)
        # assertTrue is the modern spelling; failUnless is deprecated and
        # removed from recent unittest versions.
        self.assertTrue(result <= accuracy,"test: %g, ref: %g"%(testvalue,expected))
    def assertApproxNumpy(self, testarray, expected, accuracy=1.0e-5):
        ''' Assert elementwise relative difference <= accuracy.

        NOTE(review): divides by `expected`; zero entries there produce
        inf/nan -- confirm callers never compare against zeros.
        '''
        result=N.abs(testarray-expected)/expected
        self.assertTrue(N.alltrue(result <= accuracy))
    def assertEqualNumpy(self, testarray, expected):
        ''' Assert the arrays are exactly elementwise equal. '''
        self.assertTrue(N.alltrue(testarray == expected))
class LogTextRunner(unittest.TextTestRunner):
    """ Redefines the .run() method to call a .log() method on the test
    when it is complete. """
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                self.stream.write("failures=%d" % failed)
                # NOTE(review): assumes the top-level `test` object exposes
                # .log() (LogTestCase does; a plain TestSuite does not) --
                # confirm only LogTestCase instances are passed here.
                test.log("F")
            if errored:
                if failed: self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
                test.log("E")
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
            test.log("P")
        return result
def buildsuite(module):
    """Import *module* by name and return a unittest suite containing every
    test found in it."""
    return unittest.defaultTestLoader.loadTestsFromModule(__import__(module))
def debug(module):
    """Build the suite for *module* and run it in debug mode, so the first
    failure propagates and a pdb post-mortem is possible."""
    suite = buildsuite(module)
    suite.debug()
def testall(module, verb=0):
    """Build the suite for *module* and run it through TextTestRunner.

    *verb* is the runner verbosity (2 prints one line per test). A summary
    of tests run, errors and failures is always printed; the TestResult is
    returned for inspection.
    """
    runner = unittest.TextTestRunner(verbosity=verb)
    return runner.run(buildsuite(module))
def testlog(module, verb=0):
    """Like testall(), but runs through LogTextRunner so each test's .log()
    hook is invoked; returns the TestResult."""
    runner = LogTextRunner(verbosity=verb)
    return runner.run(buildsuite(module))
Typo in tra_Trace
git-svn-id: e4aa3cfa51ba591d1cd1dbbaf7e4a2c2b5d7a7ff@6765 fe389314-cf27-0410-b35b-8c050e845b92
import math, time
import unittest
import numpy as N
""" This module extends the built-in unittest capabilities to facilitate
performing floating point comparisons on scalars and numpy arrays. It also
provides functions that automate building a test suite from all tests
present in the module, and running the tests in standard or debug mode.
To use this module, import it along with unittest [QUESTION: should this
module import everything from unittest into its namespace to make life
even easier?]. Subclass test cases from testutil.FPTestCase instead of
unittest.TestCase. Call testall or debug from this module:
import testutil
class FileTestCase(testutil.FPTestCase):
def setUp(self):
assorted_test_setup
def testone(self):
self.assertEqual(1,2)
def testtwo(self):
self.assertApproxNumpy(arr1,arr2,accuracy=1e-6)
def tearDown(self):
assorted_test_teardown
if __name__ == '__main__':
if 'debug' in sys.argv:
testutil.debug(__name__)
else:
testutil.testall(__name__,2)
To run the tests in normal mode from the shell, then do the following:
python my_module.py
It will run all tests, success or failure, and print a summary of the results.
To run the tests in debug mode from the shell, do the following:
python -i my_module.py debug
>>> import pdb
>>> pdb.pm()
In debug mode, it will run until it encounters the first failed test, then
stop. Thus if you run with the -i switch, you can then import pdb and
proceed with the usual debugger commands.
If you prefer to run your tests from within the python interpreter,
you may import this module and call its testall() and debug() functions
explicitly. The modules you are testing must be visible in your sys.path.
>>>import testutil as U
>>> U.testall('ui_test')
"""
class LogTestCase(unittest.TestCase):
    """Override the .run() method to do some logging"""
    # Outcome codes written by log(): 'P' pass, 'F' failure, 'E' error.
    def run(self, result=None):
        # Mirrors unittest.TestCase.run, adding a log() call per outcome.
        if result is None: result = self.defaultTestResult()
        result.startTest(self)
        testMethod = getattr(self, self._testMethodName)
        try:
            try:
                self.setUp()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
                self.log('E')
                return
            ok = False
            try:
                testMethod()
                ok = True
                self.log("P")
            except self.failureException:
                result.addFailure(self, self._exc_info())
                self.log("F")
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
                self.log("E")
            try:
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                result.addError(self, self._exc_info())
                ok = False
            if ok: result.addSuccess(self)
        finally:
            result.stopTest(self)
    def log(self,status,name=None):
        """Creates a log file containing the test name, status,and timestamp,
        as well as any attributes in the tda and tra dictionaries if present.
        Does not yet support fancy separating of multi-line items."""
        if name is None:
            try:
                name=self.name
            except AttributeError:
                name=self.id()
        try:
            f=open(name+'.log','w')
        except IOError, e:
            print "Error opening log file: %s"%e.strerror
            print "***No Logging Performed***"
            return
        f.write("%s:: Name=%s\n"%(name,name))
        f.write("%s:: Status=%s\n"%(name,status))
        f.write("%s:: Time=%s\n"%(name,time.asctime()))
        # Test-definition attributes (tda) and true-result attributes (tra)
        # are optional dictionaries set by the test itself.
        try:
            for k in self.tda:
                f.write("%s:: tda_%s=%s\n"%(name,str(k),str(self.tda[k])))
        except AttributeError:
            pass
        try:
            for k in self.tra:
                f.write("%s:: tra_%s=%s\n"%(name,str(k),str(self.tra[k])))
        except AttributeError:
            pass
        if status == 'E':
            # On error, record the traceback as a tra_ attribute.
            f.write("%s:: tra_Trace=%s\n"%(name,str(self._exc_info())))
        f.write("END\n")
        f.close()
class FPTestCase(unittest.TestCase):
    ''' Base class to hold some functionality related to floating-point
    precision and array comparisons'''
    def assertApproxFP(self, testvalue, expected, accuracy=1.0e-5):
        ''' Assert relative difference |(test-expected)/expected| <= accuracy. '''
        result = math.fabs((testvalue - expected) / expected)
        # assertTrue is the modern spelling; failUnless is deprecated and
        # removed from recent unittest versions.
        self.assertTrue(result <= accuracy,"test: %g, ref: %g"%(testvalue,expected))
    def assertApproxNumpy(self, testarray, expected, accuracy=1.0e-5):
        ''' Assert elementwise relative difference <= accuracy.

        NOTE(review): divides by `expected`; zero entries there produce
        inf/nan -- confirm callers never compare against zeros.
        '''
        result=N.abs(testarray-expected)/expected
        self.assertTrue(N.alltrue(result <= accuracy))
    def assertEqualNumpy(self, testarray, expected):
        ''' Assert the arrays are exactly elementwise equal. '''
        self.assertTrue(N.alltrue(testarray == expected))
class LogTextRunner(unittest.TextTestRunner):
    """ Redefines the .run() method to call a .log() method on the test
    when it is complete. """
    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        startTime = time.time()
        test(result)
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        if not result.wasSuccessful():
            self.stream.write("FAILED (")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                self.stream.write("failures=%d" % failed)
                # NOTE(review): assumes the top-level `test` object exposes
                # .log() (LogTestCase does; a plain TestSuite does not) --
                # confirm only LogTestCase instances are passed here.
                test.log("F")
            if errored:
                if failed: self.stream.write(", ")
                self.stream.write("errors=%d" % errored)
                test.log("E")
            self.stream.writeln(")")
        else:
            self.stream.writeln("OK")
            test.log("P")
        return result
def buildsuite(module):
    """Import *module* by name and return a unittest suite containing every
    test found in it."""
    return unittest.defaultTestLoader.loadTestsFromModule(__import__(module))
def debug(module):
    """Build the suite for *module* and run it in debug mode, so the first
    failure propagates and a pdb post-mortem is possible."""
    suite = buildsuite(module)
    suite.debug()
def testall(module, verb=0):
    """Build the suite for *module* and run it through TextTestRunner.

    *verb* is the runner verbosity (2 prints one line per test). A summary
    of tests run, errors and failures is always printed; the TestResult is
    returned for inspection.
    """
    runner = unittest.TextTestRunner(verbosity=verb)
    return runner.run(buildsuite(module))
def testlog(module, verb=0):
    """Like testall(), but runs through LogTextRunner so each test's .log()
    hook is invoked; returns the TestResult."""
    runner = LogTextRunner(verbosity=verb)
    return runner.run(buildsuite(module))
|
#!/usr/bin/env python
import numpy as np
import argparse
import os
from SULI import which
from SULI.execute_command import execute_command
from SULI.work_within_directory import work_within_directory
from astropy.io import fits
if __name__ == "__main__":
parser = argparse.ArgumentParser('Submit transient search to the farm at Stanford')
# add the arguments needed to the parser
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--date', help='date specifying file to load')
group.add_argument("--src_dir", help="Directory containing input data to be searched", type=str)
parser.add_argument("--irf", help="Instrument response function name to be used", type=str, required=True)
parser.add_argument("--probability", help="Probability of null hypothesis", type=float, required=True)
parser.add_argument("--min_dist", help="Distance above which regions are not considered to overlap", type=float,
required=True)
parser.add_argument("--res_dir", help="Directory where to put the results and logs for the simulation",
required=False, type=str, default=os.getcwd())
parser.add_argument('--test', dest='test_run', action='store_true')
parser.set_defaults(test_run=False)
# parse the arguments
args = parser.parse_args()
# get output directory from parser
res_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(args.res_dir)))
# Check that the output directory exists
if not os.path.exists(res_dir):
raise RuntimeError("Directory %s does not exist" % res_dir)
# get src directory from parser
src_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(args.src_dir)))
# Check that the simulation directory exists
if not os.path.exists(src_dir):
raise RuntimeError("Directory %s does not exist" % src_dir)
# Go to output directory
with work_within_directory(res_dir):
# Create logs directory if it does not exist
if not os.path.exists('logs'):
os.mkdir('logs')
# Create generated_data directory if it does not exist
if not os.path.exists('generated_data'):
os.mkdir('generated_data')
# Generate universal command line parameters
log_path = os.path.abspath('logs')
out_path = os.path.abspath('generated_data')
exe_path = which.which('search_on_farm.py')
# if using simulated data:
if args.src_dir:
# get list of ft1 files
ft1_files = [f for f in os.listdir(src_dir) if (str(os.path.join(src_dir, f)).endswith(('ft1.fits',
'ft1.fit')))]
# (is basically glob, should be changed to glob at some point)
# get list of ft2 files
ft2_files = [f for f in os.listdir(src_dir) if (str(os.path.join(src_dir, f)).endswith(('ft2.fits',
'ft2.fit')))]
# sort them
def ft_sort(in_list):
out_list = in_list.split("_")[0]
return float(out_list)
ft1_files = sorted(ft1_files, key=ft_sort)
ft2_files = sorted(ft2_files, key=ft_sort)
print '\nFound %s ft1 files\nFound %s ft2 files\n' % (len(ft1_files), len(ft2_files))
# make sure each ft1/ft2 is part of a pair
if len(ft1_files) != len(ft2_files):
# determine which type there is more of for error msg
if len(ft1_files) > len(ft2_files):
x = 'ft1 files'
y = 'ft2 files'
else:
x = 'ft2 files'
y = 'ft1 files'
raise RuntimeError('There are more %s than %s' % (x, y))
# make sure pairs match
for i in range(len(ft1_files)):
with fits.open(os.path.join(src_dir, ft1_files[i])) as fits_file:
# Check the start and stop in the binary table
ft1_times = fits_file['EVENTS'].data.field("TIME")
ft1_starts = fits_file['GTI'].data.field("START")
ft1_stops = fits_file['GTI'].data.field("STOP")
with fits.open(os.path.join(src_dir, ft2_files[i])) as fits_file:
# Check the start and stop in the binary table
ft2_starts = fits_file['SC_DATA'].data.field("START")
ft2_stops = fits_file['SC_DATA'].data.field("STOP")
if ft2_starts.min() - min(ft1_starts.min(), ft1_times.min()) > 0:
raise RuntimeError("Mismatch in ft pair %s (FT2 file starts after the start of the FT1 file)" % i)
if ft2_stops.max() - max(ft1_stops.max(), ft1_stops.max()) < 0:
raise RuntimeError("Mismatch in ft pair %s (FT2 file stops before the end of the FT1 file)" % i)
# generate command line
def sim_cmd_line(ft1, ft2, jobid):
this_cmd_line = "qsub -l vmem=30gb -o %s/%s.out -e %s/%s.err -V -F '--inp_fts %s,%s --irf %s " \
"--probability %s --min_dist %s --out_dir %s' %s" % (log_path, jobid, log_path,
jobid, ft1, ft2, args.irf,
args.probability, args.min_dist,
out_path, exe_path)
return this_cmd_line
# iterate over input directory, calling search on each pair of fits
for i in range(len(ft1_files)):
this_ft1 = src_dir + '/' + ft1_files[i]
this_ft2 = src_dir + '/' + ft2_files[i]
this_id = ft1_files[i]
cmd_line = sim_cmd_line(this_ft1, this_ft2, this_id)
if not args.test_run:
execute_command(cmd_line)
else:
def rl_cmd_line(start):
this_cmd_line = "qsub -l vmem=10gb -o %s/%s.out -e %s/%s.err -V -F '--date %s --irf %s " \
"--probability %s --min_dist %s --out_dir %s' %s" % (log_path, start, log_path,
start, start, args.irf,
args.probability, args.min_dist,
out_path, exe_path)
return this_cmd_line
# A year of Fermi data
dates = np.arange(args.date, args.date + (365.0 * 86400.0), 86400.0)
for this_tstart in dates:
cmd_line = rl_cmd_line(this_tstart)
if not args.test_run:
execute_command(cmd_line)
Changed the sort key in submit_a_search to match the input filename format 'simulate_[time]_ft?.fit'
#!/usr/bin/env python
import numpy as np
import argparse
import os
from SULI import which
from SULI.execute_command import execute_command
from SULI.work_within_directory import work_within_directory
from astropy.io import fits
if __name__ == "__main__":
parser = argparse.ArgumentParser('Submit transient search to the farm at Stanford')
# add the arguments needed to the parser
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--date', help='date specifying file to load')
group.add_argument("--src_dir", help="Directory containing input data to be searched", type=str)
parser.add_argument("--irf", help="Instrument response function name to be used", type=str, required=True)
parser.add_argument("--probability", help="Probability of null hypothesis", type=float, required=True)
parser.add_argument("--min_dist", help="Distance above which regions are not considered to overlap", type=float,
required=True)
parser.add_argument("--res_dir", help="Directory where to put the results and logs for the search",
required=False, type=str, default=os.getcwd())
parser.add_argument("--job_size", help="Number of jobs to submit at a time", required=False, type=int, default=20)
parser.add_argument('--test', dest='test_run', action='store_true')
parser.set_defaults(test_run=False)
# parse the arguments
args = parser.parse_args()
# get output directory from parser
res_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(args.res_dir)))
# Check that the output directory exists
if not os.path.exists(res_dir):
raise RuntimeError("Directory %s does not exist" % res_dir)
# get src directory from parser
src_dir = os.path.abspath(os.path.expandvars(os.path.expanduser(args.src_dir)))
# Check that the simulation directory exists
if not os.path.exists(src_dir):
raise RuntimeError("Directory %s does not exist" % src_dir)
# Go to output directory
with work_within_directory(res_dir):
# Create logs directory if it does not exist
if not os.path.exists('logs'):
os.mkdir('logs')
# Create generated_data directory if it does not exist
if not os.path.exists('generated_data'):
os.mkdir('generated_data')
# Generate universal command line parameters
log_path = os.path.abspath('logs')
out_path = os.path.abspath('generated_data')
exe_path = which.which('search_on_farm.py')
# if using simulated data:
if args.src_dir:
# get list of ft1 files
ft1_files = [f for f in os.listdir(src_dir) if (str(os.path.join(src_dir, f)).endswith(('ft1.fits',
'ft1.fit')))]
# (is basically glob, should be changed to glob at some point)
# get list of ft2 files
ft2_files = [f for f in os.listdir(src_dir) if (str(os.path.join(src_dir, f)).endswith(('ft2.fits',
'ft2.fit')))]
# sort them
def ft_sort(in_list):
out_list = in_list.split("_")[1]
return float(out_list)
ft1_files = sorted(ft1_files, key=ft_sort)
ft2_files = sorted(ft2_files, key=ft_sort)
print '\nFound %s ft1 files\nFound %s ft2 files\n' % (len(ft1_files), len(ft2_files))
# make sure each ft1/ft2 is part of a pair
if len(ft1_files) != len(ft2_files):
# determine which type there is more of for error msg
if len(ft1_files) > len(ft2_files):
x = 'ft1 files'
y = 'ft2 files'
else:
x = 'ft2 files'
y = 'ft1 files'
raise RuntimeError('There are more %s than %s' % (x, y))
# make sure pairs match
for i in range(len(ft1_files)):
with fits.open(os.path.join(src_dir, ft1_files[i])) as fits_file:
# Check the start and stop in the binary table
ft1_times = fits_file['EVENTS'].data.field("TIME")
ft1_starts = fits_file['GTI'].data.field("START")
ft1_stops = fits_file['GTI'].data.field("STOP")
with fits.open(os.path.join(src_dir, ft2_files[i])) as fits_file:
# Check the start and stop in the binary table
ft2_starts = fits_file['SC_DATA'].data.field("START")
ft2_stops = fits_file['SC_DATA'].data.field("STOP")
if ft2_starts.min() - min(ft1_starts.min(), ft1_times.min()) > 0:
raise RuntimeError("Mismatch in ft pair %s (FT2 file starts after the start of the FT1 file)" % i)
if ft2_stops.max() - max(ft1_stops.max(), ft1_stops.max()) < 0:
raise RuntimeError("Mismatch in ft pair %s (FT2 file stops before the end of the FT1 file)" % i)
# generate command line
def sim_cmd_line(ft1, ft2, jobid):
this_cmd_line = "qsub -l vmem=30gb -o %s/%s.out -e %s/%s.err -V -F '--inp_fts %s,%s --irf %s " \
"--probability %s --min_dist %s --out_dir %s' %s" % (log_path, jobid, log_path,
jobid, ft1, ft2, args.irf,
args.probability, args.min_dist,
out_path, exe_path)
return this_cmd_line
# iterate over input directory, calling search on each pair of fits
for i in range(len(ft1_files)):
this_ft1 = src_dir + '/' + ft1_files[i]
this_ft2 = src_dir + '/' + ft2_files[i]
this_id = ft1_files[i]
cmd_line = sim_cmd_line(this_ft1, this_ft2, this_id)
if not args.test_run:
execute_command(cmd_line)
else:
def rl_cmd_line(start):
this_cmd_line = "qsub -l vmem=10gb -o %s/%s.out -e %s/%s.err -V -F '--date %s --irf %s " \
"--probability %s --min_dist %s --out_dir %s' %s" % (log_path, start, log_path,
start, start, args.irf,
args.probability, args.min_dist,
out_path, exe_path)
return this_cmd_line
# A year of Fermi data
dates = np.arange(args.date, args.date + (365.0 * 86400.0), 86400.0)
for this_tstart in dates:
cmd_line = rl_cmd_line(this_tstart)
if not args.test_run:
execute_command(cmd_line)
|
"""
Django settings for test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '50l^23v!p7$mdlnd5v#ag5%lya9t=a%$51co6@rk50gl53+(n8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"watson",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
'NAME': os.environ.get("DB_NAME", os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.environ.get("DB_USER", ""),
'PASSWORD': os.environ.get("DB_PASSWORD", ""),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
Attempting to fix test project MySQL settings
"""
Django settings for test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '50l^23v!p7$mdlnd5v#ag5%lya9t=a%$51co6@rk50gl53+(n8'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
"watson",
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
'NAME': os.environ.get("DB_NAME", os.path.join(BASE_DIR, 'db.sqlite3')),
'USER': os.environ.get("DB_USER", ""),
'PASSWORD': os.environ.get("DB_PASSWORD", ""),
}
}
if DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
DATABASES["default"]["OPTIONS"] = "SET storage_engine=INNODB,character_set_connection=utf8,collation_connection=utf8_general_ci"
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
|
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2013 Cássio Paixão
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
import heapq
import sys
from multiprocessing import Pool
from core.strategies import SchedulingStrategy
class GabayZaourarSlimAlgorithm(SchedulingStrategy):
    """Shared machinery for the Gabay & Zaourar best-fit-decreasing (BFD)
    packing heuristics: scalar size measures for items/bins plus heap helpers.

    Sizes are scalarized as ``alpha_cpu*cpu + alpha_mem*mem``; the
    coefficients come from one of the module-level ``frac_*`` functions,
    selected in :meth:`initialize`.
    """

    def initialize(self):
        """Read algorithm parameters and pick the coefficient function."""
        coeficient = self._config.params['bfd_measure_coeficient_function']
        self.processors_to_use = int(self._config.params['bfd_processors_to_use'])
        self.parallel_serial_threshold = int(self._config.params['bfd_parallel_serial_threshold'])
        if coeficient == '1/C(j)':
            self.coeficient_function = frac_1_cj
        elif coeficient == '1/R(j)':
            self.coeficient_function = frac_1_rj
        elif coeficient == 'R(j)/C(j)':
            self.coeficient_function = frac_rj_cj
        else:
            # Fix: ``raise '<string>'`` raises a string, which is a TypeError
            # in Python 3 (and long-deprecated in Python 2); use a real
            # exception type.
            raise ValueError('Set bfd_measure_coeficient_function param with one of '
                             'these values: 1/C(j), 1/R(j) or R(j)/C(j).')

    def initialize_item_heap(self, items):
        """Build a max-heap of items (biggest size popped first).

        heapq is a min-heap, so sizes are stored as (float_max - size); the
        current heap length is a tie-breaker so item objects are never
        compared directly.
        """
        self._item_heap = list()
        for item in items:
            heapq.heappush(self._item_heap, (sys.float_info.max - self.s_i[item.name], len(self._item_heap), item))

    def initialize_bin_heap(self, bins):
        """Build a min-heap of bins (smallest size popped first)."""
        self._bin_heap = list()
        for bin in bins:
            heapq.heappush(self._bin_heap, (self.s_b[bin.name], len(self._bin_heap), bin))

    def compute_sizes(self, items, bins):
        """Compute scalar sizes for every item (``s_i``) and bin (``s_b``).

        Coefficients are published as module globals so the module-level
        worker functions can read them (Pool workers inherit them on
        fork-based platforms). Parallelism is only used above
        ``parallel_serial_threshold`` because Pool startup dominates for
        small inputs.
        """
        global alpha_cpu, alpha_mem, beta_cpu, beta_mem
        alpha = self.coeficient_function(items, bins)
        alpha_cpu, alpha_mem = alpha[0], alpha[1]
        # bins currently use the same coefficients as items
        beta = alpha
        beta_cpu, beta_mem = beta[0], beta[1]
        self.s_i = {}
        self.s_b = {}
        if len(bins) + len(items) > self.parallel_serial_threshold:
            with Pool(processes=self.processors_to_use) as pool:
                item_values = pool.map(_gabay_zaourar_compute_item_size, items)
                for item_value in item_values:
                    self.s_i[item_value[0]] = item_value[1]
                bin_values = pool.map(_gabay_zaourar_compute_bin_size, bins)
                for bin_value in bin_values:
                    self.s_b[bin_value[0]] = bin_value[1]
        else:
            for item in items:
                self.s_i[item.name] = alpha_cpu*item.cpu + alpha_mem*item.mem
            for bin in bins:
                self.s_b[bin.name] = beta_cpu*bin.cpu_free + beta_mem*bin.mem_free

    def get_biggest_item(self):
        """Pop and return the biggest remaining item, or None when empty."""
        return heapq.heappop(self._item_heap)[2] if len(self._item_heap) > 0 else None

    def get_smallest_bin(self):
        """Pop and return the smallest remaining bin, or None when empty."""
        return heapq.heappop(self._bin_heap)[2] if len(self._bin_heap) > 0 else None

    def get_smallest_feasible_bin(self, item, bins):
        """Return the smallest bin (by s_b) that can host *item*, or None."""
        feasible_bins = (bin for bin in bins if fits(item, bin))
        try:
            return min(feasible_bins, key=self.size_of_bin)
        except ValueError:
            # min() raises ValueError when no bin is feasible
            return None

    def get_biggest_feasible_item(self, items, bin):
        """Return the biggest item (by s_i) that fits in *bin*, or None."""
        feasible_items = (item for item in items if fits(item, bin))
        try:
            return max(feasible_items, key=self.size_of_item)
        except ValueError:
            return None

    def size_of_bin(self, bin):
        # scalar size computed by compute_sizes
        return self.s_b[bin.name]

    def size_of_item(self, item):
        return self.s_i[item.name]
class BFDItemCentricSlim(GabayZaourarSlimAlgorithm):
    """Item-centric BFD: repeatedly place the biggest pending VM into the
    smallest feasible server, powering servers on only when required."""

    @SchedulingStrategy.schedule_vms_strategy
    def schedule_vms(self, vms):
        """Schedule every VM in *vms*, biggest first."""
        pending = list(vms)
        manager = self._config.resource_manager
        self.compute_sizes(pending, manager.all_servers())
        self.initialize_item_heap(pending)
        while pending:
            item = self.get_biggest_item()
            # Prefer servers that are already powered on.
            target = self.get_smallest_feasible_bin(item, manager.online_servers())
            if target is None:
                # Fall back to an offline server, powering it on when found.
                target = self.get_smallest_feasible_bin(item, manager.offline_servers())
                if target is not None:
                    manager.turn_on_server(target.name)
            if target is not None:
                manager.schedule_vm_at_server(item, target.name)
            # The item leaves the pending list even when no server could host
            # it, so the loop always terminates.
            pending.remove(item)
class BFDBinCentricSlim(GabayZaourarSlimAlgorithm):
    """Bin-centric BFD: fill the smallest server with the biggest feasible
    VMs before moving on to the next server."""

    @SchedulingStrategy.schedule_vms_strategy
    def schedule_vms(self, vms):
        """Pack *vms* into online servers first, then into offline ones."""
        remaining_vms = self.schedule_vms_at_servers(vms, self._config.resource_manager.online_servers())
        if len(remaining_vms) > 0:
            # Fix: the second pass must receive only the VMs that are still
            # unpacked; passing the full original list would schedule
            # already-placed VMs a second time.
            remaining_vms = self.schedule_vms_at_servers(remaining_vms,
                                                         self._config.resource_manager.offline_servers(),
                                                         active_bins=False)

    def schedule_vms_at_servers(self, vms, bins, active_bins=True):
        """Greedily fill each bin (smallest first) with the biggest feasible
        items and return the items that could not be placed.

        When ``active_bins`` is False, a server is powered on the first time
        an item is placed on it.
        """
        list_of_bins = list(bins)
        unpacked_items = list(vms)
        self.compute_sizes(unpacked_items, list_of_bins)
        # Fix: initialize_bin_heap requires the bins to heapify; calling it
        # without arguments raised a TypeError.
        self.initialize_bin_heap(list_of_bins)
        while len(list_of_bins) > 0:
            smallest_bin = self.get_smallest_bin()
            have_used_bin = False
            item = self.get_biggest_feasible_item(unpacked_items, smallest_bin)
            while item is not None:
                if not have_used_bin and not active_bins:
                    self._config.resource_manager.turn_on_server(smallest_bin.name)
                self._config.resource_manager.schedule_vm_at_server(item, smallest_bin.name)
                have_used_bin = True
                unpacked_items.remove(item)
                item = self.get_biggest_feasible_item(unpacked_items, smallest_bin)
            list_of_bins.remove(smallest_bin)
        return unpacked_items
# Smallest denominator allowed, to guard divisions by zero below.
_EPSILON = 0.0000000001


def fits(item, bin):
    """Return True when *bin* has enough free CPU and memory for *item*."""
    enough_cpu = bin.cpu_free >= item.cpu
    enough_mem = bin.mem_free >= item.mem
    return enough_cpu and enough_mem


def frac_1_cj(items, bins):
    """Coefficients 1/C(j): inverse of the total remaining bin capacity."""
    cpu_capacity = sum([b.cpu - b.cpu_alloc for b in bins])
    mem_capacity = sum([b.mem - b.mem_alloc for b in bins])
    return (1.0 / max(cpu_capacity, _EPSILON),
            1.0 / max(mem_capacity, _EPSILON))


def frac_1_rj(items, bins):
    """Coefficients 1/R(j): inverse of the total item requirements."""
    cpu_required = sum([i.cpu for i in items])
    mem_required = sum([i.mem for i in items])
    return (1.0 / max(cpu_required, _EPSILON),
            1.0 / max(mem_required, _EPSILON))


def frac_rj_cj(items, bins):
    """Coefficients R(j)/C(j), built from the two fractions above.

    (1/C(j))/(1/R(j)) = (1/C(j))*R(j) = R(j)/C(j)
    """
    inv_capacity = frac_1_cj(items, bins)
    inv_required = frac_1_rj(items, bins)
    return (inv_capacity[0] / inv_required[0],
            inv_capacity[1] / inv_required[1])


# Filled in by GabayZaourarSlimAlgorithm.compute_sizes before the worker
# functions below are invoked (Pool workers inherit them on fork platforms).
alpha_cpu = alpha_mem = beta_cpu = beta_mem = None


def _gabay_zaourar_compute_item_size(item):
    """Return (name, scalar size) for an item using the alpha coefficients."""
    return (item.name, alpha_cpu * item.cpu + alpha_mem * item.mem)


def _gabay_zaourar_compute_bin_size(bin):
    """Return (name, scalar size) for a bin using the beta coefficients."""
    return (bin.name, beta_cpu * bin.cpu_free + beta_mem * bin.mem_free)
Prevent creation of intermediate lists in the coefficient functions (use generator expressions over the free capacities)
###############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2013 Cássio Paixão
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###############################################################################
import heapq
import sys
from multiprocessing import Pool
from core.strategies import SchedulingStrategy
class GabayZaourarSlimAlgorithm(SchedulingStrategy):
    """Shared machinery for the Gabay & Zaourar best-fit-decreasing (BFD)
    packing heuristics: scalar size measures for items/bins plus heap helpers.

    Sizes are scalarized as ``alpha_cpu*cpu + alpha_mem*mem``; the
    coefficients come from one of the module-level ``frac_*`` functions,
    selected in :meth:`initialize`.
    """

    def initialize(self):
        """Read algorithm parameters and pick the coefficient function."""
        coeficient = self._config.params['bfd_measure_coeficient_function']
        self.processors_to_use = int(self._config.params['bfd_processors_to_use'])
        self.parallel_serial_threshold = int(self._config.params['bfd_parallel_serial_threshold'])
        if coeficient == '1/C(j)':
            self.coeficient_function = frac_1_cj
        elif coeficient == '1/R(j)':
            self.coeficient_function = frac_1_rj
        elif coeficient == 'R(j)/C(j)':
            self.coeficient_function = frac_rj_cj
        else:
            # Fix: ``raise '<string>'`` raises a string, which is a TypeError
            # in Python 3 (and long-deprecated in Python 2); use a real
            # exception type.
            raise ValueError('Set bfd_measure_coeficient_function param with one of '
                             'these values: 1/C(j), 1/R(j) or R(j)/C(j).')

    def initialize_item_heap(self, items):
        """Build a max-heap of items (biggest size popped first).

        heapq is a min-heap, so sizes are stored as (float_max - size); the
        current heap length is a tie-breaker so item objects are never
        compared directly.
        """
        self._item_heap = list()
        for item in items:
            heapq.heappush(self._item_heap, (sys.float_info.max - self.s_i[item.name], len(self._item_heap), item))

    def initialize_bin_heap(self, bins):
        """Build a min-heap of bins (smallest size popped first)."""
        self._bin_heap = list()
        for bin in bins:
            heapq.heappush(self._bin_heap, (self.s_b[bin.name], len(self._bin_heap), bin))

    def compute_sizes(self, items, bins):
        """Compute scalar sizes for every item (``s_i``) and bin (``s_b``).

        Coefficients are published as module globals so the module-level
        worker functions can read them (Pool workers inherit them on
        fork-based platforms). Parallelism is only used above
        ``parallel_serial_threshold`` because Pool startup dominates for
        small inputs.
        """
        global alpha_cpu, alpha_mem, beta_cpu, beta_mem
        alpha = self.coeficient_function(items, bins)
        alpha_cpu, alpha_mem = alpha[0], alpha[1]
        # bins currently use the same coefficients as items
        beta = alpha
        beta_cpu, beta_mem = beta[0], beta[1]
        self.s_i = {}
        self.s_b = {}
        if len(bins) + len(items) > self.parallel_serial_threshold:
            with Pool(processes=self.processors_to_use) as pool:
                item_values = pool.map(_gabay_zaourar_compute_item_size, items)
                for item_value in item_values:
                    self.s_i[item_value[0]] = item_value[1]
                bin_values = pool.map(_gabay_zaourar_compute_bin_size, bins)
                for bin_value in bin_values:
                    self.s_b[bin_value[0]] = bin_value[1]
        else:
            for item in items:
                self.s_i[item.name] = alpha_cpu*item.cpu + alpha_mem*item.mem
            for bin in bins:
                self.s_b[bin.name] = beta_cpu*bin.cpu_free + beta_mem*bin.mem_free

    def get_biggest_item(self):
        """Pop and return the biggest remaining item, or None when empty."""
        return heapq.heappop(self._item_heap)[2] if len(self._item_heap) > 0 else None

    def get_smallest_bin(self):
        """Pop and return the smallest remaining bin, or None when empty."""
        return heapq.heappop(self._bin_heap)[2] if len(self._bin_heap) > 0 else None

    def get_smallest_feasible_bin(self, item, bins):
        """Return the smallest bin (by s_b) that can host *item*, or None."""
        feasible_bins = (bin for bin in bins if fits(item, bin))
        try:
            return min(feasible_bins, key=self.size_of_bin)
        except ValueError:
            # min() raises ValueError when no bin is feasible
            return None

    def get_biggest_feasible_item(self, items, bin):
        """Return the biggest item (by s_i) that fits in *bin*, or None."""
        feasible_items = (item for item in items if fits(item, bin))
        try:
            return max(feasible_items, key=self.size_of_item)
        except ValueError:
            return None

    def size_of_bin(self, bin):
        # scalar size computed by compute_sizes
        return self.s_b[bin.name]

    def size_of_item(self, item):
        return self.s_i[item.name]
class BFDItemCentricSlim(GabayZaourarSlimAlgorithm):
    """Item-centric BFD: take the biggest not-yet-placed VM and put it on the
    smallest server that can hold it, waking offline servers only on demand."""

    @SchedulingStrategy.schedule_vms_strategy
    def schedule_vms(self, vms):
        """Schedule every VM in *vms*, processing them biggest-first."""
        todo = list(vms)
        rm = self._config.resource_manager
        self.compute_sizes(todo, rm.all_servers())
        self.initialize_item_heap(todo)
        while todo:
            candidate = self.get_biggest_item()
            # Try powered-on servers first.
            host = self.get_smallest_feasible_bin(candidate, rm.online_servers())
            if host is None:
                # Otherwise look among the offline servers and wake one up.
                host = self.get_smallest_feasible_bin(candidate, rm.offline_servers())
                if host is not None:
                    rm.turn_on_server(host.name)
            if host is not None:
                rm.schedule_vm_at_server(candidate, host.name)
            # Drop the candidate unconditionally (even when unplaceable) so
            # the loop makes progress.
            todo.remove(candidate)
class BFDBinCentricSlim(GabayZaourarSlimAlgorithm):
    """Bin-centric BFD: fill the smallest server with the biggest feasible
    VMs before moving on to the next server."""

    @SchedulingStrategy.schedule_vms_strategy
    def schedule_vms(self, vms):
        """Pack *vms* into online servers first, then into offline ones."""
        remaining_vms = self.schedule_vms_at_servers(vms, self._config.resource_manager.online_servers())
        if len(remaining_vms) > 0:
            # Fix: the second pass must receive only the VMs that are still
            # unpacked; passing the full original list would schedule
            # already-placed VMs a second time.
            remaining_vms = self.schedule_vms_at_servers(remaining_vms,
                                                         self._config.resource_manager.offline_servers(),
                                                         active_bins=False)

    def schedule_vms_at_servers(self, vms, bins, active_bins=True):
        """Greedily fill each bin (smallest first) with the biggest feasible
        items and return the items that could not be placed.

        When ``active_bins`` is False, a server is powered on the first time
        an item is placed on it.
        """
        list_of_bins = list(bins)
        unpacked_items = list(vms)
        self.compute_sizes(unpacked_items, list_of_bins)
        # Fix: initialize_bin_heap requires the bins to heapify; calling it
        # without arguments raised a TypeError.
        self.initialize_bin_heap(list_of_bins)
        while len(list_of_bins) > 0:
            smallest_bin = self.get_smallest_bin()
            have_used_bin = False
            item = self.get_biggest_feasible_item(unpacked_items, smallest_bin)
            while item is not None:
                if not have_used_bin and not active_bins:
                    self._config.resource_manager.turn_on_server(smallest_bin.name)
                self._config.resource_manager.schedule_vm_at_server(item, smallest_bin.name)
                have_used_bin = True
                unpacked_items.remove(item)
                item = self.get_biggest_feasible_item(unpacked_items, smallest_bin)
            list_of_bins.remove(smallest_bin)
        return unpacked_items
# Lower bound for denominators, preventing division by zero.
_EPS = 0.0000000001


def fits(item, bin):
    """Return whether *bin* still has room (CPU and memory) for *item*."""
    if bin.cpu_free < item.cpu:
        return False
    return bin.mem_free >= item.mem


def frac_1_cj(items, bins):
    """Coefficients 1/C(j): inverse of the total free capacity of all bins."""
    free_cpu = sum(b.cpu_free for b in bins)
    free_mem = sum(b.mem_free for b in bins)
    return (1.0 / max(free_cpu, _EPS), 1.0 / max(free_mem, _EPS))


def frac_1_rj(items, bins):
    """Coefficients 1/R(j): inverse of the total requirement of all items."""
    need_cpu = sum(i.cpu for i in items)
    need_mem = sum(i.mem for i in items)
    return (1.0 / max(need_cpu, _EPS), 1.0 / max(need_mem, _EPS))


def frac_rj_cj(items, bins):
    """Coefficients R(j)/C(j), combined from the two fractions above.

    (1/C(j))/(1/R(j)) = (1/C(j))*R(j) = R(j)/C(j)
    """
    over_capacity = frac_1_cj(items, bins)
    over_required = frac_1_rj(items, bins)
    return (over_capacity[0] / over_required[0],
            over_capacity[1] / over_required[1])


# Set by GabayZaourarSlimAlgorithm.compute_sizes before the workers below run
# (Pool workers inherit the values on fork-based platforms).
alpha_cpu = alpha_mem = beta_cpu = beta_mem = None


def _gabay_zaourar_compute_item_size(item):
    """Return (name, scalar size) of an item from the alpha coefficients."""
    return (item.name, alpha_cpu * item.cpu + alpha_mem * item.mem)


def _gabay_zaourar_compute_bin_size(bin):
    """Return (name, scalar size) of a bin from the beta coefficients."""
    return (bin.name, beta_cpu * bin.cpu_free + beta_mem * bin.mem_free)
|
"""
Created on Jul 16, 2012
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
import unittest
import os
import json
import numpy as np
from pymatgen.io.vaspio.vasp_output import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
    """Checks for parsing of vasprun.xml files."""

    def test_properties(self):
        """Parse several reference vasprun.xml files and verify the parsed
        quantities (DOS, ionic steps, eigenvalues, DFPT epsilon, ...)."""
        filepath = os.path.join(test_dir, 'vasprun.xml')
        vasprun = Vasprun(filepath)
        #test pdos parsing
        pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
        self.assertAlmostEqual(pdos0[Orbital.s][1][16], 0.0026)
        self.assertAlmostEqual(pdos0[Orbital.pz][-1][16], 0.0012)
        self.assertEqual(pdos0[Orbital.s][1].shape, (301, ))
        filepath2 = os.path.join(test_dir, 'lifepo4.xml')
        vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True)
        totalscsteps = sum([len(i['electronic_steps'])
                            for i in vasprun.ionic_steps])
        self.assertEquals(29, len(vasprun.ionic_steps))
        self.assertEquals(len(vasprun.structures), len(vasprun.ionic_steps))
        self.assertEqual(vasprun.lattice,
                         vasprun.lattice_rec.reciprocal_lattice)
        for i, step in enumerate(vasprun.ionic_steps):
            self.assertEqual(vasprun.structures[i], step["structure"])
        self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
            "structure"] for i in xrange(len(vasprun.ionic_steps))]))
        self.assertEquals(308, totalscsteps,
                          "Incorrect number of energies read from vasprun.xml")
        # Fix: removed leftover merge conflict markers (<<<<<<</=======/
        # >>>>>>>): keep the upstream plain 'P' symbol together with the
        # HEAD assertion message.
        self.assertEquals(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
                          vasprun.atomic_symbols,
                          "Incorrect symbols read from vasprun.xml")
        self.assertEquals(vasprun.final_structure.composition.reduced_formula,
                          "LiFe4(PO4)4")
        self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
        self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
        self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
        expectedans = (2.539, 4.0906, 1.5516, False)
        (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
        self.assertAlmostEqual(gap, expectedans[0])
        self.assertAlmostEqual(cbm, expectedans[1])
        self.assertAlmostEqual(vbm, expectedans[2])
        self.assertEqual(direct, expectedans[3])
        self.assertFalse(vasprun.is_hubbard)
        self.assertEqual(vasprun.potcar_symbols,
                         [u'PAW_PBE Li 17Jan2003', u'PAW_PBE Fe 06Sep2000',
                          u'PAW_PBE Fe 06Sep2000', u'PAW_PBE P 17Jan2003',
                          u'PAW_PBE O 08Apr2002'])
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints,
                             "Actual kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints_weights,
                             "Actual kpoints weights cannot be read")
        for atomdoses in vasprun.pdos:
            for orbitaldos in atomdoses:
                self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
        #test skipping ionic steps.
        vasprun_skip = Vasprun(filepath, 3)
        self.assertEqual(vasprun_skip.nionic_steps, 29)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         len(vasprun_skip.structures))
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        #Check that nionic_steps is preserved no matter what.
        self.assertEqual(vasprun_skip.nionic_steps,
                         vasprun.nionic_steps)
        self.assertNotAlmostEqual(vasprun_skip.final_energy,
                                  vasprun.final_energy)
        #Test with ionic_step_offset
        vasprun_offset = Vasprun(filepath, 3, 6)
        self.assertEqual(len(vasprun_offset.ionic_steps),
                         int(len(vasprun.ionic_steps) / 3) - 1)
        self.assertEqual(vasprun_offset.structures[0],
                         vasprun_skip.structures[2])
        self.assertTrue(vasprun_ggau.is_hubbard)
        self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
        self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[(Spin.up, 0,
                                                                   0, 96,
                                                                   Orbital.s)],
                               0.0032)
        d = vasprun_ggau.to_dict
        self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
        self.assertEqual(d["nelements"], 4)
        filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
        vasprun_unconverged = Vasprun(filepath)
        self.assertFalse(vasprun_unconverged.converged)
        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
        vasprun_dfpt = Vasprun(filepath)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
        self.assertTrue(vasprun_dfpt.converged)
        entry = vasprun_dfpt.get_computed_entry()
        entry = MaterialsProjectCompatibility().process_entry(entry)
        self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
                               entry.energy)
        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
        vasprun_dfpt_unconv = Vasprun(filepath)
        self.assertFalse(vasprun_dfpt_unconv.converged)
        vasprun_uniform = Vasprun(os.path.join(test_dir, "vasprun.xml.uniform"))
        self.assertEqual(vasprun_uniform.kpoints.style, "Reciprocal")

    def test_to_dict(self):
        """to_dict must be JSON-serializable and expose the POTCAR type."""
        filepath = os.path.join(test_dir, 'vasprun.xml')
        vasprun = Vasprun(filepath)
        #Test that to_dict is json-serializable
        self.assertIsNotNone(json.dumps(vasprun.to_dict))
        self.assertEqual(
            vasprun.to_dict["input"]["potcar_type"],
            ['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])

    def test_get_band_structure(self):
        """Band structure of Si: CBM/VBM locations, energies and labels."""
        filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
        vasprun = Vasprun(filepath)
        bs = vasprun.get_band_structure(kpoints_filename=
                                        os.path.join(test_dir,
                                                     'KPOINTS_Si_bands'))
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        # Fix: the third positional argument of assertAlmostEqual is
        # ``places`` (an int); the message must be passed as msg=.
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64],
                         "wrong vbm kpoint index")
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
class OutcarTest(unittest.TestCase):
    """Checks for parsing of OUTCAR files."""

    def test_init(self):
        """Magnetization, charge, run stats and scalar totals of a reference
        OUTCAR, plus the is_stopped flag of an aborted run."""
        filepath = os.path.join(test_dir, 'OUTCAR')
        outcar = Outcar(filepath)
        expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
                        {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                        {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                        {'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
                        {'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162},
                        {'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
                        {'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162})
        expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
                        {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                        {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                        {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                        {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
                        {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                        {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
        # Fix: assertAlmostEqual cannot round the difference of tuples of
        # dicts (it raised TypeError on any mismatch and only "passed" via
        # the first == second shortcut). The parsed values are exact, so
        # compare with assertEqual and pass the message correctly.
        self.assertEqual(outcar.magnetization, expected_mag,
                         "Wrong magnetization read from Outcar")
        self.assertEqual(outcar.charge, expected_chg,
                         "Wrong charge read from Outcar")
        self.assertFalse(outcar.is_stopped)
        self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
                                            'Total CPU time used (sec)': 545.142,
                                            'Elapsed time (sec)': 546.709,
                                            'Maximum memory used (kb)': 0.0,
                                            'Average memory used (kb)': 0.0,
                                            'User time (sec)': 544.204})
        self.assertAlmostEqual(outcar.efermi, 2.0112)
        self.assertAlmostEqual(outcar.nelect, 44.9999991)
        self.assertAlmostEqual(outcar.total_mag, 0.9999998)
        self.assertIsNotNone(outcar.to_dict)
        filepath = os.path.join(test_dir, 'OUTCAR.stopped')
        outcar = Outcar(filepath)
        self.assertTrue(outcar.is_stopped)

    def test_core_state_eigen(self):
        """Core-level eigenvalue parsing from an OUTCAR with CL data."""
        filepath = os.path.join(test_dir, "OUTCAR.CL")
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
class OszicarTest(unittest.TestCase):
    """Tests for parsing of OSZICAR files."""

    def test_init(self):
        filepath = os.path.join(test_dir, 'OSZICAR')
        oszicar = Oszicar(filepath)
        # One electronic-step list is recorded per ionic step.
        self.assertEqual(len(oszicar.electronic_steps),
                         len(oszicar.ionic_steps))
        self.assertEqual(len(oszicar.all_energies), 60)
        self.assertAlmostEqual(oszicar.final_energy, -526.63928)
class LocpotTest(unittest.TestCase):
    """Tests for parsing of LOCPOT files."""

    def test_init(self):
        filepath = os.path.join(test_dir, 'LOCPOT')
        locpot = Locpot.from_file(filepath)
        self.assertAlmostEqual(-217.05226954,
                               sum(locpot.get_average_along_axis(0)))
        # Last grid point along each axis, checked to 2 decimal places.
        self.assertAlmostEqual(locpot.get_axis_grid(0)[-1], 2.87629, 2)
        self.assertAlmostEqual(locpot.get_axis_grid(1)[-1], 2.87629, 2)
        self.assertAlmostEqual(locpot.get_axis_grid(2)[-1], 2.87629, 2)
class ChgcarTest(unittest.TestCase):
    """Tests for parsing of CHGCAR files."""

    def test_init(self):
        # Non-spin-polarized file: integrated spin difference must be zero.
        filepath = os.path.join(test_dir, 'CHGCAR.nospin')
        chg = Chgcar.from_file(filepath)
        self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)
        filepath = os.path.join(test_dir, 'CHGCAR.spin')
        chg = Chgcar.from_file(filepath)
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               -0.0043896932237534022)
        #test sum: in-place addition doubles the integrated difference
        chg += chg
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               -0.0043896932237534022 * 2)
        filepath = os.path.join(test_dir, 'CHGCAR.Fe3O4')
        chg = Chgcar.from_file(filepath)
        ans = [1.93313368, 3.91201473, 4.11858277, 4.1240093, 4.10634989,
               3.38864822]
        myans = chg.get_integrated_diff(0, 3, 6)
        self.assertTrue(np.allclose(myans[:, 1], ans))
class ProcarTest(unittest.TestCase):
    """Tests for parsing of PROCAR files."""

    def test_init(self):
        filepath = os.path.join(test_dir, 'PROCAR.simple')
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(1, 'd'), 0)
        self.assertAlmostEqual(p.get_occupation(1, 's'), 0.3538125)
        self.assertAlmostEqual(p.get_occupation(1, 'p'), 1.19540625)
        # Unknown orbital labels must be rejected.
        self.assertRaises(ValueError, p.get_occupation, 1, 'm')
        self.assertEqual(p.nb_bands, 10)
        self.assertEqual(p.nb_kpoints, 10)
        lat = Lattice.cubic(3.)
        s = Structure(lat, ["Li", "Na", "K"], [[0., 0., 0.],
                                               [0.25, 0.25, 0.25],
                                               [0.75, 0.75, 0.75]])
        d = p.get_projection_on_elements(s)
        self.assertAlmostEqual(d[1][2][2], {'Na': 0.042, 'K': 0.646, 'Li': 0.042})
        filepath = os.path.join(test_dir, 'PROCAR')
        p = Procar(filepath)
        self.assertAlmostEqual(p.get_occupation(0, 'd'), 4.3698147704200059)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy'), 0.85796295426000124)
class XdatcarTest(unittest.TestCase):
    """Tests for parsing of XDATCAR trajectory files."""

    def test_init(self):
        filepath = os.path.join(test_dir, 'XDATCAR')
        x = Xdatcar(filepath)
        structures = x.structures
        self.assertEqual(len(structures), 3)
        # Composition stays constant across all trajectory frames.
        for s in structures:
            self.assertEqual(s.formula, "Li2 O1")
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Discover and run all tests in this module.
    unittest.main()
Fix unittest merging error.
Former-commit-id: 3d87953595bfdf1cec0274468f684127510b292b [formerly 8396c968bd99bb007a08a0ec2bb941583a676b61]
Former-commit-id: 7493093add75def39b3803178c0b0731d81aec83
"""
Created on Jul 16, 2012
"""
from __future__ import division
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyue@mit.edu"
__date__ = "Jul 16, 2012"
import unittest
import os
import json
import numpy as np
from pymatgen.io.vaspio.vasp_output import Chgcar, Locpot, Oszicar, Outcar, \
Vasprun, Procar, Xdatcar
from pymatgen import Spin, Orbital, Lattice, Structure
from pymatgen.entries.compatibility import MaterialsProjectCompatibility
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..", "..",
'test_files')
class VasprunTest(unittest.TestCase):
    """Tests for parsing of vasprun.xml files."""

    def test_properties(self):
        filepath = os.path.join(test_dir, 'vasprun.xml')
        vasprun = Vasprun(filepath)
        #test pdos parsing
        pdos0 = vasprun.complete_dos.pdos[vasprun.final_structure[0]]
        self.assertAlmostEqual(pdos0[Orbital.s][1][16], 0.0026)
        self.assertAlmostEqual(pdos0[Orbital.pz][-1][16], 0.0012)
        self.assertEqual(pdos0[Orbital.s][1].shape, (301, ))
        filepath2 = os.path.join(test_dir, 'lifepo4.xml')
        vasprun_ggau = Vasprun(filepath2, parse_projected_eigen=True)
        totalscsteps = sum([len(i['electronic_steps'])
                            for i in vasprun.ionic_steps])
        # assertEquals is a deprecated alias; use assertEqual throughout.
        self.assertEqual(29, len(vasprun.ionic_steps))
        self.assertEqual(len(vasprun.structures), len(vasprun.ionic_steps))
        self.assertEqual(vasprun.lattice,
                         vasprun.lattice_rec.reciprocal_lattice)
        for i, step in enumerate(vasprun.ionic_steps):
            self.assertEqual(vasprun.structures[i], step["structure"])
        self.assertTrue(all([vasprun.structures[i] == vasprun.ionic_steps[i][
            "structure"] for i in xrange(len(vasprun.ionic_steps))]))
        self.assertEqual(308, totalscsteps,
                         "Incorrect number of energies read from vasprun.xml")
        self.assertEqual(['Li'] + 4 * ['Fe'] + 4 * ['P'] + 16 * ["O"],
                         vasprun.atomic_symbols)
        self.assertEqual(vasprun.final_structure.composition.reduced_formula,
                         "LiFe4(PO4)4")
        self.assertIsNotNone(vasprun.incar, "Incar cannot be read")
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.eigenvalues, "Eigenvalues cannot be read")
        self.assertAlmostEqual(vasprun.final_energy, -269.38319884, 7)
        self.assertAlmostEqual(vasprun.tdos.get_gap(), 2.0589, 4)
        expectedans = (2.539, 4.0906, 1.5516, False)
        (gap, cbm, vbm, direct) = vasprun.eigenvalue_band_properties
        self.assertAlmostEqual(gap, expectedans[0])
        self.assertAlmostEqual(cbm, expectedans[1])
        self.assertAlmostEqual(vbm, expectedans[2])
        self.assertEqual(direct, expectedans[3])
        self.assertFalse(vasprun.is_hubbard)
        self.assertEqual(vasprun.potcar_symbols,
                         [u'PAW_PBE Li 17Jan2003', u'PAW_PBE Fe 06Sep2000',
                          u'PAW_PBE Fe 06Sep2000', u'PAW_PBE P 17Jan2003',
                          u'PAW_PBE O 08Apr2002'])
        self.assertIsNotNone(vasprun.kpoints, "Kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints,
                             "Actual kpoints cannot be read")
        self.assertIsNotNone(vasprun.actual_kpoints_weights,
                             "Actual kpoints weights cannot be read")
        for atomdoses in vasprun.pdos:
            for orbitaldos in atomdoses:
                self.assertIsNotNone(orbitaldos, "Partial Dos cannot be read")
        #test skipping ionic steps.
        vasprun_skip = Vasprun(filepath, 3)
        self.assertEqual(vasprun_skip.nionic_steps, 29)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         len(vasprun_skip.structures))
        self.assertEqual(len(vasprun_skip.ionic_steps),
                         int(vasprun.nionic_steps / 3) + 1)
        #Check that nionic_steps is preserved no matter what.
        self.assertEqual(vasprun_skip.nionic_steps,
                         vasprun.nionic_steps)
        self.assertNotAlmostEqual(vasprun_skip.final_energy,
                                  vasprun.final_energy)
        #Test with ionic_step_offset
        vasprun_offset = Vasprun(filepath, 3, 6)
        self.assertEqual(len(vasprun_offset.ionic_steps),
                         int(len(vasprun.ionic_steps) / 3) - 1)
        self.assertEqual(vasprun_offset.structures[0],
                         vasprun_skip.structures[2])
        self.assertTrue(vasprun_ggau.is_hubbard)
        self.assertEqual(vasprun_ggau.hubbards["Fe"], 4.3)
        self.assertAlmostEqual(vasprun_ggau.projected_eigenvalues[(Spin.up, 0,
                                                                   0, 96,
                                                                   Orbital.s)],
                               0.0032)
        d = vasprun_ggau.to_dict
        self.assertEqual(d["elements"], ["Fe", "Li", "O", "P"])
        self.assertEqual(d["nelements"], 4)
        filepath = os.path.join(test_dir, 'vasprun.xml.unconverged')
        vasprun_unconverged = Vasprun(filepath)
        self.assertFalse(vasprun_unconverged.converged)
        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt')
        vasprun_dfpt = Vasprun(filepath)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][0], 3.26105533)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[0][1], -0.00459066)
        self.assertAlmostEqual(vasprun_dfpt.epsilon_static[2][2], 3.24330517)
        self.assertTrue(vasprun_dfpt.converged)
        entry = vasprun_dfpt.get_computed_entry()
        entry = MaterialsProjectCompatibility().process_entry(entry)
        self.assertAlmostEqual(entry.uncorrected_energy + entry.correction,
                               entry.energy)
        filepath = os.path.join(test_dir, 'vasprun.xml.dfpt.unconverged')
        vasprun_dfpt_unconv = Vasprun(filepath)
        self.assertFalse(vasprun_dfpt_unconv.converged)
        vasprun_uniform = Vasprun(os.path.join(test_dir,
                                               "vasprun.xml.uniform"))
        self.assertEqual(vasprun_uniform.kpoints.style, "Reciprocal")

    def test_to_dict(self):
        filepath = os.path.join(test_dir, 'vasprun.xml')
        vasprun = Vasprun(filepath)
        #Test that to_dict is json-serializable
        self.assertIsNotNone(json.dumps(vasprun.to_dict))
        self.assertEqual(
            vasprun.to_dict["input"]["potcar_type"],
            ['PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE', 'PAW_PBE'])

    def test_get_band_structure(self):
        filepath = os.path.join(test_dir, 'vasprun_Si_bands.xml')
        vasprun = Vasprun(filepath)
        bs = vasprun.get_band_structure(
            kpoints_filename=os.path.join(test_dir, 'KPOINTS_Si_bands'))
        cbm = bs.get_cbm()
        vbm = bs.get_vbm()
        self.assertEqual(cbm['kpoint_index'], [13], "wrong cbm kpoint index")
        # BUG FIX: the third positional argument of assertAlmostEqual is
        # `places`, not `msg`.  Passing the message positionally made any
        # mismatch raise TypeError (round() with a str) instead of failing
        # with the message, so pass msg as a keyword.
        self.assertAlmostEqual(cbm['energy'], 6.2301, msg="wrong cbm energy")
        self.assertEqual(cbm['band_index'], {Spin.up: [4], Spin.down: [4]},
                         "wrong cbm bands")
        self.assertEqual(vbm['kpoint_index'], [0, 63, 64],
                         "wrong vbm kpoint index")
        self.assertAlmostEqual(vbm['energy'], 5.6158, msg="wrong vbm energy")
        self.assertEqual(vbm['band_index'], {Spin.up: [1, 2, 3],
                                             Spin.down: [1, 2, 3]},
                         "wrong vbm bands")
        self.assertEqual(vbm['kpoint'].label, "\Gamma", "wrong vbm label")
        self.assertEqual(cbm['kpoint'].label, None, "wrong cbm label")
class OutcarTest(unittest.TestCase):
    """Tests for parsing of VASP OUTCAR files."""

    def test_init(self):
        # Parse a reference OUTCAR and check per-site magnetization/charge.
        filepath = os.path.join(test_dir, 'OUTCAR')
        outcar = Outcar(filepath)
        expected_mag = ({'d': 0.0, 'p': 0.003, 's': 0.002, 'tot': 0.005},
                        {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                        {'d': 0.798, 'p': 0.008, 's': 0.007, 'tot': 0.813},
                        {'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
                        {'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162},
                        {'d': 0.0, 'p':-0.117, 's': 0.005, 'tot':-0.112},
                        {'d': 0.0, 'p':-0.165, 's': 0.004, 'tot':-0.162})
        expected_chg = ({'p': 0.154, 's': 0.078, 'd': 0.0, 'tot': 0.232},
                        {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                        {'p': 0.707, 's': 0.463, 'd': 8.316, 'tot': 9.486},
                        {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                        {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947},
                        {'p': 3.388, 's': 1.576, 'd': 0.0, 'tot': 4.964},
                        {'p': 3.365, 's': 1.582, 'd': 0.0, 'tot': 4.947})
        # BUG FIX: assertAlmostEqual cannot round tuples of dicts -- the old
        # calls only passed via the exact-equality fast path, and any
        # mismatch raised TypeError instead of a failure message.
        # assertEqual performs the same exact comparison but reports a diff.
        self.assertEqual(outcar.magnetization, expected_mag,
                         "Wrong magnetization read from Outcar")
        self.assertEqual(outcar.charge, expected_chg,
                         "Wrong charge read from Outcar")
        self.assertFalse(outcar.is_stopped)
        self.assertEqual(outcar.run_stats, {'System time (sec)': 0.938,
                                            'Total CPU time used (sec)': 545.142,
                                            'Elapsed time (sec)': 546.709,
                                            'Maximum memory used (kb)': 0.0,
                                            'Average memory used (kb)': 0.0,
                                            'User time (sec)': 544.204})
        self.assertAlmostEqual(outcar.efermi, 2.0112)
        self.assertAlmostEqual(outcar.nelect, 44.9999991)
        self.assertAlmostEqual(outcar.total_mag, 0.9999998)
        self.assertIsNotNone(outcar.to_dict)
        # A run terminated by a STOPCAR file must be flagged as stopped.
        filepath = os.path.join(test_dir, 'OUTCAR.stopped')
        outcar = Outcar(filepath)
        self.assertTrue(outcar.is_stopped)

    def test_core_state_eigen(self):
        # Core-level eigenvalues: atom index 6, 2s state, last ionic step.
        filepath = os.path.join(test_dir, "OUTCAR.CL")
        cl = Outcar(filepath).read_core_state_eigen()
        self.assertAlmostEqual(cl[6]["2s"][-1], -174.4779)
class OszicarTest(unittest.TestCase):
    """Tests for parsing of OSZICAR files."""

    def test_init(self):
        osz = Oszicar(os.path.join(test_dir, 'OSZICAR'))
        # One electronic-step list is recorded per ionic step.
        self.assertEqual(len(osz.electronic_steps), len(osz.ionic_steps))
        self.assertEqual(60, len(osz.all_energies))
        self.assertAlmostEqual(-526.63928, osz.final_energy)
class LocpotTest(unittest.TestCase):
    """Tests for parsing of LOCPOT files."""

    def test_init(self):
        locpot = Locpot.from_file(os.path.join(test_dir, 'LOCPOT'))
        self.assertAlmostEqual(sum(locpot.get_average_along_axis(0)),
                               -217.05226954)
        # Last grid point along each of the three axes, to 2 decimal places.
        for axis in range(3):
            self.assertAlmostEqual(locpot.get_axis_grid(axis)[-1],
                                   2.87629, 2)
class ChgcarTest(unittest.TestCase):
    """Tests for parsing of CHGCAR files."""

    def test_init(self):
        # Non-spin-polarized file: integrated spin difference must be zero.
        chg = Chgcar.from_file(os.path.join(test_dir, 'CHGCAR.nospin'))
        self.assertAlmostEqual(chg.get_integrated_diff(0, 2)[0, 1], 0)

        # Spin-polarized file.
        chg = Chgcar.from_file(os.path.join(test_dir, 'CHGCAR.spin'))
        expected_diff = -0.0043896932237534022
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               expected_diff)
        # In-place addition of a Chgcar to itself doubles the difference.
        chg += chg
        self.assertAlmostEqual(chg.get_integrated_diff(0, 1)[0, 1],
                               expected_diff * 2)

        chg = Chgcar.from_file(os.path.join(test_dir, 'CHGCAR.Fe3O4'))
        expected = [1.93313368, 3.91201473, 4.11858277, 4.1240093,
                    4.10634989, 3.38864822]
        result = chg.get_integrated_diff(0, 3, 6)
        self.assertTrue(np.allclose(result[:, 1], expected))
class ProcarTest(unittest.TestCase):
    """Tests for parsing of PROCAR files."""

    def test_init(self):
        p = Procar(os.path.join(test_dir, 'PROCAR.simple'))
        self.assertAlmostEqual(p.get_occupation(1, 'd'), 0)
        self.assertAlmostEqual(p.get_occupation(1, 's'), 0.3538125)
        self.assertAlmostEqual(p.get_occupation(1, 'p'), 1.19540625)
        # Unknown orbital labels must be rejected.
        self.assertRaises(ValueError, p.get_occupation, 1, 'm')
        self.assertEqual(p.nb_bands, 10)
        self.assertEqual(p.nb_kpoints, 10)
        coords = [[0., 0., 0.], [0.25, 0.25, 0.25], [0.75, 0.75, 0.75]]
        struct = Structure(Lattice.cubic(3.), ["Li", "Na", "K"], coords)
        proj = p.get_projection_on_elements(struct)
        self.assertAlmostEqual(proj[1][2][2],
                               {'Na': 0.042, 'K': 0.646, 'Li': 0.042})

        p = Procar(os.path.join(test_dir, 'PROCAR'))
        self.assertAlmostEqual(p.get_occupation(0, 'd'), 4.3698147704200059)
        self.assertAlmostEqual(p.get_occupation(0, 'dxy'), 0.85796295426000124)
class XdatcarTest(unittest.TestCase):
    """Tests for parsing of XDATCAR trajectory files."""

    def test_init(self):
        xdatcar = Xdatcar(os.path.join(test_dir, 'XDATCAR'))
        frames = xdatcar.structures
        self.assertEqual(3, len(frames))
        # Composition stays constant across all trajectory frames.
        for frame in frames:
            self.assertEqual("Li2 O1", frame.formula)
if __name__ == "__main__":
    #import sys;sys.argv = ['', 'Test.testName']
    # Discover and run all tests in this module.
    unittest.main()
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.tf_export import tf_export
@tf_export("keras.initializers.Initializer")
class Initializer(object):
  """Base class from which all initializers inherit.

  An initializer is a callable that, given a shape (and optionally a dtype
  and partition info), produces a tensor of initial values.  Subclasses must
  override `__call__`.
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    raise NotImplementedError

  def get_config(self):
    """Returns the initializer's configuration as a JSON-serializable dict.

    Returns:
      A JSON-serializable Python dict.
    """
    return {}

  @classmethod
  def from_config(cls, config):
    """Instantiates an initializer from the dict produced by `get_config`.

    Example:

    ```python
    initializer = RandomUniform(-1, 1)
    config = initializer.get_config()
    initializer = RandomUniform.from_config(config)
    ```

    Args:
      config: A Python dictionary, typically the output of `get_config`.

    Returns:
      An Initializer instance.
    """
    return cls(**config)
@tf_export("keras.initializers.Zeros", "initializers.zeros",
           "zeros_initializer", "keras.initializers.zeros")
class Zeros(Initializer):
  """Initializer that produces tensors filled with zeros."""

  def __init__(self, dtype=dtypes.float32):
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the dtype chosen at construction time.
    return array_ops.zeros(shape, self.dtype if dtype is None else dtype)

  def get_config(self):
    return {"dtype": self.dtype.name}
@tf_export("keras.initializers.Ones", "initializers.ones", "ones_initializer",
           "keras.initializers.ones")
class Ones(Initializer):
  """Initializer that produces tensors filled with ones."""

  def __init__(self, dtype=dtypes.float32):
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the dtype chosen at construction time.
    return array_ops.ones(shape, self.dtype if dtype is None else dtype)

  def get_config(self):
    return {"dtype": self.dtype.name}
@tf_export("keras.initializers.Constant", "initializers.constant",
           "constant_initializer", "keras.initializers.constant")
class Constant(Initializer):
  """Initializer that generates tensors with constant values.

  The resulting tensor is populated with values of type `dtype`, as
  specified by arguments `value` following the desired `shape` of the
  new tensor (see examples below).

  The argument `value` can be a constant value, or a list of values of type
  `dtype`. If `value` is a list, then the length of the list must be less
  than or equal to the number of elements implied by the desired shape of the
  tensor. In the case where the total number of elements in `value` is less
  than the number of elements required by the tensor shape, the last element
  in `value` will be used to fill the remaining entries. If the total number of
  elements in `value` is greater than the number of elements required by the
  tensor shape, the initializer will raise a `ValueError`.

  Args:
    value: A Python scalar, list or tuple of values, or a N-dimensional numpy
      array. All elements of the initialized variable will be set to the
      corresponding value in the `value` argument.
    dtype: The data type.
    verify_shape: Boolean that enables verification of the shape of `value`. If
      `True`, the initializer will throw an error if the shape of `value` is not
      compatible with the shape of the initialized tensor.

  Raises:
    TypeError: If the input `value` is not one of the expected types.

  Examples:
    The following example can be rewritten using a numpy.ndarray instead
    of the `value` list, even reshaped, as shown in the two commented lines
    below the `value` list initialization.

  ```python
    >>> import numpy as np
    >>> import tensorflow as tf

    >>> value = [0, 1, 2, 3, 4, 5, 6, 7]
    >>> # value = np.array(value)
    >>> # value = value.reshape([2, 4])
    >>> init = tf.constant_initializer(value)

    >>> print('fitting shape:')
    >>> with tf.Session():
    >>>   x = tf.get_variable('x', shape=[2, 4], initializer=init)
    >>>   x.initializer.run()
    >>>   print(x.eval())

    fitting shape:
    [[ 0.  1.  2.  3.]
     [ 4.  5.  6.  7.]]

    >>> print('larger shape:')
    >>> with tf.Session():
    >>>   x = tf.get_variable('x', shape=[3, 4], initializer=init)
    >>>   x.initializer.run()
    >>>   print(x.eval())

    larger shape:
    [[ 0.  1.  2.  3.]
     [ 4.  5.  6.  7.]
     [ 7.  7.  7.  7.]]

    >>> print('smaller shape:')
    >>> with tf.Session():
    >>>   x = tf.get_variable('x', shape=[2, 3], initializer=init)

    ValueError: Too many elements provided. Needed at most 6, but received 8

    >>> print('shape verification:')
    >>> init_verify = tf.constant_initializer(value, verify_shape=True)
    >>> with tf.Session():
    >>>   x = tf.get_variable('x', shape=[3, 4], initializer=init_verify)

    TypeError: Expected Tensor's shape: (3, 4), got (8,).
  ```
  """

  def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
    # Reject unsupported value types early, before any graph op is built.
    if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):
      raise TypeError(
          "Invalid type for initial value: %s (expected Python scalar, list or "
          "tuple of values, or numpy.ndarray)." % type(value))
    self.value = value
    self.dtype = dtypes.as_dtype(dtype)
    self._verify_shape = verify_shape

  def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
    if dtype is None:
      dtype = self.dtype
    # A per-call verify_shape overrides the constructor-time default.
    if verify_shape is None:
      verify_shape = self._verify_shape
    return constant_op.constant_v1(
        self.value, dtype=dtype, shape=shape, verify_shape=verify_shape)

  def get_config(self):
    # We don't include `verify_shape` for compatibility with Keras.
    # `verify_shape` should be passed as an argument to `__call__` rather
    # than as a constructor argument: conceptually it isn't a property
    # of the initializer.
    return {"value": self.value, "dtype": self.dtype.name}
@tf_export("initializers.random_uniform", "random_uniform_initializer")
class RandomUniform(Initializer):
  """Initializer that draws values from a uniform distribution.

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate.
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate. Defaults to 1 for float types.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type.
  """

  def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
    self.minval = minval
    self.maxval = maxval
    self.seed = seed
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    target_dtype = self.dtype if dtype is None else dtype
    return random_ops.random_uniform(
        shape, self.minval, self.maxval, target_dtype, seed=self.seed)

  def get_config(self):
    return {
        "minval": self.minval,
        "maxval": self.maxval,
        "seed": self.seed,
        "dtype": self.dtype.name
    }
@tf_export("initializers.random_normal", "random_normal_initializer")
class RandomNormal(Initializer):
  """Initializer that draws values from a normal (Gaussian) distribution.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Reject non-floating-point dtypes up front.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    target_dtype = self.dtype if dtype is None else dtype
    return random_ops.random_normal(
        shape, self.mean, self.stddev, target_dtype, seed=self.seed)

  def get_config(self):
    return {
        "mean": self.mean,
        "stddev": self.stddev,
        "seed": self.seed,
        "dtype": self.dtype.name
    }
@tf_export("initializers.truncated_normal", "truncated_normal_initializer")
class TruncatedNormal(Initializer):
  """Initializer that draws from a truncated normal distribution.

  Like `random_normal_initializer`, except that values more than two
  standard deviations from the mean are discarded and re-drawn.  This is the
  recommended initializer for neural network weights and filters.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Reject non-floating-point dtypes up front.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    target_dtype = self.dtype if dtype is None else dtype
    return random_ops.truncated_normal(
        shape, self.mean, self.stddev, target_dtype, seed=self.seed)

  def get_config(self):
    return {
        "mean": self.mean,
        "stddev": self.stddev,
        "seed": self.seed,
        "dtype": self.dtype.name
    }
@tf_export(
    "initializers.uniform_unit_scaling",
    v1=[
        "initializers.uniform_unit_scaling", "uniform_unit_scaling_initializer"
    ])
@deprecation.deprecated_endpoints("uniform_unit_scaling_initializer")
class UniformUnitScaling(Initializer):
  """Initializer that generates tensors without scaling variance.

  When initializing a deep network it is advantageous in principle to keep
  the scale of the input variance constant, so it does not explode or
  diminish by reaching the final layer.  For input `x` and operation `x * W`
  with `W` initialized uniformly at random, drawing `W` from
  `[-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]` keeps the scale intact,
  where `dim = W.shape[0]` (the size of the input).  A similar calculation
  for convolutional networks gives `dim` equal to the product of the first
  3 dimensions.  When nonlinearities are present, this is multiplied by a
  constant `factor`; see (Sussillo et al., 2014) section 2.3 for the
  numerically computed constants (linear: 1.0, relu: ~1.43, tanh: ~1.15).

  Args:
    factor: Float.  A multiplicative factor by which the values will be
      scaled.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  References:
      [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
      ([pdf](http://arxiv.org/pdf/1412.6558.pdf))
  """

  @deprecated(None,
              "Use tf.initializers.variance_scaling instead with distribution="
              "uniform to get equivalent behavior.")
  def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
    self.factor = factor
    self.seed = seed
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # For partitioned variables, scale by the full (unpartitioned) shape.
    full_shape = shape if partition_info is None else partition_info.full_shape
    # Estimating input size is not possible to do perfectly, but we try.
    # The product of all dimensions but the last is the right estimate for
    # matrix multiplies and convolutions (see class docstring).
    fan_in = 1.0
    for dim in full_shape[:-1]:
      fan_in *= float(dim)
    # Avoid errors when initializing zero-size tensors.
    fan_in = max(fan_in, 1.0)
    limit = math.sqrt(3 / fan_in) * self.factor
    return random_ops.random_uniform(
        shape, -limit, limit, dtype, seed=self.seed)

  def get_config(self):
    return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name}
@tf_export(
    "keras.initializers.VarianceScaling",
    "initializers.variance_scaling",
    v1=[
        "keras.initializers.VarianceScaling", "initializers.variance_scaling",
        "variance_scaling_initializer"
    ])
@deprecation.deprecated_endpoints("variance_scaling_initializer")
class VarianceScaling(Initializer):
  """Initializer capable of adapting its scale to the shape of weights tensors.

  With `distribution="truncated_normal" or "untruncated_normal"`, samples are
  drawn from a truncated/untruncated normal distribution with a mean of zero
  and a standard deviation (after truncation, if used) of
  `stddev = sqrt(scale / n)`, where `n` is:

    - number of input units in the weight tensor, if mode = "fan_in"
    - number of output units, if mode = "fan_out"
    - average of the numbers of input and output units, if mode = "fan_avg"

  With `distribution="uniform"`, samples are drawn from a uniform
  distribution within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "normal", "uniform".
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  Raises:
    ValueError: In case of an invalid value for the "scale", mode" or
      "distribution" arguments.
  """

  @deprecated_arg_values(
      None,
      "`normal` is a deprecated alias for `truncated_normal`",
      distribution="normal")
  def __init__(self,
               scale=1.0,
               mode="fan_in",
               distribution="truncated_normal",
               seed=None,
               dtype=dtypes.float32):
    if scale <= 0.:
      raise ValueError("`scale` must be positive float.")
    if mode not in {"fan_in", "fan_out", "fan_avg"}:
      raise ValueError("Invalid `mode` argument:", mode)
    distribution = distribution.lower()
    if distribution not in {"normal", "uniform",
                            "truncated_normal", "untruncated_normal"}:
      raise ValueError("Invalid `distribution` argument:", distribution)
    self.scale = scale
    self.mode = mode
    self.distribution = distribution
    self.seed = seed
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # For partitioned variables, scale by the full (unpartitioned) shape.
    full_shape = shape if partition_info is None else partition_info.full_shape
    fan_in, fan_out = _compute_fans(full_shape)
    if self.mode == "fan_in":
      n = fan_in
    elif self.mode == "fan_out":
      n = fan_out
    else:  # "fan_avg"
      n = (fan_in + fan_out) / 2.
    scale = self.scale / max(1., n)
    if self.distribution in ("normal", "truncated_normal"):
      # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      stddev = math.sqrt(scale) / .87962566103423978
      return random_ops.truncated_normal(
          shape, 0.0, stddev, dtype, seed=self.seed)
    if self.distribution == "untruncated_normal":
      return random_ops.random_normal(
          shape, 0.0, math.sqrt(scale), dtype, seed=self.seed)
    limit = math.sqrt(3.0 * scale)
    return random_ops.random_uniform(
        shape, -limit, limit, dtype, seed=self.seed)

  def get_config(self):
    return {
        "scale": self.scale,
        "mode": self.mode,
        "distribution": self.distribution,
        "seed": self.seed,
        "dtype": self.dtype.name
    }
@tf_export(
    "keras.initializers.Orthogonal",
    "initializers.orthogonal",
    "keras.initializers.orthogonal",
    v1=[
        "keras.initializers.Orthogonal", "initializers.orthogonal",
        "orthogonal_initializer", "keras.initializers.orthogonal"
    ])
@deprecation.deprecated_endpoints("orthogonal_initializer")
class Orthogonal(Initializer):
  """Initializer that generates an orthogonal matrix.

  For a two-dimensional shape, the tensor is initialized with an orthogonal
  matrix obtained from the QR decomposition of a matrix of random numbers
  drawn from a normal distribution.  If the matrix has fewer rows than
  columns the output has orthogonal rows; otherwise it has orthogonal
  columns.

  For shapes with more than two dimensions, a matrix of shape
  `(shape[0] * ... * shape[n - 2], shape[n - 1])` is initialized, where `n`
  is the length of the shape vector, and the result is reshaped to the
  desired shape.

  Args:
    gain: multiplicative factor to apply to the orthogonal matrix
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type.

  References:
      [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
      ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Collapse all leading dimensions into rows, keeping the last dimension
    # as columns, so this also works for conv kernels.
    num_cols = shape[-1]
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    # Work on the taller orientation so QR yields a full orthonormal set.
    if num_rows < num_cols:
      flat_shape = (num_cols, num_rows)
    else:
      flat_shape = (num_rows, num_cols)
    # Generate a random matrix and take its QR factorization.
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Multiplying by the sign of R's diagonal makes Q uniform.
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)

  def get_config(self):
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
class ConvolutionDeltaOrthogonal(Initializer):
  """Initializer that generates a delta orthogonal kernel for ConvNets.

  The shape of the tensor must have length 3, 4 or 5. The number of input
  filters must not exceed the number of output filters. The center pixels of the
  tensor form an orthogonal matrix. Other pixels are set to be zero. See
  algorithm 2 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor of
      `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  References:
      [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
      ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape: 3-D, 4-D or 5-D kernels (1D/2D/3D convolutions).
    if len(shape) < 3 or len(shape) > 5:
      raise ValueError("The tensor to initialize must be at least "
                       "three-dimensional and at most five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Generate a random matrix
    a = random_ops.random_normal([shape[-1], shape[-1]],
                                 dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform (Haar distributed) by sign-correcting with R's diagonal.
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    # Keep only the first in_filters rows: a cin x cout semi-orthogonal matrix.
    q = q[:shape[-2], :]
    q *= math_ops.cast(self.gain, dtype=dtype)
    # Scatter the orthogonal matrix into the spatial center of the kernel;
    # every other spatial position stays zero (the "delta" part).
    if len(shape) == 3:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    elif len(shape) == 4:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    else:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2,
                                      (shape[2]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    return weight

  def get_config(self):
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
class ConvolutionOrthogonal(Initializer):
  """Initializer that generates orthogonal kernel for ConvNets.

  Base class used to construct 1D, 2D and 3D orthogonal kernels for convolution.

  Args:
    gain: multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor of
      `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  References:
      [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
      ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    # Dimensionality-specific construction lives in the subclasses.
    raise NotImplementedError

  def get_config(self):
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}

  # Helper functions.
  def _orthogonal_matrix(self, n):
    """Construct an n x n orthogonal matrix.

    Args:
      n: Dimension.

    Returns:
      A n x n orthogonal matrix.
    """
    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
    # NOTE: the seed is advanced after each draw so repeated calls produce
    # different matrices; as a side effect the initializer mutates its own
    # `seed` attribute (skipped when seed is None or 0).
    if self.seed:
      self.seed += 1
    q, r = gen_linalg_ops.qr(a)
    d = array_ops.diag_part(r)
    # make q uniform (sign-correct each column with R's diagonal)
    q *= math_ops.sign(d)
    return q

  def _symmetric_projection(self, n):
    """Compute a n x n symmetric projection matrix.

    Args:
      n: Dimension.

    Returns:
      A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.
    """
    q = self._orthogonal_matrix(n)
    # randomly zeroing out some columns
    mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0,
                         self.dtype)
    if self.seed:
      self.seed += 1
    # With C = Q * mask (column-masked Q), C C^T is symmetric and idempotent.
    c = math_ops.multiply(q, mask)
    return math_ops.matmul(c, array_ops.matrix_transpose(c))
class ConvolutionOrthogonal2D(ConvolutionOrthogonal):
  """Initializer that generates a 2D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 4. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero padding).
  See algorithm 1 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. This has the effect of scaling the output 2-norm by
      a factor of `gain`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type.

  References:
      [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
      ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 4:
      raise ValueError("The tensor to initialize must be four-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Only square spatial kernels are supported by the construction.
    if shape[0] != shape[1]:
      raise ValueError("Kernel sizes must be equal.")

    kernel = self._orthogonal_kernel(shape[0], shape[2], shape[3])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k1, k2):
    """Convert a dictionary to a tensor.

    Args:
      x: A k1 * k2 dictionary.
      k1: First dimension of x.
      k2: Second dimension of x.

    Returns:
      A k1 * k2 tensor.
    """

    return array_ops.stack([array_ops.stack([x[i, j] for j in range(k2)])
                            for i in range(k1)])

  def _block_orth(self, p1, p2):
    """Construct a 2 x 2 kernel. Used to construct orthogonal kernel.

    Args:
      p1: A symmetric projection matrix.
      p2: A symmetric projection matrix.

    Returns:
      A 2 x 2 kernel [[p1p2,         p1(1-p2)],
                      [(1-p1)p2, (1-p1)(1-p2)]].
    Raises:
      ValueError: If the dimensions of p1 and p2 are different.
    """
    if p1.shape.as_list() != p2.shape.as_list():
      raise ValueError("The dimension of the matrices must be the same.")
    n = p1.shape.as_list()[0]
    kernel2x2 = {}
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    # Entries of the 2x2 block kernel; they sum to the identity, which makes
    # the kernel an isometry under circular convolution.
    kernel2x2[0, 0] = math_ops.matmul(p1, p2)
    kernel2x2[0, 1] = math_ops.matmul(p1, (eye - p2))
    kernel2x2[1, 0] = math_ops.matmul((eye - p1), p2)
    kernel2x2[1, 1] = math_ops.matmul((eye - p1), (eye - p2))

    return kernel2x2

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: A k x k dictionary, each element is a n x n matrix.
      m2: A l x l dictionary, each element is a n x n matrix.

    Returns:
      (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.
    Raises:
      ValueError: if the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0, 0]).shape.as_list()[0]
    if n != (m2[0, 0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    # Dicts are keyed (i, j), so the spatial width is the square root of the
    # entry count.
    k = int(np.sqrt(len(m1)))
    l = int(np.sqrt(len(m2)))
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      for j in range(size):
        result[i, j] = array_ops.zeros([n, n], self.dtype)
        for index1 in range(min(k, i + 1)):
          for index2 in range(min(k, j + 1)):
            if (i - index1) < l and (j - index2) < l:
              result[i, j] += math_ops.matmul(m1[index1, index2],
                                              m2[i - index1, j - index2])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      An [ksize, ksize, cin, cout] orthogonal kernel.
    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)

    # Convolve ksize - 1 random 2x2 orthogonal block kernels together to build
    # a ksize x ksize orthogonal kernel (algorithm 1, Xiao et al., 2018).
    p = self._block_orth(self._symmetric_projection(cout),
                         self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout),
                              self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    # Project every entry onto the cin x cout semi-orthogonal matrix.
    for i in range(ksize):
      for j in range(ksize):
        p[i, j] = math_ops.matmul(orth, p[i, j])

    return self._dict_to_tensor(p, ksize, ksize)
class ConvolutionOrthogonal1D(ConvolutionOrthogonal):
  """Initializer that generates a 1D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 3. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero padding).
  See algorithm 1 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor of
      `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type.

  References:
      [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
      ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 3:
      raise ValueError("The tensor to initialize must be three-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k):
    """Convert a dictionary to a tensor.

    Args:
      x: A dictionary of length k.
      k: Dimension of x.

    Returns:
      A tensor with the same dimension.
    """

    return array_ops.stack([x[i] for i in range(k)])

  def _block_orth(self, projection_matrix):
    """Construct a kernel. Used to construct orthogonal kernel.

    Args:
      projection_matrix: A symmetric projection matrix of size n x n.

    Returns:
      [projection_matrix, (1 - projection_matrix)].
    """
    n = projection_matrix.shape.as_list()[0]
    kernel = {}
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    # The pair sums to the identity, which makes the length-2 kernel an
    # isometry under circular convolution.
    kernel[0] = projection_matrix
    kernel[1] = eye - projection_matrix
    return kernel

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: A dictionary of length k, each element is a n x n matrix.
      m2: A dictionary of length l, each element is a n x n matrix.

    Returns:
      (k + l - 1) dictionary each element is a n x n matrix.
    Raises:
      ValueError: If the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0]).shape.as_list()[0]
    if n != (m2[0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    k = len(m1)
    l = len(m2)
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      result[i] = array_ops.zeros([n, n], self.dtype)
      for index in range(min(k, i + 1)):
        if (i - index) < l:
          result[i] += math_ops.matmul(m1[index], m2[i - index])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      An [ksize, cin, cout] orthogonal kernel.
    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(orth, 0)

    # Convolve ksize - 1 random length-2 orthogonal block kernels together to
    # build a length-ksize orthogonal kernel (algorithm 1, Xiao et al., 2018).
    p = self._block_orth(self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    # Project every entry onto the cin x cout semi-orthogonal matrix.
    for i in range(ksize):
      p[i] = math_ops.matmul(orth, p[i])

    return self._dict_to_tensor(p, ksize)
class ConvolutionOrthogonal3D(ConvolutionOrthogonal):
  """Initializer that generates a 3D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 5. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero padding).
  See algorithm 1 (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor of
      `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type.

  References:
      [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
      ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 5:
      raise ValueError("The tensor to initialize must be five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Only cubic spatial kernels are supported by the construction.
    if shape[0] != shape[1] or shape[0] != shape[2]:
      raise ValueError("Kernel sizes must be equal.")

    kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k1, k2, k3):
    """Convert a dictionary to a tensor.

    Args:
      x: A k1 * k2 * k3 dictionary.
      k1: First dimension of x.
      k2: Second dimension of x.
      k3: Third dimension of x.

    Returns:
      A k1 * k2 * k3 tensor.
    """

    return array_ops.stack([array_ops.stack(
        [array_ops.stack([x[i, j, k] for k in range(k3)])
         for j in range(k2)]) for i in range(k1)])

  def _block_orth(self, p1, p2, p3):
    """Construct a 2 x 2 x 2 kernel. Used to construct orthogonal kernel.

    Args:
      p1: A symmetric projection matrix.
      p2: A symmetric projection matrix.
      p3: A symmetric projection matrix.

    Returns:
      A 2 x 2 x 2 kernel.
    Raises:
      ValueError: If the dimensions of p1, p2 and p3 are different.
    """
    p1_shape = p1.shape.as_list()
    if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():
      raise ValueError("The dimension of the matrices must be the same.")
    n = p1_shape[0]
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    kernel2x2x2 = {}

    def matmul(p1, p2, p3):
      # Product of three matrices.
      return math_ops.matmul(math_ops.matmul(p1, p2), p3)

    def cast(i, p):
      """Return p or (1-p)."""
      return i * p + (1-i) * (eye - p)

    # Entry (i, j, k) selects p or (1-p) along each axis; the eight entries
    # sum to the identity.
    for i in [0, 1]:
      for j in [0, 1]:
        for k in [0, 1]:
          kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))
    return kernel2x2x2

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: is a k x k x k dictionary, each element is a n x n matrix.
      m2: is a l x l x l dictionary, each element is a n x n matrix.

    Returns:
      (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each
      element is a n x n matrix.
    Raises:
      ValueError: if the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0, 0, 0]).shape.as_list()[0]
    if n != (m2[0, 0, 0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    # Dicts are keyed (i, j, r), so the spatial width is the cube root of the
    # entry count.
    k = int(np.cbrt(len(m1)))
    l = int(np.cbrt(len(m2)))
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      for j in range(size):
        for r in range(size):
          result[i, j, r] = array_ops.zeros([n, n], self.dtype)
          for index1 in range(min(k, i + 1)):
            for index2 in range(min(k, j + 1)):
              for index3 in range(min(k, r + 1)):
                if (i - index1) < l and (j - index2) < l and (r - index3) < l:
                  result[i, j, r] += math_ops.matmul(m1[index1, index2, index3],
                                                     m2[i - index1, j - index2,
                                                        r - index3])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      An [ksize, ksize, ksize, cin, cout] orthogonal kernel.
    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(
          array_ops.expand_dims(
              array_ops.expand_dims(orth, 0), 0), 0)

    # Convolve ksize - 1 random 2x2x2 orthogonal block kernels together to
    # build a ksize**3 orthogonal kernel (algorithm 1, Xiao et al., 2018).
    p = self._block_orth(self._symmetric_projection(cout),
                         self._symmetric_projection(cout),
                         self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout),
                              self._symmetric_projection(cout),
                              self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    # Project every entry onto the cin x cout semi-orthogonal matrix.
    for i in range(ksize):
      for j in range(ksize):
        for k in range(ksize):
          p[i, j, k] = math_ops.matmul(orth, p[i, j, k])

    return self._dict_to_tensor(p, ksize, ksize, ksize)
@tf_export("keras.initializers.Identity", "initializers.identity",
"keras.initializers.identity")
class Identity(Initializer):
"""Initializer that generates the identity matrix.
Only use for 2D matrices.
Args:
gain: Multiplicative factor to apply to the identity matrix.
dtype: The type of the output.
"""
def __init__(self, gain=1.0, dtype=dtypes.float32):
self.gain = gain
self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
def __call__(self, shape, dtype=None, partition_info=None):
full_shape = shape if partition_info is None else partition_info.full_shape
if len(full_shape) != 2:
raise ValueError(
"Identity matrix initializer can only be used for 2D matrices.")
if dtype is None:
dtype = self.dtype
initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)
if partition_info is not None:
initializer = array_ops.slice(initializer, partition_info.var_offset,
shape)
return self.gain * initializer
def get_config(self):
return {"gain": self.gain, "dtype": self.dtype.name}
@tf_export("glorot_uniform_initializer", "keras.initializers.glorot_uniform",
"initializers.glorot_uniform")
class GlorotUniform(VarianceScaling):
"""The Glorot uniform initializer, also called Xavier uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / (fan_in + fan_out))`
where `fan_in` is the number of input units in the weight tensor
and `fan_out` is the number of output units in the weight tensor.
Args:
seed: A Python integer. Used to create random seeds. See
`tf.set_random_seed`
for behavior.
dtype: The data type. Only floating point types are supported.
References:
[Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
"""
def __init__(self, seed=None, dtype=dtypes.float32):
super(GlorotUniform, self).__init__(
scale=1.0,
mode="fan_avg",
distribution="uniform",
seed=seed,
dtype=dtype)
def get_config(self):
return {"seed": self.seed, "dtype": self.dtype.name}
@tf_export(
    "keras.initializers.glorot_normal",
    "initializers.glorot_normal",
    v1=[
        "glorot_normal_initializer", "keras.initializers.glorot_normal",
        "initializers.glorot_normal"
    ])
@deprecation.deprecated_endpoints("glorot_normal_initializer")
class GlorotNormal(VarianceScaling):
  """The Glorot normal initializer, also called Xavier normal initializer.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))`, where `fan_in` / `fan_out` are the
  numbers of input / output units in the weight tensor.

  Args:
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: The data type. Only floating point types are supported.

  References:
      [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
      ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None, dtype=dtypes.float32):
    # Glorot normal is variance scaling with fan-average mode and a truncated
    # normal distribution.
    super(GlorotNormal, self).__init__(
        scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed,
        dtype=dtype)

  def get_config(self):
    return {"seed": self.seed, "dtype": self.dtype.name}
# Aliases.
#
# Legacy lowercase snake_case names for the initializer classes above, kept
# for backwards compatibility with the TF v1 public API.

# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
convolutional_delta_orthogonal = ConvolutionDeltaOrthogonal
convolutional_orthogonal_1d = ConvolutionOrthogonal1D
convolutional_orthogonal_2d = ConvolutionOrthogonal2D
convolutional_orthogonal_3d = ConvolutionOrthogonal3D
# pylint: enable=invalid-name
@tf_export("keras.initializers.lecun_normal", "initializers.lecun_normal")
def lecun_normal(seed=None):
"""LeCun normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(1 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
@tf_export("keras.initializers.lecun_uniform", "initializers.lecun_uniform")
def lecun_uniform(seed=None):
"""LeCun uniform initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(3 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
- Self-Normalizing Neural Networks,
[Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
- Efficient Backprop,
[Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
"""
return VarianceScaling(
scale=1., mode="fan_in", distribution="uniform", seed=seed)
@tf_export("keras.initializers.he_normal", "initializers.he_normal")
def he_normal(seed=None):
"""He normal initializer.
It draws samples from a truncated normal distribution centered on 0
with `stddev = sqrt(2 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
@tf_export("keras.initializers.he_uniform", "initializers.he_uniform")
def he_uniform(seed=None):
"""He uniform variance scaling initializer.
It draws samples from a uniform distribution within [-limit, limit]
where `limit` is `sqrt(6 / fan_in)`
where `fan_in` is the number of input units in the weight tensor.
Arguments:
seed: A Python integer. Used to seed the random generator.
Returns:
An initializer.
References:
[He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
"""
return VarianceScaling(
scale=2., mode="fan_in", distribution="uniform", seed=seed)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
Improve documentation of v1 initializers
PiperOrigin-RevId: 223979317
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations often used for initializing tensors.
All variable initializers returned by functions in this file should have the
following signature:
def _initializer(shape, dtype=dtypes.float32, partition_info=None):
Args:
shape: List of `int` representing the shape of the output `Tensor`. Some
initializers may also be able to accept a `Tensor`.
dtype: (Optional) Type of the output `Tensor`.
partition_info: (Optional) variable_scope._PartitionInfo object holding
additional information about how the variable is partitioned. May be
`None` if the variable is not partitioned.
Returns:
A `Tensor` of type `dtype` and `shape`.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_linalg_ops
from tensorflow.python.ops import linalg_ops_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.util import deprecation
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_arg_values
from tensorflow.python.util.tf_export import tf_export
@tf_export("keras.initializers.Initializer")
class Initializer(object):
"""Initializer base class: all initializers inherit from this class.
"""
def __call__(self, shape, dtype=None, partition_info=None):
"""Returns a tensor object initialized as specified by the initializer.
Args:
shape: Shape of the tensor.
dtype: Optional dtype of the tensor. If not provided use the initializer
dtype.
partition_info: Optional information about the possible partitioning of a
tensor.
"""
raise NotImplementedError
def get_config(self):
"""Returns the configuration of the initializer as a JSON-serializable dict.
Returns:
A JSON-serializable Python dict.
"""
return {}
@classmethod
def from_config(cls, config):
"""Instantiates an initializer from a configuration dictionary.
Example:
```python
initializer = RandomUniform(-1, 1)
config = initializer.get_config()
initializer = RandomUniform.from_config(config)
```
Args:
config: A Python dictionary.
It will typically be the output of `get_config`.
Returns:
An Initializer instance.
"""
return cls(**config)
@tf_export("keras.initializers.Zeros", "initializers.zeros",
"zeros_initializer", "keras.initializers.zeros")
class Zeros(Initializer):
"""Initializer that generates tensors initialized to 0."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return array_ops.zeros(shape, dtype)
def get_config(self):
return {"dtype": self.dtype.name}
@tf_export("keras.initializers.Ones", "initializers.ones", "ones_initializer",
"keras.initializers.ones")
class Ones(Initializer):
"""Initializer that generates tensors initialized to 1."""
def __init__(self, dtype=dtypes.float32):
self.dtype = dtypes.as_dtype(dtype)
def __call__(self, shape, dtype=None, partition_info=None):
if dtype is None:
dtype = self.dtype
return array_ops.ones(shape, dtype)
def get_config(self):
return {"dtype": self.dtype.name}
@tf_export("keras.initializers.Constant", "initializers.constant",
"constant_initializer", "keras.initializers.constant")
class Constant(Initializer):
"""Initializer that generates tensors with constant values.
The resulting tensor is populated with values of type `dtype`, as
specified by arguments `value` following the desired `shape` of the
new tensor (see examples below).
The argument `value` can be a constant value, or a list of values of type
`dtype`. If `value` is a list, then the length of the list must be less
than or equal to the number of elements implied by the desired shape of the
tensor. In the case where the total number of elements in `value` is less
than the number of elements required by the tensor shape, the last element
in `value` will be used to fill the remaining entries. If the total number of
elements in `value` is greater than the number of elements required by the
tensor shape, the initializer will raise a `ValueError`.
Args:
value: A Python scalar, list or tuple of values, or a N-dimensional numpy
array. All elements of the initialized variable will be set to the
corresponding value in the `value` argument.
dtype: Default data type, used if no `dtype` argument is provided when
calling the initializer.
verify_shape: Boolean that enables verification of the shape of `value`. If
`True`, the initializer will throw an error if the shape of `value` is not
compatible with the shape of the initialized tensor.
Raises:
TypeError: If the input `value` is not one of the expected types.
Examples:
The following example can be rewritten using a numpy.ndarray instead
of the `value` list, even reshaped, as shown in the two commented lines
below the `value` list initialization.
```python
>>> import numpy as np
>>> import tensorflow as tf
>>> value = [0, 1, 2, 3, 4, 5, 6, 7]
>>> # value = np.array(value)
>>> # value = value.reshape([2, 4])
>>> init = tf.constant_initializer(value)
>>> print('fitting shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
fitting shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]]
>>> print('larger shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init)
>>> x.initializer.run()
>>> print(x.eval())
larger shape:
[[ 0. 1. 2. 3.]
[ 4. 5. 6. 7.]
[ 7. 7. 7. 7.]]
>>> print('smaller shape:')
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[2, 3], initializer=init)
ValueError: Too many elements provided. Needed at most 6, but received 8
>>> print('shape verification:')
>>> init_verify = tf.constant_initializer(value, verify_shape=True)
>>> with tf.Session():
>>> x = tf.get_variable('x', shape=[3, 4], initializer=init_verify)
TypeError: Expected Tensor's shape: (3, 4), got (8,).
```
"""
def __init__(self, value=0, dtype=dtypes.float32, verify_shape=False):
if not (np.isscalar(value) or isinstance(value, (list, tuple, np.ndarray))):
raise TypeError(
"Invalid type for initial value: %s (expected Python scalar, list or "
"tuple of values, or numpy.ndarray)." % type(value))
self.value = value
self.dtype = dtypes.as_dtype(dtype)
self._verify_shape = verify_shape
def __call__(self, shape, dtype=None, partition_info=None, verify_shape=None):
if dtype is None:
dtype = self.dtype
if verify_shape is None:
verify_shape = self._verify_shape
return constant_op.constant_v1(
self.value, dtype=dtype, shape=shape, verify_shape=verify_shape)
def get_config(self):
# We don't include `verify_shape` for compatibility with Keras.
# `verify_shape` should be passed as an argument to `__call__` rather
# than as a constructor argument: conceptually it isn't a property
# of the initializer.
return {"value": self.value, "dtype": self.dtype.name}
@tf_export("initializers.random_uniform", "random_uniform_initializer")
class RandomUniform(Initializer):
  """Initializer that draws samples from a uniform distribution.

  Args:
    minval: A python scalar or a scalar tensor. Lower bound of the range of
      random values to generate.
    maxval: A python scalar or a scalar tensor. Upper bound of the range of
      random values to generate. Defaults to 1 for float types.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer.
  """

  def __init__(self, minval=0, maxval=None, seed=None, dtype=dtypes.float32):
    self.minval = minval
    self.maxval = maxval
    self.seed = seed
    self.dtype = dtypes.as_dtype(dtype)

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the dtype fixed at construction time when none is given.
    dtype = self.dtype if dtype is None else dtype
    return random_ops.random_uniform(
        shape, self.minval, self.maxval, dtype, seed=self.seed)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    config = {"minval": self.minval, "maxval": self.maxval}
    config["seed"] = self.seed
    config["dtype"] = self.dtype.name
    return config
@tf_export("initializers.random_normal", "random_normal_initializer")
class RandomNormal(Initializer):
  """Initializer that draws samples from a normal distribution.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the dtype fixed at construction time when none is given.
    dtype = self.dtype if dtype is None else dtype
    return random_ops.random_normal(
        shape, self.mean, self.stddev, dtype, seed=self.seed)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    config = {"mean": self.mean, "stddev": self.stddev}
    config["seed"] = self.seed
    config["dtype"] = self.dtype.name
    return config
@tf_export("initializers.truncated_normal", "truncated_normal_initializer")
class TruncatedNormal(Initializer):
  """Initializer that draws samples from a truncated normal distribution.

  These values are similar to values from a `random_normal_initializer`
  except that values more than two standard deviations from the mean are
  discarded and re-drawn. This is the recommended initializer for neural
  network weights and filters.

  Args:
    mean: a python scalar or a scalar tensor. Mean of the random values to
      generate.
    stddev: a python scalar or a scalar tensor. Standard deviation of the
      random values to generate.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.
  """

  def __init__(self, mean=0.0, stddev=1.0, seed=None, dtype=dtypes.float32):
    self.mean = mean
    self.stddev = stddev
    self.seed = seed
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    # Fall back to the dtype fixed at construction time when none is given.
    dtype = self.dtype if dtype is None else dtype
    return random_ops.truncated_normal(
        shape, self.mean, self.stddev, dtype, seed=self.seed)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    config = {"mean": self.mean, "stddev": self.stddev}
    config["seed"] = self.seed
    config["dtype"] = self.dtype.name
    return config
@tf_export(
    "initializers.uniform_unit_scaling",
    v1=[
        "initializers.uniform_unit_scaling", "uniform_unit_scaling_initializer"
    ])
@deprecation.deprecated_endpoints("uniform_unit_scaling_initializer")
class UniformUnitScaling(Initializer):
  """Initializer that generates tensors without scaling variance.

  When initializing a deep network, it is in principle advantageous to keep
  the scale of the input variance constant, so it does not explode or diminish
  by reaching the final layer. If the input is `x` and the operation `x * W`,
  and we want to initialize `W` uniformly at random, we need to pick `W` from

      [-sqrt(3) / sqrt(dim), sqrt(3) / sqrt(dim)]

  to keep the scale intact, where `dim = W.shape[0]` (the size of the input).
  A similar calculation for convolutional networks gives an analogous result
  with `dim` equal to the product of the first 3 dimensions. When
  nonlinearities are present, we need to multiply this by a constant `factor`.
  See (Sussillo et al., 2014) for deeper motivation, experiments
  and the calculation of constants. In section 2.3 there, the constants were
  numerically computed: for a linear layer it's 1.0, relu: ~1.43, tanh: ~1.15.

  Args:
    factor: Float. A multiplicative factor by which the values will be scaled.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Sussillo et al., 2014](https://arxiv.org/abs/1412.6558)
    ([pdf](http://arxiv.org/pdf/1412.6558.pdf))
  """

  @deprecated(None,
              "Use tf.initializers.variance_scaling instead with distribution="
              "uniform to get equivalent behavior.")
  def __init__(self, factor=1.0, seed=None, dtype=dtypes.float32):
    self.factor = factor
    self.seed = seed
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    scale_shape = shape
    if partition_info is not None:
      # For partitioned variables the input size must come from the full
      # (unpartitioned) shape, not this partition's slice.
      scale_shape = partition_info.full_shape
    input_size = 1.0
    # Estimating input size is not possible to do perfectly, but we try.
    # The estimate, obtained by multiplying all dimensions but the last one,
    # is the right thing for matrix multiply and convolutions (see above).
    for dim in scale_shape[:-1]:
      input_size *= float(dim)
    # Avoid errors when initializing zero-size tensors.
    input_size = max(input_size, 1.0)
    # Uniform on [-max_val, max_val]: sqrt(3/input_size) keeps unit variance,
    # scaled by the user-supplied factor.
    max_val = math.sqrt(3 / input_size) * self.factor
    return random_ops.random_uniform(
        shape, -max_val, max_val, dtype, seed=self.seed)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"factor": self.factor, "seed": self.seed, "dtype": self.dtype.name}
@tf_export(
    "keras.initializers.VarianceScaling",
    "initializers.variance_scaling",
    v1=[
        "keras.initializers.VarianceScaling", "initializers.variance_scaling",
        "variance_scaling_initializer"
    ])
@deprecation.deprecated_endpoints("variance_scaling_initializer")
class VarianceScaling(Initializer):
  """Initializer capable of adapting its scale to the shape of weights tensors.

  With `distribution="truncated_normal" or "untruncated_normal"`,
  samples are drawn from a truncated/untruncated normal
  distribution with a mean of zero and a standard deviation (after truncation,
  if used) `stddev = sqrt(scale / n)`
  where n is:
    - number of input units in the weight tensor, if mode = "fan_in"
    - number of output units, if mode = "fan_out"
    - average of the numbers of input and output units, if mode = "fan_avg"

  With `distribution="uniform"`, samples are drawn from a uniform distribution
  within [-limit, limit], with `limit = sqrt(3 * scale / n)`.

  Args:
    scale: Scaling factor (positive float).
    mode: One of "fan_in", "fan_out", "fan_avg".
    distribution: Random distribution to use. One of "normal", "uniform".
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  Raises:
    ValueError: In case of an invalid value for the "scale", "mode" or
      "distribution" arguments.
  """

  @deprecated_arg_values(
      None,
      "`normal` is a deprecated alias for `truncated_normal`",
      distribution="normal")
  def __init__(self,
               scale=1.0,
               mode="fan_in",
               distribution="truncated_normal",
               seed=None,
               dtype=dtypes.float32):
    # Validate eagerly so misconfiguration fails at construction, not at the
    # first call.
    if scale <= 0.:
      raise ValueError("`scale` must be positive float.")
    if mode not in {"fan_in", "fan_out", "fan_avg"}:
      raise ValueError("Invalid `mode` argument:", mode)
    distribution = distribution.lower()
    if distribution not in {"normal", "uniform",
                            "truncated_normal", "untruncated_normal"}:
      raise ValueError("Invalid `distribution` argument:", distribution)
    self.scale = scale
    self.mode = mode
    self.distribution = distribution
    self.seed = seed
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    scale = self.scale
    scale_shape = shape
    if partition_info is not None:
      # Fan counts must be computed from the full (unpartitioned) shape.
      scale_shape = partition_info.full_shape
    fan_in, fan_out = _compute_fans(scale_shape)
    # Divide scale by the selected fan so the resulting variance is scale/n.
    if self.mode == "fan_in":
      scale /= max(1., fan_in)
    elif self.mode == "fan_out":
      scale /= max(1., fan_out)
    else:
      scale /= max(1., (fan_in + fan_out) / 2.)
    if self.distribution == "normal" or self.distribution == "truncated_normal":
      # constant taken from scipy.stats.truncnorm.std(a=-2, b=2, loc=0., scale=1.)
      # Dividing by it compensates for the variance lost to truncation.
      stddev = math.sqrt(scale) / .87962566103423978
      return random_ops.truncated_normal(
          shape, 0.0, stddev, dtype, seed=self.seed)
    elif self.distribution == "untruncated_normal":
      stddev = math.sqrt(scale)
      return random_ops.random_normal(
          shape, 0.0, stddev, dtype, seed=self.seed)
    else:
      # "uniform": limit sqrt(3*scale) gives the same variance, scale/n.
      limit = math.sqrt(3.0 * scale)
      return random_ops.random_uniform(
          shape, -limit, limit, dtype, seed=self.seed)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {
        "scale": self.scale,
        "mode": self.mode,
        "distribution": self.distribution,
        "seed": self.seed,
        "dtype": self.dtype.name
    }
@tf_export(
    "keras.initializers.Orthogonal",
    "initializers.orthogonal",
    "keras.initializers.orthogonal",
    v1=[
        "keras.initializers.Orthogonal", "initializers.orthogonal",
        "orthogonal_initializer", "keras.initializers.orthogonal"
    ])
@deprecation.deprecated_endpoints("orthogonal_initializer")
class Orthogonal(Initializer):
  """Initializer that generates an orthogonal matrix.

  If the shape of the tensor to initialize is two-dimensional, it is
  initialized with an orthogonal matrix obtained from the QR decomposition of
  a matrix of random numbers drawn from a normal distribution.
  If the matrix has fewer rows than columns then the output will have
  orthogonal rows. Otherwise, the output will have orthogonal columns.

  If the shape of the tensor to initialize is more than two-dimensional,
  a matrix of shape `(shape[0] * ... * shape[n - 2], shape[n - 1])`
  is initialized, where `n` is the length of the shape vector.
  The matrix is subsequently reshaped to give a tensor of the desired shape.

  Args:
    gain: multiplicative factor to apply to the orthogonal matrix
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Saxe et al., 2014](https://openreview.net/forum?id=_wzZwKpTDF_9C)
    ([pdf](https://arxiv.org/pdf/1312.6120.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 2:
      raise ValueError("The tensor to initialize must be "
                       "at least two-dimensional")
    # Flatten the input shape with the last dimension remaining
    # its original shape so it works for conv2d
    num_rows = 1
    for dim in shape[:-1]:
      num_rows *= dim
    num_cols = shape[-1]
    # QR requires rows >= cols; orient the flat matrix tall, transpose later.
    flat_shape = (num_cols, num_rows) if num_rows < num_cols else (num_rows,
                                                                   num_cols)
    # Generate a random matrix
    a = random_ops.random_normal(flat_shape, dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform (fix the sign ambiguity of the QR decomposition so the
    # distribution over orthogonal matrices is uniform).
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    if num_rows < num_cols:
      q = array_ops.matrix_transpose(q)
    return self.gain * array_ops.reshape(q, shape)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
class ConvolutionDeltaOrthogonal(Initializer):
  """Initializer that generates a delta orthogonal kernel for ConvNets.

  The shape of the tensor must have length 3, 4 or 5. The number of input
  filters must not exceed the number of output filters. The center pixels of
  the tensor form an orthogonal matrix. Other pixels are set to be zero. See
  algorithm 2 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor
      of `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
    ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    # Check the shape
    if len(shape) < 3 or len(shape) > 5:
      raise ValueError("The tensor to initialize must be at least "
                       "three-dimensional and at most five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    # Generate a random matrix
    a = random_ops.random_normal([shape[-1], shape[-1]],
                                 dtype=dtype, seed=self.seed)
    # Compute the qr factorization
    q, r = gen_linalg_ops.qr(a, full_matrices=False)
    # Make Q uniform (fix the sign ambiguity of the QR decomposition).
    d = array_ops.diag_part(r)
    q *= math_ops.sign(d)
    # Keep only the first in_filters rows: an (in, out) orthogonal slab.
    q = q[:shape[-2], :]
    q *= math_ops.cast(self.gain, dtype=dtype)
    # Scatter the orthogonal slab into the spatial center of the kernel;
    # all other spatial positions stay zero ("delta" kernel).
    if len(shape) == 3:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    elif len(shape) == 4:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    else:
      weight = array_ops.scatter_nd([[(shape[0]-1)//2, (shape[1]-1)//2,
                                      (shape[2]-1)//2]],
                                    array_ops.expand_dims(q, 0), shape)
    return weight

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}
class ConvolutionOrthogonal(Initializer):
  """Initializer that generates orthogonal kernel for ConvNets.

  Base class used to construct 1D, 2D and 3D orthogonal kernels for
  convolution. Subclasses implement `__call__`; this class provides the
  shared random-matrix helpers.

  Args:
    gain: multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor
      of `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
    ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __init__(self, gain=1.0, seed=None, dtype=dtypes.float32):
    self.gain = gain
    # Only floating point dtypes are accepted; raises otherwise.
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))
    self.seed = seed

  def __call__(self, shape, dtype=None, partition_info=None):
    # Abstract: dimensionality-specific subclasses build the actual kernel.
    raise NotImplementedError

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"gain": self.gain, "seed": self.seed, "dtype": self.dtype.name}

  # Helper functions.
  def _orthogonal_matrix(self, n):
    """Construct an n x n orthogonal matrix.

    Args:
      n: Dimension.

    Returns:
      A n x n orthogonal matrix.
    """
    a = random_ops.random_normal([n, n], dtype=self.dtype, seed=self.seed)
    # Advance the seed so successive helper calls draw different matrices.
    if self.seed:
      self.seed += 1
    q, r = gen_linalg_ops.qr(a)
    d = array_ops.diag_part(r)
    # make q uniform (fix the sign ambiguity of the QR decomposition)
    q *= math_ops.sign(d)
    return q

  def _symmetric_projection(self, n):
    """Compute a n x n symmetric projection matrix.

    Args:
      n: Dimension.

    Returns:
      A n x n symmetric projection matrix, i.e. a matrix P s.t. P=P*P, P=P^T.
    """
    q = self._orthogonal_matrix(n)
    # randomly zeroing out some columns
    mask = math_ops.cast(random_ops.random_normal([n], seed=self.seed) > 0,
                         self.dtype)
    if self.seed:
      self.seed += 1
    c = math_ops.multiply(q, mask)
    # C C^T with orthonormal (masked) columns is symmetric and idempotent.
    return math_ops.matmul(c, array_ops.matrix_transpose(c))
class ConvolutionOrthogonal2D(ConvolutionOrthogonal):
  """Initializer that generates a 2D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 4. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero
  padding). See algorithm 1 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. This has the effect of scaling the output 2-norm
      by a factor of `gain`.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
    ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 4:
      raise ValueError("The tensor to initialize must be four-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    if shape[0] != shape[1]:
      raise ValueError("Kernel sizes must be equal.")

    kernel = self._orthogonal_kernel(shape[0], shape[2], shape[3])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k1, k2):
    """Convert a dictionary to a tensor.

    Args:
      x: A k1 * k2 dictionary.
      k1: First dimension of x.
      k2: Second dimension of x.

    Returns:
      A k1 * k2 tensor.
    """

    return array_ops.stack([array_ops.stack([x[i, j] for j in range(k2)])
                            for i in range(k1)])

  def _block_orth(self, p1, p2):
    """Construct a 2 x 2 kernel. Used to construct orthogonal kernel.

    Args:
      p1: A symmetric projection matrix.
      p2: A symmetric projection matrix.

    Returns:
      A 2 x 2 kernel [[p1p2,         p1(1-p2)],
                      [(1-p1)p2, (1-p1)(1-p2)]].

    Raises:
      ValueError: If the dimensions of p1 and p2 are different.
    """
    if p1.shape.as_list() != p2.shape.as_list():
      raise ValueError("The dimension of the matrices must be the same.")
    n = p1.shape.as_list()[0]
    kernel2x2 = {}
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    kernel2x2[0, 0] = math_ops.matmul(p1, p2)
    kernel2x2[0, 1] = math_ops.matmul(p1, (eye - p2))
    kernel2x2[1, 0] = math_ops.matmul((eye - p1), p2)
    kernel2x2[1, 1] = math_ops.matmul((eye - p1), (eye - p2))

    return kernel2x2

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: A k x k dictionary, each element is a n x n matrix.
      m2: A l x l dictionary, each element is a n x n matrix.

    Returns:
      (k + l - 1) * (k + l - 1) dictionary each element is a n x n matrix.

    Raises:
      ValueError: if the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0, 0]).shape.as_list()[0]
    if n != (m2[0, 0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    # Dictionaries are square: recover the side lengths from the sizes.
    k = int(np.sqrt(len(m1)))
    l = int(np.sqrt(len(m2)))
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      for j in range(size):
        result[i, j] = array_ops.zeros([n, n], self.dtype)
        for index1 in range(min(k, i + 1)):
          for index2 in range(min(k, j + 1)):
            if (i - index1) < l and (j - index2) < l:
              result[i, j] += math_ops.matmul(m1[index1, index2],
                                              m2[i - index1, j - index2])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      A [ksize, ksize, cin, cout] orthogonal kernel.

    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(array_ops.expand_dims(orth, 0), 0)

    # Grow the kernel one spatial step at a time by convolving 2x2 blocks
    # built from fresh symmetric projections (algorithm 1, Xiao et al. 2018).
    p = self._block_orth(self._symmetric_projection(cout),
                         self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout),
                              self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    for i in range(ksize):
      for j in range(ksize):
        p[i, j] = math_ops.matmul(orth, p[i, j])

    return self._dict_to_tensor(p, ksize, ksize)
class ConvolutionOrthogonal1D(ConvolutionOrthogonal):
  """Initializer that generates a 1D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 3. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero
  padding). See algorithm 1 in (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor
      of `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
    ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 3:
      raise ValueError("The tensor to initialize must be three-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k):
    """Convert a dictionary to a tensor.

    Args:
      x: A dictionary of length k.
      k: Dimension of x.

    Returns:
      A tensor with the same dimension.
    """

    return array_ops.stack([x[i] for i in range(k)])

  def _block_orth(self, projection_matrix):
    """Construct a kernel. Used to construct orthogonal kernel.

    Args:
      projection_matrix: A symmetric projection matrix of size n x n.

    Returns:
      [projection_matrix, (1 - projection_matrix)].
    """
    n = projection_matrix.shape.as_list()[0]
    kernel = {}
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    kernel[0] = projection_matrix
    kernel[1] = eye - projection_matrix
    return kernel

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: A dictionary of length k, each element is a n x n matrix.
      m2: A dictionary of length l, each element is a n x n matrix.

    Returns:
      (k + l - 1) dictionary each element is a n x n matrix.

    Raises:
      ValueError: If the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0]).shape.as_list()[0]
    if n != (m2[0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    k = len(m1)
    l = len(m2)
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      result[i] = array_ops.zeros([n, n], self.dtype)
      for index in range(min(k, i + 1)):
        if (i - index) < l:
          result[i] += math_ops.matmul(m1[index], m2[i - index])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      A [ksize, cin, cout] orthogonal kernel.

    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(orth, 0)

    # Grow the kernel one spatial step at a time by convolving length-2
    # blocks built from fresh symmetric projections (Xiao et al., 2018).
    p = self._block_orth(self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    for i in range(ksize):
      p[i] = math_ops.matmul(orth, p[i])

    return self._dict_to_tensor(p, ksize)
class ConvolutionOrthogonal3D(ConvolutionOrthogonal):
  """Initializer that generates a 3D orthogonal kernel for ConvNets.

  The shape of the tensor must have length 5. The number of input
  filters must not exceed the number of output filters.
  The orthogonality(==isometry) is exact when the inputs are circular padded.
  There are finite-width effects with non-circular padding (e.g. zero
  padding). See algorithm 1 (Xiao et al., 2018).

  Args:
    gain: Multiplicative factor to apply to the orthogonal
      matrix. Default is 1. The 2-norm of an input is multiplied by a factor
      of `gain` after applying this convolution.
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Xiao et al., 2018](http://proceedings.mlr.press/v80/xiao18a.html)
    ([pdf](http://proceedings.mlr.press/v80/xiao18a/xiao18a.pdf))
  """

  def __call__(self, shape, dtype=None, partition_info=None):
    if dtype is None:
      dtype = self.dtype
    if len(shape) != 5:
      raise ValueError("The tensor to initialize must be five-dimensional")

    if shape[-2] > shape[-1]:
      raise ValueError("In_filters cannot be greater than out_filters.")

    if shape[0] != shape[1] or shape[0] != shape[2]:
      raise ValueError("Kernel sizes must be equal.")

    kernel = self._orthogonal_kernel(shape[0], shape[-2], shape[-1])
    kernel *= math_ops.cast(self.gain, dtype=dtype)
    return kernel

  def _dict_to_tensor(self, x, k1, k2, k3):
    """Convert a dictionary to a tensor.

    Args:
      x: A k1 * k2 * k3 dictionary.
      k1: First dimension of x.
      k2: Second dimension of x.
      k3: Third dimension of x.

    Returns:
      A k1 * k2 * k3 tensor.
    """

    return array_ops.stack([array_ops.stack(
        [array_ops.stack([x[i, j, k] for k in range(k3)])
         for j in range(k2)]) for i in range(k1)])

  def _block_orth(self, p1, p2, p3):
    """Construct a 2 x 2 x 2 kernel. Used to construct orthogonal kernel.

    Args:
      p1: A symmetric projection matrix.
      p2: A symmetric projection matrix.
      p3: A symmetric projection matrix.

    Returns:
      A 2 x 2 x 2 kernel.

    Raises:
      ValueError: If the dimensions of p1, p2 and p3 are different.
    """
    p1_shape = p1.shape.as_list()
    if p1_shape != p2.shape.as_list() or p1_shape != p3.shape.as_list():
      raise ValueError("The dimension of the matrices must be the same.")
    n = p1_shape[0]
    eye = linalg_ops_impl.eye(n, dtype=self.dtype)
    kernel2x2x2 = {}

    def matmul(p1, p2, p3):
      return math_ops.matmul(math_ops.matmul(p1, p2), p3)

    def cast(i, p):
      """Return p or (1-p)."""
      return i * p + (1 - i) * (eye - p)

    # Each of the 8 corners is a product of p or (1-p) factors per axis.
    for i in [0, 1]:
      for j in [0, 1]:
        for k in [0, 1]:
          kernel2x2x2[i, j, k] = matmul(cast(i, p1), cast(j, p2), cast(k, p3))
    return kernel2x2x2

  def _matrix_conv(self, m1, m2):
    """Matrix convolution.

    Args:
      m1: is a k x k x k dictionary, each element is a n x n matrix.
      m2: is a l x l x l dictionary, each element is a n x n matrix.

    Returns:
      (k + l - 1) x (k + l - 1) x (k + l - 1) dictionary each
      element is a n x n matrix.

    Raises:
      ValueError: if the entries of m1 and m2 are of different dimensions.
    """

    n = (m1[0, 0, 0]).shape.as_list()[0]
    if n != (m2[0, 0, 0]).shape.as_list()[0]:
      raise ValueError("The entries in matrices m1 and m2 "
                       "must have the same dimensions!")
    # Dictionaries are cubic: recover the side lengths from the sizes.
    k = int(np.cbrt(len(m1)))
    l = int(np.cbrt(len(m2)))
    result = {}
    size = k + l - 1
    # Compute matrix convolution between m1 and m2.
    for i in range(size):
      for j in range(size):
        for r in range(size):
          result[i, j, r] = array_ops.zeros([n, n], self.dtype)
          for index1 in range(min(k, i + 1)):
            for index2 in range(min(k, j + 1)):
              for index3 in range(min(k, r + 1)):
                if (i - index1) < l and (j - index2) < l and (r - index3) < l:
                  result[i, j, r] += math_ops.matmul(m1[index1, index2, index3],
                                                     m2[i - index1, j - index2,
                                                        r - index3])
    return result

  def _orthogonal_kernel(self, ksize, cin, cout):
    """Construct orthogonal kernel for convolution.

    Args:
      ksize: Kernel size.
      cin: Number of input channels.
      cout: Number of output channels.

    Returns:
      A [ksize, ksize, ksize, cin, cout] orthogonal kernel.

    Raises:
      ValueError: If cin > cout.
    """
    if cin > cout:
      raise ValueError("The number of input channels cannot exceed "
                       "the number of output channels.")
    orth = self._orthogonal_matrix(cout)[0:cin, :]
    if ksize == 1:
      return array_ops.expand_dims(
          array_ops.expand_dims(
              array_ops.expand_dims(orth, 0), 0), 0)

    # Grow the kernel one spatial step at a time by convolving 2x2x2 blocks
    # built from fresh symmetric projections (Xiao et al., 2018).
    p = self._block_orth(self._symmetric_projection(cout),
                         self._symmetric_projection(cout),
                         self._symmetric_projection(cout))
    for _ in range(ksize - 2):
      temp = self._block_orth(self._symmetric_projection(cout),
                              self._symmetric_projection(cout),
                              self._symmetric_projection(cout))
      p = self._matrix_conv(p, temp)
    for i in range(ksize):
      for j in range(ksize):
        for k in range(ksize):
          p[i, j, k] = math_ops.matmul(orth, p[i, j, k])

    return self._dict_to_tensor(p, ksize, ksize, ksize)
@tf_export("keras.initializers.Identity", "initializers.identity",
           "keras.initializers.identity")
class Identity(Initializer):
  """Initializer that generates the identity matrix.

  Only usable for 2D matrices.

  Args:
    gain: Multiplicative factor to apply to the identity matrix.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.
  """

  def __init__(self, gain=1.0, dtype=dtypes.float32):
    self.gain = gain
    self.dtype = _assert_float_dtype(dtypes.as_dtype(dtype))

  def __call__(self, shape, dtype=None, partition_info=None):
    # Validate against the full (unpartitioned) shape when partitioned.
    if partition_info is None:
      full_shape = shape
    else:
      full_shape = partition_info.full_shape
    if len(full_shape) != 2:
      raise ValueError(
          "Identity matrix initializer can only be used for 2D matrices.")
    dtype = self.dtype if dtype is None else dtype
    initializer = linalg_ops_impl.eye(*full_shape, dtype=dtype)
    if partition_info is not None:
      # Carve out just this partition's slice of the full identity matrix.
      initializer = array_ops.slice(initializer, partition_info.var_offset,
                                    shape)
    return self.gain * initializer

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"gain": self.gain, "dtype": self.dtype.name}
@tf_export("glorot_uniform_initializer", "keras.initializers.glorot_uniform",
           "initializers.glorot_uniform")
class GlorotUniform(VarianceScaling):
  """The Glorot uniform initializer, also called Xavier uniform initializer.

  Draws samples from a uniform distribution within [-limit, limit], where
  `limit = sqrt(6 / (fan_in + fan_out))`; `fan_in` is the number of input
  units in the weight tensor and `fan_out` the number of output units.

  Args:
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None, dtype=dtypes.float32):
    # Glorot uniform is variance scaling with fan-averaged unit scale.
    super(GlorotUniform, self).__init__(
        scale=1.0, mode="fan_avg", distribution="uniform", seed=seed,
        dtype=dtype)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"seed": self.seed, "dtype": self.dtype.name}
@tf_export(
    "keras.initializers.glorot_normal",
    "initializers.glorot_normal",
    v1=[
        "glorot_normal_initializer", "keras.initializers.glorot_normal",
        "initializers.glorot_normal"
    ])
@deprecation.deprecated_endpoints("glorot_normal_initializer")
class GlorotNormal(VarianceScaling):
  """The Glorot normal initializer, also called Xavier normal initializer.

  Draws samples from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / (fan_in + fan_out))`; `fan_in` is the number of input
  units in the weight tensor and `fan_out` the number of output units.

  Args:
    seed: A Python integer. Used to create random seeds. See
      `tf.set_random_seed` for behavior.
    dtype: Default data type, used if no `dtype` argument is provided when
      calling the initializer. Only floating point types are supported.

  References:
    [Glorot et al., 2010](http://proceedings.mlr.press/v9/glorot10a.html)
    ([pdf](http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf))
  """

  def __init__(self, seed=None, dtype=dtypes.float32):
    # Glorot normal is variance scaling with fan-averaged unit scale.
    super(GlorotNormal, self).__init__(
        scale=1.0, mode="fan_avg", distribution="truncated_normal", seed=seed,
        dtype=dtype)

  def get_config(self):
    """Returns the serializable configuration of this initializer."""
    return {"seed": self.seed, "dtype": self.dtype.name}
# Aliases.
# Snake_case aliases of the initializer classes defined above, assigned at
# module level so both naming styles resolve to the same class objects.

# pylint: disable=invalid-name
zeros_initializer = Zeros
ones_initializer = Ones
constant_initializer = Constant
random_uniform_initializer = RandomUniform
random_normal_initializer = RandomNormal
truncated_normal_initializer = TruncatedNormal
uniform_unit_scaling_initializer = UniformUnitScaling
variance_scaling_initializer = VarianceScaling
glorot_uniform_initializer = GlorotUniform
glorot_normal_initializer = GlorotNormal
orthogonal_initializer = Orthogonal
identity_initializer = Identity
convolutional_delta_orthogonal = ConvolutionDeltaOrthogonal
convolutional_orthogonal_1d = ConvolutionOrthogonal1D
convolutional_orthogonal_2d = ConvolutionOrthogonal2D
convolutional_orthogonal_3d = ConvolutionOrthogonal3D
# pylint: enable=invalid-name
@tf_export("keras.initializers.lecun_normal", "initializers.lecun_normal")
def lecun_normal(seed=None):
  """LeCun normal initializer.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(1 / fan_in)`, where `fan_in` is the number of input units
  in the weight tensor.

  Arguments:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    An initializer.

  References:
      - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
      ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
      - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  config = dict(
      scale=1., mode="fan_in", distribution="truncated_normal", seed=seed)
  return VarianceScaling(**config)
@tf_export("keras.initializers.lecun_uniform", "initializers.lecun_uniform")
def lecun_uniform(seed=None):
  """LeCun uniform initializer.

  Samples are drawn from a uniform distribution within [-limit, limit],
  where `limit = sqrt(3 / fan_in)` and `fan_in` is the number of input
  units in the weight tensor.

  Arguments:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    An initializer.

  References:
      - Self-Normalizing Neural Networks,
      [Klambauer et al., 2017](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks)
      ([pdf](https://papers.nips.cc/paper/6698-self-normalizing-neural-networks.pdf))
      - Efficient Backprop,
      [Lecun et al., 1998](http://yann.lecun.com/exdb/publis/pdf/lecun-98b.pdf)
  """
  config = dict(scale=1., mode="fan_in", distribution="uniform", seed=seed)
  return VarianceScaling(**config)
@tf_export("keras.initializers.he_normal", "initializers.he_normal")
def he_normal(seed=None):
  """He normal initializer.

  Samples are drawn from a truncated normal distribution centered on 0 with
  `stddev = sqrt(2 / fan_in)`, where `fan_in` is the number of input units
  in the weight tensor.

  Arguments:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    An initializer.

  References:
      [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
      ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """
  config = dict(
      scale=2., mode="fan_in", distribution="truncated_normal", seed=seed)
  return VarianceScaling(**config)
@tf_export("keras.initializers.he_uniform", "initializers.he_uniform")
def he_uniform(seed=None):
  """He uniform variance scaling initializer.

  Samples are drawn from a uniform distribution within [-limit, limit],
  where `limit = sqrt(6 / fan_in)` and `fan_in` is the number of input
  units in the weight tensor.

  Arguments:
    seed: A Python integer. Used to seed the random generator.

  Returns:
    An initializer.

  References:
      [He et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/html/He_Delving_Deep_into_ICCV_2015_paper.html)
      ([pdf](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/He_Delving_Deep_into_ICCV_2015_paper.pdf))
  """
  config = dict(scale=2., mode="fan_in", distribution="uniform", seed=seed)
  return VarianceScaling(**config)
# Utility functions.
def _compute_fans(shape):
"""Computes the number of input and output units for a weight shape.
Args:
shape: Integer shape tuple or TF tensor shape.
Returns:
A tuple of scalars (fan_in, fan_out).
"""
if len(shape) < 1: # Just to avoid errors for constants.
fan_in = fan_out = 1
elif len(shape) == 1:
fan_in = fan_out = shape[0]
elif len(shape) == 2:
fan_in = shape[0]
fan_out = shape[1]
else:
# Assuming convolution kernels (2D, 3D, or more).
# kernel shape: (..., input_depth, depth)
receptive_field_size = 1.
for dim in shape[:-2]:
receptive_field_size *= dim
fan_in = shape[-2] * receptive_field_size
fan_out = shape[-1] * receptive_field_size
return fan_in, fan_out
def _assert_float_dtype(dtype):
"""Validate and return floating point type based on `dtype`.
`dtype` must be a floating point type.
Args:
dtype: The data type to validate.
Returns:
Validated type.
Raises:
ValueError: if `dtype` is not a floating point type.
"""
if not dtype.is_floating:
raise ValueError("Expected floating point type, got %s." % dtype)
return dtype
|
import os
import os.path
from xml.etree import ElementTree
from elixir.rbx import Container, Script
class ModelCompiler:
    """Creates a ROBLOX Model from source code.

    It converts folders, Lua files, and ROBLOX models into an XML file that you
    can import into your game.

    Usage:

        # This is just getting paths to the source directory and the file that
        # we'll be outputting to.
        root = os.path.abspath(os.path.dirname(__file__))
        source = os.path.join(root, "source")
        build = os.path.join(root, "build/output.rbxmx")

        # Compiles everything under `source/` to `build/output.rbxmx`.
        model = ModelCompiler(source, build)
        model.compile()

    Now you'll have a ROBLOX Model in `build/` that you can drag into your
    ROBLOX level. And just like that, all of your code is there!

    source : str
        The directory containing Lua code and ROBLOX Models that you want
        compiled.
    dest : str
        The name of the file that will be created when compiling. Directories in
        this path are automatically created for you.
    extension=".rbxmx" : str
        The extension appended to `dest`.

        It's important that this value be either `.rbxmx` or `.rbxm`, as those
        are the two extensions ROBLOX recognizes as Model files. You won't be
        able to import the file otherwise.
    """

    def __init__(self, source, dest, extension=".rbxmx"):
        self.source = source
        self.dest = dest+extension
        # NOTE(review): compiling from the constructor is a side effect;
        # instantiation alone writes the output file.
        self.compile()

    def _make_dirs(self, path):
        """Create any missing parent directories of `path`."""
        parent_folders = os.path.dirname(path)
        if parent_folders and not os.path.exists(parent_folders):
            os.makedirs(parent_folders)

    def _make_output_path(self):
        """Ensure the output file's directory exists before writing."""
        self._make_dirs(self.dest)

    def _get_base_tag(self):
        """Gets the base <roblox> tag that encompasses the model.

        This should always be the first element in the file. All others are
        appended to this tag.
        """
        return ElementTree.Element("roblox", attrib={
            "xmlns:xmine": "http://www.w3.org/2005/05/xmlmime",
            "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "xsi": "http://www.roblox.com/roblox.xsd",
            "version": "4" })

    def _get_element(self, path):
        """Map a filesystem path to its in-game counterpart.

        Directories become Containers and regular files become Scripts.
        Implicitly returns None for anything else (e.g. a broken symlink).
        """
        if os.path.isdir(path):
            return Container(path)
        elif os.path.isfile(path):
            return Script(path)

    def _create_hierarchy(self, path):
        """Turns a directory structure into ROBLOX-compatible XML.

        path : str
            The path to a directory to recurse through.
        """
        hierarchy = Container(path).get_xml()
        def recurse(path, hierarchy):
            for item in os.listdir(path):
                item_path = os.path.join(path, item)
                element = self._get_element(item_path).get_xml()
                hierarchy.append(element)
                if os.path.isdir(item_path):
                    recurse(item_path, element)
        recurse(path, hierarchy)
        return hierarchy

    def _create_model(self):
        """Assemble the complete XML tree under the base <roblox> tag."""
        model = self._get_base_tag()
        hierarchy = self._create_hierarchy(self.source)
        model.append(hierarchy)
        return model

    def _write_model(self):
        """Compiles the model and writes it to the output file."""
        # Writing as binary so that we can use UTF-8 encoding.
        with open(self.dest, "wb+") as f:
            model = self._create_model()
            tree = ElementTree.ElementTree(model)
            # ROBLOX does not support self-closing tags. In the event that an
            # element is blank (eg. a script doesn't have any contents) you
            # won't be able to import the model. We need to ensure all elements
            # have an ending tag by setting `short_empty_elements=False`.
            tree.write(f, encoding="utf-8", short_empty_elements=False)

    def compile(self):
        """Compiles source code into a ROBLOX Model file."""
        self._make_output_path()
        self._write_model()
Rename variables and introduce intermediate variables for clarity
import os
import os.path
from xml.etree import ElementTree
from elixir.rbx import Container, Script
class ModelCompiler:
    """Creates a ROBLOX Model from source code.

    It converts folders, Lua files, and ROBLOX models into an XML file that you
    can import into your game.

    Usage:

        # This is just getting paths to the source directory and the file that
        # we'll be outputting to.
        root = os.path.abspath(os.path.dirname(__file__))
        source = os.path.join(root, "source")
        build = os.path.join(root, "build/output.rbxmx")

        # Compiles everything under `source/` to `build/output.rbxmx`.
        model = ModelCompiler(source, build)
        model.compile()

    Now you'll have a ROBLOX Model in `build/` that you can drag into your
    ROBLOX level. And just like that, all of your code is there!

    source : str
        The directory containing Lua code and ROBLOX Models that you want
        compiled.
    dest : str
        The name of the file that will be created when compiling. Directories in
        this path are automatically created for you.
    extension=".rbxmx" : str
        The extension appended to `dest`.

        It's important that this value be either `.rbxmx` or `.rbxm`, as those
        are the two extensions ROBLOX recognizes as Model files. You won't be
        able to import the file otherwise.
    """

    def __init__(self, source, dest, extension=".rbxmx"):
        self.source = source
        self.dest = dest+extension
        # NOTE(review): compiling from the constructor is a side effect;
        # instantiation alone writes the output file.
        self.compile()

    def _make_dirs(self, path):
        """Create any missing parent directories of `path`."""
        parent_folders = os.path.dirname(path)
        if parent_folders and not os.path.exists(parent_folders):
            os.makedirs(parent_folders)

    def _make_output_path(self):
        """Ensure the output file's directory exists before writing."""
        self._make_dirs(self.dest)

    def _get_base_tag(self):
        """Gets the base <roblox> tag that encompasses the model.

        This should always be the first element in the file. All others are
        appended to this tag.
        """
        return ElementTree.Element("roblox", attrib={
            "xmlns:xmine": "http://www.w3.org/2005/05/xmlmime",
            "xmlns:xsi": "http://www.w3.org/2001/XMLSchema-instance",
            "xsi": "http://www.roblox.com/roblox.xsd",
            "version": "4" })

    def _get_element(self, path):
        """Map a filesystem path to its in-game counterpart.

        Directories become Containers and regular files become Scripts.
        Returns None for anything else (e.g. a broken symlink or FIFO).
        """
        if os.path.isdir(path):
            return Container(path)
        elif os.path.isfile(path):
            return Script(path)
        return None

    def _create_hierarchy(self, path):
        """Turns a directory structure into ROBLOX-compatible XML.

        path : str
            The path to a directory to recurse through.
        """
        # This is the folder that holds all the source code.
        root = Container(path)
        root_xml = root.get_xml()
        def recurse(path, hierarchy):
            for item in os.listdir(path):
                item_path = os.path.join(path, item)
                element = self._get_element(item_path)
                if element is None:
                    # Entries that are neither directories nor regular
                    # files used to crash the compiler with an
                    # AttributeError (`None.get_xml()`); skip them.
                    continue
                element_xml = element.get_xml()
                hierarchy.append(element_xml)
                if os.path.isdir(item_path):
                    recurse(item_path, element_xml)
        recurse(path, root_xml)
        return root_xml

    def _create_model(self):
        """Assemble the complete XML tree under the base <roblox> tag."""
        model = self._get_base_tag()
        hierarchy = self._create_hierarchy(self.source)
        model.append(hierarchy)
        return model

    def _write_model(self):
        """Compiles the model and writes it to the output file."""
        # Writing as binary so that we can use UTF-8 encoding.
        with open(self.dest, "wb+") as f:
            model = self._create_model()
            tree = ElementTree.ElementTree(model)
            # ROBLOX does not support self-closing tags. In the event that an
            # element is blank (eg. a script doesn't have any contents) you
            # won't be able to import the model. We need to ensure all elements
            # have an ending tag by setting `short_empty_elements=False`.
            tree.write(f, encoding="utf-8", short_empty_elements=False)

    def compile(self):
        """Compiles source code into a ROBLOX Model file."""
        self._make_output_path()
        self._write_model()
|
import logging
from models import (Countries, Players, PlayerSalaries, PartialTenures, AcquisitionPaths,
AcquisitionType, PlayerDrafts, Competitions, Clubs, Years, Seasons)
from etl import PersonIngest, SeasonalDataIngest
logger = logging.getLogger(__name__)
class AcquisitionIngest(PersonIngest):
    # Ingests player acquisition-path records (drafts, transfers, etc.)
    # from tabular rows into the database, batching inserts.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 200

    def parse_file(self, rows):
        """Parse acquisition rows, resolving foreign keys and batch-inserting.

        Rows whose Country, Year, or Player cannot be resolved are logged
        and skipped rather than aborting the whole ingest.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Player Acquisition Paths...")
        for keys in rows:
            person_dict = self.get_person_data(**keys)
            country_name = self.column_unicode("Country", **keys)
            path = self.column("Acquisition", **keys)
            acquisition_year = self.column("Year", **keys)
            # Unrecognized acquisition strings are recorded as a null path
            # rather than rejected.
            try:
                acquisition_path = AcquisitionType.from_string(path)
            except ValueError:
                acquisition_path = None
            country_id = self.get_id(Countries, name=country_name)
            if country_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Country record {}".format(person_dict, country_name))
                continue
            year_id = self.get_id(Years, yr=acquisition_year)
            if year_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Year record {}".format(person_dict, acquisition_year))
                continue
            player_dict = dict(country_id=country_id, **person_dict)
            player_id = self.get_id(Players, **player_dict)
            if player_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Player record".format(player_dict))
                continue
            acquisition_dict = dict(player_id=player_id, year_id=year_id, path=acquisition_path)
            if not self.record_exists(AcquisitionPaths, **acquisition_dict):
                acquisition_record = AcquisitionPaths(**acquisition_dict)
                # Draft-type acquisitions carry extra columns and become
                # PlayerDrafts records instead.
                if acquisition_path in [AcquisitionType.college_draft, AcquisitionType.inaugural_draft,
                                        AcquisitionType.super_draft, AcquisitionType.supplemental_draft]:
                    acquisition_record = self.parse_draft_data(acquisition_dict, keys)
                if acquisition_record is not None:
                    insertion_list.append(acquisition_record)
                inserted, insertion_list = self.bulk_insert(insertion_list, AcquisitionIngest.BATCH_SIZE)
                inserts += inserted
                if inserted and not inserts % AcquisitionIngest.BATCH_SIZE:
                    logger.info("{} records inserted".format(inserts))
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Acquisition records inserted and committed to database".format(inserts))
        logger.info("Acquisition Ingestion complete.")

    def parse_draft_data(self, acq_tuple, keys):
        """Build a PlayerDrafts record from a draft-type acquisition row.

        Returns None (and logs) when the acquiring club is not in the
        database.
        """
        draft_round = self.column_int("Round", **keys)
        draft_selection = self.column_int("Pick", **keys)
        is_generation_adidas = self.column_bool("Gen Adidas", **keys)
        drafting_club = self.column_unicode("Acquiring Club", **keys)
        club_id = self.get_id(Clubs, name=drafting_club)
        if club_id is None:
            logger.error(u"Cannot insert {p[Acquisition]} record for {p[First Name]} {p[Last Name]}: "
                         u"Club {p[Acquiring Club]} not in database".format(p=keys))
            return None
        return PlayerDrafts(round=draft_round, selection=draft_selection,
                            gen_adidas=is_generation_adidas, club_id=club_id, **acq_tuple)
class PlayerSalaryIngest(SeasonalDataIngest):
    # Ingests player salary records for a given competition/season,
    # batching inserts.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 100

    def parse_file(self, rows):
        """Parse salary rows, resolving foreign keys and batch-inserting.

        Rows whose Competition, Season, Club, or Player cannot be resolved
        are logged and skipped.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Player Salaries...")
        for keys in rows:
            competition_name = self.column_unicode("Competition", **keys)
            season_name = self.column("Season", **keys)
            club_symbol = self.column("Club Symbol", **keys)
            last_name = self.column_unicode("Last Name", **keys)
            first_name = self.column_unicode("First Name", **keys)
            # Salaries are stored as integer cents.
            base_salary = int(self.column_float("Base", **keys) * 100)
            guar_salary = int(self.column_float("Guaranteed", **keys) * 100)
            competition_id = self.get_id(Competitions, name=competition_name)
            if competition_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Competition {} not in database".format(first_name, last_name, competition_name))
                continue
            season_id = self.get_id(Seasons, name=season_name)
            if season_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Season {} not in database".format(first_name, last_name, season_name))
                continue
            club_id = self.get_id(Clubs, symbol=club_symbol)
            if club_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Club {} not in database".format(first_name, last_name, club_symbol))
                continue
            player_id = self.get_player_from_name(first_name, last_name)
            if player_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Player not in database".format(first_name, last_name))
                continue
            salary_dict = dict(player_id=player_id, club_id=club_id,
                               competition_id=competition_id, season_id=season_id)
            if not self.record_exists(PlayerSalaries, **salary_dict):
                insertion_list.append(PlayerSalaries(base_salary=base_salary,
                                                     avg_guaranteed=guar_salary,
                                                     **salary_dict))
                inserted, insertion_list = self.bulk_insert(insertion_list, PlayerSalaryIngest.BATCH_SIZE)
                inserts += inserted
                if inserted and not inserts % PlayerSalaryIngest.BATCH_SIZE:
                    logger.info("{} records inserted".format(inserts))
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Player Salary records inserted and committed to database".format(inserts))
        logger.info("Player Salary Ingestion complete.")
class PartialTenureIngest(SeasonalDataIngest):
    # Ingests records for players who spent only part of a season at a
    # club; start/end weeks are taken directly from the source columns.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 10

    def parse_file(self, rows):
        """Parse partial-tenure rows, resolving foreign keys and inserting.

        Rows whose Competition, Season, Club, or Player cannot be resolved
        are logged and skipped.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Partial Tenure records...")
        for keys in rows:
            competition_name = self.column_unicode("Competition", **keys)
            season_name = self.column("Season", **keys)
            club_symbol = self.column("Club Symbol", **keys)
            last_name = self.column_unicode("Last Name", **keys)
            first_name = self.column_unicode("First Name", **keys)
            start_week = self.column_int("Start Term", **keys)
            end_week = self.column_int("End Term", **keys)
            competition_id = self.get_id(Competitions, name=competition_name)
            if competition_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Competition {} not in database".format(first_name, last_name, competition_name))
                continue
            season_id = self.get_id(Seasons, name=season_name)
            if season_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Season {} not in database".format(first_name, last_name, season_name))
                continue
            club_id = self.get_id(Clubs, symbol=club_symbol)
            if club_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Club {} not in database".format(first_name, last_name, club_symbol))
                continue
            player_id = self.get_player_from_name(first_name, last_name)
            if player_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Player not in database".format(first_name, last_name))
                continue
            partials_dict = dict(player_id=player_id, club_id=club_id,
                                 competition_id=competition_id,
                                 season_id=season_id)
            if not self.record_exists(PartialTenures, **partials_dict):
                insertion_list.append(PartialTenures(start_week=start_week,
                                                     end_week=end_week,
                                                     **partials_dict))
                inserted, insertion_list = self.bulk_insert(insertion_list, PartialTenureIngest.BATCH_SIZE)
                inserts += inserted
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Partial Tenure records inserted and committed to database".format(inserts))
        logger.info("Partial Tenure Ingestion complete.")
Allow for inference of start/end weeks of partially tenured players from transaction dates
import datetime
import logging
from models import (Countries, Players, PlayerSalaries, PartialTenures, AcquisitionPaths,
AcquisitionType, PlayerDrafts, Competitions, CompetitionSeasons, Clubs,
Years, Seasons)
from etl import PersonIngest, SeasonalDataIngest
logger = logging.getLogger(__name__)
class AcquisitionIngest(PersonIngest):
    # Ingests player acquisition-path records (drafts, transfers, etc.)
    # from tabular rows into the database, batching inserts.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 200

    def parse_file(self, rows):
        """Parse acquisition rows, resolving foreign keys and batch-inserting.

        Rows whose Country, Year, or Player cannot be resolved are logged
        and skipped rather than aborting the whole ingest.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Player Acquisition Paths...")
        for keys in rows:
            person_dict = self.get_person_data(**keys)
            country_name = self.column_unicode("Country", **keys)
            path = self.column("Acquisition", **keys)
            acquisition_year = self.column("Year", **keys)
            # Unrecognized acquisition strings are recorded as a null path
            # rather than rejected.
            try:
                acquisition_path = AcquisitionType.from_string(path)
            except ValueError:
                acquisition_path = None
            country_id = self.get_id(Countries, name=country_name)
            if country_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Country record {}".format(person_dict, country_name))
                continue
            year_id = self.get_id(Years, yr=acquisition_year)
            if year_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Year record {}".format(person_dict, acquisition_year))
                continue
            player_dict = dict(country_id=country_id, **person_dict)
            player_id = self.get_id(Players, **player_dict)
            if player_id is None:
                logger.error(u"Cannot insert Acquisition record for {}: "
                             u"Database error involving Player record".format(player_dict))
                continue
            acquisition_dict = dict(player_id=player_id, year_id=year_id, path=acquisition_path)
            if not self.record_exists(AcquisitionPaths, **acquisition_dict):
                acquisition_record = AcquisitionPaths(**acquisition_dict)
                # Draft-type acquisitions carry extra columns and become
                # PlayerDrafts records instead.
                if acquisition_path in [AcquisitionType.college_draft, AcquisitionType.inaugural_draft,
                                        AcquisitionType.super_draft, AcquisitionType.supplemental_draft]:
                    acquisition_record = self.parse_draft_data(acquisition_dict, keys)
                if acquisition_record is not None:
                    insertion_list.append(acquisition_record)
                inserted, insertion_list = self.bulk_insert(insertion_list, AcquisitionIngest.BATCH_SIZE)
                inserts += inserted
                if inserted and not inserts % AcquisitionIngest.BATCH_SIZE:
                    logger.info("{} records inserted".format(inserts))
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Acquisition records inserted and committed to database".format(inserts))
        logger.info("Acquisition Ingestion complete.")

    def parse_draft_data(self, acq_tuple, keys):
        """Build a PlayerDrafts record from a draft-type acquisition row.

        Returns None (and logs) when the acquiring club is not in the
        database.
        """
        draft_round = self.column_int("Round", **keys)
        draft_selection = self.column_int("Pick", **keys)
        is_generation_adidas = self.column_bool("Gen Adidas", **keys)
        drafting_club = self.column_unicode("Acquiring Club", **keys)
        club_id = self.get_id(Clubs, name=drafting_club)
        if club_id is None:
            logger.error(u"Cannot insert {p[Acquisition]} record for {p[First Name]} {p[Last Name]}: "
                         u"Club {p[Acquiring Club]} not in database".format(p=keys))
            return None
        return PlayerDrafts(round=draft_round, selection=draft_selection,
                            gen_adidas=is_generation_adidas, club_id=club_id, **acq_tuple)
class PlayerSalaryIngest(SeasonalDataIngest):
    # Ingests player salary records for a given competition/season,
    # batching inserts.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 100

    def parse_file(self, rows):
        """Parse salary rows, resolving foreign keys and batch-inserting.

        Rows whose Competition, Season, Club, or Player cannot be resolved
        are logged and skipped.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Player Salaries...")
        for keys in rows:
            competition_name = self.column_unicode("Competition", **keys)
            season_name = self.column("Season", **keys)
            club_symbol = self.column("Club Symbol", **keys)
            last_name = self.column_unicode("Last Name", **keys)
            first_name = self.column_unicode("First Name", **keys)
            # Salaries are stored as integer cents.
            base_salary = int(self.column_float("Base", **keys) * 100)
            guar_salary = int(self.column_float("Guaranteed", **keys) * 100)
            competition_id = self.get_id(Competitions, name=competition_name)
            if competition_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Competition {} not in database".format(first_name, last_name, competition_name))
                continue
            season_id = self.get_id(Seasons, name=season_name)
            if season_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Season {} not in database".format(first_name, last_name, season_name))
                continue
            club_id = self.get_id(Clubs, symbol=club_symbol)
            if club_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Club {} not in database".format(first_name, last_name, club_symbol))
                continue
            player_id = self.get_player_from_name(first_name, last_name)
            if player_id is None:
                logger.error(u"Cannot insert Salary record for {} {}: "
                             u"Player not in database".format(first_name, last_name))
                continue
            salary_dict = dict(player_id=player_id, club_id=club_id,
                               competition_id=competition_id, season_id=season_id)
            if not self.record_exists(PlayerSalaries, **salary_dict):
                insertion_list.append(PlayerSalaries(base_salary=base_salary,
                                                     avg_guaranteed=guar_salary,
                                                     **salary_dict))
                inserted, insertion_list = self.bulk_insert(insertion_list, PlayerSalaryIngest.BATCH_SIZE)
                inserts += inserted
                if inserted and not inserts % PlayerSalaryIngest.BATCH_SIZE:
                    logger.info("{} records inserted".format(inserts))
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Player Salary records inserted and committed to database".format(inserts))
        logger.info("Player Salary Ingestion complete.")
class PartialTenureIngest(SeasonalDataIngest):
    # Ingests records for players who spent only part of a season at a
    # club.  When the source lacks explicit start/end weeks, they are
    # inferred from transaction dates and the competition-season calendar.

    # Number of records accumulated before a bulk insert is attempted.
    BATCH_SIZE = 10

    def season_week(self, competition_id, season_id, **kwargs):
        """Convert a transaction date into a 1-based week of the season.

        Exactly one of the keyword arguments must be supplied:

        start : str or None
            ISO date string (YYYY-MM-DD).  None means the tenure began in
            the first week of the season (returns 1).
        end : str or None
            ISO date string.  None means the tenure ran through the final
            week of the season (returns the season's week count).

        Raises ValueError when neither keyword is given.
        """
        compseason = self.session.query(CompetitionSeasons).filter_by(
            competition_id=competition_id, season_id=season_id).one()
        if 'start' in kwargs:
            ref_date_string = kwargs.get('start')
            if ref_date_string is None:
                return 1
        elif 'end' in kwargs:
            ref_date_string = kwargs.get('end')
            if ref_date_string is None:
                date_delta = compseason.end_date - compseason.start_date
                # Integer division keeps weeks as ints on Python 3 as well
                # (identical to the Python 2 int-division behavior).
                return date_delta.days // 7 + 1
        else:
            # Previously this branch only logged and then crashed with a
            # NameError on the undefined ref_date_string; fail explicitly.
            logger.error("No 'start' or 'end' parameter in season_week call")
            raise ValueError("season_week requires a 'start' or 'end' parameter")
        year, month, day = [int(x) for x in ref_date_string.split('-')]
        ref_date = datetime.date(year, month, day)
        date_delta = ref_date - compseason.start_date
        return date_delta.days // 7 + 1

    def parse_file(self, rows):
        """Parse partial-tenure rows, resolving foreign keys and inserting.

        Missing Start/End Term weeks are inferred from the optional
        Start Date / End Date columns via season_week().  Rows whose
        Competition, Season, Club, or Player cannot be resolved are logged
        and skipped.
        """
        inserts = 0
        insertion_list = []
        logger.info("Ingesting Partial Tenure records...")
        for keys in rows:
            competition_name = self.column_unicode("Competition", **keys)
            season_name = self.column("Season", **keys)
            club_symbol = self.column("Club Symbol", **keys)
            last_name = self.column_unicode("Last Name", **keys)
            first_name = self.column_unicode("First Name", **keys)
            start_week = self.column_int("Start Term", **keys)
            end_week = self.column_int("End Term", **keys)
            # Transaction-date columns are optional in the source file.
            start_date_iso = self.column("Start Date", **keys) if "Start Date" in keys else None
            end_date_iso = self.column("End Date", **keys) if "End Date" in keys else None
            competition_id = self.get_id(Competitions, name=competition_name)
            if competition_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Competition {} not in database".format(first_name, last_name, competition_name))
                continue
            season_id = self.get_id(Seasons, name=season_name)
            if season_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Season {} not in database".format(first_name, last_name, season_name))
                continue
            club_id = self.get_id(Clubs, symbol=club_symbol)
            if club_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Club {} not in database".format(first_name, last_name, club_symbol))
                continue
            player_id = self.get_player_from_name(first_name, last_name)
            if player_id is None:
                logger.error(u"Cannot insert Partial Tenure record for {} {}: "
                             u"Player not in database".format(first_name, last_name))
                continue
            # Fall back to date-based inference when explicit weeks are
            # absent from the row.
            start_week = start_week or self.season_week(competition_id, season_id, start=start_date_iso)
            end_week = end_week or self.season_week(competition_id, season_id, end=end_date_iso)
            partials_dict = dict(player_id=player_id, club_id=club_id,
                                 competition_id=competition_id,
                                 season_id=season_id)
            if not self.record_exists(PartialTenures, **partials_dict):
                insertion_list.append(PartialTenures(start_week=start_week,
                                                     end_week=end_week,
                                                     **partials_dict))
                inserted, insertion_list = self.bulk_insert(insertion_list, PartialTenureIngest.BATCH_SIZE)
                inserts += inserted
        # Flush whatever is left over from the final (partial) batch.
        self.session.add_all(insertion_list)
        self.session.commit()
        inserts += len(insertion_list)
        logger.info("Total {} Partial Tenure records inserted and committed to database".format(inserts))
        logger.info("Partial Tenure Ingestion complete.")
|
# -*- test-case-name: game.test.test_view -*-
"""
View code!
"""
from __future__ import division
from OpenGL.GL import (
GL_PROJECTION, GL_MODELVIEW, GL_RGBA, GL_UNSIGNED_BYTE,
GL_COLOR_MATERIAL, GL_LIGHTING, GL_DEPTH_TEST, GL_LIGHT0, GL_POSITION,
GL_REPEAT, GL_TRIANGLES,
GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T, GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_MIN_FILTER, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_NEAREST,
GL_TEXTURE_COORD_ARRAY,
glMatrixMode, glViewport,
glGenTextures, glBindTexture, glTexParameteri, glTexImage2D,
glLoadIdentity, glPushMatrix, glPopMatrix,
glEnable, glClear, glColor, glLight,
glTranslate, glRotate, glBegin, glEnd, glVertex3f,
glEnableClientState, glDisableClientState, glVertexPointer, glDrawArrays,
GL_FLOAT, GL_VERTEX_ARRAY, glTexCoordPointer)
from OpenGL.GLU import (
gluPerspective, gluNewQuadric, gluSphere)
from OpenGL.arrays.vbo import VBO
import pygame.display, pygame.locals
from twisted.python.filepath import FilePath
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from epsilon.structlike import record
from game import __file__ as gameFile
from game.vector import Vector
from game.terrain import GRASS, MOUNTAIN, DESERT, WATER, SurfaceMesh
def loadImage(path):
    """
    Load an image from the L{FilePath} into a L{pygame.Surface}.

    @type path: L{FilePath}

    @rtype: L{pygame.Surface}
    """
    surface = pygame.image.load(path.path)
    return surface
class Color(record("red green blue")):
    """
    An RGB color.

    @ivar red: The red component.
    @ivar green: The green component.
    @ivar blue: The blue component.
    """
class Sphere(record("center radius color")):
    """
    A renderer for a sphere.

    @ivar center: A L{Vector} giving the center of this sphere.
    @ivar radius: A number giving the radius of this sphere.
    @ivar color: A L{Color} giving the color of this sphere.
    """
    def __init__(self, *args, **kwargs):
        super(Sphere, self).__init__(*args, **kwargs)
        # A reusable GLU quadric object used by gluSphere for tessellation.
        self.quad = gluNewQuadric()

    def paint(self):
        """
        Render this sphere at its center, in its color.
        """
        # Push/pop so the translation doesn't leak into later painters.
        glPushMatrix()
        glColor(self.color.red, self.color.green, self.color.blue)
        glTranslate(self.center.x, self.center.y, self.center.z)
        # 25 slices and 25 stacks of tessellation.
        gluSphere(self.quad, self.radius, 25, 25)
        glPopMatrix()
class StaticCamera(record('position orientation')):
    """
    A fixed viewing perspective from which the scene will be observed.

    @ivar position: A L{Vector} giving the coordinates in the space of the
        perspective.

    @ivar orientation: A L{Vector} giving three rotations to orient the
        perspective.
    """
    def paint(self):
        """
        Apply this camera's view transformation to the current matrix.

        Rotations are applied about each axis before the (negated)
        translation, so the world moves opposite to the camera.
        """
        glRotate(self.orientation.x, 1.0, 0.0, 0.0)
        glRotate(self.orientation.y, 0.0, 1.0, 0.0)
        glRotate(self.orientation.z, 0.0, 0.0, 1.0)
        glTranslate(-self.position.x, -self.position.y, -self.position.z)
class FollowCamera(record('player')):
    """
    A viewing perspective which is positioned wherever a particular player is
    positioned.

    @ivar player: The L{Player} this camera follows.
    """
    def paint(self):
        """
        Apply a view transformation tracking the followed player's current
        position and orientation.
        """
        v = self.player.getPosition()
        o = self.player.orientation
        glRotate(o.x, 1.0, 0.0, 0.0)
        glRotate(o.y, 0.0, 1.0, 0.0)
        glRotate(o.z, 0.0, 0.0, 1.0)
        # XXX Put the camera somewhere in the middle-ish of the player model.
        # This is a wild guess for now, camera position data should be available
        # from the model at some later point.
        glTranslate(-v.x - 0.5, -v.y - 1, -v.z - 0.5)
class StaticLight(record('position')):
    """
    A source of light in a scene.

    @ivar position: A L{Vector} giving the coordinates of the light source.
    """
    def paint(self):
        """
        Enable GL_LIGHT0 and place it at this light's position.
        """
        # NOTE(review): always uses GL_LIGHT0, so multiple StaticLights
        # would overwrite each other — confirm if more than one is needed.
        glEnable(GL_LIGHT0)
        glLight(
            GL_LIGHT0, GL_POSITION,
            (self.position.x, self.position.y, self.position.z))
class Scene(object):
    """
    A collection of things to be rendered.

    @ivar _items: A C{list} of things which are part of this scene and which
        will be rendered when this scene is rendered.

    @ivar _lights: A C{list} of light sources in the scene.

    @ivar camera: The camera whose transformation is applied before painting,
        or C{None} for an untransformed view.
    """
    camera = None

    def __init__(self):
        self._items = []
        self._lights = []
        # XXX This is a temporary hack to make things not so gloomy until the
        # server starts telling us about light sources.
        self.addLight(StaticLight(Vector(5, 10, 5)))

    def add(self, item):
        """
        Add a renderable item to the scene.
        """
        self._items.append(item)

    def addLight(self, light):
        """
        Add a light source to the scene.
        """
        self._lights.append(light)

    def paint(self):
        """
        Display this scene.
        """
        # Clear what was rendered for the last frame, and discard the associated
        # culling geometry.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # Now reset the model matrix back into an unmodified state.
        glLoadIdentity()
        # Then set up the camera transformations. These will affect everything
        # that follows, which is just what we want.
        if self.camera is not None:
            self.camera.paint()
        # Then illuminate stuff.
        for light in self._lights:
            light.paint()
        # Then put everything into the scene.
        for item in self._items:
            item.paint()
class Viewport(object):
    """
    Represent the location and size of the view onto the world.
    This object serves primarily to convert between model and view coordinates.
    @ivar modelPosition: two-tuple of ints giving the model position which
        corresponds to the bottom left corner of the view.
    @ivar viewSize: two-tuple of ints giving the width and height of the view.
    """
    def __init__(self, modelPosition, viewSize):
        """
        Initialize the Viewport.
        @param modelPosition: Value for C{modelPosition} attribute.
        @param viewSize: Value for C{viewSize} attribute.
        """
        self.modelPosition = modelPosition
        self.viewSize = viewSize
    def modelToView(self, position):
        """
        Convert the given model coordinates into view coordinates.
        @param position: A two-tuple of ints giving a position in the model
            coordinate system.
        @return: A two-tuple of ints giving a position in the view coordinate
            system.
        """
        # The y axis is flipped: view y grows downward, model y upward.
        return (
            position[0] - self.modelPosition[0],
            self.viewSize[1] - (position[1] - self.modelPosition[1]))
    def initialize(self):
        """
        Set up the viewport.
        """
        x, y = self.viewSize
        # Hide things that are behind other things
        glEnable(GL_DEPTH_TEST)
        # Create the OpenGL viewport, setting the size of the window (in pixels)
        # and defining how the scene is projected onto it.
        glViewport(0, 0, x, y)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Field of view, aspect ratio, near clipping, far clipping.  x / y is
        # true division thanks to the __future__ import at the top of the file.
        gluPerspective(60.0, x / y, 0.5, 1000.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # This makes material color properties be defined by glColor calls,
        # necessary to get the right colors when lighting is enabled.
        glEnable(GL_COLOR_MATERIAL)
        # This might be desirable at some point, who knows.
        # glColorMaterial(GL_FRONT_AND_BACK, GL_EMISSION)
        # Make lighting work, because we like lights.
        glEnable(GL_LIGHTING)
        # Make textures work too.
        glEnable(GL_TEXTURE_2D)
class Window(object):
    """
    A top-level PyGame-based window.  This acts as a container for
    other view objects.
    # @ivar environment: The L{game.environment.Environment} which is being
    # displayed.
    @ivar clock: Something providing
        L{twisted.internet.interfaces.IReactorTime}.
    @ivar screen: The L{pygame.Surface} which will be drawn to.
    @ivar _renderCall: The L{LoopingCall} which repaints the scene, created
        by C{go}.
    @ivar _inputCall: The L{LoopingCall} which polls pygame input events,
        created by C{go}.
    @ivar controller: The current controller.
    @ivar display: Something like L{pygame.display}.
    @ivar event: Something like L{pygame.event}.
    """
    screen = None
    def __init__(self,
                 environment,
                 clock=reactor,
                 display=pygame.display,
                 event=pygame.event):
        environment.addObserver(self)
        self.viewport = Viewport((0, 0), (800, 600))
        self.clock = clock
        self.display = display
        self.controller = None
        self.event = event
        self.scene = Scene()
        self.scene.camera = StaticCamera(Vector(0, 0, 0), Vector(0, 0, 0))
    def paint(self):
        """
        Call C{paint} on all views which have been directly added to
        this Window.
        """
        self.scene.paint()
        self.display.flip()
    def handleInput(self):
        """
        Handle currently available pygame input events.
        """
        for event in self.event.get():
            # Quit on window close or the 'q' key.
            if event.type == pygame.locals.QUIT or \
               event.type == pygame.KEYDOWN and event.key == pygame.K_q:
                self.stop()
            elif self.controller is not None:
                if event.type == pygame.KEYDOWN:
                    self.controller.keyDown(event.key)
                elif event.type == pygame.KEYUP:
                    self.controller.keyUp(event.key)
                elif event.type == pygame.MOUSEMOTION:
                    self.controller.mouseMotion(
                        event.pos, event.rel, event.buttons)
                elif event.type == pygame.MOUSEBUTTONUP:
                    # Toggle mouse grab and pointer visibility together.
                    # NOTE(review): these call the pygame modules directly
                    # instead of self.event - confirm whether bypassing the
                    # injected event object is intentional.
                    pygame.event.set_grab(not pygame.event.get_grab())
                    pygame.mouse.set_visible(not pygame.mouse.set_visible(True))
    def submitTo(self, controller):
        """
        Specify the given controller as the one to receive further
        events.
        """
        self.controller = controller
        # XXX Next line untested
        self.scene.camera = FollowCamera(controller.player)
    def go(self):
        """
        Show this window.
        @return: A Deferred that fires when this window is closed by the user.
        """
        pygame.init()
        self.screen = self.display.set_mode(
            self.viewport.viewSize,
            pygame.locals.DOUBLEBUF | pygame.locals.OPENGL)
        self.viewport.initialize()
        # Repaint at roughly 60 frames per second; poll input at 25Hz.
        self._renderCall = LoopingCall(self.paint)
        self._renderCall.start(1 / 60, now=False)
        self._inputCall = LoopingCall(self.handleInput)
        finishedDeferred = self._inputCall.start(0.04, now=False)
        finishedDeferred.addCallback(lambda ign: self._renderCall.stop())
        finishedDeferred.addCallback(lambda ign: self.display.quit())
        return finishedDeferred
    def stop(self):
        """
        Stop updating this window and handling events for it.
        """
        # Stopping the input loop fires the Deferred returned by go(), which
        # in turn stops the render loop and quits the display.
        self._inputCall.stop()
    def playerCreated(self, player):
        """
        Create a L{PlayerView}.
        """
        self.scene.add(PlayerView(player))
    def playerRemoved(self, player):
        """
        Remove a L{PlayerView}.
        """
        # Safe despite mutating during iteration: we return immediately
        # after the single removal.
        for view in self.scene._items:
            if isinstance(view, PlayerView) and view.player is player:
                self.scene._items.remove(view)
                return
class TerrainView(object):
    """
    A view for terrain over a tract of land.
    @type environment: L{Environment}
    @ivar environment: The game environment from which terrain will be rendered.
    @ivar loader: A callable like L{loadImage}.
    @ivar _images: A cache of L{pygame.Surface} instances, keyed on terrain
        types.  These images are the source for texture data for each type of
        terrain.
    @ivar _files: Maps each terrain type to the filename of its texture image.
    @ivar _texture: The GL texture id once created by C{_createTexture},
        otherwise C{None}.
    """
    # Dead `square` and `directions` class attributes removed: nothing in
    # this class referenced them (geometry comes from SurfaceMesh/VBO now).
    _files = {
        GRASS: 'grass.png',
        MOUNTAIN: 'mountain.png',
        DESERT: 'desert.png',
        WATER: 'water.png',
    }
    _texture = None
    def __init__(self, environment, loader):
        self._images = {}
        self.loader = loader
        # environment is presumably None only in tests - TODO confirm; the
        # mesh and VBO are only built when a real environment is supplied.
        if environment is not None:
            self._coord, self._ext = self._getTextureForTerrain()
            self.environment = environment
            self._surface = SurfaceMesh(
                environment.terrain, self._coord, self._ext)
            self.environment.terrain.addObserver(self._surface.changed)
            self._vbo = VBO(self._surface.surface)
    def _getImageForTerrain(self, terrainType):
        """
        @param terrainType: The terrain type.
        @rtype: L{Surface}
        @return: An image which represents the given C{terrainType}.
        """
        # Loaded lazily and cached in _images.
        if terrainType not in self._images:
            image = self.loader(
                FilePath(gameFile).sibling('data').child(self._files[terrainType]))
            self._images[terrainType] = image
        return self._images[terrainType]
    def _createTexture(self):
        """
        Upload the combined terrain atlas image as a GL texture and return
        the texture id.
        """
        surface = self._textureImage
        width = surface.get_width()
        height = surface.get_height()
        raw = pygame.image.tostring(surface, "RGBA", 0)
        texture = glGenTextures(1)
        # glGenTextures fails by returning 0, particularly if there's no GL
        # context yet.
        assert texture != 0
        glBindTexture(GL_TEXTURE_2D, texture)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(
            GL_TEXTURE_2D, 0,
            GL_RGBA, width, height, 0,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            raw)
        return texture
    def _getTextureForTerrain(self):
        """
        Return a single texture containing image data for every terrain type, as
        well as a dictionary mapping each terrain type to the corresponding
        texture coordinates, and a float indicating the size of each terrain's
        area of the overall texture.
        """
        # Number of 64px tiles per side of the atlas; slightly over-allocated
        # so every terrain type fits.
        dimensions = int(len(self._files) ** 0.5) + 2
        surface = pygame.surface.Surface((64 * dimensions, 64 * dimensions))
        coordinates = {}
        types = self._files.iterkeys()
        for y in range(dimensions):
            for x in range(dimensions):
                try:
                    terrainType = types.next()
                except StopIteration:
                    # NOTE: only breaks the inner loop; harmless since the
                    # remaining cells are simply left unused.
                    break
                image = self._getImageForTerrain(terrainType)
                surface.blit(image, (x * 64, y * 64))
                # True division (__future__ import): fractional tex coords.
                coordinates[terrainType] = (x / dimensions, y / dimensions)
        self._textureImage = surface
        return coordinates, 1 / dimensions
    def paint(self):
        """
        For all of the known terrain, render whatever faces are exposed.
        """
        # Upload the texture atlas lazily, once a GL context exists.
        if self._texture is None:
            self._texture = self._createTexture()
        glBindTexture(GL_TEXTURE_2D, self._texture)
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)
        self._vbo.bind()
        # Interleaved layout: 5 floats per vertex (3 position + 2 texcoord),
        # stride 20 bytes, texcoords starting at byte offset 12.
        glVertexPointer(3, GL_FLOAT, 4 * 5, self._vbo)
        glTexCoordPointer(2, GL_FLOAT, 4 * 5, self._vbo + (4 * 3))
        # glPushMatrix()
        # glTranslate(x, y, z)
        glDrawArrays(GL_TRIANGLES, 0, self._surface.important)
        # glPopMatrix()
        self._vbo.unbind()
        glDisableClientState(GL_VERTEX_ARRAY)
        glDisableClientState(GL_TEXTURE_COORD_ARRAY)
        glBindTexture(GL_TEXTURE_2D, 0)
class PlayerView(record('player')):
    """
    View of a player, drawn as a simple white pyramid oriented by the
    player's heading.
    """
    def paint(self):
        glPushMatrix()
        position = self.player.getPosition()
        # Move to the player and spin about the vertical (y) axis.
        glTranslate(position.x, position.y, position.z)
        glRotate(self.player.orientation.y, 0.0, 1.0, 0.0)
        # Slide back because the pyramid below is centered at 0.5, 0, 0.5
        # instead of at the origin.  Without this it rotates around its corner
        # instead of around its center.
        glTranslate(-0.5, 0, -0.5)
        glColor(1.0, 1.0, 1.0)
        # Four triangular faces sharing the apex at (0.5, 0, 0.5).
        glBegin(GL_TRIANGLES)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(0.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, 0.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(1.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, 1.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(1.0, 1.0, 1.0)
        glVertex3f(0.0, 1.0, 1.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(0.0, 1.0, 1.0)
        glVertex3f(0.0, 1.0, 0.0)
        glEnd()
        glPopMatrix()
Remove some dead attributes from TerrainView. Also delay using GLU in Sphere until the GL context is initialized, otherwise it fails miserably.
# -*- test-case-name: game.test.test_view -*-
"""
View code!
"""
from __future__ import division
from OpenGL.GL import (
GL_PROJECTION, GL_MODELVIEW, GL_RGBA, GL_UNSIGNED_BYTE,
GL_COLOR_MATERIAL, GL_LIGHTING, GL_DEPTH_TEST, GL_LIGHT0, GL_POSITION,
GL_REPEAT, GL_TRIANGLES,
GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_TEXTURE_WRAP_T, GL_TEXTURE_MAG_FILTER,
GL_TEXTURE_MIN_FILTER, GL_COLOR_BUFFER_BIT, GL_DEPTH_BUFFER_BIT, GL_NEAREST,
GL_TEXTURE_COORD_ARRAY,
glMatrixMode, glViewport,
glGenTextures, glBindTexture, glTexParameteri, glTexImage2D,
glLoadIdentity, glPushMatrix, glPopMatrix,
glEnable, glClear, glColor, glLight,
glTranslate, glRotate, glBegin, glEnd, glVertex3f,
glEnableClientState, glDisableClientState, glVertexPointer, glDrawArrays,
GL_FLOAT, GL_VERTEX_ARRAY, glTexCoordPointer)
from OpenGL.GLU import (
gluPerspective, gluNewQuadric, gluSphere)
from OpenGL.arrays.vbo import VBO
import pygame.display, pygame.locals
from twisted.python.filepath import FilePath
from twisted.internet.task import LoopingCall
from twisted.internet import reactor
from epsilon.structlike import record
from game import __file__ as gameFile
from game.vector import Vector
from game.terrain import GRASS, MOUNTAIN, DESERT, WATER, SurfaceMesh
def loadImage(path):
    """
    Read the image file named by the given L{FilePath} and return it as a
    L{pygame.Surface}.
    @type path: L{FilePath}
    @rtype: L{pygame.Surface}
    """
    surface = pygame.image.load(path.path)
    return surface
class Color(record("red green blue")):
    """
    An RGB color.
    @ivar red: The red component, a float in [0.0, 1.0].
    @ivar green: The green component, a float in [0.0, 1.0].
    @ivar blue: The blue component, a float in [0.0, 1.0].
    """
class Sphere(record("center radius color")):
    """
    A renderer for a sphere.
    @ivar center: A L{Vector} giving the center of this sphere.
    @ivar radius: A number giving the radius of this sphere.
    @ivar color: A L{Color} giving the color of this sphere.
    @ivar quad: The GLU quadric used for drawing; created lazily on first
        paint so GLU is not used before a GL context has been initialized.
    """
    quad = None
    def paint(self):
        # Lazily create the quadric - doing this at construction time would
        # invoke GLU before the GL context exists.
        if self.quad is None:
            self.quad = gluNewQuadric()
        glPushMatrix()
        glColor(self.color.red, self.color.green, self.color.blue)
        glTranslate(self.center.x, self.center.y, self.center.z)
        # 25 slices and 25 stacks of tessellation.
        gluSphere(self.quad, self.radius, 25, 25)
        glPopMatrix()
class StaticCamera(record('position orientation')):
    """
    A fixed viewing perspective from which the scene will be observed.
    @ivar position: A L{Vector} giving the coordinates in the space of the
        perspective.
    @ivar orientation: A L{Vector} giving three rotations to orient the
        perspective.
    """
    def paint(self):
        # Apply the inverse camera transform: rotate the world by the
        # camera orientation, then translate by the negated position.
        glRotate(self.orientation.x, 1.0, 0.0, 0.0)
        glRotate(self.orientation.y, 0.0, 1.0, 0.0)
        glRotate(self.orientation.z, 0.0, 0.0, 1.0)
        glTranslate(-self.position.x, -self.position.y, -self.position.z)
class FollowCamera(record('player')):
    """
    A viewing perspective which is positioned wherever a particular player is
    positioned.
    @ivar player: The L{Player} this camera follows.
    """
    def paint(self):
        # Apply the inverse of the player's pose: rotate first, then
        # translate, so the world moves around a fixed eye point.
        v = self.player.getPosition()
        o = self.player.orientation
        glRotate(o.x, 1.0, 0.0, 0.0)
        glRotate(o.y, 0.0, 1.0, 0.0)
        glRotate(o.z, 0.0, 0.0, 1.0)
        # XXX Put the camera somewhere in the middle-ish of the player model.
        # This is a wild guess for now, camera position data should be available
        # from the model at some later point.
        glTranslate(-v.x - 0.5, -v.y - 1, -v.z - 0.5)
class StaticLight(record('position')):
    """
    A source of light in a scene.
    @ivar position: A L{Vector} giving the coordinates of the light source.
    """
    def paint(self):
        # Only GL_LIGHT0 is ever used, so at most one StaticLight per scene
        # is effective; additional lights overwrite the same GL light slot.
        glEnable(GL_LIGHT0)
        glLight(
            GL_LIGHT0, GL_POSITION,
            (self.position.x, self.position.y, self.position.z))
class Scene(object):
    """
    A collection of things to be rendered.
    @ivar _items: A C{list} of things which are part of this scene and which
        will be rendered when this scene is rendered.
    @ivar _lights: A C{list} of light sources in the scene.
    @ivar camera: An object whose C{paint} applies the viewing transform, or
        C{None} for no camera transform.
    """
    camera = None
    def __init__(self):
        self._items = []
        self._lights = []
        # XXX This is a temporary hack to make things not so gloomy until the
        # server starts telling us about light sources.
        self.addLight(StaticLight(Vector(5, 10, 5)))
    def add(self, item):
        # Register an object with a paint() method to be rendered each frame.
        self._items.append(item)
    def addLight(self, light):
        # Register a light source applied before the items are painted.
        self._lights.append(light)
    def paint(self):
        """
        Display this scene.
        """
        # Clear what was rendered for the last frame, and discard the associated
        # culling geometry.
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # Now the model matrix back into an unmodified state.
        glLoadIdentity()
        # Then set up the camera transformations.  These will affect everything
        # that follows, which is just what we want.
        if self.camera is not None:
            self.camera.paint()
        # Then illuminate stuff.
        for light in self._lights:
            light.paint()
        # Then put everything into the scene.
        for item in self._items:
            item.paint()
class Viewport(object):
    """
    The location and size of the view onto the world.
    This object chiefly converts between model and view coordinate systems.
    @ivar modelPosition: two-tuple of ints; the model position corresponding
        to the bottom left corner of the view.
    @ivar viewSize: two-tuple of ints; the width and height of the view.
    """
    def __init__(self, modelPosition, viewSize):
        """
        Initialize the Viewport.
        @param modelPosition: Value for the C{modelPosition} attribute.
        @param viewSize: Value for the C{viewSize} attribute.
        """
        self.modelPosition = modelPosition
        self.viewSize = viewSize
    def modelToView(self, position):
        """
        Convert model coordinates into view coordinates.
        @param position: A two-tuple of ints in the model coordinate system.
        @return: A two-tuple of ints in the view coordinate system.
        """
        originX, originY = self.modelPosition
        viewX = position[0] - originX
        # The view's y axis grows downward, so flip against the view height.
        viewY = self.viewSize[1] - (position[1] - originY)
        return (viewX, viewY)
    def initialize(self):
        """
        Configure the GL viewport, projection, and global render state.
        """
        width, height = self.viewSize
        # Depth testing hides things that are behind other things.
        glEnable(GL_DEPTH_TEST)
        # Size of the window in pixels and how the scene projects onto it.
        glViewport(0, 0, width, height)
        glMatrixMode(GL_PROJECTION)
        glLoadIdentity()
        # Field of view, aspect ratio, near clipping, far clipping.
        gluPerspective(60.0, width / height, 0.5, 1000.0)
        glMatrixMode(GL_MODELVIEW)
        glLoadIdentity()
        # Let glColor calls define material color properties - needed for
        # correct colors while lighting is enabled.
        glEnable(GL_COLOR_MATERIAL)
        # This might be desirable at some point, who knows.
        # glColorMaterial(GL_FRONT_AND_BACK, GL_EMISSION)
        # Turn on lighting and texturing for everything that follows.
        glEnable(GL_LIGHTING)
        glEnable(GL_TEXTURE_2D)
class Window(object):
    """
    A top-level PyGame-based window.  This acts as a container for
    other view objects.
    # @ivar environment: The L{game.environment.Environment} which is being
    # displayed.
    @ivar clock: Something providing
        L{twisted.internet.interfaces.IReactorTime}.
    @ivar screen: The L{pygame.Surface} which will be drawn to.
    @ivar _renderCall: The L{LoopingCall} which repaints the scene, created
        by C{go}.
    @ivar _inputCall: The L{LoopingCall} which polls pygame input events,
        created by C{go}.
    @ivar controller: The current controller.
    @ivar display: Something like L{pygame.display}.
    @ivar event: Something like L{pygame.event}.
    """
    screen = None
    def __init__(self,
                 environment,
                 clock=reactor,
                 display=pygame.display,
                 event=pygame.event):
        environment.addObserver(self)
        self.viewport = Viewport((0, 0), (800, 600))
        self.clock = clock
        self.display = display
        self.controller = None
        self.event = event
        self.scene = Scene()
        self.scene.camera = StaticCamera(Vector(0, 0, 0), Vector(0, 0, 0))
    def paint(self):
        """
        Call C{paint} on all views which have been directly added to
        this Window.
        """
        self.scene.paint()
        self.display.flip()
    def handleInput(self):
        """
        Handle currently available pygame input events.
        """
        for event in self.event.get():
            # Quit on window close or the 'q' key.
            if event.type == pygame.locals.QUIT or \
               event.type == pygame.KEYDOWN and event.key == pygame.K_q:
                self.stop()
            elif self.controller is not None:
                if event.type == pygame.KEYDOWN:
                    self.controller.keyDown(event.key)
                elif event.type == pygame.KEYUP:
                    self.controller.keyUp(event.key)
                elif event.type == pygame.MOUSEMOTION:
                    self.controller.mouseMotion(
                        event.pos, event.rel, event.buttons)
                elif event.type == pygame.MOUSEBUTTONUP:
                    # Toggle mouse grab and pointer visibility together.
                    # NOTE(review): these call the pygame modules directly
                    # instead of self.event - confirm whether bypassing the
                    # injected event object is intentional.
                    pygame.event.set_grab(not pygame.event.get_grab())
                    pygame.mouse.set_visible(not pygame.mouse.set_visible(True))
    def submitTo(self, controller):
        """
        Specify the given controller as the one to receive further
        events.
        """
        self.controller = controller
        # XXX Next line untested
        self.scene.camera = FollowCamera(controller.player)
    def go(self):
        """
        Show this window.
        @return: A Deferred that fires when this window is closed by the user.
        """
        pygame.init()
        self.screen = self.display.set_mode(
            self.viewport.viewSize,
            pygame.locals.DOUBLEBUF | pygame.locals.OPENGL)
        self.viewport.initialize()
        # Repaint at roughly 60 frames per second; poll input at 25Hz.
        self._renderCall = LoopingCall(self.paint)
        self._renderCall.start(1 / 60, now=False)
        self._inputCall = LoopingCall(self.handleInput)
        finishedDeferred = self._inputCall.start(0.04, now=False)
        finishedDeferred.addCallback(lambda ign: self._renderCall.stop())
        finishedDeferred.addCallback(lambda ign: self.display.quit())
        return finishedDeferred
    def stop(self):
        """
        Stop updating this window and handling events for it.
        """
        # Stopping the input loop fires the Deferred returned by go(), which
        # in turn stops the render loop and quits the display.
        self._inputCall.stop()
    def playerCreated(self, player):
        """
        Create a L{PlayerView}.
        """
        self.scene.add(PlayerView(player))
    def playerRemoved(self, player):
        """
        Remove a L{PlayerView}.
        """
        # Safe despite mutating during iteration: we return immediately
        # after the single removal.
        for view in self.scene._items:
            if isinstance(view, PlayerView) and view.player is player:
                self.scene._items.remove(view)
                return
class TerrainView(object):
    """
    A view for terrain over a tract of land.
    @type environment: L{Environment}
    @ivar environment: The game environment from which terrain will be rendered.
    @ivar loader: A callable like L{loadImage}.
    @ivar _images: A cache of L{pygame.Surface} instances, keyed on terrain
        types.  These images are the source for texture data for each type of
        terrain.
    @ivar _files: Maps each terrain type to the filename of its texture image.
    @ivar _texture: The GL texture id once created by C{_createTexture},
        otherwise C{None}.
    """
    _files = {
        GRASS: 'grass.png',
        MOUNTAIN: 'mountain.png',
        DESERT: 'desert.png',
        WATER: 'water.png',
    }
    _texture = None
    def __init__(self, environment, loader):
        self._images = {}
        self.loader = loader
        # environment is presumably None only in tests - TODO confirm; the
        # mesh and VBO are only built when a real environment is supplied.
        if environment is not None:
            self._coord, self._ext = self._getTextureForTerrain()
            self.environment = environment
            self._surface = SurfaceMesh(
                environment.terrain, self._coord, self._ext)
            self.environment.terrain.addObserver(self._surface.changed)
            self._vbo = VBO(self._surface.surface)
    def _getImageForTerrain(self, terrainType):
        """
        @param terrainType: The terrain type.
        @rtype: L{Surface}
        @return: An image which represents the given C{terrainType}.
        """
        # Loaded lazily and cached in _images.
        if terrainType not in self._images:
            image = self.loader(
                FilePath(gameFile).sibling('data').child(self._files[terrainType]))
            self._images[terrainType] = image
        return self._images[terrainType]
    def _createTexture(self):
        """
        Upload the combined terrain atlas image as a GL texture and return
        the texture id.
        """
        surface = self._textureImage
        width = surface.get_width()
        height = surface.get_height()
        raw = pygame.image.tostring(surface, "RGBA", 0)
        texture = glGenTextures(1)
        # glGenTextures fails by returning 0, particularly if there's no GL
        # context yet.
        assert texture != 0
        glBindTexture(GL_TEXTURE_2D, texture)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexImage2D(
            GL_TEXTURE_2D, 0,
            GL_RGBA, width, height, 0,
            GL_RGBA,
            GL_UNSIGNED_BYTE,
            raw)
        return texture
    def _getTextureForTerrain(self):
        """
        Return a single texture containing image data for every terrain type, as
        well as a dictionary mapping each terrain type to the corresponding
        texture coordinates, and a float indicating the size of each terrain's
        area of the overall texture.
        """
        # Number of 64px tiles per side of the atlas; slightly over-allocated
        # so every terrain type fits.
        dimensions = int(len(self._files) ** 0.5) + 2
        surface = pygame.surface.Surface((64 * dimensions, 64 * dimensions))
        coordinates = {}
        types = self._files.iterkeys()
        for y in range(dimensions):
            for x in range(dimensions):
                try:
                    terrainType = types.next()
                except StopIteration:
                    # NOTE: only breaks the inner loop; harmless since the
                    # remaining cells are simply left unused.
                    break
                image = self._getImageForTerrain(terrainType)
                surface.blit(image, (x * 64, y * 64))
                # True division (__future__ import): fractional tex coords.
                coordinates[terrainType] = (x / dimensions, y / dimensions)
        self._textureImage = surface
        return coordinates, 1 / dimensions
    def paint(self):
        """
        For all of the known terrain, render whatever faces are exposed.
        """
        # Upload the texture atlas lazily, once a GL context exists.
        if self._texture is None:
            self._texture = self._createTexture()
        glBindTexture(GL_TEXTURE_2D, self._texture)
        glEnableClientState(GL_VERTEX_ARRAY)
        glEnableClientState(GL_TEXTURE_COORD_ARRAY)
        self._vbo.bind()
        # Interleaved layout: 5 floats per vertex (3 position + 2 texcoord),
        # stride 20 bytes, texcoords starting at byte offset 12.
        glVertexPointer(3, GL_FLOAT, 4 * 5, self._vbo)
        glTexCoordPointer(2, GL_FLOAT, 4 * 5, self._vbo + (4 * 3))
        # glPushMatrix()
        # glTranslate(x, y, z)
        glDrawArrays(GL_TRIANGLES, 0, self._surface.important)
        # glPopMatrix()
        self._vbo.unbind()
        glDisableClientState(GL_VERTEX_ARRAY)
        glDisableClientState(GL_TEXTURE_COORD_ARRAY)
        glBindTexture(GL_TEXTURE_2D, 0)
class PlayerView(record('player')):
    """
    View of a player, drawn as a simple white pyramid oriented by the
    player's heading.
    """
    def paint(self):
        glPushMatrix()
        position = self.player.getPosition()
        # Move to the player and spin about the vertical (y) axis.
        glTranslate(position.x, position.y, position.z)
        glRotate(self.player.orientation.y, 0.0, 1.0, 0.0)
        # Slide back because the pyramid below is centered at 0.5, 0, 0.5
        # instead of at the origin.  Without this it rotates around its corner
        # instead of around its center.
        glTranslate(-0.5, 0, -0.5)
        glColor(1.0, 1.0, 1.0)
        # Four triangular faces sharing the apex at (0.5, 0, 0.5).
        glBegin(GL_TRIANGLES)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(0.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, 0.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(1.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, 1.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(1.0, 1.0, 1.0)
        glVertex3f(0.0, 1.0, 1.0)
        glVertex3f(0.5, 0.0, 0.5)
        glVertex3f(0.0, 1.0, 1.0)
        glVertex3f(0.0, 1.0, 0.0)
        glEnd()
        glPopMatrix()
|
Add numpy dummy for readthedocs
|
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
import argparse
import glob
import os
import random
import re
import requests
import requests_cache
import sys
try:
from urllib.parse import quote as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from urllib import getproxies as get_proxies
except ImportError:
from urllib.request import getproxies as get_proxies
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
# Handle unicode between Python 2 and 3
# http://stackoverflow.com/a/6633040/305414
if sys.version < '3':
    import codecs
    def u(x):
        # Decode escape sequences to produce a unicode string on Python 2.
        return codecs.unicode_escape_decode(x)[0]
    else:
        pass
else:
    def u(x):
        # Python 3 str is already unicode; pass through unchanged.
        return x
SEARCH_URL = 'https://www.google.com/search?q=site:stackoverflow.com%20{0}'
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 Safari/536.5',
'Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 Safari/536.5',)
ANSWER_HEADER = u('--- Answer {0} ---\n{1}')
NO_ANSWER_MSG = '< no answer given >'
CACHE_DIR = os.path.join(os.path.expanduser('~'), '.howdoi')
CACHE_FILE = os.path.join(CACHE_DIR, 'cache')
def get_result(url):
    # Fetch *url* and return the response body as text.  A random User-Agent
    # is sent (presumably to avoid trivial bot blocking - verify) and the
    # system proxy configuration is honoured.
    return requests.get(url, headers={'User-Agent': random.choice(USER_AGENTS)}, proxies=get_proxies()).text
def is_question(link):
    """
    Return a truthy match object if *link* points at a Stack Overflow
    question page (contains ``questions/<id>/``), otherwise None.
    """
    # Raw string: '\d' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    return re.search(r'questions/\d+/', link)
def get_links(query):
    """
    Search Google (restricted to stackoverflow.com) for *query* and return
    the result hrefs.  The two selectors presumably match two generations
    of Google's result markup - verify against current result pages.
    """
    url = SEARCH_URL.format(url_quote(query))
    result = get_result(url)
    html = pq(result)
    return [a.attrib['href'] for a in html('.l')] or \
        [a.attrib['href'] for a in html('.r')('a')]
def get_link_at_pos(links, pos):
    """
    Return the *pos*-th (1-based) question link from *links*, skipping
    links that are not question pages.
    NOTE(review): when fewer than *pos* question links exist, the leftover
    loop variable - the last element of *links*, question or not - is
    returned; an empty *links* would raise NameError.  Callers currently
    guard against empty links (see get_instructions).
    """
    for link in links:
        if is_question(link):
            if pos == 1:
                break
            else:
                pos = pos - 1
                continue
    return link
def format_output(code, args):
    """
    Syntax-highlight *code* for terminal display when the user asked for
    color; otherwise return it untouched.
    """
    if not args['color']:
        return code
    # Try the query words and StackOverflow tags as lexer names first.
    lexer = None
    candidates = args['query'].split() + args['tags']
    for keyword in candidates:
        try:
            lexer = get_lexer_by_name(keyword)
        except ClassNotFound:
            continue
        else:
            break
    # Nothing matched - let pygments guess from the code itself.
    if lexer is None:
        lexer = guess_lexer(code)
    return highlight(code, lexer, TerminalFormatter(bg='dark'))
def get_answer(args, links):
    """
    Fetch and extract the text of the top-voted answer for the question at
    position args['pos'] among *links*.
    Side effect: stores the question's tags in args['tags'] for use by
    format_output.
    @return: the answer text, the bare link when args['link'] is set, or
        NO_ANSWER_MSG when nothing could be extracted.
    """
    link = get_link_at_pos(links, args['pos'])
    if args.get('link'):
        return link
    # answertab=votes sorts answers so the first .answer node is top-voted.
    page = get_result(link + '?answertab=votes')
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # Prefer code/pre fragments; fall back to the full post text.
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = first_answer.find('.post-text').eq(0).text()
    elif args['all']:
        # Full answer requested: keep prose as-is, highlight code fragments.
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = html_tag.text()
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(format_output(current_text, args))
                else:
                    texts.append(current_text)
        texts.append('\n---\nAnswer from {0}'.format(link))
        text = '\n'.join(texts)
    else:
        text = format_output(instructions.eq(0).text(), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def get_instructions(args):
    """
    Collect up to args['num_answers'] answers for args['query'] and join
    them with blank lines.  Returns '' when the search yields no links.
    Side effect: args['pos'] is overwritten as successive positions are
    fetched.
    """
    links = get_links(args['query'])
    if not links:
        return ''
    answers = []
    # Only label answers with a header when more than one was requested.
    append_header = args['num_answers'] > 1
    initial_position = args['pos']
    for answer_number in range(args['num_answers']):
        current_position = answer_number + initial_position
        args['pos'] = current_position
        answer = get_answer(args, links)
        if not answer:
            continue
        if append_header:
            answer = ANSWER_HEADER.format(current_position, answer)
        answer = answer + '\n'
        answers.append(answer)
    return '\n'.join(answers)
def enable_cache():
    # Install a persistent requests_cache backend under ~/.howdoi,
    # creating the directory on first use.
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    requests_cache.install_cache(CACHE_FILE)
def clear_cache():
    """
    Delete every file created by the requests_cache backend for CACHE_FILE.
    """
    pattern = '{0}*'.format(CACHE_FILE)
    for cached in glob.glob(pattern):
        os.remove(cached)
def howdoi(args):
    """
    Answer the query in *args* and return printable text, or a friendly
    message when nothing was found or the network is unreachable.
    """
    # Collapse the query word list to one string and drop '?' characters.
    args['query'] = ' '.join(args['query']).replace('?', '')
    try:
        return get_instructions(args) or 'Sorry, couldn\'t find any help with that topic\n'
    except ConnectionError:
        return 'Failed to establish network connection\n'
def get_parser():
    """
    Build the argparse parser for the howdoi command line.
    @return: a configured L{argparse.ArgumentParser}
    """
    parser = argparse.ArgumentParser(
        description='instant coding answers via the command line')
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*',
                        help='the question to answer')
    parser.add_argument('-p', '--pos', type=int, default=1,
                        help='select answer in specified position (default: 1)')
    parser.add_argument('-a', '--all', action='store_true',
                        help='display the full text of the answer')
    parser.add_argument('-l', '--link', action='store_true',
                        help='display only the answer link')
    parser.add_argument('-c', '--color', action='store_true',
                        help='enable colorized output')
    parser.add_argument('-n', '--num-answers', type=int, default=1,
                        help='number of answers to return')
    parser.add_argument('-C', '--clear-cache', action='store_true',
                        help='clear the cache')
    return parser
def command_line_runner():
    # Parse argv, handle the cache-clearing and empty-query shortcuts, then
    # print the answer.
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['clear_cache']:
        clear_cache()
        print('Cache cleared successfully')
        return
    if not args['query']:
        parser.print_help()
        return
    # enable the cache if user doesn't want it to be disabled
    if not os.getenv('HOWDOI_DISABLE_CACHE'):
        enable_cache()
    # NOTE(review): .encode() here targets Python 2 output; on Python 3 this
    # prints the repr of a bytes object - confirm intended behaviour.
    print(howdoi(args).encode('utf-8', 'ignore'))
if __name__ == '__main__':
command_line_runner()
howdoi: Fair's fair -- add Linux User String
#!/usr/bin/env python
######################################################
#
# howdoi - instant coding answers via the command line
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
######################################################
import argparse
import glob
import os
import random
import re
import requests
import requests_cache
import sys
try:
from urllib.parse import quote as url_quote
except ImportError:
from urllib import quote as url_quote
try:
from urllib import getproxies as get_proxies
except ImportError:
from urllib.request import getproxies as get_proxies
from pygments import highlight
from pygments.lexers import guess_lexer, get_lexer_by_name
from pygments.formatters import TerminalFormatter
from pygments.util import ClassNotFound
from pyquery import PyQuery as pq
from requests.exceptions import ConnectionError
# Handle unicode between Python 2 and 3
# http://stackoverflow.com/a/6633040/305414
if sys.version < '3':
    import codecs
    def u(x):
        # Decode escape sequences to produce a unicode string on Python 2.
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        # Python 3 str is already unicode; pass through unchanged.
        return x
SEARCH_URL = 'https://www.google.com/search?q=site:stackoverflow.com%20{0}'
# Browser User-Agent strings to rotate through when querying Google.  The
# trailing comma after every entry matters: without one, adjacent string
# literals are silently concatenated into a single bogus entry (the Linux
# UA previously merged into the Windows UA that followed it).  Also fixed
# the "Gecko/20100 101" typo (stray space) in the Linux entry.
USER_AGENTS = ('Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0',
               'Mozilla/5.0 (Windows NT 6.1; rv:11.0) Gecko/20100101 Firefox/11.0',
               'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 Safari/536.5',
               'Mozilla/5.0 (Windows; Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.46 Safari/536.5',)
ANSWER_HEADER = u('--- Answer {0} ---\n{1}')
NO_ANSWER_MSG = '< no answer given >'
CACHE_DIR = os.path.join(os.path.expanduser('~'), '.howdoi')
CACHE_FILE = os.path.join(CACHE_DIR, 'cache')
def get_result(url):
    """
    GET *url* and return the response body as text.  A User-Agent is picked
    at random from USER_AGENTS and the system proxy settings are honoured.
    """
    headers = {'User-Agent': random.choice(USER_AGENTS)}
    response = requests.get(url, headers=headers, proxies=get_proxies())
    return response.text
def is_question(link):
    """
    Return a truthy match object if *link* points at a Stack Overflow
    question page (contains ``questions/<id>/``), otherwise None.
    """
    # Raw string: '\d' inside a plain literal is an invalid escape sequence
    # (DeprecationWarning today, an error in future Python versions).
    return re.search(r'questions/\d+/', link)
def get_links(query):
    """
    Search Google (restricted to stackoverflow.com) for *query* and return
    the result hrefs.  The two selectors presumably match two generations
    of Google's result markup - verify against current result pages.
    """
    url = SEARCH_URL.format(url_quote(query))
    result = get_result(url)
    html = pq(result)
    return [a.attrib['href'] for a in html('.l')] or \
        [a.attrib['href'] for a in html('.r')('a')]
def get_link_at_pos(links, pos):
    """
    Return the link of the *pos*-th (1-based) question among *links*,
    skipping links that are not question pages.

    Fix: previously, when fewer than *pos* question links existed, the
    leftover loop variable - the last element of *links*, question or not -
    was returned.  Now the last question link found is used as the
    fallback.  When *links* contains no question links at all, the previous
    behaviour (returning the last element) is preserved for compatibility;
    callers already guard against an empty *links* (see get_instructions).
    """
    questions = [link for link in links if is_question(link)]
    if not questions:
        return links[-1]
    if pos <= len(questions):
        return questions[pos - 1]
    return questions[-1]
def format_output(code, args):
    """
    Syntax-highlight *code* for the terminal when args['color'] is set;
    otherwise return it unchanged.
    """
    if not args['color']:
        return code
    lexer = None
    # try to find a lexer using the StackOverflow tags
    # or the query arguments
    for keyword in args['query'].split() + args['tags']:
        try:
            lexer = get_lexer_by_name(keyword)
            break
        except ClassNotFound:
            pass
    # no lexer found above, use the guesser
    if not lexer:
        lexer = guess_lexer(code)
    return highlight(
        code,
        lexer,
        TerminalFormatter(bg='dark')
    )
def get_answer(args, links):
    """
    Fetch and extract the text of the top-voted answer for the question at
    position args['pos'] among *links*.
    Side effect: stores the question's tags in args['tags'] for use by
    format_output.
    @return: the answer text, the bare link when args['link'] is set, or
        NO_ANSWER_MSG when nothing could be extracted.
    """
    link = get_link_at_pos(links, args['pos'])
    if args.get('link'):
        return link
    # answertab=votes sorts answers so the first .answer node is top-voted.
    page = get_result(link + '?answertab=votes')
    html = pq(page)
    first_answer = html('.answer').eq(0)
    # Prefer code/pre fragments; fall back to the full post text.
    instructions = first_answer.find('pre') or first_answer.find('code')
    args['tags'] = [t.text for t in html('.post-tag')]
    if not instructions and not args['all']:
        text = first_answer.find('.post-text').eq(0).text()
    elif args['all']:
        # Full answer requested: keep prose as-is, highlight code fragments.
        texts = []
        for html_tag in first_answer.items('.post-text > *'):
            current_text = html_tag.text()
            if current_text:
                if html_tag[0].tag in ['pre', 'code']:
                    texts.append(format_output(current_text, args))
                else:
                    texts.append(current_text)
        texts.append('\n---\nAnswer from {0}'.format(link))
        text = '\n'.join(texts)
    else:
        text = format_output(instructions.eq(0).text(), args)
    if text is None:
        text = NO_ANSWER_MSG
    text = text.strip()
    return text
def get_instructions(args):
    """Collect args['num_answers'] answers starting at args['pos'].

    Returns an empty string when the search yields no links.  Note that
    args['pos'] is mutated while successive answers are fetched.
    """
    links = get_links(args['query'])
    if not links:
        return ''
    multiple = args['num_answers'] > 1
    first_pos = args['pos']
    answers = []
    for offset in range(args['num_answers']):
        position = first_pos + offset
        args['pos'] = position
        answer = get_answer(args, links)
        if not answer:
            continue
        if multiple:
            answer = ANSWER_HEADER.format(position, answer)
        answers.append(answer + '\n')
    return '\n'.join(answers)
def enable_cache():
    """Create the cache directory if necessary and install the HTTP cache."""
    if not os.path.exists(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    requests_cache.install_cache(CACHE_FILE)
def clear_cache():
    """Delete every file belonging to the on-disk request cache."""
    pattern = '{0}*'.format(CACHE_FILE)
    for cached_file in glob.glob(pattern):
        os.remove(cached_file)
def howdoi(args):
    """Resolve the query in *args* and return the answer text.

    Question marks are stripped from the query; network failures are
    reported as text rather than raised.
    """
    args['query'] = ' '.join(args['query']).replace('?', '')
    try:
        instructions = get_instructions(args)
        return instructions or 'Sorry, couldn\'t find any help with that topic\n'
    except ConnectionError:
        return 'Failed to establish network connection\n'
def get_parser():
    """Build and return the command-line argument parser."""
    parser = argparse.ArgumentParser(
        description='instant coding answers via the command line')
    parser.add_argument('query', metavar='QUERY', type=str, nargs='*',
                        help='the question to answer')
    parser.add_argument('-p', '--pos', default=1, type=int,
                        help='select answer in specified position (default: 1)')
    parser.add_argument('-a', '--all', action='store_true',
                        help='display the full text of the answer')
    parser.add_argument('-l', '--link', action='store_true',
                        help='display only the answer link')
    parser.add_argument('-c', '--color', action='store_true',
                        help='enable colorized output')
    parser.add_argument('-n', '--num-answers', default=1, type=int,
                        help='number of answers to return')
    parser.add_argument('-C', '--clear-cache', action='store_true',
                        help='clear the cache')
    return parser
def command_line_runner():
    """Entry point: parse CLI arguments and print the answer."""
    parser = get_parser()
    args = vars(parser.parse_args())
    if args['clear_cache']:
        clear_cache()
        print('Cache cleared successfully')
        return
    if not args['query']:
        parser.print_help()
        return
    # Caching is on by default; HOWDOI_DISABLE_CACHE opts out.
    if not os.getenv('HOWDOI_DISABLE_CACHE'):
        enable_cache()
    print(howdoi(args).encode('utf-8', 'ignore'))
# Allow running this module directly as a script.
if __name__ == '__main__':
    command_line_runner()
|
"""This module handles sequences.
Copyright 2020-2021 Michael Hayes, UCECE
"""
from .expr import ExprList, ExprDomain, expr
from .utils import isiterable
from numpy import array, allclose, arange
# Perhaps subclass numpy ndarray? But then could not have symbolic
# elements in the sequence.
def parse_seq_str(s):
    """Parse a sequence string such as '{1, _2, 3}' into values and indices.

    A leading underscore marks the element at sample index 0; without
    one, the first element is taken as the origin.  Returns a tuple
    ``(vals, ni)`` of expression values and their sample-index range.
    """
    if s.startswith('{'):
        if not s.endswith('}'):
            raise ValueError('Mismatched braces for %s' % s)
        s = s[1:-1]
    parts = s.split(',')
    vals = []
    origin = None
    for index, token in enumerate(parts):
        token = token.strip()
        if token.startswith('_'):
            if origin is not None:
                raise ValueError('Cannot have multiple zero index indicators')
            origin = index
            token = token[1:]
        vals.append(expr(token))
    if origin is None:
        origin = 0
    ni = range(-origin, len(parts) - origin)
    return vals, ni
class Sequence(ExprList, ExprDomain):
    """A sequence of Lcapy expressions together with sample indices.

    NOTE: the original class defined ``__str__`` twice; the first
    definition was dead code (shadowed by the later one, which also
    zero-extends and tries LaTeX rendering) and has been removed.
    """

    var = None
    is_sequence = True

    def __init__(self, seq, ni=None, origin=None, evaluate=False, var=None,
                 start_trunc=False, end_trunc=False):
        """Sequences can be created from an tuple, list, or ndarray.

        See `seq()` to create a Sequence from a string.

        >>> a = Sequence((1, 2, 3))

        The sequence indices are specified with the optional `ni` argument.
        For example:

        >>> a = Sequence((1, 2, 3, 4), (-1, 0, 1, 2))

        If the `ni` argument is not specified, the sequence indices
        are enumerated from 0.

        The sequence indices can be found using the `n` attribute.
        This returns a list.

        >>> a = Sequence('{1, _2, 3, 4}').n
        [-1, 0, 1, 2]

        Sequences can be converted into discrete-time, discrete-frequency,
        z-domain sequences using call notation, for example::

        >>> a(z)

        `start_trunc` indicates that the start of the sequence was truncated
        `end_trunc` indicates that the end of the sequence was truncated

        `start_trunc` and `end_trunc` are not propagated if a sequence is
        modified.  They indicate that ellipsis should be printed to show
        the sequence has been truncated.
        """
        if isinstance(seq, str):
            seq, ni = parse_seq_str(seq)

        if not isiterable(seq):
            seq = (seq, )

        super(Sequence, self).__init__(seq, evaluate)

        if ni is not None and origin is not None:
            raise ValueError('Cannot specify both ni and origin')

        if origin is not None:
            ni = range(-origin, len(self) - origin)

        if ni is None:
            ni = range(len(seq))

        # Perhaps enforce contiguous sequences and just store the origin.
        # This will simplify sequence comparison.
        self.n = list(ni)

        # Determine if sequence truncated at start, end, or both.
        # Perhaps have separate classes for truncated sequences?
        self.start_trunc = start_trunc
        self.end_trunc = end_trunc

    @property
    def vals(self):
        """Return the SymPy values as a list."""
        return list(self)

    @property
    def _pexpr(self):
        """Return expression for printing."""
        return self

    def __eq__(self, x):
        """Sequences are equal when both values and indices match."""
        return self.vals == x.vals and self.n == x.n

    def __add__(self, x):
        """Concatenate sequences (list concatenation semantics)."""
        return self.__class__(super(Sequence, self).__add__(x))

    def __mul__(self, x):
        """Repeat the sequence (list repetition semantics)."""
        return self.__class__(super(Sequence, self).__mul__(x))

    def __lshift__(self, m):
        """Advance the sequence by `m` samples."""
        return self.delay(-m)

    def __rshift__(self, m):
        """Delay the sequence by `m` samples."""
        return self.delay(m)

    def __getitem__(self, n):
        """Note this returns the element with index matching n.
        This is not necessarily the nth element in the sequence.

        Indices outside the sequence extent return zero."""
        # TODO, support slices, etc.
        try:
            nindex = list(self.n).index(n)
        except ValueError:
            return expr(0)
        return super(Sequence, self).__getitem__(nindex)

    @property
    def origin(self):
        """Return the element index for n == 0. This may raise a ValueError
        if the origin is not in the sequence."""
        return -min(self.n)

    @origin.setter
    def origin(self, origin):
        """Set the origin to `origin`."""
        self.n = list(arange(-origin, len(self) - origin))

    def prune(self):
        """Remove zeros from ends of sequence.

        {0, 0, 1, 2, 3, 0} -> {1, 2, 3}"""
        vals = self.vals

        m1 = 0
        while vals[m1] == 0:
            m1 += 1

        m2 = len(vals) - 1
        if vals[m2] != 0:
            return Sequence(vals[m1:], self.n[m1:])

        while vals[m2] == 0:
            m2 -= 1
        return Sequence(vals[m1:m2 + 1], self.n[m1:m2 + 1])

    def zeropad(self, M):
        """Add M zeros to end of sequence:

        For example, with M = 3

        {1, 2, 3} -> {1, 2, 3, 0, 0, 0}"""
        vals = self.vals
        zero = expr(0)
        for m in range(M):
            vals.append(zero)

        ni = self.n + list(range(self.n[-1] + 1, len(vals)))
        return self.__class__(vals, ni)

    def __str__(self):
        """Render as '{v0, _v1, ...}' with the origin element marked by
        an underscore; the sequence is zero-extended to include n = 0."""
        a = self.zeroextend()

        items = []
        if self.start_trunc:
            items.append('...')

        for v1, n1 in zip(a, a.n):
            try:
                s = v1.latex()
            except:
                s = str(v1)
            if n1 == 0:
                s = r'_%s' % v1
            items.append(s)

        if self.end_trunc:
            items.append('...')

        return r'{%s}' % ', '.join(items)

    def pretty(self, **kwargs):
        """Return a unicode pretty-printed rendition."""
        from .printing import pretty
        a = self.zeroextend()
        return pretty(a)

    def as_array(self):
        """Numerically evaluate and store as NumPy array."""
        # If, for some reason, a sequence can have elements that
        # depend on n...
        #vals = array([v1.evaluate(n1) for v1, n1 in zip(self, self.n)])
        vals = array([v1.cval for v1 in self])
        if allclose(vals.imag, 0.0):
            vals = vals.real
        return vals

    @property
    def expr(self):
        """Convert sequence to an Lcapy expression."""
        if self.var is None:
            raise ValueError('var not specified')
        return self.as_impulses(self.var)

    def as_impulses(self, var=None):
        """Convert to discrete-time signal in the form of
        a weighted sum of delayed impulses.  For example,
        {1, 2, 3} -> ui[n] + 2 * ui[n - 1] + 3 * ui[n - 2]"""
        from .functions import unitimpulse
        from sympy import Add

        if var is None:
            var = self.var
        if var is None:
            raise ValueError('var not specified')

        # This can reorder terms
        result = var * 0
        for v1, n1 in zip(self, self.n):
            result += v1.as_constant() * unitimpulse(var - n1)

        # but so does the unevaluated Add...
        # items = []
        # for v1, n1 in zip(self, self.n):
        #     items.append(v1 * unitimpulse(var.var - n1))
        #
        # expr = Add(*items, evaluate=False)
        # result = var.__class__(expr)

        return result

    def evaluate(self, ni=None):
        """Evaluate expression at sequence indices specified by `arg`. `arg`
        may be a scalar or a vector.  The result is of type float or
        complex.  Zeroes are returned for indices outside the sequence
        extent.

        If arg is iterable, a NumPy array is returned.
        """
        if ni is None:
            return self.as_array()

        if isiterable(ni):
            vals = array([self(n1).cval for n1 in ni])
            if allclose(vals.imag, 0.0):
                vals = vals.real
            return vals
        else:
            val = self(ni).cval
            if allclose(val.imag, 0.0):
                val = val.real
            return val

    @property
    def extent(self):
        """Determine extent of the sequence.

        For example, Sequence([1, 1]).extent = 2
                     Sequence([1, 0, 1]).extent = 3
                     Sequence([0, 1, 0, 1]).extent = 3
        """
        from numpy import argwhere

        # Note, each element is an Expr.
        nz = [elt != 0 for elt in self]
        w = argwhere(nz)
        if len(w) == 0:
            return 0

        return (max(w) - min(w))[0] + 1

    def discrete_time_fourier_transform(self, var=None, **assumptions):
        """Convert to Fourier domain using discrete time Fourier transform."""
        return self.DTFT(var, **assumptions)

    def DTFT(self, var=None, **assumptions):
        """Convert to Fourier domain using discrete time Fourier transform."""
        return self.as_impulses().DTFT(var, **assumptions)

    def plot(self, ni=None, **kwargs):
        """Plot the sequence.  If `ni` is not specified, it defaults to the
        sequence indices.  `ni` can be a vector of specified sequence
        indices, a tuple specifing the range, or a constant specifying
        the maximum value with the minimum value set to 0.

        kwargs include:
        axes - the plot axes to use otherwise a new figure is created
        xlabel - the x-axis label
        ylabel - the y-axis label
        xscale - the x-axis scaling, say for plotting as ms
        yscale - the y-axis scaling, say for plotting mV
        in addition to those supported by the matplotlib plot command.

        The plot axes are returned.
        """
        if ni is None:
            ni = self.n

        # This is not the most efficient way but plot routines expect
        # an Expr object.
        return self.as_impulses().plot(ni, **kwargs)

    def __call__(self, arg, **assumptions):
        """Convert sequence to n-domain or k-domain expression.
        For example, seq((1, 2, 3))(n)"""
        from .nexpr import n
        from .kexpr import k
        from .zexpr import z

        if id(arg) == id(n) or arg == n:
            if self.var == n:
                return self.copy()
            elif self.var == k:
                return self.IDFT()
            return self.IZT()
        if id(arg) == id(k) or arg == k:
            if self.var == k:
                return self.copy()
            elif self.var == z:
                return self.IZT().DFT()
            return self.DFT()
        if id(arg) == id(z) or arg == z:
            if self.var == z:
                return self.copy()
            elif self.var == k:
                return self.IDFT().ZT()
            return self.ZT()

        # This is experimental and may be deprecated.
        return self[arg]

    def _repr_pretty_(self, p, cycle):
        """This is used by jupyter notebooks to display an expression using
        unicode.  It is also called by IPython when displaying an
        expression."""
        # Note, the method in ExprPrint is bypassed since list
        # has this methodx.
        p.text(self.pretty())

    def copy(self):
        """Return a copy sharing the same indices."""
        return self.__class__(super(Sequence, self).copy(),
                              self.n)

    def lfilter(self, b=None, a=None):
        """Implement digital filter specified by a transfer function.  The
        transfer function is described by a vector `b` of coefficients
        for the numerator and an `a` vector of coefficients for the
        denominator.

        If you would like the response with initial conditions see
        `DTfilter.response()`.

        For a FIR filter a = [1]."""
        if b is None:
            b = []
        if a is None:
            a = [1]

        x = self.vals
        y = []

        a0 = a[0]

        for n, x1 in enumerate(x):
            y.append(expr(0))

            for m, b1 in enumerate(b):
                # NOTE(review): negative indices wrap in Python lists, so
                # the bare except only masks IndexError past the end;
                # behaviour preserved from the original.
                try:
                    y[-1] += b1 * x[n - m] / a0
                except:
                    pass

            yn = y[-1]
            for m, a1 in enumerate(a[1:]):
                try:
                    yn += a1 * y[-m - 2] / a0
                except:
                    pass
            y[-1] = yn

        return self.__class__(y, self.n)

    def convolve(self, h, mode='full'):
        """Convolve with h."""
        x = self
        h = Sequence(h)

        Lx = x.extent
        Lh = h.extent
        Ly = Lx + Lh - 1

        if mode == 'full':
            x = x.zeropad(Ly - Lx)
        elif mode == 'same':
            x = x.zeropad(max(Lx, Ly) - Lx)
        else:
            raise ValueError('Unknown mode ' + mode)

        return x.lfilter(h, a=[1])

    def delay(self, m=0):
        """Return a new sequence delayed by an integer number of samples `m`.
        If `m` is negative, the sequence is advanced."""
        if m != int(m):
            raise ValueError('Non-integer delay %s' % m)

        origin = self.origin - m
        ni = list(arange(-origin, len(self) - origin))
        return self.__class__(self.vals, ni)

    def zeroextend(self):
        """Extend sequence by adding zeros so that the origin
        is included.  This is used for printing."""
        ni = self.n
        vals = self.vals

        if ni[0] > 0:
            vals = [0] * ni[0] + vals
            ni = range(0, ni[-1] + 1)
        elif ni[-1] < 0:
            vals = vals + [0] * -ni[-1]
            ni = range(ni[0], 1)

        return self.__class__(vals, ni,
                              start_trunc=self.start_trunc,
                              end_trunc=self.end_trunc)
Check for compatible sequences
"""This module handles sequences.
Copyright 2020-2021 Michael Hayes, UCECE
"""
from .expr import ExprList, ExprDomain, expr
from .utils import isiterable
from numpy import array, allclose, arange
# Perhaps subclass numpy ndarray? But then could not have symbolic
# elements in the sequence.
def parse_seq_str(s):
    """Parse a brace-delimited sequence string into values and indices.

    For example '{1, _2, 3}' yields three expression values with the
    underscore marking the origin (sample index 0).  Without a marker
    the first element is the origin.  Returns ``(vals, ni)``.
    """
    if s.startswith('{'):
        if not s.endswith('}'):
            raise ValueError('Mismatched braces for %s' % s)
        s = s[1:-1]
    tokens = [part.strip() for part in s.split(',')]
    vals = []
    origin = None
    for position, token in enumerate(tokens):
        if token.startswith('_'):
            if origin is not None:
                raise ValueError('Cannot have multiple zero index indicators')
            origin = position
            token = token[1:]
        vals.append(expr(token))
    if origin is None:
        origin = 0
    return vals, range(-origin, len(tokens) - origin)
class Sequence(ExprList, ExprDomain):
    """A sequence of Lcapy expressions together with sample indices.

    NOTE: the original class defined ``__str__`` twice; the first
    definition was dead code (shadowed by the later one, which also
    zero-extends and tries LaTeX rendering) and has been removed.
    """

    var = None
    is_sequence = True
    # Physical quantity of the sequence; 'undefined' is compatible with
    # any other quantity when adding sequences.
    quantity = 'undefined'

    def __init__(self, seq, ni=None, origin=None, evaluate=False, var=None,
                 start_trunc=False, end_trunc=False):
        """Sequences can be created from an tuple, list, or ndarray.

        See `seq()` to create a Sequence from a string.

        >>> a = Sequence((1, 2, 3))

        The sequence indices are specified with the optional `ni` argument.
        For example:

        >>> a = Sequence((1, 2, 3, 4), (-1, 0, 1, 2))

        If the `ni` argument is not specified, the sequence indices
        are enumerated from 0.

        The sequence indices can be found using the `n` attribute.
        This returns a list.

        >>> a = Sequence('{1, _2, 3, 4}').n
        [-1, 0, 1, 2]

        Sequences can be converted into discrete-time, discrete-frequency,
        z-domain sequences using call notation, for example::

        >>> a(z)

        `start_trunc` indicates that the start of the sequence was truncated
        `end_trunc` indicates that the end of the sequence was truncated

        `start_trunc` and `end_trunc` are not propagated if a sequence is
        modified.  They indicate that ellipsis should be printed to show
        the sequence has been truncated.
        """
        if isinstance(seq, str):
            seq, ni = parse_seq_str(seq)

        if not isiterable(seq):
            seq = (seq, )

        super(Sequence, self).__init__(seq, evaluate)

        if ni is not None and origin is not None:
            raise ValueError('Cannot specify both ni and origin')

        if origin is not None:
            ni = range(-origin, len(self) - origin)

        if ni is None:
            ni = range(len(seq))

        # Perhaps enforce contiguous sequences and just store the origin.
        # This will simplify sequence comparison.
        self.n = list(ni)

        # Determine if sequence truncated at start, end, or both.
        # Perhaps have separate classes for truncated sequences?
        self.start_trunc = start_trunc
        self.end_trunc = end_trunc

    @property
    def vals(self):
        """Return the SymPy values as a list."""
        return list(self)

    @property
    def _pexpr(self):
        """Return expression for printing."""
        return self

    def __eq__(self, x):
        """Sequences are equal when both values and indices match."""
        return self.vals == x.vals and self.n == x.n

    def __add__(self, x):
        """Concatenate sequences, checking they have compatible quantities.

        An 'undefined' quantity is compatible with anything; the result
        takes the class of the defined-quantity operand."""
        # Check for compatible quantities.
        if not isinstance(x, Sequence):
            raise TypeError('Can only add a sequence to a sequence')
        if x.quantity != 'undefined' and self.quantity != 'undefined' and x.quantity != self.quantity:
            raise TypeError('Sequences have different quantities: %s and %s' % (self.quantity, x.quantity))
        if self.quantity == 'undefined':
            cls = x.__class__
        else:
            cls = self.__class__
        return cls(super(Sequence, self).__add__(x))

    def __mul__(self, x):
        """Repeat the sequence (list repetition semantics)."""
        return self.__class__(super(Sequence, self).__mul__(x))

    def __lshift__(self, m):
        """Advance the sequence by `m` samples."""
        return self.delay(-m)

    def __rshift__(self, m):
        """Delay the sequence by `m` samples."""
        return self.delay(m)

    def __getitem__(self, n):
        """Note this returns the element with index matching n.
        This is not necessarily the nth element in the sequence.

        Indices outside the sequence extent return zero."""
        # TODO, support slices, etc.
        try:
            nindex = list(self.n).index(n)
        except ValueError:
            return expr(0)
        return super(Sequence, self).__getitem__(nindex)

    @property
    def origin(self):
        """Return the element index for n == 0. This may raise a ValueError
        if the origin is not in the sequence."""
        return -min(self.n)

    @origin.setter
    def origin(self, origin):
        """Set the origin to `origin`."""
        self.n = list(arange(-origin, len(self) - origin))

    def prune(self):
        """Remove zeros from ends of sequence.

        {0, 0, 1, 2, 3, 0} -> {1, 2, 3}"""
        vals = self.vals

        m1 = 0
        while vals[m1] == 0:
            m1 += 1

        m2 = len(vals) - 1
        if vals[m2] != 0:
            return Sequence(vals[m1:], self.n[m1:])

        while vals[m2] == 0:
            m2 -= 1
        return Sequence(vals[m1:m2 + 1], self.n[m1:m2 + 1])

    def zeropad(self, M):
        """Add M zeros to end of sequence:

        For example, with M = 3

        {1, 2, 3} -> {1, 2, 3, 0, 0, 0}"""
        vals = self.vals
        zero = expr(0)
        for m in range(M):
            vals.append(zero)

        ni = self.n + list(range(self.n[-1] + 1, len(vals)))
        return self.__class__(vals, ni)

    def __str__(self):
        """Render as '{v0, _v1, ...}' with the origin element marked by
        an underscore; the sequence is zero-extended to include n = 0."""
        a = self.zeroextend()

        items = []
        if self.start_trunc:
            items.append('...')

        for v1, n1 in zip(a, a.n):
            try:
                s = v1.latex()
            except:
                s = str(v1)
            if n1 == 0:
                s = r'_%s' % v1
            items.append(s)

        if self.end_trunc:
            items.append('...')

        return r'{%s}' % ', '.join(items)

    def pretty(self, **kwargs):
        """Return a unicode pretty-printed rendition."""
        from .printing import pretty
        a = self.zeroextend()
        return pretty(a)

    def as_array(self):
        """Numerically evaluate and store as NumPy array."""
        # If, for some reason, a sequence can have elements that
        # depend on n...
        #vals = array([v1.evaluate(n1) for v1, n1 in zip(self, self.n)])
        vals = array([v1.cval for v1 in self])
        if allclose(vals.imag, 0.0):
            vals = vals.real
        return vals

    @property
    def expr(self):
        """Convert sequence to an Lcapy expression."""
        if self.var is None:
            raise ValueError('var not specified')
        return self.as_impulses(self.var)

    def as_impulses(self, var=None):
        """Convert to discrete-time signal in the form of
        a weighted sum of delayed impulses.  For example,
        {1, 2, 3} -> ui[n] + 2 * ui[n - 1] + 3 * ui[n - 2]"""
        from .functions import unitimpulse
        from sympy import Add

        if var is None:
            var = self.var
        if var is None:
            raise ValueError('var not specified')

        # This can reorder terms
        result = var * 0
        for v1, n1 in zip(self, self.n):
            result += v1.as_constant() * unitimpulse(var - n1)

        # but so does the unevaluated Add...
        # items = []
        # for v1, n1 in zip(self, self.n):
        #     items.append(v1 * unitimpulse(var.var - n1))
        #
        # expr = Add(*items, evaluate=False)
        # result = var.__class__(expr)

        return result

    def evaluate(self, ni=None):
        """Evaluate expression at sequence indices specified by `arg`. `arg`
        may be a scalar or a vector.  The result is of type float or
        complex.  Zeroes are returned for indices outside the sequence
        extent.

        If arg is iterable, a NumPy array is returned.
        """
        if ni is None:
            return self.as_array()

        if isiterable(ni):
            vals = array([self(n1).cval for n1 in ni])
            if allclose(vals.imag, 0.0):
                vals = vals.real
            return vals
        else:
            val = self(ni).cval
            if allclose(val.imag, 0.0):
                val = val.real
            return val

    @property
    def extent(self):
        """Determine extent of the sequence.

        For example, Sequence([1, 1]).extent = 2
                     Sequence([1, 0, 1]).extent = 3
                     Sequence([0, 1, 0, 1]).extent = 3
        """
        from numpy import argwhere

        # Note, each element is an Expr.
        nz = [elt != 0 for elt in self]
        w = argwhere(nz)
        if len(w) == 0:
            return 0

        return (max(w) - min(w))[0] + 1

    def discrete_time_fourier_transform(self, var=None, **assumptions):
        """Convert to Fourier domain using discrete time Fourier transform."""
        return self.DTFT(var, **assumptions)

    def DTFT(self, var=None, **assumptions):
        """Convert to Fourier domain using discrete time Fourier transform."""
        return self.as_impulses().DTFT(var, **assumptions)

    def plot(self, ni=None, **kwargs):
        """Plot the sequence.  If `ni` is not specified, it defaults to the
        sequence indices.  `ni` can be a vector of specified sequence
        indices, a tuple specifing the range, or a constant specifying
        the maximum value with the minimum value set to 0.

        kwargs include:
        axes - the plot axes to use otherwise a new figure is created
        xlabel - the x-axis label
        ylabel - the y-axis label
        xscale - the x-axis scaling, say for plotting as ms
        yscale - the y-axis scaling, say for plotting mV
        in addition to those supported by the matplotlib plot command.

        The plot axes are returned.
        """
        if ni is None:
            ni = self.n

        # This is not the most efficient way but plot routines expect
        # an Expr object.
        return self.as_impulses().plot(ni, **kwargs)

    def __call__(self, arg, **assumptions):
        """Convert sequence to n-domain or k-domain expression.
        For example, seq((1, 2, 3))(n)"""
        from .nexpr import n
        from .kexpr import k
        from .zexpr import z

        if id(arg) == id(n) or arg == n:
            if self.var == n:
                return self.copy()
            elif self.var == k:
                return self.IDFT()
            return self.IZT()
        if id(arg) == id(k) or arg == k:
            if self.var == k:
                return self.copy()
            elif self.var == z:
                return self.IZT().DFT()
            return self.DFT()
        if id(arg) == id(z) or arg == z:
            if self.var == z:
                return self.copy()
            elif self.var == k:
                return self.IDFT().ZT()
            return self.ZT()

        # This is experimental and may be deprecated.
        return self[arg]

    def _repr_pretty_(self, p, cycle):
        """This is used by jupyter notebooks to display an expression using
        unicode.  It is also called by IPython when displaying an
        expression."""
        # Note, the method in ExprPrint is bypassed since list
        # has this methodx.
        p.text(self.pretty())

    def copy(self):
        """Return a copy sharing the same indices."""
        return self.__class__(super(Sequence, self).copy(),
                              self.n)

    def lfilter(self, b=None, a=None):
        """Implement digital filter specified by a transfer function.  The
        transfer function is described by a vector `b` of coefficients
        for the numerator and an `a` vector of coefficients for the
        denominator.

        If you would like the response with initial conditions see
        `DTfilter.response()`.

        For a FIR filter a = [1]."""
        if b is None:
            b = []
        if a is None:
            a = [1]

        x = self.vals
        y = []

        a0 = a[0]

        for n, x1 in enumerate(x):
            y.append(expr(0))

            for m, b1 in enumerate(b):
                # NOTE(review): negative indices wrap in Python lists, so
                # the bare except only masks IndexError past the end;
                # behaviour preserved from the original.
                try:
                    y[-1] += b1 * x[n - m] / a0
                except:
                    pass

            yn = y[-1]
            for m, a1 in enumerate(a[1:]):
                try:
                    yn += a1 * y[-m - 2] / a0
                except:
                    pass
            y[-1] = yn

        return self.__class__(y, self.n)

    def convolve(self, h, mode='full'):
        """Convolve with h."""
        x = self
        h = Sequence(h)

        Lx = x.extent
        Lh = h.extent
        Ly = Lx + Lh - 1

        if mode == 'full':
            x = x.zeropad(Ly - Lx)
        elif mode == 'same':
            x = x.zeropad(max(Lx, Ly) - Lx)
        else:
            raise ValueError('Unknown mode ' + mode)

        return x.lfilter(h, a=[1])

    def delay(self, m=0):
        """Return a new sequence delayed by an integer number of samples `m`.
        If `m` is negative, the sequence is advanced."""
        if m != int(m):
            raise ValueError('Non-integer delay %s' % m)

        origin = self.origin - m
        ni = list(arange(-origin, len(self) - origin))
        return self.__class__(self.vals, ni)

    def zeroextend(self):
        """Extend sequence by adding zeros so that the origin
        is included.  This is used for printing."""
        ni = self.n
        vals = self.vals

        if ni[0] > 0:
            vals = [0] * ni[0] + vals
            ni = range(0, ni[-1] + 1)
        elif ni[-1] < 0:
            vals = vals + [0] * -ni[-1]
            ni = range(ni[0], 1)

        return self.__class__(vals, ni,
                              start_trunc=self.start_trunc,
                              end_trunc=self.end_trunc)
|
def make_true_nz(test_name):
    """
    Function to create true redshift distribution to be shared among several test cases

    Parameters
    ----------
    test_name: string
        name used to look up parameters for making true_nz

    Returns
    -------
    true_nz: chippr.gmix object
        gaussian mixture probability distribution

    Notes
    -----
    test_name is currently ignored but will soon be used to load parameters for making true_nz instead of hardcoded values.
    """
    amplitudes = np.array([0.20, 0.35, 0.55])
    means = np.array([0.5, 0.2, 0.75])
    sigmas = np.array([0.4, 0.2, 0.1])
    return chippr.gmix(amplitudes, means, sigmas, limits=(0., 1.))
def set_up_prior(data):
    """
    Function to create prior distribution from data

    Parameters
    ----------
    data: dict
        catalog dictionary containing bin endpoints, log interim prior, and log interim posteriors

    Returns
    -------
    prior: chippr.mvn object
        prior distribution as multivariate normal
    prior_var: numpy.ndarray
        covariance matrix used to construct the prior (also returned so
        callers can reuse it)
    """
    zs = data['bin_ends']
    log_nz_intp = data['log_interim_prior']
    # NOTE(review): log_z_posts and z_difs are computed but unused here.
    log_z_posts = data['log_interim_posteriors']
    z_difs = zs[1:]-zs[:-1]
    z_mids = (zs[1:]+zs[:-1])/2.
    n_bins = len(z_mids)
    prior_var = np.eye(n_bins)
    for k in range(n_bins):
        # Squared-exponential covariance with correlation length 0.16 in z.
        prior_var[k] = 1. * np.exp(-0.5 * (z_mids[k] - z_mids) ** 2 / 0.16 ** 2)
    # Prior is centred on the log interim prior.
    prior_mean = log_nz_intp
    prior = mvn(prior_mean, prior_var)
    return (prior, prior_var)
def do_inference(given_key):
    """
    Function to do inference from a catalog of photo-z interim posteriors

    Parameters
    ----------
    given_key: string
        name of test case to be run
    """
    test_info = all_tests[given_key]
    test_name = test_info['name']
    true_nz = test_info['truth']
    # Strip the trailing newline left by iterating over the name file.
    test_name = test_name[:-1]
    param_file_name = test_name + '.txt'
    params = chippr.utils.ingest(param_file_name)
    params = defaults.check_inf_params(params)
    test_dir = os.path.join(result_dir, test_name)
    simulated_posteriors = catalog(params=param_file_name, loc=test_dir)
    saved_location = 'data'
    saved_type = '.txt'
    data = simulated_posteriors.read(loc=saved_location, style=saved_type)
    (prior, cov) = set_up_prior(data)
    nz = log_z_dens(data, prior, truth=true_nz, loc=test_dir, vb=True)
    # Point estimators of n(z): stacked, marginalized MAP, expected value,
    # and marginalized maximum likelihood (seeded with the stacked estimate).
    nz_stacked = nz.calculate_stacked()
    nz_mmap = nz.calculate_mmap()
    nz_mexp = nz.calculate_mexp()
    nz_mmle = nz.calculate_mmle(nz_stacked)
    # Sampler starting distribution; alternative kept for reference.
    start = prior#mvn(data['log_interim_prior'], cov)
    n_bins = len(nz_mmle)
    # Walker count from params when given, else 10 per parameter.
    if params['n_walkers'] is not None:
        n_ivals = params['n_walkers']
    else:
        n_ivals = 10 * n_bins
    initial_values = start.sample(n_ivals)
    nz_samps = nz.calculate_samples(initial_values)
    nz_stats = nz.compare()
    # Persist plots and results for this test case.
    nz.plot_estimators()
    nz.write('nz.p')
if __name__ == "__main__":
    # Imports are local to the guard so worker processes importing this
    # module for its functions do not re-run the driver code.
    import numpy as np
    import os
    import multiprocessing as mp

    import chippr
    from chippr import *

    result_dir = os.path.join('..', 'results')
    name_file = 'which_inf_tests.txt'

    # One entry per line of the test-name file; the trailing newline is
    # stripped later in do_inference.
    with open(name_file) as tests_to_run:
        all_tests = {}
        for test_name in tests_to_run:
            true_nz = make_true_nz(test_name)
            test_info = {}
            test_info['name'] = test_name
            test_info['truth'] = true_nz
            all_tests[test_name] = test_info

    # Keep one core free for the parent process.
    nps = mp.cpu_count()-1
    pool = mp.Pool(nps)
    try:
        pool.map(do_inference, all_tests.keys())
    finally:
        # Release worker processes; the original never closed the pool.
        pool.close()
        pool.join()
saving and plotting before and after sampling to monitor progress
def make_true_nz(test_name):
    """
    Function to create true redshift distribution to be shared among several test cases

    Parameters
    ----------
    test_name: string
        name used to look up parameters for making true_nz

    Returns
    -------
    true_nz: chippr.gmix object
        gaussian mixture probability distribution

    Notes
    -----
    test_name is currently ignored but will soon be used to load parameters for making true_nz instead of hardcoded values.
    """
    component_params = (
        np.array([0.20, 0.35, 0.55]),   # amplitudes
        np.array([0.5, 0.2, 0.75]),     # means
        np.array([0.4, 0.2, 0.1]),      # sigmas
    )
    return chippr.gmix(*component_params, limits=(0., 1.))
def set_up_prior(data):
    """
    Function to create prior distribution from data

    Parameters
    ----------
    data: dict
        catalog dictionary containing bin endpoints, log interim prior, and log interim posteriors

    Returns
    -------
    prior: chippr.mvn object
        prior distribution as multivariate normal
    prior_var: numpy.ndarray
        covariance matrix used to construct the prior (also returned so
        callers can reuse it)
    """
    zs = data['bin_ends']
    log_nz_intp = data['log_interim_prior']
    # NOTE(review): log_z_posts and z_difs are computed but unused here.
    log_z_posts = data['log_interim_posteriors']
    z_difs = zs[1:]-zs[:-1]
    z_mids = (zs[1:]+zs[:-1])/2.
    n_bins = len(z_mids)
    prior_var = np.eye(n_bins)
    for k in range(n_bins):
        # Squared-exponential covariance with correlation length 0.16 in z.
        prior_var[k] = 1. * np.exp(-0.5 * (z_mids[k] - z_mids) ** 2 / 0.16 ** 2)
    # Prior is centred on the log interim prior.
    prior_mean = log_nz_intp
    prior = mvn(prior_mean, prior_var)
    return (prior, prior_var)
def do_inference(given_key):
    """
    Function to do inference from a catalog of photo-z interim posteriors

    Parameters
    ----------
    given_key: string
        name of test case to be run
    """
    test_info = all_tests[given_key]
    test_name = test_info['name']
    true_nz = test_info['truth']
    # Strip the trailing newline left by iterating over the name file.
    test_name = test_name[:-1]
    param_file_name = test_name + '.txt'
    params = chippr.utils.ingest(param_file_name)
    params = defaults.check_inf_params(params)
    test_dir = os.path.join(result_dir, test_name)
    simulated_posteriors = catalog(params=param_file_name, loc=test_dir)
    saved_location = 'data'
    saved_type = '.txt'
    data = simulated_posteriors.read(loc=saved_location, style=saved_type)
    (prior, cov) = set_up_prior(data)
    nz = log_z_dens(data, prior, truth=true_nz, loc=test_dir, vb=True)
    # Point estimators of n(z): stacked, marginalized MAP, expected value,
    # and marginalized maximum likelihood (seeded with the stacked estimate).
    nz_stacked = nz.calculate_stacked()
    nz_mmap = nz.calculate_mmap()
    nz_mexp = nz.calculate_mexp()
    nz_mmle = nz.calculate_mmle(nz_stacked)
    # Save and plot the point estimates BEFORE sampling so progress can be
    # monitored; repeated after sampling below (intentional, not a bug).
    nz.plot_estimators()
    nz.write('nz.p')
    # Sampler starting distribution; alternative kept for reference.
    start = prior#mvn(data['log_interim_prior'], cov)
    n_bins = len(nz_mmle)
    # Walker count from params when given, else 10 per parameter.
    if params['n_walkers'] is not None:
        n_ivals = params['n_walkers']
    else:
        n_ivals = 10 * n_bins
    initial_values = start.sample(n_ivals)
    nz_samps = nz.calculate_samples(initial_values)
    nz_stats = nz.compare()
    # Persist plots and results again, now including the samples.
    nz.plot_estimators()
    nz.write('nz.p')
if __name__ == "__main__":
    # Imports are local to the guard so worker processes importing this
    # module for its functions do not re-run the driver code.
    import numpy as np
    import os
    import multiprocessing as mp

    import chippr
    from chippr import *

    result_dir = os.path.join('..', 'results')
    name_file = 'which_inf_tests.txt'

    # One entry per line of the test-name file; the trailing newline is
    # stripped later in do_inference.
    with open(name_file) as tests_to_run:
        all_tests = {}
        for test_name in tests_to_run:
            true_nz = make_true_nz(test_name)
            test_info = {}
            test_info['name'] = test_name
            test_info['truth'] = true_nz
            all_tests[test_name] = test_info

    # Keep one core free for the parent process.
    nps = mp.cpu_count()-1
    pool = mp.Pool(nps)
    try:
        pool.map(do_inference, all_tests.keys())
    finally:
        # Release worker processes; the original never closed the pool.
        pool.close()
        pool.join()
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import LongRunningOperation, CliCommandType
from ._client_factory import iot_hub_service_factory
from ._client_factory import iot_service_provisioning_factory
from ._client_factory import iot_central_service_factory
from ._utils import _dps_certificate_response_transform
CS_DEPRECATION_INFO = 'IoT Extension (azure-iot) connection-string command (az iot hub connection-string show)'
class PolicyUpdateResultTransform(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Unwrap a policy-update poller to the hub's authorization policies."""
    def __call__(self, poller):
        hub = super(PolicyUpdateResultTransform, self).__call__(poller)
        return hub.properties.authorization_policies
class EndpointUpdateResultTransform(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Unwrap an endpoint-update poller to the hub's routing endpoints."""
    def __call__(self, poller):
        hub = super(EndpointUpdateResultTransform, self).__call__(poller)
        return hub.properties.routing.endpoints
class RouteUpdateResultTransform(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Unwrap a route-update poller to the hub's routing routes."""
    def __call__(self, poller):
        hub = super(RouteUpdateResultTransform, self).__call__(poller)
        return hub.properties.routing.routes
# Deleting IoT Hub is a long-running operation. Due to API implementation issue, 404 error will be thrown during
# deletion of an IoT Hub.
# This is a workaround to suppress the 404 error. It should be removed after the API is fixed.
class HubDeleteResultTransform(LongRunningOperation):  # pylint: disable=too-few-public-methods
    """Suppress the spurious 404 raised while deleting an IoT Hub."""
    def __call__(self, poller):
        from azure.cli.core.util import CLIError
        try:
            super(HubDeleteResultTransform, self).__call__(poller)
        except CLIError as e:
            if 'not found' not in str(e):
                # Bare `raise` preserves the original traceback;
                # `raise e` restarted it at this frame.
                raise
def load_command_table(self, _): # pylint: disable=too-many-statements
    """Register every `az iot` command group (DPS, IoT Hub, IoT Central) on this loader."""
    # All custom implementations resolve from this module's custom.py.
    update_custom_util = CliCommandType(operations_tmpl='azure.cli.command_modules.iot.custom#{}')
    iot_central_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.iotcentral.operations#IoTCentaralOperations.{}'
    )
    # iot dps commands
    with self.command_group('iot dps', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_list')
        g.custom_show_command('show', 'iot_dps_get')
        g.custom_command('create', 'iot_dps_create')
        g.custom_command('delete', 'iot_dps_delete')
        g.generic_update_command('update', getter_name='iot_dps_get', setter_name='iot_dps_update',
                                 command_type=update_custom_util)
    # iot dps access-policy commands (Deprecated) — redirected to `iot dps policy`.
    with self.command_group('iot dps access-policy',
                            client_factory=iot_service_provisioning_factory,
                            deprecate_info=self.deprecate(redirect='iot dps policy',
                                                          expiration='2.35.0')
                            ) as g:
        g.custom_command('list', 'iot_dps_policy_list')
        g.custom_show_command('show', 'iot_dps_policy_get')
        g.custom_command('create', 'iot_dps_policy_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_policy_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_policy_delete', supports_no_wait=True)
    # iot dps linked-hub commands
    with self.command_group('iot dps linked-hub', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_linked_hub_list')
        g.custom_show_command('show', 'iot_dps_linked_hub_get')
        g.custom_command('create', 'iot_dps_linked_hub_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_linked_hub_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_linked_hub_delete', supports_no_wait=True)
    # iot dps certificate commands
    with self.command_group('iot dps certificate',
                            client_factory=iot_service_provisioning_factory,
                            transform=_dps_certificate_response_transform) as g:
        g.custom_command('list', 'iot_dps_certificate_list')
        g.custom_show_command('show', 'iot_dps_certificate_get')
        g.custom_command('create', 'iot_dps_certificate_create')
        g.custom_command('delete', 'iot_dps_certificate_delete')
        g.custom_command('generate-verification-code', 'iot_dps_certificate_gen_code')
        g.custom_command('verify', 'iot_dps_certificate_verify')
        g.custom_command('update', 'iot_dps_certificate_update')
    # iot dps policy commands
    with self.command_group('iot dps policy', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_policy_list')
        g.custom_show_command('show', 'iot_dps_policy_get')
        g.custom_command('create', 'iot_dps_policy_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_policy_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_policy_delete', supports_no_wait=True)
    # iot hub certificate commands
    with self.command_group('iot hub certificate', client_factory=iot_hub_service_factory) as g:
        g.custom_command('list', 'iot_hub_certificate_list')
        g.custom_show_command('show', 'iot_hub_certificate_get')
        g.custom_command('create', 'iot_hub_certificate_create')
        g.custom_command('delete', 'iot_hub_certificate_delete')
        g.custom_command('generate-verification-code', 'iot_hub_certificate_gen_code')
        g.custom_command('verify', 'iot_hub_certificate_verify')
        g.custom_command('update', 'iot_hub_certificate_update')
    # iot hub commands
    with self.command_group('iot hub', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_create')
        g.custom_command('list', 'iot_hub_list')
        g.custom_command('show-connection-string', 'iot_hub_show_connection_string',
                         deprecate_info=self.deprecate(redirect=CS_DEPRECATION_INFO))
        g.custom_show_command('show', 'iot_hub_get')
        g.generic_update_command('update', getter_name='iot_hub_get', setter_name='iot_hub_update',
                                 command_type=update_custom_util, custom_func_name='update_iot_hub_custom')
        # HubDeleteResultTransform suppresses the spurious 404 the service raises on delete.
        g.custom_command('delete', 'iot_hub_delete', transform=HubDeleteResultTransform(self.cli_ctx))
        g.custom_command('list-skus', 'iot_hub_sku_list')
        g.custom_command('show-quota-metrics', 'iot_hub_get_quota_metrics')
        g.custom_command('show-stats', 'iot_hub_get_stats')
        g.custom_command('manual-failover', 'iot_hub_manual_failover', supports_no_wait=True)
    # iot hub consumer group commands
    with self.command_group('iot hub consumer-group', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_consumer_group_create')
        g.custom_command('list', 'iot_hub_consumer_group_list')
        g.custom_show_command('show', 'iot_hub_consumer_group_get')
        g.custom_command('delete', 'iot_hub_consumer_group_delete')
    # iot hub identity commands
    with self.command_group('iot hub identity', client_factory=iot_hub_service_factory) as g:
        g.custom_command('assign', 'iot_hub_identity_assign')
        g.custom_show_command('show', 'iot_hub_identity_show')
        g.custom_command('remove', 'iot_hub_identity_remove')
    # iot hub policy commands
    with self.command_group('iot hub policy', client_factory=iot_hub_service_factory) as g:
        g.custom_command('list', 'iot_hub_policy_list')
        g.custom_show_command('show', 'iot_hub_policy_get')
        g.custom_command('create', 'iot_hub_policy_create', transform=PolicyUpdateResultTransform(self.cli_ctx))
        g.custom_command('delete', 'iot_hub_policy_delete', transform=PolicyUpdateResultTransform(self.cli_ctx))
        g.custom_command('renew-key', 'iot_hub_policy_key_renew', supports_no_wait=True)
    # iot hub routing endpoint commands
    with self.command_group('iot hub routing-endpoint', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_routing_endpoint_create',
                         transform=EndpointUpdateResultTransform(self.cli_ctx))
        g.custom_show_command('show', 'iot_hub_routing_endpoint_show')
        g.custom_command('list', 'iot_hub_routing_endpoint_list')
        g.custom_command('delete', 'iot_hub_routing_endpoint_delete',
                         transform=EndpointUpdateResultTransform(self.cli_ctx))
    # iot hub message enrichment commands
    with self.command_group('iot hub message-enrichment', client_factory=iot_hub_service_factory,
                            min_api="2019-07-01-preview") as g:
        g.custom_command('create', 'iot_message_enrichment_create')
        g.custom_command('list', 'iot_message_enrichment_list')
        g.custom_command('delete', 'iot_message_enrichment_delete')
        g.custom_command('update', 'iot_message_enrichment_update')
    # iot hub route commands
    with self.command_group('iot hub route', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_route_create', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_show_command('show', 'iot_hub_route_show')
        g.custom_command('list', 'iot_hub_route_list')
        g.custom_command('delete', 'iot_hub_route_delete', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_command('update', 'iot_hub_route_update', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_command('test', 'iot_hub_route_test')
    # iot hub device stream commands
    with self.command_group('iot hub devicestream', client_factory=iot_hub_service_factory,
                            min_api="2019-07-01-preview") as g:
        g.custom_show_command('show', 'iot_hub_devicestream_show')
    # iot central app commands
    with self.command_group('iot central app', iot_central_sdk, client_factory=iot_central_service_factory) as g:
        g.custom_command('create', 'iot_central_app_create', supports_no_wait=True)
        g.custom_command('list', 'iot_central_app_list')
        g.custom_show_command('show', 'iot_central_app_get')
        g.generic_update_command('update', getter_name='iot_central_app_get',
                                 setter_name='iot_central_app_update', command_type=update_custom_util)
        g.custom_command('delete', 'iot_central_app_delete', confirmation=True, supports_no_wait=True)
        g.custom_command('identity assign', 'iot_central_app_assign_identity')
        g.custom_command('identity remove', 'iot_central_app_remove_identity')
        g.custom_show_command('identity show', 'iot_central_app_show_identity')
{iot} `az iot dps access-policy`: Defer its deprecation expiration from 2.35.0 to 2.36.0 (#21892)
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from azure.cli.core.commands import LongRunningOperation, CliCommandType
from ._client_factory import iot_hub_service_factory
from ._client_factory import iot_service_provisioning_factory
from ._client_factory import iot_central_service_factory
from ._utils import _dps_certificate_response_transform
# Redirect text shown when the deprecated `iot hub show-connection-string`
# command fires; points users at the azure-iot extension replacement.
CS_DEPRECATION_INFO = 'IoT Extension (azure-iot) connection-string command (az iot hub connection-string show)'
class PolicyUpdateResultTransform(LongRunningOperation): # pylint: disable=too-few-public-methods
    """Wait for a hub LRO poller and return only the shared access policies."""
    def __call__(self, poller):
        # Block until the long-running operation completes, then unwrap.
        result = super(PolicyUpdateResultTransform, self).__call__(poller)
        return result.properties.authorization_policies
class EndpointUpdateResultTransform(LongRunningOperation): # pylint: disable=too-few-public-methods
    """Wait for a hub LRO poller and return only the routing endpoints."""
    def __call__(self, poller):
        # Block until the long-running operation completes, then unwrap.
        result = super(EndpointUpdateResultTransform, self).__call__(poller)
        return result.properties.routing.endpoints
class RouteUpdateResultTransform(LongRunningOperation): # pylint: disable=too-few-public-methods
    """Wait for a hub LRO poller and return only the routing routes."""
    def __call__(self, poller):
        # Block until the long-running operation completes, then unwrap.
        result = super(RouteUpdateResultTransform, self).__call__(poller)
        return result.properties.routing.routes
# Deleting IoT Hub is a long-running operation. Due to API implementation issue, 404 error will be thrown during
# deletion of an IoT Hub.
# This is a work around to suppress the 404 error. It should be removed after API is fixed.
class HubDeleteResultTransform(LongRunningOperation): # pylint: disable=too-few-public-methods
    """Suppress the spurious 404 CLIError raised while deleting an IoT Hub."""
    def __call__(self, poller):
        from azure.cli.core.util import CLIError
        try:
            super(HubDeleteResultTransform, self).__call__(poller)
        except CLIError as e:
            # Only swallow the bogus "not found" error; re-raise real failures.
            if 'not found' not in str(e):
                raise e
def load_command_table(self, _): # pylint: disable=too-many-statements
    """Register every `az iot` command group (DPS, IoT Hub, IoT Central) on this loader."""
    # All custom implementations resolve from this module's custom.py.
    update_custom_util = CliCommandType(operations_tmpl='azure.cli.command_modules.iot.custom#{}')
    iot_central_sdk = CliCommandType(
        operations_tmpl='azure.mgmt.iotcentral.operations#IoTCentaralOperations.{}'
    )
    # iot dps commands
    with self.command_group('iot dps', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_list')
        g.custom_show_command('show', 'iot_dps_get')
        g.custom_command('create', 'iot_dps_create')
        g.custom_command('delete', 'iot_dps_delete')
        g.generic_update_command('update', getter_name='iot_dps_get', setter_name='iot_dps_update',
                                 command_type=update_custom_util)
    # iot dps access-policy commands (Deprecated) — redirected to `iot dps policy`.
    with self.command_group('iot dps access-policy',
                            client_factory=iot_service_provisioning_factory,
                            deprecate_info=self.deprecate(redirect='iot dps policy',
                                                          expiration='2.36.0')
                            ) as g:
        g.custom_command('list', 'iot_dps_policy_list')
        g.custom_show_command('show', 'iot_dps_policy_get')
        g.custom_command('create', 'iot_dps_policy_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_policy_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_policy_delete', supports_no_wait=True)
    # iot dps linked-hub commands
    with self.command_group('iot dps linked-hub', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_linked_hub_list')
        g.custom_show_command('show', 'iot_dps_linked_hub_get')
        g.custom_command('create', 'iot_dps_linked_hub_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_linked_hub_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_linked_hub_delete', supports_no_wait=True)
    # iot dps certificate commands
    with self.command_group('iot dps certificate',
                            client_factory=iot_service_provisioning_factory,
                            transform=_dps_certificate_response_transform) as g:
        g.custom_command('list', 'iot_dps_certificate_list')
        g.custom_show_command('show', 'iot_dps_certificate_get')
        g.custom_command('create', 'iot_dps_certificate_create')
        g.custom_command('delete', 'iot_dps_certificate_delete')
        g.custom_command('generate-verification-code', 'iot_dps_certificate_gen_code')
        g.custom_command('verify', 'iot_dps_certificate_verify')
        g.custom_command('update', 'iot_dps_certificate_update')
    # iot dps policy commands
    with self.command_group('iot dps policy', client_factory=iot_service_provisioning_factory) as g:
        g.custom_command('list', 'iot_dps_policy_list')
        g.custom_show_command('show', 'iot_dps_policy_get')
        g.custom_command('create', 'iot_dps_policy_create', supports_no_wait=True)
        g.custom_command('update', 'iot_dps_policy_update', supports_no_wait=True)
        g.custom_command('delete', 'iot_dps_policy_delete', supports_no_wait=True)
    # iot hub certificate commands
    with self.command_group('iot hub certificate', client_factory=iot_hub_service_factory) as g:
        g.custom_command('list', 'iot_hub_certificate_list')
        g.custom_show_command('show', 'iot_hub_certificate_get')
        g.custom_command('create', 'iot_hub_certificate_create')
        g.custom_command('delete', 'iot_hub_certificate_delete')
        g.custom_command('generate-verification-code', 'iot_hub_certificate_gen_code')
        g.custom_command('verify', 'iot_hub_certificate_verify')
        g.custom_command('update', 'iot_hub_certificate_update')
    # iot hub commands
    with self.command_group('iot hub', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_create')
        g.custom_command('list', 'iot_hub_list')
        g.custom_command('show-connection-string', 'iot_hub_show_connection_string',
                         deprecate_info=self.deprecate(redirect=CS_DEPRECATION_INFO))
        g.custom_show_command('show', 'iot_hub_get')
        g.generic_update_command('update', getter_name='iot_hub_get', setter_name='iot_hub_update',
                                 command_type=update_custom_util, custom_func_name='update_iot_hub_custom')
        # HubDeleteResultTransform suppresses the spurious 404 the service raises on delete.
        g.custom_command('delete', 'iot_hub_delete', transform=HubDeleteResultTransform(self.cli_ctx))
        g.custom_command('list-skus', 'iot_hub_sku_list')
        g.custom_command('show-quota-metrics', 'iot_hub_get_quota_metrics')
        g.custom_command('show-stats', 'iot_hub_get_stats')
        g.custom_command('manual-failover', 'iot_hub_manual_failover', supports_no_wait=True)
    # iot hub consumer group commands
    with self.command_group('iot hub consumer-group', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_consumer_group_create')
        g.custom_command('list', 'iot_hub_consumer_group_list')
        g.custom_show_command('show', 'iot_hub_consumer_group_get')
        g.custom_command('delete', 'iot_hub_consumer_group_delete')
    # iot hub identity commands
    with self.command_group('iot hub identity', client_factory=iot_hub_service_factory) as g:
        g.custom_command('assign', 'iot_hub_identity_assign')
        g.custom_show_command('show', 'iot_hub_identity_show')
        g.custom_command('remove', 'iot_hub_identity_remove')
    # iot hub policy commands
    with self.command_group('iot hub policy', client_factory=iot_hub_service_factory) as g:
        g.custom_command('list', 'iot_hub_policy_list')
        g.custom_show_command('show', 'iot_hub_policy_get')
        g.custom_command('create', 'iot_hub_policy_create', transform=PolicyUpdateResultTransform(self.cli_ctx))
        g.custom_command('delete', 'iot_hub_policy_delete', transform=PolicyUpdateResultTransform(self.cli_ctx))
        g.custom_command('renew-key', 'iot_hub_policy_key_renew', supports_no_wait=True)
    # iot hub routing endpoint commands
    with self.command_group('iot hub routing-endpoint', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_routing_endpoint_create',
                         transform=EndpointUpdateResultTransform(self.cli_ctx))
        g.custom_show_command('show', 'iot_hub_routing_endpoint_show')
        g.custom_command('list', 'iot_hub_routing_endpoint_list')
        g.custom_command('delete', 'iot_hub_routing_endpoint_delete',
                         transform=EndpointUpdateResultTransform(self.cli_ctx))
    # iot hub message enrichment commands
    with self.command_group('iot hub message-enrichment', client_factory=iot_hub_service_factory,
                            min_api="2019-07-01-preview") as g:
        g.custom_command('create', 'iot_message_enrichment_create')
        g.custom_command('list', 'iot_message_enrichment_list')
        g.custom_command('delete', 'iot_message_enrichment_delete')
        g.custom_command('update', 'iot_message_enrichment_update')
    # iot hub route commands
    with self.command_group('iot hub route', client_factory=iot_hub_service_factory) as g:
        g.custom_command('create', 'iot_hub_route_create', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_show_command('show', 'iot_hub_route_show')
        g.custom_command('list', 'iot_hub_route_list')
        g.custom_command('delete', 'iot_hub_route_delete', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_command('update', 'iot_hub_route_update', transform=RouteUpdateResultTransform(self.cli_ctx))
        g.custom_command('test', 'iot_hub_route_test')
    # iot hub device stream commands
    with self.command_group('iot hub devicestream', client_factory=iot_hub_service_factory,
                            min_api="2019-07-01-preview") as g:
        g.custom_show_command('show', 'iot_hub_devicestream_show')
    # iot central app commands
    with self.command_group('iot central app', iot_central_sdk, client_factory=iot_central_service_factory) as g:
        g.custom_command('create', 'iot_central_app_create', supports_no_wait=True)
        g.custom_command('list', 'iot_central_app_list')
        g.custom_show_command('show', 'iot_central_app_get')
        g.generic_update_command('update', getter_name='iot_central_app_get',
                                 setter_name='iot_central_app_update', command_type=update_custom_util)
        g.custom_command('delete', 'iot_central_app_delete', confirmation=True, supports_no_wait=True)
        g.custom_command('identity assign', 'iot_central_app_assign_identity')
        g.custom_command('identity remove', 'iot_central_app_remove_identity')
        g.custom_show_command('identity show', 'iot_central_app_show_identity')
|
"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
from __future__ import print_function
from __future__ import absolute_import
import logging
from .base import Provider as BaseProvider
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
LOGGER = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register Gandi-specific command-line arguments on *subparser*."""
    api_key_help = "specify Gandi API key"
    subparser.add_argument('--auth-token', help=api_key_help)
class Provider(BaseProvider):
    """Provide Gandi DNS API implementation of Lexicon Provider interface.

    The class will use the following environment variables to configure
    it instance. For more information, read the Lexicon documentation.

    - LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
      The default is the production URL https://rpc.gandi.net/xmlrpc/.
      Set this environment variable to the OT&E URL for testing.
    """

    def __init__(self, options, provider_options=None):
        """Initialize Gandi DNS provider with the XML-RPC proxy and defaults."""
        super(Provider, self).__init__(options)
        if provider_options is None:
            provider_options = {}
        api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
        self.apikey = self.options['auth_token']
        self.api = xmlrpclib.ServerProxy(api_endpoint, allow_none=True)
        self.default_ttl = 3600
        # self.domain_id is required by test suite
        self.domain_id = None
        self.zone_id = None
        self.domain = self.options['domain'].lower()

    # Authenticate against provider,
    # Make any requests required to get the domain's id for this provider,
    # so it can be used in subsequent calls. Should throw an error if
    # authentication fails for any reason, or if the domain does not exist.
    def authenticate(self):
        """Determine the current domain and zone IDs for the domain.

        Raises Exception when the API key is rejected or the domain is unknown.
        """
        try:
            payload = self.api.domain.info(self.apikey, self.domain)
            self.domain_id = payload['id']
            self.zone_id = payload['zone_id']
        except xmlrpclib.Fault as err:
            raise Exception("Failed to authenticate: '{0}'".format(err))

    # Create record. If record already exists with the same content, do nothing'
    def create_record(self, type, name, content):
        """Creates a record for the domain in a new Gandi zone.

        Returns True once the new zone version is activated; on failure the
        freshly created zone version is deleted before the error propagates.
        """
        version = None
        ret = False
        name = self._relative_name(name)
        # This isn't quite "do nothing" if the record already exists.
        # In this case, no new record will be created, but a new zone version
        # will be created and set.
        try:
            version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
            self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
                                            {'type': type.upper(),
                                             'name': name,
                                             'value': content,
                                             'ttl': self.options.get('ttl') or self.default_ttl
                                             })
            self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
            ret = True
        finally:
            # Roll back the new zone version if anything above failed.
            if not ret and version is not None:
                self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
        LOGGER.debug("create_record: %s", ret)
        return ret

    # List all records. Return an empty list if no records found
    # type, name and content are used to filter records.
    # If possible filter during the query, otherwise filter after response is received.
    def list_records(self, type=None, name=None, content=None):
        """List all record for the domain in the active Gandi zone."""
        opts = {}
        if type is not None:
            opts['type'] = type.upper()
        if name is not None:
            opts['name'] = self._relative_name(name)
        if content is not None:
            # TXT values are stored quoted/escaped server-side, so encode
            # the filter value the same way before querying.
            opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
        records = []
        payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
        for record in payload:
            processed_record = {
                'type': record['type'],
                'name': self._fqdn_name(record['name']),
                'ttl': record['ttl'],
                'content': record['value'],
                'id': record['id']
            }
            # Gandi will add quotes to all TXT record strings
            if processed_record['type'] == 'TXT':
                processed_record['content'] = self._txt_decode(processed_record['content'])
            records.append(processed_record)
        LOGGER.debug("list_records: %s", records)
        return records

    # Update a record. Identifier must be specified.
    def update_record(self, identifier, type=None, name=None, content=None):
        """Updates the specified record in a new Gandi zone.

        Returns True when exactly one record was updated and the new zone
        version activated; False otherwise.
        """
        if not identifier:
            records = self.list_records(type, name)
            if len(records) == 1:
                identifier = records[0]['id']
            elif len(records) > 1:
                raise Exception('Several record identifiers match the request')
            else:
                raise Exception('Record identifier could not be found')
        identifier = int(identifier)
        version = None
        # BUGFIX: 'ret' must be initialized before the lookup below.
        # Previously it was only assigned inside the try block, so a failed
        # lookup (len(records) != 1) or an internal error raised before
        # 'ret = True' caused an UnboundLocalError in the finally block and
        # at the LOGGER/return lines instead of a clean False result.
        ret = False
        # Gandi doesn't allow you to edit records on the active zone file.
        # Gandi also doesn't persist zone record identifiers when creating
        # a new zone file. To update by identifier, we lookup the record
        # by identifier, then use the record fields to find the record in
        # the newly created zone.
        records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
        if len(records) == 1:
            rec = records[0]
            del rec['id']
            try:
                version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
                records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
                if len(records) != 1:
                    raise GandiInternalError("expected one record")
                if type is not None:
                    rec['type'] = type.upper()
                if name is not None:
                    rec['name'] = self._relative_name(name)
                if content is not None:
                    rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
                records = self.api.domain.zone.record.update(self.apikey,
                                                             self.zone_id,
                                                             version,
                                                             {'id': records[0]['id']},
                                                             rec)
                if len(records) != 1:
                    raise GandiInternalError("expected one updated record")
                self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
                ret = True
            except GandiInternalError:
                pass
            finally:
                # Roll back the new zone version if the update did not complete.
                if not ret and version is not None:
                    self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
        LOGGER.debug("update_record: %s", ret)
        return ret

    # Delete an existing record.
    # If record does not exist, do nothing.
    # If an identifier is specified, use it, otherwise do a lookup using type, name and content.
    def delete_record(self, identifier=None, type=None, name=None, content=None):
        """Removes the specified record in a new Gandi zone.

        Returns True when exactly one record was deleted and the new zone
        version activated; False otherwise.
        """
        version = None
        ret = False
        opts = {}
        if identifier is not None:
            opts['id'] = identifier
        else:
            opts['type'] = type.upper()
            opts['name'] = self._relative_name(name)
            opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
        records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
        if len(records) == 1:
            rec = records[0]
            del rec['id']
            try:
                version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
                cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
                if cnt != 1:
                    raise GandiInternalError("expected one deleted record")
                self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
                ret = True
            except GandiInternalError:
                pass
            finally:
                # Roll back the new zone version if the delete did not complete.
                if not ret and version is not None:
                    self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
        LOGGER.debug("delete_record: %s", ret)
        return ret

    @staticmethod
    def _txt_encode(val):
        """Quote and escape a TXT value the way Gandi stores it."""
        return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])

    @staticmethod
    def _txt_decode(val):
        """Strip Gandi's quoting/escaping from a stored TXT value."""
        if len(val) > 1 and val[0:1] == '"':
            val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
        return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
# Internal-only error type used to unwind cleanly from failed zone edits.
class GandiInternalError(Exception):
    """Internal exception handling class for Gandi management errors"""
Implement an empty `_request` method: Gandi calls go through the XML-RPC proxy, so the HTTP request hook required by the base provider is a no-op.
"""Provide support to Lexicon for Gandi DNS changes.
Lexicon provides a common interface for querying and managing DNS services
through those services' APIs. This module implements the Lexicon interface
against the Gandi API.
The Gandi API is different from typical DNS APIs in that Gandi
zone changes are atomic. You cannot edit the currently active
configuration. Any changes require editing either a new or inactive
configuration. Once the changes are committed, then the domain is switched
to using the new zone configuration. This module makes no attempt to
cleanup previous zone configurations.
Note that Gandi domains can share zone configurations. In other words,
I can have domain-a.com and domain-b.com which share the same zone
configuration file. If I make changes to domain-a.com, those changes
will only apply to domain-a.com, as domain-b.com will continue using
the previous version of the zone configuration. This module makes no
attempt to detect and account for that.
"""
from __future__ import print_function
from __future__ import absolute_import
import logging
from .base import Provider as BaseProvider
try:
import xmlrpclib
except ImportError:
import xmlrpc.client as xmlrpclib
LOGGER = logging.getLogger(__name__)
def ProviderParser(subparser):
    """Register Gandi-specific command-line arguments on *subparser*."""
    api_key_help = "specify Gandi API key"
    subparser.add_argument('--auth-token', help=api_key_help)
class Provider(BaseProvider):
"""Provide Gandi DNS API implementation of Lexicon Provider interface.
The class will use the following environment variables to configure
it instance. For more information, read the Lexicon documentation.
- LEXICON_GANDI_API_ENDPOINT - the Gandi API endpoint to use
The default is the production URL https://rpc.gandi.net/xmlrpc/.
Set this environment variable to the OT&E URL for testing.
"""
def __init__(self, options, provider_options=None):
"""Initialize Gandi DNS provider."""
super(Provider, self).__init__(options)
if provider_options is None:
provider_options = {}
api_endpoint = provider_options.get('api_endpoint') or 'https://rpc.gandi.net/xmlrpc/'
self.apikey = self.options['auth_token']
self.api = xmlrpclib.ServerProxy(api_endpoint, allow_none=True)
self.default_ttl = 3600
# self.domain_id is required by test suite
self.domain_id = None
self.zone_id = None
self.domain = self.options['domain'].lower()
# Authenicate against provider,
# Make any requests required to get the domain's id for this provider,
# so it can be used in subsequent calls. Should throw an error if
# authentication fails for any reason, or if the domain does not exist.
def authenticate(self):
"""Determine the current domain and zone IDs for the domain."""
try:
payload = self.api.domain.info(self.apikey, self.domain)
self.domain_id = payload['id']
self.zone_id = payload['zone_id']
except xmlrpclib.Fault as err:
raise Exception("Failed to authenticate: '{0}'".format(err))
# Create record. If record already exists with the same content, do nothing'
def create_record(self, type, name, content):
"""Creates a record for the domain in a new Gandi zone."""
version = None
ret = False
name = self._relative_name(name)
# This isn't quite "do nothing" if the record already exists.
# In this case, no new record will be created, but a new zone version
# will be created and set.
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
self.api.domain.zone.record.add(self.apikey, self.zone_id, version,
{'type': type.upper(),
'name': name,
'value': content,
'ttl': self.options.get('ttl') or self.default_ttl
})
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
LOGGER.debug("create_record: %s", ret)
return ret
# List all records. Return an empty list if no records found
# type, name and content are used to filter records.
# If possible filter during the query, otherwise filter after response is received.
def list_records(self, type=None, name=None, content=None):
"""List all record for the domain in the active Gandi zone."""
opts = {}
if type is not None:
opts['type'] = type.upper()
if name is not None:
opts['name'] = self._relative_name(name)
if content is not None:
opts['value'] = self._txt_encode(content) if opts.get('type', '') == 'TXT' else content
records = []
payload = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
for record in payload:
processed_record = {
'type': record['type'],
'name': self._fqdn_name(record['name']),
'ttl': record['ttl'],
'content': record['value'],
'id': record['id']
}
# Gandi will add quotes to all TXT record strings
if processed_record['type'] == 'TXT':
processed_record['content'] = self._txt_decode(processed_record['content'])
records.append(processed_record)
LOGGER.debug("list_records: %s", records)
return records
# Update a record. Identifier must be specified.
def update_record(self, identifier, type=None, name=None, content=None):
"""Updates the specified record in a new Gandi zone."""
if not identifier:
records = self.list_records(type, name)
if len(records) == 1:
identifier = records[0]['id']
elif len(records) > 1:
raise Exception('Several record identifiers match the request')
else:
raise Exception('Record identifier could not be found')
identifier = int(identifier)
version = None
# Gandi doesn't allow you to edit records on the active zone file.
# Gandi also doesn't persist zone record identifiers when creating
# a new zone file. To update by identifier, we lookup the record
# by identifier, then use the record fields to find the record in
# the newly created zone.
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, {'id': identifier})
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, version, rec)
if len(records) != 1:
raise GandiInternalError("expected one record")
if type is not None:
rec['type'] = type.upper()
if name is not None:
rec['name'] = self._relative_name(name)
if content is not None:
rec['value'] = self._txt_encode(content) if rec['type'] == 'TXT' else content
records = self.api.domain.zone.record.update(self.apikey,
self.zone_id,
version,
{'id': records[0]['id']},
rec)
if len(records) != 1:
raise GandiInternalError("expected one updated record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
LOGGER.debug("update_record: %s", ret)
return ret
# Delete an existing record.
# If record does not exist, do nothing.
# If an identifier is specified, use it, otherwise do a lookup using type, name and content.
def delete_record(self, identifier=None, type=None, name=None, content=None):
"""Removes the specified record in a new Gandi zone."""
version = None
ret = False
opts = {}
if identifier is not None:
opts['id'] = identifier
else:
opts['type'] = type.upper()
opts['name'] = self._relative_name(name)
opts["value"] = self._txt_encode(content) if opts['type'] == 'TXT' else content
records = self.api.domain.zone.record.list(self.apikey, self.zone_id, 0, opts)
if len(records) == 1:
rec = records[0]
del rec['id']
try:
version = self.api.domain.zone.version.new(self.apikey, self.zone_id)
cnt = self.api.domain.zone.record.delete(self.apikey, self.zone_id, version, rec)
if cnt != 1:
raise GandiInternalError("expected one deleted record")
self.api.domain.zone.version.set(self.apikey, self.zone_id, version)
ret = True
except GandiInternalError:
pass
finally:
if not ret and version is not None:
self.api.domain.zone.version.delete(self.apikey, self.zone_id, version)
LOGGER.debug("delete_record: %s", ret)
return ret
    def _request(self, action='GET', url='/', data=None, query_params=None):
        """No-op HTTP hook required by the provider base-class interface.

        All Gandi calls in this provider go through the XML-RPC client
        (`self.api`), so there is nothing to do here.
        """
        # Not used here, as requests are handled by xmlrpc
        pass
@staticmethod
def _txt_encode(val):
return ''.join(['"', val.replace('\\', '\\\\').replace('"', '\\"'), '"'])
@staticmethod
def _txt_decode(val):
if len(val) > 1 and val[0:1] == '"':
val = val[1:-1].replace('" "', '').replace('\\"', '"').replace('\\\\', '\\')
return val
# This exception is for cleaner handling of internal errors
# within the Gandi provider codebase
class GandiInternalError(Exception):
    """Raised for internal consistency failures while managing Gandi zones.

    Only used within this provider, so partially-applied zone edits can be
    caught and rolled back cleanly without masking caller-facing errors.
    """
|
# Package version metadata (pre-bump snapshot).
VERSION = (0, 12, 11)
__version__ = '.'.join(str(part) for part in VERSION)
DATE = "2012-10-20"
Version bumped from 0.12.11 to 0.12.12; release date updated to 2012-10-25.
# Package version metadata.
VERSION = (0, 12, 12)
__version__ = '.'.join(str(part) for part in VERSION)
DATE = "2012-10-25"
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import warnings
from typing import Any, Dict, List, TypeVar, Union, Callable, Optional, Sequence
from argo.models import V1alpha1ArtifactLocation
from kubernetes.client import V1Toleration
from kubernetes.client.models import (
V1Container, V1EnvVar, V1EnvFromSource, V1SecurityContext, V1Probe,
V1ResourceRequirements, V1VolumeDevice, V1VolumeMount, V1ContainerPort,
V1Lifecycle, V1Volume
)
from . import _pipeline_param
from ._metadata import ComponentMeta
# generics
T = TypeVar('T')
# type alias: either a string or a list of string
StringOrStringList = Union[str, List[str]]
# util functions
def deprecation_warning(func: Callable, op_name: str,
                        container_name: str) -> Callable:
    """Wrap `func` so every call emits a PendingDeprecationWarning.

    The warning steers callers from the legacy `dsl.ContainerOp.<op_name>`
    accessor to `dsl.ContainerOp.container.<container_name>`.
    """
    def _wrapped(*args, **kwargs):
        message = (
            '`dsl.ContainerOp.%s` will be removed in future releases. '
            'Use `dsl.ContainerOp.container.%s` instead.' %
            (op_name, container_name))
        warnings.warn(message, PendingDeprecationWarning)
        return func(*args, **kwargs)

    return _wrapped
def _create_getter_setter(prop):
"""Create a tuple of getter and setter methods for a property in `Container`."""
def _getter(self):
return getattr(self._container, prop)
def _setter(self, value):
return setattr(self._container, prop, value)
return _getter, _setter
def _proxy_container_op_props(cls: "ContainerOp"):
    """Attach deprecated proxy properties to the `ContainerOp` class.

    Each listed `ContainerOp.<attr>` becomes a property that forwards to the
    wrapped `Container` instance and warns (PendingDeprecationWarning) on use.
    """
    # ContainerOp attribute -> Container attribute it forwards to.
    proxied = dict(image='image', env_variables='env')
    for op_attr, container_attr in proxied.items():
        raw_get, raw_set = _create_getter_setter(container_attr)
        # Both accessors are wrapped so reads and writes each warn.
        setattr(
            cls, op_attr,
            property(
                deprecation_warning(raw_get, op_attr, container_attr),
                deprecation_warning(raw_set, op_attr, container_attr)))
    return cls
def as_string_list(list_or_str: Optional[Union[Any, Sequence[Any]]]) -> Optional[List[str]]:
    """Convert any value except None to a list of strings.

    Args:
        list_or_str: a single value or a sequence of values. Strings are
            treated as scalars, not as sequences of characters.

    Returns:
        None when the input is None (so optional fields stay unset);
        otherwise a list with every element converted via `str`.
    """
    # Fix: return annotation is Optional[List[str]] -- the function
    # deliberately propagates None rather than returning a list.
    if list_or_str is None:
        return None
    if isinstance(list_or_str, Sequence) and not isinstance(list_or_str, str):
        list_value = list_or_str
    else:
        list_value = [list_or_str]
    return [str(item) for item in list_value]
def create_and_append(current_list: Union[List[T], None], item: T) -> List[T]:
    """Append `item` to `current_list`, materializing a new list for falsy input."""
    target = current_list if current_list else []
    target.append(item)
    return target
class Container(V1Container):
    """
    A wrapper over k8s container definition object (io.k8s.api.core.v1.Container),
    which is used to represent the `container` property in argo's workflow
    template (io.argoproj.workflow.v1alpha1.Template).

    `Container` class also comes with utility functions to set and update
    the various properties for a k8s container definition.

    NOTE: A notable difference is that `name` is not required and will not be
    processed for `Container` (in contrast to `V1Container` where `name` is a
    required property).

    See:
    - https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_container.py
    - https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json

    Example::

        from kfp.dsl import ContainerOp
        from kubernetes.client.models import V1EnvVar

        # creates an operation
        op = ContainerOp(name='bash-ops',
                         image='busybox:latest',
                         command=['echo'],
                         arguments=['$MSG'])

        # returns a `Container` object from `ContainerOp`
        # and adds an environment variable to `Container`
        op.container.add_env_variable(V1EnvVar(name='MSG', value='hello world'))
    """
    """
    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # remove `name` from swagger_types so `name` is not generated in the JSON
    swagger_types = {
        key: value
        for key, value in V1Container.swagger_types.items() if key != 'name'
    }
    attribute_map = {
        key: value
        for key, value in V1Container.attribute_map.items() if key != 'name'
    }
    def __init__(self, image: str, command: List[str], args: List[str],
                 **kwargs):
        """Creates a new instance of `Container`.

        Args:
            image {str}: image to use, e.g. busybox:latest
            command {List[str]}: entrypoint array. Not executed within a shell.
            args {List[str]}: arguments to entrypoint.
            **kwargs: keyword arguments for `V1Container`
        """
        # set name to '' if name is not provided
        # k8s container MUST have a name
        # argo workflow template does not need a name for container def
        if not kwargs.get('name'):
            kwargs['name'] = ''
        super(Container, self).__init__(
            image=image, command=command, args=args, **kwargs)
    def _validate_memory_string(self, memory_string):
        """Validate a given string is valid for memory request or limit.

        PipelineParams are only validated when they already carry a concrete
        value; otherwise validation is deferred to runtime.
        """
        if isinstance(memory_string, _pipeline_param.PipelineParam):
            if memory_string.value:
                memory_string = memory_string.value
            else:
                return
        if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$',
                    memory_string) is None:
            raise ValueError(
                'Invalid memory string. Should be an integer, or integer followed '
                'by one of "E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki"')
    def _validate_cpu_string(self, cpu_string):
        "Validate a given string is valid for cpu request or limit."
        if isinstance(cpu_string, _pipeline_param.PipelineParam):
            if cpu_string.value:
                cpu_string = cpu_string.value
            else:
                # Unresolved pipeline parameter: validated at runtime instead.
                return
        if re.match(r'^[0-9]+m$', cpu_string) is not None:
            # millicpu form, e.g. "500m"
            return
        try:
            float(cpu_string)
        except ValueError:
            raise ValueError(
                'Invalid cpu string. Should be float or integer, or integer followed '
                'by "m".')
    def _validate_positive_number(self, str_value, param_name):
        "Validate a given string is in positive integer format."
        if isinstance(str_value, _pipeline_param.PipelineParam):
            if str_value.value:
                str_value = str_value.value
            else:
                # Unresolved pipeline parameter: validated at runtime instead.
                return
        try:
            int_value = int(str_value)
        except ValueError:
            raise ValueError(
                'Invalid {}. Should be integer.'.format(param_name))
        if int_value <= 0:
            raise ValueError('{} must be positive integer.'.format(param_name))
    def add_resource_limit(self, resource_name, value):
        """Add the resource limit of the container.

        Args:
            resource_name: The name of the resource. It can be cpu, memory, etc.
            value: The string value of the limit.
        """
        # Lazily create the nested resources/limits structures.
        self.resources = self.resources or V1ResourceRequirements()
        self.resources.limits = self.resources.limits or {}
        self.resources.limits.update({resource_name: value})
        return self
    def add_resource_request(self, resource_name, value):
        """Add the resource request of the container.

        Args:
            resource_name: The name of the resource. It can be cpu, memory, etc.
            value: The string value of the request.
        """
        # Lazily create the nested resources/requests structures.
        self.resources = self.resources or V1ResourceRequirements()
        self.resources.requests = self.resources.requests or {}
        self.resources.requests.update({resource_name: value})
        return self
    def set_memory_request(self, memory):
        """Set memory request (minimum) for this operator.

        Args:
            memory: a string which can be a number or a number followed by one of
                "E", "P", "T", "G", "M", "K".
        """
        self._validate_memory_string(memory)
        return self.add_resource_request("memory", memory)
    def set_memory_limit(self, memory):
        """Set memory limit (maximum) for this operator.

        Args:
            memory: a string which can be a number or a number followed by one of
                "E", "P", "T", "G", "M", "K".
        """
        self._validate_memory_string(memory)
        return self.add_resource_limit("memory", memory)
    def set_cpu_request(self, cpu):
        """Set cpu request (minimum) for this operator.

        Args:
            cpu: A string which can be a number or a number followed by "m", which means 1/1000.
        """
        self._validate_cpu_string(cpu)
        return self.add_resource_request("cpu", cpu)
    def set_cpu_limit(self, cpu):
        """Set cpu limit (maximum) for this operator.

        Args:
            cpu: A string which can be a number or a number followed by "m", which means 1/1000.
        """
        self._validate_cpu_string(cpu)
        return self.add_resource_limit("cpu", cpu)
    def set_gpu_limit(self, gpu, vendor="nvidia"):
        """Set gpu limit for the operator. This function adds '<vendor>.com/gpu' into resource limit.

        Note that there is no need to add GPU request. GPUs are only supposed to be specified in
        the limits section. See https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/.

        Args:
            gpu: A string which must be a positive number.
            vendor: Optional. A string which is the vendor of the requested gpu. The supported values
                are: 'nvidia' (default), and 'amd'.
        """
        self._validate_positive_number(gpu, 'gpu')
        if vendor != 'nvidia' and vendor != 'amd':
            raise ValueError('vendor can only be nvidia or amd.')
        return self.add_resource_limit("%s.com/gpu" % vendor, gpu)
    def add_volume_mount(self, volume_mount):
        """Add volume to the container

        Args:
            volume_mount: Kubernetes volume mount
                For detailed spec, check volume mount definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume_mount.py
        """
        if not isinstance(volume_mount, V1VolumeMount):
            raise ValueError(
                'invalid argument. Must be of instance `V1VolumeMount`.')
        self.volume_mounts = create_and_append(self.volume_mounts,
                                               volume_mount)
        return self
    def add_volume_devices(self, volume_device):
        """
        Add a block device to be used by the container.

        Args:
            volume_device: Kubernetes volume device
                For detailed spec, volume device definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume_device.py
        """
        if not isinstance(volume_device, V1VolumeDevice):
            raise ValueError(
                'invalid argument. Must be of instance `V1VolumeDevice`.')
        self.volume_devices = create_and_append(self.volume_devices,
                                                volume_device)
        return self
    def add_env_variable(self, env_variable):
        """Add environment variable to the container.

        Args:
            env_variable: Kubernetes environment variable
                For detailed spec, check environment variable definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_env_var.py
        """
        if not isinstance(env_variable, V1EnvVar):
            raise ValueError(
                'invalid argument. Must be of instance `V1EnvVar`.')
        self.env = create_and_append(self.env, env_variable)
        return self
    def add_env_from(self, env_from):
        """Add a source to populate environment variables int the container.

        Args:
            env_from: Kubernetes environment from source
                For detailed spec, check environment from source definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_env_var_source.py
        """
        if not isinstance(env_from, V1EnvFromSource):
            raise ValueError(
                'invalid argument. Must be of instance `V1EnvFromSource`.')
        self.env_from = create_and_append(self.env_from, env_from)
        return self
    def set_image_pull_policy(self, image_pull_policy):
        """Set image pull policy for the container.

        Args:
            image_pull_policy: One of `Always`, `Never`, `IfNotPresent`.
        """
        if image_pull_policy not in ['Always', 'Never', 'IfNotPresent']:
            raise ValueError(
                'Invalid imagePullPolicy. Must be one of `Always`, `Never`, `IfNotPresent`.'
            )
        self.image_pull_policy = image_pull_policy
        return self
    def add_port(self, container_port):
        """Add a container port to the container.

        Args:
            container_port: Kubernetes container port
                For detailed spec, check container port definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_container_port.py
        """
        if not isinstance(container_port, V1ContainerPort):
            raise ValueError(
                'invalid argument. Must be of instance `V1ContainerPort`.')
        self.ports = create_and_append(self.ports, container_port)
        return self
    def set_security_context(self, security_context):
        """Set security configuration to be applied on the container.

        Args:
            security_context: Kubernetes security context
                For detailed spec, check security context definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_security_context.py
        """
        if not isinstance(security_context, V1SecurityContext):
            raise ValueError(
                'invalid argument. Must be of instance `V1SecurityContext`.')
        self.security_context = security_context
        return self
    def set_stdin(self, stdin=True):
        """
        Whether this container should allocate a buffer for stdin in the container
        runtime. If this is not set, reads from stdin in the container will always
        result in EOF.

        Args:
            stdin: boolean flag
        """
        self.stdin = stdin
        return self
    def set_stdin_once(self, stdin_once=True):
        """
        Whether the container runtime should close the stdin channel after it has
        been opened by a single attach. When stdin is true the stdin stream will
        remain open across multiple attach sessions. If stdinOnce is set to true,
        stdin is opened on container start, is empty until the first client attaches
        to stdin, and then remains open and accepts data until the client
        disconnects, at which time stdin is closed and remains closed until the
        container is restarted. If this flag is false, a container processes that
        reads from stdin will never receive an EOF.

        Args:
            stdin_once: boolean flag
        """
        self.stdin_once = stdin_once
        return self
    def set_termination_message_path(self, termination_message_path):
        """
        Path at which the file to which the container's termination message will be
        written is mounted into the container's filesystem. Message written is
        intended to be brief final status, such as an assertion failure message.
        Will be truncated by the node if greater than 4096 bytes. The total message
        length across all containers will be limited to 12kb.

        Args:
            termination_message_path: path for the termination message
        """
        self.termination_message_path = termination_message_path
        return self
    def set_termination_message_policy(self, termination_message_policy):
        """
        Indicate how the termination message should be populated. File will use the
        contents of terminationMessagePath to populate the container status message
        on both success and failure. FallbackToLogsOnError will use the last chunk
        of container log output if the termination message file is empty and the
        container exited with an error. The log output is limited to 2048 bytes or
        80 lines, whichever is smaller.

        Args:
            termination_message_policy: `File` or `FallbackToLogsOnError`
        """
        if termination_message_policy not in ['File', 'FallbackToLogsOnError']:
            raise ValueError(
                'terminationMessagePolicy must be `File` or `FallbackToLogsOnError`'
            )
        self.termination_message_policy = termination_message_policy
        return self
    def set_tty(self, tty=True):
        """
        Whether this container should allocate a TTY for itself, also requires
        'stdin' to be true.

        Args:
            tty: boolean flag
        """
        self.tty = tty
        return self
    def set_readiness_probe(self, readiness_probe):
        """
        Set a readiness probe for the container.

        Args:
            readiness_probe: Kubernetes readiness probe
                For detailed spec, check probe definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_probe.py
        """
        if not isinstance(readiness_probe, V1Probe):
            raise ValueError(
                'invalid argument. Must be of instance `V1Probe`.')
        self.readiness_probe = readiness_probe
        return self
    def set_liveness_probe(self, liveness_probe):
        """
        Set a liveness probe for the container.

        Args:
            liveness_probe: Kubernetes liveness probe
                For detailed spec, check probe definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_probe.py
        """
        if not isinstance(liveness_probe, V1Probe):
            raise ValueError(
                'invalid argument. Must be of instance `V1Probe`.')
        self.liveness_probe = liveness_probe
        return self
    def set_lifecycle(self, lifecycle):
        """
        Setup a lifecycle config for the container.

        Args:
            lifecycle: Kubernetes lifecycle
                For detailed spec, lifecycle definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_lifecycle.py
        """
        if not isinstance(lifecycle, V1Lifecycle):
            raise ValueError(
                'invalid argument. Must be of instance `V1Lifecycle`.')
        self.lifecycle = lifecycle
        return self
class Sidecar(Container):
    """
    Represents an argo workflow sidecar (io.argoproj.workflow.v1alpha1.Sidecar)
    to be used in `sidecars` property in argo's workflow template
    (io.argoproj.workflow.v1alpha1.Template).

    `Sidecar` inherits from `Container` class with an addition of the
    `mirror_volume_mounts` attribute (`mirrorVolumeMounts` property).

    See https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json

    Example::

        from kfp.dsl import ContainerOp, Sidecar

        # creates a `ContainerOp` and adds a redis `Sidecar`
        op = (ContainerOp(name='foo-op', image='busybox:latest')
              .add_sidecar(
                  Sidecar(name='redis', image='redis:alpine')))
    """
    """
    Attributes:
        swagger_types (dict): The key is attribute name
            and the value is attribute type.
        attribute_map (dict): The key is attribute name
            and the value is json key in definition.
    """
    # adds `mirror_volume_mounts` to `Sidecar` swagger definition
    # NOTE inherits definition from `V1Container` rather than `Container`
    # because `Container` has no `name` property.
    swagger_types = dict(
        **V1Container.swagger_types, mirror_volume_mounts='bool')
    attribute_map = dict(
        **V1Container.attribute_map, mirror_volume_mounts='mirrorVolumeMounts')
    def __init__(self,
                 name: str,
                 image: str,
                 command: StringOrStringList = None,
                 args: StringOrStringList = None,
                 mirror_volume_mounts: bool = None,
                 **kwargs):
        """Creates a new instance of `Sidecar`.

        Args:
            name {str}: unique name for the sidecar container
            image {str}: image to use for the sidecar container, e.g. redis:alpine
            command {StringOrStringList}: entrypoint array. Not executed within a shell.
            args {StringOrStringList}: arguments to the entrypoint.
            mirror_volume_mounts {bool}: MirrorVolumeMounts will mount the same
                volumes specified in the main container to the sidecar (including artifacts),
                at the same mountPaths. This enables dind daemon to partially see the same
                filesystem as the main container in order to use features such as docker
                volume binding
            **kwargs: keyword arguments available for `Container`
        """
        # Scalars are normalized to lists so argo receives proper arrays.
        super().__init__(
            name=name,
            image=image,
            command=as_string_list(command),
            args=as_string_list(args),
            **kwargs)
        self.mirror_volume_mounts = mirror_volume_mounts
    def set_mirror_volume_mounts(self, mirror_volume_mounts=True):
        """
        Setting mirrorVolumeMounts to true will mount the same volumes specified
        in the main container to the sidecar (including artifacts), at the same
        mountPaths. This enables dind daemon to partially see the same filesystem
        as the main container in order to use features such as docker volume
        binding.

        Args:
            mirror_volume_mounts: boolean flag
        """
        self.mirror_volume_mounts = mirror_volume_mounts
        return self
    @property
    def inputs(self):
        """A list of PipelineParam found in the Sidecar object."""
        return _pipeline_param.extract_pipelineparams_from_any(self)
def _make_hash_based_id_for_op(op):
# Generating a unique ID for Op. For class instances, the hash is the object's memory address which is unique.
return op.human_name + ' ' + hex(2**63 + hash(op))[2:]
# Pointer to the function that generates a unique ID for an Op instance.
# (A compiler or registry may swap this out to register Op instances in
# some external system.)
_register_op_handler = _make_hash_based_id_for_op
class BaseOp(object):
    """Base class for pipeline operators.

    Holds the container-agnostic argo template fields (node selector,
    volumes, tolerations, pod metadata, retries, sidecars) plus the
    dependency bookkeeping shared by all op types.
    """
    # list of attributes that might have pipeline params - used to generate
    # the input parameters during compilation.
    # Excludes `file_outputs` and `outputs` as they are handled separately
    # in the compilation process to generate the DAGs and task io parameters.
    attrs_with_pipelineparams = [
        'node_selector', 'volumes', 'pod_annotations', 'pod_labels',
        'num_retries', 'sidecars', 'tolerations'
    ]
    def __init__(self,
                 name: str,
                 sidecars: List[Sidecar] = None,
                 is_exit_handler: bool = False):
        """Create a new instance of BaseOp

        Args:
            name: the name of the op. It does not have to be unique within a pipeline
                because the pipeline will generates a unique new name in case of conflicts.
            sidecars: the list of `Sidecar` objects describing the sidecar containers to deploy
                together with the `main` container.
            is_exit_handler: Whether it is used as an exit handler.
        """
        valid_name_regex = r'^[A-Za-z][A-Za-z0-9\s_-]*$'
        if not re.match(valid_name_regex, name):
            raise ValueError(
                'Only letters, numbers, spaces, "_", and "-" are allowed in name. Must begin with letter: %s'
                % (name))
        self.is_exit_handler = is_exit_handler
        # human_name must exist to construct operator's name
        self.human_name = name
        # ID of the current Op. Ideally, it should be generated by the compiler that sees the bigger context.
        # However, the ID is used in the task output references (PipelineParams) which can be serialized to strings.
        # Because of this we must obtain a unique ID right now.
        self.name = _register_op_handler(self)
        # TODO: proper k8s definitions so that `convert_k8s_obj_to_json` can be used?
        # `io.argoproj.workflow.v1alpha1.Template` properties
        self.node_selector = {}
        self.volumes = []
        self.tolerations = []
        self.pod_annotations = {}
        self.pod_labels = {}
        self.num_retries = 0
        self.sidecars = sidecars or []
        # attributes specific to `BaseOp`
        self._inputs = []
        self.dependent_names = []
    @property
    def inputs(self):
        """List of PipelineParams that will be converted into input parameters
        (io.argoproj.workflow.v1alpha1.Inputs) for the argo workflow.
        """
        # Iterate through and extract all the `PipelineParam` in Op when
        # called the 1st time (because there are in-place updates to `PipelineParam`
        # during compilation - remove in-place updates for easier debugging?)
        if not self._inputs:
            self._inputs = []
            # TODO replace with proper k8s obj?
            for key in self.attrs_with_pipelineparams:
                self._inputs += [
                    param for param in _pipeline_param.
                    extract_pipelineparams_from_any(getattr(self, key))
                ]
            # keep only unique
            self._inputs = list(set(self._inputs))
        return self._inputs
    @inputs.setter
    def inputs(self, value):
        # to support in-place updates
        self._inputs = value
    def apply(self, mod_func):
        """Applies a modifier function to self. The function should return the passed object.
        This is needed to chain "extension methods" to this class.

        Example::

            from kfp.gcp import use_gcp_secret
            task = (
                train_op(...)
                    .set_memory_request('1GB')
                    .apply(use_gcp_secret('user-gcp-sa'))
                    .set_memory_limit('2GB')
            )
        """
        return mod_func(self)
    def after(self, *ops):
        """Specify explicit dependency on other ops."""
        for op in ops:
            self.dependent_names.append(op.name)
        return self
    def add_volume(self, volume):
        """Add K8s volume to the container

        Args:
            volume: Kubernetes volumes
                For detailed spec, check volume definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume.py
        """
        self.volumes.append(volume)
        return self
    def add_toleration(self, tolerations: V1Toleration):
        """Add K8s tolerations

        Args:
            tolerations: Kubernetes toleration
                For detailed spec, check toleration definition
                https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_toleration.py
        """
        self.tolerations.append(tolerations)
        return self
    def add_node_selector_constraint(self, label_name, value):
        """Add a constraint for nodeSelector. Each constraint is a key-value pair label. For the
        container to be eligible to run on a node, the node must have each of the constraints appeared
        as labels.

        Args:
            label_name: The name of the constraint label.
            value: The value of the constraint label.
        """
        self.node_selector[label_name] = value
        return self
    def add_pod_annotation(self, name: str, value: str):
        """Adds a pod's metadata annotation.

        Args:
            name: The name of the annotation.
            value: The value of the annotation.
        """
        self.pod_annotations[name] = value
        return self
    def add_pod_label(self, name: str, value: str):
        """Adds a pod's metadata label.

        Args:
            name: The name of the label.
            value: The value of the label.
        """
        self.pod_labels[name] = value
        return self
    def set_retry(self, num_retries: int):
        """Sets the number of times the task is retried until it's declared failed.

        Args:
            num_retries: Number of times to retry on failures.
        """
        self.num_retries = num_retries
        return self
    def add_sidecar(self, sidecar: Sidecar):
        """Add a sidecar to the Op.

        Args:
            sidecar: SideCar object.
        """
        self.sidecars.append(sidecar)
        return self
    def __repr__(self):
        return str({self.__class__.__name__: self.__dict__})
from ._pipeline_volume import PipelineVolume #The import is here to prevent circular reference problems.
class ContainerOp(BaseOp):
"""
Represents an op implemented by a container image.
Example::
from kfp import dsl
from kubernetes.client.models import V1EnvVar, V1SecretKeySelector
@dsl.pipeline(
name='foo',
description='hello world')
def foo_pipeline(tag: str, pull_image_policy: str):
# configures artifact location
artifact_location = dsl.ArtifactLocation.s3(
bucket="foobar",
endpoint="minio-service:9000",
insecure=True,
access_key_secret=V1SecretKeySelector(name="minio", key="accesskey"),
secret_key_secret=V1SecretKeySelector(name="minio", key="secretkey"))
# any attributes can be parameterized (both serialized string or actual PipelineParam)
op = dsl.ContainerOp(name='foo',
image='busybox:%s' % tag,
# pass in sidecars list
sidecars=[dsl.Sidecar('print', 'busybox:latest', command='echo "hello"')],
# pass in k8s container kwargs
container_kwargs={'env': [V1EnvVar('foo', 'bar')]},
# configures artifact location
artifact_location=artifact_location)
# set `imagePullPolicy` property for `container` with `PipelineParam`
op.container.set_pull_image_policy(pull_image_policy)
# add sidecar with parameterized image tag
# sidecar follows the argo sidecar swagger spec
op.add_sidecar(dsl.Sidecar('redis', 'redis:%s' % tag).set_image_pull_policy('Always'))
"""
# list of attributes that might have pipeline params - used to generate
# the input parameters during compilation.
# Excludes `file_outputs` and `outputs` as they are handled separately
# in the compilation process to generate the DAGs and task io parameters.
def __init__(self,
name: str,
image: str,
command: StringOrStringList = None,
arguments: StringOrStringList = None,
sidecars: List[Sidecar] = None,
container_kwargs: Dict = None,
file_outputs: Dict[str, str] = None,
output_artifact_paths : Dict[str, str]=None,
artifact_location: V1alpha1ArtifactLocation=None,
is_exit_handler=False,
pvolumes: Dict[str, V1Volume] = None,
):
"""Create a new instance of ContainerOp.
Args:
name: the name of the op. It does not have to be unique within a pipeline
because the pipeline will generates a unique new name in case of conflicts.
image: the container image name, such as 'python:3.5-jessie'
command: the command to run in the container.
If None, uses default CMD in defined in container.
arguments: the arguments of the command. The command can include "%s" and supply
a PipelineParam as the string replacement. For example, ('echo %s' % input_param).
At container run time the argument will be 'echo param_value'.
sidecars: the list of `Sidecar` objects describing the sidecar containers to deploy
together with the `main` container.
container_kwargs: the dict of additional keyword arguments to pass to the
op's `Container` definition.
file_outputs: Maps output labels to local file paths. At pipeline run time,
the value of a PipelineParam is saved to its corresponding local file. It's
one way for outside world to receive outputs of the container.
output_artifact_paths: Maps output artifact labels to local artifact file paths.
It has the following default artifact paths during compile time.
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json',
'mlpipeline-metrics': '/mlpipeline-metrics.json'}
artifact_location: configures the default artifact location for artifacts
in the argo workflow template. Must be a `V1alpha1ArtifactLocation`
object.
is_exit_handler: Whether it is used as an exit handler.
pvolumes: Dictionary for the user to match a path on the op's fs with a
V1Volume or it inherited type.
E.g {"/my/path": vol, "/mnt": other_op.pvolumes["/output"]}.
"""
super().__init__(name=name, sidecars=sidecars, is_exit_handler=is_exit_handler)
self.attrs_with_pipelineparams = BaseOp.attrs_with_pipelineparams + ['_container', 'artifact_location'] #Copying the BaseOp class variable!
# convert to list if not a list
command = as_string_list(command)
arguments = as_string_list(arguments)
# `container` prop in `io.argoproj.workflow.v1alpha1.Template`
container_kwargs = container_kwargs or {}
self._container = Container(
image=image, args=arguments, command=command, **container_kwargs)
# NOTE for backward compatibility (remove in future?)
# proxy old ContainerOp callables to Container
# attributes to NOT proxy
ignore_set = frozenset(['to_dict', 'to_str'])
# decorator func to proxy a method in `Container` into `ContainerOp`
def _proxy(proxy_attr):
"""Decorator func to proxy to ContainerOp.container"""
def _decorated(*args, **kwargs):
# execute method
ret = getattr(self._container, proxy_attr)(*args, **kwargs)
if ret == self._container:
return self
return ret
return deprecation_warning(_decorated, proxy_attr, proxy_attr)
# iter thru container and attach a proxy func to the container method
for attr_to_proxy in dir(self._container):
func = getattr(self._container, attr_to_proxy)
# ignore private methods
if hasattr(func, '__call__') and (attr_to_proxy[0] != '_') and (
attr_to_proxy not in ignore_set):
# only proxy public callables
setattr(self, attr_to_proxy, _proxy(attr_to_proxy))
# attributes specific to `ContainerOp`
self.file_outputs = file_outputs
self.output_artifact_paths = output_artifact_paths or {}
self.artifact_location = artifact_location
self._metadata = None
self.outputs = {}
if file_outputs:
self.outputs = {
name: _pipeline_param.PipelineParam(name, op_name=self.name)
for name in file_outputs.keys()
}
self.output = None
if len(self.outputs) == 1:
self.output = list(self.outputs.values())[0]
self.pvolumes = {}
if pvolumes:
for mount_path, pvolume in pvolumes.items():
if hasattr(pvolume, "dependent_names"): #TODO: Replace with type check
self.dependent_names.extend(pvolume.dependent_names)
else:
pvolume = PipelineVolume(volume=pvolume)
self.pvolumes[mount_path] = pvolume.after(self)
self.add_volume(pvolume)
self._container.add_volume_mount(V1VolumeMount(
name=pvolume.name,
mount_path=mount_path
))
self.pvolume = None
if self.pvolumes and len(self.pvolumes) == 1:
self.pvolume = list(self.pvolumes.values())[0]
    @property
    def command(self):
        """Entrypoint array of the underlying `Container` (proxied)."""
        return self._container.command

    @command.setter
    def command(self, value):
        # Normalize scalars/sequences to a list of strings (None stays None).
        self._container.command = as_string_list(value)
    @property
    def arguments(self):
        """Arguments to the entrypoint of the underlying `Container` (proxied)."""
        return self._container.args

    @arguments.setter
    def arguments(self, value):
        # Normalize scalars/sequences to a list of strings (None stays None).
        self._container.args = as_string_list(value)
    @property
    def container(self):
        """`Container` object that represents the `container` property in
        `io.argoproj.workflow.v1alpha1.Template`. Can be used to update the
        container configurations.

        Example:
            import kfp.dsl as dsl
            from kubernetes.client.models import V1EnvVar

            @dsl.pipeline(name='example_pipeline')
            def immediate_value_pipeline():
                op1 = (dsl.ContainerOp(name='example', image='nginx:alpine')
                          .container
                          .add_env_variable(V1EnvVar(name='HOST', value='foo.bar'))
                          .add_env_variable(V1EnvVar(name='PORT', value='80'))
                          .parent # return the parent `ContainerOp`
                      )
        """
        return self._container
def _set_metadata(self, metadata):
'''_set_metadata passes the containerop the metadata information
and configures the right output
Args:
metadata (ComponentMeta): component metadata
'''
if not isinstance(metadata, ComponentMeta):
raise ValueError('_set_medata is expecting ComponentMeta.')
self._metadata = metadata
if self.file_outputs:
for output in self.file_outputs.keys():
output_type = self.outputs[output].param_type
for output_meta in self._metadata.outputs:
if output_meta.name == output:
output_type = output_meta.param_type
self.outputs[output].param_type = output_type
self.output = None
if len(self.outputs) == 1:
self.output = list(self.outputs.values())[0]
# Proxy the deprecated `image` / `env_variables` ContainerOp properties to
# `ContainerOp.container`, emitting a PendingDeprecationWarning on access.
ContainerOp = _proxy_container_op_props(ContainerOp)
SDK/DSL - ContainerOp.apply method now supports functions that do not return anything (#1226)
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import re
import warnings
from typing import Any, Dict, List, TypeVar, Union, Callable, Optional, Sequence

from argo.models import V1alpha1ArtifactLocation
from kubernetes.client import V1Toleration
from kubernetes.client.models import (
    V1Container, V1EnvVar, V1EnvFromSource, V1SecurityContext, V1Probe,
    V1ResourceRequirements, V1VolumeDevice, V1VolumeMount, V1ContainerPort,
    V1Lifecycle, V1Volume
)

from . import _pipeline_param
from ._metadata import ComponentMeta
# generic type variable used by small list helpers below
T = TypeVar('T')

# type alias: either a plain string or a list of strings
StringOrStringList = Union[str, List[str]]
# util functions
def deprecation_warning(func: Callable, op_name: str,
                        container_name: str) -> Callable:
    """Wrap `func` so that every call emits a PendingDeprecationWarning.

    Used to route legacy `ContainerOp` attributes to their new home on
    `ContainerOp.container` while warning callers.

    Args:
        func: the callable to wrap (a `Container` method or property accessor).
        op_name: deprecated `ContainerOp` attribute name shown in the warning.
        container_name: replacement `ContainerOp.container` attribute name.

    Returns:
        A wrapper that warns, then delegates to `func`.
    """
    # functools.wraps preserves the wrapped callable's __name__/__doc__,
    # which the plain inner function previously discarded.
    @functools.wraps(func)
    def _wrapped(*args, **kwargs):
        warnings.warn(
            '`dsl.ContainerOp.%s` will be removed in future releases. '
            'Use `dsl.ContainerOp.container.%s` instead.' %
            (op_name, container_name), PendingDeprecationWarning)
        return func(*args, **kwargs)
    return _wrapped
def _create_getter_setter(prop):
"""Create a tuple of getter and setter methods for a property in `Container`."""
def _getter(self):
return getattr(self._container, prop)
def _setter(self, value):
return setattr(self._container, prop, value)
return _getter, _setter
def _proxy_container_op_props(cls: "ContainerOp"):
    """Install deprecated property proxies on the `ContainerOp` class.

    Each property in the map below is exposed on `ContainerOp` but actually
    reads/writes the corresponding attribute of `ContainerOp.container`,
    emitting a PendingDeprecationWarning on every access.
    """
    # mapping to proxy: ContainerOp.<prop> => Container.<prop>
    prop_map = {'image': 'image', 'env_variables': 'env'}
    for op_prop, container_prop in prop_map.items():
        # build raw accessors, then wrap them with the deprecation warning
        raw_getter, raw_setter = _create_getter_setter(container_prop)
        proxied = property(
            deprecation_warning(raw_getter, op_prop, container_prop),
            deprecation_warning(raw_setter, op_prop, container_prop),
        )
        setattr(cls, op_prop, proxied)
    return cls
def as_string_list(
        list_or_str: Optional[Union[Any, Sequence[Any]]]) -> Optional[List[str]]:
    """Coerce a value to a list of strings.

    Args:
        list_or_str: None, a single value, or a sequence of values.

    Returns:
        None when the input is None (the previous `List[str]` annotation was
        wrong); otherwise a list whose items are `str()`-converted. A lone
        string becomes a one-element list rather than a list of characters.
    """
    if list_or_str is None:
        return None
    if isinstance(list_or_str, Sequence) and not isinstance(list_or_str, str):
        list_value = list_or_str
    else:
        list_value = [list_or_str]
    return [str(item) for item in list_value]
def create_and_append(current_list: Union[List[T], None], item: T) -> List[T]:
    """Append `item` to `current_list`, allocating a fresh list when the
    argument is falsy (None or empty), and return the resulting list."""
    # `or []` intentionally replaces a falsy (None/empty) input with a new list
    target = current_list or []
    target.append(item)
    return target
class Container(V1Container):
    """
    A wrapper over k8s container definition object (io.k8s.api.core.v1.Container),
    which is used to represent the `container` property in argo's workflow
    template (io.argoproj.workflow.v1alpha1.Template).
    `Container` class also comes with utility functions to set and update the
    the various properties for a k8s container definition.
    NOTE: A notable difference is that `name` is not required and will not be
    processed for `Container` (in contrast to `V1Container` where `name` is a
    required property).
    See:
    - https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_container.py
    - https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json
    Example:
      from kfp.dsl import ContainerOp
      from kubernetes.client.models import V1EnvVar
      # creates a operation
      op = ContainerOp(name='bash-ops',
                       image='busybox:latest',
                       command=['echo'],
                       arguments=['$MSG'])
      # returns a `Container` object from `ContainerOp`
      # and add an environment variable to `Container`
      op.container.add_env_variable(V1EnvVar(name='MSG', value='hello world'))
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # remove `name` from swagger_types so `name` is not generated in the JSON
    swagger_types = {
        key: value
        for key, value in V1Container.swagger_types.items() if key != 'name'
    }
    # likewise drop `name` from the JSON attribute mapping
    attribute_map = {
        key: value
        for key, value in V1Container.attribute_map.items() if key != 'name'
    }

    def __init__(self, image: str, command: List[str], args: List[str],
                 **kwargs):
        """Creates a new instance of `Container`.
        Args:
            image {str}: image to use, e.g. busybox:latest
            command {List[str]}: entrypoint array. Not executed within a shell.
            args {List[str]}: arguments to entrypoint.
            **kwargs: keyword arguments for `V1Container`
        """
        # set name to '' if name is not provided
        # k8s container MUST have a name
        # argo workflow template does not need a name for container def
        if not kwargs.get('name'):
            kwargs['name'] = ''
        super(Container, self).__init__(
            image=image, command=command, args=args, **kwargs)

    def _validate_memory_string(self, memory_string):
        """Validate a given string is valid for memory request or limit.

        PipelineParams with no concrete value are accepted unvalidated (their
        value is only known at run time).
        """
        if isinstance(memory_string, _pipeline_param.PipelineParam):
            if memory_string.value:
                memory_string = memory_string.value
            else:
                return
        if re.match(r'^[0-9]+(E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki){0,1}$',
                    memory_string) is None:
            raise ValueError(
                'Invalid memory string. Should be an integer, or integer followed '
                'by one of "E|Ei|P|Pi|T|Ti|G|Gi|M|Mi|K|Ki"')

    def _validate_cpu_string(self, cpu_string):
        "Validate a given string is valid for cpu request or limit."
        # PipelineParams with no concrete value skip validation (run-time only).
        if isinstance(cpu_string, _pipeline_param.PipelineParam):
            if cpu_string.value:
                cpu_string = cpu_string.value
            else:
                return
        if re.match(r'^[0-9]+m$', cpu_string) is not None:
            return
        try:
            float(cpu_string)
        except ValueError:
            raise ValueError(
                'Invalid cpu string. Should be float or integer, or integer followed '
                'by "m".')

    def _validate_positive_number(self, str_value, param_name):
        "Validate a given string is in positive integer format."
        # PipelineParams with no concrete value skip validation (run-time only).
        if isinstance(str_value, _pipeline_param.PipelineParam):
            if str_value.value:
                str_value = str_value.value
            else:
                return
        try:
            int_value = int(str_value)
        except ValueError:
            raise ValueError(
                'Invalid {}. Should be integer.'.format(param_name))
        if int_value <= 0:
            raise ValueError('{} must be positive integer.'.format(param_name))

    def add_resource_limit(self, resource_name, value):
        """Add the resource limit of the container.
        Args:
          resource_name: The name of the resource. It can be cpu, memory, etc.
          value: The string value of the limit.
        """
        # lazily create the nested V1ResourceRequirements / limits dict
        self.resources = self.resources or V1ResourceRequirements()
        self.resources.limits = self.resources.limits or {}
        self.resources.limits.update({resource_name: value})
        return self

    def add_resource_request(self, resource_name, value):
        """Add the resource request of the container.
        Args:
          resource_name: The name of the resource. It can be cpu, memory, etc.
          value: The string value of the request.
        """
        # lazily create the nested V1ResourceRequirements / requests dict
        self.resources = self.resources or V1ResourceRequirements()
        self.resources.requests = self.resources.requests or {}
        self.resources.requests.update({resource_name: value})
        return self

    def set_memory_request(self, memory):
        """Set memory request (minimum) for this operator.
        Args:
          memory: a string which can be a number or a number followed by one of
                  "E", "P", "T", "G", "M", "K" (binary suffixes Ei..Ki accepted too).
        """
        self._validate_memory_string(memory)
        return self.add_resource_request("memory", memory)

    def set_memory_limit(self, memory):
        """Set memory limit (maximum) for this operator.
        Args:
          memory: a string which can be a number or a number followed by one of
                  "E", "P", "T", "G", "M", "K" (binary suffixes Ei..Ki accepted too).
        """
        self._validate_memory_string(memory)
        return self.add_resource_limit("memory", memory)

    def set_cpu_request(self, cpu):
        """Set cpu request (minimum) for this operator.
        Args:
          cpu: A string which can be a number or a number followed by "m", which means 1/1000.
        """
        self._validate_cpu_string(cpu)
        return self.add_resource_request("cpu", cpu)

    def set_cpu_limit(self, cpu):
        """Set cpu limit (maximum) for this operator.
        Args:
          cpu: A string which can be a number or a number followed by "m", which means 1/1000.
        """
        self._validate_cpu_string(cpu)
        return self.add_resource_limit("cpu", cpu)

    def set_gpu_limit(self, gpu, vendor="nvidia"):
        """Set gpu limit for the operator. This function add '<vendor>.com/gpu' into resource limit.
        Note that there is no need to add GPU request. GPUs are only supposed to be specified in
        the limits section. See https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/.
        Args:
          gpu: A string which must be a positive number.
          vendor: Optional. A string which is the vendor of the requested gpu. The supported values
            are: 'nvidia' (default), and 'amd'.
        """
        self._validate_positive_number(gpu, 'gpu')
        if vendor != 'nvidia' and vendor != 'amd':
            raise ValueError('vendor can only be nvidia or amd.')
        return self.add_resource_limit("%s.com/gpu" % vendor, gpu)

    def add_volume_mount(self, volume_mount):
        """Add volume to the container
        Args:
          volume_mount: Kubernetes volume mount
          For detailed spec, check volume mount definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume_mount.py
        """
        if not isinstance(volume_mount, V1VolumeMount):
            raise ValueError(
                'invalid argument. Must be of instance `V1VolumeMount`.')
        self.volume_mounts = create_and_append(self.volume_mounts,
                                               volume_mount)
        return self

    def add_volume_devices(self, volume_device):
        """
        Add a block device to be used by the container.
        Args:
          volume_device: Kubernetes volume device
          For detailed spec, volume device definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume_device.py
        """
        if not isinstance(volume_device, V1VolumeDevice):
            raise ValueError(
                'invalid argument. Must be of instance `V1VolumeDevice`.')
        self.volume_devices = create_and_append(self.volume_devices,
                                                volume_device)
        return self

    def add_env_variable(self, env_variable):
        """Add environment variable to the container.
        Args:
          env_variable: Kubernetes environment variable
          For detailed spec, check environment variable definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_env_var.py
        """
        if not isinstance(env_variable, V1EnvVar):
            raise ValueError(
                'invalid argument. Must be of instance `V1EnvVar`.')
        self.env = create_and_append(self.env, env_variable)
        return self

    def add_env_from(self, env_from):
        """Add a source to populate environment variables int the container.
        Args:
          env_from: Kubernetes environment from source
          For detailed spec, check environment from source definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_env_var_source.py
        """
        if not isinstance(env_from, V1EnvFromSource):
            raise ValueError(
                'invalid argument. Must be of instance `V1EnvFromSource`.')
        self.env_from = create_and_append(self.env_from, env_from)
        return self

    def set_image_pull_policy(self, image_pull_policy):
        """Set image pull policy for the container.
        Args:
          image_pull_policy: One of `Always`, `Never`, `IfNotPresent`.
        """
        if image_pull_policy not in ['Always', 'Never', 'IfNotPresent']:
            raise ValueError(
                'Invalid imagePullPolicy. Must be one of `Always`, `Never`, `IfNotPresent`.'
            )
        self.image_pull_policy = image_pull_policy
        return self

    def add_port(self, container_port):
        """Add a container port to the container.
        Args:
          container_port: Kubernetes container port
          For detailed spec, check container port definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_container_port.py
        """
        if not isinstance(container_port, V1ContainerPort):
            raise ValueError(
                'invalid argument. Must be of instance `V1ContainerPort`.')
        self.ports = create_and_append(self.ports, container_port)
        return self

    def set_security_context(self, security_context):
        """Set security configuration to be applied on the container.
        Args:
          security_context: Kubernetes security context
          For detailed spec, check security context definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_security_context.py
        """
        if not isinstance(security_context, V1SecurityContext):
            raise ValueError(
                'invalid argument. Must be of instance `V1SecurityContext`.')
        self.security_context = security_context
        return self

    def set_stdin(self, stdin=True):
        """
        Whether this container should allocate a buffer for stdin in the container
        runtime. If this is not set, reads from stdin in the container will always
        result in EOF.
        Args:
          stdin: boolean flag
        """
        self.stdin = stdin
        return self

    def set_stdin_once(self, stdin_once=True):
        """
        Whether the container runtime should close the stdin channel after it has
        been opened by a single attach. When stdin is true the stdin stream will
        remain open across multiple attach sessions. If stdinOnce is set to true,
        stdin is opened on container start, is empty until the first client attaches
        to stdin, and then remains open and accepts data until the client
        disconnects, at which time stdin is closed and remains closed until the
        container is restarted. If this flag is false, a container processes that
        reads from stdin will never receive an EOF.
        Args:
          stdin_once: boolean flag
        """
        self.stdin_once = stdin_once
        return self

    def set_termination_message_path(self, termination_message_path):
        """
        Path at which the file to which the container's termination message will be
        written is mounted into the container's filesystem. Message written is
        intended to be brief final status, such as an assertion failure message.
        Will be truncated by the node if greater than 4096 bytes. The total message
        length across all containers will be limited to 12kb.
        Args:
          termination_message_path: path for the termination message
        """
        self.termination_message_path = termination_message_path
        return self

    def set_termination_message_policy(self, termination_message_policy):
        """
        Indicate how the termination message should be populated. File will use the
        contents of terminationMessagePath to populate the container status message
        on both success and failure. FallbackToLogsOnError will use the last chunk
        of container log output if the termination message file is empty and the
        container exited with an error. The log output is limited to 2048 bytes or
        80 lines, whichever is smaller.
        Args:
          termination_message_policy: `File` or `FallbackToLogsOnError`
        """
        if termination_message_policy not in ['File', 'FallbackToLogsOnError']:
            raise ValueError(
                'terminationMessagePolicy must be `File` or `FallbackToLogsOnError`'
            )
        self.termination_message_policy = termination_message_policy
        return self

    def set_tty(self, tty=True):
        """
        Whether this container should allocate a TTY for itself, also requires
        'stdin' to be true.
        Args:
          tty: boolean flag
        """
        self.tty = tty
        return self

    def set_readiness_probe(self, readiness_probe):
        """
        Set a readiness probe for the container.
        Args:
          readiness_probe: Kubernetes readiness probe
          For detailed spec, check probe definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_probe.py
        """
        if not isinstance(readiness_probe, V1Probe):
            raise ValueError(
                'invalid argument. Must be of instance `V1Probe`.')
        self.readiness_probe = readiness_probe
        return self

    def set_liveness_probe(self, liveness_probe):
        """
        Set a liveness probe for the container.
        Args:
          liveness_probe: Kubernetes liveness probe
          For detailed spec, check probe definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_probe.py
        """
        if not isinstance(liveness_probe, V1Probe):
            raise ValueError(
                'invalid argument. Must be of instance `V1Probe`.')
        self.liveness_probe = liveness_probe
        return self

    def set_lifecycle(self, lifecycle):
        """
        Setup a lifecycle config for the container.
        Args:
          lifecycle: Kubernetes lifecycle
          For detailed spec, lifecycle definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_lifecycle.py
        """
        if not isinstance(lifecycle, V1Lifecycle):
            raise ValueError(
                'invalid argument. Must be of instance `V1Lifecycle`.')
        self.lifecycle = lifecycle
        return self
class Sidecar(Container):
    """
    Represents an argo workflow sidecar (io.argoproj.workflow.v1alpha1.Sidecar)
    to be used in `sidecars` property in argo's workflow template
    (io.argoproj.workflow.v1alpha1.Template).
    `Sidecar` inherits from `Container` class with an addition of `mirror_volume_mounts`
    attribute (`mirrorVolumeMounts` property).
    See https://github.com/argoproj/argo/blob/master/api/openapi-spec/swagger.json
    Example
      from kfp.dsl import ContainerOp, Sidecar
      # creates a `ContainerOp` and adds a redis `Sidecar`
      op = (ContainerOp(name='foo-op', image='busybox:latest')
               .add_sidecar(
                   Sidecar(name='redis', image='redis:alpine')))
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # adds `mirror_volume_mounts` to `Sidecar` swagger definition
    # NOTE inherits definition from `V1Container` rather than `Container`
    #      because `Container` has no `name` property.
    swagger_types = dict(
        **V1Container.swagger_types, mirror_volume_mounts='bool')
    attribute_map = dict(
        **V1Container.attribute_map, mirror_volume_mounts='mirrorVolumeMounts')

    def __init__(self,
                 name: str,
                 image: str,
                 command: StringOrStringList = None,
                 args: StringOrStringList = None,
                 mirror_volume_mounts: bool = None,
                 **kwargs):
        """Creates a new instance of `Sidecar`.
        Args:
            name {str}: unique name for the sidecar container
            image {str}: image to use for the sidecar container, e.g. redis:alpine
            command {StringOrStringList}: entrypoint array. Not executed within a shell.
            args {StringOrStringList}: arguments to the entrypoint.
            mirror_volume_mounts {bool}: MirrorVolumeMounts will mount the same
                volumes specified in the main container to the sidecar (including artifacts),
                at the same mountPaths. This enables dind daemon to partially see the same
                filesystem as the main container in order to use features such as docker
                volume binding
            **kwargs: keyword arguments available for `Container`
        """
        super().__init__(
            name=name,
            image=image,
            command=as_string_list(command),
            args=as_string_list(args),
            **kwargs)
        self.mirror_volume_mounts = mirror_volume_mounts

    def set_mirror_volume_mounts(self, mirror_volume_mounts=True):
        """
        Setting mirrorVolumeMounts to true will mount the same volumes specified
        in the main container to the sidecar (including artifacts), at the same
        mountPaths. This enables dind daemon to partially see the same filesystem
        as the main container in order to use features such as docker volume
        binding.
        Args:
          mirror_volume_mounts: boolean flag
        """
        self.mirror_volume_mounts = mirror_volume_mounts
        return self

    @property
    def inputs(self):
        """A list of PipelineParam found in the Sidecar object."""
        return _pipeline_param.extract_pipelineparams_from_any(self)
def _make_hash_based_id_for_op(op):
# Generating a unique ID for Op. For class instances, the hash is the object's memory address which is unique.
return op.human_name + ' ' + hex(2**63 + hash(op))[2:]
# Pointer to a function that generates a unique ID for the Op instance (Possibly by registering the Op instance in some system).
_register_op_handler = _make_hash_based_id_for_op
class BaseOp(object):
    """Base class for pipeline ops: carries the template-level (non-container)
    settings shared by op types — node selectors, volumes, tolerations, pod
    metadata, retries, sidecars and explicit dependencies."""

    # list of attributes that might have pipeline params - used to generate
    # the input parameters during compilation.
    # Excludes `file_outputs` and `outputs` as they are handled separately
    # in the compilation process to generate the DAGs and task io parameters.
    attrs_with_pipelineparams = [
        'node_selector', 'volumes', 'pod_annotations', 'pod_labels',
        'num_retries', 'sidecars', 'tolerations'
    ]

    def __init__(self,
                 name: str,
                 sidecars: List[Sidecar] = None,
                 is_exit_handler: bool = False):
        """Create a new instance of BaseOp
        Args:
          name: the name of the op. It does not have to be unique within a pipeline
              because the pipeline will generate a unique new name in case of conflicts.
          sidecars: the list of `Sidecar` objects describing the sidecar containers to deploy
              together with the `main` container.
          is_exit_handler: Whether it is used as an exit handler.
        Raises:
          ValueError: if `name` contains characters outside the allowed set.
        """
        valid_name_regex = r'^[A-Za-z][A-Za-z0-9\s_-]*$'
        if not re.match(valid_name_regex, name):
            raise ValueError(
                'Only letters, numbers, spaces, "_", and "-" are allowed in name. Must begin with letter: %s'
                % (name))
        self.is_exit_handler = is_exit_handler

        # human_name must exist to construct operator's name
        self.human_name = name
        # ID of the current Op. Ideally, it should be generated by the compiler that sees the bigger context.
        # However, the ID is used in the task output references (PipelineParams) which can be serialized to strings.
        # Because of this we must obtain a unique ID right now.
        self.name = _register_op_handler(self)

        # TODO: proper k8s definitions so that `convert_k8s_obj_to_json` can be used?
        # `io.argoproj.workflow.v1alpha1.Template` properties
        self.node_selector = {}
        self.volumes = []
        self.tolerations = []
        self.pod_annotations = {}
        self.pod_labels = {}
        self.num_retries = 0
        self.sidecars = sidecars or []

        # attributes specific to `BaseOp`
        self._inputs = []
        self.dependent_names = []

    @property
    def inputs(self):
        """List of PipelineParams that will be converted into input parameters
        (io.argoproj.workflow.v1alpha1.Inputs) for the argo workflow.
        """
        # Iterate through and extract all the `PipelineParam` in Op when
        # called the 1st time (because there are in-place updates to `PipelineParam`
        # during compilation - remove in-place updates for easier debugging?)
        if not self._inputs:
            self._inputs = []
            # TODO replace with proper k8s obj?
            for key in self.attrs_with_pipelineparams:
                self._inputs += [
                    param for param in _pipeline_param.
                    extract_pipelineparams_from_any(getattr(self, key))
                ]
            # keep only unique params (NOTE: set() does not preserve order)
            self._inputs = list(set(self._inputs))
        return self._inputs

    @inputs.setter
    def inputs(self, value):
        # to support in-place updates
        self._inputs = value

    def apply(self, mod_func):
        """Applies a modifier function to self. The function should return the passed object.
        This is needed to chain "extension methods" to this class.
        Example:
          from kfp.gcp import use_gcp_secret
          task = (
              train_op(...)
                  .set_memory_request('1GB')
                  .apply(use_gcp_secret('user-gcp-sa'))
                  .set_memory_limit('2GB')
          )
        """
        # `or self` keeps chaining working for modifiers that return None
        return mod_func(self) or self

    def after(self, *ops):
        """Specify explicit dependency on other ops."""
        for op in ops:
            self.dependent_names.append(op.name)
        return self

    def add_volume(self, volume):
        """Add K8s volume to the container
        Args:
          volume: Kubernetes volumes
          For detailed spec, check volume definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_volume.py
        """
        self.volumes.append(volume)
        return self

    def add_toleration(self, tolerations: V1Toleration):
        """Add K8s tolerations
        Args:
          tolerations: Kubernetes toleration
          For detailed spec, check toleration definition
          https://github.com/kubernetes-client/python/blob/master/kubernetes/client/models/v1_toleration.py
        """
        self.tolerations.append(tolerations)
        return self

    def add_node_selector_constraint(self, label_name, value):
        """Add a constraint for nodeSelector. Each constraint is a key-value pair label. For the
        container to be eligible to run on a node, the node must have each of the constraints appeared
        as labels.
        Args:
          label_name: The name of the constraint label.
          value: The value of the constraint label.
        """
        self.node_selector[label_name] = value
        return self

    def add_pod_annotation(self, name: str, value: str):
        """Adds a pod's metadata annotation.
        Args:
          name: The name of the annotation.
          value: The value of the annotation.
        """
        self.pod_annotations[name] = value
        return self

    def add_pod_label(self, name: str, value: str):
        """Adds a pod's metadata label.
        Args:
          name: The name of the label.
          value: The value of the label.
        """
        self.pod_labels[name] = value
        return self

    def set_retry(self, num_retries: int):
        """Sets the number of times the task is retried until it's declared failed.
        Args:
          num_retries: Number of times to retry on failures.
        """
        self.num_retries = num_retries
        return self

    def add_sidecar(self, sidecar: Sidecar):
        """Add a sidecar to the Op.
        Args:
          sidecar: SideCar object.
        """
        self.sidecars.append(sidecar)
        return self

    def __repr__(self):
        return str({self.__class__.__name__: self.__dict__})
from ._pipeline_volume import PipelineVolume #The import is here to prevent circular reference problems.
class ContainerOp(BaseOp):
"""
Represents an op implemented by a container image.
Example::
from kfp import dsl
from kubernetes.client.models import V1EnvVar, V1SecretKeySelector
@dsl.pipeline(
name='foo',
description='hello world')
def foo_pipeline(tag: str, pull_image_policy: str):
# configures artifact location
artifact_location = dsl.ArtifactLocation.s3(
bucket="foobar",
endpoint="minio-service:9000",
insecure=True,
access_key_secret=V1SecretKeySelector(name="minio", key="accesskey"),
secret_key_secret=V1SecretKeySelector(name="minio", key="secretkey"))
# any attributes can be parameterized (both serialized string or actual PipelineParam)
op = dsl.ContainerOp(name='foo',
image='busybox:%s' % tag,
# pass in sidecars list
sidecars=[dsl.Sidecar('print', 'busybox:latest', command='echo "hello"')],
# pass in k8s container kwargs
container_kwargs={'env': [V1EnvVar('foo', 'bar')]},
# configures artifact location
artifact_location=artifact_location)
# set `imagePullPolicy` property for `container` with `PipelineParam`
op.container.set_pull_image_policy(pull_image_policy)
# add sidecar with parameterized image tag
# sidecar follows the argo sidecar swagger spec
op.add_sidecar(dsl.Sidecar('redis', 'redis:%s' % tag).set_image_pull_policy('Always'))
"""
# list of attributes that might have pipeline params - used to generate
# the input parameters during compilation.
# Excludes `file_outputs` and `outputs` as they are handled separately
# in the compilation process to generate the DAGs and task io parameters.
def __init__(self,
name: str,
image: str,
command: StringOrStringList = None,
arguments: StringOrStringList = None,
sidecars: List[Sidecar] = None,
container_kwargs: Dict = None,
file_outputs: Dict[str, str] = None,
output_artifact_paths : Dict[str, str]=None,
artifact_location: V1alpha1ArtifactLocation=None,
is_exit_handler=False,
pvolumes: Dict[str, V1Volume] = None,
):
"""Create a new instance of ContainerOp.
Args:
name: the name of the op. It does not have to be unique within a pipeline
because the pipeline will generates a unique new name in case of conflicts.
image: the container image name, such as 'python:3.5-jessie'
command: the command to run in the container.
If None, uses default CMD in defined in container.
arguments: the arguments of the command. The command can include "%s" and supply
a PipelineParam as the string replacement. For example, ('echo %s' % input_param).
At container run time the argument will be 'echo param_value'.
sidecars: the list of `Sidecar` objects describing the sidecar containers to deploy
together with the `main` container.
container_kwargs: the dict of additional keyword arguments to pass to the
op's `Container` definition.
file_outputs: Maps output labels to local file paths. At pipeline run time,
the value of a PipelineParam is saved to its corresponding local file. It's
one way for outside world to receive outputs of the container.
output_artifact_paths: Maps output artifact labels to local artifact file paths.
It has the following default artifact paths during compile time.
{'mlpipeline-ui-metadata': '/mlpipeline-ui-metadata.json',
'mlpipeline-metrics': '/mlpipeline-metrics.json'}
artifact_location: configures the default artifact location for artifacts
in the argo workflow template. Must be a `V1alpha1ArtifactLocation`
object.
is_exit_handler: Whether it is used as an exit handler.
pvolumes: Dictionary for the user to match a path on the op's fs with a
V1Volume or it inherited type.
E.g {"/my/path": vol, "/mnt": other_op.pvolumes["/output"]}.
"""
super().__init__(name=name, sidecars=sidecars, is_exit_handler=is_exit_handler)
self.attrs_with_pipelineparams = BaseOp.attrs_with_pipelineparams + ['_container', 'artifact_location'] #Copying the BaseOp class variable!
# convert to list if not a list
command = as_string_list(command)
arguments = as_string_list(arguments)
# `container` prop in `io.argoproj.workflow.v1alpha1.Template`
container_kwargs = container_kwargs or {}
self._container = Container(
image=image, args=arguments, command=command, **container_kwargs)
# NOTE for backward compatibility (remove in future?)
# proxy old ContainerOp callables to Container
# attributes to NOT proxy
ignore_set = frozenset(['to_dict', 'to_str'])
# decorator func to proxy a method in `Container` into `ContainerOp`
def _proxy(proxy_attr):
"""Decorator func to proxy to ContainerOp.container"""
def _decorated(*args, **kwargs):
# execute method
ret = getattr(self._container, proxy_attr)(*args, **kwargs)
if ret == self._container:
return self
return ret
return deprecation_warning(_decorated, proxy_attr, proxy_attr)
# iter thru container and attach a proxy func to the container method
for attr_to_proxy in dir(self._container):
func = getattr(self._container, attr_to_proxy)
# ignore private methods
if hasattr(func, '__call__') and (attr_to_proxy[0] != '_') and (
attr_to_proxy not in ignore_set):
# only proxy public callables
setattr(self, attr_to_proxy, _proxy(attr_to_proxy))
# attributes specific to `ContainerOp`
self.file_outputs = file_outputs
self.output_artifact_paths = output_artifact_paths or {}
self.artifact_location = artifact_location
self._metadata = None
self.outputs = {}
if file_outputs:
self.outputs = {
name: _pipeline_param.PipelineParam(name, op_name=self.name)
for name in file_outputs.keys()
}
self.output = None
if len(self.outputs) == 1:
self.output = list(self.outputs.values())[0]
self.pvolumes = {}
if pvolumes:
for mount_path, pvolume in pvolumes.items():
if hasattr(pvolume, "dependent_names"): #TODO: Replace with type check
self.dependent_names.extend(pvolume.dependent_names)
else:
pvolume = PipelineVolume(volume=pvolume)
self.pvolumes[mount_path] = pvolume.after(self)
self.add_volume(pvolume)
self._container.add_volume_mount(V1VolumeMount(
name=pvolume.name,
mount_path=mount_path
))
self.pvolume = None
if self.pvolumes and len(self.pvolumes) == 1:
self.pvolume = list(self.pvolumes.values())[0]
@property
def command(self):
    """Proxy: entrypoint command of the wrapped `Container`."""
    return self._container.command

@command.setter
def command(self, value):
    normalized = as_string_list(value)
    self._container.command = normalized
@property
def arguments(self):
    """Proxy: argument list (`args`) of the wrapped `Container`."""
    return self._container.args

@arguments.setter
def arguments(self, value):
    normalized = as_string_list(value)
    self._container.args = normalized
@property
def container(self):
    """The underlying `Container` object, i.e. the `container` field of an
    `io.argoproj.workflow.v1alpha1.Template`. Use it to tweak container
    configuration directly.

    Example::

        import kfp.dsl as dsl
        from kubernetes.client.models import V1EnvVar

        @dsl.pipeline(name='example_pipeline')
        def immediate_value_pipeline():
            op1 = (dsl.ContainerOp(name='example', image='nginx:alpine')
                    .container
                    .add_env_variable(V1EnvVar(name='HOST', value='foo.bar'))
                    .add_env_variable(V1EnvVar(name='PORT', value='80'))
                    .parent  # return the parent `ContainerOp`
            )
    """
    return self._container
def _set_metadata(self, metadata):
    '''Attach component metadata to this op and refine its output types.

    For every declared file output, the param_type recorded in the
    component metadata (matched by output name) overrides the current one.

    Args:
        metadata (ComponentMeta): component metadata

    Raises:
        ValueError: if `metadata` is not a ComponentMeta instance.
    '''
    if not isinstance(metadata, ComponentMeta):
        # Fixed typo in the error message ('_set_medata' -> '_set_metadata').
        raise ValueError('_set_metadata is expecting ComponentMeta.')
    self._metadata = metadata
    if self.file_outputs:
        for output in self.file_outputs.keys():
            output_type = self.outputs[output].param_type
            # last matching metadata entry wins (mirrors original behavior)
            for output_meta in self._metadata.outputs:
                if output_meta.name == output:
                    output_type = output_meta.param_type
            self.outputs[output].param_type = output_type
    # keep the single-output shortcut in sync with the (re-typed) outputs
    self.output = None
    if len(self.outputs) == 1:
        self.output = list(self.outputs.values())[0]
# Rebind ContainerOp so that old ContainerOp properties are proxied to
# ContainerOp.container with a PendingDeprecationWarning.
ContainerOp = _proxy_container_op_props(ContainerOp)
|
#!/usr/bin/env python3
import os
from skimage.transform import resize as imresize
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from tensorflow.python.ops.metrics_impl import mean_iou
import logging
from vgg import VGG
from voc_loader import VOCLoader
from boxer import PriorBoxGrid
from config import args, train_dir
from paths import CKPT_ROOT, EVAL_DIR, RESULTS_DIR
from utils import decode_bboxes, batch_iou
# Short aliases for TF-Slim and the streaming mean-IoU metric.
slim = tf.contrib.slim
streaming_mean_iou = tf.contrib.metrics.streaming_mean_iou
# Root logger; configuration is expected to be done by the entry point.
log = logging.getLogger()
class Detector(object):
    """Single-image inference driver: runs the detection and/or segmentation
    heads of a trained network, post-processes the raw outputs and optionally
    visualizes them."""

    def __init__(self, sess, net, loader, config, no_gt=False, folder=None):
        """Build the inference graph and prepare the output directory.

        Args:
            sess: live tf.Session used for every forward pass.
            net: network wrapper exposing create_trunk/create_*_head.
            loader: dataset loader (provides num_classes, ids_to_cats).
            config: dict with at least 'fm_sizes' and 'image_size'.
            no_gt: when True, no ground truth is available and the mean-IoU
                ops are replaced by constants.
            folder: output directory; defaults to RESULTS_DIR/args.run_name.
        """
        self.sess = sess
        self.net = net
        self.loader = loader
        self.config = config
        self.fm_sizes = self.config['fm_sizes']
        self.no_gt = no_gt
        self.bboxer = PriorBoxGrid(self.config)
        self.build_detector()
        if folder is not None:
            self.directory = folder
        else:
            self.directory = os.path.join(RESULTS_DIR, args.run_name)
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

    @staticmethod
    def draw_rectangle(draw, coordinates, color, width=1):
        # PIL rectangles have no line-width option; emulate it by drawing
        # `width` concentric 1-px rectangles.
        for i in range(width):
            rect_start = (coordinates[0] - i, coordinates[1] - i)
            rect_end = (coordinates[2] + i, coordinates[3] + i)
            draw.rectangle((rect_start, rect_end), outline=color)

    def draw(self, img, dets, cats, scores, name, gt_bboxes, gt_cats):
        """Visualize objects detected by the network by putting bounding boxes"""
        colors = np.load('Extra/colors.npy').tolist()
        font = ImageFont.truetype("Extra/FreeSansBold.ttf", 14)
        h, w = img.shape[:2]
        image = Image.fromarray((img * 255).astype('uint8'))
        dr = ImageDraw.Draw(image)
        # in detection-only mode also dump the clean input image
        if not args.segment:
            image.save(self.directory + '/%s.jpg' % name, 'JPEG')
        for i in range(len(cats)):
            cat = cats[i]
            score = scores[i]
            bbox = np.array(dets[i])
            # (x, y, w, h) -> (x1, y1, x2, y2)
            bbox[[2, 3]] += bbox[[0, 1]]
            # color = 'green' if matched_det[i] else 'red'
            color = colors[cat]
            self.draw_rectangle(dr, bbox, color, width=5)
            dr.text(bbox[:2], self.loader.ids_to_cats[cat] + ' ' + str(score),
                    fill=color, font=font)
        draw_gt = False  # debug switch: also render ground-truth boxes
        if draw_gt:
            match = quick_matching(dets, gt_bboxes, cats, gt_cats)
            matched_gt = match.sum(0)
            for i in range(len(gt_cats)):
                x, y, w, h = gt_bboxes[i]
                color = 'white' if matched_gt[i] else 'blue'
                bbox = (x, y, x + w, y + h)
                self.draw_rectangle(dr, bbox, color, width=3)
                dr.text((x, y), self.loader.ids_to_cats[gt_cats[i]], fill=color)
        image.save(self.directory + '/%s_det_%i.jpg' % (name, int(100 *
                   args.eval_min_conf)), 'JPEG')
        del dr

    def draw_seg(self, img, seg_gt, segmentation, name):
        """Applies generated segmentation mask to an image"""
        palette = np.load('Extra/palette.npy').tolist()
        # FIX: skimage.transform.resize takes output_shape as (rows, cols)
        # == (height, width); the previous (shape[1], shape[0]) order
        # transposed the mask for non-square images.
        img_size = (img.shape[0], img.shape[1])
        segmentation = imresize(segmentation, img_size, order=0, preserve_range=True).astype(int)
        image = Image.fromarray((img * 255).astype('uint8'))
        segmentation_draw = Image.fromarray((segmentation).astype('uint8'), 'P')
        segmentation_draw.putpalette(palette)
        segmentation_draw.save(self.directory + '/%s_segmentation.png' % name, 'PNG')
        image.save(self.directory + '/%s.jpg' % name, 'JPEG')
        # FIX: `if seg_gt:` raises ValueError on a multi-element ndarray
        # (ambiguous truth value); compare against None explicitly.
        if seg_gt is not None:
            seg_gt_draw = Image.fromarray((seg_gt).astype('uint8'), 'P')
            seg_gt_draw.putpalette(palette)
            seg_gt_draw.save(self.directory + '/%s_seg_gt.png' % name, 'PNG')

    def restore_from_ckpt(self, ckpt):
        """Restore weights from checkpoint `ckpt` (counted in thousands of steps)."""
        ckpt_path = os.path.join(CKPT_ROOT, args.run_name, 'model.ckpt-%i000' % ckpt)
        log.debug("Restoring checkpoint %s" % ckpt_path)
        self.sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(self.sess, ckpt_path)

    def nms(self, localization, confidence, tiling):
        """Decode box offsets and build per-class NMS output tensors,
        stored in self.detection_list / self.score_list."""
        good_bboxes = decode_bboxes(localization, tiling)
        # drop priors whose best non-background score is below the threshold
        not_crap_mask = tf.reduce_max(confidence[:, 1:], axis=-1) >= args.conf_thresh
        good_bboxes = tf.boolean_mask(good_bboxes, not_crap_mask)
        confidence = tf.boolean_mask(confidence, not_crap_mask)
        self.detection_list = []
        self.score_list = []
        # class 0 is background; run NMS independently per foreground class
        for i in range(1, self.loader.num_classes):
            class_mask = tf.greater(confidence[:, i], args.conf_thresh)
            class_scores = tf.boolean_mask(confidence[:, i], class_mask)
            class_bboxes = tf.boolean_mask(good_bboxes, class_mask)
            K = tf.minimum(tf.size(class_scores), args.top_k_nms)
            _, top_k_inds = tf.nn.top_k(class_scores, K)
            top_class_scores = tf.gather(class_scores, top_k_inds)
            top_class_bboxes = tf.gather(class_bboxes, top_k_inds)
            final_inds = tf.image.non_max_suppression(top_class_bboxes,
                                                      top_class_scores,
                                                      max_output_size=args.top_k_after_nms,
                                                      iou_threshold=args.nms_thresh)
            final_class_bboxes = tf.gather(top_class_bboxes, final_inds)
            final_scores = tf.gather(top_class_scores, final_inds)
            self.detection_list.append(final_class_bboxes)
            self.score_list.append(final_scores)

    def build_detector(self):
        """Create input placeholders and the detection/segmentation heads."""
        img_size = self.config['image_size']
        self.image_ph = tf.placeholder(shape=[None, None, 3],
                                       dtype=tf.float32, name='img_ph')
        self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')
        img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
                                       (img_size, img_size))
        self.net.create_trunk(img)
        if args.detect:
            self.net.create_multibox_head(self.loader.num_classes)
            confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
            location = tf.squeeze(self.net.outputs['location'])
            self.nms(location, confidence, self.bboxer.tiling)
        if args.segment:
            self.net.create_segmentation_head(self.loader.num_classes)
            self.segmentation = self.net.outputs['segmentation']
            seg_shape = tf.shape(self.image_ph)[:2]
            self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)
            self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
            self.segmentation = tf.reshape(self.segmentation, seg_shape)
            self.segmentation.set_shape([None, None])
            if not self.no_gt:
                # ignore labels outside the known class range (e.g. void)
                easy_mask = self.seg_ph <= self.loader.num_classes
                predictions = tf.boolean_mask(self.segmentation, easy_mask)
                labels = tf.boolean_mask(self.seg_ph, easy_mask)
                self.mean_iou, self.iou_update = mean_iou(predictions, labels, self.loader.num_classes)
            else:
                self.mean_iou = tf.constant(0)
                self.iou_update = tf.constant(0)

    def process_detection(self, outputs, img, w, h, gt_bboxes, gt_cats, name, draw):
        """Merge per-class NMS outputs, keep the confident top-k and rescale
        boxes to image coordinates. Returns (dets, scores, cats)."""
        detection_vec, score_vec = outputs[:2]
        dets, scores, cats = [], [], []
        no_dets = True
        for i in range(self.loader.num_classes-1):
            if score_vec[i].size > 0:
                no_dets = False
                dets.append(detection_vec[i])
                scores.append(score_vec[i])
                cats.append(np.zeros(len(score_vec[i]), dtype='int') + i + 1)
        if not no_dets:
            dets = np.vstack(dets)
            scores = np.concatenate(scores, axis=0)
            cats = np.concatenate(cats, axis=0)
            top_k_inds = np.argsort(scores)[::-1]
            if scores.size > args.top_k_post_nms:
                top_k_inds = top_k_inds[0:args.top_k_post_nms]
            dets = dets[top_k_inds]
            scores = scores[top_k_inds]
            cats = cats[top_k_inds]
            mask_high = scores >= args.eval_min_conf
            dets = dets[mask_high]
            scores = scores[mask_high]
            cats = cats[mask_high]
            # (y1, x1, y2, x2) -> (x, y, w, h), scaled to image size
            dets[:, :] = dets[:, [1, 0, 3, 2]]
            dets[:, [2, 3]] -= dets[:, [0, 1]]
            dets[:, [0, 2]] *= w
            dets[:, [1, 3]] *= h
        if draw:
            self.draw(img, dets, cats, scores, name, gt_bboxes, gt_cats)
        return(dets, scores, cats)

    def process_segmentation(self, outputs, img, seg_gt, name, draw):
        """Extract segmentation map and IoU from the network outputs."""
        segmentation, iou, _ = outputs[-3:]
        if draw:
            self.draw_seg(img, seg_gt, segmentation, name)
        return segmentation, iou

    def get_mean_iou(self):
        """Return the current value of the streaming mean-IoU accumulator."""
        iou = self.sess.run(self.mean_iou)
        return iou

    def feed_forward(self, img, seg_gt, w, h, name, gt_bboxes, gt_cats, draw=False):
        """Run one forward pass. Returns [dets, scores, cats] and/or
        [segmentation, iou] depending on args.detect / args.segment."""
        feed_dict = {self.image_ph: img}
        net_out = []
        if args.detect:
            net_out.extend([self.detection_list, self.score_list])
        if args.segment:
            # feed a dummy all-zero mask when no ground truth is given
            seg_gt_ = np.zeros(img.shape[:2]) if seg_gt is None else seg_gt
            seg_dict = {self.seg_ph: seg_gt_}
            feed_dict.update(seg_dict)
            net_out.extend([self.segmentation, self.mean_iou, self.iou_update])
        # outputs order with det and seg modes on:
        # detection_vec, score_vec, segmentation, iou, _
        outputs = self.sess.run(net_out, feed_dict=feed_dict)
        results = []
        if args.detect:
            dets, scores, cats = self.process_detection(outputs, img, w, h,
                                                        gt_bboxes, gt_cats,
                                                        name, draw=draw)
            results.extend([dets, scores, cats])
        if args.segment:
            segmentation, iou = self.process_segmentation(outputs, img, seg_gt, name, draw)
            results.extend([segmentation, iou])
        return results
def quick_matching(det_boxes, gt_boxes, det_cats, gt_cats):
    """Boolean matrix [num_det, num_gt]: True where a detection overlaps a
    ground-truth box with IoU >= 0.5 and both share the same category."""
    overlap_ok = batch_iou(det_boxes, gt_boxes) >= 0.5
    same_cat = np.expand_dims(det_cats, axis=1) == np.expand_dims(gt_cats, axis=0)
    return np.logical_and(overlap_ok, same_cat)
Fix segmentation resize: pass the output shape to skimage resize in (height, width) order.
#!/usr/bin/env python3
import os
from skimage.transform import resize as imresize
import numpy as np
import tensorflow as tf
from PIL import Image, ImageDraw, ImageFont
from tensorflow.python.ops.metrics_impl import mean_iou
import logging
from vgg import VGG
from voc_loader import VOCLoader
from boxer import PriorBoxGrid
from config import args, train_dir
from paths import CKPT_ROOT, EVAL_DIR, RESULTS_DIR
from utils import decode_bboxes, batch_iou
# Short aliases for TF-Slim and the streaming mean-IoU metric.
slim = tf.contrib.slim
streaming_mean_iou = tf.contrib.metrics.streaming_mean_iou
# Root logger; configuration is expected to be done by the entry point.
log = logging.getLogger()
class Detector(object):
    """Single-image inference driver: runs the detection and/or segmentation
    heads of a trained network, post-processes the raw outputs and optionally
    visualizes them."""

    def __init__(self, sess, net, loader, config, no_gt=False, folder=None):
        """Build the inference graph and prepare the output directory.

        Args:
            sess: live tf.Session used for every forward pass.
            net: network wrapper exposing create_trunk/create_*_head.
            loader: dataset loader (provides num_classes, ids_to_cats).
            config: dict with at least 'fm_sizes' and 'image_size'.
            no_gt: when True, no ground truth is available and the mean-IoU
                ops are replaced by constants.
            folder: output directory; defaults to RESULTS_DIR/args.run_name.
        """
        self.sess = sess
        self.net = net
        self.loader = loader
        self.config = config
        self.fm_sizes = self.config['fm_sizes']
        self.no_gt = no_gt
        self.bboxer = PriorBoxGrid(self.config)
        self.build_detector()
        if folder is not None:
            self.directory = folder
        else:
            self.directory = os.path.join(RESULTS_DIR, args.run_name)
        if not os.path.exists(self.directory):
            os.makedirs(self.directory)

    @staticmethod
    def draw_rectangle(draw, coordinates, color, width=1):
        # PIL rectangles have no line-width option; emulate it by drawing
        # `width` concentric 1-px rectangles.
        for i in range(width):
            rect_start = (coordinates[0] - i, coordinates[1] - i)
            rect_end = (coordinates[2] + i, coordinates[3] + i)
            draw.rectangle((rect_start, rect_end), outline=color)

    def draw(self, img, dets, cats, scores, name, gt_bboxes, gt_cats):
        """Visualize objects detected by the network by putting bounding boxes"""
        colors = np.load('Extra/colors.npy').tolist()
        font = ImageFont.truetype("Extra/FreeSansBold.ttf", 14)
        h, w = img.shape[:2]
        image = Image.fromarray((img * 255).astype('uint8'))
        dr = ImageDraw.Draw(image)
        # in detection-only mode also dump the clean input image
        if not args.segment:
            image.save(self.directory + '/%s.jpg' % name, 'JPEG')
        for i in range(len(cats)):
            cat = cats[i]
            score = scores[i]
            bbox = np.array(dets[i])
            # (x, y, w, h) -> (x1, y1, x2, y2)
            bbox[[2, 3]] += bbox[[0, 1]]
            # color = 'green' if matched_det[i] else 'red'
            color = colors[cat]
            self.draw_rectangle(dr, bbox, color, width=5)
            dr.text(bbox[:2], self.loader.ids_to_cats[cat] + ' ' + str(score),
                    fill=color, font=font)
        draw_gt = False  # debug switch: also render ground-truth boxes
        if draw_gt:
            match = quick_matching(dets, gt_bboxes, cats, gt_cats)
            matched_gt = match.sum(0)
            for i in range(len(gt_cats)):
                x, y, w, h = gt_bboxes[i]
                color = 'white' if matched_gt[i] else 'blue'
                bbox = (x, y, x + w, y + h)
                self.draw_rectangle(dr, bbox, color, width=3)
                dr.text((x, y), self.loader.ids_to_cats[gt_cats[i]], fill=color)
        image.save(self.directory + '/%s_det_%i.jpg' % (name, int(100 *
                   args.eval_min_conf)), 'JPEG')
        del dr

    def draw_seg(self, img, seg_gt, segmentation, name):
        """Applies generated segmentation mask to an image"""
        palette = np.load('Extra/palette.npy').tolist()
        # skimage.transform.resize takes output_shape as (rows, cols),
        # i.e. (height, width) == (shape[0], shape[1]).
        img_size = (img.shape[0], img.shape[1])
        segmentation = imresize(segmentation, img_size, order=0, preserve_range=True).astype(int)
        image = Image.fromarray((img * 255).astype('uint8'))
        segmentation_draw = Image.fromarray((segmentation).astype('uint8'), 'P')
        segmentation_draw.putpalette(palette)
        segmentation_draw.save(self.directory + '/%s_segmentation.png' % name, 'PNG')
        image.save(self.directory + '/%s.jpg' % name, 'JPEG')
        # FIX: `if seg_gt:` raises ValueError on a multi-element ndarray
        # (ambiguous truth value); compare against None explicitly.
        if seg_gt is not None:
            seg_gt_draw = Image.fromarray((seg_gt).astype('uint8'), 'P')
            seg_gt_draw.putpalette(palette)
            seg_gt_draw.save(self.directory + '/%s_seg_gt.png' % name, 'PNG')

    def restore_from_ckpt(self, ckpt):
        """Restore weights from checkpoint `ckpt` (counted in thousands of steps)."""
        ckpt_path = os.path.join(CKPT_ROOT, args.run_name, 'model.ckpt-%i000' % ckpt)
        log.debug("Restoring checkpoint %s" % ckpt_path)
        self.sess.run(tf.local_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(self.sess, ckpt_path)

    def nms(self, localization, confidence, tiling):
        """Decode box offsets and build per-class NMS output tensors,
        stored in self.detection_list / self.score_list."""
        good_bboxes = decode_bboxes(localization, tiling)
        # drop priors whose best non-background score is below the threshold
        not_crap_mask = tf.reduce_max(confidence[:, 1:], axis=-1) >= args.conf_thresh
        good_bboxes = tf.boolean_mask(good_bboxes, not_crap_mask)
        confidence = tf.boolean_mask(confidence, not_crap_mask)
        self.detection_list = []
        self.score_list = []
        # class 0 is background; run NMS independently per foreground class
        for i in range(1, self.loader.num_classes):
            class_mask = tf.greater(confidence[:, i], args.conf_thresh)
            class_scores = tf.boolean_mask(confidence[:, i], class_mask)
            class_bboxes = tf.boolean_mask(good_bboxes, class_mask)
            K = tf.minimum(tf.size(class_scores), args.top_k_nms)
            _, top_k_inds = tf.nn.top_k(class_scores, K)
            top_class_scores = tf.gather(class_scores, top_k_inds)
            top_class_bboxes = tf.gather(class_bboxes, top_k_inds)
            final_inds = tf.image.non_max_suppression(top_class_bboxes,
                                                      top_class_scores,
                                                      max_output_size=args.top_k_after_nms,
                                                      iou_threshold=args.nms_thresh)
            final_class_bboxes = tf.gather(top_class_bboxes, final_inds)
            final_scores = tf.gather(top_class_scores, final_inds)
            self.detection_list.append(final_class_bboxes)
            self.score_list.append(final_scores)

    def build_detector(self):
        """Create input placeholders and the detection/segmentation heads."""
        img_size = self.config['image_size']
        self.image_ph = tf.placeholder(shape=[None, None, 3],
                                       dtype=tf.float32, name='img_ph')
        self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')
        img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
                                       (img_size, img_size))
        self.net.create_trunk(img)
        if args.detect:
            self.net.create_multibox_head(self.loader.num_classes)
            confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
            location = tf.squeeze(self.net.outputs['location'])
            self.nms(location, confidence, self.bboxer.tiling)
        if args.segment:
            self.net.create_segmentation_head(self.loader.num_classes)
            self.segmentation = self.net.outputs['segmentation']
            seg_shape = tf.shape(self.image_ph)[:2]
            self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)
            self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
            self.segmentation = tf.reshape(self.segmentation, seg_shape)
            self.segmentation.set_shape([None, None])
            if not self.no_gt:
                # ignore labels outside the known class range (e.g. void)
                easy_mask = self.seg_ph <= self.loader.num_classes
                predictions = tf.boolean_mask(self.segmentation, easy_mask)
                labels = tf.boolean_mask(self.seg_ph, easy_mask)
                self.mean_iou, self.iou_update = mean_iou(predictions, labels, self.loader.num_classes)
            else:
                self.mean_iou = tf.constant(0)
                self.iou_update = tf.constant(0)

    def process_detection(self, outputs, img, w, h, gt_bboxes, gt_cats, name, draw):
        """Merge per-class NMS outputs, keep the confident top-k and rescale
        boxes to image coordinates. Returns (dets, scores, cats)."""
        detection_vec, score_vec = outputs[:2]
        dets, scores, cats = [], [], []
        no_dets = True
        for i in range(self.loader.num_classes-1):
            if score_vec[i].size > 0:
                no_dets = False
                dets.append(detection_vec[i])
                scores.append(score_vec[i])
                cats.append(np.zeros(len(score_vec[i]), dtype='int') + i + 1)
        if not no_dets:
            dets = np.vstack(dets)
            scores = np.concatenate(scores, axis=0)
            cats = np.concatenate(cats, axis=0)
            top_k_inds = np.argsort(scores)[::-1]
            if scores.size > args.top_k_post_nms:
                top_k_inds = top_k_inds[0:args.top_k_post_nms]
            dets = dets[top_k_inds]
            scores = scores[top_k_inds]
            cats = cats[top_k_inds]
            mask_high = scores >= args.eval_min_conf
            dets = dets[mask_high]
            scores = scores[mask_high]
            cats = cats[mask_high]
            # (y1, x1, y2, x2) -> (x, y, w, h), scaled to image size
            dets[:, :] = dets[:, [1, 0, 3, 2]]
            dets[:, [2, 3]] -= dets[:, [0, 1]]
            dets[:, [0, 2]] *= w
            dets[:, [1, 3]] *= h
        if draw:
            self.draw(img, dets, cats, scores, name, gt_bboxes, gt_cats)
        return(dets, scores, cats)

    def process_segmentation(self, outputs, img, seg_gt, name, draw):
        """Extract segmentation map and IoU from the network outputs."""
        segmentation, iou, _ = outputs[-3:]
        if draw:
            self.draw_seg(img, seg_gt, segmentation, name)
        return segmentation, iou

    def get_mean_iou(self):
        """Return the current value of the streaming mean-IoU accumulator."""
        iou = self.sess.run(self.mean_iou)
        return iou

    def feed_forward(self, img, seg_gt, w, h, name, gt_bboxes, gt_cats, draw=False):
        """Run one forward pass. Returns [dets, scores, cats] and/or
        [segmentation, iou] depending on args.detect / args.segment."""
        feed_dict = {self.image_ph: img}
        net_out = []
        if args.detect:
            net_out.extend([self.detection_list, self.score_list])
        if args.segment:
            # feed a dummy all-zero mask when no ground truth is given
            seg_gt_ = np.zeros(img.shape[:2]) if seg_gt is None else seg_gt
            seg_dict = {self.seg_ph: seg_gt_}
            feed_dict.update(seg_dict)
            net_out.extend([self.segmentation, self.mean_iou, self.iou_update])
        # outputs order with det and seg modes on:
        # detection_vec, score_vec, segmentation, iou, _
        outputs = self.sess.run(net_out, feed_dict=feed_dict)
        results = []
        if args.detect:
            dets, scores, cats = self.process_detection(outputs, img, w, h,
                                                        gt_bboxes, gt_cats,
                                                        name, draw=draw)
            results.extend([dets, scores, cats])
        if args.segment:
            segmentation, iou = self.process_segmentation(outputs, img, seg_gt, name, draw)
            results.extend([segmentation, iou])
        return results
def quick_matching(det_boxes, gt_boxes, det_cats, gt_cats):
    """Match detections to ground truth: IoU >= 0.5 and equal category.

    Returns a boolean matrix of shape [num_detections, num_ground_truth].
    """
    iou_ok = batch_iou(det_boxes, gt_boxes) >= 0.5
    cat_ok = np.asarray(det_cats).reshape(-1, 1) == np.asarray(gt_cats).reshape(1, -1)
    return np.logical_and(iou_ok, cat_ok)
|
from info import __doc__
from scipy_test_version import scipy_test_version as __version__
from testing import ScipyTest
Fixing svn commit testing issues.
from info import __doc__
from scipytest import *
from utils import *
|
from snovault import (
AuditFailure,
audit_checker,
)
from .formatter import (
audit_link,
path_to_text,
)
from .item import STATUS_LEVEL
def check_award_condition(value, awards):
    """Return True if the item's award has an ``rfa`` listed in *awards*.

    Args:
        value: item dict that may carry an ``award`` sub-dict.
        awards: iterable of acceptable RFA names (e.g. ['ENCODE3', 'ENCODE4']).

    Returns:
        bool: previously this could return the falsy award value itself
        (None/{}); it now always returns a proper bool, and the award dict
        is looked up only once.
    """
    award = value.get('award') or {}
    return bool(award) and award.get('rfa') in awards
def audit_file_processed_step_run(value, system):
    """Flag processed ENCODE3/ENCODE4 files that lack step_run provenance."""
    skipped_statuses = ('replaced',
                        'deleted',
                        'revoked',
                        'uploading',
                        'content error',
                        'upload failed')
    if value['status'] in skipped_statuses:
        return
    if value['output_category'] in ('raw data', 'reference'):
        return
    if not check_award_condition(value.get('dataset'), ['ENCODE3', 'ENCODE4']):
        return
    if 'step_run' in value:
        return
    detail = ('Missing analysis_step_run information in file {}.'.format(
        audit_link(path_to_text(value['@id']), value['@id'])
        )
    )
    # Pipeline-lab files are expected to always carry step_run -> ERROR;
    # other labs only get a WARNING.
    from_pipeline_lab = value.get('lab', '') == '/labs/encode-processing-pipeline/'
    level = 'ERROR' if from_pipeline_lab else 'WARNING'
    yield AuditFailure('missing analysis_step_run', detail, level=level)
def audit_file_processed_derived_from(value, system):
    """Check that a processed file documents its provenance via derived_from.

    Yields 'missing derived_from' when the list is absent or empty; for bam
    files additionally yields 'inconsistent derived_from' for sources from a
    different experiment, and 'missing derived_from' when no fastq/bam-like
    source with a status at least as advanced as the file's own is present.
    """
    if value['output_category'] in ['raw data',
                                    'reference']:
        return
    if 'derived_from' not in value or \
       'derived_from' in value and len(value['derived_from']) == 0:
        detail = ('derived_from is a list of files that were used to create a given file; '
                  'for example, fastq file(s) will appear in the derived_from list of an '
                  'alignments file. Processed file {} is missing the requisite file'
                  ' specification in its derived_from list.'.format(
                      audit_link(path_to_text(value['@id']), value['@id'])
                  )
                  )
        yield AuditFailure('missing derived_from',
                           detail, level='INTERNAL_ACTION')
        return
    # The remaining checks apply to alignment (bam) files only.
    if value['file_format'] != 'bam':
        return
    # Ignore replaced BAMs because missing derived_from logic should be applied to their
    # replacements instead (ENCD-3595).
    if value['status'] == 'replaced':
        return
    fastq_bam_counter = 0
    for f in value.get('derived_from'):
        # Count raw-ish sources (bam, fastq, or raw-read fasta variants).
        if (f['file_format'] == 'bam'
                or f['file_format'] == 'fastq'
                or (f['file_format'] in ['fasta', 'csfasta', 'csqual']
                    and f['output_type'] == 'reads'
                    and f['output_category'] == 'raw data')):
            # Audit shouldn't trigger if status isn't registered in STATUS_LEVEL dict.
            if f['status'] not in STATUS_LEVEL or value['status'] not in STATUS_LEVEL:
                return
            # Only sources at least as "released" as the file itself count.
            if STATUS_LEVEL[f['status']] >= STATUS_LEVEL[value['status']]:
                fastq_bam_counter += 1
        # Every source must come from the same experiment as the file.
        if f['dataset'] != value['dataset'].get('@id'):
            detail = ('derived_from is a list of files that were used '
                      'to create a given file; '
                      'for example, fastq file(s) will appear in the '
                      'derived_from list of an '
                      'alignments file. '
                      'Alignments file {} '
                      'from experiment {} '
                      'specifies a file {} '
                      'from a different experiment {} '
                      'in its derived_from list.'.format(
                          audit_link(path_to_text(value['@id']), value['@id']),
                          audit_link(path_to_text(value['dataset']['@id']), value['dataset']['@id']),
                          audit_link(path_to_text(f['@id']), f['@id']),
                          audit_link(path_to_text(f['dataset']), f['dataset'])
                      )
                      )
            yield AuditFailure('inconsistent derived_from',
                               detail, level='INTERNAL_ACTION')
    if fastq_bam_counter == 0:
        detail = ('derived_from is a list of files that were used to create a given file; '
                  'for example, fastq file(s) will appear in the derived_from list of an '
                  'alignments file. Alignments file {} is missing the requisite '
                  'file specification in its derived_from list.'.format(
                      audit_link(path_to_text(value['@id']), value['@id']),
                  )
                  )
        yield AuditFailure('missing derived_from',
                           detail, level='INTERNAL_ACTION')
def audit_file_assembly(value, system):
    """Warn when a processed file's assembly disagrees with a source file's.

    Stops after the first mismatch (single yield), like the original logic.
    """
    if 'derived_from' not in value:
        return
    own_assembly = value.get('assembly')
    for source in value['derived_from']:
        source_assembly = source.get('assembly')
        # only compare when both assemblies are present (truthy)
        if not source_assembly or not own_assembly:
            continue
        if source_assembly == own_assembly:
            continue
        detail = ('Processed file {} assembly {} '
                  'does not match assembly {} of the file {} '
                  'it was derived from.'.format(
                      audit_link(path_to_text(value['@id']), value['@id']),
                      value['assembly'],
                      source['assembly'],
                      audit_link(path_to_text(source['@id']), source['@id'])
                  )
                  )
        yield AuditFailure('inconsistent assembly',
                           detail, level='WARNING')
        return
def audit_file_replicate_match(value, system):
    """Flag files whose replicate belongs to a different experiment.

    Replicate/experiment links commonly diverge when objects are replaced.
    """
    if 'replicate' not in value:
        return
    replicate = value['replicate']
    experiment_of_replicate = replicate['experiment']
    experiment_of_file = value['dataset']['@id']
    if experiment_of_replicate == experiment_of_file:
        return
    detail = ('File {} from experiment {} '
              'is associated with replicate [{},{}] '
              '{}, but that replicate is associated with a different '
              'experiment {}.'.format(
                  audit_link(path_to_text(value['@id']), value['@id']),
                  audit_link(path_to_text(value['dataset']['@id']), value['dataset']['@id']),
                  replicate['biological_replicate_number'],
                  replicate['technical_replicate_number'],
                  audit_link(path_to_text(replicate['@id']), replicate['@id']),
                  audit_link(path_to_text(replicate['experiment']), replicate['experiment'])
              )
              )
    yield AuditFailure('inconsistent replicate', detail, level='ERROR')
def audit_paired_with(value, system):
    '''
    A file with a paired_end needs a paired_with.
    Should be handled in the schema.
    A fastq file should be paired_with a fastq file.
    A paired_with should be the same replicate
    '''
    if 'paired_end' not in value:
        return
    # '1,2' means both mates are in one interleaved file; nothing to pair.
    if value['paired_end'] in ['1,2']:
        return
    if 'paired_with' not in value:
        return
    # Mate must also be a fastq when this file is one.
    paired_with_file_format = value['paired_with'].get('file_format')
    if value.get('file_format') == 'fastq' and paired_with_file_format != 'fastq':
        detail = ('Both the files in a paired-end run must be fastq files. '
                  'Fastq file {} is paired with file {}, which is a {} file.'.format(
                      audit_link(path_to_text(value['@id']), value['@id']),
                      audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
                      paired_with_file_format
                  )
                  )
        yield AuditFailure('paired with non-fastq', detail, level='ERROR')
    # Replicate consistency checks only make sense if the mate has one.
    if 'replicate' not in value['paired_with']:
        return
    if 'replicate' not in value:
        detail = ('File {} has paired_end = {}. It requires a replicate'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            value['paired_end']
            )
        )
        yield AuditFailure('missing replicate', detail, level='INTERNAL_ACTION')
    elif value['replicate'].get('@id') != value['paired_with']['replicate']:
        detail = ('File {} has replicate {}. It is paired_with file {} with replicate {}'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            audit_link(path_to_text(value['replicate'].get('@id')), value['replicate'].get('@id')),
            audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
            audit_link(path_to_text(value['paired_with'].get('replicate')), value['paired_with'].get('replicate'))
            )
        )
        yield AuditFailure('inconsistent paired_with', detail, level='ERROR')
    if value['paired_end'] == '1':
        # A paired-end-1 file must be referenced by at most one mate;
        # rev links come from the request context, not the embedded object.
        context = system['context']
        paired_with = context.get_rev_links('paired_with')
        if len(paired_with) > 1:
            detail = ('Paired end 1 file {} paired_with by multiple paired end 2 files: {!r}'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                paired_with
                )
            )
            yield AuditFailure('multiple paired_with', detail, level='ERROR')
            return
    # Mates should contain the same number of reads (when both counts exist).
    file_read_count = value.get('read_count')
    paired_with_read_count = value['paired_with'].get('read_count')
    if (file_read_count and paired_with_read_count) and (file_read_count != paired_with_read_count):
        detail = ('File {} has {} reads. It is'
                  ' paired_with file {} that has {} reads'.format(
                      audit_link(path_to_text(value['@id']), value['@id']),
                      file_read_count,
                      audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
                      paired_with_read_count
                  )
                  )
        yield AuditFailure('inconsistent read count', detail, level='ERROR')
def audit_file_format_specifications(value, system):
    """Ensure attached documents are actual file format specifications.

    Yields at most one failure (stops at the first offending document).
    """
    for document in value.get('file_format_specifications', []):
        if document['document_type'] == "file format specification":
            continue
        detail = ('File {} has document {} not of type file format specification'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            audit_link(path_to_text(document['@id']), document['@id'])
            )
        )
        yield AuditFailure('inconsistent document_type', detail, level='ERROR')
        return
def audit_file_controlled_by(value, system):
    '''
    A fastq in a ChIP-seq experiment should have a controlled_by

    Also checks that each control file agrees with this file on biosample,
    file_format, possible_controls membership, run_type and read_length.
    '''
    # Only assays that use control experiments are audited.
    if value['dataset'].get('assay_term_name') not in ['ChIP-seq',
                                                       'RAMPAGE',
                                                       'CAGE',
                                                       'shRNA knockdown followed by RNA-seq',
                                                       'siRNA knockdown followed by RNA-seq',
                                                       'CRISPR genome editing followed by RNA-seq']:
        return
    if value['file_format'] not in ['fastq']:
        return
    # Files from control experiments themselves need no controlled_by.
    if 'target' in value['dataset'] and \
       'control' in value['dataset']['target'].get('investigated_as', []):
        return
    if not value.get('controlled_by'):
        detail = ('controlled_by is a list of files that are used as '
                  'controls for a given experimental file. '
                  'Fastq files generated in a {} assay require the '
                  'specification of control fastq file(s) in the controlled_by list. '
                  'Fastq file {} '
                  'is missing the requisite file specification in controlled_by list.'.format(
                      value['dataset']['assay_term_name'],
                      audit_link(path_to_text(value['@id']), value['@id'])
                  )
                  )
        yield AuditFailure('missing controlled_by', detail, level='NOT_COMPLIANT')
        return
    possible_controls = value['dataset'].get('possible_controls')
    biosample = value['dataset'].get('biosample_ontology', {}).get('term_id')
    biosample_term_name = value['dataset'].get('biosample_ontology', {}).get('term_name')
    run_type = value.get('run_type', None)
    read_length = value.get('read_length', None)
    if value['controlled_by']:
        for ff in value['controlled_by']:
            control_bs = ff['dataset'].get('biosample_ontology', {}).get('term_id')
            control_run = ff.get('run_type', None)
            control_length = ff.get('read_length', None)
            # Control must come from the same biosample.
            if control_bs != biosample:
                detail = ('controlled_by is a list of files that are used as controls for a given file. '
                          'This experiment was performed using {}, but '
                          'file {} contains in controlled_by list a file '
                          '{} that belongs to experiment with different biosample {}.'.format(
                              biosample_term_name,
                              audit_link(path_to_text(value['@id']), value['@id']),
                              audit_link(path_to_text(ff['@id']), ff['@id']),
                              ff['dataset'].get('biosample_ontology', {}).get('term_name')
                          )
                          )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            # Control must share the file format (fastq vs fastq).
            if ff['file_format'] != value['file_format']:
                detail = ('controlled_by is a list of files that are used as controls for a given file. '
                          'File {} with file_format {} contains in controlled_by list '
                          'a file {} with different file_format {}.'.format(
                              audit_link(path_to_text(value['@id']), value['@id']),
                              value['file_format'],
                              audit_link(path_to_text(ff['@id']), ff['@id']),
                              ff['file_format']
                          )
                          )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            # The control's experiment must be in this experiment's possible_controls.
            if (possible_controls is None) or (ff['dataset']['@id'] not in possible_controls):
                detail = ('possible_controls is a list of experiment(s) that can serve as '
                          'analytical controls for a given experiment. '
                          'controlled_by is a list of files that are used as '
                          'controls for a given file. '
                          'File {} contains in controlled_by list a file {} '
                          'that belongs to an experiment {} that '
                          'is not specified in possible_controls list of this experiment.'.format(
                              audit_link(path_to_text(value['@id']), value['@id']),
                              audit_link(path_to_text(ff['@id']), ff['@id']),
                              audit_link(path_to_text(ff['dataset']['@id']), ff['dataset']['@id'])
                          )
                          )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            # run_type / read_length comparisons need both sides present.
            if (run_type is None) or (control_run is None):
                continue
            if (read_length is None) or (control_length is None):
                continue
            if run_type != control_run and \
               value['dataset'].get('assay_term_name') not in ['RAMPAGE', 'CAGE']:
                detail = ('File {} is {} but its control file {} is {}.'.format(
                    audit_link(path_to_text(value['@id']), value['@id']),
                    run_type,
                    audit_link(path_to_text(ff['@id']), ff['@id']),
                    control_run
                    )
                )
                yield AuditFailure('inconsistent control run_type',
                                   detail, level='WARNING')
            # Read lengths may differ by up to 2 bp before warning.
            if read_length != control_length and \
               abs(read_length - control_length) > 2 and \
               value['dataset'].get('assay_term_name') not in \
                   ['shRNA knockdown followed by RNA-seq',
                    'siRNA knockdown followed by RNA-seq',
                    'CRISPR genome editing followed by RNA-seq']:
                detail = ('File {} is {} but its control file {} is {}.'.format(
                    audit_link(path_to_text(value['@id']), value['@id']),
                    value['read_length'],
                    audit_link(path_to_text(ff['@id']), ff['@id']),
                    ff['read_length']
                    )
                )
                yield AuditFailure('inconsistent control read length',
                                   detail, level='WARNING')
                return
def audit_duplicate_quality_metrics(value, system):
    """Flag files carrying more than one quality metric of the same
    (@type, processing_stage) signature."""
    metrics = value.get('quality_metrics')
    if not metrics:
        return
    seen = set()
    already_audited = set()
    for metric in metrics:
        metric_type = metric.get('@type', [None])[0]
        signature = (metric_type, metric.get('processing_stage'))
        if signature not in seen:
            seen.add(signature)
            continue
        # Yield only once per duplicated signature per file.
        if signature in already_audited:
            continue
        already_audited.add(signature)
        detail = ('File {} has more than one {} quality metric.'.format(
            audit_link(path_to_text(value.get('@id')), value.get('@id')),
            metric_type
            )
        )
        yield AuditFailure(
            'duplicate quality metric',
            detail,
            level='INTERNAL_ACTION'
        )
def audit_file_in_correct_bucket(value, system):
    """Check that the file's stored object lives in the bucket expected
    for its current status; flag a move otherwise.
    """
    request = system.get('request')
    # Resolve the real File item so it can report on its backing storage.
    file_item = request.root.get_by_uuid(value['uuid'])
    result, current_path, destination_path = file_item._file_in_correct_bucket(request)
    if not result:
        detail = ('Move {} file {} from {} to {}'.format(
            value.get('status'),
            value.get('accession', value.get('uuid')),
            current_path,
            destination_path
            )
        )
        yield AuditFailure(
            'incorrect file bucket',
            detail,
            level='INTERNAL_ACTION'
        )
def audit_read_structure(value, system):
    """Validate the read_structure coordinates on a sequencing file.

    read_structure is a list of sequence elements with 1-based start/end
    coordinates.  Yields an ERROR for any element that uses 0 as a
    coordinate or whose start lies beyond its end.
    """
    for element in value.get('read_structure', []):
        # Coordinates are 1-based, so 0 is never a valid position.
        if element['start'] == 0 or element['end'] == 0:
            # Fixed typo in the audit message: "read_stucture" -> "read_structure".
            detail = ('The read_structure is 1-based. '
                'Neither start or end can be 0 for sequence element {}.'.format(
                    element['sequence_element']
                )
            )
            yield AuditFailure(
                'invalid read_structure',
                detail,
                level='ERROR'
            )
        if element['start'] > element['end']:
            detail = ('The start coordinate is bigger than the end coordinate '
                'for sequence element {}.'.format(
                    element['sequence_element']
                )
            )
            yield AuditFailure(
                'invalid read_structure',
                detail,
                level='ERROR'
            )
def audit_file_matching_md5sum(value, system):
    '''
    Files with md5 sums matching other files should be marked with a WARNING audit.
    If the other files are listed as matching but in fact have different md5 sums,
    the file should be flagged with an ERROR for incorrect metadata.
    '''
    matching_files = []
    # Only audit files in "live" statuses.
    checked_statuses = ['released', 'revoked', 'archived', 'in progress']
    if 'matching_md5sum' not in value or value.get('status') not in checked_statuses:
        return
    for file in value.get('matching_md5sum'):
        if file.get('md5sum') != value.get('md5sum'):
            # Listed as matching but the sums actually differ: bad metadata.
            detail = ('File {} is listed as having a matching md5 sum '
                'as file {}, but the files have different md5 sums.'.format(
                    audit_link(path_to_text(file['@id']), file['@id']),
                    audit_link(path_to_text(value['@id']), value['@id'])
                )
            )
            yield AuditFailure('Incorrect matching_md5sum', detail, level='ERROR')
        else:
            matching_files.append(file['@id'])
    matching_files_links = [audit_link(path_to_text(file), file) for file in matching_files]
    if not matching_files:
        return
    elif len(matching_files) > 2:
        # Oxford-comma join for three or more confirmed matches.
        matching_files_joined = 'Files {}, and {}'.format(
            ', '.join(matching_files_links[:-1]),
            matching_files_links[-1]
        )
    else:
        matching_files_joined = ' and '.join(matching_files_links)
    detail = ('The md5 sum of file {} '
        'matches that of file(s) {}.'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            matching_files_joined
        )
    )
    yield AuditFailure('Matching md5 sums', detail, level='WARNING')
    return
# Dispatch table mapping an audit label to the generator function that
# implements it; audit_file() below runs them in insertion order.
function_dispatcher = {
    'audit_step_run': audit_file_processed_step_run,
    'audit_derived_from': audit_file_processed_derived_from,
    'audit_assembly': audit_file_assembly,
    'audit_replicate_match': audit_file_replicate_match,
    'audit_paired_with': audit_paired_with,
    'audit_specifications': audit_file_format_specifications,
    'audit_controlled_by': audit_file_controlled_by,
    'audit_duplicate_quality_metrics': audit_duplicate_quality_metrics,
    'audit_file_in_correct_bucket': audit_file_in_correct_bucket,
    'audit_read_structure': audit_read_structure,
    'audit_file_matching_md5sum': audit_file_matching_md5sum
}
@audit_checker('File',
               frame=['derived_from',
                      'replicate',
                      'library',
                      'paired_with',
                      'file_format_specifications',
                      'dataset',
                      'dataset.biosample_ontology',
                      'dataset.target',
                      'dataset.award',
                      'platform',
                      'controlled_by',
                      'controlled_by.replicate',
                      'controlled_by.dataset',
                      'controlled_by.dataset.biosample_ontology',
                      'controlled_by.paired_with',
                      'controlled_by.platform',
                      'quality_metrics',
                      'matching_md5sum',
                      ]
               )
def audit_file(value, system):
    """Run every registered file audit and pass its failures through."""
    for audit_function in function_dispatcher.values():
        yield from audit_function(value, system)
# def audit_file_chip_seq_control_read_depth(value, system):
# migrated to experiment https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_flowcells(value, system): # removed in version 56
# http://redmine.encodedcc.org/issues/5060
# def audit_modERN_ChIP_pipeline_steps(value, system):
# removed https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_pipeline_status(value, system): removed at release 56
# http://redmine.encodedcc.org/issues/5017
# def audit_file_md5sum_integrity(value, system): # removed release 55
# def audit_file_derived_from_revoked(value, system): removed at release 56
# http://redmine.encodedcc.org/issues/5018
# def audit_file_biological_replicate_number_match
# https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_platform(value, system): removed from release v56
ENCD-4854 Change severity of missing controlled by audit (#2953)
from snovault import (
AuditFailure,
audit_checker,
)
from .formatter import (
audit_link,
path_to_text,
)
from .item import STATUS_LEVEL
def check_award_condition(value, awards):
    """Return True if *value* (typically a dataset) has a non-empty award
    whose ``rfa`` is one of *awards*.

    The original evaluated ``value.get('award')`` twice and returned the
    (falsy) award object itself when no award was present; this version
    looks the award up once and always returns a bool, preserving
    truthiness for all callers.
    """
    award = value.get('award')
    return bool(award) and award.get('rfa') in awards
def audit_file_processed_step_run(value, system):
    """Audit that processed ENCODE3/ENCODE4 files record the
    analysis_step_run that produced them.

    Raw-data and reference files are exempt, as are files in statuses
    where provenance is not expected.
    """
    if value['status'] in ['replaced',
                           'deleted',
                           'revoked',
                           'uploading',
                           'content error',
                           'upload failed']:
        return
    if value['output_category'] in ['raw data',
                                    'reference']:
        return
    if check_award_condition(value.get('dataset'), ['ENCODE3', 'ENCODE4']):
        if 'step_run' not in value:
            detail = ('Missing analysis_step_run information in file {}.'.format(
                audit_link(path_to_text(value['@id']), value['@id'])
                )
            )
            # Files produced by the ENCODE processing pipeline must carry a
            # step_run (ERROR); for other labs it is only a WARNING.
            if value.get('lab', '') == '/labs/encode-processing-pipeline/':
                yield AuditFailure('missing analysis_step_run',
                                   detail, level='ERROR')
            else:
                yield AuditFailure('missing analysis_step_run',
                                   detail, level='WARNING')
def audit_file_processed_derived_from(value, system):
    """Audit the derived_from provenance of processed files.

    Every processed file must list the files it was derived from.  BAM
    files must additionally trace back to at least one raw parent
    (bam/fastq, or reads-type fasta/csfasta/csqual) whose status level is
    at least that of the file itself, and every such parent must belong
    to the same dataset.
    """
    if value['output_category'] in ['raw data',
                                    'reference']:
        return
    if 'derived_from' not in value or \
       'derived_from' in value and len(value['derived_from']) == 0:
        detail = ('derived_from is a list of files that were used to create a given file; '
            'for example, fastq file(s) will appear in the derived_from list of an '
            'alignments file. Processed file {} is missing the requisite file'
            ' specification in its derived_from list.'.format(
                audit_link(path_to_text(value['@id']), value['@id'])
            )
        )
        yield AuditFailure('missing derived_from',
                           detail, level='INTERNAL_ACTION')
        return
    if value['file_format'] != 'bam':
        return
    # Ignore replaced BAMs because missing derived_from logic should be applied to their
    # replacements instead (ENCD-3595).
    if value['status'] == 'replaced':
        return
    fastq_bam_counter = 0
    for f in value.get('derived_from'):
        if (f['file_format'] == 'bam'
                or f['file_format'] == 'fastq'
                or (f['file_format'] in ['fasta', 'csfasta', 'csqual']
                    and f['output_type'] == 'reads'
                    and f['output_category'] == 'raw data')):
            # Audit shouldn't trigger if status isn't registered in STATUS_LEVEL dict.
            # NOTE(review): this `return` aborts the entire audit (not just this
            # parent) as soon as one unregistered status is seen — confirm intended.
            if f['status'] not in STATUS_LEVEL or value['status'] not in STATUS_LEVEL:
                return
            if STATUS_LEVEL[f['status']] >= STATUS_LEVEL[value['status']]:
                fastq_bam_counter += 1
            if f['dataset'] != value['dataset'].get('@id'):
                detail = ('derived_from is a list of files that were used '
                    'to create a given file; '
                    'for example, fastq file(s) will appear in the '
                    'derived_from list of an '
                    'alignments file. '
                    'Alignments file {} '
                    'from experiment {} '
                    'specifies a file {} '
                    'from a different experiment {} '
                    'in its derived_from list.'.format(
                        audit_link(path_to_text(value['@id']), value['@id']),
                        audit_link(path_to_text(value['dataset']['@id']), value['dataset']['@id']),
                        audit_link(path_to_text(f['@id']), f['@id']),
                        audit_link(path_to_text(f['dataset']), f['dataset'])
                    )
                )
                yield AuditFailure('inconsistent derived_from',
                                   detail, level='INTERNAL_ACTION')
    if fastq_bam_counter == 0:
        detail = ('derived_from is a list of files that were used to create a given file; '
            'for example, fastq file(s) will appear in the derived_from list of an '
            'alignments file. Alignments file {} is missing the requisite '
            'file specification in its derived_from list.'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
            )
        )
        yield AuditFailure('missing derived_from',
                           detail, level='INTERNAL_ACTION')
def audit_file_assembly(value, system):
    """Warn when a processed file's assembly differs from that of a file
    it was derived from.

    Stops after the first mismatching parent, so at most one failure is
    yielded.
    """
    own_assembly = value.get('assembly')
    for parent in value.get('derived_from', []):
        parent_assembly = parent.get('assembly')
        # Only comparable when both sides declare an assembly.
        if not (own_assembly and parent_assembly):
            continue
        if parent_assembly == own_assembly:
            continue
        detail = ('Processed file {} assembly {} '
                  'does not match assembly {} of the file {} '
                  'it was derived from.'.format(
                      audit_link(path_to_text(value['@id']), value['@id']),
                      value['assembly'],
                      parent['assembly'],
                      audit_link(path_to_text(parent['@id']), parent['@id'])
                  )
                  )
        yield AuditFailure('inconsistent assembly',
                           detail, level='WARNING')
        return
def audit_file_replicate_match(value, system):
    '''
    A file's replicate should belong to the same experiment that the file
    does. These tend to get confused when replacing objects.
    '''
    if 'replicate' not in value:
        return
    # Experiment @id claimed by the replicate vs. the file's own dataset.
    rep_exp = value['replicate']['experiment']
    file_exp = value['dataset']['@id']
    if rep_exp != file_exp:
        detail = ('File {} from experiment {} '
            'is associated with replicate [{},{}] '
            '{}, but that replicate is associated with a different '
            'experiment {}.'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                audit_link(path_to_text(value['dataset']['@id']), value['dataset']['@id']),
                value['replicate']['biological_replicate_number'],
                value['replicate']['technical_replicate_number'],
                audit_link(path_to_text(value['replicate']['@id']), value['replicate']['@id']),
                audit_link(path_to_text(value['replicate']['experiment']), value['replicate']['experiment'])
            )
        )
        yield AuditFailure('inconsistent replicate', detail, level='ERROR')
    return
def audit_paired_with(value, system):
    '''
    A file with a paired_end needs a paired_with.
    Should be handled in the schema.
    A fastq file should be paired_with a fastq file.
    A paired_with should be the same replicate
    '''
    if 'paired_end' not in value:
        return
    # '1,2' marks an interleaved paired-end file; no partner file expected.
    if value['paired_end'] in ['1,2']:
        return
    if 'paired_with' not in value:
        return
    paired_with_file_format = value['paired_with'].get('file_format')
    if value.get('file_format') == 'fastq' and paired_with_file_format != 'fastq':
        detail = ('Both the files in a paired-end run must be fastq files. '
            'Fastq file {} is paired with file {}, which is a {} file.'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
                paired_with_file_format
            )
        )
        yield AuditFailure('paired with non-fastq', detail, level='ERROR')
    # Replicate consistency can only be checked if the partner has one.
    if 'replicate' not in value['paired_with']:
        return
    if 'replicate' not in value:
        detail = ('File {} has paired_end = {}. It requires a replicate'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            value['paired_end']
            )
        )
        yield AuditFailure('missing replicate', detail, level='INTERNAL_ACTION')
    elif value['replicate'].get('@id') != value['paired_with']['replicate']:
        detail = ('File {} has replicate {}. It is paired_with file {} with replicate {}'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            audit_link(path_to_text(value['replicate'].get('@id')), value['replicate'].get('@id')),
            audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
            audit_link(path_to_text(value['paired_with'].get('replicate')), value['paired_with'].get('replicate'))
            )
        )
        yield AuditFailure('inconsistent paired_with', detail, level='ERROR')
    if value['paired_end'] == '1':
        context = system['context']
        # Reverse links: every paired end 2 file pointing back at this one.
        paired_with = context.get_rev_links('paired_with')
        if len(paired_with) > 1:
            detail = ('Paired end 1 file {} paired_with by multiple paired end 2 files: {!r}'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                paired_with
                )
            )
            yield AuditFailure('multiple paired_with', detail, level='ERROR')
            return
    file_read_count = value.get('read_count')
    paired_with_read_count = value['paired_with'].get('read_count')
    if (file_read_count and paired_with_read_count) and (file_read_count != paired_with_read_count):
        detail = ('File {} has {} reads. It is'
            ' paired_with file {} that has {} reads'.format(
                audit_link(path_to_text(value['@id']), value['@id']),
                file_read_count,
                audit_link(path_to_text(value['paired_with']['@id']), value['paired_with']['@id']),
                paired_with_read_count
            )
        )
        yield AuditFailure('inconsistent read count', detail, level='ERROR')
def audit_file_format_specifications(value, system):
    """Ensure every attached specification document has document_type
    "file format specification".

    Stops at the first offending document.
    """
    for document in value.get('file_format_specifications', []):
        if document['document_type'] == "file format specification":
            continue
        detail = ('File {} has document {} not of type file format specification'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            audit_link(path_to_text(document['@id']), document['@id'])
            )
        )
        yield AuditFailure('inconsistent document_type', detail, level='ERROR')
        return
def audit_file_controlled_by(value, system):
    '''
    A fastq in a ChIP-seq experiment should have a controlled_by
    '''
    # Only assays that use control experiments are audited.
    if value['dataset'].get('assay_term_name') not in ['ChIP-seq',
                                                       'RAMPAGE',
                                                       'CAGE',
                                                       'shRNA knockdown followed by RNA-seq',
                                                       'siRNA knockdown followed by RNA-seq',
                                                       'CRISPR genome editing followed by RNA-seq']:
        return
    if value['file_format'] not in ['fastq']:
        return
    # Control experiments themselves do not need controls.
    if 'target' in value['dataset'] and \
       'control' in value['dataset']['target'].get('investigated_as', []):
        return
    if not value.get('controlled_by'):
        detail = ('controlled_by is a list of files that are used as '
            'controls for a given experimental file. '
            'Fastq files generated in a {} assay require the '
            'specification of control fastq file(s) in the controlled_by list. '
            'Fastq file {} '
            'is missing the requisite file specification in controlled_by list.'.format(
                value['dataset']['assay_term_name'],
                audit_link(path_to_text(value['@id']), value['@id'])
            )
        )
        yield AuditFailure('missing controlled_by', detail, level='WARNING')
        return
    possible_controls = value['dataset'].get('possible_controls')
    biosample = value['dataset'].get('biosample_ontology', {}).get('term_id')
    biosample_term_name = value['dataset'].get('biosample_ontology', {}).get('term_name')
    run_type = value.get('run_type', None)
    read_length = value.get('read_length', None)
    if value['controlled_by']:
        for ff in value['controlled_by']:
            control_bs = ff['dataset'].get('biosample_ontology', {}).get('term_id')
            control_run = ff.get('run_type', None)
            control_length = ff.get('read_length', None)
            # The first hard inconsistency (biosample, format, or
            # possible_controls membership) ends the whole audit.
            if control_bs != biosample:
                detail = ('controlled_by is a list of files that are used as controls for a given file. '
                    'This experiment was performed using {}, but '
                    'file {} contains in controlled_by list a file '
                    '{} that belongs to experiment with different biosample {}.'.format(
                        biosample_term_name,
                        audit_link(path_to_text(value['@id']), value['@id']),
                        audit_link(path_to_text(ff['@id']), ff['@id']),
                        ff['dataset'].get('biosample_ontology', {}).get('term_name')
                    )
                )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            if ff['file_format'] != value['file_format']:
                detail = ('controlled_by is a list of files that are used as controls for a given file. '
                    'File {} with file_format {} contains in controlled_by list '
                    'a file {} with different file_format {}.'.format(
                        audit_link(path_to_text(value['@id']), value['@id']),
                        value['file_format'],
                        audit_link(path_to_text(ff['@id']), ff['@id']),
                        ff['file_format']
                    )
                )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            if (possible_controls is None) or (ff['dataset']['@id'] not in possible_controls):
                detail = ('possible_controls is a list of experiment(s) that can serve as '
                    'analytical controls for a given experiment. '
                    'controlled_by is a list of files that are used as '
                    'controls for a given file. '
                    'File {} contains in controlled_by list a file {} '
                    'that belongs to an experiment {} that '
                    'is not specified in possible_controls list of this experiment.'.format(
                        audit_link(path_to_text(value['@id']), value['@id']),
                        audit_link(path_to_text(ff['@id']), ff['@id']),
                        audit_link(path_to_text(ff['dataset']['@id']), ff['dataset']['@id'])
                    )
                )
                yield AuditFailure('inconsistent control', detail, level='ERROR')
                return
            # Softer run_type/read_length checks need both sides populated.
            if (run_type is None) or (control_run is None):
                continue
            if (read_length is None) or (control_length is None):
                continue
            if run_type != control_run and \
               value['dataset'].get('assay_term_name') not in ['RAMPAGE', 'CAGE']:
                detail = ('File {} is {} but its control file {} is {}.'.format(
                    audit_link(path_to_text(value['@id']), value['@id']),
                    run_type,
                    audit_link(path_to_text(ff['@id']), ff['@id']),
                    control_run
                    )
                )
                yield AuditFailure('inconsistent control run_type',
                                   detail, level='WARNING')
            # Allow up to 2 bp of read-length difference before warning.
            if read_length != control_length and \
               abs(read_length - control_length) > 2 and \
               value['dataset'].get('assay_term_name') not in \
               ['shRNA knockdown followed by RNA-seq',
                'siRNA knockdown followed by RNA-seq',
                'CRISPR genome editing followed by RNA-seq']:
                detail = ('File {} is {} but its control file {} is {}.'.format(
                    audit_link(path_to_text(value['@id']), value['@id']),
                    value['read_length'],
                    audit_link(path_to_text(ff['@id']), ff['@id']),
                    ff['read_length']
                    )
                )
                yield AuditFailure('inconsistent control read length',
                                   detail, level='WARNING')
        return
def audit_duplicate_quality_metrics(value, system):
    """Flag files carrying more than one quality metric with the same
    @type and processing_stage signature.
    """
    quality_metrics = value.get('quality_metrics')
    if not quality_metrics:
        return
    metric_signatures = []
    audit_signatures = []
    for metric in quality_metrics:
        metric_type = metric.get('@type', [None])[0]
        signature = (
            metric_type,
            metric.get('processing_stage')
        )
        if signature not in metric_signatures:
            metric_signatures.append(signature)
        elif signature not in audit_signatures:
            # Add so only yields audit once per signature per file.
            audit_signatures.append(signature)
            detail = ('File {} has more than one {} quality metric.'.format(
                audit_link(path_to_text(value.get('@id')), value.get('@id')),
                metric_type
                )
            )
            yield AuditFailure(
                'duplicate quality metric',
                detail,
                level='INTERNAL_ACTION'
            )
def audit_file_in_correct_bucket(value, system):
    """Flag files whose stored object sits in the wrong bucket for their
    current status.
    """
    request = system.get('request')
    # Look up the real File item so it can report on its storage location.
    file_item = request.root.get_by_uuid(value['uuid'])
    in_place, current_path, destination_path = \
        file_item._file_in_correct_bucket(request)
    if in_place:
        return
    detail = 'Move {} file {} from {} to {}'.format(
        value.get('status'),
        value.get('accession', value.get('uuid')),
        current_path,
        destination_path,
    )
    yield AuditFailure(
        'incorrect file bucket',
        detail,
        level='INTERNAL_ACTION'
    )
def audit_read_structure(value, system):
    """Validate the read_structure coordinates on a sequencing file.

    read_structure is a list of sequence elements with 1-based start/end
    coordinates.  Yields an ERROR for any element that uses 0 as a
    coordinate or whose start lies beyond its end.
    """
    for element in value.get('read_structure', []):
        # Coordinates are 1-based, so 0 is never a valid position.
        if element['start'] == 0 or element['end'] == 0:
            # Fixed typo in the audit message: "read_stucture" -> "read_structure".
            detail = ('The read_structure is 1-based. '
                'Neither start or end can be 0 for sequence element {}.'.format(
                    element['sequence_element']
                )
            )
            yield AuditFailure(
                'invalid read_structure',
                detail,
                level='ERROR'
            )
        if element['start'] > element['end']:
            detail = ('The start coordinate is bigger than the end coordinate '
                'for sequence element {}.'.format(
                    element['sequence_element']
                )
            )
            yield AuditFailure(
                'invalid read_structure',
                detail,
                level='ERROR'
            )
def audit_file_matching_md5sum(value, system):
    """
    Files with md5 sums matching other files should be marked with a WARNING audit.
    If the other files are listed as matching but in fact have different md5 sums,
    the file should be flagged with an ERROR for incorrect metadata.
    """
    # Only audit files in "live" statuses.
    checked_statuses = ['released', 'revoked', 'archived', 'in progress']
    if 'matching_md5sum' not in value or value.get('status') not in checked_statuses:
        return
    own_md5 = value.get('md5sum')
    confirmed = []
    for candidate in value.get('matching_md5sum'):
        if candidate.get('md5sum') == own_md5:
            confirmed.append(candidate['@id'])
        else:
            # Listed as matching but the sums actually differ: bad metadata.
            detail = ('File {} is listed as having a matching md5 sum '
                'as file {}, but the files have different md5 sums.'.format(
                    audit_link(path_to_text(candidate['@id']), candidate['@id']),
                    audit_link(path_to_text(value['@id']), value['@id'])
                )
            )
            yield AuditFailure('Incorrect matching_md5sum', detail, level='ERROR')
    if not confirmed:
        return
    links = [audit_link(path_to_text(path), path) for path in confirmed]
    if len(links) > 2:
        # Oxford-comma join for three or more confirmed matches.
        joined = 'Files {}, and {}'.format(', '.join(links[:-1]), links[-1])
    else:
        joined = ' and '.join(links)
    detail = ('The md5 sum of file {} '
        'matches that of file(s) {}.'.format(
            audit_link(path_to_text(value['@id']), value['@id']),
            joined
        )
    )
    yield AuditFailure('Matching md5 sums', detail, level='WARNING')
# Dispatch table mapping an audit label to the generator function that
# implements it; audit_file() below runs them in insertion order.
function_dispatcher = {
    'audit_step_run': audit_file_processed_step_run,
    'audit_derived_from': audit_file_processed_derived_from,
    'audit_assembly': audit_file_assembly,
    'audit_replicate_match': audit_file_replicate_match,
    'audit_paired_with': audit_paired_with,
    'audit_specifications': audit_file_format_specifications,
    'audit_controlled_by': audit_file_controlled_by,
    'audit_duplicate_quality_metrics': audit_duplicate_quality_metrics,
    'audit_file_in_correct_bucket': audit_file_in_correct_bucket,
    'audit_read_structure': audit_read_structure,
    'audit_file_matching_md5sum': audit_file_matching_md5sum
}
@audit_checker('File',
               frame=['derived_from',
                      'replicate',
                      'library',
                      'paired_with',
                      'file_format_specifications',
                      'dataset',
                      'dataset.biosample_ontology',
                      'dataset.target',
                      'dataset.award',
                      'platform',
                      'controlled_by',
                      'controlled_by.replicate',
                      'controlled_by.dataset',
                      'controlled_by.dataset.biosample_ontology',
                      'controlled_by.paired_with',
                      'controlled_by.platform',
                      'quality_metrics',
                      'matching_md5sum',
                      ]
               )
def audit_file(value, system):
    """Run every registered file audit and re-yield its failures."""
    for audit_function in function_dispatcher.values():
        yield from audit_function(value, system)
# def audit_file_chip_seq_control_read_depth(value, system):
# migrated to experiment https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_flowcells(value, system): # removed in version 56
# http://redmine.encodedcc.org/issues/5060
# def audit_modERN_ChIP_pipeline_steps(value, system):
# removed https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_pipeline_status(value, system): removed at release 56
# http://redmine.encodedcc.org/issues/5017
# def audit_file_md5sum_integrity(value, system): # removed release 55
# def audit_file_derived_from_revoked(value, system): removed at release 56
# http://redmine.encodedcc.org/issues/5018
# def audit_file_biological_replicate_number_match
# https://encodedcc.atlassian.net/browse/ENCD-3493
# def audit_file_platform(value, system): removed from release v56
|
import re
from collections import OrderedDict
import osgtest.library.core as core
import osgtest.library.yum as yum
import osgtest.library.osgunittest as osgunittest
class TestInstall(osgunittest.OSGTestCase):
    """Install (and optionally update) the packages under test."""

    def test_01_yum_repositories(self):
        """Verify the EPEL and OSG release RPMs are intact."""
        pre = ('rpm', '--verify', '--nomd5', '--nosize', '--nomtime')
        core.check_system(pre + ('epel-release',), 'Verify epel-release')
        # If osg-release isn't installed, try osg-release-itb
        try:
            core.check_system(pre + ('osg-release',), 'Verify osg-release')
        except AssertionError:
            core.check_system(pre + ('osg-release-itb',), 'Verify osg-release + osg-release-itb')
        core.config['install.original-release-ver'] = core.osg_release()

    def test_02_install_packages(self):
        """Install every requested package, recording yum transaction ids."""
        core.state['install.success'] = False
        core.state['install.installed'] = []
        core.state['install.updated'] = []
        core.state['install.replace'] = []
        core.state['install.orphaned'] = []
        core.state['install.os_updates'] = []
        # Install packages
        core.state['install.transaction_ids'] = []
        fail_msg = ''
        pkg_repo_dict = OrderedDict((x, core.options.extrarepos) for x in core.options.packages)
        # FIXME: Install slurm out of contrib if we're running 'All' tests until
        # SOFTWARE-1733 gives us a generalized solution
        if 'osg-tested-internal' in pkg_repo_dict:
            all_slurm_packages = core.SLURM_PACKAGES + ['slurm-slurmdbd']
            pkg_repo_dict.update(dict((x, ['osg-development']) for x in all_slurm_packages))
        for pkg, repos in pkg_repo_dict.items():
            # Do not try to re-install packages
            if core.rpm_is_installed(pkg):
                continue
            # Attempt installation
            command = ['yum', '-y']
            command += ['--enablerepo=%s' % x for x in repos]
            command += ['install', pkg]
            retry_fail, _, stdout, _ = yum.retry_command(command)
            if retry_fail == '':  # the command succeeded
                core.state['install.transaction_ids'].append(yum.get_transaction_id())
                command = ('rpm', '--verify', pkg)
                core.check_system(command, 'Verify %s' % (pkg))
                yum.parse_output_for_packages(stdout)
            fail_msg += retry_fail
        if fail_msg:
            self.fail(fail_msg)
        core.state['install.success'] = True

    def test_03_update_osg_release(self):
        """Optionally upgrade osg-release to core.options.updaterelease."""
        core.state['install.release-updated'] = False
        if not core.options.updaterelease:
            return
        self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')
        command = ['rpm', '-e', 'osg-release']
        core.check_system(command, 'Erase osg-release')
        # Fixes: raw string avoids the invalid '\d' escape sequence, and
        # assertTrue replaces the assert_ alias removed in Python 3.12.
        self.assertTrue(re.match(r'\d+\.\d+', core.options.updaterelease), "Unrecognized updaterelease format")
        rpm_url = 'https://repo.opensciencegrid.org/osg/' + core.options.updaterelease + '/osg-' + \
                  core.options.updaterelease + '-el' + str(core.el_release()) + '-release-latest.rpm'
        command = ['rpm', '-Uvh', rpm_url]
        core.check_system(command, 'Update osg-release')
        core.config['yum.clean_repos'] = ['osg'] + core.options.updaterepos
        yum.clean(*core.config['yum.clean_repos'])
        # If update repos weren't specified, just use osg-release
        if not core.options.updaterepos:
            core.options.updaterepos = ['osg']
        core.state['install.release-updated'] = True
        core.osg_release(update_state=True)

    def test_04_remove_bestman2_server_dep_libs(self):
        """Drop bestman2-server-dep-libs before a 3.4 update."""
        if core.options.updaterelease != "3.4":
            return
        # bestman2 and jetty have been dropped from OSG 3.4. bestman2-server-dep-libs requires a version of jetty-http
        # less than what's available in EPEL, which causes `yum update` fails. We no longer care about bestman2 so we
        # can just remove the offending package
        command = ['yum', '-y', 'remove', 'bestman2-server-dep-libs']
        core.check_system(command, "Failed to remove bestman2-server-dep-libs")

    def test_04_update_packages(self):
        """Run `yum update` against the requested update repositories."""
        if not (core.options.updaterepos and core.state['install.installed']):
            return
        self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')
        # Update packages
        command = ['yum', 'update', '-y']
        for repo in core.options.updaterepos:
            command.append('--enablerepo=%s' % repo)
        fail_msg, status, stdout, stderr = yum.retry_command(command)
        yum.parse_output_for_packages(stdout)
        if fail_msg:
            self.fail(fail_msg)
        else:
            core.state['install.transaction_ids'].append(yum.get_transaction_id())
allow installing slurm without all of osg-tested-internal (SOFTWARE-3347)
import re
from collections import OrderedDict
import osgtest.library.core as core
import osgtest.library.yum as yum
import osgtest.library.osgunittest as osgunittest
class TestInstall(osgunittest.OSGTestCase):
def test_01_yum_repositories(self):
pre = ('rpm', '--verify', '--nomd5', '--nosize', '--nomtime')
core.check_system(pre + ('epel-release',), 'Verify epel-release')
# If osg-release isn't installed, try osg-release-itb
try:
core.check_system(pre + ('osg-release',), 'Verify osg-release')
except AssertionError:
core.check_system(pre + ('osg-release-itb',), 'Verify osg-release + osg-release-itb')
core.config['install.original-release-ver'] = core.osg_release()
def test_02_install_packages(self):
core.state['install.success'] = False
core.state['install.installed'] = []
core.state['install.updated'] = []
core.state['install.replace'] = []
core.state['install.orphaned'] = []
core.state['install.os_updates'] = []
# Install packages
core.state['install.transaction_ids'] = []
fail_msg = ''
pkg_repo_dict = OrderedDict((x, core.options.extrarepos) for x in core.options.packages)
# FIXME: Install slurm out of contrib if we're running 'All' tests until
# SOFTWARE-1733 gives us a generalized solution
if 'osg-tested-internal' in pkg_repo_dict or 'slurm' in pkg_repo_dict:
all_slurm_packages = core.SLURM_PACKAGES + ['slurm-slurmdbd']
pkg_repo_dict.update(dict((x, ['osg-development']) for x in all_slurm_packages))
for pkg, repos in pkg_repo_dict.items():
# Do not try to re-install packages
if core.rpm_is_installed(pkg):
continue
# Attempt installation
command = ['yum', '-y']
command += ['--enablerepo=%s' % x for x in repos]
command += ['install', pkg]
retry_fail, _, stdout, _ = yum.retry_command(command)
if retry_fail == '': # the command succeeded
core.state['install.transaction_ids'].append(yum.get_transaction_id())
command = ('rpm', '--verify', pkg)
core.check_system(command, 'Verify %s' % (pkg))
yum.parse_output_for_packages(stdout)
fail_msg += retry_fail
if fail_msg:
self.fail(fail_msg)
core.state['install.success'] = True
def test_03_update_osg_release(self):
core.state['install.release-updated'] = False
if not core.options.updaterelease:
return
self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')
command = ['rpm', '-e', 'osg-release']
core.check_system(command, 'Erase osg-release')
self.assert_(re.match('\d+\.\d+', core.options.updaterelease), "Unrecognized updaterelease format")
rpm_url = 'https://repo.opensciencegrid.org/osg/' + core.options.updaterelease + '/osg-' + \
core.options.updaterelease + '-el' + str(core.el_release()) + '-release-latest.rpm'
command = ['rpm', '-Uvh', rpm_url]
core.check_system(command, 'Update osg-release')
core.config['yum.clean_repos'] = ['osg'] + core.options.updaterepos
yum.clean(*core.config['yum.clean_repos'])
# If update repos weren't specified, just use osg-release
if not core.options.updaterepos:
core.options.updaterepos = ['osg']
core.state['install.release-updated'] = True
core.osg_release(update_state=True)
def test_04_remove_bestman2_server_dep_libs(self):
if core.options.updaterelease != "3.4":
return
# bestman2 and jetty have been dropped from OSG 3.4. bestman2-server-dep-libs requires a version of jetty-http
# less than what's available in EPEL, which causes `yum update` fails. We no longer care about bestman2 so we
# can just remove the offending package
command = ['yum', '-y', 'remove', 'bestman2-server-dep-libs']
core.check_system(command, "Failed to remove bestman2-server-dep-libs")
def test_04_update_packages(self):
    """Run `yum update` with the requested update repositories enabled.

    No-op unless update repos were requested and packages were installed;
    skipped when the earlier install test failed. On success the yum
    transaction id is recorded in core.state.
    """
    if not (core.options.updaterepos and core.state['install.installed']):
        return
    self.skip_bad_unless(core.state['install.success'], 'Install did not succeed')
    # Update packages
    command = ['yum', 'update', '-y']
    for repo in core.options.updaterepos:
        command.append('--enablerepo=%s' % repo)
    fail_msg, status, stdout, stderr = yum.retry_command(command)
    yum.parse_output_for_packages(stdout)
    if fail_msg:
        self.fail(fail_msg)
    else:
        # Record this update's transaction id (presumably used by later
        # cleanup to roll the update back — see the install test above).
        core.state['install.transaction_ids'].append(yum.get_transaction_id())
|
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# css_parser build target: Google's CSS parser plus the bundled libutf and
# UnicodeText sources it depends on.
{
  'variables': {
    # chromium_code indicates that the code is not
    # third-party code and should be subjected to strict compiler
    # warnings/errors in order to catch programming mistakes.
    'chromium_code': 1,
    'css_parser_root': 'src',
  },
  'targets': [
    {
      'target_name': 'css_parser',
      'type': '<(library)',
      'dependencies': [
        '<(DEPTH)/base/base.gyp:base',
        '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
        '<(DEPTH)/third_party/google-sparsehash/google-sparsehash.gyp:include',
      ],
      # For .h files included in these .h files.
      'direct_dependent_settings': {
        'include_dirs': [
          # TODO(sligocki): Import these from google-sparsehash.gyp
          '<(DEPTH)/third_party/google-sparsehash/gen/arch/<(OS)/<(target_arch)/include',
          '<(DEPTH)/third_party/google-sparsehash/src',
        ],
      },
      'include_dirs': [
        '<(css_parser_root)',
        '<(DEPTH)',
      ],
      # -funsigned-char makes plain char unsigned on all platforms,
      # presumably because this code assumes unsigned char semantics
      # (as in the upstream Google build) — matches the sibling copy of
      # this file.
      'cflags': ['-funsigned-char', '-Wno-sign-compare', '-Wno-return-type'],
      'sources': [
        '<(css_parser_root)/string_using.h',
        '<(css_parser_root)/webutil/css/identifier.cc',
        '<(css_parser_root)/webutil/css/identifier.h',
        '<(css_parser_root)/webutil/css/parser.cc',
        '<(css_parser_root)/webutil/css/parser.h',
        '<(css_parser_root)/webutil/css/property.cc',
        '<(css_parser_root)/webutil/css/property.h',
        '<(css_parser_root)/webutil/css/selector.cc',
        '<(css_parser_root)/webutil/css/selector.h',
        '<(css_parser_root)/webutil/css/string.h',
        '<(css_parser_root)/webutil/css/string_util.cc',
        '<(css_parser_root)/webutil/css/string_util.h',
        '<(css_parser_root)/webutil/css/tostring.cc',
        '<(css_parser_root)/webutil/css/util.cc',
        '<(css_parser_root)/webutil/css/util.h',
        '<(css_parser_root)/webutil/css/value.cc',
        '<(css_parser_root)/webutil/css/value.h',
        '<(css_parser_root)/webutil/css/valuevalidator.cc',
        '<(css_parser_root)/webutil/css/valuevalidator.h',
        #'<(css_parser_root)/webutil/css/parse_arg.cc',
        # Tests
        #'<(css_parser_root)/webutil/css/gtest_main.cc',
        #'<(css_parser_root)/webutil/css/identifier_test.cc',
        #'<(css_parser_root)/webutil/css/parser_unittest.cc',
        #'<(css_parser_root)/webutil/css/property_test.cc',
        #'<(css_parser_root)/webutil/css/tostring_test.cc',
        #'<(css_parser_root)/webutil/css/util_test.cc',
        #'<(css_parser_root)/webutil/css/valuevalidator_test.cc',
        '<(css_parser_root)/webutil/html/htmlcolor.cc',
        '<(css_parser_root)/webutil/html/htmlcolor.h',
        '<(css_parser_root)/webutil/html/htmltagenum.cc',
        '<(css_parser_root)/webutil/html/htmltagenum.h',
        '<(css_parser_root)/webutil/html/htmltagindex.cc',
        '<(css_parser_root)/webutil/html/htmltagindex.h',
        # UnicodeText
        '<(css_parser_root)/util/utf8/internal/unicodetext.cc',
        '<(css_parser_root)/util/utf8/internal/unilib.cc',
        '<(css_parser_root)/util/utf8/public/config.h',
        '<(css_parser_root)/util/utf8/public/unicodetext.h',
        '<(css_parser_root)/util/utf8/public/unilib.h',
        # libutf
        #'<(css_parser_root)/third_party/utf/Make.Linux-x86_64',
        #'<(css_parser_root)/third_party/utf/Makefile',
        #'<(css_parser_root)/third_party/utf/NOTICE',
        #'<(css_parser_root)/third_party/utf/README',
        '<(css_parser_root)/third_party/utf/rune.c',
        '<(css_parser_root)/third_party/utf/runestrcat.c',
        '<(css_parser_root)/third_party/utf/runestrchr.c',
        '<(css_parser_root)/third_party/utf/runestrcmp.c',
        '<(css_parser_root)/third_party/utf/runestrcpy.c',
        '<(css_parser_root)/third_party/utf/runestrecpy.c',
        '<(css_parser_root)/third_party/utf/runestrlen.c',
        '<(css_parser_root)/third_party/utf/runestrncat.c',
        '<(css_parser_root)/third_party/utf/runestrncmp.c',
        '<(css_parser_root)/third_party/utf/runestrncpy.c',
        '<(css_parser_root)/third_party/utf/runestrrchr.c',
        '<(css_parser_root)/third_party/utf/runestrstr.c',
        '<(css_parser_root)/third_party/utf/runetype.c',
        # TODO(sligocki): What is the correct format for this?
        # runetypebody-5.0.0.c should not be compiled by itself, only #included.
        #'<(css_parser_root)/third_party/utf/runetypebody-5.0.0.c',
        '<(css_parser_root)/third_party/utf/utf.h',
        '<(css_parser_root)/third_party/utf/utfdef.h',
        '<(css_parser_root)/third_party/utf/utfecpy.c',
        '<(css_parser_root)/third_party/utf/utflen.c',
        '<(css_parser_root)/third_party/utf/utfnlen.c',
        '<(css_parser_root)/third_party/utf/utfrrune.c',
        '<(css_parser_root)/third_party/utf/utfrune.c',
        '<(css_parser_root)/third_party/utf/utfutf.c',
        # Supporting interfaces.
        '<(css_parser_root)/base/commandlineflags.h',
        '<(css_parser_root)/base/googleinit.h',
        '<(css_parser_root)/base/macros.h',
        '<(css_parser_root)/base/paranoid.h',
        '<(css_parser_root)/base/stringprintf.h',
        '<(css_parser_root)/string_using.h',
        '<(css_parser_root)/strings/ascii_ctype.cc',
        '<(css_parser_root)/strings/ascii_ctype.h',
        '<(css_parser_root)/strings/escaping.h',
        '<(css_parser_root)/strings/join.h',
        '<(css_parser_root)/strings/memutil.h',
        '<(css_parser_root)/strings/stringpiece.h',
        '<(css_parser_root)/strings/stringpiece_utils.cc',
        '<(css_parser_root)/strings/stringpiece_utils.h',
        '<(css_parser_root)/strings/stringprintf.h',
        '<(css_parser_root)/strings/strutil.h',
        #'<(css_parser_root)/testing/base/public/googletest.h',
        #'<(css_parser_root)/testing/base/public/gunit.h',
        '<(css_parser_root)/testing/production_stub/public/gunit_prod.h',
        '<(css_parser_root)/util/gtl/dense_hash_map.h',
        '<(css_parser_root)/util/gtl/map-util.h',
        '<(css_parser_root)/util/gtl/singleton.h',
        '<(css_parser_root)/util/gtl/stl_util-inl.h',
        '<(css_parser_root)/util/hash/hash.h',
      ],
    },
  ],
}
Add -funsigned-char to the css_parser cflags so plain char is unsigned on all platforms.
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# css_parser build target: Google's CSS parser plus the bundled libutf and
# UnicodeText sources it depends on.
{
  'variables': {
    # chromium_code indicates that the code is not
    # third-party code and should be subjected to strict compiler
    # warnings/errors in order to catch programming mistakes.
    'chromium_code': 1,
    'css_parser_root': 'src',
  },
  'targets': [
    {
      'target_name': 'css_parser',
      'type': '<(library)',
      'dependencies': [
        '<(DEPTH)/base/base.gyp:base',
        '<(DEPTH)/third_party/gflags/gflags.gyp:gflags',
        '<(DEPTH)/third_party/google-sparsehash/google-sparsehash.gyp:include',
      ],
      # For .h files included in these .h files.
      'direct_dependent_settings': {
        'include_dirs': [
          # TODO(sligocki): Import these from google-sparsehash.gyp
          '<(DEPTH)/third_party/google-sparsehash/gen/arch/<(OS)/<(target_arch)/include',
          '<(DEPTH)/third_party/google-sparsehash/src',
        ],
      },
      'include_dirs': [
        '<(css_parser_root)',
        '<(DEPTH)',
      ],
      # -funsigned-char makes plain char unsigned on all platforms,
      # presumably because this code assumes unsigned char semantics —
      # confirm against the upstream Google build.
      'cflags': ['-funsigned-char', '-Wno-sign-compare', '-Wno-return-type'],
      'sources': [
        '<(css_parser_root)/string_using.h',
        '<(css_parser_root)/webutil/css/identifier.cc',
        '<(css_parser_root)/webutil/css/identifier.h',
        '<(css_parser_root)/webutil/css/parser.cc',
        '<(css_parser_root)/webutil/css/parser.h',
        '<(css_parser_root)/webutil/css/property.cc',
        '<(css_parser_root)/webutil/css/property.h',
        '<(css_parser_root)/webutil/css/selector.cc',
        '<(css_parser_root)/webutil/css/selector.h',
        '<(css_parser_root)/webutil/css/string.h',
        '<(css_parser_root)/webutil/css/string_util.cc',
        '<(css_parser_root)/webutil/css/string_util.h',
        '<(css_parser_root)/webutil/css/tostring.cc',
        '<(css_parser_root)/webutil/css/util.cc',
        '<(css_parser_root)/webutil/css/util.h',
        '<(css_parser_root)/webutil/css/value.cc',
        '<(css_parser_root)/webutil/css/value.h',
        '<(css_parser_root)/webutil/css/valuevalidator.cc',
        '<(css_parser_root)/webutil/css/valuevalidator.h',
        #'<(css_parser_root)/webutil/css/parse_arg.cc',
        # Tests
        #'<(css_parser_root)/webutil/css/gtest_main.cc',
        #'<(css_parser_root)/webutil/css/identifier_test.cc',
        #'<(css_parser_root)/webutil/css/parser_unittest.cc',
        #'<(css_parser_root)/webutil/css/property_test.cc',
        #'<(css_parser_root)/webutil/css/tostring_test.cc',
        #'<(css_parser_root)/webutil/css/util_test.cc',
        #'<(css_parser_root)/webutil/css/valuevalidator_test.cc',
        '<(css_parser_root)/webutil/html/htmlcolor.cc',
        '<(css_parser_root)/webutil/html/htmlcolor.h',
        '<(css_parser_root)/webutil/html/htmltagenum.cc',
        '<(css_parser_root)/webutil/html/htmltagenum.h',
        '<(css_parser_root)/webutil/html/htmltagindex.cc',
        '<(css_parser_root)/webutil/html/htmltagindex.h',
        # UnicodeText
        '<(css_parser_root)/util/utf8/internal/unicodetext.cc',
        '<(css_parser_root)/util/utf8/internal/unilib.cc',
        '<(css_parser_root)/util/utf8/public/config.h',
        '<(css_parser_root)/util/utf8/public/unicodetext.h',
        '<(css_parser_root)/util/utf8/public/unilib.h',
        # libutf
        #'<(css_parser_root)/third_party/utf/Make.Linux-x86_64',
        #'<(css_parser_root)/third_party/utf/Makefile',
        #'<(css_parser_root)/third_party/utf/NOTICE',
        #'<(css_parser_root)/third_party/utf/README',
        '<(css_parser_root)/third_party/utf/rune.c',
        '<(css_parser_root)/third_party/utf/runestrcat.c',
        '<(css_parser_root)/third_party/utf/runestrchr.c',
        '<(css_parser_root)/third_party/utf/runestrcmp.c',
        '<(css_parser_root)/third_party/utf/runestrcpy.c',
        '<(css_parser_root)/third_party/utf/runestrecpy.c',
        '<(css_parser_root)/third_party/utf/runestrlen.c',
        '<(css_parser_root)/third_party/utf/runestrncat.c',
        '<(css_parser_root)/third_party/utf/runestrncmp.c',
        '<(css_parser_root)/third_party/utf/runestrncpy.c',
        '<(css_parser_root)/third_party/utf/runestrrchr.c',
        '<(css_parser_root)/third_party/utf/runestrstr.c',
        '<(css_parser_root)/third_party/utf/runetype.c',
        # TODO(sligocki): What is the correct format for this?
        # runetypebody-5.0.0.c should not be compiled by itself, only #included.
        #'<(css_parser_root)/third_party/utf/runetypebody-5.0.0.c',
        '<(css_parser_root)/third_party/utf/utf.h',
        '<(css_parser_root)/third_party/utf/utfdef.h',
        '<(css_parser_root)/third_party/utf/utfecpy.c',
        '<(css_parser_root)/third_party/utf/utflen.c',
        '<(css_parser_root)/third_party/utf/utfnlen.c',
        '<(css_parser_root)/third_party/utf/utfrrune.c',
        '<(css_parser_root)/third_party/utf/utfrune.c',
        '<(css_parser_root)/third_party/utf/utfutf.c',
        # Supporting interfaces.
        '<(css_parser_root)/base/commandlineflags.h',
        '<(css_parser_root)/base/googleinit.h',
        '<(css_parser_root)/base/macros.h',
        '<(css_parser_root)/base/paranoid.h',
        '<(css_parser_root)/base/stringprintf.h',
        '<(css_parser_root)/string_using.h',
        '<(css_parser_root)/strings/ascii_ctype.cc',
        '<(css_parser_root)/strings/ascii_ctype.h',
        '<(css_parser_root)/strings/escaping.h',
        '<(css_parser_root)/strings/join.h',
        '<(css_parser_root)/strings/memutil.h',
        '<(css_parser_root)/strings/stringpiece.h',
        '<(css_parser_root)/strings/stringpiece_utils.cc',
        '<(css_parser_root)/strings/stringpiece_utils.h',
        '<(css_parser_root)/strings/stringprintf.h',
        '<(css_parser_root)/strings/strutil.h',
        #'<(css_parser_root)/testing/base/public/googletest.h',
        #'<(css_parser_root)/testing/base/public/gunit.h',
        '<(css_parser_root)/testing/production_stub/public/gunit_prod.h',
        '<(css_parser_root)/util/gtl/dense_hash_map.h',
        '<(css_parser_root)/util/gtl/map-util.h',
        '<(css_parser_root)/util/gtl/singleton.h',
        '<(css_parser_root)/util/gtl/stl_util-inl.h',
        '<(css_parser_root)/util/hash/hash.h',
      ],
    },
  ],
}
|
#!/usr/bin/env python
"""
This module provides classes for predicting new structures from existing ones.
"""
from __future__ import division
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__date__ = "Aug 31, 2012"
from pymatgen.serializers.json_coders import MSONable
from pymatgen.structure_prediction.substitution_probability \
import SubstitutionProbability
from pymatgen.transformations.standard_transformations \
import SubstitutionTransformation
from pymatgen.alchemy.transmuters import StandardTransmuter
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.alchemy.filters import RemoveDuplicatesFilter
import itertools
import logging
from operator import mul
class Substitutor(MSONable):
    """
    This object uses a data mined ionic substitution approach to propose
    compounds likely to be stable. It relies on an algorithm presented in
    Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011).
    Data Mined Ionic Substitutions for the Discovery of New Compounds.
    Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
    """
    def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):
        """
        This substitutor uses the substitution probability class to
        find good substitutions for a given chemistry or structure.

        Args:
            threshold:
                probability threshold for predictions
            symprec:
                symmetry precision to determine if two structures
                are duplicates
            kwargs:
                kwargs for the SubstitutionProbability object
                lambda_table, alpha
        """
        # kwargs are kept so that to_dict/from_dict can round-trip them.
        self._kwargs = kwargs
        self._sp = SubstitutionProbability(**kwargs)
        self._threshold = threshold
        self._symprec = symprec

    def get_allowed_species(self):
        """
        Returns the species in the domain of the probability function;
        any other specie will not work.
        """
        return self._sp.species

    def pred_from_structures(self, target_species, structures_list,
                             remove_duplicates=True):
        """
        Performs a structure prediction targeting compounds containing the
        target_species and based on a list of structures (those structures
        can for instance come from a database like the ICSD). It will return
        all the structures formed by ionic substitutions with a probability
        higher than the threshold.

        Args:
            target_species:
                a list of species with oxidation states
                e.g., [Specie('Li',1), Specie('Ni',2), Specie('O',-2)]
            structures_list:
                a list of dictionaries of the form {'structure': Structure
                object, 'id': some id where it comes from}
                the id can for instance refer to an ICSD id

        Returns:
            a list of TransformedStructure objects.

        Raises:
            ValueError: if any of target_species is outside the domain of
                the probability model.
        """
        transmuter = StandardTransmuter([])
        if len(list(set(target_species) & set(self.get_allowed_species()))) \
                != len(target_species):
            # raise (not return) so callers cannot silently ignore this;
            # trailing space fixes the previously fused message text.
            raise ValueError("the species in target_species are not allowed "
                             + "for the probability model you are using")
        for permut in itertools.permutations(target_species):
            for s in structures_list:
                # check if: species are in the domain,
                # and the probability of subst. is above the threshold
                els = s['structure'].composition.elements
                if len(els) == len(permut) and \
                        len(list(set(els) & set(self.get_allowed_species()))) == \
                        len(els) and self._sp.cond_prob_list(permut, els) > \
                        self._threshold:
                    # keep only the substitutions that change a species
                    clean_subst = {els[i]: permut[i]
                                   for i in xrange(0, len(els))
                                   if els[i] != permut[i]}
                    if len(clean_subst) == 0:
                        continue
                    transf = SubstitutionTransformation(clean_subst)
                    if Substitutor._is_charge_balanced(
                            transf.apply_transformation(s['structure'])):
                        ts = TransformedStructure(
                            s['structure'], [transf], history=[s['id']],
                            other_parameters={
                                'type': 'structure_prediction',
                                'proba': self._sp.cond_prob_list(permut, els)}
                        )
                        transmuter.append_transformed_structures([ts])
        if remove_duplicates:
            transmuter.apply_filter(RemoveDuplicatesFilter(
                symprec=self._symprec))
        return transmuter.transformed_structures

    @staticmethod
    def _is_charge_balanced(struct):
        """
        Checks if the structure object is charge balanced, i.e. the
        oxidation states of all its sites sum to zero.
        """
        return sum([s.specie.oxi_state for s in struct.sites]) == 0.0

    def pred_from_list(self, species_list):
        """
        There are an exceptionally large number of substitutions to
        look at (260^n), where n is the number of species in the
        list. We need a more efficient than brute force way of going
        through these possibilities. The brute force method would be::

            output = []
            for p in itertools.product(self._sp.species_list,
                                       repeat=len(species_list)):
                if self._sp.conditional_probability_list(p, species_list) \
                        > self._threshold:
                    output.append(dict(zip(species_list, p)))
            return output

        Instead of that we do a branch and bound.

        Args:
            species_list:
                list of species in the starting structure

        Returns:
            list of dictionaries, each including a substitutions
            dictionary, and a probability value
        """
        # highest achievable probability per position, used to prune
        # branches that can never beat the threshold
        max_probabilities = []
        for s2 in species_list:
            max_p = 0
            for s1 in self._sp.species:
                max_p = max([self._sp.cond_prob(s1, s2), max_p])
            max_probabilities.append(max_p)
        output = []

        def _recurse(output_prob, output_species):
            # best case: probabilities chosen so far, optimistic maxima
            # for the positions not yet assigned
            best_case_prob = list(max_probabilities)
            best_case_prob[:len(output_prob)] = output_prob
            if reduce(mul, best_case_prob) > self._threshold:
                if len(output_species) == len(species_list):
                    odict = {
                        'substitutions':
                            dict(zip(species_list, output_species)),
                        'probability': reduce(mul, best_case_prob)}
                    output.append(odict)
                    return
                for sp in self._sp.species:
                    i = len(output_prob)
                    prob = self._sp.cond_prob(sp, species_list[i])
                    _recurse(output_prob + [prob], output_species + [sp])
        _recurse([], [])
        logging.info('{} substitutions found'.format(len(output)))
        return output

    def pred_from_comp(self, composition):
        """
        Similar to pred_from_list except this method returns a list after
        checking that compositions are charge balanced.
        """
        output = []
        predictions = self.pred_from_list(composition.elements)
        for p in predictions:
            subs = p['substitutions']
            charge = 0
            for i_el in composition.elements:
                f_el = subs[i_el]
                charge += f_el.oxi_state * composition[i_el]
            if charge == 0:
                output.append(p)
        logging.info('{} charge balanced '
                     'compositions found'.format(len(output)))
        return output

    @property
    def to_dict(self):
        """Json-serializable dict representation of this Substitutor."""
        return {"name": self.__class__.__name__, "version": __version__,
                "kwargs": self._kwargs, "threshold": self._threshold,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """Recreates a Substitutor from a to_dict representation."""
        t = d['threshold']
        kwargs = d['kwargs']
        return cls(threshold=t, **kwargs)
Changed return ValueError to raise ValueError in substitutor.
#!/usr/bin/env python
"""
This module provides classes for predicting new structures from existing ones.
"""
from __future__ import division
__author__ = "Will Richards, Geoffroy Hautier"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.2"
__maintainer__ = "Will Richards"
__email__ = "wrichard@mit.edu"
__date__ = "Aug 31, 2012"
from pymatgen.serializers.json_coders import MSONable
from pymatgen.structure_prediction.substitution_probability \
import SubstitutionProbability
from pymatgen.transformations.standard_transformations \
import SubstitutionTransformation
from pymatgen.alchemy.transmuters import StandardTransmuter
from pymatgen.alchemy.materials import TransformedStructure
from pymatgen.alchemy.filters import RemoveDuplicatesFilter
import itertools
import logging
from operator import mul
class Substitutor(MSONable):
    """
    This object uses a data mined ionic substitution approach to propose
    compounds likely to be stable. It relies on an algorithm presented in
    Hautier, G., Fischer, C., Ehrlacher, V., Jain, A., and Ceder, G. (2011).
    Data Mined Ionic Substitutions for the Discovery of New Compounds.
    Inorganic Chemistry, 50(2), 656-663. doi:10.1021/ic102031h
    """
    def __init__(self, threshold=1e-3, symprec=0.1, **kwargs):
        """
        This substitutor uses the substitution probability class to
        find good substitutions for a given chemistry or structure.

        Args:
            threshold:
                probability threshold for predictions
            symprec:
                symmetry precision to determine if two structures
                are duplicates
            kwargs:
                kwargs for the SubstitutionProbability object
                lambda_table, alpha
        """
        # kwargs are kept so that to_dict/from_dict can round-trip them.
        self._kwargs = kwargs
        self._sp = SubstitutionProbability(**kwargs)
        self._threshold = threshold
        self._symprec = symprec

    def get_allowed_species(self):
        """
        Returns the species in the domain of the probability function;
        any other specie will not work.
        """
        return self._sp.species

    def pred_from_structures(self, target_species, structures_list,
                             remove_duplicates=True):
        """
        Performs a structure prediction targeting compounds containing the
        target_species and based on a list of structures (those structures
        can for instance come from a database like the ICSD). It will return
        all the structures formed by ionic substitutions with a probability
        higher than the threshold.

        Args:
            target_species:
                a list of species with oxidation states
                e.g., [Specie('Li',1), Specie('Ni',2), Specie('O',-2)]
            structures_list:
                a list of dictionaries of the form {'structure': Structure
                object, 'id': some id where it comes from}
                the id can for instance refer to an ICSD id

        Returns:
            a list of TransformedStructure objects.

        Raises:
            ValueError: if any of target_species is outside the domain of
                the probability model.
        """
        transmuter = StandardTransmuter([])
        if len(list(set(target_species) & set(self.get_allowed_species()))) \
                != len(target_species):
            # trailing space fixes the previously fused message text
            # ("not allowedfor")
            raise ValueError("the species in target_species are not allowed "
                             + "for the probability model you are using")
        for permut in itertools.permutations(target_species):
            for s in structures_list:
                # check if: species are in the domain,
                # and the probability of subst. is above the threshold
                els = s['structure'].composition.elements
                if len(els) == len(permut) and \
                        len(list(set(els) & set(self.get_allowed_species()))) == \
                        len(els) and self._sp.cond_prob_list(permut, els) > \
                        self._threshold:
                    # keep only the substitutions that change a species
                    clean_subst = {els[i]: permut[i]
                                   for i in xrange(0, len(els))
                                   if els[i] != permut[i]}
                    if len(clean_subst) == 0:
                        continue
                    transf = SubstitutionTransformation(clean_subst)
                    if Substitutor._is_charge_balanced(
                            transf.apply_transformation(s['structure'])):
                        ts = TransformedStructure(
                            s['structure'], [transf], history=[s['id']],
                            other_parameters={
                                'type': 'structure_prediction',
                                'proba': self._sp.cond_prob_list(permut, els)}
                        )
                        transmuter.append_transformed_structures([ts])
        if remove_duplicates:
            transmuter.apply_filter(RemoveDuplicatesFilter(
                symprec=self._symprec))
        return transmuter.transformed_structures

    @staticmethod
    def _is_charge_balanced(struct):
        """
        Checks if the structure object is charge balanced, i.e. the
        oxidation states of all its sites sum to zero.
        """
        return sum([s.specie.oxi_state for s in struct.sites]) == 0.0

    def pred_from_list(self, species_list):
        """
        There are an exceptionally large number of substitutions to
        look at (260^n), where n is the number of species in the
        list. We need a more efficient than brute force way of going
        through these possibilities. The brute force method would be::

            output = []
            for p in itertools.product(self._sp.species_list,
                                       repeat=len(species_list)):
                if self._sp.conditional_probability_list(p, species_list) \
                        > self._threshold:
                    output.append(dict(zip(species_list, p)))
            return output

        Instead of that we do a branch and bound.

        Args:
            species_list:
                list of species in the starting structure

        Returns:
            list of dictionaries, each including a substitutions
            dictionary, and a probability value
        """
        # highest achievable probability per position, used to prune
        # branches that can never beat the threshold
        max_probabilities = []
        for s2 in species_list:
            max_p = 0
            for s1 in self._sp.species:
                max_p = max([self._sp.cond_prob(s1, s2), max_p])
            max_probabilities.append(max_p)
        output = []

        def _recurse(output_prob, output_species):
            # best case: probabilities chosen so far, optimistic maxima
            # for the positions not yet assigned
            best_case_prob = list(max_probabilities)
            best_case_prob[:len(output_prob)] = output_prob
            if reduce(mul, best_case_prob) > self._threshold:
                if len(output_species) == len(species_list):
                    odict = {
                        'substitutions':
                            dict(zip(species_list, output_species)),
                        'probability': reduce(mul, best_case_prob)}
                    output.append(odict)
                    return
                for sp in self._sp.species:
                    i = len(output_prob)
                    prob = self._sp.cond_prob(sp, species_list[i])
                    _recurse(output_prob + [prob], output_species + [sp])
        _recurse([], [])
        logging.info('{} substitutions found'.format(len(output)))
        return output

    def pred_from_comp(self, composition):
        """
        Similar to pred_from_list except this method returns a list after
        checking that compositions are charge balanced.
        """
        output = []
        predictions = self.pred_from_list(composition.elements)
        for p in predictions:
            subs = p['substitutions']
            charge = 0
            for i_el in composition.elements:
                f_el = subs[i_el]
                charge += f_el.oxi_state * composition[i_el]
            if charge == 0:
                output.append(p)
        logging.info('{} charge balanced '
                     'compositions found'.format(len(output)))
        return output

    @property
    def to_dict(self):
        """Json-serializable dict representation of this Substitutor."""
        return {"name": self.__class__.__name__, "version": __version__,
                "kwargs": self._kwargs, "threshold": self._threshold,
                "@module": self.__class__.__module__,
                "@class": self.__class__.__name__}

    @classmethod
    def from_dict(cls, d):
        """Recreates a Substitutor from a to_dict representation."""
        t = d['threshold']
        kwargs = d['kwargs']
        return cls(threshold=t, **kwargs)
|
#!/usr/bin/python2
import subprocess
import os
import mimetypes
import argparse
def find(query, settings):
    """
    Little fuzzy finder implementation that works with a shell command;
    it also works differently according to filetype.

    Args:
        query: whitespace-separated terms; each becomes a grep filter
            over paths found under the user's home directory.
        settings: namespace with `number_of_output` (max results) and
            `term` (terminal emulator command).

    Returns:
        A "{label|command|preview}" result string, or a "No path found"
        entry when the shell pipeline produces nothing.
    """
    command = ["| grep %s " % (elem) for elem in query.split()]
    command = " ".join(command)
    user = os.path.expanduser('~')
    try:
        find_array = subprocess.check_output('find %s %s' % (user, command),
                                             shell=True,
                                             executable='/bin/sh').split('\n')
    except Exception:
        # When 'find' output nothing (grep exits non-zero on no match).
        return "{No path found.| %s }" % settings.term
    else:
        res = ''
        # Shortest paths first: closest matches tend to be the shallowest.
        find_array.sort(key=len)
        if len(find_array[0]) == 0:
            find_array.pop(0)
        for i in xrange(min(settings.number_of_output, len(find_array))):
            clearedOut = find_array[i].strip().replace(' ', '\ ')
            # Path with space don't work.
            mime_type, encoding = mimetypes.guess_type(clearedOut)
            if os.path.isdir(find_array[i]):
                # 'foo bar' is considered as a folder in python
                # but 'foo\ bar' is not.
                dirFile = " " + "%N ".join(os.listdir(str(find_array[i])))
                res += "{%s|%s --working-directory=%s |%%CFile in directory%%%%L%s}" % (str(find_array[i]), settings.term, clearedOut, dirFile)
            elif mime_type and "png" in mime_type:
                # guess_type() returns (None, None) for unknown types, so
                # guard before the substring test to avoid a TypeError.
                res += "{%s|xdg-open '%s'|%%I%s%%}" % (
                    str(find_array[i]), str(find_array[i]), clearedOut)
            elif mime_type and "text" in mime_type:
                # NOTE(review): open() on the shell-escaped path will fail
                # for paths that really contain spaces -- confirm intent.
                preview_file = open(clearedOut)
                res += "{%s|xdg-open %s|%%CPreview%%%%N%s}" % (str(find_array[i]), clearedOut, preview_file.read(100).replace("\n", "%N"))
                preview_file.close()
            else:
                # Check for every file extension the user specified in the
                # begining of this script file
                res += "{%s|xdg-open %s|Launching it with %%B%s%%}" % (
                    str(find_array[i]), str(find_array[i]), encoding)
        return res
if __name__ == "__main__":
    # Command-line entry point: query plus optional result count / terminal.
    parser = argparse.ArgumentParser()
    parser.add_argument("query")
    parser.add_argument("-number_of_output", default=3, type=int)
    # 'urxvt' is the rxvt-unicode terminal binary; 'urvxt' was a typo.
    parser.add_argument("-term", default="urxvt", type=str)
    settings = parser.parse_args()
    print(find(settings.query, settings))
find.py: guard against None mime types from mimetypes.guess_type and handle all image and text files.
#!/usr/bin/python2
import subprocess
import os
import mimetypes
import argparse
def find(query, settings):
    """
    Little fuzzy finder implementation that work with a bash command,
    it also work different according to filetype.

    Args:
        query: whitespace-separated terms; each becomes a grep filter
            over paths found under the user's home directory.
        settings: namespace with `number_of_output` (max results) and
            `term` (terminal emulator command).

    Returns:
        A "{label|command|preview}" result string, or a "No path found"
        entry when the shell pipeline produces nothing.
    """
    command = ["| grep %s " % (elem) for elem in query.split()]
    command = " ".join(command)
    user = os.path.expanduser('~')
    try:
        find_array = subprocess.check_output('find %s %s' % (user, command),
                                             shell=True,
                                             executable='/bin/sh').split('\n')
    except Exception:
        # When 'find' output nothing.
        return "{No path found.| %s }" % settings.term
    else:
        res = ''
        # Shortest paths first: closest matches tend to be the shallowest.
        find_array.sort(key=len)
        if len(find_array[0]) == 0:
            find_array.pop(0)
        for i in xrange(min(settings.number_of_output, len(find_array))):
            clearedOut = find_array[i].strip().replace(' ', '\ ')
            # Path with space don't work.
            mime_type, encoding = mimetypes.guess_type(clearedOut)
            if os.path.isdir(find_array[i]):
                # 'foo bar' is considered as a folder in python
                # but 'foo\ bar' is not.
                dirFile = " " + "%N ".join(os.listdir(str(find_array[i])))
                res += "{%s|%s --working-directory=%s |%%CFile in directory%%%%L%s}" % (str(find_array[i]), settings.term, clearedOut, dirFile)
            elif mime_type and "image" in mime_type:
                # guess_type() may return None, hence the truthiness guard.
                res += "{%s|xdg-open '%s'|%%CPreview%%%%L%%I%s%%}" % (
                    str(find_array[i]), str(find_array[i]), clearedOut)
            elif mime_type and "text" in mime_type:
                # NOTE(review): open() on the shell-escaped path will fail
                # for paths that really contain spaces -- confirm intent.
                preview_file = open(clearedOut)
                res += "{%s|xdg-open %s|%%CPreview%%%%L%s}" % (str(find_array[i]), clearedOut, preview_file.read(100).replace("\n", "%N"))
                preview_file.close()
            else:
                # Check for every file extension the user specified in the
                # begining of this script file
                res += "{%s|xdg-open %s|Launching it with %%B%s%%}" % (
                    str(find_array[i]), str(find_array[i]), encoding)
        return res
if __name__ == "__main__":
    # Command-line entry point: query plus optional result count / terminal.
    parser = argparse.ArgumentParser()
    parser.add_argument("query")
    parser.add_argument("-number_of_output", default=3, type=int)
    # 'urxvt' is the rxvt-unicode terminal binary; 'urvxt' was a typo.
    parser.add_argument("-term", default="urxvt", type=str)
    settings = parser.parse_args()
    print(find(settings.query, settings))
|
#!/usr/bin/env python3
from reporter.uhl_reports.i2b2.patient_mapping_tests import (
PatientMappingDuplicatesReport,
PatientMappingMultiplesIdsReport,
)
from reporter.uhl_reports.i2b2.patient_summary_tests import (
PatientSummaryDuplicatesReport,
PatientSummaryMissingData,
PatientSummaryMissingParticipants,
)
from reporter.uhl_reports.i2b2.valid_enrolment_tests import (
ValidEnrolmentsStudyIdDuplicates,
ValidEnrolmentsContactMultipleRecruitments,
RecruitedWithoutFullConsent,
PatientSummaryMissingRecruited,
)
from reporter.emailing import (
RECIPIENT_LIMB_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_IT_DWH,
)
I2B2_DB = "i2b2_app03_limb_Data"
class LimbPatientMappingDuplicatesReport(
        PatientMappingDuplicatesReport):
    """PatientMappingDuplicatesReport run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientMappingMultiplesIdsReport(
        PatientMappingMultiplesIdsReport):
    """PatientMappingMultiplesIdsReport run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientSummaryDuplicatesReport(
        PatientSummaryDuplicatesReport):
    """PatientSummaryDuplicatesReport run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientSummaryMissingData(
        PatientSummaryMissingData):
    """PatientSummaryMissingData check over the LIMB i2b2 database for the
    listed identifier and demographic columns."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [
                'CiviCrmId',
                'CiviCrmCaseId',
                'NhsNumber',
                'UhlSystemNumber',
                'StudyNumber',
                'consent_date',
                'Gender',
                'DateOfBirth',
                'Ethnicity',
            ]
        )
class LimbPatientSummaryMissingParticipants(
        PatientSummaryMissingParticipants):
    """PatientSummaryMissingParticipants run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbValidEnrolmentsStudyIdDuplicates(
        ValidEnrolmentsStudyIdDuplicates):
    """ValidEnrolmentsStudyIdDuplicates for the LIMB i2b2 database,
    sent to the LIMB admin recipient."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN]
        )
class LimbValidEnrolmentsContactMultipleRecruitments(
        ValidEnrolmentsContactMultipleRecruitments):
    """ValidEnrolmentsContactMultipleRecruitments for the LIMB i2b2 database,
    sent to the LIMB admin recipient."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN]
        )
class LimbRecruitedWithoutFullConsent(
        RecruitedWithoutFullConsent):
    """RecruitedWithoutFullConsent for the LIMB i2b2 database,
    sent to the LIMB admin recipient."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN]
        )
class LimbPatientSummaryMissingRecruited(
        PatientSummaryMissingRecruited):
    """PatientSummaryMissingRecruited for the LIMB i2b2 database,
    sent to the data warehouse IT recipient."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_IT_DWH],
        )
Disable LimbRecruitedWithoutFullConsent
#!/usr/bin/env python3
from reporter.core import Schedule
from reporter.uhl_reports.i2b2.patient_mapping_tests import (
PatientMappingDuplicatesReport,
PatientMappingMultiplesIdsReport,
)
from reporter.uhl_reports.i2b2.patient_summary_tests import (
PatientSummaryDuplicatesReport,
PatientSummaryMissingData,
PatientSummaryMissingParticipants,
)
from reporter.uhl_reports.i2b2.valid_enrolment_tests import (
ValidEnrolmentsStudyIdDuplicates,
ValidEnrolmentsContactMultipleRecruitments,
RecruitedWithoutFullConsent,
PatientSummaryMissingRecruited,
)
from reporter.emailing import (
RECIPIENT_LIMB_ADMIN as RECIPIENT_ADMIN,
RECIPIENT_IT_DWH,
)
I2B2_DB = "i2b2_app03_limb_Data"
class LimbPatientMappingDuplicatesReport(
        PatientMappingDuplicatesReport):
    """PatientMappingDuplicatesReport run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientMappingMultiplesIdsReport(
        PatientMappingMultiplesIdsReport):
    """Multiple-IDs patient-mapping check run against the LIMB i2b2 DB."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientSummaryDuplicatesReport(
        PatientSummaryDuplicatesReport):
    """Runs the patient-summary duplicate check against the LIMB i2b2 DB."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbPatientSummaryMissingData(
        PatientSummaryMissingData):
    """Flags LIMB patient-summary rows missing any of the listed fields."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            # Columns that must be populated for every participant.
            [
                'CiviCrmId',
                'CiviCrmCaseId',
                'NhsNumber',
                'UhlSystemNumber',
                'StudyNumber',
                'consent_date',
                'Gender',
                'DateOfBirth',
                'Ethnicity',
            ]
        )
class LimbPatientSummaryMissingParticipants(
        PatientSummaryMissingParticipants):
    """Missing-participants check run against the LIMB i2b2 database."""
    def __init__(self):
        super().__init__(I2B2_DB)
class LimbValidEnrolmentsStudyIdDuplicates(
        ValidEnrolmentsStudyIdDuplicates):
    """Duplicate-study-ID check for LIMB enrolments; emailed to admins."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN]
        )
class LimbValidEnrolmentsContactMultipleRecruitments(
        ValidEnrolmentsContactMultipleRecruitments):
    """Multiple-recruitment check for LIMB enrolments; emailed to admins."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN]
        )
class LimbRecruitedWithoutFullConsent(
        RecruitedWithoutFullConsent):
    """Recruited-without-full-consent check for LIMB.

    NOTE(review): `schedule=Schedule.never` keeps this report from running
    automatically — presumably a deliberate disablement; confirm before
    re-enabling.
    """
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_ADMIN],
            schedule=Schedule.never,
        )
class LimbPatientSummaryMissingRecruited(
        PatientSummaryMissingRecruited):
    """Missing-recruited check for LIMB; emailed to the data-warehouse team."""
    def __init__(self):
        super().__init__(
            I2B2_DB,
            [RECIPIENT_IT_DWH],
        )
# ----------------------------------------------------------------------
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@sinh
@@cosh
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@atan2
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@bincount
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
# pylint: disable=redefined-builtin
# TODO(aselle): deprecate arg_max
def argmax(input, axis=None, name=None, dimension=None):
  # Fold the deprecated `dimension` alias into `axis`; specifying both is an
  # error.  (No docstring here: `__doc__` is assigned from the generated op
  # below.)
  if dimension is not None:
    if axis is not None:
      raise ValueError("Cannot specify both 'axis' and 'dimension'")
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name)

argmax.__doc__ = (gen_math_ops.arg_max.__doc__.replace("dimensions",
                                                       "axes").replace(
                                                           "dimension", "axis"))
# TODO(aselle:deprecate arg_min)
def argmin(input, axis=None, name=None, dimension=None):
  # Fold the deprecated `dimension` alias into `axis`; specifying both is an
  # error.  (No docstring here: `__doc__` is assigned from the generated op
  # below.)
  if dimension is not None:
    if axis is not None:
      raise ValueError("Cannot specify both 'axis' and 'dimension'")
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name)

argmin.__doc__ = (gen_math_ops.arg_min.__doc__.replace("dimensions",
                                                       "axes").replace(
                                                           "dimension", "axis"))
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
  r"""Computes the absolute value of a tensor.

  For real input the result has the same dtype as `x`.  For complex input of
  the form \\(a + bj\\) the result is the elementwise magnitude
  \\( \sqrt{a^2 + b^2}\\): `float32` for `complex64`, `float64` for
  `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` the same size as `x` containing the absolute
    values.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    complex_types = (dtypes.complex64, dtypes.complex128)
    if isinstance(x, sparse_tensor.SparseTensor):
      # Operate on the stored values only; sparsity structure is preserved.
      if x.values.dtype in complex_types:
        abs_values = gen_math_ops._complex_abs(
            x.values, Tout=x.values.dtype.real_dtype, name=name)
      else:
        abs_values = gen_math_ops._abs(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=abs_values, dense_shape=x.dense_shape)
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype in complex_types:
      return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Thin private wrapper around the generated `Bucketize` op."""
  return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors.

  Wraps the left operand so that `/`, `//` and Python-2 `div` applied to the
  wrapper create ops carrying the caller-supplied name.
  """

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, other):
    return _truediv_python3(self.x, other, self.name)

  def __floordiv__(self, other):
    return floordiv(self.x, other, self.name)

  def __div__(self, other):
    return _div_python2(self.x, other, self.name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`."""
  if name is None:
    return x / y
  # Operator overloads cannot carry a custom op name, so route the division
  # through a delegate object that remembers the requested name.
  return DivideDelegateWithName(x, name) / y
# Public elementwise multiply; delegates to the generated Mul op.  No Python
# docstring here on purpose — `__doc__` is copied from the generated op below.
def multiply(x, y, name=None):
  return gen_math_ops._mul(x, y, name)

multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
# Deprecated alias of `multiply`; kept for backward compatibility.
@deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  return gen_math_ops._mul(x, y, name)

# Prepend the generated op's docs to the deprecation notice (if any) that the
# decorator installed on `_mul.__doc__`.
_mul.__doc__ = (gen_math_ops._mul.__doc__ +
                ("" if _mul.__doc__ is None else _mul.__doc__))
# Public elementwise subtract; delegates to the generated Sub op.  No Python
# docstring here on purpose — `__doc__` is copied from the generated op below.
def subtract(x, y, name=None):
  return gen_math_ops._sub(x, y, name)

subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
# Deprecated alias of `subtract`; kept for backward compatibility.
@deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  return gen_math_ops._sub(x, y, name)

# Prepend the generated op's docs to the deprecation notice (if any) that the
# decorator installed on `_sub.__doc__`.
_sub.__doc__ = (gen_math_ops._sub.__doc__ +
                ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._neg(x, name=name)
    # Sparse input: negate the stored values, keep the structure.
    negated_values = gen_math_ops._neg(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=negated_values, dense_shape=x.dense_shape)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
# Deprecated alias of `negative`; kept for backward compatibility.
@deprecated("2016-12-30",
            "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.
  Zero is returned for NaN inputs.
  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to numpy.sign except for the behavior for input values of NaN.
  @end_compatibility
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sign(x, name=name)
    # Sparse input: apply to the stored values, keep the structure.
    sign_values = gen_math_ops.sign(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=sign_values, dense_shape=x.dense_shape)
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.square(x, name=name)
    # Sparse input: square the stored values, keep the structure.
    squared_values = gen_math_ops.square(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=squared_values, dense_shape=x.dense_shape)
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.

  I.e., \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sqrt(x, name=name)
    # Sparse input: take the root of the stored values, keep the structure.
    sqrt_values = gen_math_ops.sqrt(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=sqrt_values, dense_shape=x.dense_shape)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.erf(x, name=name)
    # Sparse input: apply to the stored values, keep the structure.
    erf_values = gen_math_ops.erf(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=erf_values, dense_shape=x.dense_shape)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the materialized values; indices/shape pass through.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Computes \\(x^y\\) elementwise for corresponding entries of `x` and `y`.
  For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given tensors `real` and `imag` of the same shape, returns elementwise
  complex numbers of the form \\(a + bj\\), where *a* comes from `real` and
  *b* from `imag`.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # The output dtype is determined by the (matching) input dtypes.
    if real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      Tout = dtypes.complex128
    elif real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      Tout = dtypes.complex64
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers \\(a + bj\\), returns a tensor of
  type `float32` or `float64` holding each element's real part *a*.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    real_dtype = input.dtype.real_dtype
    if input.dtype.base_dtype == real_dtype:
      # Input is already a real tensor; no op needed.
      return input
    return gen_math_ops.real(input, Tout=real_dtype, name=name)
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers \\(a + bj\\), returns a tensor of
  type `float32` or `float64` holding each element's imaginary part *b*.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded".
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Recurse on the stored values; sparsity structure is preserved.
      values_cast = cast(x.values, base_type, name=name)
      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    # TODO(touts): Handle what Josh said.
    #
    # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
    # allows some conversions that cast() can't do, e.g. casting numbers to
    # strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == base_type:
      # Already the requested type; no op needed.
      return x
    return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    in_dtype = value.dtype
    if in_dtype.min < dtype.min:
      low = ops.convert_to_tensor(dtype.min, dtype=in_dtype, name="min")
      value = gen_math_ops.maximum(value, low)
    if in_dtype.max > dtype.max:
      high = ops.convert_to_tensor(dtype.max, dtype=in_dtype, name="max")
      value = gen_math_ops.minimum(value, high)
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with the same shape as `x`, of type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with the same shape as `x`, of type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with the same shape as `x`, of type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with the same shape as `x`, of type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with the same shape as `x`, of type
    `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  return cast(x, dtypes.bfloat16, name=name)
# Register unary operator overloads on `Tensor`.
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """

  # Forward wrapper: `x <op> y` with `x` a dense Tensor.
  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)

  # Forward wrapper for a SparseTensor LHS: func operates on the sparse
  # components and the result reuses the LHS's indices and dense_shape.
  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(sp_x.indices,
                                        func(
                                            sp_x.indices,
                                            sp_x.values,
                                            sp_x.dense_shape,
                                            y,
                                            name=name), sp_x.dense_shape)

  # Reflected wrapper: `y <op> x` where only `y` is a Tensor; converts the
  # LHS to a tensor of the RHS's dtype, then applies func in forward order.
  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only gets the forward overload; no reflected variant.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer dtypes map to the float dtype used to carry the exact quotient;
# float/complex dtypes divide in place.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      target_dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if target_dtype is not None:
      # Integer operands are promoted to a float dtype before dividing.
      sp_values = cast(sp_values, target_dtype)
      y = cast(y, target_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Python-3 `/` semantics: integer operands are promoted to float first."""
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      target_dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if target_dtype is not None:
      # Integer operands are promoted to a float dtype before dividing.
      x = cast(x, target_dtype)
      y = cast(y, target_dtype)
    return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics. Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype.is_floating or x_dtype.is_complex:
      # Float/complex operands: true division.
      return gen_math_ops._real_div(x, y, name=name)
    # Integer operands: flooring division, matching Python 2 `/`.
    return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first. This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`. If you want integer division that rounds
  down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python division operator semantics.

  This function divides `x` and `y`, forcing Python 2.7 semantics. That is,
  if one of `x` or `y` is a float, then the result will be a float.
  Otherwise, the output will be an integer type. Flooring semantics are used
  for integer division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)
# TODO(aselle): This should be removed
# Alias: `mod` is elementwise flooring modulus (matches Python `%`).
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
  floating point arguments so that the result is always an integer (though
  possibly an integer represented as floating point). This op is generated by
  `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops._floor_div(x, y, name=name)
# Public aliases for the generated division/modulus ops.
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    return gen_math_ops._mul(x, y, name=name)
  # Case: Dense * Sparse — multiply against the sparse operand's values and
  # rebuild a SparseTensor with the same structure.
  assert isinstance(y, sparse_tensor.SparseTensor)
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# SparseTensor operator overloads.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

# Dense Tensor arithmetic operator overloads.
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """Computes elementwise exclusive-or: x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(
      either, gen_math_ops.logical_not(both), name=name)
# Logical operator-overload registrations (&, |, ^) for boolean Tensors.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
# Comparison operators (<, <=, >, >=) map directly to the cwise kernels.
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  The sequence begins at `start` and extends by increments of `delta` up to
  but not including `limit`. Like the Python builtin `range`, `start`
  defaults to 0 when only one positional argument is given, so
  `range(n) = range(0, n)`.

  The dtype of the result is inferred from the inputs unless `dtype` is
  provided explicitly.

  For example:

  ```python
  tf.range(3, 18, 3)    # => [3, 6, 9, 12, 15]
  tf.range(3, 1, -0.5)  # => [3, 2.5, 2, 1.5]
  tf.range(5)           # => [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). First entry of the range when `limit`
      is given; otherwise acts as the limit and the range starts at 0.
    limit: A 0-D `Tensor` (scalar). Exclusive upper limit of the sequence.
      If None, defaults to the value of `start`.
    delta: A 0-D `Tensor` (scalar). Increment applied to `start`.
      Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # No explicit dtype: promote all operands to the widest dtype present,
      # using the fixed order int32 < int64 < float32 < float64.
      hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]
      assert all(t.dtype in hierarchy for t in (start, limit, delta))
      widest = hierarchy[max(
          hierarchy.index(t.dtype) for t in (start, limit, delta))]
      start, limit, delta = (cast(t, widest) for t in (start, limit, delta))
    return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
  """Resolves the reduction dimensions from `axis`/`reduction_indices`.

  Falls back to range(0, rank(x)) when neither argument is given.

  Args:
    x: The `Tensor` or `SparseTensor` being reduced.
    axis: The dimensions to reduce, or None to reduce all dimensions.
    reduction_indices: The old (deprecated) alias for `axis`.

  Returns:
    The dimensions to reduce: `axis` itself when given, a static int32
    constant when the rank of `x` is statically known, otherwise a
    runtime Range/Rank expression.

  Raises:
    ValueError: If both `axis` and `reduction_indices` are set.
  """
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      # BUGFIX: the message previously read "both axis'" with a mismatched
      # opening quote on 'axis'.
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices
  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
      return constant_op.constant(
          np.arange(x.get_shape().ndims), dtype=dtypes.int32)
    if (isinstance(x, sparse_tensor.SparseTensor) and
        x.dense_shape.get_shape().is_fully_defined()):
      rank = x.dense_shape.get_shape()[0].value  # sparse.dense_shape is 1-D.
      return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Sums the elements of a tensor over the dimensions given in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element is returned. Each reduced dimension is dropped
  unless `keep_dims` is true, in which case it is retained with length 1.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Counts the nonzero elements across the dimensions given in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element is returned; `keep_dims=True` retains each reduced
  dimension with length 1.

  **NOTE** Floating point values are compared to zero by exact equality —
  small values are **not** rounded to zero for the purpose of this count.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    # Exact comparison against this dtype's zero value.
    zero = input_tensor.dtype.as_numpy_dtype()
    nonzero_mask = gen_math_ops.not_equal(input_tensor, zero)
    # int64 reduction happens on GPU
    counts = reduce_sum(
        to_int64(nonzero_mask),
        axis=axis,
        keep_dims=keep_dims,
        reduction_indices=reduction_indices)
    return cast(counts, dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the arithmetic mean over the dimensions given in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Multiplies the elements of a tensor across the dimensions in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Takes the minimum of tensor elements across the dimensions in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Takes the maximum of tensor elements across the dimensions in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical and" of elements across the dimensions in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical or" of elements across the dimensions in `axis`.

  With `axis=None` (the default) every dimension is reduced and a tensor
  with a single element results. Reduced dimensions are dropped unless
  `keep_dims` is true, in which case they are retained with length 1.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def reduce_logsumexp(input_tensor,
                     axis=None,
                     keep_dims=False,
                     name=None,
                     reduction_indices=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0]
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Standard stable log-sum-exp trick: subtract the per-slice max before
    # exp so the largest exponent is 0. The max is gradient-stopped since it
    # is only a numerical shift, not part of the differentiated math.
    # NOTE(review): if a reduced slice is entirely -inf, my_max is -inf and
    # input_tensor - my_max yields NaN — confirm callers pass finite inputs.
    my_max = array_ops.stop_gradient(
        reduce_max(
            input_tensor,
            axis=axis,
            reduction_indices=reduction_indices,
            keep_dims=True))
    # keep_dims=True here so my_max broadcasts correctly when added back.
    result = gen_math_ops.log(
        reduce_sum(
            gen_math_ops.exp(input_tensor - my_max),
            axis,
            keep_dims=True,
            reduction_indices=reduction_indices)) + my_max
    if not keep_dims:
      # The intermediate reductions kept the reduced dims; squeeze them now.
      if isinstance(axis, int):
        axis = [axis]
      result = array_ops.squeeze(result, axis)
    return result
def trace(x, name=None):
  """Computes the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most
  matrix in `x`. If `x` has rank `k` with shape `[I, J, K, ..., L, M, N]`,
  the output has rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])`

  For example:

  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5

  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    diagonals = array_ops.matrix_diag_part(x)
    return reduce_sum(diagonals, [-1], name=name)
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication arguments,
  and any further outer dimensions match.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  Since python >= 3.5 the @ operator is supported (see PEP 465); it simply
  calls this function, so `a @ b` is equivalent to `tf.matmul(a, b)`.

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
    the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
    for all indices i, j.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
      are both set to True.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_shape = a.get_shape()
    b_shape = b.get_shape()
    # Batched multiply applies when neither input is flagged sparse and
    # both inputs are (or may be) higher than rank 2.
    use_batch = (not a_is_sparse and not b_is_sparse and
                 (a_shape.ndims is None or a_shape.ndims > 2) and
                 (b_shape.ndims is None or b_shape.ndims > 2))
    if use_batch:
      # BatchMatmul has no transpose attribute, so express a transpose as
      # conj + adjoint. Conj() is a noop for real matrices.
      if transpose_a:
        a, adjoint_a = conj(a), True
      if transpose_b:
        b, adjoint_b = conj(b), True
      return gen_math_ops._batch_mat_mul(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)

    # Conversely, MatMul and SparseMatMul only support transpose, so fold
    # any adjoint into conj + transpose. Conj() is a noop for real matrices.
    if adjoint_a:
      a, transpose_a = conj(a), True
    if adjoint_b:
      b, transpose_b = conj(b), True

    # SparseMatMul only handles bfloat16/float32; use it when a sparsity
    # hint was given (or whenever bfloat16 is involved, which MatMul
    # currently can't handle).
    sparse_matmul_types = (dtypes.bfloat16, dtypes.float32)
    use_sparse_matmul = ((a_is_sparse or b_is_sparse) and
                         a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types)
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
    return gen_math_ops._mat_mul(
        a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
# Register matmul for operator overloading (backs the Python >= 3.5 `@`
# operator, per the matmul docstring).
_OverrideBinaryOperatorHelper(matmul, "matmul")

# Public alias for the generated sparse-capable matmul kernel.
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The contraction dimension of `a`: rows if transposed, columns otherwise.
  k = int(a_shape[0]) if transpose_a else int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Each output element costs k multiplies and k adds.
  return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
  """Converts a dense Tensor `x` to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Converts all elements of `inputs` to IndexedSlices.

  Additionally, homogenizes the dtype of the index tensors: when int32 and
  int64 indices are mixed, all int32 indices are promoted to int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(1 for o in outputs if o.indices.dtype == dtypes.int32)
  if num_int32 == 0 or num_int32 == len(outputs):
    # Index dtypes are already homogeneous; nothing to cast.
    return outputs
  # Mixed index dtypes: promote every int32 index tensor to int64.
  promoted = []
  for o in outputs:
    if o.indices.dtype == dtypes.int32:
      promoted.append(
          ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                            o.dense_shape))
    else:
      promoted.append(o)
  return promoted
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if len(inputs) == 1:
    # Single input: nothing to add; pass through (named if requested).
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs
  depend on trainable variables. Please use `tf.add_n` for such cases.

  Aside from differentiability, `tf.accumulate_n` performs the same operation
  as `tf.add_n`, but does not wait for all of its inputs to be ready before
  beginning to sum. This can save memory if inputs are ready at different
  times, since minimum temporary storage is proportional to the output size
  rather than the inputs size.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  # Validate that `inputs` is a non-empty list/tuple of Tensors sharing a
  # dtype before building any graph state.
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  # Merge the explicit `shape` (if any) with every statically-known input
  # shape; incompatibilities raise from merge_with.
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    # Single input: nothing to accumulate.
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # Accumulate into a temporary variable so partial sums can start as soon
    # as individual inputs are ready.
    var = gen_state_ops._temporary_variable(
        shape=tensor_shape.vector(0), dtype=tensor_dtype)
    with ops.colocate_with(var):
      # Zero-initialize the accumulator with the merged static shape; the
      # Merge op makes the zeros depend on any one input becoming available.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      # use_locking=True: the adds run concurrently and must not race.
      update_ops = [
          state_ops.assign_add(ref, input_tensor, use_locking=True)
          for input_tensor in inputs
      ]
      with ops.control_dependencies(update_ops):
        # Read out the final sum and release the temporary variable.
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise: `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`,
    otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to np.scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    return gen_math_ops._sigmoid(ops.convert_to_tensor(x, name="x"), name=name)
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    # log(sigmoid(x)) == -softplus(-x), which avoids overflow in exp.
    neg_softplus = gen_nn_ops.softplus(-ops.convert_to_tensor(x, name="x"))
    return gen_math_ops._neg(neg_softplus, name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x` if
    `x.dtype != qint32` otherwise the return type is `quint8`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._tanh(x, name=name)
    # Sparse input: apply tanh to the stored values only (tanh(0) == 0,
    # so the implicit zeros are unaffected).
    value_tanh = gen_math_ops._tanh(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=value_tanh, dense_shape=x.dense_shape)
def bincount(arr,
             weights=None,
             minlength=None,
             maxlength=None,
             dtype=dtypes.int32):
  """Counts the number of occurrences of each value in an integer array.

  If `minlength` and `maxlength` are not given, the result has length
  `tf.reduce_max(arr) + 1` when `arr` is non-empty and length 0 otherwise.
  If `weights` is non-None, index `i` of the output stores the sum of the
  values in `weights` at each index where the corresponding value in `arr`
  is `i`.

  Args:
    arr: An int32 tensor of non-negative values.
    weights: If non-None, must be the same shape as arr. For each value in
      `arr`, the bin will be incremented by the corresponding weight instead
      of 1.
    minlength: If given, ensures the output has length at least `minlength`,
      padding with zeros at the end if necessary.
    maxlength: If given, skips values in `arr` that are equal or greater than
      `maxlength`, ensuring that the output has length at most `maxlength`.
    dtype: If `weights` is None, determines the type of the output bins.

  Returns:
    A vector with the same dtype as `weights` or the given `dtype`. The bin
    values.
  """
  arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
  # Default length is max(arr) + 1, but 0 for an empty input.
  is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
  output_size = cast(is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
  if minlength is not None:
    minlength = ops.convert_to_tensor(
        minlength, name="minlength", dtype=dtypes.int32)
    output_size = gen_math_ops.maximum(minlength, output_size)
  if maxlength is not None:
    maxlength = ops.convert_to_tensor(
        maxlength, name="maxlength", dtype=dtypes.int32)
    output_size = gen_math_ops.minimum(maxlength, output_size)
  if weights is None:
    # Empty weights means "count by 1"; the kernel keys off this.
    weights = constant_op.constant([], dtype)
  else:
    weights = ops.convert_to_tensor(weights, name="weights")
  return gen_math_ops.bincount(arr, output_size, weights)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Computes the cumulative sum of the tensor `x` along `axis`.

  By default the cumsum is inclusive — the first output element equals the
  first input element:

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  With `exclusive=True` an exclusive cumsum is performed instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  With `reverse=True` the cumsum runs in the opposite direction (more
  efficient than separate `tf.reverse` ops), and the two kwargs combine:

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.
  The cumprod is inclusive by default, so the first output element equals the
  first input element:
  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```
  Passing `exclusive=True` produces an exclusive cumprod instead:
  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```
  Passing `reverse=True` accumulates in the opposite direction (this is more
  efficient than composing with separate `tf.reverse` ops):
  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```
  The `reverse` and `exclusive` kwargs compose:
  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```
  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    return gen_math_ops.cumprod(
        ops.convert_to_tensor(x, name="x"),
        axis,
        exclusive=exclusive,
        reverse=reverse,
        name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.
  Given a tensor `input` of complex numbers, this operation returns a tensor
  of complex numbers that are the complex conjugate of each element in
  `input`. The complex numbers in `input` must be of the form \\(a + bj\\),
  where *a* is the real part and *b* is the imaginary part; the conjugate
  returned by this operation is of the form \\(a - bj\\).
  For example:
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]
  If `x` is real, it is returned unchanged.
  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).
  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    dt = x.dtype
    if dt.is_complex:
      return gen_math_ops._conj(x, name=name)
    # Conjugation is the identity for real (floating or integer) tensors.
    if dt.is_floating or dt.is_integer:
      return x
    raise TypeError("Expected numeric tensor, got dtype %r" % dt)
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  lhs_shape = op.inputs[0].get_shape()
  rhs_shape = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(lhs_shape, rhs_shape)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.
  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.
  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Worked example: input_shape=[2, 3, 5, 7], axes=[1, 2] -> [2, 1, 1, 7].
  # The cast is needed for SparseTensor reductions.
  input_shape = to_int32(input_shape)
  axes = to_int32(axes)
  input_rank = array_ops.size(input_shape)
  # Wrap negative axes into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  # Scatter the original sizes everywhere, then overwrite the reduced
  # positions with 1s (later indices win in dynamic_stitch).
  return gen_data_flow_ops.dynamic_stitch(
      [range(input_rank), axes],
      [input_shape, array_ops.fill(array_ops.shape(axes), 1)])
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes.
  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors.
  This operation corresponds to `numpy.tensordot(a, b, axes)`.
  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.
  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.
  Example 3: Suppose that \\(a_ijk\\) and \\(b_lmn\\) represent two
  tensors of order 3. Then, `contract(a, b, [0], [2])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:
  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes
      of b in order.
      If axes is a list or `Tensor` the first and second row contain the set of
      unique integers specifying axes along which the contraction is computed,
      for `a` and `b`, respectively. The number of axes for `a` and `b` must
      be equal.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` with the same type as `a`.
  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """
  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.
    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.
    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.
    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Fully static case: do all bookkeeping with Python ints.
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      # The contracted axes go first for the right matmul operand, last for
      # the left one.
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims
    else:
      # Dynamic case: compute permutation and shape with graph ops. When the
      # rank is static we can still report the static free dims.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        free_dims_static = [shape_a[i] for i in free]
      else:
        free_dims_static = None
      shape_a = array_ops.shape(a)
      rank_a = array_ops.rank(a)
      axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
      # Wrap negative axis indices into [0, rank_a).
      axes = cast(axes >= 0, dtypes.int32) * axes + cast(
          axes < 0, dtypes.int32) * (axes + rank_a)
      free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # NOTE: the permutation must be built from the axis *indices* (`axes`,
      # `free`), not from the dimension sizes; a previous dead assignment that
      # concatenated the dims tensors has been removed.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static
  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      # Scalar N: contract the last N axes of `a` with the first N of `b`.
      if axes < 1:
        raise ValueError("'axes' must be at least 1.")
      if a_shape.ndims is not None:
        return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank, dtype=dtypes.int32), range(
            axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      if len(a_axes) != len(b_axes):
        # Format the message with %; the previous code passed the counts as
        # extra ValueError args, leaving the placeholders unfilled.
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s." %
            (len(a_axes), len(b_axes)))
      return a_axes, b_axes
    else:
      # A [2, k] tensor: row 0 is the axes of `a`, row 1 the axes of `b`.
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]
  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(b, b_axes,
                                                                    True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
# Each alias simply re-exports the generated spectral op under its old name.
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
# Fixes python doc for tanh (Resolves #10376)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@sinh
@@cosh
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@atan2
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@bincount
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# Aliases for some automatically-generated names.
# `tf.linspace` re-exports the generated LinSpace op under a friendlier name.
linspace = gen_math_ops.lin_space
# pylint: disable=redefined-builtin
# TODO(aselle): deprecate arg_max
def argmax(input, axis=None, name=None, dimension=None):
  # `dimension` is the deprecated alias for `axis`; at most one may be given.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if axis is None:
    axis = dimension if dimension is not None else 0
  return gen_math_ops.arg_max(input, axis, name)
# Reuse the generated op's docstring, renaming "dimension(s)" to "axis/axes".
argmax.__doc__ = gen_math_ops.arg_max.__doc__.replace(
    "dimensions", "axes").replace("dimension", "axis")
# TODO(aselle:deprecate arg_min)
def argmin(input, axis=None, name=None, dimension=None):
  # `dimension` is the deprecated alias for `axis`; at most one may be given.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if axis is None:
    axis = dimension if dimension is not None else 0
  return gen_math_ops.arg_min(input, axis, name)
# Reuse the generated op's docstring, renaming "dimension(s)" to "axis/axes".
argmin.__doc__ = gen_math_ops.arg_min.__doc__.replace(
    "dimensions", "axes").replace("dimension", "axis")
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
  r"""Computes the absolute value of a tensor.
  Given a tensor `x` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the absolute value of each element in
  `x`. All elements in `x` must be complex numbers of the form \\(a + bj\\);
  the absolute value is computed as \\( \sqrt{a^2 + b^2}\\). For example:
  ```
  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
  tf.complex_abs(x) ==> [5.25594902, 6.60492229]
  ```
  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
      values.
    Note, for `complex64` or `complex128' input, the returned `Tensor` will be
      of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Operate on the values and rebuild the sparse tensor around them.
      values = x.values
      if values.dtype in (dtypes.complex64, dtypes.complex128):
        abs_values = gen_math_ops._complex_abs(
            values, Tout=values.dtype.real_dtype, name=name)
      else:
        abs_values = gen_math_ops._abs(values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=abs_values, dense_shape=x.dense_shape)
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype in (dtypes.complex64, dtypes.complex128):
      # Complex input maps to the corresponding real dtype.
      return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Forwards to the generated Bucketize op with keyword arguments."""
  return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors.
  Wraps a left operand together with a preferred op name so that the division
  operator overloads can forward that name to the underlying division helper.
  """
  def __init__(self, x, name):
    """Construct DivideDelegateWithName.
    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name
  def __truediv__(self, y):
    # Python 3 (and `from __future__ import division`) path.
    return _truediv_python3(self.x, y, self.name)
  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)
  def __div__(self, y):
    # Legacy Python 2 division path.
    return _div_python2(self.x, y, self.name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`."""
  if name is None:
    return x / y
  # Operator overloads cannot carry a custom op name, so route the division
  # through a small delegate object that remembers the preferred name.
  return DivideDelegateWithName(x, name) / y
def multiply(x, y, name=None):
  # Public alias for the generated Mul op; __doc__ is patched just below.
  return gen_math_ops._mul(x, y, name)
# Reuse the generated op's docstring, rebranding "Mul" as `tf.multiply`.
multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias (`tf.mul`); warns on call and forwards to the Mul op.
  return gen_math_ops._mul(x, y, name)
# Prepend the generated op's docstring to the decorator-produced one (at this
# point `_mul.__doc__` already carries the deprecation notice, if any).
_mul.__doc__ = (gen_math_ops._mul.__doc__ +
                ("" if _mul.__doc__ is None else _mul.__doc__))
def subtract(x, y, name=None):
  # Public alias for the generated Sub op; __doc__ is patched just below.
  return gen_math_ops._sub(x, y, name)
# Reuse the generated op's docstring, rebranding "`Sub`" as `tf.subtract`.
subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias (`tf.sub`); warns on call and forwards to the Sub op.
  return gen_math_ops._sub(x, y, name)
# Prepend the generated op's docstring to the decorator-produced one (at this
# point `_sub.__doc__` already carries the deprecation notice, if any).
_sub.__doc__ = (gen_math_ops._sub.__doc__ +
                ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
  """Computes numerical negative value element-wise.
  I.e., \\(y = -x\\).
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Negate the values and rebuild the sparse tensor around them.
      negated = gen_math_ops._neg(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=negated, dense_shape=x.dense_shape)
    return gen_math_ops._neg(x, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecated("2016-12-30",
            "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.
  I.e., \\(y = -x\\).
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  # Deprecated alias (`tf.neg`); delegate to the supported implementation.
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.
  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.
  Zero is returned for NaN inputs.
  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  @compatibility(numpy)
  Equivalent to numpy.sign except for the behavior for input values of NaN.
  @end_compatibility
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Apply the op to the values and rebuild the sparse tensor around them.
      signs = gen_math_ops.sign(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=signs, dense_shape=x.dense_shape)
    return gen_math_ops.sign(x, name=name)
def square(x, name=None):
  r"""Computes square of x element-wise.
  I.e., \\(y = x * x = x^2\\).
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Apply the op to the values and rebuild the sparse tensor around them.
      squared = gen_math_ops.square(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=squared, dense_shape=x.dense_shape)
    return gen_math_ops.square(x, name=name)
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.
  I.e., \\(y = \sqrt{x} = x^{1/2}\\).
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Apply the op to the values and rebuild the sparse tensor around them.
      roots = gen_math_ops.sqrt(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=roots, dense_shape=x.dense_shape)
    return gen_math_ops.sqrt(x, name=name)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.
  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Apply the op to the values and rebuild the sparse tensor around them.
      erfs = gen_math_ops.erf(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=erfs, dense_shape=x.dense_shape)
    return gen_math_ops.erf(x, name=name)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.
  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.
  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.
  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  # Reject anything that is not statically known to be 0-D.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the values; indices and dense shape are unchanged.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  r"""Computes the power of one value to another.
  Given a tensor `x` and a tensor `y`, computes \\(x^y\\) for the
  corresponding elements in `x` and `y`. For example:
  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```
  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.
  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.
  The input tensors `real` and `imag` must have the same shape.
  For example:
  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```
  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `complex64` or `complex128`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # Both parts must share a float dtype, which determines the output dtype.
    if real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      out_type = dtypes.complex128
    elif real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      out_type = dtypes.complex64
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=out_type, name=name)
def real(input, name=None):
  r"""Returns the real part of a complex number.
  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the real part of each element in `input`.
  All elements in `input` must be complex numbers of the form \\(a + bj\\),
  where *a* is the real part returned by this operation and *b* is the
  imaginary part.
  For example:
  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```
  If `input` is already real, it is returned unchanged.
  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    out_dtype = input.dtype.real_dtype
    # Already-real inputs pass through untouched.
    if input.dtype.base_dtype == out_dtype:
      return input
    return gen_math_ops.real(input, Tout=out_dtype, name=name)
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.
  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the imaginary part of each element in
  `input`. All elements in `input` must be complex numbers of the form \\(a +
  bj\\), where *a* is the real part and *b* is the imaginary part returned by
  this operation.
  For example:
  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```
  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    # The output dtype is the real counterpart of the complex input dtype.
    return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.
  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:
  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```
  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded"; return them as-is.
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.
  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.
  For example:
  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```
  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.
  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  target = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Cast only the values; indices and dense shape keep their dtypes.
      casted_values = cast(x.values, target, name=name)
      return sparse_tensor.SparseTensor(x.indices, casted_values,
                                        x.dense_shape)
    # NOTE: ops.convert_to_tensor(x, dtype=dtype) would also work here but
    # would permit conversions (e.g. number -> string) that cast() rejects.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == target:
      return x
    return gen_math_ops.cast(x, target, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.
  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.
  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).
  Returns:
    `value` safely cast to `dtype`.
  """
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    in_dtype = value.dtype
    # Clamp at either end whose representable range is narrower in the target
    # type; this also covers casts to unsigned types.
    if in_dtype.min < dtype.min:
      lower = ops.convert_to_tensor(dtype.min, dtype=in_dtype, name="min")
      value = gen_math_ops.maximum(value, lower)
    if in_dtype.max > dtype.max:
      upper = ops.convert_to_tensor(dtype.max, dtype=in_dtype, name="max")
      value = gen_math_ops.minimum(value, upper)
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.
  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  # Convenience wrapper around `cast` with a fixed target dtype.
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.
  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  # Convenience wrapper around `cast` with a fixed target dtype.
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.
  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  # Convenience wrapper around `cast` with a fixed target dtype.
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.
  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  # Convenience wrapper around `cast` with a fixed target dtype.
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.
  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  # Convenience wrapper around `cast` with a fixed target dtype.
  return cast(x, dtypes.bfloat16, name=name)
# Unary operator overloads on Tensor: `-t` and `abs(t)`.
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """
  # Forward wrapper for dense tensors: left operand is a Tensor, right
  # operand may be a Tensor, a SparseTensor, or a convertible scalar/array.
  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)
  # Wrapper for the sparse case: `func` here takes the decomposed
  # (indices, values, shape) of the sparse operand plus the dense operand,
  # and only the values tensor changes in the result.
  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(sp_x.indices,
                                        func(
                                            sp_x.indices,
                                            sp_x.values,
                                            sp_x.dense_shape,
                                            y,
                                            name=name), sp_x.dense_shape)
  # Reflected wrapper (e.g. `scalar + tensor`): convert the non-tensor left
  # operand using the Tensor operand's dtype, then apply `func` in the
  # original operand order.
  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)
  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # Sparse classes only get the forward form; there is no reflected
    # registration for "dense op sparse" here.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer dtypes are promoted to a float dtype before true division (int8/16
# -> float32, int32/64 -> float64); float and complex dtypes divide as-is.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    lhs_dtype = sp_values.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    try:
      target_dtype = _TRUEDIV_TABLE[lhs_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % lhs_dtype)
    if target_dtype is not None:
      # Integer inputs are promoted to a floating dtype before dividing.
      sp_values = cast(sp_values, target_dtype)
      y = cast(y, target_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Divides `x / y` with Python 3 semantics (integers become floats)."""
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    lhs_dtype = x.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    try:
      target_dtype = _TRUEDIV_TABLE[lhs_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % lhs_dtype)
    if target_dtype is not None:
      # Integer operands are promoted to a float dtype (NumPy-style).
      x = cast(x, target_dtype)
      y = cast(y, target_dtype)
    return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics. Used for Tensor.__div__.

  Floating and complex inputs use real division; integral inputs use
  flooring division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    lhs_dtype = x.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    if lhs_dtype.is_floating or lhs_dtype.is_complex:
      return gen_math_ops._real_div(x, y, name=name)
    return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  All integer arguments are cast to floating types first, mirroring `x / y`
  in Python 3 (and Python 2.7 with `from __future__ import division`). For
  integer division that rounds down, use `x // y` or `tf.floordiv` instead.

  `x` and `y` must share a numeric type. Floating-point inputs keep their
  type; integral inputs are cast to `float32` (`int8`/`int16`) or `float64`
  (`int32`/`int64`), matching NumPy.

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python division operator semantics.

  Forces Python 2.7 semantics: a float operand yields a float result, while
  all-integer operands yield an integer result with flooring division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name)
# Module-level alias: `mod` is flooring modulus.
# TODO(aselle): This should be removed
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Same as `tf.div(x,y)` for integers; for floating-point arguments it is
  `tf.floor(tf.div(x,y))`, so the result is always integral (possibly
  represented as a float). This is what `x // y` produces in Python 3 and in
  Python 2.7 with `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same
  type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative
    integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as scope_name:
    return gen_math_ops._floor_div(x, y, name=scope_name)
# Public aliases for the generated division/modulus kernels.
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    return gen_math_ops._mul(x, y, name=name)
  # Case: Dense * Sparse.
  assert isinstance(y, sparse_tensor.SparseTensor)
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# Register Python binary arithmetic operators on Tensor and SparseTensor.
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  not_both = gen_math_ops.logical_not(gen_math_ops.logical_and(x, y))
  return gen_math_ops.logical_and(either, not_both, name=name)
# Register Python logical and comparison operators on Tensor.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  Produces the values `start, start + delta, start + 2*delta, ...` up to but
  not including `limit`. When `dtype` is not given, it is inferred from the
  inputs. Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  ```python
  # 'start' is 3
  # 'limit' is 18
  # 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  # 'start' is 3
  # 'limit' is 1
  # 'delta' is -0.5
  tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
  # 'limit' is 5
  tf.range(limit) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). First entry of the range when `limit` is
      given; otherwise acts as the limit with the range starting at 0.
    limit: A 0-D `Tensor` (scalar). Exclusive upper limit of the sequence.
    delta: A 0-D `Tensor` (scalar). Increment between entries. Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # No explicit dtype: promote all three arguments to the "widest" of
      # their dtypes, ordered int32 < int64 < float32 < float64.
      hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]
      args = [start, limit, delta]
      assert all(arg.dtype in hierarchy for arg in args)
      common_dtype = max((arg.dtype for arg in args), key=hierarchy.index)
      start = cast(start, common_dtype)
      limit = cast(limit, common_dtype)
      delta = cast(delta, common_dtype)
    return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the sum of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`; unless
  `keep_dims` is true the rank drops by one per reduced dimension, otherwise
  the reduced dimensions are kept with length 1. With no entries in `axis`,
  all dimensions are reduced and a single-element tensor results.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    zero = input_tensor.dtype.as_numpy_dtype()
    # Turn the nonzero mask into int64 ones/zeros and sum them.
    # int64 reduction happens on GPU
    is_nonzero = to_int64(gen_math_ops.not_equal(input_tensor, zero))
    nonzero_count = reduce_sum(
        is_nonzero,
        axis=axis,
        keep_dims=keep_dims,
        reduction_indices=reduction_indices)
    return cast(nonzero_count, dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the mean of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the product of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  Each dimension listed in `axis` is collapsed: unless `keep_dims` is true
  the output rank drops by one per reduced dimension; otherwise the reduced
  dimensions are retained with length 1. With no entries in `axis`, every
  dimension is reduced and a single-element tensor results.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def reduce_logsumexp(input_tensor,
                     axis=None,
                     keep_dims=False,
                     name=None,
                     reduction_indices=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced, and
  a tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It
  avoids overflows caused by taking the exp of large inputs and underflows
  caused by taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0]]
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Shift by the (per-slice) max before exponentiating for numerical
    # stability; stop_gradient because the shift cancels in the gradient.
    my_max = array_ops.stop_gradient(
        reduce_max(
            input_tensor,
            axis=axis,
            reduction_indices=reduction_indices,
            keep_dims=True))
    result = gen_math_ops.log(
        reduce_sum(
            gen_math_ops.exp(input_tensor - my_max),
            axis,
            keep_dims=True,
            reduction_indices=reduction_indices)) + my_max
    if not keep_dims:
      # BUGFIX: fall back to `reduction_indices` when `axis` was not given;
      # previously squeeze received axis=None in that case and removed ALL
      # size-1 dimensions, not just the reduced ones.
      squeeze_dims = axis if axis is not None else reduction_indices
      if isinstance(squeeze_dims, int):
        squeeze_dims = [squeeze_dims]
      result = array_ops.squeeze(result, squeeze_dims)
    return result
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most
  matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, the
  output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])`

  For example:

  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5
  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15
  # 'x' is [[[1,2,3],
  #          [4,5,6],
  #          [7,8,9]],
  #         [[-1,-2,-3],
  #          [-4,-5,-6],
  #          [-7,-8,-9]]]
  tf.trace(x) ==> [15,-15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    matrix = ops.convert_to_tensor(x, name="x")
    # Sum the main diagonal of each inner-most matrix.
    return reduce_sum(array_ops.matrix_diag_part(matrix), [-1], name=name)
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication arguments,
  and any further outer dimensions match.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are
  `False` by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by
  default. This optimization is only available for plain matrices (rank-2
  tensors) with datatypes `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]

  # 3-D tensor `a`
  a = tf.constant(np.arange(1, 13, dtype=np.int32),
                  shape=[2, 2, 3]) => [[[ 1.  2.  3.]
                                        [ 4.  5.  6.]],
                                       [[ 7.  8.  9.]
                                        [10. 11. 12.]]]
  # 3-D tensor `b`
  b = tf.constant(np.arange(13, 25, dtype=np.int32),
                  shape=[2, 3, 2]) => [[[13. 14.]
                                        [15. 16.]
                                        [17. 18.]],
                                       [[19. 20.]
                                        [21. 22.]
                                        [23. 24.]]]
  c = tf.matmul(a, b) => [[[ 94 100]
                           [229 244]],
                          [[508 532]
                           [697 730]]]

  # Since python >= 3.5 the @ operator is supported (see PEP 465).
  # In TensorFlow, it simply calls the `tf.matmul()` function, so the
  # following lines are equivalent:
  d = a @ b @ [[10.], [11.]]
  d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
    for all indices i, j.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
      are both set to True.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_shape = a.get_shape()
    b_shape = b.get_shape()
    # Dispatch: rank > 2 (or unknown rank) on BOTH operands and no sparse
    # hints routes to BatchMatMul; otherwise fall through to the rank-2
    # MatMul / SparseMatMul path below.
    if (not a_is_sparse and not b_is_sparse) and (
        (a_shape.ndims is None or a_shape.ndims > 2) and
        (b_shape.ndims is None or b_shape.ndims > 2)):
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      return gen_math_ops._batch_mat_mul(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True
    # SparseMatMul only handles bfloat16/float32; use it when the caller
    # marked an operand sparse, or when either dtype is bfloat16 (which the
    # dense MatMul kernel does not accept).
    sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
    use_sparse_matmul = (a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types and
                         (a_is_sparse or b_is_sparse))
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      # matmul currently doesn't handle bfloat16 inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
    else:
      return gen_math_ops._mat_mul(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
# Back Python's @ operator (__matmul__) with tf.matmul, and expose the
# generated SparseMatMul kernel under its public alias.
_OverrideBinaryOperatorHelper(matmul, "matmul")
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The shared inner dimension k comes from the first input; which axis
  # holds it depends on whether that input is transposed.
  k = int(a_shape[0]) if node.attr["transpose_a"].b else int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # One multiply plus one add per inner-dimension step and output element.
  return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = len(
      [o for o in outputs if o.indices.dtype == dtypes.int32])
  if num_int32 == 0 or num_int32 == len(outputs):
    # Index dtypes are already homogeneous; nothing to do.
    return outputs
  # Mixed index dtypes: promote every int32 index tensor to int64.
  casted_outputs = []
  for o in outputs:
    if o.indices.dtype == dtypes.int32:
      casted_outputs.append(
          ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                            o.dense_shape))
    else:
      casted_outputs.append(o)
  return casted_outputs
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if any(not isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  # A single input is just passed through (optionally renamed) — no AddN op.
  if len(inputs) == 1:
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs depend
  on trainable variables. Please use `tf.add_n` for such cases.

  Aside from differentiability, `tf.accumulate_n` performs the same operation as
  `tf.add_n`, but does not wait for all of its inputs to be ready before
  beginning to sum. This can save memory if inputs are ready at different times,
  since minimum temporary storage is proportional to the output size rather than
  the inputs size.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  # Merge the user-provided shape with every input's static shape; raises
  # if any pair of shapes is incompatible.
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # A temporary variable accumulates partial sums as each input becomes
    # ready, so peak temporary memory is proportional to the output size
    # rather than to the sum of all input sizes.
    var = gen_state_ops._temporary_variable(
        shape=tensor_shape.vector(0), dtype=tensor_dtype)
    with ops.colocate_with(var):
      # _merge yields as soon as *any* input is ready; zeros_like of that
      # value provides a correctly typed and shaped zero initializer.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      update_ops = [
          state_ops.assign_add(ref, input_tensor, use_locking=True)
          for input_tensor in inputs
      ]
      with ops.control_dependencies(update_ops):
        # Reading the value destroys the temporary variable and returns the
        # accumulated sum.
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
    otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to np.scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    return gen_math_ops._sigmoid(
        ops.convert_to_tensor(x, name="x"), name=name)
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
  we use `y = -tf.nn.softplus(-x)` instead of evaluating the formula directly.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._neg(gen_nn_ops.softplus(-tensor), name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    # Dense tensors go straight through the kernel.
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._tanh(x, name=name)
    # For a SparseTensor, tanh(0) == 0, so applying the op to the stored
    # values alone preserves sparsity.
    mapped_values = gen_math_ops._tanh(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=mapped_values, dense_shape=x.dense_shape)
def bincount(arr,
             weights=None,
             minlength=None,
             maxlength=None,
             dtype=dtypes.int32):
  """Counts the number of occurrences of each value in an integer array.

  If `minlength` and `maxlength` are not given, returns a vector with length
  `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
  If `weights` are non-None, then index `i` of the output stores the sum of the
  value in `weights` at each index where the corresponding value in `arr` is
  `i`.

  Args:
    arr: An int32 tensor of non-negative values.
    weights: If non-None, must be the same shape as arr. For each value in
      `arr`, the bin will be incremented by the corresponding weight instead
      of 1.
    minlength: If given, ensures the output has length at least `minlength`,
      padding with zeros at the end if necessary.
    maxlength: If given, skips values in `arr` that are equal or greater than
      `maxlength`, ensuring that the output has length at most `maxlength`.
    dtype: If `weights` is None, determines the type of the output bins.

  Returns:
    A vector with the same dtype as `weights` or the given `dtype`. The bin
    values.
  """
  arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
  # Output length is max(arr) + 1 for a non-empty input, and 0 otherwise.
  is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
  output_size = cast(is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
  # Clamp the size from below / above when the caller asked for bounds.
  if minlength is not None:
    minlength = ops.convert_to_tensor(
        minlength, name="minlength", dtype=dtypes.int32)
    output_size = gen_math_ops.maximum(minlength, output_size)
  if maxlength is not None:
    maxlength = ops.convert_to_tensor(
        maxlength, name="maxlength", dtype=dtypes.int32)
    output_size = gen_math_ops.minimum(maxlength, output_size)
  # The kernel interprets an empty weights tensor as "count by 1".
  if weights is None:
    weights = constant_op.constant([], dtype)
  else:
    weights = ops.convert_to_tensor(weights, name="weights")
  return gen_math_ops.bincount(arr, output_size, weights)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
  instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  By setting the `reverse` kwarg to `True`, the cumsum is performed in the
  opposite direction (more efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  ```

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    return gen_math_ops.cumsum(
        ops.convert_to_tensor(x, name="x"),
        axis,
        exclusive=exclusive,
        reverse=reverse,
        name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumprod, which means that the
  first element of the input is identical to the first element of the output:

  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```

  By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
  performed instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```

  By setting the `reverse` kwarg to `True`, the cumprod is performed in the
  opposite direction (more efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0).
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    return gen_math_ops.cumprod(
        ops.convert_to_tensor(x, name="x"),
        axis,
        exclusive=exclusive,
        reverse=reverse,
        name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `input`. The
  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
  real part and *b* is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
      tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

  If `x` is real, it is returned unchanged.

  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Real tensors are their own conjugate; only complex dtypes need the op.
    if x.dtype.is_floating or x.dtype.is_integer:
      return x
    if x.dtype.is_complex:
      return gen_math_ops._conj(x, name=name)
    raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  lhs_shape = op.inputs[0].get_shape()
  rhs_shape = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(lhs_shape, rhs_shape)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = to_int32(input_shape)  # [2, 3, 5, 7]
  axes = to_int32(axes)  # [1, 2]
  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axis indices into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch the original sizes of the non-reduced axes together with 1s for
  # the reduced axes, yielding the keep_dims-style output shape.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.fill(axes_shape, 1)
      ])  # [1, 1]
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: Suppose that \\(a_ijk\\) and \\(b_lmn\\) represent two
  tensors of order 3. Then, `contract(a, b, [0], [2])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
     If axes is a scalar, sum over the last N axes of a and the first N axes
     of b in order.
     If axes is a list or `Tensor` the first and second row contain the set of
     unique integers specifying axes along which the contraction is computed,
     for `a` and `b`, respectively. The number of axes for `a` and `b` must
     be equal.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
       `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Fully static shape: do all the bookkeeping in Python.
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims
    else:
      # Dynamic shape: build the permutation and new shape as graph ops.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        free_dims_static = [shape_a[i] for i in free]
      else:
        free_dims_static = None
      shape_a = array_ops.shape(a)
      rank_a = array_ops.rank(a)
      axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
      axes = cast(axes >= 0, dtypes.int32) * axes + cast(
          axes < 0, dtypes.int32) * (axes + rank_a)
      free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # BUG FIX: a dead `perm = array_ops.concat([axes_dims, free_dims], 0)`
      # previously preceded this branch; it was always overwritten below and
      # wrongly concatenated dimension *sizes* instead of axis indices, so it
      # has been removed.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      # Scalar N: contract the last N axes of `a` with the first N of `b`.
      if axes < 1:
        raise ValueError("'axes' must be at least 1.")
      if a_shape.ndims is not None:
        return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank, dtype=dtypes.int32), range(
            axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s.",
            len(a_axes), len(b_axes))
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    # Reduce the contraction to a single matmul over flattened operands.
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(b, b_axes,
                                                                    True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        # Attach whatever static shape information we could infer.
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
# Each name below is a direct alias of the generated spectral op.
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
|
from app_data import multiform_factory
from app_data.admin import AppDataModelAdmin
from django.contrib import admin
from django.forms import models as modelforms
from ella.core.models import Author, Source, Category, Listing, Related
class ListingForm(modelforms.ModelForm):
    """Plain model form for ``Listing``."""

    class Meta:
        model = Listing
        # NOTE(review): no ``fields``/``exclude`` declared, so all model
        # fields are editable; newer Django versions require an explicit
        # ``fields`` — confirm the targeted Django release.
class ListingInlineAdmin(admin.TabularInline):
    """Tabular inline for editing ``Listing`` rows on a parent change page."""

    model = Listing
    extra = 2  # show two empty listing rows by default
    fieldsets = ((None, {'fields': ('category', 'publish_from', 'publish_to', 'commercial',)}),)
class RelatedInlineAdmin(admin.TabularInline):
    """Tabular inline for editing ``Related`` rows on a parent change page."""

    model = Related
    extra = 3  # show three empty related rows by default
    # raw_id_fields = ('publishable_id',)
# Wrap the plain Category model form in an app_data multi-form and attach an
# "ella" sub-form holding the app-specific settings fields.
CategoryMultiForm = multiform_factory(modelforms.modelform_factory(Category))
CategoryMultiForm.add_form('ella', {'fields': ('paginate_by', 'propagate_listings')})
class CategoryAdmin(AppDataModelAdmin):
    """Admin for ``Category`` using the app_data multi-form machinery."""

    list_filter = ('site',)
    list_display = ('draw_title', 'tree_path', '__unicode__')
    search_fields = ('title', 'slug',)
    #ordering = ('site', 'tree_path',)
    prepopulated_fields = {'slug': ('title',)}
    # The 'ella.*' entries address fields of the "ella" app_data sub-form.
    declared_fieldsets = ((None, {'fields': ('title', 'slug',
                                             ('description', 'content'),
                                             'template', ('site', 'tree_parent'),
                                             'ella.paginate_by',
                                             'ella.propagate_listings')}),)
    multiform = CategoryMultiForm
class AuthorAdmin(admin.ModelAdmin):
    """Admin options for ``Author``."""

    prepopulated_fields = {'slug': ('name',)}
    search_fields = ('name',)
    raw_id_fields = ('photo',)  # avoid rendering a huge photo dropdown
class SourceAdmin(admin.ModelAdmin):
    """Admin options for ``Source``."""

    list_display = ('name', 'url',)
    search_fields = ('name',)
class PublishableAdmin(admin.ModelAdmin):
    """ Default admin options for all publishables """

    list_display = ('title', 'category', 'publish_from')
    list_filter = ('category', 'authors',)
    search_fields = ('title', 'description', 'slug', 'authors__name', 'authors__slug',)  # FIXME: 'tags__tag__name',)
    raw_id_fields = ('photo',)
    prepopulated_fields = {'slug': ('title',)}
    # Extra options consumed by custom admin tooling — presumably the
    # newman/suggest machinery; verify against the consuming admin app.
    rich_text_fields = {None: ('description',)}
    suggest_fields = {
        'category': ('tree_path', 'title', 'slug',),
        'authors': ('name', 'slug', 'email',),
        'source': ('name', 'url',),
    }
class ListingAdmin(admin.ModelAdmin):
    """Admin options for ``Listing``."""

    date_hierarchy = 'publish_from'
    list_display = ('__unicode__', 'publish_from', 'publish_to',)
    list_filter = ('category',)
    search_fields = ('publishable__title', 'publishable__slug',
                     'publishable__description',)
# Register the core models with the default admin site.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Listing, ListingAdmin)
Switch to the new app_data API: multiform_factory now takes the model class directly instead of a pre-built model form.
from app_data import multiform_factory
from app_data.admin import AppDataModelAdmin
from django.contrib import admin
from django.forms import models as modelforms
from ella.core.models import Author, Source, Category, Listing, Related
class ListingForm(modelforms.ModelForm):
    """Plain model form for ``Listing``."""

    class Meta:
        model = Listing
        # NOTE(review): no ``fields``/``exclude`` declared, so all model
        # fields are editable; newer Django versions require an explicit
        # ``fields`` — confirm the targeted Django release.
class ListingInlineAdmin(admin.TabularInline):
    """Tabular inline for editing ``Listing`` rows on a parent change page."""

    model = Listing
    extra = 2  # show two empty listing rows by default
    fieldsets = ((None, {'fields': ('category', 'publish_from', 'publish_to', 'commercial',)}),)
class RelatedInlineAdmin(admin.TabularInline):
    """Tabular inline for editing ``Related`` rows on a parent change page."""

    model = Related
    extra = 3  # show three empty related rows by default
    # raw_id_fields = ('publishable_id',)
# New app_data API: pass the model class directly; then attach an "ella"
# sub-form holding the app-specific settings fields.
CategoryMultiForm = multiform_factory(Category)
CategoryMultiForm.add_form('ella', {'fields': ('paginate_by', 'propagate_listings')})
class CategoryAdmin(AppDataModelAdmin):
    """Admin for ``Category`` using the app_data multi-form machinery."""

    list_filter = ('site',)
    list_display = ('draw_title', 'tree_path', '__unicode__')
    search_fields = ('title', 'slug',)
    #ordering = ('site', 'tree_path',)
    prepopulated_fields = {'slug': ('title',)}
    # The 'ella.*' entries address fields of the "ella" app_data sub-form.
    declared_fieldsets = ((None, {'fields': ('title', 'slug',
                                             ('description', 'content'),
                                             'template', ('site', 'tree_parent'),
                                             'ella.paginate_by',
                                             'ella.propagate_listings')}),)
    multiform = CategoryMultiForm
class AuthorAdmin(admin.ModelAdmin):
    """Admin options for ``Author``."""

    prepopulated_fields = {'slug': ('name',)}
    search_fields = ('name',)
    raw_id_fields = ('photo',)  # avoid rendering a huge photo dropdown
class SourceAdmin(admin.ModelAdmin):
    """Admin options for ``Source``."""

    list_display = ('name', 'url',)
    search_fields = ('name',)
class PublishableAdmin(admin.ModelAdmin):
    """ Default admin options for all publishables """

    list_display = ('title', 'category', 'publish_from')
    list_filter = ('category', 'authors',)
    search_fields = ('title', 'description', 'slug', 'authors__name', 'authors__slug',)  # FIXME: 'tags__tag__name',)
    raw_id_fields = ('photo',)
    prepopulated_fields = {'slug': ('title',)}
    # Extra options consumed by custom admin tooling — presumably the
    # newman/suggest machinery; verify against the consuming admin app.
    rich_text_fields = {None: ('description',)}
    suggest_fields = {
        'category': ('tree_path', 'title', 'slug',),
        'authors': ('name', 'slug', 'email',),
        'source': ('name', 'url',),
    }
class ListingAdmin(admin.ModelAdmin):
    """Admin options for ``Listing``."""

    date_hierarchy = 'publish_from'
    list_display = ('__unicode__', 'publish_from', 'publish_to',)
    list_filter = ('category',)
    search_fields = ('publishable__title', 'publishable__slug',
                     'publishable__description',)
# Register the core models with the default admin site.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Listing, ListingAdmin)
|
#!/usr/bin/env python
##################################################
#
# howdoi - a code search tool.
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
##################################################
import argparse
import re
import requests
import urllib
from pyquery import PyQuery as pq
# Search endpoints, restricted to stackoverflow.com; {0} is the URL-quoted query.
GOOGLE_SEARCH_URL = "https://www.google.com/search?q=site:stackoverflow.com%20{0}"
DUCK_SEARCH_URL = "http://duckduckgo.com/html?q=site%3Astackoverflow.com%20{0}"
# Pretend to be a desktop browser so the search engines return normal HTML.
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17"
def get_result(url):
    """Fetch *url* and return the response body as unicode text.

    Uses ``response.text`` so requests decodes the body with the charset
    the server declared, instead of the previous hard-coded UTF-8 decode
    (which mis-decoded non-UTF-8 pages).
    """
    response = requests.get(url, headers={'User-agent': USER_AGENT})
    return response.text
def is_question(link):
    """Return a truthy match object if *link* points at a question page.

    The pattern is now a raw string: ``'\\d'`` in a plain literal relies on
    Python passing unknown escapes through, which is fragile (a warning or
    error in newer Pythons).
    """
    return re.search(r'questions/\d+/', link)
def get_google_links(query):
    """Scrape result hrefs for *query* from Google's HTML results page."""
    page = get_result(GOOGLE_SEARCH_URL.format(urllib.quote(query)))
    doc = pq(page)
    return [anchor.attrib['href'] for anchor in doc('.l')]
def get_duck_links(query):
    """Scrape result hrefs for *query* from DuckDuckGo's HTML results page.

    BUG FIX: the computed list was previously assigned to a local and never
    returned, so the function always yielded ``None``.
    """
    url = DUCK_SEARCH_URL.format(urllib.quote(query))
    result = get_result(url)
    html = pq(result)
    return [l.find('a').attrib['href'] for l in html('.links_main')]
def get_link_at_pos(links, pos):
    """Return the *pos*-th (1-based) question link from *links*.

    Mirrors the original behaviour: when fewer than *pos* question links
    exist, the last iterated link is returned.  BUG FIX: an empty *links*
    used to raise ``NameError`` (``link`` unbound); it now returns ``None``.
    """
    link = None
    pos = int(pos) - 1
    for link in links:
        if is_question(link):
            if pos == 0:
                return link
            pos -= 1
    return link
def get_instructions(args):
    """Return answer text (or just the link) for the parsed CLI args dict.

    Searches Google for StackOverflow links, picks the answer page at the
    requested position and extracts either the first code block or the full
    answer text.
    """
    links = get_google_links(args['query'])
    if not links:
        return ''
    link = get_link_at_pos(links, args['pos'])
    if args.get('link'):
        # -l/--link: only report the URL, do not fetch the page.
        return link
    # Sort answers by votes so the first '.answer' node is the top answer.
    link = link + '?answertab=votes'
    page = get_result(link)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    instructions = first_answer.find('pre') or first_answer.find('code')
    if args['all'] or not instructions:
        # -a/--all given, or no code block found: fall back to the full text.
        text = first_answer.find('.post-text').eq(0).text()
    else:
        text = instructions.eq(0).text()
    if not text:
        return ''
    return text
def howdoi(args):
    """Run the query from parsed CLI args and print the best answer."""
    # Join the word list into one query string; question marks add nothing
    # to the search and are stripped.
    args['query'] = ' '.join(args['query']).replace('?', '')
    instructions = get_instructions(args) or 'Sorry, couldn\'t find any help with that topic'
    print instructions
def command_line_runner():
    """Parse command-line arguments and dispatch to :func:`howdoi`."""
    parser = argparse.ArgumentParser(description='code search tool')
    # REMAINDER collects all trailing words as the query, unquoted.
    parser.add_argument('query', metavar='QUERY', type=str, nargs=argparse.REMAINDER,
                        help='the question to answer')
    parser.add_argument('-p','--pos', help='select answer in specified position (default: 1)', default=1)
    parser.add_argument('-a','--all', help='display the full text of the answer',
                        action='store_true')
    parser.add_argument('-l','--link', help='display only the answer link',
                        action='store_true')
    args = vars(parser.parse_args())
    howdoi(args)
# Allow running the module directly as a script.
if __name__ == '__main__':
    command_line_runner()
Use `response.text` instead of `response.content` so requests automatically decodes the body with the charset declared by the server, rather than hard-coding UTF-8.
#!/usr/bin/env python
##################################################
#
# howdoi - a code search tool.
# written by Benjamin Gleitzman (gleitz@mit.edu)
# inspired by Rich Jones (rich@anomos.info)
#
##################################################
import argparse
import re
import requests
import urllib
from pyquery import PyQuery as pq
# Search endpoints, restricted to stackoverflow.com; {0} is the URL-quoted query.
GOOGLE_SEARCH_URL = "https://www.google.com/search?q=site:stackoverflow.com%20{0}"
DUCK_SEARCH_URL = "http://duckduckgo.com/html?q=site%3Astackoverflow.com%20{0}"
# Pretend to be a desktop browser so the search engines return normal HTML.
USER_AGENT = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_2) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1309.0 Safari/537.17"
def get_result(url):
    # ``.text`` lets requests decode the body with the server-declared charset.
    return requests.get(url, headers={'User-agent': USER_AGENT}).text
def is_question(link):
    """Return a truthy match object if *link* points at a question page.

    The pattern is now a raw string: ``'\\d'`` in a plain literal relies on
    Python passing unknown escapes through, which is fragile (a warning or
    error in newer Pythons).
    """
    return re.search(r'questions/\d+/', link)
def get_google_links(query):
    """Scrape result hrefs for *query* from Google's HTML results page."""
    page = get_result(GOOGLE_SEARCH_URL.format(urllib.quote(query)))
    doc = pq(page)
    return [anchor.attrib['href'] for anchor in doc('.l')]
def get_duck_links(query):
    """Scrape result hrefs for *query* from DuckDuckGo's HTML results page.

    BUG FIX: the computed list was previously assigned to a local and never
    returned, so the function always yielded ``None``.
    """
    url = DUCK_SEARCH_URL.format(urllib.quote(query))
    result = get_result(url)
    html = pq(result)
    return [l.find('a').attrib['href'] for l in html('.links_main')]
def get_link_at_pos(links, pos):
    """Return the *pos*-th (1-based) question link from *links*.

    Mirrors the original behaviour: when fewer than *pos* question links
    exist, the last iterated link is returned.  BUG FIX: an empty *links*
    used to raise ``NameError`` (``link`` unbound); it now returns ``None``.
    """
    link = None
    pos = int(pos) - 1
    for link in links:
        if is_question(link):
            if pos == 0:
                return link
            pos -= 1
    return link
def get_instructions(args):
    """Return answer text (or just the link) for the parsed CLI args dict.

    Searches Google for StackOverflow links, picks the answer page at the
    requested position and extracts either the first code block or the full
    answer text.
    """
    links = get_google_links(args['query'])
    if not links:
        return ''
    link = get_link_at_pos(links, args['pos'])
    if args.get('link'):
        # -l/--link: only report the URL, do not fetch the page.
        return link
    # Sort answers by votes so the first '.answer' node is the top answer.
    link = link + '?answertab=votes'
    page = get_result(link)
    html = pq(page)
    first_answer = html('.answer').eq(0)
    instructions = first_answer.find('pre') or first_answer.find('code')
    if args['all'] or not instructions:
        # -a/--all given, or no code block found: fall back to the full text.
        text = first_answer.find('.post-text').eq(0).text()
    else:
        text = instructions.eq(0).text()
    if not text:
        return ''
    return text
def howdoi(args):
    """Run the query from parsed CLI args and print the best answer."""
    # Join the word list into one query string; question marks add nothing
    # to the search and are stripped.
    args['query'] = ' '.join(args['query']).replace('?', '')
    instructions = get_instructions(args) or 'Sorry, couldn\'t find any help with that topic'
    print instructions
def command_line_runner():
    """Parse command-line arguments and dispatch to :func:`howdoi`."""
    parser = argparse.ArgumentParser(description='code search tool')
    # REMAINDER collects all trailing words as the query, unquoted.
    parser.add_argument('query', metavar='QUERY', type=str, nargs=argparse.REMAINDER,
                        help='the question to answer')
    parser.add_argument('-p','--pos', help='select answer in specified position (default: 1)', default=1)
    parser.add_argument('-a','--all', help='display the full text of the answer',
                        action='store_true')
    parser.add_argument('-l','--link', help='display only the answer link',
                        action='store_true')
    args = vars(parser.parse_args())
    howdoi(args)
# Allow running the module directly as a script.
if __name__ == '__main__':
    command_line_runner()
|
""".. Ignore pydocstyle D400.
.. autoclass:: resolwe.flow.executors.docker.run.FlowExecutor
:members:
"""
# pylint: disable=logging-format-interpolation
import json
import logging
import os
import platform
import time
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
import docker
from .. import constants
from ..global_settings import PROCESS_META, SETTINGS, STORAGE_LOCATION
from ..local.run import FlowExecutor as LocalFlowExecutor
from ..protocol import ExecutorFiles
from .seccomp import SECCOMP_POLICY
# Limits of containers' access to memory. We set the limit to ensure
# processes are stable and do not get killed by OOM signal.
DOCKER_MEMORY_HARD_LIMIT_BUFFER = 100  # headroom above requested limit — TODO confirm unit (MB?)
DOCKER_MEMORY_SWAP_RATIO = 2  # swap limit as a multiple of the memory limit — TODO confirm usage
DOCKER_MEMORY_SWAPPINESS = 1  # keep container swapping to a minimum
logger = logging.getLogger(__name__)
class FlowExecutor(LocalFlowExecutor):
"""Docker executor."""
name = "docker"
    def __init__(self, *args, **kwargs):
        """Initialize attributes."""
        super().__init__(*args, **kwargs)
        # Containers get a recognizable, configurable name prefix so
        # operators can identify (and bulk-clean) Resolwe containers.
        container_name_prefix = SETTINGS.get("FLOW_EXECUTOR", {}).get(
            "CONTAINER_NAME_PREFIX", "resolwe"
        )
        self.container_name = self._generate_container_name(container_name_prefix)
        # Bind mounts for tools; filled in later — TODO confirm where it is populated.
        self.tools_volumes = []
        # Docker command name, overridable via settings (e.g. a wrapper binary).
        self.command = SETTINGS.get("FLOW_DOCKER_COMMAND", "docker")
        self.runtime_dir = Path(SETTINGS["FLOW_EXECUTOR"].get("RUNTIME_DIR", ""))
# Setup Docker volumes.
    def _new_volume(
        self,
        kind: str,
        base_dir_name: Optional[str],
        mount_point: Union[str, Path],
        path: Union[str, Path] = "",
        read_only: bool = True,
    ) -> Tuple[str, Dict[str, str]]:
        """Generate a new volume entry.

        :param kind: Kind of volume, which is used for getting extra options from
            settings (the ``FLOW_DOCKER_VOLUME_EXTRA_OPTIONS`` setting)
        :param base_dir_name: Name of base directory setting for volume source path
        :param mount_point: Destination volume mount point
        :param path: Optional additional path atoms appended to source path
        :param read_only: True to make the volume read-only
        :return: A ``(source_path, {"bind": ..., "mode": ...})`` pair in the
            format docker-py expects for its ``volumes`` mapping.
        """
        # Extra options arrive comma-separated from settings; discard empty
        # atoms and any explicit ro/rw so the read_only argument always wins.
        options = set(
            SETTINGS.get("FLOW_DOCKER_VOLUME_EXTRA_OPTIONS", {})
            .get(kind, "")
            .split(",")
        ).difference(["", "ro", "rw"])
        options.add("ro" if read_only else "rw")
        base_path = Path(SETTINGS["FLOW_EXECUTOR"].get(base_dir_name, ""))
        return (
            os.fspath(base_path / path),
            {
                "bind": os.fspath(mount_point),
                "mode": ",".join(options),
            },
        )
    def _communicator_volumes(self) -> Dict:
        """Prepare volumes for communicator container.

        Returns a mapping of host source paths to docker-py bind specs, built
        from ``_new_volume`` pairs: the data directories are writable, while
        secrets and settings are mounted read-only.
        """
        storage_url = Path(STORAGE_LOCATION["url"])
        communicator_volumes = [
            self._new_volume(
                "data",
                "DATA_DIR",
                constants.DATA_VOLUME,
                storage_url,
                read_only=False,
            ),
            self._new_volume(
                "data_all", "DATA_DIR", constants.DATA_ALL_VOLUME, read_only=False
            ),
            self._new_volume(
                "secrets",
                "RUNTIME_DIR",
                constants.SECRETS_VOLUME,
                storage_url / ExecutorFiles.SECRETS_DIR,
            ),
            self._new_volume(
                "settings",
                "RUNTIME_DIR",
                "/settings",
                storage_url / ExecutorFiles.SETTINGS_SUBDIR,
            ),
            # Sockets are shared read-write with the processing container.
            self._new_volume(
                "sockets",
                "RUNTIME_DIR",
                constants.SOCKETS_VOLUME,
                storage_url / ExecutorFiles.SOCKETS_SUBDIR,
                read_only=False,
            ),
        ]
        return dict(communicator_volumes)
    def _processing_volumes(self) -> Dict:
        """Prepare volumes for processing container.

        Besides the data/secrets/sockets mounts this also creates the local
        working directory on the host, generates minimal ``passwd``/``group``
        files, and mounts tools, runtime scripts and any extra volumes from
        settings.

        :return: mapping of host paths to docker-py volume specifications.
        """
        storage_url = Path(STORAGE_LOCATION["url"])
        # Create local data dir.
        base_path = Path(SETTINGS["FLOW_EXECUTOR"].get("DATA_DIR", ""))
        local_data = base_path / f"{storage_url}_work"
        # NOTE(review): mkdir() raises FileExistsError when the work dir is
        # already present — presumably a deliberate guard against re-runs;
        # confirm against _data_dir_clean().
        local_data.mkdir()
        processing_volumes = [
            # Data directory of this run (writable).
            self._new_volume(
                "data",
                "DATA_DIR",
                constants.DATA_VOLUME,
                storage_url,
                read_only=False,
            ),
            # Scratch/work directory for this run (writable).
            self._new_volume(
                "data_local",
                "DATA_DIR",
                constants.DATA_LOCAL_VOLUME,
                f"{storage_url}_work",
                read_only=False,
            ),
            # All-data directory, read-only for the processing container.
            self._new_volume("data_all", "DATA_DIR", constants.DATA_ALL_VOLUME),
            # Upload directory (writable).
            self._new_volume(
                "upload", "UPLOAD_DIR", constants.UPLOAD_VOLUME, read_only=False
            ),
            # Secrets, read-only.
            self._new_volume(
                "secrets",
                "RUNTIME_DIR",
                constants.SECRETS_VOLUME,
                storage_url / ExecutorFiles.SECRETS_DIR,
            ),
            # Sockets shared with the communicator container (writable).
            self._new_volume(
                "sockets",
                "RUNTIME_DIR",
                constants.SOCKETS_VOLUME,
                storage_url / ExecutorFiles.SOCKETS_SUBDIR,
                read_only=False,
            ),
            # Helper scripts mounted as single files, read-only.
            self._new_volume(
                "socket_utils",
                "RUNTIME_DIR",
                "/socket_utils.py",
                storage_url / "executors" / ExecutorFiles.SOCKET_UTILS,
            ),
            # NOTE(review): kind "socket_utils" is reused for start.py —
            # possibly a copy-paste; confirm whether a "start" kind was meant.
            self._new_volume(
                "socket_utils",
                "RUNTIME_DIR",
                "/start.py",
                storage_url / "executors" / ExecutorFiles.STARTUP_PROCESSING_SCRIPT,
            ),
            self._new_volume(
                "constants",
                "RUNTIME_DIR",
                "/constants.py",
                storage_url / "executors" / ExecutorFiles.CONSTANTS,
            ),
        ]
        # Generate dummy passwd and create mappings for it. This is required because some tools
        # inside the container may try to lookup the given UID/GID and will crash if they don't
        # exist. So we create minimal user/group files.
        passwd_path = self.runtime_dir / storage_url / "passwd"
        group_path = self.runtime_dir / storage_url / "group"
        with passwd_path.open("wt") as passwd_file:
            passwd_file.write(
                "root:x:0:0:root:/root:/bin/bash\n"
                + f"user:x:{os.getuid()}:{os.getgid()}:user:{os.fspath(constants.DATA_LOCAL_VOLUME)}:/bin/bash\n"
            )
        with group_path.open("wt") as group_file:
            group_file.write("root:x:0:\n" + f"user:x:{os.getgid()}:user\n")
        # Mount the generated files over the container's /etc/passwd and /etc/group.
        processing_volumes += [
            self._new_volume("users", None, "/etc/passwd", passwd_path),
            self._new_volume("users", None, "/etc/group", group_path),
        ]
        # Create volumes for tools. Each tool directory is mounted under a
        # numbered subdirectory of /usr/local/bin/resolwe.
        processing_volumes += [
            self._new_volume(
                "tools",
                None,
                Path("/usr/local/bin/resolwe") / str(index),
                Path(tool),
            )
            for index, tool in enumerate(self.get_tools_paths())
        ]
        # Create volumes for runtime (all read-only).
        processing_volumes += [
            self._new_volume(
                "runtime",
                "RUNTIME_DIR",
                dst,
                storage_url / src,
            )
            for src, dst in SETTINGS.get("RUNTIME_VOLUME_MAPS", {}).items()
        ]
        # Add any extra volumes verbatim.
        processing_volumes += SETTINGS.get("FLOW_DOCKER_EXTRA_VOLUMES", [])
        return dict(processing_volumes)
def _data_dir_clean(self, storage_url: Path) -> bool:
"""Check if data dir does not contain old log file."""
log = Path(SETTINGS["FLOW_EXECUTOR"]["DATA_DIR"]) / storage_url / "stdout.txt"
return not log.is_file()
async def start(self):
"""Start process execution."""
# Old log file is present, do not run the process again.
storage_url = Path(STORAGE_LOCATION["url"])
if not self._data_dir_clean(storage_url):
logger.error("Stdout or jsonout file already exists, aborting.")
return
memory = (
self.process["resource_limits"]["memory"] + DOCKER_MEMORY_HARD_LIMIT_BUFFER
)
memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO)
network = "bridge"
if "network" in self.resources:
# Configure Docker network mode for the container (if specified).
# By default, current Docker versions use the 'bridge' mode which
# creates a network stack on the default Docker bridge.
network = SETTINGS.get("FLOW_EXECUTOR", {}).get("NETWORK", "")
security_options = []
if not SETTINGS.get("FLOW_DOCKER_DISABLE_SECCOMP", False):
security_options.append(f"seccomp={json.dumps(SECCOMP_POLICY)}")
processing_image = self.requirements.get(
"image", constants.DEFAULT_CONTAINER_IMAGE
)
communicator_image = SETTINGS.get(
"DOCKER_COMMUNICATOR_IMAGE", "resolwe/com:python-3.9"
)
ulimits = []
if (
self.process["scheduling_class"]
== PROCESS_META["SCHEDULING_CLASS_INTERACTIVE"]
):
# TODO: This is not very good as each child gets the same limit.
# Note: Ulimit does not work as expected on multithreaded processes
# Limit is increased by factor 1.2 for processes with 2-8 threads.
# TODO: This should be changed for processes with over 8 threads.
cpu_time_interactive = SETTINGS.get(
"FLOW_PROCESS_RESOURCE_DEFAULTS", {}
).get("cpu_time_interactive", 30)
cpu_limit = int(cpu_time_interactive * 1.2)
ulimits.append(
docker.types.Ulimit(name="cpu", soft=cpu_limit, hard=cpu_limit)
)
# Make sure that tmp dir exists.
os.makedirs(constants.TMPDIR, mode=0o755, exist_ok=True)
# Make sure that sockets dir exists.
os.makedirs(
self.runtime_dir / storage_url / ExecutorFiles.SOCKETS_SUBDIR, exist_ok=True
)
logger.debug("Checking existence of docker image: %s.", processing_image)
listener_settings = SETTINGS.get("FLOW_EXECUTOR", {}).get(
"LISTENER_CONNECTION", {}
)
environment = {
"CONTAINER_TIMEOUT": constants.CONTAINER_TIMEOUT,
"SOCKETS_VOLUME": constants.SOCKETS_VOLUME,
"COMMUNICATION_PROCESSING_SOCKET": constants.COMMUNICATION_PROCESSING_SOCKET,
"SCRIPT_SOCKET": constants.SCRIPT_SOCKET,
"LISTENER_IP": listener_settings.get("hosts", {}).get(
"docker", "127.0.0.1"
),
"LISTENER_PORT": listener_settings.get("port", 53893),
"LISTENER_PROTOCOL": listener_settings.get("protocol", "tcp"),
"DATA_ID": self.data_id,
"LOCATION_SUBPATH": os.fspath(storage_url),
"DATA_LOCAL_VOLUME": os.fspath(constants.DATA_LOCAL_VOLUME),
"DATA_ALL_VOLUME": os.fspath(constants.DATA_ALL_VOLUME),
"DATA_VOLUME": os.fspath(constants.DATA_VOLUME),
"UPLOAD_VOLUME": os.fspath(constants.UPLOAD_VOLUME),
"SECRETS_DIR": os.fspath(constants.SECRETS_VOLUME),
"RUNNING_IN_CONTAINER": 1,
"RUNNING_IN_DOCKER": 1,
"FLOW_MANAGER_KEEP_DATA": SETTINGS.get("FLOW_MANAGER_KEEP_DATA", False),
}
# Docker on MacOSX usus different settings
if platform.system() == "Darwin":
environment["LISTENER_IP"] = "host.docker.internal"
communication_arguments = {
"auto_remove": False,
"volumes": self._communicator_volumes(),
"command": ["/usr/local/bin/python", "/startup.py"],
"image": communicator_image,
"name": f"{self.container_name}-communicator",
"detach": True,
"cpu_quota": 100000, # TODO: how much?
"mem_limit": f"4000m", # TODO: how much?
"mem_reservation": f"200m",
"network_mode": network,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"environment": environment,
}
processing_arguments = {
"auto_remove": False,
"volumes": self._processing_volumes(),
"command": ["python3", "/start.py"],
"image": processing_image,
"network_mode": f"container:{self.container_name}-communicator",
"working_dir": os.fspath(constants.DATA_LOCAL_VOLUME),
"detach": True,
"cpu_quota": self.process["resource_limits"]["cores"] * (10 ** 6),
"mem_limit": f"{memory}m",
"mem_reservation": f"{self.process['resource_limits']['memory']}m",
"mem_swappiness": DOCKER_MEMORY_SWAPPINESS,
"memswap_limit": f"{memory_swap}m",
"name": self.container_name,
"cap_drop": ["all"],
"security_opt": security_options,
"user": f"{os.getuid()}:{os.getgid()}",
"ulimits": ulimits,
"environment": environment,
}
logger.info("Starting processing: %s", processing_arguments)
logger.info("Starting com: %s", processing_arguments)
client = docker.from_env()
try:
try:
client.images.get(processing_image)
except docker.errors.ImageNotFound:
client.images.pull(processing_image)
except docker.errors.APIError:
logger.exception("Docker API error")
raise RuntimeError("Docker API error")
start_time = time.time()
# Create docker client from environment.
# Make sure the enviroment is set up correctly.
response = client.containers.run(**communication_arguments)
logger.debug("Com response: %s.", response)
response = client.containers.run(**processing_arguments)
logger.debug("Proccessing response: %s.", response)
end_time = time.time()
logger.info(
"It took {:.2f}s for Docker containers to start".format(
end_time - start_time
)
)
Do not create TMPDIR: it is created in processing container
""".. Ignore pydocstyle D400.
.. autoclass:: resolwe.flow.executors.docker.run.FlowExecutor
:members:
"""
# pylint: disable=logging-format-interpolation
import json
import logging
import os
import platform
import time
from pathlib import Path
from typing import Dict, Optional, Tuple, Union
import docker
from .. import constants
from ..global_settings import PROCESS_META, SETTINGS, STORAGE_LOCATION
from ..local.run import FlowExecutor as LocalFlowExecutor
from ..protocol import ExecutorFiles
from .seccomp import SECCOMP_POLICY
# Limits of containers' access to memory. We set the limit to ensure
# processes are stable and do not get killed by OOM signal.
DOCKER_MEMORY_HARD_LIMIT_BUFFER = 100  # headroom added to the hard limit (Docker "m" units)
DOCKER_MEMORY_SWAP_RATIO = 2  # swap limit = hard memory limit * this ratio
DOCKER_MEMORY_SWAPPINESS = 1  # keep swapping of container memory minimal

# Module-level logger.
logger = logging.getLogger(__name__)
class FlowExecutor(LocalFlowExecutor):
    """Docker executor.

    Runs each job as a pair of Docker containers: a communicator container
    that talks to the listener, and a processing container that shares the
    communicator's network namespace and runs the actual process.
    """

    name = "docker"

    def __init__(self, *args, **kwargs):
        """Initialize attributes."""
        super().__init__(*args, **kwargs)

        container_name_prefix = SETTINGS.get("FLOW_EXECUTOR", {}).get(
            "CONTAINER_NAME_PREFIX", "resolwe"
        )
        self.container_name = self._generate_container_name(container_name_prefix)
        self.tools_volumes = []
        self.command = SETTINGS.get("FLOW_DOCKER_COMMAND", "docker")
        # Base directory for runtime files (sockets, passwd/group, scripts).
        self.runtime_dir = Path(SETTINGS["FLOW_EXECUTOR"].get("RUNTIME_DIR", ""))

    # Setup Docker volumes.
    def _new_volume(
        self,
        kind: str,
        base_dir_name: Optional[str],
        mount_point: Union[str, Path],
        path: Union[str, Path] = "",
        read_only: bool = True,
    ) -> Tuple[str, Dict[str, str]]:
        """Generate a new volume entry.

        :param kind: Kind of volume, which is used for getting extra options from
            settings (the ``FLOW_DOCKER_VOLUME_EXTRA_OPTIONS`` setting)
        :param base_dir_name: Name of base directory setting for volume source path
        :param mount_point: Destination volume mount point inside the container
        :param path: Optional additional path atoms appended to source path
        :param read_only: True to make the volume read-only
        :return: a ``(host_path, {"bind": ..., "mode": ...})`` pair in the
            shape expected by docker-py's ``volumes`` argument
        """
        # Drop empty atoms and user-supplied ro/rw so read_only is authoritative.
        options = set(
            SETTINGS.get("FLOW_DOCKER_VOLUME_EXTRA_OPTIONS", {})
            .get(kind, "")
            .split(",")
        ).difference(["", "ro", "rw"])
        options.add("ro" if read_only else "rw")
        base_path = Path(SETTINGS["FLOW_EXECUTOR"].get(base_dir_name, ""))
        return (
            os.fspath(base_path / path),
            {
                "bind": os.fspath(mount_point),
                "mode": ",".join(options),
            },
        )

    def _communicator_volumes(self) -> Dict:
        """Prepare volumes for communicator container."""
        storage_url = Path(STORAGE_LOCATION["url"])
        communicator_volumes = [
            # Data directory of this run (writable).
            self._new_volume(
                "data",
                "DATA_DIR",
                constants.DATA_VOLUME,
                storage_url,
                read_only=False,
            ),
            # Directory holding data of all runs (writable).
            self._new_volume(
                "data_all", "DATA_DIR", constants.DATA_ALL_VOLUME, read_only=False
            ),
            # Secrets, read-only.
            self._new_volume(
                "secrets",
                "RUNTIME_DIR",
                constants.SECRETS_VOLUME,
                storage_url / ExecutorFiles.SECRETS_DIR,
            ),
            # Executor settings, read-only.
            self._new_volume(
                "settings",
                "RUNTIME_DIR",
                "/settings",
                storage_url / ExecutorFiles.SETTINGS_SUBDIR,
            ),
            # Unix sockets shared with the processing container (writable).
            self._new_volume(
                "sockets",
                "RUNTIME_DIR",
                constants.SOCKETS_VOLUME,
                storage_url / ExecutorFiles.SOCKETS_SUBDIR,
                read_only=False,
            ),
        ]
        return dict(communicator_volumes)

    def _processing_volumes(self) -> Dict:
        """Prepare volumes for processing container."""
        storage_url = Path(STORAGE_LOCATION["url"])
        # Create local data dir; mkdir() raises if it already exists, which
        # doubles as a guard against concurrent re-runs.
        base_path = Path(SETTINGS["FLOW_EXECUTOR"].get("DATA_DIR", ""))
        local_data = base_path / f"{storage_url}_work"
        local_data.mkdir()
        processing_volumes = [
            self._new_volume(
                "data",
                "DATA_DIR",
                constants.DATA_VOLUME,
                storage_url,
                read_only=False,
            ),
            # Scratch/work directory for this run (writable).
            self._new_volume(
                "data_local",
                "DATA_DIR",
                constants.DATA_LOCAL_VOLUME,
                f"{storage_url}_work",
                read_only=False,
            ),
            self._new_volume("data_all", "DATA_DIR", constants.DATA_ALL_VOLUME),
            self._new_volume(
                "upload", "UPLOAD_DIR", constants.UPLOAD_VOLUME, read_only=False
            ),
            self._new_volume(
                "secrets",
                "RUNTIME_DIR",
                constants.SECRETS_VOLUME,
                storage_url / ExecutorFiles.SECRETS_DIR,
            ),
            self._new_volume(
                "sockets",
                "RUNTIME_DIR",
                constants.SOCKETS_VOLUME,
                storage_url / ExecutorFiles.SOCKETS_SUBDIR,
                read_only=False,
            ),
            # Helper scripts mounted as single read-only files.
            self._new_volume(
                "socket_utils",
                "RUNTIME_DIR",
                "/socket_utils.py",
                storage_url / "executors" / ExecutorFiles.SOCKET_UTILS,
            ),
            self._new_volume(
                "socket_utils",
                "RUNTIME_DIR",
                "/start.py",
                storage_url / "executors" / ExecutorFiles.STARTUP_PROCESSING_SCRIPT,
            ),
            self._new_volume(
                "constants",
                "RUNTIME_DIR",
                "/constants.py",
                storage_url / "executors" / ExecutorFiles.CONSTANTS,
            ),
        ]
        # Generate dummy passwd and create mappings for it. This is required because some tools
        # inside the container may try to lookup the given UID/GID and will crash if they don't
        # exist. So we create minimal user/group files.
        passwd_path = self.runtime_dir / storage_url / "passwd"
        group_path = self.runtime_dir / storage_url / "group"
        with passwd_path.open("wt") as passwd_file:
            passwd_file.write(
                "root:x:0:0:root:/root:/bin/bash\n"
                + f"user:x:{os.getuid()}:{os.getgid()}:user:{os.fspath(constants.DATA_LOCAL_VOLUME)}:/bin/bash\n"
            )
        with group_path.open("wt") as group_file:
            group_file.write("root:x:0:\n" + f"user:x:{os.getgid()}:user\n")
        processing_volumes += [
            self._new_volume("users", None, "/etc/passwd", passwd_path),
            self._new_volume("users", None, "/etc/group", group_path),
        ]
        # Create volumes for tools; each tool directory is mounted under a
        # numbered subdirectory of /usr/local/bin/resolwe.
        processing_volumes += [
            self._new_volume(
                "tools",
                None,
                Path("/usr/local/bin/resolwe") / str(index),
                Path(tool),
            )
            for index, tool in enumerate(self.get_tools_paths())
        ]
        # Create volumes for runtime (all read-only).
        processing_volumes += [
            self._new_volume(
                "runtime",
                "RUNTIME_DIR",
                dst,
                storage_url / src,
            )
            for src, dst in SETTINGS.get("RUNTIME_VOLUME_MAPS", {}).items()
        ]
        # Add any extra volumes verbatim.
        processing_volumes += SETTINGS.get("FLOW_DOCKER_EXTRA_VOLUMES", [])
        return dict(processing_volumes)

    def _data_dir_clean(self, storage_url: Path) -> bool:
        """Check if data dir does not contain old log file."""
        log = Path(SETTINGS["FLOW_EXECUTOR"]["DATA_DIR"]) / storage_url / "stdout.txt"
        return not log.is_file()

    async def start(self):
        """Start process execution.

        Aborts when a previous run left its log behind; otherwise computes
        resource limits, prepares volumes and environment, and starts the
        communicator container followed by the processing container.
        """
        # Old log file is present, do not run the process again.
        storage_url = Path(STORAGE_LOCATION["url"])
        if not self._data_dir_clean(storage_url):
            logger.error("Stdout or jsonout file already exists, aborting.")
            return

        # Hard memory limit gets a buffer so processes are not OOM-killed
        # right at their nominal limit.
        memory = (
            self.process["resource_limits"]["memory"] + DOCKER_MEMORY_HARD_LIMIT_BUFFER
        )
        memory_swap = int(memory * DOCKER_MEMORY_SWAP_RATIO)
        network = "bridge"
        if "network" in self.resources:
            # Configure Docker network mode for the container (if specified).
            # By default, current Docker versions use the 'bridge' mode which
            # creates a network stack on the default Docker bridge.
            network = SETTINGS.get("FLOW_EXECUTOR", {}).get("NETWORK", "")

        security_options = []
        if not SETTINGS.get("FLOW_DOCKER_DISABLE_SECCOMP", False):
            security_options.append(f"seccomp={json.dumps(SECCOMP_POLICY)}")

        processing_image = self.requirements.get(
            "image", constants.DEFAULT_CONTAINER_IMAGE
        )
        communicator_image = SETTINGS.get(
            "DOCKER_COMMUNICATOR_IMAGE", "resolwe/com:python-3.9"
        )
        ulimits = []
        if (
            self.process["scheduling_class"]
            == PROCESS_META["SCHEDULING_CLASS_INTERACTIVE"]
        ):
            # TODO: This is not very good as each child gets the same limit.
            # Note: Ulimit does not work as expected on multithreaded processes
            # Limit is increased by factor 1.2 for processes with 2-8 threads.
            # TODO: This should be changed for processes with over 8 threads.
            cpu_time_interactive = SETTINGS.get(
                "FLOW_PROCESS_RESOURCE_DEFAULTS", {}
            ).get("cpu_time_interactive", 30)
            cpu_limit = int(cpu_time_interactive * 1.2)
            ulimits.append(
                docker.types.Ulimit(name="cpu", soft=cpu_limit, hard=cpu_limit)
            )

        # Make sure that sockets dir exists.
        os.makedirs(
            self.runtime_dir / storage_url / ExecutorFiles.SOCKETS_SUBDIR, exist_ok=True
        )

        logger.debug("Checking existence of docker image: %s.", processing_image)

        listener_settings = SETTINGS.get("FLOW_EXECUTOR", {}).get(
            "LISTENER_CONNECTION", {}
        )
        # Environment shared by both containers.
        environment = {
            "CONTAINER_TIMEOUT": constants.CONTAINER_TIMEOUT,
            "SOCKETS_VOLUME": constants.SOCKETS_VOLUME,
            "COMMUNICATION_PROCESSING_SOCKET": constants.COMMUNICATION_PROCESSING_SOCKET,
            "SCRIPT_SOCKET": constants.SCRIPT_SOCKET,
            "LISTENER_IP": listener_settings.get("hosts", {}).get(
                "docker", "127.0.0.1"
            ),
            "LISTENER_PORT": listener_settings.get("port", 53893),
            "LISTENER_PROTOCOL": listener_settings.get("protocol", "tcp"),
            "DATA_ID": self.data_id,
            "LOCATION_SUBPATH": os.fspath(storage_url),
            "DATA_LOCAL_VOLUME": os.fspath(constants.DATA_LOCAL_VOLUME),
            "DATA_ALL_VOLUME": os.fspath(constants.DATA_ALL_VOLUME),
            "DATA_VOLUME": os.fspath(constants.DATA_VOLUME),
            "UPLOAD_VOLUME": os.fspath(constants.UPLOAD_VOLUME),
            "SECRETS_DIR": os.fspath(constants.SECRETS_VOLUME),
            "RUNNING_IN_CONTAINER": 1,
            "RUNNING_IN_DOCKER": 1,
            "FLOW_MANAGER_KEEP_DATA": SETTINGS.get("FLOW_MANAGER_KEEP_DATA", False),
        }
        # Docker on MacOSX uses different settings for reaching the host.
        if platform.system() == "Darwin":
            environment["LISTENER_IP"] = "host.docker.internal"

        communication_arguments = {
            "auto_remove": False,
            "volumes": self._communicator_volumes(),
            "command": ["/usr/local/bin/python", "/startup.py"],
            "image": communicator_image,
            "name": f"{self.container_name}-communicator",
            "detach": True,
            "cpu_quota": 100000,  # TODO: how much?
            "mem_limit": "4000m",  # TODO: how much?
            "mem_reservation": "200m",
            "network_mode": network,
            "cap_drop": ["all"],
            "security_opt": security_options,
            "user": f"{os.getuid()}:{os.getgid()}",
            "environment": environment,
        }
        # The processing container joins the communicator's network namespace.
        processing_arguments = {
            "auto_remove": False,
            "volumes": self._processing_volumes(),
            "command": ["python3", "/start.py"],
            "image": processing_image,
            "network_mode": f"container:{self.container_name}-communicator",
            "working_dir": os.fspath(constants.DATA_LOCAL_VOLUME),
            "detach": True,
            "cpu_quota": self.process["resource_limits"]["cores"] * (10 ** 6),
            "mem_limit": f"{memory}m",
            "mem_reservation": f"{self.process['resource_limits']['memory']}m",
            "mem_swappiness": DOCKER_MEMORY_SWAPPINESS,
            "memswap_limit": f"{memory_swap}m",
            "name": self.container_name,
            "cap_drop": ["all"],
            "security_opt": security_options,
            "user": f"{os.getuid()}:{os.getgid()}",
            "ulimits": ulimits,
            "environment": environment,
        }
        logger.info("Starting processing: %s", processing_arguments)
        # Bug fix: the communicator log line previously repeated
        # processing_arguments instead of communication_arguments.
        logger.info("Starting com: %s", communication_arguments)
        client = docker.from_env()
        try:
            try:
                client.images.get(processing_image)
            except docker.errors.ImageNotFound:
                # Pull the processing image on demand.
                client.images.pull(processing_image)
        except docker.errors.APIError:
            logger.exception("Docker API error")
            raise RuntimeError("Docker API error")

        start_time = time.time()
        # Create docker client from environment.
        # Make sure the enviroment is set up correctly.
        response = client.containers.run(**communication_arguments)
        logger.debug("Com response: %s.", response)
        response = client.containers.run(**processing_arguments)
        logger.debug("Proccessing response: %s.", response)
        end_time = time.time()
        logger.info(
            "It took {:.2f}s for Docker containers to start".format(
                end_time - start_time
            )
        )
|
Fix bug introduced in the last check-in.
|
from flask import Flask
from flask_dbseeder import Seeder

# Minimal usage example: create a Flask app and attach the Seeder extension.
app = Flask(__name__)
seeder = Seeder(app)

if __name__ == '__main__':
    #app.run(debug=True)
    # Only prints the extension instance; the server is intentionally not run.
    print(seeder)
Delete usage_test_2.py
|
# coding=utf-8
'''
Created on 02/02/2014
@author: Dani
'''
from lpentities.observation import Observation
from lpentities.country import Country
from lpentities.indicator import Indicator
from lpentities.license import License
from lpentities.measurement_unit import MeasurementUnit
from lpentities.computation import Computation
from lpentities.instant import Instant
from lpentities.data_source import DataSource
from lpentities.organization import Organization
from lpentities.dataset import Dataset
from lpentities.value import Value
from es.weso.faostat.translator.translator_const import TranslatorConst
from reconciler.country_reconciler import CountryReconciler
from datetime import datetime
from lpentities.year_interval import YearInterval
from reconciler.exceptions.unknown_country_error import UnknownCountryError
class ModelObjectBuilder(object):
    '''
    Builds the lpentities object model (dataset, observations, indicators,
    countries) from raw FAOSTAT registers.
    '''

    def __init__(self, registers, config, log):
        """
        Constructor

        :param registers: sequence of raw FAOSTAT rows, indexed by TranslatorConst keys
        :param config: ConfigParser-like object holding TRANSLATOR id counters
        :param log: logger used by this builder
        """
        self.log = log
        self.config = config
        # Per-entity counters used to generate unique ids.
        self._org_id = self.config.get("TRANSLATOR", "org_id")
        self._obs_int = int(self.config.get("TRANSLATOR", "obs_int"))
        self._sli_int = int(self.config.get("TRANSLATOR", "sli_int"))
        self._dat_int = int(self.config.get("TRANSLATOR", "dat_int"))
        self._igr_int = int(self.config.get("TRANSLATOR", "igr_int"))
        self._ind_int = int(self.config.get("TRANSLATOR", "ind_int"))
        self._sou_int = int(self.config.get("TRANSLATOR", "sou_int"))
        self.registers = registers
        self.country_dict = {}  # cache: faostat country code -> Country (or None)
        self.dataset = self.build_dataset()
        self.indicators_dict = {}  # cache: item code -> Indicator
        self.default_computation = Computation(Computation.RAW)
        self.reconciler = CountryReconciler()

    def run(self):
        """Process the registers and return the populated dataset."""
        # NOTE(review): only registers 1..1999 are processed — looks like a
        # debug restriction of the full loop
        # `for register in self.registers: ...`; confirm before release.
        for i in range(1, 2000):
            self.build_model_objects_from_register(self.registers[i])
        self._update_config_id_values()
        return self.dataset

    def _update_config_id_values(self):  # TODO: this does not actually update the file!!
        """Write the current id counters back into the in-memory config."""
        self.config.set("TRANSLATOR", "org_id", self._org_id)
        self.config.set("TRANSLATOR", "obs_int", self._obs_int)
        self.config.set("TRANSLATOR", "sli_int", self._sli_int)
        self.config.set("TRANSLATOR", "dat_int", self._dat_int)
        self.config.set("TRANSLATOR", "igr_int", self._igr_int)
        self.config.set("TRANSLATOR", "ind_int", self._ind_int)
        self.config.set("TRANSLATOR", "sou_int", self._sou_int)

    def build_dataset(self):
        """Create the Dataset together with its organization, datasource and license."""
        # Creating dataset object.
        # Fix: removed stray debug `print` of the organization id.
        dataset = Dataset(chain_for_id=self._org_id, int_for_id=self._dat_int, frequency="yearly")  # TODO: put frequency
        self._dat_int += 1  # Updating id value
        # Creating related objects.
        # Organization
        org = Organization(chain_for_id=self._org_id,
                           name="FAO: Food and Agriculture Organization of the United Nations",
                           url="http://www.fao.org/")
        # Datasource
        datasource = DataSource(name="Faostat. Statistcis division of the FAO",
                                chain_for_id=self._org_id,
                                int_for_id=self._sou_int)
        self._sou_int += 1
        # License
        license_type = License(description="Attribution and need permission for commercial use",
                               name="CopyrightFao",
                               republish=True,
                               url="http://www.fao.org/contact-us/terms/en/")
        # Linking objects.
        org.add_data_source(datasource)
        datasource.add_dataset(dataset)
        dataset.license_type = license_type
        # Returning result.
        return dataset

    def build_model_objects_from_register(self, register):
        """Turn one raw register into an Observation attached to its country and dataset."""
        country = self.get_asociated_country(register[TranslatorConst.COUNTRY_CODE])
        if country is None:
            return  # It means that we are processing an obs from a non recognised country.
                    # We just have to ignore it
        new_observation = Observation(chain_for_id=self._org_id, int_for_id=self._obs_int)
        self._obs_int += 1
        self.add_indicator_to_observation(new_observation, register)  # DONE
        self.add_value_to_observation(new_observation, register)  # DONE
        self.add_computation_to_observation(new_observation)  # DONE
        self.add_reftime_to_observation(new_observation, register)  # DONE
        self.add_issued_to_observation(new_observation, register)  # DONE
        country.add_observation(new_observation)
        self.dataset.add_observation(new_observation)

    def add_issued_to_observation(self, observation, register):
        """Stamp the observation with the time at which we processed it."""
        observation.issued = Instant(datetime.now())

    def add_reftime_to_observation(self, observation, register):
        """Set the observation's reference time from the register's year."""
        observation.ref_time = YearInterval(year=register[TranslatorConst.YEAR])

    def add_computation_to_observation(self, observation):
        """All FAOSTAT observations are raw (not computed) values."""
        observation.computation = self.default_computation

    def add_value_to_observation(self, observation, register):
        """Attach the register's value, marking empty values as missing."""
        value = Value()
        value.value_type = "float"
        if register[TranslatorConst.VALUE] is None or register[TranslatorConst.VALUE] == "":
            value.obs_status = Value.MISSING
        else:
            value.obs_status = Value.AVAILABLE
            value.value = register[TranslatorConst.VALUE]
        observation.value = value

    def add_measurement_unit_to_indicator(self, indicator, register):
        """Set the indicator's measurement unit from the register."""
        indicator.measurement_unit = MeasurementUnit(register[TranslatorConst.UNIT])

    def add_indicator_to_observation(self, observation, register):
        """Attach the (cached) Indicator matching the register's item code."""
        if register[TranslatorConst.ITEM_CODE] not in self.indicators_dict:
            self._add_indicator_to_dict(register)
        indicator = self.indicators_dict[register[TranslatorConst.ITEM_CODE]]
        observation.indicator = indicator

    def _add_indicator_to_dict(self, register):
        """Build an Indicator (name, description, id) and cache it by item code."""
        indicator = Indicator(name=register[TranslatorConst.ITEM],
                              description=self.get_indicator_description(register[TranslatorConst.ITEM_CODE]),
                              chain_for_id=self._org_id,
                              int_for_id=self._ind_int)
        self._ind_int += 1  # Updating id value
        self.add_measurement_unit_to_indicator(indicator, register)
        self.indicators_dict[register[TranslatorConst.ITEM_CODE]] = indicator

    def get_indicator_id(self, register):
        """Return a FAOSTAT-prefixed indicator id for the register's item code."""
        return "FAOSTAT_" + str(register[TranslatorConst.ITEM_CODE])

    def get_indicator_description(self, indicator_code):
        """Map a known FAOSTAT item code to its description; raise for unknown codes."""
        if indicator_code == 6601:
            return "Land Area. Total area in sq. km of the referred region"
        elif indicator_code == 6610:
            return "Agricultural land. Total area in sq. km. for agriculture of the referred region"
        elif indicator_code == 6661:
            return "Forest land. Total forest surface in sq. km of the referred region"
        elif indicator_code == 6621:
            return "Arable area. Total arable surface in sq. km. of the referred region"
        else:
            raise RuntimeError("Unknown indicator. No description found")

    def get_asociated_country(self, country_code):
        """Resolve a FAOSTAT country code to a Country, caching failures as None."""
        if country_code not in self.country_dict:
            try:
                self.country_dict[country_code] = self.reconciler.get_country_by_faostat_code(country_code)
            except UnknownCountryError:  # Trying to get an invalid country
                self.country_dict[country_code] = None  # By this, unsucessfull searches are executed only one time
                return None  # return None as a signal of "invalid country"
        return self.country_dict[country_code]
Adapt frequency specification in Dataset
# coding=utf-8
'''
Created on 02/02/2014
@author: Dani
'''
from lpentities.observation import Observation
from lpentities.country import Country
from lpentities.indicator import Indicator
from lpentities.license import License
from lpentities.measurement_unit import MeasurementUnit
from lpentities.computation import Computation
from lpentities.instant import Instant
from lpentities.data_source import DataSource
from lpentities.organization import Organization
from lpentities.dataset import Dataset
from lpentities.value import Value
from es.weso.faostat.translator.translator_const import TranslatorConst
from reconciler.country_reconciler import CountryReconciler
from datetime import datetime
from lpentities.year_interval import YearInterval
from reconciler.exceptions.unknown_country_error import UnknownCountryError
class ModelObjectBuilder(object):
'''
classdocs
'''
def __init__(self, registers, config, log):
"""
Constructor
"""
self.log = log
self.config = config
self._org_id = self.config.get("TRANSLATOR", "org_id")
self._obs_int = int(self.config.get("TRANSLATOR", "obs_int"))
self._sli_int = int(self.config.get("TRANSLATOR", "sli_int"))
self._dat_int = int(self.config.get("TRANSLATOR", "dat_int"))
self._igr_int = int(self.config.get("TRANSLATOR", "igr_int"))
self._ind_int = int(self.config.get("TRANSLATOR", "ind_int"))
self._sou_int = int(self.config.get("TRANSLATOR", "sou_int"))
self.registers = registers
self.country_dict = {}
self.dataset = self.build_dataset()
self.indicators_dict = {}
self.default_computation = Computation(Computation.RAW)
self.reconciler = CountryReconciler()
def run(self):
# for register in self.registers:
# self.build_model_objects_from_register(register)
for i in range(1, 2000):
self.build_model_objects_from_register(self.registers[i])
self._update_config_id_values()
return self.dataset
def _update_config_id_values(self): # TODO. No actualiza el archivo!!
self.config.set("TRANSLATOR", "org_id", self._org_id)
self.config.set("TRANSLATOR", "obs_int", self._obs_int)
self.config.set("TRANSLATOR", "sli_int", self._sli_int)
self.config.set("TRANSLATOR", "dat_int", self._dat_int)
self.config.set("TRANSLATOR", "igr_int", self._igr_int)
self.config.set("TRANSLATOR", "ind_int", self._ind_int)
self.config.set("TRANSLATOR", "sou_int", self._sou_int)
def build_dataset(self):
#Creating dataset object
print self._org_id
dataset = Dataset(chain_for_id=self._org_id, int_for_id=self._dat_int, frequency=Dataset.YEARLY)
self._dat_int += 1 # Updating id value
#creating related objects
#Organization
org = Organization(chain_for_id=self._org_id,
name="FAO: Food and Agriculture Organization of the United Nations",
url="http://www.fao.org/")
#datasource
datasource = DataSource(name="Faostat. Statistcis division of the FAO",
chain_for_id=self._org_id,
int_for_id=self._sou_int)
self._sou_int += 1
#license
license_type = License(description="Attribution and need permission for commercial use",
name="CopyrightFao",
republish=True,
url="http://www.fao.org/contact-us/terms/en/")
#linking objects
org.add_data_source(datasource)
datasource.add_dataset(dataset)
dataset.license_type = license_type
#Returning result
return dataset
def build_model_objects_from_register(self, register):
country = self.get_asociated_country(register[TranslatorConst.COUNTRY_CODE])
if country is None:
return # It means that we are processing an obs from a non recognised country.
# We just have to ignore it
new_observation = Observation(chain_for_id=self._org_id, int_for_id=self._obs_int)
self._obs_int += 1
self.add_indicator_to_observation(new_observation, register) # DONE
self.add_value_to_observation(new_observation, register) # DONE
self.add_computation_to_observation(new_observation) # DONE
self.add_reftime_to_observation(new_observation, register) # DONE
self.add_issued_to_observation(new_observation, register) # DONE
country.add_observation(new_observation)
self.dataset.add_observation(new_observation)
def add_issued_to_observation(self, observation, register):
#Adding time in which the observation has been treated by us
observation.issued = Instant(datetime.now())
def add_reftime_to_observation(self, observation, register):
observation.ref_time = YearInterval(year=register[TranslatorConst.YEAR])
def add_computation_to_observation(self, observation):
observation.computation = self.default_computation
def add_value_to_observation(self, observation, register):
value = Value()
value.value_type = "float"
if register[TranslatorConst.VALUE] is None or register[TranslatorConst.VALUE] == "":
value.obs_status = Value.MISSING
else:
value.obs_status = Value.AVAILABLE
value.value = register[TranslatorConst.VALUE]
observation.value = value
def add_measurement_unit_to_indicator(self, indicator, register):
indicator.measurement_unit = MeasurementUnit(register[TranslatorConst.UNIT])
def add_indicator_to_observation(self, observation, register):
if register[TranslatorConst.ITEM_CODE] not in self.indicators_dict:
self._add_indicator_to_dict(register)
indicator = self.indicators_dict[register[TranslatorConst.ITEM_CODE]]
observation.indicator = indicator
def _add_indicator_to_dict(self, register):
#name, description, id
indicator = Indicator(name=register[TranslatorConst.ITEM],
description=self.get_indicator_description(register[TranslatorConst.ITEM_CODE]),
chain_for_id=self._org_id,
int_for_id=self._ind_int)
self._ind_int += 1 # Updating id value
self.add_measurement_unit_to_indicator(indicator, register)
self.indicators_dict[register[TranslatorConst.ITEM_CODE]] = indicator
def get_indicator_id(self, register):
return "FAOSTAT_" + str(register[TranslatorConst.ITEM_CODE])
def get_indicator_description(self, indicator_code):
if indicator_code == 6601:
return "Land Area. Total area in sq. km of the referred region"
elif indicator_code == 6610:
return "Agricultural land. Total area in sq. km. for agriculture of the referred region"
elif indicator_code == 6661:
return "Forest land. Total forest surface in sq. km of the referred region"
elif indicator_code == 6621:
return "Arable area. Total arable surface in sq. km. of the referred region"
else:
raise RuntimeError("Unknown indicator. No description found")
def get_asociated_country(self, country_code):
    """Return the reconciled country object for a FAOSTAT country code, or None if unknown.

    Lookups are memoized in self.country_dict; failed lookups are cached as
    None so the reconciler is queried at most once per code.
    """
    if country_code not in self.country_dict:
        try:
            self.country_dict[country_code] = self.reconciler.get_country_by_faostat_code(country_code)
        except UnknownCountryError:  # Trying to get an invalid country
            self.country_dict[country_code] = None  # By this, unsucessfull searches are executed only one time
            return None  # return None as a signal of "invalid country"
    return self.country_dict[country_code]
|
#!/usr/bin/env python
# encoding: utf-8
"""
Tools to handle reads sequenced with unique molecular identifiers (UMIs).
"""
from __future__ import print_function
import doctest
import editdistance
import os
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import Counter, defaultdict
from itertools import islice, izip
from pysam import Samfile
from re import findall
from toolshed import nopen
from ._version import __version__
os.environ['TERM'] = 'linux'
IUPAC = {
"A": "A",
"T": "T",
"C": "C",
"G": "G",
"R": "GA",
"Y": "TC",
"M": "AC",
"K": "GT",
"S": "GC",
"W": "AT",
"H": "ACT",
"B": "GTC",
"V": "GCA",
"D": "GAT",
"N": "GATC"
}
class Fastq(object):
    """One FASTQ record: read name (without the leading '@'), sequence, and quality."""

    def __init__(self, args):
        # args holds the raw 4-line record; args[2] (the '+' separator) is unused
        self.name = args[0][1:]
        self.seq = args[1]
        self.qual = args[3]
        # a valid record has one quality character per base
        assert len(self.seq) == len(self.qual)

    def __repr__(self):
        return "Fastq({0})".format(self.name)

    def __str__(self):
        # serialize back to standard 4-line FASTQ form
        return "@{0}\n{1}\n+\n{2}".format(self.name, self.seq, self.qual)
def umi_from_name(name):
    """
    extract the UMI sequence from the read name.

    >>> umi_from_name("cluster_1017333:UMI_GCCGCA")
    'GCCGCA'
    """
    matches = findall(r'UMI_([\w]*)', name)
    return matches[0].strip()
def passing_distances(subject, target_set, n):
    """Return True when any target is within edit distance n of subject.

    >>> s = "ACTGA"
    >>> ts_1 = {"ACTGG"}
    >>> ts_2 = {"ACTCC", "ACTGG"}
    >>> ts_3 = {"ACTCC", "ACTTT"}
    >>> n = 1
    >>> passing_distances(s, ts_1, n)
    True
    >>> passing_distances(s, ts_2, n)
    True
    >>> passing_distances(s, ts_3, n)
    False
    """
    # any() short-circuits on the first match, like the original loop
    return any(editdistance.distance(candidate, subject) <= n for candidate in target_set)
def process_bam(args):
    """
    removes duplicate reads characterized by their UMI at any given start
    location.

    abam        input bam with potential duplicate UMIs
    bbam        output bam after removing duplicate UMIs
    mismatches  allowable edit distance between UMIs

    Writes the filtered BAM to args.bbam and per-start before/after read
    counts to stdout as tab-separated BED3+ rows.
    """
    with Samfile(args.abam, 'rb') as in_bam, Samfile(args.bbam, 'wb', template=in_bam) as out_bam:
        for chrom in in_bam.references:
            print("processing chromosome", chrom, file=sys.stderr)
            # start position -> set of unique UMIs kept at that position
            umi_idx = defaultdict(set)
            # start position -> total reads observed (before dedup)
            read_counts = Counter()
            for read in in_bam.fetch(chrom):
                if read.is_unmapped:
                    continue
                # get the iupac umi sequence
                umi = umi_from_name(read.qname)
                # get actual read start
                # read.pos accounts for 5' soft clipping
                if read.is_reverse:
                    # read.alen alignment length accounting for 3' soft clipping
                    # UMIs are then compared to reads with the same start
                    read_start = read.pos + read.alen
                else:
                    read_start = read.pos
                # add count for this start; counts all reads
                read_counts[read_start] += 1
                # check if UMI seen
                if umi in umi_idx[read_start]:
                    continue
                elif args.mismatches > 0 and passing_distances(umi, umi_idx[read_start], args.mismatches):
                    # within edit-distance tolerance of an existing UMI:
                    # record it as seen but do NOT write the read
                    umi_idx[read_start].add(umi)
                    continue
                # keep track of unique UMIs - set eliminates duplicates
                umi_idx[read_start].add(umi)
                out_bam.write(read)
            # process before and after counts over chrom
            for start, before_count in sorted(read_counts.items()):
                print(chrom, start, start + 1, before_count, len(umi_idx[start]), sep="\t")
def readfq(fq):
    """Yield Fastq records from a FASTQ file path (opened via toolshed.nopen).

    Args:
        fq: path to the FASTQ file (nopen transparently handles compression)

    Yields:
        Fastq: one record per 4 non-blank lines
    """
    with nopen(fq) as fh:
        fqclean = (x.strip("\r\n") for x in fh if x.strip())
        while True:
            rd = list(islice(fqclean, 4))
            if not rd:
                # BUG FIX: raising StopIteration inside a generator becomes a
                # RuntimeError under PEP 479 (Python 3.7+); 'return' is the
                # equivalent, version-safe way to end the generator.
                return
            # a truncated file would yield a partial record
            assert all(rd) and len(rd) == 4
            yield Fastq(rd)
def valid_umi(iupac, umi):
    """
    parse UMI sequence to validate against IUPAC sequence.

    >>> valid_umi("NNNV", "ACGT")
    False
    >>> valid_umi("NNNV", "ACGG")
    True
    """
    for code, base in izip(iupac, umi):
        allowed = IUPAC.get(code)
        # an unknown IUPAC code or a disallowed base both invalidate the UMI
        if allowed is None or base not in allowed:
            return False
    return True
def clip_umi(record, iupac_umi, n, end):
    """Clip the UMI off a read, trim its quality, and fold the UMI into the name.

    Returns the mutated Fastq on success, or the invalid UMI string when the
    clipped sequence does not match the IUPAC pattern.

    >>> fq = Fastq(["@cluster_455 2",\
    "GGGGGAGCCACGAGGTGTGTTTTATTTTCATTATTC",\
    "+",\
    "C===>=B=@:<;4A;8=9?6EEC0?DDA72B@3EB4"])
    >>> clip_umi(fq, "NNNNNV", 6, "5")
    Fastq(cluster_455:UMI_GGGGGA 2)
    >>> fq = Fastq(["@cluster_455 2",\
    "GGXXGAGCCACGAGGTGTGTTTTATTTTCATTATTC",\
    "+",\
    "C===>=B=@:<;4A;8=9?6EEC0?DDA72B@3EB4"])
    >>> clip_umi(fq, "NNNNNV", 6, "5")
    'GGXXGA'
    """
    # NOTE: the record is mutated (seq/qual trimmed) even when the UMI turns
    # out to be invalid and only the UMI string is returned.
    if end == "5":
        umi = record.seq[:n]
        record.seq = record.seq[n:]
        record.qual = record.qual[n:]
    else:
        umi = record.seq[-n:]
        record.seq = record.seq[:-n]
        record.qual = record.qual[:-n]
    if not valid_umi(iupac_umi, umi):
        return umi
    try:
        # paired reads carry " <pair>" after the name; keep it after the UMI
        name, pair = record.name.split(" ", 1)
        record.name = "{name}:UMI_{umi} {pair}".format(name=name,
                                                       umi=umi,
                                                       pair=pair)
    except ValueError:
        record.name = "{name}:UMI_{umi}".format(name=record.name, umi=umi)
    return record
def process_fastq(args):
    """
    for every valid umi, trim while incorporating into read name.

    args:
        fastq    reads to process
        umi      IUPAC sequence
        end      5 or 3 as a string
        top      int number of invalid sequences to output
        verbose

    Valid records are printed to stdout; invalid UMI stats go to stderr.
    """
    umi_stats = Counter()
    iupac = args.umi.upper()
    u_leng = len(iupac)
    end = args.end
    for r in readfq(args.fastq):
        # clip_umi returns the Fastq on success, or the bad UMI string
        r = clip_umi(r, iupac, u_leng, end)
        if type(r) is Fastq:
            print(r)
        else:
            umi_stats.update([r])
    if args.verbose:
        print("Invalid UMI Total:", sum(umi_stats.values()), file=sys.stderr)
        print("Unique UMIs Removed:", len(list(umi_stats)), file=sys.stderr)
        print("Top %d Invalid UMIs:" % args.top, file=sys.stderr)
        for umi, val in umi_stats.most_common(args.top):
            print(umi, val, sep="\t", file=sys.stderr)
def main():
    """CLI entry point: builds the 'trim' and 'rmdup' subcommands and dispatches."""
    # NOTE(review): ArgumentParser(version=...) is a Python 2 argparse feature;
    # it was removed in Python 3 — confirm this module is py2-only.
    p = ArgumentParser(description=__doc__, version=__version__)
    subp = p.add_subparsers(help='commands')
    # fastq processing
    fastq = subp.add_parser('trim', description=("Trims the UMI sequence from the read, incorporating the unique "
                                                 "sequence in the read name facilitating filtering of the alignments."),
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            help="trim UMI and incorporate sequence into read name")
    fastq.add_argument('fastq', metavar='FASTQ',
                       help='reads with untrimmed UMI')
    fastq.add_argument('umi', metavar='UMI',
                       help='IUPAC UMI sequence, e.g. NNNNNV')
    fastq.add_argument('--end', choices=['5', '3'], default="5",
                       help="UMI location on the read")
    fastq.add_argument('--verbose', action='store_true',
                       help="print UMI stats to stderr")
    fastq.add_argument('--top', type=int, default=10,
                       help="when verbose, print this many of the top filtered UMI sequences")
    fastq.set_defaults(func=process_fastq)
    # bam processing
    bam = subp.add_parser('rmdup', description=("Removes duplicate reads, that were previously characterized by "
                                                "their UMI, at any given start location. Coverage differences before "
                                                "and after are written to STDOUT as BED3+."),
                          formatter_class=ArgumentDefaultsHelpFormatter,
                          help="remove duplicate UMI entries from all start positions")
    bam.add_argument('abam', metavar='INPUT_BAM',
                     help='bam with UMI in read name')
    bam.add_argument('bbam', metavar='OUTPUT_BAM',
                     help='non-duplicate UMIs at any given start position')
    bam.add_argument('-m', '--mismatches', default=0, type=int,
                     help="allowable mismatches when comparing UMIs at any given start location")
    bam.set_defaults(func=process_bam)
    args = p.parse_args()
    # each subparser sets 'func' to its handler
    args.func(args)
if __name__ == "__main__":
    # run the embedded doctests first; only launch the CLI when they all pass
    if doctest.testmod(optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE).failed == 0:
        main()
parser changes, py3 compatibility, compiled regex, docs
#!/usr/bin/env python
# encoding: utf-8
"""
Tools to handle reads sequenced with unique molecular identifiers (UMIs).
"""
from __future__ import print_function
import editdistance
import gzip
import os
import re
import sys
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from collections import Counter, defaultdict
from itertools import islice, izip
from pysam import Samfile
try:
from itertools import izip as zip
except ImportError:
pass
from ._version import __version__
IUPAC = {
"A": "A",
"T": "T",
"C": "C",
"G": "G",
"R": "GA",
"Y": "TC",
"M": "AC",
"K": "GT",
"S": "GC",
"W": "AT",
"H": "ACT",
"B": "GTC",
"V": "GCA",
"D": "GAT",
"N": "GATC"
}
UMI_REGEX = re.compile(r'UMI_([\w]*)')
gzopen = lambda f: gzip.open(f) if f.endswith(".gz") else open(f)
class Fastq(object):
    """FASTQ record. Holds record of name, sequence, and quality scores.
    """
    def __init__(self, args):
        # args is the raw 4-line record; args[2] (the '+' separator) is unused
        self.name = args[0][1:]
        self.seq = args[1]
        self.qual = args[3]
        # a valid record has one quality character per base
        assert len(self.seq) == len(self.qual)

    def __repr__(self):
        return "Fastq(%s)" % self.name

    def __str__(self):
        # serialize back to standard 4-line FASTQ form
        return "@%s\n%s\n+\n%s" % (self.name, self.seq, self.qual)
def umi_from_name(name):
    """Extracts the UMI sequence from the read name.

    Args:
        name (str): Name of the sequence

    Returns:
        str: UMI sequence

    >>> umi_from_name("cluster_1017333:UMI_GCCGCA")
    'GCCGCA'
    """
    first_match = UMI_REGEX.findall(name)[0]
    return first_match.strip()
def passing_distances(query, targets, n):
    """Tests target set of sequences to the query.

    Args:
        query (str): query sequence
        targets (set): unique sequences
        n (int): allowable mismatches when comparing a query to a given sequence of the targets

    Returns:
        bool

    >>> s = "ACTGA"
    >>> ts_1 = {"ACTGG"}
    >>> ts_2 = {"ACTCC", "ACTGG"}
    >>> ts_3 = {"ACTCC", "ACTTT"}
    >>> n = 1
    >>> passing_distances(s, ts_1, n)
    True
    >>> passing_distances(s, ts_2, n)
    True
    >>> passing_distances(s, ts_3, n)
    False
    """
    # any() short-circuits on the first match, like the original loop
    return any(editdistance.distance(candidate, query) <= n for candidate in targets)
def process_bam(abam, bbam, mismatches=0):
    """Removes duplicate reads characterized by their UMI at any given start location.

    Args:
        abam (str): Input bam with potential duplicate UMIs
        bbam (str): Output bam after removing duplicate UMIs
        mismatches (Optional[int]): Allowable edit distance between UMIs

    Writes the filtered BAM to *bbam* and per-start before/after read counts
    to stdout as tab-separated BED3+ rows.
    """
    # BUG FIX: the body referenced the CLI namespace (args.abam, args.bbam,
    # args.mismatches), which is undefined inside this function since the
    # signature was changed to plain parameters; use the parameters instead.
    with Samfile(abam, 'rb') as in_bam, Samfile(bbam, 'wb', template=in_bam) as out_bam:
        for chrom in in_bam.references:
            print("processing chromosome", chrom, file=sys.stderr)
            # start position -> set of unique UMIs kept at that position
            umi_idx = defaultdict(set)
            # start position -> total reads observed (before dedup)
            read_counts = Counter()
            for read in in_bam.fetch(chrom):
                if read.is_unmapped:
                    continue
                # get the iupac umi sequence
                umi = umi_from_name(read.qname)
                # get actual read start
                # read.pos accounts for 5' soft clipping
                if read.is_reverse:
                    # read.alen alignment length accounting for 3' soft clipping
                    # UMIs are then compared to reads with the same start
                    read_start = read.pos + read.alen
                else:
                    read_start = read.pos
                # add count for this start; counts all reads
                read_counts[read_start] += 1
                # check if UMI seen
                if umi in umi_idx[read_start]:
                    continue
                elif mismatches > 0 and passing_distances(umi, umi_idx[read_start], mismatches):
                    # within tolerance of an existing UMI: record as seen,
                    # but do not write the read
                    umi_idx[read_start].add(umi)
                    continue
                # keep track of unique UMIs - set eliminates duplicates
                umi_idx[read_start].add(umi)
                out_bam.write(read)
            # process before and after counts over chrom
            for start, before_count in sorted(read_counts.items()):
                print(chrom, start, start + 1, before_count, len(umi_idx[start]), sep="\t")
def readfq(filehandle):
    """Fastq iterator.

    Args:
        filehandle (file): open file handle

    Yields:
        Fastq
    """
    fqclean = (x.strip("\r\n") for x in filehandle if x.strip())
    while True:
        rd = list(islice(fqclean, 4))
        if not rd:
            # BUG FIX: raising StopIteration inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); 'return' is the
            # equivalent, version-safe way to end the generator.
            return
        # a truncated file would yield a partial record
        assert all(rd) and len(rd) == 4
        yield Fastq(rd)
def valid_umi(iupac, umi):
    """Parse UMI sequence to validate against IUPAC sequence.

    Args:
        iupac (str): IUPAC sequence
        umi (str): observed sequence

    Returns:
        bool

    >>> valid_umi("NNNV", "ACGT")
    False
    >>> valid_umi("NNNV", "ACGG")
    True
    """
    for code, base in zip(iupac, umi):
        allowed = IUPAC.get(code)
        # an unknown IUPAC code or a disallowed base both invalidate the UMI
        if allowed is None or base not in allowed:
            return False
    return True
def clip_umi(record, iupac_umi, n, end):
    """Removed UMI sequence from read, trims respective length from qual, then appends UMI onto read name.

    Args:
        record (Fastq): `Fastq` record
        iupac_umi (str): IUPAC sequence of the UMI
        n (int): Length of the UMI
        end (int): The end of the read on which the UMI resides

    Returns:
        Fastq else str: The record or the failed UMI sequence

    >>> fq = Fastq(["@cluster_455 2","GGGGGAGCCACGAGGTGTGTTTTATTTTCATTATTC","+","C===>=B=@:<;4A;8=9?6EEC0?DDA72B@3EB4"])
    >>> clip_umi(fq, "NNNNNV", 6, 5)
    Fastq(cluster_455:UMI_GGGGGA 2)
    >>> fq = Fastq(["@cluster_455 2","GGXXGAGCCACGAGGTGTGTTTTATTTTCATTATTC","+","C===>=B=@:<;4A;8=9?6EEC0?DDA72B@3EB4"])
    >>> clip_umi(fq, "NNNNNV", 6, 5)
    'GGXXGA'
    """
    # NOTE: the record is mutated (seq/qual trimmed) even when the UMI turns
    # out to be invalid and only the UMI string is returned.
    if end == 5:
        umi = record.seq[:n]
        record.seq = record.seq[n:]
        record.qual = record.qual[n:]
    else:
        umi = record.seq[-n:]
        record.seq = record.seq[:-n]
        record.qual = record.qual[:-n]
    if not valid_umi(iupac_umi, umi):
        return umi
    try:
        # paired reads carry " <pair>" after the name; keep it after the UMI
        name, pair = record.name.split(" ", 1)
        record.name = "{name}:UMI_{umi} {pair}".format(name=name, umi=umi, pair=pair)
    except ValueError:
        record.name = "{name}:UMI_{umi}".format(name=record.name, umi=umi)
    return record
def process_fastq(fastq, umi, end=5, verbose=False, top=10):
    """For every valid umi, trim while incorporating UMI into read name.

    Args:
        fastq (str): file path to unprocessed FASTQ file
        umi (str): IUPAC sequence of UMI
        end (Optional[int]): 5 or 3, which ever end you're UMI is located on
        verbose (Optional[bool]): True prints basic stats on observed UMIs
        top (Optional[int]): Number of the the top invalid UMIs to print out

    Valid records are printed to stdout; invalid UMI stats go to stderr.
    """
    umi_stats = Counter()
    umi = umi.upper()
    u_leng = len(umi)
    with gzopen(fastq) as fq:
        for read in readfq(fq):
            # clip_umi returns the Fastq on success, or the bad UMI string
            read = clip_umi(read, umi, u_leng, end)
            if type(read) is Fastq:
                print(read)
            else:
                umi_stats.update([read])
    if verbose:
        print("Invalid UMI Total:", sum(umi_stats.values()), file=sys.stderr)
        print("Unique UMIs Removed:", len(list(umi_stats)), file=sys.stderr)
        print("Top", top, "Invalid UMIs:", file=sys.stderr)
        # NOTE: 'umi' is rebound here, shadowing the parameter (safe: it is
        # no longer needed at this point)
        for umi, val in umi_stats.most_common(top):
            print(umi, val, sep="\t", file=sys.stderr)
def main():
    """CLI entry point: builds the 'trim' and 'rmdup' subcommands and dispatches."""
    def _file_exists(parser, arg):
        # argparse 'type' hook: validate the path points at an existing file
        if not os.path.exists(arg):
            parser.error("The file %s does not exist" % arg)
        if not os.path.isfile(arg):
            parser.error("Expected file, not folder (%s)" % arg)
        return arg

    p = ArgumentParser(description=__doc__)
    p.add_argument('--version', action='version', version='%(prog)s {version}'.format(version=__version__))
    subp = p.add_subparsers(help='commands', dest='command')
    # fastq processing
    fastq = subp.add_parser('trim', description=("Trims the UMI sequence from the read, incorporating the unique "
                                                 "sequence in the read name facilitating filtering of the alignments."),
                            formatter_class=ArgumentDefaultsHelpFormatter,
                            help="trim UMI and incorporate sequence into read name")
    fastq.add_argument('fastq', metavar='FASTQ', type=lambda x: _file_exists(p, x),
                       help='reads with untrimmed UMI')
    fastq.add_argument('umi', metavar='UMI',
                       help='IUPAC UMI sequence, e.g. NNNNNV')
    # BUG FIX: choices must be ints to match type=int. With the previous
    # string choices ['5', '3'], argparse converted the value to int and then
    # rejected every input (e.g. "--end 5" failed with 'invalid choice: 5').
    fastq.add_argument('--end', choices=[5, 3], default=5, type=int,
                       help="UMI location on the read")
    fastq.add_argument('--verbose', action='store_true',
                       help="print UMI stats to stderr")
    fastq.add_argument('--top', type=int, default=10,
                       help="when verbose, print this many of the top filtered UMI sequences")
    # bam processing
    bam = subp.add_parser('rmdup', description=("Removes duplicate reads, that were previously characterized by "
                                                "their UMI, at any given start location. Coverage differences before "
                                                "and after are written to STDOUT as BED3+."),
                          formatter_class=ArgumentDefaultsHelpFormatter,
                          help="remove duplicate UMI entries from all start positions")
    bam.add_argument('abam', metavar='INPUT_BAM', type=lambda x: _file_exists(p, x),
                     help='bam with UMI in read name')
    bam.add_argument('bbam', metavar='OUTPUT_BAM',
                     help='non-duplicate UMIs at any given start position')
    bam.add_argument('-m', '--mismatches', default=0, type=int,
                     help="allowable mismatches when comparing UMIs at any given start location")
    args = p.parse_args()
    # dispatch on the subcommand name recorded via dest='command'
    if args.command == 'trim':
        process_fastq(args.fastq, args.umi, args.end, args.verbose, args.top)
    elif args.command == 'rmdup':
        process_bam(args.abam, args.bbam, args.mismatches)
if __name__ == "__main__":
    # script entry point
    main()
|
from snovault import (
AfterModified,
BeforeModified,
CONNECTION,
calculated_property,
collection,
load_schema,
abstract_collection,
)
from snovault.schema_utils import schema_validator
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch
)
from snovault.attachment import ItemWithAttachment
from .base import (
Item,
collection_add,
item_edit,
ALLOW_SUBMITTER_ADD,
get_item_if_you_can,
lab_award_attribution_embed_list
)
from pyramid.httpexceptions import (
HTTPForbidden,
HTTPTemporaryRedirect,
HTTPNotFound,
)
from pyramid.response import Response
from pyramid.settings import asbool
from pyramid.view import view_config
from urllib.parse import (
parse_qs,
urlparse,
)
import boto3
from botocore.exceptions import ClientError
import datetime
import json
import pytz
import os
from pyramid.traversal import resource_path
from encoded.search import make_search_subreq
from snovault.elasticsearch import ELASTIC_SEARCH
from . import TrackingItem
from ..authentication import session_properties
import logging
logging.getLogger('boto3').setLevel(logging.CRITICAL)
log = logging.getLogger(__name__)
BEANSTALK_ENV_PATH = "/opt/python/current/env"
# Embed paths pulled in when indexing files, exposing linked workflow-run
# metadata (inputs, outputs, quality metrics) on file search results.
file_workflow_run_embeds = [
    'workflow_run_inputs.workflow.title',
    'workflow_run_inputs.input_files.workflow_argument_name',
    'workflow_run_inputs.input_files.value.filename',
    'workflow_run_inputs.input_files.value.display_title',
    'workflow_run_inputs.input_files.value.file_format',
    'workflow_run_inputs.input_files.value.uuid',
    'workflow_run_inputs.input_files.value.accession',
    'workflow_run_inputs.output_files.workflow_argument_name',
    'workflow_run_inputs.output_files.value.display_title',
    'workflow_run_inputs.output_files.value.file_format',
    'workflow_run_inputs.output_files.value.uuid',
    'workflow_run_inputs.output_files.value.accession',
    'workflow_run_inputs.output_quality_metrics.name',
    'workflow_run_inputs.output_quality_metrics.value.uuid'
]
# Processed files also embed the same paths for runs they are OUTPUTS of.
file_workflow_run_embeds_processed = file_workflow_run_embeds + [e.replace('workflow_run_inputs.', 'workflow_run_outputs.') for e in file_workflow_run_embeds]
def show_upload_credentials(request=None, context=None, status=None):
    """Return True when the requester may edit *context* and the file status
    still allows uploading; used as a calculated-property condition."""
    # credentials are only exposed while the file is in an upload-related state
    upload_states = ('uploading', 'to be uploaded by workflow', 'upload failed')
    if request is not None and status in upload_states:
        return request.has_permission('edit', context)
    return False
def force_beanstalk_env(profile_name, config_file=None):
    """Return a boto3 STS client, sourcing AWS credentials from the Elastic
    Beanstalk environment file when they are not already set in os.environ.

    Args:
        profile_name: accepted by the interface but not used in this body.
        config_file: path to the env file; defaults to BEANSTALK_ENV_PATH.
    """
    # set env variables if we are on elasticbeanstalk
    if not config_file:
        config_file = BEANSTALK_ENV_PATH
    if os.path.exists(config_file):
        if not os.environ.get("AWS_ACCESS_KEY_ID"):
            import subprocess
            # 'source <file> && env' dumps the resulting environment as
            # KEY=VALUE lines; copy them all into this process's environment
            command = ['bash', '-c', 'source ' + config_file + ' && env']
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
            for line in proc.stdout:
                key, _, value = line.partition("=")
                os.environ[key] = value[:-1]  # drop the trailing newline
            proc.communicate()
    conn = boto3.client('sts', aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
                        aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
    return conn
def external_creds(bucket, key, name=None, profile_name=None):
    '''
    Build the 'external' propsheet describing a file's S3 location.

    if name is None, we want the link to s3 but no need to generate
    an access token. This is useful for linking metadata to files that
    already exist on s3. Otherwise a federation token scoped to
    s3:PutObject on exactly this bucket/key is requested via STS.
    '''
    import logging
    logging.getLogger('boto3').setLevel(logging.CRITICAL)
    credentials = {}
    if name is not None:
        # policy restricts the federated user to uploading this one object
        policy = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': 's3:PutObject',
                    'Resource': 'arn:aws:s3:::{bucket}/{key}'.format(bucket=bucket, key=key),
                }
            ]
        }
        # boto.set_stream_logger('boto3')
        conn = force_beanstalk_env(profile_name)
        token = conn.get_federation_token(Name=name, Policy=json.dumps(policy))
        # 'access_key' 'secret_key' 'expiration' 'session_token'
        credentials = token.get('Credentials')
        credentials.update({
            'upload_url': 's3://{bucket}/{key}'.format(bucket=bucket, key=key),
            'federated_user_arn': token.get('FederatedUser').get('Arn'),
            'federated_user_id': token.get('FederatedUser').get('FederatedUserId'),
            'request_id': token.get('ResponseMetadata').get('RequestId'),
            'key': key
        })
    return {
        'service': 's3',
        'bucket': bucket,
        'key': key,
        'upload_credentials': credentials,
    }
def property_closure(request, propname, root_uuid):
    """Return the set of uuids reachable from *root_uuid* by repeatedly
    following the *propname* links; cycle-safe breadth-first traversal."""
    conn = request.registry[CONNECTION]
    visited = set()
    frontier = {str(root_uuid)}
    while frontier:
        visited.update(frontier)
        discovered = set()
        for uuid in frontier:
            item = conn.get_by_uuid(uuid)
            discovered.update(item.__json__(request).get(propname, ()))
        # only expand uuids we have not seen yet, so cycles terminate
        frontier = discovered - visited
    return visited
@collection(
    name='file-sets',
    unique_key='accession',
    properties={
        'title': 'File Sets',
        'description': 'Listing of File Sets',
    })
class FileSet(Item):
    """Collection of files stored under fileset."""
    item_type = 'file_set'
    schema = load_schema('encoded:schemas/file_set.json')
    name_key = 'accession'  # items are addressable by accession
    embedded_list = []
@collection(
    name='file-set-calibrations',
    unique_key='accession',
    properties={
        'title': 'Calibration File Sets',
        'description': 'Listing of File Sets',
    })
class FileSetCalibration(FileSet):
    """Collection of files stored under fileset."""
    # searchable as both 'FileSetCalibration' and its parent 'FileSet'
    base_types = ['FileSet'] + Item.base_types
    item_type = 'file_set_calibration'
    schema = load_schema('encoded:schemas/file_set_calibration.json')
    name_key = 'accession'
    # embed member-file details for display on the set page
    embedded_list = ['files_in_set.submitted_by.job_title',
                     'files_in_set.lab.title',
                     'files_in_set.accession',
                     'files_in_set.href',
                     'files_in_set.file_size',
                     'files_in_set.upload_key',
                     'files_in_set.file_format.file_format',
                     'files_in_set.file_classification'
                     ]
@collection(
    name='file-set-microscope-qcs',
    unique_key='accession',
    properties={
        'title': 'Microscope QC File Sets',
        'description': 'Listing of File Sets',
    })
class FileSetMicroscopeQc(ItemWithAttachment, FileSet):
    """Collection of files stored under fileset."""
    # ItemWithAttachment mixin adds attachment (document) support
    base_types = ['FileSet'] + Item.base_types
    item_type = 'file_set_microscope_qc'
    schema = load_schema('encoded:schemas/file_set_microscope_qc.json')
    name_key = 'accession'
    # embed member-file details for display on the set page
    embedded_list = [
        'files_in_set.submitted_by.job_title',
        'files_in_set.lab.title',
        'files_in_set.accession',
        'files_in_set.href',
        'files_in_set.file_size',
        'files_in_set.upload_key',
        'files_in_set.file_format.file_format',
        'files_in_set.file_classification'
    ]
@abstract_collection(
    name='files',
    unique_key='accession',
    acl=ALLOW_SUBMITTER_ADD,
    properties={
        'title': 'Files',
        'description': 'Listing of Files',
    })
class File(Item):
    """Abstract base item for individual files; handles S3 upload credential
    propsheets, extra files, and reverse 'related_files' bookkeeping."""
    item_type = 'file'
    base_types = ['File'] + Item.base_types
    schema = load_schema('encoded:schemas/file.json')
    embedded_list = lab_award_attribution_embed_list + [
        'experiments.display_title',
        'experiments.accession',
        'experiments.experiment_type',
        'experiments.experiment_sets.accession',
        'experiments.experiment_sets.experimentset_type',
        'experiments.experiment_sets.@type',
        'experiments.biosample.biosource.display_title',
        'experiments.biosample.biosource.biosource_type',
        'experiments.biosample.biosource_summary',
        'experiments.biosample.modifications_summary',
        'experiments.biosample.treatments_summary',
        'experiments.biosample.biosource.individual.organism.name',
        'experiments.digestion_enzyme.name',
        'file_format.file_format',
        'related_files.relationship_type',
        'related_files.file.accession'
    ]
    name_key = 'accession'
    # reverse links: experiments that list this file in their 'files'
    rev = {
        'experiments': ('Experiment', 'files'),
    }

    @calculated_property(schema={
        "title": "Experiments",
        "description": "Experiments that this file is associated with",
        "type": "array",
        "items": {
            "title": "Experiments",
            "type": ["string", "object"],
            "linkTo": "Experiment"
        }
    })
    def experiments(self, request):
        """@ids of experiments rev-linking to this file."""
        return self.rev_link_atids(request, "experiments")

    @calculated_property(schema={
        "title": "Display Title",
        "description": "Name of this File",
        "type": "string"
    })
    def display_title(self, request, file_format, accession=None, external_accession=None):
        """Accession (or external accession) plus the format's file extension."""
        accession = accession or external_accession
        file_format_item = get_item_if_you_can(request, file_format, 'file-formats')
        try:
            # format item may be absent; fall back to no extension
            file_extension = '.' + file_format_item.get('standard_file_extension')
        except AttributeError:
            file_extension = ''
        return '{}{}'.format(accession, file_extension)

    @calculated_property(schema={
        "title": "File Type",
        "description": "Type of File",
        "type": "string"
    })
    def file_type_detailed(self, request, file_format, file_type=None):
        """'<file_type> (<format>)', defaulting file_type to 'other'."""
        outString = (file_type or 'other')
        file_format_item = get_item_if_you_can(request, file_format, 'file-formats')
        try:
            fformat = file_format_item.get('file_format')
            outString = outString + ' (' + fformat + ')'
        except AttributeError:
            pass
        return outString

    def _update(self, properties, sheets=None):
        """Persist properties, refreshing S3 credential propsheets, extra-file
        metadata, and reciprocal 'related_files' entries on linked files."""
        if not properties:
            return
        # ensure we always have s3 links setup
        sheets = {} if sheets is None else sheets.copy()
        uuid = self.uuid
        old_creds = self.propsheets.get('external', None)
        new_creds = old_creds
        # don't get new creds
        if properties.get('status', None) in ('uploading', 'to be uploaded by workflow',
                                              'upload failed'):
            new_creds = self.build_external_creds(self.registry, uuid, properties)
            sheets['external'] = new_creds
        # handle extra files
        updated_extra_files = []
        extra_files = properties.get('extra_files', [])
        if extra_files:
            # get @id for parent file
            try:
                at_id = resource_path(self)
            except:  # NOTE(review): bare except silently masks all errors here
                at_id = "/" + str(uuid) + "/"
            # ensure at_id ends with a slash
            if not at_id.endswith('/'):
                at_id += '/'
            file_formats = []
            for xfile in extra_files:
                # ensure a file_format (identifier for extra_file) is given and non-null
                if not('file_format' in xfile and bool(xfile['file_format'])):
                    continue
                eformat = xfile['file_format']
                if eformat.startswith('/file-formats/'):
                    eformat = eformat[len('/file-formats/'):-1]
                xfile_format = self.registry['collections']['FileFormat'].get(eformat)
                xff_uuid = str(xfile_format.uuid)
                if not xff_uuid:
                    raise Exception("Cannot find format item for the extra file")
                if xff_uuid in file_formats:
                    raise Exception("Each file in extra_files must have unique file_format")
                file_formats.append(xff_uuid)
                xfile['file_format'] = xff_uuid
                xfile['accession'] = properties.get('accession')
                # just need a filename to trigger creation of credentials
                xfile['filename'] = xfile['accession']
                xfile['uuid'] = str(uuid)
                xfile['status'] = properties.get('status')
                ext = self.build_external_creds(self.registry, uuid, xfile)
                # build href
                file_extension = xfile_format.properties.get('standard_file_extension')
                filename = '{}.{}'.format(xfile['accession'], file_extension)
                xfile['href'] = at_id + '@@download/' + filename
                xfile['upload_key'] = ext['key']
                # each extra file gets its own 'external<format-uuid>' propsheet
                sheets['external' + xfile['file_format']] = ext
                updated_extra_files.append(xfile)
        if extra_files:
            properties['extra_files'] = updated_extra_files
        if old_creds:
            # the upload key changed: remove the stale S3 object
            if old_creds.get('key') != new_creds.get('key'):
                try:
                    # delete the old sumabeach
                    conn = boto3.client('s3')
                    bname = old_creds['bucket']
                    conn.delete_object(Bucket=bname, Key=old_creds['key'])
                except Exception as e:
                    # best-effort cleanup; NOTE(review): prefer log.error over print
                    print(e)
        # update self first to ensure 'related_files' are stored in self.properties
        super(File, self)._update(properties, sheets)
        # maps each relationship to its reciprocal, written onto the other file
        DicRefRelation = {
            "derived from": "parent of",
            "parent of": "derived from",
            "supercedes": "is superceded by",
            "is superceded by": "supercedes",
            "paired with": "paired with"
        }
        acc = str(self.uuid)
        if 'related_files' in properties.keys():
            for relation in properties["related_files"]:
                try:
                    switch = relation["relationship_type"]
                    rev_switch = DicRefRelation[switch]
                    related_fl = relation["file"]
                    relationship_entry = {"relationship_type": rev_switch, "file": acc}
                    rel_dic = {'related_files': [relationship_entry, ]}
                except:  # NOTE(review): bare except; missing keys abort ALL remaining relations
                    print("invalid data, can't update correctly")
                    return
                target_fl = self.collection.get(related_fl)
                # case one we don't have relations
                if 'related_files' not in target_fl.properties.keys():
                    target_fl.properties.update(rel_dic)
                    target_fl.update(target_fl.properties)
                else:
                    # case two we have relations but not the one we need
                    for target_relation in target_fl.properties['related_files']:
                        if target_relation.get('file') == acc:
                            break
                    else:
                        # make data for new related_files
                        target_fl.properties['related_files'].append(relationship_entry)
                        target_fl.update(target_fl.properties)

    @property
    def __name__(self):
        """Traversal name: accession normally, uuid once the item is replaced."""
        properties = self.upgrade_properties()
        if properties.get('status') == 'replaced':
            return self.uuid
        return properties.get(self.name_key, None) or self.uuid

    def unique_keys(self, properties):
        """Add an 'md5:<sum>' alias key for non-replaced files."""
        keys = super(File, self).unique_keys(properties)
        if properties.get('status') != 'replaced':
            if 'md5sum' in properties:
                value = 'md5:{md5sum}'.format(**properties)
                keys.setdefault('alias', []).append(value)
        return keys

    @calculated_property(schema={
        "title": "Title",
        "type": "string",
        "description": "Accession of this file"
    })
    def title(self):
        return self.properties.get('accession', self.properties.get('external_accession'))

    @calculated_property(schema={
        "title": "Download URL",
        "type": "string",
        "description": "Use this link to download this file."
    })
    def href(self, request):
        """@@download URL ending in '<accession>.<extension>'."""
        file_format = self.properties.get('file_format')
        fformat = get_item_if_you_can(request, file_format, 'file-formats')
        try:
            file_extension = '.' + fformat.get('standard_file_extension')
        except AttributeError:
            file_extension = ''
        accession = self.properties.get('accession', self.properties.get('external_accession'))
        filename = '{}{}'.format(accession, file_extension)
        return request.resource_path(self) + '@@download/' + filename

    @calculated_property(schema={
        "title": "Upload Key",
        "type": "string",
    })
    def upload_key(self, request):
        """S3 key for upload, building credentials on demand if absent."""
        properties = self.properties
        external = self.propsheets.get('external', {})
        if not external:
            try:
                external = self.build_external_creds(self.registry, self.uuid, properties)
            except ClientError:
                log.error(os.environ)
                log.error(self.properties)
                return 'UPLOAD KEY FAILED'
        return external['key']

    @calculated_property(condition=show_upload_credentials, schema={
        "type": "object",
    })
    def upload_credentials(self):
        """Federated-upload credentials; only shown per show_upload_credentials."""
        external = self.propsheets.get('external', None)
        if external is not None:
            return external['upload_credentials']

    @calculated_property(condition=show_upload_credentials, schema={
        "type": "object",
    })
    def extra_files_creds(self):
        """Per-extra-file upload credentials from the format-specific propsheets."""
        external = self.propsheets.get('external', None)
        if external is not None:
            extras = []
            for extra in self.properties.get('extra_files', []):
                eformat = extra.get('file_format')
                xfile_format = self.registry['collections']['FileFormat'].get(eformat)
                try:
                    xff_uuid = str(xfile_format.uuid)
                except AttributeError:
                    # NOTE(review): prefer log.error over print
                    print("Can't find required format uuid for %s" % eformat)
                    continue
                extra_creds = self.propsheets.get('external' + xff_uuid)
                extra['upload_credentials'] = extra_creds['upload_credentials']
                extras.append(extra)
            return extras

    @classmethod
    def get_bucket(cls, registry):
        """S3 bucket name for uploads (subclasses may override)."""
        return registry.settings['file_upload_bucket']

    @classmethod
    def build_external_creds(cls, registry, uuid, properties):
        """Build the S3 'external' propsheet ({uuid}/{accession}.{ext} key)."""
        bucket = cls.get_bucket(registry)
        fformat = properties.get('file_format')
        if fformat.startswith('/file-formats/'):
            fformat = fformat[len('/file-formats/'):-1]
        prop_format = registry['collections']['FileFormat'].get(fformat)
        try:
            file_extension = prop_format.properties['standard_file_extension']
        except KeyError:
            raise Exception('File format not in list of supported file types')
        key = '{uuid}/{accession}.{file_extension}'.format(
            file_extension=file_extension, uuid=uuid,
            accession=properties.get('accession'))
        # remove the path from the file name and only take first 32 chars
        fname = properties.get('filename')
        name = None
        if fname:
            name = fname.split('/')[-1][:32]
        profile_name = registry.settings.get('file_upload_profile_name')
        return external_creds(bucket, key, name, profile_name)

    @classmethod
    def create(cls, registry, uuid, properties, sheets=None):
        """Create the item, attaching S3 credentials when status implies upload."""
        if properties.get('status') in ('uploading', 'to be uploaded by workflow'):
            sheets = {} if sheets is None else sheets.copy()
            sheets['external'] = cls.build_external_creds(registry, uuid, properties)
        return super(File, cls).create(registry, uuid, properties, sheets)

    class Collection(Item.Collection):
        pass
@collection(
    name='files-fastq',
    unique_key='accession',
    properties={
        'title': 'FASTQ Files',
        'description': 'Listing of FASTQ Files',
    })
class FileFastq(File):
    """Collection for individual fastq files."""
    item_type = 'file_fastq'
    schema = load_schema('encoded:schemas/file_fastq.json')
    # also embed workflow-run info and fastqc-style quality metrics
    embedded_list = File.embedded_list + file_workflow_run_embeds + [
        "quality_metric.overall_quality_status",
        "quality_metric.Total Sequences",
        "quality_metric.Sequence length",
        "quality_metric.url"
    ]
    name_key = 'accession'
    # reverse links: workflow runs using this file as input/output
    rev = dict(File.rev, **{
        'workflow_run_inputs': ('WorkflowRun', 'input_files.value'),
        'workflow_run_outputs': ('WorkflowRun', 'output_files.value'),
    })

    @calculated_property(schema={
        "title": "Input of Workflow Runs",
        "description": "All workflow runs that this file serves as an input to",
        "type": "array",
        "items": {
            "title": "Input of Workflow Run",
            "type": ["string", "object"],
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_inputs(self, request):
        """@ids of workflow runs that consume this file."""
        return self.rev_link_atids(request, "workflow_run_inputs")

    @calculated_property(schema={
        "title": "Output of Workflow Runs",
        "description": "All workflow runs that this file serves as an output from",
        "type": "array",
        "items": {
            "title": "Output of Workflow Run",
            "type": "string",
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_outputs(self, request):
        """@ids of workflow runs that produced this file."""
        return self.rev_link_atids(request, "workflow_run_outputs")
@collection(
    name='files-processed',
    unique_key='accession',
    properties={
        'title': 'Processed Files',
        'description': 'Listing of Processed Files',
    })
class FileProcessed(File):
    """Collection for individual processed files."""
    item_type = 'file_processed'
    schema = load_schema('encoded:schemas/file_processed.json')
    # embed Hi-C style quality-metric summary fields for display
    embedded_list = File.embedded_list + file_workflow_run_embeds_processed + [
        "quality_metric.Total reads",
        "quality_metric.Trans reads",
        "quality_metric.Cis reads (>20kb)",
        "quality_metric.Short cis reads (<20kb)",
        "quality_metric.url"
    ]
    name_key = 'accession'
    # reverse links: workflow runs plus the experiments / experiment sets
    # that reference this file in their (other_)processed_files lists
    rev = dict(File.rev, **{
        'workflow_run_inputs': ('WorkflowRun', 'input_files.value'),
        'workflow_run_outputs': ('WorkflowRun', 'output_files.value'),
        'experiments': ('Experiment', 'processed_files'),
        'experiment_sets': ('ExperimentSet', 'processed_files'),
        'other_experiments': ('Experiment', 'other_processed_files.files'),
        'other_experiment_sets': ('ExperimentSet', 'other_processed_files.files')
    })

    @classmethod
    def get_bucket(cls, registry):
        # processed files live in the workflow-output bucket, not the upload bucket
        return registry.settings['file_wfout_bucket']

    @calculated_property(schema={
        "title": "Input of Workflow Runs",
        "description": "All workflow runs that this file serves as an input to",
        "type": "array",
        "items": {
            "title": "Input of Workflow Run",
            "type": ["string", "object"],
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_inputs(self, request):
        """@ids of WorkflowRuns that consume this file as input."""
        return self.rev_link_atids(request, "workflow_run_inputs")

    @calculated_property(schema={
        "title": "Output of Workflow Runs",
        "description": "All workflow runs that this file serves as an output from",
        "type": "array",
        "items": {
            "title": "Output of Workflow Run",
            "type": "string",
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_outputs(self, request):
        """@ids of WorkflowRuns that produced this file as output."""
        return self.rev_link_atids(request, "workflow_run_outputs")

    @calculated_property(schema={
        "title": "Experiment Sets",
        "description": "All Experiment Sets that this file belongs to",
        "type": "array",
        "items": {
            "title": "Experiment Set",
            "type": "string",
            "linkTo": "ExperimentSet"
        }
    })
    def experiment_sets(self, request):
        """Union of sets referencing this file via processed_files or other_processed_files."""
        return self.rev_link_atids(request, "experiment_sets") + self.rev_link_atids(request, "other_experiment_sets")

    @calculated_property(schema={
        "title": "Experiments",
        "description": "Experiments that this file belongs to",
        "type": "array",
        "items": {
            "title": "Experiment",
            "type": "string",
            "linkTo": "Experiment"
        }
    })
    def experiments(self, request):
        """Union of experiments referencing this file via processed_files or other_processed_files."""
        return self.rev_link_atids(request, "experiments") + self.rev_link_atids(request, "other_experiments")

    # processed files don't want md5 as unique key
    def unique_keys(self, properties):
        keys = super(FileProcessed, self).unique_keys(properties)
        if keys.get('alias'):
            # strip the md5-based aliases added by File.unique_keys
            keys['alias'] = [k for k in keys['alias'] if not k.startswith('md5:')]
        return keys
@collection(
    name='files-reference',
    unique_key='accession',
    properties={
        # fixed typo: was 'Refenrence Files'
        'title': 'Reference Files',
        'description': 'Listing of Reference Files',
    })
class FileReference(File):
    """Collection for individual reference files (genomes, indexes, etc.)."""
    item_type = 'file_reference'
    schema = load_schema('encoded:schemas/file_reference.json')
    embedded_list = File.embedded_list
    name_key = 'accession'
@collection(
    name='files-calibration',
    unique_key='accession',
    properties={
        'title': 'Calibration Files',
        'description': 'Listing of Calibration Files',
    })
class FileCalibration(ItemWithAttachment, File):
    """Collection for individual calibration files.

    Mixes in ItemWithAttachment so a calibration file can carry an attachment.
    """
    item_type = 'file_calibration'
    schema = load_schema('encoded:schemas/file_calibration.json')
    embedded_list = File.embedded_list
    name_key = 'accession'
@collection(
    name='files-microscopy',
    unique_key='accession',
    properties={
        'title': 'Microscopy Files',
        'description': 'Listing of Microscopy Files',
    })
class FileMicroscopy(ItemWithAttachment, File):
    """Collection for individual microscopy files.

    Mixes in ItemWithAttachment so a microscopy file can carry an attachment.
    """
    item_type = 'file_microscopy'
    schema = load_schema('encoded:schemas/file_microscopy.json')
    # embed imaging-path and per-channel microscope settings for display
    embedded_list = File.embedded_list + [
        "experiments.@type",
        "experiments.imaging_paths.channel",
        "experiments.imaging_paths.path",
        "experiments.files.microscope_settings.ch00_light_source_center_wl",
        "experiments.files.microscope_settings.ch01_light_source_center_wl",
        "experiments.files.microscope_settings.ch02_light_source_center_wl",
        "experiments.files.microscope_settings.ch03_light_source_center_wl",
        "experiments.files.microscope_settings.ch00_lasers_diodes",
        "experiments.files.microscope_settings.ch01_lasers_diodes",
        "experiments.files.microscope_settings.ch02_lasers_diodes",
        "experiments.files.microscope_settings.ch03_lasers_diodes"
    ]
    name_key = 'accession'
@view_config(name='upload', context=File, request_method='GET',
             permission='edit')
def get_upload(context, request):
    """Return the stored upload credentials for this file and its extra files."""
    external = context.propsheets.get('external', {})
    creds = external.get('upload_credentials')
    # Show s3 location info for files originally submitted to EDW.
    if creds is None and external.get('service') == 's3':
        creds = {
            'upload_url': 's3://{bucket}/{key}'.format(**external),
        }
    item = {
        '@id': request.resource_path(context),
        'upload_credentials': creds,
        'extra_files_creds': context.extra_files_creds(),
    }
    return {'@graph': [item]}
@view_config(name='upload', context=File, request_method='POST',
             permission='edit', validators=[schema_validator({"type": "object"})])
def post_upload(context, request):
    """Issue fresh S3 upload credentials for a file.

    Only allowed while the file's status is uploadable.  Re-uses the
    bucket/key recorded in the 'external' propsheet when present; otherwise
    derives them the same way File.build_external_creds does.  Resets
    'upload failed' back to 'uploading' and re-renders the item.
    """
    properties = context.upgrade_properties()
    if properties['status'] not in ('uploading', 'to be uploaded by workflow', 'upload failed'):
        raise HTTPForbidden('status must be "uploading" to issue new credentials')
    external = context.propsheets.get('external', None)
    if external is None:
        # Handle objects initially posted as another state.
        bucket = request.registry.settings['file_upload_bucket']
        # maybe this should be properties.uuid
        uuid = context.uuid
        file_format = get_item_if_you_can(request, properties.get('file_format'), 'file-formats')
        try:
            # Bug fix: the extension must NOT carry a leading dot here -- the
            # key template below already supplies it.  The old code prefixed
            # '.' and produced keys like '<uuid>/<accession>..bam',
            # inconsistent with File.build_external_creds.  Also guard a
            # missing 'standard_file_extension' value (None).
            file_extension = file_format.get('standard_file_extension') or ''
        except AttributeError:
            # format lookup returned None
            file_extension = ''
        key = '{uuid}/{accession}.{file_extension}'.format(
            file_extension=file_extension, uuid=uuid, **properties)
    elif external.get('service') == 's3':
        bucket = external['bucket']
        key = external['key']
    else:
        raise ValueError(external.get('service'))
    # remove the path from the file name and only take first 32 chars
    name = None
    if properties.get('filename'):
        name = properties.get('filename').split('/')[-1][:32]
    profile_name = request.registry.settings.get('file_upload_profile_name')
    creds = external_creds(bucket, key, name, profile_name)
    # in case we haven't uploaded a file before
    context.propsheets['external'] = creds
    new_properties = properties.copy()
    if properties['status'] == 'upload failed':
        new_properties['status'] = 'uploading'
    registry = request.registry
    registry.notify(BeforeModified(context, request))
    context.update(new_properties, {'external': creds})
    registry.notify(AfterModified(context, request))
    rendered = request.embed('/%s/@@object' % context.uuid, as_user=True)
    result = {
        'status': 'success',
        '@type': ['result'],
        '@graph': [rendered],
    }
    return result
def is_file_to_download(properties, file_format, expected_filename=None):
    """Return the canonical download filename for a file, or False.

    The filename is '<accession><.extension>', where the extension comes from
    the file_format item (omitted when the format is missing).  Returns False
    when there is no (external_)accession, or when *expected_filename* is
    given and does not match.
    """
    try:
        extension = '.' + file_format.get('standard_file_extension')
    except AttributeError:
        # file_format is None (unresolvable format): no extension
        extension = ''
    accession = properties.get('accession') or properties.get('external_accession')
    if not accession:
        return False
    filename = '{}{}'.format(accession, extension)
    if expected_filename is not None and expected_filename != filename:
        return False
    return filename
@view_config(name='download', context=File, request_method='GET',
             permission='view', subpath_segments=[0, 1])
def download(context, request):
    """Redirect (or proxy) a file download from S3, recording a tracking item.

    Resolves the requested filename against the main file and its extra_files,
    generates a presigned S3 URL, records a download_tracking TrackingItem,
    and then either returns a SoftRedirect JSON body (?soft=true), serves a
    Range request inline, or raises a 307 redirect to S3.
    """
    try:
        user_props = session_properties(request)
    except Exception as e:
        user_props = {'error': str(e)}
    tracking_values = {'user_agent': request.user_agent, 'remote_ip': request.remote_addr,
                       'user_email': user_props.get('details', {}).get('email', 'anonymous'),
                       'request_path': request.path_info}
    # proxy triggers if we should use Axel-redirect, useful for s3 range byte queries
    try:
        use_download_proxy = request.client_addr not in request.registry['aws_ipset']
    except TypeError:
        # this fails in testing due to testapp not having ip
        use_download_proxy = False
    # with extra_files the user may be trying to download the main file
    # or one of the files in extra files, the following logic will
    # search to find the "right" file and redirect to a download link for that one
    properties = context.upgrade_properties()
    file_format = get_item_if_you_can(request, properties.get('file_format'), 'file-formats')
    _filename = None
    if request.subpath:
        # single optional subpath segment carries the requested filename
        _filename, = request.subpath
    filename = is_file_to_download(properties, file_format, _filename)
    if not filename:
        # not the main file -- search extra_files for a match
        found = False
        for extra in properties.get('extra_files', []):
            eformat = get_item_if_you_can(request, extra.get('file_format'), 'file-formats')
            filename = is_file_to_download(extra, eformat, _filename)
            if filename:
                found = True
                properties = extra
                # extra-file credentials live in an 'external<format-uuid>' propsheet
                external = context.propsheets.get('external' + eformat.get('uuid'))
                # NOTE(review): this None-check comes after eformat.get() calls
                # above would already have raised if eformat were None -- looks
                # like dead-guard ordering; confirm before relying on it
                if eformat is not None:
                    tracking_values['file_format'] = eformat.get('file_format')
                break
        if not found:
            raise HTTPNotFound(_filename)
    else:
        external = context.propsheets.get('external', {})
        if file_format is not None:
            tracking_values['file_format'] = file_format.get('file_format')
    tracking_values['filename'] = filename
    if not external:
        # no stored credentials yet -- derive the s3 location on the fly
        external = context.build_external_creds(request.registry, context.uuid, properties)
    if external.get('service') == 's3':
        conn = boto3.client('s3')
        param_get_object = {
            'Bucket': external['bucket'],
            'Key': external['key'],
            'ResponseContentDisposition': "attachment; filename=" + filename
        }
        if 'Range' in request.headers:
            tracking_values['range_query'] = True
            param_get_object.update({'Range': request.headers.get('Range')})
        else:
            tracking_values['range_query'] = False
        # presigned URL valid for 36 hours
        location = conn.generate_presigned_url(
            ClientMethod='get_object',
            Params=param_get_object,
            ExpiresIn=36*60*60
        )
    else:
        raise ValueError(external.get('service'))
    # get the experiment type associated with this file
    experiments_using_file = context.experiments(request)
    found_experiment_type = None
    for file_experiment in experiments_using_file:
        exp_info = get_item_if_you_can(request, file_experiment)
        if exp_info is None:
            break
        exp_type = exp_info.get('experiment_type')
        if found_experiment_type is None or found_experiment_type == exp_type:
            found_experiment_type = exp_type
        else:  # conflicting experiment types
            found_experiment_type = 'Multiple types'
            break
    tracking_values['experiment_type'] = found_experiment_type
    tracking_values['is_visualization'] = False
    # create a tracking_item to track this download
    tracking_item = {'date_created': datetime.datetime.now(datetime.timezone.utc),
                     'status': 'in review by lab', 'tracking_type': 'download_tracking',
                     'download_tracking': tracking_values}
    TrackingItem.create_and_commit(request, tracking_item)
    if asbool(request.params.get('soft')):
        # soft redirect: return the URL and its expiry instead of a 307
        expires = int(parse_qs(urlparse(location).query)['Expires'][0])
        return {
            '@type': ['SoftRedirect'],
            'location': location,
            'expires': datetime.datetime.fromtimestamp(expires, pytz.utc).isoformat(),
        }
    if 'Range' in request.headers:
        try:
            response_body = conn.get_object(**param_get_object)
        except Exception as e:
            raise e
        response_dict = {
            'body': response_body.get('Body').read(),
            # status_code: 206 if partial, 200 if the range covers the whole file
            'status_code': response_body.get('ResponseMetadata').get('HTTPStatusCode'),
            'accept_ranges': response_body.get('AcceptRanges'),
            'content_length': response_body.get('ContentLength'),
            'content_range': response_body.get('ContentRange')
        }
        return Response(**response_dict)
    # We don't use X-Accel-Redirect here so that client behaviour is similar for
    # both aws and non-aws users.
    if use_download_proxy:
        location = request.registry.settings.get('download_proxy', '') + str(location)
    # 307 redirect specifies to keep original method
    raise HTTPTemporaryRedirect(location=location)
def validate_file_format_validity_for_file_type(context, request):
    """Check if the specified file format (e.g. fastq) is allowed for the
    file type (e.g. FileFastq).
    """
    data = request.json
    if 'file_format' not in data:
        # nothing to check
        request.validated.update({})
        return
    format_item = get_item_if_you_can(request, data['file_format'], 'file-formats')
    if not format_item:
        # item level validation will take care of generating the error
        return
    format_name = format_item['file_format']
    allowed_types = format_item.get('valid_item_types', [])
    item_type_name = context.type_info.name
    if item_type_name not in allowed_types:
        msg = 'File format {} is not allowed for {}'.format(format_name, item_type_name)
        request.errors.add('body', None, msg)
def validate_file_filename(context, request):
    ''' validator for filename field

    Verifies that the (incoming or existing) filename ends with one of the
    extensions allowed by the file's file_format item.  Formats named
    'other' accept any filename.
    '''
    found_match = False
    data = request.json
    if 'filename' not in data:
        # see if there is an existing file_name
        filename = context.properties.get('filename')
        if not filename:
            # nothing to validate
            return
    else:
        filename = data['filename']
    # file_format may come from the payload or from the stored item (PATCH)
    ff = data.get('file_format')
    if not ff:
        ff = context.properties.get('file_format')
    file_format_item = get_item_if_you_can(request, ff, 'file-formats')
    if not file_format_item:
        msg = 'Problem getting file_format for %s' % filename
        request.errors.add('body', None, msg)
        return
    msg = None
    try:
        # allowed extensions: standard plus any 'other_allowed_extensions'
        file_extensions = [file_format_item.get('standard_file_extension')]
        if file_format_item.get('other_allowed_extensions'):
            file_extensions.extend(file_format_item.get('other_allowed_extensions'))
        file_extensions = list(set(file_extensions))
    except (AttributeError, TypeError):
        msg = 'Problem getting file_format for %s' % filename
    else:
        if file_format_item.get('file_format') == 'other':
            # 'other' format accepts any filename
            found_match = True
        elif not file_extensions:  # this shouldn't happen
            pass
        for extension in file_extensions:
            # match '.<extension>' at the end of the filename
            if filename[-(len(extension) + 1):] == '.' + extension:
                found_match = True
                break
    if found_match:
        request.validated.update({})
    else:
        if not msg:
            msg = ["'." + ext + "'" for ext in file_extensions]
            msg = ', '.join(msg)
            msg = 'Filename %s extension does not agree with specified file format. Valid extension(s): %s' % (filename, msg)
        request.errors.add('body', None, msg)
def validate_processed_file_unique_md5_with_bypass(context, request):
    '''validator to check md5 on processed files, unless you tell it
    not to (?force_md5 in the query string bypasses the check)'''
    # skip validator if not file processed
    if context.type_info.item_type != 'file_processed':
        return
    data = request.json
    if 'md5sum' not in data or not data['md5sum']:
        return
    if 'force_md5' in request.query_string:
        # explicit bypass requested
        return
    # we can of course patch / put to ourselves the same md5 we previously had
    if context.properties.get('md5sum') == data['md5sum']:
        return
    if ELASTIC_SEARCH in request.registry:
        # check for a duplicate via a search subrequest
        search = make_search_subreq(request, '/search/?type=File&md5sum=%s' % data['md5sum'])
        search_resp = request.invoke_subrequest(search, True)
        if search_resp.status_int < 400:
            # already got this md5
            found = search_resp.json['@graph'][0]['accession']
            request.errors.add('body', None, 'md5sum %s already exists for accession %s' %
                               (data['md5sum'], found))
    else:  # find it in the database
        conn = request.registry['connection']
        res = conn.get_by_json('md5sum', data['md5sum'], 'file_processed')
        if res is not None:
            # md5 already exists
            found = res.properties['accession']
            request.errors.add('body', None, 'md5sum %s already exists for accession %s' %
                               (data['md5sum'], found))
def validate_processed_file_produced_from_field(context, request):
    '''validator to make sure that the values in the
    produced_from field are valid file identifiers'''
    # only processed files carry a produced_from field
    if context.type_info.item_type != 'file_processed':
        return
    data = request.json
    if 'produced_from' not in data:
        return
    all_ok = True
    for idx, identifier in enumerate(data['produced_from']):
        try:
            found_uuid = get_item_if_you_can(request, identifier, 'files').get('uuid')
        except AttributeError:
            # lookup returned None -- treat the same as a missing uuid
            found_uuid = None
        if not found_uuid:
            all_ok = False
            request.errors.add('body', ['produced_from', idx], "'%s' not found" % identifier)
    if all_ok:
        request.validated.update({})
def validate_extra_file_format(context, request):
    '''validator to check to be sure that file_format of extrafile is not the
    same as the file and is a known format for the schema
    '''
    files_ok = True
    data = request.json
    if 'extra_files' not in data:
        return
    extras = data['extra_files']
    # post should always have file_format as it is required patch may or may not
    ff = data.get('file_format')
    if not ff:
        ff = context.properties.get('file_format')
    file_format_item = get_item_if_you_can(request, ff, 'file-formats')
    if not file_format_item or 'standard_file_extension' not in file_format_item:
        request.errors.add('body', None, "Can't find parent file format for extra_files")
        return
    parent_format = file_format_item['uuid']
    schema_eformats = file_format_item.get('extrafile_formats')
    if not schema_eformats:  # means this parent file shouldn't have any extra files
        request.errors.add('body', None, "File with format %s should not have extra_files" % file_format_item.get('file_format'))
        return
    else:
        # resolve the allowed extra-file formats to uuids
        valid_ext_formats = []
        for ok_format in schema_eformats:
            ok_format_item = get_item_if_you_can(request, ok_format, 'file-formats')
            try:
                off_uuid = ok_format_item.get('uuid')
            except AttributeError:
                # Bug fix: the original code did `raise "<string>"`, which is a
                # TypeError in Python 3 (exceptions must derive from
                # BaseException) -- raise a real Exception instead.
                raise Exception(
                    "FileFormat Item %s contains unknown FileFormats"
                    " in the extrafile_formats property" % file_format_item.get('uuid'))
            valid_ext_formats.append(off_uuid)
    seen_ext_formats = []
    # each extra file must have a known, unique format different from the parent's
    for i, ef in enumerate(extras):
        eformat = ef.get('file_format')
        if eformat is None:
            return  # will fail the required extra_file.file_format
        eformat_item = get_item_if_you_can(request, eformat, 'file-formats')
        try:
            ef_uuid = eformat_item.get('uuid')
        except AttributeError:
            request.errors.add('body', ['extra_files', i], "'%s' not a valid or known file format" % eformat)
            files_ok = False
            break
        if ef_uuid in seen_ext_formats:
            # fixed typo in message: was 'Multple'
            request.errors.add('body', ['extra_files', i], "Multiple extra files with '%s' format cannot be submitted at the same time" % eformat)
            files_ok = False
            break
        else:
            seen_ext_formats.append(ef_uuid)
        if ef_uuid == parent_format:
            request.errors.add('body', ['extra_files', i], "'%s' format cannot be the same for file and extra_file" % file_format_item.get('file_format'))
            files_ok = False
            break
        if ef_uuid not in valid_ext_formats:
            request.errors.add('body', ['extra_files', i], "'%s' not a valid extrafile_format for '%s'" % (eformat, file_format_item.get('file_format')))
            files_ok = False
    if files_ok:
        request.validated.update({})
# POST view for files: standard content validation plus the file-specific
# filename / format / md5 / produced_from validators defined above.
@view_config(context=File.Collection, permission='add', request_method='POST',
             validators=[validate_item_content_post,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
def file_add(context, request, render=None):
    """Create a File item (delegates to the shared collection_add)."""
    return collection_add(context, request, render)
# PUT/PATCH views for files: same file-specific validators as file_add.
@view_config(context=File, permission='edit', request_method='PUT',
             validators=[validate_item_content_put,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
@view_config(context=File, permission='edit', request_method='PATCH',
             validators=[validate_item_content_patch,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
def file_edit(context, request, render=None):
    """Update a File item (delegates to the shared item_edit)."""
    return item_edit(context, request, render)
When a download tracking record spans multiple experiment types, label it as integrative analysis
from snovault import (
AfterModified,
BeforeModified,
CONNECTION,
calculated_property,
collection,
load_schema,
abstract_collection,
)
from snovault.schema_utils import schema_validator
from snovault.validators import (
validate_item_content_post,
validate_item_content_put,
validate_item_content_patch
)
from snovault.attachment import ItemWithAttachment
from .base import (
Item,
collection_add,
item_edit,
ALLOW_SUBMITTER_ADD,
get_item_if_you_can,
lab_award_attribution_embed_list
)
from pyramid.httpexceptions import (
HTTPForbidden,
HTTPTemporaryRedirect,
HTTPNotFound,
)
from pyramid.response import Response
from pyramid.settings import asbool
from pyramid.view import view_config
from urllib.parse import (
parse_qs,
urlparse,
)
import boto3
from botocore.exceptions import ClientError
import datetime
import json
import pytz
import os
from pyramid.traversal import resource_path
from encoded.search import make_search_subreq
from snovault.elasticsearch import ELASTIC_SEARCH
from . import TrackingItem
from ..authentication import session_properties
import logging
# silence boto3's very chatty logging
logging.getLogger('boto3').setLevel(logging.CRITICAL)
log = logging.getLogger(__name__)

# Elastic Beanstalk environment file; sourced to recover AWS credentials
BEANSTALK_ENV_PATH = "/opt/python/current/env"

# embed paths pulled in for files referenced as workflow-run inputs
file_workflow_run_embeds = [
    'workflow_run_inputs.workflow.title',
    'workflow_run_inputs.input_files.workflow_argument_name',
    'workflow_run_inputs.input_files.value.filename',
    'workflow_run_inputs.input_files.value.display_title',
    'workflow_run_inputs.input_files.value.file_format',
    'workflow_run_inputs.input_files.value.uuid',
    'workflow_run_inputs.input_files.value.accession',
    'workflow_run_inputs.output_files.workflow_argument_name',
    'workflow_run_inputs.output_files.value.display_title',
    'workflow_run_inputs.output_files.value.file_format',
    'workflow_run_inputs.output_files.value.uuid',
    'workflow_run_inputs.output_files.value.accession',
    'workflow_run_inputs.output_quality_metrics.name',
    'workflow_run_inputs.output_quality_metrics.value.uuid'
]
# processed files additionally embed the mirror-image output-side paths
file_workflow_run_embeds_processed = file_workflow_run_embeds + [e.replace('workflow_run_inputs.', 'workflow_run_outputs.') for e in file_workflow_run_embeds]
def show_upload_credentials(request=None, context=None, status=None):
    """Condition for credential-bearing calculated properties: expose upload
    credentials only for uploadable statuses and only to users who may edit."""
    uploadable = ('uploading', 'to be uploaded by workflow', 'upload failed')
    if request is None or status not in uploadable:
        return False
    return request.has_permission('edit', context)
def force_beanstalk_env(profile_name, config_file=None):
    """Return a boto3 STS client, recovering AWS credentials from the
    Elastic Beanstalk environment file when running on EB.

    If the env file exists and AWS_ACCESS_KEY_ID is not already set, the file
    is sourced in a subshell and its variables copied into os.environ.
    """
    # set env variables if we are on elasticbeanstalk
    if not config_file:
        config_file = BEANSTALK_ENV_PATH
    if os.path.exists(config_file):
        if not os.environ.get("AWS_ACCESS_KEY_ID"):
            import subprocess
            # source the env file and dump the resulting environment
            command = ['bash', '-c', 'source ' + config_file + ' && env']
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, universal_newlines=True)
            for line in proc.stdout:
                key, _, value = line.partition("=")
                # value[:-1] strips the trailing newline from each env line
                os.environ[key] = value[:-1]
            proc.communicate()
    conn = boto3.client('sts', aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
                        aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
    return conn
def external_creds(bucket, key, name=None, profile_name=None):
    '''Build the 'external' propsheet describing an S3 location.

    if name is None, we want the link to s3 but no need to generate
    an access token. This is useful for linking metadata to files that
    already exist on s3.  Otherwise an STS federation token scoped to
    PutObject on exactly this key is generated.
    '''
    # Removed the redundant per-call `import logging` / setLevel: the module
    # already configures boto3 logging once at import time.
    credentials = {}
    if name is not None:
        # policy allowing upload (PutObject) of exactly this object key
        policy = {
            'Version': '2012-10-17',
            'Statement': [
                {
                    'Effect': 'Allow',
                    'Action': 's3:PutObject',
                    'Resource': 'arn:aws:s3:::{bucket}/{key}'.format(bucket=bucket, key=key),
                }
            ]
        }
        conn = force_beanstalk_env(profile_name)
        token = conn.get_federation_token(Name=name, Policy=json.dumps(policy))
        # 'access_key' 'secret_key' 'expiration' 'session_token'
        credentials = token.get('Credentials')
        credentials.update({
            'upload_url': 's3://{bucket}/{key}'.format(bucket=bucket, key=key),
            'federated_user_arn': token.get('FederatedUser').get('Arn'),
            'federated_user_id': token.get('FederatedUser').get('FederatedUserId'),
            'request_id': token.get('ResponseMetadata').get('RequestId'),
            'key': key
        })
    return {
        'service': 's3',
        'bucket': bucket,
        'key': key,
        'upload_credentials': credentials,
    }
def property_closure(request, propname, root_uuid):
    """Return the transitive closure of *propname* links starting at root_uuid.

    Breadth-first walk over uuids; the visited set guards against cycles.
    """
    conn = request.registry[CONNECTION]
    visited = set()
    frontier = {str(root_uuid)}
    while frontier:
        visited.update(frontier)
        discovered = set()
        for uuid in frontier:
            item = conn.get_by_uuid(uuid)
            discovered.update(item.__json__(request).get(propname, ()))
        # only expand uuids we have not seen yet
        frontier = discovered - visited
    return visited
@collection(
    name='file-sets',
    unique_key='accession',
    properties={
        'title': 'File Sets',
        'description': 'Listing of File Sets',
    })
class FileSet(Item):
    """Collection of files stored under fileset."""
    item_type = 'file_set'
    schema = load_schema('encoded:schemas/file_set.json')
    name_key = 'accession'
    embedded_list = []
@collection(
    name='file-set-calibrations',
    unique_key='accession',
    properties={
        'title': 'Calibration File Sets',
        'description': 'Listing of File Sets',
    })
class FileSetCalibration(FileSet):
    """Collection of calibration files stored under fileset."""
    # keep 'FileSet' in @type ancestry even though we subclass FileSet
    base_types = ['FileSet'] + Item.base_types
    item_type = 'file_set_calibration'
    schema = load_schema('encoded:schemas/file_set_calibration.json')
    name_key = 'accession'
    # embed summary fields of the member files for display
    embedded_list = ['files_in_set.submitted_by.job_title',
                     'files_in_set.lab.title',
                     'files_in_set.accession',
                     'files_in_set.href',
                     'files_in_set.file_size',
                     'files_in_set.upload_key',
                     'files_in_set.file_format.file_format',
                     'files_in_set.file_classification'
                     ]
@collection(
    name='file-set-microscope-qcs',
    unique_key='accession',
    properties={
        'title': 'Microscope QC File Sets',
        'description': 'Listing of File Sets',
    })
class FileSetMicroscopeQc(ItemWithAttachment, FileSet):
    """Collection of microscope QC files stored under fileset.

    Mixes in ItemWithAttachment so the set can carry an attachment.
    """
    # keep 'FileSet' in @type ancestry even though we subclass FileSet
    base_types = ['FileSet'] + Item.base_types
    item_type = 'file_set_microscope_qc'
    schema = load_schema('encoded:schemas/file_set_microscope_qc.json')
    name_key = 'accession'
    # embed summary fields of the member files for display
    embedded_list = [
        'files_in_set.submitted_by.job_title',
        'files_in_set.lab.title',
        'files_in_set.accession',
        'files_in_set.href',
        'files_in_set.file_size',
        'files_in_set.upload_key',
        'files_in_set.file_format.file_format',
        'files_in_set.file_classification'
    ]
@abstract_collection(
    name='files',
    unique_key='accession',
    acl=ALLOW_SUBMITTER_ADD,
    properties={
        'title': 'Files',
        'description': 'Listing of Files',
    })
class File(Item):
    """Abstract base collection for individual files.

    Concrete subclasses (fastq, processed, reference, ...) override
    item_type, schema, embedded_list and rev as needed.
    """
    item_type = 'file'
    base_types = ['File'] + Item.base_types
    schema = load_schema('encoded:schemas/file.json')
    # embeds shared by all file types: experiment / biosample context plus
    # format and related-file summaries
    embedded_list = lab_award_attribution_embed_list + [
        'experiments.display_title',
        'experiments.accession',
        'experiments.experiment_type',
        'experiments.experiment_sets.accession',
        'experiments.experiment_sets.experimentset_type',
        'experiments.experiment_sets.@type',
        'experiments.biosample.biosource.display_title',
        'experiments.biosample.biosource.biosource_type',
        'experiments.biosample.biosource_summary',
        'experiments.biosample.modifications_summary',
        'experiments.biosample.treatments_summary',
        'experiments.biosample.biosource.individual.organism.name',
        'experiments.digestion_enzyme.name',
        'file_format.file_format',
        'related_files.relationship_type',
        'related_files.file.accession'
    ]
    name_key = 'accession'
    # reverse link: experiments that list this file in their 'files'
    rev = {
        'experiments': ('Experiment', 'files'),
    }
@calculated_property(schema={
    "title": "Experiments",
    "description": "Experiments that this file is associated with",
    "type": "array",
    "items": {
        "title": "Experiments",
        "type": ["string", "object"],
        "linkTo": "Experiment"
    }
})
def experiments(self, request):
    """@ids of experiments whose 'files' list includes this file."""
    return self.rev_link_atids(request, "experiments")
@calculated_property(schema={
    "title": "Display Title",
    "description": "Name of this File",
    "type": "string"
})
def display_title(self, request, file_format, accession=None, external_accession=None):
    """Accession (or external accession) plus the format's standard extension."""
    accession = accession or external_accession
    format_item = get_item_if_you_can(request, file_format, 'file-formats')
    try:
        extension = '.' + format_item.get('standard_file_extension')
    except AttributeError:
        # format could not be resolved: no extension
        extension = ''
    return '{}{}'.format(accession, extension)
@calculated_property(schema={
    "title": "File Type",
    "description": "Type of File",
    "type": "string"
})
def file_type_detailed(self, request, file_format, file_type=None):
    """File type with the format name appended, e.g. "alignments (bam)"."""
    label = file_type or 'other'
    format_item = get_item_if_you_can(request, file_format, 'file-formats')
    try:
        label = label + ' (' + format_item.get('file_format') + ')'
    except AttributeError:
        # format could not be resolved: leave the bare type label
        pass
    return label
def _update(self, properties, sheets=None):
    """Persist *properties*, maintaining S3 credential propsheets, extra-file
    bookkeeping, and symmetric related_files links on referenced files."""
    if not properties:
        return
    # ensure we always have s3 links setup
    sheets = {} if sheets is None else sheets.copy()
    uuid = self.uuid
    old_creds = self.propsheets.get('external', None)
    new_creds = old_creds
    # don't get new creds
    if properties.get('status', None) in ('uploading', 'to be uploaded by workflow',
                                          'upload failed'):
        new_creds = self.build_external_creds(self.registry, uuid, properties)
        sheets['external'] = new_creds
    # handle extra files
    updated_extra_files = []
    extra_files = properties.get('extra_files', [])
    if extra_files:
        # get @id for parent file
        try:
            at_id = resource_path(self)
        except:
            at_id = "/" + str(uuid) + "/"
        # ensure at_id ends with a slash
        if not at_id.endswith('/'):
            at_id += '/'
        file_formats = []
        for xfile in extra_files:
            # ensure a file_format (identifier for extra_file) is given and non-null
            if not('file_format' in xfile and bool(xfile['file_format'])):
                continue
            eformat = xfile['file_format']
            # accept an @id path as well as a bare format identifier
            if eformat.startswith('/file-formats/'):
                eformat = eformat[len('/file-formats/'):-1]
            xfile_format = self.registry['collections']['FileFormat'].get(eformat)
            xff_uuid = str(xfile_format.uuid)
            if not xff_uuid:
                raise Exception("Cannot find format item for the extra file")
            if xff_uuid in file_formats:
                raise Exception("Each file in extra_files must have unique file_format")
            file_formats.append(xff_uuid)
            xfile['file_format'] = xff_uuid
            xfile['accession'] = properties.get('accession')
            # just need a filename to trigger creation of credentials
            xfile['filename'] = xfile['accession']
            xfile['uuid'] = str(uuid)
            xfile['status'] = properties.get('status')
            ext = self.build_external_creds(self.registry, uuid, xfile)
            # build href
            file_extension = xfile_format.properties.get('standard_file_extension')
            filename = '{}.{}'.format(xfile['accession'], file_extension)
            xfile['href'] = at_id + '@@download/' + filename
            xfile['upload_key'] = ext['key']
            # per-extra-file credentials propsheet, keyed by format uuid
            sheets['external' + xfile['file_format']] = ext
            updated_extra_files.append(xfile)
    if extra_files:
        properties['extra_files'] = updated_extra_files
    if old_creds:
        if old_creds.get('key') != new_creds.get('key'):
            try:
                # the s3 key changed -- delete the object at the old location
                conn = boto3.client('s3')
                bname = old_creds['bucket']
                conn.delete_object(Bucket=bname, Key=old_creds['key'])
            except Exception as e:
                print(e)
    # update self first to ensure 'related_files' are stored in self.properties
    super(File, self)._update(properties, sheets)
    # mapping of each relationship type to its inverse, used to mirror
    # related_files onto the referenced file
    DicRefRelation = {
        "derived from": "parent of",
        "parent of": "derived from",
        "supercedes": "is superceded by",
        "is superceded by": "supercedes",
        "paired with": "paired with"
    }
    acc = str(self.uuid)
    if 'related_files' in properties.keys():
        for relation in properties["related_files"]:
            try:
                switch = relation["relationship_type"]
                rev_switch = DicRefRelation[switch]
                related_fl = relation["file"]
                relationship_entry = {"relationship_type": rev_switch, "file": acc}
                rel_dic = {'related_files': [relationship_entry, ]}
            except:
                # malformed relation or unknown relationship type: give up on
                # mirroring (the item itself was already updated above)
                print("invalid data, can't update correctly")
                return
            target_fl = self.collection.get(related_fl)
            # case one we don't have relations
            if 'related_files' not in target_fl.properties.keys():
                target_fl.properties.update(rel_dic)
                target_fl.update(target_fl.properties)
            else:
                # case two we have relations but not the one we need
                for target_relation in target_fl.properties['related_files']:
                    if target_relation.get('file') == acc:
                        # inverse link already present; nothing to do
                        break
                else:
                    # make data for new related_files
                    target_fl.properties['related_files'].append(relationship_entry)
                    target_fl.update(target_fl.properties)
@property
def __name__(self):
    """Item name used in resource paths: the name_key (accession) when
    available, falling back to the uuid; replaced items always use the uuid."""
    props = self.upgrade_properties()
    replaced = props.get('status') == 'replaced'
    name = None if replaced else props.get(self.name_key, None)
    return name or self.uuid
def unique_keys(self, properties):
    """Extend the base unique keys with an ``md5:<md5sum>`` alias,
    skipped for files whose status is 'replaced'."""
    keys = super(File, self).unique_keys(properties)
    is_replaced = properties.get('status') == 'replaced'
    if not is_replaced and 'md5sum' in properties:
        md5_alias = 'md5:' + properties['md5sum']
        keys.setdefault('alias', []).append(md5_alias)
    return keys
@calculated_property(schema={
    "title": "Title",
    "type": "string",
    "description": "Accession of this file"
})
def title(self):
    """Display title: the accession, or the external accession if none."""
    props = self.properties
    return props.get('accession', props.get('external_accession'))
@calculated_property(schema={
    "title": "Download URL",
    "type": "string",
    "description": "Use this link to download this file."
})
def href(self, request):
    """Build the @@download URL (resource path + accession + extension)."""
    fformat = get_item_if_you_can(
        request, self.properties.get('file_format'), 'file-formats')
    try:
        extension = '.' + fformat.get('standard_file_extension')
    except AttributeError:
        # format item could not be resolved; fall back to no extension
        extension = ''
    accession = self.properties.get(
        'accession', self.properties.get('external_accession'))
    return '{}@@download/{}{}'.format(
        request.resource_path(self), accession, extension)
@calculated_property(schema={
    "title": "Upload Key",
    "type": "string",
})
def upload_key(self, request):
    """Return the S3 object key this file is (or will be) uploaded under.

    Prefers the key stored in the 'external' propsheet; when none exists
    yet, builds fresh external credentials on the fly.
    """
    properties = self.properties
    external = self.propsheets.get('external', {})
    if not external:
        try:
            external = self.build_external_creds(self.registry, self.uuid, properties)
        except ClientError:
            # SECURITY FIX: do not dump os.environ into the logs -- it can
            # contain AWS keys and other secrets. Log a targeted message
            # and the item properties instead.
            log.error('failed to build external creds for file %s', self.uuid)
            log.error(self.properties)
            return 'UPLOAD KEY FAILED'
    return external['key']
@calculated_property(condition=show_upload_credentials, schema={
    "type": "object",
})
def upload_credentials(self):
    """Expose stored upload credentials from the 'external' propsheet, if any."""
    external = self.propsheets.get('external')
    if external is None:
        return None
    return external['upload_credentials']
@calculated_property(condition=show_upload_credentials, schema={
    "type": "object",
})
def extra_files_creds(self):
    """Return each extra file annotated with its upload credentials.

    Extra files with an unresolvable file format, or with no stored
    'external<uuid>' propsheet, are skipped.
    """
    external = self.propsheets.get('external', None)
    if external is not None:
        extras = []
        for extra in self.properties.get('extra_files', []):
            eformat = extra.get('file_format')
            xfile_format = self.registry['collections']['FileFormat'].get(eformat)
            try:
                xff_uuid = str(xfile_format.uuid)
            except AttributeError:
                print("Can't find required format uuid for %s" % eformat)
                continue
            extra_creds = self.propsheets.get('external' + xff_uuid)
            if not extra_creds:
                # BUG FIX: previously a missing propsheet crashed with
                # TypeError on None['upload_credentials']; skip instead.
                continue
            extra['upload_credentials'] = extra_creds['upload_credentials']
            extras.append(extra)
        return extras
@classmethod
def get_bucket(cls, registry):
    """Name of the S3 bucket used for raw file uploads."""
    settings = registry.settings
    return settings['file_upload_bucket']
@classmethod
def build_external_creds(cls, registry, uuid, properties):
    """Build S3 upload credentials for this file.

    Resolves the file format to find the standard extension, builds the
    object key as ``<uuid>/<accession>.<ext>`` and delegates to
    ``external_creds``.

    :raises Exception: if the file format is unknown or has no extension.
    """
    bucket = cls.get_bucket(registry)
    fformat = properties.get('file_format')
    if fformat.startswith('/file-formats/'):
        # strip '/file-formats/<name>/' down to the bare format name
        fformat = fformat[len('/file-formats/'):-1]
    prop_format = registry['collections']['FileFormat'].get(fformat)
    try:
        file_extension = prop_format.properties['standard_file_extension']
    except (AttributeError, KeyError):
        # BUG FIX: also catch AttributeError -- an unknown format makes the
        # collection lookup return None, which previously escaped as a raw
        # AttributeError instead of this informative error.
        raise Exception('File format not in list of supported file types')
    key = '{uuid}/{accession}.{file_extension}'.format(
        file_extension=file_extension, uuid=uuid,
        accession=properties.get('accession'))
    # remove the path from the file name and only take first 32 chars
    fname = properties.get('filename')
    name = fname.split('/')[-1][:32] if fname else None
    profile_name = registry.settings.get('file_upload_profile_name')
    return external_creds(bucket, key, name, profile_name)
@classmethod
def create(cls, registry, uuid, properties, sheets=None):
    """Create the item, pre-provisioning S3 upload credentials in the
    'external' propsheet for files that are awaiting upload."""
    status = properties.get('status')
    if status in ('uploading', 'to be uploaded by workflow'):
        sheets = dict(sheets) if sheets else {}
        sheets['external'] = cls.build_external_creds(registry, uuid, properties)
    return super(File, cls).create(registry, uuid, properties, sheets)
class Collection(Item.Collection):
    # Default collection behavior inherited unchanged from Item.Collection;
    # declared so File items get their own collection class.
    pass
@collection(
    name='files-fastq',
    unique_key='accession',
    properties={
        'title': 'FASTQ Files',
        'description': 'Listing of FASTQ Files',
    })
class FileFastq(File):
    """Collection for individual fastq files."""
    item_type = 'file_fastq'
    # JSON schema used to validate items of this type
    schema = load_schema('encoded:schemas/file_fastq.json')
    # embed fastqc-style quality metric fields for search/display
    embedded_list = File.embedded_list + file_workflow_run_embeds + [
        "quality_metric.overall_quality_status",
        "quality_metric.Total Sequences",
        "quality_metric.Sequence length",
        "quality_metric.url"
    ]
    # property used to build the item's resource path
    name_key = 'accession'
    # reverse links: workflow runs that consume / produce this file
    rev = dict(File.rev, **{
        'workflow_run_inputs': ('WorkflowRun', 'input_files.value'),
        'workflow_run_outputs': ('WorkflowRun', 'output_files.value'),
    })

    @calculated_property(schema={
        "title": "Input of Workflow Runs",
        "description": "All workflow runs that this file serves as an input to",
        "type": "array",
        "items": {
            "title": "Input of Workflow Run",
            "type": ["string", "object"],
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_inputs(self, request):
        # resolve the rev link declared above to a list of @ids
        return self.rev_link_atids(request, "workflow_run_inputs")

    @calculated_property(schema={
        "title": "Output of Workflow Runs",
        "description": "All workflow runs that this file serves as an output from",
        "type": "array",
        "items": {
            "title": "Output of Workflow Run",
            "type": "string",
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_outputs(self, request):
        # resolve the rev link declared above to a list of @ids
        return self.rev_link_atids(request, "workflow_run_outputs")
@collection(
    name='files-processed',
    unique_key='accession',
    properties={
        'title': 'Processed Files',
        'description': 'Listing of Processed Files',
    })
class FileProcessed(File):
    """Collection for individual processed files."""
    item_type = 'file_processed'
    schema = load_schema('encoded:schemas/file_processed.json')
    # embed pairs-qc style quality metric fields for search/display
    embedded_list = File.embedded_list + file_workflow_run_embeds_processed + [
        "quality_metric.Total reads",
        "quality_metric.Trans reads",
        "quality_metric.Cis reads (>20kb)",
        "quality_metric.Short cis reads (<20kb)",
        "quality_metric.url"
    ]
    name_key = 'accession'
    # reverse links: workflow runs, plus the experiments / experiment sets
    # that reference this file via (other_)processed_files
    rev = dict(File.rev, **{
        'workflow_run_inputs': ('WorkflowRun', 'input_files.value'),
        'workflow_run_outputs': ('WorkflowRun', 'output_files.value'),
        'experiments': ('Experiment', 'processed_files'),
        'experiment_sets': ('ExperimentSet', 'processed_files'),
        'other_experiments': ('Experiment', 'other_processed_files.files'),
        'other_experiment_sets': ('ExperimentSet', 'other_processed_files.files')
    })

    @classmethod
    def get_bucket(cls, registry):
        # processed files upload to the workflow-output bucket, overriding
        # the raw upload bucket used by the base File class
        return registry.settings['file_wfout_bucket']

    @calculated_property(schema={
        "title": "Input of Workflow Runs",
        "description": "All workflow runs that this file serves as an input to",
        "type": "array",
        "items": {
            "title": "Input of Workflow Run",
            "type": ["string", "object"],
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_inputs(self, request):
        return self.rev_link_atids(request, "workflow_run_inputs")

    @calculated_property(schema={
        "title": "Output of Workflow Runs",
        "description": "All workflow runs that this file serves as an output from",
        "type": "array",
        "items": {
            "title": "Output of Workflow Run",
            "type": "string",
            "linkTo": "WorkflowRun"
        }
    })
    def workflow_run_outputs(self, request):
        return self.rev_link_atids(request, "workflow_run_outputs")

    @calculated_property(schema={
        "title": "Experiment Sets",
        "description": "All Experiment Sets that this file belongs to",
        "type": "array",
        "items": {
            "title": "Experiment Set",
            "type": "string",
            "linkTo": "ExperimentSet"
        }
    })
    def experiment_sets(self, request):
        # union of direct processed_files links and other_processed_files links
        return self.rev_link_atids(request, "experiment_sets") + self.rev_link_atids(request, "other_experiment_sets")

    @calculated_property(schema={
        "title": "Experiments",
        "description": "Experiments that this file belongs to",
        "type": "array",
        "items": {
            "title": "Experiment",
            "type": "string",
            "linkTo": "Experiment"
        }
    })
    def experiments(self, request):
        # union of direct processed_files links and other_processed_files links
        return self.rev_link_atids(request, "experiments") + self.rev_link_atids(request, "other_experiments")

    # processed files don't want md5 as unique key
    def unique_keys(self, properties):
        keys = super(FileProcessed, self).unique_keys(properties)
        if keys.get('alias'):
            # strip the md5-based alias that the base File class adds
            keys['alias'] = [k for k in keys['alias'] if not k.startswith('md5:')]
        return keys
@collection(
    name='files-reference',
    unique_key='accession',
    properties={
        # typo fix: "Refenrence" -> "Reference"
        'title': 'Reference Files',
        'description': 'Listing of Reference Files',
    })
class FileReference(File):
    """Collection for individual reference files."""
    item_type = 'file_reference'
    schema = load_schema('encoded:schemas/file_reference.json')
    embedded_list = File.embedded_list
    name_key = 'accession'
@collection(
    name='files-calibration',
    unique_key='accession',
    properties={
        'title': 'Calibration Files',
        'description': 'Listing of Calibration Files',
    })
class FileCalibration(ItemWithAttachment, File):
    """Collection for individual calibration files."""
    # ItemWithAttachment mixin adds attachment (blob) handling to File
    item_type = 'file_calibration'
    schema = load_schema('encoded:schemas/file_calibration.json')
    embedded_list = File.embedded_list
    name_key = 'accession'
@collection(
    name='files-microscopy',
    unique_key='accession',
    properties={
        'title': 'Microscopy Files',
        'description': 'Listing of Microscopy Files',
    })
class FileMicroscopy(ItemWithAttachment, File):
    """Collection for individual microscopy files."""
    # ItemWithAttachment mixin adds attachment (blob) handling to File
    item_type = 'file_microscopy'
    schema = load_schema('encoded:schemas/file_microscopy.json')
    # embed imaging path and per-channel microscope settings for display
    embedded_list = File.embedded_list + [
        "experiments.@type",
        "experiments.imaging_paths.channel",
        "experiments.imaging_paths.path",
        "experiments.files.microscope_settings.ch00_light_source_center_wl",
        "experiments.files.microscope_settings.ch01_light_source_center_wl",
        "experiments.files.microscope_settings.ch02_light_source_center_wl",
        "experiments.files.microscope_settings.ch03_light_source_center_wl",
        "experiments.files.microscope_settings.ch00_lasers_diodes",
        "experiments.files.microscope_settings.ch01_lasers_diodes",
        "experiments.files.microscope_settings.ch02_lasers_diodes",
        "experiments.files.microscope_settings.ch03_lasers_diodes"
    ]
    name_key = 'accession'
@view_config(name='upload', context=File, request_method='GET',
             permission='edit')
def get_upload(context, request):
    """Return the file's current upload credentials plus extra-file creds."""
    external = context.propsheets.get('external', {})
    creds = external.get('upload_credentials')
    if creds is None and external.get('service') == 's3':
        # Show s3 location info for files originally submitted to EDW.
        creds = {
            'upload_url': 's3://{bucket}/{key}'.format(**external),
        }
    item = {
        '@id': request.resource_path(context),
        'upload_credentials': creds,
        'extra_files_creds': context.extra_files_creds(),
    }
    return {'@graph': [item]}
@view_config(name='upload', context=File, request_method='POST',
             permission='edit', validators=[schema_validator({"type": "object"})])
def post_upload(context, request):
    """Issue fresh S3 upload credentials for a file pending upload.

    Only permitted while status is 'uploading', 'to be uploaded by workflow'
    or 'upload failed'; 'upload failed' is flipped back to 'uploading'.
    Stores the new creds in the 'external' propsheet and returns the
    re-rendered item.
    """
    properties = context.upgrade_properties()
    if properties['status'] not in ('uploading', 'to be uploaded by workflow', 'upload failed'):
        raise HTTPForbidden('status must be "uploading" to issue new credentials')
    # NOTE(review): this variable is never used below -- leftover?
    accession_or_external = properties.get('accession')
    external = context.propsheets.get('external', None)
    if external is None:
        # Handle objects initially posted as another state.
        bucket = request.registry.settings['file_upload_bucket']
        # maybe this should be properties.uuid
        uuid = context.uuid
        file_format = get_item_if_you_can(request, properties.get('file_format'), 'file-formats')
        try:
            file_extension = '.' + file_format.get('standard_file_extension')
        except AttributeError:
            # format item could not be resolved; key gets no extension
            file_extension = ''
        # NOTE(review): file_extension already starts with '.' and the format
        # string inserts another '.', producing "<accession>..<ext>" -- this
        # differs from build_external_creds' key; confirm intended.
        key = '{uuid}/{accession}.{file_extension}'.format(
            file_extension=file_extension, uuid=uuid, **properties)
    elif external.get('service') == 's3':
        # reuse the previously provisioned bucket/key
        bucket = external['bucket']
        key = external['key']
    else:
        raise ValueError(external.get('service'))
    # remove the path from the file name and only take first 32 chars
    name = None
    if properties.get('filename'):
        name = properties.get('filename').split('/')[-1][:32]
    profile_name = request.registry.settings.get('file_upload_profile_name')
    creds = external_creds(bucket, key, name, profile_name)
    # in case we haven't uploaded a file before
    context.propsheets['external'] = creds
    new_properties = properties.copy()
    if properties['status'] == 'upload failed':
        new_properties['status'] = 'uploading'
    registry = request.registry
    # fire lifecycle events around the update so subscribers (indexing
    # etc.) see the change
    registry.notify(BeforeModified(context, request))
    context.update(new_properties, {'external': creds})
    registry.notify(AfterModified(context, request))
    rendered = request.embed('/%s/@@object' % context.uuid, as_user=True)
    result = {
        'status': 'success',
        '@type': ['result'],
        '@graph': [rendered],
    }
    return result
def is_file_to_download(properties, file_format, expected_filename=None):
    """Resolve the canonical download filename for a file item.

    Builds ``<accession><.extension>`` from the item properties and the
    file-format item. Returns the filename, or False when the item has no
    (external) accession or when ``expected_filename`` is given and does
    not match.
    """
    accession = properties.get('accession') or properties.get('external_accession')
    if not accession:
        return False
    try:
        extension = '.' + file_format.get('standard_file_extension')
    except AttributeError:
        # unresolved format item: filename is the bare accession
        extension = ''
    filename = accession + extension
    if expected_filename is not None and expected_filename != filename:
        return False
    return filename
@view_config(name='download', context=File, request_method='GET',
             permission='view', subpath_segments=[0, 1])
def download(context, request):
    """Download endpoint: redirect to a presigned S3 URL for this file.

    An optional subpath segment names the file explicitly; the name is
    matched against the main file and each of its ``extra_files``. Records
    a download_tracking TrackingItem, serves Range requests by proxying
    the bytes directly, and supports a ``soft=true`` JSON redirect.
    """
    # best-effort session lookup; anonymous / failed lookups are recorded
    try:
        user_props = session_properties(request)
    except Exception as e:
        user_props = {'error': str(e)}
    tracking_values = {'user_agent': request.user_agent, 'remote_ip': request.remote_addr,
                       'user_email': user_props.get('details', {}).get('email', 'anonymous'),
                       'request_path': request.path_info}
    # proxy triggers if we should use Axel-redirect, useful for s3 range byte queries
    try:
        use_download_proxy = request.client_addr not in request.registry['aws_ipset']
    except TypeError:
        # this fails in testing due to testapp not having ip
        use_download_proxy = False
    # with extra_files the user may be trying to download the main file
    # or one of the files in extra files, the following logic will
    # search to find the "right" file and redirect to a download link for that one
    properties = context.upgrade_properties()
    file_format = get_item_if_you_can(request, properties.get('file_format'), 'file-formats')
    _filename = None
    if request.subpath:
        _filename, = request.subpath
    filename = is_file_to_download(properties, file_format, _filename)
    if not filename:
        found = False
        for extra in properties.get('extra_files', []):
            eformat = get_item_if_you_can(request, extra.get('file_format'), 'file-formats')
            filename = is_file_to_download(extra, eformat, _filename)
            if filename:
                found = True
                properties = extra
                # NOTE(review): if eformat were None this line would raise
                # AttributeError before the None check below -- confirm
                # extra-file formats always resolve here.
                external = context.propsheets.get('external' + eformat.get('uuid'))
                if eformat is not None:
                    tracking_values['file_format'] = eformat.get('file_format')
                break
        if not found:
            raise HTTPNotFound(_filename)
    else:
        external = context.propsheets.get('external', {})
        if file_format is not None:
            tracking_values['file_format'] = file_format.get('file_format')
    tracking_values['filename'] = filename
    if not external:
        # no stored creds yet; derive bucket/key the same way uploads do
        external = context.build_external_creds(request.registry, context.uuid, properties)
    if external.get('service') == 's3':
        conn = boto3.client('s3')
        param_get_object = {
            'Bucket': external['bucket'],
            'Key': external['key'],
            'ResponseContentDisposition': "attachment; filename=" + filename
        }
        if 'Range' in request.headers:
            tracking_values['range_query'] = True
            param_get_object.update({'Range': request.headers.get('Range')})
        else:
            tracking_values['range_query'] = False
        # presigned URL valid for 36 hours
        location = conn.generate_presigned_url(
            ClientMethod='get_object',
            Params=param_get_object,
            ExpiresIn=36*60*60
        )
    else:
        raise ValueError(external.get('service'))
    # get the experiment type associated with this file
    experiments_using_file = context.experiments(request)
    found_experiment_type = None
    for file_experiment in experiments_using_file:
        exp_info = get_item_if_you_can(request, file_experiment)
        if exp_info is None:
            break
        exp_type = exp_info.get('experiment_type')
        if found_experiment_type is None or found_experiment_type == exp_type:
            found_experiment_type = exp_type
        else:  # multiple experiment types
            found_experiment_type = 'Integrative analysis'
            break
    tracking_values['experiment_type'] = found_experiment_type
    tracking_values['is_visualization'] = False
    # create a tracking_item to track this download
    tracking_item = {'date_created': datetime.datetime.now(datetime.timezone.utc),
                     'status': 'in review by lab', 'tracking_type': 'download_tracking',
                     'download_tracking': tracking_values}
    TrackingItem.create_and_commit(request, tracking_item)
    # soft redirect: return the presigned URL as JSON instead of a 307
    if asbool(request.params.get('soft')):
        expires = int(parse_qs(urlparse(location).query)['Expires'][0])
        return {
            '@type': ['SoftRedirect'],
            'location': location,
            'expires': datetime.datetime.fromtimestamp(expires, pytz.utc).isoformat(),
        }
    # Range requests: proxy the bytes directly instead of redirecting
    if 'Range' in request.headers:
        try:
            response_body = conn.get_object(**param_get_object)
        except Exception as e:
            raise e
        response_dict = {
            'body': response_body.get('Body').read(),
            # status_code: 206 if partial, 200 if the range covers whole file
            'status_code': response_body.get('ResponseMetadata').get('HTTPStatusCode'),
            'accept_ranges': response_body.get('AcceptRanges'),
            'content_length': response_body.get('ContentLength'),
            'content_range': response_body.get('ContentRange')
        }
        return Response(**response_dict)
    # We don't use X-Accel-Redirect here so that client behaviour is similar for
    # both aws and non-aws users.
    if use_download_proxy:
        location = request.registry.settings.get('download_proxy', '') + str(location)
    # 307 redirect specifies to keep original method
    raise HTTPTemporaryRedirect(location=location)
def validate_file_format_validity_for_file_type(context, request):
    """Check if the specified file format (e.g. fastq) is allowed for the
    file type (e.g. FileFastq); adds a body error when it is not.
    """
    data = request.json
    if 'file_format' not in data:
        # nothing to validate
        request.validated.update({})
        return
    file_format_item = get_item_if_you_can(request, data['file_format'], 'file-formats')
    if not file_format_item:
        # item level validation will take care of generating the error
        return
    file_format_name = file_format_item['file_format']
    allowed_types = file_format_item.get('valid_item_types', [])
    file_type = context.type_info.name
    if file_type not in allowed_types:
        msg = 'File format {} is not allowed for {}'.format(file_format_name, file_type)
        request.errors.add('body', None, msg)
def validate_file_filename(context, request):
    '''validator for filename field

    Checks that the (new or existing) filename ends with one of the
    extensions allowed by the file's format; format 'other' accepts any
    filename. Adds a body error on mismatch.
    '''
    found_match = False
    data = request.json
    if 'filename' not in data:
        # see if there is an existing file_name
        filename = context.properties.get('filename')
        if not filename:
            return
    else:
        filename = data['filename']
    ff = data.get('file_format')
    if not ff:
        ff = context.properties.get('file_format')
    file_format_item = get_item_if_you_can(request, ff, 'file-formats')
    if not file_format_item:
        msg = 'Problem getting file_format for %s' % filename
        request.errors.add('body', None, msg)
        return
    msg = None
    try:
        file_extensions = [file_format_item.get('standard_file_extension')]
        if file_format_item.get('other_allowed_extensions'):
            file_extensions.extend(file_format_item.get('other_allowed_extensions'))
        file_extensions = list(set(file_extensions))
    except (AttributeError, TypeError):
        msg = 'Problem getting file_format for %s' % filename
    else:
        if file_format_item.get('file_format') == 'other':
            # 'other' accepts any filename
            found_match = True
        elif not file_extensions:  # this shouldn't happen
            pass
        for extension in file_extensions:
            # idiom: str.endswith replaces the old manual slice comparison
            if filename.endswith('.' + extension):
                found_match = True
                break
    if found_match:
        request.validated.update({})
    else:
        if not msg:
            msg = ["'." + ext + "'" for ext in file_extensions]
            msg = ', '.join(msg)
            msg = 'Filename %s extension does not agree with specified file format. Valid extension(s): %s' % (filename, msg)
        request.errors.add('body', None, msg)
def validate_processed_file_unique_md5_with_bypass(context, request):
    '''validator to check md5 on processed files, unless you tell it
       not to

    Skipped entirely for non-processed files, when no md5sum is supplied,
    when the request carries ``force_md5`` in its query string, or when the
    md5 is unchanged. Otherwise looks for an existing file with the same
    md5 in elasticsearch (when available) or the database and adds a body
    error when one is found.
    '''
    # skip validator if not file processed
    if context.type_info.item_type != 'file_processed':
        return
    data = request.json
    if 'md5sum' not in data or not data['md5sum']:
        return
    # explicit bypass flag in the query string
    if 'force_md5' in request.query_string:
        return
    # we can of course patch / put to ourselves the same md5 we previously had
    if context.properties.get('md5sum') == data['md5sum']:
        return
    if ELASTIC_SEARCH in request.registry:
        # search the index for another File with the same md5sum
        search = make_search_subreq(request, '/search/?type=File&md5sum=%s' % data['md5sum'])
        search_resp = request.invoke_subrequest(search, True)
        if search_resp.status_int < 400:
            # already got this md5
            found = search_resp.json['@graph'][0]['accession']
            request.errors.add('body', None, 'md5sum %s already exists for accession %s' %
                               (data['md5sum'], found))
    else:  # find it in the database
        conn = request.registry['connection']
        res = conn.get_by_json('md5sum', data['md5sum'], 'file_processed')
        if res is not None:
            # md5 already exists
            found = res.properties['accession']
            request.errors.add('body', None, 'md5sum %s already exists for accession %s' %
                               (data['md5sum'], found))
def validate_processed_file_produced_from_field(context, request):
    '''validator to make sure that the values in the
    produced_from field are valid file identifiers'''
    # only applies to processed files
    if context.type_info.item_type != 'file_processed':
        return
    data = request.json
    if 'produced_from' not in data:
        return
    all_ok = True
    for idx, identifier in enumerate(data['produced_from']):
        try:
            found_uuid = get_item_if_you_can(request, identifier, 'files').get('uuid')
        except AttributeError:
            # lookup returned nothing resolvable
            found_uuid = None
        if not found_uuid:
            all_ok = False
            request.errors.add('body', ['produced_from', idx], "'%s' not found" % identifier)
    if all_ok:
        request.validated.update({})
def validate_extra_file_format(context, request):
    '''validator to check to be sure that file_format of extrafile is not the
       same as the file and is a known format for the schema
    '''
    files_ok = True
    data = request.json
    if 'extra_files' not in data:
        return
    extras = data['extra_files']
    # post should always have file_format as it is required patch may or may not
    ff = data.get('file_format')
    if not ff:
        ff = context.properties.get('file_format')
    file_format_item = get_item_if_you_can(request, ff, 'file-formats')
    if not file_format_item or 'standard_file_extension' not in file_format_item:
        request.errors.add('body', None, "Can't find parent file format for extra_files")
        return
    parent_format = file_format_item['uuid']
    schema_eformats = file_format_item.get('extrafile_formats')
    if not schema_eformats:  # means this parent file shouldn't have any extra files
        request.errors.add('body', None, "File with format %s should not have extra_files" % file_format_item.get('file_format'))
        return
    valid_ext_formats = []
    for ok_format in schema_eformats:
        ok_format_item = get_item_if_you_can(request, ok_format, 'file-formats')
        try:
            off_uuid = ok_format_item.get('uuid')
        except AttributeError:
            # BUG FIX: `raise "<string>"` is a TypeError in Python 3 --
            # raise a real exception carrying the intended message.
            raise Exception(
                "FileFormat Item %s contains unknown FileFormats in the extrafile_formats property"
                % file_format_item.get('uuid'))
        valid_ext_formats.append(off_uuid)
    seen_ext_formats = []
    for i, ef in enumerate(extras):
        eformat = ef.get('file_format')
        if eformat is None:
            return  # will fail the required extra_file.file_format
        eformat_item = get_item_if_you_can(request, eformat, 'file-formats')
        try:
            ef_uuid = eformat_item.get('uuid')
        except AttributeError:
            request.errors.add('body', ['extra_files', i], "'%s' not a valid or known file format" % eformat)
            files_ok = False
            break
        if ef_uuid in seen_ext_formats:
            # typo fix in user-facing message: "Multple" -> "Multiple"
            request.errors.add('body', ['extra_files', i], "Multiple extra files with '%s' format cannot be submitted at the same time" % eformat)
            files_ok = False
            break
        seen_ext_formats.append(ef_uuid)
        if ef_uuid == parent_format:
            request.errors.add('body', ['extra_files', i], "'%s' format cannot be the same for file and extra_file" % file_format_item.get('file_format'))
            files_ok = False
            break
        if ef_uuid not in valid_ext_formats:
            request.errors.add('body', ['extra_files', i], "'%s' not a valid extrafile_format for '%s'" % (eformat, file_format_item.get('file_format')))
            files_ok = False
    if files_ok:
        request.validated.update({})
@view_config(context=File.Collection, permission='add', request_method='POST',
             validators=[validate_item_content_post,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
def file_add(context, request, render=None):
    """POST endpoint for creating a File; all file validators run first."""
    return collection_add(context, request, render)
@view_config(context=File, permission='edit', request_method='PUT',
             validators=[validate_item_content_put,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
@view_config(context=File, permission='edit', request_method='PATCH',
             validators=[validate_item_content_patch,
                         validate_file_filename,
                         validate_extra_file_format,
                         validate_file_format_validity_for_file_type,
                         validate_processed_file_unique_md5_with_bypass,
                         validate_processed_file_produced_from_field])
def file_edit(context, request, render=None):
    """PUT/PATCH endpoint for editing a File; all file validators run first."""
    return item_edit(context, request, render)
|
from PyQt5 import QtWidgets
from .main_window_ui import Ui_MainWindow
from scrappy.parse.nltk_scraps import ScrapExtracter
from scrappy.document import Document
class MainWindow(QtWidgets.QMainWindow):
    """
    The main editor window.

    Owns the Document model and the ScrapExtracter used to turn the
    editor's plain text into a parse tree when the user saves.
    """
    def __init__(self):
        super(MainWindow, self).__init__()
        self._document = Document()
        self._scrapExtracter = ScrapExtracter()
        # set up UI using generated code from designer file
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # handle save button click
        self.ui.actionSave.triggered.connect(self._update_document)

    def _update_document(self):
        """
        Add text in the editor to the document object.
        """
        # extract scraps from the editor text and store the resulting tree
        self._document.parse_tree = self._scrapExtracter.extract_scraps(self.ui.textEdit.toPlainText())
Refactor: make the UI object a private attribute of the window.
from PyQt5 import QtWidgets
from .main_window_ui import Ui_MainWindow
from scrappy.parse.nltk_scraps import ScrapExtracter
from scrappy.document import Document
class MainWindow(QtWidgets.QMainWindow):
    """
    The main editor window.

    Owns the Document model and the ScrapExtracter used to turn the
    editor's plain text into a parse tree when the user saves.
    """
    def __init__(self):
        super(MainWindow, self).__init__()
        self._document = Document()
        self._scrapExtracter = ScrapExtracter()
        # set up UI using generated code from designer file
        self._ui = Ui_MainWindow()
        self._ui.setupUi(self)
        # handle save button click
        self._ui.actionSave.triggered.connect(self._update_document)

    def _update_document(self):
        """
        Add text in the editor to the document object.
        """
        # extract scraps from the editor text and store the resulting tree
        self._document.parse_tree = self._scrapExtracter.extract_scraps(self._ui.textEdit.toPlainText())
"""
This module contains the splittable tab widget API
"""
import inspect
import logging
import mimetypes
import os
import sys
import uuid
import weakref
from pyqode.qt import QtCore, QtWidgets, QtGui
from pyqode.core.api import utils, CodeEdit
from pyqode.core.dialogs import DlgUnsavedFiles
from pyqode.core._forms import popup_open_files_ui
from .tab_bar import TabBar
from .code_edits import GenericCodeEdit, TextCodeEdit
def _logger():
    """Return this module's logger."""
    module_name = __name__
    return logging.getLogger(module_name)
class DraggableTabBar(TabBar):
    """
    A draggable tab bar that allow to drag & drop tabs.

    Implementation is based on this qt article:
    http://www.qtcentre.org/wiki/index.php?title=Movable_Tabs
    """
    #: Signal emitted when a tab must be moved to the specified
    #: index (the tab might come from another tab bar (split)).
    tab_move_request = QtCore.Signal(QtWidgets.QWidget, int)

    def __init__(self, parent):
        super(DraggableTabBar, self).__init__(parent)
        # position of the last left-button press; used to measure the
        # distance the cursor must travel before a drag starts
        self._pos = QtCore.QPoint()
        self.setAcceptDrops(True)
        # mouse tracking so mouseMoveEvent fires without a button pressed
        # (needed for the tooltip update below)
        self.setMouseTracking(True)
        self.setElideMode(QtCore.Qt.ElideNone)

    def mousePressEvent(self, event):
        # remember the press position; a drag only starts once the cursor
        # moves farther than the platform's start-drag distance
        if event.button() == QtCore.Qt.LeftButton:
            self._pos = event.pos()  # _pos is a QPoint defined in the header
        super(DraggableTabBar, self).mousePressEvent(event)

    def widget_under_mouse(self, event):
        """Return the tab page widget under the mouse cursor (or None)."""
        index = self.tabAt(event.pos())
        tab = self.parent().widget(index)
        return tab

    def mouseMoveEvent(self, event):
        # update tooltip with the tooltip of the tab under mouse cursor.
        tab = self.widget_under_mouse(event)
        if tab is not None:
            tooltip = tab.toolTip()
            if not tooltip:
                try:
                    tooltip = tab.file.path
                except AttributeError:
                    # tab has no associated file attribute
                    pass
            self.setToolTip(tooltip)
        # If the distance is too small then return
        if (event.pos() - self._pos).manhattanLength() < \
                QtWidgets.QApplication.startDragDistance():
            return
        # If the left button isn't pressed anymore then return
        if not event.buttons() & QtCore.Qt.LeftButton:
            return
        drag = QtGui.QDrag(self)
        data = QtCore.QMimeData()
        # stash python references on the mime object so the receiving
        # dropEvent (possibly on another split's tab bar) can find them
        data.tab = tab
        data.widget = self
        # a crude way to distinguish tab-reodering drags from other drags
        data.setData("action", b"tab-reordering")
        drag.setMimeData(data)
        drag.setPixmap(self.tabIcon(self.tabAt(event.pos())).pixmap(32, 32))
        drag.exec_()

    def dragEnterEvent(self, event):
        # Only accept if it's an tab-reordering request
        m = event.mimeData()
        formats = m.formats()
        if "action" in formats and m.data("action") == "tab-reordering":
            event.acceptProposedAction()

    def dropEvent(self, event):
        # drop a tab in a split (may be the same split or another one).
        m = event.mimeData()
        index = self.tabAt(event.pos())
        # Tell interested objects that a tab should be moved.
        if m.tab != self.parent().widget(index):
            self.tab_move_request.emit(m.tab, index)
        event.acceptProposedAction()
class BaseTabWidget(QtWidgets.QTabWidget):
"""
Base tab widget class used by SplittableTabWidget. This tab widget adds a
context menu to the tab bar that allow the user to:
- split the current tab (horizontally or vertically)
- close the current tab
- close all tabs
- close all other tabs
"""
#: Signal emitted when the last tab has been closed
last_tab_closed = QtCore.Signal()
#: Signal emitted when a tab has been closed
tab_closed = QtCore.Signal(QtWidgets.QWidget)
#: Signal emitted when the user clicked on split vertical or split
#: horizontal
#: **Parameters**:
#: - widget: the widget to split
#: - orientation: split orientation (horizontal/vertical)
split_requested = QtCore.Signal(QtWidgets.QWidget, int)
#: Signal emitted when a tab got detached from the TabWidget
#: **Parameters**:
#: - old_tab: the old tab instance (before it get closed)
#: - new_tab: the new tab instance (the one that is detached)
tab_detached = QtCore.Signal(QtWidgets.QWidget, QtWidgets.QWidget)
_detached_window_class = None
def __init__(self, parent):
    """Set up the draggable tab bar, context menu and signal wiring."""
    super(BaseTabWidget, self).__init__(parent)
    # currently active tab widget (maintained by _on_current_changed)
    self._current = None
    self.currentChanged.connect(self._on_current_changed)
    self.tabCloseRequested.connect(self._on_tab_close_requested)
    # replace the default tab bar with the drag & drop aware one
    tab_bar = DraggableTabBar(self)
    tab_bar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
    tab_bar.customContextMenuRequested.connect(self._show_tab_context_menu)
    tab_bar.tab_move_request.connect(self._on_tab_move_request)
    self.setTabBar(tab_bar)
    self.setAcceptDrops(True)
    self.setUsesScrollButtons(True)
    #: A list of additional context menu actions
    self.context_actions = []
    # top-level windows created by detach_tab
    self.detached_tabs = []
def tab_under_menu(self):
    """
    Returns the index of the tab that sits under the context menu.

    Note: ``QTabBar.tabAt`` returns a tab **index** (-1 if none), not a
    QWidget -- callers use the result with ``self.widget(index)``.

    :return: int
    """
    # _menu_pos is presumably stored by _show_tab_context_menu (not visible
    # in this view) -- confirm before relying on it elsewhere
    return self.tabBar().tabAt(self._menu_pos)
@QtCore.Slot()
def close(self):
    """
    Closes the active editor
    """
    tab_index = self.tab_under_menu()
    self.tabCloseRequested.emit(tab_index)
@QtCore.Slot()
def close_others(self):
    """
    Closes every editors tabs except the current one.
    """
    current_widget = self.widget(self.tab_under_menu())
    # prompt for unsaved changes on every tab except the one we keep
    self._try_close_dirty_tabs(exept=current_widget)
    i = 0
    while self.count() > 1:
        widget = self.widget(i)
        if widget != current_widget:
            self.remove_tab(i)
        else:
            # the kept tab has reached index 0; from now on remove the
            # tab right after it until only the kept tab remains
            i = 1
@QtCore.Slot()
def close_all(self):
    """
    Closes all editors

    Returns True when every tab was closed, False when the user cancelled
    the dirty-tabs prompt.
    """
    if not self._try_close_dirty_tabs():
        return False
    while self.count():
        first = self.widget(0)
        self.remove_tab(0)
        self.tab_closed.emit(first)
    return True
@QtCore.Slot()
def detach_tab(self):
    """Detach the tab under the context menu into its own top-level window.

    The document is closed here and reopened in a fresh tab widget with
    the same open parameters, then shown in a new window (optionally an
    instance of ``_detached_window_class``).
    """
    tab_index = self.tab_under_menu()
    tab = self.widget(tab_index)
    try:
        open_parameters = tab.open_parameters
    except AttributeError:
        # tab never recorded its open parameters; fall back to defaults
        open_parameters = {
            'encoding': None,
            'replace_tabs_by_spaces': True,
            'clean_trailing_whitespaces': True,
            'safe_save': True,
            'restore_cursor_position': True,
            'preferred_eol': 0,
            'autodetect_eol': True,
            'show_whitespaces': False,
            'kwargs': {}
        }
    path = tab.file.path
    # close the tab in this widget before reopening it elsewhere
    self.tabCloseRequested.emit(tab_index)
    # create a new top level widget and add the tab
    new_tab_widget = self.parent().__class__()
    # reopen document with same open settings.
    new_tab = new_tab_widget.open_document(
        path, encoding=open_parameters['encoding'],
        replace_tabs_by_spaces=open_parameters['replace_tabs_by_spaces'],
        clean_trailing_whitespaces=open_parameters[
            'clean_trailing_whitespaces'],
        safe_save=open_parameters['safe_save'],
        restore_cursor_position=open_parameters['restore_cursor_position'],
        preferred_eol=open_parameters['preferred_eol'],
        autodetect_eol=open_parameters['autodetect_eol'],
        show_whitespaces=open_parameters['show_whitespaces'],
        **open_parameters['kwargs'])
    if self._detached_window_class is None:
        win = new_tab_widget
    else:
        win = self._detached_window_class()
        #: detached window must be an instance of QMainWindow
        win.setCentralWidget(new_tab_widget)
    # keep a reference so the window is not garbage collected
    self.detached_tabs.append(win)
    win.resize(800, 600)
    win.show()
    self.tab_detached.emit(tab, new_tab)
    # if the user has two monitor, move the window to the second monitor
    desktop = QtWidgets.qApp.desktop()
    if desktop.screenCount() > 1:
        primary_screen = desktop.screenNumber(self)
        other_screen = {0: 1, 1: 0}[primary_screen]
        l = desktop.screenGeometry(other_screen).left()
        new_tab_widget.move(l, 0)
        new_tab_widget.showMaximized()
    # drop the window from detached_tabs once its last tab closes
    new_tab_widget.last_tab_closed.connect(self._remove_detached_tab)
def _remove_detached_tab(self):
self.detached_tabs.remove(self.sender())
self.sender().close()
def save_widget(self, editor):
"""
Saves the widget. The base implementation does nothing.
The function must return a bool that tells whether the save succeeded
or not.
:param editor: editor widget to save.
"""
return True
    def _create_tab_bar_menu(self):
        """
        Builds the tab bar context menu: close actions, a detach action,
        a Split submenu and any user supplied ``context_actions``.

        :return: QtWidgets.QMenu
        """
        context_mnu = QtWidgets.QMenu()
        for name, slot, icon in [
                ('Close', self.close, 'window-close'),
                ('Close others', self.close_others, 'tab-close-other'),
                ('Close all', self.close_all, 'project-development-close-all'),
                (None, None, None),
                ('Detach tab', self.detach_tab, 'tab-detach')]:
            if name is None and slot is None:
                # (None, None, None) entry marks a separator
                qaction = QtWidgets.QAction(self)
                qaction.setSeparator(True)
            else:
                qaction = QtWidgets.QAction(name, self)
                qaction.triggered.connect(slot)
                if icon:
                    qaction.setIcon(QtGui.QIcon.fromTheme(icon))
            context_mnu.addAction(qaction)
            self.addAction(qaction)
        context_mnu.addSeparator()
        menu = QtWidgets.QMenu('Split', context_mnu)
        menu.setIcon(QtGui.QIcon.fromTheme('split'))
        a = menu.addAction('Split horizontally')
        a.triggered.connect(self._on_split_requested)
        a.setIcon(QtGui.QIcon.fromTheme('view-split-left-right'))
        a = menu.addAction('Split vertically')
        a.setIcon(QtGui.QIcon.fromTheme('view-split-top-bottom'))
        a.triggered.connect(self._on_split_requested)
        context_mnu.addMenu(menu)
        context_mnu.addSeparator()
        if self.context_actions:
            context_mnu.addSeparator()
            for action in self.context_actions:
                context_mnu.addAction(action)
        # keep a reference so the menu is not garbage collected while shown
        self._context_mnu = context_mnu
        return context_mnu
    def _show_tab_context_menu(self, position):
        """
        Pops up the tab bar context menu at ``position`` and records the
        widget under the menu (class wide, on SplittableTabWidget).

        :param position: position of the request, in tab bar coordinates.
        """
        if self.count():
            self._menu_pos = position
            SplittableTabWidget.tab_under_menu = self.widget(
                self.tab_under_menu())
            self._create_tab_bar_menu().popup(self.tabBar().mapToGlobal(
                position))
def _collect_dirty_tabs(self, skip=None):
"""
Collects the list of dirty tabs
:param skip: Tab to skip (used for close_others).
"""
widgets = []
filenames = []
for i in range(self.count()):
widget = self.widget(i)
try:
if widget.dirty and widget != skip:
widgets.append(widget)
filenames.append(widget.file.path)
except AttributeError:
pass
return widgets, filenames
    def _try_close_dirty_tabs(self, exept=None):
        """
        Tries to close dirty tabs. Uses DlgUnsavedFiles to ask the user
        what he wants to do.

        :param exept: tab to exclude from the check (note: the parameter
            name keeps its historical spelling for API compatibility).
        :return: True if the user accepted the dialog (or nothing was
            dirty), False if the dialog was cancelled.
        """
        widgets, filenames = self._collect_dirty_tabs(skip=exept)
        if not len(filenames):
            return True
        dlg = DlgUnsavedFiles(self, files=filenames)
        if dlg.exec_() == dlg.Accepted:
            if not dlg.discarded:
                for item in dlg.listWidget.selectedItems():
                    filename = item.text()
                    widget = None
                    for widget in widgets:
                        # NOTE(review): filenames were collected from
                        # widget.file.path but are matched here against
                        # widget.path — verify both attributes exist and
                        # agree for the widget types used with this class.
                        if widget.path == filename:
                            break
                    if widget != exept:
                        self.save_widget(widget)
                        self.remove_tab(self.indexOf(widget))
            return True
        return False
def _get_widget_path(self, widget):
try:
return widget.path
except AttributeError:
return ''
def _on_tab_close_requested(self, index):
widget = self.widget(index)
dirty = False
try:
if widget.original is None:
dirty = widget.dirty
except AttributeError:
pass
if not dirty:
self.remove_tab(index)
else:
# unsaved widget
path = self._get_widget_path(widget)
if not path:
path = self.tabText(self.indexOf(widget))
dlg = DlgUnsavedFiles(
self, files=[path])
if dlg.exec_() == dlg.Accepted:
rm = True
if not dlg.discarded:
try:
rm = self.save_widget(widget)
except OSError:
pass
if rm:
self.remove_tab(index)
cnt = sys.getrefcount(widget)
if cnt > 2:
try:
import objgraph
except ImportError:
_logger().warning(
'potential memory leak detected on widget: %r\n'
'Install the objgraph package to know what objects are '
'holding references the editor widget...' % widget)
else:
_logger().warning('potential memory detected on widget: %r\n'
'see stderr for a backrefs dot graph...' %
widget)
objgraph.show_backrefs([widget], output=sys.stderr)
    @staticmethod
    def _close_widget(widget):
        """
        Closes the given widget and handles cases where the widget has been
        cloned or is a clone of another widget.

        :param widget: widget to close (may be None).
        :return: list of remaining clones of the widget (empty list when it
            has none), or None when ``widget`` is None.
        """
        if widget is None:
            return
        try:
            # detach document/highlighter so Qt parenting does not keep
            # them alive with the closed widget
            widget.document().setParent(None)
            widget.syntax_highlighter.setParent(None)
        except AttributeError:
            pass  # not a QPlainTextEdit subclass
        # handled cloned widgets
        clones = []
        if hasattr(widget, 'original') and widget.original:
            # cloned widget needs to be removed from the original
            widget.original.clones.remove(widget)
            try:
                widget.setDocument(None)
            except AttributeError:
                # not a QTextEdit/QPlainTextEdit
                pass
        elif hasattr(widget, 'clones'):
            clones = widget.clones
        try:
            # only clear current editor if it does not have any other clones
            widget.close(clear=len(clones) == 0)
        except (AttributeError, TypeError):
            # not a CodeEdit
            widget.close()
        return clones
def _restore_original(self, clones):
try:
first = clones[0]
except (IndexError, TypeError):
# empty or None
pass
else:
first.clones = clones[1:]
first.original = None
for c in first.clones:
c.original = first
    def remove_tab(self, index):
        """
        Overrides removeTab to emit tab_closed and last_tab_closed signals.

        Also handles clone bookkeeping: when the removed widget has clones,
        the first clone becomes the new original and inherits the document.

        :param index: index of the tab to remove.
        """
        widget = self.widget(index)
        try:
            document = widget.document()
        except AttributeError:
            document = None # not a QPlainTextEdit
        clones = self._close_widget(widget)
        self.tab_closed.emit(widget)
        self.removeTab(index)
        self._restore_original(clones)
        widget._original_tab_widget._tabs.remove(widget)
        if self.count() == 0:
            self.last_tab_closed.emit()
        # clear the class-wide reference so the context menu cannot act on
        # a deleted widget
        if SplittableTabWidget.tab_under_menu == widget:
            SplittableTabWidget.tab_under_menu = None
        if not clones:
            # no clone left: let Qt delete the widget
            widget.setParent(None)
            widget.deleteLater()
            del widget
        else:
            # hand the shared document over to the promoted clone
            try:
                clones[0].syntax_highlighter.setDocument(document)
            except AttributeError:
                pass # not a QPlainTextEdit
    def _on_split_requested(self):
        """
        Emits the split requested signal with the desired orientation.
        """
        # the triggering menu action's text encodes the orientation
        orientation = self.sender().text()
        widget = self.widget(self.tab_under_menu())
        if 'horizontally' in orientation:
            self.split_requested.emit(
                widget, QtCore.Qt.Horizontal)
        else:
            self.split_requested.emit(
                widget, QtCore.Qt.Vertical)
def _on_current_changed(self, index):
tab = self.widget(index)
if tab:
tab.setFocus()
    def _on_tab_move_request(self, widget, new_index):
        """
        Moves ``widget`` from its current tab widget into this one at
        ``new_index`` (used for drag & drop between tab widgets).

        :param widget: the tab widget page to move.
        :param new_index: destination index in this tab widget.
        """
        parent = widget.parent_tab_widget
        index = parent.indexOf(widget)
        text = parent.tabText(index)
        icon = parent.tabIcon(index)
        parent.removeTab(index)
        widget.parent_tab_widget = self
        self.insertTab(new_index, widget, icon, text)
        self.setCurrentIndex(new_index)
        widget.setFocus()
        if parent.count() == 0:
            # the source tab widget is now empty: let its splitter clean up
            parent.last_tab_closed.emit()
def dragEnterEvent(self, event):
# Only accept if it's an tab-reordering request
m = event.mimeData()
formats = m.formats()
if "action" in formats and m.data("action") == "tab-reordering":
event.acceptProposedAction()
    def dropEvent(self, event):
        """
        Handles a tab-reordering drop: moves the dragged tab next to the
        tab under the cursor.
        """
        # NOTE(review): ``m.tab`` is presumably attached to the mime data by
        # DraggableTabBar when the drag starts — confirm in that class.
        m = event.mimeData()
        index = self.tabBar().tabAt(event.pos())
        # Tell interested objects that a tab should be moved.
        if m.tab != self.widget(index):
            self._on_tab_move_request(m.tab, index)
            event.acceptProposedAction()
    def addTab(self, tab, *args):
        """
        Adds a tab to the tab widget; this function sets the
        parent_tab_widget attribute on the tab instance.

        :param tab: widget to add as a new tab.
        :param args: forwarded to QTabWidget.addTab (title and/or icon).
        """
        tab.parent_tab_widget = self
        super(BaseTabWidget, self).addTab(tab, *args)
class OpenFilesPopup(QtWidgets.QDialog):
    """
    Popup dialog listing the currently open files (file name + full path).

    Emits :attr:`triggered` with the selected file path when an item is
    activated. The alphabetical-sort preference is persisted via QSettings.
    """
    #: Signal emitted with the file path when the user activates an item.
    triggered = QtCore.Signal(str)
    def __init__(self, *args):
        super(OpenFilesPopup, self).__init__(*args)
        self.ui = popup_open_files_ui.Ui_Dialog()
        self.ui.setupUi(self)
        self.ui.tableWidget.itemActivated.connect(self._on_item_activated)
        self.ui.tableWidget.itemDoubleClicked.connect(self._on_item_activated)
        settings = QtCore.QSettings('pyQode', 'pyqode.core')
        # NOTE(review): depending on platform/format, QSettings may return
        # the stored bool as the string 'false', and bool('false') is True —
        # verify the value round-trips as a real bool here.
        self.sort_enabled = bool(settings.value(
            'sortOpenFilesAlphabetically', False))
        self.ui.checkBox.setChecked(self.sort_enabled)
        self.ui.checkBox.stateChanged.connect(self._on_sort_changed)
    def set_filenames(self, filenames):
        """
        Fills the table with one row per file (name column + path column).

        Empty paths (new, never-saved documents) are shown with a generated
        'New document N.txt' label.

        :param filenames: list of file paths to display.
        """
        def clean(filenames):
            # replace empty paths with a generated 'New document N.txt'
            ret_val = []
            new_count = 0
            for filename in filenames:
                if not filename:
                    filename = 'New document %d.txt' % (new_count + 1)
                    new_count += 1
                ret_val.append(filename)
            return ret_val
        # keep the raw list so _on_sort_changed can re-populate the table
        self._filenames = filenames
        filenames = clean(filenames)
        if self.sort_enabled:
            filenames = sorted(filenames, key=lambda x:
                               QtCore.QFileInfo(x).fileName().lower())
        self.ui.tableWidget.clearContents()
        icon_provider = SplittableCodeEditTabWidget.icon_provider_klass()
        self.ui.tableWidget.setRowCount(len(filenames))
        self.ui.tableWidget.horizontalHeader().setSectionResizeMode(
            QtWidgets.QHeaderView.ResizeToContents)
        for row, path in enumerate(filenames):
            finfo = QtCore.QFileInfo(path)
            filename = finfo.fileName()
            if finfo.exists():
                icon = icon_provider.icon(finfo)
            else:
                # new/unsaved documents get the generic file icon
                icon = icon_provider.icon(icon_provider.File)
            # file name
            item = QtWidgets.QTableWidgetItem()
            item.setText(filename)
            item.setIcon(icon)
            item.setToolTip(path)
            item.setData(QtCore.Qt.UserRole, bytes(path, 'utf-8'))
            self.ui.tableWidget.setItem(row, 0, item)
            # path
            item = QtWidgets.QTableWidgetItem()
            item.setText(path)
            item.setToolTip(path)
            item.setData(QtCore.Qt.UserRole, bytes(path, 'utf-8'))
            self.ui.tableWidget.setItem(row, 1, item)
    def _on_sort_changed(self, *_):
        # persist the preference and re-fill the table with the new order
        self.sort_enabled = self.ui.checkBox.isChecked()
        settings = QtCore.QSettings('pyQode', 'pyqode.core')
        settings.setValue(
            'sortOpenFilesAlphabetically', self.sort_enabled)
        self.set_filenames(self._filenames)
    def _on_item_activated(self, item):
        # emit the original path stored as UTF-8 bytes in UserRole
        self.hide()
        self.triggered.emit(item.data(QtCore.Qt.UserRole).decode('utf-8'))
    def show(self):
        """Shows the popup with the first row pre-selected."""
        super(OpenFilesPopup, self).show()
        self.ui.tableWidget.setFocus()
        self.ui.tableWidget.selectRow(0)
class SplittableTabWidget(QtWidgets.QSplitter):
    """
    A splittable tab widget. The widget is implemented as a splitter which
    contains a main tab widget and a collection of child SplittableTabWidget.
    Widgets added to the tab widget **must** have a ``split`` method which
    returns a clone of the widget instance.
    You can add new tabs to the main tab widget by using the ``add_tab``
    method. Tabs are always closable.
    To change the underlying tab widget class, just set the
    ``tab_widget_klass`` class attribute.
    The splittable tab widget works with any kind of widget. There is a
    specialisation made specifically for managing a collection of code editor
    widgets: SplittableCodeEditTabWidget.
    The implementation uses duck typing and will automatically show a dialog
    when closing an editor which has a ``dirty`` property. To actually save the
    widget, you must reimplement :meth:`SplittableTabWidget.save_widget``.
    """
    #: Signal emitted when the last tab has been closed.
    last_tab_closed = QtCore.Signal(QtWidgets.QSplitter)
    #: Signal emitted when the active tab changed (takes child tab widgets
    #: into account). Parameter is the new tab widget.
    current_changed = QtCore.Signal(QtWidgets.QWidget)
    #: Signal emitted when a tab got detached from the TabWidget
    #: **Parameters**:
    #: - old_tab: the old tab instance (before it get closed)
    #: - new_tab: the new tab instance (the one that is detached)
    tab_detached = QtCore.Signal(QtWidgets.QWidget, QtWidgets.QWidget)
    #: The window to use when a type is detached. If None, the detached tab
    #: widget will be shown directly.
    detached_window_klass = None
    #: underlying tab widget class
    tab_widget_klass = BaseTabWidget
    #: Reference to the widget under the tab bar menu
    tab_under_menu = None
    @property
    def popup_shortcut(self):
        """
        Gets/sets the open files popup shortcut (ctrl+t by default).

        Only available on the root splitter (which owns the popup action).
        """
        if hasattr(self, '_action_popup'):
            return self._shortcut
        return None
    @popup_shortcut.setter
    def popup_shortcut(self, value):
        if hasattr(self, '_action_popup'):
            self._shortcut = value
            self._action_popup.setShortcut(self._shortcut)
    def __init__(self, parent=None, root=True, create_popup=True):
        """
        :param parent: parent widget.
        :param root: True for the top level splitter, False for child
            splitters created by :meth:`split`.
        :param create_popup: unused, kept for backward compatibility.
        """
        super(SplittableTabWidget, self).__init__(parent)
        SplittableTabWidget.tab_widget_klass._detached_window_class = \
            SplittableTabWidget.detached_window_klass
        if root:
            # the root splitter owns the Ctrl+T "open files" popup
            self._action_popup = QtWidgets.QAction(self)
            self._action_popup.setShortcutContext(QtCore.Qt.WindowShortcut)
            self._shortcut = 'Ctrl+T'
            self._action_popup.setShortcut(self._shortcut)
            self._action_popup.triggered.connect(self._show_popup)
            self.addAction(self._action_popup)
            self.popup = OpenFilesPopup()
            self.popup.setWindowFlags(
                QtCore.Qt.Popup | QtCore.Qt.FramelessWindowHint)
            self.popup.triggered.connect(self._on_popup_triggered)
        self.child_splitters = []
        self.main_tab_widget = self.tab_widget_klass(self)
        self.main_tab_widget.last_tab_closed.connect(
            self._on_last_tab_closed)
        self.main_tab_widget.tab_detached.connect(self.tab_detached.emit)
        self.main_tab_widget.split_requested.connect(self.split)
        self.addWidget(self.main_tab_widget)
        self._parent_splitter = None
        self._current = None
        self.root = root
        if root:
            QtWidgets.QApplication.instance().focusChanged.connect(
                self._on_focus_changed)
        # tabs sharing this uuid belong to the same splitter group
        self._uuid = uuid.uuid1()
        self._tabs = []
    def add_context_action(self, action):
        """
        Adds a custom context menu action

        :param action: action to add.
        """
        self.main_tab_widget.context_actions.append(action)
        for child_splitter in self.child_splitters:
            child_splitter.add_context_action(action)
    def add_tab(self, tab, title='', icon=None):
        """
        Adds a tab to main tab widget.

        :param tab: Widget to add as a new tab of the main tab widget.
        :param title: Tab title
        :param icon: Tab icon
        """
        if icon:
            tab._icon = icon
        if not hasattr(tab, 'clones'):
            tab.clones = []
        if not hasattr(tab, 'original'):
            tab.original = None
        if icon:
            self.main_tab_widget.addTab(tab, icon, title)
        else:
            self.main_tab_widget.addTab(tab, title)
        self.main_tab_widget.setCurrentIndex(
            self.main_tab_widget.indexOf(tab))
        self.main_tab_widget.show()
        tab._uuid = self._uuid
        tab.horizontalScrollBar().setValue(0)
        tab.setFocus()
        tab._original_tab_widget = self
        self._tabs.append(tab)
        self._on_focus_changed(None, tab)
    def _on_popup_triggered(self, path):
        """
        Activates the tab whose file path matches the popup selection; new
        (never saved) documents are matched by their generated label.
        """
        new_count = 0
        for w in self.widgets():
            if w.file.path == path:
                index = w.parent_tab_widget.indexOf(w)
                w.parent_tab_widget.setCurrentIndex(index)
                break
            elif w.file.path == '':
                # New document
                fpath = 'New document %d.txt' % (new_count + 1)
                if fpath == path:
                    index = w.parent_tab_widget.indexOf(w)
                    w.parent_tab_widget.setCurrentIndex(index)
                    break
                new_count += 1
    def _show_popup(self):
        """Shows the open-files popup centered over the main tab widget."""
        parent_pos = self.main_tab_widget.pos()
        parent_size = self.main_tab_widget.size()
        size = self.popup.size()
        x, y = parent_pos.x(), parent_pos.y()
        pw, ph = parent_size.width(), parent_size.height()
        w = size.width()
        # use floor division: QPoint requires ints and true division would
        # produce floats under Python 3 (TypeError with strict bindings)
        x += pw // 2 - w // 2
        y += ph // 10
        self.popup.move(self.mapToGlobal(QtCore.QPoint(x, y)))
        self.popup.set_filenames(
            [editor.file.path for editor in self.widgets()])
        self.popup.show()
    def _make_splitter(self):
        """
        Returns a child splitter: reuses a detached (parentless) one when
        available, otherwise creates a fresh child splitter.
        """
        splitter = None
        for widget in reversed(self.child_splitters):
            if widget.parent() is None:
                widget.setParent(self)
                splitter = widget
                break
        if splitter is None:
            splitter = self.__class__(self, root=False)
            for action in self.main_tab_widget.context_actions:
                splitter.add_context_action(action)
        return splitter
    def split(self, widget, orientation):
        """
        Splits the current widget into a new SplittableTabWidget.

        :param widget: widget to split
        :param orientation: orientation of the splitter
        :return: the new splitter
        """
        if orientation == int(QtCore.Qt.Horizontal):
            orientation = QtCore.Qt.Horizontal
        else:
            orientation = QtCore.Qt.Vertical
        self.setOrientation(orientation)
        splitter = self._make_splitter()
        splitter.show()
        self.addWidget(splitter)
        self.child_splitters.append(splitter)
        if widget.original:
            base = widget.original
        else:
            base = widget
        clone = base.split()
        if clone not in base.clones:
            # code editors maintain the list of clones internally but some
            # other widgets (user widgets) might not.
            base.clones.append(clone)
        clone.original = base
        splitter._parent_splitter = self
        splitter.last_tab_closed.connect(self._on_last_child_tab_closed)
        splitter.tab_detached.connect(self.tab_detached.emit)
        if hasattr(base, '_icon'):
            icon = base._icon
        else:
            icon = None
        # same group of tab splitter (user might have a group for editors and
        # another group for consoles or whatever).
        splitter._uuid = self._uuid
        splitter.add_tab(clone, title=self.main_tab_widget.tabText(
            self.main_tab_widget.indexOf(widget)), icon=icon)
        self.setSizes([1] * self.count())
        return splitter
    def has_children(self):
        """
        Checks if there are children tab widgets.

        :return: True if there is at least one tab in the children tab widget.
        """
        for splitter in self.child_splitters:
            if splitter.has_children():
                return splitter
        return self.main_tab_widget.count() != 0
    def current_widget(self):
        """
        Returns a reference to the current widget, i.e. the last widget that
        got the focus.

        :return: QWidget
        """
        if self._current:
            return self._current()
        return None
    def widgets(self, include_clones=False):
        """
        Recursively gets the list of widgets.

        :param include_clones: True to retrieve all tabs, including clones,
            otherwise only original widgets are returned.
        """
        widgets = []
        for i in range(self.main_tab_widget.count()):
            widget = self.main_tab_widget.widget(i)
            try:
                if widget.original is None or include_clones:
                    widgets.append(widget)
            except AttributeError:
                pass
        for child in self.child_splitters:
            widgets += child.widgets(include_clones=include_clones)
        return widgets
    def _on_last_tab_closed(self, *args):
        """Hides or removes this splitter when its tab widget became empty."""
        has_children = self.has_children()
        if has_children:
            # hide the tab widget if there is not tabs
            if not self.main_tab_widget.count():
                self.main_tab_widget.hide()
        else:
            if self.root:
                # ensure root is visible when there are no children
                self.show()
                self.main_tab_widget.show()
            else:
                # hide ourselves (we don't have any other tabs or children)
                self._remove_from_parent()
        if not self.has_children():
            self.last_tab_closed.emit(self)
    def _on_focus_changed(self, old, new):
        """Tracks the application focus to detect the active tab."""
        try:
            # only react to widgets that belong to this splitter group
            result = new._uuid == self._uuid
        except (AttributeError, TypeError):
            pass
        else:
            if result:
                if new != self.current_widget():
                    self._on_current_changed(new)
    def _on_current_changed(self, new):
        """Stores the new current widget (weakly) and notifies listeners."""
        old = self.current_widget()
        self._current = weakref.ref(new)
        _logger().debug(
            'current tab changed (old=%r, new=%r)', old, new)
        self.current_changed.emit(new)
        return old, new
    def _remove_from_parent(self):
        """Detaches this (non-root) splitter from its parent splitter."""
        self.hide()
        self.setParent(None)
        self.main_tab_widget.hide()
        if not self.root:
            self._parent_splitter.child_splitters.remove(self)
            self._parent_splitter = None
    def _on_last_child_tab_closed(self):
        """Cleans up when a child splitter lost its last tab."""
        if not self.has_children():
            self.last_tab_closed.emit(self)
            if self.root:
                self.show()
                self.main_tab_widget.show()
            else:
                self._remove_from_parent()
    def count(self):
        """
        Returns the number of widgets currently displayed (takes child splits
        into account).
        """
        c = self.main_tab_widget.count()
        for child in self.child_splitters:
            c += child.count()
        return c
class CodeEditTabWidget(BaseTabWidget):
    """
    Tab widget specialised to hold pyqode's code editor widgets.
    It will manage the saving of editors.
    """
    #: directory proposed by the save-as dialog
    default_directory = os.path.expanduser('~')
    #: re-emitted when the dirty state of an editor tab changed
    dirty_changed = QtCore.Signal(bool)
    @classmethod
    @utils.memoized
    def get_filter(cls, mimetype):
        """
        Returns a filter string for the file dialog. The filter is based
        on the mime type.

        :param mimetype: mime type from which the filter must be derived.
        :return: Filter string
        """
        filters = ' '.join(
            ['*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype)])
        return '%s (%s)' % (mimetype, filters)
    def addTab(self, widget, *args):
        """
        Re-implements addTab to connect to the dirty changed signal and setup
        some helper attributes.

        :param widget: widget to add
        :param args: optional additional arguments (name and/or icon).
        """
        widget.dirty_changed.connect(self._on_dirty_changed)
        super(CodeEditTabWidget, self).addTab(widget, *args)
    def _on_dirty_changed(self, dirty):
        """
        Adds a star in front of a dirty tab and emits dirty_changed.
        """
        widget = self.sender()
        if isinstance(widget, CodeEdit):
            parent = widget.parent_tab_widget
            index = parent.indexOf(widget)
            title = parent.tabText(index)
            title = title.replace('* ', '')
            if dirty:
                parent.setTabText(index, "* " + title)
            else:
                parent.setTabText(index, title)
            parent.dirty_changed.emit(dirty)
    @classmethod
    def _ask_path(cls, editor):
        """
        Shows a QFileDialog and ask for a save filename.

        :return: tuple (save filename, selected filter)
        """
        try:
            dialog_filter = cls.get_filter(editor.mimetypes[0])
        except IndexError:
            # editor declares no mime types: accept anything
            dialog_filter = 'All files (*)'
        return QtWidgets.QFileDialog.getSaveFileName(
            editor, 'Save file as', cls.default_directory, dialog_filter)
    @classmethod
    def save_widget(cls, editor):
        """
        Implements SplittableTabWidget.save_widget to actually save the
        code editor widget.

        If the editor.file.path is None or empty or the file does not exist,
        a save as dialog is shown (save as).

        :param editor: editor widget to save.
        :return: False if there was a problem saving the editor (e.g. the save
            as dialog has been canceled by the user, or a permission error,...)
        """
        if editor.original:
            # clones are saved through their original widget
            editor = editor.original
        if editor.file.path is None or not os.path.exists(editor.file.path):
            # save as
            path, _selected_filter = cls._ask_path(editor)
            if not path:
                return False
            if not os.path.splitext(path)[1]:
                if len(editor.mimetypes):
                    # guess_extension may return None for unknown mime types
                    ext = mimetypes.guess_extension(editor.mimetypes[0])
                    if ext:
                        path += ext
            try:
                _logger().debug('saving %r as %r', editor.file._old_path, path)
            except AttributeError:
                _logger().debug('saving %r as %r', editor.file.path, path)
            editor.file._path = path
        else:
            path = editor.file.path
        editor.file.save(path)
        tw = editor.parent_tab_widget
        # strip the dirty marker prefix '* '; replacing '*' alone would
        # leave a stray leading space and eat asterisks in the file name
        text = tw.tabText(tw.indexOf(editor)).replace('* ', '')
        tw.setTabText(tw.indexOf(editor), text)
        for clone in [editor] + editor.clones:
            if clone != editor:
                tw = clone.parent_tab_widget
                tw.setTabText(tw.indexOf(clone), text)
        return True
    def _get_widget_path(self, editor):
        """Returns the file path of the given editor widget."""
        return editor.file.path
class DetachedEditorWindow(QtWidgets.QMainWindow):
    """
    Main window used to host a detached editor tab widget; provides a
    minimal File toolbar with a Save action (Ctrl+S).
    """
    def __init__(self):
        super(DetachedEditorWindow, self).__init__()
        toolbar = QtWidgets.QToolBar('File')
        save_action = toolbar.addAction(
            QtGui.QIcon.fromTheme('document-save'), 'Save')
        save_action.setShortcut('Ctrl+S')
        save_action.triggered.connect(self._save)
        self.addToolBar(toolbar)
    def _save(self):
        """Saves the active editor of the hosted tab widget."""
        self.centralWidget().save_current()
class SplittableCodeEditTabWidget(SplittableTabWidget):
"""
SplittableTabWidget specialised for CodeEdit and subclasses.
Offers some convenience function for opening/saving files.
The widget supports multiple type of code editors. Each editor type must
be explicitly registered using ``register_editor``. If there is no
registered editor for the given mime-type, ``fallback_editor`` is used.
"""
#: Signal emitted when a tab bar is double clicked, this should work
#: even with child tab bars
tab_bar_double_clicked = QtCore.Signal()
#: Signal emitted when a document has been saved.
#: Parameters:
# - save_file_path
# - old_content
document_saved = QtCore.Signal(str, str)
#: uses a CodeEditTabWidget which is able to save code editor widgets.
tab_widget_klass = CodeEditTabWidget
#: the icon provider class to use when creating new document. Must be
#: a subclass of QtWidgets.QFileIconProvider. By default, QFileIconProvider
#: is used.
icon_provider_klass = QtWidgets.QFileIconProvider
#: Maps a mime-type with an editor type.
#: This map is used to instantiate the proper editor type when
#: opening/creating a document.
editors = {mimetype: TextCodeEdit for mimetype in TextCodeEdit.mimetypes}
#: Fallback editor is used in case not editors matching the requested
#: mime-type could not be found in the editors map.
#: By default the fallback_editor is a
#: :class:`pyqode.core.widgets.GenericCodeEdit`
fallback_editor = GenericCodeEdit
#: signal emitted when the dirty_changed signal of the current editor
#: has been emitted.
dirty_changed = QtCore.Signal(bool)
#: signal emitted when an editor has been created but just before the file
#: is open. This give you a chance to change some editor settings that
#: influence file opening.
editor_created = QtCore.Signal(object)
#: signal emitted when en editor has been created and the document has
#: been sucessfully open
document_opened = QtCore.Signal(object)
#: Store the number of new documents created, for internal use.
_new_count = 0
    def __init__(self, parent=None, root=True):
        """
        :param parent: parent widget.
        :param root: True for the top level splitter.
        """
        # make detached tabs open inside a DetachedEditorWindow
        SplittableTabWidget.detached_window_klass = DetachedEditorWindow
        super(SplittableCodeEditTabWidget, self).__init__(parent, root)
        self.main_tab_widget.tabBar().double_clicked.connect(
            self.tab_bar_double_clicked.emit)
@classmethod
def register_code_edit(cls, code_edit_class):
"""
Register an additional code edit **class**
.. warning: This method expect a class, not an instance!
:param code_edit_class: code edit class to register.
"""
if not inspect.isclass(code_edit_class):
raise TypeError('must be a class, not an instance.')
for mimetype in code_edit_class.mimetypes:
if mimetype in cls.editors:
_logger().warn('editor for mimetype already registered, '
'skipping')
cls.editors[mimetype] = code_edit_class
_logger().log(5, 'registered editors: %r', cls.editors)
    def save_current_as(self):
        """
        Save current widget as.

        Temporarily clears the widget's path so that save_widget shows the
        save-as dialog; restores the old path if the dialog is cancelled or
        the save fails.

        :return: the new file path, or None when there is no current widget.
        """
        if not self.current_widget():
            return
        mem = self.current_widget().file.path
        self.current_widget().file._path = None
        self.current_widget().file._old_path = mem
        # propose the old file's directory in the save-as dialog
        CodeEditTabWidget.default_directory = os.path.dirname(mem)
        widget = self.current_widget()
        try:
            success = self.main_tab_widget.save_widget(widget)
        except Exception as e:
            QtWidgets.QMessageBox.warning(
                self, 'Failed to save file as',
                'Failed to save file as %s\nError=%s' % (
                    widget.file.path, str(e)))
            widget.file._path = mem
        else:
            if not success:
                # dialog cancelled: restore the previous path
                widget.file._path = mem
            else:
                CodeEditTabWidget.default_directory = os.path.expanduser('~')
                self.document_saved.emit(widget.file.path, '')
                # rename tab
                tw = widget.parent_tab_widget
                tw.setTabText(tw.indexOf(widget),
                              os.path.split(widget.file.path)[1])
        return self.current_widget().file.path
def save_current(self):
"""
Save current editor. If the editor.file.path is None, a save as dialog
will be shown.
"""
if self.current_widget() is not None:
editor = self.current_widget()
self._save(editor)
    def _save(self, widget):
        """
        Saves ``widget`` if it is dirty and emits :attr:`document_saved`
        with the path and the file content prior to saving.

        :param widget: editor widget to save.
        """
        path = widget.file.path
        try:
            encoding = widget.file.encoding
        except AttributeError:
            # not a code edit
            # NOTE(review): on this branch old_content is never assigned, so
            # the emit below would raise NameError if a dirty non-code-edit
            # widget is saved successfully — confirm this path is reachable.
            pass
        else:
            # NOTE(review): this read raises if the file does not exist yet
            # (e.g. a new document) — confirm callers only pass saved files.
            with open(path, encoding=encoding) as f:
                old_content = f.read()
        if widget.dirty:
            try:
                self.main_tab_widget.save_widget(widget)
            except Exception as e:
                QtWidgets.QMessageBox.warning(
                    self, 'Failed to save file',
                    'Failed to save file: %s\nError=%s' % (
                        widget.file.path, str(e)))
            else:
                self.document_saved.emit(path, old_content)
def save_all(self):
"""
Save all editors.
"""
for w in self.widgets():
try:
self._save(w)
except OSError:
_logger().exception('failed to save %s', w.file.path)
def _create_code_edit(self, mimetype, *args, **kwargs):
"""
Create a code edit instance based on the mimetype of the file to
open/create.
:type mimetype: mime type
:param args: Positional arguments that must be forwarded to the editor
widget constructor.
:param kwargs: Keyworded arguments that must be forwarded to the editor
widget constructor.
:return: Code editor widget instance.
"""
if mimetype in self.editors.keys():
return self.editors[mimetype](
*args, parent=self.main_tab_widget, **kwargs)
editor = self.fallback_editor(*args, parent=self.main_tab_widget,
**kwargs)
return editor
    def create_new_document(self, base_name='New Document',
                            extension='.txt', preferred_eol=0,
                            autodetect_eol=True, **kwargs):
        """
        Creates a new document.

        The document name will be ``base_name + count + extension``

        :param base_name: Base name of the document. An int will be appended.
        :param extension: Document extension (dotted)
        :param preferred_eol: Preferred EOL convention. This setting will be
            used for saving the document unless autodetect_eol is True.
        :param autodetect_eol: If true, automatically detects file EOL and
            use it instead of the preferred EOL when saving files.
        :param kwargs: Keyworded arguments that must be forwarded to the editor
            widget constructor.
        :return: Code editor widget instance.
        """
        # class-wide counter so names stay unique across all splitters
        SplittableCodeEditTabWidget._new_count += 1
        name = '%s%d%s' % (base_name, self._new_count, extension)
        tab = self._create_code_edit(
            self.guess_mimetype(name), **kwargs)
        self.editor_created.emit(tab)
        tab.file.autodetect_eol = autodetect_eol
        tab.file.preferred_eol = preferred_eol
        tab.setDocumentTitle(name)
        self.add_tab(tab, title=name, icon=self._icon(name))
        self.document_opened.emit(tab)
        return tab
def guess_mimetype(self, path):
if 'CMakeLists.txt' in path:
return 'text/x-cmake-project'
else:
return mimetypes.guess_type(path)[0]
    @utils.with_wait_cursor
    def open_document(self, path, encoding=None, replace_tabs_by_spaces=True,
                      clean_trailing_whitespaces=True, safe_save=True,
                      restore_cursor_position=True, preferred_eol=0,
                      autodetect_eol=True, show_whitespaces=False, **kwargs):
        """
        Opens a document.

        If the document is already open, its tab is activated instead of
        opening a second copy.

        :param path: Path of the document to open
        :param encoding: The encoding to use to open the file. Default is
            locale.getpreferredencoding().
        :param replace_tabs_by_spaces: Enable/Disable replace tabs by spaces.
            Default is true.
        :param clean_trailing_whitespaces: Enable/Disable clean trailing
            whitespaces (on save). Default is True.
        :param safe_save: If True, the file is saved to a temporary file first.
            If the save went fine, the temporary file is renamed to the final
            filename.
        :param restore_cursor_position: If true, last cursor position will be
            restored. Default is True.
        :param preferred_eol: Preferred EOL convention. This setting will be
            used for saving the document unless autodetect_eol is True.
        :param autodetect_eol: If true, automatically detects file EOL and
            use it instead of the preferred EOL when saving files.
        :param show_whitespaces: True to show white spaces.
        :param kwargs: addtional keyword args to pass to the widget
            constructor.
        :return: The created code editor
        """
        original_path = os.path.normpath(path)
        # normcase makes the duplicate check case-insensitive on Windows
        path = os.path.normcase(original_path)
        paths = []
        widgets = []
        for w in self.widgets(include_clones=False):
            if os.path.exists(w.file.path):
                # skip new docs
                widgets.append(w)
                paths.append(os.path.normcase(w.file.path))
        if path in paths:
            # document already open: just activate its tab
            i = paths.index(path)
            w = widgets[i]
            tw = w.parent_tab_widget
            tw.setCurrentIndex(tw.indexOf(w))
            return w
        else:
            # NOTE(review): assert statements are stripped under python -O;
            # a missing file would then surface later in tab.file.open.
            assert os.path.exists(original_path)
            name = os.path.split(original_path)[1]
            # disambiguate tabs that share the same base file name by
            # prefixing them with their parent directory name
            use_parent_dir = False
            for tab in self.widgets():
                title = QtCore.QFileInfo(tab.file.path).fileName()
                if title == name:
                    tw = tab.parent_tab_widget
                    new_name = os.path.join(os.path.split(os.path.dirname(
                        tab.file.path))[1], title)
                    tw.setTabText(tw.indexOf(tab), new_name)
                    use_parent_dir = True
            if use_parent_dir:
                name = os.path.join(
                    os.path.split(os.path.dirname(path))[1], name)
                use_parent_dir = False
            tab = self._create_code_edit(self.guess_mimetype(path), **kwargs)
            self.editor_created.emit(tab)
            # remember the open settings so detach_tab can re-open the
            # document identically
            tab.open_parameters = {
                'encoding': encoding,
                'replace_tabs_by_spaces': replace_tabs_by_spaces,
                'clean_trailing_whitespaces': clean_trailing_whitespaces,
                'safe_save': safe_save,
                'restore_cursor_position': restore_cursor_position,
                'preferred_eol': preferred_eol,
                'autodetect_eol': autodetect_eol,
                'show_whitespaces': show_whitespaces,
                'kwargs': kwargs
            }
            tab.file.clean_trailing_whitespaces = clean_trailing_whitespaces
            tab.file.safe_save = safe_save
            tab.file.restore_cursor = restore_cursor_position
            tab.file.replace_tabs_by_spaces = replace_tabs_by_spaces
            tab.file.autodetect_eol = autodetect_eol
            tab.file.preferred_eol = preferred_eol
            tab.show_whitespaces = show_whitespaces
            try:
                tab.file.open(original_path, encoding=encoding)
            except Exception as e:
                # dispose of the half-initialised widget before re-raising
                tab.close()
                tab.setParent(None)
                tab.deleteLater()
                raise e
            else:
                tab.setDocumentTitle(name)
                tab.file._path = original_path
                icon = self._icon(path)
                self.add_tab(tab, title=name, icon=icon)
                self.document_opened.emit(tab)
                return tab
def close_document(self, path):
"""
Closes a text document.
:param path: Path of the document to close.
"""
to_close = []
for widget in self.widgets(include_clones=True):
if widget.file.path == path:
to_close.append(widget)
for widget in to_close:
tw = widget.parent_tab_widget
tw.remove_tab(tw.indexOf(widget))
def rename_document(self, old_path, new_path):
"""
Renames an already opened document (this will not rename the file,
just update the file path and tab title).
Use that function to update a file that has been renamed externally.
:param old_path: old path (path of the widget to rename with
``new_path``
:param new_path: new path that will be used to rename the tab.
"""
to_rename = []
title = os.path.split(new_path)[1]
for widget in self.widgets(include_clones=True):
if widget.file.path == old_path:
to_rename.append(widget)
for widget in to_rename:
tw = widget.parent_tab_widget
widget.file._path = new_path
tw.setTabText(tw.indexOf(widget), title)
    def closeEvent(self, event):
        """
        Saves dirty editors on close and cancels the event if the user chose
        to continue to work.

        :param event: close event
        """
        # Collect editors with unsaved modifications (originals only; clones
        # share their original's document).
        dirty_widgets = []
        for w in self.widgets(include_clones=False):
            if w.dirty:
                dirty_widgets.append(w)
        # Build the list shown in the unsaved-files dialog: the on-disk path
        # when the file exists, the document title for new documents.
        filenames = []
        for w in dirty_widgets:
            if os.path.exists(w.file.path):
                filenames.append(w.file.path)
            else:
                filenames.append(w.documentTitle())
        if len(filenames) == 0:
            # nothing dirty: close everything without prompting
            self.close_all()
            return
        dlg = DlgUnsavedFiles(self, files=filenames)
        if dlg.exec_() == dlg.Accepted:
            if not dlg.discarded:
                # save each file the user left checked in the dialog
                for item in dlg.listWidget.selectedItems():
                    filename = item.text()
                    widget = None
                    # find the dirty widget matching the displayed name
                    # (path for existing files, title for new documents)
                    for widget in dirty_widgets:
                        if widget.file.path == filename or \
                                widget.documentTitle() == filename:
                            break
                    tw = widget.parent_tab_widget
                    tw.save_widget(widget)
                    tw.remove_tab(tw.indexOf(widget))
            self.close_all()
        else:
            # user canceled: keep the application open
            event.ignore()
def close_all(self):
for w in self.widgets(include_clones=True):
tw = w.parent_tab_widget
tw.remove_tab(tw.indexOf(w))
def _icon(self, path):
provider = self.icon_provider_klass()
if not os.path.exists(path):
return provider.icon(provider.File)
return provider.icon(QtCore.QFileInfo(path))
def _on_current_changed(self, new):
old, new = super(
SplittableCodeEditTabWidget, self)._on_current_changed(new)
if new:
new.dirty_changed.connect(self.dirty_changed.emit)
self.dirty_changed.emit(new.dirty)
return old, new
def split(self, widget, orientation):
splitter = super(SplittableCodeEditTabWidget, self).split(
widget, orientation)
splitter.tab_bar_double_clicked.connect(
self.tab_bar_double_clicked.emit)
SplittableCodeEditTabWidget: a few tweaks to allow adding widgets that are not subclasses of QPlainTextEdit
"""
This module contains the splittable tab widget API
"""
import inspect
import logging
import mimetypes
import os
import sys
import uuid
import weakref
from pyqode.qt import QtCore, QtWidgets, QtGui
from pyqode.core.api import utils, CodeEdit
from pyqode.core.dialogs import DlgUnsavedFiles
from pyqode.core._forms import popup_open_files_ui
from .tab_bar import TabBar
from .code_edits import GenericCodeEdit, TextCodeEdit
def _logger():
    """Returns the logger used by this module."""
    return logging.getLogger(__name__)
class DraggableTabBar(TabBar):
    """
    A draggable tab bar that allow to drag & drop tabs.

    Implementation is based on this qt article:
    http://www.qtcentre.org/wiki/index.php?title=Movable_Tabs
    """
    #: Signal emitted when a tab must be moved to the specified
    #: index (the tab might come from another tab bar (split)).
    tab_move_request = QtCore.Signal(QtWidgets.QWidget, int)

    def __init__(self, parent):
        super(DraggableTabBar, self).__init__(parent)
        self._pos = QtCore.QPoint()
        self.setAcceptDrops(True)
        self.setMouseTracking(True)
        self.setElideMode(QtCore.Qt.ElideNone)

    def mousePressEvent(self, event):
        """Records the press position so a later drag can be detected."""
        if event.button() == QtCore.Qt.LeftButton:
            self._pos = event.pos()  # _pos is a QPoint defined in the header
        super(DraggableTabBar, self).mousePressEvent(event)

    def widget_under_mouse(self, event):
        """Returns the tab page widget under the mouse cursor (or None)."""
        index = self.tabAt(event.pos())
        tab = self.parent().widget(index)
        return tab

    def mouseMoveEvent(self, event):
        """
        Updates the tooltip from the tab under the cursor and starts a
        drag once the mouse moved far enough with the left button held.
        """
        # update tooltip with the tooltip of the tab under mouse cursor.
        tab = self.widget_under_mouse(event)
        if tab is not None:
            tooltip = tab.toolTip()
            if not tooltip:
                try:
                    tooltip = tab.file.path
                except AttributeError:
                    # widget has no associated file
                    pass
            self.setToolTip(tooltip)
        # If the distance is too small then return
        if (event.pos() - self._pos).manhattanLength() < \
                QtWidgets.QApplication.startDragDistance():
            return
        # If the left button isn't pressed anymore then return
        if not event.buttons() & QtCore.Qt.LeftButton:
            return
        drag = QtGui.QDrag(self)
        data = QtCore.QMimeData()
        # ``tab`` and ``widget`` are dynamic Python attributes on the mime
        # data object; they only survive within this process.
        data.tab = tab
        data.widget = self
        # a crude way to distinguish tab-reodering drags from other drags
        data.setData("action", b"tab-reordering")
        drag.setMimeData(data)
        drag.setPixmap(self.tabIcon(self.tabAt(event.pos())).pixmap(32, 32))
        drag.exec_()

    def dragEnterEvent(self, event):
        # Only accept if it's an tab-reordering request
        m = event.mimeData()
        formats = m.formats()
        if "action" in formats and m.data("action") == "tab-reordering":
            event.acceptProposedAction()

    def dropEvent(self, event):
        # drop a tab in a split (may be the same split or another one).
        m = event.mimeData()
        index = self.tabAt(event.pos())
        # Tell interested objects that a tab should be moved.
        if m.tab != self.parent().widget(index):
            self.tab_move_request.emit(m.tab, index)
        event.acceptProposedAction()
class BaseTabWidget(QtWidgets.QTabWidget):
    """
    Base tab widget class used by SplittableTabWidget. This tab widget adds a
    context menu to the tab bar that allow the user to:

    - split the current tab (horizontally or vertically)
    - close the current tab
    - close all tabs
    - close all other tabs
    """
    #: Signal emitted when the last tab has been closed
    last_tab_closed = QtCore.Signal()

    #: Signal emitted when a tab has been closed
    tab_closed = QtCore.Signal(QtWidgets.QWidget)

    #: Signal emitted when the user clicked on split vertical or split
    #: horizontal
    #: **Parameters**:
    #: - widget: the widget to split
    #: - orientation: split orientation (horizontal/vertical)
    split_requested = QtCore.Signal(QtWidgets.QWidget, int)

    #: Signal emitted when a tab got detached from the TabWidget
    #: **Parameters**:
    #: - old_tab: the old tab instance (before it get closed)
    #: - new_tab: the new tab instance (the one that is detached)
    tab_detached = QtCore.Signal(QtWidgets.QWidget, QtWidgets.QWidget)

    #: Window class used to host a detached tab widget; set by
    #: SplittableTabWidget from ``detached_window_klass``.
    _detached_window_class = None

    def __init__(self, parent):
        super(BaseTabWidget, self).__init__(parent)
        self._current = None
        self.currentChanged.connect(self._on_current_changed)
        self.tabCloseRequested.connect(self._on_tab_close_requested)
        tab_bar = DraggableTabBar(self)
        tab_bar.setContextMenuPolicy(QtCore.Qt.CustomContextMenu)
        tab_bar.customContextMenuRequested.connect(self._show_tab_context_menu)
        tab_bar.tab_move_request.connect(self._on_tab_move_request)
        self.setTabBar(tab_bar)
        self.setAcceptDrops(True)
        self.setUsesScrollButtons(True)
        #: A list of additional context menu actions
        self.context_actions = []
        #: Keeps the detached windows alive until their last tab is closed.
        self.detached_tabs = []

    def tab_under_menu(self):
        """
        Returns the index of the tab that sits under the context menu.

        :return: tab index (int)
        """
        return self.tabBar().tabAt(self._menu_pos)

    @QtCore.Slot()
    def close(self):
        """
        Closes the active editor
        """
        self.tabCloseRequested.emit(self.tab_under_menu())

    @QtCore.Slot()
    def close_others(self):
        """
        Closes every editors tabs except the current one.
        """
        current_widget = self.widget(self.tab_under_menu())
        self._try_close_dirty_tabs(exept=current_widget)
        i = 0
        while self.count() > 1:
            widget = self.widget(i)
            if widget != current_widget:
                self.remove_tab(i)
            else:
                # the kept widget is now at index 0; from here on remove the
                # tab right after it until it is the only one left.
                i = 1

    @QtCore.Slot()
    def close_all(self):
        """
        Closes all editors

        :return: True if all tabs were closed, False if the user canceled
            the unsaved-files dialog.
        """
        if self._try_close_dirty_tabs():
            while self.count():
                widget = self.widget(0)
                self.remove_tab(0)
                # NOTE(review): remove_tab() already emits tab_closed, so
                # listeners receive the signal twice here — confirm intended.
                self.tab_closed.emit(widget)
            return True
        return False

    @QtCore.Slot()
    def detach_tab(self):
        """
        Detaches the tab under the context menu into its own top level
        window, reopening the document with the same open parameters.
        """
        tab_index = self.tab_under_menu()
        tab = self.widget(tab_index)
        try:
            open_parameters = tab.open_parameters
        except AttributeError:
            # widget was not opened through open_document: use defaults
            open_parameters = {
                'encoding': None,
                'replace_tabs_by_spaces': True,
                'clean_trailing_whitespaces': True,
                'safe_save': True,
                'restore_cursor_position': True,
                'preferred_eol': 0,
                'autodetect_eol': True,
                'show_whitespaces': False,
                'kwargs': {}
            }
        path = tab.file.path
        self.tabCloseRequested.emit(tab_index)
        # create a new top level widget and add the tab
        new_tab_widget = self.parent().__class__()
        # reopen document with same open settings.
        new_tab = new_tab_widget.open_document(
            path, encoding=open_parameters['encoding'],
            replace_tabs_by_spaces=open_parameters['replace_tabs_by_spaces'],
            clean_trailing_whitespaces=open_parameters[
                'clean_trailing_whitespaces'],
            safe_save=open_parameters['safe_save'],
            restore_cursor_position=open_parameters['restore_cursor_position'],
            preferred_eol=open_parameters['preferred_eol'],
            autodetect_eol=open_parameters['autodetect_eol'],
            show_whitespaces=open_parameters['show_whitespaces'],
            **open_parameters['kwargs'])
        if self._detached_window_class is None:
            win = new_tab_widget
        else:
            #: detached window must be an instance of QMainWindow
            win = self._detached_window_class()
            win.setCentralWidget(new_tab_widget)
        self.detached_tabs.append(win)
        win.resize(800, 600)
        win.show()
        self.tab_detached.emit(tab, new_tab)
        # if the user has two monitor, move the window to the second monitor
        desktop = QtWidgets.qApp.desktop()
        if desktop.screenCount() > 1:
            primary_screen = desktop.screenNumber(self)
            # NOTE(review): only handles screen indices 0 and 1; a third
            # monitor would raise KeyError — confirm acceptable.
            other_screen = {0: 1, 1: 0}[primary_screen]
            l = desktop.screenGeometry(other_screen).left()
            new_tab_widget.move(l, 0)
            new_tab_widget.showMaximized()
        new_tab_widget.last_tab_closed.connect(self._remove_detached_tab)

    def _remove_detached_tab(self):
        """Forgets and closes the detached window whose last tab closed."""
        self.detached_tabs.remove(self.sender())
        self.sender().close()

    def save_widget(self, editor):
        """
        Saves the widget. The base implementation does nothing.

        The function must return a bool that tells whether the save succeeded
        or not.

        :param editor: editor widget to save.
        """
        return True

    def _create_tab_bar_menu(self):
        """Builds the tab bar context menu (close/detach/split actions)."""
        context_mnu = QtWidgets.QMenu()
        for name, slot, icon in [
                ('Close', self.close, 'window-close'),
                ('Close others', self.close_others, 'tab-close-other'),
                ('Close all', self.close_all, 'project-development-close-all'),
                (None, None, None),
                ('Detach tab', self.detach_tab, 'tab-detach')]:
            if name is None and slot is None:
                # (None, None, None) entries become separators
                qaction = QtWidgets.QAction(self)
                qaction.setSeparator(True)
            else:
                qaction = QtWidgets.QAction(name, self)
                qaction.triggered.connect(slot)
                if icon:
                    qaction.setIcon(QtGui.QIcon.fromTheme(icon))
            context_mnu.addAction(qaction)
            self.addAction(qaction)
        context_mnu.addSeparator()
        menu = QtWidgets.QMenu('Split', context_mnu)
        menu.setIcon(QtGui.QIcon.fromTheme('split'))
        a = menu.addAction('Split horizontally')
        a.triggered.connect(self._on_split_requested)
        a.setIcon(QtGui.QIcon.fromTheme('view-split-left-right'))
        a = menu.addAction('Split vertically')
        a.setIcon(QtGui.QIcon.fromTheme('view-split-top-bottom'))
        a.triggered.connect(self._on_split_requested)
        context_mnu.addMenu(menu)
        context_mnu.addSeparator()
        if self.context_actions:
            context_mnu.addSeparator()
            for action in self.context_actions:
                context_mnu.addAction(action)
        # keep a reference so the menu is not garbage collected while shown
        self._context_mnu = context_mnu
        return context_mnu

    def _show_tab_context_menu(self, position):
        """Pops up the context menu for the tab at *position* (if any)."""
        if self.count():
            self._menu_pos = position
            # remember the widget under the menu globally so remove_tab can
            # invalidate it when that widget gets closed
            SplittableTabWidget.tab_under_menu = self.widget(
                self.tab_under_menu())
            self._create_tab_bar_menu().popup(self.tabBar().mapToGlobal(
                position))

    def _collect_dirty_tabs(self, skip=None):
        """
        Collects the list of dirty tabs

        :param skip: Tab to skip (used for close_others).
        :return: tuple (list of dirty widgets, list of their file paths)
        """
        widgets = []
        filenames = []
        for i in range(self.count()):
            widget = self.widget(i)
            try:
                if widget.dirty and widget != skip:
                    widgets.append(widget)
                    filenames.append(widget.file.path)
            except AttributeError:
                # widget has no dirty flag / no file: nothing to save
                pass
        return widgets, filenames

    def _try_close_dirty_tabs(self, exept=None):
        """
        Tries to close dirty tabs. Uses DlgUnsavedFiles to ask the user
        what he wants to do.

        :param exept: tab to exclude from the save/close pass.
        :return: True if the user accepted the dialog (or nothing was
            dirty), False if the user canceled.
        """
        widgets, filenames = self._collect_dirty_tabs(skip=exept)
        if not len(filenames):
            return True
        dlg = DlgUnsavedFiles(self, files=filenames)
        if dlg.exec_() == dlg.Accepted:
            if not dlg.discarded:
                for item in dlg.listWidget.selectedItems():
                    filename = item.text()
                    widget = None
                    for widget in widgets:
                        # filenames were collected from widget.file.path in
                        # _collect_dirty_tabs, so match on the same
                        # attribute (``widget.path`` does not exist on code
                        # editor widgets and raised AttributeError).
                        if widget.file.path == filename:
                            break
                    if widget != exept:
                        self.save_widget(widget)
                        self.remove_tab(self.indexOf(widget))
            return True
        return False

    def _get_widget_path(self, widget):
        """Returns the widget's ``path`` attribute, or '' if it has none."""
        try:
            return widget.path
        except AttributeError:
            return ''

    def _on_tab_close_requested(self, index):
        """
        Removes the tab at *index*, asking the user to save it first when
        it is a dirty original (clones are closed without prompting).
        """
        widget = self.widget(index)
        dirty = False
        try:
            if widget.original is None:
                dirty = widget.dirty
        except AttributeError:
            # widget has no dirty/original attributes
            pass
        if not dirty:
            self.remove_tab(index)
        else:
            # unsaved widget
            path = self._get_widget_path(widget)
            if not path:
                path = self.tabText(self.indexOf(widget))
            dlg = DlgUnsavedFiles(
                self, files=[path])
            if dlg.exec_() == dlg.Accepted:
                rm = True
                if not dlg.discarded:
                    try:
                        rm = self.save_widget(widget)
                    except OSError:
                        pass
                if rm:
                    self.remove_tab(index)
        # A refcount > 2 (the local + getrefcount's argument) means some
        # other object still holds the editor alive: likely a leak.
        cnt = sys.getrefcount(widget)
        if cnt > 2:
            try:
                import objgraph
            except ImportError:
                _logger().warning(
                    'potential memory leak detected on widget: %r\n'
                    'Install the objgraph package to know what objects are '
                    'holding references the editor widget...' % widget)
            else:
                _logger().warning('potential memory leak detected on '
                                  'widget: %r\n'
                                  'see stderr for a backrefs dot graph...' %
                                  widget)
                objgraph.show_backrefs([widget], output=sys.stderr)

    @staticmethod
    def _close_widget(widget):
        """
        Closes the given widgets and handles cases where the widget has been
        clone or is a clone of another widget

        :return: the list of clones of *widget* (empty if it has none).
        """
        if widget is None:
            return
        try:
            # detach document/highlighter so Qt does not delete them with
            # the widget (they may be shared with clones)
            widget.document().setParent(None)
            widget.syntax_highlighter.setParent(None)
        except AttributeError:
            pass  # not a QPlainTextEdit subclass
        # handled cloned widgets
        clones = []
        if hasattr(widget, 'original') and widget.original:
            # cloned widget needs to be removed from the original
            widget.original.clones.remove(widget)
            try:
                widget.setDocument(None)
            except AttributeError:
                # not a QTextEdit/QPlainTextEdit
                pass
        elif hasattr(widget, 'clones'):
            clones = widget.clones
        try:
            # only clear current editor if it does not have any other clones
            widget.close(clear=len(clones) == 0)
        except (AttributeError, TypeError):
            # not a CodeEdit
            widget.close()
        return clones

    def _restore_original(self, clones):
        """Promotes the first clone (if any) to be the new original."""
        try:
            first = clones[0]
        except (IndexError, TypeError):
            # empty or None
            pass
        else:
            first.clones = clones[1:]
            first.original = None
            for c in first.clones:
                c.original = first

    def remove_tab(self, index):
        """
        Overrides removeTab to emit tab_closed and last_tab_closed signals.

        :param index: index of the tab to remove.
        """
        widget = self.widget(index)
        try:
            document = widget.document()
        except AttributeError:
            document = None  # not a QPlainTextEdit
        clones = self._close_widget(widget)
        self.tab_closed.emit(widget)
        self.removeTab(index)
        self._restore_original(clones)
        widget._original_tab_widget._tabs.remove(widget)
        if self.count() == 0:
            self.last_tab_closed.emit()
        if SplittableTabWidget.tab_under_menu == widget:
            SplittableTabWidget.tab_under_menu = None
        if not clones:
            # no clone left: let Qt delete the widget
            widget.setParent(None)
            widget.deleteLater()
            del widget
        else:
            # hand the shared document over to the promoted clone
            try:
                clones[0].syntax_highlighter.setDocument(document)
            except AttributeError:
                pass  # not a QPlainTextEdit

    def _on_split_requested(self):
        """
        Emits the split requested signal with the desired orientation.
        """
        # the triggering action's text tells us which orientation was picked
        orientation = self.sender().text()
        widget = self.widget(self.tab_under_menu())
        if 'horizontally' in orientation:
            self.split_requested.emit(
                widget, QtCore.Qt.Horizontal)
        else:
            self.split_requested.emit(
                widget, QtCore.Qt.Vertical)

    def _on_current_changed(self, index):
        """Gives focus to the newly selected tab."""
        tab = self.widget(index)
        if tab:
            tab.setFocus()

    def _on_tab_move_request(self, widget, new_index):
        """Moves *widget* (possibly from another split) to *new_index*."""
        parent = widget.parent_tab_widget
        index = parent.indexOf(widget)
        text = parent.tabText(index)
        icon = parent.tabIcon(index)
        parent.removeTab(index)
        widget.parent_tab_widget = self
        self.insertTab(new_index, widget, icon, text)
        self.setCurrentIndex(new_index)
        widget.setFocus()
        if parent.count() == 0:
            parent.last_tab_closed.emit()

    def dragEnterEvent(self, event):
        # Only accept if it's an tab-reordering request
        m = event.mimeData()
        formats = m.formats()
        if "action" in formats and m.data("action") == "tab-reordering":
            event.acceptProposedAction()

    def dropEvent(self, event):
        m = event.mimeData()
        index = self.tabBar().tabAt(event.pos())
        # Tell interested objects that a tab should be moved.
        if m.tab != self.widget(index):
            self._on_tab_move_request(m.tab, index)
        event.acceptProposedAction()

    def addTab(self, tab, *args):
        """
        Adds a tab to the tab widget, this function set the parent_tab_widget
        attribute on the tab instance.
        """
        tab.parent_tab_widget = self
        super(BaseTabWidget, self).addTab(tab, *args)
class OpenFilesPopup(QtWidgets.QDialog):
    """
    Popup dialog listing the currently open files; emits ``triggered`` with
    the selected file path when the user activates a row.
    """
    #: emitted with the activated file path (str)
    triggered = QtCore.Signal(str)

    def __init__(self, *args):
        super(OpenFilesPopup, self).__init__(*args)
        self.ui = popup_open_files_ui.Ui_Dialog()
        self.ui.setupUi(self)
        self.ui.tableWidget.itemActivated.connect(self._on_item_activated)
        self.ui.tableWidget.itemDoubleClicked.connect(self._on_item_activated)
        settings = QtCore.QSettings('pyQode', 'pyqode.core')
        # NOTE(review): some QSettings backends return the stored value as
        # the string 'false', and bool('false') is True — confirm this
        # restores the sort preference correctly on all platforms.
        self.sort_enabled = bool(settings.value(
            'sortOpenFilesAlphabetically', False))
        self.ui.checkBox.setChecked(self.sort_enabled)
        self.ui.checkBox.stateChanged.connect(self._on_sort_changed)

    def set_filenames(self, filenames):
        """
        Fills the table with one row per file name.

        Empty paths (new documents) are displayed as
        ``New document N.txt``; rows carry the real path as UserRole data.

        :param filenames: list of file paths to display.
        """
        def clean(filenames):
            # replace empty paths by sequential "New document N.txt" labels
            ret_val = []
            new_count = 0
            for filename in filenames:
                if not filename:
                    filename = 'New document %d.txt' % (new_count + 1)
                    new_count += 1
                ret_val.append(filename)
            return ret_val
        # keep the raw list so _on_sort_changed can re-render it
        self._filenames = filenames
        filenames = clean(filenames)
        if self.sort_enabled:
            filenames = sorted(filenames, key=lambda x:
                               QtCore.QFileInfo(x).fileName().lower())
        self.ui.tableWidget.clearContents()
        icon_provider = SplittableCodeEditTabWidget.icon_provider_klass()
        self.ui.tableWidget.setRowCount(len(filenames))
        self.ui.tableWidget.horizontalHeader().setSectionResizeMode(
            QtWidgets.QHeaderView.ResizeToContents)
        for row, path in enumerate(filenames):
            finfo = QtCore.QFileInfo(path)
            filename = finfo.fileName()
            if finfo.exists():
                icon = icon_provider.icon(finfo)
            else:
                # new/unsaved documents get a generic file icon
                icon = icon_provider.icon(icon_provider.File)
            # file name
            item = QtWidgets.QTableWidgetItem()
            item.setText(filename)
            item.setIcon(icon)
            item.setToolTip(path)
            item.setData(QtCore.Qt.UserRole, bytes(path, 'utf-8'))
            self.ui.tableWidget.setItem(row, 0, item)
            # path
            item = QtWidgets.QTableWidgetItem()
            item.setText(path)
            item.setToolTip(path)
            item.setData(QtCore.Qt.UserRole, bytes(path, 'utf-8'))
            self.ui.tableWidget.setItem(row, 1, item)

    def _on_sort_changed(self, *_):
        """Persists the sort preference and re-renders the table."""
        self.sort_enabled = self.ui.checkBox.isChecked()
        settings = QtCore.QSettings('pyQode', 'pyqode.core')
        settings.setValue(
            'sortOpenFilesAlphabetically', self.sort_enabled)
        self.set_filenames(self._filenames)

    def _on_item_activated(self, item):
        """Hides the popup and emits ``triggered`` with the row's path."""
        self.hide()
        self.triggered.emit(item.data(QtCore.Qt.UserRole).decode('utf-8'))

    def show(self):
        """Shows the popup with focus on the table, first row selected."""
        super(OpenFilesPopup, self).show()
        self.ui.tableWidget.setFocus()
        self.ui.tableWidget.selectRow(0)
class SplittableTabWidget(QtWidgets.QSplitter):
    """
    A splittable tab widget. The widget is implemented as a splitter which
    contains a main tab widget and a collection of child SplittableTabWidget.

    Widgets added to the the tab widget **must** have a ``split`` method which
    returns a clone of the widget instance.

    You can add new tabs to the main tab widget by using the ``add_tab``
    method. Tabs are always closable.

    To change the underlying tab widget class, just set the
    ``tab_widget_klass`` class attribute.

    The splittable tab widget works with any kind of widget. There is a
    specialisation made specifically for managing a collection code editor
    widgets: SplittableCodeEditTabWidget.

    The implementation uses duck typing and will automatically show a dialog
    when closing an editor which has a ``dirty`` property. To actually save
    the widget, you must reimplement :meth:`SplittableTabWidget.save_widget``.
    """
    #: Signal emitted when the last tab has been closed.
    last_tab_closed = QtCore.Signal(QtWidgets.QSplitter)

    #: Signal emitted when the active tab changed (takes child tab widgets
    #: into account). Parameter is the new tab widget.
    current_changed = QtCore.Signal(QtWidgets.QWidget)

    #: Signal emitted when a tab got detached from the TabWidget
    #: **Parameters**:
    #: - old_tab: the old tab instance (before it get closed)
    #: - new_tab: the new tab instance (the one that is detached)
    tab_detached = QtCore.Signal(QtWidgets.QWidget, QtWidgets.QWidget)

    #: The window to use when a type is detached. If None, the detached tab
    #: widget will be shown directly.
    detached_window_klass = None

    #: underlying tab widget class
    tab_widget_klass = BaseTabWidget

    #: Reference to the widget under the tab bar menu
    tab_under_menu = None

    @property
    def popup_shortcut(self):
        """
        Gets/sets the open files popup shortcut (ctrl+t by default).
        """
        if hasattr(self, '_action_popup'):
            return self._shortcut
        return None

    @popup_shortcut.setter
    def popup_shortcut(self, value):
        # the popup action only exists on the root splitter
        if hasattr(self, '_action_popup'):
            self._shortcut = value
            self._action_popup.setShortcut(self._shortcut)

    def __init__(self, parent=None, root=True, create_popup=True):
        # NOTE(review): ``create_popup`` is accepted but never read; popup
        # creation is keyed on ``root`` instead — confirm intended.
        super(SplittableTabWidget, self).__init__(parent)
        # propagate the detached-window class to the tab widget class
        SplittableTabWidget.tab_widget_klass._detached_window_class = \
            SplittableTabWidget.detached_window_klass
        if root:
            # the Ctrl+T "open files" popup only lives on the root splitter
            self._action_popup = QtWidgets.QAction(self)
            self._action_popup.setShortcutContext(QtCore.Qt.WindowShortcut)
            self._shortcut = 'Ctrl+T'
            self._action_popup.setShortcut(self._shortcut)
            self._action_popup.triggered.connect(self._show_popup)
            self.addAction(self._action_popup)
            self.popup = OpenFilesPopup()
            self.popup.setWindowFlags(
                QtCore.Qt.Popup | QtCore.Qt.FramelessWindowHint)
            self.popup.triggered.connect(self._on_popup_triggered)
        self.child_splitters = []
        self.main_tab_widget = self.tab_widget_klass(self)
        self.main_tab_widget.last_tab_closed.connect(
            self._on_last_tab_closed)
        self.main_tab_widget.tab_detached.connect(self.tab_detached.emit)
        self.main_tab_widget.split_requested.connect(self.split)
        self.addWidget(self.main_tab_widget)
        self._parent_splitter = None
        self._current = None
        self.root = root
        if root:
            QtWidgets.QApplication.instance().focusChanged.connect(
                self._on_focus_changed)
        # identifies this group of splitters; tabs get tagged with it so
        # focus tracking only reacts to widgets of our own group
        self._uuid = uuid.uuid1()
        self._tabs = []

    def add_context_action(self, action):
        """
        Adds a custom context menu action

        :param action: action to add.
        """
        self.main_tab_widget.context_actions.append(action)
        for child_splitter in self.child_splitters:
            child_splitter.add_context_action(action)

    def add_tab(self, tab, title='', icon=None):
        """
        Adds a tab to main tab widget.

        :param tab: Widget to add as a new tab of the main tab widget.
        :param title: Tab title
        :param icon: Tab icon
        """
        if icon:
            tab._icon = icon
        # ensure the clone-tracking attributes exist even on plain widgets
        if not hasattr(tab, 'clones'):
            tab.clones = []
        if not hasattr(tab, 'original'):
            tab.original = None
        if icon:
            self.main_tab_widget.addTab(tab, icon, title)
        else:
            self.main_tab_widget.addTab(tab, title)
        self.main_tab_widget.setCurrentIndex(
            self.main_tab_widget.indexOf(tab))
        self.main_tab_widget.show()
        tab._uuid = self._uuid
        try:
            scroll_bar = tab.horizontalScrollBar()
        except AttributeError:
            # not a QPlainTextEdit class
            pass
        else:
            scroll_bar.setValue(0)
        tab.setFocus()
        tab._original_tab_widget = self
        self._tabs.append(tab)
        self._on_focus_changed(None, tab)

    def _on_popup_triggered(self, path):
        """Activates the tab matching *path* chosen in the popup."""
        new_count = 0
        for w in self.widgets():
            if w.file.path == path:
                index = w.parent_tab_widget.indexOf(w)
                w.parent_tab_widget.setCurrentIndex(index)
                break
            elif w.file.path == '':
                # New document
                # reproduce the same "New document N.txt" numbering used by
                # the popup so labels map back to widgets
                fpath = 'New document %d.txt' % (new_count + 1)
                if fpath == path:
                    index = w.parent_tab_widget.indexOf(w)
                    w.parent_tab_widget.setCurrentIndex(index)
                    break
                new_count += 1

    def _show_popup(self):
        """Shows the open-files popup centered over the main tab widget."""
        parent_pos = self.main_tab_widget.pos()
        parent_size = self.main_tab_widget.size()
        size = self.popup.size()
        x, y = parent_pos.x(), parent_pos.y()
        pw, ph = parent_size.width(), parent_size.height()
        w = size.width()
        # NOTE(review): '/' yields floats on python 3 and QPoint may require
        # ints — confirm with the Qt binding in use.
        x += pw / 2 - w / 2
        y += ph / 10
        self.popup.move(self.mapToGlobal(QtCore.QPoint(x, y)))
        self.popup.set_filenames(
            [editor.file.path for editor in self.widgets()])
        self.popup.show()

    def _make_splitter(self):
        """Reuses an orphaned child splitter if possible, else makes one."""
        splitter = None
        for widget in reversed(self.child_splitters):
            if widget.parent() is None:
                widget.setParent(self)
                splitter = widget
                break
        if splitter is None:
            splitter = self.__class__(self, root=False)
            for action in self.main_tab_widget.context_actions:
                splitter.add_context_action(action)
        return splitter

    def split(self, widget, orientation):
        """
        Split the the current widget in new SplittableTabWidget.

        :param widget: widget to split
        :param orientation: orientation of the splitter
        :return: the new splitter
        """
        # always clone from the original widget, not from a clone
        if widget.original:
            base = widget.original
        else:
            base = widget
        clone = base.split()
        if not clone:
            # widget refused to be split
            return
        if orientation == int(QtCore.Qt.Horizontal):
            orientation = QtCore.Qt.Horizontal
        else:
            orientation = QtCore.Qt.Vertical
        self.setOrientation(orientation)
        splitter = self._make_splitter()
        splitter.show()
        self.addWidget(splitter)
        self.child_splitters.append(splitter)
        if clone not in base.clones:
            # code editors maintain the list of clones internally but some
            # other widgets (user widgets) might not.
            base.clones.append(clone)
        clone.original = base
        splitter._parent_splitter = self
        splitter.last_tab_closed.connect(self._on_last_child_tab_closed)
        splitter.tab_detached.connect(self.tab_detached.emit)
        if hasattr(base, '_icon'):
            icon = base._icon
        else:
            icon = None
        # same group of tab splitter (user might have a group for editors and
        # another group for consoles or whatever).
        splitter._uuid = self._uuid
        splitter.add_tab(clone, title=self.main_tab_widget.tabText(
            self.main_tab_widget.indexOf(widget)), icon=icon)
        self.setSizes([1 for i in range(self.count())])
        return splitter

    def has_children(self):
        """
        Checks if there are children tab widgets.

        :return: the child splitter that has tabs (truthy) if one exists,
            otherwise True/False depending on the main tab widget count.
        """
        for splitter in self.child_splitters:
            if splitter.has_children():
                return splitter
        return self.main_tab_widget.count() != 0

    def current_widget(self):
        """
        Returns a reference to the current widget, i.e. the last widget that
        got the focus.

        :return: QWidget
        """
        # _current is a weak reference; call it to get the widget (or None)
        if self._current:
            return self._current()
        return None

    def widgets(self, include_clones=False):
        """
        Recursively gets the list of widgets.

        :param include_clones: True to retrieve all tabs, including clones,
            otherwise only original widgets are returned.
        """
        widgets = []
        for i in range(self.main_tab_widget.count()):
            widget = self.main_tab_widget.widget(i)
            try:
                if widget.original is None or include_clones:
                    widgets.append(widget)
            except AttributeError:
                # widget has no original attribute: skip it
                pass
        for child in self.child_splitters:
            widgets += child.widgets(include_clones=include_clones)
        return widgets

    def _on_last_tab_closed(self, *args):
        """Hides or removes this splitter once its tab widget is empty."""
        has_children = self.has_children()
        if has_children:
            # hide the tab widget if there is not tabs
            if not self.main_tab_widget.count():
                self.main_tab_widget.hide()
        else:
            if self.root:
                # ensure root is visible when there are no children
                self.show()
                self.main_tab_widget.show()
            else:
                # hide ourselves (we don't have any other tabs or children)
                self._remove_from_parent()
        if not self.has_children():
            self.last_tab_closed.emit(self)

    def _on_focus_changed(self, old, new):
        """Tracks the focus to detect the active tab within our group."""
        try:
            # only react to widgets tagged with our group uuid
            result = new._uuid == self._uuid
        except (AttributeError, TypeError):
            pass
        else:
            if result:
                if new != self.current_widget():
                    self._on_current_changed(new)

    def _on_current_changed(self, new):
        """Stores a weak ref to the new current widget and notifies."""
        old = self.current_widget()
        self._current = weakref.ref(new)
        _logger().debug(
            'current tab changed (old=%r, new=%r)', old, new)
        self.current_changed.emit(new)
        return old, new

    def _remove_from_parent(self):
        """Detaches this (non-root) splitter from its parent splitter."""
        self.hide()
        self.setParent(None)
        self.main_tab_widget.hide()
        if not self.root:
            self._parent_splitter.child_splitters.remove(self)
            self._parent_splitter = None

    def _on_last_child_tab_closed(self):
        """Reacts to a child splitter losing its last tab."""
        if not self.has_children():
            self.last_tab_closed.emit(self)
            if self.root:
                self.show()
                self.main_tab_widget.show()
            else:
                self._remove_from_parent()

    def count(self):
        """
        Returns the number of widgets currently displayed (takes child splits
        into account).
        """
        c = self.main_tab_widget.count()
        for child in self.child_splitters:
            c += child.count()
        return c
class CodeEditTabWidget(BaseTabWidget):
    """
    Tab widget specialised to hold pyqode's code editor widgets.

    It will manage the saving of editors
    """
    #: directory used as the starting point of the save-as dialog
    default_directory = os.path.expanduser('~')
    #: emitted with the new dirty state whenever a tab's dirty flag changed
    dirty_changed = QtCore.Signal(bool)

    @classmethod
    @utils.memoized
    def get_filter(cls, mimetype):
        """
        Returns a filter string for the file dialog. The filter is based
        on the mime type.

        :param mimetype: path from which the filter must be derived.
        :return: Filter string
        """
        filters = ' '.join(
            ['*%s' % ext for ext in mimetypes.guess_all_extensions(mimetype)])
        return '%s (%s)' % (mimetype, filters)

    def addTab(self, widget, *args):
        """
        Re-implements addTab to connect to the dirty changed signal and setup
        some helper attributes.

        :param widget: widget to add
        :param args: optional addtional arguments (name and/or icon).
        """
        widget.dirty_changed.connect(self._on_dirty_changed)
        super(CodeEditTabWidget, self).addTab(widget, *args)

    def _on_dirty_changed(self, dirty):
        """
        Adds a star in front of a dirty tab and emits dirty_changed.
        """
        widget = self.sender()
        if isinstance(widget, CodeEdit):
            parent = widget.parent_tab_widget
            index = parent.indexOf(widget)
            title = parent.tabText(index)
            # strip any existing marker before (re)adding it
            title = title.replace('* ', '')
            if dirty:
                parent.setTabText(index, "* " + title)
            else:
                parent.setTabText(index, title)
            parent.dirty_changed.emit(dirty)

    @classmethod
    def _ask_path(cls, editor):
        """
        Shows a QFileDialog and ask for a save filename.

        :return: save filename
        """
        try:
            filter = cls.get_filter(editor.mimetypes[0])
        except IndexError:
            # editor declares no mimetypes: accept anything
            filter = 'All files (*)'
        return QtWidgets.QFileDialog.getSaveFileName(
            editor, 'Save file as', cls.default_directory, filter)

    @classmethod
    def save_widget(cls, editor):
        """
        Implements SplittableTabWidget.save_widget to actually save the
        code editor widget.

        If the editor.file.path is None or empty or the file does not exist,
        a save as dialog is shown (save as).

        :param editor: editor widget to save.
        :return: False if there was a problem saving the editor (e.g. the save
        as dialog has been canceled by the user, or a permission error,...)
        """
        if editor.original:
            # always save through the original editor (clones share its
            # document)
            editor = editor.original
        if editor.file.path is None or not os.path.exists(editor.file.path):
            # save as
            path, filter = cls._ask_path(editor)
            if not path:
                return False
            if not os.path.splitext(path)[1]:
                if len(editor.mimetypes):
                    # NOTE(review): guess_extension may return None, which
                    # would raise TypeError here — confirm the editor
                    # mimetypes always resolve to an extension.
                    path += mimetypes.guess_extension(editor.mimetypes[0])
            try:
                _logger().debug('saving %r as %r', editor.file._old_path, path)
            except AttributeError:
                _logger().debug('saving %r as %r', editor.file.path, path)
            editor.file._path = path
        else:
            path = editor.file.path
        editor.file.save(path)
        tw = editor.parent_tab_widget
        # drop the dirty marker from this tab and all clone tabs
        text = tw.tabText(tw.indexOf(editor)).replace('*', '')
        tw.setTabText(tw.indexOf(editor), text)
        for clone in [editor] + editor.clones:
            if clone != editor:
                tw = clone.parent_tab_widget
                tw.setTabText(tw.indexOf(clone), text)
        return True

    def _get_widget_path(self, editor):
        """Returns the editor's file path (empty for new documents)."""
        return editor.file.path
class DetachedEditorWindow(QtWidgets.QMainWindow):
    """
    Top level window used to host a detached editor tab widget.

    Provides a minimal "File" toolbar with a single Save action bound to
    Ctrl+S.
    """
    def __init__(self):
        super(DetachedEditorWindow, self).__init__()
        toolbar = QtWidgets.QToolBar('File')
        save_action = toolbar.addAction(
            QtGui.QIcon.fromTheme('document-save'), 'Save')
        save_action.setShortcut('Ctrl+S')
        save_action.triggered.connect(self._save)
        self.addToolBar(toolbar)

    def _save(self):
        """Saves the current editor of the central tab widget."""
        self.centralWidget().save_current()
class SplittableCodeEditTabWidget(SplittableTabWidget):
    """
    SplittableTabWidget specialised for CodeEdit and subclasses.
    Offers some convenience function for opening/saving files.

    The widget supports multiple type of code editors. Each editor type must
    be explicitly registered using ``register_editor``. If there is no
    registered editor for the given mime-type, ``fallback_editor`` is used.
    """
    #: Signal emitted when a tab bar is double clicked, this should work
    #: even with child tab bars
    tab_bar_double_clicked = QtCore.Signal()
    #: Signal emitted when a document has been saved.
    #: Parameters:
    # - save_file_path
    # - old_content
    document_saved = QtCore.Signal(str, str)
    #: uses a CodeEditTabWidget which is able to save code editor widgets.
    tab_widget_klass = CodeEditTabWidget
    #: the icon provider class to use when creating new document. Must be
    #: a subclass of QtWidgets.QFileIconProvider. By default, QFileIconProvider
    #: is used.
    icon_provider_klass = QtWidgets.QFileIconProvider
    #: Maps a mime-type with an editor type.
    #: This map is used to instantiate the proper editor type when
    #: opening/creating a document.
    editors = {mimetype: TextCodeEdit for mimetype in TextCodeEdit.mimetypes}
    #: Fallback editor is used in case not editors matching the requested
    #: mime-type could not be found in the editors map.
    #: By default the fallback_editor is a
    #: :class:`pyqode.core.widgets.GenericCodeEdit`
    fallback_editor = GenericCodeEdit
    #: signal emitted when the dirty_changed signal of the current editor
    #: has been emitted.
    dirty_changed = QtCore.Signal(bool)
    #: signal emitted when an editor has been created but just before the file
    #: is open. This give you a chance to change some editor settings that
    #: influence file opening.
    editor_created = QtCore.Signal(object)
    #: signal emitted when en editor has been created and the document has
    #: been sucessfully open
    document_opened = QtCore.Signal(object)
    #: Store the number of new documents created, for internal use.
    _new_count = 0

    def __init__(self, parent=None, root=True):
        """
        :param parent: parent widget.
        :param root: True for the top-level (root) splittable widget.
        """
        # Detached tabs get the editor-aware window (with a save toolbar).
        SplittableTabWidget.detached_window_klass = DetachedEditorWindow
        super(SplittableCodeEditTabWidget, self).__init__(parent, root)
        # Relay double clicks on the root tab bar through our own signal.
        self.main_tab_widget.tabBar().double_clicked.connect(
            self.tab_bar_double_clicked.emit)

    @classmethod
    def register_code_edit(cls, code_edit_class):
        """
        Register an additional code edit **class**

        .. warning: This method expect a class, not an instance!

        :param code_edit_class: code edit class to register.
        """
        if not inspect.isclass(code_edit_class):
            raise TypeError('must be a class, not an instance.')
        for mimetype in code_edit_class.mimetypes:
            if mimetype in cls.editors:
                _logger().warn('editor for mimetype already registered, '
                               'skipping')
            # NOTE: the new class overwrites the previous registration even
            # though the warning says "skipping".
            cls.editors[mimetype] = code_edit_class
        _logger().log(5, 'registered editors: %r', cls.editors)

    def save_current_as(self):
        """
        Save current widget as.

        :return: the (possibly new) file path of the current widget.
        """
        if not self.current_widget():
            return
        # Remember the current path so it can be restored if the save-as
        # dialog is cancelled or the save fails.
        mem = self.current_widget().file.path
        self.current_widget().file._path = None
        self.current_widget().file._old_path = mem
        CodeEditTabWidget.default_directory = os.path.dirname(mem)
        widget = self.current_widget()
        try:
            success = self.main_tab_widget.save_widget(widget)
        except Exception as e:
            QtWidgets.QMessageBox.warning(
                self, 'Failed to save file as',
                'Failed to save file as %s\nError=%s' % (
                    widget.file.path, str(e)))
            widget.file._path = mem
        else:
            if not success:
                # Dialog cancelled: restore the original path.
                widget.file._path = mem
            else:
                CodeEditTabWidget.default_directory = os.path.expanduser('~')
                self.document_saved.emit(widget.file.path, '')
                # rename tab
                tw = widget.parent_tab_widget
                tw.setTabText(tw.indexOf(widget),
                              os.path.split(widget.file.path)[1])
        return self.current_widget().file.path

    def save_current(self):
        """
        Save current editor. If the editor.file.path is None, a save as dialog
        will be shown.
        """
        if self.current_widget() is not None:
            editor = self.current_widget()
            self._save(editor)

    def _save(self, widget):
        """
        Save ``widget`` (if dirty) and emit document_saved with the previous
        on-disk content so listeners can diff old vs new.
        """
        path = widget.file.path
        try:
            encoding = widget.file.encoding
        except AttributeError:
            # not a code edit
            old_content = ''
        else:
            with open(path, encoding=encoding) as f:
                old_content = f.read()
        if widget.dirty:
            try:
                self.main_tab_widget.save_widget(widget)
            except Exception as e:
                QtWidgets.QMessageBox.warning(
                    self, 'Failed to save file',
                    'Failed to save file: %s\nError=%s' % (
                        widget.file.path, str(e)))
            else:
                self.document_saved.emit(path, old_content)

    def save_all(self):
        """
        Save all editors.
        """
        for w in self.widgets():
            try:
                self._save(w)
            except OSError:
                # Best effort: keep saving the remaining editors.
                _logger().exception('failed to save %s', w.file.path)

    def _create_code_edit(self, mimetype, *args, **kwargs):
        """
        Create a code edit instance based on the mimetype of the file to
        open/create.

        :type mimetype: mime type
        :param args: Positional arguments that must be forwarded to the editor
            widget constructor.
        :param kwargs: Keyworded arguments that must be forwarded to the editor
            widget constructor.
        :return: Code editor widget instance.
        """
        if mimetype in self.editors.keys():
            return self.editors[mimetype](
                *args, parent=self.main_tab_widget, **kwargs)
        # No registered editor for this mime-type: use the fallback.
        editor = self.fallback_editor(*args, parent=self.main_tab_widget,
                                      **kwargs)
        return editor

    def create_new_document(self, base_name='New Document',
                            extension='.txt', preferred_eol=0,
                            autodetect_eol=True, **kwargs):
        """
        Creates a new document.

        The document name will be ``base_name + count + extension``

        :param base_name: Base name of the document. An int will be appended.
        :param extension: Document extension (dotted)
        :param preferred_eol: Preferred EOL convention. This setting will be
            used for saving the document unless autodetect_eol is True.
        :param autodetect_eol: If true, automatically detects file EOL and
            use it instead of the preferred EOL when saving files.
        :param kwargs: Keyworded arguments that must be forwarded to the editor
            widget constructor.
        :return: Code editor widget instance.
        """
        # Class-level counter so the numbering is unique across all splits.
        SplittableCodeEditTabWidget._new_count += 1
        name = '%s%d%s' % (base_name, self._new_count, extension)
        tab = self._create_code_edit(
            self.guess_mimetype(name), **kwargs)
        self.editor_created.emit(tab)
        tab.file.autodetect_eol = autodetect_eol
        tab.file.preferred_eol = preferred_eol
        tab.setDocumentTitle(name)
        self.add_tab(tab, title=name, icon=self._icon(name))
        self.document_opened.emit(tab)
        return tab

    def guess_mimetype(self, path):
        """
        Guesses the mime-type of ``path``.

        CMakeLists.txt gets a dedicated mime-type; everything else goes
        through the ``mimetypes`` module (which may return None for
        unknown extensions).
        """
        if 'CMakeLists.txt' in path:
            return 'text/x-cmake-project'
        else:
            return mimetypes.guess_type(path)[0]

    @utils.with_wait_cursor
    def open_document(self, path, encoding=None, replace_tabs_by_spaces=True,
                      clean_trailing_whitespaces=True, safe_save=True,
                      restore_cursor_position=True, preferred_eol=0,
                      autodetect_eol=True, show_whitespaces=False, **kwargs):
        """
        Opens a document.

        :param path: Path of the document to open
        :param encoding: The encoding to use to open the file. Default is
            locale.getpreferredencoding().
        :param replace_tabs_by_spaces: Enable/Disable replace tabs by spaces.
            Default is true.
        :param clean_trailing_whitespaces: Enable/Disable clean trailing
            whitespaces (on save). Default is True.
        :param safe_save: If True, the file is saved to a temporary file first.
            If the save went fine, the temporary file is renamed to the final
            filename.
        :param restore_cursor_position: If true, last cursor position will be
            restored. Default is True.
        :param preferred_eol: Preferred EOL convention. This setting will be
            used for saving the document unless autodetect_eol is True.
        :param autodetect_eol: If true, automatically detects file EOL and
            use it instead of the preferred EOL when saving files.
        :param show_whitespaces: True to show white spaces.
        :param kwargs: addtional keyword args to pass to the widget
            constructor.
        :return: The created code editor
        """
        original_path = os.path.normpath(path)
        # Normalised (case-folded on Windows) path used for comparisons only.
        path = os.path.normcase(original_path)
        paths = []
        widgets = []
        for w in self.widgets(include_clones=False):
            if os.path.exists(w.file.path):
                # skip new docs
                widgets.append(w)
                paths.append(os.path.normcase(w.file.path))
        if path in paths:
            # Document already open: just focus its tab.
            i = paths.index(path)
            w = widgets[i]
            tw = w.parent_tab_widget
            tw.setCurrentIndex(tw.indexOf(w))
            return w
        else:
            assert os.path.exists(original_path)
            name = os.path.split(original_path)[1]
            use_parent_dir = False
            # Disambiguate tabs that share the same file name by prefixing
            # the parent directory name on both tabs.
            for tab in self.widgets():
                title = QtCore.QFileInfo(tab.file.path).fileName()
                if title == name:
                    tw = tab.parent_tab_widget
                    new_name = os.path.join(os.path.split(os.path.dirname(
                        tab.file.path))[1], title)
                    tw.setTabText(tw.indexOf(tab), new_name)
                    use_parent_dir = True
            if use_parent_dir:
                name = os.path.join(
                    os.path.split(os.path.dirname(path))[1], name)
                use_parent_dir = False
            tab = self._create_code_edit(self.guess_mimetype(path), **kwargs)
            # Give listeners a chance to tweak settings before the file
            # is actually loaded.
            self.editor_created.emit(tab)
            # Keep the exact open settings so the document can be reloaded
            # identically later.
            tab.open_parameters = {
                'encoding': encoding,
                'replace_tabs_by_spaces': replace_tabs_by_spaces,
                'clean_trailing_whitespaces': clean_trailing_whitespaces,
                'safe_save': safe_save,
                'restore_cursor_position': restore_cursor_position,
                'preferred_eol': preferred_eol,
                'autodetect_eol': autodetect_eol,
                'show_whitespaces': show_whitespaces,
                'kwargs': kwargs
            }
            tab.file.clean_trailing_whitespaces = clean_trailing_whitespaces
            tab.file.safe_save = safe_save
            tab.file.restore_cursor = restore_cursor_position
            tab.file.replace_tabs_by_spaces = replace_tabs_by_spaces
            tab.file.autodetect_eol = autodetect_eol
            tab.file.preferred_eol = preferred_eol
            tab.show_whitespaces = show_whitespaces
            try:
                tab.file.open(original_path, encoding=encoding)
            except Exception as e:
                # Opening failed: dispose of the half-initialised editor
                # before propagating the error.
                tab.close()
                tab.setParent(None)
                tab.deleteLater()
                raise e
            else:
                tab.setDocumentTitle(name)
                tab.file._path = original_path
                icon = self._icon(path)
                self.add_tab(tab, title=name, icon=icon)
                self.document_opened.emit(tab)
                return tab

    def close_document(self, path):
        """
        Closes a text document.

        :param path: Path of the document to close.
        """
        # Collect first, then close: removing tabs while iterating the
        # widget list would skip entries.
        to_close = []
        for widget in self.widgets(include_clones=True):
            if widget.file.path == path:
                to_close.append(widget)
        for widget in to_close:
            tw = widget.parent_tab_widget
            tw.remove_tab(tw.indexOf(widget))

    def rename_document(self, old_path, new_path):
        """
        Renames an already opened document (this will not rename the file,
        just update the file path and tab title).

        Use that function to update a file that has been renamed externally.

        :param old_path: old path (path of the widget to rename with
            ``new_path``
        :param new_path: new path that will be used to rename the tab.
        """
        to_rename = []
        title = os.path.split(new_path)[1]
        for widget in self.widgets(include_clones=True):
            if widget.file.path == old_path:
                to_rename.append(widget)
        for widget in to_rename:
            tw = widget.parent_tab_widget
            widget.file._path = new_path
            tw.setTabText(tw.indexOf(widget), title)

    def closeEvent(self, event):
        """
        Saves dirty editors on close and cancel the event if the user choosed
        to continue to work.

        :param event: close event
        """
        dirty_widgets = []
        for w in self.widgets(include_clones=False):
            if w.dirty:
                dirty_widgets.append(w)
        # Build the list shown in the "unsaved files" dialog: real paths for
        # saved-before documents, tab titles for brand new ones.
        filenames = []
        for w in dirty_widgets:
            if os.path.exists(w.file.path):
                filenames.append(w.file.path)
            else:
                filenames.append(w.documentTitle())
        if len(filenames) == 0:
            self.close_all()
            return
        dlg = DlgUnsavedFiles(self, files=filenames)
        if dlg.exec_() == dlg.Accepted:
            if not dlg.discarded:
                for item in dlg.listWidget.selectedItems():
                    filename = item.text()
                    widget = None
                    for widget in dirty_widgets:
                        if widget.file.path == filename or \
                                widget.documentTitle() == filename:
                            break
                    tw = widget.parent_tab_widget
                    tw.save_widget(widget)
                    tw.remove_tab(tw.indexOf(widget))
            self.close_all()
        else:
            # User cancelled: keep the application open.
            event.ignore()

    def close_all(self):
        """Close every tab (including clones) across all splits."""
        for w in self.widgets(include_clones=True):
            tw = w.parent_tab_widget
            tw.remove_tab(tw.indexOf(w))

    def _icon(self, path):
        """
        Return the icon for ``path``; falls back to the generic file icon
        when the path does not exist (e.g. new unsaved documents).
        """
        provider = self.icon_provider_klass()
        if not os.path.exists(path):
            return provider.icon(provider.File)
        return provider.icon(QtCore.QFileInfo(path))

    def _on_current_changed(self, new):
        """
        Extends the base implementation to relay the current editor's
        dirty_changed signal through this widget's dirty_changed signal.
        """
        old, new = super(
            SplittableCodeEditTabWidget, self)._on_current_changed(new)
        if new:
            new.dirty_changed.connect(self.dirty_changed.emit)
            self.dirty_changed.emit(new.dirty)
        return old, new

    def split(self, widget, orientation):
        """
        Extends the base implementation to propagate tab bar double clicks
        from the new splitter through tab_bar_double_clicked.
        """
        splitter = super(SplittableCodeEditTabWidget, self).split(
            widget, orientation)
        if splitter:
            splitter.tab_bar_double_clicked.connect(
                self.tab_bar_double_clicked.emit)
|
"""
Merge the counts and abstracts. We take the abstracts file from partie, and the counts generated
by SearchSRA and create a single file with both all counts and average counts
"""
import os
import sys
import argparse
import gzip
__author__ = 'Rob Edwards'
class colors(object):
    """Terminal color support: maps symbolic names to ANSI SGR escape codes."""

    # Built with dict() keyword syntax; several names alias the same code
    # (e.g. HEADER/PINK, WHITE/ENDC).
    color = dict(
        HEADER='\033[95m',
        OKBLUE='\033[94m',
        OKGREEN='\033[92m',
        WARNING='\033[93m',
        FAIL='\033[91m',
        ENDC='\033[0m',
        BOLD='\033[1m',
        UNDERLINE='\033[4m',
        PINK='\033[95m',
        BLUE='\033[94m',
        GREEN='\033[92m',
        YELLOW='\033[93m',
        RED='\033[91m',
        WHITE='\033[0m',
    )
def message(msg, color):
    """
    Print a message to stderr using color

    :param msg: the message to print
    :param color: the color to use (a key of colors.color, case-insensitive)
    :return: nothing
    :raises ValueError: if the color name is not known
    """
    color = color.upper()
    if color not in colors.color:
        # Bug fix: the original raised ColorNotFoundError, which is not
        # defined anywhere and would itself crash with a NameError.
        raise ValueError(f"There is no color {color}")
    if os.fstat(0) == os.fstat(1):
        # stderr is not redirected
        sys.stderr.write(f"{colors.color[color]}{msg}{colors.color['ENDC']}\n")
    else:
        sys.stderr.write(f"{msg}\n")
def counts_per_sample(counts_dir, verbose=False):
    """
    Read the counts files and return a dict of the number of counts per sample

    We expect each counts file to have three columns [sra_id, contig_id, count].
    This is the output from process.smk using idxstats. Header lines starting
    with 'Sample' and zero counts are skipped.

    :param str counts_dir: the directory of counts files
    :param bool verbose: more outputs
    :return: the counts per sample, and a set of the contigs
    :rtype: dict[str, dict[str, int]], set
    """
    counts = {}
    all_contigs = set()
    if verbose:
        # Bug fix: the f-string had no placeholder, so the directory name
        # was never actually shown.
        message(f"Reading {counts_dir}", "GREEN")
    for fname in os.listdir(counts_dir):
        # Bug fix: the open file handle used to shadow the directory-listing
        # loop variable `f`; use distinct names.
        with open(os.path.join(counts_dir, fname), 'r') as fh:
            for line in fh:
                if line.startswith('Sample'):
                    continue
                p = line.strip().split("\t")
                if int(p[2]) == 0:
                    continue
                if p[0] not in counts:
                    counts[p[0]] = {}
                counts[p[0]][p[1]] = int(p[2])
                all_contigs.add(p[1])
    return counts, all_contigs
def read_abstracts(abstractsf, reads_per_sample_file, average_per_proj, counts, all_contigs, verbose=False):
    """
    Read the abstracts file, and write the reads per sample and the average per project

    Each abstracts line must be tab separated with six columns:
    [project, title, abstract, annotation, comment, runs] where runs is a
    comma separated list of SRA run ids.

    :param str abstractsf: abstracts file (optionally gzip compressed)
    :param str reads_per_sample_file: output path for per-run counts
    :param str average_per_proj: output path for per-project average counts
    :param dict[str, dict[str, int]] counts: counts per run, per contig
    :param all_contigs: the contig ids (set or sequence)
    :param bool verbose: more outputs
    """
    if verbose:
        # Bug fix: the string used {} placeholders but was missing the f-prefix.
        message(f"Received {len(counts)} counts and {len(all_contigs)} contigs", "PINK")
    if verbose:
        message("Reading the abstracts and writing the output", "GREEN")
    # Sort for a deterministic contig column order in both outputs.
    if isinstance(all_contigs, set):
        all_contigs = sorted(all_contigs)
    if abstractsf.endswith('.gz'):
        abst = gzip.open(abstractsf, 'rt')
    else:
        abst = open(abstractsf, 'r')
    with open(reads_per_sample_file, 'w') as reads_out, open(average_per_proj, 'w') as average_out:
        average_out.write("Project\tTitle\tAbstract\tAnnotation\tComment")
        for c in all_contigs:
            average_out.write(f"\t{c}")
        average_out.write("\n")
        for l in abst:
            # project title abstract annotation comment runs
            p = l.strip().split("\t")
            if len(p) != 6:
                if verbose:
                    message(f"Malformed Abstracts (len p: {len(p)}: {l}", "RED")
                continue
            project = p[0]
            # Bug fix: the runs column is p[5]; the original indexed p[6],
            # which raised an IndexError on every valid six-column line.
            runids = p[5].split(',')
            run_counts = {c: [] for c in all_contigs}
            for r in runids:
                if r not in counts:
                    message(f"Run {r} not found", "RED")
                    continue
                for c in all_contigs:
                    if c in counts[r]:
                        run_counts[c].append(counts[r][c])
                        # Bug fix: write the single count for this contig,
                        # not the whole per-run dict ({counts[r]}).
                        reads_out.write(f"{project}\t{r}\t{c}\t{counts[r][c]}\n")
            average_out.write("\t".join(p[0:5]))
            num = len(runids)
            for c in all_contigs:
                average_out.write(f"\t{sum(run_counts[c])/num}")
            average_out.write("\n")
    abst.close()
if __name__ == "__main__":
    # Command line: -d counts directory, -a abstracts file, -r per-sample
    # reads output, -p per-project averages output, -v verbose.
    parser = argparse.ArgumentParser(description=' ')
    parser.add_argument('-d', required=True,
                        help='directory of SRA Run counts per sample')
    parser.add_argument('-a', required=True,
                        help='abstracts file that includes annotations and comments')
    parser.add_argument('-r', required=True,
                        help='file to write reads per sample to')
    parser.add_argument('-p', required=True,
                        help='file to write average counts per project to')
    parser.add_argument('-v', action='store_true', help='verbose output')
    opts = parser.parse_args()
    sample_counts, contigs = counts_per_sample(opts.d, opts.v)
    read_abstracts(opts.a, opts.r, opts.p, sample_counts, contigs, opts.v)
Check for malformed abstract lines and accept both the four-column and six-column abstract formats
"""
Merge the counts and abstracts. We take the abstracts file from partie, and the counts generated
by SearchSRA and create a single file with both all counts and average counts
"""
import os
import sys
import argparse
import gzip
__author__ = 'Rob Edwards'
class colors(object):
    """Terminal color support: maps symbolic names to ANSI SGR escape codes."""

    # Built with dict() keyword syntax; several names alias the same code
    # (e.g. HEADER/PINK, WHITE/ENDC).
    color = dict(
        HEADER='\033[95m',
        OKBLUE='\033[94m',
        OKGREEN='\033[92m',
        WARNING='\033[93m',
        FAIL='\033[91m',
        ENDC='\033[0m',
        BOLD='\033[1m',
        UNDERLINE='\033[4m',
        PINK='\033[95m',
        BLUE='\033[94m',
        GREEN='\033[92m',
        YELLOW='\033[93m',
        RED='\033[91m',
        WHITE='\033[0m',
    )
def message(msg, color):
    """
    Print a message to stderr using color

    :param msg: the message to print
    :param color: the color to use (a key of colors.color, case-insensitive)
    :return: nothing
    :raises ValueError: if the color name is not known
    """
    color = color.upper()
    if color not in colors.color:
        # Bug fix: the original raised ColorNotFoundError, which is not
        # defined anywhere and would itself crash with a NameError.
        raise ValueError(f"There is no color {color}")
    if os.fstat(0) == os.fstat(1):
        # stderr is not redirected
        sys.stderr.write(f"{colors.color[color]}{msg}{colors.color['ENDC']}\n")
    else:
        sys.stderr.write(f"{msg}\n")
def counts_per_sample(counts_dir, verbose=False):
    """
    Read the counts files and return a dict of the number of counts per sample

    We expect each counts file to have three columns [sra_id, contig_id, count].
    This is the output from process.smk using idxstats. Header lines starting
    with 'Sample' and zero counts are skipped.

    :param str counts_dir: the directory of counts files
    :param bool verbose: more outputs
    :return: the counts per sample, and a set of the contigs
    :rtype: dict[str, dict[str, int]], set
    """
    counts = {}
    all_contigs = set()
    if verbose:
        # Bug fix: the f-string had no placeholder, so the directory name
        # was never actually shown.
        message(f"Reading {counts_dir}", "GREEN")
    for fname in os.listdir(counts_dir):
        # Bug fix: the open file handle used to shadow the directory-listing
        # loop variable `f`; use distinct names.
        with open(os.path.join(counts_dir, fname), 'r') as fh:
            for line in fh:
                if line.startswith('Sample'):
                    continue
                p = line.strip().split("\t")
                if int(p[2]) == 0:
                    continue
                if p[0] not in counts:
                    counts[p[0]] = {}
                counts[p[0]][p[1]] = int(p[2])
                all_contigs.add(p[1])
    return counts, all_contigs
def read_abstracts(abstractsf, reads_per_sample_file, average_per_proj, counts, all_contigs, verbose=False):
    """
    Read the abstracts file, and write the reads per sample and the average per project

    Abstract lines are tab separated and either four columns
    [project, title, abstract, runs] or six columns
    [project, title, abstract, annotation, comment, runs], where runs is a
    comma separated list of SRA run ids. Other line shapes are skipped.

    :param str abstractsf: abstracts file (optionally gzip compressed)
    :param str reads_per_sample_file: output path for per-run counts
    :param str average_per_proj: output path for per-project average counts
    :param dict[str, dict[str, int]] counts: counts per run, per contig
    :param all_contigs: the contig ids (set or sequence)
    :param bool verbose: more outputs
    """
    if verbose:
        # Bug fix: the string used {} placeholders but was missing the f-prefix.
        message(f"Received {len(counts)} counts and {len(all_contigs)} contigs", "PINK")
    if verbose:
        message("Reading the abstracts and writing the output", "GREEN")
    # Sort for a deterministic contig column order in both outputs.
    if isinstance(all_contigs, set):
        all_contigs = sorted(all_contigs)
    if abstractsf.endswith('.gz'):
        abst = gzip.open(abstractsf, 'rt')
    else:
        abst = open(abstractsf, 'r')
    with open(reads_per_sample_file, 'w') as reads_out, open(average_per_proj, 'w') as average_out:
        average_out.write("Project\tTitle\tAbstract\tAnnotation\tComment")
        for c in all_contigs:
            average_out.write(f"\t{c}")
        average_out.write("\n")
        for l in abst:
            p = l.strip().split("\t")
            # Accept the short (no annotation/comment) and the full format.
            if len(p) == 4:
                project, title, abstract, runs = p
                annotation = ""
                comment = ""
            elif len(p) == 6:
                project, title, abstract, annotation, comment, runs = p
            else:
                if verbose:
                    message(f"Malformed Abstracts (len p: {len(p)}: {l}", "RED")
                continue
            runids = runs.split(',')
            run_counts = {c: [] for c in all_contigs}
            for r in runids:
                if r not in counts:
                    message(f"Run {r} not found", "RED")
                    continue
                for c in all_contigs:
                    if c in counts[r]:
                        run_counts[c].append(counts[r][c])
                        # Bug fix: write the single count for this contig,
                        # not the whole per-run dict ({counts[r]}).
                        reads_out.write(f"{project}\t{r}\t{c}\t{counts[r][c]}\n")
            average_out.write("\t".join([project, title, abstract, annotation, comment]))
            num = len(runids)
            for c in all_contigs:
                average_out.write(f"\t{sum(run_counts[c])/num}")
            average_out.write("\n")
    abst.close()
if __name__ == "__main__":
    # Command line: -d counts directory, -a abstracts file, -r per-sample
    # reads output, -p per-project averages output, -v verbose.
    parser = argparse.ArgumentParser(description=' ')
    parser.add_argument('-d', required=True,
                        help='directory of SRA Run counts per sample')
    parser.add_argument('-a', required=True,
                        help='abstracts file that includes annotations and comments')
    parser.add_argument('-r', required=True,
                        help='file to write reads per sample to')
    parser.add_argument('-p', required=True,
                        help='file to write average counts per project to')
    parser.add_argument('-v', action='store_true', help='verbose output')
    opts = parser.parse_args()
    sample_counts, contigs = counts_per_sample(opts.d, opts.v)
    read_abstracts(opts.a, opts.r, opts.p, sample_counts, contigs, opts.v)
|
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from os import listdir, makedirs
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from packaging.version import Version, parse
from transformers.file_utils import ModelOutput, is_tf_available, is_torch_available
from transformers.pipelines import Pipeline, pipeline
from transformers.tokenization_utils import BatchEncoding
# This is the minimal required version to
# support some ONNX Runtime features
ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")

# Pipeline task names this converter knows how to export to ONNX; used as
# the `choices` of the --pipeline CLI argument.
SUPPORTED_PIPELINES = [
    "feature-extraction",
    "ner",
    "sentiment-analysis",
    "fill-mask",
    "question-answering",
    "text-generation",
    "translation_en_to_fr",
    "translation_en_to_de",
    "translation_en_to_ro",
]
class OnnxConverterArgumentParser(ArgumentParser):
    """
    Wraps all the script arguments supported to export transformers models to ONNX IR
    """

    def __init__(self):
        super().__init__("ONNX Converter")
        # Pipeline task to export; must be one of SUPPORTED_PIPELINES.
        self.add_argument(
            "--pipeline",
            type=str,
            choices=SUPPORTED_PIPELINES,
            default="feature-extraction",
        )
        self.add_argument(
            "--model",
            type=str,
            required=True,
            help="Model's id or path (ex: bert-base-cased)",
        )
        # Defaults to the model id/path when omitted (see load_graph_from_args).
        self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: bert-base-cased)")
        self.add_argument(
            "--framework",
            type=str,
            choices=["pt", "tf"],
            help="Framework for loading the model",
        )
        self.add_argument("--opset", type=int, default=11, help="ONNX opset to use")
        self.add_argument(
            "--check-loading",
            action="store_true",
            help="Check ONNX is able to load the model",
        )
        self.add_argument(
            "--use-external-format",
            action="store_true",
            help="Allow exporting model >= than 2Gb",
        )
        self.add_argument(
            "--quantize",
            action="store_true",
            help="Quantize the neural network to be run with int8",
        )
        # Positional argument: filepath where the ONNX graph will be written.
        self.add_argument("output")
def generate_identified_filename(filename: Path, identifier: str) -> Path:
"""
Append a string-identifier at the end (before the extension, if any) to the provided filepath
Args:
filename: pathlib.Path The actual path object we would like to add an identifier suffix
identifier: The suffix to add
Returns: String with concatenated identifier at the end of the filename
"""
return filename.parent.joinpath(filename.stem + identifier).with_suffix(filename.suffix)
def check_onnxruntime_requirements(minimum_version: Version):
    """
    Check onnxruntime is installed and if the installed version match is recent enough

    Args:
        minimum_version: the minimal onnxruntime version required

    Raises:
        ImportError: If onnxruntime is not installed or too old version is found
    """
    # Bug fix: the import is separated from the version check. In the
    # original, the too-old-version ImportError was raised *inside* the try
    # block and immediately swallowed by `except ImportError`, so the user
    # always saw the misleading "not installed" message.
    try:
        import onnxruntime
    except ImportError:
        raise ImportError(
            "onnxruntime doesn't seem to be currently installed. "
            "Please install the onnxruntime by running `pip install onnxruntime`"
            " and relaunch the conversion."
        )

    # Parse the version of the installed onnxruntime
    ort_version = parse(onnxruntime.__version__)

    # Bug fix: compare against the `minimum_version` parameter (which the
    # error message already reports) instead of ignoring it in favour of the
    # ORT_QUANTIZE_MINIMUM_VERSION module constant.
    if ort_version < minimum_version:
        raise ImportError(
            f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
            f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
            f"Please update onnxruntime by running `pip install --upgrade onnxruntime`"
        )
def ensure_valid_input(model, tokens, input_names):
    """
    Ensure inputs are presented in the correct order, without any None

    Args:
        model: The model used to forward the input data
        tokens: BatchEncoding holding the input data
        input_names: The name of the inputs

    Returns: Tuple
    """
    print("Ensuring inputs are in correct order")

    # Walk the forward() signature (skipping "self") so the exported inputs
    # follow the positional order the model expects.
    forward_params = model.forward.__code__.co_varnames
    ordered_input_names = []
    model_args = []
    for param in forward_params[1:]:
        if param not in input_names:
            # Stop at the first parameter we have no tensor for: everything
            # after it could not be passed positionally anyway.
            print(f"{param} is not present in the generated input list.")
            break
        ordered_input_names.append(param)
        model_args.append(tokens[param])

    print(f"Generated inputs order: {ordered_input_names}")
    return ordered_input_names, tuple(model_args)
def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
    """
    Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model

    Args:
        nlp: The pipeline object holding the model to be exported
        framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)

    Returns:
        - List of the inferred input variable names
        - List of the inferred output variable names
        - Dictionary with input/output variables names as key and shape tensor as value
        - a BatchEncoding reference which was used to infer all the above information
    """

    def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
        # Containers (e.g. grouped past key/values) are mapped element-wise.
        if isinstance(tensor, (tuple, list)):
            return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]
        else:
            # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)
            # NOTE(review): this raises IndexError if no axis has size 1 —
            # relies on the single-sentence probe input above; confirm.
            axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
            if is_input:
                if len(tensor.shape) == 2:
                    axes[1] = "sequence"
                else:
                    raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
            else:
                # Any output axis whose size equals the probe sequence length
                # is treated as a dynamic "sequence" axis.
                seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
                axes.update({dim: "sequence" for dim in seq_axes})

        print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
        return axes

    # Run a single probe sentence through tokenizer + model to observe the
    # concrete tensor shapes.
    tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
    seq_len = tokens.input_ids.shape[-1]
    outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
    if isinstance(outputs, ModelOutput):
        outputs = outputs.to_tuple()
    if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)

    # Generate input names & axes
    input_vars = list(tokens.keys())
    input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}

    # flatten potentially grouped outputs (past for gpt2, attentions)
    outputs_flat = []
    for output in outputs:
        if isinstance(output, (tuple, list)):
            outputs_flat.extend(output)
        else:
            outputs_flat.append(output)

    # Generate output names & axes
    output_names = [f"output_{i}" for i in range(len(outputs_flat))]
    output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}

    # Create the aggregated axes representation
    dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
    return input_vars, output_names, dynamic_axes, tokens
def load_graph_from_args(
    pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
) -> Pipeline:
    """
    Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model)

    Args:
        pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
        framework: The actual model to convert the pipeline from ("pt" or "tf")
        model: The model name which will be loaded by the pipeline
        tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value

    Returns: Pipeline object
    """
    # Fall back to the model identifier when no tokenizer is given.
    tokenizer = model if tokenizer is None else tokenizer

    # Fail early if the requested framework is not importable.
    framework_checks = {
        "pt": (is_torch_available,
               "Cannot convert because PyTorch is not installed. Please install torch first."),
        "tf": (is_tf_available,
               "Cannot convert because TF is not installed. Please install tensorflow first."),
    }
    if framework in framework_checks:
        available, error_message = framework_checks[framework]
        if not available():
            raise Exception(error_message)

    print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")

    # Allocate tokenizer and model
    return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
    """
    Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR)

    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model
        use_external_format: Split the model definition from its parameters to allow model bigger than 2GB

    Returns:
    """
    if not is_torch_available():
        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")

    import torch
    from torch.onnx import export

    print(f"Using framework PyTorch: {torch.__version__}")

    with torch.no_grad():
        # Probe the model once to discover input/output names and dynamic axes.
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
        # Reorder the tokenized inputs to match forward()'s signature.
        ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)

        # NOTE(review): use_external_data_format and enable_onnx_checker were
        # removed from torch.onnx.export in newer PyTorch releases — confirm
        # the supported torch version still accepts these keyword arguments.
        export(
            nlp.model,
            model_args,
            f=output.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """
    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)

    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model

    Notes: TensorFlow cannot export model bigger than 2GB due to internal constraint from TensorFlow
    """
    if not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")

    try:
        import tensorflow as tf
        from tf2onnx import __version__ as t2ov
        from tf2onnx import convert_keras, save_model

        print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}")

        # Build: probe shapes/axes (only `tokens` is used for the forward below)
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")

        # Forward: run the model once so Keras traces the graph before export
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())
    except ImportError as e:
        # tensorflow and/or tf2onnx missing: report which module failed.
        raise Exception(f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.")
def convert(
    framework: str,
    model: str,
    output: Path,
    opset: int,
    tokenizer: Optional[str] = None,
    use_external_format: bool = False,
    pipeline_name: str = "feature-extraction",
    **model_kwargs
):
    """
    Convert the pipeline object to the ONNX Intermediate Representation (IR) format

    Args:
        framework: The framework the pipeline is backed by ("pt" or "tf")
        model: The name of the model to load for the pipeline
        output: The path where the ONNX graph will be stored
        opset: The actual version of the ONNX operator set to use
        tokenizer: The name of the model to load for the pipeline, default to the model's name if not provided
        use_external_format:
            Split the model definition from its parameters to allow model bigger than 2GB (PyTorch only)
        pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)
        model_kwargs: Keyword arguments to be forwarded to the model constructor

    Returns:
    """
    print(f"ONNX opset version set to: {opset}")

    # Instantiate tokenizer + model for the requested task/framework.
    nlp = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)

    parent_dir = output.parent
    if parent_dir.exists():
        # Refuse to overwrite anything: the destination folder must be empty.
        if len(listdir(parent_dir.as_posix())) > 0:
            raise Exception(f"Folder {parent_dir.as_posix()} is not empty, aborting conversion")
    else:
        print(f"Creating folder {parent_dir}")
        makedirs(parent_dir.as_posix())

    # Dispatch to the framework-specific exporter.
    if framework == "pt":
        convert_pytorch(nlp, opset, output, use_external_format)
    else:
        convert_tensorflow(nlp, opset, output)
def optimize(onnx_model_path: Path) -> Path:
    """
    Load the model at the specified path and let onnxruntime look at transformations on the graph to enable all the
    optimizations possible.

    Args:
        onnx_model_path: filepath where the model binary description is stored

    Returns: Path where the optimized model binary description has been saved
    """
    from onnxruntime import InferenceSession, SessionOptions

    # Generate model name with suffix "optimized"
    opt_model_path = generate_identified_filename(onnx_model_path, "-optimized")
    sess_option = SessionOptions()
    # onnxruntime writes the optimized graph to this path as a side effect of creating the session below.
    sess_option.optimized_model_filepath = opt_model_path.as_posix()
    _ = InferenceSession(onnx_model_path.as_posix(), sess_option)

    print(f"Optimized model has been written at {opt_model_path}: \N{heavy check mark}")
    print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")

    return opt_model_path
def quantize(onnx_model_path: Path) -> Path:
    """
    Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPU.

    Args:
        onnx_model_path: Path to location the exported ONNX model is stored

    Returns: The Path generated for the quantized model
    """
    import onnx
    from onnxruntime.quantization import QuantizationMode
    # Alias the import so it does not shadow this function's own name inside its body.
    from onnxruntime.quantization import quantize as ort_quantize

    onnx_model = onnx.load(onnx_model_path.as_posix())

    # Discussed with @yufenglee from ONNX runtime, this will be address in the next release of onnxruntime
    print(
        "As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint.\n"
        "This limitation will be removed in the next release of onnxruntime."
    )

    quantized_model = ort_quantize(
        model=onnx_model,
        quantization_mode=QuantizationMode.IntegerOps,
        force_fusions=True,
        symmetric_weight=True,
    )

    # Append "-quantized" at the end of the model's name
    quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")

    # Save first, then report: previously the success message was printed *before* the save,
    # so a failing onnx.save_model still claimed the file had been written.
    onnx.save_model(quantized_model, quantized_model_path.as_posix())
    print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")

    return quantized_model_path
def verify(path: Path):
    """Best-effort sanity check: try to load the ONNX model at `path` with onnxruntime (CPU provider) and report the outcome."""
    from onnxruntime import InferenceSession, SessionOptions
    from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException

    print(f"Checking ONNX model loading from: {path} ...")
    try:
        onnx_options = SessionOptions()
        _ = InferenceSession(path.as_posix(), onnx_options, providers=["CPUExecutionProvider"])
        print(f"Model {path} correctly loaded: \N{heavy check mark}")
    except RuntimeException as re:
        # Failures are reported, not re-raised: this check must not abort the surrounding script.
        print(f"Error while loading the model {re}: \N{heavy ballot x}")
if __name__ == "__main__":
    parser = OnnxConverterArgumentParser()
    args = parser.parse_args()

    # Make sure output is absolute path
    args.output = Path(args.output).absolute()

    try:
        print("\n====== Converting model to ONNX ======")
        # Convert
        convert(
            args.framework,
            args.model,
            args.output,
            args.opset,
            args.tokenizer,
            args.use_external_format,
            args.pipeline,
        )

        if args.quantize:
            # Ensure requirements for quantization on onnxruntime is met
            check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)

            # onnxruntime optimizations doesn't provide the same level of performances on TensorFlow than PyTorch
            if args.framework == "tf":
                print(
                    "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n"
                    "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n"
                    "\t For more information, please refer to the onnxruntime documentation:\n"
                    "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n"
                )

            print("\n====== Optimizing ONNX model ======")

            # Quantization works best when using the optimized version of the model
            args.optimized_output = optimize(args.output)

            # Do the quantization on the right graph
            args.quantized_output = quantize(args.optimized_output)

        # And verify
        if args.check_loading:
            print("\n====== Check exported ONNX model(s) ======")
            verify(args.output)

            # optimized/quantized outputs only exist on args when --quantize was requested
            if hasattr(args, "optimized_output"):
                verify(args.optimized_output)

            if hasattr(args, "quantized_output"):
                verify(args.quantized_output)
    except Exception as e:
        # Report any failure and exit non-zero so callers/scripts can detect it.
        print(f"Error while converting the model: {e}")
        exit(1)
Prepare deprecated ONNX exporter for torch v1.11 (#15388)
* Prepare deprecated ONNX exporter for PyTorch v1.11
* Add deprecation warning
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
from argparse import ArgumentParser
from os import listdir, makedirs
from pathlib import Path
from typing import Dict, List, Optional, Tuple
from packaging.version import Version, parse
from transformers.file_utils import ModelOutput, is_tf_available, is_torch_available
from transformers.pipelines import Pipeline, pipeline
from transformers.tokenization_utils import BatchEncoding
# This is the minimal required version to
# support some ONNX Runtime features
ORT_QUANTIZE_MINIMUM_VERSION = parse("1.4.0")

# Pipeline tasks this converter knows how to export; used to validate the --pipeline CLI choice.
SUPPORTED_PIPELINES = [
    "feature-extraction",
    "ner",
    "sentiment-analysis",
    "fill-mask",
    "question-answering",
    "text-generation",
    "translation_en_to_fr",
    "translation_en_to_de",
    "translation_en_to_ro",
]
class OnnxConverterArgumentParser(ArgumentParser):
    """
    Wraps all the script arguments supported to export transformers models to ONNX IR
    """

    def __init__(self):
        super().__init__("ONNX Converter")

        self.add_argument(
            "--pipeline",
            type=str,
            choices=SUPPORTED_PIPELINES,
            default="feature-extraction",
        )
        self.add_argument(
            "--model",
            type=str,
            required=True,
            help="Model's id or path (ex: bert-base-cased)",
        )
        self.add_argument("--tokenizer", type=str, help="Tokenizer's id or path (ex: bert-base-cased)")
        self.add_argument(
            "--framework",
            type=str,
            choices=["pt", "tf"],
            help="Framework for loading the model",
        )
        self.add_argument("--opset", type=int, default=11, help="ONNX opset to use")
        self.add_argument(
            "--check-loading",
            action="store_true",
            help="Check ONNX is able to load the model",
        )
        self.add_argument(
            "--use-external-format",
            action="store_true",
            help="Allow exporting model >= than 2Gb",
        )
        self.add_argument(
            "--quantize",
            action="store_true",
            help="Quantize the neural network to be run with int8",
        )
        # Positional argument: filepath where the exported ONNX graph will be written.
        self.add_argument("output")
def generate_identified_filename(filename: Path, identifier: str) -> Path:
    """
    Insert a string identifier between the file's stem and its extension (if any).

    Args:
        filename: pathlib.Path The actual path object we would like to add an identifier suffix
        identifier: The suffix to add

    Returns: Path with the identifier appended to the stem, extension preserved
    """
    # e.g. ("model.onnx", "-quantized") -> "model-quantized.onnx"
    identified_stem = filename.stem + identifier
    return (filename.parent / identified_stem).with_suffix(filename.suffix)
def check_onnxruntime_requirements(minimum_version: Version):
    """
    Check onnxruntime is installed and if the installed version is recent enough.

    Args:
        minimum_version: the minimum onnxruntime version required to enable all the conversion options

    Raises:
        ImportError: If onnxruntime is not installed or too old version is found
    """
    try:
        import onnxruntime
    except ImportError:
        raise ImportError(
            "onnxruntime doesn't seem to be currently installed. "
            "Please install the onnxruntime by running `pip install onnxruntime`"
            " and relaunch the conversion."
        )

    # Parse the version of the installed onnxruntime
    ort_version = parse(onnxruntime.__version__)

    # Compare against the requested minimum. Two fixes versus the previous code:
    # 1. the check used the module constant ORT_QUANTIZE_MINIMUM_VERSION instead of the
    #    `minimum_version` parameter, making the parameter dead;
    # 2. the "too old" ImportError was raised *inside* the try block and therefore swallowed
    #    by `except ImportError`, mis-reporting an outdated install as a missing one.
    if ort_version < minimum_version:
        raise ImportError(
            f"We found an older version of onnxruntime ({onnxruntime.__version__}) "
            f"but we require onnxruntime to be >= {minimum_version} to enable all the conversions options.\n"
            f"Please update onnxruntime by running `pip install --upgrade onnxruntime`"
        )
def ensure_valid_input(model, tokens, input_names):
    """
    Ensure inputs are presented in the correct order, without any None.

    Args:
        model: The model used to forward the input data
        tokens: BatchEncoding holding the input data
        input_names: The name of the inputs

    Returns: Tuple of (ordered input names, tuple of the corresponding input values)
    """
    print("Ensuring inputs are in correct order")

    # The parameter order of model.forward dictates the export input order; drop "self".
    forward_parameters = model.forward.__code__.co_varnames[1:]

    ordered_input_names = []
    model_args = []
    for parameter in forward_parameters:
        if parameter not in input_names:
            # Stop at the first parameter we cannot feed: anything after it would be misaligned.
            print(f"{parameter} is not present in the generated input list.")
            break
        ordered_input_names.append(parameter)
        model_args.append(tokens[parameter])

    print(f"Generated inputs order: {ordered_input_names}")
    return ordered_input_names, tuple(model_args)
def infer_shapes(nlp: Pipeline, framework: str) -> Tuple[List[str], List[str], Dict, BatchEncoding]:
    """
    Attempt to infer the static vs dynamic axes for each input and output tensors for a specific model

    Args:
        nlp: The pipeline object holding the model to be exported
        framework: The framework identifier to dispatch to the correct inference scheme (pt/tf)

    Returns:

        - List of the inferred input variable names
        - List of the inferred output variable names
        - Dictionary with input/output variables names as key and shape tensor as value
        - a BatchEncoding reference which was used to infer all the above information
    """

    def build_shape_dict(name: str, tensor, is_input: bool, seq_len: int):
        # Recurse into grouped tensors (tuples/lists, e.g. past key/values) and build one axes dict per leaf.
        if isinstance(tensor, (tuple, list)):
            return [build_shape_dict(name, t, is_input, seq_len) for t in tensor]

        else:
            # Let's assume batch is the first axis with only 1 element (~~ might not be always true ...)
            # NOTE(review): this indexes [0] of the size-1 axes — raises IndexError if no axis has size 1;
            # it relies on the single-sentence sample batch below. Confirm before reusing elsewhere.
            axes = {[axis for axis, numel in enumerate(tensor.shape) if numel == 1][0]: "batch"}
            if is_input:
                if len(tensor.shape) == 2:
                    axes[1] = "sequence"
                else:
                    raise ValueError(f"Unable to infer tensor axes ({len(tensor.shape)})")
            else:
                # Any output axis whose size equals the sample sequence length is marked dynamic "sequence".
                seq_axes = [dim for dim, shape in enumerate(tensor.shape) if shape == seq_len]
                axes.update({dim: "sequence" for dim in seq_axes})

            print(f"Found {'input' if is_input else 'output'} {name} with shape: {axes}")
            return axes

    # Encode a fixed sample sentence and run one forward pass to observe concrete shapes.
    tokens = nlp.tokenizer("This is a sample output", return_tensors=framework)
    seq_len = tokens.input_ids.shape[-1]
    outputs = nlp.model(**tokens) if framework == "pt" else nlp.model(tokens)
    if isinstance(outputs, ModelOutput):
        outputs = outputs.to_tuple()
    if not isinstance(outputs, (list, tuple)):
        outputs = (outputs,)

    # Generate input names & axes
    input_vars = list(tokens.keys())
    input_dynamic_axes = {k: build_shape_dict(k, v, True, seq_len) for k, v in tokens.items()}

    # flatten potentially grouped outputs (past for gpt2, attentions)
    outputs_flat = []
    for output in outputs:
        if isinstance(output, (tuple, list)):
            outputs_flat.extend(output)
        else:
            outputs_flat.append(output)

    # Generate output names & axes
    output_names = [f"output_{i}" for i in range(len(outputs_flat))]
    output_dynamic_axes = {k: build_shape_dict(k, v, False, seq_len) for k, v in zip(output_names, outputs_flat)}

    # Create the aggregated axes representation
    dynamic_axes = dict(input_dynamic_axes, **output_dynamic_axes)
    return input_vars, output_names, dynamic_axes, tokens
def load_graph_from_args(
    pipeline_name: str, framework: str, model: str, tokenizer: Optional[str] = None, **models_kwargs
) -> Pipeline:
    """
    Convert the set of arguments provided through the CLI to an actual pipeline reference (tokenizer + model)

    Args:
        pipeline_name: The kind of pipeline to use (ner, question-answering, etc.)
        framework: The framework to back the pipeline with ("pt" or "tf")
        model: The model name which will be loaded by the pipeline
        tokenizer: The tokenizer name which will be loaded by the pipeline, default to the model's value
        models_kwargs: Keyword arguments forwarded to the model constructor

    Returns: Pipeline object

    Raises:
        Exception: if the requested framework (PyTorch / TensorFlow) is not installed
    """
    # If no tokenizer provided
    if tokenizer is None:
        tokenizer = model

    # Check the wanted framework is available
    if framework == "pt" and not is_torch_available():
        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")
    if framework == "tf" and not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print(f"Loading pipeline (model: {model}, tokenizer: {tokenizer})")

    # Allocate tokenizer and model
    return pipeline(pipeline_name, model=model, tokenizer=tokenizer, framework=framework, model_kwargs=models_kwargs)
def convert_pytorch(nlp: Pipeline, opset: int, output: Path, use_external_format: bool):
    """
    Export a PyTorch backed pipeline to ONNX Intermediate Representation (IR)

    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model
        use_external_format: Split the model definition from its parameters to allow model bigger than 2GB

    Raises:
        Exception: if PyTorch is not installed
    """
    if not is_torch_available():
        raise Exception("Cannot convert because PyTorch is not installed. Please install torch first.")

    import torch
    from torch.onnx import export

    print(f"Using framework PyTorch: {torch.__version__}")

    # Trace/export under no_grad: no gradients are needed for graph export.
    with torch.no_grad():
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "pt")
        ordered_input_names, model_args = ensure_valid_input(nlp.model, tokens, input_names)

        # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
        # so we check the torch version for backwards compatibility
        if parse(torch.__version__) <= parse("1.10.99"):
            export(
                nlp.model,
                model_args,
                f=output.as_posix(),
                input_names=ordered_input_names,
                output_names=output_names,
                dynamic_axes=dynamic_axes,
                do_constant_folding=True,
                use_external_data_format=use_external_format,
                enable_onnx_checker=True,
                opset_version=opset,
            )
        else:
            # torch >= 1.11 no longer accepts the deprecated arguments, so they are omitted here.
            export(
                nlp.model,
                model_args,
                f=output.as_posix(),
                input_names=ordered_input_names,
                output_names=output_names,
                dynamic_axes=dynamic_axes,
                do_constant_folding=True,
                opset_version=opset,
            )
def convert_tensorflow(nlp: Pipeline, opset: int, output: Path):
    """
    Export a TensorFlow backed pipeline to ONNX Intermediate Representation (IR)

    Args:
        nlp: The pipeline to be exported
        opset: The actual version of the ONNX operator set to use
        output: Path where will be stored the generated ONNX model

    Notes: TensorFlow cannot export model bigger than 2GB due to internal constraint from TensorFlow

    Raises:
        Exception: if TensorFlow or tf2onnx is not installed
    """
    if not is_tf_available():
        raise Exception("Cannot convert because TF is not installed. Please install tensorflow first.")

    print("/!\\ Please note TensorFlow doesn't support exporting model > 2Gb /!\\")

    try:
        import tensorflow as tf
        from tf2onnx import __version__ as t2ov
        from tf2onnx import convert_keras, save_model

        print(f"Using framework TensorFlow: {tf.version.VERSION}, tf2onnx: {t2ov}")

        # Build: infer input/output names and dynamic axes from a sample forward pass
        input_names, output_names, dynamic_axes, tokens = infer_shapes(nlp, "tf")

        # Forward: run the sample batch through the model once before conversion
        nlp.model.predict(tokens.data)
        onnx_model = convert_keras(nlp.model, nlp.model.name, target_opset=opset)
        save_model(onnx_model, output.as_posix())

    except ImportError as e:
        # tf2onnx (or one of its deps) is missing — surface which module failed to import.
        raise Exception(f"Cannot import {e.name} required to convert TF model to ONNX. Please install {e.name} first.")
def convert(
    framework: str,
    model: str,
    output: Path,
    opset: int,
    tokenizer: Optional[str] = None,
    use_external_format: bool = False,
    pipeline_name: str = "feature-extraction",
    **model_kwargs
):
    """
    Convert the pipeline object to the ONNX Intermediate Representation (IR) format.

    Args:
        framework: The framework the pipeline is backed by ("pt" or "tf")
        model: The name of the model to load for the pipeline
        output: The path where the ONNX graph will be stored
        opset: The actual version of the ONNX operator set to use
        tokenizer: The name of the tokenizer to load for the pipeline, defaults to the model's name
        use_external_format:
            Split the model definition from its parameters to allow model bigger than 2GB (PyTorch only)
        pipeline_name: The kind of pipeline to instantiate (ner, question-answering, etc.)
        model_kwargs: Keyword arguments to be forwarded to the model constructor
    """
    warnings.warn(
        "The `transformers.convert_graph_to_onnx` package is deprecated and will be removed in version 5 of Transformers",
        FutureWarning,
    )
    print(f"ONNX opset version set to: {opset}")

    # Instantiate the pipeline (tokenizer + model) from the CLI-style arguments.
    pipe = load_graph_from_args(pipeline_name, framework, model, tokenizer, **model_kwargs)

    # Create the destination folder if needed; refuse to overwrite a non-empty one.
    target_dir = output.parent
    if not target_dir.exists():
        print(f"Creating folder {target_dir}")
        makedirs(target_dir.as_posix())
    elif len(listdir(target_dir.as_posix())) > 0:
        raise Exception(f"Folder {target_dir.as_posix()} is not empty, aborting conversion")

    # Dispatch the export to the framework-specific converter.
    if framework == "pt":
        convert_pytorch(pipe, opset, output, use_external_format)
    else:
        convert_tensorflow(pipe, opset, output)
def optimize(onnx_model_path: Path) -> Path:
    """
    Run onnxruntime's graph optimizations over the model and persist the optimized graph.

    Args:
        onnx_model_path: filepath where the model binary description is stored

    Returns: Path where the optimized model binary description has been saved
    """
    from onnxruntime import InferenceSession, SessionOptions

    # Destination name: "<stem>-optimized<suffix>"
    optimized_path = generate_identified_filename(onnx_model_path, "-optimized")

    options = SessionOptions()
    options.optimized_model_filepath = optimized_path.as_posix()

    # Constructing the session triggers the optimization pass and the write to the path above.
    InferenceSession(onnx_model_path.as_posix(), options)

    print(f"Optimized model has been written at {optimized_path}: \N{heavy check mark}")
    print("/!\\ Optimized model contains hardware specific operators which might not be portable. /!\\")

    return optimized_path
def quantize(onnx_model_path: Path) -> Path:
    """
    Quantize the weights of the model from float32 to int8 to allow very efficient inference on modern CPU.

    Args:
        onnx_model_path: Path to location the exported ONNX model is stored

    Returns: The Path generated for the quantized model
    """
    import onnx
    # Alias the import so it does not shadow this function's own name inside its body.
    from onnxruntime.quantization import QuantizationMode
    from onnxruntime.quantization import quantize as ort_quantize

    onnx_model = onnx.load(onnx_model_path.as_posix())

    # Discussed with @yufenglee from ONNX runtime, this will be address in the next release of onnxruntime
    print(
        "As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint.\n"
        "This limitation will be removed in the next release of onnxruntime."
    )

    quantized_model = ort_quantize(
        model=onnx_model,
        quantization_mode=QuantizationMode.IntegerOps,
        force_fusions=True,
        symmetric_weight=True,
    )

    # Append "-quantized" at the end of the model's name
    quantized_model_path = generate_identified_filename(onnx_model_path, "-quantized")

    # Save first, then report: previously the success message was printed *before* the save,
    # so a failing onnx.save_model still claimed the file had been written.
    onnx.save_model(quantized_model, quantized_model_path.as_posix())
    print(f"Quantized model has been written at {quantized_model_path}: \N{heavy check mark}")

    return quantized_model_path
def verify(path: Path):
    """Best-effort sanity check: attempt to load the ONNX model at `path` with the CPU execution provider."""
    from onnxruntime import InferenceSession, SessionOptions
    from onnxruntime.capi.onnxruntime_pybind11_state import RuntimeException

    print(f"Checking ONNX model loading from: {path} ...")
    try:
        session_options = SessionOptions()
        InferenceSession(path.as_posix(), session_options, providers=["CPUExecutionProvider"])
    except RuntimeException as re:
        # Failures are reported, not re-raised: this check must not abort the surrounding script.
        print(f"Error while loading the model {re}: \N{heavy ballot x}")
    else:
        print(f"Model {path} correctly loaded: \N{heavy check mark}")
if __name__ == "__main__":
    parser = OnnxConverterArgumentParser()
    args = parser.parse_args()

    # Make sure output is absolute path
    args.output = Path(args.output).absolute()

    try:
        print("\n====== Converting model to ONNX ======")
        # Convert
        convert(
            args.framework,
            args.model,
            args.output,
            args.opset,
            args.tokenizer,
            args.use_external_format,
            args.pipeline,
        )

        if args.quantize:
            # Ensure requirements for quantization on onnxruntime is met
            check_onnxruntime_requirements(ORT_QUANTIZE_MINIMUM_VERSION)

            # onnxruntime optimizations doesn't provide the same level of performances on TensorFlow than PyTorch
            if args.framework == "tf":
                print(
                    "\t Using TensorFlow might not provide the same optimization level compared to PyTorch.\n"
                    "\t For TensorFlow users you can try optimizing the model directly through onnxruntime_tools.\n"
                    "\t For more information, please refer to the onnxruntime documentation:\n"
                    "\t\thttps://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers\n"
                )

            print("\n====== Optimizing ONNX model ======")

            # Quantization works best when using the optimized version of the model
            args.optimized_output = optimize(args.output)

            # Do the quantization on the right graph
            args.quantized_output = quantize(args.optimized_output)

        # And verify
        if args.check_loading:
            print("\n====== Check exported ONNX model(s) ======")
            verify(args.output)

            # optimized/quantized outputs only exist on args when --quantize was requested
            if hasattr(args, "optimized_output"):
                verify(args.optimized_output)

            if hasattr(args, "quantized_output"):
                verify(args.quantized_output)
    except Exception as e:
        # Report any failure and exit non-zero so callers/scripts can detect it.
        print(f"Error while converting the model: {e}")
        exit(1)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@sinh
@@cosh
@@asinh
@@acosh
@@atanh
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@atan2
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@bincount
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space

# `arg_max`/`arg_min` come in via the wildcard import of gen_math_ops above; wrap them
# with a deprecation notice pointing users at `argmax`/`argmin` defined below.
arg_max = deprecated(None, "Use `argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecated(None, "Use `argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_max.__doc__
          .replace("dimensions", "axes")
          .replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # Reconcile the deprecated `dimension` argument with `axis`, then delegate.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if dimension is not None:
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_min.__doc__
          .replace("dimensions", "axes")
          .replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # Reconcile the deprecated `dimension` argument with `axis`, then delegate.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if dimension is not None:
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
  r"""Computes the absolute value of a tensor.

  Returns the element-wise absolute value of `x`. For complex input
  \\(a + bj\\) the result is the magnitude \\( \sqrt{a^2 + b^2}\\),
  returned with the corresponding real dtype. For example:

  ```
  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
  tf.complex_abs(x) ==> [5.25594902, 6.60492229]
  ```

  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` the same size and type as `x` with absolute
    values. For `complex64` or `complex128` input, the returned values are of
    type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      # Dense path: dispatch on dtype to the complex or real kernel.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype in (dtypes.complex64, dtypes.complex128):
        return gen_math_ops._complex_abs(x, Tout=x.dtype.real_dtype, name=name)
      return gen_math_ops._abs(x, name=name)

    # Sparse path: apply the kernel to the stored values, keep the structure.
    if x.values.dtype in (dtypes.complex64, dtypes.complex128):
      x_abs = gen_math_ops._complex_abs(
          x.values, Tout=x.values.dtype.real_dtype, name=name)
    else:
      x_abs = gen_math_ops._abs(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=x_abs, dense_shape=x.dense_shape)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Thin private wrapper over the generated bucketize op; see `gen_math_ops._bucketize`."""
  return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    # Python 3 (and `from __future__ import division`) true division.
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    # Python 2 classic division.
    return _div_python2(self.x, y, self.name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`."""
  if name is None:
    return x / y
  # Operator overloads cannot carry an op name, so delegate through a small
  # wrapper class that remembers the preferred name for the created op.
  return DivideDelegateWithName(x, name) / y
def multiply(x, y, name=None):
  # Public alias for the generated `Mul` op; its docstring is patched below.
  return gen_math_ops._mul(x, y, name)


# Reuse the generated op's docstring, renaming the op to the public symbol.
multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias kept for backwards compatibility; forwards to the generated op.
  return gen_math_ops._mul(x, y, name)


# Prepend the generated op's docstring to the deprecation notice installed by the decorator.
_mul.__doc__ = (gen_math_ops._mul.__doc__ +
                ("" if _mul.__doc__ is None else _mul.__doc__))
def subtract(x, y, name=None):
  # Public alias for the generated `Sub` op; its docstring is patched below.
  return gen_math_ops._sub(x, y, name)


# Reuse the generated op's docstring, renaming the op to the public symbol.
subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias kept for backwards compatibility; forwards to the generated op.
  return gen_math_ops._sub(x, y, name)


# Prepend the generated op's docstring to the deprecation notice installed by the decorator.
_sub.__doc__ = (gen_math_ops._sub.__doc__ +
                ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._neg(x, name=name)
    # Sparse input: negate the stored values, keep the sparsity structure.
    negated_values = gen_math_ops._neg(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=negated_values, dense_shape=x.dense_shape)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecated("2016-12-30",
            "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  Deprecated alias for `negative`; I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.

  Zero is returned for NaN inputs.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to numpy.sign except for the behavior for input values of NaN.
  @end_compatibility
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sign(x, name=name)
    # Sparse input: take the sign of the stored values, keep the structure.
    signed_values = gen_math_ops.sign(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=signed_values, dense_shape=x.dense_shape)
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.square(x, name=name)
    # Sparse input: square the stored values, keep the sparsity structure.
    squared_values = gen_math_ops.square(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=squared_values, dense_shape=x.dense_shape)
def sqrt(x, name=None):
  r"""Computes the square root of `x` element-wise, \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sqrt(x, name=name)
    # Sparse case: take the root of the stored values only; zeros stay zero.
    root_values = gen_math_ops.sqrt(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=root_values, dense_shape=x.dense_shape)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.erf(x, name=name)
    # Sparse case: apply erf to the stored values only. NOTE(review):
    # erf(0) == 0, so the implicit zeros are unaffected.
    erf_values = gen_math_ops.erf(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=erf_values, dense_shape=x.dense_shape)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  # For IndexedSlices, scale the values and keep the index structure intact
  # -- far cheaper than densifying before the multiply.
  if isinstance(x, ops.IndexedSlices):
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
     or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
     or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  # Pass both inputs to name_scope (the original passed only [x]); every
  # other binary op here includes both operands so graph placement and
  # conversion see `y` as well.
  with ops.name_scope(name, "Pow", [x, y]) as name:
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part, this operation returns
  complex numbers elementwise of the form \\(a + bj\\), where *a* is the
  `real` part and *b* is the `imag` part. The two inputs must have the same
  shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If the inputs are not both `float32` or both `float64`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # The output dtype is determined by the (matching) input dtypes.
    if real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      out_dtype = dtypes.complex64
    elif real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      out_dtype = dtypes.complex128
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=out_dtype, name=name)
def real(input, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor
  of type `float32` or `float64` that is the real part of each element in
  `input`. All elements in `input` must be complex numbers of the form
  \\(a + bj\\), where *a* is the real part returned by this operation and *b*
  is the imaginary part.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    target_dtype = input.dtype.real_dtype
    # For an already-real input no op is needed.
    if input.dtype.base_dtype != target_dtype:
      return gen_math_ops.real(input, Tout=target_dtype, name=name)
    return input
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor
  of type `float32` or `float64` that is the imaginary part of each element in
  `input`. All elements in `input` must be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part
  returned by this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    # The output dtype is the real counterpart of the complex input dtype.
    out_dtype = input.dtype.real_dtype
    return gen_math_ops.imag(input, Tout=out_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even, also known as banker's rounding. If you want to round
  according to the current system rounding mode use tf::cint.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded"; return them untouched.
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Recurse on the values; the sparsity structure is dtype-independent.
      values_cast = cast(x.values, base_type, name=name)
      return sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    # Could call ops.convert_to_tensor(x, dtype=dtype, ...) here instead,
    # but that would permit conversions cast() deliberately rejects, such
    # as casting numbers to strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == base_type:
      # No-op cast: hand back the input unchanged.
      return x
    return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  the target type's representable range is narrower than the input's, the
  value is clamped first so the cast cannot over- or underflow.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    # Clamp only where the source dtype can represent values outside the
    # target range; this also covers casting to unsigned types.
    if value.dtype.min < dtype.min:
      lower = ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min")
      value = gen_math_ops.maximum(value, lower)
    if value.dtype.max > dtype.max:
      upper = ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max")
      value = gen_math_ops.minimum(value, upper)
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  # Thin convenience wrapper around `cast`.
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  # Thin convenience wrapper around `cast`.
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  # Thin convenience wrapper around `cast`.
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  # Thin convenience wrapper around `cast`.
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  # Thin convenience wrapper around `cast`.
  return cast(x, dtypes.bfloat16, name=name)
# Unary operator overloads on ops.Tensor: `-x` and `abs(x)` dispatch to the
# generated math kernels.
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  Installs `__<op_name>__` (and, for dense tensors, `__r<op_name>__`)
  overloads on `clazz_object` that convert the non-tensor operand and then
  delegate to `func`.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """

  def binary_op_wrapper(x, y):
    # Forward overload for dense tensors (`x <op> y` with `x` a Tensor).
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          # Returning NotImplemented lets Python fall back to y's
          # reflected operator.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)

  def binary_op_wrapper_sparse(sp_x, y):
    # Forward overload for SparseTensor LHS: `func` computes new values
    # only; indices and dense_shape are reused unchanged.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(sp_x.indices,
                                        func(
                                            sp_x.indices,
                                            sp_x.values,
                                            sp_x.dense_shape,
                                            y,
                                            name=name), sp_x.dense_shape)

  def r_binary_op_wrapper(y, x):
    # Reflected overload (`x <op> y` where only `y` is a Tensor); note the
    # swapped argument order so `func` still sees (lhs, rhs).
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor gets only the forward overload; no reflected variant.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integer operands are promoted to a floating dtype before dividing (small
# ints to float32, 32/64-bit ints to float64); floating and complex operands
# divide in their own dtype. A dtype absent from this table is invalid for
# true division.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      promoted_dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    # Integer operands are promoted to floating point before the divide.
    if promoted_dtype is not None:
      sp_values = cast(sp_values, promoted_dtype)
      y = cast(y, promoted_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  # Internal: Python 3 `/` semantics -- promote integers to floats, then
  # perform real division.
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      promoted_dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if promoted_dtype is not None:
      x = cast(x, promoted_dtype)
      y = cast(y, promoted_dtype)
    return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics. Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    # Python 2 `/`: real division for floating/complex inputs, flooring
    # division for integers.
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops._real_div(x, y, name=name)
    return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first. This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`. If you want integer division that rounds
  down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for
  `int32` and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  # Public alias for the Python 3 division helper.
  return _truediv_python3(x, y, name)
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python division operator semantics.

  This function divides `x` and `y`, forcing Python 2.7 semantics. That is,
  if one of `x` or `y` is a float, then the result will be a float.
  Otherwise, the output will be an integer type. Flooring semantics are used
  for integer division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  # Public alias for the Python 2 division helper.
  return _div_python2(x, y, name)
# TODO(aselle): This should be removed
# `mod` aliases the floor-mod kernel (same alias as `floormod` below).
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))`
  for floating point arguments so that the result is always an integer
  (though possibly an integer represented as floating point). This op is
  generated by `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same
  type as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative
    integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    # Delegate straight to the generated flooring-division kernel.
    return gen_math_ops._floor_div(x, y, name=name)
# Public aliases for the generated division and modulus kernels.
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    # Dense * Dense.
    return gen_math_ops._mul(x, y, name=name)
  # Dense * Sparse: multiply the stored values and keep the sparsity pattern.
  assert isinstance(y, sparse_tensor.SparseTensor)
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# Register arithmetic overloads: first SparseTensor (dispatching to the
# sparse-dense cwise kernels), then dense Tensor.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  # Composed from and/or/not; ops are created in the same order as before
  # (or, and, not, and) so graph construction is unchanged.
  either = gen_math_ops.logical_or(x, y)
  not_both = gen_math_ops.logical_not(gen_math_ops.logical_and(x, y))
  return gen_math_ops.logical_and(either, not_both, name=name)
# Boolean operator overloads (&, |, ^) for boolean tensors.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")

# Comparison operator overloads (<, <=, >, >=).
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`. The dtype of the
  resulting tensor is inferred from the inputs unless it is provided
  explicitly. Like the Python builtin `range`, `start` defaults to 0, so
  that `range(n) = range(0, n)`.

  For example:

  ```python
  # 'start' is 3, 'limit' is 18, 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]

  # 'start' is 3, 'limit' is 1, 'delta' is -0.5
  tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]

  # 'limit' is 5
  tf.range(limit) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
      `limit` is not None; otherwise, acts as range limit and first entry
      defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If
      None, defaults to the value of `start` while the first entry of the
      range defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`.
      Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    # Single-argument form: range(limit).
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # No explicit dtype: pick the "widest" dtype among the arguments
      # according to this promotion order, then cast all three to it.
      hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
      ]
      args = [start, limit, delta]
      assert all(arg.dtype in hierarchy for arg in args)
      inferred = max([arg.dtype for arg in args], key=hierarchy.index)
      start = cast(start, inferred)
      limit = cast(limit, inferred)
      delta = cast(delta, inferred)
    return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
  """Returns the reduction dimensions: `axis` if given, else all of `x`'s.

  Resolves the deprecated `reduction_indices` alias into `axis`; when
  neither is supplied, returns a tensor equivalent to range(0, rank(x)).

  Args:
    x: The `Tensor` or `SparseTensor` being reduced.
    axis: The dimensions to reduce, or None to reduce all dimensions.
    reduction_indices: Deprecated alias for `axis`; must not be combined
      with it.

  Returns:
    The reduction dimensions (a tensor, or `axis` as given).

  Raises:
    ValueError: If both `axis` and `reduction_indices` are specified.
  """
  # TODO(aselle): Remove this after deprecation
  if reduction_indices is not None:
    if axis is not None:
      # Fixed: the original message had mismatched quotes ("axis'").
      raise ValueError("Can't specify both 'axis' and 'reduction_indices'.")
    axis = reduction_indices
  if axis is not None:
    return axis
  else:
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
      return constant_op.constant(
          np.arange(x.get_shape().ndims), dtype=dtypes.int32)
    if (isinstance(x, sparse_tensor.SparseTensor) and
        x.dense_shape.get_shape().is_fully_defined()):
      rank = x.dense_shape.get_shape()[0].value  # sparse.dense_shape is 1-D.
      return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
    # Otherwise, we rely on Range and Rank to do the right thing at run-time.
    return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, dims, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    zero = input_tensor.dtype.as_numpy_dtype()
    # Build a 0/1 indicator in int64 (int64 reduction happens on GPU), sum
    # it, then cast to the requested output dtype.
    indicator = to_int64(gen_math_ops.not_equal(input_tensor, zero))
    counts = reduce_sum(
        indicator,
        axis=axis,
        keep_dims=keep_dims,
        reduction_indices=reduction_indices)
    return cast(counts, dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, dims, keep_dims, name=name)
def reduce_prod(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Computes the product of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._prod(input_tensor, dims, keep_dims, name=name)
def reduce_min(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the minimum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._min(input_tensor, dims, keep_dims, name=name)
def reduce_max(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the maximum of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._max(input_tensor, dims, keep_dims, name=name)
def reduce_all(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical and" of elements across dimensions of a tensor.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry
  in `axis`; if `keep_dims` is true, the reduced dimensions are retained
  with length 1. If `axis` has no entries, all dimensions are reduced and a
  tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True,  True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._all(input_tensor, dims, keep_dims, name=name)
def reduce_any(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Computes the "logical or" of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions listed in `axis`. Each
  reduced dimension is dropped unless `keep_dims` is true, in which case it
  is retained with size 1. With `axis=None` every dimension is reduced and
  a tensor with a single element is returned.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` (the default) reduces them all.
      Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  # Resolve the (possibly deprecated) axis specification once, then defer
  # to the generated kernel.
  dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._any(input_tensor, dims, keep_dims, name=name)
def reduce_logsumexp(input_tensor,
                     axis=None,
                     keep_dims=False,
                     name=None,
                     reduction_indices=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0]]
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Subtracting the (gradient-stopped) per-slice max makes exp() safe: the
    # largest exponent becomes exactly 0, so no overflow can occur.
    my_max = array_ops.stop_gradient(
        reduce_max(
            input_tensor,
            axis=axis,
            reduction_indices=reduction_indices,
            keep_dims=True))
    result = gen_math_ops.log(
        reduce_sum(
            gen_math_ops.exp(input_tensor - my_max),
            axis,
            keep_dims=True,
            reduction_indices=reduction_indices)) + my_max
    if not keep_dims:
      # BUGFIX: honor the deprecated `reduction_indices` alias here as well.
      # Previously, a caller passing only `reduction_indices` left `axis` as
      # None, and squeeze(result, None) would drop *every* size-1 dimension
      # of the result -- including dimensions that were never reduced.
      if axis is None:
        axis = reduction_indices
      if isinstance(axis, int):
        axis = [axis]
      result = array_ops.squeeze(result, axis)
    return result
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  Sums the main diagonal of each inner-most matrix of `x`. For an input of
  rank `k` with shape `[I, J, K, ..., L, M, N]` the result has rank `k-2`
  with shape `[I, J, K, ..., L]`, where
  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`.

  For example:

  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5

  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15

  # 'x' is [[[1,2,3],
  #          [4,5,6],
  #          [7,8,9]],
  #         [[-1,-2,-3],
  #          [-4,-5,-6],
  #          [-7,-8,-9]]]
  tf.trace(x) ==> [15,-15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # trace == sum along the last axis of the extracted diagonals.
    diagonals = array_ops.matrix_diag_part(x)
    return reduce_sum(diagonals, [-1], name=name)
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication arguments,
  and any further outer dimensions match.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]

  # 3-D tensor `a`
  a = tf.constant(np.arange(1, 13, dtype=np.int32),
                  shape=[2, 2, 3]) => [[[ 1.  2.  3.]
                                        [ 4.  5.  6.]],
                                       [[ 7.  8.  9.]
                                        [10. 11. 12.]]]

  # 3-D tensor `b`
  b = tf.constant(np.arange(13, 25, dtype=np.int32),
                  shape=[2, 3, 2]) => [[[13. 14.]
                                        [15. 16.]
                                        [17. 18.]],
                                       [[19. 20.]
                                        [21. 22.]
                                        [23. 24.]]]
  c = tf.matmul(a, b) => [[[ 94 100]
                           [229 244]],
                          [[508 532]
                           [697 730]]]

  # Since python >= 3.5 the @ operator is supported (see PEP 465).
  # In TensorFlow, it simply calls the `tf.matmul()` function, so the
  # following lines are equivalent:
  d = a @ b @ [[10.], [11.]]
  d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
    the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
    for all indices i, j.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
      are both set to True.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # transpose and adjoint are mutually exclusive per operand: adjoint
    # already implies a transpose.
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_shape = a.get_shape()
    b_shape = b.get_shape()
    # Dense inputs of rank > 2 (or unknown rank) dispatch to the batched
    # kernel; everything else falls through to the rank-2 paths below.
    if (not a_is_sparse and not b_is_sparse) and (
        (a_shape.ndims is None or a_shape.ndims > 2) and
        (b_shape.ndims is None or b_shape.ndims > 2)):
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      return gen_math_ops._batch_mat_mul(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)

    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True

    # sparse_matmul is only implemented for these dtypes.
    sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
    use_sparse_matmul = (a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types and
                         (a_is_sparse or b_is_sparse))
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      # matmul currently doesn't handle bfloat16 inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
    else:
      return gen_math_ops._mat_mul(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
# Register matmul() as the implementation of the overloaded `@` operator
# on Tensors (PEP 465).
_OverrideBinaryOperatorHelper(matmul, "matmul")

# Public alias for the generated sparse-matmul kernel used by matmul() above.
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The contracted dimension is the rows of `a` when transposed, otherwise
  # its columns.
  k = int(a_shape[0] if transpose_a else a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Every output element costs one multiply and one add per contraction step.
  return ops.OpStats("flops", 2 * k * output_count)
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    # Already block-sparse; nothing to do.
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(1 for o in outputs if o.indices.dtype == dtypes.int32)
  # All-int32 or all-int64 indices are already homogeneous.
  if num_int32 == 0 or num_int32 == len(outputs):
    return outputs
  # Mixed index dtypes: widen every int32 index vector to int64.
  return [
      ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape)
      if o.indices.dtype == dtypes.int32 else o for o in outputs
  ]
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  err_msg = ("inputs must be a list of at least one Tensor with the "
             "same dtype and shape")
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError(err_msg)
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError(err_msg)
  if len(inputs) == 1:
    # Single input: identity, renamed only when a name was requested.
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs depend
  on trainable variables. Please use `tf.add_n` for such cases.

  Aside from differentiability, `tf.accumulate_n` performs the same operation as
  `tf.add_n`, but does not wait for all of its inputs to be ready before
  beginning to sum. This can save memory if inputs are ready at different times,
  since minimum temporary storage is proportional to the output size rather than
  the inputs size.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  # Refine the declared/unknown shape with whatever the inputs carry.
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # Accumulate into a temporary variable so each input can be added as soon
    # as it is ready, instead of waiting for all inputs like add_n does.
    var = gen_state_ops._temporary_variable(
        shape=tensor_shape.vector(0), dtype=tensor_dtype)
    with ops.colocate_with(var):
      # merge() picks whichever input is available first; zeros_like of it
      # gives a correctly typed zero accumulator without waiting on the rest.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      update_ops = [
          state_ops.assign_add(ref, input_tensor, use_locking=True)
          for input_tensor in inputs
      ]
      # Only read (and destroy) the accumulator after every add has run.
      with ops.control_dependencies(update_ops):
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`,
      `int64`, or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
    otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to np.scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=name)
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability this
  is implemented as `-softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    # log(sigmoid(x)) == -softplus(-x), which never overflows in exp().
    return gen_math_ops._neg(gen_nn_ops.softplus(-tensor), name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._tanh(x, name=name)
    # SparseTensor: apply tanh to the stored values only (tanh(0) == 0,
    # so the implicit zeros stay zero).
    mapped_values = gen_math_ops._tanh(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=mapped_values, dense_shape=x.dense_shape)
def bincount(arr,
             weights=None,
             minlength=None,
             maxlength=None,
             dtype=dtypes.int32):
  """Counts the number of occurrences of each value in an integer array.

  Without `minlength` and `maxlength` the result has length
  `tf.reduce_max(arr) + 1` when `arr` is non-empty, and length 0 otherwise.
  When `weights` is given, output bin `i` holds the sum of the weights at
  every position where `arr` equals `i`.

  Args:
    arr: An int32 tensor of non-negative values.
    weights: If non-None, must be the same shape as arr. For each value in
      `arr`, the bin will be incremented by the corresponding weight instead
      of 1.
    minlength: If given, ensures the output has length at least `minlength`,
      padding with zeros at the end if necessary.
    maxlength: If given, skips values in `arr` that are equal or greater than
      `maxlength`, ensuring that the output has length at most `maxlength`.
    dtype: If `weights` is None, determines the type of the output bins.

  Returns:
    A vector with the same dtype as `weights` or the given `dtype`. The bin
    values.
  """
  arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
  # Empty input -> empty output; otherwise size is max value + 1.
  is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
  output_size = cast(is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
  if minlength is not None:
    min_len = ops.convert_to_tensor(
        minlength, name="minlength", dtype=dtypes.int32)
    output_size = gen_math_ops.maximum(min_len, output_size)
  if maxlength is not None:
    max_len = ops.convert_to_tensor(
        maxlength, name="maxlength", dtype=dtypes.int32)
    output_size = gen_math_ops.minimum(max_len, output_size)
  # The kernel interprets an empty weights vector as "count occurrences".
  if weights is None:
    weights = constant_op.constant([], dtype)
  else:
    weights = ops.convert_to_tensor(weights, name="weights")
  return gen_math_ops.bincount(arr, output_size, weights)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default the cumsum is inclusive, so the first output element equals the
  first input element:

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  With `exclusive=True` an exclusive cumsum is performed instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  With `reverse=True` the cumsum runs in the opposite direction (this is
  more efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  ```

  The two kwargs can be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default the cumprod is inclusive, so the first output element equals
  the first input element:

  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```

  With `exclusive=True` an exclusive cumprod is performed instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```

  With `reverse=True` the cumprod runs in the opposite direction (this is
  more efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```

  The two kwargs can be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  For a tensor of complex numbers of the form \\(a + bj\\) (where *a* is the
  real part and *b* the imaginary part) this returns the element-wise
  conjugate \\(a - bj\\). Real-valued tensors are returned unchanged.

  For example:

      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
      tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Conjugation is a no-op for real (floating or integer) tensors.
    if x.dtype.is_floating or x.dtype.is_integer:
      return x
    if x.dtype.is_complex:
      return gen_math_ops._conj(x, name=name)
    raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  shape_x = op.inputs[0].get_shape()
  shape_y = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(shape_x, shape_y)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = to_int32(input_shape)  # [2, 3, 5, 7]
  axes = to_int32(axes)  # [1, 2]

  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axes into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch together the original sizes (at all positions) with 1s (at the
  # reduced positions); dynamic_stitch lets the second list win on overlap.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.fill(axes_shape, 1)
      ])  # [1, 1]
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes
      of b in order.
      If axes is a list or `Tensor` the first and second row contain the set of
      unique integers specifying axes along which the contraction is computed,
      for `a` and `b`, respectively. The number of axes for `a` and `b` must
      be equal.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Fully static shapes: do all the bookkeeping in Python.
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims
    else:
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        free_dims_static = [shape_a[i] for i in free]
      else:
        free_dims_static = None
      shape_a = array_ops.shape(a)
      rank_a = array_ops.rank(a)
      axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
      # Normalize negative axis indices into [0, rank_a).
      axes = cast(axes >= 0, dtypes.int32) * axes + cast(
          axes < 0, dtypes.int32) * (axes + rank_a)
      free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # BUGFIX/cleanup: a dead assignment that built `perm` out of the
      # dimension *sizes* (`axes_dims`/`free_dims`) used to sit here; it was
      # immediately overwritten by both branches below, but still added a
      # spurious concat op to the graph. Removed.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 1:
        raise ValueError("'axes' must be at least 1.")
      if a_shape.ndims is not None:
        # Scalar axes: contract the last `axes` dims of a with the first
        # `axes` dims of b.
        return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank, dtype=dtypes.int32),
                range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s.",
            len(a_axes), len(b_axes))
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    # Flatten each operand to a rank-2 tensor so the contraction reduces to
    # an ordinary matmul, then restore the free dimensions afterwards.
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
# These are plain aliases of the generated spectral kernels; new code should
# prefer the tf.spectral namespace.
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
Minor corrections to tensordot documentation (#11764)
Changed some math indices formatting and the format of the `axes` argument for Example 3.
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Basic arithmetic operators.
See the @{$python/math_ops} guide.
@@add
@@subtract
@@multiply
@@scalar_mul
@@div
@@divide
@@truediv
@@floordiv
@@realdiv
@@truncatediv
@@floor_div
@@truncatemod
@@floormod
@@mod
@@cross
@@add_n
@@abs
@@negative
@@sign
@@reciprocal
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@expm1
@@log
@@log1p
@@sinh
@@cosh
@@asinh
@@acosh
@@atanh
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
@@lbeta
@@tan
@@acos
@@asin
@@atan
@@atan2
@@lgamma
@@digamma
@@erf
@@erfc
@@squared_difference
@@igamma
@@igammac
@@zeta
@@polygamma
@@betainc
@@rint
@@diag
@@diag_part
@@trace
@@transpose
@@eye
@@matrix_diag
@@matrix_diag_part
@@matrix_band_part
@@matrix_set_diag
@@matrix_transpose
@@matmul
@@norm
@@matrix_determinant
@@matrix_inverse
@@cholesky
@@cholesky_solve
@@matrix_solve
@@matrix_triangular_solve
@@matrix_solve_ls
@@qr
@@self_adjoint_eig
@@self_adjoint_eigvals
@@svd
@@tensordot
@@complex
@@conj
@@imag
@@real
@@fft
@@ifft
@@fft2d
@@ifft2d
@@fft3d
@@ifft3d
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@reduce_logsumexp
@@count_nonzero
@@accumulate_n
@@einsum
@@bincount
@@cumsum
@@cumprod
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@unsorted_segment_max
@@sparse_segment_sum
@@sparse_segment_mean
@@sparse_segment_sqrt_n
@@argmin
@@argmax
@@setdiff1d
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
from tensorflow.python.ops import gen_spectral_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import state_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
from tensorflow.python.util.deprecation import deprecated_args
# Aliases for some automatically-generated names.
# `linspace` re-exports the generated LinSpace kernel under a friendlier name.
linspace = gen_math_ops.lin_space
# Keep the generated `arg_max`/`arg_min` names importable, but mark them
# deprecated in favor of the `argmax`/`argmin` wrappers defined below.
arg_max = deprecated(None, "Use `argmax` instead")(arg_max) # pylint: disable=used-before-assignment
arg_min = deprecated(None, "Use `argmin` instead")(arg_min) # pylint: disable=used-before-assignment
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_max.__doc__
          .replace("dimensions", "axes")
          .replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # Reconcile the deprecated `dimension` argument with `axis`: exactly one
  # of them may be supplied; `axis` defaults to 0 when neither is given.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if dimension is not None:
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@deprecated_args(None, "Use the `axis` argument instead", "dimension")
@_set_doc(gen_math_ops.arg_min.__doc__
          .replace("dimensions", "axes")
          .replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # Reconcile the deprecated `dimension` argument with `axis`: exactly one
  # of them may be supplied; `axis` defaults to 0 when neither is given.
  if dimension is not None and axis is not None:
    raise ValueError("Cannot specify both 'axis' and 'dimension'")
  if dimension is not None:
    axis = dimension
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
def abs(x, name=None):
  r"""Computes the absolute value of a tensor, element-wise.

  For real dtypes the result has the same dtype as `x`.  Given a tensor `x`
  of complex numbers of the form \\(a + bj\\), this operation returns a
  tensor of type `float32` or `float64` holding the magnitude
  \\(\sqrt{a^2 + b^2}\\) of each element.  For example:

  ```
  # tensor 'x' is [[-2.25 + 4.75j], [-3.25 + 5.75j]]
  tf.abs(x) ==> [5.25594902, 6.60492229]
  ```

  Args:
    x: A `Tensor` or `SparseTensor` of type `float32`, `float64`, `int32`,
      `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` the same size as `x` with absolute values.
    Note, for `complex64` or `complex128` input, the returned `Tensor` will
    be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    is_sparse = isinstance(x, sparse_tensor.SparseTensor)
    # For sparse input operate on the stored values only; the sparsity
    # pattern is preserved (abs(0) == 0).
    values = x.values if is_sparse else ops.convert_to_tensor(x, name="x")
    if values.dtype in (dtypes.complex64, dtypes.complex128):
      result = gen_math_ops._complex_abs(
          values, Tout=values.dtype.real_dtype, name=name)
    else:
      result = gen_math_ops._abs(values, name=name)
    if is_sparse:
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=result, dense_shape=x.dense_shape)
    return result
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  # Thin private wrapper around the generated Bucketize op; `boundaries`
  # delimits the buckets that `input` values are binned into.
  return gen_math_ops._bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    # Python 3 `/` semantics: integer operands are promoted to float.
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    # `//` semantics: flooring division.
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    # Python 2 `/` semantics: flooring division for integer operands.
    return _div_python2(self.x, y, self.name)
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`."""
  if name is None:
    return x / y
  # Operator overloads cannot carry an op name, so route the division
  # through a small delegate object that remembers `name`.
  return DivideDelegateWithName(x, name) / y
def multiply(x, y, name=None):
  # Public alias for the generated Mul kernel (element-wise multiplication).
  # Documentation is attached by the assignment below.
  return gen_math_ops._mul(x, y, name)
multiply.__doc__ = gen_math_ops._mul.__doc__.replace("Mul", "`tf.multiply`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated, please use `tf.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias kept for backwards compatibility; delegates to the
  # generated Mul kernel.  (Intentionally no docstring here — the assignment
  # below builds `__doc__` and guards against it being None.)
  return gen_math_ops._mul(x, y, name)
# Prepend the generated op's documentation; the conditional guards against
# a None `__doc__` (e.g. when docstrings are stripped under -OO).
_mul.__doc__ = (gen_math_ops._mul.__doc__ +
                ("" if _mul.__doc__ is None else _mul.__doc__))
def subtract(x, y, name=None):
  # Public alias for the generated Sub kernel (element-wise subtraction).
  # Documentation is attached by the assignment below.
  return gen_math_ops._sub(x, y, name)
subtract.__doc__ = gen_math_ops._sub.__doc__.replace("`Sub`", "`tf.subtract`")
# TODO(aselle): put deprecation in after another round of global code changes
@deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias kept for backwards compatibility; delegates to the
  # generated Sub kernel.  (Intentionally no docstring here — the assignment
  # below builds `__doc__` and guards against it being None.)
  return gen_math_ops._sub(x, y, name)
# Prepend the generated op's documentation; the conditional guards against
# a None `__doc__` (e.g. when docstrings are stripped under -OO).
_sub.__doc__ = (gen_math_ops._sub.__doc__ +
                ("" if _sub.__doc__ is None else _sub.__doc__))
# pylint: disable=g-docstring-has-escape
def negative(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Neg", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._neg(x, name=name)
    # Sparse input: negate only the stored values; the sparsity pattern is
    # unchanged since -0 == 0.
    negated = gen_math_ops._neg(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=negated, dense_shape=x.dense_shape)
# pylint: enable=g-docstring-has-escape
# pylint: disable=g-docstring-has-escape
@deprecated("2016-12-30",
            "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  Deprecated alias for `negative`.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1` if `x < 0`; 0 if `x == 0` or `tf.is_nan(x)`; 1 if `x > 0`.

  Zero is returned for NaN inputs.

  For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to numpy.sign except for the behavior for input values of NaN.
  @end_compatibility
  """
  with ops.name_scope(name, "Sign", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sign(x, name=name)
    # Sparse input: apply sign to stored values only; sign(0) == 0 keeps
    # the implicit zeros valid.
    signs = gen_math_ops.sign(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=signs, dense_shape=x.dense_shape)
def square(x, name=None):
  r"""Computes square of x element-wise.

  I.e., \\(y = x * x = x^2\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Square", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.square(x, name=name)
    # Sparse input: square the stored values only (0^2 == 0, so the
    # sparsity pattern is preserved).
    squared = gen_math_ops.square(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=squared, dense_shape=x.dense_shape)
def sqrt(x, name=None):
  r"""Computes square root of x element-wise.

  I.e., \\(y = \sqrt{x} = x^{1/2}\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Sqrt", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops.sqrt(x, name=name)
    # Sparse input: take the root of the stored values only (sqrt(0) == 0,
    # so the sparsity pattern is preserved).
    roots = gen_math_ops.sqrt(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=roots, dense_shape=x.dense_shape)
def erf(x, name=None):
  """Computes the Gauss error function of `x` element-wise.

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  with ops.name_scope(name, "Erf", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Sparse input: apply erf to the stored values only; erf(0) == 0, so
      # the implicit zeros remain valid.
      x_erf = gen_math_ops.erf(x.values, name=name)
      return sparse_tensor.SparseTensor(
          indices=x.indices, values=x_erf, dense_shape=x.dense_shape)
    else:
      return gen_math_ops.erf(x, name=name)
def scalar_mul(scalar, x):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  # Guard clause: anything but a statically-known 0-D shape is rejected.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the slice values; indices and dense_shape are untouched.
    return ops.IndexedSlices(scalar * x.values, x.indices, x.dense_shape)
  return scalar * x
def pow(x, y, name=None):
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```
  # tensor 'x' is [[2, 2], [3, 3]]
  # tensor 'y' is [[8, 16], [2, 3]]
  tf.pow(x, y) ==> [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    y: A `Tensor` of type `float32`, `float64`, `int32`, `int64`, `complex64`,
      or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    # NOTE(review): only `x` is listed as a name-scope input here (existing
    # behavior); `y` is still consumed by the Pow kernel.
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```
  # tensor 'real' is [2.25, 3.25]
  # tensor `imag` is [4.75, 5.75]
  tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If `real` and `imag` are not both `float32` or both `float64`.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # The output dtype follows the (matching) input dtypes.
    if real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      Tout = dtypes.complex128
    elif real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      Tout = dtypes.complex64
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
def real(input, name=None):
  r"""Returns the real part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the real part of each element in `input`.
  All elements in `input` must be complex numbers of the form \\(a + bj\\),
  where *a* is the real part returned by this operation and *b* is the
  imaginary part.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.real(input) ==> [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    target_dtype = input.dtype.real_dtype
    if input.dtype.base_dtype != target_dtype:
      return gen_math_ops.real(input, Tout=target_dtype, name=name)
    # Already a real dtype: pass through untouched.
    return input
def imag(input, name=None):
  r"""Returns the imaginary part of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the imaginary part of each element in
  `input`. All elements in `input` must be complex numbers of the form \\(a +
  bj\\), where *a* is the real part and *b* is the imaginary part returned by
  this operation.

  For example:

  ```
  # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
  tf.imag(input) ==> [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    # The output dtype is the real counterpart of the complex input dtype.
    out_dtype = input.dtype.real_dtype
    return gen_math_ops.imag(input, Tout=out_dtype, name=name)
# pylint: enable=redefined-outer-name,redefined-builtin
def round(x, name=None):
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.

  For example:

  ```python
  # 'a' is [0.9, 2.5, 2.3, 1.5, -4.5]
  tf.round(a) ==> [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if not x.dtype.is_integer:
    return gen_math_ops.round(x, name=name)
  # Integer tensors are already whole numbers: return them untouched.
  return x
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor`) to `dtype`.

  For example:

  ```python
  # tensor `a` is [1.8, 2.2], dtype=tf.float
  tf.cast(a, tf.int32) ==> [1, 2]  # dtype=tf.int32
  ```

  Args:
    x: A `Tensor` or `SparseTensor`.
    dtype: The destination type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Sparse input: cast only the stored values and rebuild.
      new_values = cast(x.values, base_type, name=name)
      return sparse_tensor.SparseTensor(x.indices, new_values, x.dense_shape)
    # TODO(touts): Handle what Josh said.
    #
    # Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
    # allows some conversions that cast() can't do, e.g. casting numbers to
    # strings.
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.base_dtype == base_type:
      # Same dtype: casting would be a no-op.
      return x
    return gen_math_ops.cast(x, base_type, name=name)
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    # Clamp from below if the target's minimum exceeds the source's
    # (e.g. casting a signed type to an unsigned one).
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(value,
                                   ops.convert_to_tensor(
                                       dtype.min, dtype=value.dtype,
                                       name="min"))
    # Clamp from above if the target's maximum is below the source's.
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(value,
                                   ops.convert_to_tensor(
                                       dtype.max, dtype=value.dtype,
                                       name="max"))
    return cast(value, dtype, name=name)
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  # Convenience wrapper: `cast` with a fixed target dtype.
  return cast(x, dtypes.float32, name=name)
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  # Convenience wrapper: `cast` with a fixed target dtype.
  return cast(x, dtypes.float64, name=name)
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  # Convenience wrapper: `cast` with a fixed target dtype.
  return cast(x, dtypes.int32, name=name)
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  # Convenience wrapper: `cast` with a fixed target dtype.
  return cast(x, dtypes.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  # Convenience wrapper: `cast` with a fixed target dtype.
  return cast(x, dtypes.bfloat16, name=name)
# Register unary operator overloads on Tensor: `-t` and `abs(t)`.
ops.Tensor._override_operator("__neg__", gen_math_ops._neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """

  def binary_op_wrapper(x, y):
    # Forward overload (`x <op> y` with `x` a Tensor): convert the RHS and
    # delegate to `func`.
    with ops.name_scope(None, op_name, [x, y]) as name:
      if not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)

  def binary_op_wrapper_sparse(sp_x, y):
    # SparseTensor <op> dense overload: `func` produces only the new values;
    # indices and dense_shape carry over from the sparse operand.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(sp_x.indices,
                                        func(
                                            sp_x.indices,
                                            sp_x.values,
                                            sp_x.dense_shape,
                                            y,
                                            name=name), sp_x.dense_shape)

  def r_binary_op_wrapper(y, x):
    # Reflected overload (`x <op> y` with `y` the Tensor, e.g. `2 + t`).
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only gets the forward overload; there is no reflected
    # sparse wrapper.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
    # Integer dtypes are promoted to a float dtype before true division.
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    # Floating and complex dtypes need no conversion (None entries).
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'.

  Args:
    sp_indices: `indices` of the sparse numerator.
    sp_values: `values` of the sparse numerator.
    sp_shape: `dense_shape` of the sparse numerator.
    y: Dense denominator tensor.
    name: A name for the operation (optional).

  Returns:
    The element-wise quotient values, suitable for rebuilding a
    `SparseTensor` with the original indices and shape.

  Raises:
    TypeError: If the operand dtypes differ or are not in `_TRUEDIV_TABLE`.
  """
  with ops.name_scope(name, "truediv", [sp_indices, sp_values, sp_shape,
                                        y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      # Integer operands are promoted to float first (Python 3 semantics).
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Implements Python 3 `/` semantics: integer inputs are promoted to float."""
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    numer_dtype = x.dtype.base_dtype
    denom_dtype = y.dtype.base_dtype
    if numer_dtype != denom_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (numer_dtype, denom_dtype))
    try:
      promoted = _TRUEDIV_TABLE[numer_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % numer_dtype)
    if promoted is not None:
      # Integer operands are cast to a float type wide enough to hold them.
      x = cast(x, promoted)
      y = cast(y, promoted)
    return gen_math_ops._real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics. Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    numer_dtype = x.dtype.base_dtype
    denom_dtype = y.dtype.base_dtype
    if numer_dtype != denom_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (numer_dtype, denom_dtype))
    # Python 2 semantics: exact division for float/complex, flooring
    # division for integer operands.
    if numer_dtype.is_floating or numer_dtype.is_complex:
      return gen_math_ops._real_div(x, y, name=name)
    return gen_math_ops._floor_div(x, y, name=name)
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first.   This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`.  If you want integer division that rounds
  down, use `x // y` or `tf.floordiv`.

  `x` and `y` must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  # Shared implementation with the Tensor `/` operator overload.
  return _truediv_python3(x, y, name)
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
  division operator semantics.

  This function divides `x` and `y`, forcing Python 2.7 semantics. That is,
  if one of `x` or `y` is a float, then the result will be a float.
  Otherwise, the output will be an integer type. Flooring semantics are used
  for integer division.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  # Shared implementation with the Tensor `__div__` operator overload.
  return _div_python2(x, y, name)
# TODO(aselle): This should be removed
# `mod` is an alias for the generated FloorMod kernel.
mod = gen_math_ops._floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  The same as `tf.div(x,y)` for integers, but uses `tf.floor(tf.div(x,y))` for
  floating point arguments so that the result is always an integer (though
  possibly an integer represented as floating point).  This op is generated by
  `x // y` floor division in Python 3 and in Python 2.7 with
  `from __future__ import division`.

  Note that for efficiency, `floordiv` uses C semantics for negative numbers
  (unlike Python and Numpy).

  `x` and `y` must have the same type, and the result will have the same type
  as well.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded down (except possibly towards zero for negative integers).

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    # Delegates to the generated FloorDiv kernel.
    return gen_math_ops._floor_div(x, y, name=name)
# Public aliases for the generated division/modulus kernels.
realdiv = gen_math_ops._real_div
truncatediv = gen_math_ops._truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops._floor_div
truncatemod = gen_math_ops._truncate_mod
floormod = gen_math_ops._floor_mod
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    # Dense * Dense: plain element-wise multiply.
    return gen_math_ops._mul(x, y, name=name)
  # Dense * Sparse: compute new values against the sparse operand and
  # rebuild a SparseTensor with its original structure.
  assert isinstance(y, sparse_tensor.SparseTensor)
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# SparseTensor <op> dense overloads.
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)
# Dense Tensor arithmetic operator overloads.
_OverrideBinaryOperatorHelper(gen_math_ops.add, "add")
_OverrideBinaryOperatorHelper(gen_math_ops._sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops._floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
def logical_xor(x, y, name="LogicalXor"):
  """x ^ y = (x | y) & ~(x & y)."""
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  not_both = gen_math_ops.logical_not(gen_math_ops.logical_and(x, y))
  return gen_math_ops.logical_and(either, not_both, name=name)
# Boolean operator (&, |, ^) and comparison (<, <=, >, >=) overloads.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)
def range(start, limit=None, delta=1, dtype=None, name="range"):
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  ```python
  # 'start' is 3, 'limit' is 18, 'delta' is 3
  tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
  # 'start' is 3, 'limit' is 1, 'delta' is -0.5
  tf.range(start, limit, delta) ==> [3, 2.5, 2, 1.5]
  # 'limit' is 5
  tf.range(limit) ==> [0, 1, 2, 3, 4]
  ```

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if
      `limit` is not None; otherwise, acts as range limit and first entry
      defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence,
      exclusive. If None, defaults to the value of `start` while the first
      entry of the range defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments
      `start`. Defaults to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    # Single-argument form: range(n) counts from 0 up to n.
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    if dtype is None:
      # No explicit dtype: promote all three arguments to the widest of
      # their inferred dtypes.
      hierarchy = [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]
      args = [start, limit, delta]
      assert all(arg.dtype in hierarchy for arg in args)
      widest = max((arg.dtype for arg in args), key=hierarchy.index)
      start, limit, delta = (cast(arg, widest) for arg in args)
    return gen_math_ops._range(start, limit, delta, name=name)
# Reduction operations
def _ReductionDims(x, axis, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor) and x.get_shape().ndims is not None:
return constant_op.constant(
np.arange(x.get_shape().ndims), dtype=dtypes.int32)
if (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.get_shape().is_fully_defined()):
rank = x.dense_shape.get_shape()[0].value # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank), dtype=dtypes.int32)
# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Sums the elements of `input_tensor` across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  For example:

  ```python
  # 'x' is [[1, 1, 1]
  #         [1, 1, 1]]
  tf.reduce_sum(x) ==> 6
  tf.reduce_sum(x, 0) ==> [2, 2, 2]
  tf.reduce_sum(x, 1) ==> [3, 3]
  tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
  tf.reduce_sum(x, [0, 1]) ==> 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.sum
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._sum(input_tensor, reduction_dims, keep_dims, name=name)
def count_nonzero(input_tensor,
                  axis=None,
                  keep_dims=False,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None):
  """Counts the nonzero elements of a tensor across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  # 'x' is [[0, 1, 0]
  #         [1, 1, 0]]
  tf.count_nonzero(x) ==> 3
  tf.count_nonzero(x, 0) ==> [1, 2, 0]
  tf.count_nonzero(x, 1) ==> [1, 2]
  tf.count_nonzero(x, 1, keep_dims=True) ==> [[1], [2]]
  tf.count_nonzero(x, [0, 1]) ==> 3
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, or `bool`.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  with ops.name_scope(name, "count_nonzero", [input_tensor]):
    input_tensor = ops.convert_to_tensor(input_tensor, name="input_tensor")
    zero = input_tensor.dtype.as_numpy_dtype()
    is_nonzero = gen_math_ops.not_equal(input_tensor, zero)
    # Summation is done in int64 so that the reduction can run on GPU.
    nonzero_count = reduce_sum(
        to_int64(is_nonzero),
        axis=axis,
        keep_dims=keep_dims,
        reduction_indices=reduction_indices)
    return cast(nonzero_count, dtype=dtype)
def reduce_mean(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Averages the elements of `input_tensor` across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  For example:

  ```python
  # 'x' is [[1., 1.]
  #         [2., 2.]]
  tf.reduce_mean(x) ==> 1.5
  tf.reduce_mean(x, 0) ==> [1.5, 1.5]
  tf.reduce_mean(x, 1) ==> [1., 2.]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._mean(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_prod(input_tensor,
                axis=None,
                keep_dims=False,
                name=None,
                reduction_indices=None):
  """Multiplies the elements of `input_tensor` across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._prod(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_min(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Takes the minimum of `input_tensor`'s elements across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._min(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_max(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Takes the maximum of `input_tensor`'s elements across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.max
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._max(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_all(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Logically ANDs the elements of `input_tensor` across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_all(x) ==> False
  tf.reduce_all(x, 0) ==> [False, False]
  tf.reduce_all(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._all(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_any(input_tensor,
               axis=None,
               keep_dims=False,
               name=None,
               reduction_indices=None):
  """Logically ORs the elements of `input_tensor` across the given dimensions.

  Every dimension listed in `axis` is collapsed. With `keep_dims=True` each
  collapsed dimension is kept with size 1 instead of being removed. Passing
  `axis=None` (the default) reduces every dimension, yielding a tensor with
  a single element.

  For example:

  ```python
  # 'x' is [[True, True]
  #         [False, False]]
  tf.reduce_any(x) ==> True
  tf.reduce_any(x, 0) ==> [True, True]
  tf.reduce_any(x, 1) ==> [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce; `None` (the default) reduces all of
      them. Must be in the range `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  reduction_dims = _ReductionDims(input_tensor, axis, reduction_indices)
  return gen_math_ops._any(input_tensor, reduction_dims, keep_dims, name=name)
def reduce_logsumexp(input_tensor,
                     axis=None,
                     keep_dims=False,
                     name=None,
                     reduction_indices=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
  entry in `axis`. If `keep_dims` is true, the reduced dimensions
  are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  # 'x' is [[0, 0, 0]
  #         [0, 0, 0]]
  tf.reduce_logsumexp(x) ==> log(6)
  tf.reduce_logsumexp(x, 0) ==> [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1) ==> [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keep_dims=True) ==> [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1]) ==> log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default),
      reduces all dimensions. Must be in the range
      `[-rank(input_tensor), rank(input_tensor))`.
    keep_dims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.

  Returns:
    The reduced tensor.
  """
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    # Shift by the max before exponentiating for numerical stability; the
    # shift is added back after the log. stop_gradient keeps the shift out
    # of the backward pass.
    my_max = array_ops.stop_gradient(
        reduce_max(
            input_tensor,
            axis=axis,
            reduction_indices=reduction_indices,
            keep_dims=True))
    result = gen_math_ops.log(
        reduce_sum(
            gen_math_ops.exp(input_tensor - my_max),
            axis,
            keep_dims=True,
            reduction_indices=reduction_indices)) + my_max
    if not keep_dims:
      # Bug fix: when the axes were supplied via the deprecated
      # `reduction_indices` argument, `axis` was still None here and
      # squeeze() dropped *every* size-1 dimension instead of only the
      # reduced ones. Fall back to `reduction_indices` for the squeeze.
      squeeze_dims = axis if axis is not None else reduction_indices
      if isinstance(squeeze_dims, int):
        squeeze_dims = [squeeze_dims]
      result = array_ops.squeeze(result, squeeze_dims)
    return result
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most
  matrix in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, the
  output is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  # 'x' is [[1, 2],
  #         [3, 4]]
  tf.trace(x) ==> 5

  # 'x' is [[1,2,3],
  #         [4,5,6],
  #         [7,8,9]]
  tf.trace(x) ==> 15

  # 'x' is [[[1,2,3],
  #          [4,5,6],
  #          [7,8,9]],
  #         [[-1,-2,-3],
  #          [-4,-5,-6],
  #          [-7,-8,-9]]]
  tf.trace(x) ==> [15,-15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # The trace is the sum of the main diagonal of each inner-most matrix.
    diagonals = array_ops.matrix_diag_part(x)
    return reduce_sum(diagonals, [-1], name=name)
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication arguments,
  and any further outer dimensions match.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
                                                        [4. 5. 6.]]
  # 2-D tensor `b`
  b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
                                                           [9. 10.]
                                                           [11. 12.]]
  c = tf.matmul(a, b) => [[58 64]
                          [139 154]]

  # 3-D tensor `a`
  a = tf.constant(np.arange(1, 13, dtype=np.int32),
                  shape=[2, 2, 3]) => [[[ 1.  2.  3.]
                                        [ 4.  5.  6.]],
                                       [[ 7.  8.  9.]
                                        [10. 11. 12.]]]

  # 3-D tensor `b`
  b = tf.constant(np.arange(13, 25, dtype=np.int32),
                  shape=[2, 3, 2]) => [[[13. 14.]
                                        [15. 16.]
                                        [17. 18.]],
                                       [[19. 20.]
                                        [21. 22.]
                                        [23. 24.]]]
  c = tf.matmul(a, b) => [[[ 94 100]
                           [229 244]],
                          [[508 532]
                           [697 730]]]

  # Since python >= 3.5 the @ operator is supported (see PEP 465).
  # In TensorFlow, it simply calls the `tf.matmul()` function, so the
  # following lines are equivalent:
  d = a @ b @ [[10.], [11.]]
  d = tf.matmul(tf.matmul(a, b), [[10.], [11.]])
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most matrix is
    the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output`[..., i, j] = sum_k (`a`[..., i, k] * `b`[..., k, j]),
    for all indices i, j.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a, or transpose_b and adjoint_b
      are both set to True.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # transpose and adjoint are mutually exclusive per operand: adjoint is
    # transpose plus conjugation, so asking for both is ambiguous.
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_shape = a.get_shape()
    b_shape = b.get_shape()
    # Dispatch 1 of 3: rank > 2 (or unknown rank) on both sides, and no
    # sparse hints -> batched matmul over the outer dimensions.
    if (not a_is_sparse and not b_is_sparse) and (
        (a_shape.ndims is None or a_shape.ndims > 2) and
        (b_shape.ndims is None or b_shape.ndims > 2)):
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      return gen_math_ops._batch_mat_mul(
          a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)

    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True

    # Dispatch 2 of 3: the sparse kernel, when a sparse hint was given and
    # both dtypes support it -- or when either input is bfloat16, which the
    # dense matmul kernel cannot handle.
    sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
    use_sparse_matmul = (a.dtype in sparse_matmul_types and
                         b.dtype in sparse_matmul_types and
                         (a_is_sparse or b_is_sparse))
    if dtypes.bfloat16 in (a.dtype, b.dtype):
      # matmul currently doesn't handle bfloat16 inputs.
      use_sparse_matmul = True
    if use_sparse_matmul:
      return sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
    else:
      # Dispatch 3 of 3: plain rank-2 dense matmul.
      return gen_math_ops._mat_mul(
          a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
# Install `matmul` as a Tensor binary-operator overload (per the matmul
# docstring, the Python `@` operator simply calls `tf.matmul()`).
_OverrideBinaryOperatorHelper(matmul, "matmul")

# Public alias for the generated sparse matmul kernel; also used by the
# sparse path inside `matmul` above.
sparse_matmul = gen_math_ops._sparse_mat_mul
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul.

  Args:
    graph: The graph containing the MatMul node.
    node: The NodeDef of the MatMul op being measured.

  Returns:
    ops.OpStats recording the flop count for this node.
  """
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The contracted dimension k is the first axis of `a` when it is
  # transposed, the second axis otherwise.
  if node.attr["transpose_a"].b:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # Each output element costs k multiplies and k additions.
  return ops.OpStats("flops", (k * output_count * 2))
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  A dense Tensor is wrapped as a block-sparse IndexedSlices covering every
  row; an existing IndexedSlices is passed through untouched.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  # Every row is present, so indices are simply 0..num_rows-1.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
optimize: if true, attempt to optimize the conversion of each input.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
with_int32_index = [
o.indices for o in outputs if o.indices.dtype == dtypes.int32
]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == dtypes.int32:
casted_outputs.append(
ops.IndexedSlices(o.values,
cast(o.indices, dtypes.int64), o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if len(inputs) == 1:
    # A single input needs no AddN node; rename it only if asked to.
    return array_ops.identity(inputs[0], name=name) if name else inputs[0]
  return gen_math_ops._add_n(inputs, name=name)
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred.

  NOTE: This operation is not differentiable and cannot be used if inputs depend
  on trainable variables. Please use `tf.add_n` for such cases.

  Aside from differentiability, `tf.accumulate_n` performs the same operation as
  `tf.add_n`, but does not wait for all of its inputs to be ready before
  beginning to sum. This can save memory if inputs are ready at different times,
  since minimum temporary storage is proportional to the output size rather than
  the inputs size.

  For example:

  ```python
  # tensor 'a' is [[1, 2], [3, 4]]
  # tensor `b` is [[5, 0], [0, 6]]
  tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
    ==> [[7, 4], [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Shape of elements of `inputs`.
    tensor_dtype: The type of `inputs`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if shape is not None:
    shape = tensor_shape.as_shape(shape)
  else:
    shape = tensor_shape.unknown_shape()
  # Merge the static shape information from every input into a single
  # best-known shape (raises if the inputs are incompatible).
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      shape = shape.merge_with(input_tensor.get_shape())
  if len(inputs) == 1:
    # Summing one tensor is the identity; no accumulation machinery needed.
    return inputs[0]
  if tensor_dtype is None:
    tensor_dtype = inputs[0].dtype
  with ops.name_scope(name, "AccumulateN", inputs) as name:
    # A temporary variable accumulates partial sums as each input becomes
    # ready, so peak memory tracks the output size rather than all inputs.
    var = gen_state_ops._temporary_variable(
        shape=tensor_shape.vector(0), dtype=tensor_dtype)
    with ops.colocate_with(var):
      # Merge returns whichever input is ready first -- used here only to
      # obtain a correctly shaped/typed zeros tensor for initialization.
      zeros = array_ops.zeros_like(gen_control_flow_ops._merge(inputs)[0])
      zeros.set_shape(shape)
      ref = state_ops.assign(var, zeros, validate_shape=False)
      update_ops = [
          state_ops.assign_add(ref, input_tensor, use_locking=True)
          for input_tensor in inputs
      ]
      with ops.control_dependencies(update_ops):
        # Destroying the temporary variable yields its final accumulated
        # value as the op's output.
        return gen_state_ops._destroy_temporary_variable(
            ref, var_name=var.op.name, name=name)
def sigmoid(x, name=None):
  """Computes sigmoid of `x` element-wise.

  Specifically, `y = 1 / (1 + exp(-x))`.

  Args:
    x: A Tensor with type `float32`, `float64`, `int32`, `complex64`, `int64`,
      or `qint32`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x` if `x.dtype != qint32`
      otherwise the return type is `quint8`.

  @compatibility(numpy)
  Equivalent to np.scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops._sigmoid(tensor, name=name)
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    # log(sigmoid(x)) == -softplus(-x), which is numerically stable.
    return gen_math_ops._neg(gen_nn_ops.softplus(-tensor), name=name)
def tanh(x, name=None):
  """Computes hyperbolic tangent of `x` element-wise.

  Args:
    x: A Tensor or SparseTensor with type `float`, `double`, `int32`,
      `complex64`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor or SparseTensor respectively with the same type as `x`.
  """
  with ops.name_scope(name, "Tanh", [x]) as name:
    if not isinstance(x, sparse_tensor.SparseTensor):
      return gen_math_ops._tanh(x, name=name)
    # SparseTensor: apply tanh to the stored values only; the sparsity
    # pattern (indices and dense shape) is unchanged.
    transformed_values = gen_math_ops._tanh(x.values, name=name)
    return sparse_tensor.SparseTensor(
        indices=x.indices, values=transformed_values, dense_shape=x.dense_shape)
def bincount(arr,
             weights=None,
             minlength=None,
             maxlength=None,
             dtype=dtypes.int32):
  """Counts the number of occurrences of each value in an integer array.

  If `minlength` and `maxlength` are not given, returns a vector with length
  `tf.reduce_max(arr) + 1` if `arr` is non-empty, and length 0 otherwise.
  If `weights` are non-None, then index `i` of the output stores the sum of the
  value in `weights` at each index where the corresponding value in `arr` is
  `i`.

  Args:
    arr: An int32 tensor of non-negative values.
    weights: If non-None, must be the same shape as arr. For each value in
      `arr`, the bin will be incremented by the corresponding weight instead
      of 1.
    minlength: If given, ensures the output has length at least `minlength`,
      padding with zeros at the end if necessary.
    maxlength: If given, skips values in `arr` that are equal or greater than
      `maxlength`, ensuring that the output has length at most `maxlength`.
    dtype: If `weights` is None, determines the type of the output bins.

  Returns:
    A vector with the same dtype as `weights` or the given `dtype`. The bin
    values.
  """
  arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
  # An empty input yields a length-0 output; otherwise max(arr)+1 bins.
  array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
  output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
  if minlength is not None:
    minlength = ops.convert_to_tensor(
        minlength, name="minlength", dtype=dtypes.int32)
    output_size = gen_math_ops.maximum(minlength, output_size)
  if maxlength is not None:
    maxlength = ops.convert_to_tensor(
        maxlength, name="maxlength", dtype=dtypes.int32)
    output_size = gen_math_ops.minimum(maxlength, output_size)
  if weights is None:
    # No weights: the kernel counts by 1 in the requested output dtype.
    weights = constant_op.constant([], dtype)
  else:
    weights = ops.convert_to_tensor(weights, name="weights")
  return gen_math_ops.bincount(arr, output_size, weights)
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  The default is an inclusive cumsum: the first output element equals the
  first input element.

  ```python
  tf.cumsum([a, b, c])  # => [a, a + b, a + b + c]
  ```

  With `exclusive=True` an exclusive cumsum is computed instead:

  ```python
  tf.cumsum([a, b, c], exclusive=True)  # => [0, a, a + b]
  ```

  With `reverse=True` the sum runs in the opposite direction (this is more
  efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumsum([a, b, c], reverse=True)  # => [a + b + c, b + c, c]
  ```

  `reverse` and `exclusive` can be combined:

  ```python
  tf.cumsum([a, b, c], exclusive=True, reverse=True)  # => [b + c, c, 0]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  The default is an inclusive cumprod: the first output element equals the
  first input element.

  ```python
  tf.cumprod([a, b, c])  # => [a, a * b, a * b * c]
  ```

  With `exclusive=True` an exclusive cumprod is computed instead:

  ```python
  tf.cumprod([a, b, c], exclusive=True)  # => [1, a, a * b]
  ```

  With `reverse=True` the product runs in the opposite direction (this is
  more efficient than using separate `tf.reverse` ops):

  ```python
  tf.cumprod([a, b, c], reverse=True)  # => [a * b * c, b * c, c]
  ```

  `reverse` and `exclusive` can be combined:

  ```python
  tf.cumprod([a, b, c], exclusive=True, reverse=True)  # => [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `input` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `input`. The
  complex numbers in `input` must be of the form \\(a + bj\\), where *a* is the
  real part and *b* is the imaginary part.

  The complex conjugate returned by this operation is of the form \\(a - bj\\).

  For example:

      # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
      tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

  If `x` is real, it is returned unchanged.

  Args:
    x: `Tensor` to conjugate.  Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.
  """
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Conjugation is the identity for real (floating or integer) tensors,
    # so no op is emitted in that case.
    if x.dtype.is_floating or x.dtype.is_integer:
      return x
    if x.dtype.is_complex:
      return gen_math_ops._conj(x, name=name)
    raise TypeError("Expected numeric tensor, got dtype %r" % x.dtype)
def _BroadcastShape(op):
  """Common shape function for binary operators that broadcast their inputs."""
  shape_a = op.inputs[0].get_shape()
  shape_b = op.inputs[1].get_shape()
  return [common_shapes.broadcast_shape(shape_a, shape_b)]
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keep_dims were set to True.
  """
  # Running example: input_shape = [2, 3, 5, 7], axes = [1, 2].
  # The casts are needed for SparseTensor reductions.
  input_shape = to_int32(input_shape)
  axes = to_int32(axes)
  rank = array_ops.size(input_shape)  # 4 in the example
  # Canonicalize negative axis indices into [0, rank).
  axes = (axes + rank) % rank
  ones_for_reduced = array_ops.fill(array_ops.shape(axes), 1)  # [1, 1]
  # Stitch the two pieces together: positions named in `axes` become 1,
  # every other position keeps its original extent -> [2, 1, 1, 7].
  return gen_data_flow_ops.dynamic_stitch(
      [range(rank), axes],
      [input_shape, ones_for_reduced])
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes
      of b in order.
      If axes is a list or `Tensor` the first and second row contain the set of
      unique integers specifying axes along which the contraction is computed,
      for `a` and `b`, respectively. The number of axes for `a` and `b` must
      be equal.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      # Fully static shape: do all bookkeeping with plain Python ints.
      shape_a = a.get_shape().as_list()
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims
    else:
      # Dynamic shape: compute the same quantities with graph ops.  If only
      # the rank is known statically we can still infer the static free dims.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        free_dims_static = [shape_a[i] for i in free]
      else:
        free_dims_static = None
      shape_a = array_ops.shape(a)
      rank_a = array_ops.rank(a)
      axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
      # Canonicalize negative axis indices into [0, rank_a).
      axes = cast(axes >= 0, dtypes.int32) * axes + cast(
          axes < 0, dtypes.int32) * (axes + rank_a)
      free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # NOTE: the original code assigned a dead (and incorrect) `perm` built
      # from *dims* here before this branch; it has been removed.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    if isinstance(axes, compat.integral_types):
      if axes < 1:
        raise ValueError("'axes' must be at least 1.")
      if a_shape.ndims is not None:
        return range(a_shape.ndims - axes, a_shape.ndims), range(axes)
      else:
        rank = array_ops.rank(a)
        return (range(rank - axes, rank, dtype=dtypes.int32), range(
            axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      if len(a_axes) != len(b_axes):
        # Use %-interpolation; the original passed the counts as extra
        # ValueError args, so the message was never formatted.
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s." %
            (len(a_axes), len(b_axes)))
      return a_axes, b_axes
    else:
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(b, b_axes,
                                                                    True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      return array_ops.reshape(ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
# FFT ops were moved to tf.spectral. tf.fft symbols were part of the TensorFlow
# 1.0 API so we leave these here for backwards compatibility.
# These are plain aliases of the generated ops — no wrapping or new behavior.
fft = gen_spectral_ops.fft
ifft = gen_spectral_ops.ifft
fft2d = gen_spectral_ops.fft2d
ifft2d = gen_spectral_ops.ifft2d
fft3d = gen_spectral_ops.fft3d
ifft3d = gen_spectral_ops.ifft3d
|
# -*- coding: utf-8 -*-
import os
import random
import unittest
import zipfile
from cStringIO import StringIO
from flask import url_for, escape, session, current_app, g
from flask_testing import TestCase
from mock import patch
from pyotp import TOTP
from sqlalchemy.orm.exc import StaleDataError
from sqlalchemy.exc import IntegrityError
import crypto_util
import models
import journalist
import journalist_app as journalist_app_module
import utils
os.environ['SECUREDROP_ENV'] = 'test' # noqa
from sdconfig import SDConfig, config
from db import db
from models import (InvalidPasswordLength, Journalist, Reply, Source,
Submission)
from utils.instrument import InstrumentedApp
# Smugly seed the RNG for deterministic testing
random.seed('¯\_(ツ)_/¯')

# Valid passphrases used when creating/updating journalist accounts in tests.
VALID_PASSWORD = 'correct horse battery staple generic passphrase hooray'
VALID_PASSWORD_2 = 'another correct horse battery staple generic passphrase'

# These are factored out of the tests because some tests have a
# positive/negative case under varying conditions, and we don't want
# false positives after modifying a string in the application.
EMPTY_REPLY_TEXT = "You cannot send an empty reply."
ADMIN_LINK = '<a href="/admin/" id="link-admin-index">'
def _login_user(app, username, password, otp_secret):
    """Log the given journalist in through the test client and sanity-check
    that the login actually succeeded."""
    login_data = {
        'username': username,
        'password': password,
        'token': TOTP(otp_secret).now(),
    }
    resp = app.post('/login', data=login_data, follow_redirects=True)
    assert resp.status_code == 200
    # A successful login attaches the journalist to the request globals.
    assert hasattr(g, 'user')  # ensure logged in
class TestPytestJournalistApp:
    """Pytest-style tests for the journalist interface.

    Covers password generation, reply handling, login/logout flows,
    login throttling, and the presence/absence of navigation links for
    admin vs. regular users.  Each test receives a `journalist_app`
    fixture (a Flask app configured for testing).
    """

    def test_make_password(self, journalist_app):
        """make_password() retries until genrandomid() yields a valid passphrase."""
        with patch.object(crypto_util.CryptoUtil,
                          'genrandomid',
                          side_effect=['bad', VALID_PASSWORD]):
            fake_config = SDConfig()
            with journalist_app.test_request_context('/'):
                password = journalist_app_module.utils.make_password(
                    fake_config)
                assert password == VALID_PASSWORD

    def test_reply_error_logging(self, journalist_app):
        """A failed reply commit logs the exception *class*, never the message,
        so potentially sensitive content stays out of the logs."""
        with journalist_app.app_context():
            source, _ = utils.db_helper.init_source()
            filesystem_id = source.filesystem_id
            user, password = utils.db_helper.init_journalist()
            username = user.username
            user_id = user.id
            otp_secret = user.otp_secret

        exception_class = StaleDataError
        exception_msg = 'Potentially sensitive content!'

        with journalist_app.test_client() as app:
            _login_user(app, username, password, otp_secret)
            with patch.object(journalist_app.logger, 'error') \
                    as mocked_error_logger:
                with patch.object(db.session,
                                  'commit',
                                  side_effect=exception_class(exception_msg)):
                    resp = app.post('/reply',
                                    data={'filesystem_id': filesystem_id,
                                          'message': '_'},
                                    follow_redirects=True)
        assert resp.status_code == 200

        # Notice the "potentially sensitive" exception_msg is not present in
        # the log event.
        mocked_error_logger.assert_called_once_with(
            "Reply from '{}' (ID {}) failed: {}!".format(username,
                                                         user_id,
                                                         exception_class))

    def test_reply_error_flashed_message(self, journalist_app):
        """A failed reply commit flashes a generic error to the journalist."""
        with journalist_app.app_context():
            source, _ = utils.db_helper.init_source()
            filesystem_id = source.filesystem_id
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        exception_class = StaleDataError

        with journalist_app.test_client() as app:
            _login_user(app, username, password, otp_secret)
            with InstrumentedApp(app) as ins:
                with patch.object(db.session, 'commit',
                                  side_effect=exception_class()):
                    app.post('/reply',
                             data={'filesystem_id': filesystem_id,
                                   'message': '_'})

                ins.assert_message_flashed(
                    'An unexpected error occurred! Please '
                    'inform your administrator.', 'error')

    def test_empty_replies_are_rejected(self, journalist_app):
        """Posting an empty reply surfaces the EMPTY_REPLY_TEXT warning."""
        with journalist_app.app_context():
            source, _ = utils.db_helper.init_source()
            filesystem_id = source.filesystem_id
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            _login_user(app, username, password, otp_secret)
            resp = app.post(url_for('main.reply'),
                            data={'filesystem_id': filesystem_id,
                                  'message': ''},
                            follow_redirects=True)

        text = resp.data.decode('utf-8')
        assert EMPTY_REPLY_TEXT in text

    def test_nonempty_replies_are_accepted(self, journalist_app):
        """A non-empty reply does NOT trigger the empty-reply warning."""
        with journalist_app.app_context():
            source, _ = utils.db_helper.init_source()
            filesystem_id = source.filesystem_id
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            _login_user(app, username, password, otp_secret)
            resp = app.post(url_for('main.reply'),
                            data={'filesystem_id': filesystem_id,
                                  'message': '_'},
                            follow_redirects=True)

        text = resp.data.decode('utf-8')
        assert EMPTY_REPLY_TEXT not in text

    def test_unauthorized_access_redirects_to_login(self, journalist_app):
        """Anonymous requests to the index are redirected to /login."""
        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                resp = app.get('/')
                ins.assert_redirects(resp, '/login')

    def test_login_throttle(self, journalist_app):
        """After the max failed attempts, further logins are rate-limited."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username

        # Overwrite the default value used during testing
        # TODO this may break other tests during parallel testing
        models.LOGIN_HARDENING = True
        try:
            with journalist_app.test_client() as app:
                for _ in range(Journalist._MAX_LOGIN_ATTEMPTS_PER_PERIOD):
                    resp = app.post('/login',
                                    data=dict(username=username,
                                              password='invalid',
                                              token='invalid'))
                    assert resp.status_code == 200
                    text = resp.data.decode('utf-8')
                    assert "Login failed" in text

                resp = app.post('/login',
                                data=dict(username=username,
                                          password='invalid',
                                          token='invalid'))
                assert resp.status_code == 200
                text = resp.data.decode('utf-8')
                assert ("Please wait at least {} seconds".format(
                    Journalist._LOGIN_ATTEMPT_PERIOD) in text)
        finally:
            # Restore the default so other tests are unaffected.
            models.LOGIN_HARDENING = False

    def test_login_invalid_credentials(self, journalist_app):
        """Wrong password/token yields a 'Login failed' page, not an error."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password='invalid',
                                      token='mocked'))
        assert resp.status_code == 200
        text = resp.data.decode('utf-8')
        assert "Login failed" in text

    def test_validate_redirect(self, journalist_app):
        """A POST to the index redirects to the login page."""
        with journalist_app.test_client() as app:
            resp = app.post('/', follow_redirects=True)
        assert resp.status_code == 200
        text = resp.data.decode('utf-8')
        assert "Login to access" in text

    def test_login_valid_credentials(self, journalist_app):
        """A valid username/password/TOTP login lands on the index page."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password=password,
                                      token=TOTP(otp_secret).now()),
                            follow_redirects=True)
        assert resp.status_code == 200  # successful login redirects to index
        text = resp.data.decode('utf-8')
        assert "Sources" in text
        assert "No documents have been submitted!" in text

    def test_admin_login_redirects_to_index(self, journalist_app):
        """Admin login redirects to '/' (not an admin-specific page)."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=True)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                resp = app.post('/login',
                                data=dict(username=username,
                                          password=password,
                                          token=TOTP(otp_secret).now()),
                                follow_redirects=False)
                ins.assert_redirects(resp, '/')

    def test_user_login_redirects_to_index(self, journalist_app):
        """Non-admin login also redirects to '/'."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=False)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                resp = app.post('/login',
                                data=dict(username=username,
                                          password=password,
                                          token=TOTP(otp_secret).now()),
                                follow_redirects=False)
                ins.assert_redirects(resp, '/')

    def test_admin_has_link_to_edit_account_page_in_index_page(self,
                                                               journalist_app):
        """Admins see the 'edit account' link on the index page."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=True)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password=password,
                                      token=TOTP(otp_secret).now()),
                            follow_redirects=True)
        edit_account_link = ('<a href="/account/account" '
                             'id="link-edit-account">')
        text = resp.data.decode('utf-8')
        assert edit_account_link in text

    def test_user_has_link_to_edit_account_page_in_index_page(self,
                                                              journalist_app):
        """Regular users also see the 'edit account' link on the index page."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password=password,
                                      token=TOTP(otp_secret).now()),
                            follow_redirects=True)
        edit_account_link = ('<a href="/account/account" '
                             'id="link-edit-account">')
        text = resp.data.decode('utf-8')
        assert edit_account_link in text

    def test_admin_has_link_to_admin_index_page_in_index_page(self,
                                                              journalist_app):
        """Admins see the admin-interface link on the index page."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=True)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password=password,
                                      token=TOTP(otp_secret).now()),
                            follow_redirects=True)
        text = resp.data.decode('utf-8')
        assert ADMIN_LINK in text

    def test_user_lacks_link_to_admin_index_page_in_index_page(self,
                                                               journalist_app):
        """Regular users must NOT see the admin-interface link."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            resp = app.post('/login',
                            data=dict(username=username,
                                      password=password,
                                      token=TOTP(otp_secret).now()),
                            follow_redirects=True)
        text = resp.data.decode('utf-8')
        assert ADMIN_LINK not in text

    def test_admin_logout_redirects_to_index(self, journalist_app):
        """Admin logout redirects back to '/'."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=True)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                _login_user(app, username, password, otp_secret)
                resp = app.get('/logout')
                ins.assert_redirects(resp, '/')

    def test_user_logout_redirects_to_index(self, journalist_app):
        """User logout redirects back to '/'."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist()
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                _login_user(app, username, password, otp_secret)
                resp = app.get('/logout')
                ins.assert_redirects(resp, '/')

    def test_admin_index(self, journalist_app):
        """A logged-in admin can load the admin interface page."""
        with journalist_app.app_context():
            user, password = utils.db_helper.init_journalist(is_admin=True)
            username = user.username
            otp_secret = user.otp_secret

        with journalist_app.test_client() as app:
            _login_user(app, username, password, otp_secret)
            resp = app.get('/admin/')
            assert resp.status_code == 200
            text = resp.data.decode('utf-8')
            assert "Admin Interface" in text
class TestJournalistApp(TestCase):
# A method required by flask_testing.TestCase
def create_app(self):
return journalist.app
def setUp(self):
utils.env.setup()
# Patch the two-factor verification to avoid intermittent errors
utils.db_helper.mock_verify_token(self)
# Setup test users: user & admin
self.user, self.user_pw = utils.db_helper.init_journalist()
self.admin, self.admin_pw = utils.db_helper.init_journalist(
is_admin=True)
def tearDown(self):
utils.env.teardown()
# WARNING: we are purposely doing something that would not work in
# production in the _login_user and _login_admin methods. This is done as a
# reminder to the test developer that the flask_testing.TestCase only uses
# one request context per method (see
# https://github.com/freedomofpress/securedrop/issues/1444). By explicitly
# making a point of this, we hope to avoid the introduction of new tests,
# that do not truly prove their result because of this disconnect between
# request context in Flask Testing and production.
#
# TODO: either ditch Flask Testing or subclass it as discussed in the
# aforementioned issue to fix the described problem.
def _login_admin(self):
self._ctx.g.user = self.admin
def _login_user(self):
self._ctx.g.user = self.user
def test_admin_delete_user(self):
# Verify journalist is in the database
self.assertNotEqual(Journalist.query.get(self.user.id), None)
self._login_admin()
resp = self.client.post(url_for('admin.delete_user',
user_id=self.user.id),
follow_redirects=True)
# Assert correct interface behavior
self.assert200(resp)
self.assertIn(escape("Deleted user '{}'".format(self.user.username)),
resp.data)
# Verify journalist is no longer in the database
self.assertEqual(Journalist.query.get(self.user.id), None)
def test_admin_cannot_delete_self(self):
# Verify journalist is in the database
self.assertNotEqual(Journalist.query.get(self.user.id), None)
self._login_admin()
resp = self.client.post(url_for('admin.delete_user',
user_id=self.admin.id),
follow_redirects=True)
# Assert correct interface behavior
self.assert403(resp)
resp = self.client.get(url_for('admin.index'))
self.assert200(resp)
self.assertIn("Admin Interface", resp.data)
# The user can be edited and deleted
self.assertIn(escape("Edit user {}".format(self.user.username)),
resp.data)
self.assertIn(
escape("Delete user {}".format(self.user.username)),
resp.data)
# The admin can be edited but cannot deleted
self.assertIn(escape("Edit user {}".format(self.admin.username)),
resp.data)
self.assertNotIn(
escape("Delete user {}".format(self.admin.username)),
resp.data)
def test_admin_deletes_invalid_user_404(self):
self._login_admin()
invalid_user_pk = max([user.id for user in Journalist.query.all()]) + 1
resp = self.client.post(url_for('admin.delete_user',
user_id=invalid_user_pk))
self.assert404(resp)
def test_admin_edits_user_password_success_response(self):
self._login_admin()
resp = self.client.post(
url_for('admin.new_password', user_id=self.user.id),
data=dict(password=VALID_PASSWORD_2),
follow_redirects=True)
text = resp.data.decode('utf-8')
assert 'Password updated.' in text
assert VALID_PASSWORD_2 in text
def test_admin_edits_user_password_error_response(self):
self._login_admin()
with patch('sqlalchemy.orm.scoping.scoped_session.commit',
side_effect=Exception()):
resp = self.client.post(
url_for('admin.new_password', user_id=self.user.id),
data=dict(password=VALID_PASSWORD_2),
follow_redirects=True)
text = resp.data.decode('utf-8')
assert ('There was an error, and the new password might not have '
'been saved correctly.') in text, text
def test_user_edits_password_success_response(self):
self._login_user()
resp = self.client.post(
url_for('account.new_password'),
data=dict(current_password=self.user_pw,
token='mocked',
password=VALID_PASSWORD_2),
follow_redirects=True)
text = resp.data.decode('utf-8')
assert "Password updated." in text
assert VALID_PASSWORD_2 in text
def test_user_edits_password_expires_session(self):
with self.client as client:
# do a real login to get a real session
# (none of the mocking `g` hacks)
resp = client.post(url_for('main.login'),
data=dict(username=self.user.username,
password=self.user_pw,
token='mocked'))
self.assertRedirects(resp, url_for('main.index'))
assert 'uid' in session
resp = client.post(
url_for('account.new_password'),
data=dict(current_password=self.user_pw,
token='mocked',
password=VALID_PASSWORD_2))
self.assertRedirects(resp, url_for('main.login'))
# verify the session was expired after the password was changed
assert 'uid' not in session
def test_user_edits_password_error_reponse(self):
self._login_user()
with patch('sqlalchemy.orm.scoping.scoped_session.commit',
side_effect=Exception()):
resp = self.client.post(
url_for('account.new_password'),
data=dict(current_password=self.user_pw,
token='mocked',
password=VALID_PASSWORD_2),
follow_redirects=True)
assert ('There was an error, and the new password might not have '
'been saved correctly.') in resp.data.decode('utf-8')
def test_admin_add_user_when_username_already_taken(self):
self._login_admin()
resp = self.client.post(url_for('admin.add_user'),
data=dict(username=self.admin.username,
password=VALID_PASSWORD,
is_admin=None))
self.assertIn('already taken', resp.data)
def test_max_password_length(self):
"""Creating a Journalist with a password that is greater than the
maximum password length should raise an exception"""
overly_long_password = VALID_PASSWORD + \
'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
with self.assertRaises(InvalidPasswordLength):
Journalist(username="My Password is Too Big!",
password=overly_long_password)
def test_min_password_length(self):
"""Creating a Journalist with a password that is smaller than the
minimum password length should raise an exception. This uses the
magic number 7 below to get around the "diceware-like" requirement
that may cause a failure before the length check.
"""
password = ('a ' * 7)[0:(Journalist.MIN_PASSWORD_LEN - 1)]
with self.assertRaises(InvalidPasswordLength):
Journalist(username="My Password is Too Small!",
password=password)
def test_admin_edits_user_password_too_long_warning(self):
self._login_admin()
overly_long_password = VALID_PASSWORD + \
'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
self.client.post(
url_for('admin.new_password', user_id=self.user.id),
data=dict(username=self.user.username, is_admin=None,
password=overly_long_password),
follow_redirects=True)
self.assertMessageFlashed('You submitted a bad password! '
'Password not changed.', 'error')
def test_user_edits_password_too_long_warning(self):
self._login_user()
overly_long_password = VALID_PASSWORD + \
'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
self.client.post(url_for('account.new_password'),
data=dict(password=overly_long_password,
token='mocked',
current_password=self.user_pw),
follow_redirects=True)
self.assertMessageFlashed('You submitted a bad password! '
'Password not changed.', 'error')
def test_admin_add_user_password_too_long_warning(self):
self._login_admin()
overly_long_password = VALID_PASSWORD + \
'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
self.client.post(
url_for('admin.add_user'),
data=dict(username='dellsberg',
password=overly_long_password,
is_admin=None))
self.assertMessageFlashed('There was an error with the autogenerated '
'password. User not created. '
'Please try again.', 'error')
def test_admin_edits_user_invalid_username(self):
"""Test expected error message when admin attempts to change a user's
username to a username that is taken by another user."""
self._login_admin()
new_username = self.admin.username
self.client.post(
url_for('admin.edit_user', user_id=self.user.id),
data=dict(username=new_username, is_admin=None))
self.assertMessageFlashed('Username "{}" already taken.'.format(
new_username), 'error')
def test_admin_resets_user_hotp(self):
self._login_admin()
old_hotp = self.user.hotp
resp = self.client.post(url_for('admin.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret=123456))
new_hotp = self.user.hotp
# check that hotp is different
self.assertNotEqual(old_hotp.secret, new_hotp.secret)
# Redirect to admin 2FA view
self.assertRedirects(
resp,
url_for('admin.new_user_two_factor', uid=self.user.id))
def test_admin_resets_user_hotp_format_non_hexa(self):
self._login_admin()
old_hotp = self.user.hotp.secret
self.client.post(url_for('admin.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret='ZZ'))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed(
"Invalid secret format: "
"please only submit letters A-F and numbers 0-9.", "error")
def test_admin_resets_user_hotp_format_odd(self):
self._login_admin()
old_hotp = self.user.hotp.secret
self.client.post(url_for('admin.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret='Z'))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed(
"Invalid secret format: "
"odd-length secret. Did you mistype the secret?", "error")
@patch('models.Journalist.set_hotp_secret')
@patch('journalist.app.logger.error')
def test_admin_resets_user_hotp_error(self,
mocked_error_logger,
mock_set_hotp_secret):
self._login_admin()
old_hotp = self.user.hotp.secret
error_message = 'SOMETHING WRONG!'
mock_set_hotp_secret.side_effect = TypeError(error_message)
otp_secret = '1234'
self.client.post(url_for('admin.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret=otp_secret))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed("An unexpected error occurred! "
"Please inform your administrator.", "error")
mocked_error_logger.assert_called_once_with(
"set_hotp_secret '{}' (id {}) failed: {}".format(
otp_secret, self.user.id, error_message))
def test_user_resets_hotp(self):
self._login_user()
old_hotp = self.user.hotp
resp = self.client.post(url_for('account.reset_two_factor_hotp'),
data=dict(otp_secret=123456))
new_hotp = self.user.hotp
# check that hotp is different
self.assertNotEqual(old_hotp.secret, new_hotp.secret)
# should redirect to verification page
self.assertRedirects(resp, url_for('account.new_two_factor'))
def test_user_resets_user_hotp_format_odd(self):
self._login_user()
old_hotp = self.user.hotp.secret
self.client.post(url_for('account.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret='123'))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed(
"Invalid secret format: "
"odd-length secret. Did you mistype the secret?", "error")
def test_user_resets_user_hotp_format_non_hexa(self):
self._login_user()
old_hotp = self.user.hotp.secret
self.client.post(url_for('account.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret='ZZ'))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed(
"Invalid secret format: "
"please only submit letters A-F and numbers 0-9.", "error")
@patch('models.Journalist.set_hotp_secret')
@patch('journalist.app.logger.error')
def test_user_resets_user_hotp_error(self,
mocked_error_logger,
mock_set_hotp_secret):
self._login_user()
old_hotp = self.user.hotp.secret
error_message = 'SOMETHING WRONG!'
mock_set_hotp_secret.side_effect = TypeError(error_message)
otp_secret = '1234'
self.client.post(url_for('account.reset_two_factor_hotp'),
data=dict(uid=self.user.id, otp_secret=otp_secret))
new_hotp = self.user.hotp.secret
self.assertEqual(old_hotp, new_hotp)
self.assertMessageFlashed("An unexpected error occurred! "
"Please inform your administrator.", "error")
mocked_error_logger.assert_called_once_with(
"set_hotp_secret '{}' (id {}) failed: {}".format(
otp_secret, self.user.id, error_message))
def test_admin_resets_user_totp(self):
self._login_admin()
old_totp = self.user.totp
resp = self.client.post(
url_for('admin.reset_two_factor_totp'),
data=dict(uid=self.user.id))
new_totp = self.user.totp
self.assertNotEqual(old_totp.secret, new_totp.secret)
self.assertRedirects(
resp,
url_for('admin.new_user_two_factor', uid=self.user.id))
def test_user_resets_totp(self):
self._login_user()
old_totp = self.user.totp
resp = self.client.post(url_for('account.reset_two_factor_totp'))
new_totp = self.user.totp
# check that totp is different
self.assertNotEqual(old_totp.secret, new_totp.secret)
# should redirect to verification page
self.assertRedirects(resp, url_for('account.new_two_factor'))
def test_admin_resets_hotp_with_missing_otp_secret_key(self):
self._login_admin()
resp = self.client.post(url_for('admin.reset_two_factor_hotp'),
data=dict(uid=self.user.id))
self.assertIn('Change Secret', resp.data)
def test_admin_new_user_2fa_redirect(self):
self._login_admin()
resp = self.client.post(
url_for('admin.new_user_two_factor', uid=self.user.id),
data=dict(token='mocked'))
self.assertRedirects(resp, url_for('admin.index'))
def test_http_get_on_admin_new_user_two_factor_page(self):
self._login_admin()
resp = self.client.get(url_for('admin.new_user_two_factor',
uid=self.user.id))
# any GET req should take a user to the admin.new_user_two_factor page
self.assertIn('FreeOTP', resp.data)
def test_http_get_on_admin_add_user_page(self):
self._login_admin()
resp = self.client.get(url_for('admin.add_user'))
# any GET req should take a user to the admin_add_user page
self.assertIn('ADD USER', resp.data)
def test_admin_add_user(self):
self._login_admin()
max_journalist_pk = max([user.id for user in Journalist.query.all()])
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='dellsberg',
password=VALID_PASSWORD,
is_admin=None))
self.assertRedirects(resp, url_for('admin.new_user_two_factor',
uid=max_journalist_pk+1))
def test_admin_add_user_without_username(self):
self._login_admin()
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='',
password=VALID_PASSWORD,
is_admin=None))
self.assertIn('This field is required.', resp.data)
def test_admin_add_user_too_short_username(self):
self._login_admin()
username = 'a' * (Journalist.MIN_USERNAME_LEN - 1)
resp = self.client.post(url_for('admin.add_user'),
data=dict(username=username,
password='pentagonpapers',
password_again='pentagonpapers',
is_admin=None))
self.assertIn('Field must be at least {} characters long'.format(
Journalist.MIN_USERNAME_LEN),
resp.data)
def test_admin_add_user_yubikey_odd_length(self):
self._login_admin()
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='dellsberg',
password=VALID_PASSWORD,
password_again=VALID_PASSWORD,
is_admin=None,
is_hotp=True,
otp_secret='123'))
self.assertIn('HOTP secrets are 40 characters', resp.data)
def test_admin_add_user_yubikey_valid_length(self):
self._login_admin()
otp = '1234567890123456789012345678901234567890'
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='dellsberg',
password=VALID_PASSWORD,
password_again=VALID_PASSWORD,
is_admin=None,
is_hotp=True,
otp_secret=otp),
follow_redirects=True)
# Should redirect to the token verification page
self.assertIn('Enable YubiKey (OATH-HOTP)', resp.data)
def test_admin_add_user_yubikey_correct_length_with_whitespace(self):
self._login_admin()
otp = '12 34 56 78 90 12 34 56 78 90 12 34 56 78 90 12 34 56 78 90'
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='dellsberg',
password=VALID_PASSWORD,
password_again=VALID_PASSWORD,
is_admin=None,
is_hotp=True,
otp_secret=otp),
follow_redirects=True)
# Should redirect to the token verification page
self.assertIn('Enable YubiKey (OATH-HOTP)', resp.data)
def test_admin_sets_user_to_admin(self):
self._login_admin()
new_user = 'admin-set-user-to-admin-test'
resp = self.client.post(url_for('admin.add_user'),
data=dict(username=new_user,
password=VALID_PASSWORD,
is_admin=None))
assert resp.status_code in (200, 302)
journo = Journalist.query.filter(Journalist.username == new_user).one()
assert not journo.is_admin
resp = self.client.post(url_for('admin.edit_user', user_id=journo.id),
data=dict(is_admin=True))
assert resp.status_code in (200, 302), resp.data.decode('utf-8')
# there are better ways to do this, but flake8 complains
journo = Journalist.query.filter(Journalist.username == new_user).one()
assert journo.is_admin is True
def test_admin_renames_user(self):
self._login_admin()
new_user = 'admin-renames-user-test'
resp = self.client.post(url_for('admin.add_user'),
data=dict(username=new_user,
password=VALID_PASSWORD,
is_admin=None))
assert resp.status_code in (200, 302)
journo = Journalist.query.filter(Journalist.username == new_user).one()
new_user = new_user + 'a'
resp = self.client.post(url_for('admin.edit_user', user_id=journo.id),
data=dict(username=new_user))
assert resp.status_code in (200, 302), resp.data.decode('utf-8')
# the following will throw an exception if new_user is not found
# therefore asserting it has been created
Journalist.query.filter(Journalist.username == new_user).one()
@patch('journalist_app.admin.current_app.logger.error')
@patch('journalist_app.admin.Journalist',
side_effect=IntegrityError('STATEMENT', 'PARAMETERS', None))
def test_admin_add_user_integrity_error(self,
mock_journalist,
mocked_error_logger):
self._login_admin()
self.client.post(url_for('admin.add_user'),
data=dict(username='username',
password=VALID_PASSWORD,
is_admin=None))
log_event = mocked_error_logger.call_args[0][0]
self.assertIn(
"Adding user 'username' failed: (__builtin__.NoneType) "
"None [SQL: 'STATEMENT'] [parameters: 'PARAMETERS']",
log_event)
self.assertMessageFlashed(
"An error occurred saving this user to the database."
" Please inform your administrator.",
"error")
def test_logo_upload_with_valid_image_succeeds(self):
# Save original logo to restore after test run
logo_image_location = os.path.join(config.SECUREDROP_ROOT,
"static/i/logo.png")
with open(logo_image_location) as logo_file:
original_image = logo_file.read()
try:
self._login_admin()
form = journalist_app_module.forms.LogoForm(
logo=(StringIO('imagedata'), 'test.png')
)
self.client.post(url_for('admin.manage_config'),
data=form.data,
follow_redirects=True)
self.assertMessageFlashed("Image updated.", "logo-success")
finally:
# Restore original image to logo location for subsequent tests
with open(logo_image_location, 'w') as logo_file:
logo_file.write(original_image)
def test_logo_upload_with_invalid_filetype_fails(self):
self._login_admin()
form = journalist_app_module.forms.LogoForm(
logo=(StringIO('filedata'), 'bad.exe')
)
resp = self.client.post(url_for('admin.manage_config'),
data=form.data,
follow_redirects=True)
self.assertMessageFlashed("Upload images only.", "logo-error")
self.assertIn('Upload images only.', resp.data)
def test_logo_upload_with_empty_input_field_fails(self):
self._login_admin()
form = journalist_app_module.forms.LogoForm(
logo=(StringIO(''), '')
)
resp = self.client.post(url_for('admin.manage_config'),
data=form.data,
follow_redirects=True)
self.assertMessageFlashed("File required.", "logo-error")
self.assertIn('File required.', resp.data)
@patch('journalist.app.logger.error')
def test_creation_of_ossec_test_log_event(self, mocked_error_logger):
self._login_admin()
self.client.get(url_for('admin.ossec_test'))
mocked_error_logger.assert_called_once_with(
"This is a test OSSEC alert"
)
def test_admin_page_restriction_http_gets(self):
    """Non-admin users are redirected (302) away from admin GET
    endpoints."""
    self._login_user()
    protected_urls = (url_for('admin.index'),
                      url_for('admin.add_user'),
                      url_for('admin.edit_user', user_id=self.user.id))
    for target in protected_urls:
        self.assertStatus(self.client.get(target), 302)
def test_admin_page_restriction_http_posts(self):
    """Non-admin users are redirected (302) away from admin POST
    endpoints."""
    # NOTE(review): reset_two_factor_totp/hotp each appear twice in this
    # list, and add_user takes no user_id route argument (it becomes a
    # query parameter) -- presumably copy-paste leftovers; confirm
    # before pruning.
    admin_urls = [url_for('admin.reset_two_factor_totp'),
                  url_for('admin.reset_two_factor_hotp'),
                  url_for('admin.add_user', user_id=self.user.id),
                  url_for('admin.new_user_two_factor'),
                  url_for('admin.reset_two_factor_totp'),
                  url_for('admin.reset_two_factor_hotp'),
                  url_for('admin.edit_user', user_id=self.user.id),
                  url_for('admin.delete_user', user_id=self.user.id)]
    self._login_user()
    for admin_url in admin_urls:
        resp = self.client.post(admin_url)
        self.assertStatus(resp, 302)
def test_user_authorization_for_gets(self):
urls = [url_for('main.index'), url_for('col.col', filesystem_id='1'),
url_for('col.download_single_submission',
filesystem_id='1', fn='1'),
url_for('account.edit')]
for url in urls:
resp = self.client.get(url)
self.assertStatus(resp, 302)
def test_user_authorization_for_posts(self):
urls = [url_for('col.add_star', filesystem_id='1'),
url_for('col.remove_star', filesystem_id='1'),
url_for('col.process'),
url_for('col.delete_single', filesystem_id='1'),
url_for('main.reply'),
url_for('main.regenerate_code'),
url_for('main.bulk'),
url_for('account.new_two_factor'),
url_for('account.reset_two_factor_totp'),
url_for('account.reset_two_factor_hotp')]
for url in urls:
res = self.client.post(url)
self.assertStatus(res, 302)
def test_incorrect_current_password_change(self):
self._login_user()
resp = self.client.post(url_for('account.new_password'),
data=dict(password=VALID_PASSWORD,
token='mocked',
current_password='badpw'),
follow_redirects=True)
text = resp.data.decode('utf-8')
self.assertIn('Incorrect password or two-factor code', text)
def test_too_long_user_password_change(self):
self._login_user()
overly_long_password = VALID_PASSWORD + \
'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
self.client.post(url_for('account.new_password'),
data=dict(password=overly_long_password,
token='mocked',
current_password=self.user_pw),
follow_redirects=True)
self.assertMessageFlashed('You submitted a bad password! Password not '
'changed.', 'error')
def test_valid_user_password_change(self):
self._login_user()
resp = self.client.post(
url_for('account.new_password'),
data=dict(password=VALID_PASSWORD_2,
token='mocked',
current_password=self.user_pw),
follow_redirects=True)
assert 'Password updated.' in \
resp.data.decode('utf-8')
def test_regenerate_totp(self):
self._login_user()
old_totp = self.user.totp
res = self.client.post(url_for('account.reset_two_factor_totp'))
new_totp = self.user.totp
# check that totp is different
self.assertNotEqual(old_totp.secret, new_totp.secret)
# should redirect to verification page
self.assertRedirects(res, url_for('account.new_two_factor'))
def test_edit_hotp(self):
self._login_user()
old_hotp = self.user.hotp
res = self.client.post(
url_for('account.reset_two_factor_hotp'),
data=dict(otp_secret=123456)
)
new_hotp = self.user.hotp
# check that hotp is different
self.assertNotEqual(old_hotp.secret, new_hotp.secret)
# should redirect to verification page
self.assertRedirects(res, url_for('account.new_two_factor'))
def test_delete_source_deletes_submissions(self):
"""Verify that when a source is deleted, the submissions that
correspond to them are also deleted."""
self._delete_collection_setup()
journalist_app_module.utils.delete_collection(
self.source.filesystem_id)
# Source should be gone
results = db.session.query(Source).filter(
Source.id == self.source.id).all()
self.assertEqual(results, [])
def _delete_collection_setup(self):
self.source, _ = utils.db_helper.init_source()
utils.db_helper.submit(self.source, 2)
utils.db_helper.reply(self.user, self.source, 2)
def test_delete_collection_updates_db(self):
"""Verify that when a source is deleted, their Source identity
record, as well as Reply & Submission records associated with
that record are purged from the database."""
self._delete_collection_setup()
journalist_app_module.utils.delete_collection(
self.source.filesystem_id)
results = Source.query.filter(Source.id == self.source.id).all()
self.assertEqual(results, [])
results = db.session.query(
Submission.source_id == self.source.id).all()
self.assertEqual(results, [])
results = db.session.query(Reply.source_id == self.source.id).all()
self.assertEqual(results, [])
def test_delete_source_deletes_source_key(self):
"""Verify that when a source is deleted, the PGP key that corresponds
to them is also deleted."""
self._delete_collection_setup()
# Source key exists
source_key = current_app.crypto_util.getkey(self.source.filesystem_id)
self.assertNotEqual(source_key, None)
journalist_app_module.utils.delete_collection(
self.source.filesystem_id)
# Source key no longer exists
source_key = current_app.crypto_util.getkey(self.source.filesystem_id)
self.assertEqual(source_key, None)
def test_delete_source_deletes_docs_on_disk(self):
"""Verify that when a source is deleted, the encrypted documents that
exist on disk is also deleted."""
self._delete_collection_setup()
# Encrypted documents exists
dir_source_docs = os.path.join(config.STORE_DIR,
self.source.filesystem_id)
self.assertTrue(os.path.exists(dir_source_docs))
job = journalist_app_module.utils.delete_collection(
self.source.filesystem_id)
# Wait up to 5s to wait for Redis worker `srm` operation to complete
utils.async.wait_for_redis_worker(job)
# Encrypted documents no longer exist
self.assertFalse(os.path.exists(dir_source_docs))
def test_download_selected_submissions_from_source(self):
source, _ = utils.db_helper.init_source()
submissions = utils.db_helper.submit(source, 4)
selected_submissions = random.sample(submissions, 2)
selected_fnames = [submission.filename
for submission in selected_submissions]
selected_fnames.sort()
self._login_user()
resp = self.client.post(
'/bulk', data=dict(action='download',
filesystem_id=source.filesystem_id,
doc_names_selected=selected_fnames))
# The download request was succesful, and the app returned a zipfile
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))
# The submissions selected are in the zipfile
for filename in selected_fnames:
self.assertTrue(
# Check that the expected filename is in the zip file
zipfile.ZipFile(StringIO(resp.data)).getinfo(
os.path.join(
source.journalist_filename,
"%s_%s" % (filename.split('-')[0],
source.last_updated.date()),
filename
))
)
# The submissions not selected are absent from the zipfile
not_selected_submissions = set(submissions).difference(
selected_submissions)
not_selected_fnames = [submission.filename
for submission in not_selected_submissions]
for filename in not_selected_fnames:
with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(resp.data)).getinfo(
os.path.join(
source.journalist_filename,
source.journalist_designation,
"%s_%s" % (filename.split('-')[0],
source.last_updated.date()),
filename
))
def _bulk_download_setup(self):
"""Create a couple sources, make some submissions on their behalf,
mark some of them as downloaded, and then perform *action* on all
sources."""
self.source0, _ = utils.db_helper.init_source()
self.source1, _ = utils.db_helper.init_source()
self.journo0, _ = utils.db_helper.init_journalist()
self.submissions0 = utils.db_helper.submit(self.source0, 2)
self.submissions1 = utils.db_helper.submit(self.source1, 3)
self.downloaded0 = random.sample(self.submissions0, 1)
utils.db_helper.mark_downloaded(*self.downloaded0)
self.not_downloaded0 = set(self.submissions0).difference(
self.downloaded0)
self.downloaded1 = random.sample(self.submissions1, 2)
utils.db_helper.mark_downloaded(*self.downloaded1)
self.not_downloaded1 = set(self.submissions1).difference(
self.downloaded1)
def test_download_unread_all_sources(self):
self._bulk_download_setup()
self._login_user()
# Download all unread messages from all sources
self.resp = self.client.post(
url_for('col.process'),
data=dict(action='download-unread',
cols_selected=[self.source0.filesystem_id,
self.source1.filesystem_id]))
# The download request was succesful, and the app returned a zipfile
self.assertEqual(self.resp.status_code, 200)
self.assertEqual(self.resp.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(self.resp.data)))
# All the not dowloaded submissions are in the zipfile
for submission in self.not_downloaded0:
self.assertTrue(
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
os.path.join(
"unread",
self.source0.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source0.last_updated.date()),
submission.filename
))
)
for submission in self.not_downloaded1:
self.assertTrue(
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
os.path.join(
"unread",
self.source1.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source1.last_updated.date()),
submission.filename
))
)
# All the downloaded submissions are absent from the zipfile
for submission in self.downloaded0:
with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
os.path.join(
"unread",
self.source0.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source0.last_updated.date()),
submission.filename
))
for submission in self.downloaded1:
with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
os.path.join(
"unread",
self.source1.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source1.last_updated.date()),
submission.filename
))
def test_download_all_selected_sources(self):
    """Selecting 'download-all' for a single source returns a zipfile
    containing every submission from that source and none from any
    other source.

    Fix: the original issued the identical POST twice (the first
    response was stored in ``self.resp`` and never used) -- an obvious
    copy-paste duplication; a single request is sufficient.
    """
    self._bulk_download_setup()
    self._login_user()
    # Download all messages from self.source1 only
    resp = self.client.post(
        url_for('col.process'),
        data=dict(action='download-all',
                  cols_selected=[self.source1.filesystem_id]))
    # The download request was successful, and the app returned a zipfile
    self.assertEqual(resp.status_code, 200)
    self.assertEqual(resp.content_type, 'application/zip')
    self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))
    # All messages from self.source1 are in the zipfile
    for submission in self.submissions1:
        self.assertTrue(
            zipfile.ZipFile(StringIO(resp.data)).getinfo(
                os.path.join(
                    "all",
                    self.source1.journalist_designation,
                    "%s_%s" % (submission.filename.split('-')[0],
                               self.source1.last_updated.date()),
                    submission.filename)
            )
        )
    # All messages from self.source0 are absent from the zipfile
    for submission in self.submissions0:
        with self.assertRaises(KeyError):
            zipfile.ZipFile(StringIO(resp.data)).getinfo(
                os.path.join(
                    "all",
                    self.source0.journalist_designation,
                    "%s_%s" % (submission.filename.split('-')[0],
                               self.source0.last_updated.date()),
                    submission.filename)
            )
def test_single_source_is_successfully_starred(self):
    """Starring a collection redirects home and sets the star flag on
    the source."""
    starred_source, _ = utils.db_helper.init_source()
    self._login_user()
    resp = self.client.post(
        url_for('col.add_star',
                filesystem_id=starred_source.filesystem_id))
    self.assertRedirects(resp, url_for('main.index'))
    # Assert source is starred
    self.assertTrue(starred_source.star.starred)
def test_single_source_is_successfully_unstarred(self):
source, _ = utils.db_helper.init_source()
self._login_user()
# First star the source
self.client.post(url_for('col.add_star',
filesystem_id=source.filesystem_id))
# Now unstar the source
resp = self.client.post(url_for('col.remove_star',
filesystem_id=source.filesystem_id))
self.assertRedirects(resp, url_for('main.index'))
# Assert source is not starred
self.assertFalse(source.star.starred)
def test_journalist_session_expiration(self):
    """A session older than SESSION_EXPIRATION_MINUTES is cleared on the
    next request and the user is told they were logged out due to
    inactivity."""
    # Remember whether the config already defines the setting, so the
    # finally block can restore (or fully remove) it afterwards.
    try:
        old_expiration = config.SESSION_EXPIRATION_MINUTES
        has_session_expiration = True
    except AttributeError:
        has_session_expiration = False
    try:
        with self.client as client:
            # set the expiration to ensure we trigger an expiration
            config.SESSION_EXPIRATION_MINUTES = -1
            # do a real login to get a real session
            # (none of the mocking `g` hacks)
            resp = client.post(url_for('main.login'),
                               data=dict(username=self.user.username,
                                         password=self.user_pw,
                                         token='mocked'))
            self.assertRedirects(resp, url_for('main.index'))
            assert 'uid' in session
            resp = client.get(url_for('account.edit'),
                              follow_redirects=True)
            # check that the session was cleared (apart from 'expires'
            # which is always present and 'csrf_token' which leaks no info)
            session.pop('expires', None)
            session.pop('csrf_token', None)
            assert not session, session
            assert ('You have been logged out due to inactivity' in
                    resp.data.decode('utf-8'))
    finally:
        # Leave the config exactly as we found it for later tests.
        if has_session_expiration:
            config.SESSION_EXPIRATION_MINUTES = old_expiration
        else:
            del config.SESSION_EXPIRATION_MINUTES
def test_csrf_error_page(self):
    """A POST without a CSRF token redirects back to the login page and
    surfaces the inactivity-logout message."""
    # CSRF protection is normally disabled for tests; enable it here and
    # restore the old value afterwards so other tests are unaffected.
    old_enabled = self.app.config['WTF_CSRF_ENABLED']
    self.app.config['WTF_CSRF_ENABLED'] = True
    try:
        with self.app.test_client() as app:
            resp = app.post(url_for('main.login'))
            self.assertRedirects(resp, url_for('main.login'))
            resp = app.post(url_for('main.login'), follow_redirects=True)
            self.assertIn('You have been logged out due to inactivity',
                          resp.data)
    finally:
        self.app.config['WTF_CSRF_ENABLED'] = old_enabled
def test_col_process_aborts_with_bad_action(self):
    """If the action is not a valid choice, a 500 should occur"""
    self._login_user()
    resp = self.client.post(
        url_for('col.process'),
        data={'cols_selected': 'does not matter',
              'action': 'this action does not exist'})
    self.assert500(resp)
def test_col_process_successfully_deletes_multiple_sources(self):
# Create two sources with one submission each
source_1, _ = utils.db_helper.init_source()
utils.db_helper.submit(source_1, 1)
source_2, _ = utils.db_helper.init_source()
utils.db_helper.submit(source_2, 1)
self._login_user()
form_data = {'cols_selected': [source_1.filesystem_id,
source_2.filesystem_id],
'action': 'delete'}
resp = self.client.post(url_for('col.process'), data=form_data,
follow_redirects=True)
self.assert200(resp)
# Verify there are no remaining sources
remaining_sources = db.session.query(models.Source).all()
self.assertEqual(len(remaining_sources), 0)
def test_col_process_successfully_stars_sources(self):
source_1, _ = utils.db_helper.init_source()
utils.db_helper.submit(source_1, 1)
self._login_user()
form_data = {'cols_selected': [source_1.filesystem_id],
'action': 'star'}
resp = self.client.post(url_for('col.process'), data=form_data,
follow_redirects=True)
self.assert200(resp)
# Verify the source is starred
self.assertTrue(source_1.star.starred)
def test_col_process_successfully_unstars_sources(self):
source_1, _ = utils.db_helper.init_source()
utils.db_helper.submit(source_1, 1)
self._login_user()
# First star the source
form_data = {'cols_selected': [source_1.filesystem_id],
'action': 'star'}
self.client.post(url_for('col.process'), data=form_data,
follow_redirects=True)
# Now unstar the source
form_data = {'cols_selected': [source_1.filesystem_id],
'action': 'un-star'}
resp = self.client.post(url_for('col.process'), data=form_data,
follow_redirects=True)
self.assert200(resp)
# Verify the source is not starred
self.assertFalse(source_1.star.starred)
class TestJournalistLocale(TestCase):
    """Tests for locale handling in the journalist interface, run
    against an app configured with multiple supported locales."""

    def setUp(self):
        utils.env.setup()
        # Patch the two-factor verification to avoid intermittent errors
        utils.db_helper.mock_verify_token(self)
        # Setup test user
        self.user, self.user_pw = utils.db_helper.init_journalist()

    def tearDown(self):
        utils.env.teardown()

    def get_fake_config(self):
        # Fresh config object so locale settings don't leak between tests
        return SDConfig()

    # A method required by flask_testing.TestCase
    def create_app(self):
        fake_config = self.get_fake_config()
        fake_config.SUPPORTED_LOCALES = ['en_US', 'fr_FR']
        return journalist_app_module.create_app(fake_config)

    def test_render_locales(self):
        """the locales.html template must collect both request.args (l=XX) and
        request.view_args (/<filesystem_id>) to build the URL to
        change the locale
        """
        source, _ = utils.db_helper.init_source()
        self._ctx.g.user = self.user
        url = url_for('col.col', filesystem_id=source.filesystem_id)
        resp = self.client.get(url + '?l=fr_FR')
        # The link for the active locale is omitted; the other remains
        self.assertNotIn('?l=fr_FR', resp.data)
        self.assertIn(url + '?l=en_US', resp.data)
class TestJournalistLogin(unittest.TestCase):
    """Tests that Journalist.login only performs the expensive scrypt
    hash when the supplied password is structurally valid."""

    def setUp(self):
        # Push an app context manually since this is a plain
        # unittest.TestCase (not flask_testing.TestCase)
        self.__context = journalist_app_module.create_app(config).app_context()
        self.__context.push()
        utils.env.setup()
        # Patch the two-factor verification so it always succeeds
        utils.db_helper.mock_verify_token(self)
        self.user, self.user_pw = utils.db_helper.init_journalist()

    def tearDown(self):
        utils.env.teardown()
        self.__context.pop()

    @patch('models.Journalist._scrypt_hash')
    @patch('models.Journalist.valid_password', return_value=True)
    def test_valid_login_calls_scrypt(self,
                                      mock_scrypt_hash,
                                      mock_valid_password):
        """A login with a valid-length password must hash it."""
        Journalist.login(self.user.username, self.user_pw, 'mocked')
        self.assertTrue(
            mock_scrypt_hash.called,
            "Failed to call _scrypt_hash for password w/ valid length")

    @patch('models.Journalist._scrypt_hash')
    def test_login_with_invalid_password_doesnt_call_scrypt(self,
                                                            mock_scrypt_hash):
        """An over-long password is rejected before any hashing work."""
        invalid_pw = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
        with self.assertRaises(InvalidPasswordLength):
            Journalist.login(self.user.username, invalid_pw, 'mocked')
        self.assertFalse(
            mock_scrypt_hash.called,
            "Called _scrypt_hash for password w/ invalid length")
Unit tests: Use test data fixtures in journalist unit tests
# -*- coding: utf-8 -*-
import os
import random
import unittest
import zipfile
from cStringIO import StringIO
from flask import url_for, escape, session, current_app, g
from flask_testing import TestCase
from mock import patch
from pyotp import TOTP
from sqlalchemy.orm.exc import StaleDataError
from sqlalchemy.exc import IntegrityError
import crypto_util
import models
import journalist
import journalist_app as journalist_app_module
import utils
os.environ['SECUREDROP_ENV'] = 'test' # noqa
from sdconfig import SDConfig, config
from db import db
from models import (InvalidPasswordLength, Journalist, Reply, Source,
Submission)
from utils.instrument import InstrumentedApp
# Smugly seed the RNG for deterministic testing
random.seed('¯\_(ツ)_/¯')
VALID_PASSWORD = 'correct horse battery staple generic passphrase hooray'
VALID_PASSWORD_2 = 'another correct horse battery staple generic passphrase'
# These are factored out of the tests because some tests have a
# positive/negative case under varying conditions, and we don't want
# false positives after modifying a string in the application.
EMPTY_REPLY_TEXT = "You cannot send an empty reply."
ADMIN_LINK = '<a href="/admin/" id="link-admin-index">'
def _login_user(app, username, password, otp_secret):
    """Log the given journalist in through the test client and assert
    that the login succeeded (g.user is populated)."""
    login_data = {
        'username': username,
        'password': password,
        'token': TOTP(otp_secret).now(),
    }
    resp = app.post('/login', data=login_data, follow_redirects=True)
    assert resp.status_code == 200
    assert hasattr(g, 'user')  # ensure logged in
class TestPytestJournalistApp:
def test_make_password(self, journalist_app):
with patch.object(crypto_util.CryptoUtil,
'genrandomid',
side_effect=['bad', VALID_PASSWORD]):
fake_config = SDConfig()
with journalist_app.test_request_context('/'):
password = journalist_app_module.utils.make_password(
fake_config)
assert password == VALID_PASSWORD
def test_reply_error_logging(self, journalist_app, test_journo):
with journalist_app.app_context():
source, _ = utils.db_helper.init_source()
filesystem_id = source.filesystem_id
exception_class = StaleDataError
exception_msg = 'Potentially sensitive content!'
with journalist_app.test_client() as app:
_login_user(app, test_journo['username'],
test_journo['password'], test_journo['otp_secret'])
with patch.object(journalist_app.logger, 'error') \
as mocked_error_logger:
with patch.object(db.session,
'commit',
side_effect=exception_class(exception_msg)):
resp = app.post('/reply',
data={'filesystem_id': filesystem_id,
'message': '_'},
follow_redirects=True)
assert resp.status_code == 200
# Notice the "potentially sensitive" exception_msg is not present in
# the log event.
mocked_error_logger.assert_called_once_with(
"Reply from '{}' (ID {}) failed: {}!".format(
test_journo['username'],
test_journo['id'],
exception_class))
def test_reply_error_flashed_message(self, journalist_app, test_journo):
with journalist_app.app_context():
source, _ = utils.db_helper.init_source()
filesystem_id = source.filesystem_id
exception_class = StaleDataError
with journalist_app.test_client() as app:
_login_user(app, test_journo['username'],
test_journo['password'], test_journo['otp_secret'])
with InstrumentedApp(app) as ins:
with patch.object(db.session, 'commit',
side_effect=exception_class()):
app.post('/reply',
data={'filesystem_id': filesystem_id,
'message': '_'})
ins.assert_message_flashed(
'An unexpected error occurred! Please '
'inform your administrator.', 'error')
def test_empty_replies_are_rejected(self, journalist_app, test_journo):
with journalist_app.app_context():
source, _ = utils.db_helper.init_source()
filesystem_id = source.filesystem_id
with journalist_app.test_client() as app:
_login_user(app, test_journo['username'],
test_journo['password'], test_journo['otp_secret'])
resp = app.post(url_for('main.reply'),
data={'filesystem_id': filesystem_id,
'message': ''},
follow_redirects=True)
text = resp.data.decode('utf-8')
assert EMPTY_REPLY_TEXT in text
def test_nonempty_replies_are_accepted(self, journalist_app,
test_journo):
with journalist_app.app_context():
source, _ = utils.db_helper.init_source()
filesystem_id = source.filesystem_id
with journalist_app.test_client() as app:
_login_user(app, test_journo['username'],
test_journo['password'],
test_journo['otp_secret'])
resp = app.post(url_for('main.reply'),
data={'filesystem_id': filesystem_id,
'message': '_'},
follow_redirects=True)
text = resp.data.decode('utf-8')
assert EMPTY_REPLY_TEXT not in text
def test_unauthorized_access_redirects_to_login(self, journalist_app):
with journalist_app.test_client() as app:
with InstrumentedApp(journalist_app) as ins:
resp = app.get('/')
ins.assert_redirects(resp, '/login')
def test_login_throttle(self, journalist_app, test_journo):
# Overwrite the default value used during testing
# TODO this may break other tests during parallel testing
models.LOGIN_HARDENING = True
try:
with journalist_app.test_client() as app:
for _ in range(Journalist._MAX_LOGIN_ATTEMPTS_PER_PERIOD):
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password='invalid',
token='invalid'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Login failed" in text
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password='invalid',
token='invalid'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert ("Please wait at least {} seconds".format(
Journalist._LOGIN_ATTEMPT_PERIOD) in text)
finally:
models.LOGIN_HARDENING = False
def test_login_invalid_credentials(self, journalist_app, test_journo):
with journalist_app.test_client() as app:
resp = app.post('/login',
data=dict(username=test_journo['username'],
password='invalid',
token='mocked'))
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Login failed" in text
def test_validate_redirect(self, journalist_app):
with journalist_app.test_client() as app:
resp = app.post('/', follow_redirects=True)
assert resp.status_code == 200
text = resp.data.decode('utf-8')
assert "Login to access" in text
def test_login_valid_credentials(self, journalist_app, test_journo):
with journalist_app.test_client() as app:
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password=test_journo['password'],
token=TOTP(test_journo['otp_secret']).now()),
follow_redirects=True)
assert resp.status_code == 200 # successful login redirects to index
text = resp.data.decode('utf-8')
assert "Sources" in text
assert "No documents have been submitted!" in text
def test_admin_login_redirects_to_index(self, journalist_app, test_admin):
with journalist_app.test_client() as app:
with InstrumentedApp(journalist_app) as ins:
resp = app.post(
'/login',
data=dict(username=test_admin['username'],
password=test_admin['password'],
token=TOTP(test_admin['otp_secret']).now()),
follow_redirects=False)
ins.assert_redirects(resp, '/')
def test_user_login_redirects_to_index(self, journalist_app,
test_journo):
with journalist_app.test_client() as app:
with InstrumentedApp(journalist_app) as ins:
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password=test_journo['password'],
token=TOTP(test_journo['otp_secret']).now()),
follow_redirects=False)
ins.assert_redirects(resp, '/')
def test_admin_has_link_to_edit_account_page_in_index_page(self,
journalist_app,
test_admin):
with journalist_app.test_client() as app:
resp = app.post(
'/login',
data=dict(username=test_admin['username'],
password=test_admin['password'],
token=TOTP(test_admin['otp_secret']).now()),
follow_redirects=True)
edit_account_link = ('<a href="/account/account" '
'id="link-edit-account">')
text = resp.data.decode('utf-8')
assert edit_account_link in text
def test_user_has_link_to_edit_account_page_in_index_page(self,
journalist_app,
test_journo):
with journalist_app.test_client() as app:
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password=test_journo['password'],
token=TOTP(test_journo['otp_secret']).now()),
follow_redirects=True)
edit_account_link = ('<a href="/account/account" '
'id="link-edit-account">')
text = resp.data.decode('utf-8')
assert edit_account_link in text
def test_admin_has_link_to_admin_index_page_in_index_page(self,
journalist_app,
test_admin):
with journalist_app.test_client() as app:
resp = app.post(
'/login',
data=dict(username=test_admin['username'],
password=test_admin['password'],
token=TOTP(test_admin['otp_secret']).now()),
follow_redirects=True)
text = resp.data.decode('utf-8')
assert ADMIN_LINK in text
def test_user_lacks_link_to_admin_index_page_in_index_page(self,
journalist_app,
test_journo):
with journalist_app.test_client() as app:
resp = app.post(
'/login',
data=dict(username=test_journo['username'],
password=test_journo['password'],
token=TOTP(test_journo['otp_secret']).now()),
follow_redirects=True)
text = resp.data.decode('utf-8')
assert ADMIN_LINK not in text
    def test_admin_logout_redirects_to_index(self, journalist_app, test_admin):
        """Logging out an admin should redirect back to the index ('/')."""
        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                _login_user(app, test_admin['username'],
                            test_admin['password'],
                            test_admin['otp_secret'])
                resp = app.get('/logout')
                ins.assert_redirects(resp, '/')
    def test_user_logout_redirects_to_index(self, journalist_app,
                                            test_journo):
        """Logging out a regular user should redirect back to the index."""
        with journalist_app.test_client() as app:
            with InstrumentedApp(journalist_app) as ins:
                _login_user(app, test_journo['username'],
                            test_journo['password'],
                            test_journo['otp_secret'])
                resp = app.get('/logout')
                ins.assert_redirects(resp, '/')
    def test_admin_index(self, journalist_app, test_admin):
        """An admin can load the admin interface index page."""
        with journalist_app.test_client() as app:
            _login_user(app, test_admin['username'], test_admin['password'],
                        test_admin['otp_secret'])
            resp = app.get('/admin/')
            assert resp.status_code == 200
            text = resp.data.decode('utf-8')
            assert "Admin Interface" in text
class TestJournalistApp(TestCase):
    # A method required by flask_testing.TestCase
    def create_app(self):
        """Return the Flask application under test (flask_testing hook)."""
        return journalist.app
    def setUp(self):
        """Set up a clean test environment with one regular user and one
        admin, and stub out two-factor token verification."""
        utils.env.setup()

        # Patch the two-factor verification to avoid intermittent errors
        utils.db_helper.mock_verify_token(self)

        # Setup test users: user & admin
        self.user, self.user_pw = utils.db_helper.init_journalist()
        self.admin, self.admin_pw = utils.db_helper.init_journalist(
            is_admin=True)
    def tearDown(self):
        """Tear down the per-test environment created in setUp."""
        utils.env.teardown()
# WARNING: we are purposely doing something that would not work in
# production in the _login_user and _login_admin methods. This is done as a
# reminder to the test developer that the flask_testing.TestCase only uses
# one request context per method (see
# https://github.com/freedomofpress/securedrop/issues/1444). By explicitly
# making a point of this, we hope to avoid the introduction of new tests,
# that do not truly prove their result because of this disconnect between
# request context in Flask Testing and production.
#
# TODO: either ditch Flask Testing or subclass it as discussed in the
# aforementioned issue to fix the described problem.
    def _login_admin(self):
        """Fake an admin login by assigning g.user directly (see the WARNING
        comment above about flask_testing request contexts)."""
        self._ctx.g.user = self.admin
    def _login_user(self):
        """Fake a regular-user login by assigning g.user directly (see the
        WARNING comment above about flask_testing request contexts)."""
        self._ctx.g.user = self.user
    def test_admin_delete_user(self):
        """An admin can delete another user, and the user is removed from
        the database."""
        # Verify journalist is in the database
        self.assertNotEqual(Journalist.query.get(self.user.id), None)

        self._login_admin()
        resp = self.client.post(url_for('admin.delete_user',
                                        user_id=self.user.id),
                                follow_redirects=True)

        # Assert correct interface behavior
        self.assert200(resp)
        self.assertIn(escape("Deleted user '{}'".format(self.user.username)),
                      resp.data)

        # Verify journalist is no longer in the database
        self.assertEqual(Journalist.query.get(self.user.id), None)
    def test_admin_cannot_delete_self(self):
        """Deleting your own admin account is forbidden (403), and the admin
        index page omits the delete control for the admin's own row."""
        # Verify journalist is in the database
        self.assertNotEqual(Journalist.query.get(self.user.id), None)

        self._login_admin()
        resp = self.client.post(url_for('admin.delete_user',
                                        user_id=self.admin.id),
                                follow_redirects=True)

        # Assert correct interface behavior
        self.assert403(resp)

        resp = self.client.get(url_for('admin.index'))
        self.assert200(resp)
        self.assertIn("Admin Interface", resp.data)

        # The user can be edited and deleted
        self.assertIn(escape("Edit user {}".format(self.user.username)),
                      resp.data)
        self.assertIn(
            escape("Delete user {}".format(self.user.username)),
            resp.data)
        # The admin can be edited but cannot deleted
        self.assertIn(escape("Edit user {}".format(self.admin.username)),
                      resp.data)
        self.assertNotIn(
            escape("Delete user {}".format(self.admin.username)),
            resp.data)
def test_admin_deletes_invalid_user_404(self):
self._login_admin()
invalid_user_pk = max([user.id for user in Journalist.query.all()]) + 1
resp = self.client.post(url_for('admin.delete_user',
user_id=invalid_user_pk))
self.assert404(resp)
    def test_admin_edits_user_password_success_response(self):
        """When an admin sets a user's new password, the page confirms the
        update and shows the new password."""
        self._login_admin()

        resp = self.client.post(
            url_for('admin.new_password', user_id=self.user.id),
            data=dict(password=VALID_PASSWORD_2),
            follow_redirects=True)

        text = resp.data.decode('utf-8')
        assert 'Password updated.' in text
        assert VALID_PASSWORD_2 in text
    def test_admin_edits_user_password_error_response(self):
        """A database failure while an admin changes a user's password shows
        an error message instead of crashing."""
        self._login_admin()

        # Force the DB commit to fail to exercise the error path.
        with patch('sqlalchemy.orm.scoping.scoped_session.commit',
                   side_effect=Exception()):
            resp = self.client.post(
                url_for('admin.new_password', user_id=self.user.id),
                data=dict(password=VALID_PASSWORD_2),
                follow_redirects=True)
            text = resp.data.decode('utf-8')

        assert ('There was an error, and the new password might not have '
                'been saved correctly.') in text, text
    def test_user_edits_password_success_response(self):
        """A user changing their own password sees the confirmation and the
        new password."""
        self._login_user()
        resp = self.client.post(
            url_for('account.new_password'),
            data=dict(current_password=self.user_pw,
                      token='mocked',
                      password=VALID_PASSWORD_2),
            follow_redirects=True)

        text = resp.data.decode('utf-8')
        assert "Password updated." in text
        assert VALID_PASSWORD_2 in text
    def test_user_edits_password_expires_session(self):
        """Changing your own password must invalidate the current session and
        send you back to the login page."""
        with self.client as client:
            # do a real login to get a real session
            # (none of the mocking `g` hacks)
            resp = client.post(url_for('main.login'),
                               data=dict(username=self.user.username,
                                         password=self.user_pw,
                                         token='mocked'))
            self.assertRedirects(resp, url_for('main.index'))
            assert 'uid' in session

            resp = client.post(
                url_for('account.new_password'),
                data=dict(current_password=self.user_pw,
                          token='mocked',
                          password=VALID_PASSWORD_2))

            self.assertRedirects(resp, url_for('main.login'))

            # verify the session was expired after the password was changed
            assert 'uid' not in session
    def test_user_edits_password_error_reponse(self):
        """A database failure during a self-service password change shows an
        error message instead of crashing.

        NOTE(review): "reponse" in the method name is a typo for "response";
        kept as-is to avoid churn in test selection by name.
        """
        self._login_user()

        # Force the DB commit to fail to exercise the error path.
        with patch('sqlalchemy.orm.scoping.scoped_session.commit',
                   side_effect=Exception()):
            resp = self.client.post(
                url_for('account.new_password'),
                data=dict(current_password=self.user_pw,
                          token='mocked',
                          password=VALID_PASSWORD_2),
                follow_redirects=True)

        assert ('There was an error, and the new password might not have '
                'been saved correctly.') in resp.data.decode('utf-8')
    def test_admin_add_user_when_username_already_taken(self):
        """Creating a user with an existing username shows an
        'already taken' error."""
        self._login_admin()
        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username=self.admin.username,
                                          password=VALID_PASSWORD,
                                          is_admin=None))
        self.assertIn('already taken', resp.data)
    def test_max_password_length(self):
        """Creating a Journalist with a password that is greater than the
        maximum password length should raise an exception"""
        # One character longer than the maximum allowed length.
        overly_long_password = VALID_PASSWORD + \
            'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)
        with self.assertRaises(InvalidPasswordLength):
            Journalist(username="My Password is Too Big!",
                       password=overly_long_password)
    def test_min_password_length(self):
        """Creating a Journalist with a password that is smaller than the
        minimum password length should raise an exception. This uses the
        magic number 7 below to get around the "diceware-like" requirement
        that may cause a failure before the length check.
        """
        # 'a ' repeated looks diceware-like; slice to one char below the
        # minimum so only the length check can fail.
        password = ('a ' * 7)[0:(Journalist.MIN_PASSWORD_LEN - 1)]
        with self.assertRaises(InvalidPasswordLength):
            Journalist(username="My Password is Too Small!",
                       password=password)
    def test_admin_edits_user_password_too_long_warning(self):
        """An admin submitting an over-long password for a user gets a
        'bad password' flash and the password is not changed."""
        self._login_admin()
        # One character longer than the maximum allowed length.
        overly_long_password = VALID_PASSWORD + \
            'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)

        self.client.post(
            url_for('admin.new_password', user_id=self.user.id),
            data=dict(username=self.user.username, is_admin=None,
                      password=overly_long_password),
            follow_redirects=True)

        self.assertMessageFlashed('You submitted a bad password! '
                                  'Password not changed.', 'error')
    def test_user_edits_password_too_long_warning(self):
        """A user submitting an over-long new password gets a 'bad password'
        flash and the password is not changed."""
        self._login_user()
        # One character longer than the maximum allowed length.
        overly_long_password = VALID_PASSWORD + \
            'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)

        self.client.post(url_for('account.new_password'),
                         data=dict(password=overly_long_password,
                                   token='mocked',
                                   current_password=self.user_pw),
                         follow_redirects=True)

        self.assertMessageFlashed('You submitted a bad password! '
                                  'Password not changed.', 'error')
    def test_admin_add_user_password_too_long_warning(self):
        """Creating a user with an over-long password flashes an error and
        does not create the user."""
        self._login_admin()
        # One character longer than the maximum allowed length.
        overly_long_password = VALID_PASSWORD + \
            'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)

        self.client.post(
            url_for('admin.add_user'),
            data=dict(username='dellsberg',
                      password=overly_long_password,
                      is_admin=None))

        self.assertMessageFlashed('There was an error with the autogenerated '
                                  'password. User not created. '
                                  'Please try again.', 'error')
    def test_admin_edits_user_invalid_username(self):
        """Test expected error message when admin attempts to change a user's
        username to a username that is taken by another user."""
        self._login_admin()
        # Try to rename self.user to the admin's own (taken) username.
        new_username = self.admin.username

        self.client.post(
            url_for('admin.edit_user', user_id=self.user.id),
            data=dict(username=new_username, is_admin=None))

        self.assertMessageFlashed('Username "{}" already taken.'.format(
            new_username), 'error')
    def test_admin_resets_user_hotp(self):
        """An admin can set a new HOTP secret for a user and is redirected
        to the 2FA verification view."""
        self._login_admin()
        old_hotp = self.user.hotp

        resp = self.client.post(url_for('admin.reset_two_factor_hotp'),
                                data=dict(uid=self.user.id, otp_secret=123456))
        new_hotp = self.user.hotp

        # check that hotp is different
        self.assertNotEqual(old_hotp.secret, new_hotp.secret)

        # Redirect to admin 2FA view
        self.assertRedirects(
            resp,
            url_for('admin.new_user_two_factor', uid=self.user.id))
    def test_admin_resets_user_hotp_format_non_hexa(self):
        """A non-hexadecimal HOTP secret is rejected and leaves the old
        secret unchanged."""
        self._login_admin()
        old_hotp = self.user.hotp.secret

        self.client.post(url_for('admin.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret='ZZ'))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed(
            "Invalid secret format: "
            "please only submit letters A-F and numbers 0-9.", "error")
    def test_admin_resets_user_hotp_format_odd(self):
        """An odd-length HOTP secret is rejected and leaves the old secret
        unchanged."""
        self._login_admin()
        old_hotp = self.user.hotp.secret

        self.client.post(url_for('admin.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret='Z'))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed(
            "Invalid secret format: "
            "odd-length secret. Did you mistype the secret?", "error")
    @patch('models.Journalist.set_hotp_secret')
    @patch('journalist.app.logger.error')
    def test_admin_resets_user_hotp_error(self,
                                          mocked_error_logger,
                                          mock_set_hotp_secret):
        """An unexpected error while setting an HOTP secret flashes a
        generic error and logs the failure; the secret stays unchanged."""
        self._login_admin()
        old_hotp = self.user.hotp.secret

        # Make set_hotp_secret blow up to exercise the error path.
        error_message = 'SOMETHING WRONG!'
        mock_set_hotp_secret.side_effect = TypeError(error_message)

        otp_secret = '1234'
        self.client.post(url_for('admin.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret=otp_secret))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed("An unexpected error occurred! "
                                  "Please inform your administrator.", "error")
        mocked_error_logger.assert_called_once_with(
            "set_hotp_secret '{}' (id {}) failed: {}".format(
                otp_secret, self.user.id, error_message))
    def test_user_resets_hotp(self):
        """A user can set a new HOTP secret for themselves and is redirected
        to the verification page."""
        self._login_user()
        old_hotp = self.user.hotp

        resp = self.client.post(url_for('account.reset_two_factor_hotp'),
                                data=dict(otp_secret=123456))
        new_hotp = self.user.hotp

        # check that hotp is different
        self.assertNotEqual(old_hotp.secret, new_hotp.secret)

        # should redirect to verification page
        self.assertRedirects(resp, url_for('account.new_two_factor'))
    def test_user_resets_user_hotp_format_odd(self):
        """An odd-length HOTP secret in the self-service reset is rejected
        and leaves the old secret unchanged."""
        self._login_user()
        old_hotp = self.user.hotp.secret

        self.client.post(url_for('account.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret='123'))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed(
            "Invalid secret format: "
            "odd-length secret. Did you mistype the secret?", "error")
    def test_user_resets_user_hotp_format_non_hexa(self):
        """A non-hexadecimal HOTP secret in the self-service reset is
        rejected and leaves the old secret unchanged."""
        self._login_user()
        old_hotp = self.user.hotp.secret

        self.client.post(url_for('account.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret='ZZ'))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed(
            "Invalid secret format: "
            "please only submit letters A-F and numbers 0-9.", "error")
    @patch('models.Journalist.set_hotp_secret')
    @patch('journalist.app.logger.error')
    def test_user_resets_user_hotp_error(self,
                                         mocked_error_logger,
                                         mock_set_hotp_secret):
        """An unexpected error during a self-service HOTP reset flashes a
        generic error and logs the failure; the secret stays unchanged."""
        self._login_user()
        old_hotp = self.user.hotp.secret

        # Make set_hotp_secret blow up to exercise the error path.
        error_message = 'SOMETHING WRONG!'
        mock_set_hotp_secret.side_effect = TypeError(error_message)

        otp_secret = '1234'
        self.client.post(url_for('account.reset_two_factor_hotp'),
                         data=dict(uid=self.user.id, otp_secret=otp_secret))
        new_hotp = self.user.hotp.secret

        self.assertEqual(old_hotp, new_hotp)
        self.assertMessageFlashed("An unexpected error occurred! "
                                  "Please inform your administrator.", "error")
        mocked_error_logger.assert_called_once_with(
            "set_hotp_secret '{}' (id {}) failed: {}".format(
                otp_secret, self.user.id, error_message))
    def test_admin_resets_user_totp(self):
        """An admin can regenerate a user's TOTP secret and is redirected to
        the 2FA verification view."""
        self._login_admin()
        old_totp = self.user.totp

        resp = self.client.post(
            url_for('admin.reset_two_factor_totp'),
            data=dict(uid=self.user.id))
        new_totp = self.user.totp

        self.assertNotEqual(old_totp.secret, new_totp.secret)

        self.assertRedirects(
            resp,
            url_for('admin.new_user_two_factor', uid=self.user.id))
    def test_user_resets_totp(self):
        """A user can regenerate their own TOTP secret and is redirected to
        the verification page."""
        self._login_user()
        old_totp = self.user.totp

        resp = self.client.post(url_for('account.reset_two_factor_totp'))
        new_totp = self.user.totp

        # check that totp is different
        self.assertNotEqual(old_totp.secret, new_totp.secret)

        # should redirect to verification page
        self.assertRedirects(resp, url_for('account.new_two_factor'))
    def test_admin_resets_hotp_with_missing_otp_secret_key(self):
        """Posting an HOTP reset without a secret re-renders the
        'Change Secret' form."""
        self._login_admin()
        resp = self.client.post(url_for('admin.reset_two_factor_hotp'),
                                data=dict(uid=self.user.id))

        self.assertIn('Change Secret', resp.data)
    def test_admin_new_user_2fa_redirect(self):
        """Submitting a valid token on the new-user 2FA page redirects to
        the admin index."""
        self._login_admin()
        resp = self.client.post(
            url_for('admin.new_user_two_factor', uid=self.user.id),
            data=dict(token='mocked'))
        self.assertRedirects(resp, url_for('admin.index'))
    def test_http_get_on_admin_new_user_two_factor_page(self):
        """A GET on the new-user 2FA endpoint renders the 2FA setup page."""
        self._login_admin()
        resp = self.client.get(url_for('admin.new_user_two_factor',
                                       uid=self.user.id))
        # any GET req should take a user to the admin.new_user_two_factor page
        self.assertIn('FreeOTP', resp.data)
def test_http_get_on_admin_add_user_page(self):
self._login_admin()
resp = self.client.get(url_for('admin.add_user'))
# any GET req should take a user to the admin_add_user page
self.assertIn('ADD USER', resp.data)
def test_admin_add_user(self):
self._login_admin()
max_journalist_pk = max([user.id for user in Journalist.query.all()])
resp = self.client.post(url_for('admin.add_user'),
data=dict(username='dellsberg',
password=VALID_PASSWORD,
is_admin=None))
self.assertRedirects(resp, url_for('admin.new_user_two_factor',
uid=max_journalist_pk+1))
    def test_admin_add_user_without_username(self):
        """Submitting the add-user form without a username shows a
        'field required' validation error."""
        self._login_admin()
        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username='',
                                          password=VALID_PASSWORD,
                                          is_admin=None))
        self.assertIn('This field is required.', resp.data)
    def test_admin_add_user_too_short_username(self):
        """A username below the minimum length triggers a length validation
        error."""
        self._login_admin()
        # One character below the minimum allowed username length.
        username = 'a' * (Journalist.MIN_USERNAME_LEN - 1)

        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username=username,
                                          password='pentagonpapers',
                                          password_again='pentagonpapers',
                                          is_admin=None))
        self.assertIn('Field must be at least {} characters long'.format(
                          Journalist.MIN_USERNAME_LEN),
                      resp.data)
    def test_admin_add_user_yubikey_odd_length(self):
        """An HOTP secret that is too short is rejected with a length
        error."""
        self._login_admin()
        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username='dellsberg',
                                          password=VALID_PASSWORD,
                                          password_again=VALID_PASSWORD,
                                          is_admin=None,
                                          is_hotp=True,
                                          otp_secret='123'))
        self.assertIn('HOTP secrets are 40 characters', resp.data)
    def test_admin_add_user_yubikey_valid_length(self):
        """A 40-character HOTP secret is accepted and leads to the token
        verification page."""
        self._login_admin()

        otp = '1234567890123456789012345678901234567890'
        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username='dellsberg',
                                          password=VALID_PASSWORD,
                                          password_again=VALID_PASSWORD,
                                          is_admin=None,
                                          is_hotp=True,
                                          otp_secret=otp),
                                follow_redirects=True)

        # Should redirect to the token verification page
        self.assertIn('Enable YubiKey (OATH-HOTP)', resp.data)
    def test_admin_add_user_yubikey_correct_length_with_whitespace(self):
        """An HOTP secret with interspersed whitespace (40 hex chars total)
        is accepted and leads to the token verification page."""
        self._login_admin()

        otp = '12 34 56 78 90 12 34 56 78 90 12 34 56 78 90 12 34 56 78 90'
        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username='dellsberg',
                                          password=VALID_PASSWORD,
                                          password_again=VALID_PASSWORD,
                                          is_admin=None,
                                          is_hotp=True,
                                          otp_secret=otp),
                                follow_redirects=True)

        # Should redirect to the token verification page
        self.assertIn('Enable YubiKey (OATH-HOTP)', resp.data)
    def test_admin_sets_user_to_admin(self):
        """An admin can promote a freshly created non-admin user to admin via
        the edit-user form."""
        self._login_admin()
        new_user = 'admin-set-user-to-admin-test'

        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username=new_user,
                                          password=VALID_PASSWORD,
                                          is_admin=None))
        assert resp.status_code in (200, 302)

        journo = Journalist.query.filter(Journalist.username == new_user).one()
        # A user created with is_admin=None starts out non-admin.
        assert not journo.is_admin

        resp = self.client.post(url_for('admin.edit_user', user_id=journo.id),
                                data=dict(is_admin=True))
        assert resp.status_code in (200, 302), resp.data.decode('utf-8')

        # there are better ways to do this, but flake8 complains
        journo = Journalist.query.filter(Journalist.username == new_user).one()
        assert journo.is_admin is True
    def test_admin_renames_user(self):
        """An admin can change a user's username via the edit-user form."""
        self._login_admin()
        new_user = 'admin-renames-user-test'

        resp = self.client.post(url_for('admin.add_user'),
                                data=dict(username=new_user,
                                          password=VALID_PASSWORD,
                                          is_admin=None))
        assert resp.status_code in (200, 302)
        journo = Journalist.query.filter(Journalist.username == new_user).one()

        new_user = new_user + 'a'
        resp = self.client.post(url_for('admin.edit_user', user_id=journo.id),
                                data=dict(username=new_user))
        assert resp.status_code in (200, 302), resp.data.decode('utf-8')

        # the following will throw an exception if new_user is not found
        # therefore asserting it has been created
        Journalist.query.filter(Journalist.username == new_user).one()
    @patch('journalist_app.admin.current_app.logger.error')
    @patch('journalist_app.admin.Journalist',
           side_effect=IntegrityError('STATEMENT', 'PARAMETERS', None))
    def test_admin_add_user_integrity_error(self,
                                            mock_journalist,
                                            mocked_error_logger):
        """A database IntegrityError while creating a user is logged and
        surfaced to the admin as a flashed error."""
        self._login_admin()

        self.client.post(url_for('admin.add_user'),
                         data=dict(username='username',
                                   password=VALID_PASSWORD,
                                   is_admin=None))

        log_event = mocked_error_logger.call_args[0][0]
        self.assertIn(
            "Adding user 'username' failed: (__builtin__.NoneType) "
            "None [SQL: 'STATEMENT'] [parameters: 'PARAMETERS']",
            log_event)
        self.assertMessageFlashed(
            "An error occurred saving this user to the database."
            " Please inform your administrator.",
            "error")
def test_logo_upload_with_valid_image_succeeds(self):
# Save original logo to restore after test run
logo_image_location = os.path.join(config.SECUREDROP_ROOT,
"static/i/logo.png")
with open(logo_image_location) as logo_file:
original_image = logo_file.read()
try:
self._login_admin()
form = journalist_app_module.forms.LogoForm(
logo=(StringIO('imagedata'), 'test.png')
)
self.client.post(url_for('admin.manage_config'),
data=form.data,
follow_redirects=True)
self.assertMessageFlashed("Image updated.", "logo-success")
finally:
# Restore original image to logo location for subsequent tests
with open(logo_image_location, 'w') as logo_file:
logo_file.write(original_image)
    def test_logo_upload_with_invalid_filetype_fails(self):
        """Uploading a non-image file is rejected with a logo-error flash."""
        self._login_admin()

        form = journalist_app_module.forms.LogoForm(
            logo=(StringIO('filedata'), 'bad.exe')
        )
        resp = self.client.post(url_for('admin.manage_config'),
                                data=form.data,
                                follow_redirects=True)
        self.assertMessageFlashed("Upload images only.", "logo-error")
        self.assertIn('Upload images only.', resp.data)
    def test_logo_upload_with_empty_input_field_fails(self):
        """Submitting the logo form with no file is rejected with a
        logo-error flash."""
        self._login_admin()

        form = journalist_app_module.forms.LogoForm(
            logo=(StringIO(''), '')
        )
        resp = self.client.post(url_for('admin.manage_config'),
                                data=form.data,
                                follow_redirects=True)

        self.assertMessageFlashed("File required.", "logo-error")
        self.assertIn('File required.', resp.data)
    @patch('journalist.app.logger.error')
    def test_creation_of_ossec_test_log_event(self, mocked_error_logger):
        """Hitting the OSSEC test endpoint emits the expected test alert to
        the error log."""
        self._login_admin()
        self.client.get(url_for('admin.ossec_test'))

        mocked_error_logger.assert_called_once_with(
            "This is a test OSSEC alert"
        )
def test_admin_page_restriction_http_gets(self):
admin_urls = [url_for('admin.index'), url_for('admin.add_user'),
url_for('admin.edit_user', user_id=self.user.id)]
self._login_user()
for admin_url in admin_urls:
resp = self.client.get(admin_url)
self.assertStatus(resp, 302)
    def test_admin_page_restriction_http_posts(self):
        """POST requests to admin endpoints by a non-admin user are
        redirected away (302)."""
        admin_urls = [url_for('admin.reset_two_factor_totp'),
                      url_for('admin.reset_two_factor_hotp'),
                      url_for('admin.add_user', user_id=self.user.id),
                      url_for('admin.new_user_two_factor'),
                      url_for('admin.reset_two_factor_totp'),
                      url_for('admin.reset_two_factor_hotp'),
                      url_for('admin.edit_user', user_id=self.user.id),
                      url_for('admin.delete_user', user_id=self.user.id)]
        self._login_user()
        for admin_url in admin_urls:
            resp = self.client.post(admin_url)
            self.assertStatus(resp, 302)
def test_user_authorization_for_gets(self):
urls = [url_for('main.index'), url_for('col.col', filesystem_id='1'),
url_for('col.download_single_submission',
filesystem_id='1', fn='1'),
url_for('account.edit')]
for url in urls:
resp = self.client.get(url)
self.assertStatus(resp, 302)
    def test_user_authorization_for_posts(self):
        """POST requests to journalist-only endpoints without a login are
        redirected (302)."""
        urls = [url_for('col.add_star', filesystem_id='1'),
                url_for('col.remove_star', filesystem_id='1'),
                url_for('col.process'),
                url_for('col.delete_single', filesystem_id='1'),
                url_for('main.reply'),
                url_for('main.regenerate_code'),
                url_for('main.bulk'),
                url_for('account.new_two_factor'),
                url_for('account.reset_two_factor_totp'),
                url_for('account.reset_two_factor_hotp')]
        for url in urls:
            res = self.client.post(url)
            self.assertStatus(res, 302)
    def test_incorrect_current_password_change(self):
        """A password change with a wrong current password is rejected with
        an error message."""
        self._login_user()
        resp = self.client.post(url_for('account.new_password'),
                                data=dict(password=VALID_PASSWORD,
                                          token='mocked',
                                          current_password='badpw'),
                                follow_redirects=True)

        text = resp.data.decode('utf-8')
        self.assertIn('Incorrect password or two-factor code', text)
    def test_too_long_user_password_change(self):
        """A self-service password change with an over-long password flashes
        a 'bad password' error."""
        self._login_user()
        # One character longer than the maximum allowed length.
        overly_long_password = VALID_PASSWORD + \
            'a' * (Journalist.MAX_PASSWORD_LEN - len(VALID_PASSWORD) + 1)

        self.client.post(url_for('account.new_password'),
                         data=dict(password=overly_long_password,
                                   token='mocked',
                                   current_password=self.user_pw),
                         follow_redirects=True)

        self.assertMessageFlashed('You submitted a bad password! Password not '
                                  'changed.', 'error')
    def test_valid_user_password_change(self):
        """A self-service password change with valid credentials succeeds."""
        self._login_user()
        resp = self.client.post(
            url_for('account.new_password'),
            data=dict(password=VALID_PASSWORD_2,
                      token='mocked',
                      current_password=self.user_pw),
            follow_redirects=True)

        assert 'Password updated.' in \
            resp.data.decode('utf-8')
    def test_regenerate_totp(self):
        """Regenerating one's TOTP secret changes it and redirects to the
        verification page."""
        self._login_user()
        old_totp = self.user.totp

        res = self.client.post(url_for('account.reset_two_factor_totp'))
        new_totp = self.user.totp

        # check that totp is different
        self.assertNotEqual(old_totp.secret, new_totp.secret)

        # should redirect to verification page
        self.assertRedirects(res, url_for('account.new_two_factor'))
    def test_edit_hotp(self):
        """Submitting a new HOTP secret changes it and redirects to the
        verification page."""
        self._login_user()
        old_hotp = self.user.hotp

        res = self.client.post(
            url_for('account.reset_two_factor_hotp'),
            data=dict(otp_secret=123456)
        )
        new_hotp = self.user.hotp

        # check that hotp is different
        self.assertNotEqual(old_hotp.secret, new_hotp.secret)

        # should redirect to verification page
        self.assertRedirects(res, url_for('account.new_two_factor'))
    def test_delete_source_deletes_submissions(self):
        """Verify that when a source is deleted, the submissions that
        correspond to them are also deleted."""
        self._delete_collection_setup()

        journalist_app_module.utils.delete_collection(
            self.source.filesystem_id)

        # Source should be gone
        results = db.session.query(Source).filter(
            Source.id == self.source.id).all()
        self.assertEqual(results, [])
    def _delete_collection_setup(self):
        """Create a source with two submissions and two replies for the
        deletion tests."""
        self.source, _ = utils.db_helper.init_source()
        utils.db_helper.submit(self.source, 2)
        utils.db_helper.reply(self.user, self.source, 2)
def test_delete_collection_updates_db(self):
"""Verify that when a source is deleted, their Source identity
record, as well as Reply & Submission records associated with
that record are purged from the database."""
self._delete_collection_setup()
journalist_app_module.utils.delete_collection(
self.source.filesystem_id)
results = Source.query.filter(Source.id == self.source.id).all()
self.assertEqual(results, [])
results = db.session.query(
Submission.source_id == self.source.id).all()
self.assertEqual(results, [])
results = db.session.query(Reply.source_id == self.source.id).all()
self.assertEqual(results, [])
    def test_delete_source_deletes_source_key(self):
        """Verify that when a source is deleted, the PGP key that corresponds
        to them is also deleted."""
        self._delete_collection_setup()

        # Source key exists
        source_key = current_app.crypto_util.getkey(self.source.filesystem_id)
        self.assertNotEqual(source_key, None)

        journalist_app_module.utils.delete_collection(
            self.source.filesystem_id)

        # Source key no longer exists
        source_key = current_app.crypto_util.getkey(self.source.filesystem_id)
        self.assertEqual(source_key, None)
    def test_delete_source_deletes_docs_on_disk(self):
        """Verify that when a source is deleted, the encrypted documents that
        exist on disk is also deleted."""
        self._delete_collection_setup()

        # Encrypted documents exists
        dir_source_docs = os.path.join(config.STORE_DIR,
                                       self.source.filesystem_id)
        self.assertTrue(os.path.exists(dir_source_docs))

        job = journalist_app_module.utils.delete_collection(
            self.source.filesystem_id)

        # Wait up to 5s to wait for Redis worker `srm` operation to complete
        utils.async.wait_for_redis_worker(job)

        # Encrypted documents no longer exist
        self.assertFalse(os.path.exists(dir_source_docs))
    def test_download_selected_submissions_from_source(self):
        """Bulk-downloading a selection of submissions returns a zip
        containing exactly the selected files and none of the others."""
        source, _ = utils.db_helper.init_source()
        submissions = utils.db_helper.submit(source, 4)
        selected_submissions = random.sample(submissions, 2)
        selected_fnames = [submission.filename
                           for submission in selected_submissions]
        selected_fnames.sort()

        self._login_user()
        resp = self.client.post(
            '/bulk', data=dict(action='download',
                               filesystem_id=source.filesystem_id,
                               doc_names_selected=selected_fnames))

        # The download request was succesful, and the app returned a zipfile
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.content_type, 'application/zip')
        self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))

        # The submissions selected are in the zipfile
        for filename in selected_fnames:
            self.assertTrue(
                # Check that the expected filename is in the zip file
                zipfile.ZipFile(StringIO(resp.data)).getinfo(
                    os.path.join(
                        source.journalist_filename,
                        "%s_%s" % (filename.split('-')[0],
                                   source.last_updated.date()),
                        filename
                    ))
                )

        # The submissions not selected are absent from the zipfile
        not_selected_submissions = set(submissions).difference(
            selected_submissions)
        not_selected_fnames = [submission.filename
                               for submission in not_selected_submissions]

        for filename in not_selected_fnames:
            with self.assertRaises(KeyError):
                zipfile.ZipFile(StringIO(resp.data)).getinfo(
                    os.path.join(
                        source.journalist_filename,
                        source.journalist_designation,
                        "%s_%s" % (filename.split('-')[0],
                                   source.last_updated.date()),
                        filename
                    ))
    def _bulk_download_setup(self):
        """Create a couple sources, make some submissions on their behalf,
        mark some of them as downloaded, and then perform *action* on all
        sources."""
        self.source0, _ = utils.db_helper.init_source()
        self.source1, _ = utils.db_helper.init_source()
        self.journo0, _ = utils.db_helper.init_journalist()
        self.submissions0 = utils.db_helper.submit(self.source0, 2)
        self.submissions1 = utils.db_helper.submit(self.source1, 3)
        # Mark one of source0's two submissions as downloaded.
        self.downloaded0 = random.sample(self.submissions0, 1)
        utils.db_helper.mark_downloaded(*self.downloaded0)
        self.not_downloaded0 = set(self.submissions0).difference(
            self.downloaded0)
        # Mark two of source1's three submissions as downloaded.
        self.downloaded1 = random.sample(self.submissions1, 2)
        utils.db_helper.mark_downloaded(*self.downloaded1)
        self.not_downloaded1 = set(self.submissions1).difference(
            self.downloaded1)
    def test_download_unread_all_sources(self):
        """'download-unread' over all sources returns a zip containing only
        the not-yet-downloaded submissions."""
        self._bulk_download_setup()
        self._login_user()

        # Download all unread messages from all sources
        self.resp = self.client.post(
            url_for('col.process'),
            data=dict(action='download-unread',
                      cols_selected=[self.source0.filesystem_id,
                                     self.source1.filesystem_id]))

        # The download request was succesful, and the app returned a zipfile
        self.assertEqual(self.resp.status_code, 200)
        self.assertEqual(self.resp.content_type, 'application/zip')
        self.assertTrue(zipfile.is_zipfile(StringIO(self.resp.data)))

        # All the not dowloaded submissions are in the zipfile
        for submission in self.not_downloaded0:
            self.assertTrue(
                zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
                    os.path.join(
                        "unread",
                        self.source0.journalist_designation,
                        "%s_%s" % (submission.filename.split('-')[0],
                                   self.source0.last_updated.date()),
                        submission.filename
                    ))
                )
        for submission in self.not_downloaded1:
            self.assertTrue(
                zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
                    os.path.join(
                        "unread",
                        self.source1.journalist_designation,
                        "%s_%s" % (submission.filename.split('-')[0],
                                   self.source1.last_updated.date()),
                        submission.filename
                    ))
                )

        # All the downloaded submissions are absent from the zipfile
        for submission in self.downloaded0:
            with self.assertRaises(KeyError):
                zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
                    os.path.join(
                        "unread",
                        self.source0.journalist_designation,
                        "%s_%s" % (submission.filename.split('-')[0],
                                   self.source0.last_updated.date()),
                        submission.filename
                    ))

        for submission in self.downloaded1:
            with self.assertRaises(KeyError):
                zipfile.ZipFile(StringIO(self.resp.data)).getinfo(
                    os.path.join(
                        "unread",
                        self.source1.journalist_designation,
                        "%s_%s" % (submission.filename.split('-')[0],
                                   self.source1.last_updated.date()),
                        submission.filename
                    ))
def test_download_all_selected_sources(self):
self._bulk_download_setup()
self._login_user()
# Dowload all messages from self.source1
self.resp = self.client.post(
url_for('col.process'),
data=dict(action='download-all',
cols_selected=[self.source1.filesystem_id]))
resp = self.client.post(
url_for('col.process'),
data=dict(action='download-all',
cols_selected=[self.source1.filesystem_id]))
# The download request was succesful, and the app returned a zipfile
self.assertEqual(resp.status_code, 200)
self.assertEqual(resp.content_type, 'application/zip')
self.assertTrue(zipfile.is_zipfile(StringIO(resp.data)))
# All messages from self.source1 are in the zipfile
for submission in self.submissions1:
self.assertTrue(
zipfile.ZipFile(StringIO(resp.data)).getinfo(
os.path.join(
"all",
self.source1.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source1.last_updated.date()),
submission.filename)
)
)
# All messages from self.source0 are absent from the zipfile
for submission in self.submissions0:
with self.assertRaises(KeyError):
zipfile.ZipFile(StringIO(resp.data)).getinfo(
os.path.join(
"all",
self.source0.journalist_designation,
"%s_%s" % (submission.filename.split('-')[0],
self.source0.last_updated.date()),
submission.filename)
)
def test_single_source_is_successfully_starred(self):
source, _ = utils.db_helper.init_source()
self._login_user()
resp = self.client.post(url_for('col.add_star',
filesystem_id=source.filesystem_id))
self.assertRedirects(resp, url_for('main.index'))
# Assert source is starred
self.assertTrue(source.star.starred)
    def test_single_source_is_successfully_unstarred(self):
        """Unstarring a previously starred source redirects to the index and
        clears the star."""
        source, _ = utils.db_helper.init_source()
        self._login_user()

        # First star the source
        self.client.post(url_for('col.add_star',
                                 filesystem_id=source.filesystem_id))

        # Now unstar the source
        resp = self.client.post(url_for('col.remove_star',
                                        filesystem_id=source.filesystem_id))

        self.assertRedirects(resp, url_for('main.index'))

        # Assert source is not starred
        self.assertFalse(source.star.starred)
def test_journalist_session_expiration(self):
    """A session older than SESSION_EXPIRATION_MINUTES is cleared and the
    user sees the 'logged out due to inactivity' message.

    The test mutates the module-level config, so it carefully records
    whether SESSION_EXPIRATION_MINUTES existed beforehand and restores
    (or deletes) it in the finally block so later tests are unaffected.
    """
    # remember whether config already defined an expiration value
    try:
        old_expiration = config.SESSION_EXPIRATION_MINUTES
        has_session_expiration = True
    except AttributeError:
        has_session_expiration = False

    try:
        with self.client as client:
            # set the expiration to ensure we trigger an expiration
            config.SESSION_EXPIRATION_MINUTES = -1

            # do a real login to get a real session
            # (none of the mocking `g` hacks)
            resp = client.post(url_for('main.login'),
                               data=dict(username=self.user.username,
                                         password=self.user_pw,
                                         token='mocked'))
            self.assertRedirects(resp, url_for('main.index'))
            assert 'uid' in session

            # any subsequent request should now hit the expired session
            resp = client.get(url_for('account.edit'),
                              follow_redirects=True)

            # check that the session was cleared (apart from 'expires'
            # which is always present and 'csrf_token' which leaks no info)
            session.pop('expires', None)
            session.pop('csrf_token', None)
            assert not session, session

            assert ('You have been logged out due to inactivity' in
                    resp.data.decode('utf-8'))
    finally:
        # restore the config exactly as we found it
        if has_session_expiration:
            config.SESSION_EXPIRATION_MINUTES = old_expiration
        else:
            del config.SESSION_EXPIRATION_MINUTES
def test_csrf_error_page(self):
    """A CSRF failure redirects to login and shows the logged-out flash."""
    previous_setting = self.app.config['WTF_CSRF_ENABLED']
    self.app.config['WTF_CSRF_ENABLED'] = True
    try:
        with self.app.test_client() as app:
            # posting without a CSRF token must bounce back to the login page
            resp = app.post(url_for('main.login'))
            self.assertRedirects(resp, url_for('main.login'))

            resp = app.post(url_for('main.login'), follow_redirects=True)
            self.assertIn('You have been logged out due to inactivity',
                          resp.data)
    finally:
        # restore whatever CSRF setting was in effect before
        self.app.config['WTF_CSRF_ENABLED'] = previous_setting
def test_col_process_aborts_with_bad_action(self):
    """If the action is not a valid choice, a 500 should occur"""
    self._login_user()
    resp = self.client.post(
        url_for('col.process'),
        data={'cols_selected': 'does not matter',
              'action': 'this action does not exist'})
    self.assert500(resp)
def test_col_process_successfully_deletes_multiple_sources(self):
    """Deleting two selected collections removes every source."""
    # Create two sources with one submission each
    created = []
    for _ in range(2):
        source, _ = utils.db_helper.init_source()
        utils.db_helper.submit(source, 1)
        created.append(source)
    self._login_user()

    resp = self.client.post(
        url_for('col.process'),
        data={'cols_selected': [s.filesystem_id for s in created],
              'action': 'delete'},
        follow_redirects=True)
    self.assert200(resp)

    # Verify there are no remaining sources
    self.assertEqual(len(db.session.query(models.Source).all()), 0)
def test_col_process_successfully_stars_sources(self):
    """Starring a selected collection via col.process sets its star."""
    starred_source, _ = utils.db_helper.init_source()
    utils.db_helper.submit(starred_source, 1)
    self._login_user()

    resp = self.client.post(
        url_for('col.process'),
        data={'cols_selected': [starred_source.filesystem_id],
              'action': 'star'},
        follow_redirects=True)
    self.assert200(resp)

    # Verify the source is starred
    self.assertTrue(starred_source.star.starred)
def test_col_process_successfully_unstars_sources(self):
    """Un-starring a previously starred collection clears its star."""
    source, _ = utils.db_helper.init_source()
    utils.db_helper.submit(source, 1)
    self._login_user()

    # Star the source first, then remove the star again; only the final
    # response is asserted on, matching the original behavior.
    for chosen_action in ('star', 'un-star'):
        resp = self.client.post(
            url_for('col.process'),
            data={'cols_selected': [source.filesystem_id],
                  'action': chosen_action},
            follow_redirects=True)
    self.assert200(resp)

    # Verify the source is not starred
    self.assertFalse(source.star.starred)
class TestJournalistLocale(TestCase):
    """Exercises locale selection in the journalist interface."""

    def get_fake_config(self):
        return SDConfig()

    # A method required by flask_testing.TestCase
    def create_app(self):
        cfg = self.get_fake_config()
        cfg.SUPPORTED_LOCALES = ['en_US', 'fr_FR']
        return journalist_app_module.create_app(cfg)

    def setUp(self):
        utils.env.setup()
        # Patch the two-factor verification to avoid intermittent errors
        utils.db_helper.mock_verify_token(self)
        # Setup test user
        self.user, self.user_pw = utils.db_helper.init_journalist()

    def tearDown(self):
        utils.env.teardown()

    def test_render_locales(self):
        """the locales.html template must collect both request.args (l=XX)
        and request.view_args (/<filesystem_id>) to build the URL to
        change the locale
        """
        source, _ = utils.db_helper.init_source()
        self._ctx.g.user = self.user

        base_url = url_for('col.col', filesystem_id=source.filesystem_id)
        resp = self.client.get(base_url + '?l=fr_FR')

        # the rendered locale links must point back at the current view
        self.assertNotIn('?l=fr_FR', resp.data)
        self.assertIn(base_url + '?l=en_US', resp.data)
class TestJournalistLogin(unittest.TestCase):
    """Tests for Journalist.login's scrypt-hashing behavior.

    Fixes: stacked ``@patch`` decorators are applied bottom-up, so the
    innermost decorator (``valid_password``) supplies the FIRST mock
    argument. The original parameter list named them in the opposite
    order, so ``mock_scrypt_hash`` actually held the ``valid_password``
    mock and the assertion checked the wrong object.
    """

    def setUp(self):
        self.__context = journalist_app_module.create_app(config).app_context()
        self.__context.push()
        utils.env.setup()

        # Patch the two-factor verification so it always succeeds
        utils.db_helper.mock_verify_token(self)

        self.user, self.user_pw = utils.db_helper.init_journalist()

    def tearDown(self):
        utils.env.teardown()
        self.__context.pop()

    @patch('models.Journalist._scrypt_hash')
    @patch('models.Journalist.valid_password', return_value=True)
    def test_valid_login_calls_scrypt(self,
                                      mock_valid_password,
                                      mock_scrypt_hash):
        # NOTE: decorators apply bottom-up, so mock_valid_password (the
        # innermost patch) comes first and mock_scrypt_hash second.
        Journalist.login(self.user.username, self.user_pw, 'mocked')
        self.assertTrue(
            mock_scrypt_hash.called,
            "Failed to call _scrypt_hash for password w/ valid length")

    @patch('models.Journalist._scrypt_hash')
    def test_login_with_invalid_password_doesnt_call_scrypt(self,
                                                            mock_scrypt_hash):
        # one character past the limit must raise before any hashing
        invalid_pw = 'a'*(Journalist.MAX_PASSWORD_LEN + 1)
        with self.assertRaises(InvalidPasswordLength):
            Journalist.login(self.user.username, invalid_pw, 'mocked')
        self.assertFalse(
            mock_scrypt_hash.called,
            "Called _scrypt_hash for password w/ invalid length")
|
#!/usr/bin/env python
import array
import itertools
import re
class gapbuffer(object):
"""
Represents a sequence of identically-typed primitive items using a gap
buffer. Can be initialized with any iterable item, as long as the items in
the iterable are all of the same type. Uses an array.array internally to
store data.
"""
# type information for the different types our internal array can take on.
# used to initialize the internal array to some non-zero size and to get
# formal names for the type codes.
TYPE_CODES = {
"c": (str(' '), "character"),
"b": (0, "signed character"),
"B": (0, "unsigned character"),
"u": (unicode(' '), "unicode character"),
"h": (0, "signed short"),
"H": (0, "unsigned short"),
"i": (0, "signed int"),
"I": (0L, "unsigned int"),
"l": (0L, "signed long"),
"L": (0L, "unsigned long"),
"f": (0.0, "float"),
"d": (0.0, "double")
}
def __init__(self, typecode, initial_content=[], gap_size=10):
"""
TODO
"""
# minimum space to create for the new gap when resizing the current one
self.__gap_size = gap_size
# allocate the initial gap for the internal buffer. if the typecode is
# invalid, array.array throws a nice ValueError for us.
item = gapbuffer.TYPE_CODES[typecode][0]
self.__buf = array.array(typecode, (item for i in xrange(gap_size)))
# first space of the gap, initially always at the start of the buffer
self.__gap_start = 0
# first space after the final space in the gap, even if past the end of
# the internal buffer. since our buffer is (at the moment) all-gap, it
# starts as the length of the buffer.
self.__gap_end = len(self.__buf)
# add the initial content (defaults to an empty iterable)
try:
# add the initial content to the end of the buffer
self.__buf.extend(initial_content)
except TypeError:
# map array's TypeError to our own version of the same
raise TypeError(self.__class__.__name__ + " items must be of type "
+ gapbuffer.TYPE_CODES[typecode][1])
# the space immediately following the final item in the buffer,
# including space for the gap. i.e., if the gap is at the very end of
# the buffer, the content end is equivalent to the gap end.
self.__content_end = len(self.__buf)
@property
def typecode(self):
"""The read-only typecode of this gapbuffer."""
return self.__buf.typecode
@property
def __gap_len(self):
"""Get the length of the current gap."""
return self.__gap_end - self.__gap_start
def __len__(self):
"""Get the length of the buffer."""
return self.__content_end - self.__gap_len
def __compare(self, other):
"""
Does a lexicographic comparison with another other iterable, and returns
-1, 0, or 1 if the buffer is less than, equal to, or greater than the
other.
"""
# don't compare with things that have no length method since iterating
# over them might modify them if they're generators.
if not hasattr(other, "__len__"):
# we're always greater than non-iterable objects
return 1
# fill value guaranteed to be unique to this fun. call and inaccessible
fv = object()
for si, oi in itertools.izip_longest(self, other, fillvalue=fv):
# we're shorter than the other iterable and aren't different
if si is fv:
return -1
# the other is shorter than us and not different
if oi is fv:
return 1
# we're smaller than the other, or the other is larger
if oi > si:
return -1
elif oi < si:
return 1
# we're equal if none of the cases passed (same length, not different)
return 0
def __eq__(self, other):
"""Determine whether this is item-equivalent to another iterable."""
# optimize for iterables that the len() method works on
if hasattr(other, "__len__") and len(other) != len(self):
return False
return self.__compare(other) == 0
def __cmp__(self, other):
"""Lexicographically compares this with another iterable."""
return self.__compare(other)
def __contains__(self, value):
"""
Return True if the given item is contained in the buffer, False
otherwise.
"""
# substring test for character and unicode buffers
if (self.typecode in ["u", "c"] and isinstance(value, basestring)
and len(value) > 1):
# search the gap-less version of our underlying buffer
with self as buf:
# escape the given string and return whether a result was found
return re.search(re.escape(value), buf) is not None
elif (self.typecode in ["u", "c"] and
isinstance(value, basestring) and len(value) == 0):
# the empty string is a member of every string
return True
# general test for membership, including single-character string values
for item in self:
if value == item:
return True
return False
def __add__(self, other):
"""
Concatenate the other iterable to this one and return the result as a
new buffer.
"""
added = gapbuffer(self.typecode, self)
added.extend(other)
return added
def __iadd__(self, other):
"""Concatenate the other iterable to this one in-place."""
self.extend(other)
def __mul__(self, n):
"""
Concatenate ourself to ourself some number of times and return the
result as a new buffer.
"""
multiplied = gapbuffer(self.typecode)
# don't concatenate if 0 or less was specified
if n > 0:
for i in xrange(n):
multiplied.extend(self)
return multiplied
def __imul__(self, n):
"""Concatenate ourself to ourself some number of times in-place."""
# clear the buffer if 0 or less was specified
if n <= 0:
del self[:]
else:
for i in xrange(n - 1):
self.extend(self)
def __enforce_index(self, *indices):
"""Ensures the given indices are valid for the current buffer size."""
for index in indices:
if index >= len(self) or index < -len(self):
raise IndexError(self.__class__.__name__ + " index out of range")
def __getitem__(self, x):
"""Get the item or slice at the given index."""
# handle slicing with a 'step' (normal format is handled by __getslice__)
if isinstance(x, slice):
return self.__get_slice(x)
return self.__get_index(x)
def __get_index(self, i):
"""Get the item at some index."""
# constrain index bounds
self.__enforce_index(i)
# if before the gap, access buffer directly, else account for gap
index = i if i < self.__gap_start else i + self.__gap_len
return self.__buf[index]
def __get_slice(self, s):
"""Get the sequence at the given slice."""
# unpack 'indices()' into xrange as a generator for our items
return gapbuffer(self.typecode,
(self[i] for i in xrange(*s.indices(len(self)))))
def __setitem__(self, x, value):
"""Set an index or slice to some value."""
if isinstance(x, slice):
return self.__set_slice(x, value)
return self.__set_index(x, value)
def __set_index(self, i, value):
"""Set the item at some index."""
self.__enforce_index(i)
index = i if i < self.__gap_start else i + self.__gap_len
self.__buf[index] = value
def __set_slice(self, s, value):
"""Set the slice at some index."""
# get the length of the value sequence, consuming it if necessary.
values = value
if not hasattr(value, "__len__"):
values = [v for v in value]
# normalize slice indices
start, stop, step = s.indices(len(self))
# handle extended slices, which are the same size as what they replace
if step != 1:
# get our range
xr = xrange(start, stop, step)
# enforce range size for extended slices
if len(values) != len(xr):
raise ValueError("attempt to assign sequence of size " +
str(len(values)) + " to extended slice of size " +
str(len(xr)))
# set the indices in the range to their new values
for i, v in itertools.izip(xr, values):
self[i] = v
else:
# move the gap to the start of the slice
self.__move_gap(start)
# resize the gap to contain the new values, then delete the old ones
self.__resize_gap(len(values))
self.__gap_end += stop - start
# replace old values with the new ones
for v in values:
# add the next value and bump up the gap pointer as we go
self.__buf[self.__gap_start] = v
self.__gap_start += 1
def __delitem__(self, x):
"""Delete some index or slice."""
if isinstance(x, slice):
return self.__del_slice(x)
return self.__del_index(x)
def __del_index(self, i):
"""Delete the item at some index."""
self.__enforce_index(i)
# move the gap to the given index
self.__move_gap(i)
# 'delete' the index by causing the gap to consume the index
self.__gap_end += 1
def __del_slice(self, s):
"""Delete some slice."""
# get the range we'll be manipulating
start, stop, step = s.indices(len(self))
xr = xrange(start, stop, step)
# handle extended slices
if step != 1:
# delete every item in the slice range
for count, index in enumerate(xr):
# account for already deleted indices
del self[index - count]
else:
# don't do anything if there was no gap given
if len(xr) > 0:
# move the gap to the start and expand to cover the range
self.__move_gap(start)
self.__gap_end += len(xr)
def __enter__(self):
"""
Return the raw array.array underlying the buffer, sans gap. This allows
for easy manipulation of the underlying data structure without worrying
about breaking state in the buffer at large.
"""
# move the gap to the end of the buffer
self.__move_gap(len(self))
# remove the gap. this should just be a pointer update in the C code.
del self.__buf[self.__gap_start:]
# give the context the raw buffer
return self.__buf
def __exit__(self, exception_type, exception_value, traceback):
"""Replace the gap when context exits, ignoring any errors."""
# add a new gap at the end of the buffer
item = gapbuffer.TYPE_CODES[self.typecode][0]
self.__buf.extend(item for i in xrange(self.__gap_size))
# account for any size change in the buffer
self.__content_end = len(self.__buf)
# move the gap pointers to point at the new gap
self.__gap_start = self.__content_end - self.__gap_size
self.__gap_end = self.__content_end
def index(self, item, start=0, end=None):
"""
Return the index of the first occurence of 'item' in this gapbuffer from
the slice between the optional start (default 0) and end (default end of
buffer) values.
"""
# set a default for the end
end = end or len(self)
# only check if we have an increasing, non-empty range
if start != end:
# return the index for the first matching item
for i in xrange(*slice(start, end).indices(len(self))):
if self[i] == item:
return i
# signal failure if we couldn't find anything
raise ValueError(self.__class__.__name__ +
".index(x): x is not in " + self.__class__.__name__)
def count(self, item):
"""Return the number of times 'item' occurs in this gapbuffer."""
result = 0
for self_item in self:
if self_item == item:
result += 1
return result
def append(self, item):
"""Append the 'item' to this gapbuffer."""
self.insert(len(self), item)
def extend(self, other):
"""
Append all the items from the other iterable onto the end of this
gapbuffer.
"""
# append the other iterable's items to the end of the existing raw buffer
with self as buf:
buf.extend(other)
def insert(self, index, item):
"""Insert an item at the given index."""
self[index:index] = [item]
def pop(self, index=None):
"""Remove the item at 'index' (default final item) and returns it."""
if len(self) == 0:
raise IndexError("pop from empty " + self.__class__.__name__)
# default index to the end of the buffer
index = len(self) - 1 if index is None else index
item = self[index]
del self[index]
return item
def remove(self, item):
"""Remove the first occurence of 'item' in this gapbuffer."""
del self[self.index(item)]
def reverse(self):
"""Reverse the items in this gapbuffer in-place."""
# only reverse if necessary
if len(self) > 1:
for i in xrange(len(self) / 2):
self[-(i + 1)], self[i] = self[i], self[-(i + 1)]
def debug_view(self): # pragma: no cover
"""
Get a debug view of the buffer's contents and internal values as a
unicode string.
"""
# write special values into gap
for i in xrange(self.__gap_start, self.__gap_end):
self.__buf[i] = u"_"
# write special values into blank area
for i in xrange(self.__content_end, len(self.__buf)):
self.__buf[i] = u"#"
# our desired display characters and their positions
chars = [
("s", self.__gap_start),
("e", self.__gap_end),
("$", self.__content_end)
]
# find the left-most value we'll be displaying
max_pos = max(map(lambda t: t[1], chars))
# track all the rows we'll need
rows = []
# add the first row
rows.append([None] * (max_pos + 1))
# insert all the characters into rows
for char, pos in chars:
# try all the rows in turn until an empty slot is found
for i in xrange(len(chars)):
# add more space if we need it
if len(rows) == i:
rows.append([None] * (max_pos + 1))
# fill the slot if it was empty, then move on
if rows[i][pos] is None:
rows[i][pos] = char
break
# build the final string
s = [
u"'" + unicode(self) + u"', " + unicode(len(self)),
self.__buf.tounicode()
]
for row in rows:
t = []
for c in row:
t.append(c if c is not None else u" ")
s.append(u"".join(t))
return u'\n'.join(s)
def __resize_buf(self, target_size, factor=(1.0 / 16)):
"""
Ensure that the buffer is at least as large as some target by repeatedly
increasing its size by some factor (default 1/16).
"""
# prevent decreasing or failure to increase buffer size
assert factor > 0
# increase the buffer size by our factor until it's long enough
item = gapbuffer.TYPE_CODES[self.typecode][0]
while len(self.__buf) < target_size:
extend_len = max(1, int((1.0 + factor) * (1 + len(self.__buf))))
self.__buf.extend(item for i in xrange(extend_len))
def __resize_gap(self, target_size):
"""Ensure that the gap is at least as large as some target."""
if self.__gap_len < target_size:
# calculate size increase of the gap, including the min gap size
gap_delta = target_size + self.__gap_size - self.__gap_len
# make room for the current content and the new gap
self.__resize_buf(len(self.__buf) + gap_delta)
# shift the right content down to make room for the new gap
for i in reversed(xrange(self.__gap_end, self.__content_end)):
self.__buf[i + gap_delta] = self.__buf[i]
# move the gap and content end pointers forward
self.__gap_end += gap_delta
self.__content_end += gap_delta
def __move_gap(self, index):
"""Move the gap to some index."""
# TODO: test corner cases (0-length gap, gap to ends, etc.)
# don't move the gap if it consists of the entire internal buffer
if len(self) == 0:
return
# normalize the index to a positive number if negative
index = len(self) + index if index < 0 else index
# make sure we're within virtual buffer bounds. the start of the
# gap is always the same as the virtual buffer index, so we must limit
# it to this since its end extends to the end of the actual buffer.
assert 0 <= index <= len(self)
# optimize for moving a zero-length gap (avoids needless copies)
if self.__gap_len == 0:
self.__gap_start = self.__gap_end = index
else:
# move the gap left as far as necessary
while self.__gap_start > index:
# slide the gap to the left
self.__gap_start -= 1
self.__gap_end -= 1
# copy the gap's former preceding character to the gap's old
# final slot.
self.__buf[self.__gap_end] = self.__buf[self.__gap_start]
# move the gap right as far as necessary
while self.__gap_start < index:
# copy the gap's following character to the gap's first slot.
self.__buf[self.__gap_start] = self.__buf[self.__gap_end]
# slide the gap to the right
self.__gap_start += 1
self.__gap_end += 1
def __str__(self):
"""Return the string representation of the buffer's contents."""
# NOTE: we do this separately from the unicode version to prevent weird
# str/unicode conversions.
# do more compact representations for string and unicode types
if self.typecode in ["u", "c"]:
return ''.join(c for c in self)
# turn all other types into a simple list
return repr([i for i in self])
def __unicode__(self):
"""Return the unicode representation of the buffer's contents."""
if self.typecode in ["u", "c"]:
return u''.join(c for c in self)
return unicode(repr([i for i in self]))
def __repr__(self):
# class name, typecode, and opening paren
s = unicode(self.__class__.__name__ + "(" + repr(self.typecode))
# add the content representation if there is any
if len(self) > 0:
s += u", "
# do more compact represenstations for string and unicode types
if self.typecode == "c":
s += repr(''.join(c for c in self))
elif self.typecode == "u":
s += repr(u''.join(c for c in self))
else:
# turn all other types into a simple list
s += repr([i for i in self])
# add close paren and return
return s + u")"
Fix __iadd__ and __imul__
In-place operators must return the resulting object — an easily-overlooked requirement of Python's augmented-assignment protocol — so both now return self.
#!/usr/bin/env python
import array
import itertools
import re
class gapbuffer(object):
"""
Represents a sequence of identically-typed primitive items using a gap
buffer. Can be initialized with any iterable item, as long as the items in
the iterable are all of the same type. Uses an array.array internally to
store data.
"""
# type information for the different types our internal array can take on.
# used to initialize the internal array to some non-zero size and to get
# formal names for the type codes. maps typecode -> (fill item, human name),
# where the fill item is used to pre-populate gap space in the array.
TYPE_CODES = {
    "c": (str(' '), "character"),
    "b": (0, "signed character"),
    "B": (0, "unsigned character"),
    "u": (unicode(' '), "unicode character"),
    "h": (0, "signed short"),
    "H": (0, "unsigned short"),
    "i": (0, "signed int"),
    "I": (0L, "unsigned int"),
    "l": (0L, "signed long"),
    "L": (0L, "unsigned long"),
    "f": (0.0, "float"),
    "d": (0.0, "double")
}
def __init__(self, typecode, initial_content=(), gap_size=10):
    """
    Create a gapbuffer holding items of the array typecode 'typecode',
    optionally filled with the items of 'initial_content'. 'gap_size' is the
    minimum gap length allocated whenever the gap must be grown.

    Raises ValueError for an invalid typecode and TypeError when items of
    'initial_content' don't match the typecode.

    Fixes: replaces the mutable default argument `[]` with an immutable
    tuple, and fills in the TODO docstring.
    """

    # minimum space to create for the new gap when resizing the current one
    self.__gap_size = gap_size

    # allocate the initial gap for the internal buffer. if the typecode is
    # invalid, array.array throws a nice ValueError for us.
    item = gapbuffer.TYPE_CODES[typecode][0]
    self.__buf = array.array(typecode, (item for i in xrange(gap_size)))

    # first space of the gap, initially always at the start of the buffer
    self.__gap_start = 0

    # first space after the final space in the gap, even if past the end of
    # the internal buffer. since our buffer is (at the moment) all-gap, it
    # starts as the length of the buffer.
    self.__gap_end = len(self.__buf)

    # add the initial content (defaults to an empty iterable)
    try:
        self.__buf.extend(initial_content)
    except TypeError:
        # map array's TypeError to our own version of the same
        raise TypeError(self.__class__.__name__ + " items must be of type "
                + gapbuffer.TYPE_CODES[typecode][1])

    # the space immediately following the final item in the buffer,
    # including space for the gap. i.e., if the gap is at the very end of
    # the buffer, the content end is equivalent to the gap end.
    self.__content_end = len(self.__buf)
@property
def typecode(self):
    """The read-only typecode of this gapbuffer."""
    # delegate to the underlying array, which owns the canonical typecode
    return self.__buf.typecode
@property
def __gap_len(self):
    """Get the length of the current gap."""
    # the gap is the half-open range [__gap_start, __gap_end)
    return self.__gap_end - self.__gap_start
def __len__(self):
    """Get the length of the buffer."""
    # visible length = everything up to the content end, minus the gap
    return self.__content_end - self.__gap_len
def __compare(self, other):
    """
    Does a lexicographic comparison with another other iterable, and returns
    -1, 0, or 1 if the buffer is less than, equal to, or greater than the
    other.
    """
    # refuse to iterate objects without a length, since walking them could
    # consume them if they're generators; treat them as always smaller.
    if not hasattr(other, "__len__"):
        return 1

    # a unique sentinel: can never compare equal to a real buffer item
    sentinel = object()
    for mine, theirs in itertools.izip_longest(self, other,
                                               fillvalue=sentinel):
        # we ran out first without finding a difference: we're smaller
        if mine is sentinel:
            return -1
        # the other ran out first without a difference: we're larger
        if theirs is sentinel:
            return 1
        # first differing item decides the ordering
        if theirs > mine:
            return -1
        if theirs < mine:
            return 1

    # same length, no differing items: equal
    return 0
def __eq__(self, other):
    """Determine whether this is item-equivalent to another iterable."""
    # a cheap length check rules out most mismatches when len() works
    if hasattr(other, "__len__") and len(self) != len(other):
        return False
    return 0 == self.__compare(other)
def __cmp__(self, other):
    """Lexicographically compares this with another iterable."""
    # delegate to the shared helper, which returns -1/0/1
    return self.__compare(other)
def __contains__(self, value):
    """
    Return True if the given item is contained in the buffer, False
    otherwise. Character and unicode buffers additionally support
    substring tests.
    """
    text_query = (self.typecode in ("u", "c")
                  and isinstance(value, basestring))

    if text_query and len(value) > 1:
        # search the gap-less version of our underlying buffer, escaping
        # the needle so it's matched literally rather than as a pattern
        with self as raw:
            return re.search(re.escape(value), raw) is not None

    if text_query and len(value) == 0:
        # the empty string is a member of every string
        return True

    # general membership test, including single-character string values
    return any(value == item for item in self)
def __add__(self, other):
    """
    Concatenate the other iterable to this one and return the result as a
    new buffer.
    """
    # copy ourself, then append the other iterable onto the copy
    result = gapbuffer(self.typecode, self)
    result.extend(other)
    return result
def __iadd__(self, other):
    """Concatenate the other iterable to this one in-place."""
    self.extend(other)
    # in-place operators must return the (modified) object itself,
    # otherwise `buf += x` would rebind the name to None
    return self
def __mul__(self, n):
    """
    Concatenate ourself to ourself some number of times and return the
    result as a new buffer.
    """
    product = gapbuffer(self.typecode)

    # a count of zero or less yields an empty buffer
    repetitions = n if n > 0 else 0
    for _ in xrange(repetitions):
        product.extend(self)

    return product
def __imul__(self, n):
    """Concatenate ourself to ourself some number of times in-place.

    Fixes: the original extended with `self` directly, which doubled the
    buffer on every pass (the source grows while being appended, since
    extend() snapshots len(self) per call), yielding len * 2**(n-1) items
    instead of len * n.
    """
    # clear the buffer if 0 or less was specified
    if n <= 0:
        del self[:]
    else:
        # snapshot the original content so each pass appends exactly one
        # copy of it, regardless of our own growth
        original = gapbuffer(self.typecode, self)
        for i in xrange(n - 1):
            self.extend(original)
    # in-place operators must return the (modified) object itself
    return self
def __enforce_index(self, *indices):
    """Ensures the given indices are valid for the current buffer size."""
    length = len(self)
    for index in indices:
        # valid indices lie in the half-open range [-length, length)
        if not (-length <= index < length):
            raise IndexError(self.__class__.__name__ +
                             " index out of range")
def __getitem__(self, x):
    """Get the item or slice at the given index."""
    # dispatch slices and plain indices to their respective helpers
    # (the pre-2.0-style simple slice form is handled by __getslice__)
    if isinstance(x, slice):
        return self.__get_slice(x)
    return self.__get_index(x)
def __get_index(self, i):
    """Get the item at some index (supports negative indices).

    Fixes: negative indices are normalized before the gap arithmetic. The
    original passed a raw negative index straight to the internal array,
    which indexes relative to the *physical* buffer end — that region can
    contain unused space past the content end after a gap resize.
    """
    # constrain index bounds
    self.__enforce_index(i)

    # normalize negative logical indices to non-negative ones
    if i < 0:
        i += len(self)

    # if before the gap, access buffer directly, else account for gap
    index = i if i < self.__gap_start else i + self.__gap_len
    return self.__buf[index]
def __get_slice(self, s):
    """Get the sequence at the given slice."""
    # collect the slice's items into a fresh gapbuffer of the same type
    positions = xrange(*s.indices(len(self)))
    return gapbuffer(self.typecode, (self[i] for i in positions))
def __setitem__(self, x, value):
    """Set an index or slice to some value."""
    # dispatch slices and plain indices to their respective helpers
    if isinstance(x, slice):
        return self.__set_slice(x, value)
    return self.__set_index(x, value)
def __set_index(self, i, value):
    """Set the item at some index (supports negative indices).

    Fixes: negative indices are normalized before the gap arithmetic; a
    raw negative index would write relative to the physical buffer's end,
    which may lie in the unused area past the content end.
    """
    self.__enforce_index(i)

    # normalize negative logical indices to non-negative ones
    if i < 0:
        i += len(self)

    index = i if i < self.__gap_start else i + self.__gap_len
    self.__buf[index] = value
def __set_slice(self, s, value):
    """Set the slice at some index.

    Extended slices (step != 1) must be replaced by a value sequence of
    exactly the same length; contiguous slices may grow or shrink the
    buffer by splicing the values in at the gap.
    """

    # get the length of the value sequence, consuming it if necessary.
    # (generators have no __len__, so materialize them into a list first)
    values = value
    if not hasattr(value, "__len__"):
        values = [v for v in value]

    # normalize slice indices
    start, stop, step = s.indices(len(self))

    # handle extended slices, which are the same size as what they replace
    if step != 1:
        # get our range
        xr = xrange(start, stop, step)

        # enforce range size for extended slices
        if len(values) != len(xr):
            raise ValueError("attempt to assign sequence of size " +
                    str(len(values)) + " to extended slice of size " +
                    str(len(xr)))

        # set the indices in the range to their new values
        for i, v in itertools.izip(xr, values):
            self[i] = v
    else:
        # move the gap to the start of the slice
        self.__move_gap(start)

        # resize the gap to contain the new values, then delete the old ones
        # by letting the gap swallow the replaced range
        self.__resize_gap(len(values))
        self.__gap_end += stop - start

        # replace old values with the new ones
        for v in values:
            # add the next value and bump up the gap pointer as we go
            self.__buf[self.__gap_start] = v
            self.__gap_start += 1
def __delitem__(self, x):
    """Delete some index or slice."""
    # dispatch slices and plain indices to their respective helpers
    if isinstance(x, slice):
        return self.__del_slice(x)
    return self.__del_index(x)
def __del_index(self, i):
    """Delete the item at some index."""
    self.__enforce_index(i)

    # move the gap to the given index (__move_gap normalizes negatives)
    self.__move_gap(i)

    # 'delete' the index by causing the gap to consume the index
    self.__gap_end += 1
def __del_slice(self, s):
    """Delete some slice.

    Contiguous slices are removed in O(1) by widening the gap; extended
    slices fall back to item-by-item deletion.
    """

    # get the range we'll be manipulating
    start, stop, step = s.indices(len(self))
    xr = xrange(start, stop, step)

    # handle extended slices
    if step != 1:
        # delete every item in the slice range
        for count, index in enumerate(xr):
            # account for already deleted indices
            del self[index - count]
    else:
        # don't do anything if there was no gap given
        if len(xr) > 0:
            # move the gap to the start and expand to cover the range
            self.__move_gap(start)
            self.__gap_end += len(xr)
def __enter__(self):
    """
    Return the raw array.array underlying the buffer, sans gap. This allows
    for easy manipulation of the underlying data structure without worrying
    about breaking state in the buffer at large.

    NOTE: the gap pointers are left pointing past the truncated data until
    __exit__ rebuilds the gap, so the buffer must not be used through its
    normal interface while inside the context.
    """

    # move the gap to the end of the buffer
    self.__move_gap(len(self))

    # remove the gap. this should just be a pointer update in the C code.
    del self.__buf[self.__gap_start:]

    # give the context the raw buffer
    return self.__buf
def __exit__(self, exception_type, exception_value, traceback):
    """Replace the gap when context exits, ignoring any errors.

    Returns None (falsy), so exceptions raised inside the context still
    propagate after the gap has been restored.
    """

    # add a new gap at the end of the buffer
    item = gapbuffer.TYPE_CODES[self.typecode][0]
    self.__buf.extend(item for i in xrange(self.__gap_size))

    # account for any size change in the buffer
    self.__content_end = len(self.__buf)

    # move the gap pointers to point at the new gap
    self.__gap_start = self.__content_end - self.__gap_size
    self.__gap_end = self.__content_end
def index(self, item, start=0, end=None):
    """
    Return the index of the first occurrence of 'item' in this gapbuffer
    from the slice between the optional start (default 0) and end (default
    end of buffer) values. Raises ValueError when the item isn't found.

    Fixes: the default was applied with `end or len(self)`, which silently
    turned an explicit end of 0 into "end of buffer", making
    index(x, 0, 0) scan the whole buffer instead of an empty range.
    """

    # set a default for the end; compare against None explicitly so an
    # explicit end of 0 is respected
    end = len(self) if end is None else end

    # only check if we have an increasing, non-empty range
    if start != end:
        # return the index for the first matching item
        for i in xrange(*slice(start, end).indices(len(self))):
            if self[i] == item:
                return i

    # signal failure if we couldn't find anything
    raise ValueError(self.__class__.__name__ +
            ".index(x): x is not in " + self.__class__.__name__)
def count(self, item):
    """Return the number of times 'item' occurs in this gapbuffer."""
    # a single pass, tallying matches via a generator expression
    return sum(1 for member in self if member == item)
def append(self, item):
    """Append the 'item' to this gapbuffer."""
    # an append is just an insert at the very end
    self.insert(len(self), item)
def extend(self, other):
    """
    Append all the items from the other iterable onto the end of this
    gapbuffer.
    """
    # append the other iterable's items to the end of the existing raw
    # buffer; the context manager moves the gap aside and restores it after
    with self as buf:
        buf.extend(other)
def insert(self, index, item):
    """Insert 'item' into this gapbuffer at position 'index'."""
    # splice a one-element sequence into a zero-width slice at the index
    self[index:index] = [item]
def pop(self, index=None):
    """Remove and return the item at 'index' (default: the final item)."""
    if len(self) == 0:
        raise IndexError("pop from empty " + self.__class__.__name__)
    # None means "pop the last item"
    if index is None:
        index = len(self) - 1
    # grab the value before deleting its slot
    result = self[index]
    del self[index]
    return result
def remove(self, item):
    """Remove the first occurrence of 'item' from this gapbuffer."""
    # index() raises ValueError for us when the item isn't present
    del self[self.index(item)]
def reverse(self):
    """Reverse the items in this gapbuffer in-place."""
    # nothing to do for empty or single-item buffers
    if len(self) <= 1:
        return
    # walk inward from both ends, swapping mirrored pairs
    for left in xrange(len(self) // 2):
        right = -(left + 1)
        self[right], self[left] = self[left], self[right]
def debug_view(self): # pragma: no cover
    """
    Get a debug view of the buffer's contents and internal values as a
    unicode string: the virtual contents and length, the raw buffer (gap
    drawn as '_', unused tail as '#'), then rows of pointer labels
    ('s' = gap start, 'e' = gap end, '$' = content end).

    NOTE(review): this writes marker characters directly into the gap and
    blank slots, so it assumes a unicode ('u') typecode buffer (tounicode)
    and clobbers the gap area — debugging use only.
    """
    # overwrite the gap slots with a visible placeholder
    for i in xrange(self.__gap_start, self.__gap_end):
        self.__buf[i] = u"_"
    # overwrite the unused slots past the content with a different marker
    for i in xrange(self.__content_end, len(self.__buf)):
        self.__buf[i] = u"#"
    # the pointer labels we want to draw and the columns they point at
    chars = [
        ("s", self.__gap_start),
        ("e", self.__gap_end),
        ("$", self.__content_end)
    ]
    # find the right-most column we'll be labelling (the original comment
    # said "left-most", but max() yields the largest, i.e. right-most, one)
    max_pos = max(map(lambda t: t[1], chars))
    # track all the label rows we'll need
    rows = []
    # add the first row
    rows.append([None] * (max_pos + 1))
    # place every label, stacking onto extra rows when columns collide
    for char, pos in chars:
        # try all the rows in turn until an empty slot is found
        for i in xrange(len(chars)):
            # add more space if we need it
            if len(rows) == i:
                rows.append([None] * (max_pos + 1))
            # fill the slot if it was empty, then move on
            if rows[i][pos] is None:
                rows[i][pos] = char
                break
    # build the final string: contents/length, raw buffer, then label rows
    s = [
        u"'" + unicode(self) + u"', " + unicode(len(self)),
        self.__buf.tounicode()
    ]
    for row in rows:
        t = []
        for c in row:
            t.append(c if c is not None else u" ")
        s.append(u"".join(t))
    return u'\n'.join(s)
def __resize_buf(self, target_size, factor=(1.0 / 16)):
    """
    Grow the underlying raw buffer until it is at least 'target_size'
    items long, expanding by 'factor' (default 1/16) on every pass.
    """
    # a non-positive factor would shrink the buffer or loop forever
    assert factor > 0
    filler = gapbuffer.TYPE_CODES[self.typecode][0]
    # keep expanding until the buffer is long enough; always grow by at
    # least one item so zero-length buffers still make progress
    while len(self.__buf) < target_size:
        growth = max(1, int((1.0 + factor) * (1 + len(self.__buf))))
        self.__buf.extend(filler for _ in xrange(growth))
def __resize_gap(self, target_size):
    """Ensure that the gap is at least 'target_size' items long."""
    if self.__gap_len < target_size:
        # grow by enough to reach the target plus the minimum gap size
        gap_delta = target_size + self.__gap_size - self.__gap_len
        # make room in the raw buffer for the content plus the larger gap
        self.__resize_buf(len(self.__buf) + gap_delta)
        # shift the content to the right of the gap further right; iterate
        # back-to-front so no element is overwritten before it is copied
        for i in reversed(xrange(self.__gap_end, self.__content_end)):
            self.__buf[i + gap_delta] = self.__buf[i]
        # the gap's end and the content's end both move forward; the gap
        # start is unchanged, so the gap has grown by gap_delta
        self.__gap_end += gap_delta
        self.__content_end += gap_delta
def __move_gap(self, index):
    """Move the gap so that it starts at 'index'."""
    # TODO: test corner cases (0-length gap, gap to ends, etc.)
    # nothing to move when there is no content (gap is the entire buffer)
    if len(self) == 0:
        return
    # normalize a negative index to its positive equivalent
    index = len(self) + index if index < 0 else index
    # make sure we're within virtual buffer bounds. the start of the
    # gap is always the same as the virtual buffer index, so we must limit
    # it to this since its end extends to the end of the actual buffer.
    assert 0 <= index <= len(self)
    # a zero-length gap is "moved" by just repointing both ends (no copies)
    if self.__gap_len == 0:
        self.__gap_start = self.__gap_end = index
    else:
        # slide the gap left one slot at a time until it reaches 'index'
        while self.__gap_start > index:
            self.__gap_start -= 1
            self.__gap_end -= 1
            # copy the element just before the old gap into the slot the
            # gap has vacated at its right edge
            self.__buf[self.__gap_end] = self.__buf[self.__gap_start]
        # slide the gap right one slot at a time until it reaches 'index'
        while self.__gap_start < index:
            # copy the element just after the gap into the gap's first slot
            self.__buf[self.__gap_start] = self.__buf[self.__gap_end]
            # then advance both gap pointers past the copied element
            self.__gap_start += 1
            self.__gap_end += 1
def __str__(self):
    """Return the string representation of the buffer's contents."""
    # NOTE: kept separate from __unicode__ to avoid implicit str/unicode
    # conversions between the two representations.
    # character buffers render as a plain joined string
    if self.typecode in ["u", "c"]:
        return ''.join(c for c in self)
    # every other element type renders as a list literal
    return repr(list(self))
def __unicode__(self):
    """Return the unicode representation of the buffer's contents."""
    # character buffers render as a joined unicode string
    if self.typecode in ["u", "c"]:
        return u''.join(c for c in self)
    # every other element type renders as the unicode of a list literal
    return unicode(repr(list(self)))
def __repr__(self):
    """Return a constructor-style representation of this gapbuffer."""
    # class name, opening paren, and the typecode
    result = unicode(self.__class__.__name__ + "(" + repr(self.typecode))
    # include the contents only when the buffer is non-empty
    if len(self) > 0:
        # compact representations for byte- and unicode-character buffers
        if self.typecode == "c":
            content = repr(''.join(c for c in self))
        elif self.typecode == "u":
            content = repr(u''.join(c for c in self))
        else:
            # all other element types display as a plain list
            content = repr([i for i in self])
        result += u", " + content
    # close the paren and hand back the final string
    return result + u")"
|
from __future__ import print_function, division, absolute_import
# from .pio import PIO
from .distributed import DistributedPass
from .hiframes import HiFrames
from .hiframes_typed import HiFramesTyped
import numba
import numba.compiler
from numba import ir_utils, ir, postproc
from numba.targets.registry import CPUDispatcher
from numba.ir_utils import guard, get_definition
from numba.inline_closurecall import inline_closure_call, InlineClosureCallPass
from hpat import config
if config._has_h5py:
from hpat import pio
# this is for previous version of pipeline manipulation (numba hpat_req <0.38)
# def stage_io_pass(pipeline):
# """
# Convert IO calls
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# if config._has_h5py:
# io_pass = pio.PIO(pipeline.func_ir, pipeline.locals)
# io_pass.run()
#
#
# def stage_distributed_pass(pipeline):
# """
# parallelize for distributed-memory
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# dist_pass = DistributedPass(pipeline.func_ir, pipeline.typingctx,
# pipeline.type_annotation.typemap, pipeline.type_annotation.calltypes)
# dist_pass.run()
#
#
# def stage_df_pass(pipeline):
# """
# Convert DataFrame calls
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# df_pass = HiFrames(pipeline.func_ir, pipeline.typingctx,
# pipeline.args, pipeline.locals)
# df_pass.run()
#
#
# def stage_df_typed_pass(pipeline):
# """
# Convert HiFrames after typing
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# df_pass = HiFramesTyped(pipeline.func_ir, pipeline.typingctx,
# pipeline.type_annotation.typemap, pipeline.type_annotation.calltypes)
# df_pass.run()
#
#
# def stage_inline_pass(pipeline):
# """
# Inline function calls (to enable distributed pass analysis)
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# inline_calls(pipeline.func_ir)
#
#
# def stage_repeat_inline_closure(pipeline):
# assert pipeline.func_ir
# inline_pass = InlineClosureCallPass(
# pipeline.func_ir, pipeline.flags.auto_parallel)
# inline_pass.run()
# post_proc = postproc.PostProcessor(pipeline.func_ir)
# post_proc.run()
#
#
# def add_hpat_stages(pipeline_manager, pipeline):
# pp = pipeline_manager.pipeline_stages['nopython']
# new_pp = []
# for (func, desc) in pp:
# if desc == 'nopython frontend':
# # before type inference: add inline calls pass,
# # untyped hiframes pass, hdf5 io
# # also repeat inline closure pass to inline df stencils
# new_pp.append(
# (lambda: stage_inline_pass(pipeline), "inline funcs"))
# new_pp.append((lambda: stage_df_pass(
# pipeline), "convert DataFrames"))
# new_pp.append((lambda: stage_io_pass(
# pipeline), "replace IO calls"))
# new_pp.append((lambda: stage_repeat_inline_closure(
# pipeline), "repeat inline closure"))
# # need to handle string array exprs before nopython rewrites converts
# # them to arrayexpr.
# # since generic_rewrites has the same description, we check func name
# if desc == 'nopython rewrites' and 'generic_rewrites' not in str(func):
# new_pp.append((lambda: stage_df_typed_pass(
# pipeline), "typed hiframes pass"))
# if desc == 'nopython mode backend':
# # distributed pass after parfor pass and before lowering
# new_pp.append((lambda: stage_distributed_pass(
# pipeline), "convert to distributed"))
# new_pp.append((func, desc))
# pipeline_manager.pipeline_stages['nopython'] = new_pp
def inline_calls(func_ir):
    """
    Inline every call to a numba CPUDispatcher (i.e. another jit-compiled
    function) found in 'func_ir', in place, so later passes (e.g. the
    distributed pass) can analyze the whole computation as a single IR.
    """
    # worklist of (label, block) pairs; inline_closure_call appends any
    # newly created blocks onto it via the work_list argument
    work_list = list(func_ir.blocks.items())
    while work_list:
        label, block = work_list.pop()
        for i, instr in enumerate(block.body):
            if isinstance(instr, ir.Assign):
                lhs = instr.target
                expr = instr.value
                if isinstance(expr, ir.Expr) and expr.op == 'call':
                    # resolve the call target; guard() returns None instead
                    # of raising when the definition can't be found
                    func_def = guard(get_definition, func_ir, expr.func)
                    if isinstance(func_def, ir.Global) and isinstance(func_def.value, CPUDispatcher):
                        py_func = func_def.value.py_func
                        new_blocks = inline_closure_call(func_ir,
                            func_ir.func_id.func.__globals__,
                            block, i, py_func, work_list=work_list)
                        # for block in new_blocks:
                        #     work_list.append(block)
                        # current block is modified, skip the rest
                        # (included in new blocks)
                        break
class HPATPipeline(numba.compiler.BasePipeline):
    """HPAT compiler pipeline.

    Extends numba's nopython pipeline with HPAT-specific stages:
    dispatcher inlining, untyped DataFrame and IO conversion before type
    inference, a typed hiframes pass after typing, and a distributed-memory
    transformation after parfor conversion, just before lowering.
    """
    def define_pipelines(self, pm):
        """Register the custom 'hpat' pipeline and its stage order on 'pm'."""
        name = 'hpat'
        pm.create_pipeline(name)
        self.add_preprocessing_stage(pm)
        self.add_pre_typing_stage(pm)
        pm.add_stage(self.stage_inline_pass, "inline funcs")
        pm.add_stage(self.stage_df_pass, "convert DataFrames")
        pm.add_stage(self.stage_io_pass, "replace IO calls")
        # repeat inline closure pass to inline df stencils
        pm.add_stage(self.stage_repeat_inline_closure, "repeat inline closure")
        self.add_typing_stage(pm)
        # breakup optimization stage since df_typed needs to run before
        # rewrites
        # e.g. need to handle string array exprs before nopython rewrites
        # converts them to arrayexpr.
        # self.add_optimization_stage(pm)
        pm.add_stage(self.stage_pre_parfor_pass, "Preprocessing for parfors")
        pm.add_stage(self.stage_df_typed_pass, "typed hiframes pass")
        if not self.flags.no_rewrites:
            pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
        if self.flags.auto_parallel.enabled:
            pm.add_stage(self.stage_parfor_pass, "convert to parfors")
        pm.add_stage(self.stage_distributed_pass, "convert to distributed")
        self.add_lowering_stage(pm)
        self.add_cleanup_stage(pm)
    def stage_inline_pass(self):
        """
        Inline function calls (to enable distributed pass analysis)
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        inline_calls(self.func_ir)
    def stage_df_pass(self):
        """
        Convert DataFrame calls
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        df_pass = HiFrames(self.func_ir, self.typingctx,
                           self.args, self.locals)
        df_pass.run()
    def stage_io_pass(self):
        """
        Convert IO calls
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        # the pass only exists when h5py support was built in
        if config._has_h5py:
            io_pass = pio.PIO(self.func_ir, self.locals)
            io_pass.run()
    def stage_repeat_inline_closure(self):
        """Run the closure-inlining pass again to inline DataFrame stencils."""
        assert self.func_ir
        inline_pass = InlineClosureCallPass(
            self.func_ir, self.flags.auto_parallel)
        inline_pass.run()
        # clean up the IR (defs/uses) after the extra inlining round
        post_proc = postproc.PostProcessor(self.func_ir)
        post_proc.run()
    def stage_distributed_pass(self):
        """
        parallelize for distributed-memory
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        dist_pass = DistributedPass(self.func_ir, self.typingctx, self.targetctx,
            self.type_annotation.typemap, self.type_annotation.calltypes)
        dist_pass.run()
    def stage_df_typed_pass(self):
        """
        Convert HiFrames after typing
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        df_pass = HiFramesTyped(self.func_ir, self.typingctx,
                                self.type_annotation.typemap,
                                self.type_annotation.calltypes,
                                self.return_type)
        ret_typ = df_pass.run()
        # XXX update return type since it can be Series and trigger box_array
        # for string array etc.
        if ret_typ is not None:
            self.return_type = ret_typ
move hiframes_typed pass before pre_parfor_pass
from __future__ import print_function, division, absolute_import
# from .pio import PIO
from .distributed import DistributedPass
from .hiframes import HiFrames
from .hiframes_typed import HiFramesTyped
import numba
import numba.compiler
from numba import ir_utils, ir, postproc
from numba.targets.registry import CPUDispatcher
from numba.ir_utils import guard, get_definition
from numba.inline_closurecall import inline_closure_call, InlineClosureCallPass
from hpat import config
if config._has_h5py:
from hpat import pio
# this is for previous version of pipeline manipulation (numba hpat_req <0.38)
# def stage_io_pass(pipeline):
# """
# Convert IO calls
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# if config._has_h5py:
# io_pass = pio.PIO(pipeline.func_ir, pipeline.locals)
# io_pass.run()
#
#
# def stage_distributed_pass(pipeline):
# """
# parallelize for distributed-memory
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# dist_pass = DistributedPass(pipeline.func_ir, pipeline.typingctx,
# pipeline.type_annotation.typemap, pipeline.type_annotation.calltypes)
# dist_pass.run()
#
#
# def stage_df_pass(pipeline):
# """
# Convert DataFrame calls
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# df_pass = HiFrames(pipeline.func_ir, pipeline.typingctx,
# pipeline.args, pipeline.locals)
# df_pass.run()
#
#
# def stage_df_typed_pass(pipeline):
# """
# Convert HiFrames after typing
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# df_pass = HiFramesTyped(pipeline.func_ir, pipeline.typingctx,
# pipeline.type_annotation.typemap, pipeline.type_annotation.calltypes)
# df_pass.run()
#
#
# def stage_inline_pass(pipeline):
# """
# Inline function calls (to enable distributed pass analysis)
# """
# # Ensure we have an IR and type information.
# assert pipeline.func_ir
# inline_calls(pipeline.func_ir)
#
#
# def stage_repeat_inline_closure(pipeline):
# assert pipeline.func_ir
# inline_pass = InlineClosureCallPass(
# pipeline.func_ir, pipeline.flags.auto_parallel)
# inline_pass.run()
# post_proc = postproc.PostProcessor(pipeline.func_ir)
# post_proc.run()
#
#
# def add_hpat_stages(pipeline_manager, pipeline):
# pp = pipeline_manager.pipeline_stages['nopython']
# new_pp = []
# for (func, desc) in pp:
# if desc == 'nopython frontend':
# # before type inference: add inline calls pass,
# # untyped hiframes pass, hdf5 io
# # also repeat inline closure pass to inline df stencils
# new_pp.append(
# (lambda: stage_inline_pass(pipeline), "inline funcs"))
# new_pp.append((lambda: stage_df_pass(
# pipeline), "convert DataFrames"))
# new_pp.append((lambda: stage_io_pass(
# pipeline), "replace IO calls"))
# new_pp.append((lambda: stage_repeat_inline_closure(
# pipeline), "repeat inline closure"))
# # need to handle string array exprs before nopython rewrites converts
# # them to arrayexpr.
# # since generic_rewrites has the same description, we check func name
# if desc == 'nopython rewrites' and 'generic_rewrites' not in str(func):
# new_pp.append((lambda: stage_df_typed_pass(
# pipeline), "typed hiframes pass"))
# if desc == 'nopython mode backend':
# # distributed pass after parfor pass and before lowering
# new_pp.append((lambda: stage_distributed_pass(
# pipeline), "convert to distributed"))
# new_pp.append((func, desc))
# pipeline_manager.pipeline_stages['nopython'] = new_pp
def inline_calls(func_ir):
    """
    Inline every call to a numba CPUDispatcher (i.e. another jit-compiled
    function) found in 'func_ir', in place, so later passes (e.g. the
    distributed pass) can analyze the whole computation as a single IR.
    """
    # worklist of (label, block) pairs; inline_closure_call appends any
    # newly created blocks onto it via the work_list argument
    work_list = list(func_ir.blocks.items())
    while work_list:
        label, block = work_list.pop()
        for i, instr in enumerate(block.body):
            if isinstance(instr, ir.Assign):
                lhs = instr.target
                expr = instr.value
                if isinstance(expr, ir.Expr) and expr.op == 'call':
                    # resolve the call target; guard() returns None instead
                    # of raising when the definition can't be found
                    func_def = guard(get_definition, func_ir, expr.func)
                    if isinstance(func_def, ir.Global) and isinstance(func_def.value, CPUDispatcher):
                        py_func = func_def.value.py_func
                        new_blocks = inline_closure_call(func_ir,
                            func_ir.func_id.func.__globals__,
                            block, i, py_func, work_list=work_list)
                        # for block in new_blocks:
                        #     work_list.append(block)
                        # current block is modified, skip the rest
                        # (included in new blocks)
                        break
class HPATPipeline(numba.compiler.BasePipeline):
    """HPAT compiler pipeline.

    Extends numba's nopython pipeline with HPAT-specific stages:
    dispatcher inlining, untyped DataFrame and IO conversion before type
    inference, a typed hiframes pass after typing (and before parfor
    preprocessing), and a distributed-memory transformation after parfor
    conversion, just before lowering.
    """
    def define_pipelines(self, pm):
        """Register the custom 'hpat' pipeline and its stage order on 'pm'."""
        name = 'hpat'
        pm.create_pipeline(name)
        self.add_preprocessing_stage(pm)
        self.add_pre_typing_stage(pm)
        pm.add_stage(self.stage_inline_pass, "inline funcs")
        pm.add_stage(self.stage_df_pass, "convert DataFrames")
        pm.add_stage(self.stage_io_pass, "replace IO calls")
        # repeat inline closure pass to inline df stencils
        pm.add_stage(self.stage_repeat_inline_closure, "repeat inline closure")
        self.add_typing_stage(pm)
        # breakup optimization stage since df_typed needs to run before
        # rewrites
        # e.g. need to handle string array exprs before nopython rewrites
        # converts them to arrayexpr.
        # self.add_optimization_stage(pm)
        # hiframes typed pass should be before pre_parfor since variable types
        # need updating, and A.call to np.call transformation is invalid for
        # Series (e.g. S.var is not the same as np.var(S))
        pm.add_stage(self.stage_df_typed_pass, "typed hiframes pass")
        pm.add_stage(self.stage_pre_parfor_pass, "Preprocessing for parfors")
        if not self.flags.no_rewrites:
            pm.add_stage(self.stage_nopython_rewrites, "nopython rewrites")
        if self.flags.auto_parallel.enabled:
            pm.add_stage(self.stage_parfor_pass, "convert to parfors")
        pm.add_stage(self.stage_distributed_pass, "convert to distributed")
        self.add_lowering_stage(pm)
        self.add_cleanup_stage(pm)
    def stage_inline_pass(self):
        """
        Inline function calls (to enable distributed pass analysis)
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        inline_calls(self.func_ir)
    def stage_df_pass(self):
        """
        Convert DataFrame calls
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        df_pass = HiFrames(self.func_ir, self.typingctx,
                           self.args, self.locals)
        df_pass.run()
    def stage_io_pass(self):
        """
        Convert IO calls
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        # the pass only exists when h5py support was built in
        if config._has_h5py:
            io_pass = pio.PIO(self.func_ir, self.locals)
            io_pass.run()
    def stage_repeat_inline_closure(self):
        """Run the closure-inlining pass again to inline DataFrame stencils."""
        assert self.func_ir
        inline_pass = InlineClosureCallPass(
            self.func_ir, self.flags.auto_parallel)
        inline_pass.run()
        # clean up the IR (defs/uses) after the extra inlining round
        post_proc = postproc.PostProcessor(self.func_ir)
        post_proc.run()
    def stage_distributed_pass(self):
        """
        parallelize for distributed-memory
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        dist_pass = DistributedPass(self.func_ir, self.typingctx, self.targetctx,
            self.type_annotation.typemap, self.type_annotation.calltypes)
        dist_pass.run()
    def stage_df_typed_pass(self):
        """
        Convert HiFrames after typing
        """
        # Ensure we have an IR and type information.
        assert self.func_ir
        df_pass = HiFramesTyped(self.func_ir, self.typingctx,
                                self.type_annotation.typemap,
                                self.type_annotation.calltypes,
                                self.return_type)
        ret_typ = df_pass.run()
        # XXX update return type since it can be Series and trigger box_array
        # for string array etc.
        if ret_typ is not None:
            self.return_type = ret_typ
|
#!/usr/bin/env python
"""Top-level module for JAMS"""
# Import the necessary modules
from .exceptions import *
from . import util
from . import schema
from . import eval
from .version import version as __version__
from .core import *
# Populate the namespace mapping: register every JSON schema file bundled
# in the package's namespace-schema directory.
from pkg_resources import resource_filename
for _ in util.find_with_extension(resource_filename(__name__, schema.NS_SCHEMA_DIR),
                                  'json'):
    schema.add_namespace(_)
added environment variable for schema extensions
#!/usr/bin/env python
"""Top-level module for JAMS"""
# Import the necessary modules
from .exceptions import *
from . import util
from . import schema
from . import eval
from .version import version as __version__
from .core import *
# Populate the namespace mapping: register every JSON schema file bundled
# in the package's namespace-schema directory.
from pkg_resources import resource_filename
for _ in util.find_with_extension(resource_filename(__name__, schema.NS_SCHEMA_DIR),
                                  'json'):
    schema.add_namespace(_)
# Populate local namespaces from an optional user-supplied directory.
# BUG FIX: the original wrapped the whole loop in try/except KeyError,
# which also silently swallowed any KeyError raised *inside*
# find_with_extension or add_namespace; only the env lookup should be
# optional, so test for the variable explicitly instead.
import os
_local_schema_dir = os.environ.get('JAMS_SCHEMA_DIR')
if _local_schema_dir is not None:
    for _ in util.find_with_extension(_local_schema_dir, 'json'):
        schema.add_namespace(_)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from cStringIO import StringIO
import gevent.monkey
import requests
import csv
import ast
import datetime
from bs4 import BeautifulSoup
from django.core.management.base import BaseCommand
from gevent.pool import Pool
from measuring_stations.models import IndicatedValue, MeasuringPoint
class Command(BaseCommand):
    """Fetch air-quality measurements from www.umwelt.sachsen.de.

    The site is an ASP.NET WebForms page, so each selection (station,
    pollutant, averaging interval, period) has to be posted back in
    sequence, carrying the __VIEWSTATE/__EVENTVALIDATION tokens forward,
    before the CSV download can be requested. Stations and pollutants are
    fetched concurrently with gevent greenlets.
    """
    args = '<period {24H|48H|7D|1M|3M|6M|1Y}>'
    help = 'Fetches data for the given period from www.umwelt.sachsen.de'
    URL = "http://www.umwelt.sachsen.de/umwelt/infosysteme/luftonline/Recherche.aspx"
    # ASP.NET form field names used in the postbacks
    STATION_KEY = "ctl00$Inhalt$StationList"
    SCHADSTOFF_KEY = "ctl00$Inhalt$SchadstoffList"
    MITTELWERT_KEY = "ctl00$Inhalt$MwttList"
    ZEITRAUM_KEY = "ctl00$Inhalt$LetzteList"
    VALIDATION_KEY = "__EVENTVALIDATION"
    TARGET_KEY = "__EVENTTARGET"
    VIEWSTATE_KEY = "__VIEWSTATE"
    BUTTON_KEY = "ctl00$Inhalt$BtnCsvDown"
    BUTTON_VALUE = "CSV-Download"
    # measuring station display name -> form value
    STATIONEN = {
        #"Leipzig-Luetzner Straße": "224",
        "Leipzig-Mitte": "211",
        "Leipzig-Thekla": "214",
        "Leipzig-West": "213"
    }
    # pollutant abbreviation -> form value
    SCHADSTOFFE = {
        "BEN": "161;1",
        "NO": "121;0",
        "NO2": "122;0",
        "O3": "23;0",
        "PM10": "224;0",
        "PM25": "109;2",
        "SO2": "22;1"
    }
    # same mapping but keyed by the names used in the CSV column headers
    # (note "PM2.5" vs "PM25"); inverted at runtime for value -> name lookup
    INV_SCHADSTOFFE = {
        "BEN": "161;1",
        "NO": "121;0",
        "NO2": "122;0",
        "O3": "23;0",
        "PM10": "224;0",
        "PM2.5": "109;2",
        "SO2": "22;1"
    }
    # averaging interval -> form value (hourly / daily means)
    MITTELWERT = {
        "STUNDEN": "45; 3600",
        "TAGE": "21; 86400"
    }
    # period argument -> form value
    ZEITRAUM = {
        "24H": "1",
        "48H": "2",
        "7D": "3",
        "1M": "4",
        "3M": "5",
        "6M": "6",
        "1Y": "7"
    }
    # which pollutants each station actually measures
    STATION_SCHADSTOFF_MAP = {
        "224": ["NO", "NO2", "PM10", "PM25"],
        "211": ["BEN", "NO", "NO2", "O3", "PM10", "PM25", "SO2"],
        "214": ["O3"],
        "213": ["BEN", "NO", "NO2", "O3", "PM10", "PM25", "SO2"]
    }
    # which averaging interval applies to each pollutant form value
    MITTELWERT_SCHADSTOFF_MAP = {
        "161;1": "STUNDEN",
        "121;0": "STUNDEN",
        "122;0": "STUNDEN",
        "23;0": "STUNDEN",
        "224;0": "TAGE",
        "109;2": "TAGE",
        "22;1": "STUNDEN"
    }
    headers = {}
    def handle(self, *args, **options):
        """Entry point: validate the period argument, prime the ASP.NET
        state tokens, then fan out one greenlet per station."""
        if len(args) < 1 or not(args[0] in self.ZEITRAUM.keys()):
            self.stdout.write("Usage: manage.py fetch {24H|48H|7D|1M|3M|6M|1Y}")
            sys.exit(0)
        gevent.monkey.patch_socket()
        stationPool = Pool(len(self.STATIONEN))
        params = {}
        self.s = requests.Session()
        # value -> name maps for resolving CSV columns and model lookups
        self.inv_stations = self.invert_dict(self.STATIONEN)
        self.inv_schadstoff = self.invert_dict(self.INV_SCHADSTOFFE)
        # initial request just to obtain fresh validation/viewstate tokens
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.TARGET_KEY] = self.STATION_KEY
        for station in self.STATIONEN.keys():
            # each greenlet gets its own copy of the form state
            tmp = dict(params)
            tmp[self.STATION_KEY] = self.STATIONEN[station]
            stationPool.spawn(self.fetchStation, tmp, args[0])
        stationPool.join()
    def fetchStation(self, params, period):
        """Select one station, then fan out one greenlet per pollutant
        that this station measures."""
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        schadstoffList = self.STATION_SCHADSTOFF_MAP[params[self.STATION_KEY]]
        schadstoffPool = Pool(len(schadstoffList))
        for schadstoff in schadstoffList:
            tmp = dict(params)
            tmp[self.SCHADSTOFF_KEY] = self.SCHADSTOFFE[schadstoff]
            tmp[self.TARGET_KEY] = self.SCHADSTOFF_KEY
            schadstoffPool.spawn(self.fetchSchadstoff, tmp, period)
        schadstoffPool.join()
    def fetchSchadstoff(self, params, period):
        """Walk the remaining form postbacks (interval, period, download
        button) for one station/pollutant pair and store the CSV rows as
        IndicatedValue records."""
        # select the averaging interval appropriate for this pollutant
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.MITTELWERT_KEY] = self.MITTELWERT[
            self.MITTELWERT_SCHADSTOFF_MAP[params[self.SCHADSTOFF_KEY]]]
        params[self.ZEITRAUM_KEY] = 0
        params[self.TARGET_KEY] = self.MITTELWERT_KEY
        # select the requested period
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.ZEITRAUM_KEY] = self.ZEITRAUM[period]
        params[self.TARGET_KEY] = self.ZEITRAUM_KEY
        # press the CSV-download button (no __EVENTTARGET for button posts)
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.BUTTON_KEY] = self.BUTTON_VALUE
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        del params[self.TARGET_KEY]
        response = self.s.post(self.URL, params, headers=self.headers)
        if response.status_code == 200:
            f = StringIO(response.content)
            reader = csv.DictReader(f, delimiter=';')
            stationName = self.inv_stations[params[self.STATION_KEY]]
            station = MeasuringPoint.objects.get(name=stationName)
            unit = self.inv_schadstoff[params[self.SCHADSTOFF_KEY]]
            for row in reader:
                dateRow = row['Datum Zeit']
                if len(dateRow) > 0:
                    date = self.try_parsing_date(dateRow)
                    value = row[' ' + stationName + ' ' + unit].strip()
                    # German decimal comma -> float
                    if value.find(',') > -1:
                        value = float(value.replace(",", "."))
                    # skip unit headers ("g/m...") and "n. def." placeholders
                    if (isinstance(value, float) or (value.find('g/m') == -1 and value.find('n. def.') == -1)):
                        # BUG FIX: 'value' may still be a str here (numbers
                        # without a decimal comma); 'value + 0.0' raised
                        # TypeError for those rows. Convert explicitly.
                        value = float(value)
                        IndicatedValue.objects.create(unit=unit,
                                                      date_created=date,
                                                      measuring_point=station,
                                                      value=value)
            # BUG FIX: the original referenced 'f.close' without calling it,
            # leaking the buffer
            f.close()
    def invert_dict(self, d):
        """Return a new dict with keys and values of 'd' swapped."""
        return dict([(v, k) for k, v in d.iteritems()])
    def try_parsing_date(self, text):
        """Parse 'text' with the known site date formats; raise ValueError
        if none of them match."""
        for fmt in ('%d-%m-%y %H:%M', '%d-%m-%y'):
            try:
                return datetime.datetime.strptime(text, fmt)
            except ValueError:
                pass
        raise ValueError('no valid date format found')
fix value parsing
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from cStringIO import StringIO
import gevent.monkey
import requests
import csv
import ast
import datetime
from bs4 import BeautifulSoup
from django.core.management.base import BaseCommand
from gevent.pool import Pool
from measuring_stations.models import IndicatedValue, MeasuringPoint
class Command(BaseCommand):
    """Fetch air-quality measurements from www.umwelt.sachsen.de.

    The site is an ASP.NET WebForms page, so each selection (station,
    pollutant, averaging interval, period) has to be posted back in
    sequence, carrying the __VIEWSTATE/__EVENTVALIDATION tokens forward,
    before the CSV download can be requested. Stations and pollutants are
    fetched concurrently with gevent greenlets.
    """
    args = '<period {24H|48H|7D|1M|3M|6M|1Y}>'
    help = 'Fetches data for the given period from www.umwelt.sachsen.de'
    URL = "http://www.umwelt.sachsen.de/umwelt/infosysteme/luftonline/Recherche.aspx"
    # ASP.NET form field names used in the postbacks
    STATION_KEY = "ctl00$Inhalt$StationList"
    SCHADSTOFF_KEY = "ctl00$Inhalt$SchadstoffList"
    MITTELWERT_KEY = "ctl00$Inhalt$MwttList"
    ZEITRAUM_KEY = "ctl00$Inhalt$LetzteList"
    VALIDATION_KEY = "__EVENTVALIDATION"
    TARGET_KEY = "__EVENTTARGET"
    VIEWSTATE_KEY = "__VIEWSTATE"
    BUTTON_KEY = "ctl00$Inhalt$BtnCsvDown"
    BUTTON_VALUE = "CSV-Download"
    # measuring station display name -> form value
    STATIONEN = {
        #"Leipzig-Luetzner Straße": "224",
        "Leipzig-Mitte": "211",
        "Leipzig-Thekla": "214",
        "Leipzig-West": "213"
    }
    # pollutant abbreviation -> form value
    SCHADSTOFFE = {
        "BEN": "161;1",
        "NO": "121;0",
        "NO2": "122;0",
        "O3": "23;0",
        "PM10": "224;0",
        "PM25": "109;2",
        "SO2": "22;1"
    }
    # same mapping but keyed by the names used in the CSV column headers
    # (note "PM2.5" vs "PM25"); inverted at runtime for value -> name lookup
    INV_SCHADSTOFFE = {
        "BEN": "161;1",
        "NO": "121;0",
        "NO2": "122;0",
        "O3": "23;0",
        "PM10": "224;0",
        "PM2.5": "109;2",
        "SO2": "22;1"
    }
    # averaging interval -> form value (hourly / daily means)
    MITTELWERT = {
        "STUNDEN": "45; 3600",
        "TAGE": "21; 86400"
    }
    # period argument -> form value
    ZEITRAUM = {
        "24H": "1",
        "48H": "2",
        "7D": "3",
        "1M": "4",
        "3M": "5",
        "6M": "6",
        "1Y": "7"
    }
    # which pollutants each station actually measures
    STATION_SCHADSTOFF_MAP = {
        "224": ["NO", "NO2", "PM10", "PM25"],
        "211": ["BEN", "NO", "NO2", "O3", "PM10", "PM25", "SO2"],
        "214": ["O3"],
        "213": ["BEN", "NO", "NO2", "O3", "PM10", "PM25", "SO2"]
    }
    # which averaging interval applies to each pollutant form value
    MITTELWERT_SCHADSTOFF_MAP = {
        "161;1": "STUNDEN",
        "121;0": "STUNDEN",
        "122;0": "STUNDEN",
        "23;0": "STUNDEN",
        "224;0": "TAGE",
        "109;2": "TAGE",
        "22;1": "STUNDEN"
    }
    headers = {}
    def handle(self, *args, **options):
        """Entry point: validate the period argument, prime the ASP.NET
        state tokens, then fan out one greenlet per station."""
        if len(args) < 1 or not(args[0] in self.ZEITRAUM.keys()):
            self.stdout.write("Usage: manage.py fetch {24H|48H|7D|1M|3M|6M|1Y}")
            sys.exit(0)
        gevent.monkey.patch_socket()
        stationPool = Pool(len(self.STATIONEN))
        params = {}
        self.s = requests.Session()
        # value -> name maps for resolving CSV columns and model lookups
        self.inv_stations = self.invert_dict(self.STATIONEN)
        self.inv_schadstoff = self.invert_dict(self.INV_SCHADSTOFFE)
        # initial request just to obtain fresh validation/viewstate tokens
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.TARGET_KEY] = self.STATION_KEY
        for station in self.STATIONEN.keys():
            # each greenlet gets its own copy of the form state
            tmp = dict(params)
            tmp[self.STATION_KEY] = self.STATIONEN[station]
            stationPool.spawn(self.fetchStation, tmp, args[0])
        stationPool.join()
    def fetchStation(self, params, period):
        """Select one station, then fan out one greenlet per pollutant
        that this station measures."""
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        schadstoffList = self.STATION_SCHADSTOFF_MAP[params[self.STATION_KEY]]
        schadstoffPool = Pool(len(schadstoffList))
        for schadstoff in schadstoffList:
            tmp = dict(params)
            tmp[self.SCHADSTOFF_KEY] = self.SCHADSTOFFE[schadstoff]
            tmp[self.TARGET_KEY] = self.SCHADSTOFF_KEY
            schadstoffPool.spawn(self.fetchSchadstoff, tmp, period)
        schadstoffPool.join()
    def fetchSchadstoff(self, params, period):
        """Walk the remaining form postbacks (interval, period, download
        button) for one station/pollutant pair and store the CSV rows as
        IndicatedValue records."""
        # select the averaging interval appropriate for this pollutant
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.MITTELWERT_KEY] = self.MITTELWERT[
            self.MITTELWERT_SCHADSTOFF_MAP[params[self.SCHADSTOFF_KEY]]]
        params[self.ZEITRAUM_KEY] = 0
        params[self.TARGET_KEY] = self.MITTELWERT_KEY
        # select the requested period
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        params[self.ZEITRAUM_KEY] = self.ZEITRAUM[period]
        params[self.TARGET_KEY] = self.ZEITRAUM_KEY
        # press the CSV-download button (no __EVENTTARGET for button posts)
        response = self.s.post(self.URL, params, headers=self.headers)
        soup = BeautifulSoup(response.text)
        params[self.BUTTON_KEY] = self.BUTTON_VALUE
        params[self.VALIDATION_KEY] = soup.find_all(id=self.VALIDATION_KEY)[0]['value']
        params[self.VIEWSTATE_KEY] = soup.find_all(id=self.VIEWSTATE_KEY)[0]['value']
        del params[self.TARGET_KEY]
        response = self.s.post(self.URL, params, headers=self.headers)
        if response.status_code == 200:
            f = StringIO(response.content)
            reader = csv.DictReader(f, delimiter=';')
            stationName = self.inv_stations[params[self.STATION_KEY]]
            station = MeasuringPoint.objects.get(name=stationName)
            unit = self.inv_schadstoff[params[self.SCHADSTOFF_KEY]]
            for row in reader:
                dateRow = row['Datum Zeit']
                if len(dateRow) > 0:
                    date = self.try_parsing_date(dateRow)
                    value = row[' ' + stationName + ' ' + unit].strip()
                    # German decimal comma -> float
                    if value.find(',') > -1:
                        value = float(value.replace(",", "."))
                    # skip unit headers ("g/m...") and "n. def." placeholders
                    if (isinstance(value, float) or (value.find('g/m') == -1 and value.find('n. def.') == -1)):
                        value = float(value)
                        IndicatedValue.objects.create(unit=unit,
                                                      date_created=date,
                                                      measuring_point=station,
                                                      value=value)
            # BUG FIX: the original referenced 'f.close' without calling it,
            # leaking the buffer
            f.close()
    def invert_dict(self, d):
        """Return a new dict with keys and values of 'd' swapped."""
        return dict([(v, k) for k, v in d.iteritems()])
    def try_parsing_date(self, text):
        """Parse 'text' with the known site date formats; raise ValueError
        if none of them match."""
        for fmt in ('%d-%m-%y %H:%M', '%d-%m-%y'):
            try:
                return datetime.datetime.strptime(text, fmt)
            except ValueError:
                pass
        raise ValueError('no valid date format found')
# Copyright (c) 2008-2009 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Advanced coroutine control.
This module provides means to spawn, kill and link coroutines. Linking means
subscribing to the coroutine's result, either in form of return value or
unhandled exception.
To create a linkable coroutine use spawn function provided by this module:
>>> def demofunc(x, y):
... return x / y
>>> p = spawn(demofunc, 6, 2)
The return value of spawn is an instance of Proc class that you can "link":
* p.link(obj) - notify obj when the coroutine is finished
What "notify" means here depends on the type of `obj': a callable is
simply called, an event or a queue is notified using send/send_exception
methods and if `obj' is another greenlet it's killed with LinkedExited
exception.
Here's an example:
>>> event = coros.event()
>>> p.link(event)
>>> event.wait()
3
Now, even though `p' is finished it's still possible to link it. In this
case the notification is performed immediately:
>>> p.link()
Traceback (most recent call last):
...
LinkedCompleted: '<function demofunc at 0x...>' completed successfully
(Without an argument, link is created to the current greenlet)
There are also link_value and link_exception methods that only deliver a return
value and an unhandled exception respectively (plain `link' deliver both).
Suppose we want to spawn a greenlet to do an important part of the task; if it
fails then there's no way to complete the task so the parent must fail as well;
`link_exception' is useful here:
>>> p = spawn(demofunc, 1, 0)
>>> p.link_exception()
>>> api.sleep(1)
Traceback (most recent call last):
...
LinkedFailed: '<function demofunc at 0x...>' failed with ZeroDivisionError
One application of linking is `waitall' function: link to a bunch of coroutines
and wait for all them to complete. Such function is provided by this module.
"""
import sys
from eventlet import api, coros
# Public names exported via `from ... import *`.
__all__ = ['LinkedExited',
           'LinkedFailed',
           'LinkedCompleted',
           'LinkedKilled',
           'ProcExit',
           # BUGFIX: was 'wait', which this module does not define at top
           # level; that made `from <module> import *` fail.
           'waitall',
           'Proc',
           'spawn',
           'spawn_link',
           'spawn_link_value',
           'spawn_link_exception']
class LinkedExited(Exception):
    """Raised in (or delivered to) a listener when a linked proc exits."""

    # Message template; subclasses override it.
    msg = "%r exited"

    def __init__(self, name=None, msg=None):
        self.name = name
        if msg is None:
            msg = self.msg % self.name
        # The original called api.GreenletExit.__init__ here, which resolves
        # to the same BaseException.__init__ slot wrapper; calling our real
        # base class is equivalent and drops the needless coupling to `api'.
        Exception.__init__(self, msg)
class LinkedFailed(LinkedExited):
    """Raised when a linked proc dies because of unhandled exception"""

    msg = "%r failed with %s"

    def __init__(self, name, typ, value=None, tb=None):
        # Only the exception type reaches the message; `value' and `tb' are
        # accepted so the signature matches getLinkedFailed()'s call.
        msg = self.msg % (name, typ.__name__)
        LinkedExited.__init__(self, name, msg)
class LinkedCompleted(LinkedExited):
    """Raised when a linked proc finishes the execution cleanly"""

    # Message template consumed by LinkedExited.__init__.
    msg = "%r completed successfully"
class LinkedKilled(LinkedFailed):
    """Raised when a linked proc dies because of unhandled GreenletExit
    (i.e. it was killed)
    """

    msg = """%r was killed with %s"""
def getLinkedFailed(name, typ, value=None, tb=None):
    """Build the right "failed" notification: LinkedKilled when the proc
    died of a GreenletExit (i.e. was killed), LinkedFailed otherwise."""
    klass = LinkedKilled if issubclass(typ, api.GreenletExit) else LinkedFailed
    return klass(name, typ, value, tb)
class ProcExit(api.GreenletExit):
    """Raised when this proc is killed."""

# Tags passed to Link._fire to distinguish a delivered result from a
# delivered exception; compared by identity.
SUCCESS, FAILURE = range(2)
class Link(object):
    """A single subscription of `listener' to a Source's result.

    Subclasses provide _fire_value/_fire_exception for the concrete kind
    of listener.  A Link is also a context manager that cancels itself on
    exit.
    """

    def __init__(self, listener):
        self.listener = listener

    def cancel(self):
        # A cancelled link stays in the Source's dicts but firing it is a
        # no-op.
        self.listener = None

    def __enter__(self):
        pass

    def __exit__(self, *args):
        self.cancel()

    def _fire(self, source, tag, result):
        """Deliver `result' to the listener; `tag' (SUCCESS/FAILURE,
        compared by identity) selects value vs. exception delivery."""
        if self.listener is None:
            return
        if tag is SUCCESS:
            self._fire_value(source, result)
        elif tag is FAILURE:
            self._fire_exception(source, result)
        else:
            raise RuntimeError('invalid arguments to _fire: %r %s %r %r' % (self, source, tag, result))

    # Firing a link is just calling it.
    __call__ = _fire
class LinkToEvent(Link):
    """Link to anything with send/send_exception (event, queue, Source)."""

    def _fire_value(self, source, value):
        self.listener.send(value)

    def _fire_exception(self, source, throw_args):
        self.listener.send_exception(*throw_args)

class LinkToGreenlet(Link):
    """Link to a greenlet: notify it by raising a LinkedExited subclass."""

    def _fire_value(self, source, value):
        self.listener.throw(LinkedCompleted(source))

    def _fire_exception(self, source, throw_args):
        self.listener.throw(getLinkedFailed(source, *throw_args))

class LinkToCallable(Link):
    """Link to a plain callable: 1 arg for a value, (typ, value, tb) for
    an exception."""

    def _fire_value(self, source, value):
        self.listener(value)

    def _fire_exception(self, source, throw_args):
        self.listener(*throw_args)
def waitall(lst, trap_errors=False):
    """Wait for every linkable in `lst' to finish; return their results
    in the original order.

    With trap_errors=False (default) the first unhandled exception is
    re-raised; with trap_errors=True the failed slot stays None and
    waiting continues.
    """
    queue = coros.queue()
    results = [None] * len(lst)
    for (index, linkable) in enumerate(lst):
        # decorate_send tags each delivery with its index so the result
        # lands in the right slot.
        linkable.link(decorate_send(queue, index))
    count = 0
    while count < len(lst):
        try:
            index, value = queue.wait()
        except Exception:
            if not trap_errors:
                raise
        else:
            results[index] = value
        # Counted outside the else so trapped failures also make progress.
        count += 1
    return results
class decorate_send(object):
    """Proxy around an event/queue whose send() prepends a fixed tag:
    send(value) becomes send((tag, value)).  Every other attribute access
    is forwarded to the wrapped event."""

    def __init__(self, event, tag):
        self._event = event
        self._tag = tag

    def __repr__(self):
        return '<%s tag=%r event=%r>' % (type(self).__name__, self._tag, self._event)

    def __getattr__(self, name):
        # Guards against infinite recursion if _event itself were missing.
        assert name != '_event'
        return getattr(self._event, name)

    def send(self, value):
        self._event.send((self._tag, value))
# Identity-checked sentinel meaning "no result delivered yet".
_NOT_USED = object()

def spawn_greenlet(function, *args):
    """Create a new greenlet that will run `function(*args)'.

    The current greenlet won't be unscheduled. Keyword arguments aren't
    supported (limitation of greenlet), use spawn() to work around that.
    """
    g = api.Greenlet(function)
    # Parent the greenlet to the hub so an unhandled exception propagates
    # there instead of into the spawner.
    g.parent = api.get_hub().greenlet
    # Scheduled, not switched to: the greenlet starts on the next hub
    # iteration.
    api.get_hub().schedule_call_global(0, g.switch, *args)
    return g
class Source(object):
    """Maintain a set of links to the listeners. Delegate the sent value or
    the exception to all of them.

    To set up a link, use link_value, link_exception or link method. The
    latter establishes both "value" and "exception" link. It is possible to
    link to events, queues, greenlets and callables.

    >>> source = Source()
    >>> event = coros.event()
    >>> source.link(event)

    Once source's send or send_exception method is called, all the listeners
    with the right type of link will be notified ("right type" means that
    exceptions won't be delivered to "value" links and values won't be
    delivered to "exception" links). Once a link has been fired it is removed.

    Notifying listeners is performed in the MAINLOOP greenlet. Under the hood
    notifying a link means executing a callback, see Link class for details. Notification
    must not attempt to switch to the hub, i.e. call any blocking functions.

    >>> source.send('hello')
    >>> event.wait()
    'hello'

    Any error that happened while sending will be logged as a regular unhandled
    exception. This won't prevent other links from being fired.

    There are 3 kinds of listeners supported:

    1. If `listener' is a greenlet (regardless if it's a raw greenlet or an
    extension like Proc), a subclass of LinkedExited exception is raised
    in it.

    2. If `listener' is something with send/send_exception methods (event,
    queue, Source but not Proc) the relevant method is called.

    3. If `listener' is a callable, it is called with 1 argument (the result)
    for "value" links and with 3 arguments (typ, value, tb) for "exception"
    links.
    """

    def __init__(self, name=None):
        self.name = name
        self._value_links = {}        # listener -> Link, fired on send()
        self._exception_links = {}    # listener -> Link, fired on send_exception()
        self._result = _NOT_USED      # sentinel until a result is delivered
        self._exc = None              # throw_args tuple when fired with an exception

    def _repr_helper(self):
        result = []
        result.append(repr(self.name))
        if self._result is not _NOT_USED:
            if self._exc is None:
                res = repr(self._result)
                # Keep repr readable for huge results.
                if len(res)>50:
                    res = res[:50]+'...'
                result.append('result=%s' % res)
            else:
                result.append('raised=%s' % (self._exc, ))
        # Pending {value:exception} link counts.
        result.append('{%s:%s}' % (len(self._value_links), len(self._exception_links)))
        return result

    def __repr__(self):
        klass = type(self).__name__
        return '<%s at %s %s>' % (klass, hex(id(self)), ' '.join(self._repr_helper()))

    def ready(self):
        # True once send() or send_exception() has been called.
        return self._result is not _NOT_USED

    def link_value(self, listener=None, link=None):
        """Set up a link fired only for a successful result.
        `listener' defaults to the current greenlet."""
        # A source that already failed will never fire a value link.
        if self.ready() and self._exc is not None:
            return
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            # Already fired and linking from the current greenlet: deliver
            # synchronously (firing a link must not block).
            link(self.name, SUCCESS, self._result)
        else:
            self._value_links[listener] = link
            if self._result is not _NOT_USED:
                self._start_send()
        return link

    def link_exception(self, listener=None, link=None):
        """Set up a link fired only for an unhandled exception.
        `listener' defaults to the current greenlet."""
        # A source that already succeeded will never fire an exception link.
        if self._result is not _NOT_USED and self._exc is None:
            return
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            link(self.name, FAILURE, self._exc)
        else:
            self._exception_links[listener] = link
            if self._result is not _NOT_USED:
                self._start_send_exception()
        return link

    def link(self, listener=None, link=None):
        """Set up both a "value" and an "exception" link."""
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            if self._exc is None:
                link(self.name, SUCCESS, self._result)
            else:
                link(self.name, FAILURE, self._exc)
        else:
            self._value_links[listener] = link
            self._exception_links[listener] = link
            if self._result is not _NOT_USED:
                if self._exc is None:
                    self._start_send()
                else:
                    self._start_send_exception()
        return link

    def unlink(self, listener=None):
        """Remove all links of `listener' (default: current greenlet)."""
        self._value_links.pop(listener, None)
        self._exception_links.pop(listener, None)

    @staticmethod
    def getLink(listener):
        # Greenlet-like objects are checked first: a Proc has both `throw'
        # and `send', and must be notified via throw.
        if hasattr(listener, 'throw'):
            return LinkToGreenlet(listener)
        if hasattr(listener, 'send'):
            return LinkToEvent(listener)
        elif callable(listener):
            return LinkToCallable(listener)
        else:
            raise TypeError("Don't know how to link to %r" % (listener, ))

    def send(self, value):
        """Deliver `value' to all value links.  May be called only once."""
        assert not self.ready(), "%s has been fired already" % self
        self._result = value
        self._exc = None
        self._start_send()

    def _start_send(self):
        # Delivery happens in the hub greenlet; pass a snapshot of the
        # links plus the live dict so unlink() is still honoured.
        api.get_hub().schedule_call_global(0, self._do_send, self._value_links.items(),
                                           SUCCESS, self._result, self._value_links)

    def send_exception(self, *throw_args):
        """Deliver (typ, value, tb) to all exception links.  Only once."""
        assert not self.ready(), "%s has been fired already" % self
        self._result = None
        self._exc = throw_args
        self._start_send_exception()

    def _start_send_exception(self):
        api.get_hub().schedule_call_global(0, self._do_send, self._exception_links.items(),
                                           FAILURE, self._exc, self._exception_links)

    def _do_send(self, links, tag, value, consult):
        # `links' is a snapshot list of (listener, link) pairs; a pair is
        # fired only while it is still present in the live dict `consult',
        # so each link fires at most once.
        while links:
            listener, link = links.pop()
            try:
                if listener in consult:
                    try:
                        link(self.name, tag, value)
                    finally:
                        consult.pop(listener, None)
            except:
                # Reschedule the remaining links before re-raising so one
                # faulty listener cannot starve the others; the hub logs
                # the raised error.
                api.get_hub().schedule_call_global(0, self._do_send, links, tag, value, consult)
                raise

    def wait(self, timeout=None, *throw_args):
        """Wait until send() or send_exception() is called or `timeout' has
        expired. Return the argument of send or raise the argument of
        send_exception. If timeout has expired, None is returned.

        The arguments, when provided, specify how many seconds to wait and what
        to do when timeout has expired. They are treated the same way as
        api.timeout treats them.
        """
        if self._result is not _NOT_USED:
            if self._exc is None:
                return self._result
            else:
                api.getcurrent().throw(*self._exc)
        if timeout is not None:
            timer = api.timeout(timeout, *throw_args)
            timer.__enter__()
            # timeout=0 means "don't block at all": resolve immediately
            # without switching to the hub.
            if timeout==0:
                if timer.__exit__(None, None, None):
                    return
                else:
                    try:
                        api.getcurrent().throw(*timer.throw_args)
                    except:
                        if not timer.__exit__(*sys.exc_info()):
                            raise
                    return
        # EXC tracks whether the timer still needs the clean __exit__ in
        # the finally clause (i.e. no exception path already closed it).
        EXC = True
        try:
            try:
                event = Waiter()
                self.link(event)
                try:
                    return event.wait()
                finally:
                    self.unlink(event)
            except:
                EXC = False
                if timeout is None or not timer.__exit__(*sys.exc_info()):
                    raise
        finally:
            if timeout is not None and EXC:
                timer.__exit__(None, None, None)
class Waiter(object):
    """Minimal one-shot rendezvous between the hub and one waiting
    greenlet; used by Source.wait() as a throwaway listener."""

    def __init__(self):
        self.greenlet = None  # greenlet currently blocked in wait(), if any

    def send(self, value):
        """Make greenlet calling wait() wake up (if there is a wait()).
        Can only be called from get_hub().greenlet.
        """
        assert api.getcurrent() is api.get_hub().greenlet
        if self.greenlet is not None:
            self.greenlet.switch(value)

    def send_exception(self, *throw_args):
        """Make greenlet calling wait() wake up (if there is a wait()).
        Can only be called from get_hub().greenlet.
        """
        assert api.getcurrent() is api.get_hub().greenlet
        if self.greenlet is not None:
            self.greenlet.throw(*throw_args)

    def wait(self):
        """Wait until send or send_exception is called. Return value passed
        into send() or raise exception passed into send_exception().
        """
        # One waiter at a time; the hub greenlet itself must not wait.
        assert self.greenlet is None
        current = api.getcurrent()
        assert current is not api.get_hub().greenlet
        self.greenlet = current
        try:
            # Parks this greenlet in the hub until send/send_exception
            # switches (or throws) back into it.
            return api.get_hub().switch()
        finally:
            self.greenlet = None
class Proc(Source):
    """A linkable coroutine based on Source.

    Upon completion, delivers coroutine's result to the listeners.
    """

    def __init__(self, name=None):
        self.greenlet = None  # set by run(); None until then
        Source.__init__(self, name)

    def _repr_helper(self):
        if self.greenlet is not None and self.greenlet.dead:
            dead = '(dead)'
        else:
            dead = ''
        return ['%r%s' % (self.greenlet, dead)] + Source._repr_helper(self)

    def __repr__(self):
        klass = type(self).__name__
        return '<%s %s>' % (klass, ' '.join(self._repr_helper()))

    def __nonzero__(self):
        """True while the coroutine is running or scheduled to run."""
        if self.ready():
            # with current _run this does not makes any difference
            # still, let keep it there
            return False
        # otherwise bool(proc) is the same as bool(greenlet)
        if self.greenlet is not None:
            return bool(self.greenlet)
        # BUGFIX: the original fell through here and implicitly returned
        # None, which makes bool(proc) raise TypeError for a Proc that was
        # never run().
        return False

    @property
    def dead(self):
        # NOTE(review): raises AttributeError when run() was never called
        # (greenlet is None); callers appear to use it only after spawn.
        return self.ready() or self.greenlet.dead

    @classmethod
    def spawn(cls, function, *args, **kwargs):
        """Return a new Proc instance that is scheduled to execute
        function(*args, **kwargs) upon the next hub iteration.
        """
        proc = cls()
        proc.run(function, *args, **kwargs)
        return proc

    def run(self, function, *args, **kwargs):
        """Create a new greenlet to execute `function(*args, **kwargs)'.

        The created greenlet is scheduled to run upon the next hub iteration.
        """
        assert self.greenlet is None, "'run' can only be called once per instance"
        if self.name is None:
            self.name = str(function)
        self.greenlet = spawn_greenlet(self._run, function, args, kwargs)

    def _run(self, function, args, kwargs):
        """Internal top level function.

        Execute *function* and send its result to the listeners.
        """
        try:
            result = function(*args, **kwargs)
        except:
            self.send_exception(*sys.exc_info())
            raise  # let mainloop log the exception
        else:
            self.send(result)

    def throw(self, *throw_args):
        """Used internally to raise the exception.

        Behaves exactly like greenlet's 'throw' with the exception that ProcExit
        is raised by default. Do not use this function as it leaves the current
        greenlet unscheduled forever. Use kill() method instead.
        """
        if not self.dead:
            if not throw_args:
                throw_args = (ProcExit, )
            self.greenlet.throw(*throw_args)

    def kill(self, *throw_args):
        """Raise an exception in the greenlet. Unschedule the current greenlet
        so that this Proc can handle the exception (or die).

        The exception can be specified with throw_args. By default, ProcExit is
        raised.
        """
        if not self.dead:
            if not throw_args:
                throw_args = (ProcExit, )
            # Deliver via the hub so kill() is safe from any greenlet.
            api.get_hub().schedule_call_global(0, self.greenlet.throw, *throw_args)
            if api.getcurrent() is not api.get_hub().greenlet:
                # Yield so the scheduled throw runs before we continue.
                api.sleep(0)
# QQQ maybe Proc should not inherit from Source (because its send() and send_exception()
# QQQ methods are for internal use only)

# Module-level shortcut: spawn(f, *a, **kw) -> Proc.
spawn = Proc.spawn

def spawn_link(function, *args, **kwargs):
    """Spawn and link both value and exception to the current greenlet."""
    p = spawn(function, *args, **kwargs)
    p.link()
    return p

def spawn_link_value(function, *args, **kwargs):
    """Spawn and deliver only the return value to the current greenlet."""
    p = spawn(function, *args, **kwargs)
    p.link_value()
    return p

def spawn_link_exception(function, *args, **kwargs):
    """Spawn and deliver only an unhandled exception to the current
    greenlet."""
    p = spawn(function, *args, **kwargs)
    p.link_exception()
    return p
def trap_errors(errors, func, *args, **kwargs):
    """DEPRECATED in favor of wrap_errors.

    Call func(*args, **kwargs); if it raises one of `errors' (an exception
    class or tuple of classes), return the exception instance instead of
    propagating it.
    """
    try:
        return func(*args, **kwargs)
    # 'except ... as' is valid on Python 2.6+ and required on Python 3;
    # the old 'except errors, ex' form is a syntax error there.
    except errors as ex:
        return ex
class wrap_errors(object):
    """Callable wrapper that turns selected raised exceptions into return
    values; attribute access is forwarded to the wrapped function."""

    def __init__(self, errors, func):
        """Make a new function from `func', such that it catches `errors' (an
        Exception subclass, or a tuple of Exception subclasses) and returns
        it as a value.
        """
        self.errors = errors
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        # Python-3-compatible except syntax (valid since 2.6).
        except self.errors as ex:
            return ex

    def __str__(self):
        return str(self.func)

    def __repr__(self):
        return str(self.func)

    def __getattr__(self, item):
        # Forward anything else (e.g. __name__) to the wrapped function.
        return getattr(self.func, item)
class Pool(object):
    """Bounded factory of linkable coroutines: at most `limit' procs
    handed out by allocate() may be unfinished at any moment."""

    # Class of the objects handed out; overridable in subclasses.
    linkable_class = Proc

    def __init__(self, limit):
        self.semaphore = coros.Semaphore(limit)

    def allocate(self):
        # Blocks while `limit' allocated procs are still unfinished.
        self.semaphore.acquire()
        g = self.linkable_class()
        # Release the slot however the proc finishes (value or exception).
        g.link(lambda *_args: self.semaphore.release())
        return g
if __name__=='__main__':
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
proc: improve docstring
# Copyright (c) 2008-2009 AG Projects
# Author: Denis Bilenko
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Advanced coroutine control.
This module provides means to spawn, kill and link coroutines. Linking means
subscribing to the coroutine's result, either in form of return value or
unhandled exception.
To create a linkable coroutine use spawn function provided by this module:
>>> def demofunc(x, y):
... return x / y
>>> p = spawn(demofunc, 6, 2)
The return value of spawn is an instance of Proc class that you can "link":
* p.link(obj) - notify obj when the coroutine is finished
What "notify" means here depends on the type of `obj': a callable is
simply called, an event or a queue is notified using send/send_exception
methods and if `obj' is another greenlet it's killed with LinkedExited
exception.
Here's an example:
>>> event = coros.event()
>>> p.link(event)
>>> event.wait()
3
Now, even though `p' is finished it's still possible to link it. In this
case the notification is performed immediately:
>>> p.link()
Traceback (most recent call last):
...
LinkedCompleted: '<function demofunc at 0x...>' completed successfully
(Without an argument, link is created to the current greenlet)
There are also link_value and link_exception methods that only deliver a return
value and an unhandled exception respectively (plain `link' deliver both).
Suppose we want to spawn a greenlet to do an important part of the task; if it
fails then there's no way to complete the task so the parent must fail as well;
`link_exception' is useful here:
>>> p = spawn(demofunc, 1, 0)
>>> p.link_exception()
>>> api.sleep(1)
Traceback (most recent call last):
...
LinkedFailed: '<function demofunc at 0x...>' failed with ZeroDivisionError
One application of linking is `waitall' function: link to a bunch of coroutines
and wait for all them to complete. Such function is provided by this module.
"""
import sys
from eventlet import api, coros
# Public names exported via `from ... import *`.
__all__ = ['LinkedExited',
           'LinkedFailed',
           'LinkedCompleted',
           'LinkedKilled',
           'ProcExit',
           # BUGFIX: was 'wait', which this module does not define at top
           # level; that made `from <module> import *` fail.
           'waitall',
           'Proc',
           'spawn',
           'spawn_link',
           'spawn_link_value',
           'spawn_link_exception']
class LinkedExited(Exception):
    """Raised in (or delivered to) a listener when a linked proc exits."""

    # Message template; subclasses override it.
    msg = "%r exited"

    def __init__(self, name=None, msg=None):
        self.name = name
        if msg is None:
            msg = self.msg % self.name
        # NOTE(review): calls GreenletExit's (inherited BaseException)
        # __init__ although our base is Exception; the slot wrapper accepts
        # any BaseException instance, so this works -- confirm it was meant
        # rather than Exception.__init__.
        api.GreenletExit.__init__(self, msg)

class LinkedFailed(LinkedExited):
    """Raised when a linked proc dies because of unhandled exception"""

    msg = "%r failed with %s"

    def __init__(self, name, typ, value=None, tb=None):
        # Only the exception type reaches the message; `value' and `tb' are
        # accepted so the signature matches getLinkedFailed()'s call.
        msg = self.msg % (name, typ.__name__)
        LinkedExited.__init__(self, name, msg)

class LinkedCompleted(LinkedExited):
    """Raised when a linked proc finishes the execution cleanly"""

    msg = "%r completed successfully"

class LinkedKilled(LinkedFailed):
    """Raised when a linked proc dies because of unhandled GreenletExit
    (i.e. it was killed)
    """

    msg = """%r was killed with %s"""

def getLinkedFailed(name, typ, value=None, tb=None):
    """Pick LinkedKilled for GreenletExit subclasses (the proc was
    killed), LinkedFailed for any other exception type."""
    if issubclass(typ, api.GreenletExit):
        return LinkedKilled(name, typ, value, tb)
    return LinkedFailed(name, typ, value, tb)

class ProcExit(api.GreenletExit):
    """Raised when this proc is killed."""

# Tags passed to Link._fire to distinguish a delivered result from a
# delivered exception; compared by identity.
SUCCESS, FAILURE = range(2)
class Link(object):
    """A single subscription of `listener' to a Source's result.

    Subclasses provide _fire_value/_fire_exception for the concrete kind
    of listener.  A Link is also a context manager that cancels itself on
    exit.
    """

    def __init__(self, listener):
        self.listener = listener

    def cancel(self):
        # A cancelled link stays in the Source's dicts but firing it is a
        # no-op.
        self.listener = None

    def __enter__(self):
        pass

    def __exit__(self, *args):
        self.cancel()

    def _fire(self, source, tag, result):
        """Deliver `result' to the listener; `tag' (SUCCESS/FAILURE,
        compared by identity) selects value vs. exception delivery."""
        if self.listener is None:
            return
        if tag is SUCCESS:
            self._fire_value(source, result)
        elif tag is FAILURE:
            self._fire_exception(source, result)
        else:
            raise RuntimeError('invalid arguments to _fire: %r %s %r %r' % (self, source, tag, result))

    # Firing a link is just calling it.
    __call__ = _fire

class LinkToEvent(Link):
    """Link to anything with send/send_exception (event, queue, Source)."""

    def _fire_value(self, source, value):
        self.listener.send(value)

    def _fire_exception(self, source, throw_args):
        self.listener.send_exception(*throw_args)

class LinkToGreenlet(Link):
    """Link to a greenlet: notify it by raising a LinkedExited subclass."""

    def _fire_value(self, source, value):
        self.listener.throw(LinkedCompleted(source))

    def _fire_exception(self, source, throw_args):
        self.listener.throw(getLinkedFailed(source, *throw_args))

class LinkToCallable(Link):
    """Link to a plain callable: 1 arg for a value, (typ, value, tb) for
    an exception."""

    def _fire_value(self, source, value):
        self.listener(value)

    def _fire_exception(self, source, throw_args):
        self.listener(*throw_args)
def waitall(lst, trap_errors=False):
    """Wait for every linkable in `lst' to finish; return their results
    in the original order.

    With trap_errors=False (default) the first unhandled exception is
    re-raised; with trap_errors=True the failed slot stays None and
    waiting continues.
    """
    queue = coros.queue()
    results = [None] * len(lst)
    for (index, linkable) in enumerate(lst):
        # decorate_send tags each delivery with its index so the result
        # lands in the right slot.
        linkable.link(decorate_send(queue, index))
    count = 0
    while count < len(lst):
        try:
            index, value = queue.wait()
        except Exception:
            if not trap_errors:
                raise
        else:
            results[index] = value
        # Counted outside the else so trapped failures also make progress.
        count += 1
    return results
class decorate_send(object):
    """Proxy around an event/queue whose send() prepends a fixed tag:
    send(value) becomes send((tag, value)).  Every other attribute access
    is forwarded to the wrapped event."""

    def __init__(self, event, tag):
        self._event = event
        self._tag = tag

    def __repr__(self):
        return '<%s tag=%r event=%r>' % (type(self).__name__, self._tag, self._event)

    def __getattr__(self, name):
        # Guards against infinite recursion if _event itself were missing.
        assert name != '_event'
        return getattr(self._event, name)

    def send(self, value):
        self._event.send((self._tag, value))
# Identity-checked sentinel meaning "no result delivered yet".
_NOT_USED = object()

def spawn_greenlet(function, *args):
    """Create a new greenlet that will run `function(*args)'.

    The current greenlet won't be unscheduled. Keyword arguments aren't
    supported (limitation of greenlet), use spawn() to work around that.
    """
    g = api.Greenlet(function)
    # Parent the greenlet to the hub so an unhandled exception propagates
    # there instead of into the spawner.
    g.parent = api.get_hub().greenlet
    # Scheduled, not switched to: the greenlet starts on the next hub
    # iteration.
    api.get_hub().schedule_call_global(0, g.switch, *args)
    return g
class Source(object):
    """Maintain a set of links to the listeners. Delegate the sent value or
    the exception to all of them.

    To set up a link, use link_value, link_exception or link method. The
    latter establishes both "value" and "exception" link. It is possible to
    link to events, queues, greenlets and callables.

    >>> source = Source()
    >>> event = coros.event()
    >>> source.link(event)

    Once source's send or send_exception method is called, all the listeners
    with the right type of link will be notified ("right type" means that
    exceptions won't be delivered to "value" links and values won't be
    delivered to "exception" links). Once a link has been fired it is removed.

    Notifying listeners is performed in the MAINLOOP greenlet. Under the hood
    notifying a link means executing a callback, see Link class for details. Notification
    must not attempt to switch to the hub, i.e. call any blocking functions.

    >>> source.send('hello')
    >>> event.wait()
    'hello'

    Any error that happened while sending will be logged as a regular unhandled
    exception. This won't prevent other links from being fired.

    There are 3 kinds of listeners supported:

    1. If `listener' is a greenlet (regardless if it's a raw greenlet or an
    extension like Proc), a subclass of LinkedExited exception is raised
    in it.

    2. If `listener' is something with send/send_exception methods (event,
    queue, Source but not Proc) the relevant method is called.

    3. If `listener' is a callable, it is called with 1 argument (the result)
    for "value" links and with 3 arguments (typ, value, tb) for "exception"
    links.
    """

    def __init__(self, name=None):
        self.name = name
        self._value_links = {}        # listener -> Link, fired on send()
        self._exception_links = {}    # listener -> Link, fired on send_exception()
        self._result = _NOT_USED      # sentinel until a result is delivered
        self._exc = None              # throw_args tuple when fired with an exception

    def _repr_helper(self):
        result = []
        result.append(repr(self.name))
        if self._result is not _NOT_USED:
            if self._exc is None:
                res = repr(self._result)
                # Keep repr readable for huge results.
                if len(res)>50:
                    res = res[:50]+'...'
                result.append('result=%s' % res)
            else:
                result.append('raised=%s' % (self._exc, ))
        # Pending {value:exception} link counts.
        result.append('{%s:%s}' % (len(self._value_links), len(self._exception_links)))
        return result

    def __repr__(self):
        klass = type(self).__name__
        return '<%s at %s %s>' % (klass, hex(id(self)), ' '.join(self._repr_helper()))

    def ready(self):
        # True once send() or send_exception() has been called.
        return self._result is not _NOT_USED

    def link_value(self, listener=None, link=None):
        """Set up a link fired only for a successful result.
        `listener' defaults to the current greenlet."""
        # A source that already failed will never fire a value link.
        if self.ready() and self._exc is not None:
            return
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            # Already fired and linking from the current greenlet: deliver
            # synchronously (firing a link must not block).
            link(self.name, SUCCESS, self._result)
        else:
            self._value_links[listener] = link
            if self._result is not _NOT_USED:
                self._start_send()
        return link

    def link_exception(self, listener=None, link=None):
        """Set up a link fired only for an unhandled exception.
        `listener' defaults to the current greenlet."""
        # A source that already succeeded will never fire an exception link.
        if self._result is not _NOT_USED and self._exc is None:
            return
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            link(self.name, FAILURE, self._exc)
        else:
            self._exception_links[listener] = link
            if self._result is not _NOT_USED:
                self._start_send_exception()
        return link

    def link(self, listener=None, link=None):
        """Set up both a "value" and an "exception" link."""
        if listener is None:
            listener = api.getcurrent()
        if link is None:
            link = self.getLink(listener)
        if self.ready() and listener is api.getcurrent():
            if self._exc is None:
                link(self.name, SUCCESS, self._result)
            else:
                link(self.name, FAILURE, self._exc)
        else:
            self._value_links[listener] = link
            self._exception_links[listener] = link
            if self._result is not _NOT_USED:
                if self._exc is None:
                    self._start_send()
                else:
                    self._start_send_exception()
        return link

    def unlink(self, listener=None):
        """Remove all links of `listener' (default: current greenlet)."""
        if listener is None:
            listener = api.getcurrent()
        self._value_links.pop(listener, None)
        self._exception_links.pop(listener, None)

    @staticmethod
    def getLink(listener):
        # Greenlet-like objects are checked first: a Proc has both `throw'
        # and `send', and must be notified via throw.
        if hasattr(listener, 'throw'):
            return LinkToGreenlet(listener)
        if hasattr(listener, 'send'):
            return LinkToEvent(listener)
        elif callable(listener):
            return LinkToCallable(listener)
        else:
            raise TypeError("Don't know how to link to %r" % (listener, ))

    def send(self, value):
        """Deliver `value' to all value links.  May be called only once."""
        assert not self.ready(), "%s has been fired already" % self
        self._result = value
        self._exc = None
        self._start_send()

    def _start_send(self):
        # Delivery happens in the hub greenlet; pass a snapshot of the
        # links plus the live dict so unlink() is still honoured.
        api.get_hub().schedule_call_global(0, self._do_send, self._value_links.items(),
                                           SUCCESS, self._result, self._value_links)

    def send_exception(self, *throw_args):
        """Deliver (typ, value, tb) to all exception links.  Only once."""
        assert not self.ready(), "%s has been fired already" % self
        self._result = None
        self._exc = throw_args
        self._start_send_exception()

    def _start_send_exception(self):
        api.get_hub().schedule_call_global(0, self._do_send, self._exception_links.items(),
                                           FAILURE, self._exc, self._exception_links)

    def _do_send(self, links, tag, value, consult):
        # `links' is a snapshot list of (listener, link) pairs; a pair is
        # fired only while it is still present in the live dict `consult',
        # so each link fires at most once.
        while links:
            listener, link = links.pop()
            try:
                if listener in consult:
                    try:
                        link(self.name, tag, value)
                    finally:
                        consult.pop(listener, None)
            except:
                # Reschedule the remaining links before re-raising so one
                # faulty listener cannot starve the others; the hub logs
                # the raised error.
                api.get_hub().schedule_call_global(0, self._do_send, links, tag, value, consult)
                raise

    def wait(self, timeout=None, *throw_args):
        """Wait until send() or send_exception() is called or `timeout' has
        expired. Return the argument of send or raise the argument of
        send_exception. If timeout has expired, None is returned.

        The arguments, when provided, specify how many seconds to wait and what
        to do when timeout has expired. They are treated the same way as
        api.timeout treats them.
        """
        if self._result is not _NOT_USED:
            if self._exc is None:
                return self._result
            else:
                api.getcurrent().throw(*self._exc)
        if timeout is not None:
            timer = api.timeout(timeout, *throw_args)
            timer.__enter__()
            # timeout=0 means "don't block at all": resolve immediately
            # without switching to the hub.
            if timeout==0:
                if timer.__exit__(None, None, None):
                    return
                else:
                    try:
                        api.getcurrent().throw(*timer.throw_args)
                    except:
                        if not timer.__exit__(*sys.exc_info()):
                            raise
                    return
        # EXC tracks whether the timer still needs the clean __exit__ in
        # the finally clause (i.e. no exception path already closed it).
        EXC = True
        try:
            try:
                event = Waiter()
                self.link(event)
                try:
                    return event.wait()
                finally:
                    self.unlink(event)
            except:
                EXC = False
                if timeout is None or not timer.__exit__(*sys.exc_info()):
                    raise
        finally:
            if timeout is not None and EXC:
                timer.__exit__(None, None, None)
class Waiter(object):
    """Minimal one-shot rendezvous between the hub and one waiting
    greenlet; used by Source.wait() as a throwaway listener."""

    def __init__(self):
        self.greenlet = None  # greenlet currently blocked in wait(), if any

    def send(self, value):
        """Wake up the greenlet that is calling wait() currently (if there is one).
        Can only be called from get_hub().greenlet.
        """
        assert api.getcurrent() is api.get_hub().greenlet
        if self.greenlet is not None:
            self.greenlet.switch(value)

    def send_exception(self, *throw_args):
        """Make greenlet calling wait() wake up (if there is a wait()).
        Can only be called from get_hub().greenlet.
        """
        assert api.getcurrent() is api.get_hub().greenlet
        if self.greenlet is not None:
            self.greenlet.throw(*throw_args)

    def wait(self):
        """Wait until send or send_exception is called. Return value passed
        into send() or raise exception passed into send_exception().
        """
        # One waiter at a time; the hub greenlet itself must not wait.
        assert self.greenlet is None
        current = api.getcurrent()
        assert current is not api.get_hub().greenlet
        self.greenlet = current
        try:
            # Parks this greenlet in the hub until send/send_exception
            # switches (or throws) back into it.
            return api.get_hub().switch()
        finally:
            self.greenlet = None
class Proc(Source):
    """A linkable coroutine based on Source.

    Upon completion, delivers coroutine's result to the listeners.
    """

    def __init__(self, name=None):
        self.greenlet = None  # set by run(); None until then
        Source.__init__(self, name)

    def _repr_helper(self):
        if self.greenlet is not None and self.greenlet.dead:
            dead = '(dead)'
        else:
            dead = ''
        return ['%r%s' % (self.greenlet, dead)] + Source._repr_helper(self)

    def __repr__(self):
        klass = type(self).__name__
        return '<%s %s>' % (klass, ' '.join(self._repr_helper()))

    def __nonzero__(self):
        """True while the coroutine is running or scheduled to run."""
        if self.ready():
            # with current _run this does not makes any difference
            # still, let keep it there
            return False
        # otherwise bool(proc) is the same as bool(greenlet)
        if self.greenlet is not None:
            return bool(self.greenlet)
        # BUGFIX: the original fell through here and implicitly returned
        # None, which makes bool(proc) raise TypeError for a Proc that was
        # never run().
        return False

    @property
    def dead(self):
        # NOTE(review): raises AttributeError when run() was never called
        # (greenlet is None); callers appear to use it only after spawn.
        return self.ready() or self.greenlet.dead

    @classmethod
    def spawn(cls, function, *args, **kwargs):
        """Return a new Proc instance that is scheduled to execute
        function(*args, **kwargs) upon the next hub iteration.
        """
        proc = cls()
        proc.run(function, *args, **kwargs)
        return proc

    def run(self, function, *args, **kwargs):
        """Create a new greenlet to execute `function(*args, **kwargs)'.

        The created greenlet is scheduled to run upon the next hub iteration.
        """
        assert self.greenlet is None, "'run' can only be called once per instance"
        if self.name is None:
            self.name = str(function)
        self.greenlet = spawn_greenlet(self._run, function, args, kwargs)

    def _run(self, function, args, kwargs):
        """Internal top level function.

        Execute *function* and send its result to the listeners.
        """
        try:
            result = function(*args, **kwargs)
        except:
            self.send_exception(*sys.exc_info())
            raise  # let mainloop log the exception
        else:
            self.send(result)

    def throw(self, *throw_args):
        """Used internally to raise the exception.

        Behaves exactly like greenlet's 'throw' with the exception that ProcExit
        is raised by default. Do not use this function as it leaves the current
        greenlet unscheduled forever. Use kill() method instead.
        """
        if not self.dead:
            if not throw_args:
                throw_args = (ProcExit, )
            self.greenlet.throw(*throw_args)

    def kill(self, *throw_args):
        """Raise an exception in the greenlet. Unschedule the current greenlet
        so that this Proc can handle the exception (or die).

        The exception can be specified with throw_args. By default, ProcExit is
        raised.
        """
        if not self.dead:
            if not throw_args:
                throw_args = (ProcExit, )
            # Deliver via the hub so kill() is safe from any greenlet.
            api.get_hub().schedule_call_global(0, self.greenlet.throw, *throw_args)
            if api.getcurrent() is not api.get_hub().greenlet:
                # Yield so the scheduled throw runs before we continue.
                api.sleep(0)
# QQQ maybe Proc should not inherit from Source (because its send() and send_exception()
# QQQ methods are for internal use only)

# Module-level convenience alias for Proc.spawn.
spawn = Proc.spawn
def spawn_link(function, *args, **kwargs):
    """Spawn *function* and link the caller to both its result and its exception."""
    proc = spawn(function, *args, **kwargs)
    proc.link()
    return proc
def spawn_link_value(function, *args, **kwargs):
    """Spawn *function* and link the caller to its return value only."""
    proc = spawn(function, *args, **kwargs)
    proc.link_value()
    return proc
def spawn_link_exception(function, *args, **kwargs):
    """Spawn *function* and link the caller to its exception only."""
    proc = spawn(function, *args, **kwargs)
    proc.link_exception()
    return proc
def trap_errors(errors, func, *args, **kwargs):
    """DEPRECATED in favor of wrap_errors.

    Call ``func(*args, **kwargs)``; if it raises one of *errors* (an
    Exception subclass or a tuple of subclasses), return the exception
    instance instead of letting it propagate.
    """
    try:
        return func(*args, **kwargs)
    except errors as ex:
        # `except ... as ...` replaces the Python-2-only comma form; it is
        # valid on Python 2.6+ and Python 3.
        return ex
class wrap_errors(object):
    """Callable wrapper that converts configured exceptions into return values."""

    def __init__(self, errors, func):
        """Make a new function from `func', such that it catches `errors' (an
        Exception subclass, or a tuple of Exception subclasses) and return
        it as a value.
        """
        self.errors = errors
        self.func = func

    def __call__(self, *args, **kwargs):
        try:
            return self.func(*args, **kwargs)
        except self.errors as ex:
            # Python-2/3-compatible form of the original `except ..., ex`.
            return ex

    def __str__(self):
        return str(self.func)

    def __repr__(self):
        return str(self.func)

    def __getattr__(self, item):
        # Delegate everything else (e.g. __name__, __doc__) to the wrapped func.
        return getattr(self.func, item)
class Pool(object):
    """Bounded pool of linkable coroutines guarded by a semaphore."""

    # Class used for newly allocated linkables; override to customize.
    linkable_class = Proc

    def __init__(self, limit):
        # At most `limit` outstanding linkables at any time.
        self.semaphore = coros.Semaphore(limit)

    def allocate(self):
        """Block until a slot is free, then return a fresh linkable.

        The slot is released automatically (via the link callback) once the
        returned linkable completes.
        """
        self.semaphore.acquire()
        linkable = self.linkable_class()

        def _release(*_args):
            self.semaphore.release()

        linkable.link(_release)
        return linkable
if __name__=='__main__':
    # Run the module's doctests when executed directly.
    import doctest
    doctest.testmod()
|
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import logging
import time
import requests
from flask import Response
def percentage(x, y):
    """Return x as a whole-number percentage of y, rounded half-up (0 if y is 0)."""
    if not y:
        return 0
    return int((x * 100) / y + 0.5)
class EnvoyStats (object):
    """Scrapes a local Envoy admin endpoint (127.0.0.1:8001) and caches the
    results in self.stats, deriving liveness, readiness, log-level info, and
    per-cluster health summaries from the most recent successful scrape.
    """

    def __init__(self, max_live_age=20, max_ready_age=20):
        # max_live_age: seconds of silence from Envoy before is_alive() fails.
        # max_ready_age: seconds of silence from Envoy before is_ready() fails.
        self.update_errors = 0
        self.max_live_age = max_live_age
        self.max_ready_age = max_ready_age
        # Parsed log-level info from /logging; None until the first
        # successful update_log_levels() call.
        self.loginfo = None

        self.stats = {
            "created": time.time(),
            "last_update": 0,
            "last_attempt": 0,
            "update_errors": 0,
            "services": {},
            "envoy": {}
        }

    def is_alive(self):
        """
        Make sure we've heard from Envoy within max_live_age seconds.
        If we haven't yet heard from Envoy at all (we've just booted),
        consider Envoy alive if we haven't yet been running for max_live_age
        seconds -- basically, Envoy gets a grace period to start running at
        boot time.
        """
        epoch = self.stats["last_update"]

        if not epoch:
            epoch = self.stats["created"]

        return (time.time() - epoch) <= self.max_live_age

    def is_ready(self):
        """
        Make sure we've heard from Envoy within max_ready_age seconds.
        If we haven't yet heard from Envoy at all (we've just booted),
        then Envoy is not yet ready, and is_ready() returns False.
        """
        epoch = self.stats["last_update"]

        if not epoch:
            return False

        return (time.time() - epoch) <= self.max_ready_age

    def time_since_boot(self):
        """ Return the number of seconds since Envoy booted. """
        return time.time() - self.stats["created"]

    def time_since_update(self):
        """
        Return the number of seconds since we last heard from Envoy, or None if
        we've never heard from Envoy.
        """
        if self.stats["last_update"] == 0:
            return None
        else:
            return time.time() - self.stats["last_update"]

    def cluster_stats(self, name):
        """Return a summary dict for cluster *name*.

        Always contains 'valid' and 'reason'; on success it is a copy of the
        cached cluster entry plus dashboard fields 'health', 'hmetric', and
        'hcolor'.
        """
        if not self.stats['last_update']:
            # No updates.
            return {
                'valid': False,
                'reason': "No stats updates have succeeded",
                'health': "no stats yet",
                'hmetric': 'startup',
                'hcolor': 'grey'
            }

        # OK, we should be OK.
        when = self.stats['last_update']
        cstat = self.stats['clusters']

        if name not in cstat:
            return {
                'valid': False,
                'reason': "Cluster %s is not defined" % name,
                'health': "undefined cluster",
                'hmetric': 'undefined cluster',
                'hcolor': 'orange',
            }

        # Copy so callers can't mutate the cached entry.
        cstat = dict(**cstat[name])
        cstat.update({
            'valid': True,
            'reason': "Cluster %s updated at %d" % (name, when)
        })

        pct = cstat.get('healthy_percent', None)

        if pct != None:
            # Simple traffic-light thresholds: <70% red, <90% yellow.
            color = 'green'

            if pct < 70:
                color = 'red'
            elif pct < 90:
                color = 'yellow'

            cstat.update({
                'health': "%d%% healthy" % pct,
                'hmetric': int(pct),
                'hcolor': color
            })
        else:
            cstat.update({
                'health': "no requests yet",
                'hmetric': 'waiting',
                'hcolor': 'grey'
            })

        return cstat

    def update_log_levels(self, last_attempt, level=None):
        """POST to Envoy's /logging endpoint (setting *level* if given) and
        parse the reported per-logger levels into self.loginfo.

        Returns True on success, False on connection/HTTP failure.
        """
        # logging.info("updating levels")

        try:
            url = "http://127.0.0.1:8001/logging"

            if level:
                url += "?level=%s" % level

            r = requests.post(url)
        except OSError as e:
            logging.warning("EnvoyStats.update_log_levels failed: %s" % e)
            self.stats['update_errors'] += 1
            return False

        # OMFG. Querying log levels returns with a 404 code.
        if (r.status_code != 200) and (r.status_code != 404):
            logging.warning("EnvoyStats.update_log_levels failed: %s" % r.text)
            self.stats['update_errors'] += 1
            return False

        levels = {}

        for line in r.text.split("\n"):
            if not line:
                continue

            # Indented lines are "logger: level" pairs; collect them keyed
            # by level.
            if line.startswith(' '):
                ( logtype, level ) = line[2:].split(": ")

                x = levels.setdefault(level, {})
                x[logtype] = True

        # logging.info("levels: %s" % levels)

        if len(levels.keys()) == 1:
            # Every logger is at the same level; collapse to a single entry.
            self.loginfo = { 'all': list(levels.keys())[0] }
        else:
            self.loginfo = { x: levels[x] for x in sorted(levels.keys()) }

        # logging.info("loginfo: %s" % self.loginfo)
        return True

    def get_prometheus_state(self):
        """Proxy Envoy's /stats/prometheus output as a Flask Response."""
        try:
            r = requests.get("http://127.0.0.1:8001/stats/prometheus")
        except OSError as e:
            logging.warning("EnvoyStats.get_prometheus_state failed: %s" % e)
            return Response("EnvoyStats.get_prometheus_state failed, OSError: %s" % e, 503)

        if r.status_code != 200:
            logging.warning("EnvoyStats.get_prometheus_state failed: %s" % r.text)
            return Response("EnvoyStats.get_prometheus_state failed: %s" % r.text, r.status_code)
        else:
            return Response(r.text, r.status_code, dict(r.headers))

    def update_envoy_stats(self, last_attempt):
        """Scrape /stats, parse the flat dotted stat names into a nested
        dict, derive ingress request counts and per-cluster health, and merge
        everything into self.stats.

        On failure, bumps self.stats['update_errors'] and leaves the cached
        stats untouched.
        """
        # logging.info("updating stats")

        try:
            r = requests.get("http://127.0.0.1:8001/stats")
        except OSError as e:
            logging.warning("EnvoyStats.update failed: %s" % e)
            self.stats['update_errors'] += 1
            return

        if r.status_code != 200:
            logging.warning("EnvoyStats.update failed: %s" % r.text)
            self.stats['update_errors'] += 1
            return

        # Parse stats into a hierarchy.
        envoy_stats = {}

        for line in r.text.split("\n"):
            if not line:
                continue

            # logging.info('line: %s' % line)
            # NOTE(review): assumes exactly one ':' per stats line; a line
            # with more raises ValueError and aborts this update (caught in
            # update()) -- confirm that's acceptable.
            key, value = line.split(":")
            keypath = key.split('.')

            node = envoy_stats

            # Walk/create nested dicts for all but the last path element.
            for key in keypath[:-1]:
                if key not in node:
                    node[key] = {}

                node = node[key]

            value = value.strip()

            # Skip histograms for the moment.
            # if value.startswith("P0("):
            #     continue
            #     # for field in value.split(' '):
            #     #     if field.startswith('P95('):
            #     #         value = field.split(',')

            try:
                node[keypath[-1]] = int(value)
            except:
                # Non-integer stats (e.g. histogram lines) are skipped.
                continue

        # Now dig into clusters a bit more.
        requests_info = {}
        active_clusters = {}

        if ("http" in envoy_stats) and ("ingress_http" in envoy_stats["http"]):
            # Overall ingress request counts; "bad" = 4xx + 5xx.
            ingress_stats = envoy_stats["http"]["ingress_http"]

            requests_total = ingress_stats.get("downstream_rq_total", 0)

            requests_4xx = ingress_stats.get('downstream_rq_4xx', 0)
            requests_5xx = ingress_stats.get('downstream_rq_5xx', 0)
            requests_bad = requests_4xx + requests_5xx

            requests_ok = requests_total - requests_bad

            requests_info = {
                "total": requests_total,
                "4xx": requests_4xx,
                "5xx": requests_5xx,
                "bad": requests_bad,
                "ok": requests_ok,
            }

        if "cluster" in envoy_stats:
            for cluster_name in envoy_stats['cluster']:
                cluster = envoy_stats['cluster'][cluster_name]

                # # Toss any _%d -- that's madness with our Istio code at the moment.
                # cluster_name = re.sub('_\d+$', '', cluster_name)

                # mapping_name = active_cluster_map[cluster_name]
                # active_mappings[mapping_name] = {}

                # logging.info("cluster %s stats: %s" % (cluster_name, cluster))

                healthy_members = cluster['membership_healthy']
                total_members = cluster['membership_total']
                healthy_percent = percentage(healthy_members, total_members)

                update_attempts = cluster['update_attempt']
                update_successes = cluster['update_success']
                update_percent = percentage(update_successes, update_attempts)

                # Weird.
                # upstream_ok = cluster.get('upstream_rq_2xx', 0)
                # NOTE(review): uses the pending-request total as the base for
                # health math; upstream_rq_completed may be a better fit --
                # confirm.
                upstream_total = cluster.get('upstream_rq_pending_total', 0)
                upstream_4xx = cluster.get('upstream_rq_4xx', 0)
                upstream_5xx = cluster.get('upstream_rq_5xx', 0)
                upstream_bad = upstream_5xx  # used to include 4XX here, but that seems wrong.
                upstream_ok = upstream_total - upstream_bad

                # logging.info("%s total %s bad %s ok %s" % (cluster_name, upstream_total, upstream_bad, upstream_ok))

                if upstream_total > 0:
                    # Request-based health takes precedence over
                    # membership-based health.
                    healthy_percent = percentage(upstream_ok, upstream_total)
                    # logging.debug("cluster %s is %d%% healthy" % (cluster_name, healthy_percent))
                else:
                    healthy_percent = None
                    # logging.debug("cluster %s has had no requests" % cluster_name)

                active_clusters[cluster_name] = {
                    'healthy_members': healthy_members,
                    'total_members': total_members,
                    'healthy_percent': healthy_percent,
                    'update_attempts': update_attempts,
                    'update_successes': update_successes,
                    'update_percent': update_percent,
                    'upstream_ok': upstream_ok,
                    'upstream_4xx': upstream_4xx,
                    'upstream_5xx': upstream_5xx,
                    'upstream_bad': upstream_bad
                }

        # OK, we're now officially finished with all the hard stuff.
        last_update = time.time()

        self.stats.update({
            "last_update": last_update,
            "last_attempt": last_attempt,
            "requests": requests_info,
            "clusters": active_clusters,
            "envoy": envoy_stats
        })

        # logging.info("stats updated")

    # def update(self, active_mapping_names):
    def update(self):
        """Refresh log levels and Envoy stats; failures are logged, never raised."""
        try:
            # Remember when we started.
            last_attempt = time.time()

            self.update_log_levels(last_attempt)
            self.update_envoy_stats(last_attempt)
        except Exception as e:
            logging.error("could not update Envoy stats: %s" % e)
Switch to using upstream_rq_completed as the base for cluster-health calculations.
# Copyright 2018 Datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
import logging
import time
import requests
from flask import Response
def percentage(x, y):
    """Whole-number percentage of x relative to y, rounded half-up; 0 when y is 0."""
    return 0 if y == 0 else int((x * 100) / y + 0.5)
class EnvoyStats (object):
    """Scrapes a local Envoy admin endpoint (127.0.0.1:8001) and caches the
    results in self.stats, deriving liveness, readiness, log-level info, and
    per-cluster health summaries from the most recent successful scrape.
    """

    def __init__(self, max_live_age=20, max_ready_age=20):
        # max_live_age: seconds of silence from Envoy before is_alive() fails.
        # max_ready_age: seconds of silence from Envoy before is_ready() fails.
        self.update_errors = 0
        self.max_live_age = max_live_age
        self.max_ready_age = max_ready_age
        # Parsed log-level info from /logging; None until the first
        # successful update_log_levels() call.
        self.loginfo = None

        self.stats = {
            "created": time.time(),
            "last_update": 0,
            "last_attempt": 0,
            "update_errors": 0,
            "services": {},
            "envoy": {}
        }

    def is_alive(self):
        """
        Make sure we've heard from Envoy within max_live_age seconds.
        If we haven't yet heard from Envoy at all (we've just booted),
        consider Envoy alive if we haven't yet been running for max_live_age
        seconds -- basically, Envoy gets a grace period to start running at
        boot time.
        """
        epoch = self.stats["last_update"]

        if not epoch:
            epoch = self.stats["created"]

        return (time.time() - epoch) <= self.max_live_age

    def is_ready(self):
        """
        Make sure we've heard from Envoy within max_ready_age seconds.
        If we haven't yet heard from Envoy at all (we've just booted),
        then Envoy is not yet ready, and is_ready() returns False.
        """
        epoch = self.stats["last_update"]

        if not epoch:
            return False

        return (time.time() - epoch) <= self.max_ready_age

    def time_since_boot(self):
        """ Return the number of seconds since Envoy booted. """
        return time.time() - self.stats["created"]

    def time_since_update(self):
        """
        Return the number of seconds since we last heard from Envoy, or None if
        we've never heard from Envoy.
        """
        if self.stats["last_update"] == 0:
            return None
        else:
            return time.time() - self.stats["last_update"]

    def cluster_stats(self, name):
        """Return a summary dict for cluster *name*.

        Always contains 'valid' and 'reason'; on success it is a copy of the
        cached cluster entry plus dashboard fields 'health', 'hmetric', and
        'hcolor'.
        """
        if not self.stats['last_update']:
            # No updates.
            return {
                'valid': False,
                'reason': "No stats updates have succeeded",
                'health': "no stats yet",
                'hmetric': 'startup',
                'hcolor': 'grey'
            }

        # OK, we should be OK.
        when = self.stats['last_update']
        cstat = self.stats['clusters']

        if name not in cstat:
            return {
                'valid': False,
                'reason': "Cluster %s is not defined" % name,
                'health': "undefined cluster",
                'hmetric': 'undefined cluster',
                'hcolor': 'orange',
            }

        # Copy so callers can't mutate the cached entry.
        cstat = dict(**cstat[name])
        cstat.update({
            'valid': True,
            'reason': "Cluster %s updated at %d" % (name, when)
        })

        pct = cstat.get('healthy_percent', None)

        if pct != None:
            # Simple traffic-light thresholds: <70% red, <90% yellow.
            color = 'green'

            if pct < 70:
                color = 'red'
            elif pct < 90:
                color = 'yellow'

            cstat.update({
                'health': "%d%% healthy" % pct,
                'hmetric': int(pct),
                'hcolor': color
            })
        else:
            cstat.update({
                'health': "no requests yet",
                'hmetric': 'waiting',
                'hcolor': 'grey'
            })

        return cstat

    def update_log_levels(self, last_attempt, level=None):
        """POST to Envoy's /logging endpoint (setting *level* if given) and
        parse the reported per-logger levels into self.loginfo.

        Returns True on success, False on connection/HTTP failure.
        """
        # logging.info("updating levels")

        try:
            url = "http://127.0.0.1:8001/logging"

            if level:
                url += "?level=%s" % level

            r = requests.post(url)
        except OSError as e:
            logging.warning("EnvoyStats.update_log_levels failed: %s" % e)
            self.stats['update_errors'] += 1
            return False

        # OMFG. Querying log levels returns with a 404 code.
        if (r.status_code != 200) and (r.status_code != 404):
            logging.warning("EnvoyStats.update_log_levels failed: %s" % r.text)
            self.stats['update_errors'] += 1
            return False

        levels = {}

        for line in r.text.split("\n"):
            if not line:
                continue

            # Indented lines are "logger: level" pairs; collect them keyed
            # by level.
            if line.startswith(' '):
                ( logtype, level ) = line[2:].split(": ")

                x = levels.setdefault(level, {})
                x[logtype] = True

        # logging.info("levels: %s" % levels)

        if len(levels.keys()) == 1:
            # Every logger is at the same level; collapse to a single entry.
            self.loginfo = { 'all': list(levels.keys())[0] }
        else:
            self.loginfo = { x: levels[x] for x in sorted(levels.keys()) }

        # logging.info("loginfo: %s" % self.loginfo)
        return True

    def get_prometheus_state(self):
        """Proxy Envoy's /stats/prometheus output as a Flask Response."""
        try:
            r = requests.get("http://127.0.0.1:8001/stats/prometheus")
        except OSError as e:
            logging.warning("EnvoyStats.get_prometheus_state failed: %s" % e)
            return Response("EnvoyStats.get_prometheus_state failed, OSError: %s" % e, 503)

        if r.status_code != 200:
            logging.warning("EnvoyStats.get_prometheus_state failed: %s" % r.text)
            return Response("EnvoyStats.get_prometheus_state failed: %s" % r.text, r.status_code)
        else:
            return Response(r.text, r.status_code, dict(r.headers))

    def update_envoy_stats(self, last_attempt):
        """Scrape /stats, parse the flat dotted stat names into a nested
        dict, derive ingress request counts and per-cluster health, and merge
        everything into self.stats.

        On failure, bumps self.stats['update_errors'] and leaves the cached
        stats untouched.
        """
        # logging.info("updating stats")

        try:
            r = requests.get("http://127.0.0.1:8001/stats")
        except OSError as e:
            logging.warning("EnvoyStats.update failed: %s" % e)
            self.stats['update_errors'] += 1
            return

        if r.status_code != 200:
            logging.warning("EnvoyStats.update failed: %s" % r.text)
            self.stats['update_errors'] += 1
            return

        # Parse stats into a hierarchy.
        envoy_stats = {}

        for line in r.text.split("\n"):
            if not line:
                continue

            # logging.info('line: %s' % line)
            # NOTE(review): assumes exactly one ':' per stats line; a line
            # with more raises ValueError and aborts this update (caught in
            # update()) -- confirm that's acceptable.
            key, value = line.split(":")
            keypath = key.split('.')

            node = envoy_stats

            # Walk/create nested dicts for all but the last path element.
            for key in keypath[:-1]:
                if key not in node:
                    node[key] = {}

                node = node[key]

            value = value.strip()

            # Skip histograms for the moment.
            # if value.startswith("P0("):
            #     continue
            #     # for field in value.split(' '):
            #     #     if field.startswith('P95('):
            #     #         value = field.split(',')

            try:
                node[keypath[-1]] = int(value)
            except:
                # Non-integer stats (e.g. histogram lines) are skipped.
                continue

        # Now dig into clusters a bit more.
        requests_info = {}
        active_clusters = {}

        if ("http" in envoy_stats) and ("ingress_http" in envoy_stats["http"]):
            # Overall ingress request counts; "bad" = 4xx + 5xx.
            ingress_stats = envoy_stats["http"]["ingress_http"]

            requests_total = ingress_stats.get("downstream_rq_total", 0)

            requests_4xx = ingress_stats.get('downstream_rq_4xx', 0)
            requests_5xx = ingress_stats.get('downstream_rq_5xx', 0)
            requests_bad = requests_4xx + requests_5xx

            requests_ok = requests_total - requests_bad

            requests_info = {
                "total": requests_total,
                "4xx": requests_4xx,
                "5xx": requests_5xx,
                "bad": requests_bad,
                "ok": requests_ok,
            }

        if "cluster" in envoy_stats:
            for cluster_name in envoy_stats['cluster']:
                cluster = envoy_stats['cluster'][cluster_name]

                # # Toss any _%d -- that's madness with our Istio code at the moment.
                # cluster_name = re.sub('_\d+$', '', cluster_name)

                # mapping_name = active_cluster_map[cluster_name]
                # active_mappings[mapping_name] = {}

                # logging.info("cluster %s stats: %s" % (cluster_name, cluster))

                healthy_members = cluster['membership_healthy']
                total_members = cluster['membership_total']
                healthy_percent = percentage(healthy_members, total_members)

                update_attempts = cluster['update_attempt']
                update_successes = cluster['update_success']
                update_percent = percentage(update_successes, update_attempts)

                # Weird.
                # upstream_ok = cluster.get('upstream_rq_2xx', 0)
                # upstream_total = cluster.get('upstream_rq_pending_total', 0)
                # Completed upstream requests are the base for the health math.
                upstream_total = cluster.get('upstream_rq_completed', 0)
                upstream_4xx = cluster.get('upstream_rq_4xx', 0)
                upstream_5xx = cluster.get('upstream_rq_5xx', 0)
                upstream_bad = upstream_5xx  # used to include 4XX here, but that seems wrong.
                upstream_ok = upstream_total - upstream_bad

                # logging.info("%s total %s bad %s ok %s" % (cluster_name, upstream_total, upstream_bad, upstream_ok))

                if upstream_total > 0:
                    # Request-based health takes precedence over
                    # membership-based health.
                    healthy_percent = percentage(upstream_ok, upstream_total)
                    # logging.debug("cluster %s is %d%% healthy" % (cluster_name, healthy_percent))
                else:
                    healthy_percent = None
                    # logging.debug("cluster %s has had no requests" % cluster_name)

                active_clusters[cluster_name] = {
                    'healthy_members': healthy_members,
                    'total_members': total_members,
                    'healthy_percent': healthy_percent,
                    'update_attempts': update_attempts,
                    'update_successes': update_successes,
                    'update_percent': update_percent,
                    'upstream_ok': upstream_ok,
                    'upstream_4xx': upstream_4xx,
                    'upstream_5xx': upstream_5xx,
                    'upstream_bad': upstream_bad
                }

        # OK, we're now officially finished with all the hard stuff.
        last_update = time.time()

        self.stats.update({
            "last_update": last_update,
            "last_attempt": last_attempt,
            "requests": requests_info,
            "clusters": active_clusters,
            "envoy": envoy_stats
        })

        # logging.info("stats updated")

    # def update(self, active_mapping_names):
    def update(self):
        """Refresh log levels and Envoy stats; failures are logged, never raised."""
        try:
            # Remember when we started.
            last_attempt = time.time()

            self.update_log_levels(last_attempt)
            self.update_envoy_stats(last_attempt)
        except Exception as e:
            logging.error("could not update Envoy stats: %s" % e)
|
import factory
from factory import fuzzy
from .. import models
class MaatschappelijkeActiviteitFactory(factory.DjangoModelFactory):
    """Test factory for MaatschappelijkeActiviteit."""
    class Meta:
        model = models.MaatschappelijkeActiviteit
    # 18-digit id drawn from a narrow fixed range so test ids stay predictable.
    id = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
class PersoonFactory(factory.DjangoModelFactory):
    """Test factory for Persoon."""
    class Meta:
        model = models.Persoon
    # 18-digit primary key in a narrow fixed range.
    prsid = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
class VestigingFactory(factory.DjangoModelFactory):
    """Test factory for Vestiging; creates a parent MaatschappelijkeActiviteit."""
    class Meta:
        model = models.Vestiging
    id = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
    hoofdvestiging = fuzzy.FuzzyChoice(choices=[True, False])
    # Each Vestiging belongs to a freshly created activiteit.
    maatschappelijke_activiteit = factory.SubFactory(MaatschappelijkeActiviteitFactory)
class FunctievervullingFactory(factory.DjangoModelFactory):
    """Test factory for Functievervulling."""
    class Meta:
        model = models.Functievervulling
    fvvid = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
TG-61 Fix failing tests
De velden waar je lookup op pleegt, mogen niet leeg zijn.
import factory
from factory import fuzzy
from .. import models
class MaatschappelijkeActiviteitFactory(factory.DjangoModelFactory):
    """Test factory for MaatschappelijkeActiviteit."""
    class Meta:
        model = models.MaatschappelijkeActiviteit
    # 18-digit id drawn from a narrow fixed range so test ids stay predictable.
    id = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
    # Lookup field; must never be empty in tests.
    kvk_nummer = fuzzy.FuzzyInteger(low=1, high=99999999)
class PersoonFactory(factory.DjangoModelFactory):
    """Test factory for Persoon."""
    class Meta:
        model = models.Persoon
    # 18-digit primary key in a narrow fixed range.
    prsid = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
class VestigingFactory(factory.DjangoModelFactory):
    """Test factory for Vestiging; creates a parent MaatschappelijkeActiviteit."""
    class Meta:
        model = models.Vestiging
    id = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
    # Lookup field; must never be empty in tests.
    vestigingsnummer = fuzzy.FuzzyInteger(low=1, high=9999999)
    hoofdvestiging = fuzzy.FuzzyChoice(choices=[True, False])
    # Each Vestiging belongs to a freshly created activiteit.
    maatschappelijke_activiteit = factory.SubFactory(MaatschappelijkeActiviteitFactory)
class FunctievervullingFactory(factory.DjangoModelFactory):
    """Test factory for Functievervulling."""
    class Meta:
        model = models.Functievervulling
    fvvid = fuzzy.FuzzyInteger(low=100000000000000000, high=100000000000000099)
|
from django.contrib import admin
from django.forms import models as modelforms
from ella.core.models import Author, Source, Category, Listing, Related
class ListingForm(modelforms.ModelForm):
    """ModelForm bound to the Listing model."""
    class Meta:
        model = Listing
class ListingInlineAdmin(admin.TabularInline):
    """Tabular inline for editing Listing rows (two blank extras)."""
    model = Listing
    extra = 2
    fieldsets = ((None, {'fields': ('category', 'publish_from', 'commercial',)}),)
class RelatedInlineAdmin(admin.TabularInline):
    """Tabular inline for editing Related rows (three blank extras)."""
    model = Related
    extra = 3
    # raw_id_fields = ('publishable_id',)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category: filterable by site, slug prepopulated from title."""
    list_filter = ('site',)
    list_display = ('draw_title', 'tree_path', '__unicode__')
    search_fields = ('title', 'slug',)
    #ordering = ('site', 'tree_path',)
    prepopulated_fields = {'slug': ('title',)}
class AuthorAdmin(admin.ModelAdmin):
    """Admin for Author: slug prepopulated from name, photo via raw-id widget."""
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ('name',)
    raw_id_fields = ('photo',)
class SourceAdmin(admin.ModelAdmin):
    """Admin for Source: lists name and URL, searchable by name."""
    list_display = ('name', 'url',)
    search_fields = ('name',)
class PublishableAdmin(admin.ModelAdmin):
    """ Default admin options for all publishables """
    list_display = ('title', 'category', 'publish_from')
    list_filter = ('category', 'authors',)
    search_fields = ('title', 'description', 'slug', 'authors__name', 'authors__slug',)  # FIXME: 'tags__tag__name',)
    raw_id_fields = ('photo',)
    prepopulated_fields = {'slug': ('title',)}
    # NOTE(review): rich_text_fields/suggest_fields are not stock ModelAdmin
    # options; presumably consumed by ella's admin extensions -- confirm.
    rich_text_fields = {None: ('description',)}
    suggest_fields = {
        'category': ('tree_path', 'title', 'slug',),
        'authors': ('name', 'slug', 'email',),
        'source': ('name', 'url',),
    }
# Wire up the core models; Listing uses the stock ModelAdmin here.
admin.site.register(Category, CategoryAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(Author, AuthorAdmin)
admin.site.register(Listing)
Customize the Listing admin change-list page.
from django.contrib import admin
from django.forms import models as modelforms
from ella.core.models import Author, Source, Category, Listing, Related
class ListingForm(modelforms.ModelForm):
    """ModelForm bound to the Listing model."""
    class Meta:
        model = Listing
class ListingInlineAdmin(admin.TabularInline):
    """Tabular inline for editing Listing rows (two blank extras)."""
    model = Listing
    extra = 2
    fieldsets = ((None, {'fields': ('category', 'publish_from', 'commercial',)}),)
class RelatedInlineAdmin(admin.TabularInline):
    """Tabular inline for editing Related rows (three blank extras)."""
    model = Related
    extra = 3
    # raw_id_fields = ('publishable_id',)
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category: filterable by site, slug prepopulated from title."""
    list_filter = ('site',)
    list_display = ('draw_title', 'tree_path', '__unicode__')
    search_fields = ('title', 'slug',)
    #ordering = ('site', 'tree_path',)
    prepopulated_fields = {'slug': ('title',)}
class AuthorAdmin(admin.ModelAdmin):
    """Admin for Author: slug prepopulated from name, photo via raw-id widget."""
    prepopulated_fields = {'slug': ('name',)}
    search_fields = ('name',)
    raw_id_fields = ('photo',)
class SourceAdmin(admin.ModelAdmin):
    """Admin for Source: lists name and URL, searchable by name."""
    list_display = ('name', 'url',)
    search_fields = ('name',)
class PublishableAdmin(admin.ModelAdmin):
    """ Default admin options for all publishables """
    list_display = ('title', 'category', 'publish_from')
    list_filter = ('category', 'authors',)
    search_fields = ('title', 'description', 'slug', 'authors__name', 'authors__slug',)  # FIXME: 'tags__tag__name',)
    raw_id_fields = ('photo',)
    prepopulated_fields = {'slug': ('title',)}
    # NOTE(review): rich_text_fields/suggest_fields are not stock ModelAdmin
    # options; presumably consumed by ella's admin extensions -- confirm.
    rich_text_fields = {None: ('description',)}
    suggest_fields = {
        'category': ('tree_path', 'title', 'slug',),
        'authors': ('name', 'slug', 'email',),
        'source': ('name', 'url',),
    }
class ListingAdmin(admin.ModelAdmin):
    """Change-list configuration for Listing: publish-date drill-down,
    category filter, and search across the related publishable's
    title/slug/description.
    """
    date_hierarchy = 'publish_from'
    list_display = ('__unicode__', 'publish_from', 'publish_to',)
    list_filter = ('category',)
    search_fields = ('publishable__title', 'publishable__slug',
                     'publishable__description',)
admin.site.register(Category, CategoryAdmin)
admin.site.register(Source, SourceAdmin)
admin.site.register(Author, AuthorAdmin)
# Listing gets the customized change list defined above.
admin.site.register(Listing, ListingAdmin)
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
import six
from six.moves import builtins
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# np_dtypes is loaded lazily: LazyLoader defers the numpy_ops import until
# first attribute access.
np_dtypes = LazyLoader(
    "np_dtypes", globals(),
    "tensorflow.python.ops.numpy_ops.np_dtypes")

# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after
@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
  r"""Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values are generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num <= 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour
  except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0 11.0 12.0]
  ```

  `Start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `Axis` is where the values will be generated (the dimension in the
  returned tensor which corresponds to the axis will be equal to `num`)

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    num = cast(num_int, dtype=start.dtype)

    # Broadcast start/stop to a common shape before expanding the new axis.
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)

    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    # Normalize a negative axis to its non-negative equivalent.
    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)

    # The purpose is to avoid having negative values when repeating.
    # num_fill is the count of interior points (excludes both endpoints).
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # To avoid having negative values in the range or zero division
    # the result is sliced in the end so a correct result is returned for
    # num == 1, and num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors as delta.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, we will throw exception in the range
    # otherwise use the same div for delta
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, its limited
    # (e.g. doesn't support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1. 1. 1. ... 1. num_fill 1. 1. ... 1.], where the
    # index of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    # Keep `num` entries along `axis`; full extent everywhere else.
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)


# Public short name for linspace_nd.
linspace = linspace_nd
# Wrap the generated arg_max/arg_min ops in deprecation notices that point
# users at tf.math.argmax/argmin, then export them under their TF-1.x names.
arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))

# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 endpoint: accepts the deprecated `dimension` alias for `axis` and
  # forwards to argmax_v2. The docstring is installed by the `_set_doc`
  # decorator above from the generated op's doc (with "dimension" -> "axis").
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)
@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  Ties are broken by returning the smallest winning index.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C)  # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  # A missing axis means "reduce over axis 0" for backward compatibility.
  return gen_math_ops.arg_max(
      input, 0 if axis is None else axis, name=name, output_type=output_type)
@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 endpoint: accepts the deprecated `dimension` alias for `axis` and
  # forwards to argmin_v2. The docstring is installed by the `_set_doc`
  # decorator above from the generated op's doc (with "dimension" -> "axis").
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)
@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Ties are broken by returning the smallest winning index.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
      `uint32`, `uint64`.
    axis: A `Tensor` of type `int32` or `int64`, in the range
      `[-rank(input), rank(input))`. Describes which axis of the input to
      reduce across. For vectors, use axis = 0. Defaults to 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults
      to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input = a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1 which is the smallest element of a across axis 0
  ```
  """
  # A missing axis means "reduce over axis 0" for backward compatibility.
  return gen_math_ops.arg_min(
      input, 0 if axis is None else axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  For integer or floating-point input, returns a tensor of the same dtype
  where every element is the absolute value of the corresponding input
  element. For complex input, returns a real (`float32`/`float64`) tensor
  holding the magnitude \\(\sqrt{a^2 + b^2}\\) of each element \\(a + bj\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
    with absolute values. Note, for `complex64` or `complex128` input, the
    returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    if not tensor.dtype.is_complex:
      return gen_math_ops._abs(tensor, name=name)
    # Complex input: magnitude in the matching real dtype.
    return gen_math_ops.complex_abs(
        tensor, Tout=tensor.dtype.real_dtype, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Thin private wrapper over the generated `Bucketize` op."""
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors.

  Operator overloads cannot carry a caller-chosen op name; this helper wraps
  the left operand so `/` and `//` thread `name` into the created op.
  """

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    # x: the left operand forwarded to the division helpers below.
    self.x = x
    # name: preferred op name, passed through to the created op.
    self.name = name

  def __truediv__(self, y):
    # Python 3 `/`: always true (floating) division.
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    # `//`: floor division.
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    # Python 2 `/`: floor for integral dtypes, true division otherwise.
    return _div_python2(self.x, y, self.name)
@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`
    y: A `Tensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with same shape as input
  """
  if name is None:
    # Make sure at least `x` is a tensor so `/` dispatches to TF's overload.
    if not tensor_util.is_tf_type(x):
      dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
      x = ops.convert_to_tensor(x, dtype=dtype)
    return x / y
  # The `/` operator overload cannot carry a custom op name; delegate
  # through a helper whose __truediv__ threads `name` into the created op.
  return DivideDelegateWithName(x, name) / y
@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
  pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7,6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape. (More about broadcasting
  [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)

  For example:

  >>> x = tf.ones([1, 2]);
  >>> y = tf.ones([2, 1]);
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
       [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.

  Raises:
    InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """
  # Straight delegation to the generated elementwise Mul op.
  return gen_math_ops.mul(x, y, name)
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias kept for old `tf.mul` callers; delegates to the
  # generated Mul op.
  return gen_math_ops.mul(x, y, name)


# Prepend the generated op's doc to whatever the deprecation decorator may
# have left in `_mul.__doc__` (possibly None).
_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  # Elementwise x - y; docstring is copied from the generated Sub op below.
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias kept for old `tf.sub` callers; delegates to the
  # generated Sub op.
  return gen_math_ops.sub(x, y, name)


# Prepend the generated op's doc to whatever the deprecation decorator may
# have left in `_sub.__doc__` (possibly None).
_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))
# `tf.negative` is simply the generated elementwise Neg op.
negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  # Deprecated alias for `negative` kept for old `tf.neg` callers.
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this is operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  # Guard: the multiplier must be a true scalar with statically known shape.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the materialized values; indices/dense_shape pass through,
    # which keeps this efficient for sparse gradients.
    return ops.IndexedSlices(
        gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
  return gen_math_ops.mul(scalar, x, name)
@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
@dispatch.add_dispatch_support
def softplus(features, name=None):
  """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.

  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus` always
  takes on positive values.

  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">

  Example:

  >>> import tensorflow as tf
  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
  array([0.6931472, 1.3132616], dtype=float32)

  Args:
    features: `Tensor`
    name: Optional: name to associate with this operation.
  Returns:
    `Tensor`
  """
  # Straight delegation to the generated NN op.
  return gen_nn_ops.softplus(features, name)
@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
  # V2 endpoint: identical to scalar_mul but wraps the op in a "scalar_mul"
  # name scope. Docstring is copied from scalar_mul by `_set_doc` above.
  with ops.name_scope(name, "scalar_mul", [x]) as name:
    return scalar_mul(scalar, x, name)
@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  # Straight delegation to the generated Pow op inside a name scope.
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: Real and imag must be correct types
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # Both parts must share one float dtype, which fixes the output dtype:
    # float32 pairs -> complex64, float64 pairs -> complex128.
    if real.dtype == dtypes.float64 and imag.dtype == dtypes.float64:
      Tout = dtypes.complex128
    elif real.dtype == dtypes.float32 and imag.dtype == dtypes.float32:
      Tout = dtypes.complex64
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.sign", "sign")
@dispatch.add_dispatch_support
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> # real number
  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([ 0.,  1., -1.], dtype=float32)>

  >>> # complex number
  >>> tf.math.sign([1 + 1j, 0 + 0j])
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>

  Args:
    x: A Tensor. Must be one of the following types: bfloat16, half, float32,
      float64, int32, int64, complex64, complex128.
    name: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as x.

    If x is a SparseTensor, returns SparseTensor(x.indices,
    tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if not x.dtype.is_complex:
    return gen_math_ops.sign(x, name=name)
  # Complex case: x / |x|, with 0 where x == 0 (div_no_nan). The magnitude is
  # computed in the matching real dtype, then cast back for the division.
  abs_dtype = dtypes.float32 if x.dtype == dtypes.complex64 else dtypes.float64
  magnitude = cast(gen_math_ops.complex_abs(x, Tout=abs_dtype), dtype=x.dtype)
  return gen_math_ops.div_no_nan(x, magnitude, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      # Extract the real component in the matching real dtype
      # (complex64 -> float32, complex128 -> float64).
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      # Already real: return the input unchanged (no op is created).
      return input
@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      # Extract the imaginary component in the matching real dtype.
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      # Real input has no imaginary part by definition: all zeros.
      return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, a tensor of all zeros is returned.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
      # Real input: the argument is pi for negative values, 0 otherwise
      # (atan2(0, a) semantics).
      return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
                             array_ops.zeros_like(input))
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded"; skip creating a no-op node.
  return x if x.dtype.is_integer else gen_math_ops.round(x, name=name)
@tf_export("cast", "dtypes.cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Note casting nan and inf values to integral types has undefined behavior.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
    same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  # Fast path: a plain Tensor (or resource variable) already of the target
  # dtype is returned unchanged, with no op created.
  # NOTE(review): `_resource_variable_type` is injected by
  # resource_variable_ops at import time; if that module were never imported
  # it would still be None and this isinstance would raise — confirm the
  # import ordering guarantees.
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Cast only the values; indices and dense_shape are dtype-independent.
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, ops.IndexedSlices):
      # Same treatment for IndexedSlices: only values are cast.
      values_cast = cast(x.values, base_type, name=name)
      x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    # NOTE(review): at this point x's dtype equals base_type in every branch,
    # so complex-and-floating can't both hold — this warning looks
    # unreachable; confirm before relying on it.
    if x.dtype.is_complex and base_type.is_floating:
      logging.warn("Casting complex to real discards imaginary part.")
    return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    # Clamp below when the target's minimum is above the source's minimum.
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(
          value,
          ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
    # Clamp above when the target's maximum is below the source's maximum.
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(
          value,
          ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
    # The cast itself is now guaranteed not to over/underflow.
    return cast(value, dtype, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_float"])
@dispatch.add_dispatch_support
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.float32).
  return cast(x, dtypes.float32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_double"])
@dispatch.add_dispatch_support
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.float64).
  return cast(x, dtypes.float64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int32"])
@dispatch.add_dispatch_support
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.int32).
  return cast(x, dtypes.int32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int64"])
@dispatch.add_dispatch_support
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.int64).
  return cast(x, dtypes.int64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_bfloat16"])
@dispatch.add_dispatch_support
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.bfloat16).
  return cast(x, dtypes.bfloat16, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex64"])
@dispatch.add_dispatch_support
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.complex64).
  return cast(x, dtypes.complex64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex128"])
@dispatch.add_dispatch_support
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.
  """
  # Deprecated v1 alias for tf.cast(x, tf.complex128).
  return cast(x, dtypes.complex128, name=name)
# Wire the unary operators `-t` and `abs(t)` on Tensor to the ops above.
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  # Checked in order; the first matching type wins.
  for matcher, extract in (
      (numbers.Real, lambda v: v),  # plain Python numbers pass through
      (ops.Tensor, lambda v: v.dtype.as_numpy_dtype),
      (dtypes.DType, lambda v: v.as_numpy_dtype),
      (tensor_shape.TensorShape, lambda v: np.int32),  # dims are int32
  ):
    if isinstance(x, matcher):
      return extract(x)
  # Sequences should have been flattened by the caller before reaching here.
  if isinstance(x, (list, tuple)):
    raise ValueError("Got sequence {}".format(x))
  return x
def maybe_promote_tensors(*tensors, force_same_dtype=True):
  """Promote tensors if numpy style promotion is enabled."""
  if not tensors:
    return tensors
  if not ops._numpy_style_type_promotion:
    # Legacy TF promotion: optionally coerce every operand to the first
    # tensor's dtype (no cross-dtype promotion rules applied).
    if not force_same_dtype:
      return tensors
    promoted_tensors = []
    promoted_tensors.append(tensors[0])
    # The first tensor's dtype wins; all later operands are converted to it.
    dtype = tensors[0].dtype.base_dtype
    for tensor in tensors[1:]:
      promoted_tensors.append(
          ops.convert_to_tensor(tensor, dtype, name="x"))
    return promoted_tensors
  # NumPy-style promotion: compute one common result type across all
  # operands (via np.result_type semantics) and cast/convert everything to it.
  result_type = np_dtypes._result_type(
      *[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
  def _promote_or_cast(x):
    # Existing tensors are cast; plain Python values are converted directly.
    if isinstance(x, ops.Tensor):
      x = cast(x, result_type)
    else:
      x = ops.convert_to_tensor(x, result_type)
    return x
  return [_promote_or_cast(x) for x in tensors]
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """

  def binary_op_wrapper(x, y):
    # Implements `x <op> y` (forward form) for dense tensors.
    with ops.name_scope(None, op_name, [x, y]) as name:
      try:
        # force_same_dtype=False to preserve existing TF behavior
        # TODO(b/178860388): Figure out why binary_op_wrapper and
        #   r_binary_op_wrapper use different force_same_dtype values.
        x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
        return func(x, y, name=name)
      except (TypeError, ValueError) as e:
        # Even if dispatching the op failed, the RHS may be a tensor aware
        # object that can implement the operator with knowledge of itself
        # and the tensor.
        # If the RHS is not tensor aware we still want to raise the
        # original error from the LHS, because it may be more
        # informative.
        if hasattr(type(y), "__r%s__" % op_name):
          try:
            r_op = getattr(y, "__r%s__" % op_name)
            out = r_op(x)
            if out is NotImplemented:
              # Bare `raise` re-raises the original LHS exception `e`.
              raise
            return out
          except (TypeError, ValueError):
            # The reflected attempt also failed; surface the LHS error.
            raise e
        else:
          raise

  def binary_op_wrapper_sparse(sp_x, y):
    # Implements `sparse <op> dense`: func receives the sparse components
    # plus the dense operand and returns new sparse values.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  def r_binary_op_wrapper(y, x):
    # Implements the reflected form `x <op> tensor_y` where x is not a
    # Tensor; note the operand order passed to func is (x, y).
    with ops.name_scope(None, op_name, [x, y]) as name:
      # TODO(b/178860388): Figure out why binary_op_wrapper and
      #   r_binary_op_wrapper use different force_same_dtype values.
      y, x = maybe_promote_tensors(y, x)
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # SparseTensor only gets the forward form; there is no reflected sparse
    # overload registered here.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
    # Integer dtypes are promoted to a float dtype wide enough to represent
    # them exactly, matching Python 3 `/` semantics.
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    # Floating and complex dtypes divide in place — no promotion.
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    lhs_dtype = sp_values.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    # True division never implicitly mixes dtypes.
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    if lhs_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % lhs_dtype)
    # Integer operands are promoted to a float dtype (Python 3 semantics);
    # a None entry means the dtype divides as-is.
    target_dtype = _TRUEDIV_TABLE[lhs_dtype]
    if target_dtype is not None:
      sp_values = cast(sp_values, target_dtype)
      y = cast(y, target_dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Divides x / y with Python 3 semantics (integers promote to floats)."""
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype, y_dtype = x.dtype.base_dtype, y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    # A non-None table entry means the inputs are integral and must be cast
    # to a floating dtype before the division.
    promoted = _TRUEDIV_TABLE[x_dtype]
    if promoted is not None:
      x = cast(x, promoted)
      y = cast(y, promoted)
    return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Python 2's `/` truncates for integers but is true division for floats, so
  this dispatches to `floor_div` or `real_div` based on dtype. Used for
  `Tensor.__div__`.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    dtype = x.dtype.base_dtype
    if dtype != y.dtype.base_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (dtype, y.dtype.base_dtype))
    if dtype.is_floating or dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    return gen_math_ops.floor_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
"""Divides x / y elementwise (using Python 3 division operator semantics).
NOTE: Prefer using the Tensor operator or tf.divide which obey Python
division operator semantics.
This function forces Python 3 division operator semantics where all integer
arguments are cast to floating types first. This op is generated by normal
`x / y` division in Python 3 and in Python 2.7 with
`from __future__ import division`. If you want integer division that rounds
down, use `x // y` or `tf.math.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
return _truediv_python3(x, y, name)
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
@dispatch.add_dispatch_support
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python 3 division operator semantics.

  Divides `x` by `y` with Python 2 semantics: when both operands are
  integers the result is an integer, unlike Python 3 where `/` always
  produces a float and `//` always produces an integer.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name=name)
@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
@dispatch.add_dispatch_support
def div_no_nan(x, y, name=None):
"""Computes a safe divide which returns 0 if `y` (denominator) is zero.
For example:
>>> tf.constant(3.0) / 0.0
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
>>> tf.math.divide_no_nan(3.0, 0.0)
<tf.Tensor: shape=(), dtype=float32, numpy=0.0>
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x divided by y.
"""
with ops.name_scope(name, "div_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
return gen_math_ops.div_no_nan(x, y, name=name)
@tf_export("math.multiply_no_nan")
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
"""Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.
Args:
x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
y: A `Tensor` whose dtype is compatible with `x`.
name: A name for the operation (optional).
Returns:
The element-wise value of the x times y.
"""
with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
return gen_math_ops.mul_no_nan(x, y, name=name)
# TODO(aselle): This should be removed
# Module-level alias: `mod` is Python-style (floor) modulus.
mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding toward the most negative integer.
The same as `tf.compat.v1.div(x,y)` for integers, but uses
`tf.floor(tf.compat.v1.div(x,y))` for
floating point arguments so that the result is always an integer (though
possibly an integer represented as floating point). This op is generated by
`x // y` floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` denominator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down.
Raises:
TypeError: If the inputs are complex.
"""
with ops.name_scope(name, "floordiv", [x, y]) as name:
return gen_math_ops.floor_div(x, y, name=name)
# Thin aliases over the generated kernels, kept for API stability.
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod
@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
"""The operation invoked by the `Tensor.__add__` operator.
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__add__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
x: The left-hand side of the `+` operator.
y: The right-hand side of the `+` operator.
name: an optional name for the operation.
Returns:
The result of the elementwise `+` operation.
"""
if not isinstance(y, ops.Tensor) and not isinstance(
y, sparse_tensor.SparseTensor):
y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
if x.dtype == dtypes.string:
return gen_math_ops.add(x, y, name=name)
else:
return gen_math_ops.add_v2(x, y, name=name)
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if not isinstance(y, sparse_tensor.SparseTensor):
    return multiply(x, y, name=name)
  # Dense * Sparse: multiply only against the sparse operand's stored
  # values, keeping its indices and shape intact.
  scaled_values = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                        y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, scaled_values, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# Install arithmetic operators on SparseTensor (the sparse operand is the
# left-hand side; only div/truediv/mul have sparse-dense kernels here).
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

# Install the arithmetic operators (`+`, `-`, `*`, `/`, `//`, `%`, `**`)
# on dense Tensor.
_OverrideBinaryOperatorHelper(_add_dispatch, "add")
_OverrideBinaryOperatorHelper(subtract, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
"""Logical XOR function.
x ^ y = (x | y) & ~(x & y)
Requires that `x` and `y` have the same shape or have
[broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
shapes. For example, `x` and `y` can be:
- Two single elements of type `bool`
- One `tf.Tensor` of type `bool` and one single `bool`, where the result will
be calculated by applying logical XOR with the single element to each
element in the larger Tensor.
- Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
the result will be the element-wise logical XOR of the two input tensors.
Usage:
>>> a = tf.constant([True])
>>> b = tf.constant([False])
>>> tf.math.logical_xor(a, b)
<tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
>>> c = tf.constant([True])
>>> x = tf.constant([False, True, True, False])
>>> tf.math.logical_xor(c, x)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False, True])>
>>> y = tf.constant([False, False, True, True])
>>> z = tf.constant([False, True, False, True])
>>> tf.math.logical_xor(y, z)
<tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
Args:
x: A `tf.Tensor` type bool.
y: A `tf.Tensor` of type bool.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
"""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return gen_math_ops.logical_and(
gen_math_ops.logical_or(x, y),
gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
name=name)
def and_(x, y, name=None):
  """Implements `x & y`: logical AND for bools, bitwise AND otherwise.

  Args:
    x: Left operand; a `Tensor` of bool or integer dtype.
    y: Right operand, compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise logical or bitwise AND of `x` and `y`.
  """
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_and(x, y, name)
  # Fix: previously `name` was silently dropped on the bitwise path.
  return gen_bitwise_ops.bitwise_and(x, y, name=name)
def or_(x, y, name=None):
  """Implements `x | y`: logical OR for bools, bitwise OR otherwise.

  Args:
    x: Left operand; a `Tensor` of bool or integer dtype.
    y: Right operand, compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise logical or bitwise OR of `x` and `y`.
  """
  if x.dtype == dtypes.bool:
    return gen_math_ops.logical_or(x, y, name)
  # Fix: previously `name` was silently dropped on the bitwise path.
  return gen_bitwise_ops.bitwise_or(x, y, name=name)
def xor_(x, y, name=None):
  """Implements `x ^ y`: logical XOR for bools, bitwise XOR otherwise.

  Args:
    x: Left operand; a `Tensor` of bool or integer dtype.
    y: Right operand, compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise logical or bitwise XOR of `x` and `y`.
  """
  if x.dtype == dtypes.bool:
    return logical_xor(x, y, name)
  # Fix: previously `name` was silently dropped on the bitwise path.
  return gen_bitwise_ops.bitwise_xor(x, y, name=name)
def invert_(x, name=None):
  """Implements `~x`: logical NOT for bools, bitwise inversion otherwise."""
  if x.dtype != dtypes.bool:
    return gen_bitwise_ops.invert(x, name=name)
  return gen_math_ops.logical_not(x, name=name)
# Hook the `&`, `|`, `^` binary operators and the unary `~` operator up to
# the dtype-dispatching helpers above.
_OverrideBinaryOperatorHelper(and_, "and")
_OverrideBinaryOperatorHelper(or_, "or")
_OverrideBinaryOperatorHelper(xor_, "xor")
ops.Tensor._override_operator("__invert__", invert_)
def _promote_dtypes_decorator(fn):
  """Wraps a binary op so both operands are promoted before dispatch."""

  def _promoted_call(left, right, *args, **kwargs):
    left, right = maybe_promote_tensors(left, right, force_same_dtype=False)
    return fn(left, right, *args, **kwargs)

  return tf_decorator.make_decorator(fn, _promoted_call)
ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
gen_math_ops.less))
ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
gen_math_ops.less_equal))
ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
gen_math_ops.greater))
ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
gen_math_ops.greater_equal))
@tf_export("math.equal", "equal")
@dispatch.add_dispatch_support
def equal(x, y, name=None):
"""Returns the truth value of (x == y) element-wise.
Performs a [broadcast](
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
arguments and then an element-wise equality comparison, returning a Tensor of
boolean values.
For example:
>>> x = tf.constant([2, 4])
>>> y = tf.constant(2)
>>> tf.math.equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
>>> x = tf.constant([2, 4])
>>> y = tf.constant([2, 4])
>>> tf.math.equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
Args:
x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
Raises:
`tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
"""
return gen_math_ops.equal(x, y, name=name)
@tf_export("math.not_equal", "not_equal")
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
"""Returns the truth value of (x != y) element-wise.
Performs a [broadcast](
https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
arguments and then an element-wise inequality comparison, returning a Tensor
of boolean values.
For example:
>>> x = tf.constant([2, 4])
>>> y = tf.constant(2)
>>> tf.math.not_equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])>
>>> x = tf.constant([2, 4])
>>> y = tf.constant([2, 4])
>>> tf.math.not_equal(x, y)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
Args:
x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of type bool with the same size as that of x or y.
Raises:
`tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
"""
return gen_math_ops.not_equal(x, y, name=name)
@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
"""The operation invoked by the `Tensor.__eq__` operator.
Compares two tensors element-wise for equality if they are
broadcast-compatible; or returns False if they are not broadcast-compatible.
(Note that this behavior differs from `tf.math.equal`, which raises an
exception if the two tensors are not broadcast-compatible.)
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__eq__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
self: The left-hand side of the `==` operator.
other: The right-hand side of the `==` operator.
Returns:
The result of the elementwise `==` operation, or `False` if the arguments
are not broadcast-compatible.
"""
if other is None:
return False
g = getattr(self, "graph", None)
if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
(g is None or g.building_function)):
self, other = maybe_promote_tensors(self, other)
return gen_math_ops.equal(self, other, incompatible_shape_error=False)
else:
# In legacy graph mode, tensor equality is object equality
return self is other
@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
"""The operation invoked by the `Tensor.__ne__` operator.
Compares two tensors element-wise for inequality if they are
broadcast-compatible; or returns True if they are not broadcast-compatible.
(Note that this behavior differs from `tf.math.not_equal`, which raises an
exception if the two tensors are not broadcast-compatible.)
Purpose in the API:
This method is exposed in TensorFlow's API so that library developers
can register dispatching for `Tensor.__ne__` to allow it to handle
custom composite tensors & other custom objects.
The API symbol is not intended to be called by users directly and does
appear in TensorFlow's generated documentation.
Args:
self: The left-hand side of the `!=` operator.
other: The right-hand side of the `!=` operator.
Returns:
The result of the elementwise `!=` operation, or `True` if the arguments
are not broadcast-compatible.
"""
if other is None:
return True
if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
self, other = maybe_promote_tensors(self, other)
return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
else:
# In legacy graph mode, tensor equality is object equality
return self is not other
ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)
@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
"""Creates a sequence of numbers.
Creates a sequence of numbers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
The dtype of the resulting tensor is inferred from the inputs unless
it is provided explicitly.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
>>> start = 3
>>> limit = 18
>>> delta = 3
>>> tf.range(start, limit, delta)
<tf.Tensor: shape=(5,), dtype=int32,
numpy=array([ 3, 6, 9, 12, 15], dtype=int32)>
>>> start = 3
>>> limit = 1
>>> delta = -0.5
>>> tf.range(start, limit, delta)
<tf.Tensor: shape=(4,), dtype=float32,
numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>
>>> limit = 5
>>> tf.range(limit)
<tf.Tensor: shape=(5,), dtype=int32,
numpy=array([0, 1, 2, 3, 4], dtype=int32)>
Args:
start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
is not None; otherwise, acts as range limit and first entry defaults to 0.
limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
defaults to the value of `start` while the first entry of the range
defaults to 0.
delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
1.
dtype: The type of the elements of the resulting tensor.
name: A name for the operation. Defaults to "range".
Returns:
An 1-D `Tensor` of type `dtype`.
@compatibility(numpy)
Equivalent to np.arange
@end_compatibility
"""
if limit is None:
start, limit = 0, start
with ops.name_scope(name, "Range", [start, limit, delta]) as name:
if not isinstance(start, ops.Tensor):
start = ops.convert_to_tensor(start, dtype=dtype, name="start")
if not isinstance(limit, ops.Tensor):
limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
if not isinstance(delta, ops.Tensor):
delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
# infer dtype if not explicitly provided
if dtype is None:
dtype_hierarchy = [
dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
]
assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
key=dtype_hierarchy.index)
else:
inferred_dtype = dtype
# Always try to perform a cast even when start/limit/delta are already
# tensors. This will resolve the case where start/limit/delta's original's
# dtype is different from provided dtype.
start = cast(start, inferred_dtype)
limit = cast(limit, inferred_dtype)
delta = cast(delta, inferred_dtype)
return gen_math_ops._range(start, limit, delta, name=name)
def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  """Converts a builtin `range` object into a 1-D integer tensor."""
  del as_ref  # Unused; ranges are never converted by reference.
  return range(value.start, value.stop, value.step, dtype=dtype, name=name)
# Python 3 only: in Python 2 `range` is a function returning a list, not a
# type, so there is nothing to register a conversion for.
if not six.PY2:
  ops.register_tensor_conversion_function(builtins.range,
                                          _range_tensor_conversion_function)
# Reduction operations
def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None."""
  if axis is not None:
    return axis
  x_rank = None
  if isinstance(x, ops.Tensor):
    x_rank = x.shape.rank
  elif (isinstance(x, sparse_tensor.SparseTensor) and
        x.dense_shape.shape.is_fully_defined()):
    x_rank = x.dense_shape.shape.dims[0].value  # sparse.dense_shape is 1-D.
  # Fast path: avoid creating Rank and Range ops if ndims is known.
  # Fix: use an explicit None check (rather than truthiness) so rank-0
  # (scalar) inputs also take the constant fast path instead of building
  # Rank/Range ops; both paths yield an empty axis list for scalars.
  if x_rank is not None:
    return constant_op.constant(np.arange(x_rank, dtype=np.int32))
  # Otherwise, we rely on Range and Rank to do the right thing at run-time.
  return range(0, array_ops.rank(x))
def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  if isinstance(tensor, ops.EagerTensor):
    # Eager tensors always have concrete shapes.
    return True
  return tensor.shape.is_fully_defined()
def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  # Reducing over every axis without keepdims is the only case where the
  # result is statically known to be a scalar.
  reduces_everything = axis is None and not keepdims
  if reduces_everything and not _has_fully_defined_shape(output):
    output.set_shape(())
  return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_sum_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the sum of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.add` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> # x has a shape of (2, 3) (two rows and three columns):
>>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
>>> x.numpy()
array([[1, 1, 1],
[1, 1, 1]], dtype=int32)
>>> # sum all the elements
>>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
>>> tf.reduce_sum(x).numpy()
6
>>> # reduce along the first dimension
>>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> tf.reduce_sum(x, 0).numpy()
array([2, 2, 2], dtype=int32)
>>> # reduce along the second dimension
>>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
>>> tf.reduce_sum(x, 1).numpy()
array([3, 3], dtype=int32)
>>> # keep the original dimensions
>>> tf.reduce_sum(x, 1, keepdims=True).numpy()
array([[3],
[3]], dtype=int32)
>>> # reduce along both dimensions
>>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
>>> # or, equivalently, reduce along rows, then reduce the resultant array
>>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
>>> # 2 + 2 + 2 = 6
>>> tf.reduce_sum(x, [0, 1]).numpy()
6
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor)]`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
@compatibility(numpy)
Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
int64 while tensorflow returns the same dtype as the input.
@end_compatibility
"""
return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
_ReductionDims(input_tensor, axis))
def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Sums `input_tensor` over the already-resolved reduction dims.

  `dims` holds the concrete reduction dimensions (see `_ReductionDims`);
  `axis` is the caller's original argument, used only by
  `_may_reduce_to_scalar` to decide whether the result is statically a
  scalar.
  """
  # Treat `keepdims=None` the same as False.
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))
@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the Euclidean norm of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
tf.math.reduce_euclidean_norm(x) # returns 4 as dtype is tf.int32
y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
tf.math.reduce_euclidean_norm(y) # returns 4.1231055 which is sqrt(17)
tf.math.reduce_euclidean_norm(y, 0) # [sqrt(2), sqrt(5), sqrt(10)]
tf.math.reduce_euclidean_norm(y, 1) # [sqrt(14), sqrt(3)]
tf.math.reduce_euclidean_norm(y, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]]
tf.math.reduce_euclidean_norm(y, [0, 1]) # sqrt(17)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor.
"""
keepdims = bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.euclidean_norm(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
@deprecation.deprecated_args(
None, "reduction_indices is deprecated, use axis instead",
"reduction_indices")
def count_nonzero(input_tensor=None,
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None,
reduction_indices=None,
keep_dims=None,
input=None): # pylint: disable=redefined-builtin
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.math.count_nonzero(x) # 3
tf.math.count_nonzero(x, 0) # [1, 2, 0]
tf.math.count_nonzero(x, 1) # [1, 2]
tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.math.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is already considered as nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
`string`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
input: Overrides input_tensor. For compatibility.
Returns:
The reduced tensor (number of nonzero values).
"""
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
input_tensor = deprecation.deprecated_argument_lookup("input", input,
"input_tensor",
input_tensor)
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
@dispatch.add_dispatch_support
def count_nonzero_v2(
input, # pylint: disable=redefined-builtin
axis=None,
keepdims=None,
dtype=dtypes.int64,
name=None):
"""Computes number of nonzero elements across dimensions of a tensor.
Reduces `input` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
**NOTE** Floating point comparison to zero is done by exact floating point
equality check. Small values are **not** rounded to zero for purposes of
the nonzero check.
For example:
```python
x = tf.constant([[0, 1, 0], [1, 1, 0]])
tf.math.count_nonzero(x) # 3
tf.math.count_nonzero(x, 0) # [1, 2, 0]
tf.math.count_nonzero(x, 1) # [1, 2]
tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
tf.math.count_nonzero(x, [0, 1]) # 3
```
**NOTE** Strings are compared against zero-length empty string `""`. Any
string with a size greater than zero is already considered as nonzero.
For example:
```python
x = tf.constant(["", "a", " ", "b", ""])
tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
```
Args:
input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input), rank(input))`.
keepdims: If true, retains reduced dimensions with length 1.
dtype: The output dtype; defaults to `tf.int64`.
name: A name for the operation (optional).
Returns:
The reduced tensor (number of nonzero values).
"""
if keepdims is None:
keepdims = False
with ops.name_scope(name, "count_nonzero", [input]):
input = ops.convert_to_tensor(input, name="input")
# A scalar of 'zero' is enough as `not_equal` will broadcast.
zero = array_ops.zeros([], dtype=input.dtype)
return cast(
reduce_sum(
# int64 reduction happens on GPU
cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
axis=axis,
keepdims=keepdims),
dtype=dtype)
@tf_export(v1=["math.reduce_mean", "reduce_mean"])
@dispatch.add_dispatch_support
def reduce_mean_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis` by computing the
mean of elements across the dimensions in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a tensor with a single
element is returned.
For example:
>>> x = tf.constant([[1., 1.], [2., 2.]])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.5>
>>> tf.reduce_mean(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
>>> tf.reduce_mean(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
>>> x = tf.constant([1, 0, 1, 0])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=int32, numpy=0>
>>> y = tf.constant([1., 0., 1., 0.])
>>> tf.reduce_mean(y)
<tf.Tensor: shape=(), dtype=float32, numpy=0.5>
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_mean(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis` by computing the
mean of elements across the dimensions in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a tensor with a single
element is returned.
For example:
>>> x = tf.constant([[1., 1.], [2., 2.]])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.5>
>>> tf.reduce_mean(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
>>> tf.reduce_mean(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.mean
Please note that `np.mean` has a `dtype` parameter that could be used to
specify the output type. By default this is `dtype=float64`. On the other
hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
for example:
>>> x = tf.constant([1, 0, 1, 0])
>>> tf.reduce_mean(x)
<tf.Tensor: shape=(), dtype=int32, numpy=0>
>>> y = tf.constant([1., 0., 1., 0.])
>>> tf.reduce_mean(y)
<tf.Tensor: shape=(), dtype=float32, numpy=0.5>
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.mean(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export("math.reduce_variance")
@dispatch.add_dispatch_support
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the variance of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_variance(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.25>
>>> tf.math.reduce_variance(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
>>> tf.math.reduce_variance(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>
Args:
input_tensor: The tensor to reduce. Should have real or complex type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor. Note, for
`complex64` or `complex128` input, the returned `Tensor` will be of type
`float32` or `float64`, respectively.
@compatibility(numpy)
Equivalent to np.var
Please note `np.var` has a `dtype` parameter that could be used to specify the
output type. By default this is `dtype=float64`. On the other hand,
`tf.math.reduce_variance` has aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_variance"
with ops.name_scope(name):
means = reduce_mean(input_tensor, axis=axis, keepdims=True)
if means.dtype.is_integer:
raise TypeError("Input must be either real or complex")
diff = input_tensor - means
if diff.dtype.is_complex:
# For complex values we need to take the absolute value before squaring.
# This is achieved by multiplying with the conjugate.
real_dtype = diff.dtype.real_dtype
squared_deviations = gen_math_ops.real(
gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
else:
squared_deviations = gen_math_ops.square(diff)
return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
@tf_export("math.reduce_std")
@dispatch.add_dispatch_support
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the standard deviation of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_std(x)
<tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
>>> tf.math.reduce_std(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
>>> tf.math.reduce_std(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have real or complex type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).
Returns:
The reduced tensor, of the same dtype as the input_tensor. Note, for
`complex64` or `complex128` input, the returned `Tensor` will be of type
`float32` or `float64`, respectively.
@compatibility(numpy)
Equivalent to np.std
Please note `np.std` has a `dtype` parameter that could be used to specify the
output type. By default this is `dtype=float64`. On the other hand,
`tf.math.reduce_std` has aggressive type inference from `input_tensor`.
@end_compatibility
"""
name = name if name else "reduce_std"
with ops.name_scope(name):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return gen_math_ops.sqrt(variance)
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
entry in `axis`. If `keepdims` is true, the reduced dimensions
are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.prod(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_prod_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.multiply` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.multiply` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.math.reduce_prod(x)
<tf.Tensor: shape=(), dtype=float32, numpy=24.>
>>> tf.math.reduce_prod(x, 0)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
>>> tf.math.reduce_prod(x, 1)
<tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
dtype=float32)>
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_prod(input_tensor, axis, keepdims, name)
@tf_export(v1=["math.reduce_min", "reduce_min"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_min_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-5>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_min(x)
<tf.Tensor: shape=(), dtype=float32, numpy=-inf>
See the numpy docs for `np.amin` and `np.nanmin` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_min(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the `tf.math.minimum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.minimum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> a = tf.constant([
... [[1, 2], [3, 4]],
... [[1, 2], [3, 4]]
... ])
>>> tf.reduce_min(a)
<tf.Tensor: shape=(), dtype=int32, numpy=1>
Choosing a specific axis returns minimum element in the given axis:
>>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tf.reduce_min(b, axis=0)
<tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
>>> tf.reduce_min(b, axis=1)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>
Setting `keepdims` to `True` retains the dimension of `input_tensor`:
>>> tf.reduce_min(a, keepdims=True)
<tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
>>> tf.math.reduce_min(a, axis=0, keepdims=True)
<tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
array([[[1, 2],
[3, 4]]], dtype=int32)>
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._min(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_max", "reduce_max"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_max_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
See the numpy docs for `np.amax` and `np.nanmax` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_max(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.maximum` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.maximum` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
Usage example:
>>> x = tf.constant([5, 1, 2, 4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=5>
>>> x = tf.constant([-5, -1, -2, -4])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=int32, numpy=-1>
>>> x = tf.constant([4, float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('nan'), float('nan')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=nan>
>>> x = tf.constant([float('-inf'), float('inf')])
>>> tf.reduce_max(x)
<tf.Tensor: shape=(), dtype=float32, numpy=inf>
See the numpy docs for `np.amax` and `np.nanmax` behavior.
Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return reduce_max_with_dims(input_tensor, axis, keepdims, name,
_ReductionDims(input_tensor, axis))
def reduce_max_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Computes `reduce_max` given an already-computed reduction-dims tensor.

  Internal helper shared by `reduce_max` and `reduce_logsumexp` so the
  reduction dims are only materialized once per caller.
  """
  # Normalize keepdims (None counts as False) before handing it to the kernel.
  keepdims = bool(keepdims) if keepdims is not None else False
  max_op = gen_math_ops._max(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, max_op)
@tf_export(v1=["math.reduce_all", "reduce_all"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_all_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_all(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_all", "reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_and` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_and` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.math.reduce_all(x)
<tf.Tensor: shape=(), dtype=bool, numpy=False>
>>> tf.math.reduce_all(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
>>> tf.math.reduce_all(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._all(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_any", "reduce_any"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_any_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_any(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes `tf.math.logical_or` of elements across dimensions of a tensor.
This is the reduction operation for the elementwise `tf.math.logical_or` op.
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` is None, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
>>> x = tf.constant([[True, True], [False, False]])
>>> tf.reduce_any(x)
<tf.Tensor: shape=(), dtype=bool, numpy=True>
>>> tf.reduce_any(x, 0)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
>>> tf.reduce_any(x, 1)
<tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = False if keepdims is None else bool(keepdims)
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._any(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))
@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_logsumexp_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for `keepdims`.
Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_logsumexp(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
@dispatch.add_dispatch_support
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).
Reduces `input_tensor` along the dimensions given in `axis`.
Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
of the entries in `axis`, which must be unique. If `keepdims` is true, the
reduced dimensions are retained with length 1.
If `axis` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.
For example:
```python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x) # log(6)
tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1]) # log(6)
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If `None` (the default), reduces all
dimensions. Must be in the range `[-rank(input_tensor),
rank(input_tensor))`.
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
keepdims = False if keepdims is None else keepdims
input_tensor = ops.convert_to_tensor(input_tensor)
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
reduce_dim = _ReductionDims(input_tensor, axis)
raw_max = reduce_max_with_dims(
input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
my_max = array_ops.stop_gradient(
gen_math_ops.select(
gen_math_ops.is_finite(raw_max), raw_max,
gen_array_ops.zeros_like(raw_max)))
result = gen_math_ops.log(
reduce_sum_with_dims(
gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
axis=axis,
keepdims=keepdims,
dims=reduce_dim))
if not keepdims:
my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
result = _add_dispatch(result, my_max, name=name)
return _may_reduce_to_scalar(keepdims, axis, result)
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("trace")
@dispatch.add_dispatch_support
def trace(x, name=None):
"""Compute the trace of a tensor `x`.
`trace(x)` returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
`output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
For example:
```python
x = tf.constant([[1, 2], [3, 4]])
tf.linalg.trace(x) # 5
x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.linalg.trace(x) # 15
x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.linalg.trace(x) # [15, -15]
```
Args:
x: tensor.
name: A name for the operation (optional).
Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           output_type=None,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication dimensions,
  and any further outer dimensions specify matching batch size.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  A simple 2-D tensor matrix multiplication:

  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  >>> a  # 2-D tensor
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>
  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  >>> b  # 2-D tensor
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[ 7,  8],
         [ 9, 10],
         [11, 12]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 58,  64],
         [139, 154]], dtype=int32)>

  A batch matrix multiplication with batch shape [2]:

  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> a  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
  >>> b  # 3-D tensor
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[13, 14],
          [15, 16],
          [17, 18]],
         [[19, 20],
          [21, 22],
          [23, 24]]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 94, 100],
          [229, 244]],
         [[508, 532],
          [697, 730]]], dtype=int32)>

  Since python >= 3.5 the @ operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:

  >>> d = a @ b @ [[10], [11]]
  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])

  Args:
    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `tf.Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `a` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `b` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
    output_type: The output datatype if needed. Defaults to None in which case
      the output_type is the same as input type. Currently only works when input
      tensors are type int8 and output_type can be int32.
    name: Name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
    for all indices `i`, `j`.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
      `adjoint_b` are both set to `True`.
    TypeError: If output_type is specified but the types of `a`, `b` and
      `output_type` is not int8, int8 and int32.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # transpose and adjoint both permute the last two axes; requesting both
    # on the same operand is ambiguous, so it is rejected up front.
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")

    # Under eager execution, skip conversion for values that are already
    # eager tensors or resource variables to avoid a redundant copy.
    if context.executing_eagerly():
      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
        a = ops.convert_to_tensor(a, name="a")
      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
    else:
      a = ops.convert_to_tensor(a, name="a")
      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")

    # TODO(apassos) remove _shape_tuple here when it is not needed.
    a_shape = a._shape_tuple()  # pylint: disable=protected-access
    b_shape = b._shape_tuple()  # pylint: disable=protected-access

    # If either rank is unknown (None) or > 2, the result may carry batch
    # dimensions, which selects the BatchMatMul family of kernels below.
    output_may_have_non_empty_batch_shape = (
        (a_shape is None or len(a_shape) > 2) or
        (b_shape is None or len(b_shape) > 2))

    # TODO(b/178749687): remove this boolean and all related branches once the
    # bridges are ready.
    # batch_matmul_v3 is for when input type is different from output type.
    use_batch_matmul_v3 = False
    if output_type and (output_type != a.dtype or output_type != b.dtype):
      use_batch_matmul_v3 = True

    if (not a_is_sparse and
        not b_is_sparse) and output_may_have_non_empty_batch_shape:
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      if use_batch_matmul_v3:
        return gen_math_ops.batch_mat_mul_v3(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
      else:
        return gen_math_ops.batch_mat_mul_v2(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)

    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True

    # SparseMatMul only handles bfloat16/float32; fall back to dense kernels
    # for other dtypes even when a sparse hint was given.
    use_sparse_matmul = False
    if a_is_sparse or b_is_sparse:
      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
      use_sparse_matmul = (
          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
    if (((a.dtype == dtypes.bfloat16 and b.dtype != dtypes.int8) or
         (b.dtype == dtypes.bfloat16 and a.dtype != dtypes.int8)) and
        a.dtype != b.dtype):
      # matmul currently doesn't handle mixed-precision inputs other than
      # fp16 * int8 which is supported in BatchMatMulV3.
      use_sparse_matmul = True
    if use_sparse_matmul:
      ret = sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
      # sparse_matmul always returns float32, even with
      # bfloat16 inputs. This prevents us from configuring bfloat16 training.
      # casting to bfloat16 also matches non-sparse matmul behavior better.
      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
        ret = cast(ret, dtypes.bfloat16)
      return ret
    else:
      if use_batch_matmul_v3:
        # BatchMatMulV3 only exposes adjoint flags; by this point any
        # transpose request refers to a real matrix (conj applied above),
        # so adjoint and transpose are interchangeable.
        adjoint_a = adjoint_a or transpose_a
        adjoint_b = adjoint_b or transpose_b
        return gen_math_ops.batch_mat_mul_v3(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
      else:
        return gen_math_ops.mat_mul(
            a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
@tf_export("linalg.matvec")
@dispatch.add_dispatch_support
def matvec(a,
           b,
           transpose_a=False,
           adjoint_a=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.

  After any requested transposition, `a` must be a tensor of rank >= 2 with
  `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` must broadcast with
  `shape(b)[:-1]`. Both arguments must share one of the supported dtypes:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Matrix `a` can be transposed or adjointed (conjugated and transposed) on
  the fly via the corresponding flags, which default to `False`.

  If one or both of the inputs contain a lot of zeros, setting the
  corresponding `a_is_sparse` or `b_is_sparse` flag to `True` (default
  `False`) enables a more efficient multiplication algorithm; this only
  applies to plain matrices/vectors (rank-2/1 tensors) with datatypes
  `bfloat16` or `float32`.

  For example:

  ```python
  # 2-D tensor `a`: [[1, 2, 3], [4, 5, 6]]
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  # 1-D tensor `b`: [7, 9, 11]
  b = tf.constant([7, 9, 11], shape=[3])
  c = tf.linalg.matvec(a, b)  # [58, 139]

  # 3-D tensor `a` of shape [2, 2, 3] and 2-D tensor `b` of shape [2, 3]
  a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  b = tf.constant(np.arange(13, 19, dtype=np.int32), shape=[2, 3])
  c = tf.linalg.matvec(a, b)  # [[86, 212], [410, 563]]
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type as `a` and compatible dimensions.
    transpose_a: If `True`, `a` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b` where each inner-most vector is
    the matrix-vector product of the corresponding matrix in `a` and vector in
    `b`: `output[..., i] = sum_k (a[..., i, k] * b[..., k])`.

    Note: This is matrix-vector product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a are both set to True.
  """
  with ops.name_scope(name, "MatVec", [a, b]) as name:
    # Promote `b` to a single-column matrix, reuse matmul, then drop the
    # trailing column axis from the result.
    column = array_ops.expand_dims(b, axis=-1)
    product = matmul(
        a,
        column,
        transpose_a=transpose_a,
        adjoint_a=adjoint_a,
        a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse)
    return array_ops.squeeze(product, axis=-1)
# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
# functions (e.g. tf.add).
def matmul_wrapper(a, b, name=None):  # pylint: disable=missing-function-docstring
  # When numpy-style promotion is off, defer to the regular matmul; otherwise
  # route through the tensor's own promoting implementation.
  if not ops._numpy_style_type_promotion:
    return matmul(a, b, name=name)
  return a._matmul(b)


matmul_wrapper.__doc__ = matmul.__doc__
_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")

sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
    gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul.

  Fix: removed a stray `@dispatch.add_dispatch_support` decorator. This is an
  internal statistics callback, not a dispatched public API; applied above
  `@ops.RegisterStatistics` it only wrapped the module-level name while the
  registered callback remained the undecorated function, so it had no effect.

  Args:
    graph: Graph containing the MatMul node.
    node: NodeDef of the MatMul op; its input and output shapes must be fully
      defined.

  Returns:
    An `OpStats` with the flop count `2 * k * output_count`, where `k` is the
    contracted dimension and each output element costs one multiply and one
    add per contracted entry.
  """
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # The contracted dimension k is a's row count when transposed, else its
  # column count.
  if transpose_a:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("BatchMatMul", "flops")
@ops.RegisterStatistics("BatchMatMulV2", "flops")
@ops.RegisterStatistics("BatchMatMulV3", "flops")
def _calc_batch_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for BatchMatMul."""
  transposed = node.attr["transpose_a"].b
  lhs_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  lhs_shape.assert_is_fully_defined()
  # Contracted dimension: second-to-last axis when transposed, last otherwise.
  k = int(lhs_shape[-2] if transposed else lhs_shape[-1])
  out_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  out_shape.assert_is_fully_defined()
  num_outputs = np.prod(out_shape.as_list())
  # One multiply plus one add per contracted entry, per output element.
  return ops.OpStats("flops", (k * num_outputs * 2))
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  A dense Tensor is converted to a block-sparse IndexedSlices covering every
  row; an IndexedSlices input is passed through unchanged.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  # NOTE: `range` here is this module's TF range op (it shadows the builtin),
  # producing indices [0, num_rows) as a tensor.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  converted = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  int32_count = sum(
      1 for o in converted if o.indices.dtype == dtypes.int32)
  # Already homogeneous (all int64 or all int32): nothing to do.
  if int32_count == 0 or int32_count == len(converted):
    return converted
  # Mixed index dtypes: promote every int32 index tensor to int64.
  return [
      ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64), o.dense_shape)
      if o.indices.dtype == dtypes.int32 else o
      for o in converted
  ]
@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but
  it waits for all of its inputs to be ready before beginning to sum. This
  buffering can result in higher memory consumption when inputs are ready at
  different times, since the minimum temporary storage required is
  proportional to the input size rather than the output size.

  This op does not [broadcast](
  https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
  its inputs. If you need broadcasting, use `tf.math.add` (or the `+`
  operator) instead.

  For example:

  >>> a = tf.constant([[3, 5], [4, 8]])
  >>> b = tf.constant([[1, 6], [2, 9]])
  >>> tf.math.add_n([a, b, a])
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 7, 16],
         [10, 25]], dtype=int32)>

  Args:
    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
      same shape and type. `tf.IndexedSlices` objects will be converted into
      dense tensors prior to adding.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  # Reject empty or non-iterable input before any conversion.
  if not inputs or not isinstance(inputs, collections_abc.Iterable):
    raise ValueError("inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
    raise ValueError("inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")

  if len(inputs) == 1:
    # A single input needs no AddN op; densify IndexedSlices and, if a name
    # was requested, attach it via an Identity.
    only_input = inputs[0]
    if isinstance(only_input, ops.IndexedSlices):
      only_input = ops.convert_to_tensor(only_input)
    if name:
      return array_ops.identity(only_input, name=name)
    return only_input
  return gen_math_ops.add_n(inputs, name=name)
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
  otherwise, these are inferred. `accumulate_n` performs the same operation as
  `tf.math.add_n`.

  For example:

  ```python
  a = tf.constant([[1, 2], [3, 4]])
  b = tf.constant([[5, 0], [0, 6]])
  tf.math.accumulate_n([a, b, a])  # [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
                                                                 # [[7,  4],
                                                                 #  [6, 14]]
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Expected shape of elements of `inputs` (optional). Also controls the
      output shape of this op, which may affect type inference in other ops. A
      value of `None` means "infer the input shape from the shapes in `inputs`".
    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
      means "infer the input dtype from `inputs[0]`".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """

  def _input_error():
    return ValueError("inputs must be a list of at least one Tensor with the "
                      "same dtype and shape")

  if not inputs or not isinstance(inputs, (list, tuple)):
    raise _input_error()
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise _input_error()
  first_dtype = inputs[0].dtype
  if any(x.dtype != first_dtype for x in inputs):
    raise _input_error()
  # Merge the caller-supplied shape (if any) with every input's static shape.
  merged_shape = (
      tensor_shape.as_shape(shape)
      if shape is not None else tensor_shape.unknown_shape())
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      merged_shape = merged_shape.merge_with(input_tensor.get_shape())
  # tensor_dtype is for safety only; operator's output type computed in C++
  if tensor_dtype is not None and tensor_dtype != first_dtype:
    raise TypeError("tensor_dtype is {}, but input is of type {}".format(
        tensor_dtype, first_dtype))
  if len(inputs) == 1:
    # Single input: return it directly, via Identity when a name is wanted.
    if name is None:
      return inputs[0]
    return array_ops.identity(inputs[0], name=name)
  return add_n(inputs, name=name)
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
  """Same as gradient for AddN. Copies the gradient to all inputs."""
  # AccumulateNV2 does not broadcast, so every input receives the incoming
  # gradient unchanged.
  return [grad for _ in op.inputs]
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
@dispatch.add_dispatch_support
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.

  Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.

  For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$. Large
  positive inputs saturate towards 1 and large negative inputs towards 0:

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([0.5      , 0.7310586, 1.       , 1.       ], dtype=float32)>

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
        dtype=float32)>

  Args:
    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as scope_name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.sigmoid(input_tensor, name=scope_name)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
  we use `y = -tf.nn.softplus(-x)`.

  Large positive inputs give values approaching 0 (since `log(1) == 0`), and
  large negative inputs approach the input itself:

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
        dtype=float32)>

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.log_sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([-100.       ,  -50.       ,   -1.3132616,   -0.6931472],
        dtype=float32)>

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as scope_name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    # log(sigmoid(x)) == -softplus(-x), which avoids overflow in exp().
    return gen_math_ops.neg(
        gen_nn_ops.softplus(-input_tensor), name=scope_name)
@tf_export("math.cumsum", "cumsum")
@dispatch.add_dispatch_support
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default the op is inclusive, so the first output element equals the
  first input element:

  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 2,  6, 12, 20], dtype=int32)>

  The axis of accumulation can be chosen for multi-dimensional inputs:

  >>> y = tf.constant([[2, 4, 6, 8], [1, 3, 5, 7]])
  >>> tf.cumsum(y, axis=0)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  4,  6,  8],
         [ 3,  7, 11, 15]], dtype=int32)>
  >>> tf.cumsum(y, axis=1)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  6, 12, 20],
         [ 1,  4,  9, 16]], dtype=int32)>

  Setting `exclusive=True` shifts the result so each element excludes itself:

  >>> # tf.cumsum([a, b, c], exclusive=True)  # [0, a, a + b]
  >>> tf.cumsum(tf.constant([2, 4, 6, 8]), exclusive=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 0,  2,  6, 12], dtype=int32)>

  Setting `reverse=True` accumulates in the opposite direction — this is more
  efficient than using separate `tf.reverse` ops — and the two kwargs can be
  combined:

  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
  >>> tf.cumsum(tf.constant([2, 4, 6, 8]), reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([20, 18, 14,  8], dtype=int32)>
  >>> tf.cumsum(tf.constant([2, 4, 6, 8]), exclusive=True, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([18, 14,  8,  0], dtype=int32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as scope_name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        input_tensor, axis, exclusive=exclusive, reverse=reverse,
        name=scope_name)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative product of the tensor `x` along `axis`.

  By default the op is inclusive, so the first output element equals the
  first input element:

  ```python
  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
  ```

  Setting `exclusive=True` shifts the result so each element excludes itself:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
  ```

  Setting `reverse=True` accumulates in the opposite direction — this is more
  efficient than using separate `tf.reverse` ops:

  ```python
  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
  ```

  The `reverse` and `exclusive` kwargs can also be combined:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as scope_name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        input_tensor, axis, exclusive=exclusive, reverse=reverse,
        name=scope_name)
@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumulative log-sum-exp, which
  means that the first element of the input is identical to the first element
  of the output.

  This operation is significantly more numerically stable than the equivalent
  tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
  it computes the same result given infinite numerical precision. However,
  note that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
  for a given element, as it applies the "log-sum-exp trick" in a different
  way.

  More precisely, where `tf.math.reduce_logsumexp` uses the following trick:

  ```
  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
  ```

  it cannot be directly used here as there is no fast way of applying it
  to each prefix `x[:i]`. Instead, this function implements a prefix
  scan using pairwise log-add-exp, which is a commutative and associative
  (up to floating point precision) operator:

  ```
  log_add_exp(x, y) = log(exp(x) + exp(y))
                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
  ```

  However, reducing using the above operator leads to a different computation
  tree (logs are taken repeatedly instead of only at the end), and the maximum
  is only computed pairwise instead of over the entire prefix. In general, this
  leads to a different and slightly less precise computation.

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as scope_name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        input_tensor, axis, exclusive=exclusive, reverse=reverse,
        name=scope_name)
@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  complex numbers that are the complex conjugate of each element in `x`. The
  complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
  real part and `b` is the imaginary part; the returned value is of the form
  \\(a - bj\\).

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  If `x` is real, it is returned unchanged.

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate.  Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  # Fast path: a real (floating/integer) Tensor is its own conjugate; avoid
  # creating a name scope or any op at all.
  if isinstance(x, ops.Tensor):
    dt = x.dtype
    if dt.is_floating or dt.is_integer:
      return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    dtype = x.dtype
    if dtype.is_complex or dtype == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    if dtype.is_floating or dtype.is_integer:
      return x
    raise TypeError("Expected numeric or variant tensor, got dtype %r" % dtype)
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
  """
  # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
  # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
  # is fully defined here, which may be faster executing eagerly than running
  # `tf.shape` and then fetching its constant value.
  constant_input_shape = tensor_util.constant_value(input_shape)
  if constant_input_shape is not None:
    constant_axes = tensor_util.constant_value(axes)
    if constant_axes is not None:
      # Fast path: both the shape and the axes are statically known, so the
      # reduced shape can be computed in numpy without emitting any ops.
      constant_axes = np.array(constant_axes, dtype=np.int32)
      constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
      # Set each reduced dimension's size to 1. Negative axes index from the
      # end via numpy's indexing rules.
      constant_input_shape[constant_axes] = 1
      return constant_input_shape

  # Symbolic path. The inline comments trace a running example.
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
  axes = cast(axes, dtypes.int32)  # [1, 2]

  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axes into [0, input_rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch together the original dimension sizes with ones at the reduced
  # positions (later indices win in dynamic_stitch, so the ones overwrite).
  # NOTE(review): `range` below is this module's TF range op (it shadows the
  # builtin), producing a tensor of indices [0, input_rank).
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.ones(axes_shape, dtype=dtypes.int32)
      ])  # [1, 1]
def _unsorted_segment_N(data, segment_ids, num_segments):
  """ Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number
  of segment entries with 0-entries set to 1 to allow division by N.
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum
  segment_ids_shape = array_ops.shape_internal(segment_ids)
  ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
  # Summing a tensor of ones per segment yields each segment id's entry count.
  n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
  # add dimensions for all non-reduced axes
  # Target shape is [num_segments, 1, 1, ...] so the counts broadcast against
  # the segment-summed data, which keeps data's trailing (non-segment) axes.
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis],
       array_ops.ones([array_ops.rank(data)
                       - array_ops.rank(segment_ids)],
                      dtype=num_segments.dtype)],
      axis=0)
  n = array_ops.reshape(n, broadcastable_shape)
  # Clamp empty segments to a count of 1 so dividing by N never divides by 0.
  return gen_math_ops.maximum(n, 1)
@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  # Fix: `@dispatch.add_dispatch_support` was previously applied twice
  # (redundant double wrap); a single application suffices. Also repaired the
  # docstring's malformed LaTeX delimiters (`\\N_i\\` -> `\\(N_i\\)`).
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # N is each segment's entry count clamped to >= 1, reshaped to broadcast
    # against the summed data, so empty segments keep their summed value of 0.
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / N
@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Additionally to computing the sum over segments, it divides the results by
  sqrt(N).
  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the
  number of occurrences of id \\i\\.
  If there is no entry for a given segment ID `i`, it outputs 0.
  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.
  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.
  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  # Fix: the original applied @dispatch.add_dispatch_support twice, wrapping
  # the function in the dispatch machinery two times; one application suffices.
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # N broadcasts against `summed`: shape [num_segments, 1, ..., 1].
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / gen_math_ops.sqrt(N)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
                       indices,
                       segment_ids,
                       name=None,
                       num_segments=None):
  r"""Computes the sum along sparse segments of a tensor.
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
  For example:
  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]
  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]
  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]
  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]
  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Dispatch to the op variant that takes an explicit output size only when
  # `num_segments` is supplied; otherwise the size is inferred from
  # `segment_ids`.
  if num_segments is None:
    return gen_math_ops.sparse_segment_sum(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_sum_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
                          indices,
                          segment_ids,
                          num_segments=None,
                          name=None):
  r"""Computes the sum along sparse segments of a tensor.
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
  first dimension, selecting a subset of dimension 0, specified by `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
  For example:
  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]
  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]
  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]
  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]
  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Identical to the v1 endpoint; only the keyword-argument order differs.
  return sparse_segment_sum(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
                        indices,
                        segment_ids,
                        name=None,
                        num_segments=None):
  r"""Computes the mean along sparse segments of a tensor.
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Dispatch to the op variant that takes an explicit output size only when
  # `num_segments` is supplied; otherwise the size is inferred from
  # `segment_ids`.
  if num_segments is None:
    return gen_math_ops.sparse_segment_mean(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_mean_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
                           indices,
                           segment_ids,
                           num_segments=None,
                           name=None):
  r"""Computes the mean along sparse segments of a tensor.
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.
  `segment_ids` is allowed to have missing ids, in which case the output will
  be zeros at those indices. In those cases `num_segments` is used to determine
  the size of the output.
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Identical to the v1 endpoint; only the keyword-argument order differs.
  return sparse_segment_mean(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
                          indices,
                          segment_ids,
                          name=None,
                          num_segments=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
  `N` is the size of the segment being reduced.
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Dispatch to the op variant that takes an explicit output size only when
  # `num_segments` is supplied; otherwise the size is inferred from
  # `segment_ids`.
  if num_segments is None:
    return gen_math_ops.sparse_segment_sqrt_n(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
                             indices,
                             segment_ids,
                             num_segments=None,
                             name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.
  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divide by `sqrt(N)` instead.
  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).
  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Identical to the v1 endpoint; only the keyword-argument order differs.
  return sparse_segment_sqrt_n(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes and outer product.
  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.
  The lists `axes[0]` and `axes[1]` specify those pairs of axes along which to
  contract the tensors. The axis `axes[0][i]` of `a` must have the same
  dimension as axis `axes[1][i]` of `b` for all `i` in
  `range(0, len(axes[0]))`. The lists
  `axes[0]` and `axes[1]` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors. Additionally
  outer product is supported by passing `axes=0`.
  This operation corresponds to `numpy.tensordot(a, b, axes)`.
  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.
  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.
  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.
  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:
  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` with the same type as `a`.
  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.
    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.
    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.
    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    # Fully static case: both the shape of `a` and the contraction axes are
    # known Python values, so all bookkeeping is done with plain ints and the
    # transpose/reshape ops can be elided when they would be no-ops.
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      shape_a = a.get_shape().as_list()
      # Normalize negative axis indices.
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      # For the right-hand matmul operand (`flipped`), contracted axes come
      # first so the matmul reduces along its leading dimension.
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      # Skip the transpose when `perm` is the identity permutation.
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      # Skip the reshape when `a_trans` already has the target shape.
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      # Partially static case: the rank and the axes are known Python values,
      # but some dimensions may be None; record the static free dims and fall
      # back to tensors for the dynamic quantities.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        # Fully dynamic case: neither rank nor axes are known statically.
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        # Normalize negative axis indices.
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        # The free axes are all axes of `a` not listed in `axes`.
        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    # Scalar `axes`: contract the last `axes` dims of `a` against the first
    # `axes` dims of `b`.
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError("'axes' must be at least 0.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError("'axes' must not be larger than the number of "
                           "dimensions of tensor %s." % a)
        return (list(xrange(a_shape.ndims - axes,
                            a_shape.ndims)), list(xrange(axes)))
      else:
        # Rank unknown at graph time: build the axis ranges as tensors.
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      # Allow a pair of scalars as shorthand for single-axis contraction.
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s." %
            (len(a_axes), len(b_axes)))
      return a_axes, b_axes
    else:
      # `axes` is a [2, k] tensor: row 0 indexes `a`, row 1 indexes `b`.
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    # The contraction reduces to a 2-D matmul of the reshaped operands.
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      # Static free dims: elide the final reshape when matmul already produced
      # the right shape.
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      # Attach whatever static shape information is available.
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.
  If `x` is a tensor and `coeffs` is a list n + 1 tensors,
  this function returns the value of the n-th order polynomial
  `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)`
  evaluated using Horner's method, i.e.
  ```python
  p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```
  Usage Example:
  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>
  Usage Example:
  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>
  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.
  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>
  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).
  Returns:
    A `tensor` of the shape as the expression p(x) with usual broadcasting
    rules for element-wise addition and multiplication applied.
  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError("Argument coeffs must be list type "
                     "found {}.".format(type(coeffs)))
  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # An empty coefficient list denotes the zero polynomial.
    if not coeffs:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold coefficients from highest to lowest order.
    result = coeffs[0]
    for coeff in coeffs[1:]:
      result = coeff + result * x
    return result
@tf_export("math.reciprocal_no_nan")
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
  """Computes 1 / x element-wise, returning zero where x is zero.

  Unlike a plain reciprocal, elements equal to zero map to zero instead of
  producing inf/nan.
  For example:
  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```
  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or
      `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor` of same shape and type as `x`.
  Raises:
    TypeError: x must be of a valid dtype.
  """
  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope_name:
    x = ops.convert_to_tensor(x, name="x")
    # div_no_nan(1, x) yields 0 exactly where x == 0.
    numerator = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(numerator, x, name=scope_name)
@tf_export("math.xlog1py")
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y), returning zero whenever x is zero.

  Given `x` and `y`, computes `x * log1p(y)`. The result is safely zero
  when `x = 0`, regardless of the value of `y`.
  Example:
  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).
  Returns:
    `x * log1p(y)`.
  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  with ops.name_scope(name, "xlog1py", [x]):
    return gen_math_ops.xlog1py(x, y)
@tf_export("math.erfinv")
@dispatch.add_dispatch_support
def erfinv(x, name=None):
  """Computes the inverse error function of x element-wise.

  This function is the inverse of `tf.math.erf`.
  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    Inverse error function of `x`.
  """
  with ops.name_scope(name, "erfinv", [x]):
    return gen_math_ops.erfinv(x)
@tf_export("math.ndtri")
@dispatch.add_dispatch_support
def ndtri(x, name=None):
  """Computes the quantile function (inverse CDF) of the standard normal.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    The standard-normal quantile of `x`.
  """
  # Fix: the Returns doc previously said "Inverse error function of `x`",
  # which describes `erfinv`, not this op.
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)
@tf_export("math.erfcinv")
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
  """Computes the inverse of complementary error function.

  Given `x`, compute the inverse complementary error function of `x`.
  This function is the inverse of `tf.math.erfc`, and is defined on
  `[0, 2]`.
  >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
  <tf.Tensor: shape=(5,), dtype=float32, numpy=
  array([       inf,  0.9061935, -0.       , -0.4769363,       -inf],
        dtype=float32)>
  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    Inverse complementary error function of `x`.
  @compatibility(scipy)
  Equivalent to scipy.special.erfcinv
  @end_compatibility
  """
  # Fix: the compatibility tag said "numpy" while the equivalence is with
  # scipy.special.erfcinv.
  with ops.name_scope(name, "erfcinv", [x]):
    # NOTE(review): name="start" looks copied from another op (e.g. linspace);
    # renaming it would change graph node names, so it is left untouched.
    x = ops.convert_to_tensor(x, name="start")
    # erfcinv(x) = -ndtri(x / 2) / sqrt(2).
    return -ndtri(0.5 * x) * np.sqrt(0.5)
@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.
  For example:
  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>
  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  # Fixes: the original applied @dispatch.add_dispatch_support twice (one
  # application suffices), and the type list ended with a garbled ". `int32`"
  # fragment that is not a floating type accepted by the Ceil kernel.
  return gen_math_ops.ceil(x, name)
@tf_export("math.sqrt", "sqrt")
@dispatch.add_dispatch_support
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.
  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[2.],
           [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
    array([[nan],
           [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
    array([[0.0+1.j],
           [4.0+0.j]])>
  Note: In order to support complex type, please provide an input tensor
  of `complex64` or `complex128`.
  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).
  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  """
  # Thin wrapper over the generated kernel binding.
  return gen_math_ops.sqrt(x, name=name)
# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.add_dispatch_support
def exp(x, name=None):
  r"""Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise,
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.
  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>
  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>
  For complex numbers, the exponential value is calculated as
  $$
  e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
  $$
  For `1+1j` the value would be computed as:
  $$
  e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
  $$
  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>
  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  # Thin wrapper over the generated kernel binding.
  return gen_math_ops.exp(x, name=name)
# pylint: enable=g-docstring-has-escape
@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has
  dimension `dim`. The first `skip` samples of the sequence are skipped.
  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
      points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
      initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
      `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this function.
  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
  """
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)
@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.
  For example:
  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>
  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  # Fix: the original applied @dispatch.add_dispatch_support twice, wrapping
  # the function in the dispatch machinery two times; one application suffices.
  return gen_math_ops.rsqrt(x, name)
@tf_export("math.acos", "acos")
@dispatch.add_dispatch_support
def acos(x, name=None):
  """Computes acos of x element-wise.
  Provided an input tensor, the `tf.math.acos` operation
  returns the inverse cosine of each element of the tensor.
  If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.
  Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
  For example:
  >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
  >>> tf.math.acos(x)
  <tf.Tensor: shape=(6,), dtype=float32,
  numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
  dtype=float32)>
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as x.
  """
  # Fix: the docstring's type list previously included `string`, which cannot
  # be a valid input type for an inverse-cosine op.
  # NOTE(review): the remaining integer types are kept as documented by the
  # original — confirm against the generated Acos op's allowed type list.
  return gen_math_ops.acos(x, name)
@tf_export("math.floor", "floor")
@dispatch.add_dispatch_support
def floor(x, name=None):
  """Returns element-wise largest integer not greater than x.

  The input range is `(-inf, inf)` and the
  output range consists of all integer values.
  For example:
  >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
  >>> tf.floor(x).numpy()
  array([ 1., -2.,  5., -3.,  0., inf], dtype=float32)
  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).
  Returns:
    A `Tensor`. Has the same type as x.
  """
  # Thin wrapper over the generated kernel binding.
  return gen_math_ops.floor(x, name=name)
Update function doc to reflect that 'a_axes' and 'b_axes' have been replaced by a single 'axes'.
PiperOrigin-RevId: 365940681
Change-Id: Id160dc2c79880e0b59e46ab28d6a0dce833f6a1f
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.
Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.
Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).
TensorFlow provides a variety of math functions including:
* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)
See: `tf.linalg` for matrix and tensor functions.
<a id=Segmentation></a>
## About Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
# ==> [[0 0 0 0]
# [5 6 7 8]]
```
The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.
``` python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6, 8, 10, 12],
# [-1, -2, -3, -4]]
```
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numbers
import numpy as np
import six
from six.moves import builtins
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Lazily import the numpy-ops dtypes module to avoid a circular import at
# module load time.
np_dtypes = LazyLoader(
    "np_dtypes", globals(),
    "tensorflow.python.ops.numpy_ops.np_dtypes")

# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after  # Public `tf.math.nextafter` alias.
@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
  r"""Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values are generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num <= 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour
  except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  `Start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `Axis` is where the values will be generated (the dimension in the
  returned tensor which corresponds to the axis will be equal to `num`)

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """
  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    # Keep a copy of `num` in start's dtype for the arithmetic below.
    num = cast(num_int, dtype=start.dtype)

    # Broadcast `start` and `stop` to a common shape before inserting `axis`.
    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)
    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    # Normalize a negative `axis` to its non-negative equivalent.
    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)

    # The purpose is to avoid having negative values when repeating.
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # To avoid having negative values in the range or zero division
    # the result is sliced in the end so a correct result is returned for
    # num == 1, and num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors as delta.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, we will throw exception in the range
    # otherwise use the same div for delta
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, its limited
    # (e.g. doesn't support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1. 1. 1. ... 1. num_fill 1. 1. ... 1.], where the
    # index of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    # Interior points: start + k * delta for k = 1 .. num_fill.
    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    # Along `axis` keep exactly `num` values; keep the full extent elsewhere.
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)
# `tf.linspace` is an alias for the N-D implementation above.
linspace = linspace_nd

# Wrap the generated `arg_max`/`arg_min` ops (pulled in by the wildcard
# import) with deprecation notices; they are exported only under v1 names.
arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))

# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None
def _set_doc(doc):
def _decorator(func):
func.__doc__ = doc
return func
return _decorator
# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 compatibility shim: the docstring is patched in from the generated op
  # (with "dimension" renamed to "axis"), the deprecated `dimension` argument
  # is folded into `axis`, and the work is delegated to `argmax_v2`.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)
@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  In case of ties, returns the smallest index.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C) # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  # Doc fix: the summary previously said "In case of identity"; the intended
  # statement (see the example above) is "in case of ties".
  # `None` means "reduce across axis 0", the historical default.
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)
@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 compatibility shim mirroring `argmax` above: patched docstring,
  # deprecated `dimension` folded into `axis`, work delegated to `argmin_v2`.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)
@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Returns the smallest index in case of ties.

  For example:

  >>> a = tf.constant([1, 10, 26.9, 2.8, 166.32, 62.3])
  >>> tf.math.argmin(a)
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`,
      `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`, `uint32`,
      `uint64`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which axis of the input Tensor to reduce across. For vectors,
      use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.
  """
  # Doc fixes: the valid `axis` range was missing its opening bracket
  # ("`-rank(input), ...`"), and the old session-era usage example is replaced
  # by a doctest consistent with `argmax_v2`.
  # `None` means "reduce across axis 0", the historical default.
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)
# pylint: enable=redefined-builtin
# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  For integer or floating-point input this returns a tensor of the same type
  whose elements are the magnitudes of the input elements. For a complex
  input, the result is a real tensor (`float32` or `float64`) holding
  \\(\sqrt{a^2 + b^2}\\) for each element \\(a + bj\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
    with absolute values. Note, for `complex64` or `complex128` input, the
    returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if not x.dtype.is_complex:
      return gen_math_ops._abs(x, name=name)
    # Complex magnitude is emitted as a real tensor of the matching width.
    return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
# pylint: enable=g-docstring-has-escape
# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Thin private wrapper over the generated `bucketize` op."""
  bucketized = gen_math_ops.bucketize(
      input=input, boundaries=boundaries, name=name)
  return bucketized
# pylint: enable=redefined-builtin
class DivideDelegateWithName(object):
  """Wraps a tensor so division operators can carry an explicit op name.

  An instance stands in for the left operand of `/` and `//`; each operator
  overload forwards to the matching division helper together with the
  preferred op name.
  """

  def __init__(self, x, name):
    """Wraps `x` as the left operand of a named division.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __div__(self, y):
    # Python 2 style division.
    return _div_python2(self.x, y, self.name)

  def __truediv__(self, y):
    # Python 3 style (true) division.
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)
@tf_export("math.divide", "divide")
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x,y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`
    y: A `Tensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with same shape as input
  """
  if name is None:
    # Make sure at least the left operand is a tensor so the `/` overload
    # resolves to TensorFlow division.
    if not tensor_util.is_tf_type(x):
      dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
      x = ops.convert_to_tensor(x, dtype=dtype)
    return x / y
  # Tensor operator overloads cannot carry a custom op name, so delegate
  # through a wrapper object that remembers it.
  return DivideDelegateWithName(x, name) / y
@tf_export("math.multiply", "multiply")
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can also
  pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7,6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  When the operand shapes differ, they are broadcast to a compatible shape
  following [numpy broadcasting
  rules](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html):

  >>> x = tf.ones([1, 2]);
  >>> y = tf.ones([2, 1]);
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
       [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has the same type as `x`.

  Raises:
    * InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """
  # Delegate straight to the generated kernel.
  return gen_math_ops.mul(x, y, name=name)
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  # Deprecated alias for `tf.math.multiply`.
  return gen_math_ops.mul(x, y, name)


# Prepend the generated op's docstring so the deprecated alias stays
# documented.
_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))
@tf_export("math.subtract", "subtract")
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  # Element-wise `x - y`; the docstring is copied from the generated op below.
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__
# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  # Deprecated alias for `tf.subtract`.
  return gen_math_ops.sub(x, y, name)


# Prepend the generated op's docstring so the deprecated alias stays
# documented.
_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))
# `tf.math.negative` maps directly onto the generated `neg` op.
negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  # Deprecated alias for `tf.negative`.
  return negative(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  # Doc fix: "this is operation is" -> "this operation is".
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  # Guard clause: reject anything that is not a true scalar up front.
  if shape.ndims != 0:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)
  if isinstance(x, ops.IndexedSlices):
    # Scale only the materialized values; indices/dense_shape are unchanged,
    # which keeps the result sparse and cheap for embeddings-style gradients.
    return ops.IndexedSlices(
        gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
  return gen_math_ops.mul(scalar, x, name)
@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
@dispatch.add_dispatch_support
def softplus(features, name=None):
  """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.

  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus`
  always takes on positive values.

  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">

  Example:

  >>> import tensorflow as tf
  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
  array([0.6931472, 1.3132616], dtype=float32)

  Args:
    features: `Tensor`
    name: Optional: name to associate with this operation.
  Returns:
    `Tensor`
  """
  # Delegate straight to the generated NN kernel.
  return gen_nn_ops.softplus(features, name=name)
@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
  # V2 endpoint: identical semantics to `scalar_mul`, but wraps the op in a
  # "scalar_mul" name scope. The docstring is copied from `scalar_mul`.
  with ops.name_scope(name, "scalar_mul", [x]) as name:
    return scalar_mul(scalar, x, name)
@tf_export("math.pow", "pow")
@dispatch.add_dispatch_support
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as scope_name:
    # Delegate to the generated kernel under the resolved scope name.
    return gen_math_ops._pow(x, y, name=scope_name)
# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: Real and imag must be correct types
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    # Map the (real, imag) dtype pair onto the matching complex dtype; any
    # other combination is rejected.
    out_dtypes = {
        (dtypes.float32, dtypes.float32): dtypes.complex64,
        (dtypes.float64, dtypes.float64): dtypes.complex128,
    }
    Tout = out_dtypes.get((real.dtype, imag.dtype))
    if Tout is None:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)
@tf_export("math.sign", "sign")
@dispatch.add_dispatch_support
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> # real number
  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([ 0.,  1., -1.], dtype=float32)>

  >>> # complex number
  >>> tf.math.sign([1 + 1j, 0 + 0j])
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>

  Args:
    x: A Tensor. Must be one of the following types: bfloat16, half, float32,
      float64, int32, int64, complex64, complex128.
    name: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as x.

    If x is a SparseTensor, returns SparseTensor(x.indices,
    tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if not x.dtype.is_complex:
    return gen_math_ops.sign(x, name=name)
  # Complex sign: x / |x|, with 0 where |x| == 0 (via div_no_nan). The
  # magnitude is computed in the matching real dtype, then re-cast so the
  # division stays in the complex dtype.
  abs_dtype = (
      dtypes.float32 if x.dtype == dtypes.complex64 else dtypes.float64)
  magnitude = cast(gen_math_ops.complex_abs(x, Tout=abs_dtype), dtype=x.dtype)
  return gen_math_ops.div_no_nan(x, magnitude, name=name)
@tf_export("math.real", v1=["math.real", "real"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  # Fix: `@dispatch.add_dispatch_support` was applied twice (on both sides of
  # `deprecated_endpoints`); a single application is sufficient.
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      # Already real: pass through unchanged (no op is created).
      return input
@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  # Fix: `@dispatch.add_dispatch_support` was applied twice (on both sides of
  # `deprecated_endpoints`); a single application is sufficient.
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      # A real tensor has no imaginary component: return matching zeros.
      return array_ops.zeros_like(input)
@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, a tensor of all zeros is returned.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  # Fix: `@dispatch.add_dispatch_support` was applied twice (on both sides of
  # `deprecated_endpoints`); a single application is sufficient.
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
      # Real input: argument is pi for negative values and 0 elsewhere.
      return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
                             array_ops.zeros_like(input))
# pylint: enable=redefined-outer-name,redefined-builtin
@tf_export("math.round", "round")
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  # Integer tensors are already "rounded": return them untouched.
  if x.dtype.is_integer:
    return x
  return gen_math_ops.round(x, name=name)
@tf_export("cast", "dtypes.cast")
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of the
  returned value is set to `0`. The handling of complex types here matches the
  behavior of numpy.

  Note casting nan and inf values to integral types has undefined behavior.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It could
      be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
      same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  # Fast path: a Tensor (or resource variable) already of the target dtype is
  # returned untouched, without creating any op.
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      # Cast only the values; indices and dense_shape are dtype-independent.
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, ops.IndexedSlices):
      # Same treatment for IndexedSlices: only the values are cast.
      values_cast = cast(x.values, base_type, name=name)
      x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    # Complex -> real silently drops the imaginary part; warn about it.
    if x.dtype.is_complex and base_type.is_floating:
      logging.warn("Casting complex to real discards imaginary part.")
    return x
@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    # Clamp from below when the target type cannot represent values as small
    # as the input type can hold (this also covers casts to unsigned types).
    if value.dtype.min < dtype.min:
      lower_bound = ops.convert_to_tensor(
          dtype.min, dtype=value.dtype, name="min")
      value = gen_math_ops.maximum(value, lower_bound)
    # Clamp from above, symmetrically.
    if value.dtype.max > dtype.max:
      upper_bound = ops.convert_to_tensor(
          dtype.max, dtype=value.dtype, name="max")
      value = gen_math_ops.minimum(value, upper_bound)
    return cast(value, dtype, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_float"])
@dispatch.add_dispatch_support
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `float32`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `float32`.
  """
  return cast(x, dtype=dtypes.float32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_double"])
@dispatch.add_dispatch_support
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `float64`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `float64`.
  """
  return cast(x, dtype=dtypes.float64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int32"])
@dispatch.add_dispatch_support
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `int32`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `int32`.
  """
  return cast(x, dtype=dtypes.int32, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_int64"])
@dispatch.add_dispatch_support
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `int64`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `int64`.
  """
  return cast(x, dtype=dtypes.int64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_bfloat16"])
@dispatch.add_dispatch_support
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `bfloat16`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `bfloat16`.
  """
  return cast(x, dtype=dtypes.bfloat16, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex64"])
@dispatch.add_dispatch_support
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `complex64`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `complex64`.
  """
  return cast(x, dtype=dtypes.complex64, name=name)
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
@tf_export(v1=["to_complex128"])
@dispatch.add_dispatch_support
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128` (deprecated alias for `tf.cast`).

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    The input with dtype `complex128`; shape and container type
    (`Tensor`/`SparseTensor`/`IndexedSlices`) are preserved.

  Raises:
    TypeError: If `x` cannot be cast to `complex128`.
  """
  return cast(x, dtype=dtypes.complex128, name=name)
# Hook the unary arithmetic operators on `Tensor` up to the corresponding
# elementwise ops, so `-t` and `abs(t)` build graph nodes.
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
# NOTE(review): `abs` here presumably resolves to the `abs` defined earlier
# in this module (shadowing the builtin) -- confirm against the full file.
ops.Tensor._override_operator("__abs__", abs)
def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray.

  Used to build the argument list for `np_dtypes._result_type`. The branch
  order matters: checks are attempted top to bottom and the first match wins.
  """
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    # Plain Python numbers participate in promotion by value.
    return x
  if isinstance(x, ops.Tensor):
    return x.dtype.as_numpy_dtype
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, tensor_shape.TensorShape):
    # Shapes are treated as int32 for promotion purposes.
    return np.int32
  if isinstance(x, (list, tuple)):
    raise ValueError("Got sequence {}".format(x))
  # Anything else (including np.ndarray) is passed through unchanged.
  return x
def maybe_promote_tensors(*tensors, force_same_dtype=True):
  """Casts `tensors` to a common dtype, honoring numpy-style promotion.

  When numpy-style type promotion is disabled, the inputs are (optionally)
  converted to the first tensor's dtype. When it is enabled, every input is
  cast to the dtype computed by numpy's promotion rules over all inputs.

  Args:
    *tensors: Tensors (or tensor-like values) to promote.
    force_same_dtype: If True and numpy-style promotion is disabled, convert
      all inputs to the first input's dtype.

  Returns:
    The (possibly converted) tensors.
  """
  if not tensors:
    return tensors
  if not ops._numpy_style_type_promotion:
    if not force_same_dtype:
      return tensors
    # Legacy behavior: everything follows the first tensor's dtype.
    target = tensors[0].dtype.base_dtype
    rest = [ops.convert_to_tensor(t, target, name="x") for t in tensors[1:]]
    return [tensors[0]] + rest
  result_type = np_dtypes._result_type(
      *[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
  # Tensors are cast in place; everything else is converted from scratch.
  return [
      cast(x, result_type) if isinstance(x, ops.Tensor) else
      ops.convert_to_tensor(x, result_type) for x in tensors
  ]
def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  Installs `__<op_name>__` (and, for dense tensors, `__r<op_name>__`) on
  `clazz_object`, delegating to `func`.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """
  def binary_op_wrapper(x, y):
    # Forward dunder (e.g. `__add__`): promote, then apply `func`. On
    # failure, a tensor-aware RHS gets a chance via its reflected method.
    with ops.name_scope(None, op_name, [x, y]) as name:
      try:
        # force_same_dtype=False to preserve existing TF behavior
        # TODO(b/178860388): Figure out why binary_op_wrapper and
        # r_binary_op_wrapper use different force_same_dtype values.
        x, y = maybe_promote_tensors(x, y, force_same_dtype=False)
        return func(x, y, name=name)
      except (TypeError, ValueError) as e:
        # Even if dispatching the op failed, the RHS may be a tensor aware
        # object that can implement the operator with knowledge of itself
        # and the tensor.
        # If the RHS is not tensor aware we still want to raise the
        # original error from the LHS, because it may be more
        # informative.
        if hasattr(type(y), "__r%s__" % op_name):
          try:
            r_op = getattr(y, "__r%s__" % op_name)
            out = r_op(x)
            if out is NotImplemented:
              # Re-raise the original LHS error, not a new one.
              raise
            return out
          except (TypeError, ValueError):
            # The reflected attempt also failed; surface the LHS error.
            raise e
        else:
          raise
  def binary_op_wrapper_sparse(sp_x, y):
    # SparseTensor version: `func` transforms only the stored values.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)
  def r_binary_op_wrapper(y, x):
    # Reflected dunder (e.g. `__radd__`): note the argument order -- `y` is
    # the tensor that received the call, `x` the non-tensor LHS.
    with ops.name_scope(None, op_name, [x, y]) as name:
      # TODO(b/178860388): Figure out why binary_op_wrapper and
      # r_binary_op_wrapper use different force_same_dtype values.
      y, x = maybe_promote_tensors(y, x)
      return func(x, y, name=name)
  # Propagate func.__doc__ to the wrappers
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc
  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    # Sparse tensors only get the forward form; there is no reflected
    # registration for SparseTensor.
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
# Conversion table for __truediv__. None entries mean no conversion required.
# Integral dtypes map to the float dtype used for Python-3-style `/`;
# narrower ints go to float32, while int32/int64 need float64 range.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    # Floating and complex dtypes already divide exactly in their own dtype.
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()". Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'.

  Mirrors `_truediv_python3` for the sparse-LHS case: both operands must
  share a dtype, and integral dtypes are upcast via `_TRUEDIV_TABLE` before
  the elementwise division of the stored values.
  """
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    # Mixed-dtype division is not supported; callers must cast explicitly.
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    # Upcast integral inputs so "/" matches Python 3 semantics.
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
def _truediv_python3(x, y, name=None):
  """Implements `/` with Python 3 semantics: integral dtypes become floats.

  Both operands must share a dtype. Integral dtypes are converted to the
  floating dtype given by `_TRUEDIV_TABLE` before the real division.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype not in _TRUEDIV_TABLE:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    target = _TRUEDIV_TABLE[x_dtype]
    if target is not None:
      x = cast(x, target)
      y = cast(y, target)
    return gen_math_ops.real_div(x, y, name=name)
def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Floating/complex inputs use true (real) division; integral inputs use
  floor division, mirroring Python 2's `/`. Used for `Tensor.__div__`.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    return gen_math_ops.floor_div(x, y, name=name)
@tf_export("math.truediv", "truediv")
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  All integer arguments are cast to floating types first, exactly as the
  `x / y` operator behaves in Python 3 (and in Python 2.7 with
  `from __future__ import division`). For integer division that rounds
  down, use `x // y` or `tf.math.floordiv` instead.

  `x` and `y` must share a numeric dtype. Floating-point inputs keep their
  dtype; integral inputs are cast to `float32` (for `int8`/`int16`) or
  `float64` (for `int32`/`int64`), matching Numpy.

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name=name)
@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
@dispatch.add_dispatch_support
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey Python
  3 division operator semantics.

  Forces Python 2 semantics: dividing two integers yields an integer
  (floor division), unlike Python 3 where `/` always produces a float and
  `//` an integer.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """
  return _div_python2(x, y, name=name)
@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if `y` (denominator) is zero.

  For example:

  >>> tf.constant(3.0) / 0.0
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>
  >>> tf.math.divide_no_nan(3.0, 0.0)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.0>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x divided by y.
  """
  # Fix: `@dispatch.add_dispatch_support` used to be applied twice (once
  # above and once below `deprecated_endpoints`), wrapping the function in
  # two identical dispatch layers. A single application suffices, and the
  # decorator order now matches the sibling ops (e.g. `floordiv`).
  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # `y` is converted with `x`'s dtype so the generated kernel sees a
    # single dtype pair.
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    return gen_math_ops.div_no_nan(x, y, name=name)
@tf_export("math.multiply_no_nan")
@dispatch.add_dispatch_support
def multiply_no_nan(x, y, name=None):
  """Returns `x * y` elementwise, yielding 0 wherever `y` is 0 (even for NaN/inf `x`).

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    y: A `Tensor` whose dtype is compatible with `x`.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x times y.
  """
  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    if x.dtype.base_dtype != y.dtype.base_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x.dtype.base_dtype, y.dtype.base_dtype))
    return gen_math_ops.mul_no_nan(x, y, name=name)
# TODO(aselle): This should be removed
# Module-level alias for the floor-modulus kernel.
mod = gen_math_ops.floor_mod
# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides `x / y` elementwise, rounding toward the most negative integer.

  Mirrors Python's `//` operator (as generated in Python 3, and in
  Python 2.7 with `from __future__ import division`): integer inputs use
  integer floor division, while floating-point inputs floor the true
  quotient, so the result is always an integral value (possibly stored as a
  float). `x` and `y` must share a dtype, which is also the result dtype.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` rounded toward negative infinity.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    return gen_math_ops.floor_div(x, y, name=name)
# Short module-level aliases for the generated division/modulus kernels.
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod
@tf_export("__operators__.add", v1=[])
@dispatch.add_dispatch_support
def _add_dispatch(x, y, name=None):
  """The operation invoked by the `Tensor.__add__` operator.

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__add__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    x: The left-hand side of the `+` operator.
    y: The right-hand side of the `+` operator.
    name: an optional name for the operation.

  Returns:
    The result of the elementwise `+` operation.
  """
  if not isinstance(y, (ops.Tensor, sparse_tensor.SparseTensor)):
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
  # Strings use the legacy Add kernel; all other dtypes use AddV2.
  op = gen_math_ops.add if x.dtype == dtypes.string else gen_math_ops.add_v2
  return op(x, y, name=name)
def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if not isinstance(y, sparse_tensor.SparseTensor):
    # Case: Dense * Dense.
    return multiply(x, y, name=name)
  # Case: Dense * Sparse -- multiply only the sparse tensor's stored values.
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# SparseTensor operator overrides (forward form only).
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)
# Dense Tensor operator overrides (forward and reflected forms).
_OverrideBinaryOperatorHelper(_add_dispatch, "add")
_OverrideBinaryOperatorHelper(subtract, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
# NOTE(review): `pow` presumably refers to this module's `pow`, defined
# earlier in the file (shadowing the builtin) -- confirm.
_OverrideBinaryOperatorHelper(pow, "pow")
@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  Computed as x ^ y = (x | y) & ~(x & y).

  Requires that `x` and `y` have the same shape or have
  [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  shapes. For example, `x` and `y` can be:

  - Two single elements of type `bool`
  - One `tf.Tensor` of type `bool` and one single `bool`, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False, True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>

  Args:
    x: A `tf.Tensor` type bool.
    y: A `tf.Tensor` of type bool.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(
      either, gen_math_ops.logical_not(both), name=name)
def and_(x, y, name=None):
  """Dispatches `&` to logical AND for bools, bitwise AND otherwise."""
  if x.dtype != dtypes.bool:
    return gen_bitwise_ops.bitwise_and(x, y)
  return gen_math_ops.logical_and(x, y, name)
def or_(x, y, name=None):
  """Dispatches `|` to logical OR for bools, bitwise OR otherwise."""
  if x.dtype != dtypes.bool:
    return gen_bitwise_ops.bitwise_or(x, y)
  return gen_math_ops.logical_or(x, y, name)
def xor_(x, y, name=None):
  """Dispatches `^` to logical XOR for bools, bitwise XOR otherwise."""
  if x.dtype != dtypes.bool:
    return gen_bitwise_ops.bitwise_xor(x, y)
  return logical_xor(x, y, name)
def invert_(x, name=None):
  """Dispatches `~` to logical NOT for bools, bitwise inversion otherwise."""
  if x.dtype != dtypes.bool:
    return gen_bitwise_ops.invert(x, name=name)
  return gen_math_ops.logical_not(x, name=name)
# Bool tensors get the logical and/or/xor/not ops; other integral dtypes
# fall through to the bitwise variants (see the dispatchers above).
_OverrideBinaryOperatorHelper(and_, "and")
_OverrideBinaryOperatorHelper(or_, "or")
_OverrideBinaryOperatorHelper(xor_, "xor")
ops.Tensor._override_operator("__invert__", invert_)
def _promote_dtypes_decorator(fn):
  """Wraps a binary `fn` so both operands are dtype-promoted before the call."""
  def wrapper(lhs, rhs, *args, **kwargs):
    lhs, rhs = maybe_promote_tensors(lhs, rhs, force_same_dtype=False)
    return fn(lhs, rhs, *args, **kwargs)
  return tf_decorator.make_decorator(fn, wrapper)
# Comparison operators promote mixed-dtype operands (without forcing a
# common dtype) before delegating to the elementwise comparison kernels.
ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
    gen_math_ops.less))
ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
    gen_math_ops.less_equal))
ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
    gen_math_ops.greater))
ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
    gen_math_ops.greater_equal))
@tf_export("math.equal", "equal")
@dispatch.add_dispatch_support
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Broadcasts the arguments
  ([numpy rules](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)),
  then compares them elementwise, producing a boolean tensor.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>

  Args:
    x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
    y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  return gen_math_ops.equal(x, y, name=name)
@tf_export("math.not_equal", "not_equal")
@dispatch.add_dispatch_support
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Broadcasts the arguments
  ([numpy rules](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)),
  then compares them elementwise for inequality, producing a boolean tensor.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>

  Args:
    x: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
    y: A `tf.Tensor` or `tf.sparse.SparseTensor` or `tf.IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of type bool with the same size as that of x or y.

  Raises:
    `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
  """
  return gen_math_ops.not_equal(x, y, name=name)
@tf_export("__operators__.eq", v1=[])
@dispatch.add_dispatch_support
def tensor_equals(self, other):
  """The operation invoked by the `Tensor.__eq__` operator.

  Compares two tensors element-wise for equality if they are
  broadcast-compatible; or returns False if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__eq__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `==` operator.
    other: The right-hand side of the `==` operator.

  Returns:
    The result of the elementwise `==` operation, or `False` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  # Elementwise semantics apply only when the new equality behavior is
  # enabled AND we are eager, or building a function graph (not a legacy
  # v1 graph).
  if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
      (g is None or g.building_function)):
    self, other = maybe_promote_tensors(self, other)
    # incompatible_shape_error=False: non-broadcastable shapes return False
    # instead of raising.
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is other
@tf_export("__operators__.ne", v1=[])
@dispatch.add_dispatch_support
def tensor_not_equals(self, other):
  """The operation invoked by the `Tensor.__ne__` operator.

  Compares two tensors element-wise for inequality if they are
  broadcast-compatible; or returns True if they are not broadcast-compatible.
  (Note that this behavior differs from `tf.math.not_equal`, which raises an
  exception if the two tensors are not broadcast-compatible.)

  Purpose in the API:

    This method is exposed in TensorFlow's API so that library developers
    can register dispatching for `Tensor.__ne__` to allow it to handle
    custom composite tensors & other custom objects.

    The API symbol is not intended to be called by users directly and does
    appear in TensorFlow's generated documentation.

  Args:
    self: The left-hand side of the `!=` operator.
    other: The right-hand side of the `!=` operator.

  Returns:
    The result of the elementwise `!=` operation, or `True` if the arguments
    are not broadcast-compatible.
  """
  if other is None:
    return True
  # NOTE(review): unlike `tensor_equals`, this gate does not also check
  # `g.building_function` -- confirm whether the asymmetry is intentional.
  if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
    self, other = maybe_promote_tensors(self, other)
    # incompatible_shape_error=False: non-broadcastable shapes return True
    # instead of raising.
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  else:
    # In legacy graph mode, tensor equality is object equality
    return self is not other
# Install the `==` / `!=` behavior defined above on `ops.Tensor`.
ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)
@tf_export("range")
@dispatch.add_dispatch_support
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at `start` and extends by
  increments of `delta` up to but not including `limit`.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin `range`, `start` defaults to 0, so that
  `range(n) = range(0, n)`.

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
      is not None; otherwise, acts as range limit and first entry defaults to 0.
    limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
      defaults to the value of `start` while the first entry of the range
      defaults to 0.
    delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
      1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D `Tensor` of type `dtype`.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  # Single-argument form: range(n) == range(0, n).
  if limit is None:
    start, limit = 0, start
  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, ops.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, ops.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, ops.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
    # infer dtype if not explicitly provided
    if dtype is None:
      # The "widest" dtype among the three inputs wins, in this fixed order.
      dtype_hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
      ]
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    else:
      inferred_dtype = dtype
    # Always try to perform a cast even when start/limit/delta are already
    # tensors. This will resolve the case where start/limit/delta's original's
    # dtype is different from provided dtype.
    start = cast(start, inferred_dtype)
    limit = cast(limit, inferred_dtype)
    delta = cast(delta, inferred_dtype)
    return gen_math_ops._range(start, limit, delta, name=name)
def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  """Converts a Python builtin `range` object to a 1-D tensor."""
  del as_ref  # Unused: range objects are immutable.
  start, stop, step = value.start, value.stop, value.step
  return range(start, stop, step, dtype=dtype, name=name)
# Python 3 `range` objects convert directly to a tensor via tf.range.
# (In Python 2, `range` returns a list, which the normal list-conversion
# path already handles.)
if not six.PY2:
  ops.register_tensor_conversion_function(builtins.range,
                                          _range_tensor_conversion_function)
# Reduction operations
def _ReductionDims(x, axis):  # pylint: disable=invalid-name
  """Returns range(0, rank(x)) if axis is None.

  If `axis` is given it is returned unchanged; otherwise the full list of
  dimensions of `x` is produced (as a constant when the rank is statically
  known, or via Rank/Range ops at run time).
  """
  if axis is not None:
    return axis
  else:
    x_rank = None
    if isinstance(x, ops.Tensor):
      x_rank = x.shape.rank
    elif (isinstance(x, sparse_tensor.SparseTensor) and
          x.dense_shape.shape.is_fully_defined()):
      x_rank = x.dense_shape.shape.dims[0].value  # sparse.dense_shape is 1-D.
    # Fast path: avoid creating Rank and Range ops if ndims is known.
    # NOTE(review): `if x_rank:` also routes rank-0 tensors (x_rank == 0 is
    # falsy) to the slow path -- presumably benign; confirm intent.
    if x_rank:
      return constant_op.constant(np.arange(x_rank, dtype=np.int32))
    else:
      # Otherwise, we rely on Range and Rank to do the right thing at run-time.
      return range(0, array_ops.rank(x))
def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  # Eager tensors always carry a concrete shape.
  if isinstance(tensor, ops.EagerTensor):
    return True
  return tensor.shape.is_fully_defined()
def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  # A full reduction (axis=None, keepdims falsy) of a tensor whose shape is
  # not already fully defined must yield a scalar -- record that statically.
  if _has_fully_defined_shape(output):
    return output
  if keepdims or axis is not None:
    return output
  output.set_shape(())
  return output
@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, the rank of the tensor is reduced by 1 for each of the
  entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1. If `axis` is None, all
  dimensions are reduced, and a tensor with a single element is returned.

  For example:

  >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
  >>> tf.reduce_sum(x).numpy()
  6
  >>> tf.reduce_sum(x, 0).numpy()
  array([2, 2, 2], dtype=int32)
  >>> tf.reduce_sum(x, 1).numpy()
  array([3, 3], dtype=int32)
  >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
  array([[3],
         [3]], dtype=int32)
  >>> tf.reduce_sum(x, [0, 1]).numpy()
  6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # Fold each deprecated alias into its modern counterpart, then delegate
  # to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_sum(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_sum", "reduce_sum", v1=[])
@dispatch.add_dispatch_support
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.add` op.

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` is None, all dimensions are reduced, and a
  tensor with a single element is returned.

  For example:

  >>> # x has a shape of (2, 3) (two rows and three columns):
  >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
  >>> x.numpy()
  array([[1, 1, 1],
         [1, 1, 1]], dtype=int32)
  >>> # sum all the elements
  >>> # 1 + 1 + 1 + 1 + 1+ 1 = 6
  >>> tf.reduce_sum(x).numpy()
  6
  >>> # reduce along the first dimension
  >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> tf.reduce_sum(x, 0).numpy()
  array([2, 2, 2], dtype=int32)
  >>> # reduce along the second dimension
  >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
  >>> tf.reduce_sum(x, 1).numpy()
  array([3, 3], dtype=int32)
  >>> # keep the original dimensions
  >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
  array([[3],
         [3]], dtype=int32)
  >>> # reduce along both dimensions
  >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
  >>> # or, equivalently, reduce along rows, then reduce the resultant array
  >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
  >>> # 2 + 2 + 2 = 6
  >>> tf.reduce_sum(x, [0, 1]).numpy()
  6

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # The reduction indices are computed once here and handed to the helper so
  # they are not recomputed.
  return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
                              _ReductionDims(input_tensor, axis))
def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Like `reduce_sum`, but takes precomputed reduction indices via `dims`.

  Not exported via `tf_export`; callers that already hold the tensor produced
  by `_ReductionDims` pass it as `dims` to avoid recomputing it. `axis` is
  still used by `_may_reduce_to_scalar` to decide whether the result can be
  treated as a scalar.
  """
  # Normalize keepdims to a plain bool (None counts as False).
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._sum(input_tensor, dims, keepdims, name=name))
@tf_export("math.reduce_euclidean_norm")
@dispatch.add_dispatch_support
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  `input_tensor` is reduced along each dimension in `axis`, which must have
  unique entries. Reduced dimensions are dropped unless `keepdims` is true,
  in which case they are kept with length 1. With `axis=None` all dimensions
  are reduced and a single-element tensor is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]])  # x.dtype is tf.int32
  tf.math.reduce_euclidean_norm(x)  # returns 4 as dtype is tf.int32
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(y, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(y, [0, 1])  # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  keepdims = bool(keepdims)
  reduction_dims = _ReductionDims(input_tensor, axis)
  norm = gen_math_ops.euclidean_norm(
      input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, norm)
@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions in `axis`, counting the
  elements that compare unequal to zero. Each reduced dimension is dropped
  unless `keepdims` is true, in which case it is kept with length 1. When
  `axis` has no entries every dimension is reduced and a single-element
  tensor is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x)  # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
      `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  # Fold every deprecated alias into its modern counterpart, then hand off
  # to the v2 implementation.
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  input_tensor = deprecation.deprecated_argument_lookup(
      "input", input, "input_tensor", input_tensor)
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
@tf_export("math.count_nonzero", v1=[])
@dispatch.add_dispatch_support
def count_nonzero_v2(
    input,  # pylint: disable=redefined-builtin
    axis=None,
    keepdims=None,
    dtype=dtypes.int64,
    name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  `input` is reduced along the dimensions in `axis`, counting the elements
  that compare unequal to zero. Each reduced dimension is dropped unless
  `keepdims` is true, in which case it is kept with length 1. When `axis`
  has no entries every dimension is reduced and a single-element tensor is
  returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check. Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string `""`. Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x)  # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input), rank(input))`.
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  keepdims = False if keepdims is None else keepdims
  with ops.name_scope(name, "count_nonzero", [input]):
    input = ops.convert_to_tensor(input, name="input")
    # A scalar zero suffices: `not_equal` broadcasts it against `input`.
    zero = array_ops.zeros([], dtype=input.dtype)
    is_nonzero = cast(gen_math_ops.not_equal(input, zero), dtypes.int64)
    # Reduce in int64 (this reduction runs on GPU) before casting to the
    # requested output dtype.
    nonzero_count = reduce_sum(is_nonzero, axis=axis, keepdims=keepdims)
    return cast(nonzero_count, dtype=dtype)
@tf_export(v1=["math.reduce_mean", "reduce_mean"])
@dispatch.add_dispatch_support
def reduce_mean_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the mean of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions in `axis` by averaging the
  elements across those dimensions. Each reduced dimension is dropped unless
  `keepdims` is true, in which case it is kept with length 1. With
  `axis=None` every dimension is reduced and a single-element tensor is
  returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
  @end_compatibility
  """
  # Resolve the deprecated v1 argument aliases, then delegate to the v2 op.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_mean(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_mean", "reduce_mean", v1=[])
@dispatch.add_dispatch_support
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the mean of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions in `axis` by averaging the
  elements across those dimensions; `axis` entries must be unique. Each
  reduced dimension is dropped unless `keepdims` is true, in which case it
  is kept with length 1. With `axis=None` every dimension is reduced and a
  single-element tensor is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that `np.mean` has a `dtype` parameter that could be used to
  specify the output type. By default this is `dtype=float64`. On the other
  hand, `tf.reduce_mean` has an aggressive type inference from `input_tensor`,
  for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  reduction_dims = _ReductionDims(input_tensor, axis)
  mean = gen_math_ops.mean(input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, mean)
@tf_export("math.reduce_variance")
@dispatch.add_dispatch_support
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the variance of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions in `axis`, which must have
  unique entries. Each reduced dimension is dropped unless `keepdims` is
  true, in which case it is kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_variance(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
  >>> tf.math.reduce_variance(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
  >>> tf.math.reduce_variance(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note, for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  Raises:
    TypeError: If the input has an integer dtype.

  @compatibility(numpy)
  Equivalent to np.var

  Please note `np.var` has a `dtype` parameter that could be used to specify
  the output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_variance` has aggressive type inference from
  `input_tensor`.
  @end_compatibility
  """
  with ops.name_scope(name or "reduce_variance"):
    # Means are kept with their reduced dimensions so they broadcast back
    # against the input when forming the deviations.
    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
    if means.dtype.is_integer:
      raise TypeError("Input must be either real or complex")
    deviations = input_tensor - means
    if deviations.dtype.is_complex:
      # |z|^2 = z * conj(z); take the real part so the result is real-typed.
      real_dtype = deviations.dtype.real_dtype
      squared_deviations = gen_math_ops.real(
          gen_math_ops.mul(gen_math_ops.conj(deviations), deviations),
          Tout=real_dtype)
    else:
      squared_deviations = gen_math_ops.square(deviations)
    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
@tf_export("math.reduce_std")
@dispatch.add_dispatch_support
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the standard deviation of elements across dimensions of a tensor.

  `input_tensor` is reduced along the dimensions in `axis`, which must have
  unique entries. Each reduced dimension is dropped unless `keepdims` is
  true, in which case it is kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_std(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
  >>> tf.math.reduce_std(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
  >>> tf.math.reduce_std(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have real or complex type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor. Note, for
    `complex64` or `complex128` input, the returned `Tensor` will be of type
    `float32` or `float64`, respectively.

  @compatibility(numpy)
  Equivalent to np.std

  Please note `np.std` has a `dtype` parameter that could be used to specify
  the output type. By default this is `dtype=float64`. On the other hand,
  `tf.math.reduce_std` has aggressive type inference from `input_tensor`.
  @end_compatibility
  """
  # The standard deviation is just the square root of the variance.
  with ops.name_scope(name or "reduce_std"):
    return gen_math_ops.sqrt(
        reduce_variance(input_tensor, axis=axis, keepdims=keepdims))
@tf_export("math.reduce_prod", "reduce_prod", v1=[])
@dispatch.add_dispatch_support
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  `input_tensor` is reduced along each dimension in `axis` by multiplying
  the elements together. Reduced dimensions are dropped unless `keepdims`
  is true, in which case they are kept with length 1. With `axis=None`
  every dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_prod(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=24.>
  >>> tf.math.reduce_prod(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
  >>> tf.math.reduce_prod(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
  dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  reduction_dims = _ReductionDims(input_tensor, axis)
  prod = gen_math_ops.prod(input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, prod)
@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_prod_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes `tf.math.multiply` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.multiply` op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by multiplying the elements together. Reduced dimensions are dropped
  unless `keepdims` is true, in which case they are kept with length 1.
  With `axis=None` every dimension is reduced and a single-element tensor
  is returned.

  For example:

  >>> x = tf.constant([[1., 2.], [3., 4.]])
  >>> tf.math.reduce_prod(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=24.>
  >>> tf.math.reduce_prod(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
  >>> tf.math.reduce_prod(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
  dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.prod
  @end_compatibility
  """
  # Resolve the deprecated v1 argument aliases, then delegate to the v2 op.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_prod(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export(v1=["math.reduce_min", "reduce_min"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_min_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by taking the minimum. Reduced dimensions are dropped unless `keepdims`
  is true, in which case they are kept with length 1. With `axis=None`
  every dimension is reduced and a single-element tensor is returned.

  Usage example:

  >>> x = tf.constant([5, 1, 2, 4])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>
  >>> x = tf.constant([-5, -1, -2, -4])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=-5>
  >>> x = tf.constant([4, float('nan')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('nan'), float('nan')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('-inf'), float('inf')])
  >>> tf.reduce_min(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=-inf>

  See the numpy docs for `np.amin` and `np.nanmin` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Resolve the deprecated v1 argument aliases, then delegate to the v2 op.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_min(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_min", "reduce_min", v1=[])
@dispatch.add_dispatch_support
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the `tf.math.minimum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.minimum` op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by taking the minimum. Reduced dimensions are dropped unless `keepdims`
  is true, in which case they are kept with length 1. With `axis=None`
  every dimension is reduced and a single-element tensor is returned.

  For example:

  >>> a = tf.constant([
  ...   [[1, 2], [3, 4]],
  ...   [[1, 2], [3, 4]]
  ... ])
  >>> tf.reduce_min(a)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  Choosing a specific axis returns minimum element in the given axis:

  >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
  >>> tf.reduce_min(b, axis=0)
  <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
  >>> tf.reduce_min(b, axis=1)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>

  Setting `keepdims` to `True` retains the dimension of `input_tensor`:

  >>> tf.reduce_min(a, keepdims=True)
  <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
  >>> tf.math.reduce_min(a, axis=0, keepdims=True)
  <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
  array([[[1, 2],
          [3, 4]]], dtype=int32)>

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.min
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  reduction_dims = _ReductionDims(input_tensor, axis)
  minimum = gen_math_ops._min(
      input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, minimum)
@tf_export(v1=["math.reduce_max", "reduce_max"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_max_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by taking the maximum. Reduced dimensions are dropped unless `keepdims`
  is true, in which case they are kept with length 1. With `axis=None`
  every dimension is reduced and a single-element tensor is returned.

  Usage example:

  >>> x = tf.constant([5, 1, 2, 4])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=5>
  >>> x = tf.constant([-5, -1, -2, -4])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=-1>
  >>> x = tf.constant([4, float('nan')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('nan'), float('nan')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('-inf'), float('inf')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Resolve the deprecated v1 argument aliases, then delegate to the v2 op.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_max(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_max", "reduce_max", v1=[])
@dispatch.add_dispatch_support
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.maximum` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.maximum` op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by taking the maximum. Reduced dimensions are dropped unless `keepdims`
  is true, in which case they are kept with length 1. With `axis=None`
  every dimension is reduced and a single-element tensor is returned.

  Usage example:

  >>> x = tf.constant([5, 1, 2, 4])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=5>
  >>> x = tf.constant([-5, -1, -2, -4])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=-1>
  >>> x = tf.constant([4, float('nan')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('nan'), float('nan')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=nan>
  >>> x = tf.constant([float('-inf'), float('inf')])
  >>> tf.reduce_max(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=inf>

  See the numpy docs for `np.amax` and `np.nanmax` behavior.

  Args:
    input_tensor: The tensor to reduce. Should have real numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  # Compute the reduction indices once and hand them to the shared helper.
  reduction_dims = _ReductionDims(input_tensor, axis)
  return reduce_max_with_dims(input_tensor, axis, keepdims, name,
                              reduction_dims)
def reduce_max_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Like `reduce_max`, but takes precomputed reduction indices via `dims`.

  Not exported via `tf_export`; callers that already hold the tensor produced
  by `_ReductionDims` pass it as `dims` to avoid recomputing it. `axis` is
  still used by `_may_reduce_to_scalar` to decide whether the result can be
  treated as a scalar.
  """
  # Normalize keepdims to a plain bool (None counts as False).
  keepdims = False if keepdims is None else bool(keepdims)
  return _may_reduce_to_scalar(
      keepdims, axis,
      gen_math_ops._max(input_tensor, dims, keepdims, name=name))
@tf_export(v1=["math.reduce_all", "reduce_all"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_all_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and`
  op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by logical AND. Reduced dimensions are dropped unless `keepdims` is true,
  in which case they are kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[True, True], [False, False]])
  >>> tf.math.reduce_all(x)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>
  >>> tf.math.reduce_all(x, 0)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
  >>> tf.math.reduce_all(x, 1)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  # Resolve the deprecated v1 argument aliases, then delegate to the v2 op.
  axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_all(input_tensor, axis=axis, keepdims=keepdims, name=name)
@tf_export("math.reduce_all", "reduce_all", v1=[])
@dispatch.add_dispatch_support
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.logical_and` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_and`
  op.

  `input_tensor` is reduced along each dimension in `axis` (unique entries)
  by logical AND. Reduced dimensions are dropped unless `keepdims` is true,
  in which case they are kept with length 1. With `axis=None` every
  dimension is reduced and a single-element tensor is returned.

  For example:

  >>> x = tf.constant([[True, True], [False, False]])
  >>> tf.math.reduce_all(x)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>
  >>> tf.math.reduce_all(x, 0)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
  >>> tf.math.reduce_all(x, 1)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.all
  @end_compatibility
  """
  keepdims = False if keepdims is None else bool(keepdims)
  reduction_dims = _ReductionDims(input_tensor, axis)
  conjunction = gen_math_ops._all(
      input_tensor, reduction_dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, conjunction)
@tf_export(v1=["math.reduce_any", "reduce_any"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_any_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  TF1 endpoint: resolves the deprecated argument aliases
  (`reduction_indices` -> `axis`, `keep_dims` -> `keepdims`) and then
  delegates to `reduce_any`.

  Reduces `input_tensor` along the dimensions given in `axis` with a logical
  OR. Unless `keepdims` is true, each reduced axis is removed from the
  result; otherwise it is retained with length 1. When `axis` is None every
  dimension is reduced and a tensor with a single element is returned.

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  # Fold the deprecated aliases into the canonical arguments before
  # delegating to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_any(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_any", "reduce_any", v1=[])
@dispatch.add_dispatch_support
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
  """Computes `tf.math.logical_or` of elements across dimensions of a tensor.

  This is the reduction operation for the elementwise `tf.math.logical_or`
  op. Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, each reduced axis is removed from the result; otherwise
  it is retained with length 1. When `axis` is None every dimension is
  reduced and a tensor with a single element is returned.

  Example:

  ```python
  x = tf.constant([[True, True], [False, False]])
  tf.reduce_any(x)     # True
  tf.reduce_any(x, 0)  # [True, True]
  tf.reduce_any(x, 1)  # [True, False]
  ```

  Args:
    input_tensor: The boolean tensor to reduce.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.any
  @end_compatibility
  """
  # Normalize keepdims so the generated op always receives a plain bool.
  keepdims = bool(keepdims) if keepdims is not None else False
  reduced = gen_math_ops._any(
      input_tensor, _ReductionDims(input_tensor, axis), keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, reduced)
@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_logsumexp_v1(input_tensor,
                        axis=None,
                        keepdims=None,
                        name=None,
                        reduction_indices=None,
                        keep_dims=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  TF1 endpoint: resolves the deprecated argument aliases
  (`reduction_indices` -> `axis`, `keep_dims` -> `keepdims`) and then
  delegates to `reduce_logsumexp`.

  Reduces `input_tensor` along the dimensions given in `axis`. Unless
  `keepdims` is true, each reduced axis is removed from the result; otherwise
  it is retained with length 1. With no `axis` entries, every dimension is
  reduced and a tensor with a single element is returned. The computation is
  more numerically stable than a naive log(sum(exp(input))): it avoids
  overflow from exponentiating large inputs and underflow from taking the
  log of small ones.

  Example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)                        # log(6)
  tf.reduce_logsumexp(x, 0)                     # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)                     # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)      # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])                # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for `keepdims`.

  Returns:
    The reduced tensor.
  """
  # Fold the deprecated aliases into the canonical arguments before
  # delegating to the v2 implementation.
  axis = deprecation.deprecated_argument_lookup("axis", axis,
                                                "reduction_indices",
                                                reduction_indices)
  keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
                                                    "keep_dims", keep_dims)
  return reduce_logsumexp(input_tensor, axis, keepdims, name)
@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
@dispatch.add_dispatch_support
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
  """Computes log(sum(exp(elements across dimensions of a tensor))).

  Reduces `input_tensor` along the dimensions given in `axis`.
  Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
  of the entries in `axis`, which must be unique. If `keepdims` is true, the
  reduced dimensions are retained with length 1.

  If `axis` has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  This function is more numerically stable than log(sum(exp(input))). It avoids
  overflows caused by taking the exp of large inputs and underflows caused by
  taking the log of small inputs.

  For example:

  ```python
  x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
  tf.reduce_logsumexp(x)  # log(6)
  tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
  tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
  tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
  tf.reduce_logsumexp(x, [0, 1])  # log(6)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If `None` (the default), reduces all
      dimensions. Must be in the range `[-rank(input_tensor),
      rank(input_tensor))`.
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.
  """
  keepdims = False if keepdims is None else keepdims
  input_tensor = ops.convert_to_tensor(input_tensor)
  with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
    reduce_dim = _ReductionDims(input_tensor, axis)
    # Shift by the per-reduction max so that exp() cannot overflow.
    # keepdims=True keeps `raw_max` broadcastable against `input_tensor`
    # for the subtraction below.
    raw_max = reduce_max_with_dims(
        input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
    # Replace non-finite maxima (e.g. a slice that is all -inf) with zero so
    # the shift stays well-defined; stop_gradient excludes the shift from
    # backprop, since it acts as an additive constant.
    my_max = array_ops.stop_gradient(
        gen_math_ops.select(
            gen_math_ops.is_finite(raw_max), raw_max,
            gen_array_ops.zeros_like(raw_max)))
    # log(sum(exp(x - max))) -- the numerically safe core of the identity.
    result = gen_math_ops.log(
        reduce_sum_with_dims(
            gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
            axis=axis,
            keepdims=keepdims,
            dims=reduce_dim))
    if not keepdims:
      # Drop the retained unit dims from the max so it matches `result`'s
      # shape before the addition.
      my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
    # Add the max back: logsumexp(x) = max + log(sum(exp(x - max))).
    result = _add_dispatch(result, my_max, name=name)
    return _may_reduce_to_scalar(keepdims, axis, result)
@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
  """Compute the trace of a tensor `x`.

  `trace(x)` returns the sum along the main diagonal of each inner-most matrix
  in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
  is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where

  `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`

  For example:

  ```python
  x = tf.constant([[1, 2], [3, 4]])
  tf.linalg.trace(x)  # 5

  x = tf.constant([[1, 2, 3],
                   [4, 5, 6],
                   [7, 8, 9]])
  tf.linalg.trace(x)  # 15

  x = tf.constant([[[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]],
                   [[-1, -2, -3],
                    [-4, -5, -6],
                    [-7, -8, -9]]])
  tf.linalg.trace(x)  # [15, -15]
  ```

  Args:
    x: tensor.
    name: A name for the operation (optional).

  Returns:
    The trace of input tensor.
  """
  # NOTE: the original stacked `@dispatch.add_dispatch_support` twice (once
  # above and once below `deprecated_endpoints`), wrapping the function in two
  # redundant dispatch layers; a single application is sufficient.
  with ops.name_scope(name, "Trace", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # trace == sum of the main diagonal of each inner-most matrix.
    return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
@tf_export("linalg.matmul", "matmul")
@dispatch.add_dispatch_support
def matmul(a,
           b,
           transpose_a=False,
           transpose_b=False,
           adjoint_a=False,
           adjoint_b=False,
           a_is_sparse=False,
           b_is_sparse=False,
           output_type=None,
           name=None):
  """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.

  The inputs must, following any transpositions, be tensors of rank >= 2
  where the inner 2 dimensions specify valid matrix multiplication dimensions,
  and any further outer dimensions specify matching batch size.

  Both matrices must be of the same type. The supported types are:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Either matrix can be transposed or adjointed (conjugated and transposed) on
  the fly by setting one of the corresponding flag to `True`. These are `False`
  by default.

  If one or both of the matrices contain a lot of zeros, a more efficient
  multiplication algorithm can be used by setting the corresponding
  `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
  This optimization is only available for plain matrices (rank-2 tensors) with
  datatypes `bfloat16` or `float32`.

  A simple 2-D tensor matrix multiplication:

  >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  >>> a  # 2-D tensor
  <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
  array([[1, 2, 3],
         [4, 5, 6]], dtype=int32)>
  >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
  >>> b  # 2-D tensor
  <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
  array([[ 7,  8],
         [ 9, 10],
         [11, 12]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 58,  64],
         [139, 154]], dtype=int32)>

  A batch matrix multiplication with batch shape [2]:

  >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
  >>> a  # 3-D tensor
  <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
  array([[[ 1,  2,  3],
          [ 4,  5,  6]],
         [[ 7,  8,  9],
          [10, 11, 12]]], dtype=int32)>
  >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
  >>> b  # 3-D tensor
  <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
  array([[[13, 14],
          [15, 16],
          [17, 18]],
         [[19, 20],
          [21, 22],
          [23, 24]]], dtype=int32)>
  >>> c = tf.matmul(a, b)
  >>> c  # `a` * `b`
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 94, 100],
          [229, 244]],
         [[508, 532],
          [697, 730]]], dtype=int32)>

  Since python >= 3.5 the @ operator is supported
  (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
  it simply calls the `tf.matmul()` function, so the following lines are
  equivalent:

  >>> d = a @ b @ [[10], [11]]
  >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])

  Args:
    a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
      `complex64`, `complex128` and rank > 1.
    b: `tf.Tensor` with same type and rank as `a`.
    transpose_a: If `True`, `a` is transposed before multiplication.
    transpose_b: If `True`, `b` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    adjoint_b: If `True`, `b` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `a` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
      **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
      that assume most values in `a` are zero.
      See `tf.sparse.sparse_dense_matmul`
      for some support for `tf.sparse.SparseTensor` multiplication.
    output_type: The output datatype if needed. Defaults to None in which case
      the output_type is the same as input type. Currently only works when input
      tensors are type int8 and output_type can be int32.
    name: Name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
    is the product of the corresponding matrices in `a` and `b`, e.g. if all
    transpose or adjoint attributes are `False`:

    `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
    for all indices `i`, `j`.

    Note: This is matrix product, not element-wise product.

  Raises:
    ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
      `adjoint_b` are both set to `True`.
    TypeError: If output_type is specified but the types of `a`, `b` and
      `output_type` is not int8, int8 and int32.
  """
  with ops.name_scope(name, "MatMul", [a, b]) as name:
    # transpose_* and adjoint_* are mutually exclusive per operand.
    if transpose_a and adjoint_a:
      raise ValueError("Only one of transpose_a and adjoint_a can be True.")
    if transpose_b and adjoint_b:
      raise ValueError("Only one of transpose_b and adjoint_b can be True.")

    if context.executing_eagerly():
      # In eager mode, skip conversion for values that are already eager
      # tensors or resource variables; re-wrapping them would add per-call
      # overhead.
      if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
        a = ops.convert_to_tensor(a, name="a")
      if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
        b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
    else:
      a = ops.convert_to_tensor(a, name="a")
      b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")

    # TODO(apassos) remove _shape_tuple here when it is not needed.
    a_shape = a._shape_tuple()  # pylint: disable=protected-access
    b_shape = b._shape_tuple()  # pylint: disable=protected-access

    # Unknown rank (shape tuple is None) or rank > 2 means the result may
    # carry batch dimensions, which routes to the batched kernels below.
    output_may_have_non_empty_batch_shape = (
        (a_shape is None or len(a_shape) > 2) or
        (b_shape is None or len(b_shape) > 2))

    # TODO(b/178749687): remove this boolean and all related branches once the
    # bridges are ready.
    # batch_matmul_v3 is for when input type is different from output type.
    use_batch_matmul_v3 = False
    if output_type and (output_type != a.dtype or output_type != b.dtype):
      use_batch_matmul_v3 = True

    if (not a_is_sparse and
        not b_is_sparse) and output_may_have_non_empty_batch_shape:
      # BatchMatmul does not support transpose, so we conjugate the matrix and
      # use adjoint instead. Conj() is a noop for real matrices.
      if transpose_a:
        a = conj(a)
        adjoint_a = True
      if transpose_b:
        b = conj(b)
        adjoint_b = True
      if use_batch_matmul_v3:
        return gen_math_ops.batch_mat_mul_v3(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
      else:
        return gen_math_ops.batch_mat_mul_v2(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)

    # Neither matmul nor sparse_matmul support adjoint, so we conjugate
    # the matrix and use transpose instead. Conj() is a noop for real
    # matrices.
    if adjoint_a:
      a = conj(a)
      transpose_a = True
    if adjoint_b:
      b = conj(b)
      transpose_b = True

    # The sparse path is only valid for the dtypes the SparseMatMul kernel
    # supports (bfloat16 / float32).
    use_sparse_matmul = False
    if a_is_sparse or b_is_sparse:
      sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
      use_sparse_matmul = (
          a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
    if (((a.dtype == dtypes.bfloat16 and b.dtype != dtypes.int8) or
         (b.dtype == dtypes.bfloat16 and a.dtype != dtypes.int8)) and
        a.dtype != b.dtype):
      # matmul currently doesn't handle mixed-precision inputs other than
      # fp16 * int8 which is supported in BatchMatMulV3.
      use_sparse_matmul = True
    if use_sparse_matmul:
      ret = sparse_matmul(
          a,
          b,
          transpose_a=transpose_a,
          transpose_b=transpose_b,
          a_is_sparse=a_is_sparse,
          b_is_sparse=b_is_sparse,
          name=name)
      # sparse_matmul always returns float32, even with
      # bfloat16 inputs. This prevents us from configuring bfloat16 training.
      # casting to bfloat16 also matches non-sparse matmul behavior better.
      if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
        ret = cast(ret, dtypes.bfloat16)
      return ret
    else:
      if use_batch_matmul_v3:
        # BatchMatMulV3 only exposes adjoint flags, so transposition is
        # requested through adj_x / adj_y here.
        adjoint_a = adjoint_a or transpose_a
        adjoint_b = adjoint_b or transpose_b
        return gen_math_ops.batch_mat_mul_v3(
            a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
      else:
        return gen_math_ops.mat_mul(
            a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
@tf_export("linalg.matvec")
@dispatch.add_dispatch_support
def matvec(a,
           b,
           transpose_a=False,
           adjoint_a=False,
           a_is_sparse=False,
           b_is_sparse=False,
           name=None):
  """Multiplies matrix `a` by vector `b`, producing `a` * `b`.

  Implemented by expanding `b` into a column matrix, delegating to `matmul`,
  and squeezing the trailing unit dimension off the result. The matrix `a`
  must, following any transposition, be a tensor of rank >= 2 with
  `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast with
  `shape(b)[:-1]`. Both `a` and `b` must share one of the supported dtypes:
  `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.

  Example:

  ```python
  a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
  b = tf.constant([7, 9, 11], shape=[3])
  c = tf.linalg.matvec(a, b)  # [58, 64]
  ```

  Args:
    a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
      `complex128` and rank > 1.
    b: `Tensor` with same type as `a` and compatible dimensions.
    transpose_a: If `True`, `a` is transposed before multiplication.
    adjoint_a: If `True`, `a` is conjugated and transposed before
      multiplication.
    a_is_sparse: If `True`, `a` is treated as a sparse matrix.
    b_is_sparse: If `True`, `b` is treated as a sparse matrix.
    name: Name for the operation (optional).

  Returns:
    A `Tensor` of the same type as `a` and `b`, where
    `output[..., i] = sum_k (a[..., i, k] * b[..., k])` for all indices `i`
    (when all transpose/adjoint attributes are `False`).

    Note: This is matrix-vector product, not element-wise product.

  Raises:
    ValueError: If transpose_a and adjoint_a are both set to True.
  """
  with ops.name_scope(name, "MatVec", [a, b]) as name:
    # Treat the vector as an n x 1 matrix, multiply, then strip the
    # trailing singleton dimension off again.
    column = array_ops.expand_dims(b, axis=-1)
    product = matmul(
        a,
        column,
        transpose_a=transpose_a,
        adjoint_a=adjoint_a,
        a_is_sparse=a_is_sparse,
        b_is_sparse=b_is_sparse)
    return array_ops.squeeze(product, axis=-1)
# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
# functions (e.g. tf.add).
def matmul_wrapper(a, b, name=None):  # pylint: disable=missing-function-docstring
  # Entry point for the `@` operator: routes through the tensor's own
  # _matmul when numpy-style type promotion is enabled, otherwise through
  # the public matmul above.
  if ops._numpy_style_type_promotion:
    return a._matmul(b)
  return matmul(a, b, name=name)
# Expose matmul's documentation on the operator wrapper.
matmul_wrapper.__doc__ = matmul.__doc__
# Hook `a @ b` (and the reflected form) up to matmul_wrapper.
_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")

# Deprecated alias kept alive for the TF1 `tf.sparse_matmul` endpoint.
sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
    gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)
@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for MatMul.

  Each output element requires `k` multiply-add pairs, where `k` is the
  contraction dimension taken from the left operand's shape.

  Args:
    graph: Graph containing `node`.
    node: NodeDef of a MatMul op whose input and output shapes are fully
      defined.

  Returns:
    An `ops.OpStats` carrying the "flops" count.
  """
  # NOTE: the original also carried `@dispatch.add_dispatch_support`, which is
  # meant for tensor API functions; on a graph-statistics callback it only
  # wrapped the module-level name redundantly, so it is removed here.
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # Contraction length: rows of `a` when transposed, columns otherwise.
  if transpose_a:
    k = int(a_shape[0])
  else:
    k = int(a_shape[1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  output_count = np.prod(output_shape.as_list())
  # One multiply and one add per contraction step per output element.
  return ops.OpStats("flops", (k * output_count * 2))
@ops.RegisterStatistics("BatchMatMul", "flops")
@ops.RegisterStatistics("BatchMatMulV2", "flops")
@ops.RegisterStatistics("BatchMatMulV3", "flops")
def _calc_batch_mat_mul_flops(graph, node):
  """Calculates the compute resources needed for BatchMatMul."""
  transpose_a = node.attr["transpose_a"].b
  a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
  a_shape.assert_is_fully_defined()
  # Contraction length of the left operand: second-to-last dim when
  # transposed, last dim otherwise.
  k = int(a_shape[-2] if transpose_a else a_shape[-1])
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  num_output_elements = np.prod(output_shape.as_list())
  # One multiply and one add per contraction step per output element.
  return ops.OpStats("flops", (k * num_output_elements * 2))
def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  if isinstance(x, ops.IndexedSlices):
    # Already block-sparse; return unchanged.
    return x
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  # NOTE(review): `range` here presumably resolves to this module's
  # tensor-producing range op (tf.range), not the Python builtin -- a plain
  # Python range would not be a usable indices tensor. Confirm against the
  # module's earlier definitions before touching this line.
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs, optimize=True):
  """Convert every element of `inputs` to an IndexedSlices.

  Also homogenizes the index dtypes: if the converted outputs mix int32 and
  int64 indices, the int32 ones are upcast to int64 so all entries agree.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(
      1 for o in outputs if o.indices.dtype == dtypes.int32)
  # All-int64 or all-int32 indices need no homogenization.
  if num_int32 == 0 or num_int32 == len(outputs):
    return outputs
  homogenized = []
  for o in outputs:
    if o.indices.dtype != dtypes.int32:
      homogenized.append(o)
    else:
      # Upcast the minority int32 indices to int64.
      homogenized.append(
          ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                            o.dense_shape))
  return homogenized
@tf_export("math.add_n", "add_n")
@dispatch.add_dispatch_support
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but
  it waits for all of its inputs to be ready before beginning to sum. This
  buffering can raise memory use when inputs become ready at different times,
  since the minimum temporary storage is proportional to the input size
  rather than the output size.

  This op does not [broadcast](
  https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
  its inputs; for broadcasting, use `tf.math.add` (or the `+` operator)
  instead.

  Example:

  ```python
  a = tf.constant([[3, 5], [4, 8]])
  b = tf.constant([[1, 6], [2, 9]])
  tf.math.add_n([a, b, a])  # [[7, 16], [10, 25]]
  ```

  Args:
    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
      same shape and type. `tf.IndexedSlices` objects will be converted into
      dense tensors prior to adding.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  if not inputs or not isinstance(inputs, collections_abc.Iterable):
    raise ValueError("inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
    raise ValueError("inputs must be an iterable of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")

  if len(inputs) == 1:
    # Single input: densify IndexedSlices if needed and skip the AddN op.
    single = inputs[0]
    if isinstance(single, ops.IndexedSlices):
      single = ops.convert_to_tensor(single)
    return array_ops.identity(single, name=name) if name else single
  return gen_math_ops.add_n(inputs, name=name)
@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
  """Returns the element-wise sum of a list of tensors.

  Performs the same operation as `tf.math.add_n`; `shape` and `tensor_dtype`
  may optionally be supplied for shape and type checking, otherwise both are
  inferred from the inputs.

  Example:

  ```python
  a = tf.constant([[1, 2], [3, 4]])
  b = tf.constant([[5, 0], [0, 6]])
  tf.math.accumulate_n([a, b, a])  # [[7, 4], [6, 14]]

  # Explicitly pass shape and type
  tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
  ```

  Args:
    inputs: A list of `Tensor` objects, each with same shape and type.
    shape: Expected shape of elements of `inputs` (optional). Also controls the
      output shape of this op, which may affect type inference in other ops. A
      value of `None` means "infer the input shape from the shapes in `inputs`".
    tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
      means "infer the input dtype from `inputs[0]`".
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
      cannot be inferred.
  """
  input_error_msg = ("inputs must be a list of at least one Tensor with the "
                     "same dtype and shape")
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError(input_error_msg)
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, ops.Tensor) for x in inputs):
    raise ValueError(input_error_msg)
  if not all(x.dtype == inputs[0].dtype for x in inputs):
    raise ValueError(input_error_msg)

  # Merging raises if any input shape is incompatible with `shape` or with
  # the other inputs, so this loop doubles as shape validation.
  merged_shape = (
      tensor_shape.as_shape(shape)
      if shape is not None else tensor_shape.unknown_shape())
  for input_tensor in inputs:
    if isinstance(input_tensor, ops.Tensor):
      merged_shape = merged_shape.merge_with(input_tensor.get_shape())

  # tensor_dtype is for safety only; operator's output type computed in C++
  if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
    raise TypeError("tensor_dtype is {}, but input is of type {}".format(
        tensor_dtype, inputs[0].dtype))

  if len(inputs) == 1:
    # Single input: nothing to add. Only insert an Identity if a name was
    # requested.
    if name is None:
      return inputs[0]
    return array_ops.identity(inputs[0], name=name)
  return add_n(inputs, name=name)
@ops.RegisterGradient("AccumulateNV2")
def _accumulate_n_grad(op, grad):
  """Gradient for AccumulateNV2: fan the incoming gradient out to all inputs.

  AccumulateNV2 does not broadcast, so every input simply receives `grad`
  unchanged (same as the AddN gradient).
  """
  return [grad for _ in op.inputs]
@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
@dispatch.add_dispatch_support
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.

  The sigmoid is defined as $\mathrm{sigmoid}(x) = 1 / (1 + \exp(-x))$ and
  maps $(-\infty, \infty)$ into $(0, 1)$: large positive inputs saturate
  towards 1 and large negative inputs towards 0.

  Example:

  ```python
  x = tf.constant([-100.0, -1.0, 0.0, 1.0, 100.0])
  tf.math.sigmoid(x)  # approximately [0.0, 0.2689, 0.5, 0.7311, 1.0]
  ```

  Args:
    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.sigmoid(tensor, name=name)
@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
  """Computes log sigmoid of `x` element-wise.

  Mathematically `y = log(1 / (1 + exp(-x)))`. For numerical stability the
  implementation uses the identity `y = -softplus(-x)`, which avoids
  overflow in the intermediate `exp` for large-magnitude inputs.

  For large positive `x` the result approaches 0; for large negative `x` it
  approaches `x` itself.

  Example:

  ```python
  x = tf.constant([-100.0, -1.0, 0.0, 100.0])
  tf.math.log_sigmoid(x)  # approximately [-100.0, -1.3133, -0.6931, -0.0]
  ```

  Args:
    x: A Tensor with type `float32` or `float64`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.
  """
  with ops.name_scope(name, "LogSigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    # log(sigmoid(x)) == -softplus(-x), computed without an explicit exp.
    return gen_math_ops.neg(gen_nn_ops.softplus(-tensor), name=name)
@tf_export("math.cumsum", "cumsum")
@dispatch.add_dispatch_support
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Computes the cumulative sum of the tensor `x` along `axis`.

  By default the op is inclusive, so the first element of the output is
  identical to the first element of the input:

  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x)  # [a, a + b, a + b + c, a + b + c + d]
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 2,  6, 12, 20], dtype=int32)>

  The `axis` argument selects which dimension to accumulate over:

  >>> y = tf.constant([[2, 4, 6, 8], [1, 3, 5, 7]])
  >>> tf.cumsum(y, axis=0)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  4,  6,  8],
         [ 3,  7, 11, 15]], dtype=int32)>
  >>> tf.cumsum(y, axis=1)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  6, 12, 20],
         [ 1,  4,  9, 16]], dtype=int32)>

  Setting `exclusive=True` shifts the sums right and starts from zero,
  while `reverse=True` accumulates from the end (more efficient than
  using separate `tf.reverse` ops); the two flags may be combined:

  >>> tf.cumsum(x, exclusive=True)  # [0, a, a + b, a + b + c]
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 0,  2,  6, 12], dtype=int32)>
  >>> tf.cumsum(x, reverse=True)  # [a + b + c + d, b + c + d, c + d, d]
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([20, 18, 14,  8], dtype=int32)>
  >>> tf.cumsum(x, exclusive=True, reverse=True)  # [b + c + d, c + d, d, 0]
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([18, 14,  8,  0], dtype=int32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        input_tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
  """Computes the cumulative product of the tensor `x` along `axis`.

  By default the op is inclusive, so the first element of the output is
  identical to the first element of the input:

  ```python
  tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]
  ```

  Setting `exclusive=True` shifts the products right and starts from one:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]
  ```

  Setting `reverse=True` accumulates from the end, which is more efficient
  than using separate `tf.reverse` ops:

  ```python
  tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]
  ```

  The two flags may be combined:

  ```python
  tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]
  ```

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumprod.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumprod", [x]) as name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumprod(
        input_tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
@dispatch.add_dispatch_support
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.

  By default the op is inclusive: the first element of the output equals
  the first element of the input.

  This is far more numerically stable than the equivalent expression
  `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, though with infinite
  precision both would compute the same result.  Note, however, that for
  a given element it can be less stable than `tf.math.reduce_logsumexp`,
  because the "log-sum-exp trick" is applied differently here.

  `tf.math.reduce_logsumexp` uses:

  ```
  log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
  ```

  That trick cannot be applied directly to every prefix `x[:i]` quickly.
  Instead, this op performs a prefix scan with the pairwise log-add-exp
  operator, which is commutative and associative (up to floating point
  precision):

  ```
  log_add_exp(x, y) = log(exp(x) + exp(y))
                    = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
  ```

  Reducing with this operator produces a different computation tree (logs
  are taken repeatedly rather than once at the end) and the maximum is
  only taken pairwise, so the result is in general slightly less precise.

  Args:
    x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
      `float64`.
    axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
      range `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumulative log-sum-exp.
    reverse: If `True`, performs the cumulative log-sum-exp in the reverse
      direction.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same shape and type as `x`.
  """
  with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
    input_tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumulative_logsumexp(
        input_tensor, axis, exclusive=exclusive, reverse=reverse, name=name)
@tf_export("math.conj", v1=["math.conj", "conj"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
  r"""Returns the complex conjugate of a complex number.

  Given a tensor `x` of complex numbers of the form \\(a + bj\\) (`a` the
  real part, `b` the imaginary part), this returns a tensor whose elements
  are the conjugates \\(a - bj\\):

  >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([-2.25-4.75j,  3.25-5.75j])>

  A real-valued tensor is returned unchanged:

  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.math.conj(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([-2.25,  3.25], dtype=float32)>

  Args:
    x: `Tensor` to conjugate. Must have numeric or variant type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` that is the conjugate of `x` (with the same type).

  Raises:
    TypeError: If `x` is not a numeric tensor.

  @compatibility(numpy)
  Equivalent to numpy.conj.
  @end_compatibility
  """
  # Fast path: a real (floating or integer) Tensor is its own conjugate,
  # so avoid entering a name scope at all.
  if isinstance(x, ops.Tensor):
    if x.dtype.is_floating or x.dtype.is_integer:
      return x
  with ops.name_scope(name, "Conj", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    dt = x.dtype
    if dt.is_complex or dt == dtypes.variant:
      return gen_math_ops.conj(x, name=name)
    if dt.is_floating or dt.is_integer:
      return x
    raise TypeError("Expected numeric or variant tensor, got dtype %r" % dt)
def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
  """
  # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
  # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
  # is fully defined here, which may be faster executing eagerly than running
  # `tf.shape` and then fetching its constant value.
  # Fast path: when both the shape and the axes are statically known, compute
  # the reduced shape with numpy and return a constant, avoiding any graph ops.
  constant_input_shape = tensor_util.constant_value(input_shape)
  if constant_input_shape is not None:
    constant_axes = tensor_util.constant_value(axes)
    if constant_axes is not None:
      constant_axes = np.array(constant_axes, dtype=np.int32)
      constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
      # Set each reduced dimension to 1, as keepdims=True would.
      constant_input_shape[constant_axes] = 1
      return constant_input_shape
  # Dynamic path. Example values are shown in the trailing comments.
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
  axes = cast(axes, dtypes.int32)  # [1, 2]
  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axes into [0, rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # dynamic_stitch scatters ones into the reduced positions: indices from
  # `axes` select the ones, all other positions keep their original size.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.ones(axes_shape, dtype=dtypes.int32)
      ])  # [1, 1]
def _unsorted_segment_N(data, segment_ids, num_segments):
  """Counts the entries in each segment, clamped below at 1.

  Helper for unsorted_segment_mean/_sqrt_n: returns per-segment entry
  counts with zero-entry segments set to 1, shaped so the result
  broadcasts against the segment-reduced `data` for safe division.
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum:
  # summing a tensor of ones counts the occurrences of each segment id.
  ones = array_ops.ones(
      array_ops.shape_internal(segment_ids), dtype=data.dtype)
  counts = gen_math_ops.unsorted_segment_sum(ones, segment_ids, num_segments)
  # Append a singleton dimension for every non-reduced axis of `data` so the
  # counts broadcast against the segment reduction.
  trailing = array_ops.ones(
      [array_ops.rank(data) - array_ops.rank(segment_ids)],
      dtype=num_segments.dtype)
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis], trailing], axis=0)
  counts = array_ops.reshape(counts, broadcastable_shape)
  # Clamp to 1 so empty segments never cause a division by zero.
  return gen_math_ops.maximum(counts, 1)
@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the number of
  occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`. The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
    `num_segments`.
  """
  # NOTE: the original applied @dispatch.add_dispatch_support twice; the
  # duplicate wrapper was redundant and has been removed.
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # N holds max(count, 1) per segment, broadcastable over `data`'s
    # non-reduced axes, so empty segments divide by 1 and yield 0.
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / N
@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the `tf.math.unsorted_segment_sum` operator.
  Additionally to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\(N_i\\) being the
  number of occurrences of id \\(i\\).

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`. The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
    `num_segments`.
  """
  # NOTE: the original applied @dispatch.add_dispatch_support twice; the
  # duplicate wrapper was redundant and has been removed.
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # N holds max(count, 1) per segment, so empty segments yield 0 rather
    # than dividing by zero.
    N = _unsorted_segment_N(data, segment_ids, num_segments)
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / gen_math_ops.sqrt(N)
@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
                       indices,
                       segment_ids,
                       name=None,
                       num_segments=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.  `segment_ids` is allowed to have missing ids, in which case the
  output will be zeros at those indices.  In those cases `num_segments` is
  used to determine the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Two kernels exist: one that infers the output size from the segment ids,
  # and one that takes an explicit num_segments (needed to emit zero rows
  # for trailing missing ids).
  if num_segments is None:
    return gen_math_ops.sparse_segment_sum(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_sum_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
                          indices,
                          segment_ids,
                          num_segments=None,
                          name=None):
  r"""Computes the sum along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_sum`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.  `segment_ids` is allowed to have missing ids, in which case the
  output will be zeros at those indices.  In those cases `num_segments` is
  used to determine the size of the output.

  For example:

  ```python
  c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

  # Select two rows, one segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
  # => [[0 0 0 0]]

  # Select two rows, two segment.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
  # => [[ 1  2  3  4]
  #     [-1 -2 -3 -4]]

  # With missing segment ids.
  tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
                        num_segments=4)
  # => [[ 1  2  3  4]
  #     [ 0  0  0  0]
  #     [-1 -2 -3 -4]
  #     [ 0  0  0  0]]

  # Select all rows, two segments.
  tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
  # => [[0 0 0 0]
  #     [5 6 7 8]]

  # Which is equivalent to:
  tf.math.segment_sum(c, tf.constant([0, 0, 1]))
  ```

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # V2 endpoint: identical to the V1 op except that `num_segments` precedes
  # `name` in the signature; simply forward to the V1 implementation.
  return sparse_segment_sum(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
                        indices,
                        segment_ids,
                        name=None,
                        num_segments=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.  `segment_ids` is allowed to have missing ids, in which case the
  output will be zeros at those indices.  In those cases `num_segments` is
  used to determine the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Dispatch to the explicit-size kernel only when the caller fixed the
  # output size; otherwise the kernel infers it from the last segment id.
  if num_segments is None:
    return gen_math_ops.sparse_segment_mean(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_mean_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
                           indices,
                           segment_ids,
                           num_segments=None,
                           name=None):
  r"""Computes the mean along sparse segments of a tensor.

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
  `data`'s first dimension, selecting a subset of dimension 0, specified by
  `indices`.  `segment_ids` is allowed to have missing ids, in which case the
  output will be zeros at those indices.  In those cases `num_segments` is
  used to determine the size of the output.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # V2 endpoint: same op, reordered keyword signature; forward to V1.
  return sparse_segment_mean(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
                          indices,
                          segment_ids,
                          name=None,
                          num_segments=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  `N` is the size of the segment being reduced.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    name: A name for the operation (optional).
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # Dispatch to the explicit-size kernel only when the caller fixed the
  # output size; otherwise the kernel infers it from the last segment id.
  if num_segments is None:
    return gen_math_ops.sparse_segment_sqrt_n(
        data=data, indices=indices, segment_ids=segment_ids, name=name)
  return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      num_segments=num_segments,
      name=name)
@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
                             indices,
                             segment_ids,
                             num_segments=None,
                             name=None):
  r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

  Read [the section on
  segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
  segment, `N`, divide by `sqrt(N)` instead.

  Args:
    data: A `Tensor` with data that will be assembled in the output.
    indices: A 1-D `Tensor` with indices into `data`. Has same rank as
      `segment_ids`.
    segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
      should be sorted and can be repeated.
    num_segments: An optional int32 scalar. Indicates the size of the output
      `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as data, except for dimension 0 which
    has size `k`, the number of segments specified via `num_segments` or
    inferred for the last element in `segments_ids`.
  """
  # V2 endpoint: same op, reordered keyword signature; forward to V1.
  return sparse_segment_sqrt_n(
      data=data,
      indices=indices,
      segment_ids=segment_ids,
      name=name,
      num_segments=num_segments)
@tf_export("tensordot", "linalg.tensordot")
@dispatch.add_dispatch_support
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes and outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `axes`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a` and
      `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and free_dims_static is either a list
      of integers and None values, or None, representing the inferred
      static shape of the free dimensions
    """
    # Fully static path: shapes and axes are Python values, so the transpose
    # permutation and the 2-D matmul shape can be computed with numpy alone.
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      shape_a = a.get_shape().as_list()
      # Normalize negative axis indices into [0, rank).
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      # "Free" axes are those not being contracted over.
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      # For the second matmul operand the contracted axes come first.
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      # Skip the transpose/reshape ops entirely when they would be identity.
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      # Partially static path: rank and axes are known but some dims are not;
      # record the static free dims for set_shape later, then fall through to
      # dynamic ops.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        # Fully dynamic path: even the rank/axes are tensors; compute the
        # free axes as the set difference of [0, rank) and `axes`.
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      # Same convention as the static path: contracted axes lead for the
      # second (flipped) operand.
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    # Scalar `axes=N`: contract the last N axes of `a` with the first N of `b`.
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError("'axes' must be at least 0.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError("'axes' must not be larger than the number of "
                           "dimensions of tensor %s." % a)
        return (list(xrange(a_shape.ndims - axes,
                            a_shape.ndims)), list(xrange(axes)))
      else:
        # Rank unknown statically: build the axis lists as tensors.
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      # Allow a bare pair of ints as shorthand for single-axis lists.
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s." %
            (len(a_axes), len(b_axes)))
      return a_axes, b_axes
    else:
      # `axes` is a [2, k] tensor: row 0 for `a`, row 1 for `b`.
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    # Flatten both operands into 2-D so the contraction becomes one matmul.
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      # Static shapes: reshape back to the free dims, skipping the op when
      # matmul already produced the right shape.
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      # Dynamic shapes: concatenate the free-dim tensors for the reshape,
      # then attach whatever static shape information is available.
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product
@tf_export("math.polyval")
@dispatch.add_dispatch_support
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)`

  evaluated using Horner's method, i.e.

  ```python
  p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] + x * coeffs[0]))
  ```

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3)  # evaluates 2 * (3**2) + 1 * (3**1) + 0
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as the expression p(x) with usual broadcasting
    rules for element-wise addition and multiplication applied.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  if not isinstance(coeffs, list):
    raise ValueError("Argument coeffs must be list type "
                     "found {}.".format(type(coeffs)))
  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Empty coefficient list: the polynomial is identically zero.
    if not coeffs:
      return array_ops.zeros_like(x, name=name)
    tensor_coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold in one coefficient per step, multiplying the
    # running value by x each time.
    result = tensor_coeffs[0]
    for coeff in tensor_coeffs[1:]:
      result = coeff + result * x
    return result
@tf_export("math.reciprocal_no_nan")
@dispatch.add_dispatch_support
def reciprocal_no_nan(x, name=None):
"""Performs a safe reciprocal operation, element wise.
If a particular element is zero, the reciprocal for that element is
also set to zero.
For example:
```python
x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ]
```
Args:
x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or
`complex128`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
Raises:
TypeError: x must be of a valid dtype.
"""
with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
x = ops.convert_to_tensor(x, name="x")
one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
return gen_math_ops.div_no_nan(one, x, name=scope)
@tf_export("math.xlog1py")
@dispatch.add_dispatch_support
def xlog1py(x, y, name=None):
r"""Compute x * log1p(y).
Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
zero when `x = 0`, no matter what the value of `y` is.
Example:
>>> tf.math.xlog1py(0., 1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.>
>>> tf.math.xlog1py(1., 1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
>>> tf.math.xlog1py(2., 2.)
<tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
>>> tf.math.xlog1py(0., -1.)
<tf.Tensor: shape=(), dtype=float32, numpy=0.>
Args:
x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
name: A name for the operation (optional).
Returns:
`x * log1p(y)`.
@compatibility(scipy)
Equivalent to scipy.special.xlog1py
@end_compatibility
"""
with ops.name_scope(name, "xlog1py", [x]):
return gen_math_ops.xlog1py(x, y)
@tf_export("math.erfinv")
@dispatch.add_dispatch_support
def erfinv(x, name=None):
"""Compute inverse error function.
Given `x`, compute the inverse error function of `x`. This function
is the inverse of `tf.math.erf`.
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse error function of `x`.
"""
with ops.name_scope(name, "erfinv", [x]):
return gen_math_ops.erfinv(x)
@tf_export("math.ndtri")
@dispatch.add_dispatch_support
def ndtri(x, name=None):
"""Compute quantile of Standard Normal.
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse error function of `x`.
"""
with ops.name_scope(name, "ndtri", [x]):
return gen_math_ops.ndtri(x)
@tf_export("math.erfcinv")
@dispatch.add_dispatch_support
def erfcinv(x, name=None):
"""Computes the inverse of complementary error function.
Given `x`, compute the inverse complementary error function of `x`.
This function is the inverse of `tf.math.erfc`, and is defined on
`[0, 2]`.
>>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
<tf.Tensor: shape=(5,), dtype=float32, numpy=
array([ inf, 0.9061935, -0. , -0.4769363, -inf],
dtype=float32)>
Args:
x: `Tensor` with type `float` or `double`.
name: A name for the operation (optional).
Returns:
Inverse complementary error function of `x`.
@compatibility(numpy)
Equivalent to scipy.special.erfcinv
@end_compatibility
"""
with ops.name_scope(name, "erfcinv", [x]):
x = ops.convert_to_tensor(x, name="start")
return -ndtri(0.5 * x) * np.sqrt(0.5)
@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("ceil")
@dispatch.add_dispatch_support
def ceil(x, name=None):
"""Return the ceiling of the input, element-wise.
For example:
>>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
<tf.Tensor: shape=(7,), dtype=float32,
numpy=array([-1., -1., -0., 1., 2., 2., 2.], dtype=float32)>
Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`. `int32`
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
@compatibility(numpy)
Equivalent to np.ceil
@end_compatibility
"""
return gen_math_ops.ceil(x, name)
@tf_export("math.sqrt", "sqrt")
@dispatch.add_dispatch_support
def sqrt(x, name=None): # pylint: disable=redefined-builtin
r"""Computes element-wise square root of the input tensor.
Note: This operation does not support integer types.
>>> x = tf.constant([[4.0], [16.0]])
>>> tf.sqrt(x)
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[2.],
[4.]], dtype=float32)>
>>> y = tf.constant([[-4.0], [16.0]])
>>> tf.sqrt(y)
<tf.Tensor: shape=(2, 1), dtype=float32, numpy=
array([[nan],
[ 4.]], dtype=float32)>
>>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
>>> tf.sqrt(z)
<tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
array([[0.0+1.j],
[4.0+0.j]])>
Note: In order to support complex type, please provide an input tensor
of `complex64` or `complex128`.
Args:
x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
`complex64`, `complex128`
name: A name for the operation (optional).
Returns:
A `tf.Tensor` of same size, type and sparsity as `x`.
"""
return gen_math_ops.sqrt(x, name)
# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
@dispatch.add_dispatch_support
def exp(x, name=None):
r"""Computes exponential of x element-wise. \\(y = e^x\\).
This function computes the exponential of the input tensor element-wise.
i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
\\(e\\) denotes Euler's number and is approximately equal to 2.718281.
Output is positive for any real input.
>>> x = tf.constant(2.0)
>>> tf.math.exp(x)
<tf.Tensor: shape=(), dtype=float32, numpy=7.389056>
>>> x = tf.constant([2.0, 8.0])
>>> tf.math.exp(x)
<tf.Tensor: shape=(2,), dtype=float32,
numpy=array([ 7.389056, 2980.958 ], dtype=float32)>
For complex numbers, the exponential value is calculated as
$$
e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
$$
For `1+1j` the value would be computed as:
$$
e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
$$
>>> x = tf.constant(1 + 1j)
>>> tf.math.exp(x)
<tf.Tensor: shape=(), dtype=complex128,
numpy=(1.4686939399158851+2.2873552871788423j)>
Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`, `complex64`, `complex128`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
@compatibility(numpy)
Equivalent to np.exp
@end_compatibility
"""
return gen_math_ops.exp(x, name)
# pylint: enable=g-docstring-has-escape
@tf_export("math.sobol_sample")
@dispatch.add_dispatch_support
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
"""Generates points from the Sobol sequence.
Creates a Sobol sequence with `num_results` samples. Each sample has dimension
`dim`. Skips the first `skip` samples.
Args:
dim: Positive scalar `Tensor` representing each sample's dimension.
num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
points to return in the output.
skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
initial points of the Sobol sequence to skip. Default value is 0.
dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
`tf.float64`. Defaults to `tf.float32`.
name: (Optional) Python `str` name prefixed to ops created by this function.
Returns:
`Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
"""
with ops.name_scope(name, "sobol", [dim, num_results, skip]):
return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)
@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("rsqrt")
@dispatch.add_dispatch_support
def rsqrt(x, name=None):
"""Computes reciprocal of square root of x element-wise.
For example:
>>> x = tf.constant([2., 0., -2.])
>>> tf.math.rsqrt(x)
<tf.Tensor: shape=(3,), dtype=float32,
numpy=array([0.707, inf, nan], dtype=float32)>
Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `tf.Tensor`. Has the same type as `x`.
"""
return gen_math_ops.rsqrt(x, name)
@tf_export("math.acos", "acos")
@dispatch.add_dispatch_support
def acos(x, name=None):
"""Computes acos of x element-wise.
Provided an input tensor, the `tf.math.acos` operation
returns the inverse cosine of each element of the tensor.
If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.
Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
For example:
>>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
>>> tf.math.acos(x)
<tf.Tensor: shape=(6,), dtype=float32,
numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
dtype=float32)>
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`, `uint8`, `int8`, `int16`, `int32`, `int64`,
`complex64`, `complex128`, `string`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as x.
"""
return gen_math_ops.acos(x, name)
@tf_export("math.floor", "floor")
@dispatch.add_dispatch_support
def floor(x, name=None):
"""Returns element-wise largest integer not greater than x.
Both input range is `(-inf, inf)` and the
output range consists of all integer values.
For example:
>>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
>>> tf.floor(x).numpy()
array([ 1., -2., 5., -3., 0., inf], dtype=float32)
Args:
x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`.
name: A name for the operation (optional).
Returns:
A `Tensor`. Has the same type as x.
"""
return gen_math_ops.floor(x, name)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.