repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
benhoff/reddit_helper | reddit_helper/__main__.py | 1 | 2193 | #!/usr/bin/env python3
import sys
import os
import tempfile
import json
from subprocess import call
import github
import reddit
def _get_settings():
    """Load and return the JSON settings stored next to this module.

    Returns:
        dict: parsed contents of ``settings.json``.

    Exits the process if the settings file does not exist.
    """
    main_dir = os.path.dirname(os.path.realpath(__file__))
    settings_filename = os.path.join(main_dir, 'settings.json')
    if not os.path.exists(settings_filename):
        # The settings file is required; exit non-zero so shells and callers
        # can see the failure.  (The original exited with status 0, which
        # wrongly signalled success.)
        print('Fill out your `settings.json` file!')
        sys.exit(1)
    with open(settings_filename) as settings_file:
        return json.load(settings_file)
def main():
    """Compose a reddit comment from recent GitHub commits and post it.

    Workflow: read settings, fetch the user's recent commits, pre-fill a
    message, let the user edit it in $EDITOR, then ask for confirmation
    before commenting on the user's most recent reddit submission.
    """
    settings = _get_settings()
    reddit_username = settings['reddit_username']
    github_username = settings['github_username']
    latest_commits = github.get_most_recent_commits(
        github_username, settings['num_hours_interested_in'])

    editor = os.environ.get('EDITOR', 'vim')  # fall back to vim

    initial_message = 'So during this stream I\n\n'
    and_message = 'Also,\n\n'
    num_of_repos_changed = len(latest_commits)
    for index, (repo_name, commit) in enumerate(latest_commits.items()):
        github_link = "https://github.com/{}".format(repo_name)
        # NOTE: fixed user-visible typo "directroy" -> "directory".
        section = ("[here's the directory at the end of this stream]"
                   "({github_link}/tree/{commit})\n\n"
                   "[and here's just the repo]({github_link})\n\n").format(
                       github_link=github_link, commit=commit['sha'])
        initial_message += section
        if index < num_of_repos_changed - 1:
            initial_message += and_message

    comment = ''
    with tempfile.NamedTemporaryFile('w+', suffix=".tmp") as tempfile_:
        tempfile_.write(initial_message)
        tempfile_.flush()
        # Block until the user closes the editor, then read back the result.
        call([editor, tempfile_.name])
        tempfile_.seek(0)
        comment = tempfile_.read()

    print("One second......")
    reddit_submission = reddit.get_most_recent_submission(
        reddit_username, settings['reddit_password'])
    _ = os.system('clear')
    print('Commenting in: {}'.format(reddit_submission.title))
    user_prompt = input('Submit? (Y/N): ')
    if user_prompt.lower() in ['yes', 'y', 'yeppers']:
        reddit_submission.add_comment(comment)
        print('Commented!')


if __name__ == '__main__':
    # Guard so importing this module does not immediately post to reddit.
    # Under ``python -m reddit_helper`` __main__.py still runs main().
    main()
| gpl-3.0 |
kingvuplus/Test-OBH | keyids.py | 2 | 5474 | KEYIDS = {
"KEY_RESERVED": 0,
"KEY_ESC": 1,
"KEY_1": 2,
"KEY_2": 3,
"KEY_3": 4,
"KEY_4": 5,
"KEY_5": 6,
"KEY_6": 7,
"KEY_7": 8,
"KEY_8": 9,
"KEY_9": 10,
"KEY_0": 11,
"KEY_MINUS": 12,
"KEY_EQUAL": 13,
"KEY_BACKSPACE": 14,
"KEY_TAB": 15,
"KEY_Q": 16,
"KEY_W": 17,
"KEY_E": 18,
"KEY_R": 19,
"KEY_T": 20,
"KEY_Y": 21,
"KEY_U": 22,
"KEY_I": 23,
"KEY_O": 24,
"KEY_P": 25,
"KEY_LEFTBRACE": 26,
"KEY_RIGHTBRACE": 27,
"KEY_ENTER": 28,
"KEY_LEFTCTRL": 29,
"KEY_A": 30,
"KEY_S": 31,
"KEY_D": 32,
"KEY_F": 33,
"KEY_G": 34,
"KEY_H": 35,
"KEY_J": 36,
"KEY_K": 37,
"KEY_L": 38,
"KEY_SEMICOLON": 39,
"KEY_APOSTROPHE": 40,
"KEY_GRAVE": 41,
"KEY_LEFTSHIFT": 42,
"KEY_BACKSLASH": 43,
"KEY_Z": 44,
"KEY_X": 45,
"KEY_C": 46,
"KEY_V": 47,
"KEY_B": 48,
"KEY_N": 49,
"KEY_M": 50,
"KEY_COMMA": 51,
"KEY_DOT": 52,
"KEY_SLASH": 53,
"KEY_RIGHTSHIFT": 54,
"KEY_KPASTERISK": 55,
"KEY_LEFTALT": 56,
"KEY_SPACE": 57,
"KEY_CAPSLOCK": 58,
"KEY_F1": 59,
"KEY_F2": 60,
"KEY_F3": 61,
"KEY_F4": 62,
"KEY_F5": 63,
"KEY_F6": 64,
"KEY_F7": 65,
"KEY_F8": 66,
"KEY_F9": 67,
"KEY_F10": 68,
"KEY_NUMLOCK": 69,
"KEY_SCROLLLOCK": 70,
"KEY_KP7": 71,
"KEY_KP8": 72,
"KEY_KP9": 73,
"KEY_KPMINUS": 74,
"KEY_KP4": 75,
"KEY_KP5": 76,
"KEY_KP6": 77,
"KEY_KPPLUS": 78,
"KEY_KP1": 79,
"KEY_KP2": 80,
"KEY_KP3": 81,
"KEY_KP0": 82,
"KEY_KPDOT": 83,
"KEY_103RD": 84,
"KEY_F13": 85,
"KEY_102ND": 86,
"KEY_F11": 87,
"KEY_F12": 88,
"KEY_F14": 89,
"KEY_F15": 90,
"KEY_F16": 91,
"KEY_F17": 92,
"KEY_F18": 93,
"KEY_F19": 94,
"KEY_F20": 95,
"KEY_KPENTER": 96,
"KEY_RIGHTCTRL": 97,
"KEY_KPSLASH": 98,
"KEY_SYSRQ": 99,
"KEY_RIGHTALT": 100,
"KEY_LINEFEED": 101,
"KEY_HOME": 102,
"KEY_UP": 103,
"KEY_PAGEUP": 104,
"KEY_LEFT": 105,
"KEY_RIGHT": 106,
"KEY_END": 107,
"KEY_DOWN": 108,
"KEY_PAGEDOWN": 109,
"KEY_INSERT": 110,
"KEY_DELETE": 111,
"KEY_MACRO": 112,
"KEY_MUTE": 113,
"KEY_VOLUMEDOWN": 114,
"KEY_VOLUMEUP": 115,
"KEY_POWER": 116,
"KEY_KPEQUAL": 117,
"KEY_KPPLUSMINUS": 118,
"KEY_PAUSE": 119,
"KEY_F21": 120,
"KEY_F22": 121,
"KEY_F23": 122,
"KEY_F24": 123,
"KEY_KPCOMMA": 124,
"KEY_LEFTMETA": 125,
"KEY_RIGHTMETA": 126,
"KEY_COMPOSE": 127,
"KEY_STOP": 128,
"KEY_AGAIN": 129,
"KEY_PROPS": 130,
"KEY_UNDO": 131,
"KEY_FRONT": 132,
"KEY_COPY": 133,
"KEY_OPEN": 134,
"KEY_PASTE": 135,
"KEY_FIND": 136,
"KEY_CUT": 137,
"KEY_HELP": 138,
"KEY_MENU": 139,
"KEY_CALC": 140,
"KEY_SETUP": 141,
"KEY_SLEEP": 142,
"KEY_WAKEUP": 143,
"KEY_FILE": 144,
"KEY_SENDFILE": 145,
"KEY_DELETEFILE": 146,
"KEY_XFER": 147,
"KEY_PROG1": 148,
"KEY_PROG2": 149,
"KEY_WWW": 150,
"KEY_MSDOS": 151,
"KEY_COFFEE": 152,
"KEY_DIRECTION": 153,
"KEY_CYCLEWINDOWS": 154,
"KEY_MAIL": 155,
"KEY_BOOKMARKS": 156,
"KEY_COMPUTER": 157,
"KEY_BACK": 158,
"KEY_FORWARD": 159,
"KEY_CLOSECD": 160,
"KEY_EJECTCD": 161,
"KEY_EJECTCLOSECD": 162,
"KEY_NEXTSONG": 163,
"KEY_PLAYPAUSE": 164,
"KEY_PREVIOUSSONG": 165,
"KEY_STOPCD": 166,
"KEY_RECORD": 167,
"KEY_REWIND": 168,
"KEY_PHONE": 169,
"KEY_ISO": 170,
"KEY_CONFIG": 171,
"KEY_HOMEPAGE": 172,
"KEY_REFRESH": 173,
"KEY_EXIT": 174,
"KEY_MOVE": 175,
"KEY_EDIT": 176,
"KEY_SCROLLUP": 177,
"KEY_SCROLLDOWN": 178,
"KEY_KPLEFTPAREN": 179,
"KEY_KPRIGHTPAREN": 180,
"KEY_INTL1": 181,
"KEY_INTL2": 182,
"KEY_INTL3": 183,
"KEY_INTL4": 184,
"KEY_INTL5": 185,
"KEY_INTL6": 186,
"KEY_INTL7": 187,
"KEY_INTL8": 188,
"KEY_INTL9": 189,
"KEY_LANG1": 190,
"KEY_LANG2": 191,
"KEY_LANG3": 192,
"KEY_LANG4": 193,
"KEY_LANG5": 194,
"KEY_LANG6": 195,
"KEY_LANG7": 196,
"KEY_LANG8": 197,
"KEY_LANG9": 198,
"KEY_PLAYCD": 200,
"KEY_PAUSECD": 201,
"KEY_PROG3": 202,
"KEY_PROG4": 203,
"KEY_SUSPEND": 205,
"KEY_CLOSE": 206,
"KEY_PLAY": 207,
"KEY_FASTFORWARD": 208,
"KEY_BASSBOOST": 209,
"KEY_PRINT": 210,
"KEY_HP": 211,
"KEY_CAMERA": 212,
"KEY_SOUND": 213,
"KEY_QUESTION": 214,
"KEY_EMAIL": 215,
"KEY_CHAT": 216,
"KEY_SEARCH": 217,
"KEY_CONNECT": 218,
"KEY_FINANCE": 219,
"KEY_SPORT": 220,
"KEY_SHOP": 221,
"KEY_ALTERASE": 222,
"KEY_CANCEL": 223,
"KEY_BRIGHTNESSDOWN": 224,
"KEY_BRIGHTNESSUP": 225,
"KEY_MEDIA": 226,
"KEY_VMODE": 227,
"KEY_UNKNOWN": 240,
"KEY_OK": 352,
"KEY_SELECT": 353,
"KEY_GOTO": 354,
"KEY_CLEAR": 355,
"KEY_POWER2": 356,
"KEY_OPTION": 357,
"KEY_INFO": 358,
"KEY_TIME": 359,
"KEY_VENDOR": 360,
"KEY_ARCHIVE": 361,
"KEY_PROGRAM": 362,
"KEY_CHANNEL": 363,
"KEY_FAVORITES": 364,
"KEY_EPG": 365,
"KEY_PVR": 366,
"KEY_MHP": 367,
"KEY_LANGUAGE": 368,
"KEY_TITLE": 369,
"KEY_SUBTITLE": 370,
"KEY_ANGLE": 371,
"KEY_ZOOM": 372,
"KEY_MODE": 373,
"KEY_KEYBOARD": 374,
"KEY_SCREEN": 375,
"KEY_PC": 376,
"KEY_TV": 377,
"KEY_TV2": 378,
"KEY_VCR": 379,
"KEY_VCR2": 380,
"KEY_SAT": 381,
"KEY_SAT2": 382,
"KEY_CD": 383,
"KEY_TAPE": 384,
"KEY_RADIO": 385,
"KEY_TUNER": 386,
"KEY_PLAYER": 387,
"KEY_TEXT": 388,
"KEY_DVD": 389,
"KEY_AUX": 390,
"KEY_MP3": 391,
"KEY_AUDIO": 392,
"KEY_VIDEO": 393,
"KEY_DIRECTORY": 394,
"KEY_LIST": 395,
"KEY_MEMO": 396,
"KEY_CALENDAR": 397,
"KEY_RED": 398,
"KEY_GREEN": 399,
"KEY_YELLOW": 400,
"KEY_BLUE": 401,
"KEY_CHANNELUP": 402,
"KEY_CHANNELDOWN": 403,
"KEY_FIRST": 404,
"KEY_LAST": 405,
"KEY_AB": 406,
"KEY_NEXT": 407,
"KEY_RESTART": 408,
"KEY_SLOW": 409,
"KEY_SHUFFLE": 410,
"KEY_BREAK": 411,
"KEY_PREVIOUS": 412,
"KEY_DIGITS": 413,
"KEY_TEEN": 414,
"KEY_TWEN": 415,
"KEY_CONTEXT_MENU": 438,
"KEY_DEL_EOL": 448,
"KEY_DEL_EOS": 449,
"KEY_INS_LINE": 450,
"KEY_DEL_LINE": 451,
"KEY_ASCII": 510,
"KEY_MAX": 511,
"BTN_0": 256,
"BTN_1": 257,
"KEY_TVSAT": 512,
"KEY_PICASA": 513,
"KEY_SHOUTCAST": 514,
"KEY_YOUTUBE": 515,
"KEY_SPARK": 516,
"KEY_RECALL": 517,
"KEY_PLAYMODE": 518,
"KEY_USB": 519,
"KEY_PORTAL": 520,
"KEY_FAST": 521,
}
| gpl-2.0 |
dalegregory/odoo | addons/account/test/test_parent_structure.py | 432 | 2108 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
#
# TODO: move this in a YAML test with !python tag
#
# Sanity-check the nested-set (parent_left/parent_right) representation of
# the account tree over XML-RPC.  Python 2 only (xmlrpclib); requires a
# running OpenERP server on localhost:8069.
import xmlrpclib
DB = 'training3'
USERID = 1
USERPASS = 'admin'
sock = xmlrpclib.ServerProxy('http://%s:%s/xmlrpc/object' % ('localhost',8069))
# Fetch every account id, then read the nested-set columns for each.
ids = sock.execute(DB, USERID, USERPASS, 'account.account', 'search', [], {})
account_lists = sock.execute(DB, USERID, USERPASS, 'account.account', 'read', ids, ['parent_id','parent_left','parent_right'])
# Index records by id for parent lookups below.
accounts = dict(map(lambda x: (x['id'],x), account_lists))
for a in account_lists:
    if a['parent_id']:
        # A child's interval must lie strictly inside its parent's interval.
        assert a['parent_left'] > accounts[a['parent_id'][0]]['parent_left']
        assert a['parent_right'] < accounts[a['parent_id'][0]]['parent_right']
    assert a['parent_left'] < a['parent_right']
    for a2 in account_lists:
        # No two intervals may partially overlap (nested or disjoint only).
        assert not ((a2['parent_right']>a['parent_left']) and
            (a2['parent_left']<a['parent_left']) and
            (a2['parent_right']<a['parent_right']))
        if a2['parent_id']==a['id']:
            assert (a2['parent_left']>a['parent_left']) and (a2['parent_right']<a['parent_right'])
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yzl0083/orange | Orange/OrangeWidgets/OWTreeViewer2D.py | 6 | 26897 | import orange, orngTree, OWGUI, OWColorPalette
from OWWidget import *
import Orange
from PyQt4.QtCore import *
from PyQt4.QtGui import *
DefDroppletRadiust=7
DefNodeWidth=30
DefNodeHeight=20
DefDroppletBrush=QBrush(Qt.darkGray)
class graph_node(object):
    """Mixin giving an object a set of graph edges.

    Iterating over a node yields the ``node2`` endpoint of each attached
    edge, i.e. the node's (outgoing) neighbours.
    """

    def __init__(self, *args, **kwargs):
        # The caller may pass a pre-built edge set; default is empty.
        self.edges = kwargs.get("edges", set())

    def graph_edges(self):
        """Return the set of edges attached to this node."""
        return self.edges

    def graph_add_edge(self, edge):
        """Attach *edge* to this node."""
        self.edges.add(edge)

    def __iter__(self):
        return iter(edge.node2 for edge in self.edges)

    def graph_nodes(self, type=1):
        # Placeholder kept for interface compatibility; intentionally a no-op.
        pass
class graph_edge(object):
    """Directed edge between two graph_node-like objects.

    On construction the edge registers itself with both endpoints via
    their ``graph_add_edge`` method.
    """

    def __init__(self, node1=None, node2=None, type=1):
        self.node1 = node1
        self.node2 = node2
        self.type = type
        # Make both endpoints aware of this edge.
        node1.graph_add_edge(self)
        node2.graph_add_edge(self)
class GraphicsDroplet(QGraphicsEllipseItem):
    """Small clickable ellipse hanging under a tree node.

    Clicking it toggles the parent node's open/closed state; hovering
    gives visual feedback by changing the brush.
    """

    def __init__(self, *args):
        QGraphicsEllipseItem.__init__(self, *args)
        self.setAcceptHoverEvents(True)
        self.setAcceptedMouseButtons(Qt.LeftButton)
        self.setBrush(QBrush(Qt.gray))

    def hoverEnterEvent(self, event):
        QGraphicsEllipseItem.hoverEnterEvent(self, event)
        # Darken while hovered.
        self.setBrush(QBrush(QColor(100, 100, 100)))
        self.update()

    def hoverLeaveEvent(self, event):
        QGraphicsEllipseItem.hoverLeaveEvent(self, event)
        # NOTE(review): restores a lighter gray (200) than the initial
        # Qt.gray brush set in __init__ — confirm this asymmetry is intended.
        self.setBrush(QBrush(QColor(200, 200, 200)))
        self.update()

    def mousePressEvent(self, event):
        QGraphicsEllipseItem.mousePressEvent(self, event)
        # Toggle the owning node's subtree and re-run the scene layout.
        self.parentItem().setOpen(not self.parentItem().isOpen)
        if self.scene():
            self.scene().fixPos()
def luminance(color):
    """Return the `luminance`_ (sRGB color space) of the color.

    .. _luminance: http://en.wikipedia.org/wiki/Luminance_(colorimetry)
    """
    red, green, blue, _alpha = color.getRgb()
    # ITU-R BT.709 relative-luminance coefficients.
    return 0.2126 * red + 0.7152 * green + 0.0722 * blue
class TextTreeNode(QGraphicsTextItem, graph_node):
"""A Tree node with text.
"""
def setBorderRadius(self, r):
if self._borderRadius != r:
self.prepareGeometryChange()
self._borderRadius = r
self.update()
def borderRadius(self):
return getattr(self, "_borderRadius", 0)
borderRadius = pyqtProperty("int", fget=borderRadius, fset=setBorderRadius,
doc="Rounded rect's border radius")
def setBackgroundBrush(self, brush):
"""Set node's background brush.
"""
if self._backgroundBrush != brush:
self._backgroundBrush = QBrush(brush)
color = brush.color()
if luminance(color) > 30:
self.setDefaultTextColor(Qt.black)
else:
self.setDefaultTextColor(Qt.white)
self.update()
def backgroundBrush(self):
"""Return the node's background brush.
"""
brush = getattr(self, "_backgroundBrush",
getattr(self.scene(), "defaultItemBrush", Qt.NoBrush))
return QBrush(brush)
backgroundBrush = pyqtProperty("QBrush", fget=backgroundBrush,
fset=setBackgroundBrush,
doc="Background brush")
def setTruncateText(self, truncate):
"""Set the truncateText to truncate. If true the text will
be truncated to fit inside the node's box, otherwise it will
overflow.
"""
if self._truncateText != truncate:
self._truncateText = truncate
self.updateContents()
def truncateText(self):
return getattr(self, "_truncateText", False)
truncateText = pyqtProperty("bool", fget=truncateText,
fset=setTruncateText,
doc="Truncate text")
def __init__(self, tree, parent, *args, **kwargs):
QGraphicsTextItem.__init__(self, *args)
graph_node.__init__(self, **kwargs)
self._borderRadius = 0
self._backgroundBrush = None
self._truncateText = False
self.tree = tree
self.parent = parent
font = self.font()
font.setPointSize(10)
self.setFont(font)
self.droplet = GraphicsDroplet(-5, 0, 10, 10, self, self.scene())
self.droplet.setPos(self.rect().center().x(), self.rect().height())
self.connect(self.document(), SIGNAL("contentsChanged()"),
self.updateContents)
self.isOpen = True
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
def setHtml(self, html):
if qVersion() < "4.5":
html = html.replace("<hr>", "<hr width=200>") #bug in Qt4.4 (need width = 200)
return QGraphicsTextItem.setHtml(self, "<body>" + html + "</body>")
def updateContents(self):
if getattr(self, "_rect", QRectF()).isValid() and not self.truncateText:
self.setTextWidth(self._rect.width())
else:
self.setTextWidth(-1)
self.setTextWidth(self.document().idealWidth())
self.droplet.setPos(self.rect().center().x(), self.rect().height())
self.droplet.setVisible(bool(self.branches))
def setRect(self, rect):
self.prepareGeometryChange()
rect = QRectF() if rect is None else rect
self._rect = rect
self.updateContents()
self.update()
def shape(self):
path = QPainterPath()
path.addRect(self.boundingRect())
return path
def rect(self):
if self.truncateText and getattr(self, "_rect", QRectF()).isValid():
return self._rect
else:
return QRectF(QPointF(0,0), self.document().size()) | getattr(self, "_rect", QRectF(0, 0, 1, 1))
def boundingRect(self):
if self.truncateText and getattr(self, "_rect", QRectF()).isValid():
return self._rect
else:
return QGraphicsTextItem.boundingRect(self)
@property
def branches(self):
return [edge.node2 for edge in self.graph_edges() if edge.node1 is self]
def paint(self, painter, option, widget=0):
painter.save()
painter.setBrush(self.backgroundBrush)
rect = self.rect()
painter.drawRoundedRect(rect, self.borderRadius, self.borderRadius)
painter.restore()
painter.setClipRect(rect)
return QGraphicsTextItem.paint(self, painter, option, widget)
def graph_traverse_bf(nodes, level=None, test=None):
    """Breadth-first traversal starting from *nodes*, yielding each node once.

    A node's children are obtained by iterating the node itself.  A node is
    expanded only when *test* is None (or otherwise falsy) or ``test(node)``
    returns true.  *level* is accepted for interface compatibility but is
    not used.
    """
    seen = set()
    pending = list(nodes)
    while pending:
        current = pending.pop(0)
        if current in seen:
            continue
        yield current
        seen.add(current)
        if not test or test(current):
            pending.extend(list(current))
class GraphicsNode(TextTreeNode):
    """Tree node item that can collapse/expand its subtree and draws its
    own selection highlight instead of Qt's default rectangle."""

    def setOpen(self, open, level=1):
        """Show (open=True) or hide every node reachable through open
        descendants of this node."""
        self.isOpen = open
        for node in graph_traverse_bf(self, test=lambda node: node.isOpen):
            if node is not self:
                node.setVisible(open)

    def itemChange(self, change, value):
        # Keep attached edges in sync when the node moves or is shown/hidden.
        if change == QGraphicsItem.ItemPositionHasChanged:
            self.updateEdge()
        elif change == QGraphicsItem.ItemVisibleHasChanged:
            self.updateEdge()
        return TextTreeNode.itemChange(self, change, value)

    def updateEdge(self):
        for edge in self.edges:
            if edge.node1 is self:
                # Defer so Qt finishes processing the current change first.
                QTimer.singleShot(0, edge.updateEnds)
            elif edge.node2 is self:
                # Incoming edge is visible exactly when this node is.
                edge.setVisible(self.isVisible())

    def edgeInPoint(self, edge):
        # Top-centre of this node, expressed in the edge's coordinates.
        return edge.mapFromItem(self, QPointF(self.rect().center().x(), self.rect().y()))

    def edgeOutPoint(self, edge):
        # Centre of the droplet, expressed in the edge's coordinates.
        return edge.mapFromItem(self.droplet, self.droplet.rect().center())

    def paint(self, painter, option, widget=0):
        if self.isSelected():
            # Clear Qt's built-in selection flag; we draw our own highlight.
            option.state = option.state.__xor__(QStyle.State_Selected)
        if self.isSelected():
            rect = self.rect()
            painter.save()
#            painter.setBrush(QBrush(QColor(100, 0, 255, 100)))
            painter.setBrush(QBrush(QColor(125, 162, 206, 192)))
            painter.drawRoundedRect(rect.adjusted(-4, -4, 4, 4), self.borderRadius, self.borderRadius)
            painter.restore()
        TextTreeNode.paint(self, painter, option, widget)

    def boundingRect(self):
        # Enlarged so the selection highlight (drawn 4px outside) repaints.
        return TextTreeNode.boundingRect(self).adjusted(-5, -5, 5, 5)

    def mousePressEvent(self, event):
        return TextTreeNode.mousePressEvent(self, event)
class GraphicsEdge(QGraphicsLineItem, graph_edge):
    """Line item connecting a parent node's droplet to a child node."""

    def __init__(self, *args, **kwargs):
        QGraphicsLineItem.__init__(self, *args)
        graph_edge.__init__(self, **kwargs)
        # Draw edges behind the node items.
        self.setZValue(-30)

    def updateEnds(self):
        """Re-anchor the line to the current endpoint positions."""
        try:
            self.prepareGeometryChange()
            self.setLine(QLineF(self.node1.edgeOutPoint(self), self.node2.edgeInPoint(self)))
        except RuntimeError: # this gets called through QTimer.singleShot and might already be deleted by Qt
            pass
class TreeGraphicsView(QGraphicsView):
    """Antialiased graphics view for the tree scene.

    Emits ``resized(QSize)`` on resize so the navigator widget can keep
    its overview polygon in sync.  *master* is accepted for interface
    compatibility but not stored.
    """

    def __init__(self, master, scene, *args):
        QGraphicsView.__init__(self, scene, *args)
#        try:
#            import PyQt4.QtOpenGL as gl
#            fmt = gl.QGLFormat()
#            fmt.setSampleBuffers(True)
#            fmt.setSamples(32)
#            print fmt.sampleBuffers()
#            self.setViewport(gl.QGLWidget(fmt, self))
#        except Exception, ex:
#            print ex
        self.viewport().setMouseTracking(True)
        self.setFocusPolicy(Qt.WheelFocus)
        self.setRenderHint(QPainter.Antialiasing)
        self.setRenderHint(QPainter.TextAntialiasing)
        self.setRenderHint(QPainter.HighQualityAntialiasing)

    def resizeEvent(self, event):
        QGraphicsView.resizeEvent(self, event)
        # Notify listeners (e.g. TreeNavigator) of the new size.
        self.emit(SIGNAL("resized(QSize)"), self.size())
class TreeGraphicsScene(QGraphicsScene):
def __init__(self, master, *args):
QGraphicsScene.__init__(self, *args)
self.HSpacing=10
self.VSpacing=10
self.master=master
self.nodeList=[]
self.edgeList=[]
def fixPos(self, node=None, x=10, y=10):
self.gx=x
self.gy=y
if not node:
if self.nodes():
node = [node for node in self.nodes() if not node.parent][0]
else:
return
if not x or not y: x, y= self.HSpacing, self.VSpacing
self._fixPos(node,x,y)
self.setSceneRect(reduce(QRectF.united, [item.sceneBoundingRect() for item in self.items() if item.isVisible()], QRectF(0, 0, 10, 10)).adjusted(0, 0, 100, 100))
# print self.sceneRect()
self.update()
def _fixPos(self, node, x, y):
ox=x
def bRect(node):
return node.boundingRect() | node.childrenBoundingRect()
if node.branches and node.isOpen:
for n in node.branches:
(x,ry)=self._fixPos(n,x,y+self.VSpacing + bRect(node).height())
x=(node.branches[0].pos().x() + node.branches[-1].pos().x())/2
# print x,y
node.setPos(x,y)
for e in node.edges:
e.updateEnds()
else:
# print self.gx, y
node.setPos(self.gx,y)
self.gx+=self.HSpacing + bRect(node).width()
x+=self.HSpacing + bRect(node).width()
self.gy=max([y,self.gy])
return (x,y)
def mouseMoveEvent(self,event):
return QGraphicsScene.mouseMoveEvent(self, event)
def mousePressEvent(self, event):
return QGraphicsScene.mousePressEvent(self, event)
def edges(self):
return [item for item in self.items() if isinstance(item, graph_edge)]
def nodes(self):
return [item for item in self.items() if isinstance(item, graph_node)]
class TreeNavigator(QGraphicsView):
def __init__(self, masterView, *args):
QGraphicsView.__init__(self)
self.masterView = masterView
self.setScene(self.masterView.scene())
self.connect(self.scene(), SIGNAL("sceneRectChanged(QRectF)"), self.updateSceneRect)
self.connect(self.masterView, SIGNAL("resized(QSize)"), self.updateView)
self.setRenderHint(QPainter.Antialiasing)
def mousePressEvent(self, event):
if event.buttons() & Qt.LeftButton:
self.masterView.centerOn(self.mapToScene(event.pos()))
self.updateView()
return QGraphicsView.mousePressEvent(self, event)
def mouseMoveEvent(self, event):
if event.buttons() & Qt.LeftButton:
self.masterView.centerOn(self.mapToScene(event.pos()))
self.updateView()
return QGraphicsView.mouseMoveEvent(self, event)
def resizeEvent(self, event):
QGraphicsView.resizeEvent(self, event)
self.updateView()
#
def resizeView(self):
self.updateView()
def updateSceneRect(self, rect):
QGraphicsView.updateSceneRect(self, rect)
self.updateView()
def updateView(self, *args):
if self.scene():
self.fitInView(self.scene().sceneRect())
def paintEvent(self, event):
QGraphicsView.paintEvent(self, event)
painter = QPainter(self.viewport())
painter.setBrush(QColor(100, 100, 100, 100))
painter.setRenderHints(self.renderHints())
painter.drawPolygon(self.viewPolygon())
def viewPolygon(self):
return self.mapFromScene(self.masterView.mapToScene(self.masterView.viewport().rect()))
class OWTreeViewer2D(OWWidget):
settingsList = ["ZoomAutoRefresh", "AutoArrange", "ToolTipsEnabled",
"Zoom", "VSpacing", "HSpacing", "MaxTreeDepth", "MaxTreeDepthB",
"LineWidth", "LineWidthMethod",
"MaxNodeWidth", "LimitNodeWidth", "NodeInfo", "NodeColorMethod",
"TruncateText"]
def __init__(self, parent=None, signalManager = None, name='TreeViewer2D'):
OWWidget.__init__(self, parent, signalManager, name, wantGraph=True)
self.root = None
self.selectedNode = None
self.inputs = [("Classification Tree", Orange.classification.tree.TreeClassifier, self.ctree)]
self.outputs = [("Examples", ExampleTable)]
#set default settings
self.ZoomAutoRefresh = 0
self.AutoArrange = 0
self.ToolTipsEnabled = 1
self.MaxTreeDepth = 5; self.MaxTreeDepthB = 0
self.LineWidth = 5; self.LineWidthMethod = 2
self.NodeSize = 5
self.MaxNodeWidth = 150
self.LimitNodeWidth = True
self.NodeInfo = [0, 1]
self.Zoom = 5
self.VSpacing = 5; self.HSpacing = 5
self.TruncateText = 1
self.loadSettings()
self.NodeInfo.sort()
# Changed when the GUI was simplified - added here to override any saved settings
self.VSpacing = 1; self.HSpacing = 1
self.ToolTipsEnabled = 1
self.LineWidth = 15 # Also reset when the LineWidthMethod is changed!
# GUI definition
# self.tabs = OWGUI.tabWidget(self.controlArea)
# GENERAL TAB
# GeneralTab = OWGUI.createTabPage(self.tabs, "General")
# GeneralTab = TreeTab = OWGUI.createTabPage(self.tabs, "Tree")
# NodeTab = OWGUI.createTabPage(self.tabs, "Node")
GeneralTab = NodeTab = TreeTab = self.controlArea
self.infBox = OWGUI.widgetBox(GeneralTab, 'Info', sizePolicy = QSizePolicy(QSizePolicy.Minimum , QSizePolicy.Fixed ), addSpace=True)
self.infoa = OWGUI.widgetLabel(self.infBox, 'No tree.')
self.infob = OWGUI.widgetLabel(self.infBox, " ")
self.sizebox = OWGUI.widgetBox(GeneralTab, "Size", addSpace=True)
OWGUI.hSlider(self.sizebox, self, 'Zoom', label='Zoom', minValue=1, maxValue=10, step=1,
callback=self.toggleZoomSlider, ticks=1)
OWGUI.separator(self.sizebox)
cb, sb = OWGUI.checkWithSpin(self.sizebox, self, "Max node width:", 50, 200, "LimitNodeWidth", "MaxNodeWidth",
tooltip="Limit the width of tree nodes",
checkCallback=self.toggleNodeSize,
spinCallback=self.toggleNodeSize,
step=10)
b = OWGUI.checkBox(OWGUI.indentedBox(self.sizebox, sep=OWGUI.checkButtonOffsetHint(cb)), self, "TruncateText", "Truncate text", callback=self.toggleTruncateText)
cb.disables.append(b)
cb.makeConsistent()
OWGUI.checkWithSpin(self.sizebox, self, 'Max tree depth:', 1, 20, 'MaxTreeDepthB', "MaxTreeDepth",
tooltip='Defines the depth of the tree displayed',
checkCallback=self.toggleTreeDepth,
spinCallback=self.toggleTreeDepth)
self.edgebox = OWGUI.widgetBox(GeneralTab, "Edge Widths", addSpace=True)
OWGUI.comboBox(self.edgebox, self, 'LineWidthMethod',
items=['Equal width', 'Root node', 'Parent node'],
callback=self.toggleLineWidth)
# Node information
grid = QGridLayout()
grid.setContentsMargins(*self.controlArea.layout().getContentsMargins())
navButton = OWGUI.button(self.controlArea, self, "Navigator", self.toggleNavigator, debuggingEnabled = 0, addToLayout=False)
# findbox = OWGUI.widgetBox(self.controlArea, orientation = "horizontal")
self.centerRootButton=OWGUI.button(self.controlArea, self, "Find Root", addToLayout=False,
callback=lambda :self.rootNode and \
self.sceneView.centerOn(self.rootNode.x(), self.rootNode.y()))
self.centerNodeButton=OWGUI.button(self.controlArea, self, "Find Selected", addToLayout=False,
callback=lambda :self.selectedNode and \
self.sceneView.centerOn(self.selectedNode.scenePos()))
grid.addWidget(navButton, 0, 0, 1, 2)
grid.addWidget(self.centerRootButton, 1, 0)
grid.addWidget(self.centerNodeButton, 1, 1)
self.leftWidgetPart.layout().insertLayout(1, grid)
self.NodeTab=NodeTab
self.TreeTab=TreeTab
self.GeneralTab=GeneralTab
# OWGUI.rubber(NodeTab)
self.rootNode=None
self.tree=None
self.resize(800, 500)
self.connect(self.graphButton, SIGNAL("clicked()"), self.saveGraph)
def sendReport(self):
from PyQt4.QtSvg import QSvgGenerator
if self.tree:
self.reportSection("Tree")
urlfn, filefn = self.getUniqueImageName(ext=".svg")
svg = QSvgGenerator()
svg.setFileName(filefn)
ssize = self.scene.sceneRect().size()
w, h = ssize.width(), ssize.height()
fact = 600/w
svg.setSize(QSize(600, h*fact))
painter = QPainter()
painter.begin(svg)
self.scene.render(painter)
painter.end()
# buffer = QPixmap(QSize(600, h*fact))
# painter.begin(buffer)
# painter.fillRect(buffer.rect(), QBrush(QColor(255, 255, 255)))
# self.scene.render(painter)
# painter.end()
# self.reportImage(lambda filename: buffer.save(filename, os.path.splitext(filename)[1][1:]))
from OWDlgs import OWChooseImageSizeDlg
self.reportImage(OWChooseImageSizeDlg(self.scene).saveImage)
self.reportRaw('<!--browsercode<br/>(Click <a href="%s">here</a> to view or download this image in a scalable vector format)-->' % urlfn)
#self.reportObject(self.svg_type, urlfn, width="600", height=str(h*fact))
def toggleZoomSlider(self):
k = 0.0028 * (self.Zoom ** 2) + 0.2583 * self.Zoom + 1.1389
self.sceneView.setTransform(QTransform().scale(k/2, k/2))
self.scene.update()
def toggleVSpacing(self):
self.rescaleTree()
self.scene.fixPos(self.rootNode,10,10)
self.scene.update()
def toggleHSpacing(self):
self.rescaleTree()
self.scene.fixPos(self.rootNode,10,10)
self.scene.update()
def toggleTreeDepth(self):
self.walkupdate(self.rootNode)
self.scene.fixPos(self.rootNode,10,10)
self.scene.update()
def toggleLineWidth(self):
for edge in self.scene.edges():
if self.LineWidthMethod==0:
width=5 # self.LineWidth
elif self.LineWidthMethod == 1:
width = (edge.node2.tree.distribution.cases/self.tree.distribution.cases) * 20 # self.LineWidth
elif self.LineWidthMethod == 2:
width = (edge.node2.tree.distribution.cases/edge.node1.tree.distribution.cases) * 10 # self.LineWidth
edge.setPen(QPen(Qt.gray, width, Qt.SolidLine, Qt.RoundCap))
self.scene.update()
def toggleNodeSize(self):
pass
def toggleTruncateText(self):
for n in self.scene.nodes():
n.truncateText = self.TruncateText
self.scene.fixPos(self.rootNode, 10, 10)
def toggleNavigator(self):
self.navWidget.setHidden(not self.navWidget.isHidden())
def activateLoadedSettings(self):
if not self.tree:
return
self.rescaleTree()
self.scene.fixPos(self.rootNode,10,10)
self.scene.update()
self.toggleTreeDepth()
self.toggleLineWidth()
# self.toggleNodeSize()
def ctree(self, tree=None):
self.clear()
if not tree:
self.centerRootButton.setDisabled(1)
self.centerNodeButton.setDisabled(0)
self.infoa.setText('No tree.')
self.infob.setText('')
self.tree=None
self.rootNode = None
else:
self.tree=tree.tree
self.infoa.setText('Number of nodes: ' + str(orngTree.countNodes(tree)))
self.infob.setText('Number of leaves: ' + str(orngTree.countLeaves(tree)))
if hasattr(self.scene, "colorPalette"):
self.scene.colorPalette.setNumberOfColors(len(self.tree.distribution))
# self.scene.setDataModel(GraphicsTree(self.tree))
self.rootNode=self.walkcreate(self.tree, None)
# self.scene.addItem(self.rootNode)
self.scene.fixPos(self.rootNode,self.HSpacing,self.VSpacing)
self.activateLoadedSettings()
self.sceneView.centerOn(self.rootNode.x(), self.rootNode.y())
self.updateNodeToolTips()
self.centerRootButton.setDisabled(0)
self.centerNodeButton.setDisabled(1)
self.scene.update()
def walkcreate(self, tree, parent=None, level=0):
node = GraphicsNode(tree, parent, None, self.scene)
node.borderRadius = 10
if parent:
parent.graph_add_edge(GraphicsEdge(None, self.scene, node1=parent, node2=node))
if tree.branches:
for i in range(len(tree.branches)):
if tree.branches[i]:
self.walkcreate(tree.branches[i],node,level+1)
return node
def walkupdate(self, node, level=0):
if not node: return
if self.MaxTreeDepthB and self.MaxTreeDepth<=level+1:
node.setOpen(False)
return
else:
node.setOpen(True,1)
for n in node.branches:
self.walkupdate(n,level+1)
def clear(self):
self.tree=None
self.scene.clear()
def updateNodeToolTips(self):
for node in self.scene.nodes():
node.setToolTip(self.nodeToolTip(node) if self.ToolTipsEnabled else "")
def nodeToolTip(self, tree):
return "tree node"
def rescaleTree(self):
NodeHeight = DefNodeHeight
NodeWidth = DefNodeWidth * ((self.NodeSize -1) * (1.5 / 9.0) + 0.5)
k = 1.0
self.scene.VSpacing=int(NodeHeight*k*(0.3+self.VSpacing*0.15))
self.scene.HSpacing=int(NodeWidth*k*(0.3+self.HSpacing*0.20))
for r in self.scene.nodeList:
r.setRect(r.rect().x(), r.rect().y(), int(NodeWidth*k), int(NodeHeight*k))
self.scene.fixPos() #self.rootNode, 10, 10)
def updateSelection(self):
self.selectedNode = (self.scene.selectedItems() + [None])[0]
self.centerNodeButton.setDisabled(not self.selectedNode)
self.send("Data", self.selectedNode.tree.examples if self.selectedNode else None)
def saveGraph(self, fileName = None):
from OWDlgs import OWChooseImageSizeDlg
dlg = OWChooseImageSizeDlg(self.scene, [("Save as Dot Tree File (.dot)", self.saveDot)], parent=self)
dlg.exec_()
def saveDot(self, filename=None):
if filename==None:
filename = QFileDialog.getSaveFileName(self, "Save to ...", "tree.dot", "Dot Tree File (.DOT)")
filename = unicode(filename)
if not filename:
return
orngTree.printDot(self.tree, filename)
class OWDefTreeViewer2D(OWTreeViewer2D):
    """Concrete/testing variant of OWTreeViewer2D that builds its own
    scene, view and detached navigator window."""

    def __init__(self, parent=None, signalManager = None, name='DefTreeViewer2D'):
        OWTreeViewer2D.__init__(self, parent, signalManager, name)
        self.settingsList=self.settingsList+["ShowPie"]
        self.scene = TreeGraphicsScene(self)
        self.sceneView = TreeGraphicsView(self, self.scene, self.mainArea)
        self.mainArea.layout().addWidget(self.sceneView)
#        self.scene.setSceneRect(0,0,800,800)
        # Navigator lives in its own top-level widget (toggled via button).
        self.navWidget = QWidget(None)
        self.navWidget.setLayout(QVBoxLayout(self.navWidget))
        # NOTE(review): this scene appears unused since setScene below is
        # commented out — confirm it can be removed.
        scene = TreeGraphicsScene(self.navWidget)
        self.treeNav = TreeNavigator(self.sceneView)
#        self.treeNav.setScene(scene)
        self.navWidget.layout().addWidget(self.treeNav)
#        self.sceneView.setNavigator(self.treeNav)
        self.navWidget.resize(400,400)
#        OWGUI.button(self.TreeTab,self,"Navigator",self.toggleNavigator)
if __name__=="__main__":
    # Ad-hoc manual test harness: load a dataset, learn a tree, show the
    # widget.  Note the second ExampleTable call overwrites the first, so
    # only iris.tab is actually used.
    a = QApplication(sys.argv)
    ow = OWDefTreeViewer2D()
    #data = orange.ExampleTable('../../doc/datasets/voting.tab')
    data = orange.ExampleTable(r"..//doc//datasets//zoo.tab")
    data = orange.ExampleTable(r"..//doc//datasets//iris.tab")
    tree = orange.TreeLearner(data, storeExamples = 1)
    ow.activateLoadedSettings()
    ow.ctree(None)
    ow.ctree(tree)
    # here you can test setting some stuff
    ow.show()
    a.exec_()
    ow.saveSettings()
| gpl-3.0 |
klahnakoski/TestLog-ETL | vendor/jx_elasticsearch/es52/expressions/basic_starts_with_op.py | 3 | 1322 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Contact: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import, division, unicode_literals
from jx_base.expressions import (
BasicStartsWithOp as BasicStartsWithOp_,
Variable as Variable_,
is_literal,
)
from jx_base.language import is_op
from jx_elasticsearch.es52.expressions.false_op import MATCH_NONE
from jx_elasticsearch.es52.expressions.true_op import MATCH_ALL
from jx_elasticsearch.es52.painless import false_script
from mo_future import first
from jx_elasticsearch.es52.painless import BasicStartsWithOp as PainlessBasicStartsWithOp
class BasicStartsWithOp(BasicStartsWithOp_):
    def to_esfilter(self, schema):
        """Translate this starts-with expression into an Elasticsearch
        filter clause for the given *schema*."""
        if not self.value:
            # No value expression at all -> trivially matches everything.
            return MATCH_ALL
        elif is_op(self.value, Variable_) and is_literal(self.prefix):
            # Simple case (variable startswith literal): use a native ES
            # "prefix" query on the variable's backing column.
            var = first(schema.leaves(self.value.var)).es_column
            return {"prefix": {var: self.prefix.value}}
        else:
            # General case: fall back to a painless script filter.
            output = PainlessBasicStartsWithOp.to_es_script(self, schema)
            if output is false_script:
                return MATCH_NONE
            return output
| mpl-2.0 |
yongshengwang/builthue | desktop/core/ext-py/Django-1.4.5/tests/modeltests/custom_columns/tests.py | 40 | 2265 | from __future__ import absolute_import
from django.core.exceptions import FieldError
from django.test import TestCase
from .models import Author, Article
class CustomColumnsTests(TestCase):
    """Verify that models mapped onto custom ``db_column``/``db_table``
    names behave exactly like ordinarily-named models at the ORM level.
    """
    def test_db_column(self):
        """Exercise queries, attribute access and the custom m2m table."""
        a1 = Author.objects.create(first_name="John", last_name="Smith")
        a2 = Author.objects.create(first_name="Peter", last_name="Jones")
        art = Article.objects.create(headline="Django lets you build Web apps easily")
        art.authors = [a1, a2]
        # Although the table and column names on Author have been set to custom
        # values, nothing about using the Author model has changed...
        # Query the available authors
        self.assertQuerysetEqual(
            Author.objects.all(), [
                "Peter Jones", "John Smith",
            ],
            unicode
        )
        self.assertQuerysetEqual(
            Author.objects.filter(first_name__exact="John"), [
                "John Smith",
            ],
            unicode
        )
        self.assertEqual(
            Author.objects.get(first_name__exact="John"),
            a1,
        )
        # Filtering on the *database* column name (rather than the model
        # field name) must still be rejected
        self.assertRaises(FieldError,
            lambda: Author.objects.filter(firstname__exact="John")
        )
        a = Author.objects.get(last_name__exact="Smith")
        a.first_name = "John"
        a.last_name = "Smith"
        # The db_column names never become Python attributes
        self.assertRaises(AttributeError, lambda: a.firstname)
        self.assertRaises(AttributeError, lambda: a.last)
        # Although the Article table uses a custom m2m table,
        # nothing about using the m2m relationship has changed...
        # Get all the authors for an article
        self.assertQuerysetEqual(
            art.authors.all(), [
                "Peter Jones",
                "John Smith",
            ],
            unicode
        )
        # Get the articles for an author
        self.assertQuerysetEqual(
            a.article_set.all(), [
                "Django lets you build Web apps easily",
            ],
            lambda a: a.headline
        )
        # Query the authors across the m2m relation
        self.assertQuerysetEqual(
            art.authors.filter(last_name='Jones'), [
                "Peter Jones"
            ],
            unicode
        )
| apache-2.0 |
0hoo/django-ecogwiki | wiki/markdownext/md_url.py | 5 | 1306 | # -*- coding: utf-8 -*-
from markdown.extensions import Extension
from markdown.inlinepatterns import Pattern
from markdown.util import etree
# Inline pattern with two alternatives:
#   * ``plainurl`` - a ``scheme://...`` URL, optionally preceded by an
#     ``itemprop::`` prefix (captured separately in ``itemprop``/``url``)
#   * ``email``    - a bare address ending in a 2-3 character TLD
url_re = r'(' \
r'(?P<plainurl>((?P<itemprop>[^\s\:]+)\:\:)?(?P<url>\w+://' \
r'[a-zA-Z0-9\~\!\@\#\$\%\^\&\*\-\_\=\+\[\]\\\:\;\"\'\,\.\'' \
r'\?/]' \
r'+))' \
r'|' \
r'(?P<email>[^\s]+@[^\s]+\.' \
r'[a-zA-Z0-9\~\!\@\#\$\%\^\&\*\-\_\=\+\[\]\\\:\;\"\'\,\.\'/]' \
r'{2,3})' \
r')'
class URLExtension(Extension):
    """Markdown extension that auto-links plain URLs and e-mail addresses."""
    def extendMarkdown(self, md, md_globals):
        url_pattern = UrlPattern(url_re)
        # register ahead of the backtick pattern so code spans still win
        md.inlinePatterns.add('url', url_pattern, "<backtick")
        # NOTE(review): underscore emphasis is disabled here - presumably
        # because underscores inside URLs would otherwise trigger it; confirm
        # against the wiki's markdown flavour before changing.
        del md.inlinePatterns['emphasis2']
class UrlPattern(Pattern):
    """Render a ``url_re`` match as an ``<a>`` element.

    A ``plainurl`` match becomes a direct link (optionally annotated with a
    microdata ``itemprop``); otherwise the match is treated as an e-mail
    address and rendered as a ``mailto:`` link.
    """
    def handleMatch(self, m):
        anchor = etree.Element('a')
        if m.group('plainurl'):
            target = m.group('url')
            anchor.set('href', target)
            anchor.set('class', 'plainurl')
            itemprop = m.group('itemprop')
            if itemprop:
                anchor.set('itemprop', itemprop)
        else:
            target = m.group('email')
            anchor.set('href', 'mailto:%s' % target)
            anchor.set('class', 'email')
        anchor.text = target
        return anchor
| gpl-3.0 |
vlimant/IntelROCCS | CUADRnT/src/python/cuadrnt/data_management/services/mit_db.py | 4 | 2035 | #!/usr/bin/env python2.7
"""
File : mit_db.py
Author : Bjorn Barrefors <bjorn dot peter dot barrefors AT cern dot ch>
Description: MIT DB service access module
"""
# system modules
import logging
import MySQLdb
# package modules
from cuadrnt.data_management.services.generic import GenericService
class MITDBService(GenericService):
    """
    Helper class to access MIT DB (MySQL) with optional result caching.
    """
    # NOTE(review): `config=dict()` is a mutable default argument shared
    # across calls; harmless only as long as the config is never mutated.
    def __init__(self, config=dict()):
        GenericService.__init__(self, config)
        self.logger = logging.getLogger(__name__)
        self.SERVICE = 'mit_db'
        # connection parameters come from the 'mit_db' section of the config
        host = str(self.config[self.SERVICE]['host'])
        user = str(self.config[self.SERVICE]['user'])
        passwd = str(self.config[self.SERVICE]['passwd'])
        db = str(self.config[self.SERVICE]['db'])
        self.conn = MySQLdb.connect(host=host, user=user, passwd=passwd, db=db)
    def query(self, query, values=tuple(), cache=True, cache_only=False, force_cache=False):
        """
        Get MIT DB data.

        With ``cache`` the storage cache is consulted first (skipped when
        ``force_cache``); a miss falls through to the DB and the result is
        cached. ``cache_only`` warms the cache and returns None implicitly.
        """
        if cache:
            json_data = list()
            if not force_cache:
                json_data = self.storage.get_cache(self.SERVICE, query, values)
            if not json_data:
                # cache miss (or forced refresh): hit the DB and re-cache
                json_data = self.call(query=query, values=values)
                self.storage.insert_cache(self.SERVICE, query, values, json_data)
            if not cache_only:
                return json_data
        else:
            json_data = self.call(query=query, values=values)
            return json_data
    def call(self, query, values=tuple()):
        """
        Submit a parameterized query to MIT DB; returns a list of row
        tuples, or an empty list on failure (errors are logged, not raised).
        """
        data = []
        # MySQLdb expects string parameters
        values = tuple([str(value) for value in values])
        try:
            with self.conn:
                cur = self.conn.cursor()
                cur.execute(query, values)
                for row in cur:
                    data.append(row)
        except Exception, e:
            self.logger.error('Query failed with message %s\n\tQuery: %s %s' % (e, str(query), str(values)))
        return data
| mit |
pshen/ansible | lib/ansible/modules/cloud/digital_ocean/digital_ocean_sshkey.py | 37 | 5059 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: digital_ocean_sshkey
short_description: Create/delete an SSH key in DigitalOcean
description:
- Create/delete an SSH key.
version_added: "1.6"
author: "Michael Gregson (@mgregson)"
options:
state:
description:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
description:
- DigitalOcean api key.
id:
description:
- Numeric, the SSH key id you want to operate on.
name:
description:
- String, this is the name of an SSH key to create or destroy.
ssh_pub_key:
description:
- The public SSH key you want to add to your account.
notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
requirements:
- "python >= 2.6"
- dopy
'''
EXAMPLES = '''
# Ensure a SSH key is present
# If a key matches this name, will return the ssh key id and changed = False
# If no existing key matches this name, a new key is created, the ssh key id is returned and changed = False
- digital_ocean_sshkey:
state: present
name: my_ssh_key
ssh_pub_key: 'ssh-rsa AAAA...'
client_id: XXX
api_key: XXX
'''
import os
import traceback
try:
from dopy.manager import DoError, DoManager
HAS_DOPY = True
except ImportError:
HAS_DOPY = False
from ansible.module_utils.basic import AnsibleModule
class JsonfyMixIn(object):
    """Mix-in giving subclasses a trivial JSON-ready representation."""

    def to_json(self):
        # The instance dict already holds the API payload verbatim.
        return vars(self)
class SSH(JsonfyMixIn):
    """Thin wrapper over a DigitalOcean SSH-key JSON record.

    ``setup()`` must be called once to install the shared DoManager
    before any other classmethod is used.
    """
    # class-level API client shared by all instances; set by setup()
    manager = None
    def __init__(self, ssh_key_json):
        # adopt all fields of the API record (id, name, ...) as attributes
        self.__dict__.update(ssh_key_json)
    # alias: refreshing an instance from new JSON reuses __init__
    update_attr = __init__
    def destroy(self):
        """Delete this key via the API; always returns True."""
        self.manager.destroy_ssh_key(self.id)
        return True
    @classmethod
    def setup(cls, client_id, api_key):
        """Install the shared DoManager API client."""
        cls.manager = DoManager(client_id, api_key)
    @classmethod
    def find(cls, name):
        """Return the first key whose name matches, or False."""
        if not name:
            return False
        keys = cls.list_all()
        for key in keys:
            if key.name == name:
                return key
        return False
    @classmethod
    def list_all(cls):
        """Fetch every key on the account, wrapped as SSH instances."""
        json = cls.manager.all_ssh_keys()
        return map(cls, json)
    @classmethod
    def add(cls, name, key_pub):
        """Create a new key from a public-key string and wrap the result."""
        json = cls.manager.new_ssh_key(name, key_pub)
        return cls(json)
def core(module):
    """Create or delete a DigitalOcean SSH key per the module parameters.

    Credentials come from the module params, falling back to the
    DO_CLIENT_ID / DO_API_KEY environment variables. Always exits the
    module via exit_json/fail_json.
    """
    def getkeyordie(k):
        # Fetch a required module parameter or abort the module run.
        v = module.params[k]
        if v is None:
            module.fail_json(msg='Unable to load %s' % k)
        return v

    try:
        # params['client_id'] will be None even if client_id is not passed in
        client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
        api_key = module.params['api_key'] or os.environ['DO_API_KEY']
    except KeyError as e:
        module.fail_json(msg='Unable to load %s' % e.message)

    state = module.params['state']

    SSH.setup(client_id, api_key)
    name = getkeyordie('name')
    # BUG FIX: the original tested `state in ('present')`, but `('present')`
    # is just the string 'present', so this was a substring test that only
    # worked by coincidence of the constrained choices. Use equality.
    if state == 'present':
        key = SSH.find(name)
        if key:
            # idempotent: key already exists under this name
            module.exit_json(changed=False, ssh_key=key.to_json())
        key = SSH.add(name, getkeyordie('ssh_pub_key'))
        module.exit_json(changed=True, ssh_key=key.to_json())

    elif state == 'absent':
        key = SSH.find(name)
        if not key:
            module.exit_json(changed=False, msg='SSH key with the name of %s is not found.' % name)
        key.destroy()
        module.exit_json(changed=True)
def main():
    """Module entry point: build the argument spec and dispatch to core()."""
    module = AnsibleModule(
        argument_spec=dict(
            state=dict(choices=['present', 'absent'], default='present'),
            client_id=dict(aliases=['CLIENT_ID'], no_log=True),
            api_key=dict(aliases=['API_KEY'], no_log=True),
            name=dict(type='str'),
            # NOTE(review): `id` is accepted (and satisfies required_one_of)
            # but core() only ever operates by name - confirm intent.
            id=dict(aliases=['droplet_id'], type='int'),
            ssh_pub_key=dict(type='str'),
        ),
        required_one_of=(
            ['id', 'name'],
        ),
    )
    if not HAS_DOPY:
        module.fail_json(msg='dopy required for this module')
    try:
        core(module)
    # Exception already covers DoError; both are listed for explicitness
    except (DoError, Exception) as e:
        module.fail_json(msg=str(e), exception=traceback.format_exc())
| gpl-3.0 |
2uller/LotF | App/Lib/site-packages/numpy/polynomial/hermite.py | 4 | 54763 | """
Objects for dealing with Hermite series.
This module provides a number of objects (mostly functions) useful for
dealing with Hermite series, including a `Hermite` class that
encapsulates the usual arithmetic operations. (General information
on how this module represents and works with such polynomials is in the
docstring for its "parent" sub-package, `numpy.polynomial`).
Constants
---------
- `hermdomain` -- Hermite series default domain, [-1,1].
- `hermzero` -- Hermite series that evaluates identically to 0.
- `hermone` -- Hermite series that evaluates identically to 1.
- `hermx` -- Hermite series for the identity map, ``f(x) = x``.
Arithmetic
----------
- `hermmulx` -- multiply a Hermite series in ``P_i(x)`` by ``x``.
- `hermadd` -- add two Hermite series.
- `hermsub` -- subtract one Hermite series from another.
- `hermmul` -- multiply two Hermite series.
- `hermdiv` -- divide one Hermite series by another.
- `hermval` -- evaluate a Hermite series at given points.
- `hermval2d` -- evaluate a 2D Hermite series at given points.
- `hermval3d` -- evaluate a 3D Hermite series at given points.
- `hermgrid2d` -- evaluate a 2D Hermite series on a Cartesian product.
- `hermgrid3d` -- evaluate a 3D Hermite series on a Cartesian product.
Calculus
--------
- `hermder` -- differentiate a Hermite series.
- `hermint` -- integrate a Hermite series.
Misc Functions
--------------
- `hermfromroots` -- create a Hermite series with specified roots.
- `hermroots` -- find the roots of a Hermite series.
- `hermvander` -- Vandermonde-like matrix for Hermite polynomials.
- `hermvander2d` -- Vandermonde-like matrix for 2D power series.
- `hermvander3d` -- Vandermonde-like matrix for 3D power series.
- `hermgauss` -- Gauss-Hermite quadrature, points and weights.
- `hermweight` -- Hermite weight function.
- `hermcompanion` -- symmetrized companion matrix in Hermite form.
- `hermfit` -- least-squares fit returning a Hermite series.
- `hermtrim` -- trim leading coefficients from a Hermite series.
- `hermline` -- Hermite series of given straight line.
- `herm2poly` -- convert a Hermite series to a polynomial.
- `poly2herm` -- convert a polynomial to a Hermite series.
Classes
-------
- `Hermite` -- A Hermite series class.
See also
--------
`numpy.polynomial`
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import polyutils as pu
import warnings
from polytemplate import polytemplate
__all__ = ['hermzero', 'hermone', 'hermx', 'hermdomain', 'hermline',
'hermadd', 'hermsub', 'hermmulx', 'hermmul', 'hermdiv', 'hermpow',
'hermval', 'hermder', 'hermint', 'herm2poly', 'poly2herm',
'hermfromroots', 'hermvander', 'hermfit', 'hermtrim', 'hermroots',
'Hermite', 'hermval2d', 'hermval3d', 'hermgrid2d', 'hermgrid3d',
'hermvander2d', 'hermvander3d', 'hermcompanion', 'hermgauss',
'hermweight']
hermtrim = pu.trimcoef
def poly2herm(pol) :
    """
    poly2herm(pol)

    Convert a polynomial to a Hermite series.

    Convert an array of polynomial coefficients, ordered from lowest
    degree to highest (relative to the "standard" basis), into the
    coefficients of the equivalent Hermite series, also ordered from
    lowest to highest degree.

    Parameters
    ----------
    pol : array_like
        1-D array containing the polynomial coefficients

    Returns
    -------
    c : ndarray
        1-D array containing the coefficients of the equivalent Hermite
        series.

    See Also
    --------
    herm2poly

    Notes
    -----
    The easy way to do conversions between polynomial basis sets
    is to use the convert method of a class instance.

    Examples
    --------
    >>> from numpy.polynomial.hermite import poly2herm
    >>> poly2herm(np.arange(4))
    array([ 1.   ,  2.75 ,  0.5  ,  0.375])

    """
    [pol] = pu.as_series([pol])
    # Horner's scheme in the Hermite basis: walk the coefficients from
    # highest degree down, multiplying the accumulator by x each step.
    result = 0
    for coef in reversed(pol):
        result = hermadd(hermmulx(result), coef)
    return result
def herm2poly(c) :
    """
    Convert a Hermite series to a polynomial.

    Convert an array representing the coefficients of a Hermite series,
    ordered from lowest degree to highest, to an array of the coefficients
    of the equivalent polynomial (relative to the "standard" basis) ordered
    from lowest to highest degree.

    Parameters
    ----------
    c : array_like
        1-D array containing the Hermite series coefficients, ordered
        from lowest order term to highest.

    Returns
    -------
    pol : ndarray
        1-D array containing the coefficients of the equivalent polynomial
        (relative to the "standard" basis) ordered from lowest order term
        to highest.

    See Also
    --------
    poly2herm

    Examples
    --------
    >>> from numpy.polynomial.hermite import herm2poly
    >>> herm2poly([ 1.   ,  2.75 ,  0.5  ,  0.375])
    array([ 0.,  1.,  2.,  3.])

    """
    from polynomial import polyadd, polysub, polymulx
    [c] = pu.as_series([c])
    n = len(c)
    if n == 1:
        return c
    if n == 2:
        # H_1(x) = 2x, so the degree-1 coefficient doubles
        c[1] *= 2
        return c
    else:
        c0 = c[-2]
        c1 = c[-1]
        # Clenshaw-style downward recurrence using
        # H_{i+1}(x) = 2x*H_i(x) - 2i*H_{i-1}(x)
        # i is the current degree of c1
        for i in range(n - 1, 1, -1) :
            tmp = c0
            c0 = polysub(c[i - 2], c1*(2*(i - 1)))
            c1 = polyadd(tmp, polymulx(c1)*2)
        return polyadd(c0, polymulx(c1)*2)
#
# These are constant arrays are of integer type so as to be compatible
# with the widest range of other types, such as Decimal.
#
# Hermite
hermdomain = np.array([-1,1])
# Hermite coefficients representing zero.
hermzero = np.array([0])
# Hermite coefficients representing one.
hermone = np.array([1])
# Hermite coefficients representing the identity x.
hermx = np.array([0, 1/2])
def hermline(off, scl) :
    """
    Hermite series whose graph is a straight line.

    Parameters
    ----------
    off, scl : scalars
        The specified line is given by ``off + scl*x``.

    Returns
    -------
    y : ndarray
        This module's representation of the Hermite series for
        ``off + scl*x``.

    See Also
    --------
    polyline, chebline

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermline, hermval
    >>> hermval(0,hermline(3, 2))
    3.0
    >>> hermval(1,hermline(3, 2))
    5.0

    """
    # Since H_1(x) = 2x, a slope of scl needs a coefficient of scl/2.
    if scl == 0:
        return np.array([off])
    return np.array([off, scl/2])
def hermfromroots(roots) :
    """
    Generate a Hermite series with given roots.

    The function returns the coefficients of the polynomial

    .. math:: p(x) = (x - r_0) * (x - r_1) * ... * (x - r_n),

    in Hermite form, where the `r_n` are the roots specified in `roots`.
    If a zero has multiplicity n, then it must appear in `roots` n times.
    The roots can appear in any order. The coefficient of the last term is
    not generally 1 for monic polynomials in Hermite form.

    Parameters
    ----------
    roots : array_like
        Sequence containing the roots.

    Returns
    -------
    out : ndarray
        1-D array of coefficients.  If all roots are real then `out` is a
        real array, if some of the roots are complex, then `out` is complex
        even if all the coefficients in the result are real (see Examples
        below).

    See Also
    --------
    polyfromroots, legfromroots, lagfromroots, chebfromroots,
    hermefromroots.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfromroots, hermval
    >>> coef = hermfromroots((-1, 0, 1))
    >>> hermval((-1, 0, 1), coef)
    array([ 0.,  0.,  0.])
    >>> coef = hermfromroots((-1j, 1j))
    >>> hermval((-1j, 1j), coef)
    array([ 0.+0.j,  0.+0.j])

    """
    if len(roots) == 0 :
        # empty product is the constant 1
        return np.ones(1)
    else :
        [roots] = pu.as_series([roots], trim=False)
        roots.sort()
        # one linear factor (x - r) per root, in Hermite form
        p = [hermline(-r, 1) for r in roots]
        n = len(p)
        # multiply factors pairwise (balanced tree) for numerical stability
        while n > 1:
            m, r = divmod(n, 2)
            tmp = [hermmul(p[i], p[i+m]) for i in range(m)]
            if r:
                # odd count: fold the leftover factor into the first product
                tmp[0] = hermmul(tmp[0], p[-1])
            p = tmp
            n = m
        return p[0]
def hermadd(c1, c2):
    """
    Add one Hermite series to another.

    Returns the sum of two Hermite series `c1` + `c2`.  The arguments
    are sequences of coefficients ordered from lowest order term to
    highest, i.e., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the Hermite series of their sum.

    See Also
    --------
    hermsub, hermmul, hermdiv, hermpow

    Notes
    -----
    The sum of two Hermite series is again a Hermite series, so addition
    is simply component-wise, just as for "standard" polynomials.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermadd
    >>> hermadd([1, 2, 3], [1, 2, 3, 4])
    array([ 2.,  4.,  6.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # accumulate into whichever array is longer
    if len(c2) > len(c1):
        c1, c2 = c2, c1
    c1[:c2.size] += c2
    return pu.trimseq(c1)
def hermsub(c1, c2):
    """
    Subtract one Hermite series from another.

    Returns the difference of two Hermite series `c1` - `c2`.  The
    sequences of coefficients are from lowest order term to highest, i.e.,
    [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their difference.

    See Also
    --------
    hermadd, hermmul, hermdiv, hermpow

    Notes
    -----
    The difference of two Hermite series is again a Hermite series, so
    subtraction is simply component-wise, just as for "standard"
    polynomials.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermsub
    >>> hermsub([1, 2, 3, 4], [1, 2, 3])
    array([ 0.,  0.,  0.,  4.])

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if len(c1) > len(c2):
        c1[:c2.size] -= c2
        diff = c1
    else:
        # negate the longer (or equal) series, then add c1 into its head
        diff = -c2
        diff[:c1.size] += c1
    return pu.trimseq(diff)
def hermmulx(c):
    """Multiply a Hermite series by x.

    Multiply the Hermite series `c` by x, where x is the independent
    variable.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Array representing the result of the multiplication.

    Notes
    -----
    The multiplication uses the recursion relationship for Hermite
    polynomials in the form

    .. math::

        xP_i(x) = (P_{i + 1}(x)/2 + i*P_{i - 1}(x))

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmulx
    >>> hermmulx([1, 2, 3])
    array([ 2. ,  6.5,  1. ,  1.5])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    # the zero series maps to itself
    if len(c) == 1 and c[0] == 0:
        return c
    out = np.empty(len(c) + 1, dtype=c.dtype)
    out[0] = c[0]*0
    out[1] = c[0]/2
    for k in range(1, len(c)):
        # x*H_k contributes H_{k+1}/2 and k*H_{k-1}
        out[k + 1] = c[k]/2
        out[k - 1] += c[k]*k
    return out
def hermmul(c1, c2):
    """
    Multiply one Hermite series by another.

    Returns the product of two Hermite series `c1` * `c2`.  The arguments
    are sequences of coefficients, from lowest order "term" to highest,
    e.g., [1,2,3] represents the series ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    out : ndarray
        Of Hermite series coefficients representing their product.

    See Also
    --------
    hermadd, hermsub, hermdiv, hermpow

    Notes
    -----
    The (polynomial) product of two C-series produces terms outside the
    Hermite basis, so the product is "reprojected" onto the basis via the
    Hermite recurrence, which may give unintuitive (but correct) results.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermmul
    >>> hermmul([1, 2, 3], [0, 1, 2])
    array([ 52.,  29.,  52.,   7.,   6.])

    """
    # s1, s2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    # iterate over the shorter series c, scaling the longer one xs
    if len(c1) > len(c2):
        c = c2
        xs = c1
    else:
        c = c1
        xs = c2
    # Clenshaw-style evaluation of sum_i c[i]*H_i with polynomial
    # "argument" xs; note c1 is rebound below as a running series.
    if len(c) == 1:
        c0 = c[0]*xs
        c1 = 0
    elif len(c) == 2:
        c0 = c[0]*xs
        c1 = c[1]*xs
    else :
        nd = len(c)
        c0 = c[-2]*xs
        c1 = c[-1]*xs
        # downward recurrence: H_{n+1} = 2x*H_n - 2n*H_{n-1}
        for i in range(3, len(c) + 1) :
            tmp = c0
            nd = nd - 1
            c0 = hermsub(c[-i]*xs, c1*(2*(nd - 1)))
            c1 = hermadd(tmp, hermmulx(c1)*2)
    return hermadd(c0, hermmulx(c1)*2)
def hermdiv(c1, c2):
    """
    Divide one Hermite series by another.

    Returns the quotient-with-remainder of two Hermite series
    `c1` / `c2`.  The arguments are sequences of coefficients from lowest
    order "term" to highest, e.g., [1,2,3] represents the series
    ``P_0 + 2*P_1 + 3*P_2``.

    Parameters
    ----------
    c1, c2 : array_like
        1-D arrays of Hermite series coefficients ordered from low to
        high.

    Returns
    -------
    [quo, rem] : ndarrays
        Of Hermite series coefficients representing the quotient and
        remainder.

    Raises
    ------
    ZeroDivisionError
        If the leading coefficient of `c2` is zero.

    See Also
    --------
    hermadd, hermsub, hermmul, hermpow

    Notes
    -----
    Division in the Hermite basis requires reprojecting quotient and
    remainder onto the basis, which may give unintuitive (but correct)
    results.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermdiv
    >>> hermdiv([ 52.,  29.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 0.]))
    >>> hermdiv([ 54.,  31.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 2.,  2.]))
    >>> hermdiv([ 53.,  30.,  52.,   7.,   6.], [0, 1, 2])
    (array([ 1.,  2.,  3.]), array([ 1.,  1.]))

    """
    # c1, c2 are trimmed copies
    [c1, c2] = pu.as_series([c1, c2])
    if c2[-1] == 0 :
        raise ZeroDivisionError()
    lc1 = len(c1)
    lc2 = len(c2)
    if lc1 < lc2 :
        # divisor has higher degree: quotient 0, remainder c1
        return c1[:1]*0, c1
    elif lc2 == 1 :
        # constant divisor: pure scaling, zero remainder
        return c1/c2[-1], c1[:1]*0
    else :
        # long division: eliminate the leading remainder coefficient one
        # degree at a time, from high degree down
        quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
        rem = c1
        for i in range(lc1 - lc2, - 1, -1):
            # basis element H_i times the divisor
            p = hermmul([0]*i + [1], c2)
            q = rem[-1]/p[-1]
            rem = rem[:-1] - q*p[:-1]
            quo[i] = q
        return quo, pu.trimseq(rem)
def hermpow(c, pow, maxpower=16) :
    """Raise a Hermite series to a power.

    Returns the Hermite series `c` raised to the power `pow`. The
    argument `c` is a sequence of coefficients ordered from low to high.
    i.e., [1,2,3] is the series  ``P_0 + 2*P_1 + 3*P_2.``

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to
        high.
    pow : integer
        Power to which the series will be raised
    maxpower : integer, optional
        Maximum power allowed. This is mainly to limit growth of the series
        to unmanageable size. Default is 16

    Returns
    -------
    coef : ndarray
        Hermite series of power.

    See Also
    --------
    hermadd, hermsub, hermmul, hermdiv

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermpow
    >>> hermpow([1, 2, 3], 2)
    array([ 81.,  52.,  82.,  12.,   9.])

    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    power = int(pow)
    if power != pow or power < 0 :
        raise ValueError("Power must be a non-negative integer.")
    if maxpower is not None and power > maxpower :
        raise ValueError("Power is too large")
    if power == 0 :
        return np.array([1], dtype=c.dtype)
    if power == 1 :
        return c
    # Sequential multiplication; binary exponentiation would be faster
    # but the allowed powers are small enough not to matter.
    prd = c
    for _ in range(power - 1):
        prd = hermmul(prd, c)
    return prd
def hermder(c, m=1, scl=1, axis=0) :
    """
    Differentiate a Hermite series.

    Returns the Hermite series coefficients `c` differentiated `m` times
    along `axis`.  At each iteration the result is multiplied by `scl` (the
    scaling factor is for use in a linear change of variable).

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If `c` is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Number of derivatives taken, must be non-negative. (Default: 1)
    scl : scalar, optional
        Each differentiation is multiplied by `scl`.  The end result is
        multiplication by ``scl**m``.  This is for use in a linear change
        of variable. (Default: 1)
    axis : int, optional
        Axis over which the derivative is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    der : ndarray
        Hermite series of the derivative.

    Raises
    ------
    ValueError
        If `m` or `axis` is not an integer, `m` is negative, or `axis` is
        out of range.

    See Also
    --------
    hermint

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermder
    >>> hermder([ 1. ,  0.5,  0.5,  0.5])
    array([ 1.,  2.,  3.])
    >>> hermder([-0.5,  1./2.,  1./8.,  1./12.,  1./16.], m=2)
    array([ 1.,  2.,  3.])

    """
    c = np.array(c, ndmin=1, copy=1)
    # promote bool/integer coefficients to double so division works
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of derivation must be integer")
    if cnt < 0:
        raise ValueError("The order of derivation must be non-negative")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    # bring the working axis to the front so indexing is uniform
    c = np.rollaxis(c, iaxis)
    n = len(c)
    if cnt >= n:
        # differentiating more times than the degree leaves zero
        c = c[:1]*0
    else :
        for i in range(cnt):
            n = n - 1
            c *= scl
            der = np.empty((n,) + c.shape[1:], dtype=c.dtype)
            # derivative rule: H_j' = 2*j*H_{j-1}
            for j in range(n, 0, -1):
                der[j - 1] = (2*j)*c[j]
            c = der
    # restore the original axis order
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
    """
    Integrate a Hermite series.

    Returns the Hermite series coefficients `c` integrated `m` times from
    `lbnd` along `axis`.  At each iteration the resulting series is
    **multiplied** by `scl` and an integration constant, `k`, is added.
    The scaling factor is for use in a linear change of variable ("Buyer
    beware": depending on what one is doing, one may want `scl` to be the
    reciprocal of what one might expect; see Notes).

    Parameters
    ----------
    c : array_like
        Array of Hermite series coefficients. If c is multidimensional
        the different axis correspond to different variables with the
        degree in each axis given by the corresponding index.
    m : int, optional
        Order of integration, must be positive. (Default: 1)
    k : {[], list, scalar}, optional
        Integration constant(s).  The value of the first integral at
        ``lbnd`` is the first value in the list, the value of the second
        integral at ``lbnd`` is the second value, etc.  If ``k == []`` (the
        default), all constants are set to zero.  If ``m == 1``, a single
        scalar can be given instead of a list.
    lbnd : scalar, optional
        The lower bound of the integral. (Default: 0)
    scl : scalar, optional
        Following each integration the result is *multiplied* by `scl`
        before the integration constant is added. (Default: 1)
    axis : int, optional
        Axis over which the integral is taken. (Default: 0).

        .. versionadded:: 1.7.0

    Returns
    -------
    S : ndarray
        Hermite series coefficients of the integral.

    Raises
    ------
    ValueError
        If ``m < 0``, ``len(k) > m``, ``np.isscalar(lbnd) == False``, or
        ``np.isscalar(scl) == False``.

    See Also
    --------
    hermder

    Notes
    -----
    Note that the result of each integration is *multiplied* by `scl`:
    for a change of variable :math:`u = ax + b`, :math:`dx = du/a`, so
    `scl` should be :math:`1/a`.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermint
    >>> hermint([1,2,3]) # integrate once, value 0 at 0.
    array([ 1. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
    array([-0.5       ,  0.5       ,  0.125     ,  0.08333333,  0.0625    ])
    >>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
    array([ 2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
    array([-2. ,  0.5,  0.5,  0.5])
    >>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
    array([ 1.66666667, -0.5       ,  0.125     ,  0.08333333,  0.0625    ])

    """
    c = np.array(c, ndmin=1, copy=1)
    # promote bool/integer coefficients to double so division works
    if c.dtype.char in '?bBhHiIlLqQpP':
        c = c.astype(np.double)
    if not np.iterable(k):
        k = [k]
    cnt, iaxis = [int(t) for t in [m, axis]]
    if cnt != m:
        raise ValueError("The order of integration must be integer")
    if cnt < 0 :
        raise ValueError("The order of integration must be non-negative")
    if len(k) > cnt :
        raise ValueError("Too many integration constants")
    if iaxis != axis:
        raise ValueError("The axis must be integer")
    if not -c.ndim <= iaxis < c.ndim:
        raise ValueError("The axis is out of range")
    if iaxis < 0:
        iaxis += c.ndim
    if cnt == 0:
        return c
    # bring the working axis to the front so indexing is uniform
    c = np.rollaxis(c, iaxis)
    # pad the constants with zeros up to the integration order
    k = list(k) + [0]*(cnt - len(k))
    for i in range(cnt) :
        n = len(c)
        c *= scl
        if n == 1 and np.all(c[0] == 0):
            # integrating the zero series just adds the constant
            c[0] += k[i]
        else:
            tmp = np.empty((n + 1,) + c.shape[1:], dtype=c.dtype)
            # antiderivative rule: integral of H_j is H_{j+1}/(2*(j+1))
            tmp[0] = c[0]*0
            tmp[1] = c[0]/2
            for j in range(1, n):
                tmp[j + 1] = c[j]/(2*(j + 1))
            # fix the constant term so the integral equals k[i] at lbnd
            tmp[0] += k[i] - hermval(lbnd, tmp)
            c = tmp
    # restore the original axis order
    c = np.rollaxis(c, 0, iaxis + 1)
    return c
def hermval(x, c, tensor=True):
    """
    Evaluate an Hermite series at points x.

    If `c` is of length ``n + 1``, this function returns

    .. math:: p(x) = c_0 * H_0(x) + c_1 * H_1(x) + ... + c_n * H_n(x)

    `x` is converted to an array only if it is a tuple or a list; otherwise
    it is treated as a scalar. Either way, `x` or its elements must support
    multiplication and addition with themselves and with the elements of
    `c`. Trailing zeros in `c` are used in the evaluation, so they should
    be avoided if efficiency is a concern.

    Parameters
    ----------
    x : array_like, compatible object
        Points at which to evaluate the series.
    c : array_like
        Coefficients ordered so that the coefficients for terms of degree
        n are contained in ``c[n]``. If `c` is multidimensional the
        remaining indices enumerate multiple polynomials.
    tensor : boolean, optional
        If True, the coefficient array's shape is extended with ones on
        the right, one for each dimension of `x`, so every column of
        coefficients is evaluated for every element of `x`. If False, `x`
        is broadcast over the columns of `c`. Default is True.

        .. versionadded:: 1.7.0

    Returns
    -------
    values : ndarray, algebra_like
        If `c` is 1-D the result has the shape of `x`; otherwise the shape
        is ``c.shape[1:] + x.shape`` when `tensor` is true and
        ``c.shape[1:]`` when it is false.

    See Also
    --------
    hermval2d, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    The evaluation uses Clenshaw recursion, aka synthetic division.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermval
    >>> hermval(1, [1,2,3])
    11.0
    >>> hermval([[1,2],[3,4]], [1,2,3])
    array([[  11.,   51.],
           [ 115.,  203.]])
    """
    coef = np.array(c, ndmin=1, copy=0)
    # Promote boolean/integer coefficient arrays to double.
    if coef.dtype.char in '?bBhHiIlLqQpP':
        coef = coef.astype(np.double)
    if isinstance(x, (tuple, list)):
        x = np.asarray(x)
    if tensor and isinstance(x, np.ndarray):
        # Append one axis per dimension of x so c broadcasts over x.
        coef = coef.reshape(coef.shape + (1,)*x.ndim)

    two_x = x*2
    n = len(coef)
    if n == 1:
        b0 = coef[0]
        b1 = 0
    elif n == 2:
        b0 = coef[0]
        b1 = coef[1]
    else:
        # Clenshaw recursion adapted to the Hermite recurrence
        # H_{k+1} = 2x H_k - 2k H_{k-1}.
        deg = n
        b0 = coef[-2]
        b1 = coef[-1]
        for k in range(3, n + 1):
            prev = b0
            deg = deg - 1
            b0 = coef[-k] - b1*(2*(deg - 1))
            b1 = prev + b1*two_x
    return b0 + b1*two_x
def hermval2d(x, y, c):
    """
    Evaluate a 2-D Hermite series at points (x, y).

    This function returns the values:

    .. math:: p(x,y) = \\sum_{i,j} c_{i,j} * H_i(x) * H_j(y)

    `x` and `y` are converted to arrays only if they are tuples or lists;
    otherwise they are treated as scalars. They must have the same shape
    after conversion, and they or their elements must support
    multiplication and addition with themselves and with the elements of
    `c`. If `c` is 1-D a one is implicitly appended to its shape to make
    it 2-D. The shape of the result will be ``c.shape[2:] + x.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points ``(x, y)``.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j is contained in ``c[i,j]``. If `c` has
        dimension greater than two the remaining indices enumerate
        multiple sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points formed with
        pairs of corresponding values from `x` and `y`.

    See Also
    --------
    hermval, hermgrid2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y = np.array((x, y), copy=0)
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures should be reported
        # as incompatible inputs.
        raise ValueError('x, y are incompatible')

    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    return c
def hermgrid2d(x, y, c):
    """
    Evaluate a 2-D Hermite series on the Cartesian product of x and y.

    This function returns the values:

    .. math:: p(a,b) = \\sum_{i,j} c_{i,j} * H_i(a) * H_j(b)

    where the points ``(a, b)`` consist of all pairs formed by taking `a`
    from `x` and `b` from `y`. The resulting points form a grid with `x`
    in the first dimension and `y` in the second. If `c` has fewer than
    two dimensions, ones are implicitly appended to its shape to make it
    2-D. The shape of the result will be ``c.shape[2:] + x.shape``.

    Parameters
    ----------
    x, y : array_like, compatible objects
        The two dimensional series is evaluated at the points in the
        Cartesian product of `x` and `y`. Lists and tuples are converted
        to ndarrays; other non-ndarray objects are treated as scalars.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms
        of degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the two dimensional polynomial at points in the
        Cartesian product of `x` and `y`.

    See Also
    --------
    hermval, hermval2d, hermval3d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Evaluate along x first, then along y; with the default tensor=True
    # this produces the full outer-product grid.
    return hermval(y, hermval(x, c))
def hermval3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series at points (x, y, z).

    This function returns the values:

    .. math:: p(x,y,z) = \\sum_{i,j,k} c_{i,j,k} * H_i(x) * H_j(y) * H_k(z)

    `x`, `y`, and `z` are converted to arrays only if they are tuples or
    lists; otherwise they are treated as scalars. They must have the same
    shape after conversion, and they or their elements must support
    multiplication and addition with themselves and with the elements of
    `c`. If `c` has fewer than 3 dimensions, ones are implicitly appended
    to its shape to make it 3-D. The shape of the result will be
    ``c.shape[3:] + x.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible object
        The three dimensional series is evaluated at the points
        ``(x, y, z)``, where `x`, `y`, and `z` must have the same shape.
    c : array_like
        Array of coefficients ordered so that the coefficient of the term
        of multi-degree i,j,k is contained in ``c[i,j,k]``. If `c` has
        dimension greater than 3 the remaining indices enumerate multiple
        sets of coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the multidimensional polynomial on points formed
        with triples of corresponding values from `x`, `y`, and `z`.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermgrid3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    try:
        x, y, z = np.array((x, y, z), copy=0)
    except Exception:
        # BUG FIX: a bare ``except:`` also swallowed SystemExit and
        # KeyboardInterrupt; only conversion failures should be reported
        # as incompatible inputs.
        raise ValueError('x, y, z are incompatible')

    c = hermval(x, c)
    c = hermval(y, c, tensor=False)
    c = hermval(z, c, tensor=False)
    return c
def hermgrid3d(x, y, z, c):
    """
    Evaluate a 3-D Hermite series on the Cartesian product of x, y, and z.

    This function returns the values:

    .. math:: p(a,b,c) = \\sum_{i,j,k} c_{i,j,k} * H_i(a) * H_j(b) * H_k(c)

    where the points ``(a, b, c)`` consist of all triples formed by taking
    `a` from `x`, `b` from `y`, and `c` from `z`. The resulting points
    form a grid with `x` in the first dimension, `y` in the second, and
    `z` in the third. If `c` has fewer than three dimensions, ones are
    implicitly appended to its shape to make it 3-D. The shape of the
    result will be ``c.shape[3:] + x.shape + y.shape + z.shape``.

    Parameters
    ----------
    x, y, z : array_like, compatible objects
        The three dimensional series is evaluated at the points in the
        Cartesian product of `x`, `y`, and `z`. Lists and tuples are
        converted to ndarrays; other non-ndarray objects are treated as
        scalars.
    c : array_like
        Array of coefficients ordered so that the coefficients for terms
        of degree i,j are contained in ``c[i,j]``. If `c` has dimension
        greater than two the remaining indices enumerate multiple sets of
        coefficients.

    Returns
    -------
    values : ndarray, compatible object
        The values of the series at points in the Cartesian product of
        `x`, `y`, and `z`.

    See Also
    --------
    hermval, hermval2d, hermgrid2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    # Successive tensor-mode evaluations build the full 3-D grid.
    return hermval(z, hermval(y, hermval(x, c)))
def hermvander(x, deg):
    """Pseudo-Vandermonde matrix of given degree.

    Returns the pseudo-Vandermonde matrix of degree `deg` and sample
    points `x`, defined by

    .. math:: V[..., i] = H_i(x),

    where ``0 <= i <= deg``. The leading indices of `V` index the
    elements of `x` and the last index is the degree of the Hermite
    polynomial. If `c` is a 1-D array of coefficients of length
    ``n + 1`` and ``V = hermvander(x, n)``, then ``np.dot(V, c)`` and
    ``hermval(x, c)`` agree up to roundoff, which is useful for least
    squares fitting and for bulk evaluation.

    Parameters
    ----------
    x : array_like
        Array of points. The dtype is converted to float64 or complex128
        depending on whether any of the elements are complex. If `x` is
        scalar it is converted to a 1-D array.
    deg : int
        Degree of the resulting matrix.

    Returns
    -------
    vander : ndarray
        The pseudo-Vandermonde matrix of shape ``x.shape + (deg + 1,)``.
        The dtype will be the same as the converted `x`.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermvander
    >>> x = np.array([-1, 0, 1])
    >>> hermvander(x, 3)
    array([[ 1., -2.,  2.,  4.],
           [ 1.,  0., -2., -0.],
           [ 1.,  2.,  2., -4.]])
    """
    ideg = int(deg)
    if ideg != deg:
        raise ValueError("deg must be integer")
    if ideg < 0:
        raise ValueError("deg must be non-negative")

    # `+ 0.0` promotes integer/boolean input to floating point.
    x = np.array(x, copy=0, ndmin=1) + 0.0
    v = np.empty((ideg + 1,) + x.shape, dtype=x.dtype)
    v[0] = x*0 + 1
    if ideg > 0:
        two_x = x*2
        v[1] = two_x
        # Forward recurrence H_k = 2x H_{k-1} - 2(k-1) H_{k-2}.
        for k in range(2, ideg + 1):
            v[k] = v[k-1]*two_x - v[k-2]*(2*(k - 1))
    # Move the degree axis to the end.
    return np.rollaxis(v, 0, v.ndim)
def hermvander2d(x, y, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y)``, defined by

    .. math:: V[..., deg[1]*i + j] = H_i(x) * H_j(y),

    where ``0 <= i <= deg[0]`` and ``0 <= j <= deg[1]``. The leading
    indices of `V` index the points ``(x, y)`` and the last index encodes
    the degrees of the Hermite polynomials. If
    ``V = hermvander2d(x, y, [xdeg, ydeg])``, then ``np.dot(V, c.flat)``
    and ``hermval2d(x, y, c)`` agree up to roundoff for a coefficient
    array `c` of shape ``(xdeg + 1, ydeg + 1)`` flattened in row-major
    order.

    Parameters
    ----------
    x, y : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg].

    Returns
    -------
    vander2d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)`. The dtype will be the same
        as the converted `x` and `y`.

    See Also
    --------
    hermvander, hermvander3d. hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    degrees = [int(d) for d in deg]
    # Reject wrong-length, fractional, or negative degree specifications.
    if len(degrees) != 2 or any(i != d or i < 0 for i, d in zip(degrees, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy = degrees

    x, y = np.array((x, y), copy=0) + 0.0
    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    # Outer product over the degree axes, then flatten them into one.
    grid = vx[..., None]*vy[..., None, :]
    return grid.reshape(grid.shape[:-2] + (-1,))
def hermvander3d(x, y, z, deg):
    """Pseudo-Vandermonde matrix of given degrees.

    Returns the pseudo-Vandermonde matrix of degrees `deg` and sample
    points ``(x, y, z)``. If `l, m, n` are the given degrees in
    `x, y, z`, the matrix is defined by

    .. math:: V[..., (m+1)(n+1)i + (n+1)j + k] = H_i(x)*H_j(y)*H_k(z),

    where ``0 <= i <= l``, ``0 <= j <= m``, and ``0 <= k <= n``. The
    leading indices of `V` index the points ``(x, y, z)`` and the last
    index encodes the degrees of the Hermite polynomials. If
    ``V = hermvander3d(x, y, z, [xdeg, ydeg, zdeg])``, then
    ``np.dot(V, c.flat)`` and ``hermval3d(x, y, z, c)`` agree up to
    roundoff for a coefficient array `c` of shape
    ``(xdeg + 1, ydeg + 1, zdeg + 1)`` flattened in row-major order.

    Parameters
    ----------
    x, y, z : array_like
        Arrays of point coordinates, all of the same shape. The dtypes
        will be converted to either float64 or complex128 depending on
        whether any of the elements are complex. Scalars are converted to
        1-D arrays.
    deg : list of ints
        List of maximum degrees of the form [x_deg, y_deg, z_deg].

    Returns
    -------
    vander3d : ndarray
        The shape of the returned matrix is ``x.shape + (order,)``, where
        :math:`order = (deg[0]+1)*(deg[1]+1)*(deg[2]+1)`. The dtype will
        be the same as the converted `x`, `y`, and `z`.

    See Also
    --------
    hermvander, hermvander3d. hermval2d, hermval3d

    Notes
    -----
    .. versionadded::1.7.0
    """
    degrees = [int(d) for d in deg]
    # Reject wrong-length, fractional, or negative degree specifications.
    if len(degrees) != 3 or any(i != d or i < 0 for i, d in zip(degrees, deg)):
        raise ValueError("degrees must be non-negative integers")
    degx, degy, degz = degrees

    x, y, z = np.array((x, y, z), copy=0) + 0.0
    vx = hermvander(x, degx)
    vy = hermvander(y, degy)
    vz = hermvander(z, degz)
    # Triple outer product over the degree axes, flattened into one axis.
    grid = vx[..., None, None]*vy[..., None, :, None]*vz[..., None, None, :]
    return grid.reshape(grid.shape[:-3] + (-1,))
def hermfit(x, y, deg, rcond=None, full=False, w=None):
    """
    Least squares fit of Hermite series to data.

    Return the coefficients of a Hermite series of degree `deg` that is
    the least squares fit to the data values `y` given at points `x`. If
    `y` is 1-D the returned coefficients will also be 1-D. If `y` is 2-D
    multiple fits are done, one for each column of `y`, and the
    resulting coefficients are stored in the corresponding columns of a
    2-D return. The fitted polynomial(s) are in the form

    .. math::  p(x) = c_0 + c_1 * H_1(x) + ... + c_n * H_n(x),

    where `n` is `deg`.

    Parameters
    ----------
    x : array_like, shape (M,)
        x-coordinates of the M sample points ``(x[i], y[i])``.
    y : array_like, shape (M,) or (M, K)
        y-coordinates of the sample points. Several data sets sharing the
        same x-coordinates can be fitted at once by passing a 2-D array
        with one dataset per column.
    deg : int
        Degree of the fitting polynomial.
    rcond : float, optional
        Relative condition number of the fit. Singular values smaller
        than this relative to the largest singular value are ignored.
        Default is ``len(x)*eps`` with eps the relative precision of the
        float type, about 2e-16 in most cases.
    full : bool, optional
        When False (default) just the coefficients are returned; when
        True diagnostic information from the singular value decomposition
        is also returned.
    w : array_like, shape (`M`,), optional
        Weights. If not None, the contribution of each point
        ``(x[i],y[i])`` to the fit is weighted by `w[i]`. Ideally the
        weights are chosen so that the errors of the products
        ``w[i]*y[i]`` all have the same variance. Default is None.

    Returns
    -------
    coef : ndarray, shape (M,) or (M, K)
        Hermite coefficients ordered from low to high. If `y` was 2-D,
        the coefficients for the data in column k of `y` are in column k.
    [residuals, rank, singular_values, rcond] : present when `full` = True
        Residuals of the least-squares fit, the effective rank of the
        scaled Vandermonde matrix and its singular values, and the
        specified value of `rcond`. For more details, see `linalg.lstsq`.

    Warns
    -----
    RankWarning
        The rank of the coefficient matrix in the least-squares fit is
        deficient. Only raised if `full` = False.

    See Also
    --------
    chebfit, legfit, lagfit, polyfit, hermefit
    hermval : Evaluates a Hermite series.
    hermvander : Vandermonde matrix of Hermite series.
    hermweight : Hermite weight function
    linalg.lstsq : Computes a least-squares fit from the matrix.
    scipy.interpolate.UnivariateSpline : Computes spline fits.

    Notes
    -----
    The solution minimizes the sum of the weighted squared errors

    .. math:: E = \\sum_j w_j^2 * |y_j - p(x_j)|^2

    by solving the (typically) overdetermined matrix equation
    ``V(x) * c = w * y`` with the singular value decomposition of the
    weighted pseudo Vandermonde matrix `V`. Fits using Hermite series are
    probably most useful when the data can be approximated by
    ``sqrt(w(x)) * p(x)``, where ``w(x)`` is the Hermite weight
    (available as `hermweight`).

    References
    ----------
    .. [1] Wikipedia, "Curve fitting",
           http://en.wikipedia.org/wiki/Curve_fitting

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermfit, hermval
    >>> x = np.linspace(-10, 10)
    >>> err = np.random.randn(len(x))/10
    >>> y = hermval(x, [1, 2, 3]) + err
    >>> hermfit(x, y, 2)
    array([ 0.97902637,  1.99849131,  3.00006   ])
    """
    order = int(deg) + 1
    x = np.asarray(x) + 0.0
    y = np.asarray(y) + 0.0

    # Validate the arguments.
    if deg < 0:
        raise ValueError("expected deg >= 0")
    if x.ndim != 1:
        raise TypeError("expected 1D vector for x")
    if x.size == 0:
        raise TypeError("expected non-empty vector for x")
    if y.ndim < 1 or y.ndim > 2:
        raise TypeError("expected 1D or 2D array for y")
    if len(x) != len(y):
        raise TypeError("expected x and y to have same length")

    # Set up the least squares matrices in transposed form.
    design = hermvander(x, deg).T
    targets = y.T
    if w is not None:
        w = np.asarray(w) + 0.0
        if w.ndim != 1:
            raise TypeError("expected 1D vector for w")
        if len(x) != len(w):
            raise TypeError("expected x and w to have same length")
        # Apply the weights. Not done in place: in-place operations can
        # cause problems with NA.
        design = design * w
        targets = targets * w

    if rcond is None:
        rcond = len(x)*np.finfo(x.dtype).eps

    # Norms of the design matrix columns, used to improve conditioning.
    if issubclass(design.dtype.type, np.complexfloating):
        col_scl = np.sqrt((np.square(design.real) + np.square(design.imag)).sum(1))
    else:
        col_scl = np.sqrt(np.square(design).sum(1))
    col_scl[col_scl == 0] = 1

    # Solve the scaled least squares problem, then undo the scaling.
    coef, resids, rank, s = la.lstsq(design.T/col_scl, targets.T, rcond)
    coef = (coef.T/col_scl).T

    # Warn on rank reduction unless full diagnostics were requested.
    if rank != order and not full:
        warnings.warn("The fit may be poorly conditioned", pu.RankWarning)

    if full:
        return coef, [resids, rank, s, rcond]
    else:
        return coef
def hermcompanion(c):
    """Return the scaled companion matrix of c.

    The basis polynomials are scaled so that the companion matrix is
    symmetric when `c` is an Hermite basis polynomial. This provides
    better eigenvalue estimates than the unscaled case and for basis
    polynomials the eigenvalues are guaranteed to be real if
    `numpy.linalg.eigvalsh` is used to obtain them.

    Parameters
    ----------
    c : array_like
        1-D array of Hermite series coefficients ordered from low to high
        degree.

    Returns
    -------
    mat : ndarray
        Scaled companion matrix of dimensions (deg, deg).

    Notes
    -----
    .. versionadded::1.7.0
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    if len(c) < 2:
        raise ValueError('Series must have maximum degree of at least 1.')
    if len(c) == 2:
        # BUG FIX: previously returned a 0-d array, which contradicts the
        # documented (deg, deg) shape and breaks la.eigvals for deg == 1
        # (e.g. in hermgauss(1)); return a 1x1 matrix instead.
        return np.array([[-.5*c[0]/c[1]]])

    n = len(c) - 1
    mat = np.zeros((n, n), dtype=c.dtype)
    # Cumulative basis scaling that symmetrizes the companion matrix.
    # (An unused local alias of np.multiply.accumulate was removed here.)
    scl = np.hstack((1., np.sqrt(2.*np.arange(1, n))))
    scl = np.multiply.accumulate(scl)
    # Strided views of the super- and sub-diagonal.
    top = mat.reshape(-1)[1::n+1]
    bot = mat.reshape(-1)[n::n+1]
    top[...] = np.sqrt(.5*np.arange(1, n))
    bot[...] = top
    # Fold the series coefficients into the last column.
    mat[:, -1] -= (c[:-1]/c[-1])*(scl/scl[-1])*.5
    return mat
def hermroots(c):
    """
    Compute the roots of a Hermite series.

    Return the roots (a.k.a. "zeros") of the polynomial

    .. math:: p(x) = \\sum_i c[i] * H_i(x).

    Parameters
    ----------
    c : 1-D array_like
        1-D array of coefficients.

    Returns
    -------
    out : ndarray
        Array of the roots of the series. If all the roots are real,
        then `out` is also real, otherwise it is complex.

    See Also
    --------
    polyroots, legroots, lagroots, chebroots, hermeroots

    Notes
    -----
    The root estimates are obtained as the eigenvalues of the companion
    matrix. Roots far from the origin may have large errors due to the
    numerical instability of the series for such values; roots with
    multiplicity greater than 1 will also show larger errors. Isolated
    roots near the origin can be improved by a few iterations of
    Newton's method. Since the Hermite basis polynomials aren't powers
    of `x`, the results of this function may seem unintuitive.

    Examples
    --------
    >>> from numpy.polynomial.hermite import hermroots, hermfromroots
    >>> coef = hermfromroots([-1, 0, 1])
    >>> coef
    array([ 0.   ,  0.25 ,  0.   ,  0.125])
    >>> hermroots(coef)
    array([ -1.00000000e+00,  -1.38777878e-17,   1.00000000e+00])
    """
    # c is a trimmed copy
    [c] = pu.as_series([c])
    n = len(c)
    # Degenerate cases: constants have no roots, degree 1 is solved
    # directly (H_1(x) = 2x).
    if n <= 1:
        return np.array([], dtype=c.dtype)
    if n == 2:
        return np.array([-.5*c[0]/c[1]])

    roots = la.eigvals(hermcompanion(c))
    roots.sort()
    return roots
def hermgauss(deg):
    """
    Gauss-Hermite quadrature.

    Computes the sample points and weights for Gauss-Hermite quadrature.
    These sample points and weights will correctly integrate polynomials
    of degree :math:`2*deg - 1` or less over the interval
    :math:`[-\\inf, \\inf]` with the weight function
    :math:`f(x) = \\exp(-x^2)`.

    Parameters
    ----------
    deg : int
        Number of sample points and weights. It must be >= 1.

    Returns
    -------
    x : ndarray
        1-D ndarray containing the sample points.
    y : ndarray
        1-D ndarray containing the weights.

    Notes
    -----
    .. versionadded::1.7.0

    The results have only been tested up to degree 100, higher degrees
    may be problematic. The weights are determined by using the fact that

    .. math:: w_k = c / (H'_n(x_k) * H_{n-1}(x_k))

    where :math:`c` is a constant independent of :math:`k` and
    :math:`x_k` is the k'th root of :math:`H_n`, and then scaling the
    results to get the right value when integrating 1.
    """
    ideg = int(deg)
    if ideg != deg or ideg < 1:
        # BUG FIX: the message said "non-negative integer" but the check
        # requires deg >= 1, i.e. a positive integer.
        raise ValueError("deg must be a positive integer")

    # First approximation of roots. We use the fact that the companion
    # matrix is symmetric in this case in order to obtain better zeros.
    c = np.array([0]*deg + [1])
    m = hermcompanion(c)
    x = la.eigvals(m)
    x.sort()

    # Improve roots by one application of Newton.
    dy = hermval(x, c)
    df = hermval(x, hermder(c))
    x -= dy/df

    # Compute the weights. We scale the factor to avoid possible numerical
    # overflow.
    fm = hermval(x, c[1:])
    fm /= np.abs(fm).max()
    df /= np.abs(df).max()
    w = 1/(fm * df)

    # For Hermite we can also symmetrize.
    w = (w + w[::-1])/2
    x = (x - x[::-1])/2

    # Scale w to get the right value when integrating 1.
    w *= np.sqrt(np.pi) / w.sum()

    return x, w
def hermweight(x):
    r"""
    Weight function of the Hermite polynomials.

    The weight function is :math:`\exp(-x^2)` and the interval of
    integration is :math:`[-\inf, \inf]`. The Hermite polynomials are
    orthogonal, but not normalized, with respect to this weight function.

    Parameters
    ----------
    x : array_like
        Values at which the weight function will be computed.

    Returns
    -------
    w : ndarray
        The weight function at `x`.

    Notes
    -----
    .. versionadded::1.7.0
    """
    return np.exp(-x**2)
#
# Hermite series class
#
# Python 2 ``exec`` statement: instantiates the shared ``polytemplate``
# (a string template defined elsewhere in this package) to generate the
# ``Hermite`` convenience class with prefix ``herm`` and default domain
# ``[-1, 1]``.
exec polytemplate.substitute(name='Hermite', nick='herm', domain='[-1,1]')
| gpl-2.0 |
piexona/pyside-examples | pyside_225_mdiEditor.py | 1 | 14077 | # coding: utf-8
# author: wie@ppi.co.jp
from PySide import QtGui, QtCore
import mdiEditor_rc
# import subprocess
# cmd = 'C:/Python27/Lib/site-packages/PySide/pyside-rcc.exe -o mdiEditor_rc.py images/mdiEditor.qrc'
# subprocess.Popen(cmd, stdout=subprocess.PIPE)
class MainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
    """Build the MDI main window: central QMdiArea, actions, menus,
    toolbars, status bar, and a deferred initial file load."""
    super(MainWindow, self).__init__(parent=parent)
    self.mdiArea = QtGui.QMdiArea()
    self.setCentralWidget(self.mdiArea)
    self.mdiArea.subWindowActivated.connect(self.updateActions)
    self.createActions()
    self.createMenus()
    self.createToolBars()
    self.createStatusBar()
    self.setWindowIcon(QtGui.QPixmap(':icon.png'))
    self.setWindowTitle(self.tr('MDI Editor'))
    # Fires as soon as the event loop becomes idle. If the files to be
    # loaded are large, the user might think the program froze, so show
    # the window first and do the loading afterwards.
    QtCore.QTimer.singleShot(0, self.loadFiles)
    self.readSettings()
def activeEditor(self):
    """Return the widget of the active MDI subwindow, or 0 if there is
    none.

    NOTE(review): returns 0 rather than None on purpose — callers
    (e.g. updateActions) compare the result against 0.
    """
    active = self.mdiArea.activeSubWindow()
    return active.widget() if active else 0
def addEditor(self, editor):
    """Wrap *editor* in an MDI subwindow and register its window-menu
    action."""
    editor.copyAvailable.connect(self.cutAction.setEnabled)
    editor.copyAvailable.connect(self.copyAction.setEnabled)
    # addSubWindow() creates a QMdiSubWindow, attaches the given widget
    # to that subwindow, and returns a pointer to the subwindow.
    subWindow = self.mdiArea.addSubWindow(editor)
    # The action belongs to the editor, so when the editor is closed it
    # disappears from the menu automatically.
    self.windowMenu.addAction(editor.windowMenuAction())
    self.windowActionGroup.addAction(editor.windowMenuAction())
    subWindow.show()
def loadFiles(self):
    """Open the files named on the command line, or start with a single
    empty editor."""
    # Same values as sys.argv, except that Qt-specific command line
    # options such as -style or -font have already been removed from the
    # argument list by QApplication.
    args = QtGui.QApplication.arguments()
    # Open the files passed along at program start. How many leading
    # entries to skip here can vary with the environment (e.g. Maya,
    # C++, standalone).
    if args[2:]:
        for arg in args[2:]:
            self.openFile(arg)
        self.mdiArea.cascadeSubWindows()
    else:
        self.newFile()
    # Activate an editor window, which emits the activation signal.
    self.mdiArea.activateNextSubWindow()
def newFile(self):
    """Create an empty editor and add it to the MDI area."""
    fresh_editor = Editor()
    fresh_editor.newFile()
    self.addEditor(fresh_editor)
def open(self):
    """Let Editor.open show a file dialog; add the resulting editor on
    success."""
    opened = Editor.open(self)
    if opened:
        self.addEditor(opened)
def save(self):
    """Save the active editor's document, if there is one."""
    editor = self.activeEditor()
    if editor:
        editor.save()
def saveAs(self):
    """Save the active editor's document under a new name, if there is
    one."""
    editor = self.activeEditor()
    if editor:
        editor.saveAs()
def copy(self):
    """Copy the active editor's selection to the clipboard, if any."""
    editor = self.activeEditor()
    if editor:
        editor.copy()
def cut(self):
    """Cut the active editor's selection to the clipboard, if any."""
    editor = self.activeEditor()
    if editor:
        editor.cut()
def paste(self):
    """Paste the clipboard into the active editor, if any."""
    editor = self.activeEditor()
    if editor:
        editor.paste()
def about(self):
    # Stub: aboutAction connects here but the About dialog is not
    # implemented yet. TODO: show a QMessageBox.about(...) box.
    pass
def createActions(self):
    """Create all QActions used by the menus and toolbars.

    Fixes several copy/paste defects in the original: status tips that
    were set on the main window instead of the action, the Copy action
    carrying the Cut shortcut, the Copy action's trigger re-connecting
    cutAction (leaving copyAction dead), and "Close All" quitting the
    application instead of closing the subwindows.
    """
    self.newAction = QtGui.QAction(self.tr('&New'), self)
    self.newAction.setIcon(QtGui.QIcon(':/new.png'))
    self.newAction.setShortcut(QtGui.QKeySequence.New)
    self.newAction.setStatusTip(self.tr('Create a new file'))
    self.newAction.triggered.connect(self.newFile)

    self.openAction = QtGui.QAction(self.tr('&Open'), self)
    self.openAction.setIcon(QtGui.QIcon(':/open.png'))
    self.openAction.setShortcut(QtGui.QKeySequence.Open)
    self.openAction.setStatusTip(self.tr('Open an existing file'))
    self.openAction.triggered.connect(self.open)

    self.saveAction = QtGui.QAction(self.tr('&Save'), self)
    self.saveAction.setIcon(QtGui.QIcon(':save.png'))
    self.saveAction.setShortcut(QtGui.QKeySequence.Save)
    self.saveAction.setStatusTip(self.tr('Save the file to disk'))
    self.saveAction.triggered.connect(self.save)

    self.saveAsAction = QtGui.QAction(self.tr('Save &As'), self)
    self.saveAsAction.setStatusTip(self.tr('Save the file under a new name'))
    self.saveAsAction.triggered.connect(self.saveAs)

    self.exitAction = QtGui.QAction(self.tr('E&xit'), self)
    self.exitAction.setShortcut(self.tr('Ctrl+Q'))
    self.exitAction.setStatusTip(self.tr('Exit the application'))
    self.exitAction.triggered.connect(self.close)

    self.cutAction = QtGui.QAction(self.tr('Cu&t'), self)
    self.cutAction.setIcon(QtGui.QIcon(':/images/cut.png'))
    self.cutAction.setShortcut(QtGui.QKeySequence.Cut)
    # BUG FIX: the status tip was set on the main window
    # (self.setStatusTip) instead of on the action.
    self.cutAction.setStatusTip(self.tr('Cut the current selection to the clipboard'))
    self.cutAction.triggered.connect(self.cut)

    self.copyAction = QtGui.QAction(self.tr('&Copy'), self)
    self.copyAction.setIcon(QtGui.QIcon(':/images/copy.png'))
    # BUG FIX: the shortcut was QKeySequence.Cut, the status tip was set
    # on the main window with the Cut text, and triggered was connected
    # to self.cut on cutAction — copyAction never did anything.
    self.copyAction.setShortcut(QtGui.QKeySequence.Copy)
    self.copyAction.setStatusTip(self.tr('Copy the current selection to the clipboard'))
    self.copyAction.triggered.connect(self.copy)

    self.pasteAction = QtGui.QAction(self.tr('&Paste'), self)
    self.pasteAction.setIcon(QtGui.QIcon(':/images/paste.png'))
    self.pasteAction.setShortcut(QtGui.QKeySequence.Paste)
    self.pasteAction.setStatusTip(self.tr("Paste the clipboard's contents at "
                                          "the cursor position"))
    self.pasteAction.triggered.connect(self.paste)

    self.closeAction = QtGui.QAction(self.tr('Cl&ose'), self)
    self.closeAction.setShortcut(QtGui.QKeySequence.Close)
    self.closeAction.setStatusTip(self.tr('Close the active window'))
    self.closeAction.triggered.connect(self.mdiArea.closeActiveSubWindow)

    self.closeAllAction = QtGui.QAction(self.tr('Close &All'), self)
    self.closeAllAction.setStatusTip(self.tr('Close all the windows'))
    # BUG FIX: was connected to self.close, which quits the application;
    # "Close All" should only close the MDI subwindows.
    self.closeAllAction.triggered.connect(self.mdiArea.closeAllSubWindows)

    self.tileAction = QtGui.QAction(self.tr('&Tile'), self)
    self.tileAction.setStatusTip(self.tr('Tile the windows'))
    self.tileAction.triggered.connect(self.mdiArea.tileSubWindows)

    self.cascadeAction = QtGui.QAction(self.tr('&Cascade'), self)
    self.cascadeAction.setStatusTip(self.tr('Cascade the windows'))
    self.cascadeAction.triggered.connect(self.mdiArea.cascadeSubWindows)

    self.nextAction = QtGui.QAction(self.tr('Ne&xt'), self)
    self.nextAction.setShortcut(QtGui.QKeySequence.NextChild)
    self.nextAction.setStatusTip(self.tr('Move the focus to the next window'))
    self.nextAction.triggered.connect(self.mdiArea.activateNextSubWindow)

    self.previousAction = QtGui.QAction(self.tr('Pre&vious'), self)
    self.previousAction.setShortcut(QtGui.QKeySequence.PreviousChild)
    self.previousAction.setStatusTip(self.tr('Move the focus to the previous '
                                             'window'))
    self.previousAction.triggered.connect(self.mdiArea.activatePreviousSubWindow)

    # Separator shown in the Window menu before the per-editor actions.
    self.separatorAction = QtGui.QAction(self)
    self.separatorAction.setSeparator(True)

    self.aboutAction = QtGui.QAction(self.tr('&About'), self)
    self.aboutAction.setStatusTip(self.tr("Show the Qt library's About box"))
    self.aboutAction.triggered.connect(self.about)

    self.aboutQtAction = QtGui.QAction(self.tr('About &Qt'), self)
    self.aboutQtAction.setStatusTip(self.tr("Show the Qt library's About box"))
    self.aboutQtAction.triggered.connect(QtGui.QApplication.aboutQt)

    # Exclusive group for the per-editor window-menu actions.
    self.windowActionGroup = QtGui.QActionGroup(self)
def createMenus(self):
    """Assemble the menu bar from the actions created in createActions()."""
    bar = self.menuBar()

    self.fileMenu = bar.addMenu(self.tr('&File'))
    for act in (self.newAction, self.openAction, self.saveAction):
        self.fileMenu.addAction(act)
    self.fileMenu.addSeparator()
    self.fileMenu.addAction(self.exitAction)

    self.editMenu = bar.addMenu(self.tr('&Edit'))
    for act in (self.cutAction, self.copyAction, self.pasteAction):
        self.editMenu.addAction(act)

    self.windowMenu = bar.addMenu(self.tr('&Window'))
    self.windowMenu.addAction(self.closeAction)
    self.windowMenu.addAction(self.closeAllAction)
    self.windowMenu.addSeparator()
    self.windowMenu.addAction(self.tileAction)
    self.windowMenu.addAction(self.cascadeAction)
    self.windowMenu.addSeparator()
    self.windowMenu.addAction(self.nextAction)
    self.windowMenu.addAction(self.previousAction)
    # Separator action placed before the per-document window entries.
    self.windowMenu.addAction(self.separatorAction)

    # NOTE(review): menu-bar separator; visual effect is style-dependent.
    bar.addSeparator()

    self.helpMenu = bar.addMenu(self.tr('&Help'))
    self.helpMenu.addAction(self.aboutAction)
    self.helpMenu.addAction(self.aboutQtAction)
def createToolBars(self):
    """Create the File and Edit toolbars mirroring the menu actions."""
    self.fileToolBar = self.addToolBar(self.tr('File'))
    # Object names are required for saveState()/restoreState() to work.
    self.fileToolBar.setObjectName('fileToolBar')
    for act in (self.newAction, self.openAction, self.saveAction):
        self.fileToolBar.addAction(act)

    self.editToolBar = self.addToolBar(self.tr('Edit'))
    self.editToolBar.setObjectName('editToolBar')
    for act in (self.cutAction, self.copyAction, self.pasteAction):
        self.editToolBar.addAction(act)
def createStatusBar(self):
    """Install a stretchable 'Ready' label in the status bar."""
    ready_label = QtGui.QLabel(self.tr(' Ready'))
    # Stretch factor 1 lets the label absorb the remaining width.
    self.statusBar().addWidget(ready_label, 1)
@QtCore.Slot()
def updateActions(self):
    """Enable/disable actions to match the currently active editor.

    Invoked when the active sub-window or its selection changes.
    """
    # Hoist the repeated activeEditor() lookups (the original called it
    # up to four times here); assumes the getter is side-effect free —
    # it only queries the MDI area. TODO confirm against activeEditor().
    editor = self.activeEditor()
    hasEditor = editor != 0
    hasSelection = bool(editor) and editor.textCursor().hasSelection()

    # Clipboard cut/copy only make sense with a selection.
    self.cutAction.setEnabled(hasSelection)
    self.copyAction.setEnabled(hasSelection)
    # Everything else just needs an active editor window.
    for action in (self.saveAction, self.saveAsAction, self.closeAction,
                   self.closeAllAction, self.tileAction, self.cascadeAction,
                   self.nextAction, self.previousAction,
                   self.separatorAction):
        action.setEnabled(hasEditor)

    if editor:
        # Check the active document's entry in the Window menu.
        editor.windowMenuAction().setChecked(True)
def writeSettings(self):
    """Persist window geometry and tool-bar/dock layout via QSettings."""
    store = QtCore.QSettings('Software Inc.', 'MDI Editor')
    store.beginGroup('mainWindow')
    store.setValue('geometry', self.saveGeometry())
    store.setValue('state', self.saveState())
    store.endGroup()
def readSettings(self):
    """Restore window geometry and tool-bar/dock layout saved by writeSettings()."""
    store = QtCore.QSettings('Software Inc.', 'MDI Editor')
    store.beginGroup('mainWindow')
    self.restoreGeometry(store.value('geometry'))
    self.restoreState(store.value('state'))
    store.endGroup()
def closeEvent(self, event):
    """Save settings, then close all documents; veto the close if any remain.

    A sub-window may refuse to close (e.g. unsaved changes), in which case
    subWindowList() stays non-empty and the application keeps running.
    """
    self.writeSettings()
    self.mdiArea.closeAllSubWindows()
    if not self.mdiArea.subWindowList():
        event.accept()
    else:
        event.ignore()
class Editor(QtGui.QTextEdit):
    """One MDI text-document window.

    Exposes a checkable QAction (`windowMenuAction`) used to list this
    document in the main window's Window menu.  File I/O helpers
    (readFile/saveFile/saveAs/strippedName) are defined elsewhere in the
    file.
    """

    # Class-wide counter used to number untitled documents
    # ("document1.txt", "document2.txt", ...).
    documentNumber = 1

    def __init__(self, parent=None):
        super(Editor, self).__init__(parent=parent)
        # Action representing this window in the Window menu.
        self.action = QtGui.QAction(self)
        self.action.setCheckable(True)
        self.action.triggered.connect(self.show)
        self.action.triggered.connect(self.setFocus)
        self.isUntitled = True
        self.curFile = ''
        self.setWindowIcon(QtGui.QPixmap(':/document.png'))
        # '[*]' is Qt's placeholder for the window-modified marker.
        self.setWindowTitle('[*]')
        self.document().contentsChanged.connect(self.documentWasModified)
        # Free the widget's memory when the window is closed.
        self.setAttribute(QtCore.Qt.WA_DeleteOnClose)

    def documentWasModified(self):
        # Placeholder slot for contentsChanged; intentionally a no-op here.
        pass

    def windowMenuAction(self):
        """Return the QAction that represents this document in the Window menu."""
        return self.action

    def newFile(self):
        """Initialise this editor as a fresh, numbered untitled document."""
        self.curFile = self.tr('document%d.txt' % self.documentNumber)
        self.setWindowTitle(self.curFile + '[*]')
        self.action.setText(self.curFile)
        self.isUntitled = True
        # Bump the class-wide counter so the next document gets a new number.
        Editor.documentNumber += 1

    @classmethod
    def open(cls, parent):
        """Prompt for a file and open it; returns the Editor or 0 if cancelled."""
        fileName = QtGui.QFileDialog.getOpenFileName(parent, 'Open', '.')
        if fileName:
            return cls.openFile(fileName, parent)
        return 0

    @staticmethod
    def openFile(fileName, parent):
        """Load `fileName` into a new Editor; returns the Editor or 0 on failure."""
        editor = Editor(parent)
        if editor.readFile(fileName):
            editor.setCurrentFile(fileName)
            return editor
        else:
            del editor
            return 0

    def save(self):
        """Save the document, prompting for a name if it is still untitled."""
        if self.isUntitled:
            return self.saveAs()
        else:
            return self.saveFile(self.curFile)

    def closeEvent(self, event):
        """Allow the window to close only if okToContinue() approves."""
        if self.okToContinue():
            event.accept()
        else:
            event.ignore()

    def okToContinue(self):
        # Always permits closing; a real implementation would check for
        # unsaved changes here.
        return True

    def setCurrentFile(self, fileName):
        """Record `fileName` as this document's file and refresh the titles."""
        self.curFile = fileName
        self.isUntitled = False
        self.action.setText(self.strippedName(self.curFile))
        self.document().setModified(False)
        # BUG FIX: this previously read `self.curfile` (lowercase f), which
        # raised AttributeError whenever a document was opened or saved
        # under a new name.
        self.setWindowTitle(self.strippedName(self.curFile) + '[*]')
        self.setWindowModified(False)

    def sizeHint(self):
        """Default size: roughly 72 columns by 25 lines of the current font."""
        return QtCore.QSize(72 * self.fontMetrics().width('x'),
                            25 * self.fontMetrics().lineSpacing())
if __name__ == '__main__':
    import sys

    # Launch the MDI editor application.  (A stray dataset artifact
    # appended to the last line previously made this block a syntax error.)
    app = QtGui.QApplication(sys.argv)
    mdiEditor = MainWindow()
    mdiEditor.show()
    sys.exit(app.exec_())
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-short-docstring-punctuation
"""Asserts and Boolean Checks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
# Dtypes this module treats as numeric; presumably consumed by
# `is_numeric_tensor` (listed in __all__, defined later in the file) —
# TODO confirm. Note the set does not include bfloat16/uint16/etc.
NUMERIC_TYPES = frozenset(
    [dtypes.float32, dtypes.float64, dtypes.int8, dtypes.int16, dtypes.int32,
     dtypes.int64, dtypes.uint8, dtypes.qint8, dtypes.qint32, dtypes.quint8,
     dtypes.complex64])

# Public API of this module.
__all__ = [
    'assert_negative',
    'assert_positive',
    'assert_proper_iterable',
    'assert_non_negative',
    'assert_non_positive',
    'assert_equal',
    'assert_none_equal',
    'assert_near',
    'assert_integer',
    'assert_less',
    'assert_less_equal',
    'assert_greater',
    'assert_greater_equal',
    'assert_rank',
    'assert_rank_at_least',
    'assert_rank_in',
    'assert_same_float_dtype',
    'assert_scalar',
    'assert_type',
    'assert_shapes',
    'is_non_decreasing',
    'is_numeric_tensor',
    'is_strictly_increasing',
]
def _maybe_constant_value_string(t):
  """Return a printable form of `t`, preferring its statically-known value."""
  if not isinstance(t, ops.Tensor):
    return str(t)
  # Use the constant-folded value when available; otherwise return the
  # tensor itself (the caller stringifies it later).
  const_t = tensor_util.constant_value(t)
  return str(const_t) if const_t is not None else t
def _assert_static(condition, data):
  """Raises a InvalidArgumentError with as much information as possible."""
  if condition:
    return
  # Stringify every entry of `data`, folding in static values where known.
  rendered = [_maybe_constant_value_string(x) for x in data]
  raise errors.InvalidArgumentError(
      node_def=None, op=None, message='\n'.join(rendered))
def _shape_and_dtype_str(tensor):
"""Returns a string containing tensor's shape and dtype."""
return 'shape=%s dtype=%s' % (tensor.shape, tensor.dtype.name)
def _unary_assert_doc(sym, sym_name):
  """Common docstring for assert_* ops that evaluate a unary predicate over every element of a tensor.

  Args:
    sym: Mathematical symbol for the check performed on each element, i.e. "> 0"
    sym_name: English-language name for the op described by sym

  Returns:
    Decorator that adds the appropriate docstring to the function for symbol
    `sym`.
  """

  def _decorator(func):
    """Generated decorator that adds the appropriate docstring to the function for symbol `sym`.

    Args:
      func: Function for a TensorFlow op

    Returns:
      Version of `func` with documentation attached.
    """
    opname = func.__name__
    cap_sym_name = sym_name.capitalize()
    # Overwrites any docstring the decorated function declared itself;
    # the template below is filled in via str.format.
    func.__doc__ = """
    Assert the condition `x {sym}` holds element-wise.

    When running in graph mode, you should add a dependency on this operation
    to ensure that it runs. Example of adding a dependency to an operation:

    ```python
    with tf.control_dependencies([tf.debugging.{opname}(x, y)]):
      output = tf.reduce_sum(x)
    ```

    {sym_name} means, for every element `x[i]` of `x`, we have `x[i] {sym}`.
    If `x` is empty this is trivially satisfied.

    Args:
      x: Numeric `Tensor`.
      data: The tensors to print out if the condition is False. Defaults to
        error message and first few entries of `x`.
      summarize: Print this many entries of each tensor.
      message: A string to prefix to the default message.
      name: A name for this operation (optional). Defaults to "{opname}".

    Returns:
      Op that raises `InvalidArgumentError` if `x {sym}` is False.
      @compatibility(eager)
        returns None
      @end_compatibility

    Raises:
      InvalidArgumentError: if the check can be performed immediately and
        `x {sym}` is False. The check can be performed immediately during
        eager execution or if `x` is statically known.
    """.format(
        sym=sym, sym_name=cap_sym_name, opname=opname)
    return func

  return _decorator
def _binary_assert_doc(sym, test_var):
  """Common docstring for most of the v1 assert_* ops that compare two tensors element-wise.

  Args:
    sym: Binary operation symbol, i.e. "=="
    test_var: a string that represents the variable in the right-hand side of
      binary operator of the test case

  Returns:
    Decorator that adds the appropriate docstring to the function for
    symbol `sym`.
  """

  def _decorator(func):
    """Generated decorator that adds the appropriate docstring to the function for symbol `sym`.

    Args:
      func: Function for a TensorFlow op

    Returns:
      A version of `func` with documentation attached.
    """
    opname = func.__name__
    # NOTE: double braces ({{ }}) in the template are literal braces
    # escaped for str.format.
    func.__doc__ = """
    Assert the condition `x {sym} y` holds element-wise.

    This condition holds if for every pair of (possibly broadcast) elements
    `x[i]`, `y[i]`, we have `x[i] {sym} y[i]`.
    If both `x` and `y` are empty, this is trivially satisfied.

    When running in graph mode, you should add a dependency on this operation
    to ensure that it runs. Example of adding a dependency to an operation:

    ```python
    with tf.control_dependencies([tf.compat.v1.{opname}(x, y)]):
      output = tf.reduce_sum(x)
    ```

    Args:
      x: Numeric `Tensor`.
      y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
      data: The tensors to print out if the condition is False. Defaults to
        error message and first few entries of `x`, `y`.
      summarize: Print this many entries of each tensor.
      message: A string to prefix to the default message.
      name: A name for this operation (optional). Defaults to "{opname}".

    Returns:
      Op that raises `InvalidArgumentError` if `x {sym} y` is False.

    Raises:
      InvalidArgumentError: if the check can be performed immediately and
        `x {sym} y` is False. The check can be performed immediately during
        eager execution or if `x` and `y` are statically known.

    @compatibility(TF2)
    `tf.compat.v1.{opname}` is compatible with eager execution and
    `tf.function`.
    Please use `tf.debugging.{opname}` instead when migrating to TF2. Apart
    from `data`, all arguments are supported with the same argument name.

    If you want to ensure the assert statements run before the
    potentially-invalid computation, please use `tf.control_dependencies`,
    as tf.function auto-control dependencies are insufficient for assert
    statements.

    #### Structural Mapping to Native TF2

    Before:

    ```python
    tf.compat.v1.{opname}(
      x=x, y=y, data=data, summarize=summarize,
      message=message, name=name)
    ```

    After:

    ```python
    tf.debugging.{opname}(
      x=x, y=y, message=message,
      summarize=summarize, name=name)
    ```

    #### TF1 & TF2 Usage Example

    TF1:

    >>> g = tf.Graph()
    >>> with g.as_default():
    ...   a = tf.compat.v1.placeholder(tf.float32, [2])
    ...   b = tf.compat.v1.placeholder(tf.float32, [2])
    ...   result = tf.compat.v1.{opname}(a, b,
    ...     message='"a {sym} b" does not hold for the given inputs')
    ...   with tf.compat.v1.control_dependencies([result]):
    ...     sum_node = a + b
    >>> sess = tf.compat.v1.Session(graph=g)
    >>> val = sess.run(sum_node, feed_dict={{a: [1, 2], b:{test_var}}})

    TF2:

    >>> a = tf.Variable([1, 2], dtype=tf.float32)
    >>> b = tf.Variable({test_var}, dtype=tf.float32)
    >>> assert_op = tf.debugging.{opname}(a, b, message=
    ...   '"a {sym} b" does not hold for the given inputs')
    >>> # When working with tf.control_dependencies
    >>> with tf.control_dependencies([assert_op]):
    ...   val = a + b

    @end_compatibility
    """.format(
        sym=sym, opname=opname, test_var=test_var)
    return func

  return _decorator
def _make_assert_msg_data(sym, x, y, summarize, test_op):
  """Subroutine of _binary_assert that generates the components of the default error message when running in eager mode.

  Only safe to call in eager mode: relies on `.numpy()` being available on
  the tensors involved.

  Args:
    sym: Mathematical symbol for the test to apply to pairs of tensor elements,
      i.e. "=="
    x: First input to the assertion after applying `convert_to_tensor()`
    y: Second input to the assertion
    summarize: Value of the "summarize" parameter to the original assert_* call;
      tells how many elements of each tensor to print.
    test_op: TensorFlow op that returns a Boolean tensor with True in each
      position where the assertion is satisfied.

  Returns:
    List of tensors and scalars that, when stringified and concatenated,
    will produce the error message string.
  """
  # Prepare a message with first elements of x and y.
  data = []
  data.append('Condition x %s y did not hold.' % sym)

  if summarize > 0:
    if x.shape == y.shape and x.shape.as_list():
      # If the shapes of x and y are the same (and not scalars),
      # Get the values that actually differed and their indices.
      # If shapes are different this information is more confusing
      # than useful.
      mask = math_ops.logical_not(test_op)
      indices = array_ops.where(mask)
      indices_np = indices.numpy()
      x_vals = array_ops.boolean_mask(x, mask)
      y_vals = array_ops.boolean_mask(y, mask)
      # Cap at `summarize` entries even if more elements differ.
      num_vals = min(summarize, indices_np.shape[0])
      data.append('Indices of first %d different values:' % num_vals)
      data.append(indices_np[:num_vals])
      data.append('Corresponding x values:')
      data.append(x_vals.numpy().reshape((-1,))[:num_vals])
      data.append('Corresponding y values:')
      data.append(y_vals.numpy().reshape((-1,))[:num_vals])

    # reshape((-1,)) is the fastest way to get a flat array view.
    x_np = x.numpy().reshape((-1,))
    y_np = y.numpy().reshape((-1,))
    x_sum = min(x_np.size, summarize)
    y_sum = min(y_np.size, summarize)
    data.append('First %d elements of x:' % x_sum)
    data.append(x_np[:x_sum])
    data.append('First %d elements of y:' % y_sum)
    data.append(y_np[:y_sum])

  return data
def _pretty_print(data_item, summarize):
  """Format a data item for use in an error message in eager mode.

  Args:
    data_item: One of the items in the "data" argument to an assert_*
      function. Can be a Tensor or a scalar value.
    summarize: How many elements to retain of each tensor-valued entry.

  Returns:
    An appropriate string representation of data_item
  """
  # Non-tensors (strings, numbers, ndarrays) are rendered as-is.
  if not isinstance(data_item, ops.Tensor):
    return str(data_item)

  arr = data_item.numpy()
  if np.isscalar(arr):
    # Tensor.numpy() returns a scalar for zero-dimensional tensors.
    return str(arr)

  # Flatten, keep at most `summarize` entries, and mark truncation.
  flat = arr.reshape((-1,))
  shown = [str(v) for v in flat[:summarize]]
  if len(shown) < flat.size:
    shown.append('...')
  return str(shown)
def _binary_assert(sym, opname, op_func, static_func, x, y, data, summarize,
                   message, name):
  """Generic binary elementwise assertion.

  Implements the behavior described in _binary_assert_doc() above.

  Args:
    sym: Mathematical symbol for the test to apply to pairs of tensor elements,
      i.e. "=="
    opname: Name of the assert op in the public API, i.e. "assert_equal"
    op_func: Function that, if passed the two Tensor inputs to the assertion (x
      and y), will return the test to be passed to reduce_all() i.e.
      math_ops.equal for assert_equal()
    static_func: Function that, if passed numpy ndarray versions of the two
      inputs to the assertion, will return a Boolean ndarray with containing
      True in all positions where the assertion PASSES.
      i.e. np.equal for assert_equal()
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to the value of
      `opname`.

  Returns:
    See docstring template in _binary_assert_doc().
  """
  with ops.name_scope(name, opname, [x, y, data]):
    x = ops.convert_to_tensor(x, name='x')
    y = ops.convert_to_tensor(y, name='y')

    if context.executing_eagerly():
      # Eager path: evaluate the predicate now and raise immediately on
      # failure; there is no op to return.
      test_op = op_func(x, y)
      condition = math_ops.reduce_all(test_op)
      if condition:
        return

      # If we get here, the assertion has failed.
      # Default to printing 3 elements like control_flow_ops.Assert (used
      # by graph mode) does. Also treat negative values as "print
      # everything" for consistency with Tensor::SummarizeValue().
      if summarize is None:
        summarize = 3
      elif summarize < 0:
        summarize = 1e9  # Code below will find exact size of x and y.

      if data is None:
        data = _make_assert_msg_data(sym, x, y, summarize, test_op)

      if message is not None:
        data = [message] + list(data)

      raise errors.InvalidArgumentError(
          node_def=None,
          op=None,
          message=('\n'.join(_pretty_print(d, summarize) for d in data)))

    else:  # not context.executing_eagerly()
      if data is None:
        data = [
            'Condition x %s y did not hold element-wise:' % sym,
            'x (%s) = ' % x.name, x,
            'y (%s) = ' % y.name, y
        ]
      if message is not None:
        data = [message] + list(data)
      condition = math_ops.reduce_all(op_func(x, y))
      x_static = tensor_util.constant_value(x)
      y_static = tensor_util.constant_value(y)
      if x_static is not None and y_static is not None:
        # When both inputs are statically known, fail at graph-construction
        # time with a readable message instead of deferring to runtime.
        condition_static = np.all(static_func(x_static, y_static))
        _assert_static(condition_static, data)
      return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export(
    'debugging.assert_proper_iterable',
    v1=['debugging.assert_proper_iterable', 'assert_proper_iterable'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_proper_iterable')
def assert_proper_iterable(values):
  """Static assert that values is a "proper" iterable.

  `Ops` that expect iterables of `Tensor` can call this to validate input.
  Useful since `Tensor`, `ndarray`, byte/text type are all iterables
  themselves.

  Args:
    values: Object to be checked.

  Raises:
    TypeError: If `values` is not iterable or is one of
      `Tensor`, `SparseTensor`, `np.array`, `tf.compat.bytes_or_text_types`.
  """
  # Types that iterate (over elements/characters) but are almost never what
  # the caller means by "an iterable of tensors".
  single_value_types = (
      (ops.Tensor, sparse_tensor.SparseTensor, np.ndarray)
      + compat.bytes_or_text_types)
  if isinstance(values, single_value_types):
    raise TypeError(
        'Expected argument "values" to be a "proper" iterable. Found: %s' %
        type(values))
  if not hasattr(values, '__iter__'):
    raise TypeError(
        'Expected argument "values" to be iterable. Found: %s' % type(values))
@tf_export('debugging.assert_negative', v1=[])
@dispatch.add_dispatch_support
def assert_negative_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x < 0` holds element-wise.

  Checks that `x[i] < 0` for every element of `x`; an empty `x` passes
  trivially. On failure, `message` and the first `summarize` entries of `x`
  are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all negative. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] < 0` is False. The check can be performed immediately during
      eager execution or if `x` is statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_negative(x=x, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_negative', 'assert_negative'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_negative')
@_unary_assert_doc('< 0', 'negative')
def assert_negative(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Public docstring is attached by the `_unary_assert_doc` decorator above.
  message = message or ''
  with ops.name_scope(name, 'assert_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no graph name; summarize shape/dtype instead.
      if context.executing_eagerly():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      data = [
          message,
          'Condition x < 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    # x < 0 is delegated to the binary assert against a zero of x's dtype.
    return assert_less(x, zero, data=data, summarize=summarize)
@tf_export('debugging.assert_positive', v1=[])
@dispatch.add_dispatch_support
def assert_positive_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x > 0` holds element-wise.

  Checks that `x[i] > 0` for every element of `x`; an empty `x` passes
  trivially. On failure, `message` and the first `summarize` entries of `x`
  are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all positive. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] > 0` is False. The check can be performed immediately during
      eager execution or if `x` is statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_positive(x=x, message=message, summarize=summarize, name=name)
@tf_export(v1=['debugging.assert_positive', 'assert_positive'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_positive')
@_unary_assert_doc('> 0', 'positive')
def assert_positive(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Public docstring is attached by the `_unary_assert_doc` decorator above.
  message = message or ''
  with ops.name_scope(name, 'assert_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no graph name; summarize shape/dtype instead.
      if context.executing_eagerly():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      data = [
          message, 'Condition x > 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    # x > 0  <=>  0 < x, delegated to the binary assert.
    return assert_less(zero, x, data=data, summarize=summarize)
@tf_export('debugging.assert_non_negative', v1=[])
@dispatch.add_dispatch_support
def assert_non_negative_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x >= 0` holds element-wise.

  Checks that `x[i] >= 0` for every element of `x`; an empty `x` passes
  trivially. On failure, `message` and the first `summarize` entries of `x`
  are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to
      "assert_non_negative".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-negative. This
    can be used with `tf.control_dependencies` inside of `tf.function`s to
    block followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] >= 0` is False. The check can be performed immediately during
      eager execution or if `x` is statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_non_negative(x=x, message=message, summarize=summarize,
                             name=name)
@tf_export(v1=['debugging.assert_non_negative', 'assert_non_negative'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_non_negative')
@_unary_assert_doc('>= 0', 'non-negative')
def assert_non_negative(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Public docstring is attached by the `_unary_assert_doc` decorator above.
  message = message or ''
  with ops.name_scope(name, 'assert_non_negative', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no graph name; summarize shape/dtype instead.
      if context.executing_eagerly():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      data = [
          message,
          'Condition x >= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    # x >= 0  <=>  0 <= x, delegated to the binary assert.
    return assert_less_equal(zero, x, data=data, summarize=summarize)
@tf_export('debugging.assert_non_positive', v1=[])
@dispatch.add_dispatch_support
def assert_non_positive_v2(x, message=None, summarize=None, name=None):
  """Assert the condition `x <= 0` holds element-wise.

  Checks that `x[i] <= 0` for every element of `x`; an empty `x` passes
  trivially. On failure, `message` and the first `summarize` entries of `x`
  are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to
      "assert_non_positive".

  Returns:
    Op raising `InvalidArgumentError` unless `x` is all non-positive. This
    can be used with `tf.control_dependencies` inside of `tf.function`s to
    block followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x[i] <= 0` is False. The check can be performed immediately during
      eager execution or if `x` is statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_non_positive(x=x, message=message, summarize=summarize,
                             name=name)
@tf_export(v1=['debugging.assert_non_positive', 'assert_non_positive'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_non_positive')
@_unary_assert_doc('<= 0', 'non-positive')
def assert_non_positive(x, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Public docstring is attached by the `_unary_assert_doc` decorator above.
  message = message or ''
  with ops.name_scope(name, 'assert_non_positive', [x, data]):
    x = ops.convert_to_tensor(x, name='x')
    if data is None:
      # Eager tensors have no graph name; summarize shape/dtype instead.
      if context.executing_eagerly():
        name = _shape_and_dtype_str(x)
      else:
        name = x.name
      # BUG FIX: a missing comma after the condition string previously made
      # Python concatenate it with 'x (%s) = ' (implicit string literal
      # concatenation), so `%` formatted the fused literal and the default
      # error `data` had 3 entries instead of 4 — unlike assert_negative /
      # assert_non_negative, which all emit the condition and the value
      # label as separate entries.
      data = [
          message,
          'Condition x <= 0 did not hold element-wise:',
          'x (%s) = ' % name, x]
    zero = ops.convert_to_tensor(0, dtype=x.dtype)
    # x <= 0 is delegated to the binary assert against a zero of x's dtype.
    return assert_less_equal(x, zero, data=data, summarize=summarize)
@tf_export('debugging.assert_equal', 'assert_equal', v1=[])
@dispatch.add_dispatch_support
def assert_equal_v2(x, y, message=None, summarize=None, name=None):
  """Assert the condition `x == y` holds element-wise.

  Checks that `x[i] == y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`; if both are empty this is trivially satisfied. On failure,
  `message` and the first `summarize` entries of `x` and `y` are printed,
  and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x == y` is False. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x == y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_equal(x=x, y=y, message=message, summarize=summarize,
                      name=name)
@tf_export(v1=['debugging.assert_equal', 'assert_equal'])
@dispatch.add_dispatch_support
@_binary_assert_doc('==', '[1, 2]')
def assert_equal(x, y, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Public docstring is attached by the `_binary_assert_doc` decorator above.
  with ops.name_scope(name, 'assert_equal', [x, y, data]):
    # Short-circuit if x and y are the same tensor.
    if x is y:
      # Identical objects are trivially equal; emit a graph no-op so callers
      # still get something usable in control_dependencies.
      return None if context.executing_eagerly() else control_flow_ops.no_op()
  return _binary_assert('==', 'assert_equal', math_ops.equal, np.equal, x, y,
                        data, summarize, message, name)
@tf_export('debugging.assert_none_equal', v1=[])
@dispatch.add_dispatch_support
def assert_none_equal_v2(x, y, summarize=None, message=None, name=None):
  """Assert the condition `x != y` holds for all elements.

  Checks that `x[i] != y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`; if both are empty this is trivially satisfied. If any
  elements are equal, `message` and the first `summarize` entries of `x` and
  `y` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_none_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x != y` is ever False. This
    can be used with `tf.control_dependencies` inside of `tf.function`s to
    block followup computation until the check has executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x != y` is False for any pair of elements in `x` and `y`. The check
      can be performed immediately during eager execution or if `x` and `y`
      are statically known.
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_none_equal(x=x, y=y, message=message, summarize=summarize,
                           name=name)
@tf_export(v1=['debugging.assert_none_equal', 'assert_none_equal'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_none_equal')
@_binary_assert_doc('!=', '[2, 1]')
def assert_none_equal(
    x, y, data=None, summarize=None, message=None, name=None):
  # Public docstring is attached by the `_binary_assert_doc` decorator above.
  return _binary_assert('!=', 'assert_none_equal', math_ops.not_equal,
                        np.not_equal, x, y, data, summarize, message, name)
@tf_export('debugging.assert_near', v1=[])
@dispatch.add_dispatch_support
def assert_near_v2(x, y, rtol=None, atol=None, message=None, summarize=None,
                   name=None):
  """Assert the condition `x` and `y` are close element-wise.

  Checks that `x[i] - y[i] < atol + rtol * tf.abs(y[i])` for every pair of
  (possibly broadcast) elements of `x` and `y`; if both are empty this is
  trivially satisfied. On failure, `message` and the first `summarize`
  entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same dtype as and broadcastable to `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The relative
      tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`. The absolute
      tolerance. Default is `10 * eps`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close
    enough. This can be used with `tf.control_dependencies` inside of
    `tf.function`s to block followup computation until the check has
    executed.

  @compatibility(eager)
  returns None
  @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x != y` is False for any pair of elements in `x` and `y`. The check
      can be performed immediately during eager execution or if `x` and `y`
      are statically known.

  @compatibility(numpy)
  Similar to `numpy.testing.assert_allclose`, except tolerance depends on
  data type. This is due to the fact that `TensorFlow` is often used with
  `32bit`, `64bit`, and even `16bit` data.
  @end_compatibility
  """
  # TF2 entry point: forward to the v1 implementation.
  return assert_near(x=x, y=y, rtol=rtol, atol=atol, message=message,
                     summarize=summarize, name=name)
@tf_export(v1=['debugging.assert_near', 'assert_near'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_near')
def assert_near(
    x, y, rtol=None, atol=None, data=None, summarize=None, message=None,
    name=None):
  """Assert the condition `x` and `y` are close element-wise.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_near(x, y)]):
    output = tf.reduce_sum(x)
  ```

  This condition holds if for every pair of (possibly broadcast) elements
  `x[i]`, `y[i]`, we have

  ```tf.abs(x[i] - y[i]) <= atol + rtol * tf.abs(y[i])```.

  If both `x` and `y` are empty, this is trivially satisfied.

  The default `atol` and `rtol` is `10 * eps`, where `eps` is the smallest
  representable positive number such that `1 + eps != 1`. This is about
  `1.2e-6` in `32bit`, `2.22e-15` in `64bit`, and `0.00977` in `16bit`.
  See `numpy.finfo`.

  Args:
    x: Float or complex `Tensor`.
    y: Float or complex `Tensor`, same `dtype` as, and broadcastable to, `x`.
    rtol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The relative tolerance. Default is `10 * eps`.
    atol: `Tensor`. Same `dtype` as, and broadcastable to, `x`.
      The absolute tolerance. Default is `10 * eps`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`, `y`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_near".

  Returns:
    Op that raises `InvalidArgumentError` if `x` and `y` are not close enough.

  @compatibility(numpy)
  Similar to `numpy.testing.assert_allclose`, except tolerance depends on data
  type. This is due to the fact that `TensorFlow` is often used with `32bit`,
  `64bit`, and even `16bit` data.
  @end_compatibility
  """
  message = message or ''
  with ops.name_scope(name, 'assert_near', [x, y, rtol, atol, data]):
    x = ops.convert_to_tensor(x, name='x')
    # `y` is coerced to `x`'s dtype, so mixed-precision inputs fail here.
    y = ops.convert_to_tensor(y, name='y', dtype=x.dtype)
    # Tolerances are computed in the real dtype; for complex inputs `eps` of
    # the component (real) type is used.
    dtype = x.dtype
    if dtype.is_complex:
      dtype = dtype.real_dtype
    eps = np.finfo(dtype.as_numpy_dtype).eps
    rtol = 10 * eps if rtol is None else rtol
    atol = 10 * eps if atol is None else atol
    rtol = ops.convert_to_tensor(rtol, name='rtol', dtype=dtype)
    atol = ops.convert_to_tensor(atol, name='atol', dtype=dtype)
    if context.executing_eagerly():
      # Eager tensors have no stable graph name; describe shape/dtype instead.
      x_name = _shape_and_dtype_str(x)
      y_name = _shape_and_dtype_str(y)
    else:
      x_name = x.name
      y_name = y.name
    if data is None:
      data = [
          message,
          'x and y not equal to tolerance rtol = %s, atol = %s' % (rtol, atol),
          'x (%s) = ' % x_name, x, 'y (%s) = ' % y_name, y
      ]
    tol = atol + rtol * math_ops.abs(y)
    diff = math_ops.abs(x - y)
    # NOTE(review): the runtime comparison is strict (`<`), slightly stricter
    # than the `<=` form quoted in the docstring -- confirm intended contract.
    condition = math_ops.reduce_all(math_ops.less(diff, tol))
    return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_less', 'assert_less', v1=[])
@dispatch.add_dispatch_support
def assert_less_v2(x, y, message=None, summarize=None, name=None):
  """Assert the condition `x < y` holds element-wise.

  Checks that `x[i] < y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`. Empty `x` and `y` satisfy the condition trivially.

  When the condition is violated, `message` and the first `summarize`
  entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_less".

  Returns:
    Op that raises `InvalidArgumentError` if `x < y` is False.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x < y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  return assert_less(
      x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_less', 'assert_less'])
@dispatch.add_dispatch_support
@_binary_assert_doc('<', '[2, 3]')
def assert_less(x, y, data=None, summarize=None, message=None, name=None):
  # Docstring is generated by the `_binary_assert_doc` decorator above.
  # Delegates to the shared `_binary_assert` helper with the graph comparator
  # `math_ops.less` and its numpy analogue `np.less` (presumably applied when
  # operands can be checked statically/eagerly).
  return _binary_assert('<', 'assert_less', math_ops.less, np.less, x, y, data,
                        summarize, message, name)
@tf_export('debugging.assert_less_equal', v1=[])
@dispatch.add_dispatch_support
def assert_less_equal_v2(x, y, message=None, summarize=None, name=None):
  """Assert the condition `x <= y` holds element-wise.

  Checks that `x[i] <= y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`. Empty `x` and `y` satisfy the condition trivially.

  When the condition is violated, `message` and the first `summarize`
  entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_less_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x <= y` is False. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x <= y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  return assert_less_equal(
      x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_less_equal', 'assert_less_equal'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_less_equal')
@_binary_assert_doc('<=', '[1, 3]')
def assert_less_equal(x, y, data=None, summarize=None, message=None, name=None):
  # Docstring is generated by the `_binary_assert_doc` decorator above.
  # Delegates to the shared `_binary_assert` helper with the graph comparator
  # `math_ops.less_equal` and its numpy analogue `np.less_equal`.
  return _binary_assert('<=', 'assert_less_equal', math_ops.less_equal,
                        np.less_equal, x, y, data, summarize, message, name)
@tf_export('debugging.assert_greater', 'assert_greater', v1=[])
@dispatch.add_dispatch_support
def assert_greater_v2(x, y, message=None, summarize=None, name=None):
  """Assert the condition `x > y` holds element-wise.

  Checks that `x[i] > y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`. Empty `x` and `y` satisfy the condition trivially.

  When the condition is violated, `message` and the first `summarize`
  entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to "assert_greater".

  Returns:
    Op that raises `InvalidArgumentError` if `x > y` is False. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x > y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  return assert_greater(
      x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_greater', 'assert_greater'])
@dispatch.add_dispatch_support
@_binary_assert_doc('>', '[0, 1]')
def assert_greater(x, y, data=None, summarize=None, message=None, name=None):  # pylint: disable=missing-docstring
  # Docstring is generated by the `_binary_assert_doc` decorator above.
  # Delegates to the shared `_binary_assert` helper with the graph comparator
  # `math_ops.greater` and its numpy analogue `np.greater`.
  return _binary_assert('>', 'assert_greater', math_ops.greater, np.greater, x,
                        y, data, summarize, message, name)
@tf_export('debugging.assert_greater_equal', v1=[])
@dispatch.add_dispatch_support
def assert_greater_equal_v2(x, y, message=None, summarize=None, name=None):
  """Assert the condition `x >= y` holds element-wise.

  Checks that `x[i] >= y[i]` for every pair of (possibly broadcast) elements
  of `x` and `y`. Empty `x` and `y` satisfy the condition trivially.

  When the condition is violated, `message` and the first `summarize`
  entries of `x` and `y` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: Numeric `Tensor`.
    y: Numeric `Tensor`, same dtype as and broadcastable to `x`.
    message: A string to prefix to the default message.
    summarize: Print this many entries of each tensor.
    name: A name for this operation (optional). Defaults to
      "assert_greater_equal".

  Returns:
    Op that raises `InvalidArgumentError` if `x >= y` is False. This can be
    used with `tf.control_dependencies` inside of `tf.function`s to block
    followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x >= y` is False. The check can be performed immediately during eager
      execution or if `x` and `y` are statically known.
  """
  return assert_greater_equal(
      x=x, y=y, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_greater_equal', 'assert_greater_equal'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_greater_equal')
@_binary_assert_doc('>=', '[1, 0]')
def assert_greater_equal(x, y, data=None, summarize=None, message=None,
                         name=None):
  # Docstring is generated by the `_binary_assert_doc` decorator above.
  # Delegates to the shared `_binary_assert` helper with the graph comparator
  # `math_ops.greater_equal` and its numpy analogue `np.greater_equal`.
  return _binary_assert('>=', 'assert_greater_equal', math_ops.greater_equal,
                        np.greater_equal, x, y, data, summarize, message, name)
def _assert_rank_condition(
    x, rank, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    static_condition: A python function that takes `[actual_rank, given_rank]`
      and returns `True` if the condition is satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_rank] and return
      `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  # `rank` must be a statically-typed int32 tensor; this raises otherwise.
  assert_type(rank, dtypes.int32)
  # Attempt to statically determine the rank.
  rank_static = tensor_util.constant_value(rank)
  if rank_static is not None:
    if rank_static.ndim != 0:
      raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, rank_static):
        # NOTE: callers (e.g. `assert_rank`) match on this exact first arg and
        # read args[1]/args[2] to build a friendlier error -- keep it stable.
        raise ValueError(
            'Static rank condition failed', x_rank_static, rank_static)
      # Both ranks known statically and the condition holds: nothing to do at
      # runtime, so return a no-op.
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), rank)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  if rank_static is None:
    this_data = ['Rank must be a scalar. Received rank: ', rank]
    rank_check = assert_rank(rank, 0, data=this_data)
    condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_rank', 'assert_rank', v1=[])
@dispatch.add_dispatch_support
def assert_rank_v2(x, rank, message=None, name=None):
  """Assert that `x` has rank equal to `rank`.

  Checks that the rank of `x` equals `rank`; when it does not, `message` and
  the shape of `x` are printed, and `InvalidArgumentError` is raised.

  Args:
    x: `Tensor`.
    rank: Scalar integer `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: if the check can be performed immediately and
      `x` does not have rank `rank`. The check can be performed immediately
      during eager execution or if the shape of `x` is statically known.
  """
  return assert_rank(x=x, rank=rank, message=message, name=name)
@tf_export(v1=['debugging.assert_rank', 'assert_rank'])
@dispatch.add_dispatch_support
def assert_rank(x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar integer `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and the shape of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(name, 'assert_rank', (x, rank) + tuple(data or [])):
    # SparseTensors are checked as-is; only dense-convertible inputs are
    # converted to a Tensor here.
    if not isinstance(x, sparse_tensor.SparseTensor):
      x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    static_condition = lambda actual_rank, given_rank: actual_rank == given_rank
    dynamic_condition = math_ops.equal
    # Eager tensors and SparseTensors carry no graph name; use an empty label.
    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank' % name, rank, 'Received shape: ',
          array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # `_assert_rank_condition` signals a static failure with args
      # ('Static rank condition failed', actual_rank, given_rank); rewrite
      # that tuple into a user-facing message. Any other ValueError is
      # propagated unchanged.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank %d. Received rank %d, shape %s' %
            (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
@tf_export('debugging.assert_rank_at_least', v1=[])
@dispatch.add_dispatch_support
def assert_rank_at_least_v2(x, rank, message=None, name=None):
  """Assert that `x` has rank of at least `rank`.

  Checks that the rank of `x` is greater than or equal to `rank`; when it is
  lower, `message` and the shape of `x` are printed, and
  `InvalidArgumentError` is raised.

  Args:
    x: `Tensor`.
    rank: Scalar integer `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to
      "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: `x` does not have rank at least `rank`, but the rank
      cannot be statically determined.
    ValueError: If static checks determine `x` has mismatched rank.
  """
  return assert_rank_at_least(x=x, rank=rank, message=message, name=name)
@tf_export(v1=['debugging.assert_rank_at_least', 'assert_rank_at_least'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_rank_at_least')
def assert_rank_at_least(
    x, rank, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank equal to `rank` or higher.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank_at_least(x, 2)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    rank: Scalar `Tensor`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_at_least".

  Returns:
    Op raising `InvalidArgumentError` unless `x` has specified rank or higher.
    If static checks determine `x` has correct rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has wrong rank.
  """
  with ops.name_scope(
      name, 'assert_rank_at_least', (x, rank) + tuple(data or [])):
    x = ops.convert_to_tensor(x, name='x')
    rank = ops.convert_to_tensor(rank, name='rank')
    message = message or ''
    static_condition = lambda actual_rank, given_rank: actual_rank >= given_rank
    dynamic_condition = math_ops.greater_equal
    # Eager tensors carry no graph name; use an empty label in messages.
    if context.executing_eagerly():
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message,
          'Tensor %s must have rank at least' % name, rank,
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_rank_condition(x, rank, static_condition,
                                         dynamic_condition, data, summarize)
    except ValueError as e:
      # `_assert_rank_condition` signals a static failure with args
      # ('Static rank condition failed', actual_rank, given_rank); rewrite
      # that tuple into a user-facing message. Other ValueErrors propagate.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank at least %d. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
def _static_rank_in(actual_rank, given_ranks):
  """Returns True if `actual_rank` equals any entry of `given_ranks`."""
  return any(actual_rank == given_rank for given_rank in given_ranks)
def _dynamic_rank_in(actual_rank, given_ranks):
  """Builds an op that is True if `actual_rank` equals any of `given_ranks`."""
  # An empty candidate list can never match; return a constant False tensor.
  if not given_ranks:
    return ops.convert_to_tensor(False)
  # OR together one equality test per candidate rank.
  matches = [
      math_ops.equal(given_rank, actual_rank) for given_rank in given_ranks
  ]
  result = matches[0]
  for match in matches[1:]:
    result = math_ops.logical_or(result, match)
  return result
def _assert_ranks_condition(
    x, ranks, static_condition, dynamic_condition, data, summarize):
  """Assert `x` has a rank that satisfies a given condition.

  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    static_condition: A python function that takes
      `[actual_rank, given_ranks]` and returns `True` if the condition is
      satisfied, `False` otherwise.
    dynamic_condition: An `op` that takes [actual_rank, given_ranks]
      and return `True` if the condition is satisfied, `False` otherwise.
    data: The tensors to print out if the condition is false. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.

  Returns:
    Op raising `InvalidArgumentError` if `x` fails dynamic_condition.

  Raises:
    ValueError: If static checks determine `x` fails static_condition.
  """
  # Every candidate rank must be a statically-typed int32 tensor.
  for rank in ranks:
    assert_type(rank, dtypes.int32)
  # Attempt to statically determine each rank.
  ranks_static = tuple([tensor_util.constant_value(rank) for rank in ranks])
  # The static fast-path is only taken when *all* candidate ranks are known.
  if not any(r is None for r in ranks_static):
    for rank_static in ranks_static:
      if rank_static.ndim != 0:
        raise ValueError('Rank must be a scalar.')
    x_rank_static = x.get_shape().ndims
    if x_rank_static is not None:
      if not static_condition(x_rank_static, ranks_static):
        # NOTE: callers (e.g. `assert_rank_in`) match on this exact first arg
        # and read args[1]/args[2] for the user-facing error -- keep stable.
        raise ValueError(
            'Static rank condition failed', x_rank_static, ranks_static)
      # Statically verified; no runtime check needed.
      return control_flow_ops.no_op(name='static_checks_determined_all_ok')
  condition = dynamic_condition(array_ops.rank(x), ranks)
  # Add the condition that `rank` must have rank zero. Prevents the bug where
  # someone does assert_rank(x, [n]), rather than assert_rank(x, n).
  for rank, rank_static in zip(ranks, ranks_static):
    if rank_static is None:
      this_data = ['Rank must be a scalar. Received rank: ', rank]
      rank_check = assert_rank(rank, 0, data=this_data)
      condition = control_flow_ops.with_dependencies([rank_check], condition)
  return control_flow_ops.Assert(condition, data, summarize=summarize)
@tf_export('debugging.assert_rank_in', v1=[])
@dispatch.add_dispatch_support
def assert_rank_in_v2(x, ranks, message=None, name=None):
  """Assert that `x` has a rank in `ranks`.

  Checks that the rank of `x` is one of the given `ranks`; when it is not,
  `message` and the shape of `x` are printed, and `InvalidArgumentError` is
  raised.

  Args:
    x: `Tensor`.
    ranks: `Iterable` of scalar `Tensor` objects.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_rank_in".

  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.
    This can be used with `tf.control_dependencies` inside of `tf.function`s
    to block followup computation until the check has executed.
    @compatibility(eager)
    returns None
    @end_compatibility

  Raises:
    InvalidArgumentError: `x` does not have rank in `ranks`, but the rank
      cannot be statically determined.
    ValueError: If static checks determine `x` has mismatched rank.
  """
  return assert_rank_in(x=x, ranks=ranks, message=message, name=name)
@tf_export(v1=['debugging.assert_rank_in', 'assert_rank_in'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_rank_in')
def assert_rank_in(
    x, ranks, data=None, summarize=None, message=None, name=None):
  """Assert `x` has rank in `ranks`.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_rank_in(x, (2, 4))]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: Numeric `Tensor`.
    ranks: Iterable of scalar `Tensor` objects.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of `x`.
    summarize: Print this many entries of each tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional).
      Defaults to "assert_rank_in".

  Returns:
    Op raising `InvalidArgumentError` unless rank of `x` is in `ranks`.
    If static checks determine `x` has matching rank, a `no_op` is returned.

  Raises:
    ValueError: If static checks determine `x` has mismatched rank.
  """
  with ops.name_scope(
      name, 'assert_rank_in', (x,) + tuple(ranks) + tuple(data or [])):
    # SparseTensors are checked as-is; only dense-convertible inputs are
    # converted to a Tensor here.
    if not isinstance(x, sparse_tensor.SparseTensor):
      x = ops.convert_to_tensor(x, name='x')
    ranks = tuple([ops.convert_to_tensor(rank, name='rank') for rank in ranks])
    message = message or ''
    # Eager tensors and SparseTensors carry no graph name; use an empty label.
    if context.executing_eagerly() or isinstance(x, sparse_tensor.SparseTensor):
      name = ''
    else:
      name = x.name
    if data is None:
      data = [
          message, 'Tensor %s must have rank in' % name
      ] + list(ranks) + [
          'Received shape: ', array_ops.shape(x)
      ]
    try:
      assert_op = _assert_ranks_condition(x, ranks, _static_rank_in,
                                          _dynamic_rank_in, data, summarize)
    except ValueError as e:
      # `_assert_ranks_condition` signals a static failure with args
      # ('Static rank condition failed', actual_rank, given_ranks); rewrite
      # that tuple into a user-facing message. Other ValueErrors propagate.
      if e.args[0] == 'Static rank condition failed':
        raise ValueError(
            '%s. Tensor %s must have rank in %s. Received rank %d, '
            'shape %s' % (message, name, e.args[2], e.args[1], x.get_shape()))
      else:
        raise
    return assert_op
@tf_export('debugging.assert_integer', v1=[])
@dispatch.add_dispatch_support
def assert_integer_v2(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  When `x` has a non-integer type, `message` and the dtype of `x` are
  printed, and `InvalidArgumentError` is raised.

  This can always be checked statically, so this method returns nothing.

  Args:
    x: A `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is not a non-quantized integer type.
  """
  assert_integer(x=x, message=message, name=name)
@tf_export(v1=['debugging.assert_integer', 'assert_integer'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_integer')
def assert_integer(x, message=None, name=None):
  """Assert that `x` is of integer dtype.

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.compat.v1.assert_integer(x)]):
    output = tf.reduce_sum(x)
  ```

  Args:
    x: `Tensor` whose basetype is integer and is not quantized.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_integer".

  Raises:
    TypeError: If `x.dtype` is anything other than non-quantized integer.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  with ops.name_scope(name, 'assert_integer', [x]):
    x = ops.convert_to_tensor(x, name='x')
    if x.dtype.is_integer:
      # Dtype is known statically; nothing is left to verify at runtime.
      return control_flow_ops.no_op('statically_determined_was_integer')
    # Eager tensors carry no graph name, so fall back to a generic label.
    name = 'tensor' if context.executing_eagerly() else x.name
    raise TypeError(
        '%s Expected "x" to be integer type. Found: %s of dtype %s'
        % (message, name, x.dtype))
@tf_export('debugging.assert_type', v1=[])
@dispatch.add_dispatch_support
def assert_type_v2(tensor, tf_type, message=None, name=None):
  """Asserts that the given `Tensor` is of the specified type.

  This can always be checked statically, so this method returns nothing.

  Example:

  >>> a = tf.Variable(1.0)
  >>> tf.debugging.assert_type(a, tf_type= tf.float32)

  >>> b = tf.constant(21)
  >>> tf.debugging.assert_type(b, tf_type=tf.bool)
  Traceback (most recent call last):
  ...
  TypeError: ...

  >>> c = tf.SparseTensor(indices=[[0, 0], [1, 2]], values=[1, 2],
  ...  dense_shape=[3, 4])
  >>> tf.debugging.assert_type(c, tf_type= tf.int32)

  Args:
    tensor: A `Tensor`, `SparseTensor` or `tf.Variable`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name for this operation. Defaults to "assert_type"

  Raises:
    TypeError: If the tensor's data type doesn't match `tf_type`.
  """
  assert_type(tensor=tensor, tf_type=tf_type, message=message, name=name)
@tf_export(v1=['debugging.assert_type', 'assert_type'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_type')
def assert_type(tensor, tf_type, message=None, name=None):
  """Statically asserts that the given `Tensor` is of the specified type.

  Args:
    tensor: A `Tensor` or `SparseTensor`.
    tf_type: A tensorflow type (`dtypes.float32`, `tf.int64`, `dtypes.bool`,
      etc).
    message: A string to prefix to the default message.
    name: A name to give this `Op`. Defaults to "assert_type"

  Raises:
    TypeError: If the tensors data type doesn't match `tf_type`.

  Returns:
    A `no_op` that does nothing. Type can be determined statically.
  """
  message = message or ''
  # Normalize `tf_type` so callers may pass anything `as_dtype` understands
  # (a DType, a numpy type, a type string, ...).
  tf_type = dtypes.as_dtype(tf_type)
  with ops.name_scope(name, 'assert_type', [tensor]):
    # SparseTensors are checked as-is; only dense-convertible inputs are
    # converted to a Tensor here.
    if not isinstance(tensor, sparse_tensor.SparseTensor):
      tensor = ops.convert_to_tensor(tensor, name='tensor')
    if tensor.dtype != tf_type:
      if context.executing_eagerly():
        # Eager tensors carry no graph name, so omit it from the error.
        raise TypeError('%s tensor must be of type %s' % (message, tf_type))
      else:
        raise TypeError(
            '%s %s must be of type %s' %
            (message, tensor.name if hasattr(tensor, 'name') else '', tf_type))
  return control_flow_ops.no_op('statically_determined_correct_type')
def _dimension_sizes(x):
  """Gets the dimension sizes of a tensor `x`.

  If a size can be determined statically it is returned as an integer,
  otherwise as a tensor.

  If `x` is a scalar it is treated as rank 1 size 1.

  Args:
    x: A `Tensor`.

  Returns:
    Dimension sizes.
  """
  dynamic_shape = array_ops.shape(x)
  rank = x.get_shape().rank
  rank_is_known = rank is not None
  if rank_is_known and rank == 0:
    # Statically-known scalar: report a single dimension of size one.
    return (1,)
  if rank_is_known and rank > 0:
    # Rank known: use each statically-known size, falling back to the matching
    # entry of the dynamic shape tensor where a size is unknown.
    static_shape = x.get_shape().as_list()
    sizes = [
        int(size) if size is not None else dynamic_shape[i]
        for i, size in enumerate(static_shape)
    ]
    return sizes
  # Rank unknown: defer the scalar-vs-non-scalar decision to graph runtime.
  has_rank_zero = math_ops.equal(array_ops.rank(x), 0)
  return control_flow_ops.cond(
      has_rank_zero, lambda: array_ops.constant([1]), lambda: dynamic_shape)
def _symbolic_dimension_sizes(symbolic_shape):
  """Returns `symbolic_shape`, mapping an empty spec to a rank-1 size-1 one."""
  # A zero-length shape specification denotes a scalar, which is treated as a
  # single dimension of size one.
  return symbolic_shape if symbolic_shape else (1,)
def _has_known_value(dimension_size):
  """Returns True if `dimension_size` is a non-None, int-parsable value."""
  if dimension_size is None:
    return False
  try:
    int(dimension_size)
  except (ValueError, TypeError):
    return False
  return True
def _is_symbol_for_any_size(symbol):
  """Returns True if `symbol` is a wildcard (`None` or '.') matching any size."""
  return symbol in (None, '.')
# Internal record pairing a tensor with its parsed shape constraint:
#   x: the tensor under check.
#   unspecified_dim: presumably whether the spec began with `...`/'*'
#     (variable number of outer dims) -- confirm against `assert_shapes`.
#   actual_sizes: the tensor's dimension sizes (static ints or dynamic
#     tensors, presumably as produced by `_dimension_sizes`).
#   symbolic_sizes: the user-specified per-dimension size symbols.
_TensorDimSizes = collections.namedtuple(
    '_TensorDimSizes',
    ['x', 'unspecified_dim', 'actual_sizes', 'symbolic_sizes'])
@tf_export('debugging.assert_shapes', v1=[])
@dispatch.add_dispatch_support
def assert_shapes_v2(shapes, data=None, summarize=None, message=None,
                     name=None):
  """Assert tensor shapes and dimension size relationships between tensors.

  This Op checks that the shapes of a collection of tensors satisfy the
  given relational constraints.

  Example:

  >>> n = 10
  >>> q = 3
  >>> d = 7
  >>> x = tf.zeros([n,q])
  >>> y = tf.ones([n,d])
  >>> param = tf.Variable([1.0, 2.0, 3.0])
  >>> scalar = 1.0
  >>> tf.debugging.assert_shapes([
  ...  (x, ('N', 'Q')),
  ...  (y, ('N', 'D')),
  ...  (param, ('Q',)),
  ...  (scalar, ()),
  ... ])

  >>> tf.debugging.assert_shapes([
  ...   (x, ('N', 'D')),
  ...   (y, ('N', 'D'))
  ... ])
  Traceback (most recent call last):
  ...
  ValueError: ...

  When `x`, `y`, `param` or `scalar` violates any of the specified
  constraints, `message` and the first `summarize` entries of the first
  violating tensor encountered are printed, and `InvalidArgumentError` is
  raised.

  Size entries in the specified shapes are checked against other entries by
  their __hash__, except:
  - a size entry is interpreted as an explicit size if it can be parsed as an
    integer primitive.
  - a size entry is interpreted as *any* size if it is None or '.'.

  If the first entry of a shape is `...` (type `Ellipsis`) or '*' that
  indicates a variable number of outer dimensions of unspecified size, i.e.
  the constraint applies to the inner-most dimensions only.

  Scalar tensors and specified shapes of length zero (excluding the
  'inner-most' prefix) are both treated as having a single dimension of
  size one.

  Args:
    shapes: dictionary with (`Tensor` to shape) items, or a list of
      (`Tensor`, shape) tuples. A shape must be an iterable.
    data: The tensors to print out if the condition is False. Defaults to error
      message and first few entries of the violating tensor.
    summarize: Print this many entries of the tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_shapes".

  Raises:
    ValueError: If static checks determine any shape constraint is violated.
  """
  assert_shapes(
      shapes, data=data, summarize=summarize, message=message, name=name)
@tf_export(v1=['debugging.assert_shapes'])
@dispatch.add_dispatch_support
def assert_shapes(shapes, data=None, summarize=None, message=None, name=None):
  """Assert tensor shapes and dimension size relationships between tensors.

  This Op checks that a collection of tensors shape relationships
  satisfies given constraints.

  Example:

  >>> n = 10
  >>> q = 3
  >>> d = 7
  >>> x = tf.zeros([n,q])
  >>> y = tf.ones([n,d])
  >>> param = tf.Variable([1.0, 2.0, 3.0])
  >>> scalar = 1.0
  >>> tf.debugging.assert_shapes([
  ...   (x, ('N', 'Q')),
  ...   (y, ('N', 'D')),
  ...   (param, ('Q',)),
  ...   (scalar, ()),
  ... ])

  >>> tf.debugging.assert_shapes([
  ...   (x, ('N', 'D')),
  ...   (y, ('N', 'D'))
  ... ])
  Traceback (most recent call last):
  ...
  ValueError: ...

  Example of adding a dependency to an operation:

  ```python
  with tf.control_dependencies([tf.assert_shapes(shapes)]):
    output = tf.matmul(x, y, transpose_a=True)
  ```

  If `x`, `y`, `param` or `scalar` does not have a shape that satisfies
  all specified constraints, `message`, as well as the first `summarize`
  entries of the first encountered violating tensor are printed, and
  `InvalidArgumentError` is raised.

  Size entries in the specified shapes are checked against other entries by
  their __hash__, except:
  - a size entry is interpreted as an explicit size if it can be parsed as an
    integer primitive.
  - a size entry is interpreted as *any* size if it is None or '.'.

  If the first entry of a shape is `...` (type `Ellipsis`) or '*' that
  indicates a variable number of outer dimensions of unspecified size, i.e.
  the constraint applies to the inner-most dimensions only.

  Scalar tensors and specified shapes of length zero (excluding the
  'inner-most' prefix) are both treated as having a single dimension of
  size one.

  Args:
    shapes: A list of (`Tensor`, `shape`) tuples, wherein `shape` is the
      expected shape of `Tensor`. See the example code above. The `shape` must
      be an iterable. Each element of the iterable can be either a concrete
      integer value or a string that abstractly represents the dimension.
      For example,
      - `('N', 'Q')` specifies a 2D shape wherein the first and second
        dimensions of shape may or may not be equal.
      - `('N', 'N', 'Q')` specifies a 3D shape wherein the first and second
        dimensions are equal.
      - `(1, 'N')` specifies a 2D shape wherein the first dimension is
        exactly 1 and the second dimension can be any value.
      Note that the abstract dimension letters take effect across different
      tuple elements of the list. For example,
      `tf.debugging.assert_shapes([(x, ('N', 'A')), (y, ('N', 'B'))]` asserts
      that both `x` and `y` are rank-2 tensors and their first dimensions are
      equal (`N`).
      `shape` can also be a `tf.TensorShape`.
    data: The tensors to print out if the condition is False. Defaults to
      error message and first few entries of the violating tensor.
    summarize: Print this many entries of the tensor.
    message: A string to prefix to the default message.
    name: A name for this operation (optional). Defaults to "assert_shapes".

  Returns:
    Op raising `InvalidArgumentError` unless all shape constraints are
    satisfied.
    If static checks determine all constraints are satisfied, a `no_op` is
    returned.

  Raises:
    ValueError: If static checks determine any shape constraint is violated.
  """
  # If the user manages to assemble a dict containing tensors (possible in
  # Graph mode only), make sure we still accept that.
  if isinstance(shapes, dict):
    shapes = shapes.items()

  message = message or ''
  with ops.name_scope(name, 'assert_shapes', [shapes, data]):
    # Shape specified as None implies no constraint
    shape_constraints = [(x if isinstance(x, sparse_tensor.SparseTensor) else
                          ops.convert_to_tensor(x), s)
                         for x, s in shapes if s is not None]

    executing_eagerly = context.executing_eagerly()

    def tensor_name(x):
      # Eager tensors and SparseTensors have no stable graph name, so error
      # messages fall back to a shape/dtype description for them.
      if executing_eagerly or isinstance(x, sparse_tensor.SparseTensor):
        return _shape_and_dtype_str(x)
      return x.name

    # Normalize every (tensor, shape) constraint into a _TensorDimSizes
    # record pairing the tensor's actual sizes with the symbolic sizes.
    tensor_dim_sizes = []
    for tensor, symbolic_shape in shape_constraints:
      is_iterable = (
          hasattr(symbolic_shape, '__iter__') or
          hasattr(symbolic_shape, '__getitem__')  # For Python 2 compat.
      )
      if not is_iterable:
        raise ValueError(
            '%s. '
            'Tensor %s. Specified shape must be an iterable. '
            'An iterable has the attribute `__iter__` or `__getitem__`. '
            'Received specified shape: %s' %
            (message, tensor_name(tensor), symbolic_shape))

      # We convert this into a tuple to handle strings, lists and numpy arrays
      symbolic_shape_tuple = tuple(symbolic_shape)

      tensors_specified_innermost = False
      for i, symbol in enumerate(symbolic_shape_tuple):
        if symbol not in [Ellipsis, '*']:
          continue

        if i != 0:
          raise ValueError(
              '%s. '
              'Tensor %s specified shape index %d. '
              'Symbol `...` or `*` for a variable number of '
              'unspecified dimensions is only allowed as the first entry' %
              (message, tensor_name(tensor), i))

        tensors_specified_innermost = True

      # Only include the size of the specified dimensions since the 0th symbol
      # is either ellipsis or *
      tensor_dim_sizes.append(
          _TensorDimSizes(
              tensor, tensors_specified_innermost, _dimension_sizes(tensor),
              _symbolic_dimension_sizes(
                  symbolic_shape_tuple[1:]
                  if tensors_specified_innermost else symbolic_shape_tuple)))

    # First pass: rank assertions. These must hold before any dimension of
    # the tensor can safely be indexed in the size checks below.
    rank_assertions = []
    for sizes in tensor_dim_sizes:
      rank = len(sizes.symbolic_sizes)
      rank_zero_or_one = rank in [0, 1]
      if sizes.unspecified_dim:
        if rank_zero_or_one:
          # No assertion of rank needed as `x` only need to have rank at least
          # 0. See elif rank_zero_or_one case comment.
          continue
        assertion = assert_rank_at_least(
            x=sizes.x,
            rank=rank,
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      elif rank_zero_or_one:
        # Rank 0 is treated as rank 1 size 1, i.e. there is
        # no distinction between the two in terms of rank.
        # See _dimension_sizes.
        assertion = assert_rank_in(
            x=sizes.x,
            ranks=[0, 1],
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      else:
        assertion = assert_rank(
            x=sizes.x,
            rank=rank,
            data=data,
            summarize=summarize,
            message=message,
            name=name)
      rank_assertions.append(assertion)

    # Second pass: size assertions. Each size symbol is resolved either
    # statically (raising ValueError right away on a provable violation) or
    # dynamically (adding a runtime Assert op).
    size_assertions = []
    size_specifications = {}
    for sizes in tensor_dim_sizes:
      for i, size_symbol in enumerate(sizes.symbolic_sizes):

        if _is_symbol_for_any_size(size_symbol):
          # Size specified as any implies no constraint
          continue

        if sizes.unspecified_dim:
          # Count from the innermost end: an ellipsis/'*' prefix leaves the
          # number of outer dimensions open, so only negative indices are
          # meaningful.
          tensor_dim = i - len(sizes.symbolic_sizes)
        else:
          tensor_dim = i

        if size_symbol in size_specifications or _has_known_value(size_symbol):
          if _has_known_value(size_symbol):
            specified_size = int(size_symbol)
            size_check_message = 'Specified explicitly'
          else:
            specified_size, specified_by_y, specified_at_dim = \
                size_specifications[size_symbol]
            size_check_message = (
                'Specified by tensor %s dimension %d' %
                (tensor_name(specified_by_y), specified_at_dim))

          # This is extremely subtle. If actual_sizes is dynamic, we must
          # make sure a control dependency is inserted here so that this slice
          # can not execute until the rank is asserted to be enough for the
          # slice to not fail.
          with ops.control_dependencies(rank_assertions):
            actual_size = sizes.actual_sizes[tensor_dim]
          if _has_known_value(actual_size) and _has_known_value(specified_size):
            if int(actual_size) != int(specified_size):
              raise ValueError(
                  '%s. %s. Tensor %s dimension %s must have size %d. '
                  'Received size %d, shape %s' %
                  (message, size_check_message, tensor_name(sizes.x),
                   tensor_dim, specified_size, actual_size,
                   sizes.x.get_shape()))
            # No dynamic assertion needed
            continue

          condition = math_ops.equal(
              ops.convert_to_tensor(actual_size),
              ops.convert_to_tensor(specified_size))
          data_ = data
          if data is None:
            data_ = [
                message, size_check_message,
                'Tensor %s dimension' % tensor_name(sizes.x), tensor_dim,
                'must have size', specified_size, 'Received shape: ',
                array_ops.shape(sizes.x)
            ]
          size_assertions.append(
              control_flow_ops.Assert(condition, data_, summarize=summarize))
        else:
          # Not sure if actual_sizes is a constant, but for safety, guard
          # on rank. See explanation above about actual_sizes need for safety.
          with ops.control_dependencies(rank_assertions):
            size = sizes.actual_sizes[tensor_dim]
          size_specifications[size_symbol] = (size, sizes.x, tensor_dim)

    # Ensure both assertions actually occur.
    with ops.control_dependencies(rank_assertions):
      shapes_assertion = control_flow_ops.group(size_assertions)

    return shapes_assertion
# pylint: disable=line-too-long
def _get_diff_for_monotonic_comparison(x):
  """Returns the adjacent differences x[1:] - x[:-1] of the flattened `x`."""
  flat = array_ops.reshape(x, [-1])
  if not is_numeric_tensor(flat):
    raise TypeError('Expected x to be numeric, instead found: %s' % flat)

  too_short = math_ops.less(array_ops.size(flat), 2)
  upper = array_ops.shape(flat) - 1

  def _empty():
    # Fewer than two elements: there is nothing to compare, so return [].
    return ops.convert_to_tensor([], dtype=flat.dtype)

  def _adjacent_diff():
    # With 2 or more elements, return x[1:] - x[:-1].
    return (array_ops.strided_slice(flat, [1], [1] + upper) -
            array_ops.strided_slice(flat, [0], upper))

  return control_flow_ops.cond(too_short, _empty, _adjacent_diff)
@tf_export(
    'debugging.is_numeric_tensor',
    v1=['debugging.is_numeric_tensor', 'is_numeric_tensor'])
@deprecation.deprecated_endpoints('is_numeric_tensor')
def is_numeric_tensor(tensor):
  """Returns `True` if `tensor` is a `tf.Tensor` with a numeric dtype.

  The numeric dtypes are: `tf.float32`, `tf.float64`, `tf.int8`, `tf.int16`,
  `tf.int32`, `tf.int64`, `tf.uint8`, `tf.qint8`, `tf.qint32`, `tf.quint8`
  and `tf.complex64`.

  Anything that is not a `tf.Tensor` object, or whose dtype falls outside
  that set, yields `False`.
  """
  if not isinstance(tensor, ops.Tensor):
    return False
  return tensor.dtype in NUMERIC_TYPES
@tf_export(
    'math.is_non_decreasing',
    v1=[
        'math.is_non_decreasing', 'debugging.is_non_decreasing',
        'is_non_decreasing'
    ])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('debugging.is_non_decreasing',
                                  'is_non_decreasing')
def is_non_decreasing(x, name=None):
  """Returns `True` if `x` is non-decreasing.

  Elements of `x` are visited in row-major order; the tensor is
  non-decreasing when every adjacent pair satisfies `x[i] <= x[i+1]`.
  A tensor with fewer than two elements is trivially non-decreasing.

  See also: `is_strictly_increasing`

  >>> x1 = tf.constant([1.0, 1.0, 3.0])
  >>> tf.math.is_non_decreasing(x1)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
  >>> x2 = tf.constant([3.0, 1.0, 2.0])
  >>> tf.math.is_non_decreasing(x2)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional). Defaults to "is_non_decreasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is non-decreasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_non_decreasing', [x]):
    deltas = _get_diff_for_monotonic_comparison(x)
    # For len(x) < 2 `deltas` is empty, and reduce_all([]) is True, which
    # gives the trivially-non-decreasing result for free.
    origin = ops.convert_to_tensor(0, dtype=deltas.dtype)
    return math_ops.reduce_all(math_ops.less_equal(origin, deltas))
@tf_export(
    'math.is_strictly_increasing',
    v1=[
        'math.is_strictly_increasing', 'debugging.is_strictly_increasing',
        'is_strictly_increasing'
    ])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('debugging.is_strictly_increasing',
                                  'is_strictly_increasing')
def is_strictly_increasing(x, name=None):
  """Returns `True` if `x` is strictly increasing.

  Elements of `x` are visited in row-major order; the tensor is strictly
  increasing when every adjacent pair satisfies `x[i] < x[i+1]`. A tensor
  with fewer than two elements is trivially strictly increasing.

  See also: `is_non_decreasing`

  >>> x1 = tf.constant([1.0, 2.0, 3.0])
  >>> tf.math.is_strictly_increasing(x1)
  <tf.Tensor: shape=(), dtype=bool, numpy=True>
  >>> x2 = tf.constant([3.0, 1.0, 2.0])
  >>> tf.math.is_strictly_increasing(x2)
  <tf.Tensor: shape=(), dtype=bool, numpy=False>

  Args:
    x: Numeric `Tensor`.
    name: A name for this operation (optional).
      Defaults to "is_strictly_increasing"

  Returns:
    Boolean `Tensor`, equal to `True` iff `x` is strictly increasing.

  Raises:
    TypeError: if `x` is not a numeric tensor.
  """
  with ops.name_scope(name, 'is_strictly_increasing', [x]):
    deltas = _get_diff_for_monotonic_comparison(x)
    # For len(x) < 2 `deltas` is empty, and reduce_all([]) is True, which
    # gives the trivially-increasing result for free.
    origin = ops.convert_to_tensor(0, dtype=deltas.dtype)
    return math_ops.reduce_all(math_ops.less(origin, deltas))
def _assert_same_base_type(items, expected_type=None):
r"""Asserts all items are of the same base type.
Args:
items: List of graph items (e.g., `Variable`, `Tensor`, `SparseTensor`,
`Operation`, or `IndexedSlices`). Can include `None` elements, which
will be ignored.
expected_type: Expected type. If not specified, assert all items are
of the same base type.
Returns:
Validated type, or none if neither expected_type nor items provided.
Raises:
ValueError: If any types do not match.
"""
original_expected_type = expected_type
mismatch = False
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
elif expected_type != item_type:
mismatch = True
break
if mismatch:
# Loop back through and build up an informative error message (this is very
# slow, so we don't do it unless we found an error above).
expected_type = original_expected_type
original_item_str = None
for item in items:
if item is not None:
item_type = item.dtype.base_dtype
if not expected_type:
expected_type = item_type
original_item_str = item.name if hasattr(item, 'name') else str(item)
elif expected_type != item_type:
raise ValueError('%s, type=%s, must be of the same type (%s)%s.' % (
item.name if hasattr(item, 'name') else str(item),
item_type, expected_type,
(' as %s' % original_item_str) if original_item_str else ''))
return expected_type # Should be unreachable
else:
return expected_type
@tf_export(
    'debugging.assert_same_float_dtype',
    v1=['debugging.assert_same_float_dtype', 'assert_same_float_dtype'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_same_float_dtype')
def assert_same_float_dtype(tensors=None, dtype=None):
  """Validates and returns a float dtype based on `tensors` and `dtype`.

  Ops such as matrix multiplication require their inputs and weights to share
  one float type. This checks that all `tensors` agree on a type, that the
  type matches `dtype` when one is supplied, and that the result is floating
  point. With neither `tensors` nor `dtype` given, `dtypes.float32` is
  returned.

  Args:
    tensors: Tensors of input values. Can include `None` elements, which will
      be ignored.
    dtype: Expected type.

  Returns:
    Validated type.

  Raises:
    ValueError: if neither `tensors` nor `dtype` is supplied, or result is not
      float, or the common type of the inputs is not a floating point type.
  """
  if tensors:
    dtype = _assert_same_base_type(tensors, dtype)
  if dtype:
    if not dtype.is_floating:
      raise ValueError('Expected floating point type, got %s.' % dtype)
    return dtype
  # Nothing constrained the type; fall back to the default float dtype.
  return dtypes.float32
@tf_export('debugging.assert_scalar', v1=[])
@dispatch.add_dispatch_support
def assert_scalar_v2(tensor, message=None, name=None):
  """Asserts that the given `tensor` is a scalar.

  Raises `ValueError` unless `tensor`'s rank is statically known to be zero;
  an unknown shape also raises. The check is entirely static, so this method
  returns nothing and adds no op to the graph.

  Args:
    tensor: A `Tensor`.
    message: A string to prefix to the default message.
    name: A name for this operation. Defaults to "assert_scalar"

  Raises:
    ValueError: If the tensor is not scalar (rank 0), or if its shape is
      unknown.
  """
  # Thin forwarding wrapper over the v1 implementation, which takes its
  # keyword arguments in (name, message) order.
  assert_scalar(tensor=tensor, name=name, message=message)
@tf_export(v1=['debugging.assert_scalar', 'assert_scalar'])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints('assert_scalar')
def assert_scalar(tensor, name=None, message=None):
  """Asserts that the given `tensor` is a scalar (i.e. zero-dimensional).

  Raises `ValueError` unless `tensor`'s rank is statically known to be zero;
  an unknown shape also raises.

  Args:
    tensor: A `Tensor`.
    name: A name for this operation. Defaults to "assert_scalar"
    message: A string to prefix to the default message.

  Returns:
    The input tensor (potentially converted to a `Tensor`).

  Raises:
    ValueError: If the tensor is not scalar (rank 0), or if its shape is
      unknown.
  """
  with ops.name_scope(name, 'assert_scalar', [tensor]) as name_scope:
    converted = ops.convert_to_tensor(tensor, name=name_scope)
    observed_shape = converted.get_shape()
    if observed_shape.ndims == 0:
      return converted
    prefix = message or ''
    if context.executing_eagerly():
      # Eager tensors carry no meaningful graph name, so omit it.
      raise ValueError('%sExpected scalar shape, saw shape: %s.'
                       % (prefix, observed_shape,))
    raise ValueError('%sExpected scalar shape for %s, saw shape: %s.'
                     % (prefix, converted.name, observed_shape))
@tf_export('ensure_shape')
@dispatch.add_dispatch_support
def ensure_shape(x, shape, name=None):
  """Updates the static shape of `x` and checks at runtime that it holds.

  The returned tensor's static shape is the result of merging `x`'s static
  shape with `shape` (see `tf.TensorShape.merge_with`). When the op executes,
  it additionally asserts that the runtime shape of `x` is compatible with
  `shape` (see `tf.TensorShape.is_compatible_with`), and raises
  `tf.errors.InvalidArgumentError` otherwise. Use `None` for dimensions of
  unknown size:

  >>> x = tf.constant([[1, 2, 3],
  ...                  [4, 5, 6]])
  >>> x = tf.ensure_shape(x, [2, 3])
  >>> x = tf.ensure_shape(x, [None, 3])
  >>> x = tf.ensure_shape(x, [2, None])

  An incompatible `shape` raises an error:

  >>> x = tf.ensure_shape(x, [5])
  Traceback (most recent call last):
  ...
  tf.errors.InvalidArgumentError: Shape of tensor dummy_input [3] is not
    compatible with expected shape [5]. [Op:EnsureShape]

  This is most useful during graph construction (e.g. while tracing a
  `tf.function`) when *you* know a shape that TensorFlow cannot determine
  statically — for example, an image decoder whose output size is only known
  to you. Inside a `tf.function` or `v1.Graph` context both the buildtime and
  runtime shapes are checked, which is stricter than `tf.Tensor.set_shape`:
  `set_shape` only updates the static shape and never enforces it at runtime,
  so static and runtime shapes can silently diverge.

  Caution: the runtime check travels with the *returned* tensor. If you call
  `tf.ensure_shape` and discard its result, the check may never run.

  Args:
    x: A `Tensor`.
    shape: A `TensorShape` representing the shape of this tensor, a
      `TensorShapeProto`, a list, a tuple, or None.
    name: A name for this operation (optional). Defaults to "EnsureShape".

  Returns:
    A `Tensor`. Has the same type and contents as `x`.

  Raises:
    tf.errors.InvalidArgumentError: If `shape` is incompatible with the shape
      of `x`.
  """
  if isinstance(shape, tensor_shape.TensorShape):
    target_shape = shape
  else:
    # Accept TensorShapeProto, lists, tuples, and None via the
    # TensorShape constructor.
    target_shape = tensor_shape.TensorShape(shape)
  return array_ops.ensure_shape(x, target_shape, name=name)
@ops.RegisterGradient('EnsureShape')
def _ensure_shape_grad(unused_op, grad):
  """EnsureShape acts as an identity on values, so gradients pass through."""
  return grad
| apache-2.0 |
tsifrer/python-twitch-client | docs/conf.py | 1 | 5381 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
#
# python-twitch-client documentation build configuration file, created by
# sphinx-quickstart on Wed Feb 15 20:51:23 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"

# The master toctree document.
master_doc = "index"

# General information about the project.
project = "python-twitch-client"
copyright = "2018-2020, Tomaz Sifrer"
author = "Tomaz Sifrer"

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.1"
# The full version, including alpha/beta/rc tags.
release = "0.1"

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
#
# NOTE: `None` is not a valid value for `language` in current Sphinx
# releases (it triggers a warning and is coerced to 'en'); set it
# explicitly to the default English.
language = "en"

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"

# on_rtd is whether we are on readthedocs.org, this line of code grabbed from docs.readthedocs.org
on_rtd = os.environ.get("READTHEDOCS", None) == "True"
if not on_rtd:  # only import and set the theme if we're building docs locally
    import sphinx_rtd_theme

    html_theme = "sphinx_rtd_theme"
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]

# -- Options for HTMLHelp output ------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = "python-twitch-clientdoc"

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (
        master_doc,
        "python-twitch-client.tex",
        "python-twitch-client Documentation",
        "Tomaz Sifrer",
        "manual",
    ),
]

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (
        master_doc,
        "python-twitch-client",
        "python-twitch-client Documentation",
        [author],
        1,
    )
]

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (
        master_doc,
        "python-twitch-client",
        "python-twitch-client Documentation",
        author,
        "python-twitch-client",
        "An easy to use Python library for accessing the Twitch API.",
        "Miscellaneous",
    ),
]
| mit |
flavour/iscram | models/000_1st_run.py | 3 | 4195 | # -*- coding: utf-8 -*-
"""
1st RUN:
- Run update check if needed.
- Import the S3 Framework Extensions
- If needed, copy deployment specific templates to the live installation.
Developers: note that the templates are version-controlled, while their
site-specific copies are not (to avoid leaking of sensitive
or irrelevant information into the repository).
If you add something new to these files, you should also
make the change at deployment-templates and commit it.
"""
import os
# -----------------------------------------------------------------------------
# Perform update checks - will happen in 1st_run or on those upgrades when new
# dependencies have been added.
# Increment this when new dependencies are added
# This will be compared to the version in the 0000_update_check.py 'canary' file.
CURRENT_UPDATE_CHECK_ID = 2
update_check_needed = False
try:
    # CANARY_UPDATE_CHECK_ID is defined by models/0000_update_check.py, which
    # web2py executes (alphabetically) before this file when it exists.
    if CANARY_UPDATE_CHECK_ID != CURRENT_UPDATE_CHECK_ID:
        update_check_needed = True
except NameError:
    # Canary file missing => first run (or it predates versioning).
    update_check_needed = True
if update_check_needed:
    # Run update checks -- these are the update_check() functions from each
    # Python file in private/update_check that has such a function.
    from gluon.fileutils import listdir
    update_check_path_parts = [
        "applications", request.application, "private", "update_check"]
    update_check_path = os.path.join(*update_check_path_parts)
    update_check_import_path = ".".join(update_check_path_parts)
    errors = []
    warnings = []
    # Supply the current (Web2py) environment. Pick out only the items that are
    # safe for the check functions to combine with their own environments, i.e.
    # not anything of the form __x__.
    environment = dict((k, v) for (k, v) in globals().iteritems() if not k.startswith("__"))
    for filename in listdir(update_check_path, expression = ".*\.py$"):
        try:
            # filename[0:-3] strips the trailing ".py" to get the module name.
            exec "from %s.%s import update_check" % \
                (update_check_import_path, filename[0:-3])
        except ImportError:
            # Module has no update_check() (or failed to import); skip it.
            continue
        messages = update_check(environment)
        errors.extend(messages.get("error_messages", []))
        warnings.extend(messages.get("warning_messages", []))
    # Catch-all check for dependency errors.
    # @ToDo: This does not satisfy the goal of calling out all the setup errors
    # at once -- it will die on the first fatal error encountered.
    try:
        import s3 as s3base
    except Exception, errmsg:
        # NOTE(review): list.extend iterates its argument. In Python 2 an
        # exception iterates over its args, so a multi-arg exception adds
        # several entries here -- confirm `append(str(errmsg))` was not the
        # intent.
        errors.extend(errmsg)
    # Report (non-fatal) warnings.
    if warnings:
        prefix = "\n%s: " % T("WARNING")
        msg = prefix + prefix.join(warnings)
        import sys
        print >> sys.stderr, msg
    # Report errors and stop.
    if errors:
        prefix = "\n%s: " % T("ACTION REQUIRED")
        msg = prefix + prefix.join(errors)
        import sys
        print >> sys.stderr, msg
        # HTTP 500 aborts the request; web2py reruns the models (and hence
        # these checks) on the next request.
        raise HTTP(500, body=msg)
    # Create or update the canary file.
    from gluon import portalocker
    canary = open(
        "applications/%s/models/0000_update_check.py" % request.application,
        "w")
    portalocker.lock(canary, portalocker.LOCK_EX)
    statement = "CANARY_UPDATE_CHECK_ID = %s" % CURRENT_UPDATE_CHECK_ID
    canary.write(statement)
    canary.close()
# -----------------------------------------------------------------------------
from gluon import current
from gluon.storage import Storage
from gluon.contrib.simplejson.ordered_dict import OrderedDict
# Keep all S3 framework-level elements stored in response.s3, so as to avoid
# polluting global namespace & to make it clear which part of the framework is
# being interacted with.
# Avoid using this where a method parameter could be used:
# http://en.wikipedia.org/wiki/Anti_pattern#Programming_anti-patterns
response.s3 = Storage()
s3 = response.s3
response.s3.gis = Storage() # Defined early for use by S3Config.
current.cache = cache
# Import S3Config
import s3cfg
deployment_settings = s3cfg.S3Config()
current.deployment_settings = deployment_settings
# END =========================================================================
| mit |
facelesspanda/inkpy | inkpy/_runtime/value.py | 1 | 5794 | from enum import IntEnum
from .path import Path
from .inklist import InkList
from .object import Object
class ValueType(IntEnum):
    """Tags the kind of runtime value a `Value` subclass holds."""
    INT = 0
    FLOAT = 1
    LIST = 2
    STRING = 3
    DIVERT_TARGET = 4
    VAR_POINTER = 5
class Value(Object):
    """Abstract base for typed runtime values.

    Holds a raw payload together with the Python type it is required to
    stay; the ``value`` setter enforces that invariant. Subclasses provide
    the concrete ``value_type`` tag, truthiness, and casting rules.
    """

    def __init__(self, value, *, vtype):
        super().__init__()
        if not isinstance(vtype, type):
            raise TypeError
        self.__required_type = vtype
        self.__payload = value

    @property
    def value_type(self):
        # Subclasses must return their ValueType tag.
        raise NotImplementedError

    def __bool__(self):
        # Subclasses define their own truthiness.
        raise NotImplementedError

    def cast(self, ntype):
        # Subclasses define which ValueTypes they can convert to.
        raise NotImplementedError

    @property
    def value(self):
        return self.__payload

    @value.setter
    def value(self, v):
        if not isinstance(v, self.__required_type):
            raise TypeError
        self.__payload = v

    @staticmethod
    def create(val):
        """Wraps a plain Python object in the matching Value subclass.

        Booleans are folded to 1/0 first. Returns None when no wrapper
        exists for the given type.
        """
        if isinstance(val, bool):
            val = 1 if val else 0
        if isinstance(val, int):
            return IntValue(val)
        if isinstance(val, float):
            return FloatValue(val)
        if isinstance(val, str):
            return StringValue(val)
        if isinstance(val, Path):
            return DivertTargetValue(val)
        if isinstance(val, InkList):
            return ListValue(val)
        return None

    def copy(self):
        return self.create(self.value)

    def __str__(self):
        return str(self.value)
class IntValue(Value):
    """Runtime integer value."""

    def __init__(self, i=0):
        super().__init__(i, vtype=int)

    @property
    def value_type(self):
        return ValueType.INT

    def __bool__(self):
        # Truthy for any non-zero integer.
        return bool(self.value)

    def cast(self, ntype):
        if ntype == ValueType.FLOAT:
            return FloatValue(float(self.value))
        if ntype == ValueType.STRING:
            return StringValue(str(self.value))
        if ntype == self.value_type:
            return self
        raise ValueError("Unexpected type cast of Int Value to new ValueType")
class FloatValue(Value):
    """Concrete Value wrapping a Python float (ink FLOAT type)."""

    def __init__(self, f=0.0):
        super().__init__(f, vtype=float)

    @property
    def value_type(self):
        return ValueType.FLOAT

    def __bool__(self):
        # Truthy for any non-zero float.
        return self.value != 0.0

    def cast(self, ntype):
        """Return this value converted to *ntype* (identity cast returns self)."""
        if ntype == self.value_type:
            return self
        if ntype == ValueType.INT:
            return IntValue(int(self.value))
        if ntype == ValueType.STRING:
            return StringValue(str(self.value))
        raise ValueError(
            "Unexpected type cast of Float Value to new ValueType")
class StringValue(Value):
    """Concrete Value wrapping a Python str (ink STRING type).

    Pre-computes the whitespace classification used by the output stream:
    whether the string is a lone newline, pure inline whitespace (spaces
    and tabs only), or carries visible content.
    """
    def __init__(self, s=""):
        super().__init__(s, vtype=str)
        self.__nl = s == "\n"
        # Bug fix: "inline whitespace" means the string consists solely of
        # spaces/tabs.  The previous expression computed the opposite
        # ("contains no space or tab"), which made is_non_ws False for
        # ordinary text such as "hello".
        self.__iws = all(c == ' ' or c == '\t' for c in s)
    @property
    def value_type(self):
        return ValueType.STRING
    def __bool__(self):
        # Truthy for any non-empty string.
        return len(self.value) > 0
    @property
    def is_new_line(self):
        # True only for the exact single-character string "\n".
        return self.__nl
    @property
    def is_inline_ws(self):
        # True when every character is a space or tab (True for "").
        return self.__iws
    @property
    def is_non_ws(self):
        # The string has at least one visible (non-whitespace) character.
        return not self.__nl and not self.__iws
    def cast(self, ntype):
        """Cast to *ntype*; numeric casts parse the string.

        Bug fix: numeric casts now return wrapped IntValue/FloatValue
        objects, consistent with every other Value subclass (previously
        raw int/float were returned).  Returns None when the string does
        not parse as the requested number.
        """
        if ntype == self.value_type: return self
        if ntype == ValueType.INT:
            try:
                return IntValue(int(self.value))
            except ValueError:
                return None
        if ntype == ValueType.FLOAT:
            try:
                return FloatValue(float(self.value))
            except ValueError:
                return None
        raise ValueError(
            "Unexpected type cast of String Value to new ValueType")
class DivertTargetValue(Value):
    """Value holding a Path the story can divert to."""

    def __init__(self, p=None):
        super().__init__(p, vtype=Path)

    @property
    def value_type(self):
        return ValueType.DIVERT_TARGET

    def cast(self, ntype):
        # Divert targets are opaque: only the identity cast is legal.
        if ntype == self.value_type:
            return self
        raise ValueError(
            "Unexpected type cast of Divert Target Value to new ValueType")

    def __str__(self):
        return "DivertTargetValue(%s)" % self.value
class VarPtrValue(Value):
    """Value that refers to another variable by name.

    context_index is presumably the call-stack context that owns the
    variable (-1 when unresolved) -- confirm against the ink runtime.
    """

    def __init__(self, vname=None, cidx=-1):
        super().__init__(vname, vtype=str)
        self.context_index = cidx

    @property
    def value_type(self):
        return ValueType.VAR_POINTER

    def cast(self, ntype):
        # Variable pointers cannot be converted to any other type.
        if ntype == self.value_type:
            return self
        raise ValueError(
            "Unexpected type cast of Variable Pointer Value to new ValueType")

    def __str__(self):
        return "VariablePointerValue(%s)" % self.value

    def copy(self):
        # Overrides Value.copy: Value.create() cannot rebuild context_index.
        return VarPtrValue(self.value, self.context_index)
class ListValue(Value):
    """Value wrapping an InkList (ink LIST type)."""

    def __init__(self, *args):
        # Bug fix: the previous implementation assigned self.value directly
        # without ever calling Value.__init__, so the validating `value`
        # setter dereferenced the unset private vtype attribute and every
        # construction raised AttributeError.  Route all three arities
        # through super().__init__ instead.
        if len(args) == 0:
            super().__init__(InkList(), vtype=InkList)
        elif len(args) == 1:
            super().__init__(InkList(args[0]), vtype=InkList)
        elif len(args) == 2:
            super().__init__(InkList.from_single(args), vtype=InkList)
        else:
            raise NotImplementedError

    @property
    def value_type(self):
        return ValueType.LIST

    def __bool__(self):
        # Truthy when any item in the list carries a non-zero value.
        return any(v != 0 for v in self.value.values())

    def cast(self, ntype):
        """Cast to *ntype* using the list's maximum item.

        Returns wrapped Value instances for consistency with the other
        Value subclasses (previously raw int/float/str were returned).
        An empty list casts to 0 / 0.0 / "".
        """
        if ntype == self.value_type:
            return self
        if ntype == ValueType.INT:
            m = self.value.max
            return IntValue(0 if m["item"].is_none else m["value"])
        if ntype == ValueType.FLOAT:
            m = self.value.max
            return FloatValue(0.0 if m["item"].is_none else float(m["value"]))
        if ntype == ValueType.STRING:
            m = self.value.max
            return StringValue("" if m["item"].is_none else str(m["item"]))
        raise ValueError(
            "Unexpected type cast of Ink List Value to new ValueType")

    @staticmethod
    def retain_list_origins(old, new):
        """Copy origin names from *old* onto *new* when *new* is empty."""
        if not isinstance(old, ListValue) or not isinstance(new, ListValue):
            return
        if len(new.value) == 0:
            new.value.set_initial_orgnames(old.value.origin_names)
| mit |
taniwha-qf/Extraplanetary-Launchpads | tools/cvtquickhull.py | 2 | 2596 | from mu import Mu, MuObject, MuTransform, MuMesh, MuTagLayer
from multiprocessing import Pool
def read_vertices(input):
    """Read a vertex list: an int count followed by that many vectors."""
    count = input.read_int()
    return [input.read_vector() for _ in range(count)]
class Face:
    # Plain record filled in field-by-field by read_face:
    # tri (vertex-index triple), highest, vispoints.
    pass
def read_face(input):
    """Read one quickhull face record from *input* (a Mu binary reader).

    Layout: three vertex indices, the index of the highest visible point,
    then a counted list of visible-point indices.
    """
    f = Face()
    a, b, c = input.read_int(3)
    # NOTE(review): the winding chosen here looks suspicious -- when the
    # first index is 0 the triangle becomes (a, c, b), otherwise (c, b, a).
    # Presumably this flips handedness for Unity; confirm against the
    # writer side before changing.
    if not a:
        f.tri = a, c, b
    else:
        f.tri = c, b, a
    f.highest = input.read_int()
    count = input.read_int()
    f.vispoints = input.read_int(count)
    return f
def read_facelist(input):
    """Read a counted sequence of face records (see read_face)."""
    count = input.read_int()
    return [read_face(input) for _ in range(count)]
def make_transform(name):
    """Build an identity MuTransform named *name*."""
    xform = MuTransform()
    xform.name = name
    xform.localPosition = (0, 0, 0)
    # Presumably a w-first identity quaternion -- confirm against the mu lib.
    xform.localRotation = (1, 0, 0, 0)
    xform.localScale = (1, 1, 1)
    return xform
def make_empty(name):
    """Build an empty MuObject with identity transform and default tag/layer."""
    empty = MuObject()
    empty.transform = make_transform(name)
    tag_layer = MuTagLayer()
    tag_layer.tag = ""
    tag_layer.layer = 0
    empty.tag_and_layer = tag_layer
    return empty
def make_tris(faces):
    """Collect the triangle index tuples of *faces* into a list.

    Idiom cleanup: comprehension instead of a manual append loop.
    """
    return [face.tri for face in faces]
def make_mesh(name, verts, faces):
    """Build a MuObject carrying a mesh with one submesh made from *faces*."""
    holder = make_empty(name)
    mesh = MuMesh()
    mesh.verts = verts
    mesh.submeshes = [make_tris(faces)]
    holder.shared_mesh = mesh
    return holder
def thread_func(parms):
    """Pool worker: convert one quickhull debug dump <name>.bin to <name>.mu.

    *parms* is the base file name.  Reads the vertex list plus four face
    lists and writes them out as child meshes of a single empty object.
    """
    name = parms
    input = Mu()
    input.file = open(name + ".bin", "rb");
    verts = read_vertices(input)
    faces = read_facelist(input)
    final_faces = read_facelist(input)
    point = input.read_int()  # index into verts; negative means no point
    lit_faces = read_facelist(input)
    new_faces = read_facelist(input)
    output = Mu()
    output.materials = []
    output.textures = []
    output.obj = make_empty(name)
    output.obj.children.append(make_mesh("faces", verts, faces))
    output.obj.children.append(make_mesh("final_faces", verts, final_faces))
    output.obj.children.append(make_mesh("lit_faces", verts, lit_faces))
    output.obj.children.append(make_mesh("new_faces", verts, new_faces))
    if (point >= 0):
        # Mark the current point with an empty placed at its position.
        p = make_empty("point")
        p.transform.localPosition = verts[point]
        output.obj.children.append(p)
    output.write(name+".mu")
    print(name)
def _collect_work_queue():
    """Collect consecutively numbered quickhull-NNNNN.bin dump names.

    Scans from 0 upward and stops at the first missing file.
    """
    names = []
    i = 0
    while True:
        # '#' in the old format spec ('#05d') was a no-op for decimals.
        name = f"quickhull-{i:05d}"
        try:
            # Probe for the dump file; stop at the first gap.
            probe = open(name + ".bin", "rb")
        except OSError:
            # Narrowed from a bare `except:` -- only a missing/unreadable
            # file should end the scan, not e.g. KeyboardInterrupt.
            break
        probe.close()
        names.append(name)
        i += 1
    return names


if __name__ == "__main__":
    # The guard is required: on spawn-based platforms (Windows, macOS)
    # multiprocessing workers re-import this module and must not rescan
    # the directory or start another Pool recursively.
    work_queue = _collect_work_queue()
    print(len(work_queue))
    with Pool(12) as p:
        p.map(thread_func, work_queue)
| gpl-3.0 |
HTCPCP/TPhiBot | functs/magic.py | 1 | 3364 | import mtgsdk as mtg
import traceback
import phibot
def magic_card_by_name(name, chat_id):
    """Look up a Magic card by name and send a text summary to *chat_id*.

    Prefers an exact name match; otherwise falls back to the first search
    result and flags the guess.  Always sends exactly one message via
    phibot (from the finally clause).
    """
    def describe(card, guessed):
        # Shared card formatter (previously duplicated inline twice).
        lines = []
        if guessed:
            lines.append('! NO MATCHING CARD FOUND, GUESSING YOUR INTENT !\n')
        lines.append(card.name)
        lines.append('-' * 20)
        if card.mana_cost:
            lines.append('Cost: \t' + card.mana_cost)
        lines.append('Type: \t' + card.type)
        if 'Creature' in card.type:
            lines.append('Power: \t' + card.power)
            lines.append('Toughness: \t' + card.toughness)
        lines.append('-' * 20)
        if card.text:
            lines.append('Text: \t' + card.text)
            lines.append('-' * 20)
        return '\n'.join(lines) + '\n'

    answer = ''
    cards = mtg.Card.where(name=name).all()
    # noinspection PyBroadException
    try:
        exact = next((c for c in cards if c.name == name), None)
        if exact is not None:
            # Bug fix: this path used to `return res` before `answer` was
            # assigned, so the finally clause sent an empty message.
            answer = describe(exact, guessed=False)
            return answer
        # No exact match: guess the first result (IndexError on an empty
        # result set falls through to the error message below).
        answer = describe(cards[0], guessed=True)
    except Exception:
        traceback.print_exc()
        answer = 'No card matching the name %s found.' % name
    finally:
        phibot.send_text(answer, chat_id)
# TODO: send image correctly
def magic_card_img_by_name(name, chat_id):
    """Like magic_card_by_name, but appends the card's image URL on an
    exact match.  Always sends exactly one message via phibot."""
    def describe(card, guessed):
        # Shared card formatter (previously duplicated inline twice).
        lines = []
        if guessed:
            lines.append('! NO MATCHING CARD FOUND, GUESSING YOUR INTENT !\n')
        lines.append(card.name)
        lines.append('-' * 20)
        if card.mana_cost:
            lines.append('Cost: \t' + card.mana_cost)
        lines.append('Type: \t' + card.type)
        if 'Creature' in card.type:
            lines.append('Power: \t' + card.power)
            lines.append('Toughness: \t' + card.toughness)
        lines.append('-' * 20)
        if card.text:
            lines.append('Text: \t' + card.text)
            lines.append('-' * 20)
        return '\n'.join(lines) + '\n'

    answer = ''
    cards = mtg.Card.where(name=name).all()
    # noinspection PyBroadException
    try:
        exact = next((c for c in cards if c.name == name), None)
        if exact is not None:
            res = describe(exact, guessed=False)
            if exact.image_url is not None:
                res += exact.image_url
            else:
                res += 'No image URL available for %s' % name
            # Bug fix: set `answer` before returning so the finally clause
            # does not send an empty message.
            answer = res
            return answer
        # Guessed results intentionally carry no image (matches original).
        answer = describe(cards[0], guessed=True)
    except Exception:
        answer = '! No card matching the name %s found. !' % name
    finally:
        phibot.send_text(answer, chat_id)
| gpl-3.0 |
diorcety/intellij-community | python/lib/Lib/site-packages/django/core/management/commands/makemessages.py | 73 | 15487 | import fnmatch
import glob
import os
import re
import sys
from itertools import dropwhile
from optparse import make_option
from subprocess import PIPE, Popen
from django.core.management.base import CommandError, BaseCommand
from django.utils.text import get_text_list
pythonize_re = re.compile(r'(?:^|\n)\s*//')
plural_forms_re = re.compile(r'^(?P<value>"Plural-Forms.+?\\n")\s*$', re.MULTILINE | re.DOTALL)
def handle_extensions(extensions=('html',)):
    """
    Organizes multiple extensions that are separated with commas or passed by
    using --extension/-e multiple times.

    For example: running 'django-admin makemessages -e js,txt -e xhtml -a'
    results in the extension set {'.js', '.txt', '.xhtml'}.

    NOTE(review): despite the doctest-style examples this docstring used to
    carry, the return value is a *set* (unordered), never a list, and '.py'
    is always removed -- see the comment at the bottom.
    """
    ext_list = []
    for ext in extensions:
        ext_list.extend(ext.replace(' ','').split(','))
    for i, ext in enumerate(ext_list):
        if not ext.startswith('.'):
            ext_list[i] = '.%s' % ext_list[i]
    # we don't want *.py files here because of the way non-*.py files
    # are handled in make_messages() (they are copied to file.ext.py files to
    # trick xgettext to parse them as Python files)
    return set([x for x in ext_list if x != '.py'])
def _popen(cmd):
"""
Friendly wrapper around Popen for Windows
"""
p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, close_fds=os.name != 'nt', universal_newlines=True)
return p.communicate()
def walk(root, topdown=True, onerror=None, followlinks=False):
    """
    A version of os.walk that can follow symlinks for Python < 2.6

    NOTE(review): with followlinks=True, entries reached through a
    symlinked directory are yielded *in addition to* the regular os.walk
    output, and their dirpath is rooted at the link location -- callers
    (find_files) tolerate the possible duplication.
    """
    for dirpath, dirnames, filenames in os.walk(root, topdown, onerror):
        yield (dirpath, dirnames, filenames)
        if followlinks:
            for d in dirnames:
                p = os.path.join(dirpath, d)
                if os.path.islink(p):
                    # Recurse into the link target with the same wrapper.
                    for link_dirpath, link_dirnames, link_filenames in walk(p):
                        yield (link_dirpath, link_dirnames, link_filenames)
def is_ignored(path, ignore_patterns):
    """
    Helper function to check if the given path should be ignored or not.
    Matching is case-sensitive glob style (fnmatchcase); an empty pattern
    list ignores nothing.
    """
    # Idiom cleanup: any() instead of a manual loop-and-return.
    return any(fnmatch.fnmatchcase(path, pattern)
               for pattern in ignore_patterns)
def find_files(root, ignore_patterns, verbosity, symlinks=False):
    """
    Helper function to get all (dirpath, filename) pairs under *root*,
    skipping paths matched by ignore_patterns, sorted for deterministic
    processing order.
    """
    all_files = []
    # Bug fix: the walk previously hard-coded "." and silently ignored the
    # *root* parameter.  All in-tree callers pass ".", so behaviour is
    # unchanged for them.
    for (dirpath, dirnames, filenames) in walk(root, followlinks=symlinks):
        for f in filenames:
            norm_filepath = os.path.normpath(os.path.join(dirpath, f))
            if is_ignored(norm_filepath, ignore_patterns):
                if verbosity > 1:
                    sys.stdout.write('ignoring file %s in %s\n' % (f, dirpath))
            else:
                # append a tuple directly instead of extend([...]).
                all_files.append((dirpath, f))
    all_files.sort()
    return all_files
def copy_plural_forms(msgs, locale, domain, verbosity):
    """
    Copies plural forms header contents from a Django catalog of locale to
    the msgs string, inserting it at the right place. msgs should be the
    contents of a newly created .po file.
    """
    import django
    django_dir = os.path.normpath(os.path.join(os.path.dirname(django.__file__)))
    if domain == 'djangojs':
        # Fall back to the 'django' catalog if 'djangojs' has none.
        domains = ('djangojs', 'django')
    else:
        domains = ('django',)
    for domain in domains:
        django_po = os.path.join(django_dir, 'conf', 'locale', locale, 'LC_MESSAGES', '%s.po' % domain)
        if os.path.exists(django_po):
            # NOTE(review): the file handle is never closed explicitly;
            # CPython's refcounting closes it, but a with-block would be
            # safer (kept as-is, this file targets Python 2.x style).
            m = plural_forms_re.search(open(django_po, 'rU').read())
            if m:
                if verbosity > 1:
                    sys.stderr.write("copying plural forms: %s\n" % m.group('value'))
                lines = []
                seen = False
                for line in msgs.split('\n'):
                    # The first empty line terminates the PO header; the
                    # Plural-Forms value is injected right there.
                    if not line and not seen:
                        line = '%s\n' % m.group('value')
                        seen = True
                    lines.append(line)
                msgs = '\n'.join(lines)
                break
    return msgs
def make_messages(locale=None, domain='django', verbosity='1', all=False,
        extensions=None, symlinks=False, ignore_patterns=[], no_wrap=False):
    """
    Uses the locale directory from the Django SVN tree or an application/
    project to process all translatable strings: extracts them with
    xgettext into <domain>.pot and merges into each locale's <domain>.po
    via msguniq/msgmerge.

    NOTE(review): ignore_patterns uses a mutable default ([]); it is only
    read here, but callers should not rely on that.  verbosity defaults to
    the *string* '1'; the management command passes an int.
    """
    # Need to ensure that the i18n framework is enabled
    from django.conf import settings
    if settings.configured:
        settings.USE_I18N = True
    else:
        settings.configure(USE_I18N = True)
    from django.utils.translation import templatize
    invoked_for_django = False
    if os.path.isdir(os.path.join('conf', 'locale')):
        localedir = os.path.abspath(os.path.join('conf', 'locale'))
        invoked_for_django = True
    elif os.path.isdir('locale'):
        localedir = os.path.abspath('locale')
    else:
        raise CommandError("This script should be run from the Django SVN tree or your project or app tree. If you did indeed run it from the SVN checkout or your project or application, maybe you are just missing the conf/locale (in the django tree) or locale (for project and application) directory? It is not created automatically, you have to create it by hand if you want to enable i18n for your project or application.")
    if domain not in ('django', 'djangojs'):
        raise CommandError("currently makemessages only supports domains 'django' and 'djangojs'")
    if (locale is None and not all) or domain is None:
        # backwards compatible error message
        if not sys.argv[0].endswith("make-messages.py"):
            message = "Type '%s help %s' for usage.\n" % (os.path.basename(sys.argv[0]), sys.argv[1])
        else:
            message = "usage: make-messages.py -l <language>\n   or: make-messages.py -a\n"
        raise CommandError(message)
    # We require gettext version 0.15 or newer.
    output = _popen('xgettext --version')[0]
    match = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)', output)
    if match:
        xversion = (int(match.group('major')), int(match.group('minor')))
        if xversion < (0, 15):
            raise CommandError("Django internationalization requires GNU gettext 0.15 or newer. You are using version %s, please upgrade your gettext toolset." % match.group())
    languages = []
    if locale is not None:
        languages.append(locale)
    elif all:
        # One language per subdirectory of the locale dir.
        locale_dirs = filter(os.path.isdir, glob.glob('%s/*' % localedir))
        languages = [os.path.basename(l) for l in locale_dirs]
    wrap = no_wrap and '--no-wrap' or ''
    for locale in languages:
        if verbosity > 0:
            print "processing language", locale
        basedir = os.path.join(localedir, locale, 'LC_MESSAGES')
        if not os.path.isdir(basedir):
            os.makedirs(basedir)
        pofile = os.path.join(basedir, '%s.po' % domain)
        potfile = os.path.join(basedir, '%s.pot' % domain)
        # Start from a fresh .pot for every locale.
        if os.path.exists(potfile):
            os.unlink(potfile)
        for dirpath, file in find_files(".", ignore_patterns, verbosity, symlinks=symlinks):
            file_base, file_ext = os.path.splitext(file)
            if domain == 'djangojs' and file_ext in extensions:
                if verbosity > 1:
                    sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                # JS sources are rewritten (//-comments -> #) into a
                # temporary <file>.py so xgettext (in Perl mode) parses them.
                src = open(os.path.join(dirpath, file), "rU").read()
                src = pythonize_re.sub('\n#', src)
                thefile = '%s.py' % file
                f = open(os.path.join(dirpath, thefile), "w")
                try:
                    f.write(src)
                finally:
                    f.close()
                cmd = (
                    'xgettext -d %s -L Perl %s --keyword=gettext_noop '
                    '--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
                    '--keyword=pgettext:1c,2 --keyword=npgettext:1c,2,3 '
                    '--from-code UTF-8 --add-comments=Translators -o - "%s"' % (
                        domain, wrap, os.path.join(dirpath, thefile)
                    )
                )
                msgs, errors = _popen(cmd)
                if errors:
                    raise CommandError("errors happened while running xgettext on %s\n%s" % (file, errors))
                # Point the "#: file:line" references back at the original
                # (non-.py) file name.
                old = '#: '+os.path.join(dirpath, thefile)[2:]
                new = '#: '+os.path.join(dirpath, file)[2:]
                msgs = msgs.replace(old, new)
                if os.path.exists(potfile):
                    # Strip the header
                    msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
                else:
                    msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
                if msgs:
                    f = open(potfile, 'ab')
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                os.unlink(os.path.join(dirpath, thefile))
            elif domain == 'django' and (file_ext == '.py' or file_ext in extensions):
                thefile = file
                orig_file = os.path.join(dirpath, file)
                if file_ext in extensions:
                    # Templates are converted to Python via templatize()
                    # and written to a temporary <file>.py for xgettext.
                    src = open(orig_file, "rU").read()
                    thefile = '%s.py' % file
                    f = open(os.path.join(dirpath, thefile), "w")
                    try:
                        f.write(templatize(src, orig_file[2:]))
                    finally:
                        f.close()
                if verbosity > 1:
                    sys.stdout.write('processing file %s in %s\n' % (file, dirpath))
                cmd = (
                    'xgettext -d %s -L Python %s --keyword=gettext_noop '
                    '--keyword=gettext_lazy --keyword=ngettext_lazy:1,2 '
                    '--keyword=ugettext_noop --keyword=ugettext_lazy '
                    '--keyword=ungettext_lazy:1,2 --keyword=pgettext:1c,2 '
                    '--keyword=npgettext:1c,2,3 --keyword=pgettext_lazy:1c,2 '
                    '--keyword=npgettext_lazy:1c,2,3 --from-code UTF-8 '
                    '--add-comments=Translators -o - "%s"' % (
                        domain, wrap, os.path.join(dirpath, thefile))
                )
                msgs, errors = _popen(cmd)
                if errors:
                    raise CommandError("errors happened while running xgettext on %s\n%s" % (file, errors))
                if thefile != file:
                    old = '#: '+os.path.join(dirpath, thefile)[2:]
                    new = '#: '+orig_file[2:]
                    msgs = msgs.replace(old, new)
                if os.path.exists(potfile):
                    # Strip the header
                    msgs = '\n'.join(dropwhile(len, msgs.split('\n')))
                else:
                    msgs = msgs.replace('charset=CHARSET', 'charset=UTF-8')
                if msgs:
                    f = open(potfile, 'ab')
                    try:
                        f.write(msgs)
                    finally:
                        f.close()
                if thefile != file:
                    os.unlink(os.path.join(dirpath, thefile))
        if os.path.exists(potfile):
            # De-duplicate the extracted messages, then merge into (or
            # create) the locale's .po file.
            msgs, errors = _popen('msguniq %s --to-code=utf-8 "%s"' %
                                  (wrap, potfile))
            if errors:
                raise CommandError("errors happened while running msguniq\n%s" % errors)
            f = open(potfile, 'w')
            try:
                f.write(msgs)
            finally:
                f.close()
            if os.path.exists(pofile):
                msgs, errors = _popen('msgmerge %s -q "%s" "%s"' %
                                      (wrap, pofile, potfile))
                if errors:
                    raise CommandError("errors happened while running msgmerge\n%s" % errors)
            elif not invoked_for_django:
                msgs = copy_plural_forms(msgs, locale, domain, verbosity)
            msgs = msgs.replace(
                "#. #-#-#-#-# %s.pot (PACKAGE VERSION) #-#-#-#-#\n" % domain, "")
            f = open(pofile, 'wb')
            try:
                f.write(msgs)
            finally:
                f.close()
            os.unlink(potfile)
class Command(BaseCommand):
    # Thin management-command wrapper around make_messages(): declares the
    # CLI options, normalizes them, and forwards.
    option_list = BaseCommand.option_list + (
        make_option('--locale', '-l', default=None, dest='locale',
            help='Creates or updates the message files only for the given locale (e.g. pt_BR).'),
        make_option('--domain', '-d', default='django', dest='domain',
            help='The domain of the message files (default: "django").'),
        make_option('--all', '-a', action='store_true', dest='all',
            default=False, help='Reexamines all source code and templates for new translation strings and updates all message files for all available languages.'),
        make_option('--extension', '-e', dest='extensions',
            help='The file extension(s) to examine (default: ".html", separate multiple extensions with commas, or use -e multiple times)',
            action='append'),
        make_option('--symlinks', '-s', action='store_true', dest='symlinks',
            default=False, help='Follows symlinks to directories when examining source code and templates for translation strings.'),
        make_option('--ignore', '-i', action='append', dest='ignore_patterns',
            default=[], metavar='PATTERN', help='Ignore files or directories matching this glob-style pattern. Use multiple times to ignore more.'),
        make_option('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns',
            default=True, help="Don't ignore the common glob-style patterns 'CVS', '.*' and '*~'."),
        make_option('--no-wrap', action='store_true', dest='no_wrap',
            default=False, help="Don't break long message lines into several lines"),
    )
    help = "Runs over the entire source tree of the current directory and pulls out all strings marked for translation. It creates (or updates) a message file in the conf/locale (in the django tree) or locale (for project and application) directory."
    requires_model_validation = False
    can_import_settings = False
    def handle(self, *args, **options):
        # Positional args are rejected; everything comes in via options.
        if len(args) != 0:
            raise CommandError("Command doesn't accept any arguments")
        locale = options.get('locale')
        domain = options.get('domain')
        verbosity = int(options.get('verbosity'))
        process_all = options.get('all')
        extensions = options.get('extensions')
        symlinks = options.get('symlinks')
        ignore_patterns = options.get('ignore_patterns')
        if options.get('use_default_ignore_patterns'):
            ignore_patterns += ['CVS', '.*', '*~']
        # De-duplicate patterns accumulated from repeated -i flags.
        ignore_patterns = list(set(ignore_patterns))
        no_wrap = options.get('no_wrap')
        if domain == 'djangojs':
            extensions = handle_extensions(extensions or ['js'])
        else:
            extensions = handle_extensions(extensions or ['html'])
        if verbosity > 1:
            sys.stdout.write('examining files with the extensions: %s\n'
                             % get_text_list(list(extensions), 'and'))
        make_messages(locale, domain, verbosity, process_all, extensions, symlinks, ignore_patterns, no_wrap)
| apache-2.0 |
kelseyoo14/Wander | venv_2_7/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/optionaltags.py | 1727 | 10500 | from __future__ import absolute_import, division, unicode_literals
from . import _base
class Filter(_base.Filter):
    """Token-stream filter that drops optional start/end tags, following
    the tag-omission rules of the HTML5 specification."""
    def slider(self):
        """Yield (previous, current, next) triples over the source tokens;
        previous/next are None at the stream boundaries."""
        previous1 = previous2 = None
        for token in self.source:
            if previous1 is not None:
                yield previous2, previous1, token
            previous2 = previous1
            previous1 = token
        yield previous2, previous1, None
    def __iter__(self):
        for previous, token, next in self.slider():
            type = token["type"]
            if type == "StartTag":
                # Start tags with attributes can never be omitted.
                if (token["data"] or
                    not self.is_optional_start(token["name"], previous, next)):
                    yield token
            elif type == "EndTag":
                if not self.is_optional_end(token["name"], next):
                    yield token
            else:
                yield token
    def is_optional_start(self, tagname, previous, next):
        """Return True when *tagname*'s start tag may be omitted given the
        surrounding tokens."""
        type = next and next["type"] or None
        # NOTE(review): `tagname in 'html'` is substring membership on a
        # string, not a tuple test; it also matches 'h', 'ht', 'tm', etc.
        # Harmless in practice (no such tag names), but fragile.
        if tagname in 'html':
            # An html element's start tag may be omitted if the first thing
            # inside the html element is not a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname == 'head':
            # A head element's start tag may be omitted if the first thing
            # inside the head element is an element.
            # XXX: we also omit the start tag if the head element is empty
            if type in ("StartTag", "EmptyTag"):
                return True
            elif type == "EndTag":
                return next["name"] == "head"
        elif tagname == 'body':
            # A body element's start tag may be omitted if the first thing
            # inside the body element is not a space character or a comment,
            # except if the first thing inside the body element is a script
            # or style element and the node immediately preceding the body
            # element is a head element whose end tag has been omitted.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we do not look at the preceding event, so we never omit
                # the body element's start tag if it's followed by a script or
                # a style element.
                return next["name"] not in ('script', 'style')
            else:
                return True
        elif tagname == 'colgroup':
            # A colgroup element's start tag may be omitted if the first thing
            # inside the colgroup element is a col element, and if the element
            # is not immediately preceded by another colgroup element whose
            # end tag has been omitted.
            if type in ("StartTag", "EmptyTag"):
                # XXX: we do not look at the preceding event, so instead we never
                # omit the colgroup element's end tag when it is immediately
                # followed by another colgroup element. See is_optional_end.
                return next["name"] == "col"
            else:
                return False
        elif tagname == 'tbody':
            # A tbody element's start tag may be omitted if the first thing
            # inside the tbody element is a tr element, and if the element is
            # not immediately preceded by a tbody, thead, or tfoot element
            # whose end tag has been omitted.
            if type == "StartTag":
                # omit the thead and tfoot elements' end tag when they are
                # immediately followed by a tbody element. See is_optional_end.
                if previous and previous['type'] == 'EndTag' and \
                  previous['name'] in ('tbody', 'thead', 'tfoot'):
                    return False
                return next["name"] == 'tr'
            else:
                return False
        return False
    def is_optional_end(self, tagname, next):
        """Return True when *tagname*'s end tag may be omitted given the
        following token."""
        type = next and next["type"] or None
        if tagname in ('html', 'head', 'body'):
            # An html element's end tag may be omitted if the html element
            # is not immediately followed by a space character or a comment.
            return type not in ("Comment", "SpaceCharacters")
        elif tagname in ('li', 'optgroup', 'tr'):
            # A li element's end tag may be omitted if the li element is
            # immediately followed by another li element or if there is
            # no more content in the parent element.
            # An optgroup element's end tag may be omitted if the optgroup
            # element is immediately followed by another optgroup element,
            # or if there is no more content in the parent element.
            # A tr element's end tag may be omitted if the tr element is
            # immediately followed by another tr element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] == tagname
            else:
                return type == "EndTag" or type is None
        elif tagname in ('dt', 'dd'):
            # A dt element's end tag may be omitted if the dt element is
            # immediately followed by another dt element or a dd element.
            # A dd element's end tag may be omitted if the dd element is
            # immediately followed by another dd element or a dt element,
            # or if there is no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('dt', 'dd')
            elif tagname == 'dd':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'p':
            # A p element's end tag may be omitted if the p element is
            # immediately followed by an address, article, aside,
            # blockquote, datagrid, dialog, dir, div, dl, fieldset,
            # footer, form, h1, h2, h3, h4, h5, h6, header, hr, menu,
            # nav, ol, p, pre, section, table, or ul, element, or if
            # there is no more content in the parent element.
            if type in ("StartTag", "EmptyTag"):
                return next["name"] in ('address', 'article', 'aside',
                                        'blockquote', 'datagrid', 'dialog',
                                        'dir', 'div', 'dl', 'fieldset', 'footer',
                                        'form', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
                                        'header', 'hr', 'menu', 'nav', 'ol',
                                        'p', 'pre', 'section', 'table', 'ul')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'option':
            # An option element's end tag may be omitted if the option
            # element is immediately followed by another option element,
            # or if it is immediately followed by an <code>optgroup</code>
            # element, or if there is no more content in the parent
            # element.
            if type == "StartTag":
                return next["name"] in ('option', 'optgroup')
            else:
                return type == "EndTag" or type is None
        elif tagname in ('rt', 'rp'):
            # An rt element's end tag may be omitted if the rt element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            # An rp element's end tag may be omitted if the rp element is
            # immediately followed by an rt or rp element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('rt', 'rp')
            else:
                return type == "EndTag" or type is None
        elif tagname == 'colgroup':
            # A colgroup element's end tag may be omitted if the colgroup
            # element is not immediately followed by a space character or
            # a comment.
            if type in ("Comment", "SpaceCharacters"):
                return False
            elif type == "StartTag":
                # XXX: we also look for an immediately following colgroup
                # element. See is_optional_start.
                return next["name"] != 'colgroup'
            else:
                return True
        elif tagname in ('thead', 'tbody'):
            # A thead element's end tag may be omitted if the thead element
            # is immediately followed by a tbody or tfoot element.
            # A tbody element's end tag may be omitted if the tbody element
            # is immediately followed by a tbody or tfoot element, or if
            # there is no more content in the parent element.
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] in ['tbody', 'tfoot']
            elif tagname == 'tbody':
                return type == "EndTag" or type is None
            else:
                return False
        elif tagname == 'tfoot':
            # A tfoot element's end tag may be omitted if the tfoot element
            # is immediately followed by a tbody element, or if there is no
            # more content in the parent element.
            # XXX: we never omit the end tag when the following element is
            # a tbody. See is_optional_start.
            if type == "StartTag":
                return next["name"] == 'tbody'
            else:
                return type == "EndTag" or type is None
        elif tagname in ('td', 'th'):
            # A td element's end tag may be omitted if the td element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            # A th element's end tag may be omitted if the th element is
            # immediately followed by a td or th element, or if there is
            # no more content in the parent element.
            if type == "StartTag":
                return next["name"] in ('td', 'th')
            else:
                return type == "EndTag" or type is None
        return False
| artistic-2.0 |
rbaindourov/v8-inspector | Source/chrome/tools/gyp/test/mac/gyptest-identical-name.py | 94 | 1547 | #!/usr/bin/env python
# Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies libraries (in identical-names) are properly handled by Xcode.
The names for all libraries participating in this build are:
libtestlib.a - identical-name/testlib
libtestlib.a - identical-name/proxy/testlib
libproxy.a - identical-name/proxy
The first two libs produce a hash collision in Xcode when Gyp is executed,
because they have the same name and would be copied to the same directory with
Xcode default settings.
For this scenario to work one needs to change the Xcode variables SYMROOT and
CONFIGURATION_BUILD_DIR. Setting these to per-lib-unique directories, avoids
copying the libs into the same directory.
The test consists of two steps. The first one verifies that by setting both
vars, there is no hash collision anymore during Gyp execution and that the libs
can actually be built. The second one verifies that there is still a hash
collision if the vars are not set and thus the current behavior is preserved.
"""
import TestGyp
import sys
def IgnoreOutput(string, expected_string):
  # Output-comparison callback for TestGyp that accepts any output.
  return True
if sys.platform == 'darwin':
  test = TestGyp.TestGyp(formats=['xcode'])
  # Step 1: with per-library SYMROOT/CONFIGURATION_BUILD_DIR, gyp generation
  # and the build must both succeed despite the identical library names.
  test.run_gyp('test.gyp', chdir='identical-name')
  test.build('test.gyp', test.ALL, chdir='identical-name')
  # Step 2: without those overrides, gyp must still refuse to generate the
  # project (hash collision), so no .xcodeproj appears.
  test.run_gyp('test-should-fail.gyp', chdir='identical-name')
  test.built_file_must_not_exist('test-should-fail.xcodeproj')
  test.pass_test()
| bsd-3-clause |
IllusionRom-deprecated/android_platform_external_chromium_org | build/android/update_verification.py | 30 | 5204 | #!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs semi-automated update testing on a non-rooted device."""
import logging
import optparse
import os
import shutil
import sys
import time
from pylib import android_commands
def _SaveAppData(adb, package_name, from_apk=None, data_dir=None):
  """Snapshots the app's data via `adb backup` (user approves on-device).

  Optionally installs from_apk first.  The resulting backup.ab is moved
  into data_dir when given, otherwise left in the current directory.
  """
  def _BackupAppData(data_dir=None):
    # `adb backup <package>` writes backup.ab into the current directory.
    adb.Adb().SendCommand('backup %s' % package_name)
    backup_file = os.path.join(os.getcwd(), 'backup.ab')
    assert os.path.exists(backup_file), 'Backup failed.'
    if data_dir:
      if not os.path.isdir(data_dir):
        os.makedirs(data_dir)
      shutil.move(backup_file, data_dir)
      backup_file = os.path.join(data_dir, 'backup.ab')
    print 'Application data saved to %s' % backup_file
  if from_apk:
    logging.info('Installing %s...', from_apk)
    output = adb.Install(from_apk, reinstall=True)
    if 'Success' not in output:
      raise Exception('Unable to install %s. output: %s' % (from_apk, output))
  raw_input('Set the application state. Once ready, press enter and '
            'select "Backup my data" on the device.')
  _BackupAppData(data_dir)
def _VerifyAppUpdate(adb, to_apk, app_data, from_apk=None):
  """Checks that to_apk updates the installed app and keeps its data.

  Restores app_data (user approves on-device), then verifies that to_apk
  cannot be installed side-by-side (same package name) but can
  over-install the existing app.
  """
  def _RestoreAppData():
    assert os.path.exists(app_data), 'Backup file does not exist!'
    adb.Adb().SendCommand('restore %s' % app_data)
    # It seems restore command is not synchronous.
    time.sleep(15)
  if from_apk:
    logging.info('Installing %s...', from_apk)
    output = adb.Install(from_apk, reinstall=True)
    if 'Success' not in output:
      raise Exception('Unable to install %s. output: %s' % (from_apk, output))
  logging.info('Restoring the application data...')
  raw_input('Press enter and select "Restore my data" on the device.')
  _RestoreAppData()
  logging.info('Verifying that %s cannot be installed side-by-side...',
               to_apk)
  output = adb.Install(to_apk)
  if 'INSTALL_FAILED_ALREADY_EXISTS' not in output:
    if 'Success' in output:
      # Side-by-side install succeeding means the package name differs.
      raise Exception('Package name has changed! output: %s' % output)
    else:
      raise Exception(output)
  logging.info('Verifying that %s can be overinstalled...', to_apk)
  output = adb.Install(to_apk, reinstall=True)
  if 'Success' not in output:
    raise Exception('Unable to install %s.\n output: %s' % (to_apk, output))
  # NOTE(review): the message below contains a duplicated "the".
  logging.info('Successfully updated to the new apk. Please verify that the '
               'the application data is preserved.')
def main():
  """Parses options and runs either the snapshot (--save) or verify flow."""
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  desc = (
      'Performs semi-automated application update verification testing. '
      'When given --save, it takes a snapshot of the application data '
      'on the device. (A dialog on the device will prompt the user to grant '
      'permission to backup the data.) Otherwise, it performs the update '
      'testing as follows: '
      '1. Installs the |from-apk| (optional). '
      '2. Restores the previously stored snapshot of application data '
      'given by |app-data| '
      '(A dialog on the device will prompt the user to grant permission to '
      'restore the data.) '
      '3. Verifies that |to-apk| cannot be installed side-by-side. '
      '4. Verifies that |to-apk| can replace |from-apk|.')
  parser = optparse.OptionParser(description=desc)
  parser.add_option('--package-name', help='Package name for the application.')
  parser.add_option('--save', action='store_true',
                    help=('Save a snapshot of application data. '
                          'This will be saved as backup.db in the '
                          'current directory if |app-data| directory '
                          # Typo fix: was "specifid".
                          'is not specified.'))
  parser.add_option('--from-apk',
                    help=('APK to update from. This is optional if you already '
                          'have the app installed.'))
  parser.add_option('--to-apk', help='APK to update to.')
  parser.add_option('--app-data',
                    help=('Path to the application data to be restored or the '
                          'directory where the data should be saved.'))
  (options, args) = parser.parse_args()

  if args:
    parser.print_help(sys.stderr)
    parser.error('Unknown arguments: %s.' % args)

  if len(android_commands.GetAttachedDevices()) != 1:
    parser.error('Exactly 1 device must be attached.')
  adb = android_commands.AndroidCommands()

  if options.from_apk:
    assert os.path.isfile(options.from_apk)

  if options.save:
    if not options.package_name:
      parser.print_help(sys.stderr)
      parser.error('Missing --package-name.')
    _SaveAppData(adb, options.package_name, from_apk=options.from_apk,
                 data_dir=options.app_data)
  else:
    if not options.to_apk or not options.app_data:
      parser.print_help(sys.stderr)
      parser.error('Missing --to-apk or --app-data.')
    assert os.path.isfile(options.to_apk)
    assert os.path.isfile(options.app_data)
    _VerifyAppUpdate(adb, options.to_apk, options.app_data,
                     from_apk=options.from_apk)


if __name__ == '__main__':
  main()
| bsd-3-clause |
krafczyk/spack | var/spack/repos/builtin/packages/rna-seqc/package.py | 2 | 2830 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os.path
class RnaSeqc(Package):
    """RNA-SeQC is a java program which computes a series of quality control
    metrics for RNA-seq data."""

    homepage = "http://archive.broadinstitute.org/cancer/cga/rna-seqc"
    url = "http://www.broadinstitute.org/cancer/cga/tools/rnaseqc/RNA-SeQC_v1.1.8.jar"

    version('1.1.8', '71d7b5d3b3dcc1893cdc7f6819185d41', expand=False)
    version('1.1.7', '2d0b8ecac955af2f9bc1b185fdfb6b45', expand=False)
    version('1.1.6', 'fa9c9885081ae2e47f285c7c0f596a14', expand=False)
    version('1.1.5', '4b875671e906f708cbb8fd9bcf0e958d', expand=False)
    version('1.1.4', 'b04d06947c48cb2dc1b0ba29c8232db5', expand=False)

    depends_on('jdk@8:', type='run')

    def install(self, spec, prefix):
        """Install the jar plus a wrapper script that invokes it with java."""
        mkdirp(prefix.bin)
        jar_file = 'RNA-SeQC_v{0}.jar'.format(self.version.dotted)
        install(jar_file, prefix.bin)

        # Set up a helper script to call java on the jar file,
        # explicitly codes the path for java and the jar file.
        script_sh = join_path(os.path.dirname(__file__), "rna-seqc.sh")
        script = join_path(prefix.bin, "rna-seqc")
        install(script_sh, script)
        set_executable(script)

        # Munge the helper script to explicitly point to java and the
        # jar file.
        java = self.spec['jdk'].prefix.bin.java
        filter_kwargs = {'ignore_absent': False, 'backup': False, 'string': False}
        filter_file('^java', java, script, **filter_kwargs)
        # NOTE(review): the pattern below is passed as a *regex*
        # (string=False) and contains an unformatted '{0}' placeholder,
        # where '{0}' is a zero-repetition quantifier. Confirm against the
        # rna-seqc.sh template whether this should instead be the formatted
        # jar_file name (or string=True).
        filter_file('RNA-SeQC_v{0}.jar', join_path(prefix.bin, jar_file),
                    script, **filter_kwargs)
| lgpl-2.1 |
neerajvashistha/pa-dude | lib/python2.7/site-packages/tweepy/models.py | 56 | 14021 | # Tweepy
# Copyright 2009-2010 Joshua Roesslein
# See LICENSE for details.
from __future__ import absolute_import, print_function
from tweepy.utils import parse_datetime, parse_html_value, parse_a_href
class ResultSet(list):
    """A list like object that holds results from a Twitter API query."""

    def __init__(self, max_id=None, since_id=None):
        super(ResultSet, self).__init__()
        self._max_id = max_id
        self._since_id = since_id

    @property
    def max_id(self):
        """Explicit override if set, else the smallest item id minus one."""
        if self._max_id:
            return self._max_id
        known = self.ids()
        if not known:
            return None
        # Max_id is always set to the *smallest* id, minus one, in the set.
        return min(known) - 1

    @property
    def since_id(self):
        """Explicit override if set, else the greatest item id."""
        if self._since_id:
            return self._since_id
        known = self.ids()
        if not known:
            return None
        # Since_id is always set to the *greatest* id in the set.
        return max(known)

    def ids(self):
        """Return the id of every contained item that has one, in order."""
        return [item.id for item in self if hasattr(item, 'id')]
class Model(object):
    """Base class for all API models; remembers the API that produced it."""

    def __init__(self, api=None):
        self._api = api

    def __getstate__(self):
        # pickle support: the API handle is not picklable, so drop it
        # from the copied state.
        state = dict(self.__dict__)
        state.pop('_api', None)
        return state

    @classmethod
    def parse(cls, api, json):
        """Parse a JSON object into a model instance."""
        raise NotImplementedError

    @classmethod
    def parse_list(cls, api, json_list):
        """
        Parse a list of JSON objects into
        a result set of model instances.
        """
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in json_list if obj)
        return results

    def __repr__(self):
        fields = ', '.join('%s=%s' % (name, repr(value))
                           for name, value in vars(self).items())
        return '%s(%s)' % (self.__class__.__name__, fields)
class Status(Model):
    """A single tweet, with convenience methods that proxy to the API."""

    @classmethod
    def parse(cls, api, json):
        status = cls(api)
        setattr(status, '_json', json)
        for key, value in json.items():
            if key == 'user':
                user_model = getattr(api.parser.model_factory, 'user') if api else User
                user = user_model.parse(api, value)
                setattr(status, 'author', user)
                setattr(status, 'user', user)  # deprecated alias, kept for old callers
            elif key == 'created_at':
                setattr(status, key, parse_datetime(value))
            elif key == 'source':
                if '<' in value:
                    # An HTML anchor: split into display text plus its href.
                    setattr(status, key, parse_html_value(value))
                    setattr(status, 'source_url', parse_a_href(value))
                else:
                    setattr(status, key, value)
                    setattr(status, 'source_url', None)
            elif key == 'retweeted_status':
                setattr(status, key, Status.parse(api, value))
            elif key == 'place':
                place = Place.parse(api, value) if value is not None else None
                setattr(status, key, place)
            else:
                setattr(status, key, value)
        return status

    def destroy(self):
        return self._api.destroy_status(self.id)

    def retweet(self):
        return self._api.retweet(self.id)

    def retweets(self):
        return self._api.retweets(self.id)

    def favorite(self):
        return self._api.create_favorite(self.id)

    def __eq__(self, other):
        if isinstance(other, Status):
            return self.id == other.id
        return NotImplemented

    def __ne__(self, other):
        outcome = self == other
        if outcome is NotImplemented:
            return outcome
        return not outcome
class User(Model):
    """A Twitter account, with convenience methods that proxy to the API."""

    @classmethod
    def parse(cls, api, json):
        user = cls(api)
        setattr(user, '_json', json)
        for key, value in json.items():
            if key == 'created_at':
                setattr(user, key, parse_datetime(value))
            elif key == 'status':
                setattr(user, key, Status.parse(api, value))
            elif key == 'following':
                # twitter sets this to null if it is false, so coerce to bool
                setattr(user, key, value is True)
            else:
                setattr(user, key, value)
        return user

    @classmethod
    def parse_list(cls, api, json_list):
        item_list = json_list if isinstance(json_list, list) else json_list['users']
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in item_list)
        return results

    def timeline(self, **kargs):
        return self._api.user_timeline(user_id=self.id, **kargs)

    def friends(self, **kargs):
        return self._api.friends(user_id=self.id, **kargs)

    def followers(self, **kargs):
        return self._api.followers(user_id=self.id, **kargs)

    def follow(self):
        self._api.create_friendship(user_id=self.id)
        self.following = True

    def unfollow(self):
        self._api.destroy_friendship(user_id=self.id)
        self.following = False

    def lists_memberships(self, *args, **kargs):
        return self._api.lists_memberships(user=self.screen_name, *args, **kargs)

    def lists_subscriptions(self, *args, **kargs):
        return self._api.lists_subscriptions(user=self.screen_name, *args, **kargs)

    def lists(self, *args, **kargs):
        return self._api.lists_all(user=self.screen_name, *args, **kargs)

    def followers_ids(self, *args, **kargs):
        return self._api.followers_ids(user_id=self.id, *args, **kargs)
class DirectMessage(Model):
    """A private message between two users."""

    @classmethod
    def parse(cls, api, json):
        dm = cls(api)
        for key, value in json.items():
            if key in ('sender', 'recipient'):
                setattr(dm, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(dm, key, parse_datetime(value))
            else:
                setattr(dm, key, value)
        return dm

    def destroy(self):
        return self._api.destroy_direct_message(self.id)
class Friendship(Model):
    """The relationship between two users, parsed as a (source, target) pair."""

    @classmethod
    def parse(cls, api, json):
        relationship = json['relationship']
        source, target = cls(api), cls(api)
        for key, value in relationship['source'].items():
            setattr(source, key, value)
        for key, value in relationship['target'].items():
            setattr(target, key, value)
        return source, target
class Category(Model):
    """A suggested-user category; attributes copied straight from the payload."""

    @classmethod
    def parse(cls, api, json):
        category = cls(api)
        for key, value in json.items():
            setattr(category, key, value)
        return category
class SavedSearch(Model):
    """A search query the user has saved."""

    @classmethod
    def parse(cls, api, json):
        ss = cls(api)
        for key, value in json.items():
            # created_at is the only field that needs conversion.
            setattr(ss, key, parse_datetime(value) if key == 'created_at' else value)
        return ss

    def destroy(self):
        return self._api.destroy_saved_search(self.id)
class SearchResults(ResultSet):
    """A ResultSet of statuses plus the search metadata that produced them."""

    @classmethod
    def parse(cls, api, json):
        metadata = json['search_metadata']
        results = SearchResults()
        for field in ('refresh_url', 'completed_in', 'query', 'count',
                      'next_results'):
            setattr(results, field, metadata.get(field))
        status_model = getattr(api.parser.model_factory, 'status') if api else Status
        for status in json['statuses']:
            results.append(status_model.parse(api, status))
        return results
class List(Model):
    """A Twitter list, with convenience methods that proxy to the API."""

    @classmethod
    def parse(cls, api, json):
        lst = List(api)
        for key, value in json.items():
            if key == 'user':
                setattr(lst, key, User.parse(api, value))
            elif key == 'created_at':
                setattr(lst, key, parse_datetime(value))
            else:
                setattr(lst, key, value)
        return lst

    @classmethod
    def parse_list(cls, api, json_list, result_set=None):
        if isinstance(json_list, dict):
            json_list = json_list['lists']
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in json_list)
        return results

    def update(self, **kargs):
        return self._api.update_list(self.slug, **kargs)

    def destroy(self):
        return self._api.destroy_list(self.slug)

    def timeline(self, **kargs):
        return self._api.list_timeline(self.user.screen_name, self.slug, **kargs)

    def add_member(self, id):
        return self._api.add_list_member(self.slug, id)

    def remove_member(self, id):
        return self._api.remove_list_member(self.slug, id)

    def members(self, **kargs):
        return self._api.list_members(self.user.screen_name, self.slug, **kargs)

    def is_member(self, id):
        return self._api.is_list_member(self.user.screen_name, self.slug, id)

    def subscribe(self):
        return self._api.subscribe_list(self.user.screen_name, self.slug)

    def unsubscribe(self):
        return self._api.unsubscribe_list(self.user.screen_name, self.slug)

    def subscribers(self, **kargs):
        return self._api.list_subscribers(self.user.screen_name, self.slug, **kargs)

    def is_subscribed(self, id):
        return self._api.is_subscribed_list(self.user.screen_name, self.slug, id)
class Relation(Model):
    """A related-results entry; nested results are parsed recursively."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for key, value in json.items():
            if key == 'value' and json['kind'] in ['Tweet', 'LookedupStatus']:
                setattr(result, key, Status.parse(api, value))
            elif key == 'results':
                setattr(result, key, Relation.parse_list(api, value))
            else:
                setattr(result, key, value)
        return result
class Relationship(Model):
    """Friendship flags between the authenticated user and another account."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        for key, value in json.items():
            if key == 'connections':
                # Flatten the connections list into two boolean attributes.
                setattr(result, 'is_following', 'following' in value)
                setattr(result, 'is_followed_by', 'followed_by' in value)
            else:
                setattr(result, key, value)
        return result
class JSONModel(Model):
    """Pass the raw JSON payload straight through unchanged."""

    @classmethod
    def parse(cls, api, json):
        return json
class IDModel(Model):
    """Extract a plain id list from either a list or an {'ids': [...]} payload."""

    @classmethod
    def parse(cls, api, json):
        return json if isinstance(json, list) else json['ids']
class BoundingBox(Model):
    """The bounding polygon of a Place."""

    @classmethod
    def parse(cls, api, json):
        result = cls(api)
        if json is not None:
            for key, value in json.items():
                setattr(result, key, value)
        return result

    def origin(self):
        """
        Return longitude, latitude of southwest (bottom, left) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][0])

    def corner(self):
        """
        Return longitude, latitude of northeast (top, right) corner of
        bounding box, as a tuple.

        This assumes that bounding box is always a rectangle, which
        appears to be the case at present.
        """
        return tuple(self.coordinates[0][2])
class Place(Model):
    """A named location, optionally with a bounding box and parent places."""

    @classmethod
    def parse(cls, api, json):
        place = cls(api)
        for key, value in json.items():
            if key == 'bounding_box':
                # bounding_box value may be null (None.)
                # Example: "United States" (id=96683cc9126741d1)
                box = BoundingBox.parse(api, value) if value is not None else value
                setattr(place, key, box)
            elif key == 'contained_within':
                # contained_within is a list of Places.
                setattr(place, key, Place.parse_list(api, value))
            else:
                setattr(place, key, value)
        return place

    @classmethod
    def parse_list(cls, api, json_list):
        if isinstance(json_list, list):
            item_list = json_list
        else:
            item_list = json_list['result']['places']
        results = ResultSet()
        results.extend(cls.parse(api, obj) for obj in item_list)
        return results
class Media(Model):
    """An attached media entity; attributes copied straight from the payload."""

    @classmethod
    def parse(cls, api, json):
        media = cls(api)
        for key, value in json.items():
            setattr(media, key, value)
        return media
class ModelFactory(object):
    """
    Used by parsers for creating instances
    of models. You may subclass this factory
    to add your own extended models.
    """
    # One attribute per payload kind; parsers look these up by attribute name.
    status = Status
    user = User
    direct_message = DirectMessage
    friendship = Friendship
    saved_search = SavedSearch
    search_results = SearchResults
    category = Category
    list = List
    relation = Relation
    relationship = Relationship
    media = Media
    json = JSONModel
    ids = IDModel
    place = Place
    bounding_box = BoundingBox
| mit |
Dekken/tick | tick/linear_model/tests/simu_linreg_test.py | 2 | 1475 | # License: BSD 3 clause
import unittest
import numpy as np
from tick.linear_model import SimuLinReg
class Test(unittest.TestCase):
    def test_SimuLinReg(self):
        """Test simulation of a Linear Regression."""
        n_samples = 10
        n_features = 3
        idx = np.arange(n_features)
        weights = np.exp(-idx / 10.)
        weights[::2] *= -1
        seed = 123
        simu = SimuLinReg(weights, None, n_samples=n_samples, seed=seed,
                          verbose=False)
        X, y = simu.simulate()
        # Reference values recorded for seed=123 (one row per sample).
        X_truth = np.array([
            [1.4912667, 0.80881799, 0.26977298],
            [1.23227551, 0.50697013, 1.9409132],
            [1.8891494, 1.49834791, 2.41445794],
            [0.19431319, 0.80245126, 1.02577552],
            [-1.61687582, -1.08411865, -0.83438387],
            [2.30419894, -0.68987056, -0.39750262],
            [-0.28826405, -1.23635074, -0.76124386],
            [-1.32869473, -1.8752391, -0.182537],
            [0.79464218, 0.65055633, 1.57572506],
            [0.71524202, 1.66759831, 0.88679047],
        ])
        y_truth = np.array([
            -1.23590872, -5.1612244, -4.28171221, -1.00793646, 2.24652287,
            -2.7766077, -0.20433269, 0.46957959, -2.37562537, 0.35124802
        ])
        np.testing.assert_array_almost_equal(X_truth, X)
        np.testing.assert_array_almost_equal(y_truth, y)


if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
bjodah/PyLaTeX | examples/basic.py | 1 | 1488 | #!/usr/bin/python
"""
This example shows basic document generation functionality.
.. :copyright: (c) 2014 by Jelte Fennema.
:license: MIT, see License for more details.
"""
# begin-doc-include
from pylatex import Document, Section, Subsection, Command
from pylatex.utils import italic, NoEscape
def fill_document(doc):
    """Add a section, a subsection and some text to the document.

    :param doc: the document
    :type doc: :class:`pylatex.document.Document` instance
    """
    section = Section('A section')
    with doc.create(section):
        doc.append('Some regular text and some ')
        doc.append(italic('italic text. '))

        with doc.create(Subsection('A subsection')):
            doc.append('Also some crazy characters: $&#{}')
if __name__ == '__main__':
    # Basic document: fill it, then emit both the PDF and the .tex source.
    basic_doc = Document('basic')
    fill_document(basic_doc)
    basic_doc.generate_pdf()
    basic_doc.generate_tex()

    # Document with `\maketitle` command activated.
    titled_doc = Document()
    titled_doc.preamble.append(Command('title', 'Awesome Title'))
    titled_doc.preamble.append(Command('author', 'Anonymous author'))
    titled_doc.preamble.append(Command('date', NoEscape(r'\today')))
    titled_doc.append(NoEscape(r'\maketitle'))
    fill_document(titled_doc)
    titled_doc.generate_pdf('basic_maketitle', clean=False)

    # Add stuff to the document after the first render.
    with titled_doc.create(Section('A second section')):
        titled_doc.append('Some text.')
    titled_doc.generate_pdf('basic_maketitle2')
    tex = titled_doc.dumps()  # The document as string in LaTeX syntax
| mit |
paulondc/gaffer | python/GafferUITest/CompoundPlugValueWidgetTest.py | 5 | 3637 | ##########################################################################
#
# Copyright (c) 2012, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferUI
import GafferUITest
class CompoundPlugValueWidgetTest( GafferUITest.TestCase ) :

	def testChildAccess( self ) :
		"""Child widgets are built lazily, but can be forced or appear when visible."""

		n = Gaffer.Node()
		n["c"] = Gaffer.CompoundPlug()
		n["c"]["i"] = Gaffer.IntPlug()
		n["c"]["s"] = Gaffer.StringPlug()

		pw = GafferUI.CompoundPlugValueWidget( n["c"] )

		# shouldn't get anything because the ui is being built lazily
		self.assertEqual( pw.childPlugValueWidget( n["c"]["i"] ), None )
		self.assertEqual( pw.childPlugValueWidget( n["c"]["s"] ), None )

		# but should be able to force the creation
		# (assertTrue replaces the long-deprecated failUnless alias,
		# removed in Python 3.12)
		self.assertTrue( isinstance( pw.childPlugValueWidget( n["c"]["i"], lazy=False ), GafferUI.PlugValueWidget ) )
		self.assertTrue( isinstance( pw.childPlugValueWidget( n["c"]["s"], lazy=False ), GafferUI.PlugValueWidget ) )

		# shouldn't need to force with lazy=False if the ui is user-visible.
		pw = GafferUI.CompoundPlugValueWidget( n["c"], collapsed=None )
		w = GafferUI.Window()
		w.setChild( pw )
		w.setVisible( True )

		self.assertTrue( isinstance( pw.childPlugValueWidget( n["c"]["i"] ), GafferUI.PlugValueWidget ) )
		self.assertTrue( isinstance( pw.childPlugValueWidget( n["c"]["s"] ), GafferUI.PlugValueWidget ) )

	def testChildReadOnlyStatus( self ) :
		"""setReadOnly on the parent widget propagates to existing children."""

		n = Gaffer.Node()
		n["c"] = Gaffer.CompoundPlug()
		n["c"]["i"] = Gaffer.IntPlug()
		n["c"]["s"] = Gaffer.StringPlug()

		w = GafferUI.CompoundPlugValueWidget( n["c"] )
		w.setReadOnly( True )

		iw = w.childPlugValueWidget( n["c"]["i"], lazy=False )
		sw = w.childPlugValueWidget( n["c"]["s"], lazy=False )

		self.assertEqual( iw.getReadOnly(), True )
		self.assertEqual( sw.getReadOnly(), True )

		w.setReadOnly( False )

		self.assertEqual( iw.getReadOnly(), False )
		self.assertEqual( sw.getReadOnly(), False )
if __name__ == "__main__":
	# ``unittest`` is not imported at module scope in this file, so the
	# original call raised NameError when run as a script; import it locally.
	import unittest
	unittest.main()
| bsd-3-clause |
tadashi-aikawa/gemini | jumeaux/handlers/server.py | 1 | 1802 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import socketserver
import urllib
from http.server import SimpleHTTPRequestHandler
from typing import Optional
from jumeaux.logger import Logger
logger: Logger = Logger(__name__)
class MyServerHandler(SimpleHTTPRequestHandler):
    """Request handler that logs headers (and POST bodies) before serving files."""

    def do_GET(self):
        logger.info_lv2("*" * 80)
        logger.info_lv2("<<< Request headers >>>")
        logger.info_lv2(self.headers)
        SimpleHTTPRequestHandler.do_GET(self)

    def do_POST(self):
        logger.info_lv2("*" * 80)
        logger.info_lv2("<<< Request headers >>>")
        logger.info_lv2(self.headers)

        content_type = self.headers.get_content_type()
        content_charset = self.headers.get_content_charset() or "utf-8"
        if content_type == "application/x-www-form-urlencoded":
            logger.info_lv2("<<< Parse as x-www-form-urlencoded.. >>>")
            raw = self.rfile.read(int(self.headers.get("content-length")))
            logger.info_lv2(
                urllib.parse.parse_qs(raw.decode(content_charset), keep_blank_values=1)
            )
        elif content_type == "application/json":
            raw = self.rfile.read(int(self.headers.get("content-length")))
            # json.loads() no longer accepts an ``encoding`` keyword
            # (deprecated in 3.1, removed in 3.9) -- decode explicitly instead.
            logger.info_lv2(json.loads(raw.decode(content_charset)))
        # Intentionally serve the requested path just like a GET after logging.
        SimpleHTTPRequestHandler.do_GET(self)
class ReuseAddressTCPServer(socketserver.TCPServer):
    """TCPServer with SO_REUSEADDR enabled so a restart can rebind the port
    immediately instead of waiting out TIME_WAIT."""

    allow_reuse_address = True
def handle(port: Optional[int]) -> None:
    """Serve the current directory over HTTP on *port* until interrupted."""
    with ReuseAddressTCPServer(("", port), MyServerHandler) as server:
        logger.info_lv1(f"Serving HTTP on 0.0.0.0 port {port} (http://0.0.0.0:{port}/)")
        server.serve_forever()
| mit |
dulems/hue | desktop/core/ext-py/Django-1.6.10/tests/forms_tests/tests/test_regressions.py | 49 | 7966 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import warnings
from django.forms import *
from django.test import TestCase
from django.utils.translation import ugettext_lazy, override
from forms_tests.models import Cheese
from django.test.utils import TransRealMixin
class FormsRegressionsTestCase(TransRealMixin, TestCase):
    """Regression tests collected from assorted historical form bugs."""

    def test_class(self):
        # Tests to prevent against recurrences of earlier bugs.
        extra_attrs = {'class': 'special'}

        class TestForm(Form):
            f1 = CharField(max_length=10, widget=TextInput(attrs=extra_attrs))
            f2 = CharField(widget=TextInput(attrs=extra_attrs))

        self.assertHTMLEqual(TestForm(auto_id=False).as_p(), '<p>F1: <input type="text" class="special" name="f1" maxlength="10" /></p>\n<p>F2: <input type="text" class="special" name="f2" /></p>')

    def test_regression_3600(self):
        # Tests for form i18n #
        # There were some problems with form translations in #3600

        class SomeForm(Form):
            username = CharField(max_length=10, label=ugettext_lazy('Username'))

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Username:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')

        # Translations are done at rendering time, so multi-lingual apps can define forms)
        with override('de'):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Benutzername:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')
        with override('pl', deactivate=True):
            self.assertHTMLEqual(f.as_p(), '<p><label for="id_username">Nazwa u\u017cytkownika:</label> <input id="id_username" type="text" name="username" maxlength="10" /></p>')

    def test_regression_5216(self):
        # There was some problems with form translations in #5216

        class SomeForm(Form):
            field_1 = CharField(max_length=10, label=ugettext_lazy('field_1'))
            field_2 = CharField(max_length=10, label=ugettext_lazy('field_2'), widget=TextInput(attrs={'id': 'field_2_id'}))

        f = SomeForm()
        self.assertHTMLEqual(f['field_1'].label_tag(), '<label for="id_field_1">field_1:</label>')
        self.assertHTMLEqual(f['field_2'].label_tag(), '<label for="field_2_id">field_2:</label>')

        # Unicode decoding problems...
        GENDERS = (('\xc5', 'En tied\xe4'), ('\xf8', 'Mies'), ('\xdf', 'Nainen'))

        class SomeForm(Form):
            somechoice = ChoiceField(choices=GENDERS, widget=RadioSelect(), label='\xc5\xf8\xdf')

        f = SomeForm()
        self.assertHTMLEqual(f.as_p(), '<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Testing choice validation with UTF-8 bytestrings as input (these are the
        # Russian abbreviations "мес." and "шт.".
        UNITS = ((b'\xd0\xbc\xd0\xb5\xd1\x81.', b'\xd0\xbc\xd0\xb5\xd1\x81.'),
                 (b'\xd1\x88\xd1\x82.', b'\xd1\x88\xd1\x82.'))
        f = ChoiceField(choices=UNITS)
        with warnings.catch_warnings():
            # Ignore UnicodeWarning
            warnings.simplefilter("ignore")
            self.assertEqual(f.clean('\u0448\u0442.'), '\u0448\u0442.')
            self.assertEqual(f.clean(b'\xd1\x88\xd1\x82.'), '\u0448\u0442.')

        # Translated error messages used to be buggy.
        with override('ru'):
            f = SomeForm({})
            self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>\u041e\u0431\u044f\u0437\u0430\u0442\u0435\u043b\u044c\u043d\u043e\u0435 \u043f\u043e\u043b\u0435.</li></ul>\n<p><label for="id_somechoice_0">\xc5\xf8\xdf:</label> <ul id="id_somechoice">\n<li><label for="id_somechoice_0"><input type="radio" id="id_somechoice_0" value="\xc5" name="somechoice" /> En tied\xe4</label></li>\n<li><label for="id_somechoice_1"><input type="radio" id="id_somechoice_1" value="\xf8" name="somechoice" /> Mies</label></li>\n<li><label for="id_somechoice_2"><input type="radio" id="id_somechoice_2" value="\xdf" name="somechoice" /> Nainen</label></li>\n</ul></p>')

        # Deep copying translated text shouldn't raise an error)
        from django.utils.translation import gettext_lazy

        class CopyForm(Form):
            degree = IntegerField(widget=Select(choices=((1, gettext_lazy('test')),)))

        f = CopyForm()

    def test_misc(self):
        # There once was a problem with Form fields called "data". Let's make sure that
        # doesn't come back.
        class DataForm(Form):
            data = CharField(max_length=10)

        f = DataForm({'data': 'xyzzy'})
        self.assertTrue(f.is_valid())
        self.assertEqual(f.cleaned_data, {'data': 'xyzzy'})

        # A form with *only* hidden fields that has errors is going to be very unusual.
        class HiddenForm(Form):
            data = IntegerField(widget=HiddenInput)

        f = HiddenForm({})
        self.assertHTMLEqual(f.as_p(), '<ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul>\n<p> <input type="hidden" name="data" id="id_data" /></p>')
        self.assertHTMLEqual(f.as_table(), '<tr><td colspan="2"><ul class="errorlist"><li>(Hidden field data) This field is required.</li></ul><input type="hidden" name="data" id="id_data" /></td></tr>')

    def test_xss_error_messages(self):
        ###################################################
        # Tests for XSS vulnerabilities in error messages #
        ###################################################

        # The forms layer doesn't escape input values directly because error messages
        # might be presented in non-HTML contexts. Instead, the message is just marked
        # for escaping by the template engine. So we'll need to construct a little
        # silly template to trigger the escaping.
        from django.template import Template, Context
        t = Template('{{ form.errors }}')

        class SomeForm(Form):
            field = ChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': '<script>'})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        class SomeForm(Form):
            field = MultipleChoiceField(choices=[('one', 'One')])

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>Select a valid choice. <script> is not one of the available choices.</li></ul></li></ul>')

        from forms_tests.models import ChoiceModel

        class SomeForm(Form):
            field = ModelMultipleChoiceField(ChoiceModel.objects.all())

        f = SomeForm({'field': ['<script>']})
        self.assertHTMLEqual(t.render(Context({'form': f})), '<ul class="errorlist"><li>field<ul class="errorlist"><li>"<script>" is not a valid value for a primary key.</li></ul></li></ul>')

    def test_regression_14234(self):
        """
        Re-cleaning an instance that was added via a ModelForm should not raise
        a pk uniqueness error.
        """
        class CheeseForm(ModelForm):
            class Meta:
                model = Cheese
                fields = '__all__'

        form = CheeseForm({
            'name': 'Brie',
        })
        self.assertTrue(form.is_valid())
        obj = form.save()
        obj.name = 'Camembert'
        obj.full_clean()
| apache-2.0 |
ntiufalara/openerp7 | openerp/addons/point_of_sale/report/pos_details.py | 30 | 9180 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.report import report_sxw
class pos_details(report_sxw.rml_parse):
    """RML parser for the 'Point of Sale Details' report.

    The report wizard supplies a ``form`` dict with ``date_start``,
    ``date_end`` and ``user_ids``.  The helpers below query ``pos.order``
    records in that window and keep running totals (qty, sales, invoiced
    amount, discount) on the instance; the accumulators are reset in
    ``__init__`` for every rendering.
    """

    def _get_invoice(self, inv_id):
        """Return the invoice number for ``inv_id``.

        Returns 'Draft' when the invoice exists but has no number yet, and
        '' when no invoice is linked at all.
        """
        if inv_id:
            self.cr.execute("select number from account_invoice as ac where id = %s", (inv_id,))
            res = self.cr.fetchone()
            # fetchone() returns None when the id matches no row (stale
            # reference); the previous ``res[0] or 'Draft'`` raised TypeError
            # in that case.
            return (res and res[0]) or 'Draft'
        else:
            return ''

    def _get_all_users(self):
        """Fallback user selection: ids of every res.users record."""
        user_obj = self.pool.get('res.users')
        return user_obj.search(self.cr, self.uid, [])

    def _pos_sales_details(self, form):
        """Return one dict per POS order line sold in the selected period.

        Side effect: accumulates ``self.total``, ``self.qty`` and
        ``self.discount``.  Returns {} instead of an empty list when nothing
        matched (kept as-is for backward compatibility with the template).
        """
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        data = []
        user_ids = form['user_ids'] or self._get_all_users()
        # Restrict to the current user's company.
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order', '>=', form['date_start'] + ' 00:00:00'), ('date_order', '<=', form['date_end'] + ' 23:59:59'), ('user_id', 'in', user_ids), ('state', 'in', ['done', 'paid', 'invoiced']), ('company_id', '=', company_id)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
            for pol in pos.lines:
                result = {
                    'code': pol.product_id.default_code,
                    'name': pol.product_id.name,
                    'invoice_id': pos.invoice_id.id,
                    'price_unit': pol.price_unit,
                    'qty': pol.qty,
                    'discount': pol.discount,
                    'total': (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0)),
                    'date_order': pos.date_order,
                    'pos_name': pos.name,
                    'uom': pol.product_id.uom_id.name
                }
                data.append(result)
                self.total += result['total']
                self.qty += result['qty']
                self.discount += result['discount']
        return data or {}

    def _get_qty_total_2(self):
        """Total quantity accumulated by _pos_sales_details()."""
        return self.qty

    def _get_sales_total_2(self):
        """Total (discounted) sales amount accumulated by _pos_sales_details()."""
        return self.total

    def _get_sum_invoice_2(self, form):
        """Sum of invoiced POS line amounts in the period, or False when zero."""
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order', '>=', form['date_start'] + ' 00:00:00'), ('date_order', '<=', form['date_end'] + ' 23:59:59'), ('user_id', 'in', user_ids), ('company_id', '=', company_id), ('invoice_id', '<>', False)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
            for pol in pos.lines:
                self.total_invoiced += (pol.price_unit * pol.qty * (1 - (pol.discount) / 100.0))
        return self.total_invoiced or False

    def _paid_total_2(self):
        """Total paid amount (same accumulator as the sales total)."""
        return self.total or 0.0

    def _get_sum_dis_2(self):
        """Accumulated discount percentage sum (from _pos_sales_details)."""
        return self.discount or 0.0

    def _get_sum_discount(self, form):
        """Total discount *value* granted over the period, or False when zero."""
        pos_obj = self.pool.get('pos.order')
        user_obj = self.pool.get('res.users')
        user_ids = form['user_ids'] or self._get_all_users()
        company_id = user_obj.browse(self.cr, self.uid, self.uid).company_id.id
        pos_ids = pos_obj.search(self.cr, self.uid, [('date_order', '>=', form['date_start'] + ' 00:00:00'), ('date_order', '<=', form['date_end'] + ' 23:59:59'), ('user_id', 'in', user_ids), ('company_id', '=', company_id)])
        for pos in pos_obj.browse(self.cr, self.uid, pos_ids):
            for pol in pos.lines:
                self.total_discount += ((pol.price_unit * pol.qty) * (pol.discount / 100))
        return self.total_discount or False

    def _get_payments(self, form):
        """Return per-journal payment totals as a list of dicts, or {} when
        no matching order/statement line exists."""
        statement_line_obj = self.pool.get("account.bank.statement.line")
        pos_order_obj = self.pool.get("pos.order")
        user_ids = form['user_ids'] or self._get_all_users()
        pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order', '>=', form['date_start'] + ' 00:00:00'), ('date_order', '<=', form['date_end'] + ' 23:59:59'), ('state', 'in', ['paid', 'invoiced', 'done']), ('user_id', 'in', user_ids)])
        data = {}
        if pos_ids:
            st_line_ids = statement_line_obj.search(self.cr, self.uid, [('pos_statement_id', 'in', pos_ids)])
            if st_line_ids:
                st_id = statement_line_obj.browse(self.cr, self.uid, st_line_ids)
                a_l = []
                for r in st_id:
                    a_l.append(r['id'])
                # Aggregate amounts per journal over the collected lines.
                self.cr.execute("select aj.name,sum(amount) from account_bank_statement_line as absl,account_bank_statement as abs,account_journal as aj " \
                                "where absl.statement_id = abs.id and abs.journal_id = aj.id and absl.id IN %s " \
                                "group by aj.name ", (tuple(a_l),))
                data = self.cr.dictfetchall()
            return data
        else:
            return {}

    def _total_of_the_day(self, objects):
        """Net (non-invoiced) total of the day, or False when nothing was sold.

        NOTE(review): when total == total_invoiced this returns the full
        total instead of 0.0 -- looks intentional in the original report,
        confirm before changing.
        """
        if self.total:
            if self.total == self.total_invoiced:
                return self.total
            else:
                return ((self.total or 0.00) - (self.total_invoiced or 0.00))
        else:
            return False

    def _sum_invoice(self, objects):
        """Sum of amount_total over orders that carry a numbered invoice."""
        # Python 2: reduce() is a builtin here (OpenERP 7 targets py2).
        return reduce(lambda acc, obj:
                          acc + obj.invoice_id.amount_total,
                      [o for o in objects if o.invoice_id and o.invoice_id.number],
                      0.0)

    def _ellipsis(self, orig_str, maxlen=100, ellipsis='...'):
        """Truncate ``orig_str`` so that it plus ``ellipsis`` fit in ``maxlen``.

        NOTE(review): the ellipsis itself is never appended, only budgeted
        for -- confirm that is the intended display behaviour.
        """
        maxlen = maxlen - len(ellipsis)
        if maxlen <= 0:
            maxlen = 1
        return orig_str[:maxlen]

    def _strip_name(self, name, maxlen=50):
        """Shorten a record name for display in the report."""
        return self._ellipsis(name, maxlen, ' ...')

    def _get_tax_amount(self, form):
        """Return [{'name': ..., 'amount': ...}] tax totals for the period."""
        taxes = {}
        account_tax_obj = self.pool.get('account.tax')
        user_ids = form['user_ids'] or self._get_all_users()
        pos_order_obj = self.pool.get('pos.order')
        pos_ids = pos_order_obj.search(self.cr, self.uid, [('date_order', '>=', form['date_start'] + ' 00:00:00'), ('date_order', '<=', form['date_end'] + ' 23:59:59'), ('state', 'in', ['paid', 'invoiced', 'done']), ('user_id', 'in', user_ids)])
        for order in pos_order_obj.browse(self.cr, self.uid, pos_ids):
            for line in order.lines:
                line_taxes = account_tax_obj.compute_all(self.cr, self.uid, line.product_id.taxes_id, line.price_unit, line.qty, product=line.product_id, partner=line.order_id.partner_id or False)
                for tax in line_taxes['taxes']:
                    taxes.setdefault(tax['id'], {'name': tax['name'], 'amount': 0.0})
                    taxes[tax['id']]['amount'] += tax['amount']
        return taxes.values()

    def _get_user_names(self, user_ids):
        """Comma-separated display names for the given user ids."""
        user_obj = self.pool.get('res.users')
        return ', '.join(map(lambda x: x.name, user_obj.browse(self.cr, self.uid, user_ids)))

    def __init__(self, cr, uid, name, context):
        super(pos_details, self).__init__(cr, uid, name, context=context)
        # Running accumulators, filled in by the helpers above while the
        # template renders.
        self.total = 0.0
        self.qty = 0.0
        self.total_invoiced = 0.0
        self.discount = 0.0
        self.total_discount = 0.0
        # Names callable from the RML template.
        self.localcontext.update({
            'time': time,
            'strip_name': self._strip_name,
            'getpayments': self._get_payments,
            'getsumdisc': self._get_sum_discount,
            'gettotalofthaday': self._total_of_the_day,
            'gettaxamount': self._get_tax_amount,
            'pos_sales_details': self._pos_sales_details,
            'getqtytotal2': self._get_qty_total_2,
            'getsalestotal2': self._get_sales_total_2,
            'getsuminvoice2': self._get_sum_invoice_2,
            'getpaidtotal2': self._paid_total_2,
            'getinvoice': self._get_invoice,
            'get_user_names': self._get_user_names,
        })
# Register the parser with the report engine under report name 'pos.details'.
# NOTE(review): the RML path references 'point_of_sale_singer' although this
# module lives in 'point_of_sale' -- confirm the template path is correct.
report_sxw.report_sxw('report.pos.details', 'pos.order', 'addons/point_of_sale_singer/report/pos_details.rml', parser=pos_details, header='internal')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| mit |
tdf/tdc | counter/admin.py | 1 | 1274 | from django.contrib import admin
# Register your models here.
from counter.models import Filename, Product, Country, OS, Arch, Version, Language, LogEntry, Query
class ProductAdmin(admin.ModelAdmin):
    """Admin configuration for Product (Django defaults only)."""
    pass
class CountryAdmin(admin.ModelAdmin):
    """Admin configuration for Country (Django defaults only)."""
    pass
class OSAdmin(admin.ModelAdmin):
    """Admin configuration for OS (Django defaults only)."""
    pass
class ArchAdmin(admin.ModelAdmin):
    """Admin configuration for Arch (Django defaults only)."""
    pass
class VersionAdmin(admin.ModelAdmin):
    """Admin configuration for Version (Django defaults only)."""
    pass
class LanguageAdmin(admin.ModelAdmin):
    """Admin configuration for Language (Django defaults only)."""
    pass
class FilenameAdmin(admin.ModelAdmin):
    """Admin for Filename: shows and filters by its classification fields."""
    list_display = ('name', 'product', 'os', 'arch', 'version', 'language')
    list_filter = ('product', 'os', 'arch', 'version', 'language')
class LogEntryAdmin(admin.ModelAdmin):
    """Read-only admin for download log entries."""
    list_display = ('date', 'filename', 'country', 'count')
    # Allow drilling down by date in the changelist header.
    date_hierarchy = "date"
    # Log entries are immutable: every displayed field is read-only.
    readonly_fields = list_display
class QueryAdmin(admin.ModelAdmin):
    """Admin for saved Query objects, showing their date range and count."""
    list_display = ('name', 'start_date', 'end_date', 'count')
# Wire each model to its ModelAdmin so all of them appear in the admin site.
admin.site.register(Filename, FilenameAdmin)
admin.site.register(Product, ProductAdmin)
admin.site.register(Country, CountryAdmin)
admin.site.register(OS, OSAdmin)
admin.site.register(Arch, ArchAdmin)
admin.site.register(Version, VersionAdmin)
admin.site.register(Language, LanguageAdmin)
admin.site.register(LogEntry, LogEntryAdmin)
admin.site.register(Query, QueryAdmin)
| gpl-3.0 |
elingg/tensorflow | tensorflow/contrib/tensor_forest/hybrid/python/ops/training_ops.py | 42 | 11156 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for hybrid model training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import tf_logging as logging
TRAINING_OPS_FILE = '_training_ops.so'
_training_ops = None
_ops_lock = threading.Lock()
# TODO(b/31222613): Some of these ops are probably differentiable, and
# there may be latent bugs here.
ops.NotDifferentiable('HardRoutingFunction')
ops.NotDifferentiable('RoutingGradient')
ops.NotDifferentiable('KFeatureDataGradient')
ops.NotDifferentiable('KFeatureRoutingGradient')
ops.NotDifferentiable('KFeatureWeightGradient')
ops.NotDifferentiable('UnpackPath')
@ops.RegisterGradient('RoutingFunction')
def _RoutingFunctionGradient(op, grad):
  """The gradient of RoutingFunction.

  Args:
    op: The RoutingFunction op.
    grad: Gradient with respect to the output of the RoutingFunction op.

  Returns:
    Gradients with respect to the input of the RoutingFunction op.
  """
  # NOTE(review): _training_ops is None until Load() has been called; this
  # assumes the shared library was loaded before any gradient evaluation.
  routing_gradient = _training_ops.routing_gradient

  input_data_tensor = op.inputs[0]
  tree_weights_tensor = op.inputs[1]
  tree_thresholds_tensor = op.inputs[2]

  routing_function_tensor = op.outputs[0]

  # The derivatives below are each defined over one or two of three dimensions:
  # (batch_size, num_nodes, num_features). We explicitly expand each derivative
  # to three dimensions to ensure that they're broadcasted correctly.

  # dl / du is the derivative of the loss with respect to the output of the
  # routing function, which is provided by tensorflow.
  #
  # dl / du has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  dl_du = array_ops.expand_dims(grad, 2)

  # du / df is the derivative of the output of the routing function with respect
  # to the decision function at each node. It is computed by
  # routing_gradient_op.cc.
  #
  # du / df has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  du_df = array_ops.expand_dims(
      routing_gradient(
          input_data_tensor,
          tree_weights_tensor,
          tree_thresholds_tensor,
          routing_function_tensor,
          max_nodes=op.get_attr('max_nodes')),
      2)

  # df / dx is the derivative of the decision function with respect to the input
  # data. f_i(x) = (-t_i * x + b_i), so df_i / dx = -t_i.
  #
  # df / dx has dimension (num_nodes, num_features), which we expand to
  # (1, num_nodes, num_features).
  df_dx = -array_ops.expand_dims(tree_weights_tensor, 0)

  # df / dt is the derivative of the decision function with respect to its
  # parameters. f_i(x) = (-t_i * x + b_i), so df_i / d t_i = -x.
  #
  # df / dt has dimension (batch_size, num_features), which we expand to
  # (batch_size, 1, num_features).
  df_dt = -array_ops.expand_dims(input_data_tensor, 1)

  # df / dt is the derivative of the decision function with respect to its
  # bias parameter. f_i(x) = (-t_i * x + b_i), so df_i / d t_i = 1.
  #
  # df / db has dimension (num_nodes), which we expand to
  # (1, num_nodes, 1).
  df_db = array_ops.expand_dims(
      array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)

  # Compute the derivatives of the loss with respect to the inputs using the
  # chain rule (backpropagation).
  dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
  dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
  dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)

  input_gradients = [dl_dx, dl_dt, dl_db]

  return input_gradients
@ops.RegisterGradient('StochasticHardRoutingFunction')
def _StochasticHardRoutingFunctionGradient(op, routing_grad, unused_path_grad):
  """The gradient of StochasticHardRoutingFunction.

  Args:
    op: The StochasticHardRoutingFunction op.
    routing_grad: Gradient with respect to the path-probability output of the
      StochasticHardRoutingFunction op.
    unused_path_grad: Gradient with respect to the path output; the path is
      an integer tensor and is not differentiable.

  Returns:
    Gradients with respect to the input of the StochasticHardRoutingFunction
    op.
  """
  # NOTE(review): _training_ops is None until Load() has been called; this
  # assumes the shared library was loaded before any gradient evaluation.
  gradient_op = _training_ops.stochastic_hard_routing_gradient
  unpack_path_op = _training_ops.unpack_path

  input_data_tensor = op.inputs[0]
  tree_weights_tensor = op.inputs[1]
  tree_thresholds_tensor = op.inputs[2]

  path_probability_tensor = op.outputs[0]
  path_tensor = op.outputs[1]

  # The derivatives below are each defined over one or two of three dimensions:
  # (batch_size, num_nodes, num_features). We explicitly expand each derivative
  # to three dimensions to ensure that they're broadcasted correctly.
  du_df_raw, df_dx_raw, df_dt_raw, df_db_raw = gradient_op(
      input_data_tensor,
      tree_weights_tensor,
      tree_thresholds_tensor,
      path_probability_tensor,
      path_tensor,
      tree_depth=op.get_attr('tree_depth'))

  # dl / du is the derivative of the loss with respect to the output of the
  # routing function, which is provided by tensorflow.
  #
  # dl / du has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  dl_du = array_ops.expand_dims(unpack_path_op(path_tensor, routing_grad), 2)

  # du / df is the derivative of the output of the routing function with respect
  # to the decision function at each node. It is computed by
  # single_feature_routing_gradient_op.cc.
  #
  # du / df has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  du_df = array_ops.expand_dims(du_df_raw, 2)

  # df / dx is the derivative of the decision function with respect to the input
  # data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
  # zero elsewhere.
  #
  # df / dx has dimension (num_nodes, num_features), which we expand to
  # (1, num_nodes, num_features).
  df_dx = array_ops.expand_dims(df_dx_raw, 0)

  # df / dt is the derivative of the decision function with respect to its
  # parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
  #
  # df / dt has dimension (batch_size, num_nodes, num_features).
  df_dt = -df_dt_raw

  # df / dt is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / dt = 1.
  #
  # df / db has dimension (num_nodes), which we expand to
  # (1, num_nodes, 1).
  df_db = array_ops.expand_dims(array_ops.expand_dims(df_db_raw, 0), 2)

  # Compute the derivatives of the loss with respect to the inputs using the
  # chain rule (backpropagation).
  dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
  dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
  dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)

  input_gradients = [dl_dx, dl_dt, dl_db]

  return input_gradients
@ops.RegisterGradient('KFeatureRoutingFunction')
def _KFeatureRoutingFunctionGradient(op, grad):
  """The gradient of KFeatureRoutingFunction.

  Args:
    op: The KFeatureRoutingFunction op.
    grad: Gradient with respect to the output of the KFeatureRoutingFunction
      op.

  Returns:
    Gradients with respect to the input of the KFeatureRoutingFunction op.
  """
  # NOTE(review): _training_ops is None until Load() has been called; this
  # assumes the shared library was loaded before any gradient evaluation.
  gradient_op = _training_ops.k_feature_gradient

  input_data_tensor = op.inputs[0]
  tree_weights_tensor = op.inputs[1]
  tree_thresholds_tensor = op.inputs[2]

  routing_function_tensor = op.outputs[0]

  # The derivatives below are each defined over one or two of three dimensions:
  # (batch_size, num_nodes, num_features). We explicitly expand each derivative
  # to three dimensions to ensure that they're broadcasted correctly.
  du_df_raw, df_dx_raw, df_dt_raw = gradient_op(
      input_data_tensor,
      tree_weights_tensor,
      tree_thresholds_tensor,
      routing_function_tensor,
      layer_num=op.get_attr('layer_num'),
      random_seed=op.get_attr('random_seed'))

  # dl / du is the derivative of the loss with respect to the output of the
  # routing function, which is provided by tensorflow.
  #
  # dl / du has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  dl_du = array_ops.expand_dims(grad, 2)

  # du / df is the derivative of the output of the routing function with respect
  # to the decision function at each node. It is computed by
  # single_feature_routing_gradient_op.cc.
  #
  # du / df has dimension (batch_size, num_nodes), which we expand to
  # (batch_size, num_nodes, 1).
  du_df = array_ops.expand_dims(du_df_raw, 2)

  # df / dx is the derivative of the decision function with respect to the input
  # data. f(x) = (-t * x + b), so df / dx = -t for the selected features and
  # zero elsewhere.
  #
  # df / dx has dimension (num_nodes, num_features), which we expand to
  # (1, num_nodes, num_features).
  df_dx = array_ops.expand_dims(df_dx_raw, 0)

  # df / dt is the derivative of the decision function with respect to its
  # parameters. f(x) = (-t * x + b), so df / dt = -x[feature].
  #
  # df / dt has dimension (batch_size, num_nodes, num_features).
  df_dt = -df_dt_raw

  # df / dt is the derivative of the decision function with respect to its
  # bias parameter. f(x) = (-t * x + b), so df / dt = 1.
  #
  # df / db has dimension (num_nodes), which we expand to
  # (1, num_nodes, 1).
  df_db = array_ops.expand_dims(
      array_ops.expand_dims(array_ops.ones_like(tree_thresholds_tensor), 0), 2)

  # Compute the derivatives of the loss with respect to the inputs using the
  # chain rule (backpropagation).
  dl_dx = math_ops.reduce_mean(dl_du * du_df * df_dx, 1)
  dl_dt = math_ops.reduce_mean(dl_du * du_df * df_dt, 0)
  dl_db = math_ops.reduce_mean(array_ops.squeeze(dl_du * du_df * df_db, [2]), 0)

  input_gradients = [dl_dx, dl_dt, dl_db]

  return input_gradients
# Workaround for the fact that importing tensorflow imports contrib
# (even if a user isn't using this or any other contrib op), but
# there's not yet any guarantee that the shared object exists.
# In which case, "import tensorflow" will always crash, even for users that
# never use contrib.
def Load():
  """Load training ops library and return the loaded module.

  Lazily loads TRAINING_OPS_FILE exactly once; the module handle is cached
  in the module-global _training_ops, and _ops_lock makes the
  check-then-load sequence safe across threads.
  """
  with _ops_lock:
    global _training_ops
    if not _training_ops:
      ops_path = resource_loader.get_path_to_datafile(TRAINING_OPS_FILE)
      logging.info('data path: %s', ops_path)
      _training_ops = load_library.load_op_library(ops_path)

      assert _training_ops, 'Could not load _training_ops.so'
  return _training_ops
| apache-2.0 |
sid-kap/pants | tests/python/pants_test/jvm/jvm_tool_task_test_base.py | 6 | 4986 | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import shutil
from pants.backend.jvm.subsystems.jvm_tool_mixin import JvmToolMixin
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.tasks.bootstrap_jvm_tools import BootstrapJvmTools
from pants.base.build_environment import get_pants_cachedir
from pants.base.build_file_aliases import BuildFileAliases
from pants.ivy.bootstrapper import Bootstrapper
from pants_test.tasks.task_test_base import TaskTestBase
class JvmToolTaskTestBase(TaskTestBase):
  """Prepares an ephemeral test build root that supports tasks that use jvm tool bootstrapping."""

  @property
  def alias_groups(self):
    """BUILD-file aliases needed to parse the copied BUILD.tools file."""
    # Aliases appearing in our real BUILD.tools.
    return BuildFileAliases.create(
      targets={
        'jar_library': JarLibrary,
      },
      objects={
        'jar': JarDependency,
      },
    )

  def setUp(self):
    """Set up an isolated bootstrap task type plus cache/JVM options for it."""
    super(JvmToolTaskTestBase, self).setUp()

    # Use a synthetic subclass for proper isolation when bootstrapping within the test.
    bootstrap_scope = 'bootstrap_scope'
    self.bootstrap_task_type = self.synthesize_task_subtype(BootstrapJvmTools, bootstrap_scope)
    JvmToolMixin.reset_registered_tools()

    # Set some options:
    # 1. Cap BootstrapJvmTools memory usage in tests.  The Xmx was empirically arrived upon using
    #    -Xloggc and verifying no full gcs for a test using the full gamut of resolving a multi-jar
    #    tool, constructing a fat jar and then shading that fat jar.
    #
    # 2. Allow tests to read/write tool jars from the real artifact cache, so they don't
    #    each have to resolve and shade them every single time, which is a huge slowdown.
    #    Note that local artifact cache writes are atomic, so it's fine for multiple concurrent
    #    tests to write to it.
    #
    # Note that we don't have access to the invoking pants instance's options, so we assume that
    # its artifact cache is in the standard location.  If it isn't, worst case the tests will
    # populate a second cache at the standard location, which is no big deal.
    # TODO: We really need a straightforward way for pants's own tests to get to the enclosing
    # pants instance's options values.
    artifact_caches = [os.path.join(get_pants_cachedir(), 'artifact_cache')]
    self.set_options_for_scope(bootstrap_scope, jvm_options=['-Xmx128m'])
    self.set_options_for_scope('cache.{}'.format(bootstrap_scope),
                               read_from=artifact_caches,
                               write_to=artifact_caches)

    # Tool option defaults currently point to targets in the real BUILD.tools, so we copy it
    # into our test workspace.
    shutil.copy(os.path.join(self.real_build_root, 'BUILD.tools'), self.build_root)

    Bootstrapper.reset_instance()

  def context(self, for_task_types=None, options=None, target_roots=None,
              console_outstream=None, workspace=None):
    """Create a test context that additionally registers the bootstrap task's options."""
    # Add in the bootstrapper task type, so its options get registered and set.
    for_task_types = [self.bootstrap_task_type] + (for_task_types or [])
    return super(JvmToolTaskTestBase, self).context(for_task_types=for_task_types,
                                                    options=options,
                                                    target_roots=target_roots,
                                                    console_outstream=console_outstream,
                                                    workspace=workspace)

  def prepare_execute(self, context):
    """Prepares a jvm tool-using task for execution, first bootstrapping any required jvm tools.

    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
          product requirements if any.

    :returns: The prepared Task instance.
    """
    task = self.create_task(context)
    task.invalidate()

    # Bootstrap the tools needed by the task under test.
    # We need the bootstrap task's workdir to be under the test's .pants.d, so that it can
    # use artifact caching.  Making it a sibling of the main task's workdir achieves this.
    bootstrap_workdir = os.path.join(os.path.dirname(task.workdir), 'bootstrap_jvm_tools')
    self.bootstrap_task_type(context, bootstrap_workdir).execute()
    return task

  def execute(self, context):
    """Executes a jvm tool-using task, first bootstrapping any required jvm tools.

    Note: Other task pre-requisites will not be ensured and tests must instead setup their own
          product requirements if any.

    :returns: The Task instance that was executed.
    """
    task = self.prepare_execute(context)
    task.execute()
    return task
| apache-2.0 |
halfakop/Teacup_Firmware | configtool/calcbelt.py | 2 | 8202 |
import wx
from configtool.data import (BSIZESMALL, reFloat, reInteger, offsetChLabel,
offsetTcLabel)
class CalcBelt(wx.Dialog):
  """Dialog that computes steps-per-meter for belt driven axes.

  The integer result can be pushed into the STEPS_PER_M_[XYZE] settings via
  the ``cbUse`` callback supplied by the caller.  Uses the wxPython classic
  (pre-Phoenix) API, matching the rest of configtool.
  """

  def __init__(self, parent, font, cbUse):
    """Build the dialog.

    parent: parent window.
    font:   font applied to every widget.
    cbUse:  callback ``cbUse(setting_name, value)`` invoked by the
            'Use for X/Y/Z/E' buttons.
    """
    wx.Dialog.__init__(self, parent, wx.ID_ANY,
                       "Steps calculator for belt driven axes",
                       size=(360, 300))
    self.SetFont(font)
    self.Bind(wx.EVT_CLOSE, self.onExit)

    self.use = cbUse
    labelWidth = 130

    hsz = wx.BoxSizer(wx.HORIZONTAL)
    hsz.AddSpacer((10, 10))
    sz = wx.BoxSizer(wx.VERTICAL)
    sz.AddSpacer((10, 10))

    # Step angle selector.
    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Step Angle:", size=(labelWidth, -1),
                       style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st, 1, wx.TOP, offsetChLabel)
    lsz.AddSpacer((5, 5))
    stepAngles = ["1.8 (200 per revolution)", "0.9 (400 per revolution)",
                  "7.5 (48 per revolution)"]
    # Full steps per revolution for each choice above (parallel list).
    self.stepAngleValues = [200, 400, 48]
    tc = wx.Choice(self, wx.ID_ANY, choices=stepAngles)
    tc.SetFont(font)
    tc.SetSelection(0)
    tc.Bind(wx.EVT_CHOICE, self.onChoice)
    lsz.Add(tc)
    tc.SetToolTipString("Step angle. Depends on your type of stepper motor.")
    self.tcStep = tc
    sz.Add(lsz)
    sz.AddSpacer((10, 10))

    # Microstepping selector.
    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Microstepping:",
                       size=(labelWidth, -1), style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st, 1, wx.TOP, offsetChLabel)
    lsz.AddSpacer((5, 5))
    microStepping = ["1 - full step", "1/2 - half step", "1/4 - quarter step",
                     "1/8", "1/16", "1/32", "1/64", "1/128"]
    # Microstep multiplier for each choice above (parallel list).
    self.microSteppingValues = [1, 2, 4, 8, 16, 32, 64, 128]
    tc = wx.Choice(self, wx.ID_ANY, choices=microStepping)
    tc.SetFont(font)
    tc.Bind(wx.EVT_CHOICE, self.onChoice)
    tc.SetSelection(4)
    lsz.Add(tc)
    tc.SetToolTipString("Microstepping. Most boards allow to change this by "
                        "setting jumpers. The value here must match the "
                        "setting on the board in conjunction with the type "
                        "of stepper driver chip.")
    self.tcMicroStepping = tc
    sz.Add(lsz)
    sz.AddSpacer((10, 10))

    # Belt pitch entry plus preset dropdown.
    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Belt Pitch (in mm):",
                       size=(labelWidth, -1), style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st, 1, wx.TOP, offsetTcLabel)
    lsz.AddSpacer((5, 5))
    tc = wx.TextCtrl(self, wx.ID_ANY, "2", style=wx.TE_RIGHT)
    tc.SetFont(font)
    tc.Bind(wx.EVT_TEXT, self.onTextCtrlFloat)
    lsz.Add(tc)
    tc.SetToolTipString("Belt pitch. Distance between two teeth on the belt.")
    self.tcBeltPitch = tc
    lsz.AddSpacer((5, 5))
    beltPresets = ["-", "2mm Pitch (GT2)", "MXL Pitch (2.03mm)",
                   "T2.5 (2.5mm)", "3mm Pitch (GT2, HTD)",
                   "5mm Pitch (T5, GTD, HTD)", "0.2\" XL belt (5.08mm)"]
    # Pitch in mm for each preset; -1 marks the "no preset" entry.
    self.beltPresetValues = [-1, 2.0, 2.03, 2.5, 3.0, 5.0, 5.08]
    tc = wx.Choice(self, wx.ID_ANY, choices=beltPresets)
    tc.SetFont(font)
    tc.SetSelection(0)
    tc.Bind(wx.EVT_CHOICE, self.onPresetChoice)
    lsz.Add(tc)
    tc.SetToolTipString("Belt pitch presets.")
    self.tcPresets = tc
    sz.Add(lsz)
    sz.AddSpacer((10, 10))

    # Pulley teeth entry.
    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Pulley Teeth Count:",
                       size=(labelWidth, -1), style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st, 1, wx.TOP, offsetTcLabel)
    lsz.AddSpacer((5, 5))
    tc = wx.TextCtrl(self, wx.ID_ANY, "8", style=wx.TE_RIGHT)
    tc.SetFont(font)
    tc.Bind(wx.EVT_TEXT, self.onTextCtrlInteger)
    lsz.Add(tc)
    tc.SetToolTipString("Pulley teeth count. Count them!")
    self.tcPulleyTeeth = tc
    sz.Add(lsz)
    sz.AddSpacer((30, 30))

    # Read-only result labels.
    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Result:", size=(labelWidth, -1),
                       style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st)
    lsz.AddSpacer((5, 5))
    tc = wx.StaticText(self, wx.ID_ANY, "", size=(260, -1),
                       style=wx.ALIGN_LEFT)
    tc.SetFont(font)
    lsz.Add(tc)
    self.tcResult = tc
    sz.Add(lsz)

    lsz = wx.BoxSizer(wx.HORIZONTAL)
    st = wx.StaticText(self, wx.ID_ANY, "Resolution:", size=(labelWidth, -1),
                       style=wx.ALIGN_RIGHT)
    st.SetFont(font)
    lsz.Add(st)
    lsz.AddSpacer((5, 5))
    tc = wx.StaticText(self, wx.ID_ANY, "", size=(260, -1),
                       style=wx.ALIGN_LEFT)
    tc.SetFont(font)
    lsz.Add(tc)
    self.tcResolution = tc
    sz.Add(lsz)
    sz.AddSpacer((20, 20))

    # One "Use for <axis>" button per axis.
    bsz = wx.BoxSizer(wx.HORIZONTAL)
    b = wx.Button(self, wx.ID_ANY, "Use for X", size=BSIZESMALL)
    b.SetFont(font)
    self.Bind(wx.EVT_BUTTON, self.onUseForX, b)
    bsz.Add(b)
    self.bUseForX = b
    bsz.AddSpacer((5, 5))
    b = wx.Button(self, wx.ID_ANY, "Use for Y", size=BSIZESMALL)
    b.SetFont(font)
    self.Bind(wx.EVT_BUTTON, self.onUseForY, b)
    bsz.Add(b)
    self.bUseForY = b
    bsz.AddSpacer((5, 5))
    b = wx.Button(self, wx.ID_ANY, "Use for Z", size=BSIZESMALL)
    b.SetFont(font)
    self.Bind(wx.EVT_BUTTON, self.onUseForZ, b)
    bsz.Add(b)
    self.bUseForZ = b
    bsz.AddSpacer((5, 5))
    b = wx.Button(self, wx.ID_ANY, "Use for E", size=BSIZESMALL)
    b.SetFont(font)
    self.Bind(wx.EVT_BUTTON, self.onUseForE, b)
    bsz.Add(b)
    self.bUseForE = b
    sz.Add(bsz)
    sz.AddSpacer((10, 10))

    hsz.Add(sz)
    hsz.AddSpacer((10, 10))

    self.enableUseButtons(False)

    self.SetSizer(hsz)
    self.Fit()
    self.calculate()

  def calculate(self):
    """Recompute steps/m from the current inputs and refresh the labels.

    Clears the output and disables the 'Use for' buttons while any input is
    invalid; sets self.result (integer steps per meter) on success.
    """
    self.tcResult.SetLabel("")
    self.tcResolution.SetLabel("")
    self.enableUseButtons(False)
    s = self.tcStep.GetSelection()
    sv = self.stepAngleValues[s]
    try:
      bp = float(self.tcBeltPitch.GetValue())
    except:
      return
    try:
      pt = int(self.tcPulleyTeeth.GetValue())
    except:
      return
    s = self.tcMicroStepping.GetSelection()
    msv = self.microSteppingValues[s]

    length = pt * bp                    # belt travel per revolution (mm)
    steps = sv * msv                    # microsteps per revolution
    resultmm = steps / length
    self.result = int(resultmm * 1000.0)
    self.tcResult.SetLabel("%d steps/m (%.3f steps/mm)" %
                           (self.result, resultmm))
    self.tcResolution.SetLabel("%.3f micrometers" % (length / steps * 1000.0))
    self.enableUseButtons(True)

  def enableUseButtons(self, flag):
    """Enable/disable all four 'Use for' buttons together."""
    self.bUseForX.Enable(flag)
    self.bUseForY.Enable(flag)
    self.bUseForZ.Enable(flag)
    self.bUseForE.Enable(flag)

  def onUseForX(self, evt):
    self.use('STEPS_PER_M_X', self.result)

  def onUseForY(self, evt):
    self.use('STEPS_PER_M_Y', self.result)

  def onUseForZ(self, evt):
    self.use('STEPS_PER_M_Z', self.result)

  def onUseForE(self, evt):
    self.use('STEPS_PER_M_E', self.result)

  def onPresetChoice(self, evt):
    """Copy the selected preset pitch into the belt-pitch text field."""
    s = self.tcPresets.GetSelection()
    sv = self.beltPresetValues[s]
    if sv < 0:
      # "-" entry: leave the user's manual value alone.
      return

    # Format without trailing zeros but keep at least one decimal digit.
    s = "%f" % sv
    s = s.rstrip("0")
    if s[-1] == ".":
      s += "0"
    self.tcBeltPitch.SetValue(s)

  def onChoice(self, evt):
    self.calculate()

  def onTextCtrlInteger(self, evt):
    self._validateAndRecalc(evt, reInteger)

  def onTextCtrlFloat(self, evt):
    self._validateAndRecalc(evt, reFloat)

  def _validateAndRecalc(self, evt, regex):
    """Shared EVT_TEXT handler: flag invalid input pink, then recalculate.

    Previously duplicated verbatim in onTextCtrlInteger/onTextCtrlFloat;
    ``regex`` is the compiled pattern (reInteger or reFloat) the field
    content must match.
    """
    tc = evt.GetEventObject()
    w = tc.GetValue().strip()
    valid = bool(w) and regex.match(w) is not None
    if valid:
      tc.SetBackgroundColour(wx.SystemSettings_GetColour(wx.SYS_COLOUR_WINDOW))
    else:
      tc.SetBackgroundColour("pink")
    tc.Refresh()
    self.calculate()
    evt.Skip()

  def onExit(self, evt):
    self.EndModal(wx.ID_OK)
| gpl-2.0 |
greyhwndz/rethinkdb | test/rql_test/connections/http_support/httpbin/helpers.py | 49 | 9578 | # -*- coding: utf-8 -*-
"""
httpbin.helpers
~~~~~~~~~~~~~~~
This module provides helper functions for httpbin.
"""
import json
import base64
from hashlib import md5
from werkzeug.http import parse_authorization_header
from flask import request, make_response
try:
from urlparse import urlparse, urlunparse
except ImportError:
from urllib.parse import urlparse, urlunparse
from .structures import CaseInsensitiveDict
ASCII_ART = """
-=[ teapot ]=-
_...._
.' _ _ `.
| ."` ^ `". _,
\_;`"---"`|//
| ;/
\_ _/
`\"\"\"`
"""
REDIRECT_LOCATION = '/redirect/1'
ENV_HEADERS = (
'X-Varnish',
'X-Request-Start',
'X-Heroku-Queue-Depth',
'X-Real-Ip',
'X-Forwarded-Proto',
'X-Heroku-Queue-Wait-Time',
'X-Forwarded-For',
'X-Heroku-Dynos-In-Use',
'X-Forwarded-For',
'X-Forwarded-Protocol',
'X-Forwarded-Port'
)
ROBOT_TXT = """User-agent: *
Disallow: /deny
"""
# ASCII art served by the /deny endpoint.  Fixed typo: "SHOUDN'T" -> "SHOULDN'T".
ANGRY_ASCII = """
          .-''''''-.
        .' _      _ '.
       /   O      O   \\
      :                :
      |                |
      :       __       :
       \  .-"`  `"-.  /
        '.          .'
          '-......-'
     YOU SHOULDN'T BE HERE
"""
def json_safe(string, content_type='application/octet-stream'):
    """Returns JSON-safe version of `string`.

    If `string` is a Unicode string or a valid UTF-8, it is returned unmodified,
    as it can safely be encoded to JSON string.

    If `string` contains raw/binary data, it is Base64-encoded, formatted and
    returned according to "data" URL scheme (RFC2397). Since JSON is not
    suitable for binary data, some additional encoding was necessary; "data"
    URL scheme was chosen for its simplicity.
    """
    try:
        # Will raise UnicodeDecodeError (a ValueError subclass) for
        # non-UTF-8 payloads.  The old code additionally round-tripped the
        # result through json.dumps into an unused variable; that dead store
        # has been removed -- dumps cannot fail on an already-decoded str.
        return string.decode('utf-8')
    except (ValueError, TypeError):
        # Binary payload: wrap it in a base64 data: URL (RFC 2397).
        return b''.join([
            b'data:',
            content_type.encode('utf-8'),
            b';base64,',
            base64.b64encode(string)
        ]).decode('utf-8')
def get_files():
    """Returns files dict from request context.

    Multiple uploads under the same field name are collected into a list;
    raw/binary payloads are made JSON-safe via json_safe().
    """
    files = dict()
    for k, v in request.files.items():
        content_type = request.files[k].content_type or 'application/octet-stream'
        val = json_safe(v.read(), content_type)
        if files.get(k):
            # Field already seen: promote the stored value to a list and append.
            # NOTE(review): an earlier *empty* value is falsy and would be
            # overwritten instead of listed -- confirm this is intended.
            if not isinstance(files[k], list):
                files[k] = [files[k]]
            files[k].append(val)
        else:
            files[k] = val
    return files
def get_headers(hide_env=True):
    """Returns headers dict from request context.

    When ``hide_env`` is true and 'show_env' is absent from the query
    string, the proxy/platform headers listed in ENV_HEADERS are stripped
    from the result.
    """
    headers = dict(request.headers.items())

    if hide_env and ('show_env' not in request.args):
        for key in ENV_HEADERS:
            try:
                del headers[key]
            except KeyError:
                pass

    return CaseInsensitiveDict(headers.items())
def semiflatten(multi):
    """Convert a MultiDict into a regular dict. If there are more than one value
    for a key, the result will have a list of values for the key. Otherwise it
    will have the plain value."""
    if not multi:
        # Empty or None input is passed back untouched.
        return multi
    flattened = multi.to_dict(flat=False)
    for key, values in flattened.items():
        if len(values) == 1:
            flattened[key] = values[0]
    return flattened
def get_url(request):
    """Return the request URL, honoring a proxy's X-Forwarded-Proto header.

    Since we might be hosted behind a proxy, the scheme of request.url may
    not match the protocol the client actually used; when the header is
    present, rebuild the URL with the forwarded scheme.
    """
    if 'X-Forwarded-Proto' not in request.headers:
        return request.url
    parts = list(urlparse(request.url))
    parts[0] = request.headers.get('X-Forwarded-Proto')
    return urlunparse(parts)
def get_dict(*keys, **extras):
    """Returns request dict of given keys.

    Args:
        *keys: Names drawn from the supported set below; the result contains
            exactly these keys.
        **extras: Additional key/value pairs merged into the result.

    Raises:
        AssertionError: If any requested key is not supported.
    """
    _keys = ('url', 'args', 'form', 'data', 'origin', 'headers', 'files', 'json')
    assert all(map(_keys.__contains__, keys))
    data = request.data
    # Only the flattened form is ever used (the raw assignment that used to
    # precede this line was dead code).
    form = semiflatten(request.form)
    try:
        _json = json.loads(data.decode('utf-8'))
    except (ValueError, TypeError):
        _json = None
    d = dict(
        url=get_url(request),
        args=semiflatten(request.args),
        form=form,
        data=json_safe(data),
        origin=request.headers.get('X-Forwarded-For', request.remote_addr),
        headers=get_headers(),
        files=get_files(),
        json=_json
    )
    out_d = dict()
    for key in keys:
        out_d[key] = d.get(key)
    out_d.update(extras)
    return out_d
def status_code(code):
    """Returns response object of given status code.

    A handful of codes carry extra payload and/or headers (redirect targets,
    auth challenges, the RFC 2324 teapot); every other code yields a bare
    response with only the status set.
    """
    _redirect = {'headers': {'location': REDIRECT_LOCATION}}
    extras = {
        301: _redirect,
        302: _redirect,
        303: _redirect,
        304: {'data': ''},
        305: _redirect,
        307: _redirect,
        401: {'headers': {'WWW-Authenticate': 'Basic realm="Fake Realm"'}},
        402: {
            'data': 'Fuck you, pay me!',
            'headers': {'x-more-info': 'http://vimeo.com/22053820'},
        },
        407: {'headers': {'Proxy-Authenticate': 'Basic realm="Fake Realm"'}},
        # I'm a teapot!
        418: {
            'data': ASCII_ART,
            'headers': {'x-more-info': 'http://tools.ietf.org/html/rfc2324'},
        },
    }
    resp = make_response()
    resp.status_code = code
    spec = extras.get(code)
    if spec is not None:
        if 'data' in spec:
            resp.data = spec['data']
        if 'headers' in spec:
            resp.headers = spec['headers']
    return resp
def check_basic_auth(user, passwd):
    """Checks user authentication using HTTP Basic Auth."""
    auth = request.authorization
    if not auth:
        # No (or empty) Authorization header: propagate the falsy value.
        return auth
    return auth.username == user and auth.password == passwd
# Digest auth helpers
# qop is a quality of protection
def H(data):
    """Return the hex MD5 digest of `data` (bytes)."""
    digest = md5(data)
    return digest.hexdigest()
def HA1(realm, username, password):
    """Create HA1 hash by realm, username, password

    HA1 = md5(A1) = MD5(username:realm:password)

    A missing/empty realm is treated as the empty string.
    """
    joined = b":".join([
        username.encode('utf-8'),
        (realm or u'').encode('utf-8'),
        password.encode('utf-8'),
    ])
    return H(joined)
def HA2(credentails, request):
    """Create HA2 md5 hash

    If the qop directive's value is "auth" or is unspecified, then HA2:
        HA2 = md5(A2) = MD5(method:digestURI)
    If the qop directive's value is "auth-int", then HA2 is
        HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
    """
    qop = credentails.get("qop")
    if qop == "auth" or qop is None:
        return H(b":".join([request['method'].encode('utf-8'),
                            request['uri'].encode('utf-8')]))
    if qop == "auth-int":
        # All three pieces are mandatory for auth-int.
        for k in 'method', 'uri', 'body':
            if k not in request:
                raise ValueError("%s required" % k)
        return H("%s:%s:%s" % (request['method'],
                               request['uri'],
                               H(request['body'])))
    raise ValueError
def response(credentails, user, password, request):
    """Compile digest auth response

    If the qop directive's value is "auth" or "auth-int", then compute:
        RESPONSE = MD5(HA1:nonce:nonceCount:clientNonce:qop:HA2)
    Else if the qop directive is unspecified, then compute:
        RESPONSE = MD5(HA1:nonce:HA2)

    Arguments:
    - `credentails`: credentials dict parsed from the Authorization header
    - `user`: request user name
    - `password`: request user password
    - `request`: request dict
    """
    for key in ('nonce', 'realm'):
        if key not in credentails:
            raise ValueError("%s required for response" % key)
    ha1 = HA1(credentails.get('realm'), user, password)
    ha2 = HA2(credentails, request)
    qop = credentails.get('qop')
    if qop is None:
        fields = [ha1, credentails.get('nonce'), ha2]
    elif qop == 'auth' or qop == 'auth-int':
        # qop-protected responses additionally require these directives.
        for k in ('nonce', 'nc', 'cnonce', 'qop'):
            if k not in credentails:
                raise ValueError("%s required for response H" % k)
        fields = [ha1,
                  credentails.get('nonce'),
                  credentails.get('nc'),
                  credentails.get('cnonce'),
                  credentails.get('qop'),
                  ha2]
    else:
        raise ValueError("qop value are wrong")
    return H(b":".join(field.encode('utf-8') for field in fields))
def check_digest_auth(user, passwd):
    """Check user authentication using HTTP Digest auth"""
    auth_header = request.headers.get('Authorization')
    if auth_header:
        credentails = parse_authorization_header(auth_header)
        if not credentails:
            return False
        expected = response(credentails, user, passwd,
                            dict(uri=request.path,
                                 body=request.data,
                                 method=request.method))
        if credentails.get('response') == expected:
            return True
    return False
def secure_cookie():
    """Return true if cookie should have secure attribute"""
    scheme = request.environ['wsgi.url_scheme']
    return scheme == 'https'
| agpl-3.0 |
Azure/azure-sdk-for-python | sdk/security/azure-mgmt-security/azure/mgmt/security/aio/operations/_advanced_threat_protection_operations.py | 1 | 7647 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
# Generic return type of the optional `cls` response-transform callback.
T = TypeVar('T')
# Signature of the `cls` callback: (pipeline_response, deserialized, response_headers) -> Any.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
# NOTE: AutoRest-generated operations class (see file header); manual edits
# here will be lost when the code is regenerated.
class AdvancedThreatProtectionOperations:
    """AdvancedThreatProtectionOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.security.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get(
        self,
        resource_id: str,
        **kwargs
    ) -> "_models.AdvancedThreatProtectionSetting":
        """Gets the Advanced Threat Protection settings for the specified resource.
        :param resource_id: The identifier of the resource.
        :type resource_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AdvancedThreatProtectionSetting, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.AdvancedThreatProtectionSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AdvancedThreatProtectionSetting"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        # The ATP setting is a per-resource singleton addressed by the fixed
        # name "current".
        api_version = "2019-01-01"
        setting_name = "current"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
            'settingName': self._serialize.url("setting_name", setting_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 is a documented success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AdvancedThreatProtectionSetting', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/advancedThreatProtectionSettings/{settingName}'}  # type: ignore
    async def create(
        self,
        resource_id: str,
        advanced_threat_protection_setting: "_models.AdvancedThreatProtectionSetting",
        **kwargs
    ) -> "_models.AdvancedThreatProtectionSetting":
        """Creates or updates the Advanced Threat Protection settings on a specified resource.
        :param resource_id: The identifier of the resource.
        :type resource_id: str
        :param advanced_threat_protection_setting: Advanced Threat Protection Settings.
        :type advanced_threat_protection_setting: ~azure.mgmt.security.models.AdvancedThreatProtectionSetting
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: AdvancedThreatProtectionSetting, or the result of cls(response)
        :rtype: ~azure.mgmt.security.models.AdvancedThreatProtectionSetting
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AdvancedThreatProtectionSetting"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-01-01"
        setting_name = "current"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self.create.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceId': self._serialize.url("resource_id", resource_id, 'str', skip_quote=True),
            'settingName': self._serialize.url("setting_name", setting_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the settings model as the PUT request body.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(advanced_threat_protection_setting, 'AdvancedThreatProtectionSetting')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('AdvancedThreatProtectionSetting', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    create.metadata = {'url': '/{resourceId}/providers/Microsoft.Security/advancedThreatProtectionSettings/{settingName}'}  # type: ignore
| mit |
ageron/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/dask_io.py | 42 | 4229 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Methods to allow dask.DataFrame (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.util.deprecation import deprecated
try:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
allowed_classes = (dd.Series, dd.DataFrame)
HAS_DASK = True
except ImportError:
HAS_DASK = False
def _add_to_index(df, start):
"""New dask.dataframe with values added to index of each subdataframe."""
df = df.copy()
df.index += start
return df
def _get_divisions(df):
"""Number of rows in each sub-dataframe."""
lengths = df.map_partitions(len).compute()
divisions = np.cumsum(lengths).tolist()
divisions.insert(0, 0)
return divisions
def _construct_dask_df_with_divisions(df):
  """Construct the new task graph and make a new dask.dataframe around it."""
  divisions = _get_divisions(df)
  # pylint: disable=protected-access
  name = 'csv-index' + df._name
  # One _add_to_index task per partition, offsetting that partition's index
  # by the cumulative row count of the partitions before it.
  dsk = {(name, i): (_add_to_index, (df._name, i), divisions[i])
         for i in range(df.npartitions)}
  # pylint: enable=protected-access
  from toolz import merge  # pylint: disable=g-import-not-at-top
  if isinstance(df, dd.DataFrame):
    return dd.DataFrame(merge(dsk, df.dask), name, df.columns, divisions)
  elif isinstance(df, dd.Series):
    return dd.Series(merge(dsk, df.dask), name, df.name, divisions)
  # NOTE(review): implicitly returns None for inputs that are neither a
  # dd.DataFrame nor a dd.Series; callers only pass those two types.
@deprecated(None, 'Please feed input to tf.data to support dask.')
def extract_dask_data(data):
  """Extract data from dask.Series or dask.DataFrame for predictors.

  Given a distributed dask.DataFrame or dask.Series containing columns or names
  for one or more predictors, this operation returns a single dask.DataFrame or
  dask.Series that can be iterated over.

  Args:
    data: A distributed dask.DataFrame or dask.Series.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over.
    If the supplied argument is neither a dask.DataFrame nor a dask.Series this
    operation returns it without modification.
  """
  # `allowed_classes` only exists when the dask import succeeded; guard on
  # HAS_DASK so non-dask input passes through (as documented) instead of
  # raising NameError when dask is not installed.
  if HAS_DASK and isinstance(data, allowed_classes):
    return _construct_dask_df_with_divisions(data)
  else:
    return data
@deprecated(None, 'Please feed input to tf.data to support dask.')
def extract_dask_labels(labels):
  """Extract data from dask.Series or dask.DataFrame for labels.

  Given a distributed dask.DataFrame or dask.Series containing exactly one
  column or name, this operation returns a single dask.DataFrame or dask.Series
  that can be iterated over.

  Args:
    labels: A distributed dask.DataFrame or dask.Series with exactly one
            column or name.

  Returns:
    A dask.DataFrame or dask.Series that can be iterated over.
    If the supplied argument is neither a dask.DataFrame nor a dask.Series this
    operation returns it without modification.

  Raises:
    ValueError: If the supplied dask.DataFrame contains more than one
                column or the supplied dask.Series contains more than
                one name.
  """
  # Without dask installed, neither `dd` nor `allowed_classes` exists; return
  # non-dask input unchanged (as documented) instead of raising NameError.
  if not HAS_DASK or not isinstance(labels, allowed_classes):
    return labels
  if isinstance(labels, dd.DataFrame):
    ncol = labels.columns
  elif isinstance(labels, dd.Series):
    ncol = labels.name
  # NOTE(review): for a Series this measures len() of its *name*; preserved
  # as-is from the original behavior.
  if len(ncol) > 1:
    raise ValueError('Only one column for labels is allowed.')
  return _construct_dask_df_with_divisions(labels)
| apache-2.0 |
valkjsaaa/sl4a | python-build/python-libs/gdata/tests/gdata_tests/books_test.py | 87 | 2277 | #!/usr/bin/python
__author__ = "James Sams <sams.james@gmail.com>"
import unittest
from gdata import test_data
import gdata.books
import atom
class BookEntryTest(unittest.TestCase):
  """Round-trip parsing checks for gdata.books entry and feed XML.

  Fix: `assertTrue`/`assertEqual` replace the `assert_`/`assertEquals`
  aliases, which were deprecated and are removed in Python 3.12.
  """

  def testBookEntryFromString(self):
    """Parses BOOK_ENTRY and verifies every mapped field."""
    entry = gdata.books.Book.FromString(test_data.BOOK_ENTRY)
    self.assertTrue(isinstance(entry, gdata.books.Book))
    self.assertEqual([x.text for x in entry.creator], ['John Rawls'])
    self.assertEqual(entry.date.text, '1999')
    self.assertEqual(entry.format.text, '538 pages')
    self.assertEqual([x.text for x in entry.identifier],
        ['b7GZr5Btp30C', 'ISBN:0198250541', 'ISBN:9780198250548'])
    self.assertEqual([x.text for x in entry.publisher],
        ['Oxford University Press'])
    self.assertEqual(entry.subject, None)
    self.assertEqual([x.text for x in entry.dc_title],
        ['A theory of justice'])
    self.assertEqual(entry.viewability.value,
        'http://schemas.google.com/books/2008#view_partial')
    self.assertEqual(entry.embeddability.value,
        'http://schemas.google.com/books/2008#embeddable')
    self.assertEqual(entry.review, None)
    self.assertEqual([getattr(entry.rating, x) for x in
        ("min", "max", "average", "value")], ['1', '5', '4.00', None])
    self.assertEqual(entry.GetThumbnailLink().href,
        'http://bks0.books.google.com/books?id=b7GZr5Btp30C&printsec=frontcover&img=1&zoom=5&sig=ACfU3U121bWZsbjBfVwVRSK2o982jJTd1w&source=gbs_gdata')
    self.assertEqual(entry.GetInfoLink().href,
        'http://books.google.com/books?id=b7GZr5Btp30C&ie=ISO-8859-1&source=gbs_gdata')
    self.assertEqual(entry.GetPreviewLink(), None)
    self.assertEqual(entry.GetAnnotationLink().href,
        'http://www.google.com/books/feeds/users/me/volumes')
    self.assertEqual(entry.get_google_id(), 'b7GZr5Btp30C')

  def testBookFeedFromString(self):
    """Parses BOOK_FEED and verifies the single contained entry."""
    feed = gdata.books.BookFeed.FromString(test_data.BOOK_FEED)
    self.assertTrue(isinstance(feed, gdata.books.BookFeed))
    self.assertEqual(len(feed.entry), 1)
    self.assertTrue(isinstance(feed.entry[0], gdata.books.Book))
# Allow running this module directly as a test script.
if __name__ == "__main__":
  unittest.main()
| apache-2.0 |
amiramix/serna-free | 3rd/pyqt/4.5.4/_patches/darwin/patch-configure.py | 10 | 1464 | *** xx/configure.py.orig 2005-05-11 20:01:53.719957680 +0400
--- xx/configure.py 2005-05-11 20:05:29.699123856 +0400
@@ -721,7 +721,7 @@
from distutils.sysconfig import get_config_vars
ducfg = get_config_vars()
- if sys.platform == "darwin":
+ if False and sys.platform == "darwin":
# We need to work out how to specify the right framework
# version.
link = "-framework Python"
@@ -755,6 +755,8 @@
fout = open("python.pro", "w+")
+ if opts.debug:
+ fout.write("CONFIG += debug\n")
if sipcfg.universal:
fout.write("CONFIG += ppc i386\n")
fout.write("QMAKE_MAC_SDK = %s\n" % sipcfg.universal)
@@ -1422,9 +1424,11 @@
def fix_qmake_args(args=""):
"""Make any platform specific adjustments to the arguments passed to qmake.
"""
- if sys.platform == "darwin":
+ if sys.platform == "darwin" and not os.environ.has_key('QMAKESPEC'):
# The Qt binary installer has macx-xcode as the default.
- args = "-spec macx-g++ " + args
+ if opts.debug:
+ args = "CONFIG+=debug " + args
+ args = "-spec ${QTDIR}/mkspecs/macx-g++ " + args
return args
| gpl-3.0 |
temasek/android_external_chromium_org | tools/resources/find_unused_resources.py | 24 | 6302 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This script searches for unused art assets listed in a .grd file.
It uses git grep to look for references to the IDR resource id or the base
filename. If neither is found, the file is reported unused.
Requires a git checkout. Must be run from your checkout's "src" root.
Example:
cd /work/chrome/src
  tools/resources/find_unused_resources.py ash/resources/ash_resources.grd
"""
__author__ = 'jamescook@chromium.org (James Cook)'
import os
import re
import subprocess
import sys
def GetBaseResourceId(resource_id):
  """Removes common suffixes from a resource ID.

  Removes suffixes that may be added by macros like IMAGE_GRID or IMAGE_BORDER.
  For example, converts IDR_FOO_LEFT and IDR_FOO_RIGHT to just IDR_FOO.

  Args:
    resource_id: String resource ID.

  Returns:
    A string with the base part of the resource ID.
  """
  # Note: This does not check _HOVER, _PRESSED, _HOT, etc. as those are never
  # used in macros. Each suffix is tried once, in order, against the
  # progressively trimmed ID (matching the original sequential behavior).
  trimmed = resource_id
  for suffix in ('_TOP_LEFT', '_TOP', '_TOP_RIGHT',
                 '_LEFT', '_CENTER', '_RIGHT',
                 '_BOTTOM_LEFT', '_BOTTOM', '_BOTTOM_RIGHT',
                 '_TL', '_T', '_TR',
                 '_L', '_M', '_R',
                 '_BL', '_B', '_BR'):
    if trimmed.endswith(suffix):
      trimmed = trimmed[:-len(suffix)]
  return trimmed
def FindFilesWithContents(string_a, string_b):
  """Returns list of paths of files that contain |string_a| or |string_b|.

  Uses --name-only to print the file paths. The default behavior of git grep
  is to OR together multiple patterns.

  Args:
    string_a: A string to search for (not a regular expression).
    string_b: As above.

  Returns:
    A list of file paths as strings.
  """
  output = subprocess.check_output([
      'git', 'grep', '--name-only', '--fixed-strings', '-e', string_a,
      '-e', string_b])
  # The output ends in a newline; drop the resulting trailing empty entry.
  return output.split('\n')[:-1]
def GetUnusedResources(grd_filepath):
"""Returns a list of resources that are unused in the code.
Prints status lines to the console because this function is quite slow.
Args:
grd_filepath: Path to a .grd file listing resources.
Returns:
A list of pairs of [resource_id, filepath] for the unused resources.
"""
unused_resources = []
grd_file = open(grd_filepath, 'r')
grd_data = grd_file.read()
print 'Checking:'
# Match the resource id and file path out of substrings like:
# ...name="IDR_FOO_123" file="common/foo.png"...
# by matching between the quotation marks.
pattern = re.compile(
r"""name="([^"]*)" # Match resource ID between quotes.
\s* # Run of whitespace, including newlines.
file="([^"]*)" # Match file path between quotes.""",
re.VERBOSE)
# Use finditer over the file contents because there may be newlines between
# the name and file attributes.
for result in pattern.finditer(grd_data):
# Extract the IDR resource id and file path.
resource_id = result.group(1)
filepath = result.group(2)
filename = os.path.basename(filepath)
# Print progress as we go along.
print resource_id
# Ensure the resource isn't used anywhere by checking both for the resource
# id (which should appear in C++ code) and the raw filename (in case the
# file is referenced in a script, test HTML file, etc.).
base_resource_id = GetBaseResourceId(resource_id)
matching_files = FindFilesWithContents(base_resource_id, filename)
# Each file is matched once in the resource file itself. If there are no
# other matching files, it is unused.
if len(matching_files) == 1:
# Give the user some happy news.
print 'Unused!'
unused_resources.append([resource_id, filepath])
return unused_resources
def GetScaleDirectories(resources_path):
  """Returns a list of paths to per-scale-factor resource directories.

  Assumes the directory names end in '_percent', for example,
  ash/resources/default_200_percent or
  chrome/app/theme/resources/touch_140_percent

  Args:
    resources_path: The base path of interest.

  Returns:
    A sorted list of paths relative to the 'src' directory.
  """
  scale_directories = []
  for entry in os.listdir(resources_path):
    candidate = os.path.join(resources_path, entry)
    # Only directories count; plain files ending in '_percent' are ignored.
    if candidate.endswith('_percent') and os.path.isdir(candidate):
      scale_directories.append(candidate)
  return sorted(scale_directories)
def main():
  """Validates the environment, reports unused resources listed in the given
  .grd file, and prints `git rm` command lines for deleting them."""
  # The script requires exactly one parameter, the .grd file path.
  if len(sys.argv) != 2:
    print 'Usage: tools/resources/find_unused_resources.py <path/to/grd>'
    sys.exit(1)
  grd_filepath = sys.argv[1]
  # Try to ensure we are in a source checkout.
  current_dir = os.getcwd()
  if os.path.basename(current_dir) != 'src':
    print 'Script must be run in your "src" directory.'
    sys.exit(1)
  # We require a git checkout to use git grep.
  if not os.path.exists(current_dir + '/.git'):
    print 'You must use a git checkout for this script to run.'
    print current_dir + '/.git', 'not found.'
    sys.exit(1)
  # Look up the scale-factor directories.
  resources_path = os.path.dirname(grd_filepath)
  scale_directories = GetScaleDirectories(resources_path)
  if not scale_directories:
    print 'No scale directories (like "default_100_percent") found.'
    sys.exit(1)
  # |unused_resources| stores pairs of [resource_id, filepath] for resource ids
  # that are not referenced in the code.
  unused_resources = GetUnusedResources(grd_filepath)
  if not unused_resources:
    print 'All resources are used.'
    sys.exit(0)
  # Dump our output for the user.
  print
  print 'Unused resource ids:'
  for resource_id, filepath in unused_resources:
    print resource_id
  # Print a list of 'git rm' command lines to remove unused assets.
  print
  print 'Unused files:'
  for resource_id, filepath in unused_resources:
    for directory in scale_directories:
      print 'git rm ' + os.path.join(directory, filepath)
if __name__ == '__main__':
  main()
| bsd-3-clause |
coldmind/django | django/core/management/commands/runserver.py | 140 | 7204 | from __future__ import unicode_literals
import errno
import os
import re
import socket
import sys
from datetime import datetime
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.core.servers.basehttp import get_internal_wsgi_application, run
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.exceptions import MigrationSchemaMissing
from django.db.migrations.executor import MigrationExecutor
from django.utils import autoreload, six
from django.utils.encoding import force_text, get_system_encoding
naiveip_re = re.compile(r"""^(?:
(?P<addr>
(?P<ipv4>\d{1,3}(?:\.\d{1,3}){3}) | # IPv4 address
(?P<ipv6>\[[a-fA-F0-9:]+\]) | # IPv6 address
(?P<fqdn>[a-zA-Z0-9-]+(?:\.[a-zA-Z0-9-]+)*) # FQDN
):)?(?P<port>\d+)$""", re.X)
class Command(BaseCommand):
    """Implements the `runserver` management command: a single-process
    development web server with optional threading and autoreload."""
    help = "Starts a lightweight Web server for development."
    # Validation is called explicitly each time the server is reloaded.
    requires_system_checks = False
    leave_locale_alone = True
    default_port = '8000'
    def add_arguments(self, parser):
        parser.add_argument('addrport', nargs='?',
            help='Optional port number, or ipaddr:port')
        parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False,
            help='Tells Django to use an IPv6 address.')
        parser.add_argument('--nothreading', action='store_false', dest='use_threading', default=True,
            help='Tells Django to NOT use threading.')
        parser.add_argument('--noreload', action='store_false', dest='use_reloader', default=True,
            help='Tells Django to NOT use the auto-reloader.')
    def execute(self, *args, **options):
        if options.get('no_color'):
            # We rely on the environment because it's currently the only
            # way to reach WSGIRequestHandler. This seems an acceptable
            # compromise considering `runserver` runs indefinitely.
            os.environ[str("DJANGO_COLORS")] = str("nocolor")
        super(Command, self).execute(*args, **options)
    def get_handler(self, *args, **options):
        """
        Returns the default WSGI handler for the runner.
        """
        return get_internal_wsgi_application()
    def handle(self, *args, **options):
        from django.conf import settings
        if not settings.DEBUG and not settings.ALLOWED_HOSTS:
            raise CommandError('You must set settings.ALLOWED_HOSTS if DEBUG is False.')
        self.use_ipv6 = options.get('use_ipv6')
        if self.use_ipv6 and not socket.has_ipv6:
            raise CommandError('Your Python does not support IPv6.')
        self._raw_ipv6 = False
        if not options.get('addrport'):
            self.addr = ''
            self.port = self.default_port
        else:
            # addrport may be "port", "ipv4:port", "[ipv6]:port" or "fqdn:port"
            # (see naiveip_re above).
            m = re.match(naiveip_re, options['addrport'])
            if m is None:
                raise CommandError('"%s" is not a valid port number '
                                   'or address:port pair.' % options['addrport'])
            self.addr, _ipv4, _ipv6, _fqdn, self.port = m.groups()
            if not self.port.isdigit():
                raise CommandError("%r is not a valid port number." % self.port)
            if self.addr:
                if _ipv6:
                    # Strip the surrounding brackets from the IPv6 literal.
                    self.addr = self.addr[1:-1]
                    self.use_ipv6 = True
                    self._raw_ipv6 = True
                elif self.use_ipv6 and not _fqdn:
                    raise CommandError('"%s" is not a valid IPv6 address.' % self.addr)
        if not self.addr:
            self.addr = '::1' if self.use_ipv6 else '127.0.0.1'
            self._raw_ipv6 = bool(self.use_ipv6)
        self.run(**options)
    def run(self, **options):
        """
        Runs the server, using the autoreloader if needed
        """
        use_reloader = options.get('use_reloader')
        if use_reloader:
            autoreload.main(self.inner_run, None, options)
        else:
            self.inner_run(None, **options)
    def inner_run(self, *args, **options):
        threading = options.get('use_threading')
        shutdown_message = options.get('shutdown_message', '')
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
        self.stdout.write("Performing system checks...\n\n")
        self.check(display_num_errors=True)
        self.check_migrations()
        now = datetime.now().strftime('%B %d, %Y - %X')
        if six.PY2:
            now = now.decode(get_system_encoding())
        self.stdout.write(now)
        self.stdout.write((
            "Django version %(version)s, using settings %(settings)r\n"
            "Starting development server at http://%(addr)s:%(port)s/\n"
            "Quit the server with %(quit_command)s.\n"
        ) % {
            "version": self.get_version(),
            "settings": settings.SETTINGS_MODULE,
            "addr": '[%s]' % self.addr if self._raw_ipv6 else self.addr,
            "port": self.port,
            "quit_command": quit_command,
        })
        try:
            handler = self.get_handler(*args, **options)
            run(self.addr, int(self.port), handler,
                ipv6=self.use_ipv6, threading=threading)
        except socket.error as e:
            # Use helpful error messages instead of ugly tracebacks.
            ERRORS = {
                errno.EACCES: "You don't have permission to access that port.",
                errno.EADDRINUSE: "That port is already in use.",
                errno.EADDRNOTAVAIL: "That IP address can't be assigned to.",
            }
            try:
                error_text = ERRORS[e.errno]
            except KeyError:
                error_text = force_text(e)
            self.stderr.write("Error: %s" % error_text)
            # Need to use an OS exit because sys.exit doesn't work in a thread
            os._exit(1)
        except KeyboardInterrupt:
            if shutdown_message:
                self.stdout.write(shutdown_message)
            sys.exit(0)
    def check_migrations(self):
        """
        Checks to see if the set of migrations on disk matches the
        migrations in the database. Prints a warning if they don't match.
        """
        try:
            executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
        except ImproperlyConfigured:
            # No databases are configured (or the dummy one)
            return
        except MigrationSchemaMissing:
            self.stdout.write(self.style.NOTICE(
                "\nNot checking migrations as it is not possible to access/create the django_migrations table."
            ))
            return
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        if plan:
            self.stdout.write(self.style.NOTICE(
                "\nYou have unapplied migrations; your app may not work properly until they are applied."
            ))
            self.stdout.write(self.style.NOTICE("Run 'python manage.py migrate' to apply them.\n"))
# Kept for backward compatibility
BaseRunserverCommand = Command
| bsd-3-clause |
eduartheinen/raspiblocos | node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 1788 | 1435 | #!/usr/bin/env python
import re
import json
# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
	"""Return the text for `codePoint`: the character itself for the BMP, a
	surrogate pair for astral code points, or the string 'Error' outside the
	Unicode range. (Python 2 only: relies on the `unichr` builtin.)"""
	if 0x0000 <= codePoint <= 0xFFFF:
		return unichr(codePoint)
	if 0x010000 <= codePoint <= 0x10FFFF:
		# https://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
		high = int((codePoint - 0x10000) / 0x400) + 0xD800
		low = int((codePoint - 0x10000) % 0x400) + 0xDC00
		return unichr(high) + unichr(low)
	return 'Error'
def hexify(codePoint):
	"""Format a code point as a zero-padded, uppercase `U+XXXXXX` string."""
	digits = hex(codePoint)[2:].upper()
	return 'U+' + digits.zfill(6)
def writeFile(filename, contents):
    # Log the target filename (Python 2 print statement), then write the
    # contents stripped of surrounding whitespace plus one trailing newline.
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode scalar value: the code point number, its
# decoded (unicode string) form, and its UTF-8 byte sequence.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    # Skip non-scalar values (the surrogate range is not encodable).
    if codePoint >= 0xD800 and codePoint <= 0xDFFF:
        continue
    symbol = unisymbol(codePoint)
    # Encode to UTF-8 then decode as latin1 so each byte maps to one
    # character — yields a string of the raw UTF-8 bytes (see link).
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });
jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| apache-2.0 |
sovietspy2/uzletiProject | python/Lib/poplib.py | 63 | 12805 | """A POP3 client class.
Based on the J. Myers POP3 draft, Jan. 96
"""
# Author: David Ascher <david_ascher@brown.edu>
# [heavily stealing from nntplib.py]
# Updated: Piers Lauder <piers@cs.su.oz.au> [Jul '97]
# String method conversion and test jig improvements by ESR, February 2001.
# Added the POP3_SSL class. Methods loosely based on IMAP_SSL. Hector Urtubia <urtubia@mrbook.org> Aug 2003
# Example (see the test function at the end of this file)
# Imports
import re, socket
__all__ = ["POP3","error_proto"]
# Exception raised when an error or invalid response is received:
class error_proto(Exception): pass
# Standard POP3 port (RFC 1939)
POP3_PORT = 110
# Standard POP3-over-SSL port
POP3_SSL_PORT = 995
# Line terminators (we always output CRLF, but accept any of CRLF, LFCR, LF)
CR = '\r'
LF = '\n'
CRLF = CR+LF
class POP3:
    """POP3 mail client supporting the minimal and optional command sets.

    Arguments can be strings or integers (where appropriate),
    e.g. retr(1) and retr('1') both work equally well.

    Minimal Command Set:
            USER name               user(name)
            PASS string             pass_(string)
            STAT                    stat()
            LIST [msg]              list(msg = None)
            RETR msg                retr(msg)
            DELE msg                dele(msg)
            NOOP                    noop()
            RSET                    rset()
            QUIT                    quit()

    Optional Commands (some servers support these):
            RPOP name               rpop(name)
            APOP name digest        apop(name, digest)
            TOP msg n               top(msg, n)
            UIDL [msg]              uidl(msg = None)

    Raises one exception: 'error_proto'.

    Instantiate with:
            POP3(hostname, port=110)

    NB: the POP protocol locks the mailbox from user
        authorization until QUIT, so be sure to get in, suck
        the messages, and quit, each time you access the
        mailbox.

    POP is a line-based protocol, which means large mail
    messages consume lots of python cycles reading them
    line-by-line.

    If it's available on your mail server, use IMAP4
    instead, it doesn't suffer from the two problems
    above.
    """
    def __init__(self, host, port=POP3_PORT,
                 timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
        self.host = host
        self.port = port
        self.sock = socket.create_connection((host, port), timeout)
        # Binary mode keeps line terminators intact so _getline can
        # count the exact octets received.
        self.file = self.sock.makefile('rb')
        self._debugging = 0
        # The server greets with a '+OK ...' banner; _getresp validates it.
        self.welcome = self._getresp()
    # Internal: send one raw line to the server, appending CRLF.
    def _putline(self, line):
        if self._debugging > 1: print '*put*', repr(line)
        self.sock.sendall('%s%s' % (line, CRLF))
    # Internal: send one command to the server (through _putline())
    def _putcmd(self, line):
        if self._debugging: print '*cmd*', repr(line)
        self._putline(line)
    # Internal: return one line from the server, stripping CRLF.
    # This is where all the CPU time of this module is consumed.
    # Raise error_proto('-ERR EOF') if the connection is closed.
    def _getline(self):
        line = self.file.readline()
        if self._debugging > 1: print '*get*', repr(line)
        if not line: raise error_proto('-ERR EOF')
        octets = len(line)
        # server can send any combination of CR & LF
        # however, 'readline()' returns lines ending in LF
        # so only possibilities are ...LF, ...CRLF, CR...LF
        if line[-2:] == CRLF:
            return line[:-2], octets
        if line[0] == CR:
            return line[1:-1], octets
        return line[:-1], octets
    # Internal: get a response from the server.
    # Raise 'error_proto' if the response doesn't start with '+'.
    def _getresp(self):
        resp, o = self._getline()
        if self._debugging > 1: print '*resp*', repr(resp)
        c = resp[:1]
        if c != '+':
            raise error_proto(resp)
        return resp
    # Internal: get a response plus following multi-line text from the
    # server, up to the terminating '.' line. Returns (response, list of
    # lines, total octet count).
    def _getlongresp(self):
        resp = self._getresp()
        list = []; octets = 0
        line, o = self._getline()
        while line != '.':
            # Undo POP3 byte-stuffing: a data line beginning with '.' is
            # sent as '..' on the wire (RFC 1939 section 3).
            if line[:2] == '..':
                o = o-1
                line = line[1:]
            octets = octets + o
            list.append(line)
            line, o = self._getline()
        return resp, list, octets
    # Internal: send a command and get the response
    def _shortcmd(self, line):
        self._putcmd(line)
        return self._getresp()
    # Internal: send a command and get the response plus following text
    def _longcmd(self, line):
        self._putcmd(line)
        return self._getlongresp()
    # These can be useful:
    def getwelcome(self):
        # Return the server's greeting banner captured at connect time.
        return self.welcome
    def set_debuglevel(self, level):
        # 0 = quiet, 1 = commands, >1 = full line-level wire trace.
        self._debugging = level
    # Here are all the POP commands:
    def user(self, user):
        """Send user name, return response
        (should indicate password required).
        """
        return self._shortcmd('USER %s' % user)
    def pass_(self, pswd):
        """Send password, return response
        (response includes message count, mailbox size).
        NB: mailbox is locked by server from here to 'quit()'
        """
        return self._shortcmd('PASS %s' % pswd)
    def stat(self):
        """Get mailbox status.
        Result is tuple of 2 ints (message count, mailbox size)
        """
        retval = self._shortcmd('STAT')
        rets = retval.split()
        if self._debugging: print '*stat*', repr(rets)
        numMessages = int(rets[1])
        sizeMessages = int(rets[2])
        return (numMessages, sizeMessages)
    def list(self, which=None):
        """Request listing, return result.
        Result without a message number argument is in form
        ['response', ['mesg_num octets', ...], octets].
        Result when a message number argument is given is a
        single response: the "scan listing" for that message.
        """
        if which is not None:
            return self._shortcmd('LIST %s' % which)
        return self._longcmd('LIST')
    def retr(self, which):
        """Retrieve whole message number 'which'.
        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('RETR %s' % which)
    def dele(self, which):
        """Delete message number 'which'.
        Result is 'response'.
        """
        return self._shortcmd('DELE %s' % which)
    def noop(self):
        """Does nothing.
        One supposes the response indicates the server is alive.
        """
        return self._shortcmd('NOOP')
    def rset(self):
        """Unmark all messages marked for deletion."""
        return self._shortcmd('RSET')
    def quit(self):
        """Signoff: commit changes on server, unlock mailbox, close connection."""
        try:
            resp = self._shortcmd('QUIT')
        except error_proto, val:
            # Return the error itself rather than raising: the connection
            # is being torn down either way.
            resp = val
        self.file.close()
        self.sock.close()
        del self.file, self.sock
        return resp
    #__del__ = quit
    # optional commands:
    def rpop(self, user):
        """Send an RPOP command (legacy trusted-host authentication)."""
        return self._shortcmd('RPOP %s' % user)
    # Matches the '<...>' timestamp an APOP-capable server embeds in its
    # greeting banner.
    timestamp = re.compile(r'\+OK.*(<[^>]+>)')
    def apop(self, user, secret):
        """Authorisation
        - only possible if server has supplied a timestamp in initial greeting.
        Args:
                user    - mailbox user;
                secret  - secret shared between client and server.
        NB: mailbox is locked by server from here to 'quit()'
        """
        m = self.timestamp.match(self.welcome)
        if not m:
            raise error_proto('-ERR APOP not supported by server')
        import hashlib
        # Per RFC 1939: digest = MD5(timestamp + shared secret), sent hex-encoded.
        digest = hashlib.md5(m.group(1)+secret).digest()
        digest = ''.join(map(lambda x:'%02x'%ord(x), digest))
        return self._shortcmd('APOP %s %s' % (user, digest))
    def top(self, which, howmuch):
        """Retrieve message header of message number 'which'
        and first 'howmuch' lines of message body.
        Result is in form ['response', ['line', ...], octets].
        """
        return self._longcmd('TOP %s %s' % (which, howmuch))
    def uidl(self, which=None):
        """Return message digest (unique id) list.
        If 'which', result contains unique id for that message
        in the form 'response mesgnum uid', otherwise result is
        the list ['response', ['mesgnum uid', ...], octets]
        """
        if which is not None:
            return self._shortcmd('UIDL %s' % which)
        return self._longcmd('UIDL')
# POP3_SSL is only defined when the ssl module is available.
try:
    import ssl
except ImportError:
    pass
else:
    class POP3_SSL(POP3):
        """POP3 client class over SSL connection

        Instantiate with: POP3_SSL(hostname, port=995, keyfile=None, certfile=None)
               hostname - the hostname of the pop3 over ssl server
               port - port number
               keyfile - PEM formatted file that contains your private key
               certfile - PEM formatted certificate chain file
        See the methods of the parent class POP3 for more documentation.
        """
        def __init__(self, host, port = POP3_SSL_PORT, keyfile = None, certfile = None):
            self.host = host
            self.port = port
            self.keyfile = keyfile
            self.certfile = certfile
            # Receive buffer for _getline/_fillBuffer; the SSL object has
            # no readline(), so lines are carved out of this buffer.
            self.buffer = ""
            msg = "getaddrinfo returns an empty list"
            self.sock = None
            # Try each address family/address in turn until one connects.
            for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM):
                af, socktype, proto, canonname, sa = res
                try:
                    self.sock = socket.socket(af, socktype, proto)
                    self.sock.connect(sa)
                except socket.error, msg:
                    if self.sock:
                        self.sock.close()
                    self.sock = None
                    continue
                break
            if not self.sock:
                raise socket.error, msg
            self.file = self.sock.makefile('rb')
            self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile)
            self._debugging = 0
            self.welcome = self._getresp()
        def _fillBuffer(self):
            # Read whatever the SSL layer has; empty read means EOF.
            localbuf = self.sslobj.read()
            if len(localbuf) == 0:
                raise error_proto('-ERR EOF')
            self.buffer += localbuf
        def _getline(self):
            # Override: extract one '\n'-terminated line from self.buffer,
            # refilling from the SSL object until a full line is present.
            line = ""
            renewline = re.compile(r'.*?\n')
            match = renewline.match(self.buffer)
            while not match:
                self._fillBuffer()
                match = renewline.match(self.buffer)
            line = match.group(0)
            self.buffer = renewline.sub('' ,self.buffer, 1)
            if self._debugging > 1: print '*get*', repr(line)
            octets = len(line)
            # Same terminator handling as POP3._getline.
            if line[-2:] == CRLF:
                return line[:-2], octets
            if line[0] == CR:
                return line[1:-1], octets
            return line[:-1], octets
        def _putline(self, line):
            # Override: ssl write() may send only part of the data, so
            # loop until every byte has gone out.
            if self._debugging > 1: print '*put*', repr(line)
            line += CRLF
            bytes = len(line)
            while bytes > 0:
                sent = self.sslobj.write(line)
                if sent == bytes:
                    break    # avoid copy
                line = line[sent:]
                bytes = bytes - sent
        def quit(self):
            """Signoff: commit changes on server, unlock mailbox, close connection."""
            try:
                resp = self._shortcmd('QUIT')
            except error_proto, val:
                resp = val
            self.sock.close()
            del self.sslobj, self.sock
            return resp
    __all__.append("POP3_SSL")
# Manual test jig: poplib.py <host> <user> <password>
# Connects, lists the mailbox, prints every message, then quits.
if __name__ == "__main__":
    import sys
    a = POP3(sys.argv[1])
    print a.getwelcome()
    a.user(sys.argv[2])
    a.pass_(sys.argv[3])
    a.list()
    (numMsgs, totalSize) = a.stat()
    # Message numbers are 1-based in POP3.
    for i in range(1, numMsgs + 1):
        (header, msg, octets) = a.retr(i)
        print "Message %d:" % i
        for line in msg:
            print '   ' + line
        print '-----------------------'
    a.quit()
| gpl-3.0 |
0sc0d3r/enigma2 | lib/python/Tools/Trashcan.py | 34 | 6801 | import Components.Task
from Components.config import config
from Components import Harddisk
from Components.GUIComponent import GUIComponent
from Components.VariableText import VariableText
import time
import os
import enigma
from enigma import pNavigation
def getTrashFolder(path=None):
	# Returns trash folder without symlinks
	# For movie paths the trash lives in <mountpoint>/movie/.Trash,
	# otherwise directly in <mountpoint>/.Trash. Returns "" for a
	# missing/autofs path and None on any lookup error.
	try:
		if path is None or os.path.realpath(path) == '/media/autofs':
			print 'path is none'
			return ""
		else:
			if '/movie' in path:
				mountpoint = Harddisk.findMountPoint(os.path.realpath(path))
				trashcan = os.path.join(mountpoint, 'movie')
			else:
				trashcan = Harddisk.findMountPoint(os.path.realpath(path))
			return os.path.realpath(os.path.join(trashcan, ".Trash"))
	except:
		return None
def createTrashFolder(path=None):
	"""Ensure the .Trash folder for 'path' exists; return it, or None on failure."""
	trash = getTrashFolder(path)
	# Give up unless we resolved a trash path and its parent is writable.
	if not trash or not os.access(os.path.split(trash)[0], os.W_OK):
		return None
	if os.path.isdir(trash):
		return trash
	try:
		os.mkdir(trash)
	except:
		return None
	return trash
def get_size(start_path = '.'):
	"""Return the combined size in bytes of all files below start_path.

	A false-y start_path yields 0; files that vanish or cannot be
	stat'ed during the walk are silently skipped.
	"""
	total = 0
	if not start_path:
		return total
	for dirpath, dirnames, filenames in os.walk(start_path):
		for name in filenames:
			full_path = os.path.join(dirpath, name)
			try:
				total += os.path.getsize(full_path)
			except:
				# File disappeared or is unreadable; ignore it.
				pass
	return total
class Trashcan:
	# Watches the session's recording events and triggers a trash
	# cleanup whenever a recording ends and nothing else is recording.
	def __init__(self, session):
		self.session = session
		session.nav.record_event.append(self.gotRecordEvent)
		# Prime self.recordings with the current recording count.
		self.gotRecordEvent(None, None)
	def gotRecordEvent(self, service, event):
		self.recordings = len(self.session.nav.getRecordings(False,pNavigation.isRealRecording))
		if event == enigma.iRecordableService.evEnd:
			self.cleanIfIdle()
	def destroy(self):
		# Detach from the session's event list; safe to call twice.
		if self.session is not None:
			self.session.nav.record_event.remove(self.gotRecordEvent)
		self.session = None
	def __del__(self):
		self.destroy()
	def cleanIfIdle(self):
		# RecordTimer calls this when preparing a recording. That is a
		# nice moment to clean up.
		if self.recordings:
			print "[Trashcan] Recording in progress", self.recordings
			return
		# Files older than the configured number of days are purged;
		# additionally enough space is freed to keep the reserve.
		ctimeLimit = time.time() - (config.usage.movielist_trashcan_days.value * 3600 * 24)
		reserveBytes = 1024*1024*1024 * int(config.usage.movielist_trashcan_reserve.value)
		clean(ctimeLimit, reserveBytes)
def clean(ctimeLimit, reserveBytes):
	# Schedule a background CleanTrashTask unless one is already pending
	# or the trashcan feature is disabled in configuration.
	isCleaning = False
	for job in Components.Task.job_manager.getPendingJobs():
		jobname = str(job.name)
		if jobname.startswith(_("Cleaning Trashes")):
			isCleaning = True
			break
	if config.usage.movielist_trashcan.value and not isCleaning:
		name = _("Cleaning Trashes")
		job = Components.Task.Job(name)
		task = CleanTrashTask(job, name)
		task.openFiles(ctimeLimit, reserveBytes)
		Components.Task.job_manager.AddJob(job)
	elif isCleaning:
		print "[Trashcan] Cleanup already running"
	else:
		print "[Trashcan] Disabled skipping check."
def cleanAll(path=None):
	# Erase every file in the trash folder belonging to 'path', then
	# try to remove the now-empty subdirectories (bottom-up walk).
	trash = getTrashFolder(path)
	if not os.path.isdir(trash):
		print "[Trashcan] No trash.", trash
		return 0
	for root, dirs, files in os.walk(trash, topdown=False):
		for name in files:
			fn = os.path.join(root, name)
			try:
				# Deletion is delegated to the background eraser.
				enigma.eBackgroundFileEraser.getInstance().erase(fn)
			except Exception, e:
				print "[Trashcan] Failed to erase %s:"% name, e
		# Remove empty directories if possible
		for name in dirs:
			try:
				os.rmdir(os.path.join(root, name))
			except:
				pass
def init(session):
	# Create the module-level Trashcan singleton bound to this session.
	global instance
	instance = Trashcan(session)
class CleanTrashTask(Components.Task.PythonTask):
	# Background task that scans every mounted filesystem for .Trash
	# folders, erases entries older than ctimeLimit, and then deletes
	# the oldest remaining entries until reserveBytes of free space is
	# available on each mount.
	def openFiles(self, ctimeLimit, reserveBytes):
		self.ctimeLimit = ctimeLimit
		self.reserveBytes = reserveBytes
	def work(self):
		mounts=[]
		matches = []
		print "[Trashcan] probing folders"
		f = open('/proc/mounts', 'r')
		for line in f.readlines():
			parts = line.strip().split()
			# Skip the autofs master mount; network mounts are only
			# cleaned when explicitly enabled in configuration.
			if parts[1] == '/media/autofs':
				continue
			if config.usage.movielist_trashcan_network_clean.value and parts[1].startswith('/media/net'):
				mounts.append(parts[1])
			elif config.usage.movielist_trashcan_network_clean.value and parts[1].startswith('/media/autofs'):
				mounts.append(parts[1])
			elif not parts[1].startswith('/media/net') and not parts[1].startswith('/media/autofs'):
				mounts.append(parts[1])
		f.close()
		# A mount can hold a trash at its root and/or under movie/.
		for mount in mounts:
			if os.path.isdir(os.path.join(mount,'.Trash')):
				matches.append(os.path.join(mount,'.Trash'))
			if os.path.isdir(os.path.join(mount,'movie/.Trash')):
				matches.append(os.path.join(mount,'movie/.Trash'))
		print "[Trashcan] found following trashcan's:",matches
		if len(matches):
			for trashfolder in matches:
				print "[Trashcan] looking in trashcan",trashfolder
				trashsize = get_size(trashfolder)
				diskstat = os.statvfs(trashfolder)
				free = diskstat.f_bfree * diskstat.f_bsize
				# How many more bytes must be freed to honour the reserve
				# (negative or zero means no extra deletion is needed).
				bytesToRemove = self.reserveBytes - free
				print "[Trashcan] " + str(trashfolder) + ": Size:",trashsize
				candidates = []
				size = 0
				for root, dirs, files in os.walk(trashfolder, topdown=False):
					for name in files:
						try:
							fn = os.path.join(root, name)
							st = os.stat(fn)
							if st.st_ctime < self.ctimeLimit:
								# Old enough: erase unconditionally.
								enigma.eBackgroundFileEraser.getInstance().erase(fn)
								bytesToRemove -= st.st_size
							else:
								# Too young: keep as a candidate for
								# space-driven deletion below.
								candidates.append((st.st_ctime, fn, st.st_size))
								size += st.st_size
						except Exception, e:
							print "[Trashcan] Failed to stat %s:"% name, e
					# Remove empty directories if possible
					for name in dirs:
						try:
							os.rmdir(os.path.join(root, name))
						except:
							pass
					candidates.sort()
					# Now we have a list of ctime, candidates, size. Sorted by ctime (=deletion time)
					for st_ctime, fn, st_size in candidates:
						if bytesToRemove < 0:
							break
						try:
							# somtimes the file does not exist, can happen if trashcan is on a network, the main box could also be emptying trash at same time.
							enigma.eBackgroundFileEraser.getInstance().erase(fn)
						except:
							pass
						bytesToRemove -= st_size
						size -= st_size
					print "[Trashcan] " + str(trashfolder) + ": Size now:",size
class TrashInfo(VariableText, GUIComponent):
	# GUI label showing the size of the trashcan for a given path.
	# Display modes (only USED is rendered by update()):
	FREE = 0
	USED = 1
	SIZE = 2
	def __init__(self, path, type, update = True):
		GUIComponent.__init__(self)
		VariableText.__init__(self)
		self.type = type
		if update and path != '/media/autofs/':
			self.update(path)
	def update(self, path):
		try:
			total_size = get_size(getTrashFolder(path))
		except OSError:
			return -1
		if self.type == self.USED:
			try:
				# Pick a human-readable unit by decimal thresholds,
				# shifting by powers of 1024 for the displayed value.
				if total_size < 10000000:
					total_size = _("%d KB") % (total_size >> 10)
				elif total_size < 10000000000:
					total_size = _("%d MB") % (total_size >> 20)
				else:
					total_size = _("%d GB") % (total_size >> 30)
				self.setText(_("Trashcan:") + " " + total_size)
			except:
				# occurs when f_blocks is 0 or a similar error
				self.setText("-?-")
	GUI_WIDGET = enigma.eLabel
| gpl-2.0 |
flwh/KK_mt6589_iq451 | prebuilts/python/linux-x86/2.7.5/lib/python2.7/encodings/unicode_escape.py | 852 | 1184 | """ Python 'unicode-escape' Codec
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless encoder/decoder pair for the 'unicode-escape' codec."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.unicode_escape_encode
    decode = codecs.unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input* with 'unicode-escape'; *final* is ignored (stateless)."""
        encoded, _consumed = codecs.unicode_escape_encode(input, self.errors)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode unicode-escape *input*; *final* is ignored (stateless)."""
        decoded, _consumed = codecs.unicode_escape_decode(input, self.errors)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Inherits encode from Codec; no extra state needed.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Inherits decode from Codec; no extra state needed.
    pass
### encodings module API
def getregentry():
    # Entry point used by the encodings package to register this codec.
    return codecs.CodecInfo(
        name='unicode-escape',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamwriter=StreamWriter,
        streamreader=StreamReader,
    )
| gpl-2.0 |
Sutil/netanimations | node_modules/node-gyp/gyp/pylib/gyp/ninja_syntax.py | 2485 | 5536 | # This file comes from
# https://github.com/martine/ninja/blob/master/misc/ninja_syntax.py
# Do not edit! Edit the upstream one instead.
"""Python module for generating .ninja files.
Note that this is emphatically not a required piece of Ninja; it's
just a helpful utility for build-file-generation systems that already
use Python.
"""
import textwrap
import re
def escape_path(word):
    """Escape a path for use in a ninja file: '$ ', ' ' and ':' get '$'-prefixed."""
    # Replacement order matters: existing '$ ' pairs are doubled first so
    # the subsequent space escaping does not re-escape them incorrectly.
    for plain, escaped in (('$ ', '$$ '), (' ', '$ '), (':', '$:')):
        word = word.replace(plain, escaped)
    return word
class Writer(object):
    """Emits ninja build syntax to a file-like object, word-wrapping long
    lines with ninja's '$' line-continuation."""
    def __init__(self, output, width=78):
        self.output = output
        self.width = width
    def newline(self):
        self.output.write('\n')
    def comment(self, text):
        # width - 2 leaves room for the '# ' prefix on each wrapped line.
        for line in textwrap.wrap(text, self.width - 2):
            self.output.write('# ' + line + '\n')
    def variable(self, key, value, indent=0):
        if value is None:
            return
        if isinstance(value, list):
            value = ' '.join(filter(None, value))  # Filter out empty strings.
        self._line('%s = %s' % (key, value), indent)
    def pool(self, name, depth):
        self._line('pool %s' % name)
        self.variable('depth', depth, indent=1)
    def rule(self, name, command, description=None, depfile=None,
             generator=False, pool=None, restat=False, rspfile=None,
             rspfile_content=None, deps=None):
        # Emit the rule header, then only the variables actually supplied.
        self._line('rule %s' % name)
        self.variable('command', command, indent=1)
        if description:
            self.variable('description', description, indent=1)
        if depfile:
            self.variable('depfile', depfile, indent=1)
        if generator:
            self.variable('generator', '1', indent=1)
        if pool:
            self.variable('pool', pool, indent=1)
        if restat:
            self.variable('restat', '1', indent=1)
        if rspfile:
            self.variable('rspfile', rspfile, indent=1)
        if rspfile_content:
            self.variable('rspfile_content', rspfile_content, indent=1)
        if deps:
            self.variable('deps', deps, indent=1)
    def build(self, outputs, rule, inputs=None, implicit=None, order_only=None,
              variables=None):
        # Build statement: outputs, rule, explicit inputs, then implicit
        # deps after '|' and order-only deps after '||'.
        outputs = self._as_list(outputs)
        all_inputs = self._as_list(inputs)[:]
        out_outputs = list(map(escape_path, outputs))
        all_inputs = list(map(escape_path, all_inputs))
        if implicit:
            implicit = map(escape_path, self._as_list(implicit))
            all_inputs.append('|')
            all_inputs.extend(implicit)
        if order_only:
            order_only = map(escape_path, self._as_list(order_only))
            all_inputs.append('||')
            all_inputs.extend(order_only)
        self._line('build %s: %s' % (' '.join(out_outputs),
                                     ' '.join([rule] + all_inputs)))
        if variables:
            if isinstance(variables, dict):
                iterator = iter(variables.items())
            else:
                iterator = iter(variables)
            for key, val in iterator:
                self.variable(key, val, indent=1)
        return outputs
    def include(self, path):
        self._line('include %s' % path)
    def subninja(self, path):
        self._line('subninja %s' % path)
    def default(self, paths):
        self._line('default %s' % ' '.join(self._as_list(paths)))
    def _count_dollars_before_index(self, s, i):
        """Returns the number of '$' characters right in front of s[i]."""
        dollar_count = 0
        dollar_index = i - 1
        while dollar_index > 0 and s[dollar_index] == '$':
            dollar_count += 1
            dollar_index -= 1
        return dollar_count
    def _line(self, text, indent=0):
        """Write 'text' word-wrapped at self.width characters."""
        leading_space = '  ' * indent
        while len(leading_space) + len(text) > self.width:
            # The text is too wide; wrap if possible.
            # Find the rightmost space that would obey our width constraint and
            # that's not an escaped space (an even number of '$' before it).
            available_space = self.width - len(leading_space) - len(' $')
            space = available_space
            while True:
                space = text.rfind(' ', 0, space)
                if space < 0 or \
                   self._count_dollars_before_index(text, space) % 2 == 0:
                    break
            if space < 0:
                # No such space; just use the first unescaped space we can find.
                space = available_space - 1
                while True:
                    space = text.find(' ', space + 1)
                    if space < 0 or \
                       self._count_dollars_before_index(text, space) % 2 == 0:
                        break
                if space < 0:
                    # Give up on breaking.
                    break
            # ' $' is ninja's line continuation marker.
            self.output.write(leading_space + text[0:space] + ' $\n')
            text = text[space+1:]
            # Subsequent lines are continuations, so indent them.
            leading_space = '  ' * (indent+2)
        self.output.write(leading_space + text + '\n')
    def _as_list(self, input):
        # Normalize None/scalar/list into a list.
        if input is None:
            return []
        if isinstance(input, list):
            return input
        return [input]
def escape(string):
    """Escape a string such that it can be embedded into a Ninja file
    without further interpretation (every '$' is doubled)."""
    assert '\n' not in string, 'Ninja syntax does not allow newlines'
    # '$' is ninja's only metacharacter; splitting on it and re-joining
    # with '$$' doubles each occurrence.
    return '$$'.join(string.split('$'))
| cc0-1.0 |
spcui/tp-qemu | qemu/tests/balloon_check.py | 3 | 12940 | import re
import logging
import random
from autotest.client.shared import error
from virttest import qemu_monitor, utils_test, utils_misc
class BallooningTest(object):
    """
    Provide basic functions for memory ballooning test cases
    """
    def __init__(self, test, params, env):
        self.test = test
        self.params = params
        self.env = env
        self.free_mem_cmd = params["free_mem_cmd"]
        # Minimum fraction of a balloon change that must be visible from
        # inside the guest for the check to pass.
        self.ratio = float(params.get("ratio", 0.5))
        self.vm = env.get_vm(params["main_vm"])
        self.vm.verify_alive()
        timeout = int(params.get("login_timeout", 360))
        self.session = self.vm.wait_for_login(timeout=timeout)
        self.ori_mem = int(params['mem'])
        # Bring the balloon back to the configured size before recording
        # baseline values.
        self.current_mmem = self.get_ballooned_memory()
        if self.current_mmem != self.ori_mem:
            self.balloon_memory(self.ori_mem)
        self.ori_gmem = self.get_memory_status()
        self.current_gmem = self.ori_gmem
        self.current_mmem = self.ori_mem
        self.test_round = 0
    def get_ballooned_memory(self):
        """
        Get the size of memory from monitor
        :return: the size of memory
        :rtype: int
        """
        try:
            output = self.vm.monitor.info("balloon")
            ballooned_mem = int(re.findall(r"\d+", str(output))[0])
            if self.vm.monitor.protocol == "qmp":
                # QMP reports the balloon size in bytes; scale down to MB
                # to match the human monitor. NOTE(review): 1024 ** -2 is
                # a float multiply, so the result becomes a float here.
                ballooned_mem *= 1024 ** -2
        except qemu_monitor.MonitorError, emsg:
            logging.error(emsg)
            return 0
        return ballooned_mem
    @error.context_aware
    def memory_check(self, step, ballooned_mem):
        """
        Check memory status according expect values
        :param step: the check point string
        :type step: string
        :param ballooned_mem: ballooned memory in current step
        :type ballooned_mem: int
        :return: memory size get from monitor and guest
        :rtype: tuple
        """
        error.context("Check memory status %s" % step, logging.info)
        mmem = self.get_ballooned_memory()
        gmem = self.get_memory_status()
        # The monitor must report exactly the requested change; the guest
        # only needs to reflect at least self.ratio of it.
        if (abs(mmem - self.ori_mem) != ballooned_mem
                or (abs(gmem - self.ori_gmem) < self.ratio * ballooned_mem)):
            self.error_report(step, self.ori_mem - ballooned_mem, mmem, gmem)
            raise error.TestFail("Balloon test failed %s" % step)
        return (mmem, gmem)
    @error.context_aware
    def balloon_memory(self, new_mem):
        """
        Baloon memory to new_mem and verifies on both qemu monitor and
        guest OS if change worked.
        :param new_mem: New desired memory.
        :type new_mem: int
        """
        error.context("Change VM memory to %s" % new_mem, logging.info)
        compare_mem = new_mem
        if self.params["monitor_type"] == "qmp":
            # QMP takes the balloon target in bytes, human monitor in MB.
            new_mem = new_mem * 1024 * 1024
        # This should be replaced by proper monitor method call
        self.vm.monitor.send_args_cmd("balloon value=%s" % new_mem)
        balloon_timeout = float(self.params.get("balloon_timeout", 100))
        status = utils_misc.wait_for((lambda: compare_mem
                                      == self.get_ballooned_memory()),
                                     balloon_timeout)
        if status is None:
            raise error.TestFail("Failed to balloon memory to expect"
                                 " value during %ss" % balloon_timeout)
    def run_balloon_sub_test(self, test, params, env, test_tag):
        """
        Run subtest after ballooned memory. Set up the related parameters
        according to the subtest.
        :param test: QEMU test object
        :type test: object
        :param params: Dictionary with the test parameters
        :type param: dict
        :param env: Dictionary with test environment.
        :type env: dict
        :return: if qemu-kvm process quit after test. There are three status
                 for this variable. -1 means the process will not quit. 0
                 means the process will quit but already restart in sub test.
                 1 means the process quit after sub test.
        :rtype: int
        """
        utils_test.run_virt_sub_test(test, params, env,
                                     sub_type=test_tag)
        qemu_quit_after_test = -1
        if "shutdown" in test_tag:
            logging.info("Guest shutdown normally after balloon")
            qemu_quit_after_test = 1
        if params.get("session_need_update", "no") == "yes":
            timeout = int(self.params.get("login_timeout", 360))
            self.session = self.vm.wait_for_login(timeout=timeout)
        if params.get("qemu_quit_after_sub_case", "no") == "yes":
            # Subtest restarted qemu, so the balloon is back at the
            # original size.
            self.current_mmem = self.ori_mem
            self.current_gmem = self.ori_gmem
            qemu_quit_after_test = 0
        return qemu_quit_after_test
    def get_memory_boundary(self, balloon_type='evict'):
        """
        Get the legal memory boundary for balloon operation.
        :param balloon_type: evict or enlarge
        :type balloon_type: string
        :return: min and max size of the memory
        :rtype: tuple
        """
        max_size = self.ori_mem
        if balloon_type == 'enlarge':
            min_size = self.current_mmem
        else:
            # Evicting below the guest's in-use memory would be illegal;
            # derive the floor from total minus free inside the guest.
            vm_total = self.get_memory_status()
            status, output = self.session.cmd_status_output(self.free_mem_cmd)
            if status != 0:
                raise error.TestError("Can not get guest memory information")
            vm_mem_free = int(re.findall(r'\d+', output)[0]) / 1024
            min_size = vm_total - vm_mem_free
        return min_size, max_size
    @error.context_aware
    def run_ballooning_test(self, expect_mem, tag):
        """
        Run a loop of ballooning test
        :param expect_mem: memory will be setted in test
        :type expect_mem: int
        :param tag: test tag to get related params
        :type tag: string
        :return: If test should quit after test
        :rtype: bool
        """
        if self.test_round < 1:
            self.memory_check("before ballooning test", 0)
        params_tag = self.params.object_params(tag)
        balloon_type = params_tag.get("balloon_type")
        min_size, max_size = self.get_memory_boundary(balloon_type)
        if expect_mem < min_size or expect_mem > max_size:
            raise error.TestError("Memory is set to an illegal size %s. It "
                                  "should between %s and %s" % (expect_mem,
                                                                min_size,
                                                                max_size))
        self.balloon_memory(expect_mem)
        self.test_round += 1
        # Classify the operation for logging/check purposes.
        if expect_mem > self.current_mmem:
            balloon_type = "enlarge"
        elif expect_mem < self.current_mmem:
            balloon_type = "evict"
        else:
            balloon_type = "command test"
        mmem, gmem = self.memory_check("after %s memory" % balloon_type,
                                       self.ori_mem - expect_mem)
        self.current_mmem = mmem
        self.current_gmem = gmem
        if (params_tag.get("run_sub_test_after_balloon", "no") == "yes"
                and params_tag.get('sub_test_after_balloon')):
            should_quit = self.run_balloon_sub_test(self.test, params_tag,
                                                    self.env,
                                                    params_tag['sub_test_after_balloon'])
            if should_quit == 1:
                return True
            elif should_quit == 0:
                # qemu restarted during subtest; re-verify at original size.
                expect_mem = self.ori_mem
                mmem, gmem = self.memory_check("after subtest",
                                               self.ori_mem - expect_mem)
                self.current_mmem = mmem
                self.current_gmem = gmem
        return False
    def reset_memory(self):
        """
        Reset memory to original value
        """
        if self.vm.is_alive():
            self.balloon_memory(self.ori_mem)
    def error_report(self, step, expect_value, monitor_value, guest_value):
        """
        Generate the error report
        :param step: the step of the error happen
        :param expect_value: memory size assign to the vm
        :param monitor_value: memory size report from monitor, this value can
                              be None
        :param guest_value: memory size report from guest, this value can be
                            None
        """
        # Implemented by the OS-specific subclasses below.
        pass
    def get_memory_status(self):
        """
        Get Memory status inside guest.
        """
        # Implemented by the OS-specific subclasses below.
        pass
class BallooningTestWin(BallooningTest):
    """
    Windows memory ballooning test
    """
    def error_report(self, step, expect_value, monitor_value, guest_value):
        """
        Generate the error report
        :param step: the step of the error happen
        :param expect_value: memory size assign to the vm
        :param monitor_value: memory size report from monitor, this value can
                              be None
        :param guest_value: memory size report from guest, this value can be
                            None
        """
        logging.error("Memory size mismatch %s:\n" % step)
        # Windows reports free memory, so log deltas rather than absolutes.
        error_msg = "Wanted to be changed: %s\n" % (self.ori_mem
                                                    - expect_value)
        if monitor_value:
            error_msg += "Changed in monitor: %s\n" % (self.ori_mem
                                                       - monitor_value)
        error_msg += "Changed in guest: %s\n" % (guest_value - self.ori_gmem)
        logging.error(error_msg)
    def get_memory_status(self):
        """
        Get Memory status inside guest.
        :return: the free memory size inside guest.
        :rtype: int
        """
        free_mem_cmd = self.params['free_mem_cmd']
        try:
            # In Windows guest we get the free memory for memory compare
            memory = self.session.cmd_output(free_mem_cmd)
            memory = int(re.findall(r"\d+", memory)[0])
            # Convert KB to MB. NOTE(review): float multiply, result is
            # a float.
            memory *= 1024 ** -1
        except Exception, emsg:
            logging.error(emsg)
            return 0
        return memory
class BallooningTestLinux(BallooningTest):
    """
    Linux memory ballooning test
    """
    def error_report(self, step, expect_value, monitor_value, guest_value):
        """
        Generate the error report
        @param step: the step of the error happen
        @param expect_value: memory size assign to the vm
        @param monitor_value: memory size report from monitor, this value can
                              be None
        @param guest_value: memory size report from guest, this value can be
                            None
        """
        logging.error("Memory size mismatch %s:\n" % step)
        error_msg = "Assigner to VM: %s\n" % expect_value
        if monitor_value:
            error_msg += "Reported by monitor: %s\n" % monitor_value
        if guest_value:
            error_msg += "Reported by guest OS: %s\n" % guest_value
        logging.error(error_msg)
    def get_memory_status(self):
        """
        Get Memory status inside guest.
        :return: the size of total memory in guest
        :rtype: int
        """
        try:
            # Linux exposes the current total through the vm helper.
            memory = self.vm.get_current_memory_size()
        except Exception, emsg:
            logging.error(emsg)
            return 0
        return memory
@error.context_aware
def run(test, params, env):
    """
    Check Memory ballooning, use M when compare memory in this script:
    1) Boot a guest with balloon enabled.
    2) Balloon guest memory to given value and run sub test(Optional)
    3) Repeat step 2 following the cfg files.
    4) Reset memory back to the original value
    :param test: QEMU test object
    :param params: Dictionary with the test parameters
    :param env: Dictionary with test environment.
    """
    if params['os_type'] == 'windows':
        balloon_test = BallooningTestWin(test, params, env)
    else:
        balloon_test = BallooningTestLinux(test, params, env)
    for tag in params.objects('test_tags'):
        error.context("Running %s test" % tag, logging.info)
        params_tag = params.object_params(tag)
        # Target size: explicit value, ratio of original, or a random
        # point within the legal boundary for the balloon type.
        if params_tag.get('expect_memory'):
            expect_mem = int(params_tag.get('expect_memory'))
        elif params_tag.get('expect_memory_ratio'):
            expect_mem = int(balloon_test.ori_mem *
                             float(params_tag.get('expect_memory_ratio')))
        else:
            balloon_type = params_tag['balloon_type']
            min_sz, max_sz = balloon_test.get_memory_boundary(balloon_type)
            expect_mem = int(random.uniform(min_sz, max_sz))
        quit_after_test = balloon_test.run_ballooning_test(expect_mem, tag)
        if quit_after_test:
            # Subtest shut the guest down; nothing left to reset.
            return
    balloon_test.reset_memory()
| gpl-2.0 |
msingh172/youtube-dl | devscripts/gh-pages/generate-download.py | 126 | 1263 | #!/usr/bin/env python3
from __future__ import unicode_literals
import hashlib
import urllib.request
import json
versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]
data = urllib.request.urlopen(URL).read()
# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
md5sum = hashlib.md5(data).hexdigest()
sha1sum = hashlib.sha1(data).hexdigest()
sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_MD5SUM@', md5sum)
template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
dlf.write(template)
| unlicense |
Yuudachimoe/HikariChun-RedBot | lib/pip/wheel.py | 338 | 32010 | """
Support for installing and building the "wheel" binary package format.
"""
from __future__ import absolute_import
import compileall
import csv
import errno
import functools
import hashlib
import logging
import os
import os.path
import re
import shutil
import stat
import sys
import tempfile
import warnings
from base64 import urlsafe_b64encode
from email.parser import Parser
from pip._vendor.six import StringIO
import pip
from pip.compat import expanduser
from pip.download import path_to_url, unpack_url
from pip.exceptions import (
InstallationError, InvalidWheelFilename, UnsupportedWheel)
from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME
from pip import pep425tags
from pip.utils import (
call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks,
)
from pip.utils.ui import open_spinner
from pip.utils.logging import indent_log
from pip.utils.setuptools_build import SETUPTOOLS_SHIM
from pip._vendor.distlib.scripts import ScriptMaker
from pip._vendor import pkg_resources
from pip._vendor.packaging.utils import canonicalize_name
from pip._vendor.six.moves import configparser
wheel_ext = '.whl'
VERSION_COMPATIBLE = (1, 0)
logger = logging.getLogger(__name__)
class WheelCache(object):
    """A cache of wheels for future installs."""
    def __init__(self, cache_dir, format_control):
        """Create a wheel cache.
        :param cache_dir: The root of the cache; may be falsy to disable
            caching entirely.
        :param format_control: A pip.index.FormatControl object to limit
            binaries being read from the cache.
        """
        if cache_dir:
            self._cache_dir = expanduser(cache_dir)
        else:
            self._cache_dir = None
        self._format_control = format_control
    def cached_wheel(self, link, package_name):
        """Return a cached-wheel link for *link*, or *link* unchanged."""
        return cached_wheel(self._cache_dir, link,
                            self._format_control, package_name)
def _cache_for_link(cache_dir, link):
"""
Return a directory to store cached wheels in for link.
Because there are M wheels for any one sdist, we provide a directory
to cache them in, and then consult that directory when looking up
cache hits.
We only insert things into the cache if they have plausible version
numbers, so that we don't contaminate the cache with things that were not
unique. E.g. ./package might have dozens of installs done for it and build
a version of 0.0...and if we built and cached a wheel, we'd end up using
the same wheel even if the source has been edited.
:param cache_dir: The cache_dir being used by pip.
:param link: The link of the sdist for which this will cache wheels.
"""
# We want to generate an url to use as our cache key, we don't want to just
# re-use the URL because it might have other items in the fragment and we
# don't care about those.
key_parts = [link.url_without_fragment]
if link.hash_name is not None and link.hash is not None:
key_parts.append("=".join([link.hash_name, link.hash]))
key_url = "#".join(key_parts)
# Encode our key url with sha224, we'll use this because it has similar
# security properties to sha256, but with a shorter total output (and thus
# less secure). However the differences don't make a lot of difference for
# our use case here.
hashed = hashlib.sha224(key_url.encode()).hexdigest()
# We want to nest the directories some to prevent having a ton of top level
# directories where we might run out of sub directories on some FS.
parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]]
# Inside of the base location for cached wheels, expand our parts and join
# them all together.
return os.path.join(cache_dir, "wheels", *parts)
def cached_wheel(cache_dir, link, format_control, package_name):
    """Return a link to a cached compatible wheel for *link*, if any.
    Falls back to returning *link* unchanged whenever the cache does not
    apply (disabled, already a wheel, non-artifact link, binaries
    disallowed, no usable cached wheel, ...).
    """
    # Preserve the original early-out order exactly.
    if not cache_dir:
        return link
    if not link:
        return link
    if link.is_wheel:
        return link
    if not link.is_artifact:
        return link
    if not package_name:
        return link
    canonical_name = canonicalize_name(package_name)
    formats = pip.index.fmt_ctl_formats(format_control, canonical_name)
    if "binary" not in formats:
        return link
    root = _cache_for_link(cache_dir, link)
    try:
        wheel_names = os.listdir(root)
    except OSError as exc:
        # A missing cache directory simply means no cached wheel.
        if exc.errno in (errno.ENOENT, errno.ENOTDIR):
            return link
        raise
    candidates = []
    for wheel_name in wheel_names:
        try:
            candidate = Wheel(wheel_name)
        except InvalidWheelFilename:
            continue
        if not candidate.supported():
            # Built for a different python/arch/etc
            continue
        candidates.append((candidate.support_index_min(), wheel_name))
    if not candidates:
        return link
    # Lowest support index == most preferred tag combination.
    best = min(candidates)
    return pip.index.Link(path_to_url(os.path.join(root, best[1])))
def rehash(path, algo='sha256', blocksize=1 << 20):
    """Return (encoded_digest, length) for *path* using hashlib.new(algo).
    :param path: file to hash
    :param algo: any algorithm name accepted by :func:`hashlib.new`
    :param blocksize: chunk size for streaming the file
    :return: tuple of RECORD-style digest string (``<algo>=<urlsafe-b64>``,
        padding stripped) and the file length in bytes
    """
    h = hashlib.new(algo)
    length = 0
    with open(path, 'rb') as f:
        # Stream in fixed-size chunks so large files need not fit in memory
        # (stdlib iter() sentinel form replaces pip.utils.read_chunks).
        for block in iter(lambda: f.read(blocksize), b''):
            length += len(block)
            h.update(block)
    # Fix: the prefix was hard-coded to 'sha256=' even when a different
    # `algo` was requested; RECORD entries are "<algo>=<digest>".
    digest = algo + '=' + urlsafe_b64encode(
        h.digest()
    ).decode('latin1').rstrip('=')
    return (digest, length)
def open_for_csv(name, mode):
    """Open *name* for csv use with version-appropriate newline handling.
    Python 2's csv module wants binary mode; Python 3's wants text mode
    with universal newline translation disabled.
    """
    if sys.version_info[0] < 3:
        return open(name, mode + 'b')
    return open(name, mode, newline='')
def fix_script(path):
    """Replace a ``#!python`` shebang in *path* with the real interpreter.
    Return True if the file was changed, False if it had no ``#!python``
    line, None if *path* is not a regular file.
    """
    # XXX RECORD hashes will need to be updated
    if not os.path.isfile(path):
        return None
    with open(path, 'rb') as script:
        firstline = script.readline()
        if not firstline.startswith(b'#!python'):
            return False
        exename = sys.executable.encode(sys.getfilesystemencoding())
        firstline = b'#!' + exename + os.linesep.encode("ascii")
        rest = script.read()
    with open(path, 'wb') as script:
        script.write(firstline)
        script.write(rest)
    return True
# Matches "<name>-<version>.dist-info" directory names (version optional).
dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?)
                                \.dist-info$""", re.VERBOSE)
def root_is_purelib(name, wheeldir):
    """
    Return True if the extracted wheel in wheeldir should go into purelib.
    Looks for a "Root-Is-Purelib: true" line (case-insensitive) in the
    WHEEL metadata file of the matching .dist-info directory.
    """
    name_folded = name.replace("-", "_")
    for entry in os.listdir(wheeldir):
        match = dist_info_re.match(entry)
        if not match or match.group('name') != name_folded:
            continue
        wheel_path = os.path.join(wheeldir, entry, 'WHEEL')
        with open(wheel_path) as wheel_file:
            if any(line.lower().rstrip() == "root-is-purelib: true"
                   for line in wheel_file):
                return True
    return False
def get_entrypoints(filename):
    """Parse an entry_points.txt file.
    :param filename: path to the entry_points.txt of an installed wheel
    :return: a ``(console, gui)`` pair of dicts mapping script names to
        entry-point specs; both empty when the file does not exist.
    """
    if not os.path.exists(filename):
        return {}, {}
    # This is done because you can pass a string to entry_points wrappers
    # which means that they may or may not be valid INI files. The attempt
    # here is to strip leading and trailing whitespace in order to make
    # them valid INI files.
    with open(filename) as fp:
        data = StringIO()
        for line in fp:
            data.write(line.strip())
            data.write("\n")
        data.seek(0)
    cp = configparser.RawConfigParser()
    # Keep option names case-sensitive (script names are case-sensitive).
    cp.optionxform = lambda option: option
    # Fix: `readfp` was deprecated since Python 3.2 and removed in 3.12;
    # prefer `read_file` when available, keeping `readfp` for the old
    # Python 2 ConfigParser module.
    getattr(cp, "read_file", cp.readfp)(data)
    console = {}
    gui = {}
    if cp.has_section('console_scripts'):
        console = dict(cp.items('console_scripts'))
    if cp.has_section('gui_scripts'):
        gui = dict(cp.items('gui_scripts'))
    return console, gui
def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None,
                     pycompile=True, scheme=None, isolated=False, prefix=None):
    """Install a wheel: copy an unpacked wheel tree into the target scheme.
    :param name: distribution name, used to pick purelib vs platlib
    :param req: the install requirement; its .name locates the .dist-info
        directory and it appears in error messages
    :param wheeldir: directory holding the already-extracted wheel
    :param user: forwarded to distutils_scheme() when no scheme is given
    :param home: forwarded to distutils_scheme()
    :param root: forwarded to distutils_scheme()
    :param pycompile: byte-compile the copied sources before installing
    :param scheme: optional pre-computed install scheme mapping
    :param isolated: forwarded to distutils_scheme()
    :param prefix: forwarded to distutils_scheme()
    Side effects: copies files, generates script wrappers, and rewrites the
    distribution's INSTALLER and RECORD metadata files.
    """
    if not scheme:
        scheme = distutils_scheme(
            name, user=user, home=home, root=root, isolated=isolated,
            prefix=prefix,
        )
    if root_is_purelib(name, wheeldir):
        lib_dir = scheme['purelib']
    else:
        lib_dir = scheme['platlib']
    info_dir = []
    data_dirs = []
    source = wheeldir.rstrip(os.path.sep) + os.path.sep
    # Record details of the files moved
    #   installed = files copied from the wheel to the destination
    #   changed = files changed while installing (scripts #! line typically)
    #   generated = files newly generated during the install (script wrappers)
    installed = {}
    changed = set()
    generated = []
    # Compile all of the pyc files that we're going to be installing
    if pycompile:
        with captured_stdout() as stdout:
            with warnings.catch_warnings():
                warnings.filterwarnings('ignore')
                compileall.compile_dir(source, force=True, quiet=True)
        logger.debug(stdout.getvalue())
    def normpath(src, p):
        # Path of src relative to p, using '/' separators (RECORD format).
        return os.path.relpath(src, p).replace(os.path.sep, '/')
    def record_installed(srcfile, destfile, modified=False):
        """Map archive RECORD paths to installation RECORD paths."""
        oldpath = normpath(srcfile, wheeldir)
        newpath = normpath(destfile, lib_dir)
        installed[oldpath] = newpath
        if modified:
            changed.add(destfile)
    def clobber(source, dest, is_base, fixer=None, filter=None):
        # Copy the tree at `source` into `dest`; `is_base` marks the root
        # pass where .data and .dist-info directories get special handling.
        ensure_dir(dest)  # common for the 'include' path
        for dir, subdirs, files in os.walk(source):
            basedir = dir[len(source):].lstrip(os.path.sep)
            destdir = os.path.join(dest, basedir)
            if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'):
                continue
            for s in subdirs:
                destsubdir = os.path.join(dest, basedir, s)
                if is_base and basedir == '' and destsubdir.endswith('.data'):
                    data_dirs.append(s)
                    continue
                elif (is_base and
                        s.endswith('.dist-info') and
                        canonicalize_name(s).startswith(
                            canonicalize_name(req.name))):
                    assert not info_dir, ('Multiple .dist-info directories: ' +
                                          destsubdir + ', ' +
                                          ', '.join(info_dir))
                    info_dir.append(destsubdir)
            for f in files:
                # Skip unwanted files
                if filter and filter(f):
                    continue
                srcfile = os.path.join(dir, f)
                destfile = os.path.join(dest, basedir, f)
                # directory creation is lazy and after the file filtering above
                # to ensure we don't install empty dirs; empty dirs can't be
                # uninstalled.
                ensure_dir(destdir)
                # We use copyfile (not move, copy, or copy2) to be extra sure
                # that we are not moving directories over (copyfile fails for
                # directories) as well as to ensure that we are not copying
                # over any metadata because we want more control over what
                # metadata we actually copy over.
                shutil.copyfile(srcfile, destfile)
                # Copy over the metadata for the file, currently this only
                # includes the atime and mtime.
                st = os.stat(srcfile)
                if hasattr(os, "utime"):
                    os.utime(destfile, (st.st_atime, st.st_mtime))
                # If our file is executable, then make our destination file
                # executable.
                if os.access(srcfile, os.X_OK):
                    st = os.stat(srcfile)
                    permissions = (
                        st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH
                    )
                    os.chmod(destfile, permissions)
                changed = False
                if fixer:
                    changed = fixer(destfile)
                record_installed(srcfile, destfile, changed)
    clobber(source, lib_dir, True)
    assert info_dir, "%s .dist-info directory not found" % req
    # Get the defined entry points
    ep_file = os.path.join(info_dir[0], 'entry_points.txt')
    console, gui = get_entrypoints(ep_file)
    def is_entrypoint_wrapper(name):
        # EP, EP.exe and EP-script.py are scripts generated for
        # entry point EP by setuptools
        if name.lower().endswith('.exe'):
            matchname = name[:-4]
        elif name.lower().endswith('-script.py'):
            matchname = name[:-10]
        elif name.lower().endswith(".pya"):
            matchname = name[:-4]
        else:
            matchname = name
        # Ignore setuptools-generated scripts
        return (matchname in console or matchname in gui)
    for datadir in data_dirs:
        fixer = None
        filter = None
        for subdir in os.listdir(os.path.join(wheeldir, datadir)):
            fixer = None
            if subdir == 'scripts':
                fixer = fix_script
                filter = is_entrypoint_wrapper
            source = os.path.join(wheeldir, datadir, subdir)
            dest = scheme[subdir]
            clobber(source, dest, False, fixer=fixer, filter=filter)
    maker = ScriptMaker(None, scheme['scripts'])
    # Ensure old scripts are overwritten.
    # See https://github.com/pypa/pip/issues/1800
    maker.clobber = True
    # Ensure we don't generate any variants for scripts because this is almost
    # never what somebody wants.
    # See https://bitbucket.org/pypa/distlib/issue/35/
    maker.variants = set(('', ))
    # This is required because otherwise distlib creates scripts that are not
    # executable.
    # See https://bitbucket.org/pypa/distlib/issue/32/
    maker.set_mode = True
    # Simplify the script and fix the fact that the default script swallows
    # every single stack trace.
    # See https://bitbucket.org/pypa/distlib/issue/34/
    # See https://bitbucket.org/pypa/distlib/issue/33/
    def _get_script_text(entry):
        if entry.suffix is None:
            raise InstallationError(
                "Invalid script entry point: %s for req: %s - A callable "
                "suffix is required. Cf https://packaging.python.org/en/"
                "latest/distributing.html#console-scripts for more "
                "information." % (entry, req)
            )
        return maker.script_template % {
            "module": entry.prefix,
            "import_name": entry.suffix.split(".")[0],
            "func": entry.suffix,
        }
    maker._get_script_text = _get_script_text
    maker.script_template = """# -*- coding: utf-8 -*-
import re
import sys
from %(module)s import %(import_name)s
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(%(func)s())
"""
    # Special case pip and setuptools to generate versioned wrappers
    #
    # The issue is that some projects (specifically, pip and setuptools) use
    # code in setup.py to create "versioned" entry points - pip2.7 on Python
    # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into
    # the wheel metadata at build time, and so if the wheel is installed with
    # a *different* version of Python the entry points will be wrong. The
    # correct fix for this is to enhance the metadata to be able to describe
    # such versioned entry points, but that won't happen till Metadata 2.0 is
    # available.
    # In the meantime, projects using versioned entry points will either have
    # incorrect versioned entry points, or they will not be able to distribute
    # "universal" wheels (i.e., they will need a wheel per Python version).
    #
    # Because setuptools and pip are bundled with _ensurepip and virtualenv,
    # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we
    # override the versioned entry points in the wheel and generate the
    # correct ones. This code is purely a short-term measure until Metadata 2.0
    # is available.
    #
    # To add the level of hack in this section of code, in order to support
    # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment
    # variable which will control which version scripts get installed.
    #
    # ENSUREPIP_OPTIONS=altinstall
    #   - Only pipX.Y and easy_install-X.Y will be generated and installed
    # ENSUREPIP_OPTIONS=install
    #   - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note
    #     that this option is technically if ENSUREPIP_OPTIONS is set and is
    #     not altinstall
    # DEFAULT
    #   - The default behavior is to install pip, pipX, pipX.Y, easy_install
    #     and easy_install-X.Y.
    pip_script = console.pop('pip', None)
    if pip_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'pip = ' + pip_script
            generated.extend(maker.make(spec))
        if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall":
            spec = 'pip%s = %s' % (sys.version[:1], pip_script)
            generated.extend(maker.make(spec))
        spec = 'pip%s = %s' % (sys.version[:3], pip_script)
        generated.extend(maker.make(spec))
        # Delete any other versioned pip entry points
        pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)]
        for k in pip_ep:
            del console[k]
    easy_install_script = console.pop('easy_install', None)
    if easy_install_script:
        if "ENSUREPIP_OPTIONS" not in os.environ:
            spec = 'easy_install = ' + easy_install_script
            generated.extend(maker.make(spec))
        spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script)
        generated.extend(maker.make(spec))
        # Delete any other versioned easy_install entry points
        easy_install_ep = [
            k for k in console if re.match(r'easy_install(-\d\.\d)?$', k)
        ]
        for k in easy_install_ep:
            del console[k]
    # Generate the console and GUI entry points specified in the wheel
    if len(console) > 0:
        generated.extend(
            maker.make_multiple(['%s = %s' % kv for kv in console.items()])
        )
    if len(gui) > 0:
        generated.extend(
            maker.make_multiple(
                ['%s = %s' % kv for kv in gui.items()],
                {'gui': True}
            )
        )
    # Record pip as the installer
    installer = os.path.join(info_dir[0], 'INSTALLER')
    temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip')
    with open(temp_installer, 'wb') as installer_file:
        installer_file.write(b'pip\n')
    shutil.move(temp_installer, installer)
    generated.append(installer)
    # Record details of all files installed
    record = os.path.join(info_dir[0], 'RECORD')
    temp_record = os.path.join(info_dir[0], 'RECORD.pip')
    with open_for_csv(record, 'r') as record_in:
        with open_for_csv(temp_record, 'w+') as record_out:
            reader = csv.reader(record_in)
            writer = csv.writer(record_out)
            for row in reader:
                # Translate archive paths to installed paths.
                row[0] = installed.pop(row[0], row[0])
                if row[0] in changed:
                    row[1], row[2] = rehash(row[0])
                writer.writerow(row)
            for f in generated:
                h, l = rehash(f)
                writer.writerow((normpath(f, lib_dir), h, l))
            for f in installed:
                # Files copied but absent from the original RECORD get a
                # row with empty hash/size fields.
                writer.writerow((installed[f], '', ''))
    shutil.move(temp_record, record)
def _unique(fn):
@functools.wraps(fn)
def unique(*args, **kw):
seen = set()
for item in fn(*args, **kw):
if item not in seen:
seen.add(item)
yield item
return unique
# TODO: this goes somewhere besides the wheel module
@_unique
def uninstallation_paths(dist):
    """
    Yield all the uninstallation paths for dist based on RECORD-without-.pyc
    Yield paths to all the files in RECORD. For each .py file in RECORD, add
    the .pyc in the same directory.
    UninstallPathSet.add() takes care of the __pycache__ .pyc.
    """
    from pip.utils import FakeFile  # circular import
    record_lines = dist.get_metadata_lines('RECORD')
    for row in csv.reader(FakeFile(record_lines)):
        path = os.path.join(dist.location, row[0])
        yield path
        if path.endswith('.py'):
            directory, filename = os.path.split(path)
            yield os.path.join(directory, filename[:-3] + '.pyc')
def wheel_version(source_dir):
    """
    Return the Wheel-Version of an extracted wheel, if possible.
    Otherwise, return False if we couldn't parse / extract it.
    :param source_dir: directory containing the unpacked wheel metadata
    :return: version as a tuple of ints, or False on any failure
    """
    try:
        dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0]
        wheel_data = dist.get_metadata('WHEEL')
        wheel_data = Parser().parsestr(wheel_data)
        version = wheel_data['Wheel-Version'].strip()
        version = tuple(map(int, version.split('.')))
        return version
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Any lookup/parse failure means "unknown".
        return False
def check_compatibility(version, name):
    """
    Raises errors or warns if called with an incompatible Wheel-Version.
    Pip should refuse to install a Wheel-Version that's a major series
    ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when
    installing a version only minor version ahead (e.g 1.2 > 1.1).
    version: a 2-tuple representing a Wheel-Version (Major, Minor)
    name: name of wheel or package to raise exception about
    :raises UnsupportedWheel: when an incompatible Wheel-Version is given
    """
    if not version:
        # Empty/False version means the metadata could not be parsed.
        raise UnsupportedWheel(
            "%s is in an unsupported or invalid wheel" % name
        )
    major = version[0]
    if major > VERSION_COMPATIBLE[0]:
        raise UnsupportedWheel(
            "%s's Wheel-Version (%s) is not compatible with this version "
            "of pip" % (name, '.'.join(map(str, version)))
        )
    if version > VERSION_COMPATIBLE:
        # Same major series, newer minor: proceed but warn.
        logger.warning(
            'Installing from a newer Wheel-Version (%s)',
            '.'.join(map(str, version)),
        )
class Wheel(object):
    """A wheel file"""
    # TODO: maybe move the install code into this class
    wheel_file_re = re.compile(
        r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?))
        ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?)
        \.whl|\.dist-info)$""",
        re.VERBOSE
    )
    def __init__(self, filename):
        """
        :raises InvalidWheelFilename: when the filename is invalid for a wheel
        """
        wheel_info = self.wheel_file_re.match(filename)
        if wheel_info is None:
            raise InvalidWheelFilename(
                "%s is not a valid wheel filename." % filename
            )
        self.filename = filename
        # we'll assume "_" means "-" due to wheel naming scheme
        # (https://github.com/pypa/pip/issues/1150)
        self.name = wheel_info.group('name').replace('_', '-')
        self.version = wheel_info.group('ver').replace('_', '-')
        self.pyversions = wheel_info.group('pyver').split('.')
        self.abis = wheel_info.group('abi').split('.')
        self.plats = wheel_info.group('plat').split('.')
        # All the tag combinations from this file
        self.file_tags = set()
        for pyver in self.pyversions:
            for abi in self.abis:
                for plat in self.plats:
                    self.file_tags.add((pyver, abi, plat))
    def support_index_min(self, tags=None):
        """
        Return the lowest index that one of the wheel's file_tag combinations
        achieves in the supported_tags list e.g. if there are 8 supported tags,
        and one of the file tags is first in the list, then return 0. Returns
        None is the wheel is not supported.
        """
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        indexes = [tags.index(tag) for tag in self.file_tags if tag in tags]
        if not indexes:
            return None
        return min(indexes)
    def supported(self, tags=None):
        """Is this wheel supported on this system?"""
        if tags is None:  # for mock
            tags = pep425tags.supported_tags
        return any(tag in self.file_tags for tag in tags)
class WheelBuilder(object):
    """Build wheels from a RequirementSet."""
    def __init__(self, requirement_set, finder, build_options=None,
                 global_options=None):
        """
        :param requirement_set: the RequirementSet whose members to build
        :param finder: a PackageFinder (used to consult format control)
        :param build_options: extra options passed to `setup.py bdist_wheel`
        :param global_options: options inserted before the setup.py command
        """
        self.requirement_set = requirement_set
        self.finder = finder
        self._cache_root = requirement_set._wheel_cache._cache_dir
        self._wheel_dir = requirement_set.wheel_download_dir
        self.build_options = build_options or []
        self.global_options = global_options or []
    def _build_one(self, req, output_dir, python_tag=None):
        """Build one wheel.
        :return: The filename of the built wheel, or None if the build failed.
        """
        tempd = tempfile.mkdtemp('pip-wheel-')
        try:
            if self.__build_one(req, tempd, python_tag=python_tag):
                try:
                    wheel_name = os.listdir(tempd)[0]
                    wheel_path = os.path.join(output_dir, wheel_name)
                    shutil.move(os.path.join(tempd, wheel_name), wheel_path)
                    logger.info('Stored in directory: %s', output_dir)
                    return wheel_path
                except Exception:
                    # Fix: was a bare `except:`; a failed move falls through
                    # to the cleanup path below.
                    pass
            # Ignore return, we can't do anything else useful.
            self._clean_one(req)
            return None
        finally:
            rmtree(tempd)
    def _base_setup_args(self, req):
        # Run setup.py unbuffered (-u) through the setuptools shim so
        # distutils-only setup scripts also produce wheels.
        return [
            sys.executable, "-u", '-c',
            SETUPTOOLS_SHIM % req.setup_py
        ] + list(self.global_options)
    def __build_one(self, req, tempd, python_tag=None):
        """Invoke `setup.py bdist_wheel` into *tempd*; return success flag."""
        base_args = self._base_setup_args(req)
        spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
        with open_spinner(spin_message) as spinner:
            logger.debug('Destination directory: %s', tempd)
            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
                + self.build_options
            if python_tag is not None:
                wheel_args += ["--python-tag", python_tag]
            try:
                call_subprocess(wheel_args, cwd=req.setup_py_dir,
                                show_stdout=False, spinner=spinner)
                return True
            except Exception:
                # Fix: was a bare `except:`, which also caught
                # KeyboardInterrupt/SystemExit.
                spinner.finish("error")
                logger.error('Failed building wheel for %s', req.name)
                return False
    def _clean_one(self, req):
        """Run `setup.py clean --all` for *req*; return success flag."""
        base_args = self._base_setup_args(req)
        logger.info('Running setup.py clean for %s', req.name)
        clean_args = base_args + ['clean', '--all']
        try:
            call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
            return True
        except Exception:
            # Fix: was a bare `except:`.
            logger.error('Failed cleaning build dir for %s', req.name)
            return False
    def build(self, autobuilding=False):
        """Build wheels.
        :param autobuilding: If True, build wheels into the wheel cache and
            replace the sdist we built from with the newly built wheel, in
            preparation for installation. (The docstring previously
            documented a nonexistent ``unpack`` parameter.)
        :return: True if all the wheels built correctly.
        """
        assert self._wheel_dir or (autobuilding and self._cache_root)
        # unpack sdists and constructs req set
        self.requirement_set.prepare_files(self.finder)
        reqset = self.requirement_set.requirements.values()
        buildset = []
        for req in reqset:
            if req.constraint:
                continue
            if req.is_wheel:
                if not autobuilding:
                    logger.info(
                        'Skipping %s, due to already being wheel.', req.name)
            elif autobuilding and req.editable:
                pass
            elif autobuilding and req.link and not req.link.is_artifact:
                pass
            elif autobuilding and not req.source_dir:
                pass
            else:
                if autobuilding:
                    link = req.link
                    base, ext = link.splitext()
                    if pip.index.egg_info_matches(base, None, link) is None:
                        # Doesn't look like a package - don't autobuild a wheel
                        # because we'll have no way to lookup the result sanely
                        continue
                    if "binary" not in pip.index.fmt_ctl_formats(
                            self.finder.format_control,
                            canonicalize_name(req.name)):
                        logger.info(
                            "Skipping bdist_wheel for %s, due to binaries "
                            "being disabled for it.", req.name)
                        continue
                buildset.append(req)
        if not buildset:
            return True
        # Build the wheels.
        logger.info(
            'Building wheels for collected packages: %s',
            ', '.join([req.name for req in buildset]),
        )
        with indent_log():
            build_success, build_failure = [], []
            for req in buildset:
                python_tag = None
                if autobuilding:
                    python_tag = pep425tags.implementation_tag
                    output_dir = _cache_for_link(self._cache_root, req.link)
                    try:
                        ensure_dir(output_dir)
                    except OSError as e:
                        logger.warning("Building wheel for %s failed: %s",
                                       req.name, e)
                        build_failure.append(req)
                        continue
                else:
                    output_dir = self._wheel_dir
                wheel_file = self._build_one(
                    req, output_dir,
                    python_tag=python_tag,
                )
                if wheel_file:
                    build_success.append(req)
                    if autobuilding:
                        # XXX: This is mildly duplicative with prepare_files,
                        # but not close enough to pull out to a single common
                        # method.
                        # The code below assumes temporary source dirs -
                        # prevent it doing bad things.
                        if req.source_dir and not os.path.exists(os.path.join(
                                req.source_dir, PIP_DELETE_MARKER_FILENAME)):
                            raise AssertionError(
                                "bad source dir - missing marker")
                        # Delete the source we built the wheel from
                        req.remove_temporary_source()
                        # set the build directory again - name is known from
                        # the work prepare_files did.
                        req.source_dir = req.build_location(
                            self.requirement_set.build_dir)
                        # Update the link for this.
                        req.link = pip.index.Link(
                            path_to_url(wheel_file))
                        assert req.link.is_wheel
                        # extract the wheel into the dir
                        unpack_url(
                            req.link, req.source_dir, None, False,
                            session=self.requirement_set.session)
                else:
                    build_failure.append(req)
        # notify success/failure
        if build_success:
            logger.info(
                'Successfully built %s',
                ' '.join([req.name for req in build_success]),
            )
        if build_failure:
            logger.info(
                'Failed to build %s',
                ' '.join([req.name for req in build_failure]),
            )
        # Return True if all builds were successful
        return len(build_failure) == 0
| gpl-3.0 |
loop1024/pymo-global | android/pgs4a-0.9.6/buildlib/colorama/win32.py | 22 | 3779 |
# from winbase.h
# Win32 standard-handle identifiers accepted by GetStdHandle.
STDOUT = -11
STDERR = -12
try:
    from ctypes import windll
except ImportError:
    # ctypes (or the Windows DLL layer) is unavailable: expose `windll`
    # as None and a no-op SetConsoleTextAttribute so callers can import
    # and call this module unconditionally.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import (
        byref, Structure, c_char, c_short, c_uint32, c_ushort
    )
    # Console handles fetched once at import time, keyed by stream id.
    handles = {
        STDOUT: windll.kernel32.GetStdHandle(STDOUT),
        STDERR: windll.kernel32.GetStdHandle(STDERR),
    }
    # ctypes aliases matching the Win32 type names used in wincon.h.
    SHORT = c_short
    WORD = c_ushort
    DWORD = c_uint32
    TCHAR = c_char
    class COORD(Structure):
        """struct in wincon.h"""
        _fields_ = [
            ('X', SHORT),
            ('Y', SHORT),
        ]
    class SMALL_RECT(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("Left", SHORT),
            ("Top", SHORT),
            ("Right", SHORT),
            ("Bottom", SHORT),
        ]
    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", WORD),
            ("srWindow", SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Debug rendering of every field, in declaration order.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )
    def GetConsoleScreenBufferInfo(stream_id=STDOUT):
        # Return the CONSOLE_SCREEN_BUFFER_INFO for the given stream.
        # NOTE(review): the Win32 success flag is assigned but never
        # checked, so the struct may be zero-filled on failure.
        handle = handles[stream_id]
        csbi = CONSOLE_SCREEN_BUFFER_INFO()
        success = windll.kernel32.GetConsoleScreenBufferInfo(
            handle, byref(csbi))
        return csbi
    def SetConsoleTextAttribute(stream_id, attrs):
        # Apply the given character attributes (colors) to the stream.
        handle = handles[stream_id]
        return windll.kernel32.SetConsoleTextAttribute(handle, attrs)
    def SetConsoleCursorPosition(stream_id, position):
        # Move the console cursor; *position* is an ANSI-style (y, x) pair.
        position = COORD(*position)
        # If the position is out of range, do nothing.
        if position.Y <= 0 or position.X <= 0:
            return
        # Adjust for Windows' SetConsoleCursorPosition:
        #    1. being 0-based, while ANSI is 1-based.
        #    2. expecting (x,y), while ANSI uses (y,x).
        adjusted_position = COORD(position.Y - 1, position.X - 1)
        # Adjust for viewport's scroll position
        sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
        adjusted_position.Y += sr.Top
        adjusted_position.X += sr.Left
        # Resume normal processing
        handle = handles[stream_id]
        return windll.kernel32.SetConsoleCursorPosition(handle, adjusted_position)
    def FillConsoleOutputCharacter(stream_id, char, length, start):
        # Write *char* *length* times starting at COORD *start*; returns
        # the number of cells actually written.
        handle = handles[stream_id]
        char = TCHAR(char)
        length = DWORD(length)
        num_written = DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        success = windll.kernel32.FillConsoleOutputCharacterA(
            handle, char, length, start, byref(num_written))
        return num_written.value
    def FillConsoleOutputAttribute(stream_id, attr, length, start):
        ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
        handle = handles[stream_id]
        attribute = WORD(attr)
        length = DWORD(length)
        num_written = DWORD(0)
        # Note that this is hard-coded for ANSI (vs wide) bytes.
        return windll.kernel32.FillConsoleOutputAttribute(
            handle, attribute, length, start, byref(num_written))
| mit |
mkolar/pyblish | pyblish/plugin.py | 1 | 32233 | """Plug-in system
Works similar to how OSs look for executables; i.e. a number of
absolute paths are searched for a given match. The predicate for
executables is whether or not an extension matches a number of
options, such as ".exe" or ".bat".
In this system, the predicate is whether or not a fname starts
with "validate" and ends with ".py"
"""
# Standard library
import os
import sys
import time
import types
import logging
import inspect
import warnings
import contextlib
# Local library
from . import (
__version__,
version_info,
_registered_callbacks,
_registered_services,
_registered_plugins,
_registered_hosts,
_registered_paths,
)
from . import lib
from .vendor import iscompatible
log = logging.getLogger("pyblish.plugin")
class Provider(object):
    """Dependency provider
    This object is given a series of "services" that it then distributes
    to a passed function based on the function's argument signature.
    For example, the function func:`myfunc(a, b)` is given the services
    called "a" and "b", given they have previously been added to the provider.
    """
    def __init__(self):
        self._services = dict()
    def get(self, service):
        """Return the service registered under *service*, or None."""
        return self.services.get(service)
    @property
    def services(self):
        """Globally registered services overlaid with per-instance ones."""
        services = _registered_services.copy()
        services.update(self._services)
        # Forwards-compatibility alias
        services["asset"] = services["instance"]
        return services
    @classmethod
    def args(cls, func):
        """Return the positional argument names of *func*, minus self/cls.
        Fix: `inspect.getargspec` was removed in Python 3.11; use
        `getfullargspec` when available (its first element is the same
        positional-args list), falling back for Python 2.
        """
        argspec = getattr(inspect, "getfullargspec", None) or \
            inspect.getargspec
        return [a for a in argspec(func)[0]
                if a not in ("self", "cls")]
    def invoke(self, func):
        """Supply function `func` with objects to its signature
        Raises:
            KeyError if an argument asked for is not available
        Returns:
            Result of `func`
        """
        args = self.args(func)
        unavailable = [a for a in args if a not in self.services]
        if unavailable:
            raise KeyError("Unavailable service requested: %s" % unavailable)
        inject = dict((k, v) for k, v in self.services.items()
                      if k in args)
        return func(**inject)
    def inject(self, name, obj):
        """Register *obj* as the service called *name* on this provider."""
        self._services[name] = obj
def evaluate_pre11(plugin):
    """Determine whether the plug-in is pre-1.1"""
    plugin.__pre11__ = False
    # Alias the deprecated 1.0 method names onto their 1.1 equivalents,
    # removing the old attribute once aliased. The repair_* pair is
    # deprecated. Order matters: *_instance overrides *_context.
    for legacy, modern in (("process_context", "process"),
                           ("process_instance", "process"),
                           ("repair_context", "repair"),
                           ("repair_instance", "repair")):
        if hasattr(plugin, legacy):
            plugin.__pre11__ = True
            setattr(plugin, modern, getattr(plugin, legacy))
            delattr(plugin, legacy)
def evaluate_enabledness(plugin):
    """Determine whether the plug-in supports Context/Instance

    Inspects the argument names of `plugin.process`: the presence of
    "context", "instance" or the forwards-compatible "asset" name flags
    the corresponding capability on the class.
    """
    plugin.__contextEnabled__ = False
    plugin.__instanceEnabled__ = False

    # `inspect.getargspec` was removed in Python 3.11; prefer the
    # drop-in `getfullargspec` when available (Python 3), falling back
    # for Python 2 compatibility.
    argspec_func = getattr(inspect, "getfullargspec", None) or inspect.getargspec
    args_ = argspec_func(plugin.process).args

    if "instance" in args_:
        plugin.__instanceEnabled__ = True

    if "context" in args_:
        plugin.__contextEnabled__ = True

    # Forwards-compatibility with asset
    if "asset" in args_:
        plugin.__instanceEnabled__ = True
def append_logger(plugin):
    """Append logger to plugin

    The logger will include a plug-in's final name, as defined
    by the subclasser. For example, if a plug-in is defined, subclassing
    :class:`Plugin`, it's given name will be present in log records.
    """
    # Package name appended, for filtering of LogRecord instances
    qualified_name = "pyblish.%s.%s" % (plugin.__module__, plugin.__name__)

    plugin_logger = logging.getLogger(qualified_name)
    plugin_logger.setLevel(logging.DEBUG)
    # All messages are handled by root-logger
    plugin_logger.propagate = True

    plugin.log = plugin_logger
class MetaPlugin(type):
    """Rewrite plug-ins written prior to 1.1

    ..warning:: In case of plug-ins written prior to 1.1,
        that also process both instance and context,
        only the instance process will remain available.
    """

    def __init__(cls, *args, **kwargs):
        append_logger(cls)
        # Order matters: evaluate_pre11 may re-bind `process`, which
        # evaluate_enabledness then inspects.
        evaluate_pre11(cls)
        evaluate_enabledness(cls)
        return super(MetaPlugin, cls).__init__(*args, **kwargs)
@lib.log
class Plugin(object):
    """Base-class for plugins

    Attributes:
        hosts: Optionally limit a plug-in to one or more hosts
        families: Optionally limit a plug-in to one or more families
        label: Printed name of plug-in
        active: Whether or not to use plug-in during processing
        version: Optional version for forwards-compatibility.
            Pyblish is (currently not) using the version to allow
            for plug-ins incompatible with a particular running
            instance of Pyblish to co-exist alongside compatible
            versions.
        order: Order in which this plug-in is processed. This is
            used internally to control which plug-ins are processed
            before another so as to allow plug-ins to communicate
            with each other. E.g. one plug-in may provide critical
            information to another and so must be allowed to be
            processed first.
        optional: Whether or not plug-in can be skipped by the user.
        requires: Which version of Pyblish is required by this plug-in.
            Plug-ins requiring a version newer than the current version
            will not be loaded. 1.0.8 was when :attr:`Plugin.requires`
            was first introduced.
        actions: Actions associated to this plug-in

    """

    # Python 2 metaclass declaration; has no effect under Python 3
    __metaclass__ = MetaPlugin

    hosts = ["*"]
    families = ["*"]
    label = None
    active = True
    version = (0, 0, 0)
    order = -1
    optional = False
    requires = "pyblish>=1"
    actions = []

    # Unique identifier of the plug-in; its class name
    id = lib.classproperty(lambda cls: cls.__name__)

    def __str__(self):
        return self.label or type(self).__name__

    def __repr__(self):
        return u"%s.%s(%r)" % (__name__, type(self).__name__, self.__str__())

    def process(self):
        """Primary processing method

        This method is called whenever your plug-in is invoked
        and is injected with object relative to it's signature.

        E.g. process(self, context, instance) will have the current
        context and instance injected into it at run-time.

        Available objects:
            - context
            - instance
            - user
            - time

        Raises:
            Any error

        """
        pass

    def repair(self):
        """DEPRECATED"""
        pass
class Collector(Plugin):
    """Parse a given working scene for available Instances"""

    # First stage of publishing; see CollectorOrder
    order = 0
class Validator(Plugin):
    """Validate/check/test individual instance for correctness."""

    # Second stage of publishing; see ValidatorOrder
    order = 1
class Extractor(Plugin):
    """Physically separate Instance from Host into corresponding resources"""

    # Third stage of publishing; see ExtractorOrder
    order = 2
class Integrator(Plugin):
    """Integrates publishes into a pipeline"""

    # Final stage of publishing; see IntegratorOrder
    order = 3
# Canonical processing-order constants, mirroring the `order` attribute
# of the four implicit plug-in superclasses above.
CollectorOrder = 0
ValidatorOrder = 1
ExtractorOrder = 2
IntegratorOrder = 3
class ContextPlugin(Plugin):
    """Explicit plug-in processed once with the full Context"""

    def process(self, context):
        """Primary processing method

        Arguments:
            context (Context): Context with which to process

        """
class InstancePlugin(Plugin):
    """Explicit plug-in processed once per compatible Instance"""

    def process(self, instance):
        """Primary processing method

        Arguments:
            instance (Instance): Instance with which to process

        """
class MetaAction(type):
    """Inject additional metadata into Action

    Validates the class-level `on` attribute at definition time and
    records any problem on `__error__` (None when valid).
    """

    def __init__(cls, *args, **kwargs):
        cls.__error__ = None

        valid_on_values = ("all", "processed", "failed", "succeeded")
        if cls.on not in valid_on_values:
            cls.__error__ = (
                "Action had an unrecognised value "
                "for `on`: \"%s\"" % cls.on
            )

        return super(MetaAction, cls).__init__(*args, **kwargs)
@lib.log
class Action(object):
    """User-supplied interactive action

    Subclass this class and append to Plugin.actions in order
    to provide your users with optional, context sensitive
    functionality.

    Attributes:
        label: Optional label to display in place of class name.
        active: Whether or not to allow execution of action.
        on: When to enable this action; available options are:
            - "all": Always available (default).
            - "processed": The plug-in has been processed
            - "succeeded": The plug-in has been processed, and succeeded
            - "failed": The plug-in has been processed, and failed
        icon: Name, relative path or absolute path to image for
            use as an icon of this action. For relative paths,
            the current working directory of the host is used and
            names represent icons available via Awesome Icons.
            fortawesome.github.io/Font-Awesome/icons/

    """

    # Python 2 metaclass declaration; has no effect under Python 3
    __metaclass__ = MetaAction

    # Discriminator used by front-ends; see also Separator and Category
    __type__ = "action"

    label = None
    active = True
    on = "all"
    icon = None

    # Unique identifier of the action; its class name
    id = lib.classproperty(lambda cls: cls.__name__)

    def __str__(self):
        return self.label or type(self).__name__

    def __repr__(self):
        return u"%s.%s(%r)" % (__name__, type(self).__name__, self.__str__())

    def process(self):
        """Override with the action's behaviour; injected like Plugin.process"""
        pass
class Separator(Action):
    """Non-interactive action used to visually separate menu entries"""
    __type__ = "separator"
def Category(label):
    """Return a new Action subclass acting as a labelled category heading"""
    attributes = {
        "label": label,
        "__type__": "category",
    }
    return type("Category", (Action,), attributes)
@contextlib.contextmanager
def logger(handler):
    """Listen in on the global logger

    Temporarily attaches `handler` to the root logger at DEBUG level,
    restoring the previous level and removing the handler on exit.

    Arguments:
        handler (Handler): Custom handler with which to use
            to listen for log messages

    """
    root_logger = logging.getLogger()
    previous_level = root_logger.level

    root_logger.addHandler(handler)
    root_logger.setLevel(logging.DEBUG)

    try:
        yield
    finally:
        # Always restore the root logger, even on error
        root_logger.removeHandler(handler)
        root_logger.setLevel(previous_level)
def process(plugin, context, instance=None, action=None):
    """Produce a single result from a Plug-in

    Arguments:
        plugin(Plugin): Uninstantiated plug-in class
        context(Context): The current Context
        instance(Instance, optional): Instance to process
        action(str): Id of action to process, in place of plug-in.

    Returns:
        Dictionary of result

    """
    # Explicit plug-ins (ContextPlugin/InstancePlugin) declare what they
    # process via their base class; anything else goes through the legacy
    # signature-inspection path.
    if issubclass(plugin, (ContextPlugin, InstancePlugin)):
        return __explicit_process(plugin, context, instance, action)
    else:
        return __implicit_process(plugin, context, instance, action)
def __explicit_process(plugin, context, instance=None, action=None):
    """Produce result from explicit plug-in

    This is the primary internal mechanism for producing results
    from the processing of plug-in/instance pairs.

    This mechanism replaces :func:`__implicit_process`.
    """
    assert not (issubclass(plugin, InstancePlugin) and instance is None), (
        "Cannot process an InstancePlugin without an instance. This is a bug")

    result = {
        "success": False,
        "plugin": plugin,
        "instance": instance,
        "action": action,
        "error": None,
        "records": list(),
        "duration": None,
    }

    # When an action id is given, run the action instead of the plug-in
    if not action:
        runner = plugin().process
    else:
        actions = dict((a.id, a) for a in plugin.actions)
        action = actions[action] if action else None
        runner = action().process

    # Capture every log record emitted during the run
    records = list()
    handler = lib.MessageHandler(records)

    __start = time.time()

    try:
        with logger(handler):
            # ContextPlugins receive the context; everything else the instance
            runner(context if issubclass(plugin, ContextPlugin) else instance)
            result["success"] = True
    except Exception as error:
        # Notify listeners, attach traceback to the error, and record it
        lib.emit("pluginFailed", plugin=plugin, context=context,
                 instance=instance, error=error)
        lib.extract_traceback(error)
        result["error"] = error

    __end = time.time()

    for record in records:
        result["records"].append(record)

    result["duration"] = (__end - __start) * 1000  # ms

    # Results accumulate on the context for later inspection
    if "results" not in context.data:
        context.data["results"] = list()

    context.data["results"].append(result)

    return result
def __implicit_process(plugin, context, instance=None, action=None):
    """Produce result from implicit plug-in

    This is a fallback mechanism for backwards compatibility.
    An implicit plug-in are those subclassed from Collector,
    Validator, Extractor or Integrator.

    The mechanism which replaces this is :func:`__explicit_process`.
    """
    result = {
        "success": False,
        "plugin": plugin,
        "instance": instance,
        "action": action,
        "error": None,
        "records": list(),
        "duration": None,
    }

    # When an action id is given, run the action instead of the plug-in
    if not action:
        runner = plugin().process
    else:
        actions = dict((a.id, a) for a in plugin.actions)
        action = actions[action] if action else None
        runner = action().process

    # Capture every log record emitted during the run
    records = list()
    handler = lib.MessageHandler(records)

    # Arguments are dependency-injected based on the runner's signature
    provider = Provider()
    provider.inject("plugin", plugin)
    provider.inject("context", context)
    provider.inject("instance", instance)

    __start = time.time()

    try:
        with logger(handler):
            provider.invoke(runner)
            result["success"] = True
    except Exception as error:
        # Notify listeners, attach traceback to the error, and record it
        lib.emit("pluginFailed", plugin=plugin, context=context,
                 instance=instance, error=error)
        lib.extract_traceback(error)
        result["error"] = error

    __end = time.time()

    for record in records:
        result["records"].append(record)

    result["duration"] = (__end - __start) * 1000  # ms

    # Results accumulate on the context for later inspection
    if "results" not in context.data:
        context.data["results"] = list()

    context.data["results"].append(result)

    # Backwards compatibility
    result["asset"] = instance  # Deprecated key

    return result
def repair(plugin, context, instance=None):
    """Produce single result from repairing

    Instantiates `plugin` and dependency-injects its deprecated
    `repair` method; the result dictionary is appended to
    `context.data["results"]` and returned.
    """
    import time  # NOTE: shadows nothing here; `time` is also imported at module level

    if "results" not in context.data:
        context.data["results"] = list()

    result = {
        "success": False,
        "plugin": plugin,
        "instance": instance,
        "error": None,
        "records": list(),
        "duration": None
    }

    plugin = plugin()

    # Capture every log record emitted during the run
    records = list()
    handler = lib.MessageHandler(records)

    provider = Provider()
    provider.inject("context", context)
    provider.inject("instance", instance)

    __start = time.time()

    try:
        with logger(handler):
            provider.invoke(plugin.repair)
            result["success"] = True
    except Exception as error:
        # Attach traceback to the error and record it
        lib.extract_traceback(error)
        result["error"] = error

    __end = time.time()

    for record in records:
        result["records"].append(record)

    result["duration"] = (__end - __start) * 1000  # ms

    context.data["results"].append(result)

    return result
class _Dict(dict):
"""Temporary object during transition from set_data to data dictionary"""
def __init__(self, parent):
self._parent = parent
def __call__(self, key=None, default=None):
if key is None:
return self.copy()
if key == "name":
default = self._parent.name
return self.get(key, default)
class AbstractEntity(list):
    """Superclass for Context and Instance

    Subclasses a list (of child entities) and carries a callable
    `data` dictionary (see :class:`_Dict`).
    """

    def __init__(self):
        self.data = _Dict(self)
class Context(AbstractEntity):
    """Maintain a collection of Instances"""

    id = property(lambda self: "Context")

    def __contains__(self, key):
        """Support both Instance objects and `id` strings

        Example:
            >>> context = Context()
            >>> instance = context.create_instance("MyInstance")
            >>> "MyInstance" in context
            True
            >>> instance in context
            True
            >>> "NotExists" in context
            False
        """
        try:
            key = key.id
        except AttributeError:
            # `key` is a plain id string; previously a bare `except:`
            # here could mask unrelated errors raised by an `id` property.
            pass

        return any(child.id == key for child in self)

    def create_instance(self, name, **kwargs):
        """Convenience method of the following.

        >>> ctx = Context()
        >>> inst = Instance("name", parent=ctx)

        Example:
            >>> ctx = Context()
            >>> inst = ctx.create_instance(name="Name")

        """
        instance = Instance(name, parent=self)
        instance.data.update(kwargs)
        return instance

    def __getitem__(self, item):
        """Enable support for dict-like getting of children by id

        Example:
            >>> context = Context()
            >>> instance = context.create_instance("MyInstance")
            >>> assert context["MyInstance"].name == "MyInstance"
            >>> assert context[0].name == "MyInstance"
            >>> assert context.get("MyInstance").name == "MyInstance"

        """
        # Integers index like a plain list; anything else is treated as an id
        if isinstance(item, int):
            return super(Context, self).__getitem__(item)

        try:
            return next(c for c in self if c.id == item)
        except StopIteration:
            raise KeyError("%s not in list" % item)

    def get(self, key, default=None):
        """Return child by id, or `default` if not found"""
        try:
            return next(c for c in self if c.id == key)
        except StopIteration:
            return default
@lib.log
class Instance(AbstractEntity):
    """An in-memory representation of one or more files

    Examples include rigs, models.

    Arguments:
        name (str): Name of instance, typically used during
            extraction as name of resulting files.
        parent (AbstractEntity): Optional parent. This is
            supplied automatically when creating instances with
            :class:`Context.create_instance()`.

    Attributes:
        id (str): Unique identifier of instance
        name (str): Name of instance
        parent (AbstractEntity): Optional parent of instance

    """

    id = property(lambda self: self.name)

    def __eq__(self, other):
        # Equality by id only; two instances with the same name compare equal
        return self.id == getattr(other, "id", None)

    def __ne__(self, other):
        return self.id != getattr(other, "id", None)

    def __repr__(self):
        return u"%s.%s(\"%s\")" % (__name__, type(self).__name__, self)

    def __str__(self):
        return self.name

    def __init__(self, name, parent=None):
        super(Instance, self).__init__()
        # NOTE: `basestring` means this file targets Python 2
        assert isinstance(name, basestring)
        assert parent is None or isinstance(parent, AbstractEntity)
        self.name = name
        self.parent = parent

        self.data["name"] = name
        self.data["family"] = "default"

        if parent is not None:
            parent.append(self)

    @property
    def context(self):
        """Return top-level parent; the context"""
        parent = self.parent
        # Walk up until an entity without a `parent` attribute is reached
        # (Context has none), then assert we actually ended on a Context.
        while parent:
            try:
                parent = parent.parent
            except:
                break
        assert isinstance(parent, Context)
        return parent


# Forwards-compatibility alias
Asset = Instance
def current_host():
    """Return host last registered thru `register_host()`

    When running Pyblish from within a host, this function determines
    which host is running and returns the equivalent keyword.

    Example:
        >> # Running within Autodesk Maya
        >> current_host()
        "maya"
        >> # Running within Sidefx Houdini
        >> current_host()
        "houdini"
        >> # Running from an unknown software
        >> current_host()
        "unknown"

    """
    # Guard against an empty registry: `_registered_hosts[-1]` previously
    # raised IndexError here instead of returning the documented "unknown".
    if not _registered_hosts:
        return "unknown"
    return _registered_hosts[-1] or "unknown"
def register_callback(signal, callback):
    """Register a new callback

    Arguments:
        signal (string): Name of signal to register the callback with.
        callback (func): Function to execute when a signal is emitted.

    Raises:
        ValueError if `callback` is not callable.

    """
    if not hasattr(callback, "__call__"):
        raise ValueError("%s is not callable" % callback)

    # Create the signal's callback list on first registration
    _registered_callbacks.setdefault(signal, []).append(callback)
def deregister_callback(signal, callback):
    """Deregister a callback

    Arguments:
        signal (string): Name of signal to deregister the callback with.
        callback (func): Function to execute when a signal is emitted.

    Raises:
        KeyError on missing signal
        ValueError on missing callback

    """
    # dict lookup raises KeyError; list.remove raises ValueError
    _registered_callbacks[signal].remove(callback)
def deregister_all_callbacks():
    """Deregisters all callback"""
    _registered_callbacks.clear()
def registered_callbacks():
    """Returns registered callbacks

    .. note:: This returns the live registry, not a copy.
    """
    return _registered_callbacks
def register_plugin(plugin):
    """Register a new plug-in

    Arguments:
        plugin (Plugin): Plug-in to register

    Raises:
        TypeError if `plugin` is not callable

    """
    if not hasattr(plugin, "__call__"):
        raise TypeError("Plug-in must be callable "
                        "returning an instance of a class")

    if not plugin_is_valid(plugin):
        # Fixed: previously used logging-style `("...: %s", plugin)`,
        # which made the exception message a tuple instead of a string.
        raise TypeError("Plug-in invalid: %s" % plugin)

    if not version_is_compatible(plugin):
        raise TypeError(
            "Plug-in %s not compatible with "
            "this version (%s) of Pyblish." % (
                plugin, __version__))

    if not host_is_compatible(plugin):
        raise TypeError("Plug-in %s is not compatible "
                        "with this host" % plugin)

    _registered_plugins[plugin.__name__] = plugin
def deregister_plugin(plugin):
    """De-register an existing plug-in

    Arguments:
        plugin (Plugin): Existing plug-in to de-register

    """
    # Raises KeyError if the plug-in was never registered
    _registered_plugins.pop(plugin.__name__)
def deregister_all_plugins():
    """De-register all plug-ins"""
    _registered_plugins.clear()
@lib.deprecated
def register_service(name, obj):
    """Register a new service

    Arguments:
        name (str): Name of service
        obj (object): Any object

    """
    _registered_services[name] = obj
@lib.deprecated
def deregister_service(name):
    """De-register an existing service by name

    Arguments:
        name (str): Name of service

    """
    # Raises KeyError if the service was never registered
    _registered_services.pop(name)
@lib.deprecated
def deregister_all_services():
    """De-register all existing services"""
    _registered_services.clear()
@lib.deprecated
def registered_services():
    """Return the currently registered services as a dictionary

    .. note:: This returns a copy of the registered services
        and can therefore not be modified directly.

    """
    return _registered_services.copy()
def register_plugin_path(path):
    """Plug-ins are looked up at run-time from directories registered here

    To register a new directory, run this command along with the absolute
    path to where your plug-ins are located.

    Example:
        >>> import os
        >>> my_plugins = "/server/plugins"
        >>> register_plugin_path(my_plugins)
        '/server/plugins'

    Returns:
        Actual path added, including any post-processing

    """
    # Duplicate registration only logs a warning (returns None, not the path)
    if path in _registered_paths:
        return log.warning("Path already registered: {0}".format(path))

    _registered_paths.append(path)

    return path
def deregister_plugin_path(path):
    """Remove a pyblish._registered_paths path

    Raises:
        KeyError if `path` isn't registered

    """
    _registered_paths.remove(path)
def deregister_all_paths():
    """Mainly used in tests"""
    # In-place clear so existing references see the empty list
    _registered_paths[:] = []
def registered_paths():
    """Return paths added via registration

    ..note:: This returns a copy of the registered paths
        and can therefore not be modified directly.

    """
    return list(_registered_paths)
def registered_plugins():
    """Return plug-ins added via :func:`register_plugin`

    .. note:: This returns a copy of the registered plug-ins
        and can therefore not be modified directly

    """
    return _registered_plugins.values()
def register_host(host):
    """Register a new host

    Registered hosts are used to filter discovered
    plug-ins by host.

    Example:
        >>> register_host("My Host")
        >>> "My Host" in registered_hosts()
        True

    """
    # Idempotent: duplicates are silently ignored
    if host not in _registered_hosts:
        _registered_hosts.append(host)
def deregister_host(host, quiet=False):
    """Remove an already registered host

    Arguments:
        host (str): Name of host
        quiet (bool): Whether to raise an exception
            when attempting to remove a host that is
            not already registered.

    """
    try:
        _registered_hosts.remove(host)
    except Exception as e:
        # Swallow the error when `quiet` is requested
        if not quiet:
            raise e
def deregister_all_hosts():
    """De-register all hosts"""
    # In-place clear so existing references see the empty list
    _registered_hosts[:] = []
def registered_hosts():
    """Return the currently registered hosts

    .. note:: This returns a copy; modifying it has no effect on the registry.
    """
    return list(_registered_hosts)
def environment_paths():
    """Return paths added via environment variable

    Reads the os.pathsep-separated `PYBLISHPLUGINPATH` variable;
    returns an empty list when unset or empty.
    """
    plugin_path = os.environ.get("PYBLISHPLUGINPATH")

    if not plugin_path:
        return list()

    paths = plugin_path.split(os.pathsep)
    log.debug("Paths from environment: %s" % paths)

    return paths
def plugin_paths():
    """Collect paths from all sources.

    This function looks at the three potential sources of paths
    and returns a list with all of them together.

    The sources are:

    - Registered paths using :func:`register_plugin_path`,
    - Paths from the environment variable `PYBLISHPLUGINPATH`

    Returns:
        list of paths in which plugins may be located

    """
    paths = list()

    # Preserve order while de-duplicating; registered paths come first
    for path in registered_paths() + environment_paths():
        if path in paths:
            continue
        paths.append(path)

    return paths
def discover(type=None, regex=None, paths=None):
    """Find and return available plug-ins

    This function looks for files within paths registered via
    :func:`register_plugin_path` and those added to `PYBLISHPLUGINPATH`.

    It determines *type* - :class:`Selector`, :class:`Validator`,
    :class:`Extractor` or :class:`Conform` - based on whether it
    matches it's corresponding regular expression; e.g.
    "$validator_*^" for plug-ins of type Validator.

    Arguments:
        type (str, optional): !DEPRECATED! Only return plugins of
            specified type. E.g. validators, extractors. In None is specified,
            return all plugins. Available options are "selectors", validators",
            "extractors", "conformers", "collectors" and "integrators".
        regex (str, optional): Limit results to those matching `regex`.
            Matching is done on classes, as opposed to
            filenames, due to a file possibly hosting
            multiple plugins.
        paths (list, optional): Paths to discover plug-ins from.
            If no paths are provided, all paths are searched.

    """
    # Both legacy arguments are accepted but ignored
    if type is not None:
        warnings.warn("type argument has been deprecated and does nothing")

    if regex is not None:
        warnings.warn("discover(): regex argument "
                      "has been deprecated and does nothing")

    plugins = dict()

    # Include plug-ins from registered paths
    for path in paths or plugin_paths():
        path = os.path.normpath(path)
        if not os.path.isdir(path):
            continue

        for fname in os.listdir(path):
            # Names starting with an underscore are treated as private
            if fname.startswith("_"):
                continue

            abspath = os.path.join(path, fname)

            if not os.path.isfile(abspath):
                continue

            mod_name, mod_ext = os.path.splitext(fname)

            if not mod_ext == ".py":
                continue

            # Each file is executed into a fresh, anonymous module
            module = types.ModuleType(mod_name)
            module.__file__ = abspath

            try:
                # NOTE: `execfile` exists only on Python 2
                execfile(abspath, module.__dict__)

                # Store reference to original module, to avoid
                # garbage collection from collecting it's global
                # imports, such as `import os`.
                sys.modules[mod_name] = module

            except Exception as err:
                # A broken plug-in file is skipped, not fatal
                log.debug("Skipped: \"%s\" (%s)", mod_name, err)
                continue

            for plugin in plugins_from_module(module):
                if plugin.id in plugins:
                    log.debug("Duplicate plug-in found: %s", plugin)
                    continue

                plugins[plugin.id] = plugin

    # Include plug-ins from registration.
    # Directly registered plug-ins take precedence.
    for name, plugin in _registered_plugins.items():
        if name in plugins:
            log.debug("Duplicate plug-in found: %s", plugin)
            continue
        plugins[name] = plugin

    plugins = list(plugins.values())
    sort(plugins)  # In-place

    return plugins
def plugins_from_module(module):
    """Return plug-ins from module

    Arguments:
        module (types.ModuleType): Imported module from which to
            parse valid Pyblish plug-ins.

    Returns:
        List of plug-ins, or empty list if none is found.

    """
    plugins = list()

    for name in dir(module):
        # Private names are skipped
        if name.startswith("_"):
            continue

        # It could be anything at this point
        obj = getattr(module, name)

        if not inspect.isclass(obj):
            continue

        if not issubclass(obj, Plugin):
            continue

        if not plugin_is_valid(obj):
            log.debug("Plug-in invalid: %s", obj)
            continue

        if not version_is_compatible(obj):
            log.debug("Plug-in %s not compatible with "
                      "this version (%s) of Pyblish." % (
                          obj, __version__))
            continue

        # Host-incompatible plug-ins are silently dropped
        if not host_is_compatible(obj):
            continue

        plugins.append(obj)

    return plugins
def plugin_is_valid(plugin):
    """Determine whether or not plug-in `plugin` is valid

    Checks that `requires` is a string, and `families`/`hosts` are
    lists of strings. Logs the reason and returns False on the first
    failing check.

    Arguments:
        plugin (Plugin): Plug-in to assess

    """
    if not isinstance(plugin.requires, basestring):
        log.debug("Plug-in requires must be of type string: %s", plugin)
        return False

    if not isinstance(plugin.families, list):
        # Fixed typo in log message ("stirngs" -> "strings")
        log.debug(".families must be list of strings")
        return False

    if not isinstance(plugin.hosts, list):
        log.debug(".hosts must be list of strings")
        return False

    for family in plugin.families:
        if not isinstance(family, basestring):
            log.debug("Families must be string")
            return False

    for host in plugin.hosts:
        if not isinstance(host, basestring):
            log.debug("Hosts must be string")
            return False

    return True
def version_is_compatible(plugin):
    """Lookup compatibility between plug-in and current version of Pyblish

    Arguments:
        plugin (Plugin): Plug-in to test against

    """
    # `plugin.requires` is a pip-style requirement string, e.g. "pyblish>=1"
    if not iscompatible.iscompatible(requirements=plugin.requires,
                                     version=version_info):
        return False
    return True
def host_is_compatible(plugin):
    """Determine whether plug-in `plugin` is compatible with the current host

    Available hosts are determined by :func:`registered_hosts`.

    Arguments:
        plugin (Plugin): Plug-in to assess.

    """
    # "*" acts as a wildcard matching every host
    if "*" in plugin.hosts:
        return True

    return any(host in plugin.hosts for host in registered_hosts())
def sort(plugins):
    """Sort `plugins` in-place

    Their order is determined by their `order` attribute,
    which defaults to their standard execution order:

        1. Selection
        2. Validation
        3. Extraction
        4. Conform

    *But may be overridden.

    Arguments:
        plugins (list): Plug-ins to sort

    """
    if not isinstance(plugins, list):
        raise TypeError("plugins must be of type list")

    def _by_order(plugin):
        return plugin.order

    plugins.sort(key=_by_order)
    return plugins
| lgpl-3.0 |
giacomo-dantonio/hermann | spi_test.py | 1 | 1101 | #!/usr/bin/env python
#
# Bitbang'd SPI interface with an MCP3008 ADC device
# MCP3008 is 8-channel 10-bit analog to digital converter
# Connections are:
# CLK => SCLK
# DOUT => MISO
# DIN => MOSI
# CS => CE0
import time
import sys
import spidev
# Open SPI bus 0, chip-select 0 (CE0) and limit the clock to 500 kHz
spi = spidev.SpiDev()
spi.open(0,0)
spi.max_speed_hz = 500000
def buildReadCommand(channel):
    """Return the 3-byte MCP3008 single-ended read command for `channel`

    Byte layout: start bit, then single-ended flag with the channel
    number in the upper nibble, then a zero filler byte.
    """
    START_BIT = 0x01
    SINGLE_ENDED = 0x08
    control_byte = SINGLE_ENDED | (channel << 4)
    return [START_BIT, control_byte, 0]
def processAdcValue(result):
    '''Take in result as array of three bytes.
       Return the two lowest bits of the 2nd byte and
       all of the third byte'''
    high_bits = result[1] & 0x03
    low_byte = result[2]
    # Combine into the 10-bit ADC reading
    return (high_bits << 8) | low_byte
def readAdc(channel):
    """Read the 10-bit value of MCP3008 channel 0-7; -1 on bad channel"""
    if ((channel > 7) or (channel < 0)):
        return -1
    # Full-duplex transfer: send the read command, receive three bytes back
    r = spi.xfer2(buildReadCommand(channel))
    return processAdcValue(r)
if __name__ == '__main__':
    # Poll channel 0 every 5 seconds until interrupted with Ctrl+C.
    # NOTE: the `print` statement below is Python 2 syntax.
    try:
        while True:
            val = readAdc(0)
            print "ADC Result: ", str(val)
            time.sleep(5)
    except KeyboardInterrupt:
        # Release the SPI device before exiting
        spi.close()
        sys.exit(0)
| gpl-2.0 |
blazek/QGIS | python/plugins/processing/algs/qgis/Climb.py | 15 | 8653 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
Climb
begin : 2019-05-15
copyright : (C) 2019 by Håvard Tveite
email : havard.tveite@nmbu.no
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
__author__ = 'Håvard Tveite'
__date__ = '2019-03-01'
__copyright__ = '(C) 2019 by Håvard Tveite'
import os
import math
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtCore import QVariant
from qgis.core import (QgsProcessing,
QgsFeatureSink,
QgsProcessingAlgorithm,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterFeatureSink,
QgsProcessingOutputNumber,
QgsProcessingException,
QgsProcessingUtils,
QgsWkbTypes,
QgsFields,
QgsField)
from processing.algs.qgis.QgisAlgorithm import QgisAlgorithm
class Climb(QgisAlgorithm):
    """QGIS Processing algorithm: total climb/descent along 3D lines

    For each line feature with Z values, accumulates the positive
    (climb) and negative (descent) elevation changes between successive
    vertices, plus min/max elevation, and writes them as new attributes
    prepended to the feature's existing ones.
    """

    INPUT = 'INPUT'
    OUTPUT = 'OUTPUT'
    TOTALCLIMB = 'TOTALCLIMB'
    TOTALDESCENT = 'TOTALDESCENT'
    MINELEVATION = 'MINELEVATION'
    MAXELEVATION = 'MAXELEVATION'
    # Names of the attribute fields added to the output layer
    CLIMBATTRIBUTE = 'climb'
    DESCENTATTRIBUTE = 'descent'
    MINELEVATTRIBUTE = 'minelev'
    MAXELEVATTRIBUTE = 'maxelev'

    def name(self):
        # Machine-readable algorithm identifier
        return 'climbalongline'

    def displayName(self):
        return self.tr('Climb Along Line')

    def group(self):
        return self.tr('Vector analysis')

    def groupId(self):
        return 'vectoranalysis'

    def __init__(self):
        super().__init__()

    def initAlgorithm(self, config=None):
        """Declare the input line layer, output sink and numeric outputs"""
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                self.INPUT,
                self.tr('Line layer'),
                [QgsProcessing.TypeVectorLine]
            )
        )
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                self.OUTPUT,
                self.tr('Climb layer')
            )
        )
        self.addOutput(
            QgsProcessingOutputNumber(
                self.TOTALCLIMB,
                self.tr('Total climb')
            )
        )
        self.addOutput(
            QgsProcessingOutputNumber(
                self.TOTALDESCENT,
                self.tr('Total descent')
            )
        )
        self.addOutput(
            QgsProcessingOutputNumber(
                self.MINELEVATION,
                self.tr('Minimum elevation')
            )
        )
        self.addOutput(
            QgsProcessingOutputNumber(
                self.MAXELEVATION,
                self.tr('Maximum elevation')
            )
        )

    def processAlgorithm(self, parameters, context, feedback):
        """Accumulate per-feature and layer-wide climb/descent statistics"""
        source = self.parameterAsSource(
            parameters,
            self.INPUT,
            context
        )
        fcount = source.featureCount()
        source_fields = source.fields()
        # The algorithm is meaningless without Z values
        hasZ = QgsWkbTypes.hasZ(source.wkbType())
        if not hasZ:
            raise QgsProcessingException(self.tr('The layer does not have Z values. If you have a DEM, use the Drape algorithm to extract Z values.'))
        thefields = QgsFields()
        climbindex = -1
        descentindex = -1
        minelevindex = -1
        maxelevindex = -1
        fieldnumber = 0
        # Create new fields for climb and descent
        thefields.append(QgsField(self.CLIMBATTRIBUTE, QVariant.Double))
        thefields.append(QgsField(self.DESCENTATTRIBUTE, QVariant.Double))
        thefields.append(QgsField(self.MINELEVATTRIBUTE, QVariant.Double))
        thefields.append(QgsField(self.MAXELEVATTRIBUTE, QVariant.Double))
        # combine all the vector fields (new fields come first)
        out_fields = QgsProcessingUtils.combineFields(thefields, source_fields)
        layerwithz = source
        (sink, dest_id) = self.parameterAsSink(parameters,
                                               self.OUTPUT,
                                               context,
                                               out_fields,
                                               layerwithz.wkbType(),
                                               source.sourceCrs())
        # get features from source (with z values)
        features = layerwithz.getFeatures()
        totalclimb = 0
        totaldescent = 0
        minelevation = float('Infinity')
        maxelevation = float('-Infinity')
        # Diagnostics collected for the post-run report
        no_z_nodes = []
        no_geometry = []
        for current, feature in enumerate(features):
            if feedback.isCanceled():
                break
            climb = 0
            descent = 0
            minelev = float('Infinity')
            maxelev = float('-Infinity')
            # In case of multigeometries we need to do the parts
            parts = feature.geometry().constParts()
            partnumber = 0
            if not feature.hasGeometry():
                no_geometry.append(self.tr(
                    'Feature: {feature_id}'.format(
                        feature_id=feature.id())
                )
                )
            for part in parts:
                # Calculate the climb
                first = True
                zval = 0
                for idx, v in enumerate(part.vertices()):
                    zval = v.z()
                    # NaN Z values are reported and skipped
                    if math.isnan(zval):
                        no_z_nodes.append(self.tr(
                            'Feature: {feature_id}, part: {part_id}, point: {point_id}'.format(
                                feature_id=feature.id(),
                                part_id=partnumber,
                                point_id=idx)
                        )
                        )
                        continue
                    if first:
                        prevz = zval
                        minelev = zval
                        maxelev = zval
                        first = False
                    else:
                        # Positive deltas add to climb, negative to descent
                        diff = zval - prevz
                        if diff > 0:
                            climb = climb + diff
                        else:
                            descent = descent - diff
                        if minelev > zval:
                            minelev = zval
                        if maxelev < zval:
                            maxelev = zval
                    prevz = zval
                totalclimb = totalclimb + climb
                totaldescent = totaldescent + descent
                partnumber += 1
            # Set the attribute values
            attrs = []
            # Append the attributes to the end of the existing ones
            attrs.append(climb)
            attrs.append(descent)
            attrs.append(minelev)
            attrs.append(maxelev)
            attrs.extend(feature.attributes())
            # Set the final attribute list
            feature.setAttributes(attrs)
            # Add a feature to the sink
            sink.addFeature(feature, QgsFeatureSink.FastInsert)
            if minelevation > minelev:
                minelevation = minelev
            if maxelevation < maxelev:
                maxelevation = maxelev
            # Update the progress bar
            if fcount > 0:
                feedback.setProgress(int(100 * current / fcount))
        feedback.pushInfo(self.tr(
            'The following features do not have geometry: {no_geometry_report}'.format(
                no_geometry_report=(', '.join(no_geometry)))
        )
        )
        feedback.pushInfo(self.tr(
            'The following points do not have Z values: {no_z_report}'.format(
                no_z_report=(', '.join(no_z_nodes)))
        )
        )
        # Return the results
        return {self.OUTPUT: dest_id, self.TOTALCLIMB: totalclimb,
                self.TOTALDESCENT: totaldescent,
                self.MINELEVATION: minelevation,
                self.MAXELEVATION: maxelevation}
| gpl-2.0 |
davidvon/pipa-pay-server | site-packages/pip/_vendor/requests/sessions.py | 294 | 22290 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
from collections import Mapping
from datetime import datetime
from .auth import _basic_auth_str
from .compat import cookielib, OrderedDict, urljoin, urlparse, builtin_str
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from .utils import to_key_val_list, default_headers, to_native_string
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """
    Determines appropriate setting for a given request, taking into account the
    explicit setting on that request, and the setting in the session. If a
    setting is a dictionary, they will be merged together using `dict_class`
    """

    if session_setting is None:
        return request_setting

    if request_setting is None:
        return session_setting

    # Bypass if not a dictionary (e.g. verify)
    if not (
        isinstance(session_setting, Mapping) and
        isinstance(request_setting, Mapping)
    ):
        return request_setting

    merged_setting = dict_class(to_key_val_list(session_setting))
    merged_setting.update(to_key_val_list(request_setting))

    # Remove keys that are set to None. Collect the keys first to avoid
    # mutating the dict while iterating. Filtering in place (rather than
    # rebuilding via dict(...)) preserves `dict_class` and its ordering,
    # which the previous plain-dict rebuild silently discarded.
    none_keys = [k for (k, v) in merged_setting.items() if v is None]
    for key in none_keys:
        del merged_setting[key]

    return merged_setting
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merge request-level and session-level hooks.

    A hooks mapping whose 'response' list is empty is treated the same as a
    missing one: merging {'response': []} naively would otherwise wipe out
    the Session's hooks entirely.
    """
    def _is_blank(hooks):
        # "Absent" and "registered but empty" are equivalent here.
        return hooks is None or hooks.get('response') == []

    if _is_blank(session_hooks):
        return request_hooks
    if _is_blank(request_hooks):
        return session_hooks
    return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
    """Mixin that adds redirect-following behaviour to :class:`Session`.

    Expects the host class to provide ``max_redirects``, ``cookies``,
    ``trust_env`` and a ``send()`` method.
    """
    def resolve_redirects(self, resp, req, stream=False, timeout=None,
                          verify=True, cert=None, proxies=None):
        """Receives a Response. Returns a generator of Responses.

        Each yielded Response is the result of following one redirect hop;
        iteration stops once a non-redirect response is received or
        ``max_redirects`` is exceeded (which raises TooManyRedirects).
        """
        i = 0
        while resp.is_redirect:
            prepared_request = req.copy()
            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                # Body could not be fully read; drain the raw stream instead.
                resp.raw.read(decode_content=False)
            if i >= self.max_redirects:
                raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects)
            # Release the connection back into the pool.
            resp.close()
            url = resp.headers['location']
            method = req.method
            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith('//'):
                parsed_rurl = urlparse(resp.url)
                url = '%s:%s' % (parsed_rurl.scheme, url)
            # The scheme should be lower case...
            parsed = urlparse(url)
            url = parsed.geturl()
            # Facilitate non-RFC2616-compliant 'location' headers
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not urlparse(url).netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)
            prepared_request.url = to_native_string(url)
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4
            if (resp.status_code == codes.see_other and
                    method != 'HEAD'):
                method = 'GET'
            # Do what the browsers do, despite standards...
            # First, turn 302s into GETs.
            if resp.status_code == codes.found and method != 'HEAD':
                method = 'GET'
            # Second, if a POST is responded to with a 301, turn it into a GET.
            # This bizarre behaviour is explained in Issue 1704.
            if resp.status_code == codes.moved and method == 'POST':
                method = 'GET'
            prepared_request.method = method
            # https://github.com/kennethreitz/requests/issues/1084
            if resp.status_code not in (codes.temporary, codes.resume):
                # The redirected request must not resend the original body.
                if 'Content-Length' in prepared_request.headers:
                    del prepared_request.headers['Content-Length']
                prepared_request.body = None
            headers = prepared_request.headers
            try:
                # Drop the stale Cookie header; cookies are re-prepared below
                # from the session jar plus any cookies set by this response.
                del headers['Cookie']
            except KeyError:
                pass
            extract_cookies_to_jar(prepared_request._cookies, prepared_request, resp.raw)
            prepared_request._cookies.update(self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)
            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)
            # Override the original request.
            req = prepared_request
            resp = self.send(
                req,
                stream=stream,
                timeout=timeout,
                verify=verify,
                cert=cert,
                proxies=proxies,
                allow_redirects=False,
            )
            extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
            i += 1
            yield resp
    def rebuild_auth(self, prepared_request, response):
        """
        When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        Mutates *prepared_request* in place.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        if 'Authorization' in headers:
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            original_parsed = urlparse(response.request.url)
            redirect_parsed = urlparse(url)
            if (original_parsed.hostname != redirect_parsed.hostname):
                del headers['Authorization']
        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)
        return
    def rebuild_proxies(self, prepared_request, proxies):
        """
        This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).
        This method also replaces the Proxy-Authorization header where
        necessary.

        :returns: the (possibly updated) proxy mapping for the next request.
        """
        headers = prepared_request.headers
        url = prepared_request.url
        scheme = urlparse(url).scheme
        new_proxies = proxies.copy() if proxies is not None else {}
        if self.trust_env and not should_bypass_proxies(url):
            environ_proxies = get_environ_proxies(url)
            proxy = environ_proxies.get(scheme)
            if proxy:
                new_proxies.setdefault(scheme, environ_proxies[scheme])
        if 'Proxy-Authorization' in headers:
            del headers['Proxy-Authorization']
        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            # No proxy configured for this scheme; nothing to authenticate.
            username, password = None, None
        if username and password:
            headers['Proxy-Authorization'] = _basic_auth_str(username, password)
        return new_proxies
class Session(SessionRedirectMixin):
    """A Requests session.
    Provides cookie persistence, connection-pooling, and configuration.
    Basic Usage::
      >>> import requests
      >>> s = requests.Session()
      >>> s.get('http://httpbin.org/get')
      200
    """
    #: Attributes preserved by __getstate__/__setstate__ when pickling.
    __attrs__ = [
        'headers', 'cookies', 'auth', 'timeout', 'proxies', 'hooks',
        'params', 'verify', 'cert', 'prefetch', 'adapters', 'stream',
        'trust_env', 'max_redirects']
    def __init__(self):
        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()
        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None
        #: Dictionary mapping protocol to the URL of the proxy (e.g.
        #: {'http': 'foo.bar:3128'}) to be used on each
        #: :class:`Request <Request>`.
        self.proxies = {}
        #: Event-handling hooks.
        self.hooks = default_hooks()
        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}
        #: Stream response content default.
        self.stream = False
        #: SSL Verification default.
        self.verify = True
        #: SSL certificate default.
        self.cert = None
        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT
        #: Should we trust the environment?
        self.trust_env = True
        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})
        # Default connection adapters.
        self.adapters = OrderedDict()
        self.mount('https://', HTTPAdapter())
        self.mount('http://', HTTPAdapter())
    def __enter__(self):
        return self
    def __exit__(self, *args):
        # Close all mounted adapters (and their pooled connections).
        self.close()
    def prepare_request(self, request):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for
        transmission and returns it. The :class:`PreparedRequest` has settings
        merged from the :class:`Request <Request>` instance and those of the
        :class:`Session`.
        :param request: :class:`Request` instance to prepare with this
            session's settings.
        """
        cookies = request.cookies or {}
        # Bootstrap CookieJar.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)
        # Merge with session cookies
        merged_cookies = merge_cookies(
            merge_cookies(RequestsCookieJar(), self.cookies), cookies)
        # Set environment's basic authentication if not explicitly set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)
        p = PreparedRequest()
        p.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return p
    def request(self, method, url,
        params=None,
        data=None,
        headers=None,
        cookies=None,
        files=None,
        auth=None,
        timeout=None,
        allow_redirects=True,
        proxies=None,
        hooks=None,
        stream=None,
        verify=None,
        cert=None):
        """Constructs a :class:`Request <Request>`, prepares it and sends it.
        Returns :class:`Response <Response>` object.
        :param method: method for the new :class:`Request` object.
        :param url: URL for the new :class:`Request` object.
        :param params: (optional) Dictionary or bytes to be sent in the query
            string for the :class:`Request`.
        :param data: (optional) Dictionary or bytes to send in the body of the
            :class:`Request`.
        :param headers: (optional) Dictionary of HTTP Headers to send with the
            :class:`Request`.
        :param cookies: (optional) Dict or CookieJar object to send with the
            :class:`Request`.
        :param files: (optional) Dictionary of 'filename': file-like-objects
            for multipart encoding upload.
        :param auth: (optional) Auth tuple or callable to enable
            Basic/Digest/Custom HTTP Auth.
        :param timeout: (optional) Float describing the timeout of the
            request in seconds.
        :param allow_redirects: (optional) Boolean. Set to True by default.
        :param proxies: (optional) Dictionary mapping protocol to the URL of
            the proxy.
        :param stream: (optional) whether to immediately download the response
            content. Defaults to ``False``.
        :param verify: (optional) if ``True``, the SSL cert will be verified.
            A CA_BUNDLE path can also be provided.
        :param cert: (optional) if String, path to ssl client cert file (.pem).
            If Tuple, ('cert', 'key') pair.
        """
        # Coerce the method to the native string type for this interpreter.
        method = builtin_str(method)
        # Create the Request.
        req = Request(
            method = method.upper(),
            url = url,
            headers = headers,
            files = files,
            data = data or {},
            params = params or {},
            auth = auth,
            cookies = cookies,
            hooks = hooks,
        )
        prep = self.prepare_request(req)
        proxies = proxies or {}
        # Gather clues from the surrounding environment.
        if self.trust_env:
            # Set environment's proxies.
            env_proxies = get_environ_proxies(url) or {}
            for (k, v) in env_proxies.items():
                proxies.setdefault(k, v)
            # Look for configuration.
            if not verify and verify is not False:
                verify = os.environ.get('REQUESTS_CA_BUNDLE')
            # Curl compatibility.
            if not verify and verify is not False:
                verify = os.environ.get('CURL_CA_BUNDLE')
        # Merge all the kwargs.
        proxies = merge_setting(proxies, self.proxies)
        stream = merge_setting(stream, self.stream)
        verify = merge_setting(verify, self.verify)
        cert = merge_setting(cert, self.cert)
        # Send the request.
        send_kwargs = {
            'stream': stream,
            'timeout': timeout,
            'verify': verify,
            'cert': cert,
            'proxies': proxies,
            'allow_redirects': allow_redirects,
        }
        resp = self.send(prep, **send_kwargs)
        return resp
    def get(self, url, **kwargs):
        """Sends a GET request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('GET', url, **kwargs)
    def options(self, url, **kwargs):
        """Sends a OPTIONS request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', True)
        return self.request('OPTIONS', url, **kwargs)
    def head(self, url, **kwargs):
        """Sends a HEAD request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        kwargs.setdefault('allow_redirects', False)
        return self.request('HEAD', url, **kwargs)
    def post(self, url, data=None, **kwargs):
        """Sends a POST request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('POST', url, data=data, **kwargs)
    def put(self, url, data=None, **kwargs):
        """Sends a PUT request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PUT', url, data=data, **kwargs)
    def patch(self, url, data=None, **kwargs):
        """Sends a PATCH request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('PATCH', url, data=data, **kwargs)
    def delete(self, url, **kwargs):
        """Sends a DELETE request. Returns :class:`Response` object.
        :param url: URL for the new :class:`Request` object.
        :param \*\*kwargs: Optional arguments that ``request`` takes.
        """
        return self.request('DELETE', url, **kwargs)
    def send(self, request, **kwargs):
        """Send a given PreparedRequest."""
        # Set defaults that the hooks can utilize to ensure they always have
        # the correct parameters to reproduce the previous request.
        kwargs.setdefault('stream', self.stream)
        kwargs.setdefault('verify', self.verify)
        kwargs.setdefault('cert', self.cert)
        kwargs.setdefault('proxies', self.proxies)
        # It's possible that users might accidentally send a Request object.
        # Guard against that specific failure case.
        if not isinstance(request, PreparedRequest):
            raise ValueError('You can only send PreparedRequests.')
        # Set up variables needed for resolve_redirects and dispatching of hooks
        allow_redirects = kwargs.pop('allow_redirects', True)
        stream = kwargs.get('stream')
        timeout = kwargs.get('timeout')
        verify = kwargs.get('verify')
        cert = kwargs.get('cert')
        proxies = kwargs.get('proxies')
        hooks = request.hooks
        # Get the appropriate adapter to use
        adapter = self.get_adapter(url=request.url)
        # Start time (approximately) of the request
        start = datetime.utcnow()
        # Send the request
        r = adapter.send(request, **kwargs)
        # Total elapsed time of the request (approximately)
        r.elapsed = datetime.utcnow() - start
        # Response manipulation hooks
        r = dispatch_hook('response', hooks, r, **kwargs)
        # Persist cookies
        if r.history:
            # If the hooks create history then we want those cookies too
            for resp in r.history:
                extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
        extract_cookies_to_jar(self.cookies, request, r.raw)
        # Redirect resolving generator.
        gen = self.resolve_redirects(r, request,
                                     stream=stream,
                                     timeout=timeout,
                                     verify=verify,
                                     cert=cert,
                                     proxies=proxies)
        # Resolve redirects if allowed.
        history = [resp for resp in gen] if allow_redirects else []
        # Shuffle things around if there's history.
        if history:
            # Insert the first (original) request at the start
            history.insert(0, r)
            # Get the last request made
            r = history.pop()
            r.history = history
        if not stream:
            # Eagerly consume the body so the connection can be released.
            r.content
        return r
    def get_adapter(self, url):
        """Returns the appropriate connection adapter for the given URL."""
        for (prefix, adapter) in self.adapters.items():
            if url.lower().startswith(prefix):
                return adapter
        # Nothing matches :-/
        raise InvalidSchema("No connection adapters were found for '%s'" % url)
    def close(self):
        """Closes all adapters and as such the session"""
        for v in self.adapters.values():
            v.close()
    def mount(self, prefix, adapter):
        """Registers a connection adapter to a prefix.
        Adapters are sorted in descending order by key length."""
        self.adapters[prefix] = adapter
        # Re-append every shorter prefix so that get_adapter's insertion-order
        # scan encounters longer (more specific) prefixes first.
        keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
        for key in keys_to_move:
            self.adapters[key] = self.adapters.pop(key)
    def __getstate__(self):
        # Only the attributes named in __attrs__ survive pickling.
        return dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
    def __setstate__(self, state):
        for attr, value in state.items():
            setattr(self, attr, value)
def session():
    """Return a new :class:`Session`, suitable for use as a context manager.

    Kept for backwards compatibility; new code can instantiate
    ``Session()`` directly.
    """
    return Session()
| apache-2.0 |
YefriTavarez/finance_manager | fm/utilities.py | 1 | 1245 | # -*- encoding: utf-8 -*-
import frappe
from datetime import date
from fm.api import PENDING
@frappe.whitelist()
def get_next_repayment_schedule(chasis_no):
    """Return the next pending repayment date (as 'YYYY-MM-DD') for the loan
    attached to the given vehicle chassis number.

    Falls back to one month from today when no loan exists for the asset, or
    when the loan's repayment schedule has no pending rows left (the previous
    version raised IndexError in that case).
    """
    loan_id = frappe.get_value("Loan", { "asset": chasis_no }, "name")
    if not loan_id:
        next_month = frappe.utils.add_months(date.today(), 1)
        return next_month.strftime("%Y-%m-%d")
    loan = frappe.get_doc("Loan", loan_id)
    # Rows still flagged as pending, kept in schedule order.
    pending_rows = [row for row in loan.repayment_schedule if row.estado == PENDING]
    if not pending_rows:
        # Schedule fully paid: behave like the "no loan" case instead of crashing.
        next_month = frappe.utils.add_months(date.today(), 1)
        return next_month.strftime("%Y-%m-%d")
    return pending_rows[0].fecha.strftime('%Y-%m-%d')
@frappe.whitelist()
def add_insurance_to_loan(chasis_no, total_insurance):
    """Attach the insurance total to the fully disbursed loan matching this
    chassis number and return the loan document's name."""
    loan = frappe.get_doc("Loan", {
        "asset": chasis_no,
        "status": "Fully Disbursed",
    })
    loan.vehicle_insurance = total_insurance
    loan.save()
    return loan.name
# Ordinal-keyed translation table mapping the accented characters handled by
# this module to their plain-ASCII counterparts; built once at import time so
# s_sanitize performs a single C-level pass instead of 14 chained replaces.
_ACCENT_MAP = {
    ord(u"á"): u"a", ord(u"Á"): u"A",
    ord(u"é"): u"e", ord(u"É"): u"E",
    ord(u"í"): u"i", ord(u"Í"): u"I",
    ord(u"ó"): u"o", ord(u"Ó"): u"O",
    ord(u"ú"): u"u", ord(u"Ú"): u"U",
    ord(u"ü"): u"u", ord(u"Ü"): u"U",
    ord(u"ñ"): u"n", ord(u"Ñ"): u"N",
}


def s_sanitize(string):
    """Remove the most common special caracters (Spanish accents) from
    *string* and return the result upper-cased."""
    return string.translate(_ACCENT_MAP).upper()
| gpl-3.0 |
Plain-Andy-legacy/android_external_chromium_org | tools/telemetry/telemetry/page/actions/seek.py | 47 | 2160 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A Telemetry page_action that performs the "seek" action on media elements.
Action parameters are:
- seconds: The media time to seek to. Test fails if not provided.
- selector: If no selector is defined then the action attempts to seek the first
media element on the page. If 'all' then seek all media elements.
- timeout_in_seconds: Maximum waiting time for the "seeked" event
(dispatched when the seeked operation completes)
to be fired. 0 means do not wait.
- log_time: If true the seek time is recorded, otherwise media
measurement will not be aware of the seek action. Used to
perform multiple seeks. Default true.
- label: A suffix string to name the seek perf measurement.
"""
from telemetry.core import exceptions
from telemetry.page.actions import media_action
from telemetry.page.actions import page_action
class SeekAction(media_action.MediaAction):
  """Performs a seek on media elements selected by a CSS selector.

  Optionally blocks until the 'seeked' event fires, and optionally records
  the seek time for media measurements.
  """

  def __init__(self, seconds, selector=None, timeout_in_seconds=0,
               log_time=True, label=''):
    super(SeekAction, self).__init__()
    self._seconds = seconds
    # An empty selector means "first media element on the page".
    self._selector = selector or ''
    self._timeout_in_seconds = timeout_in_seconds
    self._log_time = log_time
    self._label = label

  def WillRunAction(self, tab):
    """Loads the media metrics JS code prior to running the action."""
    super(SeekAction, self).WillRunAction(tab)
    self.LoadJS(tab, 'seek.js')

  def RunAction(self, tab):
    seek_js = ('window.__seekMedia("%s", "%s", %i, "%s");' %
               (self._selector, self._seconds, self._log_time, self._label))
    try:
      tab.ExecuteJavaScript(seek_js)
      if self._timeout_in_seconds > 0:
        self.WaitForEvent(tab, self._selector, 'seeked',
                          self._timeout_in_seconds)
    except exceptions.EvaluateException:
      raise page_action.PageActionFailed('Cannot seek media element(s) with '
                                         'selector = %s.' % self._selector)
| bsd-3-clause |
coolbombom/CouchPotato | library/sqlalchemy/dialects/sybase/pysybase.py | 18 | 3152 | # pysybase.py
# Copyright (C) 2010 Michael Bayer mike_mp@zzzcomputing.com
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
Support for Sybase via the python-sybase driver.
http://python-sybase.sourceforge.net/
Connect strings are of the form::
sybase+pysybase://<username>:<password>@<dsn>/[database name]
Unicode Support
---------------
The python-sybase driver does not appear to support non-ASCII strings of any
kind at this time.
"""
from sqlalchemy import types as sqltypes, processors
from sqlalchemy.dialects.sybase.base import SybaseDialect, \
SybaseExecutionContext, SybaseSQLCompiler
class _SybNumeric(sqltypes.Numeric):
    """Numeric type that honours ``asdecimal=False`` by coercing to float."""

    def result_processor(self, dialect, type_):
        # Defer to the default Numeric behaviour when Decimals are wanted;
        # otherwise short-circuit to the plain float coercion.
        if self.asdecimal:
            return sqltypes.Numeric.result_processor(self, dialect, type_)
        return processors.to_float
class SybaseExecutionContext_pysybase(SybaseExecutionContext):
    """Execution-context tweaks specific to the python-sybase DBAPI."""

    def set_ddl_autocommit(self, dbapi_connection, value):
        if not value:
            return
        # call commit() on the Sybase connection directly,
        # to avoid any side effects of calling a Connection
        # transactional method inside of pre_exec()
        dbapi_connection.commit()

    def pre_exec(self):
        SybaseExecutionContext.pre_exec(self)

        # python-sybase expects named parameters prefixed with '@';
        # rewrite each parameter dict in place accordingly.
        for param in self.parameters:
            for key in list(param):
                param["@" + key] = param.pop(key)
class SybaseSQLCompiler_pysybase(SybaseSQLCompiler):
    """SQL compiler emitting python-sybase style '@name' bind parameters."""

    def bindparam_string(self, name):
        return "@%s" % name
class SybaseDialect_pysybase(SybaseDialect):
    """Sybase dialect backed by the python-sybase DBAPI."""

    driver = 'pysybase'
    execution_ctx_cls = SybaseExecutionContext_pysybase
    statement_compiler = SybaseSQLCompiler_pysybase

    colspecs = {
        sqltypes.Numeric: _SybNumeric,
        sqltypes.Float: sqltypes.Float
    }

    @classmethod
    def dbapi(cls):
        import Sybase
        return Sybase

    def create_connect_args(self, url):
        opts = url.translate_connect_args(username='user', password='passwd')
        return ([opts.pop('host')], opts)

    def do_executemany(self, cursor, statement, parameters, context=None):
        # calling python-sybase executemany yields:
        # TypeError: string too long for buffer
        # so fall back to one execute() per parameter set.
        for param in parameters:
            cursor.execute(statement, param)

    def _get_server_version_info(self, connection):
        vers = connection.scalar("select @@version_number")
        # i.e. 15500, 15000, 12500 == (15, 5, 0, 0), (15, 0, 0, 0),
        # (12, 5, 0, 0).  Floor division keeps the tuple integral under
        # Python 3 (plain '/' would yield floats); identical under Python 2.
        return (vers // 1000, vers % 1000 // 100, vers % 100 // 10, vers % 10)

    def is_disconnect(self, e):
        if isinstance(e, (self.dbapi.OperationalError,
                          self.dbapi.ProgrammingError)):
            msg = str(e)
            return ('Unable to complete network request to host' in msg or
                    'Invalid connection state' in msg or
                    'Invalid cursor state' in msg)
        else:
            return False

dialect = SybaseDialect_pysybase
| gpl-3.0 |
mattclay/ansible | test/support/windows-integration/plugins/modules/win_user_right.py | 56 | 3205 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_user_right
version_added: '2.4'
short_description: Manage Windows User Rights
description:
- Add, remove or set User Rights for a group or users or groups.
- You can set user rights for both local and domain accounts.
options:
name:
description:
- The name of the User Right as shown by the C(Constant Name) value from
U(https://technet.microsoft.com/en-us/library/dd349804.aspx).
- The module will return an error if the right is invalid.
type: str
required: yes
users:
description:
- A list of users or groups to add/remove on the User Right.
- These can be in the form DOMAIN\user-group, user-group@DOMAIN.COM for
domain users/groups.
- For local users/groups it can be in the form user-group, .\user-group,
SERVERNAME\user-group where SERVERNAME is the name of the remote server.
- You can also add special local accounts like SYSTEM and others.
- Can be set to an empty list with I(action=set) to remove all accounts
from the right.
type: list
required: yes
action:
description:
- C(add) will add the users/groups to the existing right.
- C(remove) will remove the users/groups from the existing right.
- C(set) will replace the users/groups of the existing right.
type: str
default: set
choices: [ add, remove, set ]
notes:
- If the server is domain joined this module can change a right but if a GPO
governs this right then the changes won't last.
seealso:
- module: win_group
- module: win_group_membership
- module: win_user
author:
- Jordan Borean (@jborean93)
'''
EXAMPLES = r'''
---
- name: Replace the entries of Deny log on locally
win_user_right:
name: SeDenyInteractiveLogonRight
users:
- Guest
- Users
action: set
- name: Add account to Log on as a service
win_user_right:
name: SeServiceLogonRight
users:
- .\Administrator
- '{{ansible_hostname}}\local-user'
action: add
- name: Remove accounts who can create Symbolic links
win_user_right:
name: SeCreateSymbolicLinkPrivilege
users:
- SYSTEM
- Administrators
- DOMAIN\User
- group@DOMAIN.COM
action: remove
- name: Remove all accounts who cannot log on remote interactively
win_user_right:
name: SeDenyRemoteInteractiveLogonRight
users: []
'''
RETURN = r'''
added:
description: A list of accounts that were added to the right, this is empty
if no accounts were added.
returned: success
type: list
sample: ["NT AUTHORITY\\SYSTEM", "DOMAIN\\User"]
removed:
description: A list of accounts that were removed from the right, this is
empty if no accounts were removed.
returned: success
type: list
sample: ["SERVERNAME\\Administrator", "BUILTIN\\Administrators"]
'''
| gpl-3.0 |
sam-m888/addons-source | lxml/lxmlGramplet.py | 2 | 27861 | # Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2009 Brian G. Matherly
# Copyright (C) 2010 Douglas S. Blank
# Copyright (C) 2011-2019 Jerome Rapinat
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# $Id: $
#------------------------------------------------------------------------
#
# Python modules
#
#------------------------------------------------------------------------
import sys
import os
from shutil import copy
from gi.repository import Gtk
#import subprocess
import logging
#------------------------------------------------------------------------
#
# GRAMPS modules
#
#------------------------------------------------------------------------
from gramps.gen.plug import Gramplet
from gramps.gen.lib import date
import gramps.gen.datehandler
from gramps.gen.const import USER_HOME, USER_PLUGINS
from gramps.gui.display import display_url
from gramps.gui.dialog import ErrorDialog
from gramps.plugins.lib.libhtml import Html, xml_lang
from gramps.gen.const import GRAMPS_LOCALE as glocale
try:
_trans = glocale.get_addon_translator(__file__)
except ValueError:
_trans = glocale.translation
_ = _trans.gettext
LOG = logging.getLogger("lxml")
#-------------------------------------------------------------------------
#
# Try to detect the presence of gzip
#
#-------------------------------------------------------------------------
# GZIP_OK flags whether compressed .gramps files can be unpacked later on.
try:
    import gzip
    GZIP_OK = True
except:
    # NOTE(review): bare 'except' also catches SystemExit/KeyboardInterrupt;
    # 'except ImportError' would be safer here.
    GZIP_OK = False
    ErrorDialog(_('Where is gzip?'), _('"gzip" is missing'))
    LOG.error('No gzip')
#-------------------------------------------------------------------------
#
# Try to detect the presence of lxml (only for using XPATH/XSLT)
#
# from xml.etree import ElementTree from default python default python has a basic XPATH/XSLT API
#
#-------------------------------------------------------------------------
# LXML_OK gates all XML parsing/validation done by this gramplet.
try:
    from lxml import etree, objectify
    LXML_OK = True
    # current code is working with:
    # LXML_VERSION (4, 3, 4)
    # LIBXML_VERSION (2, 9, 1))
    # LIBXSLT_VERSION (1, 1, 28))
    LXML_VERSION = etree.LXML_VERSION
    LIBXML_VERSION = etree.LIBXML_VERSION
    LIBXSLT_VERSION = etree.LIBXSLT_VERSION
except:
    # NOTE(review): same bare-except caveat as the gzip probe above.
    LXML_OK = False
    ErrorDialog(_('Missing python3 lxml'), _('Please, try to install "python3 lxml" package.'))
    LOG.error('No lxml')
#-------------------------------------------------------------------------
#
# Timestamp convertor
#
#-------------------------------------------------------------------------
def epoch(t):
    """Convert a POSIX timestamp into a localized 'DD Month YYYY' string.

    :param t: seconds since the epoch (anything int() accepts), or None.
    :returns: the formatted date string; _('Unknown') when *t* is None;
        None if the datetime module cannot be imported.
    """
    try:
        from datetime import datetime
    except ImportError:
        LOG.error('Modules around time missing')
        return None
    if t is None:
        # Missing timestamps are reported but not fatal.
        LOG.warning(_('Invalid timestamp'))
        return _('Unknown')
    # Use a fresh local name: a local named 'date' would shadow the
    # gramps.gen.lib 'date' module imported at file level.
    timestamp = int(t)
    return datetime.fromtimestamp(timestamp).strftime('%d %B %Y')
#-------------------------------------------------------------------------
#
# The gramplet
#
#-------------------------------------------------------------------------
# Clark-notation prefix for Gramps XML 1.7.1 elements, used when matching
# namespaced tags in the parsed document.
NAMESPACE = '{http://gramps-project.org/xml/1.7.1/}'
class lxmlGramplet(Gramplet):
"""
Gramplet for testing lxml
"""
def init(self):
"""
Constructs the GUI, consisting of an entry, a text view and
a Run button.
"""
# filename and selector
self.__base_path = USER_HOME
self.__file_name = "test.gramps"
self.entry = Gtk.Entry()
self.entry.set_text(os.path.join(self.__base_path, self.__file_name))
self.button = Gtk.Button()
image = Gtk.Image()
image.set_from_stock(Gtk.STOCK_OPEN, Gtk.IconSize.BUTTON)
self.button.add(image)
self.button.connect('clicked', self.__select_file)
# GUI setup:
vbox = Gtk.VBox()
hbox = Gtk.HBox()
# area
self.import_text = Gtk.TextView()
self.import_text.set_wrap_mode(Gtk.WrapMode.WORD)
self.import_text.set_editable(False)
self.text = Gtk.TextBuffer()
self.text.set_text(_('No file loaded...'))
self.import_text.set_buffer(self.text)
vbox.pack_start(self.import_text, True, True, 0) # v1
# button
button = Gtk.Button(_("Run"))
button.connect("clicked", self.run)
vbox.pack_start(button, False, False, 0) # v2
# build
hbox.pack_start(self.entry, True, True, 0)
hbox.pack_end(self.button, False, False, 0)
vbox.pack_end(hbox, False, False, 0) # v3
self.gui.get_container_widget().remove(self.gui.textview)
self.gui.get_container_widget().add_with_viewport(vbox)
vbox.show_all()
def __select_file(self, obj):
"""
Call back function to handle the open button press
"""
my_action = Gtk.FileChooserAction.SAVE
dialog = Gtk.FileChooserDialog('lxml',
action=my_action,
buttons=(Gtk.STOCK_CANCEL,
Gtk.ResponseType.CANCEL,
Gtk.STOCK_OPEN,
Gtk.ResponseType.OK))
name = os.path.basename(self.entry.get_text())
dialog.set_current_name(name)
dialog.set_current_folder(self.__base_path)
dialog.present()
status = dialog.run()
if status == Gtk.ResponseType.OK:
self.set_filename(dialog.get_filename())
dialog.destroy()
def set_filename(self, path):
"""
Set the currently selected dialog.
"""
if not path:
return
if os.path.dirname(path):
self.__base_path = os.path.dirname(path)
self.__file_name = os.path.basename(path)
else:
self.__base_path = os.getcwd()
self.__file_name = path
self.entry.set_text(os.path.join(self.__base_path, self.__file_name))
def run(self, obj):
"""
Method that is run when you click the Run button.
"""
entry = self.entry.get_text()
if ' ' in entry:
ErrorDialog(_('Space character on filename'), _('Please fix space on "%s"') % entry)
LOG.error('Space on filename')
return
self.ReadXML(entry)
    def ReadXML(self, entry):
        """
        Read the .gramps

        Copy the (possibly gzip-compressed) .gramps file named by
        *entry* into the plugin directory, run XSD/DTD/RelaxNG
        validation passes over the copy, and finally hand the parsed
        tree to ParseXML().  Errors are reported via ErrorDialog and
        logged; the method returns early on failure.
        """
        # Probe whether the file is gzip-compressed by reading one byte.
        if GZIP_OK:
            use_gzip = 1
            try:
                test = gzip.open(entry, "r")
                test.read(1)
                test.close()
            except IOError:
                # not gzip-readable -> treat as plain XML
                use_gzip = 0
            except ValueError:
                use_gzip = 1
        else:
            use_gzip = 0
        # lazy ...
        # Only POSIX and Windows were ever tested; bail out elsewhere.
        if os.name != 'posix' and os.name != 'nt':
            # GtkTextView
            self.text.set_text(_('Sorry, no support for your OS yet!'))
            LOG.error('Not tested under this OS')
            return
        # Working copy of the input inside the plugin directory.
        filename = os.path.join(USER_PLUGINS, 'lxml', 'test.xml')
        if LXML_OK and use_gzip == 1:
            try:
                # SECURITY NOTE(review): 'entry' is interpolated into a
                # shell command unescaped; run() only rejects spaces, so
                # other shell metacharacters would still pass through.
                os.system('gunzip < %s > %s' % (entry, filename))
            except:
                ErrorDialog(_('Is it a compressed .gramps?'), _('Cannot uncompress "%s"') % entry)
                LOG.error('Cannot use gunzip command')
                return
            sys.stdout.write(_('From:\n "%(file1)s"\n to:\n "%(file2)s".\n') % {'file1': entry, 'file2': filename})
        elif LXML_OK and use_gzip == 0:
            try:
                copy(entry, filename)
            except:
                ErrorDialog('Is it a .gramps ?', _('Cannot copy "%s"') % entry)
                LOG.error('Cannot copy the file')
                return
            sys.stdout.write(_('From:\n "%(file1)s"\n to:\n "%(file2)s".\n') % {'file1': entry, 'file2': filename})
        else:
            LOG.error('lxml or gzip is missing')
            return
        # XSD structure via lxml
        xsd = os.path.join(USER_PLUGINS, 'lxml', 'grampsxml.xsd')
        try:
            self.xsd(xsd, filename)
            #pass
        except:
            ErrorDialog(_('XSD validation (lxml)'), _('Cannot validate "%(file)s" !') % {'file': entry})
            # NOTE(review): this re-runs self.xsd() just to log it, which
            # will raise the same error again inside the except block.
            LOG.debug(self.xsd(xsd, filename))
        # DTD syntax via xmllint (libxml2-utils)
        try:
            self.check_valid(filename)
        except:
            LOG.info(_('xmllint: skip DTD validation for "%(file)s"') % {'file': entry})
        # RNG validation via xmllint (libxml2-utils)
        rng = os.path.join(USER_PLUGINS, 'lxml', 'grampsxml.rng')
        try:
            if os.name == 'nt':
                os.system('xmllint --relaxng %s --noout %s' % (rng, filename))
            else:
                os.system('xmllint --relaxng file://%s --noout %s' % (rng, filename))
        except:
            LOG.info(_('xmllint: skip RelaxNG validation for "%(file)s"') % {'file': entry})
        try:
            #tree = etree.ElementTree(file=filename)
            tree = etree.parse(filename)
            doctype = tree.docinfo.doctype
            # Expected DOCTYPE for Gramps XML 1.7.1 files.
            current = '<!DOCTYPE database PUBLIC "-//Gramps//DTD Gramps XML 1.7.1//EN" "http://gramps-project.org/xml/1.7.1/grampsxml.dtd">'
            if self.RNGValidation(tree, rng) == True:
                try:
                    self.ParseXML(tree, filename)
                except:
                    ErrorDialog(_('Parsing issue'), _('Cannot parse content of "%(file)s"') % {'file': filename})
                    LOG.error('Cannot parse the content of the XML copy')
                    return
            elif doctype != current:
                ErrorDialog(_('Gramps version'), _('Wrong namespace\nNeed: %s') % current)
                LOG.error('Namespace is wrong')
                return
            else:
                ErrorDialog(_('RelaxNG validation'), _('Cannot validate "%(file)s" via RelaxNG schema') % {'file': entry})
                LOG.error('RelaxNG validation failed')
                return
        except etree.XMLSyntaxError as e:
            # Not well-formed XML at all: log the fatal errors from lxml.
            ErrorDialog(_('File issue'), _('Cannot parse "%(file)s" via etree') % {'file': entry})
            log = e.error_log.filter_from_level(etree.ErrorLevels.FATAL)
            LOG.debug(log)
            debug = e.error_log.last_error
            LOG.debug(debug.domain_name)
            LOG.debug(debug.type_name)
            LOG.debug(debug.filename)
            return
    def ParseXML(self, tree, filename):
        """
        Parse the validated .gramps

        Walk the whole tree collecting surnames, place names, source
        titles, media thumbnails and 'change' timestamps, display a
        summary in the GtkTextView, then fan out to WriteXML(),
        PrintMedia() and WriteBackXML().
        """
        root = tree.getroot()
        # GtkTextView ; buffer limitation ...
        self.text.set_text(_('Parsing file...'))
        #LOG.info(etree.tostring(root, pretty_print=True))
        # namespace issues !
        namespace = root.nsmap
        surname_tag = etree.SubElement(root, NAMESPACE + 'surname')
        pname_tag = etree.SubElement(root, NAMESPACE + 'pname')
        # variable
        expr = "//*[local-name() = $name]"
        # count function
        # float and seems to also count the parent tag: name[0] !
        count_elements = etree.XPath("count(//*[local-name() = $name])")
        # textual children strings function
        desc = etree.XPath('descendant-or-self::text()')
        # TODO: cleanup !
        # quick but not a nice method ...
        msg = []
        #tags = []
        places = []
        sources = []
        surnames = []
        timestamp = []
        thumbs = []
        LOG.info('start iteration')
        # NOTE(review): the nested one/two/three/four .iter() loops visit
        # every node several times; the 'not in' guards below dedupe the
        # collected values, at quadratic cost on large files.
        for one in root.iter():
            #(tag, item) = one.tag, one.items()
            #print(tag, item)
            for two in one.iter():
                #tags.append(two.tag)
                msg.append(two.items())
                # mediapath is reset to '' for every non-mediapath node,
                # so only the last iteration's value survives the loop.
                if two.tag == NAMESPACE + 'mediapath':
                    mediapath = two.text
                else:
                    mediapath = ''
                # search ptitle and time log
                for three in two.iter():
                    # timestamp
                    if two.get('change'):
                        timestamp.append(two.get('change'))
                    # with namespace ...
                    #print(desc(three))
                    (tag, items) = three.tag, three.items()
                    if three.tag == NAMESPACE + 'pname':
                        text = str(three.attrib.get('value'))
                        translation = str(three.attrib.get('lang'))
                        # missing @lang -> guess from the locale and mark
                        # the guess with a question mark
                        if translation == 'None':
                            translation = xml_lang()[0:2]
                            text = text + _(' - (? or %(lang)s)') % {'lang':translation}
                        else:
                            text = text + _(' - (%(lang)s)') % {'lang':translation}
                        if text not in places:
                            places.append(text) # temp display
                    if three.tag == NAMESPACE + 'stitle' and three.text not in sources:
                        # need to add an exception
                        if not three.text:
                            three.text = ""
                        sources.append(three.text)
                    if three.tag == NAMESPACE + 'file' and three.items() not in thumbs:
                        thumbs.append(three.items())
                    # search last name
                    for four in three.iter():
                        # with namespace ...
                        if four.tag == NAMESPACE + 'surname' and four.text != None:
                            surnames.append(four.text)
        LOG.info('end of loops')
        # All tags
        #print(tags)
        # keys, values; no textual data;
        # root child level items as keys for revision control ???
        #revision = msg
        #print(revision)
        # msg[2] is assumed to be the header's attribute list: (created,
        # version) pairs -- see the unpack into [(k1, v1), (k2, v2)] below.
        log = msg[2]
        if not log:
            ErrorDialog(_('Missing header'), _('Not a valid .gramps.\n'
                        'Cannot run the gramplet...\n'
                        'Please, try to use a .gramps\n'
                        'generated by Gramps 4.x.'))
            LOG.error('header missing')
            return
        # dirty XML write method ...
        # need to create a fake entry !
        if int(count_elements(root, name = 'surname')) > 1:
            nb_surnames = int(count_elements(root, name = 'surname'))
        else:
            # no data: both the counter and the list become ['0'] so the
            # downstream writers still have something to iterate
            nb_surnames = surnames = [_('0')]
        if int(count_elements(root, name = 'pname')) > 1:
            nb_pnames = int(count_elements(root, name = 'pname'))
        else:
            nb_pnames = places = [_('0')]
        if int(count_elements(root, name = 'note')) > 1:
            nb_notes = int(count_elements(root, name = 'note'))
        else:
            nb_notes = _('0')
        if int(count_elements(root, name = 'stitle')) > 1:
            nb_sources = int(count_elements(root, name = 'stitle'))
        else:
            nb_sources = _('0')
        # time logs
        # NOTE(review): raises IndexError when no 'change' attributes were
        # found (timestamp empty) -- presumably never the case for a valid
        # .gramps file; confirm.
        timestamp.sort()
        start = timestamp[0]
        end = timestamp[-1]
        timestamp = []
        first = epoch(start)
        last = epoch(end)
        # Build the human-readable summary for the GtkTextView.
        header = _('File parsed with') + ' LXML' + str(LXML_VERSION) + '\n\n'
        [(k1, v1),(k2, v2)] = log
        file_info = _('File was generated on ') + v1 + '\n\t' + _(' by Gramps ') + v2 + '\n\n'
        period = _('Period: ') + first + ' => ' + last + '\n\n'
        su = '\t' + str(nb_surnames) + '\t' + _(' entries for surname(s); no frequency yet') + '\n'
        p = '\t' + str(nb_pnames) + '\t' + _(' entries for place(s)') + '\n'
        n = '\t' + str(nb_notes) + '\t' + _(' note(s)') + '\n'
        so = '\t' + str(nb_sources) + '\t' + _(' source(s)') + '\n\n'
        counters = su + p + n + so
        libs = 'LIBXML' + str(LIBXML_VERSION) + '\tLIBXSLT' + str(LIBXSLT_VERSION)
        # GtkTextView
        self.text.set_text(header + file_info + period + counters + libs)
        LOG.info('### NEW FILES ###')
        LOG.info('content parsed and copied')
        # Generate the HTML query page, the media gallery, and write the
        # collected entries back into the XML copy.
        self.WriteXML(log, first, last, surnames, places, sources)
        self.PrintMedia(thumbs, mediapath)
        images = os.path.join(USER_PLUGINS, 'lxml', _('Gallery.html'))
        sys.stdout.write(_('2. Has generated a media index on "%(file)s".\n') % {'file': images})
        self.WriteBackXML(filename, root, surnames, places, sources)
        sys.stdout.write(_('3. Has written entries into "%(file)s".\n') % {'file': filename})
def xsd(self, xsd, filename):
"""
Look at schema, validation, conform, structure, content, etc...
Code for 1.7.1
"""
# syntax check against XSD for file format
schema = etree.XMLSchema(file=xsd)
parser = objectify.makeparser(schema = schema)
tree = etree.parse(filename)
root = tree.getroot()
database = objectify.fromstring(etree.tostring(root, encoding="UTF-8"), parser)
LOG.info(_('Matches XSD schema.'))
#dump = objectify.dump(database)
#print(dump)
def check_valid(self, filename):
"""
Look at schema, validation, conform, etc...
Code for 1.7.1
"""
# syntax check against DTD for file format
# xmllint --loaddtd --dtdvalid --valid --shell --noout ...
dtd = os.path.join(USER_PLUGINS, 'lxml', 'grampsxml.dtd')
try:
if os.name == 'nt':
os.system('xmllint --dtdvalid %(dtd)s --noout --dropdtd %(file)s' % {'dtd': dtd, 'file': filename})
else:
os.system('xmllint --dtdvalid file://%(dtd)s --noout --dropdtd %(file)s' % {'dtd': dtd, 'file': filename})
except:
LOG.info(_('xmllint: skip DTD validation'))
def RNGValidation(self, tree, rng):
"""
RNG Validation with ElementTree
"""
# validity check against scheme for file format
valid = etree.ElementTree(file=rng)
schema = etree.RelaxNG(valid)
return(schema.validate(tree))
    def WriteXML(self, log, first, last, surnames, places, sources):
        """
        Write the result of the query for distributed, shared protocols

        Builds a small custom <query> XML document from the collected
        data, runs it through the bundled query_html.xsl stylesheet and
        writes the resulting query.html into the plugin directory.
        """
        # Custom XML file in buffer
        self.lang = xml_lang()
        self.title = _('I am looking at ...')
        self.footer = _('Content generated by Gramps')
        self.surnames_title = _('Surnames')
        self.places_name = _('Places')
        self.sources_title = _('List of sources')
        time = date.Today()
        # Root element carries display metadata for the stylesheet.
        xml = etree.Element("query")
        xml.set("lang", self.lang)
        xml.set("title", self.title)
        xml.set("footer", self.footer)
        xml.set("date", gramps.gen.datehandler.displayer.display(time))
        xml.set("first", first)
        xml.set("last", last)
        # only for info
        doc = etree.ElementTree(xml)
        # custom countries list (re-use some Gramps translations ...) ;)
        countries = ['',
                     _('Australia'),
                     _('Brazil'),
                     _('Bulgaria'),
                     _('Canada'),
                     _('Chile'),
                     _('China'),
                     _('Croatia'),
                     _('Czech Republic'),
                     _('England'),
                     _('Finland'),
                     _('France'),
                     _('Germany'),
                     _('India'),
                     _('Japan'),
                     _('Norway'),
                     _('Portugal'),
                     _('Russia'),
                     _('Sweden'),
                     _('United States of America'),
                     ]
        c = etree.SubElement(xml, "clist")
        self.name = _('Name')
        self.country = _('Country')
        c.set("pname", self.name)
        c.set("country", self.country)
        for country in countries:
            c1 = etree.SubElement(c, "country")
            c1.text = country
        # data log
        # log is the header attribute list: (created-date, gramps-version)
        [(k1, v1),(k2, v2)] = log
        l = etree.SubElement(xml, "log")
        l.set("date", v1)
        l.set("version", v2)
        s = etree.SubElement(xml, "surnames")
        s.set("title", self.surnames_title)
        surnames.sort()
        # 'cnt' dedupes surnames while preserving the sorted order.
        cnt = []
        for surname in surnames:
            if surname not in cnt:
                s1 = etree.SubElement(s, "surname")
                s1.text = surname
                cnt.append(surname)
        p = etree.SubElement(xml, "places")
        p.set("pname", self.places_name)
        places.sort()
        for place in places:
            p1 = etree.SubElement(p, "place")
            p1.text = place
        src = etree.SubElement(xml, "sources")
        src.set("title", self.sources_title)
        sources.sort()
        for source in sources:
            src1 = etree.SubElement(src, "source")
            src1.text = source
        # Round-trip through bytes so the XSLT input is a fresh tree.
        content = etree.XML(etree.tostring(xml, encoding="UTF-8"))
        # XSLT process
        xslt_doc = etree.parse(os.path.join(USER_PLUGINS, 'lxml', 'query_html.xsl'))
        transform = etree.XSLT(xslt_doc)
        outdoc = transform(content)
        #print(type(outdoc))
        html = os.path.join(USER_PLUGINS, 'lxml', 'query.html')
        outfile = open(html, 'w')
        outfile.write(str(outdoc))
        outfile.close()
        # clear the etree
        content.clear()
        # This is the end !
        sys.stdout.write(_('1. Has generated "%s".\n') % html)
        LOG.info(_('Try to open\n "%s"\n into your prefered web navigator ...') % html)
        display_url(html)
        #self.post(html)
    def PrintMedia(self, thumbs, mediapath):
        """
        Print some media infos via HTML class (Gramps)

        Creates an empty Gallery.html in the plugin directory and
        delegates the actual content generation to XHTMLWriter(),
        which also closes the file handle opened here.
        """
        LOG.info('Looking at media...')
        # Web page filename extensions
        # NOTE(review): _WEB_EXT is defined but never used in this method.
        _WEB_EXT = ['.html', '.htm', '.shtml', '.php', '.php3', '.cgi']
        # page title
        title = _('Gallery')
        fname = os.path.join(USER_PLUGINS, 'lxml', _('Gallery.html'))
        of = open(fname, "w")
        # (typo 'Gallery.hml' is only in the log text, not the filename)
        LOG.info('Empty "Gallery.hml" file created')
        # htmlinstance = page
        # ignored by current code...
        lang = xml_lang()
        page, head, body = Html.page(title, encoding='utf-8', lang=str(lang))
        # head/body from Html.page() are discarded; XHTMLWriter writes
        # plain strings instead.
        head = body = ""
        self.text = []
        self.XHTMLWriter(fname, page, head, body, of, thumbs, mediapath)
        LOG.info('End (Media)')
    def __write_gallery(self, thumbs, mediapath):
        """
        This procedure writes out the media

        *thumbs* is a list of attribute lists (tuples) taken from the
        <file> elements of the .gramps XML; for every image entry an
        <img> plus a description link is appended to self.text.
        Returns self.text.
        """
        LOG.info('Looking at gallery')
        from gramps.gen.utils.thumbnails import get_thumbnail_path
        # full clear line for proper styling
        fullclear = Html("div", class_ = "fullclear", inline = True)
        LOG.info('Start to enumerate for gallery')
        #LOG.debug(thumbs)
        for i, thumb in enumerate(thumbs):
            # list of tuples [('',''),('','')]
            # Positional decoding of the attribute tuples: src, mime,
            # checksum, description are expected in that order.
            if (list(thumb)[0])[0] == 'src':
                src = (list(thumb)[0])[1]
            else:
                src = 'No src'
            #LOG.debug(src)
            if (list(thumb)[1])[0] == 'mime':
                mime = (list(thumb)[1])[1]
            else:
                mime = 'No mime'
            #LOG.debug(mime)
            if (list(thumb)[2])[0] == 'checksum':
                checksum = (list(thumb)[2])[1]
            else:
                checksum = 'No checksum'
            #LOG.debug(checksum)
            # NOTE(review): this re-checks index 2 for 'description' right
            # after the checksum test above -- looks like it should probe
            # index 3 (or rely solely on the len(thumb) == 4 branch);
            # confirm against the .gramps <file> attribute order.
            if (list(thumb)[2])[0] == 'description':
                description = (list(thumb)[2])[1]
            elif len(thumb) == 4:
                description = (list(thumb)[3])[1]
            else:
                description = 'No description'
            #LOG.debug(description)
            # relative and absolute paths
            src = os.path.join(mediapath, src)
            # windows OS ???
            if not src.startswith("/"):
                src = os.path.join(USER_HOME, src)
            #LOG.debug(src)
            # only images
            if mime.startswith("image"):
                thumb = get_thumbnail_path(str(src), mtype=None, rectangle=None)
                #LOG.debug(thumb)
                self.text += Html('img', src=str(thumb), mtype=str(mime))
                self.text += fullclear
                self.text += Html('a', str(description), href=str(src), target='blank', title=str(mime))
                self.text += fullclear
        return self.text
def close_file(self, of):
""" will close whatever filename is passed to it """
of.close()
    def XHTMLWriter(self, fname, page, head, body, of, thumbs, mediapath):
        """
        Will format, write, and close the file

        of -- open file that is being written to
        htmlinstance -- web page created with libhtml
                        src/plugins/lib/libhtml.py

        Fills self.text via __write_gallery() and writes it, one line
        per entry, into *fname*.  The 'page' and 'body' arguments are
        currently unused; 'head' is written verbatim first.
        """
        self.__write_gallery(thumbs, mediapath)
        #LOG.debug(self.text)
        text = open(fname, 'w')
        text.write(head)
        for i, txt in enumerate(self.text):
            #LOG.debug(txt)
            text.write(txt + '\n') # Html.write() ?
        text.close()
        # closes the file
        self.close_file(of)
        LOG.info('Gallery generated')
def WriteBackXML(self, filename, root, surnames, places, sources):
"""
Write the result of the query back into the XML file (Gramps scheme)
"""
# Modify the XML copy of the .gramps
outfile = open(filename, 'w')
# clear the etree
root.clear()
## people/person/name/surname
people = etree.SubElement(root, "people")
for s in surnames:
person = etree.SubElement(people, "person")
name = etree.SubElement(person, "name")
surname = etree.SubElement(name, "surname")
surname.text = s
surnames = []
## places/placeobj/pname
pl = etree.SubElement(root, "places")
for p in places:
place = etree.SubElement(pl, "placeobj")
name = etree.SubElement(place, "pname")
pname = name.set('value', p)
places = []
## sources/source/stitle
src = etree.SubElement(root, "sources")
for s in sources:
source = etree.SubElement(src, "source")
stitle = etree.SubElement(source, "stitle")
stitle.text = s
sources = []
# write and close the etree
out = etree.tostring(root, method='xml', pretty_print=True)
str_out = out.decode('utf-8')
outfile.write(str_out)
outfile.close()
# clear the etree
root.clear()
def post(self, html):
"""
Try to play with request ...
"""
import urllib2
response = urllib2.urlopen('file://%s' % html)
data = response.read()
post = etree.HTML(data)
# find text function
find_text = etree.XPath("//text()", smart_strings=False)
LOG.info(find_text(post))
post.clear()
| gpl-2.0 |
xbmc/xbmc-antiquated | xbmc/lib/libPython/Python/Lib/distutils/spawn.py | 63 | 6991 | """distutils.spawn
Provides the 'spawn()' function, a front-end to various platform-
specific functions for launching another program in a sub-process.
Also provides the 'find_executable()' to search the path for a given
executable name.
"""
# This module should be kept compatible with Python 2.1.
__revision__ = "$Id: spawn.py 37828 2004-11-10 22:23:15Z loewis $"
import sys, os, string
from distutils.errors import *
from distutils import log
def spawn (cmd,
           search_path=1,
           verbose=0,
           dry_run=0):
    """Run another program, specified as a command list 'cmd', in a new
    process.  'cmd' is just the argument list for the new process, ie.
    cmd[0] is the program to run and cmd[1:] are the rest of its arguments.
    There is no way to run a program with a name different from that of its
    executable.

    If 'search_path' is true (the default), the system's executable
    search path will be used to find the program; otherwise, cmd[0]
    must be the exact path to the executable.  If 'dry_run' is true,
    the command will not actually be run.

    Raise DistutilsExecError if running the program fails in any way; just
    return on success.
    """
    # Dispatch to the per-platform helper; 'verbose' is accepted for
    # backward compatibility but not forwarded.
    if os.name == 'posix':
        _spawn_posix(cmd, search_path, dry_run=dry_run)
    elif os.name == 'nt':
        _spawn_nt(cmd, search_path, dry_run=dry_run)
    elif os.name == 'os2':
        _spawn_os2(cmd, search_path, dry_run=dry_run)
    else:
        # Python 2 raise syntax -- this module targets Python 2.1.
        raise DistutilsPlatformError, \
              "don't know how to spawn programs on platform '%s'" % os.name

# spawn ()
def _nt_quote_args (args):
"""Quote command-line arguments for DOS/Windows conventions: just
wraps every argument which contains blanks in double quotes, and
returns a new argument list.
"""
# XXX this doesn't seem very robust to me -- but if the Windows guys
# say it'll work, I guess I'll have to accept it. (What if an arg
# contains quotes? What other magic characters, other than spaces,
# have to be escaped? Is there an escaping mechanism other than
# quoting?)
for i in range(len(args)):
if string.find(args[i], ' ') != -1:
args[i] = '"%s"' % args[i]
return args
def _spawn_nt (cmd,
               search_path=1,
               verbose=0,
               dry_run=0):
    """Windows implementation of spawn(): quote arguments, optionally
    resolve cmd[0] on PATH, and run it via os.spawnv.  Raises
    DistutilsExecError when the program cannot be started or exits
    non-zero.  (Python 2 syntax throughout -- see module docstring.)
    """
    executable = cmd[0]
    cmd = _nt_quote_args(cmd)
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(string.join([executable] + cmd[1:], ' '))
    if not dry_run:
        # spawn for NT requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_os2 (cmd,
                search_path=1,
                verbose=0,
                dry_run=0):
    """OS/2 (EMX) implementation of spawn(): like _spawn_nt but without
    argument quoting.  Raises DistutilsExecError on launch failure or a
    non-zero exit status.  (Python 2 syntax, including the print
    statement below.)
    """
    executable = cmd[0]
    #cmd = _nt_quote_args(cmd)
    if search_path:
        # either we find one or it stays the same
        executable = find_executable(executable) or executable
    log.info(string.join([executable] + cmd[1:], ' '))
    if not dry_run:
        # spawnv for OS/2 EMX requires a full path to the .exe
        try:
            rc = os.spawnv(os.P_WAIT, executable, cmd)
        except OSError, exc:
            # this seems to happen when the command isn't found
            raise DistutilsExecError, \
                  "command '%s' failed: %s" % (cmd[0], exc[-1])
        if rc != 0:
            # and this reflects the command running but failing
            print "command '%s' failed with exit status %d" % (cmd[0], rc)
            raise DistutilsExecError, \
                  "command '%s' failed with exit status %d" % (cmd[0], rc)
def _spawn_posix (cmd,
                  search_path=1,
                  verbose=0,
                  dry_run=0):
    """POSIX implementation of spawn(): fork, exec in the child (execvp
    when 'search_path' is true, execv otherwise), and wait in the parent
    until the child exits or is killed.  Raises DistutilsExecError on
    exec failure, a fatal signal, a non-zero exit status, or an unknown
    termination status.  (Python 2 syntax throughout.)
    """
    log.info(string.join(cmd, ' '))
    if dry_run:
        return
    exec_fn = search_path and os.execvp or os.execv
    pid = os.fork()

    if pid == 0:  # in the child
        try:
            #print "cmd[0] =", cmd[0]
            #print "cmd =", cmd
            exec_fn(cmd[0], cmd)
        except OSError, e:
            sys.stderr.write("unable to execute %s: %s\n" %
                             (cmd[0], e.strerror))
            # _exit avoids running the parent's cleanup handlers twice
            os._exit(1)

        sys.stderr.write("unable to execute %s for unknown reasons" % cmd[0])
        os._exit(1)
    else:  # in the parent
        # Loop until the child either exits or is terminated by a signal
        # (ie. keep waiting if it's merely stopped)
        while 1:
            try:
                (pid, status) = os.waitpid(pid, 0)
            except OSError, exc:
                import errno
                # waitpid may be interrupted by a signal -- retry on EINTR
                if exc.errno == errno.EINTR:
                    continue
                raise DistutilsExecError, \
                      "command '%s' failed: %s" % (cmd[0], exc[-1])
            if os.WIFSIGNALED(status):
                raise DistutilsExecError, \
                      "command '%s' terminated by signal %d" % \
                      (cmd[0], os.WTERMSIG(status))
            elif os.WIFEXITED(status):
                exit_status = os.WEXITSTATUS(status)
                if exit_status == 0:
                    return   # hey, it succeeded!
                else:
                    raise DistutilsExecError, \
                          "command '%s' failed with exit status %d" % \
                          (cmd[0], exit_status)
            elif os.WIFSTOPPED(status):
                continue
            else:
                raise DistutilsExecError, \
                      "unknown error executing '%s': termination status %d" % \
                      (cmd[0], status)

# _spawn_posix ()
def find_executable(executable, path=None):
    """Try to find 'executable' in the directories listed in 'path' (a
    string listing directories separated by 'os.pathsep'; defaults to
    os.environ['PATH']).  Returns the complete filename or None if not
    found.  On win32/OS2 an '.exe' suffix is appended when missing.
    """
    if path is None:
        path = os.environ['PATH']
    # str.split exists on Python 2.0+ and 3.x; the string-module function
    # used previously (string.split) is gone in Python 3.
    paths = path.split(os.pathsep)
    (base, ext) = os.path.splitext(executable)
    if (sys.platform == 'win32' or os.name == 'os2') and (ext != '.exe'):
        executable = executable + '.exe'
    if not os.path.isfile(executable):
        for p in paths:
            f = os.path.join(p, executable)
            if os.path.isfile(f):
                # the file exists, we have a shot at spawn working
                return f
        return None
    else:
        return executable

# find_executable()
| gpl-2.0 |
astrofrog/glue-vispy-viewers | glue_vispy_viewers/extern/vispy/visuals/polygon.py | 20 | 3795 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Simple polygon visual based on MeshVisual and LineVisual
"""
from __future__ import division
import numpy as np
from .visual import CompoundVisual
from .mesh import MeshVisual
from .line import LineVisual
from ..color import Color
from ..geometry import PolygonData
from ..gloo import set_state
class PolygonVisual(CompoundVisual):
    """
    Displays a 2D polygon

    Parameters
    ----------
    pos : array
        Set of vertices defining the polygon.
    color : str | tuple | list of colors
        Fill color of the polygon.
    border_color : str | tuple | list of colors
        Border color of the polygon.
    border_width : int
        Border width in pixels.
    **kwargs : dict
        Keyword arguments to pass to `CompoundVisual`.
    """
    def __init__(self, pos=None, color='black',
                 border_color=None, border_width=1, **kwargs):
        self._mesh = MeshVisual()
        self._border = LineVisual()
        self._pos = pos
        self._color = Color(color)
        self._border_width = border_width
        self._border_color = Color(border_color)
        self._update()
        CompoundVisual.__init__(self, [self._mesh, self._border], **kwargs)
        self._mesh.set_gl_state(polygon_offset_fill=True,
                                polygon_offset=(1, 1), cull_face=False)
        self.freeze()

    def _update(self):
        # Bail out before building PolygonData: np.array(None) would make
        # the default PolygonVisual() constructor call fail otherwise.
        if self._pos is None:
            return
        self.data = PolygonData(vertices=np.array(self._pos, dtype=np.float32))
        if not self._color.is_blank:
            pts, tris = self.data.triangulate()
            set_state(polygon_offset_fill=False)
            self._mesh.set_data(vertices=pts, faces=tris.astype(np.uint32),
                                color=self._color.rgba)
        if not self._border_color.is_blank:
            # Close border if it is not already: compare the first vertex
            # with the LAST one (comparing with index 1 would append a
            # duplicate point to already-closed polygons).
            border_pos = self._pos
            if np.any(border_pos[0] != border_pos[-1]):
                border_pos = np.concatenate([border_pos, border_pos[:1]],
                                            axis=0)
            self._border.set_data(pos=border_pos,
                                  color=self._border_color.rgba,
                                  width=self._border_width,
                                  connect='strip')
            self._border.update()

    @property
    def pos(self):
        """ The vertex position of the polygon.
        """
        return self._pos

    @pos.setter
    def pos(self, pos):
        self._pos = pos
        self._update()

    @property
    def color(self):
        """ The color of the polygon.
        """
        return self._color

    @color.setter
    def color(self, color):
        self._color = Color(color, clip=True)
        self._update()

    @property
    def border_color(self):
        """ The border color of the polygon.
        """
        return self._border_color

    @border_color.setter
    def border_color(self, border_color):
        self._border_color = Color(border_color)
        self._update()

    @property
    def mesh(self):
        """The vispy.visuals.MeshVisual that is owned by the PolygonVisual.
           It is used to fill in the polygon
        """
        return self._mesh

    @mesh.setter
    def mesh(self, mesh):
        self._mesh = mesh
        self._update()

    @property
    def border(self):
        """The vispy.visuals.LineVisual that is owned by the PolygonVisual.
           It is used to draw the border of the polygon
        """
        return self._border

    @border.setter
    def border(self, border):
        self._border = border
        self._update()
| bsd-2-clause |
MalloyPower/parsing-python | front-end/testsuite-python-lib/Python-3.1/Lib/encodings/cp1250.py | 272 | 13686 | """ Python Character Mapping Codec cp1250 generated from 'MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1250.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless cp1250 codec: both directions are single charmap
    # table lookups against the module-level tables below.
    def encode(self,input,errors='strict'):
        # str -> (bytes, length) via encoding_table; 'errors' uses the
        # standard codecs handler names ('strict', 'replace', ...).
        return codecs.charmap_encode(input,errors,encoding_table)
    def decode(self,input,errors='strict'):
        # bytes -> (str, length) via decoding_table.
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        # Charmap encoding is context-free, so 'final' needs no special
        # handling; [0] drops the consumed-length from the result tuple.
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        # Single-byte charmap decoding never needs buffering across
        # calls; [0] drops the consumed-length from the result tuple.
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream API: everything is inherited from Codec + codecs.StreamWriter.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream API: everything is inherited from Codec + codecs.StreamReader.
    pass
### encodings module API
def getregentry():
    # Registry hook: the encodings package calls this to register the
    # codec under the name 'cp1250'.
    return codecs.CodecInfo(
        name='cp1250',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
'\x00' # 0x00 -> NULL
'\x01' # 0x01 -> START OF HEADING
'\x02' # 0x02 -> START OF TEXT
'\x03' # 0x03 -> END OF TEXT
'\x04' # 0x04 -> END OF TRANSMISSION
'\x05' # 0x05 -> ENQUIRY
'\x06' # 0x06 -> ACKNOWLEDGE
'\x07' # 0x07 -> BELL
'\x08' # 0x08 -> BACKSPACE
'\t' # 0x09 -> HORIZONTAL TABULATION
'\n' # 0x0A -> LINE FEED
'\x0b' # 0x0B -> VERTICAL TABULATION
'\x0c' # 0x0C -> FORM FEED
'\r' # 0x0D -> CARRIAGE RETURN
'\x0e' # 0x0E -> SHIFT OUT
'\x0f' # 0x0F -> SHIFT IN
'\x10' # 0x10 -> DATA LINK ESCAPE
'\x11' # 0x11 -> DEVICE CONTROL ONE
'\x12' # 0x12 -> DEVICE CONTROL TWO
'\x13' # 0x13 -> DEVICE CONTROL THREE
'\x14' # 0x14 -> DEVICE CONTROL FOUR
'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x16 -> SYNCHRONOUS IDLE
'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
'\x18' # 0x18 -> CANCEL
'\x19' # 0x19 -> END OF MEDIUM
'\x1a' # 0x1A -> SUBSTITUTE
'\x1b' # 0x1B -> ESCAPE
'\x1c' # 0x1C -> FILE SEPARATOR
'\x1d' # 0x1D -> GROUP SEPARATOR
'\x1e' # 0x1E -> RECORD SEPARATOR
'\x1f' # 0x1F -> UNIT SEPARATOR
' ' # 0x20 -> SPACE
'!' # 0x21 -> EXCLAMATION MARK
'"' # 0x22 -> QUOTATION MARK
'#' # 0x23 -> NUMBER SIGN
'$' # 0x24 -> DOLLAR SIGN
'%' # 0x25 -> PERCENT SIGN
'&' # 0x26 -> AMPERSAND
"'" # 0x27 -> APOSTROPHE
'(' # 0x28 -> LEFT PARENTHESIS
')' # 0x29 -> RIGHT PARENTHESIS
'*' # 0x2A -> ASTERISK
'+' # 0x2B -> PLUS SIGN
',' # 0x2C -> COMMA
'-' # 0x2D -> HYPHEN-MINUS
'.' # 0x2E -> FULL STOP
'/' # 0x2F -> SOLIDUS
'0' # 0x30 -> DIGIT ZERO
'1' # 0x31 -> DIGIT ONE
'2' # 0x32 -> DIGIT TWO
'3' # 0x33 -> DIGIT THREE
'4' # 0x34 -> DIGIT FOUR
'5' # 0x35 -> DIGIT FIVE
'6' # 0x36 -> DIGIT SIX
'7' # 0x37 -> DIGIT SEVEN
'8' # 0x38 -> DIGIT EIGHT
'9' # 0x39 -> DIGIT NINE
':' # 0x3A -> COLON
';' # 0x3B -> SEMICOLON
'<' # 0x3C -> LESS-THAN SIGN
'=' # 0x3D -> EQUALS SIGN
'>' # 0x3E -> GREATER-THAN SIGN
'?' # 0x3F -> QUESTION MARK
'@' # 0x40 -> COMMERCIAL AT
'A' # 0x41 -> LATIN CAPITAL LETTER A
'B' # 0x42 -> LATIN CAPITAL LETTER B
'C' # 0x43 -> LATIN CAPITAL LETTER C
'D' # 0x44 -> LATIN CAPITAL LETTER D
'E' # 0x45 -> LATIN CAPITAL LETTER E
'F' # 0x46 -> LATIN CAPITAL LETTER F
'G' # 0x47 -> LATIN CAPITAL LETTER G
'H' # 0x48 -> LATIN CAPITAL LETTER H
'I' # 0x49 -> LATIN CAPITAL LETTER I
'J' # 0x4A -> LATIN CAPITAL LETTER J
'K' # 0x4B -> LATIN CAPITAL LETTER K
'L' # 0x4C -> LATIN CAPITAL LETTER L
'M' # 0x4D -> LATIN CAPITAL LETTER M
'N' # 0x4E -> LATIN CAPITAL LETTER N
'O' # 0x4F -> LATIN CAPITAL LETTER O
'P' # 0x50 -> LATIN CAPITAL LETTER P
'Q' # 0x51 -> LATIN CAPITAL LETTER Q
'R' # 0x52 -> LATIN CAPITAL LETTER R
'S' # 0x53 -> LATIN CAPITAL LETTER S
'T' # 0x54 -> LATIN CAPITAL LETTER T
'U' # 0x55 -> LATIN CAPITAL LETTER U
'V' # 0x56 -> LATIN CAPITAL LETTER V
'W' # 0x57 -> LATIN CAPITAL LETTER W
'X' # 0x58 -> LATIN CAPITAL LETTER X
'Y' # 0x59 -> LATIN CAPITAL LETTER Y
'Z' # 0x5A -> LATIN CAPITAL LETTER Z
'[' # 0x5B -> LEFT SQUARE BRACKET
'\\' # 0x5C -> REVERSE SOLIDUS
']' # 0x5D -> RIGHT SQUARE BRACKET
'^' # 0x5E -> CIRCUMFLEX ACCENT
'_' # 0x5F -> LOW LINE
'`' # 0x60 -> GRAVE ACCENT
'a' # 0x61 -> LATIN SMALL LETTER A
'b' # 0x62 -> LATIN SMALL LETTER B
'c' # 0x63 -> LATIN SMALL LETTER C
'd' # 0x64 -> LATIN SMALL LETTER D
'e' # 0x65 -> LATIN SMALL LETTER E
'f' # 0x66 -> LATIN SMALL LETTER F
'g' # 0x67 -> LATIN SMALL LETTER G
'h' # 0x68 -> LATIN SMALL LETTER H
'i' # 0x69 -> LATIN SMALL LETTER I
'j' # 0x6A -> LATIN SMALL LETTER J
'k' # 0x6B -> LATIN SMALL LETTER K
'l' # 0x6C -> LATIN SMALL LETTER L
'm' # 0x6D -> LATIN SMALL LETTER M
'n' # 0x6E -> LATIN SMALL LETTER N
'o' # 0x6F -> LATIN SMALL LETTER O
'p' # 0x70 -> LATIN SMALL LETTER P
'q' # 0x71 -> LATIN SMALL LETTER Q
'r' # 0x72 -> LATIN SMALL LETTER R
's' # 0x73 -> LATIN SMALL LETTER S
't' # 0x74 -> LATIN SMALL LETTER T
'u' # 0x75 -> LATIN SMALL LETTER U
'v' # 0x76 -> LATIN SMALL LETTER V
'w' # 0x77 -> LATIN SMALL LETTER W
'x' # 0x78 -> LATIN SMALL LETTER X
'y' # 0x79 -> LATIN SMALL LETTER Y
'z' # 0x7A -> LATIN SMALL LETTER Z
'{' # 0x7B -> LEFT CURLY BRACKET
'|' # 0x7C -> VERTICAL LINE
'}' # 0x7D -> RIGHT CURLY BRACKET
'~' # 0x7E -> TILDE
'\x7f' # 0x7F -> DELETE
'\u20ac' # 0x80 -> EURO SIGN
'\ufffe' # 0x81 -> UNDEFINED
'\u201a' # 0x82 -> SINGLE LOW-9 QUOTATION MARK
'\ufffe' # 0x83 -> UNDEFINED
'\u201e' # 0x84 -> DOUBLE LOW-9 QUOTATION MARK
'\u2026' # 0x85 -> HORIZONTAL ELLIPSIS
'\u2020' # 0x86 -> DAGGER
'\u2021' # 0x87 -> DOUBLE DAGGER
'\ufffe' # 0x88 -> UNDEFINED
'\u2030' # 0x89 -> PER MILLE SIGN
'\u0160' # 0x8A -> LATIN CAPITAL LETTER S WITH CARON
'\u2039' # 0x8B -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
'\u015a' # 0x8C -> LATIN CAPITAL LETTER S WITH ACUTE
'\u0164' # 0x8D -> LATIN CAPITAL LETTER T WITH CARON
'\u017d' # 0x8E -> LATIN CAPITAL LETTER Z WITH CARON
'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
'\ufffe' # 0x90 -> UNDEFINED
'\u2018' # 0x91 -> LEFT SINGLE QUOTATION MARK
'\u2019' # 0x92 -> RIGHT SINGLE QUOTATION MARK
'\u201c' # 0x93 -> LEFT DOUBLE QUOTATION MARK
'\u201d' # 0x94 -> RIGHT DOUBLE QUOTATION MARK
'\u2022' # 0x95 -> BULLET
'\u2013' # 0x96 -> EN DASH
'\u2014' # 0x97 -> EM DASH
'\ufffe' # 0x98 -> UNDEFINED
'\u2122' # 0x99 -> TRADE MARK SIGN
'\u0161' # 0x9A -> LATIN SMALL LETTER S WITH CARON
'\u203a' # 0x9B -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
'\u015b' # 0x9C -> LATIN SMALL LETTER S WITH ACUTE
'\u0165' # 0x9D -> LATIN SMALL LETTER T WITH CARON
'\u017e' # 0x9E -> LATIN SMALL LETTER Z WITH CARON
'\u017a' # 0x9F -> LATIN SMALL LETTER Z WITH ACUTE
'\xa0' # 0xA0 -> NO-BREAK SPACE
'\u02c7' # 0xA1 -> CARON
'\u02d8' # 0xA2 -> BREVE
'\u0141' # 0xA3 -> LATIN CAPITAL LETTER L WITH STROKE
'\xa4' # 0xA4 -> CURRENCY SIGN
'\u0104' # 0xA5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\xa6' # 0xA6 -> BROKEN BAR
'\xa7' # 0xA7 -> SECTION SIGN
'\xa8' # 0xA8 -> DIAERESIS
'\xa9' # 0xA9 -> COPYRIGHT SIGN
'\u015e' # 0xAA -> LATIN CAPITAL LETTER S WITH CEDILLA
'\xab' # 0xAB -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xac' # 0xAC -> NOT SIGN
'\xad' # 0xAD -> SOFT HYPHEN
'\xae' # 0xAE -> REGISTERED SIGN
'\u017b' # 0xAF -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\xb0' # 0xB0 -> DEGREE SIGN
'\xb1' # 0xB1 -> PLUS-MINUS SIGN
'\u02db' # 0xB2 -> OGONEK
'\u0142' # 0xB3 -> LATIN SMALL LETTER L WITH STROKE
'\xb4' # 0xB4 -> ACUTE ACCENT
'\xb5' # 0xB5 -> MICRO SIGN
'\xb6' # 0xB6 -> PILCROW SIGN
'\xb7' # 0xB7 -> MIDDLE DOT
'\xb8' # 0xB8 -> CEDILLA
'\u0105' # 0xB9 -> LATIN SMALL LETTER A WITH OGONEK
'\u015f' # 0xBA -> LATIN SMALL LETTER S WITH CEDILLA
'\xbb' # 0xBB -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u013d' # 0xBC -> LATIN CAPITAL LETTER L WITH CARON
'\u02dd' # 0xBD -> DOUBLE ACUTE ACCENT
'\u013e' # 0xBE -> LATIN SMALL LETTER L WITH CARON
'\u017c' # 0xBF -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u0154' # 0xC0 -> LATIN CAPITAL LETTER R WITH ACUTE
'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
'\u0102' # 0xC3 -> LATIN CAPITAL LETTER A WITH BREVE
'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\u0139' # 0xC5 -> LATIN CAPITAL LETTER L WITH ACUTE
'\u0106' # 0xC6 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
'\u010c' # 0xC8 -> LATIN CAPITAL LETTER C WITH CARON
'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
'\u0118' # 0xCA -> LATIN CAPITAL LETTER E WITH OGONEK
'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
'\u011a' # 0xCC -> LATIN CAPITAL LETTER E WITH CARON
'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
'\u010e' # 0xCF -> LATIN CAPITAL LETTER D WITH CARON
'\u0110' # 0xD0 -> LATIN CAPITAL LETTER D WITH STROKE
'\u0143' # 0xD1 -> LATIN CAPITAL LETTER N WITH ACUTE
'\u0147' # 0xD2 -> LATIN CAPITAL LETTER N WITH CARON
'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
'\u0150' # 0xD5 -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xd7' # 0xD7 -> MULTIPLICATION SIGN
'\u0158' # 0xD8 -> LATIN CAPITAL LETTER R WITH CARON
'\u016e' # 0xD9 -> LATIN CAPITAL LETTER U WITH RING ABOVE
'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
'\u0170' # 0xDB -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
'\u0162' # 0xDE -> LATIN CAPITAL LETTER T WITH CEDILLA
'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
'\u0155' # 0xE0 -> LATIN SMALL LETTER R WITH ACUTE
'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
'\u0103' # 0xE3 -> LATIN SMALL LETTER A WITH BREVE
'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u013a' # 0xE5 -> LATIN SMALL LETTER L WITH ACUTE
'\u0107' # 0xE6 -> LATIN SMALL LETTER C WITH ACUTE
'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
'\u010d' # 0xE8 -> LATIN SMALL LETTER C WITH CARON
'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
'\u0119' # 0xEA -> LATIN SMALL LETTER E WITH OGONEK
'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
'\u011b' # 0xEC -> LATIN SMALL LETTER E WITH CARON
'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
'\u010f' # 0xEF -> LATIN SMALL LETTER D WITH CARON
'\u0111' # 0xF0 -> LATIN SMALL LETTER D WITH STROKE
'\u0144' # 0xF1 -> LATIN SMALL LETTER N WITH ACUTE
'\u0148' # 0xF2 -> LATIN SMALL LETTER N WITH CARON
'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
'\u0151' # 0xF5 -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
'\xf7' # 0xF7 -> DIVISION SIGN
'\u0159' # 0xF8 -> LATIN SMALL LETTER R WITH CARON
'\u016f' # 0xF9 -> LATIN SMALL LETTER U WITH RING ABOVE
'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
'\u0171' # 0xFB -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
'\u0163' # 0xFE -> LATIN SMALL LETTER T WITH CEDILLA
'\u02d9' # 0xFF -> DOT ABOVE
)
### Encoding table
# Built by inverting the decoding table above: maps Unicode code points
# back to single cp1250 bytes.
encoding_table=codecs.charmap_build(decoding_table)
| mit |
kavi112/google-python-exercises | babynames/solution/babynames.py | 212 | 3852 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
  """
  Given a file name for baby.html, returns a list starting with the year string
  followed by the name-rank strings in alphabetical order.
  ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]
  """
  # +++your code here+++
  # LAB(begin solution)
  # The list [year, name_and_rank, name_and_rank, ...] we'll eventually return.
  names = []
  # Read the whole file at once; 'with' guarantees the handle is closed even
  # if a later step raises (the original opened it and never closed it, and
  # used the 'rU' mode string that Python 3.11 removed).
  with open(filename, 'r') as f:
    text = f.read()
  # Could process the file line-by-line, but regex on the whole text
  # at once is even easier.
  # Get the year from the "<h3 ...>Popularity in YYYY</h3>" heading.
  year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
  if not year_match:
    # We didn't find a year, so we'll exit with an error message.
    sys.stderr.write('Couldn\'t find the year!\n')
    sys.exit(1)
  names.append(year_match.group(1))
  # Extract all the data tuples with a findall();
  # each tuple is: (rank, boy-name, girl-name).
  # (The original pattern had a stray '\<' escape; '<' needs no escaping.)
  tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)
  # Map each name to its rank, keeping only the first occurrence:
  # ranks appear in increasing order in the file, so the first one seen
  # is the best (smallest) rank for that name.
  names_to_rank = {}
  for rank, boyname, girlname in tuples:
    if boyname not in names_to_rank:
      names_to_rank[boyname] = rank
    if girlname not in names_to_rank:
      names_to_rank[girlname] = rank
  # Build up the result list, one "name rank" element per name,
  # in alphabetical order of name.
  for name in sorted(names_to_rank):
    names.append(name + " " + names_to_rank[name])
  return names
  # LAB(replace solution)
  # return
  # LAB(end solution)
def main():
  """Command-line entry point.

  Usage: [--summaryfile] file [file ...]
  Prints the extracted names for each file; with --summaryfile, writes them
  to <file>.summary instead.
  """
  # This command-line parsing code is provided.
  # Make a list of command line arguments, omitting the [0] element
  # which is the script itself.
  args = sys.argv[1:]
  if not args:
    # Single-argument print(...) behaves identically under Python 2 and 3;
    # the original py2-only 'print x' statement is a SyntaxError on py3.
    print('usage: [--summaryfile] file [file ...]')
    sys.exit(1)
  # Notice the summary flag and remove it from args if it is present.
  summary = False
  if args[0] == '--summaryfile':
    summary = True
    del args[0]
  # For each filename, get the names, then either print the text output
  # or write it to a summary file.
  # LAB(begin solution)
  for filename in args:
    names = extract_names(filename)
    # Make text out of the whole list.
    text = '\n'.join(names)
    if summary:
      # 'with' closes the summary file even if the write raises
      # (the original's explicit close() could be skipped on error).
      with open(filename + '.summary', 'w') as outf:
        outf.write(text + '\n')
    else:
      print(text)
  # LAB(end solution)
if __name__ == '__main__':
  main()
| apache-2.0 |
faywong/FFPlayer | project/jni/python/src/Lib/email/parser.py | 392 | 3300 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
# Contact: email-sig@python.org
"""A parser of RFC 2822 and MIME email messages."""
__all__ = ['Parser', 'HeaderParser']
import warnings
from cStringIO import StringIO
from email.feedparser import FeedParser
from email.message import Message
class Parser:
    # Legacy (Python 2 era) email parser.  The deprecated-argument handling
    # in __init__ depends on the exact order of the checks below, so the code
    # is left untouched and only documented.
    def __init__(self, *args, **kws):
        """Parser of RFC 2822 and MIME email messages.
        Creates an in-memory object tree representing the email message, which
        can then be manipulated and turned over to a Generator to return the
        textual representation of the message.
        The string must be formatted as a block of RFC 2822 headers and header
        continuation lines, optionally preceded by a `Unix-from' header. The
        header block is terminated either by the end of the string or by a
        blank line.
        _class is the class to instantiate for new message objects when they
        must be created. This class must have a constructor that can take
        zero arguments. Default is Message.Message.
        """
        # Historical calling convention: _class and (deprecated) strict may
        # also be passed positionally.  Fold the positionals into kws,
        # rejecting any value supplied both ways.
        if len(args) >= 1:
            if '_class' in kws:
                raise TypeError("Multiple values for keyword arg '_class'")
            kws['_class'] = args[0]
        if len(args) == 2:
            if 'strict' in kws:
                raise TypeError("Multiple values for keyword arg 'strict'")
            kws['strict'] = args[1]
        if len(args) > 2:
            raise TypeError('Too many arguments')
        # Factory for new message objects; defaults to email.message.Message.
        if '_class' in kws:
            self._class = kws['_class']
            del kws['_class']
        else:
            self._class = Message
        # 'strict' is accepted for backward compatibility but ignored;
        # stacklevel 2 attributes the warning to our caller.
        if 'strict' in kws:
            warnings.warn("'strict' argument is deprecated (and ignored)",
                          DeprecationWarning, 2)
            del kws['strict']
        # Anything left over is an unknown keyword.
        if kws:
            raise TypeError('Unexpected keyword arguments')
    def parse(self, fp, headersonly=False):
        """Create a message structure from the data in a file.
        Reads all the data from the file and returns the root of the message
        structure. Optional headersonly is a flag specifying whether to stop
        parsing after reading the headers or not. The default is False,
        meaning it parses the entire contents of the file.
        """
        feedparser = FeedParser(self._class)
        if headersonly:
            feedparser._set_headersonly()
        # Feed the incremental FeedParser in 8K chunks until EOF.
        while True:
            data = fp.read(8192)
            if not data:
                break
            feedparser.feed(data)
        return feedparser.close()
    def parsestr(self, text, headersonly=False):
        """Create a message structure from a string.
        Returns the root of the message structure. Optional headersonly is a
        flag specifying whether to stop parsing after reading the headers or
        not. The default is False, meaning it parses the entire contents of
        the file.
        """
        # cStringIO adapts the string to the file interface parse() expects.
        return self.parse(StringIO(text), headersonly=headersonly)
class HeaderParser(Parser):
    """A Parser that always stops after the header block.

    Both entry points keep Parser's signature, but the headersonly flag
    passed by callers is ignored: parsing is forced to headers-only mode.
    """
    def parse(self, fp, headersonly=True):
        # Deliberately override whatever flag the caller supplied.
        headersonly = True
        return Parser.parse(self, fp, headersonly)
    def parsestr(self, text, headersonly=True):
        # Same forced headers-only behavior for string input.
        headersonly = True
        return Parser.parsestr(self, text, headersonly)
| lgpl-2.1 |
talon-one/talon_one.py | talon_one/models/campaign.py | 1 | 34622 | # coding: utf-8
"""
Talon.One API
The Talon.One API is used to manage applications and campaigns, as well as to integrate with your application. The operations in the _Integration API_ section are used to integrate with our platform, while the other operations are used to manage applications and campaigns. ### Where is the API? The API is available at the same hostname as these docs. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerProfile][] operation is `https://mycompany.talon.one/v1/customer_profiles/id` [updateCustomerProfile]: #operation--v1-customer_profiles--integrationId--put # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class Campaign(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'id': 'int',
'created': 'datetime',
'application_id': 'int',
'user_id': 'int',
'name': 'str',
'description': 'str',
'start_time': 'datetime',
'end_time': 'datetime',
'attributes': 'object',
'state': 'str',
'active_ruleset_id': 'int',
'tags': 'list[str]',
'features': 'list[str]',
'coupon_settings': 'CodeGeneratorSettings',
'referral_settings': 'CodeGeneratorSettings',
'limits': 'list[LimitConfig]',
'campaign_groups': 'list[int]',
'coupon_redemption_count': 'int',
'referral_redemption_count': 'int',
'discount_count': 'float',
'discount_effect_count': 'int',
'coupon_creation_count': 'int',
'referral_creation_count': 'int',
'created_loyalty_points_count': 'float',
'created_loyalty_points_effect_count': 'int',
'redeemed_loyalty_points_count': 'float',
'redeemed_loyalty_points_effect_count': 'int',
'last_activity': 'datetime',
'updated': 'datetime',
'created_by': 'str',
'updated_by': 'str'
}
attribute_map = {
'id': 'id',
'created': 'created',
'application_id': 'applicationId',
'user_id': 'userId',
'name': 'name',
'description': 'description',
'start_time': 'startTime',
'end_time': 'endTime',
'attributes': 'attributes',
'state': 'state',
'active_ruleset_id': 'activeRulesetId',
'tags': 'tags',
'features': 'features',
'coupon_settings': 'couponSettings',
'referral_settings': 'referralSettings',
'limits': 'limits',
'campaign_groups': 'campaignGroups',
'coupon_redemption_count': 'couponRedemptionCount',
'referral_redemption_count': 'referralRedemptionCount',
'discount_count': 'discountCount',
'discount_effect_count': 'discountEffectCount',
'coupon_creation_count': 'couponCreationCount',
'referral_creation_count': 'referralCreationCount',
'created_loyalty_points_count': 'createdLoyaltyPointsCount',
'created_loyalty_points_effect_count': 'createdLoyaltyPointsEffectCount',
'redeemed_loyalty_points_count': 'redeemedLoyaltyPointsCount',
'redeemed_loyalty_points_effect_count': 'redeemedLoyaltyPointsEffectCount',
'last_activity': 'lastActivity',
'updated': 'updated',
'created_by': 'createdBy',
'updated_by': 'updatedBy'
}
def __init__(self, id=None, created=None, application_id=None, user_id=None, name=None, description=None, start_time=None, end_time=None, attributes=None, state='enabled', active_ruleset_id=None, tags=None, features=None, coupon_settings=None, referral_settings=None, limits=None, campaign_groups=None, coupon_redemption_count=None, referral_redemption_count=None, discount_count=None, discount_effect_count=None, coupon_creation_count=None, referral_creation_count=None, created_loyalty_points_count=None, created_loyalty_points_effect_count=None, redeemed_loyalty_points_count=None, redeemed_loyalty_points_effect_count=None, last_activity=None, updated=None, created_by=None, updated_by=None, local_vars_configuration=None): # noqa: E501
"""Campaign - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._id = None
self._created = None
self._application_id = None
self._user_id = None
self._name = None
self._description = None
self._start_time = None
self._end_time = None
self._attributes = None
self._state = None
self._active_ruleset_id = None
self._tags = None
self._features = None
self._coupon_settings = None
self._referral_settings = None
self._limits = None
self._campaign_groups = None
self._coupon_redemption_count = None
self._referral_redemption_count = None
self._discount_count = None
self._discount_effect_count = None
self._coupon_creation_count = None
self._referral_creation_count = None
self._created_loyalty_points_count = None
self._created_loyalty_points_effect_count = None
self._redeemed_loyalty_points_count = None
self._redeemed_loyalty_points_effect_count = None
self._last_activity = None
self._updated = None
self._created_by = None
self._updated_by = None
self.discriminator = None
self.id = id
self.created = created
self.application_id = application_id
self.user_id = user_id
self.name = name
self.description = description
if start_time is not None:
self.start_time = start_time
if end_time is not None:
self.end_time = end_time
if attributes is not None:
self.attributes = attributes
self.state = state
if active_ruleset_id is not None:
self.active_ruleset_id = active_ruleset_id
self.tags = tags
self.features = features
if coupon_settings is not None:
self.coupon_settings = coupon_settings
if referral_settings is not None:
self.referral_settings = referral_settings
self.limits = limits
if campaign_groups is not None:
self.campaign_groups = campaign_groups
if coupon_redemption_count is not None:
self.coupon_redemption_count = coupon_redemption_count
if referral_redemption_count is not None:
self.referral_redemption_count = referral_redemption_count
if discount_count is not None:
self.discount_count = discount_count
if discount_effect_count is not None:
self.discount_effect_count = discount_effect_count
if coupon_creation_count is not None:
self.coupon_creation_count = coupon_creation_count
if referral_creation_count is not None:
self.referral_creation_count = referral_creation_count
if created_loyalty_points_count is not None:
self.created_loyalty_points_count = created_loyalty_points_count
if created_loyalty_points_effect_count is not None:
self.created_loyalty_points_effect_count = created_loyalty_points_effect_count
if redeemed_loyalty_points_count is not None:
self.redeemed_loyalty_points_count = redeemed_loyalty_points_count
if redeemed_loyalty_points_effect_count is not None:
self.redeemed_loyalty_points_effect_count = redeemed_loyalty_points_effect_count
if last_activity is not None:
self.last_activity = last_activity
if updated is not None:
self.updated = updated
if created_by is not None:
self.created_by = created_by
if updated_by is not None:
self.updated_by = updated_by
@property
def id(self):
"""Gets the id of this Campaign. # noqa: E501
Unique ID for this entity. # noqa: E501
:return: The id of this Campaign. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Campaign.
Unique ID for this entity. # noqa: E501
:param id: The id of this Campaign. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and id is None: # noqa: E501
raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501
self._id = id
@property
def created(self):
"""Gets the created of this Campaign. # noqa: E501
The exact moment this entity was created. # noqa: E501
:return: The created of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._created
@created.setter
def created(self, created):
"""Sets the created of this Campaign.
The exact moment this entity was created. # noqa: E501
:param created: The created of this Campaign. # noqa: E501
:type: datetime
"""
if self.local_vars_configuration.client_side_validation and created is None: # noqa: E501
raise ValueError("Invalid value for `created`, must not be `None`") # noqa: E501
self._created = created
@property
def application_id(self):
"""Gets the application_id of this Campaign. # noqa: E501
The ID of the application that owns this entity. # noqa: E501
:return: The application_id of this Campaign. # noqa: E501
:rtype: int
"""
return self._application_id
@application_id.setter
def application_id(self, application_id):
"""Sets the application_id of this Campaign.
The ID of the application that owns this entity. # noqa: E501
:param application_id: The application_id of this Campaign. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and application_id is None: # noqa: E501
raise ValueError("Invalid value for `application_id`, must not be `None`") # noqa: E501
self._application_id = application_id
@property
def user_id(self):
"""Gets the user_id of this Campaign. # noqa: E501
The ID of the account that owns this entity. # noqa: E501
:return: The user_id of this Campaign. # noqa: E501
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""Sets the user_id of this Campaign.
The ID of the account that owns this entity. # noqa: E501
:param user_id: The user_id of this Campaign. # noqa: E501
:type: int
"""
if self.local_vars_configuration.client_side_validation and user_id is None: # noqa: E501
raise ValueError("Invalid value for `user_id`, must not be `None`") # noqa: E501
self._user_id = user_id
@property
def name(self):
"""Gets the name of this Campaign. # noqa: E501
A friendly name for this campaign. # noqa: E501
:return: The name of this Campaign. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Campaign.
A friendly name for this campaign. # noqa: E501
:param name: The name of this Campaign. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
name is not None and len(name) < 1):
raise ValueError("Invalid value for `name`, length must be greater than or equal to `1`") # noqa: E501
self._name = name
@property
def description(self):
"""Gets the description of this Campaign. # noqa: E501
A detailed description of the campaign. # noqa: E501
:return: The description of this Campaign. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this Campaign.
A detailed description of the campaign. # noqa: E501
:param description: The description of this Campaign. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and description is None: # noqa: E501
raise ValueError("Invalid value for `description`, must not be `None`") # noqa: E501
self._description = description
@property
def start_time(self):
"""Gets the start_time of this Campaign. # noqa: E501
Datetime when the campaign will become active. # noqa: E501
:return: The start_time of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._start_time
@start_time.setter
def start_time(self, start_time):
"""Sets the start_time of this Campaign.
Datetime when the campaign will become active. # noqa: E501
:param start_time: The start_time of this Campaign. # noqa: E501
:type: datetime
"""
self._start_time = start_time
@property
def end_time(self):
"""Gets the end_time of this Campaign. # noqa: E501
Datetime when the campaign will become in-active. # noqa: E501
:return: The end_time of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._end_time
@end_time.setter
def end_time(self, end_time):
"""Sets the end_time of this Campaign.
Datetime when the campaign will become in-active. # noqa: E501
:param end_time: The end_time of this Campaign. # noqa: E501
:type: datetime
"""
self._end_time = end_time
@property
def attributes(self):
"""Gets the attributes of this Campaign. # noqa: E501
Arbitrary properties associated with this campaign # noqa: E501
:return: The attributes of this Campaign. # noqa: E501
:rtype: object
"""
return self._attributes
@attributes.setter
def attributes(self, attributes):
"""Sets the attributes of this Campaign.
Arbitrary properties associated with this campaign # noqa: E501
:param attributes: The attributes of this Campaign. # noqa: E501
:type: object
"""
self._attributes = attributes
@property
def state(self):
"""Gets the state of this Campaign. # noqa: E501
A disabled or archived campaign is not evaluated for rules or coupons. # noqa: E501
:return: The state of this Campaign. # noqa: E501
:rtype: str
"""
return self._state
@state.setter
def state(self, state):
"""Sets the state of this Campaign.
A disabled or archived campaign is not evaluated for rules or coupons. # noqa: E501
:param state: The state of this Campaign. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and state is None: # noqa: E501
raise ValueError("Invalid value for `state`, must not be `None`") # noqa: E501
allowed_values = ["enabled", "disabled", "archived"] # noqa: E501
if self.local_vars_configuration.client_side_validation and state not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `state` ({0}), must be one of {1}" # noqa: E501
.format(state, allowed_values)
)
self._state = state
@property
def active_ruleset_id(self):
"""Gets the active_ruleset_id of this Campaign. # noqa: E501
ID of Ruleset this campaign applies on customer session evaluation. # noqa: E501
:return: The active_ruleset_id of this Campaign. # noqa: E501
:rtype: int
"""
return self._active_ruleset_id
@active_ruleset_id.setter
def active_ruleset_id(self, active_ruleset_id):
"""Sets the active_ruleset_id of this Campaign.
ID of Ruleset this campaign applies on customer session evaluation. # noqa: E501
:param active_ruleset_id: The active_ruleset_id of this Campaign. # noqa: E501
:type: int
"""
self._active_ruleset_id = active_ruleset_id
@property
def tags(self):
"""Gets the tags of this Campaign. # noqa: E501
A list of tags for the campaign. # noqa: E501
:return: The tags of this Campaign. # noqa: E501
:rtype: list[str]
"""
return self._tags
@tags.setter
def tags(self, tags):
"""Sets the tags of this Campaign.
A list of tags for the campaign. # noqa: E501
:param tags: The tags of this Campaign. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and tags is None: # noqa: E501
raise ValueError("Invalid value for `tags`, must not be `None`") # noqa: E501
self._tags = tags
@property
def features(self):
"""Gets the features of this Campaign. # noqa: E501
A list of features for the campaign. # noqa: E501
:return: The features of this Campaign. # noqa: E501
:rtype: list[str]
"""
return self._features
@features.setter
def features(self, features):
"""Sets the features of this Campaign.
A list of features for the campaign. # noqa: E501
:param features: The features of this Campaign. # noqa: E501
:type: list[str]
"""
if self.local_vars_configuration.client_side_validation and features is None: # noqa: E501
raise ValueError("Invalid value for `features`, must not be `None`") # noqa: E501
allowed_values = ["coupons", "referrals", "loyalty"] # noqa: E501
if (self.local_vars_configuration.client_side_validation and
not set(features).issubset(set(allowed_values))): # noqa: E501
raise ValueError(
"Invalid values for `features` [{0}], must be a subset of [{1}]" # noqa: E501
.format(", ".join(map(str, set(features) - set(allowed_values))), # noqa: E501
", ".join(map(str, allowed_values)))
)
self._features = features
@property
def coupon_settings(self):
"""Gets the coupon_settings of this Campaign. # noqa: E501
:return: The coupon_settings of this Campaign. # noqa: E501
:rtype: CodeGeneratorSettings
"""
return self._coupon_settings
@coupon_settings.setter
def coupon_settings(self, coupon_settings):
"""Sets the coupon_settings of this Campaign.
:param coupon_settings: The coupon_settings of this Campaign. # noqa: E501
:type: CodeGeneratorSettings
"""
self._coupon_settings = coupon_settings
@property
def referral_settings(self):
"""Gets the referral_settings of this Campaign. # noqa: E501
:return: The referral_settings of this Campaign. # noqa: E501
:rtype: CodeGeneratorSettings
"""
return self._referral_settings
@referral_settings.setter
def referral_settings(self, referral_settings):
"""Sets the referral_settings of this Campaign.
:param referral_settings: The referral_settings of this Campaign. # noqa: E501
:type: CodeGeneratorSettings
"""
self._referral_settings = referral_settings
@property
def limits(self):
"""Gets the limits of this Campaign. # noqa: E501
The set of limits that will operate for this campaign # noqa: E501
:return: The limits of this Campaign. # noqa: E501
:rtype: list[LimitConfig]
"""
return self._limits
@limits.setter
def limits(self, limits):
"""Sets the limits of this Campaign.
The set of limits that will operate for this campaign # noqa: E501
:param limits: The limits of this Campaign. # noqa: E501
:type: list[LimitConfig]
"""
if self.local_vars_configuration.client_side_validation and limits is None: # noqa: E501
raise ValueError("Invalid value for `limits`, must not be `None`") # noqa: E501
self._limits = limits
@property
def campaign_groups(self):
"""Gets the campaign_groups of this Campaign. # noqa: E501
The IDs of the campaign groups that own this entity. # noqa: E501
:return: The campaign_groups of this Campaign. # noqa: E501
:rtype: list[int]
"""
return self._campaign_groups
@campaign_groups.setter
def campaign_groups(self, campaign_groups):
"""Sets the campaign_groups of this Campaign.
The IDs of the campaign groups that own this entity. # noqa: E501
:param campaign_groups: The campaign_groups of this Campaign. # noqa: E501
:type: list[int]
"""
self._campaign_groups = campaign_groups
@property
def coupon_redemption_count(self):
"""Gets the coupon_redemption_count of this Campaign. # noqa: E501
Number of coupons redeemed in the campaign. # noqa: E501
:return: The coupon_redemption_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._coupon_redemption_count
@coupon_redemption_count.setter
def coupon_redemption_count(self, coupon_redemption_count):
"""Sets the coupon_redemption_count of this Campaign.
Number of coupons redeemed in the campaign. # noqa: E501
:param coupon_redemption_count: The coupon_redemption_count of this Campaign. # noqa: E501
:type: int
"""
self._coupon_redemption_count = coupon_redemption_count
@property
def referral_redemption_count(self):
"""Gets the referral_redemption_count of this Campaign. # noqa: E501
Number of referral codes redeemed in the campaign. # noqa: E501
:return: The referral_redemption_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._referral_redemption_count
@referral_redemption_count.setter
def referral_redemption_count(self, referral_redemption_count):
"""Sets the referral_redemption_count of this Campaign.
Number of referral codes redeemed in the campaign. # noqa: E501
:param referral_redemption_count: The referral_redemption_count of this Campaign. # noqa: E501
:type: int
"""
self._referral_redemption_count = referral_redemption_count
@property
def discount_count(self):
"""Gets the discount_count of this Campaign. # noqa: E501
Total amount of discounts redeemed in the campaign. # noqa: E501
:return: The discount_count of this Campaign. # noqa: E501
:rtype: float
"""
return self._discount_count
@discount_count.setter
def discount_count(self, discount_count):
"""Sets the discount_count of this Campaign.
Total amount of discounts redeemed in the campaign. # noqa: E501
:param discount_count: The discount_count of this Campaign. # noqa: E501
:type: float
"""
self._discount_count = discount_count
@property
def discount_effect_count(self):
"""Gets the discount_effect_count of this Campaign. # noqa: E501
Total number of times discounts were redeemed in this campaign. # noqa: E501
:return: The discount_effect_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._discount_effect_count
@discount_effect_count.setter
def discount_effect_count(self, discount_effect_count):
"""Sets the discount_effect_count of this Campaign.
Total number of times discounts were redeemed in this campaign. # noqa: E501
:param discount_effect_count: The discount_effect_count of this Campaign. # noqa: E501
:type: int
"""
self._discount_effect_count = discount_effect_count
@property
def coupon_creation_count(self):
"""Gets the coupon_creation_count of this Campaign. # noqa: E501
Total number of coupons created by rules in this campaign. # noqa: E501
:return: The coupon_creation_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._coupon_creation_count
@coupon_creation_count.setter
def coupon_creation_count(self, coupon_creation_count):
"""Sets the coupon_creation_count of this Campaign.
Total number of coupons created by rules in this campaign. # noqa: E501
:param coupon_creation_count: The coupon_creation_count of this Campaign. # noqa: E501
:type: int
"""
self._coupon_creation_count = coupon_creation_count
@property
def referral_creation_count(self):
"""Gets the referral_creation_count of this Campaign. # noqa: E501
Total number of referrals created by rules in this campaign. # noqa: E501
:return: The referral_creation_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._referral_creation_count
@referral_creation_count.setter
def referral_creation_count(self, referral_creation_count):
"""Sets the referral_creation_count of this Campaign.
Total number of referrals created by rules in this campaign. # noqa: E501
:param referral_creation_count: The referral_creation_count of this Campaign. # noqa: E501
:type: int
"""
self._referral_creation_count = referral_creation_count
@property
def created_loyalty_points_count(self):
"""Gets the created_loyalty_points_count of this Campaign. # noqa: E501
Total number of loyalty points created by rules in this campaign. # noqa: E501
:return: The created_loyalty_points_count of this Campaign. # noqa: E501
:rtype: float
"""
return self._created_loyalty_points_count
@created_loyalty_points_count.setter
def created_loyalty_points_count(self, created_loyalty_points_count):
"""Sets the created_loyalty_points_count of this Campaign.
Total number of loyalty points created by rules in this campaign. # noqa: E501
:param created_loyalty_points_count: The created_loyalty_points_count of this Campaign. # noqa: E501
:type: float
"""
self._created_loyalty_points_count = created_loyalty_points_count
@property
def created_loyalty_points_effect_count(self):
"""Gets the created_loyalty_points_effect_count of this Campaign. # noqa: E501
Total number of loyalty point creation effects triggered by rules in this campaign. # noqa: E501
:return: The created_loyalty_points_effect_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._created_loyalty_points_effect_count
@created_loyalty_points_effect_count.setter
def created_loyalty_points_effect_count(self, created_loyalty_points_effect_count):
"""Sets the created_loyalty_points_effect_count of this Campaign.
Total number of loyalty point creation effects triggered by rules in this campaign. # noqa: E501
:param created_loyalty_points_effect_count: The created_loyalty_points_effect_count of this Campaign. # noqa: E501
:type: int
"""
self._created_loyalty_points_effect_count = created_loyalty_points_effect_count
@property
def redeemed_loyalty_points_count(self):
"""Gets the redeemed_loyalty_points_count of this Campaign. # noqa: E501
Total number of loyalty points redeemed by rules in this campaign. # noqa: E501
:return: The redeemed_loyalty_points_count of this Campaign. # noqa: E501
:rtype: float
"""
return self._redeemed_loyalty_points_count
@redeemed_loyalty_points_count.setter
def redeemed_loyalty_points_count(self, redeemed_loyalty_points_count):
"""Sets the redeemed_loyalty_points_count of this Campaign.
Total number of loyalty points redeemed by rules in this campaign. # noqa: E501
:param redeemed_loyalty_points_count: The redeemed_loyalty_points_count of this Campaign. # noqa: E501
:type: float
"""
self._redeemed_loyalty_points_count = redeemed_loyalty_points_count
@property
def redeemed_loyalty_points_effect_count(self):
"""Gets the redeemed_loyalty_points_effect_count of this Campaign. # noqa: E501
Total number of loyalty point redemption effects triggered by rules in this campaign. # noqa: E501
:return: The redeemed_loyalty_points_effect_count of this Campaign. # noqa: E501
:rtype: int
"""
return self._redeemed_loyalty_points_effect_count
@redeemed_loyalty_points_effect_count.setter
def redeemed_loyalty_points_effect_count(self, redeemed_loyalty_points_effect_count):
"""Sets the redeemed_loyalty_points_effect_count of this Campaign.
Total number of loyalty point redemption effects triggered by rules in this campaign. # noqa: E501
:param redeemed_loyalty_points_effect_count: The redeemed_loyalty_points_effect_count of this Campaign. # noqa: E501
:type: int
"""
self._redeemed_loyalty_points_effect_count = redeemed_loyalty_points_effect_count
@property
def last_activity(self):
"""Gets the last_activity of this Campaign. # noqa: E501
Timestamp of the most recent event received by this campaign. # noqa: E501
:return: The last_activity of this Campaign. # noqa: E501
:rtype: datetime
"""
return self._last_activity
@last_activity.setter
def last_activity(self, last_activity):
"""Sets the last_activity of this Campaign.
Timestamp of the most recent event received by this campaign. # noqa: E501
:param last_activity: The last_activity of this Campaign. # noqa: E501
:type: datetime
"""
self._last_activity = last_activity
    @property
    def updated(self):
        """Gets the updated of this Campaign.  # noqa: E501

        Timestamp of the most recent update to the campaign or any of its
        elements.

        :return: The updated of this Campaign.
        :rtype: datetime
        """
        return self._updated
    @updated.setter
    def updated(self, updated):
        """Sets the updated of this Campaign.

        Timestamp of the most recent update to the campaign or any of its
        elements.

        :param updated: The updated of this Campaign.  # noqa: E501
        :type: datetime
        """
        self._updated = updated
    @property
    def created_by(self):
        """Gets the created_by of this Campaign.  # noqa: E501

        Name of the user who created this campaign if available.

        :return: The created_by of this Campaign.
        :rtype: str
        """
        return self._created_by
    @created_by.setter
    def created_by(self, created_by):
        """Sets the created_by of this Campaign.

        Name of the user who created this campaign if available.

        :param created_by: The created_by of this Campaign.  # noqa: E501
        :type: str
        """
        self._created_by = created_by
    @property
    def updated_by(self):
        """Gets the updated_by of this Campaign.  # noqa: E501

        Name of the user who last updated this campaign if available.

        :return: The updated_by of this Campaign.
        :rtype: str
        """
        return self._updated_by
    @updated_by.setter
    def updated_by(self, updated_by):
        """Sets the updated_by of this Campaign.

        Name of the user who last updated this campaign if available.

        :param updated_by: The updated_by of this Campaign.  # noqa: E501
        :type: str
        """
        self._updated_by = updated_by
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        # Delegate to to_str() so repr and str output stay identical.
        return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Campaign):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, Campaign):
return True
return self.to_dict() != other.to_dict()
| mit |
eerwitt/tensorflow | tensorflow/contrib/graph_editor/tests/select_test.py | 109 | 6538 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.contrib.graph_editor."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib import graph_editor as ge
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops as ops_lib
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import test
class SelectTest(test.TestCase):
  """Tests for the op/tensor selection helpers in tf.contrib.graph_editor."""

  def setUp(self):
    # Build one small graph shared by every test:
    #   c = a + foo/b;  foo/bar/e = c + d;  foo/bar/f = c + d;
    #   foo/bar/g = c + a;  foo/bar/h = f + g (plus a control dep on c).
    self.graph = ops_lib.Graph()
    with self.graph.as_default():
      self.a = constant_op.constant([1., 1.], shape=[2], name="a")
      with ops_lib.name_scope("foo"):
        self.b = constant_op.constant([2., 2.], shape=[2], name="b")
        self.c = math_ops.add(self.a, self.b, name="c")
        self.d = constant_op.constant([3., 3.], shape=[2], name="d")
        with ops_lib.name_scope("bar"):
          self.e = math_ops.add(self.c, self.d, name="e")
          self.f = math_ops.add(self.c, self.d, name="f")
          self.g = math_ops.add(self.c, self.a, name="g")
          with ops_lib.control_dependencies([self.c.op]):
            self.h = math_ops.add(self.f, self.g, name="h")

  def test_regex(self):
    """Test for ge.can_be_regex and ge.make_regex."""
    self.assertTrue(ge.can_be_regex("foo"))
    self.assertTrue(ge.can_be_regex(re.compile("foo")))
    regex = re.compile("foo")
    # make_regex must return already-compiled patterns unchanged.
    self.assertIs(ge.make_regex(regex), regex)

  def test_get_input_output_ts(self):
    """Test for ge._get_input_ts and ge._get_output_ts."""
    self.assertEqual(len(ge.select._get_input_ts(self.graph)), 6)
    self.assertEqual(len(ge.select._get_output_ts(self.graph)), 8)

  def test_get_filter(self):
    """Test for various filtering operations on ts ops."""
    # TODO(fkp): parameterise
    self.assertEqual(len(ge.filter_ops(self.graph, True)), 8)
    self.assertEqual(
        len(ge.filter_ops(self.graph, lambda op: op.node_def.op == "Const")), 3)
    self.assertEqual(
        len(ge.filter_ops(self.graph, lambda op: op.node_def.op == "Add")), 5)
    self.assertEqual(
        len(ge.filter_ops_from_regex(self.graph, r"^.*\b[abc]$")), 3)
    self.assertEqual(len(ge.filter_ts(self.graph, True)), 8)
    self.assertEqual(
        len(ge.filter_ts_from_regex(self.graph, r"^.*/[fgh]:\d$")), 3)
    self.assertEqual(len(ge.get_name_scope_ops(self.graph, "foo/")), 7)
    self.assertEqual(len(ge.get_name_scope_ops(self.graph, "foo/bar")), 4)

  def test_get_ops_ios(self):
    """Test for ge.get_ops_ios."""
    # With control_ios, control-dependency edges count as inputs/outputs.
    control_outputs = ge.util.ControlOutputs(self.graph)
    self.assertEqual(
        len(ge.get_ops_ios(
            self.h.op, control_ios=control_outputs)), 3)
    self.assertEqual(len(ge.get_ops_ios(self.h.op)), 2)
    self.assertEqual(
        len(ge.get_ops_ios(
            self.c.op, control_ios=control_outputs)), 6)
    self.assertEqual(len(ge.get_ops_ios(self.c.op)), 5)

  def test_compute_boundary_ts_0(self):
    """Test for ge.compute_boundary_ts with a single op."""
    input_ts, output_ts, inside_ts = ge.compute_boundary_ts(self.g.op)
    self.assertEqual(list(input_ts), [self.c, self.a])
    self.assertEqual(list(output_ts), [self.g])
    self.assertEqual(list(inside_ts), [])

  def test_compute_boundary_ts_1(self):
    """Test for ge.compute_boundary_ts with two connected ops."""
    input_ts, output_ts, inside_ts = ge.compute_boundary_ts(
        [self.g.op, self.h.op])
    self.assertEqual(list(input_ts), [self.c, self.a, self.f])
    self.assertEqual(list(output_ts), [self.h])
    # g feeds h, so g is interior to the subgraph.
    self.assertEqual(list(inside_ts), [self.g])

  def test_compute_boundary_ts_2(self):
    """Test for ge.compute_boundary_ts with a tensor used inside and outside."""
    graph = ops_lib.Graph()
    with graph.as_default():
      a = constant_op.constant(1, name="a")
      b = constant_op.constant(1, name="b")
      c = math_ops.add(a, b, name="c")
      _ = a + c
    input_ts, output_ts, inside_ts = ge.compute_boundary_ts([a.op, c.op])
    self.assertEqual(list(input_ts), [b])
    # a is consumed both inside (by c) and outside (by the anonymous add).
    self.assertEqual(list(output_ts), [a, c])
    self.assertEqual(list(inside_ts), [a])

  def test_get_within_boundary_ops_0(self):
    """Test for get_within_boundary_ops with control dependencies."""
    control_outputs = ge.util.ControlOutputs(self.graph)
    ops = ge.get_within_boundary_ops(
        ops=self.graph,
        seed_ops=self.f.op,
        boundary_ops=[self.c.op, self.h.op],
        inclusive=False,
        control_ios=control_outputs)
    self.assertEqual(len(ops), 3)

  def test_get_within_boundary_ops_1(self):
    """Test for ge.get_within_boundary_ops with inclusive boundary."""
    ops = ge.get_within_boundary_ops(
        ops=self.graph, seed_ops=self.h.op, boundary_ops=[self.f.op, self.g.op])
    self.assertEqual(len(ops), 3)

  def test_get_walks_intersection(self):
    """Test for ge.get_walks_intersection_ops."""
    ops = ge.get_walks_intersection_ops([self.c.op], [self.g.op])
    self.assertEqual(len(ops), 2)

  def test_get_walks_union(self):
    """Test for ge.get_walks_union_ops."""
    ops = ge.get_walks_union_ops([self.f.op], [self.g.op])
    self.assertEqual(len(ops), 6)

  def test_select_ops(self):
    # (regex tuple, expected number of matching ops)
    parameters = (
        (("^foo/",), 7),
        (("^foo/bar/",), 4),
        (("^foo/bar/", "a"), 5),)
    for param, length in parameters:
      ops = ge.select_ops(*param, graph=self.graph)
      self.assertEqual(len(ops), length)

  def test_select_ts(self):
    # (regex, expected number of matching tensors)
    parameters = (
        (".*:0", 8),
        (r".*/bar/\w+:0", 4),)
    for regex, length in parameters:
      ts = ge.select_ts(regex, graph=self.graph)
      self.assertEqual(len(ts), length)

  def test_select_ops_and_ts(self):
    # "(?#ts)" prefix routes a regex to tensor selection.
    parameters = (
        (("^foo/.*",), 7, 0),
        (("^foo/.*", "(?#ts)^foo/bar/.*"), 7, 4),)
    for param, l0, l1 in parameters:
      ops, ts = ge.select_ops_and_ts(*param, graph=self.graph)
      self.assertEqual(len(ops), l0)
      self.assertEqual(len(ts), l1)
if __name__ == "__main__":
  # Run all SelectTest cases under the TensorFlow test runner.
  test.main()
| apache-2.0 |
TalShafir/ansible | lib/ansible/modules/cloud/oneandone/oneandone_firewall_policy.py | 74 | 18691 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: oneandone_firewall_policy
short_description: Configure 1&1 firewall policy.
description:
- Create, remove, reconfigure, update firewall policies.
This module has a dependency on 1and1 >= 1.0
version_added: "2.5"
options:
state:
description:
- Define a firewall policy state to create, remove, or update.
required: false
default: 'present'
choices: [ "present", "absent", "update" ]
auth_token:
description:
- Authenticating API token provided by 1&1.
required: true
api_url:
description:
- Custom API URL. Overrides the
ONEANDONE_API_URL environement variable.
required: false
name:
description:
- Firewall policy name used with present state. Used as identifier (id or name) when used with absent state.
maxLength=128
required: true
firewall_policy:
description:
- The identifier (id or name) of the firewall policy used with update state.
required: true
rules:
description:
- A list of rules that will be set for the firewall policy.
Each rule must contain protocol parameter, in addition to three optional parameters
(port_from, port_to, and source)
add_server_ips:
description:
- A list of server identifiers (id or name) to be assigned to a firewall policy.
Used in combination with update state.
required: false
remove_server_ips:
description:
- A list of server IP ids to be unassigned from a firewall policy. Used in combination with update state.
required: false
add_rules:
description:
- A list of rules that will be added to an existing firewall policy.
It is syntax is the same as the one used for rules parameter. Used in combination with update state.
required: false
remove_rules:
description:
- A list of rule ids that will be removed from an existing firewall policy. Used in combination with update state.
required: false
description:
description:
- Firewall policy description. maxLength=256
required: false
wait:
description:
- wait for the instance to be in state 'running' before returning
required: false
default: "yes"
type: bool
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
wait_interval:
description:
- Defines the number of seconds to wait when using the _wait_for methods
default: 5
requirements:
- "1and1"
- "python >= 2.6"
author:
- "Amel Ajdinovic (@aajdinov)"
- "Ethan Devenport (@edevenport)"
'''
EXAMPLES = '''
# Provisioning example. Create and destroy a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
name: ansible-firewall-policy
description: Testing creation of firewall policies with ansible
rules:
-
protocol: TCP
port_from: 80
port_to: 80
source: 0.0.0.0
wait: true
wait_timeout: 500
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
state: absent
name: ansible-firewall-policy
# Update a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
state: update
firewall_policy: ansible-firewall-policy
name: ansible-firewall-policy-updated
description: Testing creation of firewall policies with ansible - updated
# Add server to a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
add_server_ips:
- server_identifier (id or name)
- server_identifier #2 (id or name)
wait: true
wait_timeout: 500
state: update
# Remove server from a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_server_ips:
- B2504878540DBC5F7634EB00A07C1EBD (server's IP id)
wait: true
wait_timeout: 500
state: update
# Add rules to a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
description: Adding rules to an existing firewall policy
add_rules:
-
protocol: TCP
port_from: 70
port_to: 70
source: 0.0.0.0
-
protocol: TCP
port_from: 60
port_to: 60
source: 0.0.0.0
wait: true
wait_timeout: 500
state: update
# Remove rules from a firewall policy.
- oneandone_firewall_policy:
auth_token: oneandone_private_api_key
firewall_policy: ansible-firewall-policy-updated
remove_rules:
- rule_id #1
- rule_id #2
- ...
wait: true
wait_timeout: 500
state: update
'''
RETURN = '''
firewall_policy:
description: Information about the firewall policy that was processed
type: dict
sample: '{"id": "92B74394A397ECC3359825C1656D67A6", "name": "Default Policy"}'
returned: always
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.oneandone import (
get_firewall_policy,
get_server,
OneAndOneResources,
wait_for_resource_creation_completion
)
# Optional dependency probe: main() fails with a clear message when the
# 1and1 SDK is not installed.
HAS_ONEANDONE_SDK = True

try:
    import oneandone.client
except ImportError:
    HAS_ONEANDONE_SDK = False
def _check_mode(module, result):
    """In check mode, exit immediately reporting *result* as the changed flag.

    Outside check mode this is a no-op.
    """
    if not module.check_mode:
        return
    module.exit_json(changed=result)
def _add_server_ips(module, oneandone_conn, firewall_id, server_ids):
    """
    Assigns servers to a firewall policy.

    module: AnsibleModule object (check-mode flag, error reporting)
    oneandone_conn: authenticated oneandone object
    firewall_id: id of the firewall policy to attach servers to
    server_ids: list of server identifiers (id or name); each server's
        first IP is attached to the policy
    """
    try:
        attach_servers = []
        for _server_id in server_ids:
            server = get_server(oneandone_conn, _server_id, True)
            # Attach the server's first IP.
            # NOTE(review): if the server has no IPs, next(..., None) yields
            # None and None['id'] raises TypeError (surfaced via fail_json
            # by the blanket handler below) — confirm intended.
            attach_server = oneandone.client.AttachServer(
                server_id=server['id'],
                server_ip_id=next(iter(server['ips'] or []), None)['id']
            )
            attach_servers.append(attach_server)

        if module.check_mode:
            # Report whether anything would change, without calling the API.
            if attach_servers:
                return True
            return False

        firewall_policy = oneandone_conn.attach_server_firewall_policy(
            firewall_id=firewall_id,
            server_ips=attach_servers)
        return firewall_policy
    except Exception as e:
        module.fail_json(msg=str(e))
def _remove_firewall_server(module, oneandone_conn, firewall_id, server_ip_id):
    """
    Unassigns a server/IP from a firewall policy.

    module: AnsibleModule object
    oneandone_conn: authenticated oneandone object
    firewall_id: id of the firewall policy
    server_ip_id: id of the server IP to detach
    """
    try:
        if module.check_mode:
            # Only report whether the IP is currently attached.
            firewall_server = oneandone_conn.get_firewall_server(
                firewall_id=firewall_id,
                server_ip_id=server_ip_id)
            if firewall_server:
                return True
            return False

        firewall_policy = oneandone_conn.remove_firewall_server(
            firewall_id=firewall_id,
            server_ip_id=server_ip_id)
        return firewall_policy
    except Exception as e:
        module.fail_json(msg=str(e))
def _add_firewall_rules(module, oneandone_conn, firewall_id, rules):
    """
    Adds new rules to a firewall policy.

    module: AnsibleModule object
    oneandone_conn: authenticated oneandone object
    firewall_id: id of the firewall policy to extend
    rules: list of rule dicts; 'protocol' is required while 'port_from',
        'port_to' and 'source' are optional (per the module DOCUMENTATION)
    """
    try:
        firewall_rules = []
        for rule in rules:
            # Read the optional keys with .get() so that a rule omitting
            # 'port_from', 'port_to' or 'source' does not raise KeyError.
            firewall_rule = oneandone.client.FirewallPolicyRule(
                protocol=rule['protocol'],
                port_from=rule.get('port_from'),
                port_to=rule.get('port_to'),
                source=rule.get('source'))
            firewall_rules.append(firewall_rule)

        if module.check_mode:
            # Would change only if there are rules and the policy exists.
            firewall_policy_id = get_firewall_policy(oneandone_conn, firewall_id)
            if (firewall_rules and firewall_policy_id):
                return True
            return False

        firewall_policy = oneandone_conn.add_firewall_policy_rule(
            firewall_id=firewall_id,
            firewall_policy_rules=firewall_rules
        )
        return firewall_policy
    except Exception as e:
        module.fail_json(msg=str(e))
def _remove_firewall_rule(module, oneandone_conn, firewall_id, rule_id):
    """
    Removes a rule from a firewall policy.

    module: AnsibleModule object
    oneandone_conn: authenticated oneandone object
    firewall_id: id of the firewall policy
    rule_id: id of the rule to remove
    """
    try:
        if module.check_mode:
            # Only report whether the rule currently exists.
            rule = oneandone_conn.get_firewall_policy_rule(
                firewall_id=firewall_id,
                rule_id=rule_id)
            if rule:
                return True
            return False

        firewall_policy = oneandone_conn.remove_firewall_rule(
            firewall_id=firewall_id,
            rule_id=rule_id
        )
        return firewall_policy
    except Exception as e:
        module.fail_json(msg=str(e))
def update_firewall_policy(module, oneandone_conn):
    """
    Updates a firewall policy based on input arguments.
    Firewall rules and server ips can be added/removed to/from
    firewall policy. Firewall policy name and description can be
    updated as well.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a (changed, firewall_policy) tuple.
    """
    try:
        firewall_policy_id = module.params.get('firewall_policy')
        name = module.params.get('name')
        description = module.params.get('description')
        add_server_ips = module.params.get('add_server_ips')
        remove_server_ips = module.params.get('remove_server_ips')
        add_rules = module.params.get('add_rules')
        remove_rules = module.params.get('remove_rules')

        changed = False

        firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy_id, True)
        if firewall_policy is None:
            # In check mode this exits reporting "no change".
            # NOTE(review): outside check mode execution continues with
            # firewall_policy = None, and firewall_policy['id'] below raises
            # TypeError (caught by the blanket handler) — confirm intended.
            _check_mode(module, False)

        if name or description:
            _check_mode(module, True)
            firewall_policy = oneandone_conn.modify_firewall(
                firewall_id=firewall_policy['id'],
                name=name,
                description=description)
            changed = True

        if add_server_ips:
            if module.check_mode:
                # Dry run: _add_server_ips reports whether anything would change.
                _check_mode(module, _add_server_ips(module,
                                                    oneandone_conn,
                                                    firewall_policy['id'],
                                                    add_server_ips))

            firewall_policy = _add_server_ips(module, oneandone_conn, firewall_policy['id'], add_server_ips)
            changed = True

        if remove_server_ips:
            chk_changed = False
            for server_ip_id in remove_server_ips:
                if module.check_mode:
                    # Accumulate whether any detach would change state.
                    chk_changed |= _remove_firewall_server(module,
                                                           oneandone_conn,
                                                           firewall_policy['id'],
                                                           server_ip_id)

                _remove_firewall_server(module,
                                        oneandone_conn,
                                        firewall_policy['id'],
                                        server_ip_id)
            _check_mode(module, chk_changed)
            # Refresh the policy after the detachments.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
            changed = True

        if add_rules:
            firewall_policy = _add_firewall_rules(module,
                                                  oneandone_conn,
                                                  firewall_policy['id'],
                                                  add_rules)
            # In check mode _add_firewall_rules returned a boolean.
            _check_mode(module, firewall_policy)
            changed = True

        if remove_rules:
            chk_changed = False
            for rule_id in remove_rules:
                if module.check_mode:
                    chk_changed |= _remove_firewall_rule(module,
                                                         oneandone_conn,
                                                         firewall_policy['id'],
                                                         rule_id)

                _remove_firewall_rule(module,
                                      oneandone_conn,
                                      firewall_policy['id'],
                                      rule_id)
            _check_mode(module, chk_changed)
            # Refresh the policy after the rule removals.
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)
            changed = True

        return (changed, firewall_policy)
    except Exception as e:
        module.fail_json(msg=str(e))
def create_firewall_policy(module, oneandone_conn):
    """
    Create a new firewall policy.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a (changed, firewall_policy) tuple.
    """
    try:
        name = module.params.get('name')
        description = module.params.get('description')
        rules = module.params.get('rules')
        wait = module.params.get('wait')
        wait_timeout = module.params.get('wait_timeout')
        wait_interval = module.params.get('wait_interval')

        firewall_rules = []
        for rule in rules:
            # 'protocol' is mandatory; the remaining rule keys are optional
            # (per the module DOCUMENTATION), so read them with .get() to
            # avoid KeyError on sparse rule dicts.
            firewall_rule = oneandone.client.FirewallPolicyRule(
                protocol=rule['protocol'],
                port_from=rule.get('port_from'),
                port_to=rule.get('port_to'),
                source=rule.get('source'))
            firewall_rules.append(firewall_rule)

        firewall_policy_obj = oneandone.client.FirewallPolicy(
            name=name,
            description=description
        )

        # In check mode, report that a policy would have been created.
        _check_mode(module, True)
        firewall_policy = oneandone_conn.create_firewall_policy(
            firewall_policy=firewall_policy_obj,
            firewall_policy_rules=firewall_rules
        )

        if wait:
            wait_for_resource_creation_completion(
                oneandone_conn,
                OneAndOneResources.firewall_policy,
                firewall_policy['id'],
                wait_timeout,
                wait_interval)
            firewall_policy = get_firewall_policy(oneandone_conn, firewall_policy['id'], True)  # refresh

        changed = True if firewall_policy else False

        _check_mode(module, False)
        return (changed, firewall_policy)
    except Exception as e:
        module.fail_json(msg=str(e))
def remove_firewall_policy(module, oneandone_conn):
    """
    Removes a firewall policy.

    module : AnsibleModule object
    oneandone_conn: authenticated oneandone object

    Returns a (changed, {'id', 'name'}) tuple.
    """
    try:
        fp_id = module.params.get('name')
        # NOTE(review): despite the name, get_firewall_policy appears to
        # return the policy object (not just an id) — verify against the
        # module_utils helper.
        firewall_policy_id = get_firewall_policy(oneandone_conn, fp_id)
        if module.check_mode:
            if firewall_policy_id is None:
                _check_mode(module, False)
            _check_mode(module, True)
        firewall_policy = oneandone_conn.delete_firewall(firewall_policy_id)

        changed = True if firewall_policy else False

        return (changed, {
            'id': firewall_policy['id'],
            'name': firewall_policy['name']
        })
    except Exception as e:
        module.fail_json(msg=str(e))
def main():
    """Module entry point: parse parameters and dispatch on 'state'."""
    module = AnsibleModule(
        argument_spec=dict(
            # Credentials fall back to environment variables.
            auth_token=dict(
                type='str',
                default=os.environ.get('ONEANDONE_AUTH_TOKEN')),
            api_url=dict(
                type='str',
                default=os.environ.get('ONEANDONE_API_URL')),
            name=dict(type='str'),
            firewall_policy=dict(type='str'),
            description=dict(type='str'),
            rules=dict(type='list', default=[]),
            add_server_ips=dict(type='list', default=[]),
            remove_server_ips=dict(type='list', default=[]),
            add_rules=dict(type='list', default=[]),
            remove_rules=dict(type='list', default=[]),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=600),
            wait_interval=dict(type='int', default=5),
            state=dict(type='str', default='present', choices=['present', 'absent', 'update']),
        ),
        supports_check_mode=True
    )

    if not HAS_ONEANDONE_SDK:
        module.fail_json(msg='1and1 required for this module')

    if not module.params.get('auth_token'):
        module.fail_json(
            msg='The "auth_token" parameter or ' +
                'ONEANDONE_AUTH_TOKEN environment variable is required.')

    if not module.params.get('api_url'):
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get('auth_token'))
    else:
        oneandone_conn = oneandone.client.OneAndOneService(
            api_token=module.params.get('auth_token'), api_url=module.params.get('api_url'))

    state = module.params.get('state')

    # Dispatch: each state validates its own required parameters first.
    if state == 'absent':
        if not module.params.get('name'):
            module.fail_json(
                msg="'name' parameter is required to delete a firewall policy.")
        try:
            (changed, firewall_policy) = remove_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == 'update':
        if not module.params.get('firewall_policy'):
            module.fail_json(
                msg="'firewall_policy' parameter is required to update a firewall policy.")
        try:
            (changed, firewall_policy) = update_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))
    elif state == 'present':
        for param in ('name', 'rules'):
            if not module.params.get(param):
                module.fail_json(
                    msg="%s parameter is required for new firewall policies." % param)
        try:
            (changed, firewall_policy) = create_firewall_policy(module, oneandone_conn)
        except Exception as e:
            module.fail_json(msg=str(e))

    module.exit_json(changed=changed, firewall_policy=firewall_policy)
if __name__ == '__main__':
    # Standard Ansible module entry point.
    main()
| gpl-3.0 |
gmist/fix-5studio | main/model/user.py | 8 | 3029 | # coding: utf-8
from __future__ import absolute_import
import hashlib
from google.appengine.ext import ndb
from webargs.flaskparser import parser
from webargs import fields as wf
from api import fields
import model
import util
import config
class User(model.Base):
    """Datastore (ndb) model for an application user account."""

    name = ndb.StringProperty(required=True)
    username = ndb.StringProperty(required=True)
    email = ndb.StringProperty(default='')
    # Third-party auth identifiers, e.g. 'facebook_<fbid>'.
    auth_ids = ndb.StringProperty(repeated=True)
    active = ndb.BooleanProperty(default=True)
    admin = ndb.BooleanProperty(default=False)
    permissions = ndb.StringProperty(repeated=True)
    verified = ndb.BooleanProperty(default=False)
    token = ndb.StringProperty(default='')
    password_hash = ndb.StringProperty(default='')

    def has_permission(self, perm):
        # Admins implicitly hold every permission.
        return self.admin or perm in self.permissions

    def has_facebook(self):
        # Return the first Facebook auth id ('facebook_<fbid>'), or None.
        for auth_id in self.auth_ids:
            if auth_id.startswith('facebook'):
                return auth_id
        return None

    def avatar_url_size(self, size=None):
        # Prefer the Facebook profile picture; fall back to Gravatar.
        # NOTE(review): 'size > 0' with size=None relies on Python 2
        # comparison semantics; under Python 3 it would raise TypeError.
        facebook_id = self.has_facebook()
        if facebook_id:
            return '//graph.facebook.com/%(id)s/picture%(size)s' % {
                'id': facebook_id.split('_')[1],
                'size': '?width=%s&height=%s' % (size, size) if size else '',
            }
        return '//gravatar.com/avatar/%(hash)s?d=identicon&r=x%(size)s' % {
            'hash': hashlib.md5(
                (self.email or self.username).encode('utf-8')).hexdigest(),
            'size': '&s=%d' % size if size > 0 else '',
        }

    avatar_url = property(avatar_url_size)

    @classmethod
    def get_dbs(
        cls, admin=None, active=None, verified=None, permissions=None, **kwargs
    ):
        # Explicit arguments win; otherwise fall back to request query args.
        args = parser.parse({
            'admin': wf.Bool(missing=None),
            'active': wf.Bool(missing=None),
            'verified': wf.Bool(missing=None),
            'permissions': wf.DelimitedList(wf.Str(), delimiter=',', missing=[]),
        })
        return super(User, cls).get_dbs(
            admin=admin or args['admin'],
            active=active or args['active'],
            verified=verified or args['verified'],
            permissions=permissions or args['permissions'],
            **kwargs
        )

    @classmethod
    def is_username_available(cls, username, self_key=None):
        # With self_key the username is also "available" if it belongs to
        # that very entity (i.e. the user keeps their own name).
        if self_key is None:
            return cls.get_by('username', username) is None
        user_keys, _ = util.get_keys(cls.query(), username=username, limit=2)
        return not user_keys or self_key in user_keys and not user_keys[1:]

    @classmethod
    def is_email_available(cls, email, self_key=None):
        # Uniqueness is only enforced when the config flag is on, and only
        # against verified accounts.
        if not config.CONFIG_DB.check_unique_email:
            return True
        user_keys, _ = util.get_keys(
            cls.query(), email=email, verified=True, limit=2,
        )
        return not user_keys or self_key in user_keys and not user_keys[1:]

    # API serialization fields (extends the base model's field map).
    FIELDS = {
        'active': fields.Boolean,
        'admin': fields.Boolean,
        'auth_ids': fields.List(fields.String),
        'avatar_url': fields.String,
        'email': fields.String,
        'name': fields.String,
        'permissions': fields.List(fields.String),
        'username': fields.String,
        'verified': fields.Boolean,
    }

    FIELDS.update(model.Base.FIELDS)
| mit |
jyotikamboj/container | django/core/handlers/wsgi.py | 20 | 9604 | from __future__ import unicode_literals
import cgi
import codecs
import logging
import sys
from io import BytesIO
from threading import Lock
import warnings
from django import http
from django.conf import settings
from django.core import signals
from django.core.handlers import base
from django.core.urlresolvers import set_script_prefix
from django.utils import datastructures
from django.utils.deprecation import RemovedInDjango19Warning
from django.utils.encoding import force_str, force_text
from django.utils.functional import cached_property
from django.utils import six
# For backwards compatibility -- lots of code uses this in the wild!
from django.http.response import REASON_PHRASES as STATUS_CODE_TEXT # NOQA
logger = logging.getLogger('django.request')
# encode() and decode() expect the charset to be a native string.
ISO_8859_1, UTF_8 = str('iso-8859-1'), str('utf-8')
class LimitedStream(object):
    '''
    Wrap a stream so that no more than ``limit`` bytes can ever be read
    from it. Bytes pulled from the underlying stream but not yet returned
    to the caller are held in an internal buffer.
    '''
    def __init__(self, stream, limit, buf_size=64 * 1024 * 1024):
        self.stream = stream
        self.remaining = limit
        self.buffer = b''
        self.buf_size = buf_size

    def _read_limited(self, size=None):
        # Clamp the request to the number of bytes still allowed.
        if size is None or size > self.remaining:
            size = self.remaining
        if not size:
            return b''
        data = self.stream.read(size)
        self.remaining -= len(data)
        return data

    def read(self, size=None):
        if size is None:
            # Drain the buffer plus everything still permitted.
            data, self.buffer = self.buffer + self._read_limited(), b''
            return data
        if size < len(self.buffer):
            # Serve entirely from the buffer.
            data, self.buffer = self.buffer[:size], self.buffer[size:]
            return data
        # Buffer is too small: consume it and top up from the stream.
        extra = self._read_limited(size - len(self.buffer))
        data, self.buffer = self.buffer + extra, b''
        return data

    def readline(self, size=None):
        # Pull chunks until a newline shows up in the buffer, the size cap
        # is reached, or the stream limit is exhausted.
        while b'\n' not in self.buffer and \
                (size is None or len(self.buffer) < size):
            if size:
                chunk = self._read_limited(size - len(self.buffer))
            else:
                chunk = self._read_limited()
            if not chunk:
                break
            self.buffer += chunk
        sio = BytesIO(self.buffer)
        line = sio.readline(size) if size else sio.readline()
        self.buffer = sio.read()
        return line
class WSGIRequest(http.HttpRequest):
    """HttpRequest subclass populated from a WSGI environ dictionary."""

    def __init__(self, environ):
        script_name = get_script_name(environ)
        path_info = get_path_info(environ)
        if not path_info:
            # Sometimes PATH_INFO exists, but is empty (e.g. accessing
            # the SCRIPT_NAME URL without a trailing slash). We really need to
            # operate as if they'd requested '/'. Not amazingly nice to force
            # the path like this, but should be harmless.
            path_info = '/'
        self.environ = environ
        self.path_info = path_info
        # be careful to only replace the first slash in the path because of
        # http://test/something and http://test//something being different as
        # stated in http://www.ietf.org/rfc/rfc2396.txt
        self.path = '%s/%s' % (script_name.rstrip('/'),
                               path_info.replace('/', '', 1))
        self.META = environ
        self.META['PATH_INFO'] = path_info
        self.META['SCRIPT_NAME'] = script_name
        self.method = environ['REQUEST_METHOD'].upper()
        # Honor an explicit request charset, but only if codecs knows it.
        _, content_params = cgi.parse_header(environ.get('CONTENT_TYPE', ''))
        if 'charset' in content_params:
            try:
                codecs.lookup(content_params['charset'])
            except LookupError:
                pass
            else:
                self.encoding = content_params['charset']
        self._post_parse_error = False
        try:
            content_length = int(environ.get('CONTENT_LENGTH'))
        except (ValueError, TypeError):
            # Missing or malformed CONTENT_LENGTH means an empty body.
            content_length = 0
        self._stream = LimitedStream(self.environ['wsgi.input'], content_length)
        self._read_started = False
        self.resolver_match = None

    def _get_scheme(self):
        # 'http' or 'https', as reported by the WSGI server.
        return self.environ.get('wsgi.url_scheme')

    def _get_request(self):
        # Deprecated merged GET/POST accessor (scheduled for Django 1.9).
        warnings.warn('`request.REQUEST` is deprecated, use `request.GET` or '
                      '`request.POST` instead.', RemovedInDjango19Warning, 2)
        if not hasattr(self, '_request'):
            self._request = datastructures.MergeDict(self.POST, self.GET)
        return self._request

    @cached_property
    def GET(self):
        # The WSGI spec says 'QUERY_STRING' may be absent.
        raw_query_string = get_bytes_from_wsgi(self.environ, 'QUERY_STRING', '')
        return http.QueryDict(raw_query_string, encoding=self._encoding)

    def _get_post(self):
        # Lazily parse the request body on first access.
        if not hasattr(self, '_post'):
            self._load_post_and_files()
        return self._post

    def _set_post(self, post):
        self._post = post

    @cached_property
    def COOKIES(self):
        raw_cookie = get_str_from_wsgi(self.environ, 'HTTP_COOKIE', '')
        return http.parse_cookie(raw_cookie)

    def _get_files(self):
        if not hasattr(self, '_files'):
            self._load_post_and_files()
        return self._files

    POST = property(_get_post, _set_post)
    FILES = property(_get_files)
    REQUEST = property(_get_request)
class WSGIHandler(base.BaseHandler):
    """Django's WSGI application callable."""

    # Guards one-time, lazy middleware initialization across threads.
    initLock = Lock()
    request_class = WSGIRequest

    def __call__(self, environ, start_response):
        # Set up middleware if needed. We couldn't do this earlier, because
        # settings weren't available.
        if self._request_middleware is None:
            with self.initLock:
                try:
                    # Check that middleware is still uninitialized.
                    if self._request_middleware is None:
                        self.load_middleware()
                except:
                    # Unload whatever middleware we got (bare except is
                    # intentional: reset state for ANY failure, then re-raise).
                    self._request_middleware = None
                    raise
        set_script_prefix(get_script_name(environ))
        signals.request_started.send(sender=self.__class__, environ=environ)
        try:
            request = self.request_class(environ)
        except UnicodeDecodeError:
            # Undecodable request data: answer 400 instead of crashing.
            logger.warning('Bad Request (UnicodeDecodeError)',
                exc_info=sys.exc_info(),
                extra={
                    'status_code': 400,
                }
            )
            response = http.HttpResponseBadRequest()
        else:
            response = self.get_response(request)

        response._handler_class = self.__class__

        # Translate the HttpResponse into the WSGI status/headers protocol.
        status = '%s %s' % (response.status_code, response.reason_phrase)
        response_headers = [(str(k), str(v)) for k, v in response.items()]
        for c in response.cookies.values():
            response_headers.append((str('Set-Cookie'), str(c.output(header=''))))
        start_response(force_str(status), response_headers)
        return response
def get_path_info(environ):
    """
    Returns the HTTP request's PATH_INFO as a unicode string (UTF-8 decoded).
    """
    return get_bytes_from_wsgi(environ, 'PATH_INFO', '/').decode(UTF_8)
def get_script_name(environ):
    """
    Returns the equivalent of the HTTP request's SCRIPT_NAME environment
    variable. If Apache mod_rewrite has been used, returns what would have been
    the script name prior to any rewriting (so it's the script name as seen
    from the client's perspective), unless the FORCE_SCRIPT_NAME setting is
    set (to anything).
    """
    if settings.FORCE_SCRIPT_NAME is not None:
        return force_text(settings.FORCE_SCRIPT_NAME)

    # If Apache's mod_rewrite had a whack at the URL, Apache set either
    # SCRIPT_URL or REDIRECT_URL to the full resource URL before applying any
    # rewrites. Unfortunately not every Web server (lighttpd!) passes this
    # information through all the time, so FORCE_SCRIPT_NAME, above, is still
    # needed.
    script_url = get_bytes_from_wsgi(environ, 'SCRIPT_URL', '')
    if not script_url:
        script_url = get_bytes_from_wsgi(environ, 'REDIRECT_URL', '')

    if script_url:
        path_info = get_bytes_from_wsgi(environ, 'PATH_INFO', '')
        # Fix: with an empty PATH_INFO, script_url[:-0] == script_url[:0],
        # which wrongly yields b''. The whole SCRIPT_URL is the script name
        # in that case.
        script_name = script_url[:-len(path_info)] if path_info else script_url
    else:
        script_name = get_bytes_from_wsgi(environ, 'SCRIPT_NAME', '')

    return script_name.decode(UTF_8)
def get_bytes_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as bytes.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    value = environ.get(str(key), str(default))
    # Under Python 3, non-ASCII values in the WSGI environ are arbitrarily
    # decoded with ISO-8859-1. This is wrong for Django websites where UTF-8
    # is the default. Re-encode to recover the original bytestring.
    return value.encode(ISO_8859_1) if six.PY3 else value
def get_str_from_wsgi(environ, key, default):
    """
    Get a value from the WSGI environ dictionary as str.

    key and default should be str objects. Under Python 2 they may also be
    unicode objects provided they only contain ASCII characters.
    """
    raw = get_bytes_from_wsgi(environ, key, default)
    if six.PY3:
        # The bytes were recovered via ISO-8859-1; reinterpret them as
        # UTF-8, substituting the replacement character where needed.
        return raw.decode(UTF_8, errors='replace')
    return raw
| mit |
google/grr | grr/server/grr_response_server/gui/api_call_router_without_checks.py | 1 | 17763 | #!/usr/bin/env python
"""Implementation of a router class that does no ACL checks."""
from typing import Optional
from grr_response_server.gui import api_call_context
from grr_response_server.gui import api_call_router
from grr_response_server.gui.api_plugins import artifact as api_artifact
from grr_response_server.gui.api_plugins import client as api_client
from grr_response_server.gui.api_plugins import config as api_config
from grr_response_server.gui.api_plugins import cron as api_cron
from grr_response_server.gui.api_plugins import flow as api_flow
from grr_response_server.gui.api_plugins import hunt as api_hunt
from grr_response_server.gui.api_plugins import metadata as api_metadata
from grr_response_server.gui.api_plugins import osquery as api_osquery
from grr_response_server.gui.api_plugins import output_plugin as api_output_plugin
from grr_response_server.gui.api_plugins import reflection as api_reflection
from grr_response_server.gui.api_plugins import stats as api_stats
from grr_response_server.gui.api_plugins import timeline as api_timeline
from grr_response_server.gui.api_plugins import user as api_user
from grr_response_server.gui.api_plugins import vfs as api_vfs
from grr_response_server.gui.api_plugins import yara as api_yara
class ApiCallRouterWithoutChecks(api_call_router.ApiCallRouterStub):
  """Router that does no ACL checks whatsoever.

  Each method maps one API call directly to its handler without any
  authorization, so this router must only be used where access control
  is enforced elsewhere (or deliberately not required).
  """

  # Artifacts methods.
  # =================
  #
  def ListArtifacts(self, args, context=None):
    return api_artifact.ApiListArtifactsHandler()

  def UploadArtifact(self, args, context=None):
    return api_artifact.ApiUploadArtifactHandler()

  def DeleteArtifacts(self, args, context=None):
    return api_artifact.ApiDeleteArtifactsHandler()

  # Clients methods.
  # ===============
  #
  def SearchClients(self, args, context=None):
    return api_client.ApiSearchClientsHandler()

  def VerifyAccess(self, args, context=None):
    return api_client.ApiVerifyAccessHandler()

  def GetClient(self, args, context=None):
    return api_client.ApiGetClientHandler()

  def GetClientVersions(self, args, context=None):
    return api_client.ApiGetClientVersionsHandler()

  def GetClientVersionTimes(self, args, context=None):
    return api_client.ApiGetClientVersionTimesHandler()

  def InterrogateClient(self, args, context=None):
    return api_client.ApiInterrogateClientHandler()

  def GetInterrogateOperationState(self, args, context=None):
    return api_client.ApiGetInterrogateOperationStateHandler()

  def GetLastClientIPAddress(self, args, context=None):
    return api_client.ApiGetLastClientIPAddressHandler()

  def ListClientCrashes(self, args, context=None):
    return api_client.ApiListClientCrashesHandler()

  def ListClientActionRequests(self, args, context=None):
    return api_client.ApiListClientActionRequestsHandler()

  def GetClientLoadStats(self, args, context=None):
    return api_client.ApiGetClientLoadStatsHandler()

  def KillFleetspeak(
      self,
      args: api_client.ApiKillFleetspeakArgs,
      context: Optional[api_call_context.ApiCallContext] = None
  ) -> api_client.ApiKillFleetspeakHandler:
    return api_client.ApiKillFleetspeakHandler()

  def RestartFleetspeakGrrService(
      self,
      args: api_client.ApiRestartFleetspeakGrrServiceArgs,
      context: Optional[api_call_context.ApiCallContext] = None
  ) -> api_client.ApiRestartFleetspeakGrrServiceHandler:
    return api_client.ApiRestartFleetspeakGrrServiceHandler()

  def DeleteFleetspeakPendingMessages(
      self,
      args: api_client.ApiDeleteFleetspeakPendingMessagesArgs,
      context: Optional[api_call_context.ApiCallContext] = None
  ) -> api_client.ApiDeleteFleetspeakPendingMessagesHandler:
    return api_client.ApiDeleteFleetspeakPendingMessagesHandler()

  def GetFleetspeakPendingMessages(
      self,
      args: api_client.ApiGetFleetspeakPendingMessagesArgs,
      context: Optional[api_call_context.ApiCallContext] = None
  ) -> api_client.ApiGetFleetspeakPendingMessagesHandler:
    return api_client.ApiGetFleetspeakPendingMessagesHandler()

  def GetFleetspeakPendingMessageCount(
      self,
      args: api_client.ApiGetFleetspeakPendingMessageCountArgs,
      context: Optional[api_call_context.ApiCallContext] = None
  ) -> api_client.ApiGetFleetspeakPendingMessageCountHandler:
    return api_client.ApiGetFleetspeakPendingMessageCountHandler()

  # Virtual file system methods.
  # ============================
  #
  def ListFiles(self, args, context=None):
    return api_vfs.ApiListFilesHandler()

  def GetVfsFilesArchive(self, args, context=None):
    return api_vfs.ApiGetVfsFilesArchiveHandler()

  def GetFileDetails(self, args, context=None):
    return api_vfs.ApiGetFileDetailsHandler()

  def GetFileText(self, args, context=None):
    return api_vfs.ApiGetFileTextHandler()

  def GetFileBlob(self, args, context=None):
    return api_vfs.ApiGetFileBlobHandler()

  def GetFileVersionTimes(self, args, context=None):
    return api_vfs.ApiGetFileVersionTimesHandler()

  def GetFileDownloadCommand(self, args, context=None):
    return api_vfs.ApiGetFileDownloadCommandHandler()

  def CreateVfsRefreshOperation(self, args, context=None):
    return api_vfs.ApiCreateVfsRefreshOperationHandler()

  def GetVfsRefreshOperationState(self, args, context=None):
    return api_vfs.ApiGetVfsRefreshOperationStateHandler()

  def GetVfsTimeline(self, args, context=None):
    return api_vfs.ApiGetVfsTimelineHandler()

  def GetVfsTimelineAsCsv(self, args, context=None):
    return api_vfs.ApiGetVfsTimelineAsCsvHandler()

  def UpdateVfsFileContent(self, args, context=None):
    return api_vfs.ApiUpdateVfsFileContentHandler()

  def GetVfsFileContentUpdateState(self, args, context=None):
    return api_vfs.ApiGetVfsFileContentUpdateStateHandler()

  def GetFileDecoders(self, args, context=None):
    return api_vfs.ApiGetFileDecodersHandler()

  def GetDecodedFileBlob(self, args, context=None):
    return api_vfs.ApiGetDecodedFileHandler()

  # Clients labels methods.
  # ======================
  #
  def ListClientsLabels(self, args, context=None):
    return api_client.ApiListClientsLabelsHandler()

  def AddClientsLabels(self, args, context=None):
    return api_client.ApiAddClientsLabelsHandler()

  def RemoveClientsLabels(self, args, context=None):
    return api_client.ApiRemoveClientsLabelsHandler()

  # Clients flows methods.
  # =====================
  #
  def ListFlows(self, args, context=None):
    return api_flow.ApiListFlowsHandler()

  def GetFlow(self, args, context=None):
    return api_flow.ApiGetFlowHandler()

  def CreateFlow(self, args, context=None):
    return api_flow.ApiCreateFlowHandler()

  def CancelFlow(self, args, context=None):
    return api_flow.ApiCancelFlowHandler()

  def ListFlowRequests(self, args, context=None):
    return api_flow.ApiListFlowRequestsHandler()

  def ListFlowResults(self, args, context=None):
    return api_flow.ApiListFlowResultsHandler()

  def ListParsedFlowResults(
      self,
      args: api_flow.ApiListParsedFlowResultsArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiListParsedFlowResultsHandler:
    return api_flow.ApiListParsedFlowResultsHandler()

  def ListFlowApplicableParsers(
      self,
      args: api_flow.ApiListFlowApplicableParsersArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiListFlowApplicableParsersHandler:
    return api_flow.ApiListFlowApplicableParsersHandler()

  def GetExportedFlowResults(self, args, context=None):
    return api_flow.ApiGetExportedFlowResultsHandler()

  def GetFlowResultsExportCommand(self, args, context=None):
    return api_flow.ApiGetFlowResultsExportCommandHandler()

  def GetFlowFilesArchive(self, args, context=None):
    return api_flow.ApiGetFlowFilesArchiveHandler()

  def ListFlowOutputPlugins(self, args, context=None):
    return api_flow.ApiListFlowOutputPluginsHandler()

  def ListFlowOutputPluginLogs(self, args, context=None):
    return api_flow.ApiListFlowOutputPluginLogsHandler()

  def ListFlowOutputPluginErrors(self, args, context=None):
    return api_flow.ApiListFlowOutputPluginErrorsHandler()

  def ListFlowLogs(self, args, context=None):
    return api_flow.ApiListFlowLogsHandler()

  def GetCollectedTimeline(self, args, context=None):
    return api_timeline.ApiGetCollectedTimelineHandler()

  def UploadYaraSignature(
      self,
      args: api_yara.ApiUploadYaraSignatureArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_yara.ApiUploadYaraSignatureHandler:
    del args, context  # Unused.
    return api_yara.ApiUploadYaraSignatureHandler()

  def ExplainGlobExpression(
      self,
      args: api_flow.ApiExplainGlobExpressionArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiExplainGlobExpressionHandler:
    del args, context  # Unused.
    return api_flow.ApiExplainGlobExpressionHandler()

  def ScheduleFlow(
      self,
      args: api_flow.ApiCreateFlowArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiScheduleFlowHandler:
    return api_flow.ApiScheduleFlowHandler()

  def ListScheduledFlows(
      self,
      args: api_flow.ApiListScheduledFlowsArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiListScheduledFlowsHandler:
    return api_flow.ApiListScheduledFlowsHandler()

  def UnscheduleFlow(
      self,
      args: api_flow.ApiUnscheduleFlowArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_flow.ApiUnscheduleFlowHandler:
    return api_flow.ApiUnscheduleFlowHandler()

  def GetOsqueryResults(
      self,
      args: api_osquery.ApiGetOsqueryResultsArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ):
    del args, context  # Unused.
    return api_osquery.ApiGetOsqueryResultsHandler()

  # Cron jobs methods.
  # =================
  #
  def ListCronJobs(self, args, context=None):
    return api_cron.ApiListCronJobsHandler()

  def CreateCronJob(self, args, context=None):
    return api_cron.ApiCreateCronJobHandler()

  def GetCronJob(self, args, context=None):
    return api_cron.ApiGetCronJobHandler()

  def ForceRunCronJob(self, args, context=None):
    return api_cron.ApiForceRunCronJobHandler()

  def ModifyCronJob(self, args, context=None):
    return api_cron.ApiModifyCronJobHandler()

  def ListCronJobRuns(self, args, context=None):
    return api_cron.ApiListCronJobRunsHandler()

  def GetCronJobRun(self, args, context=None):
    return api_cron.ApiGetCronJobRunHandler()

  def DeleteCronJob(self, args, context=None):
    return api_cron.ApiDeleteCronJobHandler()

  # Hunts methods.
  # =============
  #
  def ListHunts(self, args, context=None):
    return api_hunt.ApiListHuntsHandler()

  def GetHunt(self, args, context=None):
    return api_hunt.ApiGetHuntHandler()

  def ListHuntErrors(self, args, context=None):
    return api_hunt.ApiListHuntErrorsHandler()

  def ListHuntLogs(self, args, context=None):
    return api_hunt.ApiListHuntLogsHandler()

  def ListHuntResults(self, args, context=None):
    return api_hunt.ApiListHuntResultsHandler()

  def GetExportedHuntResults(self, args, context=None):
    return api_hunt.ApiGetExportedHuntResultsHandler()

  def GetHuntResultsExportCommand(self, args, context=None):
    return api_hunt.ApiGetHuntResultsExportCommandHandler()

  def ListHuntOutputPlugins(self, args, context=None):
    return api_hunt.ApiListHuntOutputPluginsHandler()

  def ListHuntOutputPluginLogs(self, args, context=None):
    return api_hunt.ApiListHuntOutputPluginLogsHandler()

  def ListHuntOutputPluginErrors(self, args, context=None):
    return api_hunt.ApiListHuntOutputPluginErrorsHandler()

  def ListHuntCrashes(self, args, context=None):
    return api_hunt.ApiListHuntCrashesHandler()

  def GetHuntClientCompletionStats(self, args, context=None):
    return api_hunt.ApiGetHuntClientCompletionStatsHandler()

  def GetHuntStats(self, args, context=None):
    return api_hunt.ApiGetHuntStatsHandler()

  def ListHuntClients(self, args, context=None):
    return api_hunt.ApiListHuntClientsHandler()

  def GetHuntContext(self, args, context=None):
    return api_hunt.ApiGetHuntContextHandler()

  def CreateHunt(self, args, context=None):
    return api_hunt.ApiCreateHuntHandler()

  def ModifyHunt(self, args, context=None):
    return api_hunt.ApiModifyHuntHandler()

  def DeleteHunt(self, args, context=None):
    return api_hunt.ApiDeleteHuntHandler()

  def GetHuntFilesArchive(self, args, context=None):
    return api_hunt.ApiGetHuntFilesArchiveHandler()

  def GetHuntFile(self, args, context=None):
    return api_hunt.ApiGetHuntFileHandler()

  def GetCollectedHuntTimelines(
      self,
      args: api_timeline.ApiGetCollectedHuntTimelinesArgs,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_timeline.ApiGetCollectedHuntTimelinesHandler:
    return api_timeline.ApiGetCollectedHuntTimelinesHandler()

  def CreatePerClientFileCollectionHunt(
      self, args: api_hunt.ApiCreatePerClientFileCollectionHuntArgs,
      context: api_call_context.ApiCallContext
  ) -> api_hunt.ApiCreatePerClientFileCollectionHuntHandler:
    return api_hunt.ApiCreatePerClientFileCollectionHuntHandler()

  # Stats metrics methods.
  # =====================
  #
  def ListReports(self, args, context=None):
    return api_stats.ApiListReportsHandler()

  def GetReport(self, args, context=None):
    return api_stats.ApiGetReportHandler()

  # Approvals methods.
  # =================
  #
  def CreateClientApproval(self, args, context=None):
    return api_user.ApiCreateClientApprovalHandler()

  def GetClientApproval(self, args, context=None):
    return api_user.ApiGetClientApprovalHandler()

  def GrantClientApproval(self, args, context=None):
    return api_user.ApiGrantClientApprovalHandler()

  def ListClientApprovals(self, args, context=None):
    return api_user.ApiListClientApprovalsHandler()

  def CreateHuntApproval(self, args, context=None):
    return api_user.ApiCreateHuntApprovalHandler()

  def GetHuntApproval(self, args, context=None):
    return api_user.ApiGetHuntApprovalHandler()

  def GrantHuntApproval(self, args, context=None):
    return api_user.ApiGrantHuntApprovalHandler()

  def ListHuntApprovals(self, args, context=None):
    return api_user.ApiListHuntApprovalsHandler()

  def CreateCronJobApproval(self, args, context=None):
    return api_user.ApiCreateCronJobApprovalHandler()

  def GetCronJobApproval(self, args, context=None):
    return api_user.ApiGetCronJobApprovalHandler()

  def GrantCronJobApproval(self, args, context=None):
    return api_user.ApiGrantCronJobApprovalHandler()

  def ListCronJobApprovals(self, args, context=None):
    return api_user.ApiListCronJobApprovalsHandler()

  def ListApproverSuggestions(self, args, context=None):
    return api_user.ApiListApproverSuggestionsHandler()

  # User settings methods.
  # =====================
  #
  def GetPendingUserNotificationsCount(self, args, context=None):
    return api_user.ApiGetPendingUserNotificationsCountHandler()

  def ListPendingUserNotifications(self, args, context=None):
    return api_user.ApiListPendingUserNotificationsHandler()

  def DeletePendingUserNotification(self, args, context=None):
    return api_user.ApiDeletePendingUserNotificationHandler()

  def ListAndResetUserNotifications(self, args, context=None):
    return api_user.ApiListAndResetUserNotificationsHandler()

  def GetGrrUser(self, args, context=None):
    # With no ACL checks, every user gets the full set of UI interface
    # traits enabled.
    return api_user.ApiGetOwnGrrUserHandler(
        interface_traits=api_user.ApiGrrUserInterfaceTraits().EnableAll())

  def UpdateGrrUser(self, args, context=None):
    return api_user.ApiUpdateGrrUserHandler()

  # Config methods.
  # ==============
  #
  def GetConfig(self, args, context=None):
    return api_config.ApiGetConfigHandler()

  def GetConfigOption(self, args, context=None):
    return api_config.ApiGetConfigOptionHandler()

  def ListGrrBinaries(self, args, context=None):
    return api_config.ApiListGrrBinariesHandler()

  def GetGrrBinary(self, args, context=None):
    return api_config.ApiGetGrrBinaryHandler()

  def GetGrrBinaryBlob(self, args, context=None):
    return api_config.ApiGetGrrBinaryBlobHandler()

  def GetUiConfig(self, args, context=None):
    return api_config.ApiGetUiConfigHandler()

  # Reflection methods.
  # ==================
  #
  def ListKbFields(self, args, context=None):
    return api_client.ApiListKbFieldsHandler()

  def ListFlowDescriptors(self, args, context=None):
    # TODO(user): move to reflection.py
    return api_flow.ApiListFlowDescriptorsHandler()

  def GetRDFValueDescriptor(self, args, context=None):
    return api_reflection.ApiGetRDFValueDescriptorHandler()

  def ListRDFValuesDescriptors(self, args, context=None):
    return api_reflection.ApiListRDFValuesDescriptorsHandler()

  def ListOutputPluginDescriptors(self, args, context=None):
    return api_output_plugin.ApiListOutputPluginDescriptorsHandler()

  def ListKnownEncodings(self, args, context=None):
    return api_vfs.ApiListKnownEncodingsHandler()

  def ListApiMethods(self, args, context=None):
    # The reflection handler needs the router instance itself in order
    # to enumerate the routable API methods.
    return api_reflection.ApiListApiMethodsHandler(self)

  def GetGrrVersion(
      self,
      args: None,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_metadata.ApiGetGrrVersionHandler:
    return api_metadata.ApiGetGrrVersionHandler()

  def GetOpenApiDescription(
      self,
      args: None,
      context: Optional[api_call_context.ApiCallContext] = None,
  ) -> api_metadata.ApiGetOpenApiDescriptionHandler:
    return api_metadata.ApiGetOpenApiDescriptionHandler(self)
| apache-2.0 |
grupoprog3/proyecto_final | Entrega Final/flask/Lib/site-packages/sqlparse/filters/aligned_indent.py | 20 | 4932 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
from sqlparse import sql, tokens as T
from sqlparse.compat import text_type
from sqlparse.utils import offset, indent
class AlignedIndentFilter(object):
    """sqlparse filter that re-indents a statement so that the major
    clause keywords (FROM, WHERE, AND, ...) line up in an aligned,
    right-justified column under SELECT.
    """

    # Regex matching any JOIN variant (LEFT OUTER JOIN, CROSS JOIN, ...).
    join_words = (r'((LEFT\s+|RIGHT\s+|FULL\s+)?'
                  r'(INNER\s+|OUTER\s+|STRAIGHT\s+)?|'
                  r'(CROSS\s+|NATURAL\s+)?)?JOIN\b')
    # Keywords that trigger a line break with re-alignment.
    split_words = ('FROM',
                   join_words, 'ON',
                   'WHERE', 'AND', 'OR',
                   'GROUP', 'HAVING', 'LIMIT',
                   'ORDER', 'UNION', 'VALUES',
                   'SET', 'BETWEEN', 'EXCEPT')

    def __init__(self, char=' ', n='\n'):
        # char: fill character for padding; n: newline string.
        self.n = n
        self.offset = 0
        self.indent = 0
        self.char = char
        # Keywords are right-aligned in a column as wide as 'select'.
        self._max_kwd_len = len('select')

    def nl(self, offset=1):
        # Build a whitespace token that starts a new, aligned line.
        # A string offset means "right-align this keyword": its length is
        # subtracted from the column width.
        # offset = 1 represent a single space after SELECT
        offset = -len(offset) if not isinstance(offset, int) else offset
        # add two for the space and parens
        indent = self.indent * (2 + self._max_kwd_len)
        return sql.Token(T.Whitespace, self.n + self.char * (
            self._max_kwd_len + offset + indent + self.offset))

    def _process_statement(self, tlist):
        # Drop leading whitespace of a top-level statement.
        if tlist.tokens[0].is_whitespace and self.indent == 0:
            tlist.tokens.pop(0)

        # process the main query body
        self._process(sql.TokenList(tlist.tokens))

    def _process_parenthesis(self, tlist):
        # if this isn't a subquery, don't re-indent
        _, token = tlist.token_next_by(m=(T.DML, 'SELECT'))
        if token is not None:
            with indent(self):
                tlist.insert_after(tlist[0], self.nl('SELECT'))

                # process the inside of the parantheses
                self._process_default(tlist)

            # de-indent last parenthesis
            tlist.insert_before(tlist[-1], self.nl())

    def _process_identifierlist(self, tlist):
        # columns being selected: put each one (after the first) on its
        # own aligned line
        identifiers = list(tlist.get_identifiers())
        identifiers.pop(0)
        [tlist.insert_before(token, self.nl()) for token in identifiers]
        self._process_default(tlist)

    def _process_case(self, tlist):
        # Indent WHEN/ELSE under CASE and pad conditions so the THEN
        # values start in the same column.
        offset_ = len('case ') + len('when ')
        cases = tlist.get_cases(skip_ws=True)
        # align the end as well
        end_token = tlist.token_next_by(m=(T.Keyword, 'END'))[1]
        cases.append((None, [end_token]))

        condition_width = [len(' '.join(map(text_type, cond))) if cond else 0
                           for cond, _ in cases]
        max_cond_width = max(condition_width)

        for i, (cond, value) in enumerate(cases):
            # cond is None when 'else or end'
            stmt = cond[0] if cond else value[0]

            if i > 0:
                tlist.insert_before(stmt, self.nl(
                    offset_ - len(text_type(stmt))))
            if cond:
                # Pad shorter conditions up to the widest one.
                ws = sql.Token(T.Whitespace, self.char * (
                    max_cond_width - condition_width[i]))
                tlist.insert_after(cond[-1], ws)

    def _next_token(self, tlist, idx=-1):
        # Find the next split keyword after idx.
        split_words = T.Keyword, self.split_words, True
        tidx, token = tlist.token_next_by(m=split_words, idx=idx)
        # treat "BETWEEN x and y" as a single statement
        if token and token.normalized == 'BETWEEN':
            tidx, token = self._next_token(tlist, tidx)
            if token and token.normalized == 'AND':
                tidx, token = self._next_token(tlist, tidx)
        return tidx, token

    def _split_kwds(self, tlist):
        # Insert an aligned newline before every split keyword.
        tidx, token = self._next_token(tlist)
        while token:
            # joins are special case. only consider the first word as aligner
            if token.match(T.Keyword, self.join_words, regex=True):
                token_indent = token.value.split()[0]
            else:
                token_indent = text_type(token)
            tlist.insert_before(token, self.nl(token_indent))
            tidx += 1
            tidx, token = self._next_token(tlist, tidx)

    def _process_default(self, tlist):
        self._split_kwds(tlist)
        # process any sub-sub statements
        for sgroup in tlist.get_sublists():
            idx = tlist.token_index(sgroup)
            pidx, prev_ = tlist.token_prev(idx)
            # HACK: make "group/order by" work. Longer than max_len.
            offset_ = 3 if (prev_ and prev_.match(T.Keyword, 'BY')) else 0
            with offset(self, offset_):
                self._process(sgroup)

    def _process(self, tlist):
        # Dispatch to _process_<classname> if defined, else the default.
        func_name = '_process_{cls}'.format(cls=type(tlist).__name__)
        func = getattr(self, func_name.lower(), self._process_default)
        func(tlist)

    def process(self, stmt):
        """Entry point: re-indent *stmt* in place and return it."""
        self._process(stmt)
        return stmt
| apache-2.0 |
shoyer/xray | xarray/tests/test_backends_file_manager.py | 2 | 5585 | import gc
import pickle
import threading
from unittest import mock
import pytest
from xarray.backends.file_manager import CachingFileManager
from xarray.backends.lru_cache import LRUCache
from xarray.core.options import set_options
@pytest.fixture(params=[1, 2, 3, None])
def file_cache(request):
    # Parametrized cache fixture: bounded LRU caches of several sizes,
    # plus an unbounded plain dict for the maxsize=None case.
    maxsize = request.param
    if maxsize is None:
        yield {}
    else:
        yield LRUCache(maxsize)
def test_file_manager_mock_write(file_cache):
    # Acquire/write/close with a mocked opener: the file is opened once,
    # written, closed, evicted from the cache, and the lock is entered
    # for both acquire and close.
    mock_file = mock.Mock()
    opener = mock.Mock(spec=open, return_value=mock_file)
    lock = mock.MagicMock(spec=threading.Lock())

    manager = CachingFileManager(
        opener, 'filename', lock=lock, cache=file_cache)
    f = manager.acquire()
    f.write('contents')
    manager.close()

    assert not file_cache

    opener.assert_called_once_with('filename')
    mock_file.write.assert_called_once_with('contents')
    mock_file.close.assert_called_once_with()
    lock.__enter__.assert_has_calls([mock.call(), mock.call()])
@pytest.mark.parametrize('expected_warning', [None, RuntimeWarning])
def test_file_manager_autoclose(expected_warning):
    # Dropping the last reference to a CachingFileManager closes the
    # underlying file and clears the cache entry; a RuntimeWarning is
    # emitted only when warn_for_unclosed_files is enabled.
    from contextlib import nullcontext

    mock_file = mock.Mock()
    opener = mock.Mock(return_value=mock_file)
    cache = {}

    manager = CachingFileManager(opener, 'filename', cache=cache)
    manager.acquire()
    assert cache

    # Bug fix: pytest.warns(None) is an error on pytest >= 7, so only use
    # the warns() context manager when a warning is actually expected.
    if expected_warning is None:
        warn_ctx = nullcontext()
    else:
        warn_ctx = pytest.warns(expected_warning)

    with set_options(warn_for_unclosed_files=expected_warning is not None):
        with warn_ctx:
            del manager
            gc.collect()

    assert not cache
    mock_file.close.assert_called_once_with()
def test_file_manager_autoclose_while_locked():
    # If the shared lock is held when the manager is garbage collected,
    # __del__ must neither block nor evict the cache entry.
    opener = mock.Mock()
    lock = threading.Lock()
    cache = {}

    manager = CachingFileManager(opener, 'filename', lock=lock, cache=cache)
    manager.acquire()
    assert cache

    lock.acquire()
    with set_options(warn_for_unclosed_files=False):
        del manager
        gc.collect()

    # can't clear the cache while locked, but also don't block in __del__
    assert cache
def test_file_manager_repr():
    # The wrapped file name appears in repr() for debuggability.
    opener = mock.Mock()
    manager = CachingFileManager(opener, 'my-file')
    assert 'my-file' in repr(manager)
def test_file_manager_refcounts():
    # Two managers with the same key share one cache entry; the file is
    # only closed when the last referencing manager is dropped.
    mock_file = mock.Mock()
    opener = mock.Mock(spec=open, return_value=mock_file)
    cache = {}
    ref_counts = {}

    manager = CachingFileManager(
        opener, 'filename', cache=cache, ref_counts=ref_counts)
    assert ref_counts[manager._key] == 1
    manager.acquire()
    assert cache

    manager2 = CachingFileManager(
        opener, 'filename', cache=cache, ref_counts=ref_counts)
    assert cache
    assert manager._key == manager2._key
    assert ref_counts[manager._key] == 2

    with set_options(warn_for_unclosed_files=False):
        del manager
        gc.collect()

    # First deletion only decrements the count; the file stays open.
    assert cache
    assert ref_counts[manager2._key] == 1
    mock_file.close.assert_not_called()

    with set_options(warn_for_unclosed_files=False):
        del manager2
        gc.collect()

    assert not ref_counts
    assert not cache
def test_file_manager_replace_object():
    # Re-creating a manager with identical arguments replaces the old
    # object without bumping the shared reference count.
    opener = mock.Mock()
    cache = {}
    ref_counts = {}

    manager = CachingFileManager(
        opener, 'filename', cache=cache, ref_counts=ref_counts)
    manager.acquire()
    assert ref_counts[manager._key] == 1
    assert cache

    manager = CachingFileManager(
        opener, 'filename', cache=cache, ref_counts=ref_counts)
    assert ref_counts[manager._key] == 1
    assert cache

    manager.close()
def test_file_manager_write_consecutive(tmpdir, file_cache):
    # Alternating writes through two managers; whether the same handle
    # is handed back depends on the cache size (maxsize 1 evicts and
    # forces a reopen in append mode).
    path1 = str(tmpdir.join('testing1.txt'))
    path2 = str(tmpdir.join('testing2.txt'))
    manager1 = CachingFileManager(open, path1, mode='w', cache=file_cache)
    manager2 = CachingFileManager(open, path2, mode='w', cache=file_cache)
    f1a = manager1.acquire()
    f1a.write('foo')
    f1a.flush()
    f2 = manager2.acquire()
    f2.write('bar')
    f2.flush()
    f1b = manager1.acquire()
    f1b.write('baz')
    assert (getattr(file_cache, 'maxsize', float('inf')) > 1) == (f1a is f1b)
    manager1.close()
    manager2.close()

    with open(path1, 'r') as f:
        assert f.read() == 'foobaz'
    with open(path2, 'r') as f:
        assert f.read() == 'bar'
def test_file_manager_write_concurrent(tmpdir, file_cache):
    # Repeated acquires from one manager must all return the same
    # underlying file object.
    path = str(tmpdir.join('testing.txt'))
    manager = CachingFileManager(open, path, mode='w', cache=file_cache)
    f1 = manager.acquire()
    f2 = manager.acquire()
    f3 = manager.acquire()
    assert f1 is f2
    assert f2 is f3
    f1.write('foo')
    f1.flush()
    f2.write('bar')
    f2.flush()
    f3.write('baz')
    f3.flush()
    manager.close()

    with open(path, 'r') as f:
        assert f.read() == 'foobarbaz'
def test_file_manager_write_pickle(tmpdir, file_cache):
    # A manager must survive a pickle round-trip and keep appending to
    # the same file afterwards.
    path = str(tmpdir.join('testing.txt'))
    manager = CachingFileManager(open, path, mode='w', cache=file_cache)
    f = manager.acquire()
    f.write('foo')
    f.flush()
    manager2 = pickle.loads(pickle.dumps(manager))
    f2 = manager2.acquire()
    f2.write('bar')
    manager2.close()
    manager.close()

    with open(path, 'r') as f:
        assert f.read() == 'foobar'
def test_file_manager_read(tmpdir, file_cache):
    # Reading through the manager yields the file's contents.
    path = str(tmpdir.join('testing.txt'))

    with open(path, 'w') as f:
        f.write('foobar')

    manager = CachingFileManager(open, path, cache=file_cache)
    f = manager.acquire()
    assert f.read() == 'foobar'
    manager.close()
def test_file_manager_invalid_kwargs():
    # Keyword arguments the opener does not accept are rejected eagerly
    # at construction time.
    with pytest.raises(TypeError):
        CachingFileManager(open, 'dummy', mode='w', invalid=True)
| apache-2.0 |
jtyuan/racetrack | src/arch/x86/isa/insts/x87/control/save_x87_status_word.py | 72 | 2410 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
# X86 microcode for the FNSTSW ("store x87 status word, no exception
# check") macroop variants: read the fsw register and store it to AX
# (register form), to a SIB-addressed memory operand, or to a
# RIP-relative memory operand.
microcode = '''
# FSTSW
def macroop FNSTSW_R {
rdval t1, fsw
mov rax, rax, t1, dataSize=2
};
def macroop FNSTSW_M {
rdval t1, fsw
st t1, seg, sib, disp, dataSize=2
};
def macroop FNSTSW_P {
rdip t7
rdval t1, fsw
st t1, seg, riprel, disp, dataSize=2
};
'''
| bsd-3-clause |
byndcivilization/toy-infrastructure | flask-app/venv/lib/python3.6/site-packages/werkzeug/contrib/jsrouting.py | 513 | 8564 | # -*- coding: utf-8 -*-
"""
werkzeug.contrib.jsrouting
~~~~~~~~~~~~~~~~~~~~~~~~~~
Addon module that allows to create a JavaScript function from a map
that generates rules.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
# JSON serializer selection: prefer simplejson, fall back to the stdlib
# json module, and otherwise install a stub that fails lazily when (and
# only when) jsrouting is actually used.
try:
    from simplejson import dumps
except ImportError:
    try:
        from json import dumps
    except ImportError:
        def dumps(*args):
            raise RuntimeError('simplejson required for jsrouting')
from inspect import getmro
from werkzeug.routing import NumberConverter
from werkzeug._compat import iteritems
def render_template(name_parts, rules, converters):
    """Render the client-side JavaScript URL-building function.

    name_parts is the dotted JS name to assign the generated function to,
    split into its parts (empty for an anonymous expression); rules is a
    JSON-serialized string of routing rules; converters is a sequence of
    JS converter-function source strings. Returns the complete JavaScript
    source as a unicode string.
    """
    result = u''
    if name_parts:
        # Emit guards creating each intermediate namespace object, then
        # assign the function expression to the full dotted name.
        for idx in range(0, len(name_parts) - 1):
            name = u'.'.join(name_parts[:idx + 1])
            result += u"if (typeof %s === 'undefined') %s = {}\n" % (name, name)
        result += '%s = ' % '.'.join(name_parts)
    # The remainder is a JS template; %(converters)s and %(rules)s are
    # substituted below.
    result += """(function (server_name, script_name, subdomain, url_scheme) {
var converters = [%(converters)s];
var rules = %(rules)s;
function in_array(array, value) {
if (array.indexOf != undefined) {
return array.indexOf(value) != -1;
}
for (var i = 0; i < array.length; i++) {
if (array[i] == value) {
return true;
}
}
return false;
}
function array_diff(array1, array2) {
array1 = array1.slice();
for (var i = array1.length-1; i >= 0; i--) {
if (in_array(array2, array1[i])) {
array1.splice(i, 1);
}
}
return array1;
}
function split_obj(obj) {
var names = [];
var values = [];
for (var name in obj) {
if (typeof(obj[name]) != 'function') {
names.push(name);
values.push(obj[name]);
}
}
return {names: names, values: values, original: obj};
}
function suitable(rule, args) {
var default_args = split_obj(rule.defaults || {});
var diff_arg_names = array_diff(rule.arguments, default_args.names);
for (var i = 0; i < diff_arg_names.length; i++) {
if (!in_array(args.names, diff_arg_names[i])) {
return false;
}
}
if (array_diff(rule.arguments, args.names).length == 0) {
if (rule.defaults == null) {
return true;
}
for (var i = 0; i < default_args.names.length; i++) {
var key = default_args.names[i];
var value = default_args.values[i];
if (value != args.original[key]) {
return false;
}
}
}
return true;
}
function build(rule, args) {
var tmp = [];
var processed = rule.arguments.slice();
for (var i = 0; i < rule.trace.length; i++) {
var part = rule.trace[i];
if (part.is_dynamic) {
var converter = converters[rule.converters[part.data]];
var data = converter(args.original[part.data]);
if (data == null) {
return null;
}
tmp.push(data);
processed.push(part.name);
} else {
tmp.push(part.data);
}
}
tmp = tmp.join('');
var pipe = tmp.indexOf('|');
var subdomain = tmp.substring(0, pipe);
var url = tmp.substring(pipe+1);
var unprocessed = array_diff(args.names, processed);
var first_query_var = true;
for (var i = 0; i < unprocessed.length; i++) {
if (first_query_var) {
url += '?';
} else {
url += '&';
}
first_query_var = false;
url += encodeURIComponent(unprocessed[i]);
url += '=';
url += encodeURIComponent(args.original[unprocessed[i]]);
}
return {subdomain: subdomain, path: url};
}
function lstrip(s, c) {
while (s && s.substring(0, 1) == c) {
s = s.substring(1);
}
return s;
}
function rstrip(s, c) {
while (s && s.substring(s.length-1, s.length) == c) {
s = s.substring(0, s.length-1);
}
return s;
}
return function(endpoint, args, force_external) {
args = split_obj(args);
var rv = null;
for (var i = 0; i < rules.length; i++) {
var rule = rules[i];
if (rule.endpoint != endpoint) continue;
if (suitable(rule, args)) {
rv = build(rule, args);
if (rv != null) {
break;
}
}
}
if (rv == null) {
return null;
}
if (!force_external && rv.subdomain == subdomain) {
return rstrip(script_name, '/') + '/' + lstrip(rv.path, '/');
} else {
return url_scheme + '://'
+ (rv.subdomain ? rv.subdomain + '.' : '')
+ server_name + rstrip(script_name, '/')
+ '/' + lstrip(rv.path, '/');
}
};
})""" % {'converters': u', '.join(converters),
         'rules': rules}
    return result
def generate_map(map, name='url_map'):
    """
    Generates a JavaScript function containing the rules defined in
    this map, to be used with a MapAdapter's generate_javascript
    method.  If you don't pass a name the returned JavaScript code is
    an expression that returns a function.  Otherwise it's a standalone
    script that assigns the function with that name.  Dotted names are
    resolved (so you can use a name like 'obj.url_for')

    In order to use JavaScript generation, simplejson must be installed.

    Note that using this feature will expose the rules
    defined in your map to users.  If your rules contain sensitive
    information, don't use JavaScript generation!
    """
    from warnings import warn
    warn(DeprecationWarning('This module is deprecated'))
    # Make sure lazily-added rules are compiled before introspection.
    map.update()
    rules = []
    converters = []
    for rule in map.iter_rules():
        # Serialize the rule's trace (static / dynamic url parts) so the JS
        # side can rebuild the url piece by piece.
        trace = [{
            'is_dynamic': is_dynamic,
            'data': data
        } for is_dynamic, data in rule._trace]
        rule_converters = {}
        for key, converter in iteritems(rule._converters):
            # Deduplicate converter function sources: each rule only stores
            # an index into the shared converter list.
            js_func = js_to_url_function(converter)
            try:
                index = converters.index(js_func)
            except ValueError:
                converters.append(js_func)
                index = len(converters) - 1
            rule_converters[key] = index
        rules.append({
            u'endpoint': rule.endpoint,
            u'arguments': list(rule.arguments),
            u'converters': rule_converters,
            u'trace': trace,
            u'defaults': rule.defaults
        })
    return render_template(name_parts=name and name.split('.') or [],
                           rules=dumps(rules),
                           converters=converters)
def generate_adapter(adapter, name='url_for', map_name='url_map'):
    """Generates the url building function for a map.

    Serializes every relevant adapter attribute to a JavaScript literal and
    splices the pieces into a ``var <name> = <map_name>(...)`` statement.
    """
    template = (u'var %(name)s = %(map_name)s(\n'
                u'    %(server_name)s,\n'
                u'    %(script_name)s,\n'
                u'    %(subdomain)s,\n'
                u'    %(url_scheme)s\n'
                u');')
    return template % {
        u'name': name,
        u'map_name': map_name,
        u'server_name': dumps(adapter.server_name),
        u'script_name': dumps(adapter.script_name),
        u'subdomain': dumps(adapter.subdomain),
        u'url_scheme': dumps(adapter.url_scheme)
    }
def js_to_url_function(converter):
    """Get the JavaScript converter function from a rule.

    Prefers a ``js_to_url_function`` method on the converter itself; falls
    back to the class registry (searched along the MRO so subclasses of
    known converters are handled), and finally to plain
    ``encodeURIComponent`` for unknown converter types.
    """
    if hasattr(converter, 'js_to_url_function'):
        body = converter.js_to_url_function()
    else:
        for klass in getmro(type(converter)):
            if klass in js_to_url_functions:
                body = js_to_url_functions[klass](converter)
                break
        else:
            return 'encodeURIComponent'
    return '(function(value) { %s })' % body
def NumberConverter_js_to_url(conv):
    """Return the JS url-conversion body for a NumberConverter rule."""
    if not conv.fixed_digits:
        return u'return value.toString();'
    # Left-pad the stringified number with zeros up to fixed_digits.
    return (u"var result = value.toString();\n"
            u"while (result.length < %s)\n"
            u"    result = '0' + result;\n"
            u"return result;") % conv.fixed_digits
# Registry mapping converter classes to the functions that generate their
# JavaScript url-conversion code; consulted (along the MRO) by
# js_to_url_function() above.
js_to_url_functions = {
    NumberConverter: NumberConverter_js_to_url
}
| gpl-3.0 |
leighpauls/k2cro4 | native_client/site_scons/site_tools/publish.py | 18 | 9545 | #!/usr/bin/python2.4
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Publish tool for SCons."""
# Registry of published resources. This is a dict indexed by group name; each
# value is a dict indexed by resource type, whose values are lists of
# PublishItem objects for that resource. Cleared by _InitializePublish().
__published = {}
#------------------------------------------------------------------------------
class PublishItem(object):
  """A single resource queued for publication.

  Bundles a source node together with the optional subdirectory it should be
  copied into by ReplicatePublished().
  """

  def __init__(self, source, subdir):
    """Store the source node and its optional target subdirectory.

    Args:
      source: Source node.
      subdir: Subdirectory to copy the node into in ReplicatePublished(),
          or None for the target directory itself.
    """
    object.__init__(self)
    self.source = source
    self.subdir = subdir
#------------------------------------------------------------------------------
def _InitializePublish(env):
  """Re-initializes published resources.

  Args:
    env: Parent environment (unused; present to satisfy the Defer interface).
  """
  env = env   # Silence gpylint
  # Empty the registry in place; clearing (rather than rebinding) keeps any
  # existing references to the dict valid.
  __published.clear()
def ReplicatePublished(self, target, group_name, resource_type):
  """Replicate published resources for the group to the target directory.

  Args:
    self: Environment in which this function was called.
    target: Target directory for resources.
    group_name: Name of resource group, or a list of names of resource groups.
    resource_type: Type of resources (string), or a list of resource types.

  Uses the subdir parameter passed to Publish() when replicating source nodes
  to the target.

  Returns:
    The list of target nodes from the calls to Replicate().

  Since this is based on Replicate(), it will also use the REPLICATE_REPLACE
  variable, if it's set in the calling environment.
  """
  target_path = self.Dir(target).abspath
  #GOOGLE_CHANGE(pss) - FROM THIS:
  #GOOGLE_CHANGE(pss) - TO THIS:
  # Fetch (source, subdir) pairs so each item can be replicated into its
  # requested subdirectory below target_path.
  source_list = self.GetPublishedWithSubdirs(group_name, resource_type)
  #GOOGLE_CHANGE(pss) - END CHANGES
  dest_nodes = []
  #GOOGLE_CHANGE(pss) - FROM THIS:
  # for group in self.SubstList2(group_name):
  #   for resource in self.SubstList2(resource_type):
  #     # Get items for publish group and resource type
  #     items = __published.get(group, {}).get(resource, [])
  #     for i in items:
  #       if i.subdir:
  #         dest_nodes += self.Replicate(target_path + '/' + i.subdir, i.source)
  #       else:
  #         dest_nodes += self.Replicate(target_path, i.source)
  #GOOGLE_CHANGE(pss) - TO THIS:
  for source in source_list:
    # Add the subdir if there is one in the source tuple.
    if source[1]:
      dest_nodes += self.Replicate(target_path + '/' + source[1], source[0])
    else:
      dest_nodes += self.Replicate(target_path, source[0])
  #GOOGLE_CHANGE(pss) - END CHANGES
  return dest_nodes
#GOOGLE_CHANGE(pss) - FROM THIS:
# def GetPublished(self, group_name, resource_type):
#   """Returns a list of the published resources of the specified type.
#
#   Args:
#     self: Environment in which this function was called.
#     group_name: Name of resource group, or a list of names of resource groups.
#     resource_type: Type of resources (string), or a list of resource types.
#
#   Returns:
#     A flattened list of the source nodes from calls to Publish() for the
#     specified group and resource type.  Returns an empty list if there are
#     no matching resources.
#   """
#GOOGLE_CHANGE(pss) - TO THIS:
def GetPublishedWithSubdirs(self, group_name, resource_type):
  """Returns a list of the published resources of the specified type.

  Args:
    self: Environment in which this function was called.
    group_name: Name of resource group, or a list of names of resource groups.
    resource_type: Type of resources (string), or a list of resource types.

  Returns:
    A flattened list of the source nodes from calls to Publish() for the
    specified group and resource type.  Each source node is represented
    by a pair consisting of (source_node, subdir).  Returns an empty list
    if there are no matching resources.
  """
  #GOOGLE_CHANGE(pss) - END CHANGES
  source_list = []
  # Expand SCons variables, then collect items for every (group, resource)
  # combination from the module-level registry.
  for group in self.SubstList2(group_name):
    # Get items for publish group and resource type
    for resource in self.SubstList2(resource_type):
      items = __published.get(group, {}).get(resource, [])
      for i in items:
        #GOOGLE_CHANGE(pss) - FROM THIS:
        # source_list.append(i.source)
        #GOOGLE_CHANGE(pss) - TO THIS:
        source_list.append((i.source, i.subdir))
        #GOOGLE_CHANGE(pss) - END CHANGES
  return source_list
#GOOGLE_CHANGE(pss) - FROM THIS:
#GOOGLE_CHANGE(pss) - TO THIS:
def GetPublished(self, group_name, resource_type):
  """Returns a list of the published resources of the specified type.

  Args:
    self: Environment in which this function was called.
    group_name: Name of resource group, or a list of names of resource groups.
    resource_type: Type of resources (string), or a list of resource types.

  Returns:
    A flattened list of the source nodes from calls to Publish() for the
    specified group and resource type.  Returns an empty list if there are
    no matching resources.
  """
  # Delegate to the subdir-aware variant and keep only the source nodes.
  return [node for node, _ in
          self.GetPublishedWithSubdirs(group_name, resource_type)]
#GOOGLE_CHANGE(pss) - END CHANGES
def Publish(self, group_name, resource_type, source, subdir=None):
  """Publishes resources for use by other scripts.

  Args:
    self: Environment in which this function was called.
    group_name: Name of resource group.
    resource_type: Type of resources (string).
    source: Source file(s) to copy.  May be a string, Node, or a list of
        mixed strings or Nodes.  Strings will be passed through env.Glob() to
        evaluate wildcards.  If a source evaluates to a directory, the entire
        directory will be recursively copied.
    subdir: Subdirectory to which the resources should be copied, relative to
        the primary directory for that resource type, if not None.
  """
  if subdir is None:
    subdir = ''     # Make string so we can append to it
  # Evaluate SCons variables in group name
  # TODO: Should Publish() be able to take a list of group names and publish
  # the resource to all of them?
  group_name = self.subst(group_name)
  # Get list of sources
  items = []
  for source_entry in self.Flatten(source):
    if isinstance(source_entry, str):
      # Search for matches for each source entry
      # TODO: Should generate an error if there were no matches?  But need to
      # skip this warning if this is a recursive call to self.Publish() from
      # below.
      source_nodes = self.Glob(source_entry)
    else:
      # Source entry is already a file or directory node; no need to glob it
      source_nodes = [source_entry]
    for s in source_nodes:
      # NOTE(review): comparing the class name string presumably avoids
      # importing SCons.Node.FS here, but it will not match Dir subclasses --
      # confirm before relying on it for custom node types.
      if str(s.__class__) == 'SCons.Node.FS.Dir':
        # Recursively publish all files in subdirectory.  Since glob('*')
        # doesn't match dot files, also glob('.*').
        self.Publish(group_name, resource_type,
                     [s.abspath + '/*', s.abspath + '/.*'],
                     subdir=subdir + '/' + s.name)
      else:
        items.append(PublishItem(s, subdir))
  # Publish items, if any
  if items:
    # Get publish group, creating the nested dicts on first use.
    if group_name not in __published:
      __published[group_name] = {}
    group = __published[group_name]
    if resource_type not in group:
      group[resource_type] = []
    # Publish items into group
    group[resource_type] += items
def generate(env):
  # NOTE: SCons requires the use of this name, which fails gpylint.
  """SCons entry point for this tool."""
  # Defer initializing publish, but do before building SConscripts
  env.Defer(_InitializePublish)
  env.Defer('BuildEnvironmentSConscripts', after=_InitializePublish)
  # AddMethod() binds the free functions below as methods on the environment,
  # which is why they all take an explicit `self` first parameter.
  #GOOGLE_CHANGE(pss) - FROM THIS:
  #GOOGLE_CHANGE(pss) - TO THIS:
  env.AddMethod(GetPublishedWithSubdirs)
  #GOOGLE_CHANGE(pss) - END CHANGES
  env.AddMethod(GetPublished)
  env.AddMethod(Publish)
  env.AddMethod(ReplicatePublished)
| bsd-3-clause |
tempbottle/Nuitka | misc/make-upload.py | 1 | 1923 | #!/usr/bin/env python
# Copyright 2015, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, sys, shutil, subprocess
# Sanity check: must run from the root of the "Nuitka Staging" checkout.
assert os.path.isfile( "setup.py" ) and open( ".git/description" ).read().strip() == "Nuitka Staging"

# subprocess.check_output() returns *bytes* on Python 3, so every comparison
# below must use bytes literals as well.  Previously "release" (a str) was
# passed to bytes.startswith(), which raises TypeError under Python 3.
nuitka_version = subprocess.check_output( "./bin/nuitka --version", shell = True ).strip()
branch_name = subprocess.check_output( "git name-rev --name-only HEAD".split() ).strip()

# Only known release-worthy branches may upload.
assert branch_name in ( b"master", b"develop", b"release/" + nuitka_version, b"hotfix/" + nuitka_version ), branch_name

# Upload release artifacts and documentation to the web server.
assert 0 == os.system( "rsync -rvlpt --exclude=deb_dist dist/ root@nuitka.net:/var/www/releases/" )
for filename in ("README.pdf", "Changelog.pdf", "Developer_Manual.pdf"):
    assert 0 == os.system( "rsync %s root@nuitka.net:/var/www/doc/" % filename )

# Upload only stable releases to OpenSUSE Build Service:
if branch_name.startswith( b"release" ) or branch_name == b"master":
    pass
elif branch_name == b"develop":
    # Mirror the develop and master branches (and tags) to all remotes.
    for remote in "origin", "bitbucket", "github":
        assert 0 == os.system( "git push --tags -f %s develop" % remote )
        assert 0 == os.system( "git push %s master" % remote )
else:
    # Decode for display so Python 3 does not print b'...' noise.
    sys.stdout.write( "Skipping for branch '%s'" % branch_name.decode() )
| apache-2.0 |
bram85/topydo | topydo/lib/TodoBase.py | 1 | 8341 | # Topydo - A todo.txt client written in Python.
# Copyright (C) 2014 - 2015 Bram Schoenmakers <bram@topydo.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This module contains the class that represents a single todo item.
"""
import re
from datetime import date
from topydo.lib.TodoParser import parse_line
from topydo.lib.Utils import is_valid_priority
class TodoBase(object):
    """
    This class represents a single todo item in a todo.txt file. It maintains
    an internal data dictionary of various attributes, but also keeps the plain
    text format in shape such that it can be printed back to a file with as few
    distortions as possible (no re-shuffling of attributes).

    This is a base class, but supports enough to process any item in a todo.txt
    file. Derived classes add some interpretation to the tags that may appear
    in a todo item.
    """

    def __init__(self, p_src):
        # src holds the raw todo.txt line; fields the parsed representation.
        self.src = ""
        self.fields = {}

        self.set_source_text(p_src)

    def tag_value(self, p_key, p_default=None):
        """
        Returns a tag value associated with p_key. Returns p_default if p_key
        does not exist (which defaults to None).
        """
        return self.tag_values(p_key)[0] if p_key in self.fields['tags'] else p_default

    def tag_values(self, p_key):
        """
        Returns a list of all tag values associated with p_key. Returns
        empty list if p_key does not exist.
        """
        return self.fields['tags'][p_key] if p_key in self.fields['tags'] else []

    def has_tag(self, p_key, p_value=""):
        """
        Returns true when there is at least one tag with the given key. If a
        value is passed, it will only return true when there exists a tag with
        the given key-value combination.
        """
        tags = self.fields['tags']
        return p_key in tags and (p_value == "" or p_value in tags[p_key])

    def add_tag(self, p_key, p_value):
        """ Adds a tag to the todo. """
        self.set_tag(p_key, p_value, True)

    def _remove_tag_helper(self, p_key, p_value):
        """
        Removes a tag from the internal todo dictionary. Only those instances
        with the given value are removed. If the value is empty, all tags with
        the given key are removed.
        """
        tags = self.fields['tags']

        try:
            tags[p_key] = [t for t in tags[p_key] if p_value != "" and t != p_value]

            if len(tags[p_key]) == 0:
                del tags[p_key]
        except KeyError:
            pass

    def set_tag(self, p_key, p_value="", p_force_add=False, p_old_value=""):
        """
        Sets a occurrence of the tag identified by p_key. Sets an arbitrary
        instance of the tag when the todo contains multiple tags with this key.

        When p_key does not exist, the tag is added.
        When p_value is not set, the tag will be removed.
        When p_force_add is true, a tag will always be added to the todo, in
        case there is already a tag with the given key.
        When p_old_value is set, all tags having this value will be set to the
        new value.
        """
        if p_value == "":
            self.remove_tag(p_key, p_old_value)
            return

        tags = self.fields['tags']
        value = p_old_value if p_old_value else self.tag_value(p_key)

        if not p_force_add and value:
            self._remove_tag_helper(p_key, value)
            # Rewrite the tag in place so the todo keeps its original layout.
            self.src = re.sub(
                r'\b' + p_key + ':' + value + r'\b',
                p_key + ':' + p_value,
                self.src
            )
        else:
            self.src += ' ' + p_key + ':' + p_value

        try:
            tags[p_key].append(p_value)
        except KeyError:
            tags[p_key] = [p_value]

    def remove_tag(self, p_key, p_value=""):
        """
        Removes a tag from the todo.
        When the value is empty (default), all occurrences of the tag will be
        removed.
        Else, only those tags with the value will be removed.
        """
        self._remove_tag_helper(p_key, p_value)

        # when value == "", match any value having key p_key
        value = p_value if p_value != "" else r'\S+'
        self.src = re.sub(r'\s?\b' + p_key + ':' + value + r'\b', '', self.src)

    def tags(self):
        """
        Returns a list of tuples with key-value pairs representing tags in
        this todo item.
        """
        tags = self.fields['tags']
        return [(t, v) for t in tags for v in tags[t]]

    def set_priority(self, p_priority):
        """
        Sets the priority of the todo. Must be a single capital letter [A-Z],
        or None to unset the priority.

        Priority remains unchanged when an invalid priority is given, or when
        the task was completed.
        """
        if not self.is_completed() and (p_priority is None or
                                        is_valid_priority(p_priority)):
            self.fields['priority'] = p_priority

            priority_str = '' if p_priority is None else '(' + p_priority + ') '
            self.src = re.sub(r'^(\([A-Z]\) )?', priority_str, self.src)

    def priority(self):
        """
        Returns the priority of this todo, or None if no priority is set.
        """
        return self.fields['priority']

    def text(self, p_with_tags=False):
        """ Returns the todo text with tags stripped off. """
        return self.src if p_with_tags else self.fields['text']

    def source(self):
        """
        Returns the source text of the todo. This is the raw text with all
        the tags included.
        """
        return self.text(True)

    def set_source_text(self, p_text):
        """ Sets the todo source text. The text will be parsed again. """
        self.src = p_text.strip()
        self.fields = parse_line(self.src)

    def projects(self):
        """ Returns a set of projects associated with this todo item. """
        return set(self.fields['projects'])

    def contexts(self):
        """ Returns a set of contexts associated with this todo item. """
        return set(self.fields['contexts'])

    def is_completed(self):
        """ Returns True iff this todo has been completed. """
        return self.fields['completed']

    def completion_date(self):
        """
        Returns the completion date when the todo has been completed, or None
        otherwise.
        """
        return self.fields['completionDate']

    def set_completed(self, p_completion_date=None):
        """
        Marks the todo as complete.

        Sets the completed flag and the completion date, which defaults to
        today. The default is resolved at call time: a `date.today()` default
        argument would be evaluated only once, when the class is defined,
        yielding stale completion dates in long-running processes.
        """
        if not self.is_completed():
            if p_completion_date is None:
                p_completion_date = date.today()

            self.set_priority(None)
            self.fields['completed'] = True
            self.fields['completionDate'] = p_completion_date

            self.src = re.sub(r'^(\([A-Z]\) )?',
                              'x ' + p_completion_date.isoformat() + ' ',
                              self.src)

    def set_creation_date(self, p_date=None):
        """
        Sets the creation date of a todo. Should be passed a date object;
        defaults to today, resolved at call time (see set_completed for why
        the default is not `date.today()` itself).
        """
        if p_date is None:
            p_date = date.today()

        self.fields['creationDate'] = p_date

        # not particularly pretty, but inspired by
        # http://bugs.python.org/issue1519638 non-existent matches trigger
        # exceptions, hence the lambda
        self.src = re.sub(
            r'^(x \d{4}-\d{2}-\d{2} |\([A-Z]\) )?(\d{4}-\d{2}-\d{2} )?(.*)$',
            lambda m:
            u"{}{} {}".format(m.group(1) or '', p_date.isoformat(),
                              m.group(3)), self.src)

    def creation_date(self):
        """ Returns the creation date of a todo. """
        return self.fields['creationDate']
| gpl-3.0 |
ChrisAntaki/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py | 118 | 7503 | # Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
    """Tests for XvfbDriver: Xvfb startup, free-display selection, shutdown.

    Everything runs against mock hosts/executives, so no real Xvfb or X
    server process is ever spawned.
    """

    def make_driver(self, worker_number=0, xorg_running=False, executive=None):
        # Build a Port around mocks; the build directory and the server
        # process constructor are stubbed so driver.start() stays in-process.
        port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._config.build_directory = lambda configuration: "/mock-build"
        port._server_process_constructor = MockServerProcess
        if xorg_running:
            # Pretend a real X server already owns a display (pid arbitrary).
            port._executive._running_pids['Xorg'] = 108
        driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
        driver._startup_delay_secs = 0
        return driver

    def cleanup_driver(self, driver):
        # Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
        # killing the Xvfb process if present. Thus, this method should only be called from tests that do not
        # intend to test the behavior of XvfbDriver.stop.
        driver._xvfb_process = None

    def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
        # Start the driver, asserting both the commands it logs and the
        # DISPLAY value it exports to the server process environment.
        OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
        self.assertTrue(driver._server_process.started)
        self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)

    def test_start_no_pixel_tests(self):
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0")
        self.cleanup_driver(driver)

    def test_start_pixel_tests(self):
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_start_arbitrary_worker_number(self):
        # The worker number does not influence the display: the driver picks
        # the next display not claimed by a running X/Xvfb process.
        driver = self.make_driver(worker_number=17)
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_next_free_display(self):
        # Each scenario feeds a fake `ps` listing and expects the lowest
        # display number not already taken by an X/Xorg/Xvfb process.
        output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)
        output = "X /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        output = "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        # Gaps in the claimed displays are filled first (:2 before :4).
        output = "Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)

    def test_start_next_worker(self):
        # With _next_free_display stubbed, start() must launch Xvfb on exactly
        # that display and export it.
        driver = self.make_driver()
        driver._next_free_display = lambda: 0
        expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)
        driver = self.make_driver()
        driver._next_free_display = lambda: 3
        expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_stop(self):
        # stop() must kill the Xvfb process and remove its X display lock file.
        filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
        port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
        driver = XvfbDriver(port, worker_number=0, pixel_tests=True)

        class FakeXvfbProcess(object):
            pid = 1234

        driver._xvfb_process = FakeXvfbProcess()
        driver._lock_file = '/tmp/.X42-lock'
        expected_logs = "MOCK kill_process pid: 1234\n"
        OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)
        self.assertIsNone(driver._xvfb_process)
        self.assertFalse(port._filesystem.exists(driver._lock_file))
| bsd-3-clause |
ITDevLtd/MCVirt | source/mcvirt-daemon/usr/lib/python2.7/dist-packages/mcvirt/cluster/remote.py | 1 | 1310 | """Provide interface for RPC to cluster nodes."""
# Copyright (c) 2014 - I.T. Dev Ltd
#
# This file is part of MCVirt.
#
# MCVirt is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# MCVirt is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with MCVirt. If not, see <http://www.gnu.org/licenses/>
from mcvirt.client.rpc import Connection
class Node(Connection):
    """A class to perform remote commands on MCVirt nodes."""

    def __init__(self, name, node_config, **kwargs):
        """Store node identity and establish the RPC connection.

        Args:
            name: Hostname of the remote node.
            node_config: Dict holding 'username', 'password' and, optionally,
                'ip_address' for the node.
            **kwargs: Passed through to the Connection constructor.
        """
        self.name = name
        # A missing key simply means no dedicated IP address is configured.
        self.ip_address = node_config.get('ip_address')
        super(Node, self).__init__(username=node_config['username'],
                                   password=node_config['password'],
                                   host=self.name,
                                   **kwargs)
| gpl-2.0 |
jaruba/chromium.src | tools/perf/page_sets/top_25_smooth.py | 11 | 5359 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
from page_sets import top_pages
def _IssueMarkerAndScroll(action_runner):
interaction = action_runner.BeginGestureInteraction(
'ScrollAction', is_smooth=True)
action_runner.ScrollPage()
interaction.End()
def _CreatePageClassWithSmoothInteractions(page_cls):
class DerivedSmoothPage(page_cls): # pylint: disable=W0232
def RunPageInteractions(self, action_runner):
_IssueMarkerAndScroll(action_runner)
return DerivedSmoothPage
class TopSmoothPage(page_module.Page):
  """A desktop page scrolled with the default smooth-scroll interaction.

  `credentials` names an entry in data/credentials.json, or None when the
  page needs no login.
  """

  def __init__(self, url, page_set, name='', credentials=None):
    super(TopSmoothPage, self).__init__(
        url=url, page_set=page_set, name=name,
        credentials_path='data/credentials.json')
    self.user_agent_type = 'desktop'
    self.credentials = credentials

  def RunPageInteractions(self, action_runner):
    # Standard smooth scroll over the whole page.
    _IssueMarkerAndScroll(action_runner)
class GmailSmoothPage(top_pages.GmailPage):
  """ Why: productivity, top google properties """

  def RunPageInteractions(self, action_runner):
    # Gmail's scrollable area is not the document itself; ask the gmonkey
    # API for the scrollable element and expose it on window so the scroll
    # action can target it.
    action_runner.ExecuteJavaScript('''
        gmonkey.load('2.0', function(api) {
          window.__scrollableElementForTelemetry = api.getScrollableElement();
        });''')
    # Wait for the asynchronous gmonkey callback above to run.
    action_runner.WaitForJavaScriptCondition(
        'window.__scrollableElementForTelemetry != null')
    interaction = action_runner.BeginGestureInteraction(
        'ScrollAction', is_smooth=True)
    action_runner.ScrollElement(
        element_function='window.__scrollableElementForTelemetry')
    interaction.End()
class GoogleCalendarSmoothPage(top_pages.GoogleCalendarPage):
  """ Why: productivity, top google properties """

  def RunPageInteractions(self, action_runner):
    # Scroll the week view container rather than the whole page.
    gesture = action_runner.BeginGestureInteraction('ScrollAction',
                                                    is_smooth=True)
    action_runner.ScrollElement(selector='#scrolltimedeventswk')
    gesture.End()
class GoogleDocSmoothPage(top_pages.GoogleDocPage):
  """ Why: productivity, top google properties; Sample doc in the link """

  def RunPageInteractions(self, action_runner):
    # The document editor pane is the scrollable element, not the page.
    gesture = action_runner.BeginGestureInteraction('ScrollAction',
                                                    is_smooth=True)
    action_runner.ScrollElement(selector='.kix-appview-editor')
    gesture.End()
class ESPNSmoothPage(top_pages.ESPNPage):
  """ Why: #1 sports """

  def RunPageInteractions(self, action_runner):
    # Start the gesture 10% in from the left edge to avoid the side rail.
    gesture = action_runner.BeginGestureInteraction('ScrollAction',
                                                    is_smooth=True)
    action_runner.ScrollPage(left_start_ratio=0.1)
    gesture.End()
class Top25SmoothPageSet(page_set_module.PageSet):
  """ Pages hand-picked for 2012 CrOS scrolling tuning efforts. """

  def __init__(self):
    super(Top25SmoothPageSet, self).__init__(
        user_agent_type='desktop',
        archive_data_file='data/top_25.json',
        bucket=page_set_module.PARTNER_BUCKET)

    smooth = _CreatePageClassWithSmoothInteractions
    # One entry per story; registration order is preserved from the
    # original hand-written list of AddUserStory calls.
    for page_class in [
        smooth(top_pages.GoogleWebSearchPage),
        GmailSmoothPage,
        GoogleCalendarSmoothPage,
        smooth(top_pages.GoogleImageSearchPage),
        GoogleDocSmoothPage,
        smooth(top_pages.GooglePlusPage),
        smooth(top_pages.YoutubePage),
        smooth(top_pages.BlogspotPage),
        smooth(top_pages.WordpressPage),
        smooth(top_pages.FacebookPage),
        smooth(top_pages.LinkedinPage),
        smooth(top_pages.WikipediaPage),
        smooth(top_pages.TwitterPage),
        smooth(top_pages.PinterestPage),
        ESPNSmoothPage,
        smooth(top_pages.WeatherPage),
        smooth(top_pages.YahooGamesPage),
    ]:
      self.AddUserStory(page_class(self))

    other_urls = [
        # Why: #1 news worldwide (Alexa global)
        'http://news.yahoo.com',
        # Why: #2 news worldwide
        'http://www.cnn.com',
        # Why: #1 world commerce website by visits; #3 commerce in the US by
        # time spent
        'http://www.amazon.com',
        # Why: #1 commerce website by time spent by users in US
        'http://www.ebay.com',
        # Why: #1 Alexa recreation
        'http://booking.com',
        # Why: #1 Alexa reference
        'http://answers.yahoo.com',
        # Why: #1 Alexa sports
        'http://sports.yahoo.com/',
        # Why: top tech blog
        'http://techcrunch.com',
    ]
    for url in other_urls:
      self.AddUserStory(TopSmoothPage(url, self))
| bsd-3-clause |
osm-fr/osmose-backend | analysers/analyser_merge_healthcare_FR_finess.py | 3 | 7042 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
###########################################################################
## ##
## Copyrights Frédéric Rodrigo 2018 ##
## ##
## This program is free software: you can redistribute it and/or modify ##
## it under the terms of the GNU General Public License as published by ##
## the Free Software Foundation, either version 3 of the License, or ##
## (at your option) any later version. ##
## ##
## This program is distributed in the hope that it will be useful, ##
## but WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ##
## GNU General Public License for more details. ##
## ##
## You should have received a copy of the GNU General Public License ##
## along with this program. If not, see <http://www.gnu.org/licenses/>. ##
## ##
###########################################################################
import json
import csv
import io
from modules.OsmoseTranslation import T_
from .Analyser_Merge_Dynamic import Analyser_Merge_Dynamic, SubAnalyser_Merge_Dynamic
from .Analyser_Merge import SourceDataGouv, CSV, Load, Conflate, Select, Mapping
class Analyser_Merge_Healthcare_FR_Finess(Analyser_Merge_Dynamic):
    """Dynamically build one sub-analyser per entry of the FINESS
    healthcare category mapping file."""

    def __init__(self, config, logger = None):
        Analyser_Merge_Dynamic.__init__(self, config, logger)

        # Pick the projection (SRID) and department filter matching the
        # OSM extract this analyser instance runs against.
        if config.db_schema == 'france_guadeloupe':
            srid = 2970
            is_in = lambda dep: dep == "9A"
        elif config.db_schema == 'france_guyane':
            srid = 2972
            is_in = lambda dep: dep == "9C"
        elif config.db_schema == 'france_reunion':
            srid = 2975
            is_in = lambda dep: dep == "9D"
        elif config.db_schema == 'france_martinique':
            srid = 2973
            is_in = lambda dep: dep == "9B"
        elif config.db_schema == 'france_saintpierreetmiquelon':
            srid = 32621
            is_in = lambda dep: dep == "9E"
        elif config.db_schema == 'france_mayotte':
            srid = 32738
            is_in = lambda dep: dep == "9F"
        else:
            srid = 2154
            # NOTE(review): "9E" (Saint-Pierre) and "9F" (Mayotte) are not
            # excluded here, unlike the other overseas codes - confirm
            # that is intentional.
            is_in = lambda dep: dep not in ("9A", "9B", "9C", "9D")

        # Use a context manager so the mapping file handle is closed
        # (the original json.loads(open(...).read()) leaked the file object).
        with open("merge_data/healthcare_FR_finess.mapping.json") as mapping_fd:
            mapingfile = json.load(mapping_fd)
        for r in mapingfile:
            self.classFactory(SubAnalyser_Merge_Healthcare_FR_Finess, r['classes'], srid, is_in, r['categories'], r['items'], r.get('missing_osm', True), r['classes'], r['level'], r['title:fr'], r['tags_select'], r['tags_generate1'], r['tags_generate2'])
class SubAnalyser_Merge_Healthcare_FR_Finess(SubAnalyser_Merge_Dynamic):
    # One merge analyser for a single FINESS establishment category.
    # Instantiated via classFactory with category codes, issue item/class
    # numbers and tag templates taken from the JSON mapping file.

    def __init__(self, config, error_file, logger, srid, is_in, categories, items, missing_osm, classs, level, title, tags_select, tags_generate1, tags_generate2):
        SubAnalyser_Merge_Dynamic.__init__(self, config, error_file, logger)
        # Three issue classes: official record missing from OSM, OSM object
        # lacking a valid ref, and a possible-merge suggestion.
        self.def_class_missing_official(item =str(items[0]), id = classs+1, level = level, tags = ['merge', 'public equipment', 'fix:imagery', 'fix:survey'],
            title = T_('{0} not integrated', title))
        if missing_osm is not False:
            self.def_class_missing_osm(item =str(items[1]), id = classs+2, level = level, tags = ['merge', 'public equipment', 'fix:chair'],
                title = T_('{0} without tag "{1}" or invalid', title, 'ref:FR:FINESS'))
        self.def_class_possible_merge(item =str(items[0]+1), id = classs+3, level = level, tags = ['merge', 'public equipment', 'fix:chair', 'fix:survey'],
            title = T_('{0}, integration suggestion', title))

        self.init(
            "https://www.data.gouv.fr/fr/datasets/finess-extraction-du-fichier-des-etablissements/",
            "FINESS Extraction du Fichier des établissements",
            CSV(Source_Finess(
                attribution="Le ministère des solidarités et de la santé",
                encoding="ISO-8859-1",
                dataset="53699569a3a729239d2046eb",
                resource="51a04fc8-50fa-4844-9b92-b51c69be742e")),
            # Load establishment coordinates in the regional projection and
            # keep only the requested categories in the matching departments.
            Load("coordxet", "coordyet", srid = srid,
                select = {"categetab": categories},
                where = lambda res: is_in(res["departement"])),
            Conflate(
                select = Select(
                    types = ["nodes", "ways", "relations"],
                    tags = tags_select),
                osmRef = "ref:FR:FINESS",
                conflationDistance = 200,
                mapping = Mapping(
                    # static1/static2: fixed tags; mapping1/mapping2: tags
                    # derived from CSV fields of the matched record.
                    static1 = tags_generate1,
                    static2 = dict({"source": self.source}, **tags_generate2),
                    mapping1 = {"ref:FR:FINESS": "nofinesset"},
                    mapping2 = {
                        "type:FR:FINESS": "categetab",
                        "ref:FR:SIRET": "siret",
                        "phone": lambda fields: self.phone(fields["telephone"]),
                        "fax": lambda fields: self.phone(fields["telecopie"]),
                    },
                    # Human-readable label built from the name/address fields,
                    # dropping missing ('None') values.
                    text = lambda tags, fields: {"en": ", ".join(filter(lambda i: i not in (None, 'None'), [fields["rs"], fields["rslongue"], fields["complrs"], fields["compldistrib"], fields["numvoie"], fields["typvoie"], fields["voie"], fields["compvoie"], fields["lieuditbp"], fields["ligneacheminement"], fields["libcategetab"], fields["numuai"]]))} )))

    def phone(self, number):
        # Convert a 10-digit French national number (leading 0) to E.164;
        # implicitly returns None for any other input.
        if number and len(number) == 10 and number[0] == "0":
            return "+33" + number[1:]
class Source_Finess(SourceDataGouv):
    # Download wrapper that re-assembles the FINESS CSV: the upstream file
    # mixes two record types ('structureet' and 'geolocalisation' rows);
    # this joins the coordinates onto each establishment row and returns a
    # regular CSV stream with a synthetic header.

    def open(self):
        # Cheat the parent open
        # NOTE(review): the declared source encoding is ISO-8859-1 but the
        # download is decoded as UTF-8 here - confirm upstream really
        # serves UTF-8.
        self.encoding = 'UTF-8'
        f = super().open()
        csvreader = csv.reader(f, delimiter=u';')
        # Hand-written header row: the raw file has none.
        structureet = [u'nofinesset,nofinessej,rs,rslongue,complrs,compldistrib,numvoie,typvoie,voie,compvoie,lieuditbp,commune,departement,libdepartement,ligneacheminement,telephone,telecopie,categetab,libcategetab,categagretab,libcategagretab,siret,codeape,codemft,libmft,codesph,libsph,dateouv,dateautor,datemaj,numuai,coordxet,coordyet,sourcecoordet,datemajcoord'.split(',')]
        geolocalisation = {}
        for row in csvreader:
            if row[0] == 'structureet':
                structureet.append(row[1:])
            elif row[0] == 'geolocalisation':
                geolocalisation[row[1]] = row[2:]
        # Append the coordinate columns to each establishment row, keyed by
        # the FINESS id in column 0 (rows without coordinates get nothing).
        for row in structureet:
            row += geolocalisation.get(row[0], [])
        # Re-serialize to a seekable in-memory CSV for the CSV() loader.
        csvfile = io.StringIO()
        writer = csv.writer(csvfile)
        for row in structureet:
            writer.writerow(row)
        csvfile.seek(0)
        return csvfile
| gpl-3.0 |
ARMmbed/yotta_osx_installer | workspace/lib/python2.7/site-packages/colorama/win32.py | 446 | 5121 | # Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
# Console handle constants from winbase.h.
STDOUT = -11
STDERR = -12

try:
    import ctypes
    from ctypes import LibraryLoader
    windll = LibraryLoader(ctypes.WinDLL)
    from ctypes import wintypes
except (AttributeError, ImportError):
    # Non-Windows platform (or ctypes without WinDLL): stub everything out
    # so the module can still be imported.
    windll = None
    SetConsoleTextAttribute = lambda *_: None
else:
    from ctypes import byref, Structure, c_char, POINTER

    COORD = wintypes._COORD

    class CONSOLE_SCREEN_BUFFER_INFO(Structure):
        """struct in wincon.h."""
        _fields_ = [
            ("dwSize", COORD),
            ("dwCursorPosition", COORD),
            ("wAttributes", wintypes.WORD),
            ("srWindow", wintypes.SMALL_RECT),
            ("dwMaximumWindowSize", COORD),
        ]
        def __str__(self):
            # Debug-friendly flat rendering of all struct fields.
            return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
                self.dwSize.Y, self.dwSize.X
                , self.dwCursorPosition.Y, self.dwCursorPosition.X
                , self.wAttributes
                , self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
                , self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
            )

    # kernel32 bindings; argtypes/restype are declared so ctypes marshals
    # arguments correctly on both 32- and 64-bit Windows.
    _GetStdHandle = windll.kernel32.GetStdHandle
    _GetStdHandle.argtypes = [
        wintypes.DWORD,
    ]
    _GetStdHandle.restype = wintypes.HANDLE

    _GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
    _GetConsoleScreenBufferInfo.argtypes = [
        wintypes.HANDLE,
        POINTER(CONSOLE_SCREEN_BUFFER_INFO),
    ]
    _GetConsoleScreenBufferInfo.restype = wintypes.BOOL

    _SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
    _SetConsoleTextAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
    ]
    _SetConsoleTextAttribute.restype = wintypes.BOOL

    _SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
    _SetConsoleCursorPosition.argtypes = [
        wintypes.HANDLE,
        COORD,
    ]
    _SetConsoleCursorPosition.restype = wintypes.BOOL

    _FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
    _FillConsoleOutputCharacterA.argtypes = [
        wintypes.HANDLE,
        c_char,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputCharacterA.restype = wintypes.BOOL

    _FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
    _FillConsoleOutputAttribute.argtypes = [
        wintypes.HANDLE,
        wintypes.WORD,
        wintypes.DWORD,
        COORD,
        POINTER(wintypes.DWORD),
    ]
    _FillConsoleOutputAttribute.restype = wintypes.BOOL

    # NOTE(review): bound to the ANSI SetConsoleTitleA entry point despite
    # the W suffix in the Python name - confirm this is intended.
    _SetConsoleTitleW = windll.kernel32.SetConsoleTitleA
    _SetConsoleTitleW.argtypes = [
        wintypes.LPCSTR
    ]
    _SetConsoleTitleW.restype = wintypes.BOOL

    # Std handles are resolved once at import time.
    handles = {
        STDOUT: _GetStdHandle(STDOUT),
        STDERR: _GetStdHandle(STDERR),
    }
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
    """Return a CONSOLE_SCREEN_BUFFER_INFO struct for the given stream.

    The Win32 return code is deliberately ignored (the original bound it
    to an unused local); on failure callers receive a zeroed struct.
    """
    handle = handles[stream_id]
    csbi = CONSOLE_SCREEN_BUFFER_INFO()
    _GetConsoleScreenBufferInfo(handle, byref(csbi))
    return csbi
def SetConsoleTextAttribute(stream_id, attrs):
    """Apply the given color/intensity attribute bits to a console stream."""
    return _SetConsoleTextAttribute(handles[stream_id], attrs)
def SetConsoleCursorPosition(stream_id, position, adjust=True):
    """Move the console cursor; *position* is a 1-based ANSI (y, x) pair."""
    position = COORD(*position)
    if position.Y <= 0 or position.X <= 0:
        # Out-of-range request: silently ignore it.
        return
    # Win32 is 0-based and expects (x, y); ANSI is 1-based (y, x).
    win_position = COORD(position.Y - 1, position.X - 1)
    if adjust:
        # Shift by the viewport's scroll position.
        window = GetConsoleScreenBufferInfo(STDOUT).srWindow
        win_position.Y += window.Top
        win_position.X += window.Left
    return _SetConsoleCursorPosition(handles[stream_id], win_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
    """Write *char* *length* times starting at *start*.

    Returns the number of cells actually written. The Win32 success flag
    was previously bound to an unused local and is now simply discarded.
    """
    handle = handles[stream_id]
    char = c_char(char.encode())
    length = wintypes.DWORD(length)
    num_written = wintypes.DWORD(0)
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    _FillConsoleOutputCharacterA(
        handle, char, length, start, byref(num_written))
    return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
    ''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
    # Note that this is hard-coded for ANSI (vs wide) bytes.
    cells_written = wintypes.DWORD(0)
    return _FillConsoleOutputAttribute(
        handles[stream_id], wintypes.WORD(attr), wintypes.DWORD(length),
        start, byref(cells_written))
def SetConsoleTitle(title):
    # NOTE(review): _SetConsoleTitleW is actually bound to the ANSI
    # SetConsoleTitleA entry point with an LPCSTR argument - confirm
    # callers pass encoded bytes, not unicode text.
    return _SetConsoleTitleW(title)
| apache-2.0 |
anryko/ansible | lib/ansible/modules/cloud/azure/azure_rm_devtestlabschedule.py | 19 | 11356 | #!/usr/bin/python
#
# Copyright (c) 2019 Zim Kalinowski, (@zikalino)
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_devtestlabschedule
version_added: "2.8"
short_description: Manage Azure DevTest Lab Schedule instance
description:
- Create, update and delete instance of Azure DecTest Lab Schedule.
options:
resource_group:
description:
- The name of the resource group.
required: True
lab_name:
description:
- The name of the lab.
required: True
name:
description:
- The name of the schedule.
required: True
choices:
- lab_vms_startup
- lab_vms_shutdown
time:
description:
- The time of day the schedule will occur.
time_zone_id:
description:
- The time zone ID.
state:
description:
- Assert the state of the Schedule.
- Use C(present) to create or update an Schedule and C(absent) to delete it.
default: present
choices:
- absent
- present
extends_documentation_fragment:
- azure
- azure_tags
author:
- Zim Kalinowski (@zikalino)
'''
EXAMPLES = '''
- name: Create (or update) DevTest Lab Schedule
azure_rm_devtestlabschedule:
resource_group: myResourceGroup
lab_name: myLab
name: lab_vms_shutdown
time: "1030"
time_zone_id: "UTC+12"
'''
RETURN = '''
id:
description:
- The identifier of the resource.
returned: always
type: str
sample: "/subscriptions/xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx/resourcegroups/myResourceGroup/providers/microsoft.devtestlab/labs/myLab/schedules/l
abVmsShutdown"
'''
import time
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
try:
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller
from msrestazure.azure_operation import AzureOperationPoller
from azure.mgmt.devtestlabs import DevTestLabsClient
from msrest.serialization import Model
except ImportError:
# This is handled in azure_rm_common
pass
class Actions:
    """Enumeration of the operations exec_module may decide to perform."""
    NoAction = 0
    Create = 1
    Update = 2
    Delete = 3
class AzureRMSchedule(AzureRMModuleBase):
    """Configuration class for an Azure RM Schedule resource"""

    def __init__(self):
        # Ansible argument spec: resource location plus the two schedule
        # properties (time of day and time zone).
        self.module_arg_spec = dict(
            resource_group=dict(
                type='str',
                required=True
            ),
            lab_name=dict(
                type='str',
                required=True
            ),
            name=dict(
                type='str',
                required=True,
                choices=['lab_vms_startup', 'lab_vms_shutdown']
            ),
            time=dict(
                type='str'
            ),
            time_zone_id=dict(
                type='str'
            ),
            state=dict(
                type='str',
                default='present',
                choices=['present', 'absent']
            )
        )

        self.resource_group = None
        self.lab_name = None
        self.name = None
        # Accumulates the ARM resource body built from module parameters.
        self.schedule = dict()

        self.results = dict(changed=False)
        self.mgmt_client = None
        self.state = None
        self.to_do = Actions.NoAction

        # time/time_zone_id are only mandatory when creating/updating.
        required_if = [
            ('state', 'present', ['time', 'time_zone_id'])
        ]

        super(AzureRMSchedule, self).__init__(derived_arg_spec=self.module_arg_spec,
                                              supports_check_mode=True,
                                              supports_tags=True,
                                              required_if=required_if)

    def exec_module(self, **kwargs):
        """Main module execution method"""
        # Known arg-spec keys become attributes of self; everything else
        # (time, time_zone_id, tags) collects into self.schedule.
        for key in list(self.module_arg_spec.keys()) + ['tags']:
            if hasattr(self, key):
                setattr(self, key, kwargs[key])
            elif kwargs[key] is not None:
                self.schedule[key] = kwargs[key]

        self.schedule['status'] = "Enabled"

        # Map the module-friendly names onto the fixed ARM schedule names
        # and their task types.
        if self.name == 'lab_vms_startup':
            self.name = 'LabVmsStartup'
            self.schedule['task_type'] = 'LabVmsStartupTask'
        elif self.name == 'lab_vms_shutdown':
            self.name = 'LabVmsShutdown'
            self.schedule['task_type'] = 'LabVmsShutdownTask'

        if self.state == 'present':
            # The API expects the time nested under daily_recurrence and an
            # upper-case time zone id.
            self.schedule['daily_recurrence'] = {'time': self.schedule.pop('time')}
            self.schedule['time_zone_id'] = self.schedule['time_zone_id'].upper()

        response = None

        self.mgmt_client = self.get_mgmt_svc_client(DevTestLabsClient,
                                                    base_url=self._cloud_environment.endpoints.resource_manager)

        resource_group = self.get_resource_group(self.resource_group)

        old_response = self.get_schedule()

        # Decide which action to take by comparing desired vs current state.
        if not old_response:
            self.log("Schedule instance doesn't exist")
            if self.state == 'absent':
                self.log("Old instance didn't exist")
            else:
                self.to_do = Actions.Create
        else:
            self.log("Schedule instance already exists")
            if self.state == 'absent':
                self.to_do = Actions.Delete
            elif self.state == 'present':
                if (not default_compare(self.schedule, old_response, '', self.results)):
                    self.to_do = Actions.Update

        if (self.to_do == Actions.Create) or (self.to_do == Actions.Update):
            self.log("Need to Create / Update the Schedule instance")
            if self.check_mode:
                # Check mode: report the pending change without applying it.
                self.results['changed'] = True
                return self.results
            response = self.create_update_schedule()
            self.results['changed'] = True
            self.log("Creation / Update done")
        elif self.to_do == Actions.Delete:
            self.log("Schedule instance deleted")
            self.results['changed'] = True
            if self.check_mode:
                return self.results
            self.delete_schedule()
            # This currently doesn't work as there is a bug in SDK / Service
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)
        else:
            self.log("Schedule instance unchanged")
            self.results['changed'] = False
            response = old_response

        if self.state == 'present':
            self.results.update({
                'id': response.get('id', None)
            })
        return self.results

    def create_update_schedule(self):
        '''
        Creates or updates Schedule with the specified configuration.

        :return: deserialized Schedule instance state dictionary
        '''
        self.log("Creating / Updating the Schedule instance {0}".format(self.name))

        try:
            response = self.mgmt_client.schedules.create_or_update(resource_group_name=self.resource_group,
                                                                   lab_name=self.lab_name,
                                                                   name=self.name,
                                                                   schedule=self.schedule)
            # Long-running operations come back as pollers; wait for them.
            if isinstance(response, LROPoller) or isinstance(response, AzureOperationPoller):
                response = self.get_poller_result(response)

        except CloudError as exc:
            self.log('Error attempting to create the Schedule instance.')
            self.fail("Error creating the Schedule instance: {0}".format(str(exc)))
        return response.as_dict()

    def delete_schedule(self):
        '''
        Deletes specified Schedule instance in the specified subscription and resource group.

        :return: True
        '''
        self.log("Deleting the Schedule instance {0}".format(self.name))
        try:
            response = self.mgmt_client.schedules.delete(resource_group_name=self.resource_group,
                                                         lab_name=self.lab_name,
                                                         name=self.name)
        except CloudError as e:
            self.log('Error attempting to delete the Schedule instance.')
            self.fail("Error deleting the Schedule instance: {0}".format(str(e)))

        return True

    def get_schedule(self):
        '''
        Gets the properties of the specified Schedule.

        :return: deserialized Schedule instance state dictionary,
                 or False when the schedule does not exist
        '''
        self.log("Checking if the Schedule instance {0} is present".format(self.name))
        found = False
        try:
            response = self.mgmt_client.schedules.get(resource_group_name=self.resource_group,
                                                      lab_name=self.lab_name,
                                                      name=self.name)
            found = True
            self.log("Response : {0}".format(response))
            self.log("Schedule instance : {0} found".format(response.name))
        except CloudError as e:
            # A missing resource surfaces as a CloudError; treated as absent.
            self.log('Did not find the Schedule instance.')
        if found is True:
            return response.as_dict()

        return False
def default_compare(new, old, path, result):
    """Recursively check whether *old* already satisfies *new*.

    Returns True when the current resource state (*old*) matches everything
    specified in the requested state (*new*); keys absent from *new* are
    ignored.  On the first difference returns False and records a
    human-readable description under result['compare'].
    """
    if new is None:
        # Caller did not specify this value: anything currently set is fine.
        return True
    elif isinstance(new, dict):
        if not isinstance(old, dict):
            result['compare'] = 'changed [' + path + '] old dict is null'
            return False
        for k in new.keys():
            if not default_compare(new.get(k), old.get(k, None), path + '/' + k, result):
                return False
        return True
    elif isinstance(new, list):
        if not isinstance(old, list) or len(new) != len(old):
            result['compare'] = 'changed [' + path + '] length is different or null'
            return False
        if len(new) == 0:
            # Both lists empty: nothing to compare.  Also guards the
            # old[0] access below, which raised IndexError on empty lists.
            return True
        # Order-insensitive compare: sort both sides, by a shared dict key
        # when comparing lists of dicts.
        if isinstance(old[0], dict):
            key = None
            if 'id' in old[0] and 'id' in new[0]:
                key = 'id'
            elif 'name' in old[0] and 'name' in new[0]:
                key = 'name'
            else:
                key = list(old[0])[0]
            new = sorted(new, key=lambda x: x.get(key, None))
            old = sorted(old, key=lambda x: x.get(key, None))
        else:
            new = sorted(new)
            old = sorted(old)
        for i in range(len(new)):
            if not default_compare(new[i], old[i], path + '/*', result):
                return False
        return True
    else:
        if path == '/location':
            # Azure reports location both with and without spaces and in
            # mixed case; normalise both sides before comparing.
            # BUG FIX: the original assigned `old = new.replace(...)`,
            # normalising *new* twice and never *old*, so every location
            # compare succeeded and location changes were silently ignored.
            new = new.replace(' ', '').lower()
            old = old.replace(' ', '').lower()
        if new == old:
            return True
        else:
            result['compare'] = 'changed [' + path + '] ' + str(new) + ' != ' + str(old)
            return False
def main():
    """Module entry point: instantiating the class runs the module."""
    AzureRMSchedule()


if __name__ == '__main__':
    main()
| gpl-3.0 |
jalavik/inspire-next | inspire/modules/forms/form.py | 3 | 2613 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from flask.ext.wtf import Form
# Default (empty) metadata attached to every field group; copied per group
# and optionally overridden by the group's own metadata dict.
CFG_GROUPS_META = {
    'classes': None,
    'description': None,
    'indication': None,
}
class InspireForm(Form):
    """Generic Form class to be used in INSPIRE forms.

    The redundant ``__init__`` override (which only forwarded its arguments
    to the parent) has been removed.
    """

    def get_groups(self):
        """Get a list of the (group metadata, list of fields)-tuples.

        The last element of the list has no group metadata (i.e. None),
        and contains the list of fields not assigned to any group.
        """
        fields_included = set()
        field_groups = []

        if hasattr(self, 'groups'):
            for group in self.groups:
                group_obj = {
                    'name': group[0],
                    'meta': CFG_GROUPS_META.copy(),
                }
                fields = []
                for field_name in group[1]:
                    if field_name == '-':
                        # '-' is a visual separator token, not a field name.
                        fields.append(field_name)
                    else:
                        try:
                            fields.append(self[field_name])
                            fields_included.add(field_name)
                        except KeyError:
                            # Group lists a field that is absent on the form.
                            pass
                # An optional third group element carries metadata overrides.
                if len(group) == 3:
                    group_obj['meta'].update(group[2])
                field_groups.append((group_obj, fields))

        # Append missing fields not defined in groups
        rest_fields = [field for field in self
                       if field.name not in fields_included]
        if rest_fields:
            field_groups.append((None, rest_fields))
        return field_groups
| gpl-2.0 |
h3biomed/ansible | test/units/modules/network/onyx/test_onyx_facts.py | 68 | 2401 | #
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from units.compat.mock import patch
from units.modules.utils import set_module_args
from .onyx_module import TestOnyxModule, load_fixture
from ansible.modules.network.onyx import onyx_facts
class TestOnyxFacts(TestOnyxModule):
    # Unit tests for the onyx_facts module; every device "show" command is
    # served from a fixture file instead of a real connection.

    module = onyx_facts

    def setUp(self):
        super(TestOnyxFacts, self).setUp()
        # Patch the CLI call point so load_fixtures() can stub its output.
        self.mock_run_command = patch.object(
            onyx_facts.FactsBase, "_show_cmd")
        self.run_command = self.mock_run_command.start()

    def tearDown(self):
        super(TestOnyxFacts, self).tearDown()
        self.mock_run_command.stop()

    def load_fixtures(self, commands=None, transport=None):
        def load_from_file(*args, **kwargs):
            command = args[0]
            # Fixture name is derived from the command text, with characters
            # that are awkward in filenames replaced (space -> '_', '/' -> '7').
            filename = "onyx_facts_%s.cfg" % command
            filename = filename.replace(' ', '_')
            filename = filename.replace('/', '7')
            output = load_fixture(filename)
            return output

        self.run_command.side_effect = load_from_file

    def test_onyx_facts_version(self):
        set_module_args(dict(gather_subset='version'))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        # Each run returns the requested subset plus the gather_subset key.
        self.assertEqual(len(facts), 2)
        version = facts['ansible_net_version']
        self.assertEqual(version['Product name'], 'MLNX-OS')

    def test_onyx_facts_modules(self):
        set_module_args(dict(gather_subset='modules'))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        self.assertEqual(len(facts), 2)
        modules = facts['ansible_net_modules']
        self.assertIn("MGMT", modules)

    def test_onyx_facts_interfaces(self):
        set_module_args(dict(gather_subset='interfaces'))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        self.assertEqual(len(facts), 2)
        interfaces = facts['ansible_net_interfaces']
        self.assertEqual(len(interfaces), 2)

    def test_onyx_facts_all(self):
        set_module_args(dict(gather_subset='all'))
        result = self.execute_module()
        facts = result.get('ansible_facts')
        # 'all' gathers the three subsets plus the gather_subset key.
        self.assertEqual(len(facts), 4)
jmckaskill/subversion | tools/examples/check-modified.py | 7 | 1678 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# USAGE: check-modified.py FILE_OR_DIR1 FILE_OR_DIR2 ...
#
# prints out the URL associated with each item
#
import sys
import os
import os.path
import svn.core
import svn.client
import svn.wc
FORCE_COMPARISON = 0
def usage():
  """Print a one-line usage summary and exit with status 0."""
  print("Usage: " + sys.argv[0] + " FILE_OR_DIR1 FILE_OR_DIR2\n")
  sys.exit(0)
def run(files):
  """Print a one-character status for each working-copy path in *files*:
  'M' modified, ' ' unmodified, '?' unknown/unversioned."""
  for f in files:
    dirpath = fullpath = os.path.abspath(f)
    if not os.path.isdir(dirpath):
      dirpath = os.path.dirname(dirpath)

    adm_baton = svn.wc.adm_open(None, dirpath, False, True)

    try:
      # Raises if the path is not under version control; the bound value
      # itself is unused.
      entry = svn.wc.entry(fullpath, adm_baton, 0)

      if svn.wc.text_modified_p(fullpath, FORCE_COMPARISON,
                                adm_baton):
        print("M %s" % f)
      else:
        print(" %s" % f)
    except Exception:
      # Typically an unversioned path; report it as unknown instead of
      # aborting the whole run.  (Was a bare `except:`, which also
      # swallowed KeyboardInterrupt and SystemExit.)
      print("? %s" % f)

    svn.wc.adm_close(adm_baton)


if __name__ == '__main__':
  run(sys.argv[1:])
| apache-2.0 |
alu042/edx-platform | lms/djangoapps/verify_student/services.py | 46 | 4978 | """
Implementation of "reverification" service to communicate with Reverification XBlock
"""
import logging
from django.core.exceptions import ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from opaque_keys.edx.keys import CourseKey
from student.models import User, CourseEnrollment
from lms.djangoapps.verify_student.models import VerificationCheckpoint, VerificationStatus, SkippedReverification
log = logging.getLogger(__name__)
class ReverificationService(object):
    """
    Reverification XBlock service
    """

    SKIPPED_STATUS = "skipped"            # learner chose to skip this checkpoint
    NON_VERIFIED_TRACK = "not-verified"   # learner is not in a verified enrollment

    def get_status(self, user_id, course_id, related_assessment_location):
        """Get verification attempt status against a user for a given
        'checkpoint' and 'course_id'.

        Args:
            user_id (str): User Id string
            course_id (str): A string of course id
            related_assessment_location (str): Location of Reverification XBlock

        Returns: str or None
        """
        user = User.objects.get(id=user_id)
        course_key = CourseKey.from_string(course_id)

        # Track and skip checks take precedence over any attempt status.
        if not CourseEnrollment.is_enrolled_as_verified(user, course_key):
            return self.NON_VERIFIED_TRACK
        elif SkippedReverification.check_user_skipped_reverification_exists(user_id, course_key):
            return self.SKIPPED_STATUS

        try:
            # Most recent attempt status for this checkpoint, if any.
            checkpoint_status = VerificationStatus.objects.filter(
                user_id=user_id,
                checkpoint__course_id=course_key,
                checkpoint__checkpoint_location=related_assessment_location
            ).latest()
            return checkpoint_status.status
        except ObjectDoesNotExist:
            return None

    def start_verification(self, course_id, related_assessment_location):
        """Create re-verification link against a verification checkpoint.

        Args:
            course_id(str): A string of course id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            Re-verification link
        """
        course_key = CourseKey.from_string(course_id)

        # Get-or-create the verification checkpoint
        VerificationCheckpoint.get_or_create_verification_checkpoint(course_key, related_assessment_location)

        re_verification_link = reverse(
            'verify_student_incourse_reverify',
            args=(
                unicode(course_key),
                unicode(related_assessment_location)
            )
        )
        return re_verification_link

    def skip_verification(self, user_id, course_id, related_assessment_location):
        """Add skipped verification attempt entry for a user against a given
        'checkpoint'.

        Args:
            user_id(str): User Id string
            course_id(str): A string of course_id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            None
        """
        course_key = CourseKey.from_string(course_id)
        checkpoint = VerificationCheckpoint.objects.get(
            course_id=course_key,
            checkpoint_location=related_assessment_location
        )
        user = User.objects.get(id=user_id)

        # user can skip a reverification attempt only if that user has not already
        # skipped an attempt
        try:
            SkippedReverification.add_skipped_reverification_attempt(checkpoint, user_id, course_key)
        except IntegrityError:
            # Duplicate skip: log and bail out without touching credit state.
            log.exception("Skipped attempt already exists for user %s: with course %s:", user_id, unicode(course_id))
            return

        try:
            # Avoid circular import
            from openedx.core.djangoapps.credit.api import set_credit_requirement_status

            # As a user skips the reverification it declines to fulfill the requirement so
            # requirement sets to declined.
            set_credit_requirement_status(
                user.username,
                course_key,
                'reverification',
                checkpoint.checkpoint_location,
                status='declined'
            )
        except Exception as err:  # pylint: disable=broad-except
            # Credit bookkeeping is best-effort; the skip itself succeeded.
            log.error("Unable to add credit requirement status for user with id %d: %s", user_id, err)

    def get_attempts(self, user_id, course_id, related_assessment_location):
        """Get re-verification attempts against a user for a given 'checkpoint'
        and 'course_id'.

        Args:
            user_id(str): User Id string
            course_id(str): A string of course id
            related_assessment_location(str): Location of Reverification XBlock

        Returns:
            Number of re-verification attempts of a user
        """
        course_key = CourseKey.from_string(course_id)
        return VerificationStatus.get_user_attempts(user_id, course_key, related_assessment_location)
| agpl-3.0 |
zhangziang/django-allauth | allauth/socialaccount/providers/paypal/views.py | 60 | 1606 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .provider import PaypalProvider
class PaypalOAuth2Adapter(OAuth2Adapter):
    # OAuth2 adapter for "Log in with PayPal" using PayPal's
    # openidconnect endpoints; live vs sandbox is chosen via settings.
    provider_id = PaypalProvider.id
    supports_state = False

    @property
    def authorize_url(self):
        # Authorization runs on the www host; token/profile on the api host.
        path = 'webapps/auth/protocol/openidconnect/v1/authorize'
        return 'https://www.{0}/{1}'.format(self._get_endpoint(), path)

    @property
    def access_token_url(self):
        path = "v1/identity/openidconnect/tokenservice"
        return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)

    @property
    def profile_url(self):
        path = 'v1/identity/openidconnect/userinfo'
        return 'https://api.{0}/{1}'.format(self._get_endpoint(), path)

    def _get_endpoint(self):
        # The provider's 'MODE' setting selects the live or sandbox domain.
        settings = self.get_provider().get_settings()
        if settings.get('MODE') == 'live':
            return 'paypal.com'
        else:
            return 'sandbox.paypal.com'

    def complete_login(self, request, app, token, **kwargs):
        # NOTE(review): `token` appears to be a token object rather than the
        # raw access-token string here - confirm requests serialises it as
        # intended for the 'access_token' query parameter.
        response = requests.post(self.profile_url,
                                 params={'schema': 'openid',
                                         'access_token': token})
        extra_data = response.json()
        return self.get_provider().sociallogin_from_response(request, extra_data)
oauth2_login = OAuth2LoginView.adapter_view(PaypalOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(PaypalOAuth2Adapter)
| mit |
overxfl0w/Doogle | indexer.py | 1 | 4570 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Authors: Jose & Alberto #
from glob import glob
from sys import argv
from re import sub,findall,match,compile,DOTALL
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import codecs
try: from cPickle import dump,HIGHEST_PROTOCOL
except: from pickle import dump,HIGHEST_PROTOCOL
STOPWORDS = [x for x in stopwords.words('spanish')]
STEMMER = SnowballStemmer("spanish")
# USAGE #
# Print command-line usage and exit hints (message text is in Spanish).
# NOTE: Python 2 'print' statement; the usage string is runtime output and is
# left byte-for-byte unchanged (it contains literal backslash sequences).
def usage(): print """Usage: python indexer.py directorio fichero_indice stopwords stemming\n
 -> directorio: Indica el directorio de los documentos\n -> fichero_indice: Indica el fichero donde se salvara el indice
 -> stopwords: Indica si se eliminaran stopwords [SI\NO]\n -> stemming: Indica si se realizara stemming [SI\NO]\n
 -> Las opciones stopwords y stemming incrementaran el coste temporal de la indexacion\n
 """
# Elimina caracteres no alfanumericos #
# Replace every character that is neither \w nor one of the listed accented
# letters with a single space.
def delete_non_alphanumeric(text):
    pattern = u"[^\wáéíóúñçàèìòù]"
    return sub(pattern, u" ", text)
# Extrae los terminos del texto, cortando por \t, \n y espacios "
# Split *text* on single spaces into cleaned terms, optionally removing
# stopwords (stopwords == 1) and applying stemming (stemming == 1).
def term_extractor(text, stopwords, stemming):
    terms = process_list(text.split(" "))
    if stopwords == 1:
        terms = remove_stopwords(terms)
    if stemming == 1:
        terms = make_stemming(terms, STEMMER)
    return terms
# Eliminar caracteres nulos y pasa a minusculas los terminos de la lista #
# Drop empty strings from *l* and lowercase the remaining terms.
def process_list(l):
    cleaned = []
    for term in l:
        if term != "":
            cleaned.append(term.lower())
    return cleaned
# Extrae la lista de noticias para un documento dado #
# Split a raw document into chunks on the literal "<DOC>" delimiter.
def extract_notices(doc):
    return doc.split("<DOC>")
# Dada una noticia notice, extrae la informacion data requerida #
# Return the text enclosed by <data>...</data> inside *notice*
# (DOTALL so the field may span newlines).
def extract_notice_data(notice, data):
    pattern = compile(".*<" + data + ">(.*)</" + data + ">.*", DOTALL)
    return match(pattern, notice).group(1)
# Anyade cada termino term de la lista l al diccionario d dado y su identificador de noticia#
# Append *ident* to d[term] for every term in *l* (in place) and return *d*.
# This builds an inverted index: term -> list of notice identifiers.
# Duplicate terms in *l* append duplicate identifiers, matching the
# original two-branch implementation.
def add_to_dict(d, l, ident):
    for term in l:
        # setdefault is the idiomatic single-lookup form of
        # "if term not in d: d[term] = [...] else: d[term].append(...)".
        d.setdefault(term, []).append(ident)
    return d
# Eliminar stopwords #
# Remove stopwords from *text* (a list of terms).
# Fix: the *language* parameter was previously ignored and the Spanish list
# was always used; it is now honoured (default behaviour is unchanged, and
# the precomputed STOPWORDS list is still reused for Spanish to avoid
# reloading the corpus). Membership testing now uses a set (O(1) per term
# instead of an O(n) list scan).
def remove_stopwords(text, language='spanish'):
    blocked = set(STOPWORDS if language == 'spanish' else stopwords.words(language))
    return [w for w in text if w.lower() not in blocked]
# Hacer stemming a cada palabra de la lista l #
# Apply *stemmer* (any object with a .stem(word) method) to every word.
def make_stemming(text, stemmer):
    stemmed = []
    for word in text:
        stemmed.append(stemmer.stem(word))
    return stemmed
# Guarda un objeto serializado en el fichero dest #
# Serialize *object* into file *dest* using the highest pickle protocol.
def save_object(object, dest):
    fd = open(dest, 'wb')
    try:
        dump(object, fd, HIGHEST_PROTOCOL)
    finally:
        fd.close()
# Build inverted indexes over every *.sgml file under *path* and pickle the
# resulting structures, as a single tuple, into *index_file*.
# stopwords / stemming arrive as "si"/"no" strings (lowercased by the caller).
def indexer(path,index_file,stopwords,stemming):
    doc_hash = {}               # docid -> source filename
    notice_hash = {}            # posid -> [(docid, title, date, category)]
    notice_terms_index = {}     # TEXT term -> [(docid, posid), ...]
    notice_text_index = {}      # (docid, posid) -> lowercased raw text
    notice_title_index = {}     # TITLE term -> [(docid, posid), ...]
    notice_date_index = {}      # DATE term -> [(docid, posid), ...]
    notice_category_index = {}  # CATEGORY term -> [(docid, posid), ...]
    docs = glob(path+"/*.sgml")
    docid = 0
    posid = 0                   # notice counter, global across all documents
    # Normalize the CLI flags to 0/1 once, outside the loops.
    stopwords = 1 if stopwords == "si" else 0
    stemming = 1 if stemming == "si" else 0
    for doc_file in docs:
        with codecs.open(doc_file,"r",encoding="utf-8") as fd: doc = fd.read()
        doc_hash[docid] = doc_file ;
        # The chunk before the first <DOC> marker is not a notice -> drop it.
        notices = extract_notices(doc)[1:]
        for notice in notices:
            notice_title = extract_notice_data(notice,"TITLE")
            notice_date = extract_notice_data(notice,"DATE")
            notice_category = extract_notice_data(notice,"CATEGORY")
            notice_text = extract_notice_data(notice,"TEXT")
            # set() deduplicates each field's terms before indexing.
            notice_text_terms = list(set(term_extractor(delete_non_alphanumeric(notice_text.lower()),stopwords,stemming)))
            notice_title_terms = list(set(term_extractor(delete_non_alphanumeric(notice_title.lower()),stopwords,stemming)))
            notice_date_terms = list(set(term_extractor(delete_non_alphanumeric(notice_date.lower()),stopwords,stemming)))
            notice_category_terms = list(set(term_extractor(delete_non_alphanumeric(notice_category.lower()),stopwords,stemming)))
            notice_text_index[(docid,posid)] = notice_text.lower()
            notice_terms_index = add_to_dict(notice_terms_index,notice_text_terms,(docid,posid))
            notice_title_index = add_to_dict(notice_title_index,notice_title_terms,(docid,posid))
            notice_date_index = add_to_dict(notice_date_index,notice_date_terms,(docid,posid))
            notice_category_index = add_to_dict(notice_category_index,notice_category_terms,(docid,posid))
            notice_hash = add_to_dict(notice_hash,[posid],(docid,notice_title,notice_date,notice_category))
            posid += 1
        docid += 1
    # Persist every index in one pickle so queries can load them together.
    save_object((doc_hash,notice_terms_index,notice_title_index,notice_date_index,notice_category_index,notice_hash,notice_text_index),index_file)
# Entry point #
if __name__ == "__main__":
    # CLI: indexer.py <directory> <index_file> <stopwords si|no> <stemming si|no>
    if len(argv)<5 or argv[3].lower() not in ["si","no"] or argv[4].lower() not in ["si","no"]: usage(); exit()
    indexer(argv[1],argv[2],argv[3].lower(),argv[4].lower())
| gpl-2.0 |
duointeractive/django-fabtastic | fabtastic/fabric/commands/c_common.py | 1 | 3745 | import os
import sys
from fabric.api import *
from fabtastic import db
from fabtastic.fabric.util import _current_host_has_role
def get_remote_db(roles='webapp_servers'):
    """
    Retrieves a remote DB dump and dumps it in your project's root directory.
    """
    if _current_host_has_role(roles):
        dump_filename = db.util.get_db_dump_filename()
        dump_path = os.path.join(env.REMOTE_CODEBASE_PATH, dump_filename)
        with cd(env.REMOTE_CODEBASE_PATH):
            # Dump the DB remotely, download the dump, then clean up the
            # remote copy.
            run("workon %s && ./manage.py ft_dump_db %s" % (
                env.REMOTE_VIRTUALENV_NAME,
                dump_filename))
            get(dump_path, dump_filename)
            run("rm %s" % dump_filename)
        # In a multi-host environment, target hostname is appended by Fabric.
        # TODO: Make this use Fabric 1.0's improved get() when it's released.
        new_filename = '%s.%s' % (dump_filename, env['host'])
        # Fix: only rename when the hostname-suffixed file actually exists,
        # mirroring sync_to_remote_db. On a single-host get() the file is
        # already written under its final name and the unconditional 'mv'
        # used to fail.
        if os.path.exists(new_filename):
            # Move it back to what it should be.
            local('mv %s %s' % (new_filename, dump_filename))
        # Die after this to prevent executing this with more hosts.
        sys.exit(0)
def sync_to_remote_db(roles='webapp_servers'):
    """
    Retrieves a remote DB dump, wipes your local DB, and installs the
    remote copy in place.
    """
    if _current_host_has_role(roles):
        dump_filename = db.util.get_db_dump_filename()
        dump_path = os.path.join(env.REMOTE_CODEBASE_PATH, dump_filename)
        with cd(env.REMOTE_CODEBASE_PATH):
            # Dump remotely, download the dump, then delete the remote copy.
            run("workon %s && ./manage.py ft_dump_db %s" % (
                env.REMOTE_VIRTUALENV_NAME,
                dump_filename))
            get(dump_path, dump_filename)
            run("rm %s" % dump_filename)
        # In a multi-host environment, target hostname is appended by Fabric.
        # TODO: Make this use Fabric 1.0's improved get() when it's released.
        filename_with_hostname = '%s.%s' % (dump_filename, env['host'])
        if os.path.exists(filename_with_hostname):
            # Move it back to what it should be.
            local('mv %s %s' % (filename_with_hostname, dump_filename))
        # Restore into the local DB, then remove the downloaded dump file.
        local('./manage.py ft_restore_db %s' % dump_filename, capture=False)
        local('rm %s' % dump_filename)
        # Die after this to prevent executing this with more hosts.
        sys.exit(0)
def flush_cache(roles=['webapp_servers', 'celery_servers']):
    """
    Flushes the cache.
    """
    # Guard clause: only act when this host plays one of the given roles.
    if not _current_host_has_role(roles):
        return
    print("=== FLUSHING CACHE ===")
    command = "workon %s && ./manage.py ft_clear_cache" % env.REMOTE_VIRTUALENV_NAME
    with cd(env.REMOTE_CODEBASE_PATH):
        run(command)
def pip_update_reqs(roles=['webapp_servers', 'celery_servers']):
    """
    Updates your virtualenv from requirements.txt.
    """
    # Guard clause: only act when this host plays one of the given roles.
    if not _current_host_has_role(roles):
        return
    print("=== UPDATING REQUIREMENTS ===")
    command = "workon %s && ./manage.py ft_pip_update_reqs" % env.REMOTE_VIRTUALENV_NAME
    with cd(env.REMOTE_CODEBASE_PATH):
        run(command)
def fabtastic_update(roles=['webapp_servers', 'celery_servers']):
    """
    Updates your copy of django-fabtastic from the git repository.
    """
    # Guard clause: only act when this host plays one of the given roles.
    if not _current_host_has_role(roles):
        return
    print("=== UPDATING FABTASTIC ===")
    command = "workon %s && ./manage.py ft_fabtastic_update" % env.REMOTE_VIRTUALENV_NAME
    with cd(env.REMOTE_CODEBASE_PATH):
        run(command)
def collectstatic(roles='webapp_servers'):
    """
    Syncs the checked out git media with S3.
    """
    # Fix: guard the env.already_media_synced read with getattr — the flag is
    # only ever assigned at the bottom of this function, so the first call in
    # a session previously raised AttributeError on Fabric's env when no
    # fabfile had pre-seeded it.
    if _current_host_has_role(roles) and not getattr(env, 'already_media_synced', False):
        print("=== SYNCING STATIC MEDIA WITH S3 ===")
        with cd(env.REMOTE_CODEBASE_PATH):
            run("workon %s && ./manage.py collectstatic --noinput" % env.REMOTE_VIRTUALENV_NAME)
        # Remember we synced so later hosts in the run skip the upload.
        env.already_media_synced = True
mattrobenolt/django | tests/template_tests/filter_tests/test_length_is.py | 360 | 3204 | from django.template.defaultfilters import length_is
from django.test import SimpleTestCase
from ..utils import setup
class LengthIsTests(SimpleTestCase):
    """Template-engine tests for the ``length_is`` filter.

    Each ``@setup`` decorator registers a throwaway template under the given
    name; the test renders it and asserts which branch was taken.
    """

    @setup({'length_is01': '{% if some_list|length_is:"4" %}Four{% endif %}'})
    def test_length_is01(self):
        output = self.engine.render_to_string('length_is01', {'some_list': ['4', None, True, {}]})
        self.assertEqual(output, 'Four')

    @setup({'length_is02': '{% if some_list|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is02(self):
        output = self.engine.render_to_string('length_is02', {'some_list': ['4', None, True, {}, 17]})
        self.assertEqual(output, 'Not Four')

    @setup({'length_is03': '{% if mystring|length_is:"4" %}Four{% endif %}'})
    def test_length_is03(self):
        output = self.engine.render_to_string('length_is03', {'mystring': 'word'})
        self.assertEqual(output, 'Four')

    @setup({'length_is04': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is04(self):
        output = self.engine.render_to_string('length_is04', {'mystring': 'Python'})
        self.assertEqual(output, 'Not Four')

    @setup({'length_is05': '{% if mystring|length_is:"4" %}Four{% else %}Not Four{% endif %}'})
    def test_length_is05(self):
        # Empty string: length 0, so the else branch renders.
        output = self.engine.render_to_string('length_is05', {'mystring': ''})
        self.assertEqual(output, 'Not Four')

    @setup({'length_is06': '{% with var|length as my_length %}{{ my_length }}{% endwith %}'})
    def test_length_is06(self):
        # NOTE: exercises the plain ``length`` filter inside {% with %},
        # not ``length_is`` itself.
        output = self.engine.render_to_string('length_is06', {'var': 'django'})
        self.assertEqual(output, '6')

    # Boolean return value from length_is should not be coerced to a string
    @setup({'length_is07': '{% if "X"|length_is:0 %}Length is 0{% else %}Length not 0{% endif %}'})
    def test_length_is07(self):
        output = self.engine.render_to_string('length_is07', {})
        self.assertEqual(output, 'Length not 0')

    @setup({'length_is08': '{% if "X"|length_is:1 %}Length is 1{% else %}Length not 1{% endif %}'})
    def test_length_is08(self):
        output = self.engine.render_to_string('length_is08', {})
        self.assertEqual(output, 'Length is 1')

    # Invalid uses that should fail silently.
    @setup({'length_is09': '{{ var|length_is:"fish" }}'})
    def test_length_is09(self):
        # Non-integer argument: the filter returns '' instead of raising.
        output = self.engine.render_to_string('length_is09', {'var': 'django'})
        self.assertEqual(output, '')

    @setup({'length_is10': '{{ int|length_is:"1" }}'})
    def test_length_is10(self):
        # Value without len(): the filter returns '' instead of raising.
        output = self.engine.render_to_string('length_is10', {'int': 7})
        self.assertEqual(output, '')

    @setup({'length_is11': '{{ none|length_is:"1" }}'})
    def test_length_is11(self):
        output = self.engine.render_to_string('length_is11', {'none': None})
        self.assertEqual(output, '')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the ``length_is`` filter function."""

    def test_empty_list(self):
        # assertIs pins the exact boolean identity rather than mere equality:
        # length_is must return True/False, not a truthy/falsy stand-in (see
        # the "should not be coerced" template test above this class).
        self.assertIs(length_is([], 0), True)
        self.assertIs(length_is([], 1), False)

    def test_string(self):
        self.assertIs(length_is('a', 1), True)
        self.assertIs(length_is('a', 10), False)
| bsd-3-clause |
40223142/2015cad0623 | static/Brython3.1.0-20150301-090019/Lib/html/parser.py | 737 | 19605 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Exception raised for all parse errors."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # position is a (line, column) pair; either half may be None.
        self.lineno = position[0]
        self.offset = position[1]

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Column is stored zero-based but reported one-based.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(_markupbase.ParserBase):
"""Find tags and other markup and call handler functions.
Usage:
p = HTMLParser()
p.feed(data)
...
p.close()
Start tags are handled by calling self.handle_starttag() or
self.handle_startendtag(); end tags by self.handle_endtag(). The
data between tags is passed from the parser to the derived class
by calling self.handle_data() with the data as argument (the data
may be split up in arbitrary chunks). Entity references are
passed by calling self.handle_entityref() with the entity
reference as the argument. Numeric character references are
passed to self.handle_charref() with the string containing the
reference as the argument.
"""
CDATA_CONTENT_ELEMENTS = ("script", "style")
def __init__(self, strict=False):
"""Initialize and reset this instance.
If strict is set to False (the default) the parser will parse invalid
markup, otherwise it will raise an error. Note that the strict mode
is deprecated.
"""
if strict:
warnings.warn("The strict mode is deprecated.",
DeprecationWarning, stacklevel=2)
self.strict = strict
self.reset()
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self)
def feed(self, data):
r"""Feed data to the parser.
Call this as often as you want, with as little or as much text
as you want (may include '\n').
"""
self.rawdata = self.rawdata + data
self.goahead(0)
def close(self):
"""Handle any buffered data."""
self.goahead(1)
def error(self, message):
raise HTMLParseError(message, self.getpos())
__starttag_text = None
def get_starttag_text(self):
"""Return full source of start tag: '<...>'."""
return self.__starttag_text
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
def clear_cdata_mode(self):
self.interesting = interesting_normal
self.cdata_elem = None
# Internal -- handle data as far as reasonable. May leave state
# and data to be processed by a subsequent call. If 'end' is
# true, force handling all data as if followed by EOF marker.
def goahead(self, end):
rawdata = self.rawdata
i = 0
n = len(rawdata)
while i < n:
match = self.interesting.search(rawdata, i) # < or &
if match:
j = match.start()
else:
if self.cdata_elem:
break
j = n
if i < j: self.handle_data(rawdata[i:j])
i = self.updatepos(i, j)
if i == n: break
startswith = rawdata.startswith
if startswith('<', i):
if starttagopen.match(rawdata, i): # < + letter
k = self.parse_starttag(i)
elif startswith("</", i):
k = self.parse_endtag(i)
elif startswith("<!--", i):
k = self.parse_comment(i)
elif startswith("<?", i):
k = self.parse_pi(i)
elif startswith("<!", i):
if self.strict:
k = self.parse_declaration(i)
else:
k = self.parse_html_declaration(i)
elif (i + 1) < n:
self.handle_data("<")
k = i + 1
else:
break
if k < 0:
if not end:
break
if self.strict:
self.error("EOF in middle of construct")
k = rawdata.find('>', i + 1)
if k < 0:
k = rawdata.find('<', i + 1)
if k < 0:
k = i + 1
else:
k += 1
self.handle_data(rawdata[i:k])
i = self.updatepos(i, k)
elif startswith("&#", i):
match = charref.match(rawdata, i)
if match:
name = match.group()[2:-1]
self.handle_charref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
else:
if ";" in rawdata[i:]: #bail by consuming &#
self.handle_data(rawdata[0:2])
i = self.updatepos(i, 2)
break
elif startswith('&', i):
match = entityref.match(rawdata, i)
if match:
name = match.group(1)
self.handle_entityref(name)
k = match.end()
if not startswith(';', k-1):
k = k - 1
i = self.updatepos(i, k)
continue
match = incomplete.match(rawdata, i)
if match:
# match.group() will contain at least 2 chars
if end and match.group() == rawdata[i:]:
if self.strict:
self.error("EOF in middle of entity or char ref")
else:
k = match.end()
if k <= i:
k = n
i = self.updatepos(i, i + 1)
# incomplete
break
elif (i + 1) < n:
# not the end of the buffer, and can't be confused
# with some other construct
self.handle_data("&")
i = self.updatepos(i, i + 1)
else:
break
else:
assert 0, "interesting.search() lied"
# end while
if end and i < n and not self.cdata_elem:
self.handle_data(rawdata[i:n])
i = self.updatepos(i, n)
self.rawdata = rawdata[i:]
# Internal -- parse html declarations, return length or -1 if not terminated
# See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
# See also parse_declaration in _markupbase
def parse_html_declaration(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<!', ('unexpected call to '
'parse_html_declaration()')
if rawdata[i:i+4] == '<!--':
# this case is actually already handled in goahead()
return self.parse_comment(i)
elif rawdata[i:i+3] == '<![':
return self.parse_marked_section(i)
elif rawdata[i:i+9].lower() == '<!doctype':
# find the closing >
gtpos = rawdata.find('>', i+9)
if gtpos == -1:
return -1
self.handle_decl(rawdata[i+2:gtpos])
return gtpos+1
else:
return self.parse_bogus_comment(i)
# Internal -- parse bogus comment, return length or -1 if not terminated
# see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
def parse_bogus_comment(self, i, report=1):
rawdata = self.rawdata
assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
'parse_comment()')
pos = rawdata.find('>', i+2)
if pos == -1:
return -1
if report:
self.handle_comment(rawdata[i+2:pos])
return pos + 1
# Internal -- parse processing instr, return end or -1 if not terminated
def parse_pi(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
match = piclose.search(rawdata, i+2) # >
if not match:
return -1
j = match.start()
self.handle_pi(rawdata[i+2: j])
j = match.end()
return j
# Internal -- handle starttag, return end or -1 if not terminated
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = match.group(1).lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
# Internal -- check to see if we have a complete starttag; return end
# or -1 if incomplete.
def check_for_whole_start_tag(self, i):
rawdata = self.rawdata
if self.strict:
m = locatestarttagend.match(rawdata, i)
else:
m = locatestarttagend_tolerant.match(rawdata, i)
if m:
j = m.end()
next = rawdata[j:j+1]
if next == ">":
return j + 1
if next == "/":
if rawdata.startswith("/>", j):
return j + 2
if rawdata.startswith("/", j):
# buffer boundary
return -1
# else bogus input
if self.strict:
self.updatepos(i, j + 1)
self.error("malformed empty start tag")
if j > i:
return j
else:
return i + 1
if next == "":
# end of input
return -1
if next in ("abcdefghijklmnopqrstuvwxyz=/"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
# end of input in or before attribute value, or we have the
# '/' from a '/>' ending
return -1
if self.strict:
self.updatepos(i, j)
self.error("malformed start tag")
if j > i:
return j
else:
return i + 1
raise AssertionError("we should not get here!")
# Internal -- parse endtag, return end or -1 if incomplete
def parse_endtag(self, i):
rawdata = self.rawdata
assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
match = endendtag.search(rawdata, i+1) # >
if not match:
return -1
gtpos = match.end()
match = endtagfind.match(rawdata, i) # </ + tag + >
if not match:
if self.cdata_elem is not None:
self.handle_data(rawdata[i:gtpos])
return gtpos
if self.strict:
self.error("bad end tag: %r" % (rawdata[i:gtpos],))
# find the name: w3.org/TR/html5/tokenization.html#tag-name-state
namematch = tagfind_tolerant.match(rawdata, i+2)
if not namematch:
# w3.org/TR/html5/tokenization.html#end-tag-open-state
if rawdata[i:i+3] == '</>':
return i+3
else:
return self.parse_bogus_comment(i)
tagname = namematch.group().lower()
# consume and ignore other stuff between the name and the >
# Note: this is not 100% correct, since we might have things like
# </tag attr=">">, but looking for > after tha name should cover
# most of the cases and is much simpler
gtpos = rawdata.find('>', namematch.end())
self.handle_endtag(tagname)
return gtpos+1
elem = match.group(1).lower() # script or style
if self.cdata_elem is not None:
if elem != self.cdata_elem:
self.handle_data(rawdata[i:gtpos])
return gtpos
self.handle_endtag(elem.lower())
self.clear_cdata_mode()
return gtpos
# Overridable -- finish processing of start+end tag: <tag.../>
def handle_startendtag(self, tag, attrs):
self.handle_starttag(tag, attrs)
self.handle_endtag(tag)
# Overridable -- handle start tag
def handle_starttag(self, tag, attrs):
pass
# Overridable -- handle end tag
def handle_endtag(self, tag):
pass
# Overridable -- handle character reference
def handle_charref(self, name):
pass
# Overridable -- handle entity reference
def handle_entityref(self, name):
pass
# Overridable -- handle data
def handle_data(self, data):
pass
# Overridable -- handle comment
def handle_comment(self, data):
pass
# Overridable -- handle declaration
def handle_decl(self, decl):
pass
# Overridable -- handle processing instruction
def handle_pi(self, data):
pass
def unknown_decl(self, data):
if self.strict:
self.error("unknown declaration: %r" % (data,))
# Internal -- helper to remove special character quoting
def unescape(self, s):
if '&' not in s:
return s
def replaceEntities(s):
s = s.groups()[0]
try:
if s[0] == "#":
s = s[1:]
if s[0] in ['x','X']:
c = int(s[1:].rstrip(';'), 16)
else:
c = int(s.rstrip(';'))
return chr(c)
except ValueError:
return '&#' + s
else:
from html.entities import html5
if s in html5:
return html5[s]
elif s.endswith(';'):
return '&' + s
for x in range(2, len(s)):
if s[:x] in html5:
return html5[s[:x]] + s[x:]
else:
return '&' + s
return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
replaceEntities, s, flags=re.ASCII)
| gpl-3.0 |
quantumgraph/mongo-query-aggregator | tests/test_upsert.py | 1 | 3413 | import unittest
import time
from pymongo import MongoClient
from moquag import MongoQueryAggregator
from time import sleep
from .settings import MONGO_DB_SETTINGS, logger
from collections import Counter
class TestBulk(unittest.TestCase):
    """Integration tests for MongoQueryAggregator's buffered upsert flushing.

    The aggregator is configured with a flush interval (seconds) and a
    maximum buffered-operation count; either limit triggers a flush.
    """

    def setUp(self):
        # Fresh connection and clean databases before every test.
        self.conn = MongoClient(**MONGO_DB_SETTINGS)
        self.conn.drop_database('testdb1')
        self.conn.drop_database('testdb2')
        self.conn.drop_database('testdb3')

    def test_1(self):
        '''Update one doc, wait 0.1 s so the interval elapses, then verify
        the buffered upsert was flushed to the database.'''
        mongo_agg = MongoQueryAggregator(MONGO_DB_SETTINGS, 0.1, 10)
        docs = [
            {'name': 'User1', 'id': 1},
            {'name': 'User2', 'id': 2}
        ]
        self.conn['testdb1'].profiles.insert(docs)
        mongo_agg.testdb1.profiles.find({'id': 1}).upsert().update({'$set': {'id2': 1}})
        time.sleep(0.1)
        # This second operation arrives after the interval, flushing the first.
        mongo_agg.testdb1.profiles.find({'id': 2}).upsert().update({'$set': {'id2': 2}})
        self.assertEqual(self.conn['testdb1'].profiles.count(), 2)
        self.assertEqual(self.conn['testdb1'].profiles.count({'id2': 1}), 1)
        for doc in self.conn['testdb1'].profiles.find({'id': 1}, {'id2': 1, '_id': 0}):
            self.assertEqual(doc['id2'], 1)

    def test_2(self):
        '''Insert data via aggregator upserts and check the max-ops flush.'''
        mongo_agg = MongoQueryAggregator(MONGO_DB_SETTINGS, 0.1, 5)
        docs = [
            {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}, {'id': 5}, {'id': 6}
        ]
        for doc in docs:
            mongo_agg.testdb1.profiles.find(doc).upsert().update({'$set': doc})
        # while inserting 6 records, 6th record will flush first 5 to db
        data = self.conn['testdb1'].profiles.find({}, {'_id': 0, 'id': 1}).sort([('id', 1)])
        mongo_docs = []
        self.assertEqual(data.count(), 5)
        for doc in data:
            mongo_docs.append(doc)
        self.assertListEqual(mongo_docs, docs[:5]) # checking first five in docs
    def test_3(self):
        '''Insert 6 documents and update them via upsert across 2 different
        databases, then check the values and result counters in both.'''
        mongo_agg = MongoQueryAggregator(MONGO_DB_SETTINGS, 0.1, 5)
        docs = [
            {'id': 1}, {'id': 2}, {'id': 3}, {'id': 4}, {'id': 5}, {'id': 6}
        ]
        self.conn['testdb1'].profiles.insert(docs[:3])
        self.conn['testdb2'].profiles.insert(docs[3:])
        mongo_agg.testdb1.profiles.find({}).upsert().update({'$set': {'status': 'updated'}})
        mongo_agg.testdb2.profiles.find({}).upsert().update({'$set': {'status': 'updated'}})
        time.sleep(0.1)
        # query just to flush older queries
        mongo_agg.testdb2.profiles.find({'css': 'sacas'}).update({'$set': docs[0]})
        docs_in_db = []  # NOTE(review): unused — left for byte-compatibility
        aggregators_expected_results = {
            ('testdb1', 'profiles'): Counter({'nModified': 3, 'nMatched': 3}),
            ('testdb2', 'profiles'): Counter({'nModified': 3, 'nMatched': 3})
        }
        data = self.conn.testdb1.profiles.find()
        output_data = []
        for doc in data:
            output_data.append(doc)
        data = self.conn.testdb2.profiles.find()
        for doc in data:
            output_data.append(doc)
        self.assertEqual(aggregators_expected_results, mongo_agg.get_results())
        for doc in docs:
            doc['status'] = 'updated'
        self.assertListEqual(output_data, docs)
| mit |
ArcherSys/ASOSBLOCKLY | js/converse.js-0.8.3/docs/source/conf.py | 2 | 7768 | # -*- coding: utf-8 -*-
#
# Converse.js documentation build configuration file, created by
# sphinx-quickstart on Fri Apr 26 20:48:03 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.todo']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Converse.js'
copyright = u'2014, JC Brand'
# --- Sphinx configuration fragment (project version, HTML/LaTeX/man/Texinfo
# output options). Values below feed directly into the Sphinx build.
# NOTE: keep `version` and `release` in sync when bumping the project version.
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.8.3'
# The full version, including alpha/beta/rc tags.
release = '0.8.3'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# The custom 'theme' directory is resolved via html_theme_path below (the
# conf.py directory itself).
html_theme = 'theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'nosidebar': True
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Conversejsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'Conversejs.tex', u'Converse.js Documentation',
     u'JC Brand', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'conversejs', u'Converse.js Documentation',
     [u'JC Brand'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'Conversejs', u'Converse.js Documentation',
     u'JC Brand', 'Conversejs', 'Open Source XMPP webchat',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| gpl-3.0 |
jiajiechen/mxnet | python/mxnet/monitor.py | 46 | 5239 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=protected-access, logging-format-interpolation, invalid-name, no-member, too-many-branches
"""Monitor outputs, weights, and gradients for debugging."""
from __future__ import absolute_import
import re
import ctypes
import logging
from math import sqrt
from .ndarray import NDArray
from .base import NDArrayHandle, py_str
from . import ndarray
class Monitor(object):
    """Monitor outputs, weights, and gradients for debugging.

    Parameters
    ----------
    interval : int
        Number of batches between printing.
    stat_func : function
        A function that computes statistics of tensors.
        Takes an `NDArray` and returns an `NDArray`. Defaults to mean
        absolute value |x|/size(x).
    pattern : str
        A regular expression specifying which tensors to monitor.
        Only tensors with names that match `name_pattern` will be included.
        For example, '.*weight|.*output' will print all weights and outputs and
        '.*backward.*' will print all gradients.
    sort : bool
        If True, sort the collected statistics by tensor name before
        returning them from `toc`.
    """
    def __init__(self, interval, stat_func=None, pattern='.*', sort=False):
        if stat_func is None:
            def asum_stat(x):
                """returns |x|/size(x), async execution."""
                return ndarray.norm(x)/sqrt(x.size)
            stat_func = asum_stat
        self.stat_func = stat_func
        self.interval = interval
        self.activated = False  # True only between tic() and toc() on monitored batches
        self.queue = []         # accumulated (step, tensor_name, stat NDArray) entries
        self.step = 0           # number of tic() calls so far
        self.exes = []          # executors this monitor has been installed on
        self.re_prog = re.compile(pattern)
        self.sort = sort
        def stat_helper(name, array):
            """wrapper for executor callback"""
            array = ctypes.cast(array, NDArrayHandle)
            array = NDArray(array, writable=False)
            # Drop callbacks that arrive outside a tic()/toc() window or whose
            # tensor name does not match the monitoring pattern.
            if not self.activated or not self.re_prog.match(py_str(name)):
                return
            self.queue.append((self.step, py_str(name), self.stat_func(array)))
        self.stat_helper = stat_helper

    def install(self, exe):
        """install callback to executor.
        Supports installing to multiple exes.

        Parameters
        ----------
        exe : mx.executor.Executor
            The Executor (returned by symbol.bind) to install to.
        """
        exe.set_monitor_callback(self.stat_helper)
        self.exes.append(exe)

    def tic(self):
        """Start collecting stats for current batch.
        Call before calling forward."""
        # Only batches that fall on the printing interval are monitored.
        if self.step % self.interval == 0:
            for exe in self.exes:
                for array in exe.arg_arrays:
                    array.wait_to_read()
                for array in exe.aux_arrays:
                    array.wait_to_read()
            self.queue = []
            self.activated = True
        self.step += 1

    def toc(self):
        """End collecting for current batch and return results.
        Call after computation of current batch.

        Returns
        -------
        res : list of (batch_index, tensor_name, stat_string) triples, where
            stat_string is the tab-separated string form of the computed
            statistic value(s).
        """
        if not self.activated:
            return []
        # Make sure all executors have finished before reading their arrays.
        for exe in self.exes:
            for array in exe.arg_arrays:
                array.wait_to_read()
            for array in exe.aux_arrays:
                array.wait_to_read()
        # Collect stats for all matching arguments and auxiliary states too
        # (in addition to whatever the executor callback queued).
        for exe in self.exes:
            for name, array in zip(exe._symbol.list_arguments(), exe.arg_arrays):
                if self.re_prog.match(name):
                    self.queue.append((self.step, name, self.stat_func(array)))
            for name, array in zip(exe._symbol.list_auxiliary_states(), exe.aux_arrays):
                if self.re_prog.match(name):
                    self.queue.append((self.step, name, self.stat_func(array)))
        self.activated = False
        res = []
        if self.sort:
            self.queue.sort(key=lambda x: x[1])  # sort by tensor name
        for n, k, v_list in self.queue:
            if isinstance(v_list, NDArray):
                v_list = [v_list]
            assert isinstance(v_list, list)
            s = ''
            for v in v_list:
                assert isinstance(v, NDArray)
                if v.shape == (1,):
                    s += str(v.asscalar()) + '\t'
                else:
                    s += str(v.asnumpy()) + '\t'
            res.append((n, k, s))
        self.queue = []
        return res

    def toc_print(self):
        """End collecting and print results."""
        res = self.toc()
        for n, k, v in res:
            logging.info('Batch: {:7d} {:30s} {:s}'.format(n, k, v))
| apache-2.0 |
ritchyteam/odoo | addons/hw_scanner/controllers/main.py | 77 | 7438 | # -*- coding: utf-8 -*-
import logging
import os
import time
from os import listdir
from os.path import join
from threading import Thread, Lock
from select import select
from Queue import Queue, Empty
import openerp
import openerp.addons.hw_proxy.controllers.main as hw_proxy
from openerp import http
from openerp.http import request
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
try:
import evdev
except ImportError:
_logger.error('Odoo module hw_scanner depends on the evdev python module')
evdev = None
class Scanner(Thread):
    """Background thread reading barcodes from a USB barcode scanner via evdev.

    The scanner is located under /dev/input/by-id/ (such scanners present
    themselves as keyboard devices).  Complete barcodes (terminated by ENTER)
    are pushed onto a timestamped queue consumed by get_barcode().
    """
    def __init__(self):
        Thread.__init__(self)
        self.lock = Lock()
        self.status = {'status':'connecting', 'messages':[]}
        self.input_dir = '/dev/input/by-id/'
        self.barcodes = Queue()
        # Maps evdev keycode -> (plain char, shifted char).  Non-printable
        # keycodes (backspace, tab, enter, modifiers) are either handled
        # explicitly in run() or ignored.
        self.keymap = {
            2: ("1","!"),
            3: ("2","@"),
            4: ("3","#"),
            5: ("4","$"),
            6: ("5","%"),
            7: ("6","^"),
            8: ("7","&"),
            9: ("8","*"),
            10:("9","("),
            11:("0",")"),
            12:("-","_"),
            13:("=","+"),
            # 14 BACKSPACE
            # 15 TAB
            16:("q","Q"),
            17:("w","W"),
            18:("e","E"),
            19:("r","R"),
            20:("t","T"),
            21:("y","Y"),
            22:("u","U"),
            23:("i","I"),
            24:("o","O"),
            25:("p","P"),
            26:("[","{"),
            27:("]","}"),
            # 28 ENTER
            # 29 LEFT_CTRL
            30:("a","A"),
            31:("s","S"),
            32:("d","D"),
            33:("f","F"),
            34:("g","G"),
            35:("h","H"),
            36:("j","J"),
            37:("k","K"),
            38:("l","L"),
            39:(";",":"),
            40:("'","\""),
            41:("`","~"),
            # 42 LEFT SHIFT
            43:("\\","|"),
            44:("z","Z"),
            45:("x","X"),
            46:("c","C"),
            47:("v","V"),
            48:("b","B"),
            49:("n","N"),
            50:("m","M"),
            51:(",","<"),
            52:(".",">"),
            53:("/","?"),
            # 54 RIGHT SHIFT
            57:(" "," "),
        }

    def lockedstart(self):
        """Start the thread exactly once (idempotent, guarded by the lock)."""
        with self.lock:
            if not self.isAlive():
                self.daemon = True
                self.start()

    def set_status(self, status, message = None):
        """Record the scanner's connection status with an optional message.

        Consecutive duplicate messages for an unchanged status are dropped;
        errors and disconnections are additionally logged.
        """
        if status == self.status['status']:
            if message != None and message != self.status['messages'][-1]:
                self.status['messages'].append(message)
        else:
            self.status['status'] = status
            if message:
                self.status['messages'] = [message]
            else:
                self.status['messages'] = []
        if status == 'error' and message:
            _logger.error('Barcode Scanner Error: '+message)
        elif status == 'disconnected' and message:
            _logger.warning('Disconnected Barcode Scanner: '+message)

    def get_device(self):
        """Open and return the scanner's evdev input device, or None.

        Device ids containing 'barcode'/'scanner' are preferred; otherwise a
        keyboard-like id ('kbd', but not 'keyboard') is used as a fallback.
        """
        try:
            if not evdev:
                return None
            devices = [ device for device in listdir(self.input_dir)]
            keyboards = [ device for device in devices if ('kbd' in device) and ('keyboard' not in device.lower())]
            scanners = [ device for device in devices if ('barcode' in device.lower()) or ('scanner' in device.lower())]
            if len(scanners) > 0:
                self.set_status('connected','Connected to '+scanners[0])
                return evdev.InputDevice(join(self.input_dir,scanners[0]))
            elif len(keyboards) > 0:
                self.set_status('connected','Connected to '+keyboards[0])
                return evdev.InputDevice(join(self.input_dir,keyboards[0]))
            else:
                self.set_status('disconnected','Barcode Scanner Not Found')
                return None
        except Exception as e:
            self.set_status('error',str(e))
            return None

    def get_barcode(self):
        """ Returns a scanned barcode. Will wait at most 5 seconds to get a barcode, and will
            return barcode scanned in the past if they are not older than 5 seconds and have not
            been returned before. This is necessary to catch barcodes scanned while the POS is
            busy reading another barcode
        """
        self.lockedstart()
        while True:
            try:
                timestamp, barcode = self.barcodes.get(True, 5)
                if timestamp > time.time() - 5:
                    return barcode
            except Empty:
                return ''

    def get_status(self):
        """Return the current status dict, starting the thread if needed."""
        self.lockedstart()
        return self.status

    def run(self):
        """ This will start a loop that catches all keyboard events, parse barcode
            sequences and put them on a timestamped queue that can be consumed by
            the point of sale's requests for barcode events
        """
        self.barcodes = Queue()
        barcode = []
        shift = False
        device = None
        while True: # barcodes loop
            if device: # ungrab device between barcodes and timeouts for plug & play
                try:
                    device.ungrab()
                except Exception as e:
                    self.set_status('error',str(e))
            device = self.get_device()
            if not device:
                time.sleep(5)   # wait until a suitable device is plugged
            else:
                try:
                    # Grab the device exclusively so keystrokes don't also
                    # reach the rest of the system while scanning.
                    device.grab()
                    shift = False
                    barcode = []
                    while True: # keycode loop
                        r,w,x = select([device],[],[],5)
                        if len(r) == 0: # timeout
                            break
                        events = device.read()
                        for event in events:
                            if event.type == evdev.ecodes.EV_KEY:
                                #_logger.debug('Evdev Keyboard event %s',evdev.categorize(event))
                                if event.value == 1: # keydown events
                                    if event.code in self.keymap:
                                        if shift:
                                            barcode.append(self.keymap[event.code][1])
                                        else:
                                            barcode.append(self.keymap[event.code][0])
                                    elif event.code == 42 or event.code == 54: # SHIFT
                                        shift = True
                                    elif event.code == 28: # ENTER, end of barcode
                                        self.barcodes.put( (time.time(),''.join(barcode)) )
                                        barcode = []
                                elif event.value == 0: #keyup events
                                    if event.code == 42 or event.code == 54: # LEFT SHIFT
                                        shift = False
                except Exception as e:
                    self.set_status('error',str(e))
# Module-level singleton: one Scanner thread shared by all HTTP requests.
s = Scanner()
# Register it in the hardware proxy's driver registry under the 'scanner'
# key (NOTE(review): how the registry consumes this entry is defined in
# hw_proxy, not visible here).
hw_proxy.drivers['scanner'] = s
class ScannerDriver(hw_proxy.Proxy):
    # JSON-RPC endpoint polled by the client for scanned barcodes.
    @http.route('/hw_proxy/scanner', type='json', auth='none', cors='*')
    def scanner(self):
        """Return the next scanned barcode, or '' after a ~5 second timeout
        (see Scanner.get_barcode)."""
        return s.get_barcode()
| agpl-3.0 |
annarev/tensorflow | tensorflow/lite/schema/upgrade_schema.py | 23 | 12976 | # ==============================================================================
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Upgrade script to move from pre-release schema to new schema.
Usage examples:
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.bin out.json
bazel run tensorflow/lite/schema/upgrade_schema -- in.json out.bin
bazel run tensorflow/lite/schema/upgrade_schema -- in.tflite out.tflite
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import contextlib
import json
import os
import shutil
import subprocess
import sys
import tempfile
import tensorflow as tf
from tensorflow.python.platform import resource_loader
# Command-line interface: a positional input model path and output model path.
# The file extensions select the serialization (.json vs .bin/.tflite).
parser = argparse.ArgumentParser(
    description="Script to move TFLite models from pre-release schema to "
    "new schema.")
parser.add_argument(
    "input",
    type=str,
    help="Input TensorFlow lite file in `.json`, `.bin` or `.tflite` format.")
parser.add_argument(
    "output",
    type=str,
    help="Output json or bin TensorFlow lite model compliant with "
    "the new schema. Extension must be `.json`, `.bin` or `.tflite`.")
# RAII Temporary Directory, because flatc doesn't allow direct use of tempfiles.
@contextlib.contextmanager
def TemporaryDirectoryResource():
    """Yield a scratch directory that is removed (with contents) on exit."""
    scratch_dir = tempfile.mkdtemp()
    try:
        yield scratch_dir
    finally:
        shutil.rmtree(scratch_dir)
class Converter(object):
  """Converts TensorFlow flatbuffer models from old to new version of schema.

  This can convert between any version to the latest version. It uses
  an incremental upgrade strategy to go from version to version.

  Usage:
    converter = Converter()
    converter.Convert("a.tflite", "a.json")
    converter.Convert("b.json", "b.tflite")
  """

  def __init__(self):
    # TODO(aselle): make this work in the open source version with better
    # path.
    paths_to_try = [
        "../../../../flatbuffers/flatc",  # not bazel
        "../../../../external/flatbuffers/flatc"  # bazel
    ]
    for p in paths_to_try:
      self._flatc_path = resource_loader.get_path_to_datafile(p)
      if os.path.exists(self._flatc_path): break

    def FindSchema(base_name):
      return resource_loader.get_path_to_datafile("%s" % base_name)

    # Supported schemas for upgrade.
    self._schemas = [
        (0, FindSchema("schema_v0.fbs"), True, self._Upgrade0To1),
        (1, FindSchema("schema_v1.fbs"), True, self._Upgrade1To2),
        (2, FindSchema("schema_v2.fbs"), True, self._Upgrade2To3),
        (3, FindSchema("schema_v3.fbs"), False, None)  # Non-callable by design.
    ]
    # Ensure schemas are sorted, and extract latest version and upgrade
    # dispatch function table.  Sort by version number only: the tuples also
    # contain bound methods, which are not orderable, so a whole-tuple sort
    # could raise TypeError if two entries ever shared a version.
    self._schemas.sort(key=lambda schema: schema[0])
    self._new_version, self._new_schema = self._schemas[-1][:2]
    self._upgrade_dispatch = {
        version: dispatch
        for version, unused1, unused2, dispatch in self._schemas}

  def _Read(self, input_file, schema, raw_binary=False):
    """Read a tflite model assuming the given flatbuffer schema.

    If `input_file` is in bin, then we must use flatc to convert the schema
    from binary to json.

    Args:
      input_file: a binary (flatbuffer) or json file to read from. Extension
        must be `.tflite`, `.bin`, or `.json` for FlatBuffer Binary or
        FlatBuffer JSON.
      schema: which schema to use for reading
      raw_binary: whether to assume raw_binary (versions previous to v3)
        that lacked file_identifier require this.

    Raises:
      RuntimeError: 1. When flatc cannot be invoked.
                    2. When json file does not exists.
      ValueError: When the extension is not json or bin.

    Returns:
      A dictionary representing the read tflite model.
    """
    raw_binary = ["--raw-binary"] if raw_binary else []
    with TemporaryDirectoryResource() as tempdir:
      basename = os.path.basename(input_file)
      basename_no_extension, extension = os.path.splitext(basename)
      if extension in [".bin", ".tflite"]:
        # Convert to json using flatc
        returncode = subprocess.call([
            self._flatc_path,
            "-t",
            "--strict-json",
            "--defaults-json",
        ] + raw_binary + ["-o", tempdir, schema, "--", input_file])
        if returncode != 0:
          raise RuntimeError("flatc failed to convert from binary to json.")
        json_file = os.path.join(tempdir, basename_no_extension + ".json")
        if not os.path.exists(json_file):
          raise RuntimeError("Could not find %r" % json_file)
      elif extension == ".json":
        json_file = input_file
      else:
        raise ValueError("Invalid extension on input file %r" % input_file)
      # Close the file promptly instead of leaking the handle
      # (the original `json.load(open(...))` never closed it).
      with open(json_file) as json_fp:
        return json.load(json_fp)

  def _Write(self, data, output_file):
    """Output a json or bin version of the flatbuffer model.

    Args:
      data: Dict representing the TensorFlow Lite model to write.
      output_file: filename to write the converted flatbuffer to. (json,
        tflite, or bin extension is required).

    Raises:
      ValueError: When the extension is not json or bin
      RuntimeError: When flatc fails to convert json data to binary.
    """
    _, extension = os.path.splitext(output_file)
    with TemporaryDirectoryResource() as tempdir:
      if extension == ".json":
        # Use a context manager so the output handle is flushed and closed.
        with open(output_file, "w") as out_fp:
          json.dump(data, out_fp, sort_keys=True, indent=2)
      elif extension in [".tflite", ".bin"]:
        input_json = os.path.join(tempdir, "temp.json")
        with open(input_json, "w") as fp:
          json.dump(data, fp, sort_keys=True, indent=2)
        returncode = subprocess.call([
            self._flatc_path, "-b", "--defaults-json", "--strict-json", "-o",
            tempdir, self._new_schema, input_json
        ])
        if returncode != 0:
          raise RuntimeError("flatc failed to convert upgraded json to binary.")
        shutil.copy(os.path.join(tempdir, "temp.tflite"), output_file)
      else:
        raise ValueError("Invalid extension on output file %r" % output_file)

  def _Upgrade0To1(self, data):
    """Upgrade data from Version 0 to Version 1.

    Changes: Added subgraphs (which contains a subset of formally global
    entries).

    Args:
      data: Dictionary representing the TensorFlow lite data to be upgraded.
        This will be modified in-place to be an upgraded version.
    """
    subgraph = {}
    for key_to_promote in ["tensors", "operators", "inputs", "outputs"]:
      subgraph[key_to_promote] = data[key_to_promote]
      del data[key_to_promote]
    data["subgraphs"] = [subgraph]

  def _Upgrade1To2(self, data):
    """Upgrade data from Version 1 to Version 2.

    Changes: Rename operators to Conform to NN API.

    Args:
      data: Dictionary representing the TensorFlow lite data to be upgraded.
        This will be modified in-place to be an upgraded version.

    Raises:
      ValueError: Throws when model builtins are numeric rather than symbols.
    """

    def RemapOperator(opcode_name):
      """Go from old schema op name to new schema op name.

      Args:
        opcode_name: String representing the ops (see :schema.fbs).

      Returns:
        Converted opcode_name from V1 to V2.
      """
      old_name_to_new_name = {
          "CONVOLUTION": "CONV_2D",
          "DEPTHWISE_CONVOLUTION": "DEPTHWISE_CONV_2D",
          "AVERAGE_POOL": "AVERAGE_POOL_2D",
          "MAX_POOL": "MAX_POOL_2D",
          "L2_POOL": "L2_POOL_2D",
          "SIGMOID": "LOGISTIC",
          "L2NORM": "L2_NORMALIZATION",
          "LOCAL_RESPONSE_NORM": "LOCAL_RESPONSE_NORMALIZATION",
          "Basic_RNN": "RNN",
      }
      return (old_name_to_new_name[opcode_name]
              if opcode_name in old_name_to_new_name else opcode_name)

    def RemapOperatorType(operator_type):
      """Remap operator structs from old names to new names.

      Args:
        operator_type: String representing the builtin operator data type
          string. (see :schema.fbs).

      Raises:
        ValueError: When the model has consistency problems.

      Returns:
        Upgraded builtin operator data type as a string.
      """
      old_to_new = {
          "PoolOptions": "Pool2DOptions",
          "DepthwiseConvolutionOptions": "DepthwiseConv2DOptions",
          "ConvolutionOptions": "Conv2DOptions",
          "LocalResponseNormOptions": "LocalResponseNormalizationOptions",
          "BasicRNNOptions": "RNNOptions",
      }
      return (old_to_new[operator_type]
              if operator_type in old_to_new else operator_type)

    for subgraph in data["subgraphs"]:
      for ops in subgraph["operators"]:
        ops["builtin_options_type"] = RemapOperatorType(
            ops["builtin_options_type"])

    # Upgrade the operator codes
    for operator_code in data["operator_codes"]:
      # Check if builtin_code is the appropriate string type
      # use type("") instead of str or unicode. for py2and3
      if not isinstance(operator_code["builtin_code"], type(u"")):
        raise ValueError("builtin_code %r is non-string. this usually means "
                         "your model has consistency problems." %
                         (operator_code["builtin_code"]))
      operator_code["builtin_code"] = (RemapOperator(
          operator_code["builtin_code"]))

  def _Upgrade2To3(self, data):
    """Upgrade data from Version 2 to Version 3.

    Changed actual read-only tensor data to be in a buffers table instead
    of inline with the tensor.

    Args:
      data: Dictionary representing the TensorFlow lite data to be upgraded.
        This will be modified in-place to be an upgraded version.
    """
    buffers = [{"data": []}]  # Start with 1 empty buffer
    for subgraph in data["subgraphs"]:
      if "tensors" not in subgraph:
        continue
      for tensor in subgraph["tensors"]:
        if "data_buffer" not in tensor:
          tensor["buffer"] = 0
        else:
          if tensor["data_buffer"]:
            tensor[u"buffer"] = len(buffers)
            buffers.append({"data": tensor["data_buffer"]})
          else:
            tensor["buffer"] = 0
          del tensor["data_buffer"]
    data["buffers"] = buffers

  def _PerformUpgrade(self, data):
    """Manipulate the `data` (parsed JSON) based on changes in format.

    This incrementally will upgrade from version to version within data.

    Args:
      data: Dictionary representing the TensorFlow data. This will be upgraded
        in place.
    """
    while data["version"] < self._new_version:
      self._upgrade_dispatch[data["version"]](data)
      data["version"] += 1

  def Convert(self, input_file, output_file):
    """Perform schema conversion from input_file to output_file.

    Args:
      input_file: Filename of TensorFlow Lite data to convert from. Must
        be `.json` or `.bin` extension files for JSON or Binary forms of
        the TensorFlow FlatBuffer schema.
      output_file: Filename to write to. Extension also must be `.json`
        or `.bin`.

    Raises:
      RuntimeError: Generated when none of the upgrader supported schemas
        matches the `input_file` data.
    """
    # Read data in each schema (since they are incompatible). Version is
    # always present. Use the read data that matches the version of the
    # schema.
    for version, schema, raw_binary, _ in self._schemas:
      try:
        data_candidate = self._Read(input_file, schema, raw_binary)
      except RuntimeError:
        continue  # Skip and hope another schema works
      if "version" not in data_candidate:  # Assume version 1 if not present.
        data_candidate["version"] = 1
      elif data_candidate["version"] == 0:  # Version 0 doesn't exist in wild.
        data_candidate["version"] = 1

      if data_candidate["version"] == version:
        self._PerformUpgrade(data_candidate)
        self._Write(data_candidate, output_file)
        return
    raise RuntimeError("No schema that the converter understands worked with "
                       "the data file you provided.")
def main(argv):
  """Entry point: convert FLAGS.input to FLAGS.output.

  argv is unused; flags are parsed into the module-global FLAGS in the
  __main__ block below.
  """
  del argv
  Converter().Convert(FLAGS.input, FLAGS.output)
if __name__ == "__main__":
  # Parse known args into the module-global FLAGS read by main(); forward any
  # remaining args to the TensorFlow app runner.
  FLAGS, unparsed = parser.parse_known_args()
  tf.compat.v1.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
bdero/edx-platform | common/lib/xmodule/xmodule/tests/test_tabs.py | 21 | 25887 | """Tests for Tab classes"""
from mock import MagicMock
import xmodule.tabs as tabs
import unittest
from opaque_keys.edx.locations import SlashSeparatedCourseKey
class TabTestCase(unittest.TestCase):
    """Base class for Tab-related test cases."""
    def setUp(self):
        # MagicMock course: each test sets only the attributes it exercises.
        self.course = MagicMock()
        self.course.id = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall')
        # A dict with no 'type' key -- invalid input for any tab class.
        self.fake_dict_tab = {'fake_key': 'fake_value'}
        self.settings = MagicMock()
        self.settings.FEATURES = {}
        # Deterministic stand-in for url reverse(): encodes name and args.
        self.reverse = lambda name, args: "name/{0}/args/{1}".format(name, ",".join(str(a) for a in args))
        self.books = None

    def set_up_books(self, num_books):
        """Initializes the textbooks in the course and adds the given number of books to each textbook"""
        self.books = [MagicMock() for _ in range(num_books)]
        for book_index, book in enumerate(self.books):
            book.title = 'Book{0}'.format(book_index)
        # The same fake book list backs all three textbook collections.
        self.course.textbooks = self.books
        self.course.pdf_textbooks = self.books
        self.course.html_textbooks = self.books

    def check_tab(
            self,
            tab_class,
            dict_tab,
            expected_link,
            expected_tab_id,
            expected_name='same',
            invalid_dict_tab=None,
    ):
        """
        Helper method to verify a tab class.

        'tab_class' is the class of the tab that is being tested
        'dict_tab' is the raw dictionary value of the tab
        'expected_link' is the expected value for the hyperlink of the tab
        'expected_tab_id' is the expected value for the unique id of the tab
        'expected_name' is the expected value for the name of the tab
        'invalid_dict_tab' is an invalid dictionary value for the tab.
            Can be 'None' if the given tab class does not have any keys to validate.
        """
        # create tab
        tab = tab_class(dict_tab)
        # name is as expected
        self.assertEqual(tab.name, expected_name)
        # link is as expected
        self.assertEqual(tab.link_func(self.course, self.reverse), expected_link)
        # verify active page name
        self.assertEqual(tab.tab_id, expected_tab_id)
        # validate tab
        self.assertTrue(tab.validate(dict_tab))
        if invalid_dict_tab:
            with self.assertRaises(tabs.InvalidTabsException):
                tab.validate(invalid_dict_tab)
        # check get and set methods
        self.check_get_and_set_methods(tab)
        # check to_json and from_json methods
        self.check_tab_json_methods(tab)
        # check equality methods
        self.check_tab_equality(tab, dict_tab)
        # return tab for any additional tests
        return tab

    def check_tab_equality(self, tab, dict_tab):
        """Tests the equality methods on the given tab"""
        self.assertEquals(tab, dict_tab)  # test __eq__
        ne_dict_tab = dict_tab
        ne_dict_tab['type'] = 'fake_type'
        self.assertNotEquals(tab, ne_dict_tab)  # test __ne__: incorrect type
        self.assertNotEquals(tab, {'fake_key': 'fake_value'})  # test __ne__: missing type

    def check_tab_json_methods(self, tab):
        """Tests the json from and to methods on the given tab"""
        serialized_tab = tab.to_json()
        deserialized_tab = tab.from_json(serialized_tab)
        self.assertEquals(serialized_tab, deserialized_tab)

    def check_can_display_results(self, tab, expected_value=True, for_authenticated_users_only=False, for_staff_only=False):
        """Checks can display results for various users

        The two flags choose which (is_user_authenticated, is_user_staff)
        combination is exercised; with neither set, the anonymous
        non-staff case is checked.
        """
        if for_staff_only:
            self.assertEquals(
                expected_value,
                tab.can_display(self.course, self.settings, is_user_authenticated=False, is_user_staff=True)
            )
        if for_authenticated_users_only:
            self.assertEquals(
                expected_value,
                tab.can_display(self.course, self.settings, is_user_authenticated=True, is_user_staff=False)
            )
        if not for_staff_only and not for_authenticated_users_only:
            self.assertEquals(
                expected_value,
                tab.can_display(self.course, self.settings, is_user_authenticated=False, is_user_staff=False)
            )

    def check_get_and_set_methods(self, tab):
        """Test __getitem__ and __setitem__ calls"""
        self.assertEquals(tab['type'], tab.type)
        self.assertEquals(tab['tab_id'], tab.tab_id)
        with self.assertRaises(KeyError):
            _ = tab['invalid_key']
        self.check_get_and_set_method_for_key(tab, 'name')
        self.check_get_and_set_method_for_key(tab, 'tab_id')
        with self.assertRaises(KeyError):
            tab['invalid_key'] = 'New Value'

    def check_get_and_set_method_for_key(self, tab, key):
        """Test __getitem__ and __setitem__ for the given key"""
        old_value = tab[key]
        new_value = 'New Value'
        tab[key] = new_value
        self.assertEquals(tab[key], new_value)
        # restore the original value so the tab is unchanged afterwards
        tab[key] = old_value
        self.assertEquals(tab[key], old_value)
class ProgressTestCase(TabTestCase):
    """Test cases for Progress Tab."""

    def check_progress_tab(self):
        """Helper function for verifying the progress tab."""
        return self.check_tab(
            tab_class=tabs.ProgressTab,
            dict_tab={'type': tabs.ProgressTab.type, 'name': 'same'},
            expected_link=self.reverse('progress', args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=tabs.ProgressTab.type,
            invalid_dict_tab=None,
        )

    def test_progress(self):
        """Progress tab displays for authenticated users unless hidden by the course."""
        self.course.hide_progress_tab = False
        tab = self.check_progress_tab()
        self.check_can_display_results(tab, for_authenticated_users_only=True)

        self.course.hide_progress_tab = True
        # Fix: capture the tab built after the course setting changed instead
        # of discarding it and re-checking the stale instance from above.
        tab = self.check_progress_tab()
        self.check_can_display_results(tab, for_authenticated_users_only=True, expected_value=False)
class WikiTestCase(TabTestCase):
    """Tests for the course wiki tab."""

    def check_wiki_tab(self):
        """Run the shared tab checks against a wiki tab dict and return the tab."""
        tab_type = tabs.WikiTab.type
        course_id = self.course.id.to_deprecated_string()
        return self.check_tab(
            tab_class=tabs.WikiTab,
            dict_tab={'type': tab_type, 'name': 'same'},
            expected_link=self.reverse('course_wiki', args=[course_id]),
            expected_tab_id=tab_type,
            invalid_dict_tab=self.fake_dict_tab,
        )

    def test_wiki_enabled(self):
        """Test wiki tab when Enabled setting is True"""
        self.settings.WIKI_ENABLED = True
        self.check_can_display_results(self.check_wiki_tab())

    def test_wiki_enabled_false(self):
        """Test wiki tab when Enabled setting is False"""
        self.settings.WIKI_ENABLED = False
        self.check_can_display_results(self.check_wiki_tab(), expected_value=False)

    def test_wiki_visibility(self):
        """Test toggling of visibility of wiki tab"""
        wiki_tab = tabs.WikiTab()
        self.assertTrue(wiki_tab.is_hideable)
        wiki_tab.is_hidden = True
        self.assertTrue(wiki_tab['is_hidden'])
        self.check_tab_json_methods(wiki_tab)
        self.check_tab_equality(wiki_tab, wiki_tab.to_json())
        wiki_tab['is_hidden'] = False
        self.assertFalse(wiki_tab.is_hidden)
class ExternalLinkTestCase(TabTestCase):
    """Tests for the external-link tab."""

    def test_external_link(self):
        """An external-link tab links directly to its configured URL."""
        target_url = 'link_value'
        tab_dict = {'type': tabs.ExternalLinkTab.type, 'name': 'same', 'link': target_url}
        tab = self.check_tab(
            tab_class=tabs.ExternalLinkTab,
            dict_tab=tab_dict,
            expected_link=target_url,
            expected_tab_id=None,
            invalid_dict_tab=self.fake_dict_tab,
        )
        self.check_can_display_results(tab)
        self.check_get_and_set_method_for_key(tab, 'link')
class StaticTabTestCase(TabTestCase):
    """Tests for the static (custom page) tab."""

    def test_static_tab(self):
        """A static tab links to its url_slug under the course."""
        url_slug = 'schmug'
        course_id = self.course.id.to_deprecated_string()
        tab = self.check_tab(
            tab_class=tabs.StaticTab,
            dict_tab={'type': tabs.StaticTab.type, 'name': 'same', 'url_slug': url_slug},
            expected_link=self.reverse('static_tab', args=[course_id, url_slug]),
            expected_tab_id='static_tab_schmug',
            invalid_dict_tab=self.fake_dict_tab,
        )
        self.check_can_display_results(tab)
        self.check_get_and_set_method_for_key(tab, 'url_slug')
class TextbooksTestCase(TabTestCase):
    """Tests for the textbook collection tabs."""

    def setUp(self):
        super(TextbooksTestCase, self).setUp()
        self.set_up_books(2)
        self.dict_tab = MagicMock()
        self.course.tabs = [
            tabs.CoursewareTab(),
            tabs.CourseInfoTab(),
            tabs.TextbookTabs(),
            tabs.PDFTextbookTabs(),
            tabs.HtmlTextbookTabs(),
        ]
        self.num_textbook_tabs = sum(
            1 for tab in self.course.tabs if isinstance(tab, tabs.TextbookTabsBase)
        )
        self.num_textbooks = self.num_textbook_tabs * len(self.books)

    def test_textbooks_enabled(self):
        """Every book of every textbook tab type is listed when the feature is on."""
        type_to_reverse_name = {'textbook': 'book', 'pdftextbook': 'pdf_book', 'htmltextbook': 'html_book'}
        self.settings.FEATURES['ENABLE_TEXTBOOK'] = True
        num_textbooks_found = 0
        course_id = self.course.id.to_deprecated_string()
        for tab in tabs.CourseTabList.iterate_displayable(self.course, self.settings):
            # only the per-book tabs are of interest here
            if not isinstance(tab, tabs.SingleTextbookTab):
                continue
            book_type, book_index = tab.tab_id.split("/", 1)
            expected_link = self.reverse(
                type_to_reverse_name[book_type],
                args=[course_id, book_index]
            )
            self.assertEqual(tab.link_func(self.course, self.reverse), expected_link)
            self.assertTrue(tab.name.startswith('Book{0}'.format(book_index)))
            num_textbooks_found += 1
        self.assertEquals(num_textbooks_found, self.num_textbooks)

    def test_textbooks_disabled(self):
        """Textbook tabs are not displayable when the feature is off."""
        self.settings.FEATURES['ENABLE_TEXTBOOK'] = False
        tab = tabs.TextbookTabs(self.dict_tab)
        self.check_can_display_results(tab, for_authenticated_users_only=True, expected_value=False)
class GradingTestCase(TabTestCase):
    """Test cases for Grading related Tabs."""
    def check_grading_tab(self, tab_class, name, link_value):
        """Build and verify a grading tab of the given class, returning it."""
        return self.check_tab(
            tab_class=tab_class,
            dict_tab={'type': tab_class.type, 'name': name},
            expected_name=name,
            expected_link=self.reverse(link_value, args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=tab_class.type,
            invalid_dict_tab=None,
        )
    def test_grading_tabs(self):
        """Peer/open-ended tabs need login; staff grading is staff-only."""
        # (tab class, display name, reverse() view name, staff-only?)
        cases = [
            (tabs.PeerGradingTab, 'Peer grading', 'peer_grading', False),
            (tabs.OpenEndedGradingTab, 'Open Ended Panel', 'open_ended_notifications', False),
            (tabs.StaffGradingTab, 'Staff grading', 'staff_grading', True),
        ]
        for tab_class, name, view_name, staff_only in cases:
            grading_tab = self.check_grading_tab(tab_class, name, view_name)
            if staff_only:
                self.check_can_display_results(grading_tab, for_staff_only=True)
            else:
                self.check_can_display_results(grading_tab, for_authenticated_users_only=True)
class NotesTestCase(TabTestCase):
    """Test cases for Notes Tab."""
    def check_notes_tab(self):
        """Build and verify the notes tab, returning it for further checks."""
        return self.check_tab(
            tab_class=tabs.NotesTab,
            dict_tab={'type': tabs.NotesTab.type, 'name': 'same'},
            expected_link=self.reverse('notes', args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=tabs.NotesTab.type,
            invalid_dict_tab=self.fake_dict_tab,
        )
    def test_notes_tabs_enabled(self):
        """When enabled, the notes tab shows only to authenticated users."""
        self.settings.FEATURES['ENABLE_STUDENT_NOTES'] = True
        notes_tab = self.check_notes_tab()
        self.check_can_display_results(notes_tab, for_authenticated_users_only=True)
    def test_notes_tabs_disabled(self):
        """When disabled, the notes tab is hidden from everyone."""
        self.settings.FEATURES['ENABLE_STUDENT_NOTES'] = False
        notes_tab = self.check_notes_tab()
        self.check_can_display_results(notes_tab, expected_value=False)
class SyllabusTestCase(TabTestCase):
    """Test cases for Syllabus Tab."""
    def check_syllabus_tab(self, expected_can_display_value):
        """Build the syllabus tab and check whether it can be displayed."""
        tab_name = 'Syllabus'
        syllabus_tab = self.check_tab(
            tab_class=tabs.SyllabusTab,
            dict_tab={'type': tabs.SyllabusTab.type, 'name': tab_name},
            expected_name=tab_name,
            expected_link=self.reverse('syllabus', args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=tabs.SyllabusTab.type,
            invalid_dict_tab=None,
        )
        self.check_can_display_results(syllabus_tab, expected_value=expected_can_display_value)
    def test_syllabus_tab_enabled(self):
        """The tab displays when the course has a syllabus."""
        self.course.syllabus_present = True
        self.check_syllabus_tab(True)
    def test_syllabus_tab_disabled(self):
        """The tab is hidden when the course has no syllabus."""
        self.course.syllabus_present = False
        self.check_syllabus_tab(False)
class InstructorTestCase(TabTestCase):
    """Test cases for Instructor Tab."""
    def test_instructor_tab(self):
        """The instructor tab links to the dashboard and is staff-only."""
        tab_name = 'Instructor'
        instructor_tab = self.check_tab(
            tab_class=tabs.InstructorTab,
            dict_tab={'type': tabs.InstructorTab.type, 'name': tab_name},
            expected_name=tab_name,
            expected_link=self.reverse('instructor_dashboard', args=[self.course.id.to_deprecated_string()]),
            expected_tab_id=tabs.InstructorTab.type,
            invalid_dict_tab=None,
        )
        self.check_can_display_results(instructor_tab, for_staff_only=True)
class KeyCheckerTestCase(unittest.TestCase):
    """Test cases for KeyChecker class"""
    def setUp(self):
        # 'a' and 'b' exist in the sample dict; 'v' and 'g' do not.
        self.present_keys = ['a', 'b']
        self.missing_keys = ['a', 'v', 'g']
        self.sample_dict = {'a': 1, 'b': 2, 'c': 3}
    def test_key_checker(self):
        """key_checker validates required keys, raising unless suppressed."""
        self.assertTrue(tabs.key_checker(self.present_keys)(self.sample_dict, raise_error=False))
        self.assertFalse(tabs.key_checker(self.missing_keys)(self.sample_dict, raise_error=False))
        # With raise_error left at its default, missing keys must raise.
        with self.assertRaises(tabs.InvalidTabsException):
            tabs.key_checker(self.missing_keys)(self.sample_dict)
class NeedNameTestCase(unittest.TestCase):
    """Test cases for NeedName validator"""
    def setUp(self):
        # Three dicts that carry a 'name' key, and one that does not.
        self.named_dicts = [
            {'a': 1, 'name': 2},
            {'name': 1},
            {'a': 1, 'name': 2, 'b': 3},
        ]
        self.unnamed_dict = {'a': 1, 'b': 2}
    def test_need_name(self):
        """need_name accepts any dict with a 'name' key and raises otherwise."""
        for named_dict in self.named_dicts:
            self.assertTrue(tabs.need_name(named_dict))
        with self.assertRaises(tabs.InvalidTabsException):
            tabs.need_name(self.unnamed_dict)
class TabListTestCase(TabTestCase):
    """Base class for Test cases involving tab lists."""
    def setUp(self):
        """Build shared fixtures: invalid and valid lists of tab dicts."""
        super(TabListTestCase, self).setUp()
        # invalid tabs
        self.invalid_tabs = [
            # less than 2 tabs
            [{'type': tabs.CoursewareTab.type}],
            # missing course_info
            [{'type': tabs.CoursewareTab.type}, {'type': tabs.DiscussionTab.type, 'name': 'fake_name'}],
            # incorrect order
            [{'type': tabs.CourseInfoTab.type, 'name': 'fake_name'}, {'type': tabs.CoursewareTab.type}],
            # invalid type
            [{'type': tabs.CoursewareTab.type}, {'type': tabs.CourseInfoTab.type, 'name': 'fake_name'}, {'type': 'fake_type'}],
        ]
        # tab types that should appear only once
        unique_tab_types = [
            tabs.CourseInfoTab.type,
            tabs.CoursewareTab.type,
            tabs.NotesTab.type,
            tabs.TextbookTabs.type,
            tabs.PDFTextbookTabs.type,
            tabs.HtmlTextbookTabs.type,
        ]
        # Each duplicated unique-type tab produces one more invalid tab list.
        for unique_tab_type in unique_tab_types:
            self.invalid_tabs.append([
                {'type': tabs.CoursewareTab.type},
                {'type': tabs.CourseInfoTab.type, 'name': 'fake_name'},
                # add the unique tab multiple times
                {'type': unique_tab_type},
                {'type': unique_tab_type},
            ])
        # valid tabs
        self.valid_tabs = [
            # empty list
            [],
            # all valid tabs
            [
                {'type': tabs.CoursewareTab.type},
                {'type': tabs.CourseInfoTab.type, 'name': 'fake_name'},
                {'type': tabs.WikiTab.type, 'name': 'fake_name'},
                {'type': tabs.DiscussionTab.type, 'name': 'fake_name'},
                {'type': tabs.ExternalLinkTab.type, 'name': 'fake_name', 'link': 'fake_link'},
                {'type': tabs.TextbookTabs.type},
                {'type': tabs.PDFTextbookTabs.type},
                {'type': tabs.HtmlTextbookTabs.type},
                {'type': tabs.ProgressTab.type, 'name': 'fake_name'},
                {'type': tabs.StaticTab.type, 'name': 'fake_name', 'url_slug': 'schlug'},
                {'type': tabs.PeerGradingTab.type},
                {'type': tabs.StaffGradingTab.type},
                {'type': tabs.OpenEndedGradingTab.type},
                {'type': tabs.NotesTab.type, 'name': 'fake_name'},
                {'type': tabs.SyllabusTab.type},
            ],
            # with external discussion
            [
                {'type': tabs.CoursewareTab.type},
                {'type': tabs.CourseInfoTab.type, 'name': 'fake_name'},
                {'type': tabs.ExternalDiscussionTab.type, 'name': 'fake_name', 'link': 'fake_link'}
            ],
        ]
        # Deserialized form of the exhaustive list above (valid_tabs[1]);
        # reused by subclasses that need a fully-populated tab list.
        self.all_valid_tab_list = tabs.CourseTabList().from_json(self.valid_tabs[1])
class ValidateTabsTestCase(TabListTestCase):
    """Test cases for validating tabs."""
    def test_validate_tabs(self):
        """Invalid tab lists raise; valid ones deserialize completely."""
        tab_list = tabs.CourseTabList()
        # Every malformed list must be rejected wholesale.
        for bad_tabs in self.invalid_tabs:
            with self.assertRaises(tabs.InvalidTabsException):
                tab_list.from_json(bad_tabs)
        # Every well-formed list must round-trip with nothing dropped.
        for good_tabs in self.valid_tabs:
            self.assertEquals(len(tab_list.from_json(good_tabs)), len(good_tabs))
class CourseTabListTestCase(TabListTestCase):
    """Testing the generator method for iterating through displayable tabs"""
    def test_initialize_default_without_syllabus(self):
        """No syllabus tab is added when the course has no syllabus."""
        self.course.tabs = []
        self.course.syllabus_present = False
        tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(tabs.SyllabusTab() not in self.course.tabs)
    def test_initialize_default_with_syllabus(self):
        """A syllabus tab is added when the course has a syllabus."""
        self.course.tabs = []
        self.course.syllabus_present = True
        tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(tabs.SyllabusTab() in self.course.tabs)
    def test_initialize_default_with_external_link(self):
        """An external discussion link replaces the built-in discussion tab."""
        self.course.tabs = []
        self.course.discussion_link = "other_discussion_link"
        tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(tabs.ExternalDiscussionTab(link_value="other_discussion_link") in self.course.tabs)
        self.assertTrue(tabs.DiscussionTab() not in self.course.tabs)
    def test_initialize_default_without_external_link(self):
        """Without an external link, the built-in discussion tab is used."""
        self.course.tabs = []
        self.course.discussion_link = ""
        tabs.CourseTabList.initialize_default(self.course)
        self.assertTrue(tabs.ExternalDiscussionTab() not in self.course.tabs)
        self.assertTrue(tabs.DiscussionTab() in self.course.tabs)
    def test_iterate_displayable(self):
        """Iterating displayable tabs preserves order and expands collections."""
        # enable all tab types
        self.settings.FEATURES['ENABLE_TEXTBOOK'] = True
        self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = True
        self.settings.FEATURES['ENABLE_STUDENT_NOTES'] = True
        self.course.hide_progress_tab = False
        # create 1 book per textbook type
        self.set_up_books(1)
        # initialize the course tabs to a list of all valid tabs
        self.course.tabs = self.all_valid_tab_list
        # enumerate the tabs using the CMS call
        for i, tab in enumerate(tabs.CourseTabList.iterate_displayable_cms(
                self.course,
                self.settings,
        )):
            # CMS iteration yields tabs in the course's configured order
            self.assertEquals(tab.type, self.course.tabs[i].type)
        # enumerate the tabs and verify textbooks and the instructor tab
        for i, tab in enumerate(tabs.CourseTabList.iterate_displayable(
                self.course,
                self.settings,
        )):
            if getattr(tab, 'is_collection_item', False):
                # a collection item was found as a result of a collection tab
                self.assertTrue(getattr(self.course.tabs[i], 'is_collection', False))
            elif i == len(self.course.tabs):
                # the last tab must be the Instructor tab
                self.assertEquals(tab.type, tabs.InstructorTab.type)
            else:
                # all other tabs must match the expected type
                self.assertEquals(tab.type, self.course.tabs[i].type)
        # test including non-empty collections
        self.assertIn(
            tabs.HtmlTextbookTabs(),
            list(tabs.CourseTabList.iterate_displayable_cms(self.course, self.settings)),
        )
        # test not including empty collections
        self.course.html_textbooks = []
        self.assertNotIn(
            tabs.HtmlTextbookTabs(),
            list(tabs.CourseTabList.iterate_displayable_cms(self.course, self.settings)),
        )
    def test_get_tab_by_methods(self):
        """Tests the get_tab methods in CourseTabList"""
        self.course.tabs = self.all_valid_tab_list
        for tab in self.course.tabs:
            # get tab by type
            self.assertEquals(tabs.CourseTabList.get_tab_by_type(self.course.tabs, tab.type), tab)
            # get tab by id
            self.assertEquals(tabs.CourseTabList.get_tab_by_id(self.course.tabs, tab.tab_id), tab)
class DiscussionLinkTestCase(TabTestCase):
    """Test cases for discussion link tab."""
    def setUp(self):
        super(DiscussionLinkTestCase, self).setUp()
        # Two fixture tab lists, identical except for the DiscussionTab.
        self.tabs_with_discussion = [
            tabs.CoursewareTab(),
            tabs.CourseInfoTab(),
            tabs.DiscussionTab(),
            tabs.TextbookTabs(),
        ]
        self.tabs_without_discussion = [
            tabs.CoursewareTab(),
            tabs.CourseInfoTab(),
            tabs.TextbookTabs(),
        ]
    @staticmethod
    def _reverse(course):
        """Custom reverse function"""
        def reverse_discussion_link(viewname, args):
            """reverse lookup for discussion link"""
            # Only the forum view for this exact course resolves; every
            # other lookup implicitly returns None.
            if viewname == "django_comment_client.forum.views.forum_form_discussion" and args == [course.id.to_deprecated_string()]:
                return "default_discussion_link"
        return reverse_discussion_link
    def check_discussion(self, tab_list, expected_discussion_link, expected_can_display_value, discussion_link_in_course=""):
        """Helper function to verify whether the discussion tab exists and can be displayed"""
        self.course.tabs = tab_list
        self.course.discussion_link = discussion_link_in_course
        discussion = tabs.CourseTabList.get_discussion(self.course)
        # Three conditions (tab found, displayable, link matches) are folded
        # into a single boolean and compared against the expectation.
        self.assertEquals(
            (
                discussion is not None and
                discussion.can_display(self.course, self.settings, True, True) and
                (discussion.link_func(self.course, self._reverse(self.course)) == expected_discussion_link)
            ),
            expected_can_display_value
        )
    def test_explicit_discussion_link(self):
        """Test that setting discussion_link overrides everything else"""
        self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            discussion_link_in_course="other_discussion_link",
            expected_discussion_link="other_discussion_link",
            expected_can_display_value=True,
        )
    def test_discussions_disabled(self):
        """Test that other cases return None with discussions disabled"""
        self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = False
        for tab_list in [[], self.tabs_with_discussion, self.tabs_without_discussion]:
            self.check_discussion(
                tab_list=tab_list,
                # `not None` evaluates to True — a dummy link value, since the
                # display check is expected to fail before the link comparison
                # matters.
                expected_discussion_link=not None,
                expected_can_display_value=False,
            )
    def test_tabs_with_discussion(self):
        """Test a course with a discussion tab configured"""
        self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = True
        self.check_discussion(
            tab_list=self.tabs_with_discussion,
            expected_discussion_link="default_discussion_link",
            expected_can_display_value=True,
        )
    def test_tabs_without_discussion(self):
        """Test a course with tabs configured but without a discussion tab"""
        self.settings.FEATURES['ENABLE_DISCUSSION_SERVICE'] = True
        self.check_discussion(
            tab_list=self.tabs_without_discussion,
            # Dummy link value (True); no discussion tab should be found.
            expected_discussion_link=not None,
            expected_can_display_value=False,
        )
| agpl-3.0 |
abhitopia/tensorflow | tensorflow/python/estimator/inputs/queues/feeding_queue_runner.py | 123 | 6899 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A `QueueRunner` that takes a feed function as an argument."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.framework import errors
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner as qr
class _FeedingQueueRunner(qr.QueueRunner):
  """A queue runner that allows the feeding of values such as numpy arrays."""

  def __init__(self, queue=None, enqueue_ops=None, close_op=None,
               cancel_op=None, feed_fns=None,
               queue_closed_exception_types=None):
    """Initialize the queue runner.

    For further documentation, see `queue_runner.py`. Note that
    `FeedingQueueRunner` does not support construction from protobuffer nor
    serialization to protobuffer.

    Args:
      queue: A `Queue`.
      enqueue_ops: List of enqueue ops to run in threads later.
      close_op: Op to close the queue. Pending enqueue ops are preserved.
      cancel_op: Op to close the queue and cancel pending enqueue ops.
      feed_fns: a list of functions that return a dictionary mapping fed
        `Tensor`s to values. Must be the same length as `enqueue_ops`.
      queue_closed_exception_types: Optional tuple of Exception types that
        indicate that the queue has been closed when raised during an enqueue
        operation. Defaults to
        `(tf.errors.OutOfRangeError, tf.errors.CancelledError)`.

    Raises:
      ValueError: `feed_fns` is not `None` and has different length than
        `enqueue_ops`.
    """
    if queue_closed_exception_types is None:
      queue_closed_exception_types = (
          errors.OutOfRangeError, errors.CancelledError)
    super(_FeedingQueueRunner, self).__init__(
        queue, enqueue_ops, close_op,
        cancel_op, queue_closed_exception_types=queue_closed_exception_types)
    if feed_fns is None:
      # No feed functions: each enqueue op runs without a feed_dict.
      self._feed_fns = [None for _ in enqueue_ops]
    else:
      if len(feed_fns) != len(enqueue_ops):
        raise ValueError(
            "If feed_fns is not None, it must have the same length as "
            "enqueue_ops.")
      self._feed_fns = feed_fns

  # pylint: disable=broad-except
  def _run(self, sess, enqueue_op, feed_fn, coord=None):
    """Execute the enqueue op in a loop, close the queue in case of error.

    Args:
      sess: A `Session`.
      enqueue_op: The `Operation` to run.
      feed_fn: the feed function to pass to `sess.run`.
      coord: Optional `Coordinator` object for reporting errors and checking
        for stop conditions.
    """
    # TODO(jamieas): Reduce code duplication with `QueueRunner`.
    if coord:
      coord.register_thread(threading.current_thread())
    # Tracks whether this thread has already decremented its session's
    # run count, so the `finally` clause does not decrement it twice.
    decremented = False
    try:
      while True:
        if coord and coord.should_stop():
          break
        try:
          feed_dict = None if feed_fn is None else feed_fn()
          sess.run(enqueue_op, feed_dict=feed_dict)
        except (errors.OutOfRangeError, errors.CancelledError):
          # This exception indicates that a queue was closed.
          with self._lock:
            self._runs_per_session[sess] -= 1
            decremented = True
            if self._runs_per_session[sess] == 0:
              # Last running thread for this session: close the queue.
              try:
                sess.run(self._close_op)
              except Exception as e:
                # Intentionally ignore errors from close_op.
                logging.vlog(1, "Ignored exception: %s", str(e))
          return
    except Exception as e:
      # This catches all other exceptions.
      if coord:
        coord.request_stop(e)
      else:
        logging.error("Exception in QueueRunner: %s", str(e))
        with self._lock:
          self._exceptions_raised.append(e)
        raise
    finally:
      # Make sure we account for all terminations: normal or errors.
      if not decremented:
        with self._lock:
          self._runs_per_session[sess] -= 1

  def create_threads(self, sess, coord=None, daemon=False, start=False):
    """Create threads to run the enqueue ops for the given session.

    This method requires a session in which the graph was launched. It creates
    a list of threads, optionally starting them. There is one thread for each
    op passed in `enqueue_ops`.

    The `coord` argument is an optional coordinator, that the threads will use
    to terminate together and report exceptions. If a coordinator is given,
    this method starts an additional thread to close the queue when the
    coordinator requests a stop.

    If previously created threads for the given session are still running, no
    new threads will be created.

    Args:
      sess: A `Session`.
      coord: Optional `Coordinator` object for reporting errors and checking
        stop conditions.
      daemon: Boolean. If `True` make the threads daemon threads.
      start: Boolean. If `True` starts the threads. If `False` the
        caller must call the `start()` method of the returned threads.

    Returns:
      A list of threads.
    """
    with self._lock:
      try:
        if self._runs_per_session[sess] > 0:
          # Already started: no new threads to return.
          return []
      except KeyError:
        # We haven't seen this session yet.
        pass
      self._runs_per_session[sess] = len(self._enqueue_ops)
      self._exceptions_raised = []
    # One enqueue thread per (op, feed function) pair.
    ret_threads = [threading.Thread(target=self._run,
                                    args=(sess, op, feed_fn, coord))
                   for op, feed_fn in zip(self._enqueue_ops, self._feed_fns)]
    if coord:
      # Extra watcher thread cancels pending enqueues on coordinator stop.
      ret_threads.append(threading.Thread(target=self._close_on_stop,
                                          args=(sess, self._cancel_op, coord)))
    for t in ret_threads:
      if daemon:
        t.daemon = True
      if start:
        t.start()
    return ret_threads

  def _init_from_proto(self, queue_runner_def):
    """Unsupported: feeding queue runners cannot be built from a proto."""
    raise NotImplementedError(
        "{} does not support initialization from proto.".format(type(
            self).__name__))

  def to_proto(self):
    """Unsupported: feeding queue runners cannot be serialized to a proto."""
    raise NotImplementedError(
        "{} does not support serialization to proto.".format(type(
            self).__name__))
| apache-2.0 |
clayz/crazy-quiz-web | lib/flask/module.py | 850 | 1363 | # -*- coding: utf-8 -*-
"""
flask.module
~~~~~~~~~~~~
Implements a class that represents module blueprints.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import os
from .blueprints import Blueprint
def blueprint_is_module(bp):
    """Return True if the given blueprint is actually a deprecated Module."""
    return isinstance(bp, Module)
class Module(Blueprint):
    """Deprecated module support.  Until Flask 0.6 modules were a different
    name of the concept now available as blueprints in Flask.  They are
    essentially doing the same but have some bad semantics for templates and
    static files that were fixed with blueprints.

    .. versionchanged:: 0.7
       Modules were deprecated in favor for blueprints.
    """

    def __init__(self, import_name, name=None, url_prefix=None,
                 static_path=None, subdomain=None):
        if name is None:
            # Without an explicit name, use the last dotted segment of the
            # import path; a bare package name has no such segment.
            assert '.' in import_name, ('name required if package name '
                                        'does not point to a submodule')
            _, name = import_name.rsplit('.', 1)
        super(Module, self).__init__(
            name, import_name, url_prefix=url_prefix,
            subdomain=subdomain, template_folder='templates')
        # Modules implicitly served a ``static`` directory when one existed.
        static_dir = os.path.join(self.root_path, 'static')
        if os.path.isdir(static_dir):
            self._static_folder = 'static'
| apache-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/computation/tests/test_compat.py | 9 | 2001 | #!/usr/bin/env python
# flake8: noqa
import nose
from itertools import product
from distutils.version import LooseVersion
import pandas as pd
from pandas.util import testing as tm
from pandas.computation.engines import _engines
import pandas.computation.expr as expr
# Every (engine, parser) combination that pandas eval can be driven with.
ENGINES_PARSERS = list(product(_engines, expr._parsers))
def test_compat():
    """Check that ``_NUMEXPR_INSTALLED`` agrees with the installed numexpr.

    Expectations encoded here:
      * numexpr 2.4.4 is blacklisted outright (not considered installed);
      * versions below 2.1 are rejected and emit a ``UserWarning``;
      * every other version should be detected as installed.
    """
    # test we have compat with our version of numexpr
    from pandas.computation import _NUMEXPR_INSTALLED
    # Keep the try narrow: only the import can legitimately raise
    # ImportError. Previously the version checks were inside the try as
    # well, so any stray ImportError would silently skip the test.
    try:
        import numexpr as ne
    except ImportError:
        raise nose.SkipTest("not testing numexpr version compat")
    ver = ne.__version__
    if ver == LooseVersion('2.4.4'):
        assert not _NUMEXPR_INSTALLED
    elif ver < LooseVersion('2.1'):
        with tm.assert_produces_warning(UserWarning,
                                        check_stacklevel=False):
            assert not _NUMEXPR_INSTALLED
    else:
        assert _NUMEXPR_INSTALLED
def test_invalid_numexpr_version():
    """Yield one version-compat check per (engine, parser) combination."""
    # nose-style generator test: each yielded tuple runs as a separate case.
    for pair in ENGINES_PARSERS:
        yield (check_invalid_numexpr_version,) + pair
def check_invalid_numexpr_version(engine, parser):
    """Run a trivial eval with the given engine/parser, honoring the
    numexpr version restrictions that pandas enforces."""
    def run_eval():
        # A minimal expression whose result we can verify exactly.
        a, b = 1, 2
        res = pd.eval('a + b', engine=engine, parser=parser)
        tm.assert_equal(res, 3)
    # Non-numexpr engines have no version constraints to worry about.
    if engine != 'numexpr':
        run_eval()
        return
    try:
        import numexpr as ne
    except ImportError:
        raise nose.SkipTest("no numexpr")
    if ne.__version__ < LooseVersion('2.1'):
        # Too old: pandas should refuse with an informative ImportError.
        with tm.assertRaisesRegexp(ImportError, "'numexpr' version is "
                                   ".+, must be >= 2.1"):
            run_eval()
    elif ne.__version__ == LooseVersion('2.4.4'):
        raise nose.SkipTest("numexpr version==2.4.4")
    else:
        run_eval()
if __name__ == '__main__':
    # Run this module's tests directly, stopping at the first failure and
    # dropping into pdb on errors/failures.
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.