repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
perrygeo/Fiona | fiona/transform.py | 4 | 3374 | """Coordinate and geometry warping and reprojection"""
from fiona._transform import _transform, _transform_geom
def transform(src_crs, dst_crs, xs, ys):
    """Transform coordinates from one reference system to another.

    Thin wrapper: the work is done by the ``_transform`` C extension.

    Parameters
    ----------
    src_crs: str or dict
        A string like 'EPSG:4326' or a dict of proj4 parameters like
        {'proj': 'lcc', 'lat_0': 18.0, 'lat_1': 18.0, 'lon_0': -77.0}
        representing the coordinate reference system on the "source"
        or "from" side of the transformation.
    dst_crs: str or dict
        A string or dict representing the coordinate reference system
        on the "destination" or "to" side of the transformation.
    xs: sequence of float
        A list or tuple of x coordinate values. Must have the same
        length as the ``ys`` parameter.
    ys: sequence of float
        A list or tuple of y coordinate values. Must have the same
        length as the ``xs`` parameter.

    Returns
    -------
    xp, yp: list of float
        A pair of transformed coordinate sequences. The elements of
        ``xp`` and ``yp`` correspond exactly to the elements of the
        ``xs`` and ``ys`` input parameters.

    Examples
    --------
    >>> transform('EPSG:4326', 'EPSG:26953', [-105.0], [40.0])
    ([957097.0952383667], [378940.8419189212])
    """
    # Function is implemented in the _transform C extension module.
    return _transform(src_crs, dst_crs, xs, ys)
def transform_geom(
        src_crs, dst_crs, geom,
        antimeridian_cutting=False, antimeridian_offset=10.0, precision=-1):
    """Transform a geometry obj from one reference system to another.

    Thin wrapper: the work is done by the ``_transform_geom`` C extension.

    Parameters
    ----------
    src_crs: str or dict
        A string like 'EPSG:4326' or a dict of proj4 parameters like
        {'proj': 'lcc', 'lat_0': 18.0, 'lat_1': 18.0, 'lon_0': -77.0}
        representing the coordinate reference system on the "source"
        or "from" side of the transformation.
    dst_crs: str or dict
        A string or dict representing the coordinate reference system
        on the "destination" or "to" side of the transformation.
    geom: obj
        A GeoJSON-like geometry object with 'type' and 'coordinates'
        members.
    antimeridian_cutting: bool, optional
        ``True`` to cut output geometries in two at the antimeridian,
        the default is ``False``.
    antimeridian_offset: float, optional
        A distance in decimal degrees from the antimeridian, outside of
        which geometries will not be cut.
    precision: int, optional
        Optional rounding precision of output coordinates, in number
        of decimal places. A negative value (the default) disables
        rounding.

    Returns
    -------
    obj
        A new GeoJSON-like geometry with transformed coordinates. Note
        that if the output is at the antimeridian, it may be cut and
        of a different geometry ``type`` than the input, e.g., a
        polygon input may result in multi-polygon output.

    Examples
    --------
    >>> transform_geom(
    ...     'EPSG:4326', 'EPSG:26953',
    ...     {'type': 'Point', 'coordinates': [-105.0, 40.0]})
    {'type': 'Point', 'coordinates': (957097.0952383667, 378940.8419189212)}
    """
    # Function is implemented in the _transform C extension module.
    return _transform_geom(
        src_crs, dst_crs, geom,
        antimeridian_cutting, antimeridian_offset, precision)
| bsd-3-clause |
thopiekar/Uranium | UM/View/RenderBatch.py | 1 | 14238 | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
import copy
from UM.Logger import Logger
from UM.Math.Vector import Vector
from UM.View.GL.OpenGL import OpenGL
from UM.View.GL.OpenGLContext import OpenGLContext
from PyQt5.QtGui import QOpenGLVertexArrayObject
vertexBufferProperty = "__gl_vertex_buffer"
indexBufferProperty = "__gl_index_buffer"
## The RenderBatch class represent a batch of objects that should be rendered.
#
# Each RenderBatch contains a list of objects to render and all state related
# to those objects. It tries to minimize changes to state between render the
# individual objects. This means that for example the ShaderProgram used is
# only bound once, at the start of rendering. There are a few values, like
# the model-view-projection matrix that are updated for each object.
#
# Currently RenderBatch objects are created each frame including the
# VertexArrayObject (VAO). This is done to greatly simplify managing
# RenderBatch-changes. Whenever (sets of) RenderBatches are managed throughout
# the lifetime of a session, crossing multiple frames, the usage of VAO's can
# improve performance by reusing them.
class RenderBatch():
    ##  The type of render batch.
    #
    #   This determines some basic state values, like blending on/off and additionally
    #   is used to determine sorting order.
    class RenderType:
        NoType = 0       ## No special state changes are done.
        Solid = 1        ## Depth testing and depth writing are enabled.
        Transparent = 2  ## Depth testing is enabled, depth writing is disabled.
        Overlay = 3      ## Depth testing is disabled.

    ##  The mode to render objects in. These correspond to OpenGL render modes.
    class RenderMode:
        Points = 0x0000
        Lines = 0x0001
        LineLoop = 0x0002
        LineStrip = 0x0003
        Triangles = 0x0004
        TriangleStrip = 0x0005
        TriangleFan = 0x0006

    ##  Blending mode.
    class BlendMode:
        NoBlending = 0  ## Blending disabled.
        Normal = 1      ## Standard alpha blending, mixing source and destination values based on respective alpha channels.
        Additive = 2    ## Additive blending, the value of the rendered pixel is added to the color already in the buffer.

    ##  Init method.
    #
    #   \param shader The shader to use for this batch.
    #   \param kwargs Keyword arguments.
    #                 Possible values:
    #                 - type: The RenderType to use for this batch. Defaults to RenderType.Solid.
    #                 - mode: The RenderMode to use for this batch. Defaults to RenderMode.Triangles.
    #                 - backface_cull: Whether to enable or disable backface culling. Defaults to False.
    #                 - range: A tuple indicating the start and end of a range of triangles to render. Defaults to None.
    #                 - sort: A modifier to influence object sorting. Lower values will cause the object to be rendered before others. Mostly relevant to Transparent mode.
    #                 - blend_mode: The BlendMode to use to render this batch. Defaults to NoBlending when type is Solid, Normal when type is Transparent or Overlay.
    #                 - state_setup_callback: A callback function to be called just after the state has been set up but before rendering.
    #                                         This can be used to do additional alterations to the state that can not be done otherwise.
    #                                         The callback is passed the OpenGL bindings object as first and only parameter.
    #                 - state_teardown_callback: A callback similar to state_setup_callback, but called after everything was rendered, to handle cleaning up state changes made in state_setup_callback.
    def __init__(self, shader, **kwargs):
        self._shader = shader
        self._render_type = kwargs.get("type", self.RenderType.Solid)
        self._render_mode = kwargs.get("mode", self.RenderMode.Triangles)
        self._backface_cull = kwargs.get("backface_cull", False)
        self._render_range = kwargs.get("range", None)
        self._sort_weight = kwargs.get("sort", 0)
        self._blend_mode = kwargs.get("blend_mode", None)
        # BUGFIX: compare against None explicitly. BlendMode.NoBlending is 0,
        # so a caller explicitly requesting NoBlending on a Transparent or
        # Overlay batch used to be silently overridden to Normal by the old
        # falsy check ("if not self._blend_mode").
        if self._blend_mode is None:
            self._blend_mode = self.BlendMode.NoBlending if self._render_type == self.RenderType.Solid else self.BlendMode.Normal
        self._state_setup_callback = kwargs.get("state_setup_callback", None)
        self._state_teardown_callback = kwargs.get("state_teardown_callback", None)
        self._items = []
        # View/projection matrices are captured per-frame in render().
        self._view_matrix = None
        self._projection_matrix = None
        self._view_projection_matrix = None
        self._gl = OpenGL.getInstance().getBindingsObject()

    ##  The RenderType for this batch.
    @property
    def renderType(self):
        return self._render_type

    ##  The RenderMode for this batch.
    @property
    def renderMode(self):
        return self._render_mode

    ##  The shader for this batch.
    @property
    def shader(self):
        return self._shader

    ##  Whether backface culling is enabled or not.
    @property
    def backfaceCull(self):
        return self._backface_cull

    ##  The range of elements to render.
    #
    #   \return The range of elements to render, as a tuple of (start, end)
    @property
    def renderRange(self):
        return self._render_range

    ##  The items to render.
    #
    #   \return A list of dicts, where each item has "transformation", "mesh" and "uniforms" keys.
    @property
    def items(self):
        return self._items

    ##  Less-than comparison method.
    #
    #   This sorts RenderType.Solid before RenderType.Transparent
    #   and RenderType.Transparent before RenderType.Overlay.
    #   Within the same type, lower sort weights render first.
    def __lt__(self, other):
        if self._render_type == other._render_type:
            return self._sort_weight < other._sort_weight
        if self._render_type == self.RenderType.Solid:
            return True
        if self._render_type == self.RenderType.Transparent and other._render_type != self.RenderType.Solid:
            return True
        return False

    ##  Add an item to render to this batch.
    #
    #   \param transformation The transformation matrix to use for rendering the item.
    #   \param mesh The mesh to render with the transform matrix.
    #   \param uniforms A dict of additional uniform bindings to set when rendering the item.
    #                   Note these are set specifically for this item.
    def addItem(self, transformation, mesh, uniforms = None):
        if not transformation:
            Logger.log("w", "Tried to add an item to batch without transformation")
            return
        if not mesh:
            Logger.log("w", "Tried to add an item to batch without mesh")
            return
        self._items.append({ "transformation": transformation, "mesh": mesh, "uniforms": uniforms})

    ##  Render the batch.
    #
    #   Binds the shader once, sets global GL state (cull/depth/blend) based on
    #   this batch's type, then renders every item.
    #
    #   \param camera The camera to render from.
    def render(self, camera):
        if camera is None:
            Logger.log("e", "Unable to render batch without a camera.")
            return
        self._shader.bind()
        if self._backface_cull:
            self._gl.glEnable(self._gl.GL_CULL_FACE)
        else:
            self._gl.glDisable(self._gl.GL_CULL_FACE)
        if self._render_type == self.RenderType.Solid:
            self._gl.glEnable(self._gl.GL_DEPTH_TEST)
            self._gl.glDepthMask(self._gl.GL_TRUE)
        elif self._render_type == self.RenderType.Transparent:
            self._gl.glEnable(self._gl.GL_DEPTH_TEST)
            self._gl.glDepthMask(self._gl.GL_FALSE)
        elif self._render_type == self.RenderType.Overlay:
            self._gl.glDisable(self._gl.GL_DEPTH_TEST)
        if self._blend_mode == self.BlendMode.NoBlending:
            self._gl.glDisable(self._gl.GL_BLEND)
        elif self._blend_mode == self.BlendMode.Normal:
            self._gl.glEnable(self._gl.GL_BLEND)
            self._gl.glBlendFunc(self._gl.GL_SRC_ALPHA, self._gl.GL_ONE_MINUS_SRC_ALPHA)
        elif self._blend_mode == self.BlendMode.Additive:
            self._gl.glEnable(self._gl.GL_BLEND)
            self._gl.glBlendFunc(self._gl.GL_SRC_ALPHA, self._gl.GL_ONE)
        if self._state_setup_callback:
            self._state_setup_callback(self._gl)
        self._view_matrix = camera.getWorldTransformation().getInverse()
        self._projection_matrix = camera.getProjectionMatrix()
        self._view_projection_matrix = camera.getProjectionMatrix().multiply(self._view_matrix)
        self._shader.updateBindings(
            view_matrix = self._view_matrix,
            projection_matrix = self._projection_matrix,
            view_projection_matrix = self._view_projection_matrix,
            view_position = camera.getWorldPosition(),
            light_0_position = camera.getWorldPosition() + Vector(0, 50, 0)
        )
        # The VertexArrayObject (VAO) works like a VCR, recording buffer activities in the GPU.
        # When the same buffers are used elsewhere, one can bind this VertexArrayObject to
        # the context instead of uploading all buffers again.
        # NOTE(review): the VAO is created per render() call and never explicitly
        # destroyed; presumably freed with the QOpenGLVertexArrayObject's Python
        # object -- confirm if batches become long-lived.
        if OpenGLContext.properties["supportsVertexArrayObjects"]:
            vao = QOpenGLVertexArrayObject()
            vao.create()
            if not vao.isCreated():
                Logger.log("e", "VAO not created. Hell breaks loose")
            vao.bind()
        for item in self._items:
            self._renderItem(item)
        if self._state_teardown_callback:
            self._state_teardown_callback(self._gl)
        self._shader.release()

    ##  Render a single item: bind its buffers, set per-item uniforms and
    #   vertex attributes, issue the draw call, then unbind everything.
    def _renderItem(self, item):
        transformation = item["transformation"]
        mesh = item["mesh"]
        normal_matrix = None
        if mesh.hasNormals():
            # Normal matrix: inverse-transpose of the rotation/scale part of the
            # model matrix (translation zeroed out).
            normal_matrix = copy.deepcopy(transformation)
            normal_matrix.setRow(3, [0, 0, 0, 1])
            normal_matrix.setColumn(3, [0, 0, 0, 1])
            normal_matrix = normal_matrix.getInverse().getTransposed()
        model_view_matrix = copy.deepcopy(transformation).preMultiply(self._view_matrix)
        model_view_projection_matrix = copy.deepcopy(transformation).preMultiply(self._view_projection_matrix)
        self._shader.updateBindings(
            model_matrix = transformation,
            normal_matrix = normal_matrix,
            model_view_matrix = model_view_matrix,
            model_view_projection_matrix = model_view_projection_matrix
        )
        if item["uniforms"] is not None:
            self._shader.updateBindings(**item["uniforms"])
        vertex_buffer = OpenGL.getInstance().createVertexBuffer(mesh)
        vertex_buffer.bind()
        if self._render_range is None:
            index_buffer = OpenGL.getInstance().createIndexBuffer(mesh)
        else:
            # glDrawRangeElements does not work as expected and did not get the indices field working..
            # Now we're just uploading a clipped part of the array and the start index always becomes 0.
            index_buffer = OpenGL.getInstance().createIndexBuffer(
                mesh, force_recreate = True, index_start = self._render_range[0], index_stop = self._render_range[1])
        if index_buffer is not None:
            index_buffer.bind()
        # Vertex data is laid out attribute-by-attribute (not interleaved), so
        # each attribute's offset is the running total of the previous arrays.
        self._shader.enableAttribute("a_vertex", "vector3f", 0)
        offset = mesh.getVertexCount() * 3 * 4
        if mesh.hasNormals():
            self._shader.enableAttribute("a_normal", "vector3f", offset)
            offset += mesh.getVertexCount() * 3 * 4
        if mesh.hasColors():
            self._shader.enableAttribute("a_color", "vector4f", offset)
            offset += mesh.getVertexCount() * 4 * 4
        if mesh.hasUVCoordinates():
            self._shader.enableAttribute("a_uvs", "vector2f", offset)
            offset += mesh.getVertexCount() * 2 * 4
        for attribute_name in mesh.attributeNames():
            attribute = mesh.getAttribute(attribute_name)
            self._shader.enableAttribute(attribute["opengl_name"], attribute["opengl_type"], offset)
            if attribute["opengl_type"] == "vector2f":
                offset += mesh.getVertexCount() * 2 * 4
            elif attribute["opengl_type"] == "vector4f":
                offset += mesh.getVertexCount() * 4 * 4
            elif attribute["opengl_type"] == "int":
                offset += mesh.getVertexCount() * 4
            elif attribute["opengl_type"] == "float":
                offset += mesh.getVertexCount() * 4
            else:
                Logger.log("e", "Attribute with name [%s] uses non implemented type [%s]." % (attribute["opengl_name"], attribute["opengl_type"]))
                self._shader.disableAttribute(attribute["opengl_name"])
        if mesh.hasIndices():
            if self._render_range is None:
                if self._render_mode == self.RenderMode.Triangles:
                    self._gl.glDrawElements(self._render_mode, mesh.getFaceCount() * 3 , self._gl.GL_UNSIGNED_INT, None)
                else:
                    self._gl.glDrawElements(self._render_mode, mesh.getFaceCount(), self._gl.GL_UNSIGNED_INT, None)
            else:
                if self._render_mode == self.RenderMode.Triangles:
                    self._gl.glDrawRangeElements(self._render_mode, self._render_range[0], self._render_range[1], self._render_range[1] - self._render_range[0], self._gl.GL_UNSIGNED_INT, None)
                else:
                    self._gl.glDrawElements(self._render_mode, self._render_range[1] - self._render_range[0], self._gl.GL_UNSIGNED_INT, None)
        else:
            self._gl.glDrawArrays(self._render_mode, 0, mesh.getVertexCount())
        self._shader.disableAttribute("a_vertex")
        self._shader.disableAttribute("a_normal")
        self._shader.disableAttribute("a_color")
        self._shader.disableAttribute("a_uvs")
        for attribute_name in mesh.attributeNames():
            attribute = mesh.getAttribute(attribute_name)
            self._shader.disableAttribute(attribute.get("opengl_name"))
        vertex_buffer.release()
        if index_buffer is not None:
            index_buffer.release()
| lgpl-3.0 |
nate1001/chess_jay | gui.py | 1 | 5079 | '''
Copyright Nate Carson 2012
'''
from PyQt4 import QtCore, QtGui, QtSvg
import db
import data
def setAttackSum(self, forces):
    """Fan per-square attack totals out to the individual square items.

    NOTE(review): this def sits at module level in this dump but reads
    ``self._squares`` -- presumably torn from a board/scene class during
    extraction; confirm its intended home before reuse.
    """
    for force in forces:
        # force.tid indexes the square; counts are per-colour attacker totals.
        self._squares[force.tid].setAttackSum(force.c_white, force.c_black)
def setAttackSum(self, c_white, c_black):
    """Tint this square according to the white/black attack counts.

    NOTE(review): this def sits at module level in this dump but calls
    ``self.setBrush`` -- presumably a square graphics-item method; confirm
    its intended home before reuse.
    """
    # Missing counts (None) mean "no attackers of that colour".
    c_white, c_black = c_white or 0, c_black or 0
    factor = 30
    # Blue channel tracks white attackers, red tracks black attackers, and
    # green darkens with overall contention. ``//`` keeps the components
    # integral (QColor wants ints); since factor is even this is numerically
    # identical to the original Python 2 ``factor/2``.
    color = QtGui.QColor(
        min(128 + c_black * factor, 255),
        max(128 - (c_white + c_black) * (factor // 2), 0),
        min(128 + c_white * factor, 255)
    )
    # BUGFIX: in the original, the closing quote of a dead experimental
    # triple-quoted block was placed after this call, so the computed colour
    # was never applied. The dead experiment has been removed and the brush
    # assignment reactivated.
    self.setBrush(QtGui.QBrush(color))
def setByMove(self, move):
    """Update the board display to reflect *move*.

    Advancing by exactly one half-move animates the single piece move;
    anything else (first move shown, or a jump) rebuilds the whole board.
    """
    previous = self._current_move
    advancing_one = (
        previous is not None
        and previous.halfmove() + 1 == move.halfmove()
    )
    if advancing_one:
        self.movePiece(move.start, move.end)
    else:
        self.setByString(move.boardstring())
    self._current_move = move
class MainWindow(QtGui.QMainWindow):
    """Top-level window: the chess board view in the centre, dockable
    game/move lists on the right.

    NOTE(review): ``BoardScene``, ``ChessView`` and ``settings`` are not
    imported in this module as shown -- presumably provided by another
    module in the original project; confirm before running.
    """
    def __init__(self):
        super(MainWindow, self).__init__()
        self.scene = BoardScene(settings.initial_pos)
        layout = QtGui.QHBoxLayout()
        #layout.addWidget(self.toolBox)
        self.view = ChessView(self.scene)
        layout.addWidget(self.view)
        self.widget = QtGui.QWidget()
        self.widget.setLayout(layout)
        self.setCentralWidget(self.widget)
        self.setWindowTitle("Chess Analyzer")
        # Order matters: menus reference the actions, and the docks append
        # their toggle actions to the view menu created here.
        self.createActions()
        self.createMenus()
        self.createDocks()

    def createActions(self):
        """Build the application-wide QActions (quit + view toggles)."""
        self.action_exit = QtGui.QAction("Quit", self,
            shortcut="Ctrl+Q", statusTip="Quit",
            triggered=self.close)
        self.action_show_square_labels = QtGui.QAction("Square Labels", self,
            checkable=True, checked=settings.show_labels, statusTip="",
            triggered=self.view.toggleLabels)
        self.action_show_guides = QtGui.QAction("Guides", self,
            checkable=True, checked=settings.show_guides, statusTip="",
            triggered=self.view.toggleGuides)

    def createMenus(self):
        """Assemble the File and View menus from the actions above."""
        self.file_menu = self.menuBar().addMenu("&File")
        self.file_menu.addAction(self.action_exit)
        self.view_menu = self.menuBar().addMenu("&View")
        self.view_menu.addAction(self.action_show_square_labels)
        self.view_menu.addAction(self.action_show_guides)

    def createDocks(self):
        """Create the move and game docks; the game dock drives the move dock."""
        self.move_dock = Dock(MoveList, 'Moves', self,
            self.scene.setBoard,
            self.scene.setAttacked,
            self.scene.setProtected,
            self.scene.setAttackSum,
            )
        self.game_dock = Dock(GameList, 'Games', self, self.move_dock.items)
class Dock(QtGui.QDockWidget):
    """Dock widget that hosts a list view and registers itself with the
    parent window (right dock area + a show/hide entry in the View menu)."""

    def __init__(self, list_class, name, parent, *args):
        super(Dock, self).__init__(name, parent)
        # Instantiate the wrapped list and make it this dock's content.
        contents = list_class(self, *args)
        self.items = contents
        self.setWidget(contents)
        # Attach to the right-hand side and expose a visibility toggle.
        parent.addDockWidget(QtCore.Qt.RightDockWidgetArea, self)
        parent.view_menu.addAction(self.toggleViewAction())
class DBList(QtGui.QListWidget):
    """Base list widget backed by a database select.

    Subclasses define ``klass`` (the db table class), and implement
    ``load`` and ``doActivate``. Selected rows are cached in ``self.data``
    in the same order as the displayed items.
    """
    def __init__(self, parent, *args):
        super(DBList, self).__init__(parent, *args)
        # Both explicit activation (enter/double-click) and plain row
        # selection trigger the subclass hook.
        self.activated.connect(self.onActivate)
        self.currentRowChanged.connect(self.onRowChanged)
        self.load()

    def _init(self, *args):
        # Re-run the select and mirror the rows into the widget.
        select = self.select(*args)
        self.data = [row for row in select]
        # clear any previous items
        while self.takeItem(0):
            pass
        self.addItems([str(row) for row in self.data])
        self.update()

    def onActivate(self, index):
        # ``activated`` delivers a QModelIndex; map it back to the cached row.
        datum = self.data[index.row()]
        self.doActivate(datum)

    def onRowChanged(self, index):
        # ``currentRowChanged`` delivers a plain int row index.
        datum = self.data[index]
        self.doActivate(datum)

    def select(self):
        # Default select: everything from the bound table class.
        return self.klass.select()

    def doActivate(self, index):
        # Subclass hook: react to a row being chosen.
        raise NotImplementedError

    def load(self, *args):
        # Subclass hook: (re)populate the list.
        raise NotImplementedError
class GameList(DBList):
    """List of stored games; activating one loads its moves into the
    companion move list."""

    klass = db.Games

    def __init__(self, parent, move_list):
        super(GameList, self).__init__(parent)
        # Companion widget that will display the activated game's moves.
        self.move_list = move_list

    def load(self):
        # Games take no filter arguments: populate from a plain select.
        self._init()

    def doActivate(self, game):
        # Push the chosen game's moves into the move panel.
        self.move_list.load(game.id)
class MoveList(DBList):
    """List of moves for one game; activating a move updates the board and
    the attack-sum overlay via the injected callbacks."""

    klass = db.Moves

    def __init__(self, parent, callback, attacked_callback, protected_callback, attacksum_callback):
        super(MoveList, self).__init__(parent)
        # Board-update callbacks injected by MainWindow (scene methods).
        self.callback = callback
        self.attacked_callback = attacked_callback
        self.protected_callback = protected_callback
        self.attacksum_callback = attacksum_callback

    def select(self, *args):
        # args[0] is the game id to filter the moves by.
        return self.klass.select(args[0])

    def load(self, *args):
        # No-op until a game has been chosen (args empty at construction).
        if args:
            self._init(args)

    def doActivate(self, move):
        self.callback(move)
        forces = db.Force.select(move.fen)
        self.attacksum_callback(forces)
        #attacked = db.Attacked.select(move.fen)
        #self.attacked_callback(attacked)
        #protected = db.Protected.select(move.fen)
        #self.protected_callback(protected)
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    mainWindow = MainWindow()
    # Size the window from the board geometry plus margins.
    # NOTE(review): ``settings`` is not imported in this module as shown --
    # presumably provided elsewhere in the original project; confirm.
    s = settings.board_size
    l = settings.square_size
    mainWindow.setGeometry(100, 50, l + s*1.1 +100, l + s*1.2 + 100)
    mainWindow.show()
    sys.exit(app.exec_())
| gpl-3.0 |
jimsrc/seatos | mixed/figs/sheaths.paper/src/together4.py | 1 | 11024 | #!/usr/bin/env ipython
from pylab import *
import numpy as np
import console_colors as ccl
from scipy.io.netcdf import netcdf_file
import os, sys
import matplotlib.patches as patches
import matplotlib.transforms as transforms
from numpy import array
from matplotlib.gridspec import GridSpec
import matplotlib.pyplot as plt
class gral:
    """Plain attribute container used as an ad-hoc namespace for the
    per-curve data loaded from the ASCII profile files."""

    def __init__(self):
        # Placeholder label; callers overwrite/add attributes freely.
        self.name = 'name'


TS = 11
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def makefig(ax, mc, sh, TEXT, TEXT_LOC, YLIMS, varname):
    """Draw one superposed-epoch panel (sheath on the left, MC on the right)
    onto *ax* and return the axis.

    NOTE(review): for varname == 'Temp' this divides the mc/sh arrays, YLIMS
    and TEXT_LOC entries by 1e4 *in place*, mutating the caller's objects --
    presumably safe because each panel is drawn once; confirm before reuse.
    """
    LW = 0.3 # linewidth
    MS = 1.5
    fmc,fsh = 3.0, 1.0 # time-axis scale factors for the MC and sheath parts
    if(varname == 'Temp'):
        # Rescale temperatures to units of 1e4 K (axis label says "x 10^4").
        mc.med /= 1.0e4; sh.med /= 1.0e4
        mc.avr /= 1.0e4; sh.avr /= 1.0e4
        mc.std_err /= 1.0e4; sh.std_err /= 1.0e4
        YLIMS[0] /= 1.0e4; YLIMS[1] /= 1.0e4
        TEXT_LOC['mc'][1] /= 1.0e4
        TEXT_LOC['sh'][1] /= 1.0e4
    # MC curves (normalized time shifted right of the sheath interval)
    time = fsh+fmc*mc.tnorm
    cc = time>=fsh
    ax.plot(time[cc], mc.avr[cc], 'o-', color='black', markersize=MS, label='mean', lw=LW)
    ax.plot(time[cc], mc.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', label='median', lw=LW)
    # MC shaded band: mean +/- standard error of the mean
    inf = mc.avr + mc.std_err/np.sqrt(mc.nValues)
    sup = mc.avr - mc.std_err/np.sqrt(mc.nValues)
    ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
    trans = transforms.blended_transform_factory(
        ax.transData, ax.transAxes)
    rect1 = patches.Rectangle((fsh, 0.), width=fmc, height=1,
        transform=trans, color='blue',
        alpha=0.3)
    ax.add_patch(rect1)
    # sheath curves (normalized time in [0, fsh])
    time = fsh*sh.tnorm
    cc = time<=fsh
    ax.plot(time[cc], sh.avr[cc], 'o-', color='black', markersize=MS, lw=LW)
    ax.plot(time[cc], sh.med[cc], 'o-', color='red', alpha=.8, markersize=MS, markeredgecolor='none', lw=LW)
    # sheath shaded band: mean +/- standard error of the mean
    inf = sh.avr + sh.std_err/np.sqrt(sh.nValues)
    sup = sh.avr - sh.std_err/np.sqrt(sh.nValues)
    ax.fill_between(time[cc], inf[cc], sup[cc], facecolor='gray', alpha=0.5)
    #trans = transforms.blended_transform_factory(
    #    ax.transData, ax.transAxes)
    rect1 = patches.Rectangle((0., 0.), width=fsh, height=1,
        transform=trans, color='orange',
        alpha=0.3)
    ax.add_patch(rect1)
    #ax.legend(loc='best', fontsize=10)
    ax.tick_params(labelsize=TS)
    ax.grid()
    ax.set_xlim(-2.0, 7.0)
    ax.set_ylim(YLIMS)
    # Event-count annotations, positioned per speed class via TEXT_LOC.
    ax.text(TEXT_LOC['mc'][0], TEXT_LOC['mc'][1], TEXT['mc'], fontsize=7)
    ax.text(TEXT_LOC['sh'][0], TEXT_LOC['sh'][1], TEXT['sh'], fontsize=7)
    # Log scale for variables spanning orders of magnitude.
    if(varname in ('beta','Temp', 'rmsB', 'rmsBoB')):
        ax.set_yscale('log')
    else:
        ax.set_yscale('linear')
    return ax
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
# Per-variable plotting configuration. For each observable:
#   label      : y-axis label
#   ylims      : y-axis limits [lo, hi]
#   text_loc_N : {'mc': [x, y], 'sh': [x, y]} positions for the event-count
#                annotations of the N-th speed class (1: slow, 2: mid, 3: fast)
#   nrow       : grid row the panel occupies (absent for variables not plotted)
stf = {}
stf['B'] = {
    'label': 'B [nT]',
    'ylims': [5., 29.],
    'text_loc_1': {'mc':[4.5, 15.0], 'sh':[-1.95, 12.0]},
    'text_loc_2': {'mc':[4.5, 18.0], 'sh':[-1.95, 12.0]},
    'text_loc_3': {'mc':[4.5, 12.0], 'sh':[-1.95, 12.0]},
    'nrow': 1
}
stf['V'] = {
    'label': 'Vsw [km/s]',
    'ylims': [350., 800.],
    'text_loc_1': {'mc':[4.5, 500.0], 'sh':[-1.95, 520.0]},
    'text_loc_2': {'mc':[4.5, 600.0], 'sh':[-1.95, 600.0]},
    'text_loc_3': {'mc':[4.5, 410.0], 'sh':[-1.95, 600.0]},
    'nrow': 2
}
stf['rmsBoB'] = {
    'label': 'rmsBoB [1]',
    'ylims': [0.015, 0.21],
    'text_loc_1': {'mc':[4.5, 0.020], 'sh':[-1.95, 0.02]},
    'text_loc_2': {'mc':[4.5, 0.095], 'sh':[-1.95, 0.02]},
    'text_loc_3': {'mc':[4.5, 0.099], 'sh':[-1.95, 0.02]},
    'nrow': 6
}
stf['rmsB'] = {
    'label': 'rmsB [nT]',
    'ylims': [0.1, 4.0],
    'text_loc_1': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
    'text_loc_2': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
    'text_loc_3': {'mc':[4.5, 1.0], 'sh':[-1.95, 1.3]},
    'nrow': 1
}
stf['beta'] = {
    'label': '$\\beta$ [1]',
    'ylims': [0.02, 10.0],
    'text_loc_1': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
    'text_loc_2': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
    'text_loc_3': {'mc':[4.5, 0.1], 'sh':[-1.95, 0.2]},
    'nrow': 5
}
stf['Pcc'] = {
    'label': '$n_p$ [$cm^{-3}$]',
    'ylims': [1, 23],
    'text_loc_1': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
    'text_loc_2': {'mc':[4.5, 14], 'sh':[-1.95, 16.0]},
    'text_loc_3': {'mc':[4.5, 11], 'sh':[-1.95, 18.0]},
    'nrow': 3
}
stf['Temp'] = {
    'label': 'T ($\\times 10^4$) [K]',
    'ylims': [1e4, 100e4],
    'text_loc_1': {'mc':[4.5, 18.0e4], 'sh':[-1.95, 20.0e4]},
    'text_loc_2': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
    'text_loc_3': {'mc':[4.5, 2.0e4], 'sh':[-1.95, 20.0e4]},
    'nrow': 4
}
stf['AlphaRatio'] = {
    'label': 'alpha ratio [1]',
    'ylims': [0.02, 0.09],
    'text_loc_1': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
    'text_loc_2': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]},
    'text_loc_3': {'mc':[4.5, 0.022], 'sh':[-1.95, 0.07]}
}
stf['CRs'] = {
    'label': '$n_{GCR}$ [%]',
    'ylims': [-8.0, 2.0],
    'text_loc_1': {'mc':[4.5, -4.0], 'sh':[-1.95, -4.5]},
    'text_loc_2': {'mc':[4.5, -7.0], 'sh':[-1.95, -4.5]},
    'text_loc_3': {'mc':[4.5, -7.5], 'sh':[-1.95, -4.5]},
    'nrow': 2
}
TEXT = {}
# Inputs: output dir from argv, left/right (sheath/MC) data dirs from env.
dir_figs = sys.argv[1] #'../figs'
#dir_inp_mc = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dir_inp_sh = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
dir_inp_mc = os.environ['RIGHT']
dir_inp_sh = os.environ['LEFT']
# Solar-wind speed class boundaries [km/s]: slow / mid / fast.
vlo = [100.0, 450.0, 550.0]
vhi = [450.0, 550.0, 3000.0]
nvars = len(stf.keys())
# NOTE: Python 2 print statements (consistent with the rest of this script).
print " input: "
print " %s " % dir_inp_mc
print " %s \n" % dir_inp_sh
print " vlo, vhi: ", (vlo, vhi), '\n'
print " nvars: ", nvars
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
i=2  # NOTE(review): dead assignment, immediately overwritten by the loop below
#fig = figure(1, figsize=(12, 15))
f = plt.figure(1, figsize=(7, 5.8))
nr = 1 # scale for row size
# 3 rows x 3 columns of panels; each column spans 2 grid columns.
gs = GridSpec(nrows=3*nr, ncols=2*3)
gs.update(left=0.1, right=0.98, hspace=0.13, wspace=0.15)
# One column per solar-wind speed class.
for i in range(3):
    fname_inp = 'MCflag2_2before.4after_fgap0.2_Wang90.0_vlo.%3.1f.vhi.%3.1f' % (vlo[i], vhi[i])
    fname_inp_nro_mc = dir_inp_mc + '/n.events_' + fname_inp + '.txt'
    fname_inp_nro_sh = dir_inp_sh + '/n.events_' + fname_inp + '.txt'
    #n = 1 # number of row
    print " ______ col %d ______" % i
    for varname in ('rmsB', 'CRs'):
        # open the count files to look up the number of events for this variable
        fnro_mc = open(fname_inp_nro_mc, 'r')
        fnro_sh = open(fname_inp_nro_sh, 'r')
        for lmc, lsh in zip(fnro_mc, fnro_sh):
            l_mc = lmc.split()
            l_sh = lsh.split()
            if varname==l_mc[0]: # the variable name matches this line
                n = stf[varname]['nrow']
                ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
                Nfinal_mc, Nfinal_sh = int(l_mc[1]), int(l_sh[1]) # nmbr of events
                fnro_mc.close(); fnro_sh.close()
                break
        print " %s"%varname, ' Nfinal_mc:%d' % Nfinal_mc, 'Nfinal_sh:%d' % Nfinal_sh
        mc, sh = gral(), gral()
        fname_inp_mc = dir_inp_mc + '/' + fname_inp + '_%s.txt' % varname
        fname_inp_sh = dir_inp_sh + '/' + fname_inp + '_%s.txt' % varname
        # Columns: normalized time, median, average, std error, sample count.
        mc.tnorm, mc.med, mc.avr, mc.std_err, mc.nValues = np.loadtxt(fname_inp_mc).T
        sh.tnorm, sh.med, sh.avr, sh.std_err, sh.nValues = np.loadtxt(fname_inp_sh).T
        # number of events with more than 80% non-gap data
        TEXT['mc'] = ' N: %d' % Nfinal_mc
        TEXT['sh'] = ' N: %d' % Nfinal_sh
        # Pick the annotation positions for this speed class.
        if(vlo[i]==100.0):
            TEXT_LOC = stf[varname]['text_loc_1'] #1.7, 12.0
        elif(vlo[i]==450.0):
            TEXT_LOC = stf[varname]['text_loc_2'] #1.7, 12.0
        elif(vlo[i]==550.0):
            TEXT_LOC = stf[varname]['text_loc_3'] #1.7, 12.0
        else:
            print " ----> ERROR con 'v_lo'!"
            raise SystemExit
        ylims = array(stf[varname]['ylims']) #[4., 17.]
        ylabel = stf[varname]['label'] #'B [nT]'
        ax = makefig(ax, mc, sh, TEXT, TEXT_LOC, ylims, varname)
        # ticks & labels x (only the bottom row keeps its x labels)
        ax.tick_params(labelsize=TS)
        if n==2: #n==nvars-1:
            ax.set_xlabel('time normalized to\nsheath/MC passage [1]', fontsize=11)
            #ax.xaxis.set_ticklabels([-1,0,1,2,3])
            xticks = [-2,-1,0,1,2,3,4,5,6,7]
            ax.set_xticks(xticks)
            ax.set_xticklabels(xticks)
        else:
            ax.set_xlabel('')
            #ax.get_xaxis().set_ticks([])
            ax.xaxis.set_ticklabels([])
        # ticks & labels y (only the leftmost column keeps its y labels)
        if i==0:
            ax.set_ylabel(ylabel, fontsize=15)
        else:
            ax.set_ylabel('')
            #ax.get_yaxis().set_ticks([])
            ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++ nCR & model-fit
#dirs = {}
#dirs['sheath'] = '../../../../sheaths/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['mc'] = '../../../../mcs/ascii/MCflag2/wShiftCorr/_test_Vmc_'
#dirs['fname_inputs'] = 'MCflag2_2before.4after_fgap0.2_Wang90.0'
#dirs['figs'] = dir_figs
#
#par = {}
#par['lo'] = {
# 'vlo': 100.0,
# 'vhi': 450.0,
# 'tau': 2.36,
# 'bp' : 0.0,
# 'q' : -9.373,
# 'off': 0.89,
# 'bo' : 16.15
#}
#par['mid'] = {
# 'vlo': 450.0,
# 'vhi': 550.0,
# 'tau': 4.18,
# 'bp' : -0.9,
# 'q' : -6.02,
# 'off': 0.0,
# 'bo' : 11.87
#}
#par['hi'] = {
# 'vlo': 550.0,
# 'vhi': 3000.0,
# 'tau': 5.78,
# 'bp' : -0.18,
# 'q' : -5.53,
# 'off': 1.01,
# 'bo' : 14.48
#}
#
#from funcs import build_plot
#n = 3; i=0
#for i, name in zip(range(3), ('lo', 'mid', 'hi')):
# ax = plt.subplot(gs[(n-1)*nr:n*nr, (2*i):(2*(i+1))])
# build_plot(dirs, par[name], ax)
# if i==0:
# ax.set_ylabel('$n_{GCR}$ [%]', fontsize=15)
# else:
# ax.set_ylabel('')
# ax.yaxis.set_ticklabels([])
#+++++++++++++++++++++++++++++++++++++++++
#fig.tight_layout()
#fname_fig = dir_figs + '/fig_vlo.%3.1f_vhi.%3.1f_%s.png'%(vlo, vhi, varname)
# Write the assembled 3-column figure and report where it went.
fname_fig = '%s/figs_splitted_3.png' % dir_figs
savefig(fname_fig, dpi=150, bbox_inches='tight')
close()
print "\n output en:\n %s\n" % fname_fig
#EOF
| mit |
okfn/datatable-py | tabulator/parsers/xls.py | 3 | 3974 | # -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import six
import sys
import xlrd
from ..parser import Parser
from .. import exceptions
from .. import helpers
# Module API
class XLSParser(Parser):
    """Parser to parse Excel (.xls) data format.

    Loads the whole workbook into memory with xlrd and yields rows from a
    single selected sheet via ``extended_rows``.
    """

    # Public

    options = [
        'sheet',
        'fill_merged_cells',
    ]

    def __init__(self, loader, force_parse=False, sheet=1, fill_merged_cells=False):
        # ``sheet`` is either a sheet name (str) or a 1-based index.
        self.__loader = loader
        self.__sheet_pointer = sheet
        # When True, every cell covered by a merged range receives the
        # range's top-left value instead of an empty cell.
        self.__fill_merged_cells = fill_merged_cells
        self.__force_parse = force_parse
        self.__extended_rows = None
        self.__encoding = None
        self.__fragment = None
        self.__bytes = None

    @property
    def closed(self):
        # Closed until open() has attached a byte stream.
        return self.__bytes is None or self.__bytes.closed

    def open(self, source, encoding=None):
        """Load ``source``, select the requested sheet and prime the row iterator."""
        self.close()
        self.__encoding = encoding
        self.__bytes = self.__loader.load(source, mode='b', encoding=encoding)
        # Get book
        file_contents = self.__bytes.read()
        try:
            # formatting_info is required to see merged cells, but xlrd does
            # not implement it for every file flavour -- fall back without it.
            self.__book = xlrd.open_workbook(
                file_contents=file_contents,
                encoding_override=encoding,
                formatting_info=True,
                logfile=sys.stderr
            )
        except NotImplementedError:
            self.__book = xlrd.open_workbook(
                file_contents=file_contents,
                encoding_override=encoding,
                formatting_info=False,
                logfile=sys.stderr
            )
        # Get sheet: by name for strings, otherwise as a 1-based index.
        try:
            if isinstance(self.__sheet_pointer, six.string_types):
                self.__sheet = self.__book.sheet_by_name(self.__sheet_pointer)
            else:
                self.__sheet = self.__book.sheet_by_index(self.__sheet_pointer - 1)
        except (xlrd.XLRDError, IndexError):
            message = 'Excel document "%s" doesn\'t have a sheet "%s"'
            raise exceptions.SourceError(message % (source, self.__sheet_pointer))
        self.__fragment = self.__sheet.name
        # Reset parser
        self.reset()

    def close(self):
        """Close the underlying byte stream if one is open."""
        if not self.closed:
            self.__bytes.close()

    def reset(self):
        """Rewind the byte stream and rebuild the row iterator."""
        helpers.reset_stream(self.__bytes)
        self.__extended_rows = self.__iter_extended_rows()

    @property
    def encoding(self):
        # Encoding requested by the caller (None lets xlrd decide).
        return self.__encoding

    @property
    def fragment(self):
        # Name of the sheet actually opened.
        return self.__fragment

    @property
    def extended_rows(self):
        # Iterator of (row_number, headers, values) triples; headers is None.
        return self.__extended_rows

    # Private

    def __iter_extended_rows(self):
        def type_value(ctype, value):
            """ Detects boolean value, int value, datetime """
            # Boolean
            if ctype == xlrd.XL_CELL_BOOLEAN:
                return bool(value)
            # Excel numbers are only float
            # Float with no decimals can be cast into int
            if ctype == xlrd.XL_CELL_NUMBER and value == value // 1:
                return int(value)
            # Datetime
            if ctype == xlrd.XL_CELL_DATE:
                return xlrd.xldate.xldate_as_datetime(value, self.__book.datemode)
            return value

        for x in range(0, self.__sheet.nrows):
            row_number = x + 1
            row = []
            for y, value in enumerate(self.__sheet.row_values(x)):
                value = type_value(self.__sheet.cell(x, y).ctype, value)
                if self.__fill_merged_cells:
                    # NOTE(review): scans every merged range for every cell;
                    # fine for typical sheets, slow for pathological ones.
                    for xlo, xhi, ylo, yhi in self.__sheet.merged_cells:
                        if x in range(xlo, xhi) and y in range(ylo, yhi):
                            value = type_value(self.__sheet.cell(xlo, ylo).ctype,
                                               self.__sheet.cell_value(xlo, ylo))
                row.append(value)
            yield (row_number, None, row)
| mit |
wevoice/wesub | apps/videos/templatetags/recent_activity.py | 1 | 1465 | # Amara, universalsubtitles.org
#
# Copyright (C) 2013 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from datetime import date
from django import template
from django.conf import settings
from django.utils.dateformat import format as date_format
from activity.models import ActivityRecord
register = template.Library()
LIMIT = settings.RECENT_ACTIVITIES_ONPAGE
@register.inclusion_tag('videos/_recent_activity.html')
def recent_activity(user):
    """Render the most recent activity records for *user* (capped at LIMIT)."""
    records = ActivityRecord.objects.for_user(user)[:LIMIT]
    return {'records': records, 'user_info': user}
@register.inclusion_tag('videos/_video_activity.html')
def video_activity(video, user):
    """Render the most recent activity records for *video* (capped at LIMIT)."""
    records = ActivityRecord.objects.for_video(video)[:LIMIT]
    return {'records': records, 'video': video, 'user': user}
| agpl-3.0 |
jenca-cloud/jenca-authentication | storage/storage.py | 1 | 6752 | """
A storage service for use by a Jenca Cloud authentication service.
"""
import os
from flask import Flask, json, jsonify, request, make_response
from flask.ext.sqlalchemy import SQLAlchemy
from flask_jsonschema import JsonSchema, ValidationError
from flask_negotiate import consumes
from requests import codes
db = SQLAlchemy()
class User(db.Model):
    """
    A user has an email address and a password hash.
    """
    # Primary key: the email address doubles as the user's identifier.
    email = db.Column(db.String, primary_key=True)
    # Hash of the user's password. This service only stores the hash it is
    # given; hashing is the responsibility of the calling service.
    password_hash = db.Column(db.String)
def create_app(database_uri):
    """
    Create an application with a database in a given location.

    :param database_uri: The location of the database for the application.
    :type database_uri: string
    :return: An application instance.
    :rtype: ``Flask``
    """
    application = Flask(__name__)
    application.config.update(
        SQLALCHEMY_DATABASE_URI=database_uri,
        SQLALCHEMY_TRACK_MODIFICATIONS=True,
    )
    db.init_app(application)
    # Create the schema up front so the service is immediately usable.
    with application.app_context():
        db.create_all()
    return application
# Default to an in-memory SQLite database unless overridden by environment.
SQLALCHEMY_DATABASE_URI = os.environ.get('SQLALCHEMY_DATABASE_URI',
                                         'sqlite:///:memory:')

POSTGRES_HOST = os.environ.get('POSTGRES_HOST', None)
POSTGRES_USER = os.environ.get('POSTGRES_USER', 'username')
POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD', 'password')
POSTGRES_DATABASE = os.environ.get('POSTGRES_DATABASE', 'jenca-authorisation')

# If a Postgres host is configured, it takes precedence over the default.
if POSTGRES_HOST is not None:
    # Values of the form 'env:VAR' are an indirection: the real host name
    # is read from the environment variable VAR.
    if POSTGRES_HOST.find('env:') == 0:
        POSTGRES_HOST = os.environ.get(POSTGRES_HOST.split(':')[1])
    SQLALCHEMY_DATABASE_URI = "postgres://%s:%s@%s/%s" % (
        POSTGRES_USER,
        POSTGRES_PASSWORD,
        POSTGRES_HOST,
        POSTGRES_DATABASE
    )

app = create_app(database_uri=SQLALCHEMY_DATABASE_URI)

# Inputs can be validated using JSON schema.
# Schemas are in app.config['JSONSCHEMA_DIR'].
# See https://github.com/mattupstate/flask-jsonschema for details.
app.config['JSONSCHEMA_DIR'] = os.path.join(app.root_path, 'schemas')
jsonschema = JsonSchema(app)
def load_user_from_id(user_id):
    """
    :param user_id: The ID of the user Flask is trying to load.
    :type user_id: string
    :return: The user which has the email address ``user_id`` or ``None`` if
        there is no such user.
    :rtype: ``User`` or ``None``.
    """
    matching_users = User.query.filter_by(email=user_id)
    return matching_users.first()
@app.errorhandler(ValidationError)
def on_validation_error(error):
    """
    Translate a JSON schema validation failure into a 400 response.

    :resjson string title: An explanation that there was a validation error.
    :resjson string message: The precise validation error.
    :status 400:
    """
    # On Python 2 the raw messages look like "u'password' is a required
    # property"; strip the "u'" prefixes. Note this removes ANY "u'"
    # substring, so it could in principle mangle other text.
    detail = error.message.replace("u'", "'")
    body = jsonify(
        title='There was an error validating the given arguments.',
        detail=detail,
    )
    return body, codes.BAD_REQUEST
@app.route('/users/<email>', methods=['GET', 'DELETE'])
@consumes('application/json')
def specific_user_route(email):
    """
    **DELETE**:

    Delete a particular user.

    :reqheader Content-Type: application/json
    :resheader Content-Type: application/json
    :resjson string email: The email address of the deleted user.
    :resjson string password_hash: The password hash of the deleted user.
    :status 200: The user has been deleted.
    :status 404: There is no user with the given ``email``.

    **GET**:

    Get information about particular user.

    :reqheader Content-Type: application/json
    :resheader Content-Type: application/json
    :resjson string email: The email address of the user.
    :resjson string password_hash: The password hash of the user.
    :status 200: The requested user's information is returned.
    :status 404: There is no user with the given ``email``.
    """
    user = load_user_from_id(email)

    if user is None:
        return jsonify(
            title='The requested user does not exist.',
            detail='No user exists with the email "{email}"'.format(
                email=email),
        ), codes.NOT_FOUND

    # Build the response payload while the instance is still live: with
    # SQLAlchemy's default ``expire_on_commit=True``, reading ``user.email``
    # after a committed DELETE tries to refresh a row that no longer exists
    # and raises ``ObjectDeletedError``.
    return_data = jsonify(email=user.email, password_hash=user.password_hash)

    if request.method == 'DELETE':
        db.session.delete(user)
        db.session.commit()

    return return_data, codes.OK
@jsonschema.validate('users', 'create')
def create_user():
    """
    Create a new user. See ``users_route`` for details.
    """
    email = request.json['email']
    password_hash = request.json['password_hash']

    # Reject duplicates up front: email is the primary key.
    if load_user_from_id(email) is not None:
        conflict = jsonify(
            title='There is already a user with the given email address.',
            detail='A user already exists with the email "{email}"'.format(
                email=email),
        )
        return conflict, codes.CONFLICT

    new_user = User(email=email, password_hash=password_hash)
    db.session.add(new_user)
    db.session.commit()

    return jsonify(email=email, password_hash=password_hash), codes.CREATED
@app.route('/users', methods=['GET', 'POST'])
@consumes('application/json')
def users_route():
    """
    **POST**:

    Create a new user.

    :param email: The email address of the new user.
    :type email: string
    :param password_hash: A password hash to associate with the given
        ``email`` address.
    :type password_hash: string
    :reqheader Content-Type: application/json
    :resheader Content-Type: application/json
    :resjson string email: The email address of the new user.
    :resjson string password_hash: The password hash of the new user.
    :status 200: A user with the given ``email`` and ``password_hash`` has
        been created.
    :status 409: There already exists a user with the given ``email``.

    **GET**:

    Get information about all users.

    :reqheader Content-Type: application/json
    :resheader Content-Type: application/json
    :resjsonarr string email: The email address of a user.
    :resjsonarr string password_hash: The password hash of a user.
    :status 200: Information about all users is returned.
    """
    if request.method == 'POST':
        return create_user()

    # The only other accepted method is GET: list every stored user.
    records = []
    for user in User.query.all():
        records.append({'email': user.email,
                        'password_hash': user.password_hash})

    return make_response(
        json.dumps(records),
        codes.OK,
        {'Content-Type': 'application/json'})
if __name__ == '__main__':  # pragma: no cover
    # Specifying 0.0.0.0 as the host tells the operating system to listen on
    # all public IPs. This makes the server visible externally.
    # See http://flask.pocoo.org/docs/0.10/quickstart/#a-minimal-application
    # NOTE(review): port 5001 is presumably chosen so this storage service
    # does not collide with a companion service on Flask's default 5000.
    app.run(host='0.0.0.0', port=5001)
| mit |
ryanbackman/zulip | zerver/lib/sessions.py | 6 | 2375 | from __future__ import absolute_import
import logging
from django.conf import settings
from django.contrib.auth import SESSION_KEY, get_user_model
from django.contrib.sessions.models import Session
from django.utils.timezone import now as timezone_now
from importlib import import_module
from typing import List, Mapping, Optional, Text
from zerver.models import Realm, UserProfile, get_user_profile_by_id
session_engine = import_module(settings.SESSION_ENGINE)
def get_session_dict_user(session_dict):
    # type: (Mapping[Text, int]) -> Optional[int]
    # Compare django.contrib.auth._get_user_session_key
    if SESSION_KEY not in session_dict:
        return None
    pk_field = get_user_model()._meta.pk
    return pk_field.to_python(session_dict[SESSION_KEY])
def get_session_user(session):
    # type: (Session) -> int
    # Decode the session payload and pull out the authenticated user's id.
    # NOTE(review): may return None for anonymous sessions despite the
    # declared int return type (see get_session_dict_user).
    return get_session_dict_user(session.get_decoded())
def user_sessions(user_profile):
    # type: (UserProfile) -> List[Session]
    # Scan every stored session for those belonging to this user.
    matching = []
    for session in Session.objects.all():
        if get_session_user(session) == user_profile.id:
            matching.append(session)
    return matching
def delete_session(session):
    # type: (Session) -> None
    # Delete via the configured session backend so non-database stores
    # (e.g. cache-backed sessions) are handled correctly too.
    session_engine.SessionStore(session.session_key).delete()  # type: ignore # import_module
def delete_user_sessions(user_profile):
    # type: (UserProfile) -> None
    # Log the user out everywhere by removing every session they own.
    target_id = user_profile.id
    for session in Session.objects.all():
        if get_session_user(session) == target_id:
            delete_session(session)
def delete_realm_user_sessions(realm):
    # type: (Realm) -> None
    # Log out every user of the realm. Only unexpired sessions are scanned,
    # since expired ones cannot be used to authenticate anyway.
    #
    # Use a set for the realm's user ids: membership is tested once per
    # live session, and a list would make this O(sessions * realm users).
    realm_user_ids = set(user_profile.id for user_profile in
                         UserProfile.objects.filter(realm=realm))
    for session in Session.objects.filter(expire_date__gte=timezone_now()):
        if get_session_user(session) in realm_user_ids:
            delete_session(session)
def delete_all_user_sessions():
    # type: () -> None
    # Wipe every session in the system (logs everybody out).
    all_sessions = Session.objects.all()
    for current_session in all_sessions:
        delete_session(current_session)
def delete_all_deactivated_user_sessions():
    # type: () -> None
    # Remove sessions belonging to deactivated users, or to users whose
    # whole realm has been deactivated.
    for session in Session.objects.all():
        profile_id = get_session_user(session)
        if profile_id is None:
            # Anonymous session; nothing to deactivate.
            continue
        profile = get_user_profile_by_id(profile_id)
        still_active = profile.is_active and not profile.realm.deactivated
        if still_active:
            continue
        logging.info("Deactivating session for deactivated user %s" % (profile.email,))
        delete_session(session)
| apache-2.0 |
daenamkim/ansible | lib/ansible/modules/cloud/google/gce_img.py | 49 | 6275 | #!/usr/bin/python
# Copyright 2015 Google Inc. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""An Ansible module to utilize GCE image resources."""
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: gce_img
version_added: "1.9"
short_description: utilize GCE image resources
description:
- This module can create and delete GCE private images from gzipped
compressed tarball containing raw disk data or from existing detached
disks in any zone. U(https://cloud.google.com/compute/docs/images)
options:
name:
description:
- the name of the image to create or delete
required: true
default: null
description:
description:
- an optional description
required: false
default: null
family:
description:
- an optional family name
required: false
default: null
version_added: "2.2"
source:
description:
- the source disk or the Google Cloud Storage URI to create the image from
required: false
default: null
state:
description:
- desired state of the image
required: false
default: "present"
choices: ["present", "absent"]
zone:
description:
- the zone of the disk specified by source
required: false
default: "us-central1-a"
timeout:
description:
- timeout for the operation
required: false
default: 180
version_added: "2.0"
service_account_email:
description:
- service account email
required: false
default: null
pem_file:
description:
- path to the pem file associated with the service account email
required: false
default: null
project_id:
description:
- your GCE project ID
required: false
default: null
requirements:
- "python >= 2.6"
- "apache-libcloud"
author: "Tom Melendez (supertom)"
'''
EXAMPLES = '''
# Create an image named test-image from the disk 'test-disk' in zone us-central1-a.
- gce_img:
name: test-image
source: test-disk
zone: us-central1-a
state: present
# Create an image named test-image from a tarball in Google Cloud Storage.
- gce_img:
name: test-image
source: https://storage.googleapis.com/bucket/path/to/image.tgz
# Alternatively use the gs scheme
- gce_img:
name: test-image
source: gs://bucket/path/to/image.tgz
# Delete an image named test-image.
- gce_img:
name: test-image
state: absent
'''
try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError
from libcloud.common.google import ResourceExistsError
from libcloud.common.google import ResourceNotFoundError
_ = Provider.GCE
has_libcloud = True
except ImportError:
has_libcloud = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.gce import gce_connect
GCS_URI = 'https://storage.googleapis.com/'
def create_image(gce, name, module):
    """Create an image with the specified name.

    The configured source may be a Google Cloud Storage URI (https:// or
    gs:// form) or the name of a disk in the configured zone. Returns True
    if the image was created, False if one already existed.
    """
    params = module.params
    source = params.get('source')
    zone = params.get('zone')
    desc = params.get('description')
    timeout = params.get('timeout')
    family = params.get('family')

    if not source:
        module.fail_json(msg='Must supply a source', changed=False)

    if source.startswith(GCS_URI):
        # source is a Google Cloud Storage URI; usable as-is.
        volume = source
    elif source.startswith('gs://'):
        # libcloud only accepts https URI.
        volume = source.replace('gs://', GCS_URI)
    else:
        # Otherwise treat the source as a disk name in the given zone.
        try:
            volume = gce.ex_get_volume(source, zone)
        except ResourceNotFoundError:
            module.fail_json(msg='Disk %s not found in zone %s' % (source, zone),
                             changed=False)
        except GoogleBaseError as e:
            module.fail_json(msg=str(e), changed=False)

    gce_extra_args = {}
    if family is not None:
        gce_extra_args['family'] = family

    # Temporarily widen the connection timeout for the (slow) image build,
    # restoring the previous value no matter how the call ends.
    old_timeout = gce.connection.timeout
    try:
        gce.connection.timeout = timeout
        gce.ex_create_image(name, volume, desc, use_existing=False, **gce_extra_args)
        return True
    except ResourceExistsError:
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    finally:
        gce.connection.timeout = old_timeout
def delete_image(gce, name, module):
    """Delete a specific image resource by name.

    Returns True when the image was deleted, False when it did not exist.
    """
    try:
        gce.ex_delete_image(name)
    except ResourceNotFoundError:
        return False
    except GoogleBaseError as e:
        module.fail_json(msg=str(e), changed=False)
    else:
        return True
def _family_supported(version_string):
    """Return True if this libcloud version supports the 'family' option.

    The version is compared numerically: the previous check used a plain
    string comparison (``__version__ <= '0.20.1'``), which misorders
    versions such as '0.100.0' (lexicographically before '0.20.1').
    Non-numeric suffixes (e.g. '-rc1') are ignored; an unparseable
    version is assumed to be modern rather than rejected spuriously.
    """
    parts = []
    for piece in version_string.split('.')[:3]:
        digits = ''
        for char in piece:
            if char.isdigit():
                digits += char
            else:
                break
        if not digits:
            break
        parts.append(int(digits))
    if not parts:
        return True
    return tuple(parts) > (0, 20, 1)


def main():
    """Ansible entry point: create or delete a GCE image as requested."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            family=dict(),
            description=dict(),
            source=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            zone=dict(default='us-central1-a'),
            service_account_email=dict(),
            pem_file=dict(type='path'),
            project_id=dict(),
            timeout=dict(type='int', default=180)
        )
    )

    if not has_libcloud:
        module.fail_json(msg='libcloud with GCE support is required.')

    gce = gce_connect(module)

    name = module.params.get('name')
    state = module.params.get('state')
    family = module.params.get('family')
    changed = False

    # The 'family' option requires Apache Libcloud 1.0.0+.
    if (family is not None and hasattr(libcloud, '__version__') and
            not _family_supported(libcloud.__version__)):
        module.fail_json(msg="Apache Libcloud 1.0.0+ is required to use 'family' option",
                         changed=False)

    # user wants to create an image.
    if state == 'present':
        changed = create_image(gce, name, module)

    # user wants to delete the image.
    if state == 'absent':
        changed = delete_image(gce, name, module)

    module.exit_json(changed=changed, name=name)


if __name__ == '__main__':
    main()
| gpl-3.0 |
evernym/plenum | storage/kv_store_leveldb.py | 2 | 2887 | import os
import shutil
from typing import Iterable, Tuple
from state.util.utils import removeLockFiles
from storage.kv_store import KeyValueStorage
try:
import leveldb
except ImportError:
print('Cannot import leveldb, please install')
class KeyValueStorageLeveldb(KeyValueStorage):
    """Key/value store backed by a LevelDB database directory on disk.

    Keys and values are handled as bytes (``is_byte`` is True); conversion
    is done with ``to_byte_repr`` from the base class. Read-only mode is
    enforced by this wrapper (``put``/``remove`` raise), not by LevelDB.
    """

    def __init__(self, db_dir, db_name, open=True, read_only=False):
        # NOTE(review): the ``open`` parameter shadows both the builtin and
        # the ``self.open`` method -- kept as-is for interface compatibility.
        if 'leveldb' not in globals():
            raise RuntimeError('Leveldb is needed to use this class')
        self._db_path = os.path.join(db_dir, db_name)
        self._read_only = read_only
        self._db = None
        if open:
            self.open()

    def __repr__(self):
        return self._db_path

    @property
    def is_byte(self) -> bool:
        # This backend stores raw bytes.
        return True

    @property
    def db_path(self) -> str:
        return self._db_path

    @property
    def read_only(self) -> bool:
        return self._read_only

    @property
    def closed(self):
        # True once the LevelDB handle has been released (see close()).
        return self._db is None

    def iterator(self, start=None, end=None, include_key=True, include_value=True, prefix=None):
        """Iterate over keys (and optionally values) in [start, end].

        NOTE(review): ``include_key`` and ``prefix`` are accepted for
        interface compatibility but silently ignored here -- confirm no
        caller relies on them.
        """
        start = self.to_byte_repr(start) if start is not None else None
        end = self.to_byte_repr(end) if end is not None else None
        return self._db.RangeIter(key_from=start, key_to=end, include_value=include_value)

    def put(self, key, value):
        """Store ``value`` under ``key``; forbidden in read-only mode."""
        if self._read_only:
            raise RuntimeError("Not supported operation in read only mode.")
        key = self.to_byte_repr(key)
        value = self.to_byte_repr(value)
        self._db.Put(key, value)

    def get(self, key):
        """Return the value stored under ``key`` (KeyError-like on miss,
        per the leveldb binding)."""
        key = self.to_byte_repr(key)
        return self._db.Get(key)

    def remove(self, key):
        """Delete ``key``; forbidden in read-only mode."""
        if self._read_only:
            raise RuntimeError("Not supported operation in read only mode.")
        key = self.to_byte_repr(key)
        self._db.Delete(key)

    def setBatch(self, batch: Iterable[Tuple]):
        """Write many (key, value) pairs atomically in one batch.

        NOTE(review): unlike put/remove, batch writes do not check
        ``read_only`` -- confirm whether that is intentional.
        """
        b = leveldb.WriteBatch()
        for key, value in batch:
            key = self.to_byte_repr(key)
            value = self.to_byte_repr(value)
            b.Put(key, value)
        self._db.Write(b, sync=False)

    def do_ops_in_batch(self, batch: Iterable[Tuple]):
        """Apply a mixed batch of (op, key, value) writes/deletes atomically."""
        b = leveldb.WriteBatch()
        for op, key, value in batch:
            key = self.to_byte_repr(key)
            value = self.to_byte_repr(value)
            if op == self.WRITE_OP:
                b.Put(key, value)
            elif op == self.REMOVE_OP:
                b.Delete(key)
            else:
                raise ValueError('Unknown operation')
        self._db.Write(b, sync=False)

    def open(self):
        self._db = leveldb.LevelDB(self.db_path)

    def close(self):
        # Remove LevelDB's LOCK file first so the directory can be reopened
        # by another handle later.
        removeLockFiles(self.db_path)
        del self._db
        self._db = None

    def drop(self):
        """Close and delete the entire database directory."""
        self.close()
        shutil.rmtree(self.db_path)

    def reset(self):
        """Recreate an empty database at the same location."""
        self.drop()
        self.open()
| apache-2.0 |
tstanaka/bantorra.old | bin/service.py | 2 | 2511 | import os
import sys
import imp
import time
import ConfigParser
import multiprocessing
from bantorra.util import define
from bantorra.util.log import LOG as L
class ServiceControl(object):
    """Discovers service modules under APP_BIN and starts/stops them.

    Class-level state is shared across instances:
      ``service``      -- maps a service module's NAME to the loaded module.
      ``service_conf`` -- option name -> value pairs read from port.ini.
    """

    service = {}
    service_conf = {}

    def __init__(self):
        self.get_config()
        self.register()

    @classmethod
    def get_config(cls, conf=""):
        """Read port.ini (or the file given by *conf*) into ``service_conf``."""
        cls.service_conf = {}
        if conf == "":
            conf = os.path.join(define.APP_BIN, "port.ini")
        L.debug(conf)
        try:
            config = ConfigParser.ConfigParser()
            config.read(conf)
            for section in config.sections():
                for option in config.options(section):
                    cls.service_conf["%s" % option] = config.get(section, option)
        except Exception as e:
            L.warning("error: could not read config file: %s" % e)

    @classmethod
    def register(cls):
        """Import the ``server`` module of every service directory in APP_BIN."""
        base_dir = define.APP_BIN
        for fdn in os.listdir(base_dir):
            try:
                if fdn.endswith(".pyc") or fdn.endswith(".py") or fdn.endswith(".ini"):
                    # Plain files are not service directories; skip them.
                    pass
                else:
                    # Temporarily extend sys.path so "server" resolves
                    # inside this service's directory only.
                    sys.path.append(os.path.join(base_dir, fdn))
                    f, n, d = imp.find_module("server")
                    module = imp.load_module("server", f, n, d)
                    cls.service[module.NAME] = module
                    sys.path.remove(os.path.join(base_dir, fdn))
            except Exception as e:
                L.warning('error: could not search "server.py" file in %s : %s' % (fdn, e))

    @classmethod
    def start_all(cls):
        """Launch every registered service in its own daemon process."""
        L.info(cls.service_conf)
        try:
            for x in cls.service.values():
                L.info(int(cls.service_conf["kancolle"]))
                # NOTE(review): every service is started on the "kancolle"
                # port; presumably this should look up the port by each
                # service's own name -- confirm before changing.
                p = multiprocessing.Process(
                    target=x.start,
                    args=(int(cls.service_conf["kancolle"]), ))
                p.daemon = True
                # Bug fix: the process was created but never started, so
                # no service ever actually ran.
                p.start()
        except Exception as e:
            L.warning(str(e))

    @classmethod
    def stop_all(cls):
        """Ask every registered service to shut down."""
        try:
            for x in cls.service.values():
                x.shutdown()
        except Exception as e:
            L.warning(str(e))

    @classmethod
    def restart_all(cls):
        """Stop all services, wait briefly, then start them again."""
        cls.stop_all()
        time.sleep(3)
        cls.start_all()
if __name__ == '__main__':
    # Smoke test: bring all services up, let them run for 20 seconds,
    # then shut them down again.
    proc = ServiceControl()
    L.info("Start Service")
    proc.start_all()
    time.sleep(20)
    L.info("Stop Service")
    proc.stop_all()
| mit |
crazyi/ef39s_kernel_3.4.0 | tools/perf/scripts/python/check-perf-trace.py | 11214 | 2503 | # perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
    # Called by perf once before any event is processed.
    print "trace_begin"
    pass
def trace_end():
    # Called by perf after the last event; report anything unhandled.
    print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        vec):
    # Handler for irq:softirq_entry; exercises symbol_str() to decode the
    # softirq vector number into its symbolic name.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "vec=%s\n" % \
        (symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        call_site, ptr, bytes_req, bytes_alloc,
        gfp_flags):
    # Handler for kmem:kmalloc; exercises flag_str() to decode GFP flags.
    print_header(event_name, common_cpu, common_secs, common_nsecs,
        common_pid, common_comm)

    print_uncommon(context)

    print "call_site=%u, ptr=%u, bytes_req=%u, " \
        "bytes_alloc=%u, gfp_flags=%s\n" % \
        (call_site, ptr, bytes_req, bytes_alloc,
        flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
    # Count events that have no dedicated handler, keyed by event name.
    try:
        unhandled[event_name] += 1
    except TypeError:
        # NOTE(review): presumably ``unhandled`` (an autodict) returns a
        # non-numeric placeholder for a missing key, so the first increment
        # raises TypeError -- seed the counter at 1 instead.
        unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
    # One-line prefix shared by all handlers; the trailing comma keeps the
    # cursor on the same line so callers can append event-specific fields.
    print "%-20s %5u %05u.%09u %8u %-20s " % \
        (event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
    # Pull preempt count, flags and lock depth back out of the perf context
    # object, since they are not passed in as handler arguments.
    print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
        % (common_pc(context), trace_flag_str(common_flags(context)), \
        common_lock_depth(context))
def print_unhandled():
    # Print a summary table of events that had no dedicated handler.
    keys = unhandled.keys()
    if not keys:
        return

    print "\nunhandled events:\n\n",

    print "%-40s  %10s\n" % ("event", "count"),
    print "%-40s  %10s\n" % ("----------------------------------------", \
        "-----------"),

    for event_name in keys:
        print "%-40s  %10d\n" % (event_name, unhandled[event_name])
| gpl-2.0 |
katsikas/gnuradio | gr-digital/examples/narrowband/tunnel.py | 20 | 10285 | #!/usr/bin/env python
#
# Copyright 2005,2006,2009,2011 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
# ////////////////////////////////////////////////////////////////////
#
# This code sets up up a virtual ethernet interface (typically
# gr0), and relays packets between the interface and the GNU Radio
# PHY+MAC
#
# What this means in plain language, is that if you've got a couple
# of USRPs on different machines, and if you run this code on those
# machines, you can talk between them using normal TCP/IP
# networking.
#
# ////////////////////////////////////////////////////////////////////
from gnuradio import gr, digital
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
# from current dir
from receive_path import receive_path
from transmit_path import transmit_path
from uhd_interface import uhd_transmitter
from uhd_interface import uhd_receiver
import os, sys
import random, time, struct
#print os.getpid()
#raw_input('Attach and press enter')
# ////////////////////////////////////////////////////////////////////
#
# Use the Universal TUN/TAP device driver to move packets to/from
# kernel
#
# See /usr/src/linux/Documentation/networking/tuntap.txt
#
# ////////////////////////////////////////////////////////////////////
# Linux specific...
# TUNSETIFF ifr flags from <linux/tun_if.h>
# ioctl flag values from <linux/if_tun.h>.
IFF_TUN = 0x0001   # tunnel IP packets
IFF_TAP = 0x0002   # tunnel ethernet frames
IFF_NO_PI = 0x1000   # don't pass extra packet info
IFF_ONE_QUEUE = 0x2000   # beats me ;)

def open_tun_interface(tun_device_filename):
    """Open the TUN/TAP clone device and create a tap interface.

    Returns (fd, ifname) where fd is the open file descriptor used to
    read/write ethernet frames and ifname is the kernel-assigned name
    (e.g. 'gr0').
    """
    from fcntl import ioctl

    # TAP mode (full ethernet frames) without the extra packet-info header.
    mode = IFF_TAP | IFF_NO_PI
    TUNSETIFF = 0x400454ca   # ioctl request number from <linux/if_tun.h>

    tun = os.open(tun_device_filename, os.O_RDWR)
    ifs = ioctl(tun, TUNSETIFF, struct.pack("16sH", "gr%d", mode))
    # The kernel fills in the actual interface name; strip NUL padding.
    ifname = ifs[:16].strip("\x00")
    return (tun, ifname)
# ////////////////////////////////////////////////////////////////////
# the flow graph
# ////////////////////////////////////////////////////////////////////
class my_top_block(gr.top_block):
    """GNU Radio flow graph: UHD source -> RX path, TX path -> UHD sink."""

    def __init__(self, mod_class, demod_class,
                 rx_callback, options):

        gr.top_block.__init__(self)

        # Get the modulation's bits_per_symbol
        args = mod_class.extract_kwargs_from_options(options)
        symbol_rate = options.bitrate / mod_class(**args).bits_per_symbol()

        self.source = uhd_receiver(options.args, symbol_rate,
                                   options.samples_per_symbol,
                                   options.rx_freq, options.rx_gain,
                                   options.spec, options.antenna,
                                   options.verbose)

        self.sink = uhd_transmitter(options.args, symbol_rate,
                                    options.samples_per_symbol,
                                    options.tx_freq, options.tx_gain,
                                    options.spec, options.antenna,
                                    options.verbose)
        # The receiver may have adjusted samples/symbol to match what the
        # hardware can deliver; propagate that value to both modem paths.
        options.samples_per_symbol = self.source._sps

        self.txpath = transmit_path(mod_class, options)
        self.rxpath = receive_path(demod_class, rx_callback, options)
        self.connect(self.txpath, self.sink)
        self.connect(self.source, self.rxpath)

    def send_pkt(self, payload='', eof=False):
        # Queue one packet (or an end-of-burst marker) for transmission.
        return self.txpath.send_pkt(payload, eof)

    def carrier_sensed(self):
        """
        Return True if the receive path thinks there's carrier
        """
        return self.rxpath.carrier_sensed()

    def set_freq(self, target_freq):
        """
        Set the center frequency we're interested in.
        """
        # Tune transmitter and receiver to the same frequency together.
        self.sink.set_freq(target_freq)
        self.source.set_freq(target_freq)
# ////////////////////////////////////////////////////////////////////
# Carrier Sense MAC
# ////////////////////////////////////////////////////////////////////
class cs_mac(object):
    """
    Prototype carrier sense MAC

    Reads packets from the TUN/TAP interface, and sends them to the
    PHY. Receives packets from the PHY via phy_rx_callback, and sends
    them into the TUN/TAP interface.

    Of course, we're not restricted to getting packets via TUN/TAP,
    this is just an example.
    """
    def __init__(self, tun_fd, verbose=False):
        self.tun_fd = tun_fd       # file descriptor for TUN/TAP interface
        self.verbose = verbose
        self.tb = None             # top block (access to PHY)

    def set_top_block(self, tb):
        # Late binding: the flow graph is built after the MAC exists.
        self.tb = tb

    def phy_rx_callback(self, ok, payload):
        """
        Invoked by thread associated with PHY to pass received packet up.

        @param ok: bool indicating whether payload CRC was OK
        @param payload: contents of the packet (string)
        """
        if self.verbose:
            print "Rx: ok = %r  len(payload) = %4d" % (ok, len(payload))
        if ok:
            # Hand valid frames to the kernel network stack.
            os.write(self.tun_fd, payload)

    def main_loop(self):
        """
        Main loop for MAC.
        Only returns if we get an error reading from TUN.

        FIXME: may want to check for EINTR and EAGAIN and reissue read
        """
        min_delay = 0.001 # seconds
        while 1:
            payload = os.read(self.tun_fd, 10*1024)
            if not payload:
                # EOF from the tap device: flush the transmitter and stop.
                self.tb.send_pkt(eof=True)
                break

            if self.verbose:
                print "Tx: len(payload) = %4d" % (len(payload),)

            delay = min_delay
            # Simple CSMA: while the channel is busy, back off, doubling
            # the wait (capped at 50 ms) before each re-check.
            while self.tb.carrier_sensed():
                sys.stderr.write('B')
                time.sleep(delay)
                if delay < 0.050:
                    delay = delay * 2 # exponential back-off

            self.tb.send_pkt(payload)
# /////////////////////////////////////////////////////////////////////////////
# main
# /////////////////////////////////////////////////////////////////////////////
def main():
    # Build the option parser, wire the PHY flow graph to the tap-based
    # MAC, print setup instructions, then run the MAC loop forever.
    mods = digital.modulation_utils.type_1_mods()
    demods = digital.modulation_utils.type_1_demods()

    parser = OptionParser (option_class=eng_option, conflict_handler="resolve")
    expert_grp = parser.add_option_group("Expert")

    parser.add_option("-m", "--modulation", type="choice", choices=mods.keys(),
                      default='gmsk',
                      help="Select modulation from: %s [default=%%default]"
                            % (', '.join(mods.keys()),))

    parser.add_option("-s", "--size", type="eng_float", default=1500,
                      help="set packet size [default=%default]")
    parser.add_option("-v","--verbose", action="store_true", default=False)
    expert_grp.add_option("-c", "--carrier-threshold", type="eng_float", default=30,
                          help="set carrier detect threshold (dB) [default=%default]")
    expert_grp.add_option("","--tun-device-filename", default="/dev/net/tun",
                          help="path to tun device file [default=%default]")

    transmit_path.add_options(parser, expert_grp)
    receive_path.add_options(parser, expert_grp)
    uhd_receiver.add_options(parser)
    uhd_transmitter.add_options(parser)

    # Let every modulation/demodulation class contribute its own options.
    for mod in mods.values():
        mod.add_options(expert_grp)

    for demod in demods.values():
        demod.add_options(expert_grp)

    (options, args) = parser.parse_args ()
    if len(args) != 0:
        parser.print_help(sys.stderr)
        sys.exit(1)

    # open the TUN/TAP interface
    (tun_fd, tun_ifname) = open_tun_interface(options.tun_device_filename)

    # Attempt to enable realtime scheduling
    r = gr.enable_realtime_scheduling()
    if r == gr.RT_OK:
        realtime = True
    else:
        realtime = False
        print "Note: failed to enable realtime scheduling"
    # NOTE(review): ``realtime`` is recorded but never used afterwards.

    # instantiate the MAC
    mac = cs_mac(tun_fd, verbose=True)

    # build the graph (PHY)
    tb = my_top_block(mods[options.modulation],
                      demods[options.modulation],
                      mac.phy_rx_callback,
                      options)

    mac.set_top_block(tb)    # give the MAC a handle for the PHY

    if tb.txpath.bitrate() != tb.rxpath.bitrate():
        print "WARNING: Transmit bitrate = %sb/sec, Receive bitrate = %sb/sec" % (
            eng_notation.num_to_str(tb.txpath.bitrate()),
            eng_notation.num_to_str(tb.rxpath.bitrate()))

    print "modulation:     %s"   % (options.modulation,)
    print "freq:           %s"      % (eng_notation.num_to_str(options.tx_freq))
    print "bitrate:        %sb/sec" % (eng_notation.num_to_str(tb.txpath.bitrate()),)
    print "samples/symbol: %3d" % (tb.txpath.samples_per_symbol(),)

    tb.rxpath.set_carrier_threshold(options.carrier_threshold)
    print "Carrier sense threshold:", options.carrier_threshold, "dB"

    print
    print "Allocated virtual ethernet interface: %s" % (tun_ifname,)
    print "You must now use ifconfig to set its IP address. E.g.,"
    print
    print "  $ sudo ifconfig %s 192.168.200.1" % (tun_ifname,)
    print
    print "Be sure to use a different address in the same subnet for each machine."
    print

    tb.start()    # Start executing the flow graph (runs in separate threads)

    mac.main_loop()    # don't expect this to return...

    tb.stop()     # but if it does, tell flow graph to stop.
    tb.wait()     # wait for it to finish
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C exits quietly instead of dumping a traceback.
        pass
| gpl-3.0 |
wxwilcke/MINOS | directives/optima_D1.MP.py | 1 | 8757 | #!/usr/bin/python3
import logging
from operator import itemgetter
from multiprocessing import Process, Manager, Pool, cpu_count
from functools import partial
from math import floor
from timeit import default_timer as timer
import rdflib
from .abstract_instruction_set import AbstractInstructionSet
from readers import rdf
from writers import rule_set, pickler
from samplers import by_definition as sampler
from algorithms.semantic_rule_learning import generate_semantic_item_sets
from algorithms.semantic_rule_learning_mp import generate_semantic_association_rules,\
generate_common_behaviour_sets,\
extend_common_behaviour_sets,\
evaluate_rules
# Worker processes to spawn for the multiprocessing phases below.
# NOTE(review): cpu_count() already reports *logical* CPUs, so multiplying
# by 2 may oversubscribe machines with SMT enabled -- confirm intent.
NUM_CORES_PER_CPU = 2
NUM_OF_WORKERS = cpu_count() * NUM_CORES_PER_CPU
class Directive(AbstractInstructionSet):
def __init__(self, time=""):
    # NOTE(review): ``time`` is presumably a run timestamp used to tag
    # outputs elsewhere -- confirm against callers.
    self.time = time
    self.logger = logging.getLogger(__name__)
def print_header(self):
    """Print the directive's banner title with a dashed underline."""
    title = "OPTIMA: Artefact Production Events with 3 attributes"
    underline = '-' * len(title)
    print("{}\n{}".format(title, underline))
def load_dataset(self, abox, tbox):
    """Load the instance (ABox) and schema (TBox) graphs and sample the
    production events of interest from the instance graph.

    :param abox: local path to the instance graph
    :param tbox: local path to the schema graph
    :return: tuple of (sampled instance graph, schema graph)
    """
    # read graphs
    kg_i = rdf.read(local_path=abox)
    kg_s = rdf.read(local_path=tbox)

    # sample by pattern: every subject typed as a context-find
    # production event (CRM-EH EHE1002)
    pattern = (None,
               rdflib.RDF.type,
               rdflib.URIRef("http://purl.org/crmeh#EHE1002_ContextFindProductionEvent"))

    # define context
    # spoor with vulling
    # NOTE(review): entries appear to be predicates, or predicate paths
    # (nested lists), followed from each matched subject to collect its
    # context -- confirm against the sampler's interface.
    context = [rdflib.URIRef("http://purl.org/dc/elements/1.1/source"),
               [rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P4F_has_time-span"),
                rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P1F_is_identified_by"),
                rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P2F_has_type"),
                rdflib.RDF.value],
               [rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P108F_has_produced"),
                rdflib.URIRef("http://www.cidoc-crm.org/cidoc-crm/P2F_has_type"),
                rdflib.RDF.value]]

    kg_i_sampled = kg_i.sample(sampler, patterns=[pattern], context=context, strict_context=False)

    return (kg_i_sampled, kg_s)
def run_program(self, dataset, parameters):
self.logger.info("Starting run\nParameters:\n{}".format(
"\n".join(["\t{}: {}".format(k,v) for k,v in parameters.items()])))
self.logger.info("Distributing load over {} cores".format(NUM_OF_WORKERS))
kg_i, kg_s = dataset
# fit model
t0 = timer()
# MP manager
manager = Manager()
# generate semantic item sets from sampled graph
si_sets = manager.dict(generate_semantic_item_sets(kg_i))
# generate common behaviour sets
work = manager.Queue()
keys = list(si_sets.keys())
slices = self.diagonal_matrix_slicer(keys)
cbs_sets = manager.list()
pool = []
for i in range(NUM_OF_WORKERS):
p = Process(target=generate_common_behaviour_sets, args=(si_sets,
cbs_sets,
work,
parameters["similarity_threshold"]))
p.daemon = True
p.start()
pool.append(p)
for slce in slices:
work.put(slce)
for p in pool:
work.put(None)
# join shared variables
for p in pool:
p.join()
# extend common behaviour sets
cbs_size = 2
cbs_sets_extended = manager.list(cbs_sets)
while cbs_size < parameters["max_cbs_size"]:
func = partial(extend_common_behaviour_sets, cbs_sets_extended, parameters["similarity_threshold"])
slices = self.diagonal_matrix_slicer(cbs_sets_extended)
cbs_sets_extention = manager.list()
with Pool(processes=NUM_OF_WORKERS) as pool:
it = pool.imap_unordered(func=func, iterable=slices)
while True:
try:
cbs_subset = next(it)
cbs_sets_extention.extend(cbs_subset)
except StopIteration:
break
cbs_sets.extend(cbs_sets_extention)
cbs_sets_extended = cbs_sets_extention
cbs_size *= 2
# generate semantic item sets from sampled graph association rules
rules = manager.list()
work = manager.Queue()
size = max(1, floor(len(cbs_sets) / NUM_OF_WORKERS))
slices = [slice(i, i+size) for i in range(0, len(cbs_sets), size)]
pool = []
for i in range(NUM_OF_WORKERS):
p = Process(target=generate_semantic_association_rules, args=(kg_i,
kg_s,
cbs_sets,
work,
rules,
parameters["minimal_local_support"]))
p.daemon = True
p.start()
pool.append(p)
for slce in slices:
work.put(slce)
for p in pool:
work.put(None)
# join shared variables
for p in pool:
p.join()
# calculate support and confidence, skip those not meeting minimum requirements
final_rule_set = manager.list()
work = manager.Queue()
size = max(1, floor(len(rules) / NUM_OF_WORKERS))
slices = [slice(i, i+size) for i in range(0, len(rules), size)]
pool = []
for i in range(NUM_OF_WORKERS):
p = Process(target=evaluate_rules, args=(kg_i,
rules,
work,
final_rule_set,
parameters["minimal_support"],
parameters["minimal_confidence"]))
p.daemon = True
p.start()
pool.append(p)
for slce in slices:
work.put(slce)
for p in pool:
work.put(None)
# join shared variables
for p in pool:
p.join()
# sorting rules on both support and confidence
final_rule_set.sort(key=itemgetter(2, 1), reverse=True)
# time took
t1 = timer()
dt = t1 - t0
self.logger.info("Program completed in {:.3f} ms".format(dt))
print(" Program completed in {:.3f} ms".format(dt))
self.logger.info("Found {} rules".format(len(final_rule_set)))
print(" Found {} rules".format(len(final_rule_set)))
return final_rule_set
def write_to_file(self, path="./of/latest", output=[]):
overwrite = False
compress = True
print(" Writing output to {}...".format(path))
rule_set.pretty_write(output, path, overwrite, compress)
pickler.write(output, path+".pickle", overwrite)
def run(self, abox, tbox, output_path):
self.print_header()
print(" {}\n".format(self.time))
parameters = {}
parameters["similarity_threshold"] = .9
parameters["max_cbs_size"] = 2
parameters["minimal_local_support"] = 0.7
parameters["minimal_support"] = 0.0
parameters["minimal_confidence"] = 0.5
print(" Importing Data Sets...")
dataset = self.load_dataset(abox, tbox)
print(" Initiated Pattern Learning...")
output = self.run_program(dataset, parameters)
if len(output) > 0:
self.write_to_file(output_path, output)
def diagonal_matrix_slicer(self, items=[]):
slices = []
n = len(items)
total_work_load = sum(range(n))
avg_work_load = total_work_load / NUM_OF_WORKERS
work_load_start = n
work_load_end = work_load_start
while len(slices) < NUM_OF_WORKERS:
work_load = 0
while work_load < avg_work_load and work_load_start > 0:
work_load_start -= 1
work_load = sum(range(work_load_end, work_load_start, -1))
slices.append(range(n-work_load_end, n-work_load_start))
work_load_end = work_load_start
return slices
| gpl-3.0 |
robotichead/NearBeach | tests/settings.py | 1 | 3530 | """
Django Settings for TESTING PURPOSES
Do not utilise this settings.py file for your own project. Even if it is not
a production environment.
This file is only for the automatic testing and is not build for server use.
"""
import os

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# (Hard-coding is acceptable here: this settings module exists only for
# the automated test suite, never for deployment.)
SECRET_KEY = 'jz0k8%ecl#k!z+(9+5(^do1w!11ysus21m41m@i9c#u)*vk($o'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []

# Application definition
INSTALLED_APPS = [
    'NearBeach.apps.NearBeachConfig',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'NearBeach.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

#WSGI_APPLICATION = 'untitled.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }

# CI database: credentials match the throwaway MySQL service used by the
# test runner only.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'test_db',
        'USER': 'root',
        'PASSWORD': 'rootpw',
        'HOST': '127.0.0.1',
        'PORT': '3306',
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Paths for private (permission-checked) media served by NearBeach.
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
PRIVATE_MEDIA_ROOT = os.path.join(PROJECT_PATH, 'private')
PRIVATE_MEDIA_SERVER = 'DefaultServer'
PRIVATE_MEDIA_URL = '/private/'

# Static and uploaded-media locations.
STATIC_URL = '/static/'
STATIC_ROOT= os.path.join(BASE_DIR,'static/')
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR,'media/')
| mit |
CleverChuk/ices | Python/multijob_module.py | 1 | 3479 | """
Author: Chukwubuikem Ume-Ugwa
Email: chubiyke@gmail.com
MIT License
Copyright (c) 2017 CleverChuk
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from dataparser import *
from multiprocessing import Pool, Manager
import os
import time
# Shared multiprocessing state for all workers in this module.
manager = Manager()
heightD = manager.dict() # holds values for minimum height of each particle
                         # NOTE(review): appears unused in this module -- confirm
TSIZE = 8 # data type size in bytes
N_OFFSETS = 44 # number of data
FCOLOR = genColor(N_OFFSETS, manager)

# Dimension of the simulation bed
xsize = 78
ysize = 112
zsize = 104

hOut = "HeightData"  # output file name for per-frame height ranges
def startChild(fname):
    """Spawn producer workers and a single consumer for one data file.

    NOTE(review): this function appears to be dead/broken code -- ``iam`` and
    ``self`` are never defined, ``Process`` is not imported in this module,
    and ``producer``/``consumer`` do not exist here.  It is never called by
    the ``__main__`` block (which uses multijob instead); confirm before
    attempting to revive it.
    """
    # DISALLOWED IN PYTHON
    iam.fn = fname
    dictn = iam.manager.dict()
    mylist = iam.manager.list()
    pool = Pool()

    # passing offset multiplier to the producer task
    pool.map(iam.producer, [i for i in range(1 , iam.N_OFFSETS)], 1)

    # Feeds task from producers into the list
    for i, j in self.dictn.items():
        mylist.append(j[0])

    # single process to handle plotting
    proc = Process(target=iam.consumer, args=(mylist, ))
    proc.start()
    proc.join()
def multijob(fname):
    """
    Handles reading and plotting of data in file with name fname.

    Reads N_OFFSETS-1 eta fields from the file, records each frame's height
    range into the HeightData output file, and renders all frames into one
    3D figure.
    """
    print("Starting multijob from process: %d" % os.getpid())
    fig = plt.figure()
    # Fixed: the original constructed Axes3D(fig) twice, attaching a second,
    # redundant axes to the figure (the first instance was discarded).
    axis = Axes3D(fig)
    heightL = manager.list()  # per-frame height ranges, written to hOut below
    axis.set_xlim([0, ysize])
    # NOTE(review): all three limits use ysize although xsize/zsize are
    # defined at module scope -- confirm a cubic viewport is intended.
    axis.set_ylim([0, ysize])
    axis.set_zlim([0, ysize])
    axis.view_init(elev = 40, azim = 50)
    coords = manager.list()
    rho = readsingle(fname)  # base field, used only when filtering is re-enabled
    for i in range(1, N_OFFSETS):
        eta_s = readsingle(fname, i * TSIZE)
        # eta_s = process(rho, filter_eta(eta_s))
        coords.append(getcoords(eta_s, xsize, ysize, zsize))
        # height range = max - min of the z-coordinate column of this frame
        heightL.append(max(coords[-1][-2]) - min(coords[-1][-2]))

    writtable(hOut, str(heightL).strip('[]'))
    plot(coords, fig, axis, count = "ALL", fcolor = FCOLOR, multijob = (True, fname))
    print("Finished multijob from process: %d" % os.getpid())
if __name__ == "__main__":
print("Starting mutiple jobs in a process task")
import timeit, sys
start_time = timeit.default_timer()
if(os.path.exists(hOut)):
os.remove(hOut)
pool = Pool()
files = list()
MAXCOUNT = 4
STEP = 2
START = 0
FNAME = "fullT{0}.dat"
## file with filesname to work on
for i in range(START, MAXCOUNT, STEP):
files.append(FNAME.format(i))
pool.map(multijob, files, 1)
elapsed = timeit.default_timer() - start_time
print("total time %d seconds" % elapsed)
print("Finished multiple job in a process task")
| mit |
laayis/yowsup | yowsup/layers/protocol_groups/protocolentities/iq_groups_leave_success.py | 39 | 1424 | from yowsup.structs import ProtocolTreeNode
from yowsup.layers.protocol_iq.protocolentities import ResultIqProtocolEntity
class SuccessLeaveGroupsIqProtocolEntity(ResultIqProtocolEntity):
    '''
    Result iq confirming the client successfully left a group:

    <iq type="result" from="g.us" id="{{ID}}">
        <leave>
            <group id="{{GROUP_JID}}"></group>
        </leave>
    </iq>
    '''
    def __init__(self, _id, groupId):
        # Group iqs always originate from the "g.us" service jid.
        super(SuccessLeaveGroupsIqProtocolEntity, self).\
            __init__(_from="g.us", _id=_id)
        self.setProps(groupId)

    def setProps(self, groupId):
        # Jid of the group that was left.
        self.groupId = groupId

    def __str__(self):
        out = super(SuccessLeaveGroupsIqProtocolEntity, self).__str__()
        out += "Group Id: %s\n" % self.groupId
        return out

    def toProtocolTreeNode(self):
        # Serialize back into the XML tree shown in the class docstring.
        node = super(SuccessLeaveGroupsIqProtocolEntity, self).\
            toProtocolTreeNode()
        leaveNode = ProtocolTreeNode(
            "leave", {}, [ProtocolTreeNode("group", {"id": self.groupId})]
        )
        node.addChild(leaveNode)
        return node

    @staticmethod
    def fromProtocolTreeNode(node):
        # Reuse the parent parser, then narrow the class in place and pull
        # the group id out of the <leave><group id=".."/> child.
        entity = super(SuccessLeaveGroupsIqProtocolEntity, SuccessLeaveGroupsIqProtocolEntity).fromProtocolTreeNode(node)
        entity.__class__ = SuccessLeaveGroupsIqProtocolEntity
        entity.setProps(
            node.getChild("leave").getChild("group").getAttributeValue("id")
        )
        return entity
| gpl-3.0 |
Parlin-Galanodel/scrapy | scrapy/spidermiddlewares/httperror.py | 18 | 1921 | """
HttpError Spider Middleware
See documentation in docs/topics/spider-middleware.rst
"""
import logging
from scrapy.exceptions import IgnoreRequest
logger = logging.getLogger(__name__)
class HttpError(IgnoreRequest):
    """A non-200 response was filtered"""

    def __init__(self, response, *args, **kwargs):
        # Keep the filtered response so errbacks can inspect
        # failure.value.response.
        self.response = response
        super(HttpError, self).__init__(*args, **kwargs)
class HttpErrorMiddleware(object):
    """Spider middleware that drops unsuccessful (non-2xx) responses.

    A response survives filtering if allowed globally (HTTPERROR_ALLOW_ALL),
    per spider (``handle_httpstatus_list`` attribute / HTTPERROR_ALLOWED_CODES
    setting) or per request (``handle_httpstatus_all`` /
    ``handle_httpstatus_list`` meta keys).  Filtered responses raise
    :class:`HttpError`, which is counted and logged, and the spider callback
    is never invoked for them.
    """

    @classmethod
    def from_crawler(cls, crawler):
        """Standard Scrapy factory: build the middleware from crawler settings."""
        return cls(crawler.settings)

    def __init__(self, settings):
        self.handle_httpstatus_all = settings.getbool('HTTPERROR_ALLOW_ALL')
        self.handle_httpstatus_list = settings.getlist('HTTPERROR_ALLOWED_CODES')

    def process_spider_input(self, response, spider):
        status = response.status
        # Successful responses pass straight through.
        if 200 <= status < 300:
            return

        meta = response.meta
        if 'handle_httpstatus_all' in meta:
            return

        # Precedence: request meta list > global allow-all > spider/setting list.
        if 'handle_httpstatus_list' in meta:
            allowed = meta['handle_httpstatus_list']
        elif self.handle_httpstatus_all:
            return
        else:
            allowed = getattr(spider, 'handle_httpstatus_list',
                              self.handle_httpstatus_list)

        if status not in allowed:
            raise HttpError(response, 'Ignoring non-200 response')

    def process_spider_exception(self, response, exception, spider):
        # Only swallow our own HttpError; anything else propagates (None).
        if not isinstance(exception, HttpError):
            return None

        stats = spider.crawler.stats
        stats.inc_value('httperror/response_ignored_count')
        stats.inc_value(
            'httperror/response_ignored_status_count/%s' % response.status
        )
        logger.info(
            "Ignoring response %(response)r: HTTP status code is not handled or not allowed",
            {'response': response}, extra={'spider': spider},
        )
        # Returning an (empty) iterable tells Scrapy the exception is handled.
        return []
| bsd-3-clause |
CyanogenMod/android_external_chromium_org | tools/telemetry/telemetry/unittest/simple_mock.py | 17 | 3280 | # Copyright 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A very very simple mock object harness."""
# Wildcard sentinel: an expected argument equal to DONT_CARE matches any value.
DONT_CARE = ''
class MockFunctionCall(object):
    """One programmed expectation: a call name, its args and a canned result."""

    def __init__(self, name):
        self.name = name
        self.args = ()
        self.return_value = None
        self.when_called_handlers = []

    def WithArgs(self, *args):
        """Set the expected positional arguments; chainable."""
        self.args = args
        return self

    def WillReturn(self, value):
        """Set the value the mocked call will produce; chainable."""
        self.return_value = value
        return self

    def WhenCalled(self, handler):
        """Register a callback invoked with the actual call's arguments."""
        self.when_called_handlers.append(handler)

    def VerifyEquals(self, got):
        """Raise if *got* does not match this expectation.

        DONT_CARE entries in the expected args act as wildcards.
        """
        if self.name != got.name or len(self.args) != len(got.args):
            raise Exception('Self %s, got %s' % (repr(self), repr(got)))
        for expected, actual in zip(self.args, got.args):
            if expected != DONT_CARE and expected != actual:
                raise Exception('Self %s, got %s' % (repr(self), repr(got)))

    def __repr__(self):
        def arg_to_text(a):
            # Wildcards render as '_' to keep traces readable.
            return '_' if a == DONT_CARE else repr(a)

        args_text = ', '.join(arg_to_text(a) for a in self.args)
        if self.return_value in (None, DONT_CARE):
            return '%s(%s)' % (self.name, args_text)
        return '%s(%s)->%s' % (self.name, args_text, repr(self.return_value))
class MockTrace(object):
    """Recording shared by a mock object graph: programmed expectations plus
    the index of the next one that must be matched."""

    def __init__(self):
        # Expectations are appended by MockObject.ExpectCall; the index
        # advances as installed hooks consume them in order.
        self.expected_calls = []
        self.next_call_index = 0
class MockObject(object):
    """Mock whose expected calls are programmed up front and verified in order.

    Every MockObject created with a parent shares that parent's MockTrace, so
    the expected call sequence is global across the whole object graph.
    """

    def __init__(self, parent_mock = None):
        if parent_mock:
            self._trace = parent_mock._trace # pylint: disable=W0212
        else:
            self._trace = MockTrace()

    def __setattr__(self, name, value):
        # Plain assignment is allowed only for the bootstrap _trace attribute
        # and for installed hook functions (marked with .is_hook); any other
        # attribute must itself be a MockObject so calls on it are traced.
        if (not hasattr(self, '_trace') or
            hasattr(value, 'is_hook')):
            object.__setattr__(self, name, value)
            return
        assert isinstance(value, MockObject)
        object.__setattr__(self, name, value)

    def SetAttribute(self, name, value):
        # Convenience wrapper; goes through __setattr__ above.
        setattr(self, name, value)

    def ExpectCall(self, func_name, *args):
        """Program one expected call and return it for further chaining."""
        # Expectations may only be added before any hook has fired.
        assert self._trace.next_call_index == 0
        if not hasattr(self, func_name):
            self._install_hook(func_name)
        call = MockFunctionCall(func_name)
        self._trace.expected_calls.append(call)
        call.WithArgs(*args)
        return call

    def _install_hook(self, func_name):
        # Install a callable attribute that checks each actual call against
        # the next programmed expectation and returns its canned value.
        def handler(*args, **_):
            got_call = MockFunctionCall(
                func_name).WithArgs(*args).WillReturn(DONT_CARE)
            if self._trace.next_call_index >= len(self._trace.expected_calls):
                raise Exception(
                    'Call to %s was not expected, at end of programmed trace.' %
                    repr(got_call))
            expected_call = self._trace.expected_calls[
                self._trace.next_call_index]
            expected_call.VerifyEquals(got_call)
            self._trace.next_call_index += 1
            for h in expected_call.when_called_handlers:
                h(*args)
            return expected_call.return_value
        # Marking the handler lets __setattr__ accept it as a non-MockObject.
        handler.is_hook = True
        setattr(self, func_name, handler)
class MockTimer(object):
    """Fake clock for tests: Sleep() advances virtual time instead of blocking."""

    def __init__(self):
        self._elapsed_time = 0

    def Sleep(self, time):
        """Advance the virtual clock by *time* units without blocking."""
        self._elapsed_time = self._elapsed_time + time

    def GetTime(self):
        """Return the current virtual time."""
        return self._elapsed_time

    def SetTime(self, time):
        """Jump the virtual clock to an absolute value."""
        self._elapsed_time = time
| bsd-3-clause |
rudisherry666/paigow | mainsite/settings.py | 1 | 6720 | # Django settings for paigow project.
import os
import sys

# The project starts at mainsite/ rather than top-level at the application,
# but we use a lot of things from the paigow/ folder. Create a global for
# the paigow folder as well.
PROJECT_PATH = os.path.realpath(os.path.dirname(__file__)) # mainsite/
(PAIGOW_APP_PATH, DUMMY) = os.path.split(os.path.dirname(__file__))
PAIGOW_PATH = PAIGOW_APP_PATH + "/paigow"

# Development defaults; flip off for production deployments.
DEBUG = True
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    # ('Rudi Sherry', 'rudisherry666@gmail.com'),
)

MANAGERS = ADMINS
# set up the database. For local development it's easiest to
# use sqlite3, so we do that, and we depend on the developer having
# set up an environment variable on their machine called "LOCAL_DEV"
# which is set to 'true'.
try:
if (bool(os.environ.get('LOCAL_DEV', False))):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': PROJECT_PATH + '/database/paigow.sqlite',
'USER': '', # not needed since we're local, default is always trusted
'PASSWORD': '',
'HOST': '',
}
}
else:
# In heroku (where we deploy on the web), we use postgres; existence
# of the postgres database is set up by previous commands to heroku
# (TBD: make that some sort of automatic script), and the connection
# to it from python is set up by the file 'requirements.txt' which
# includes psycopg2 (the python extension for postgres).
#
# dj_database_url is an egg that uses DATABASE_URL to find the
# correct database, and that is also set up by previous heroku
# commands.
import dj_database_url
DATABASES = {
'default': dj_database_url.config(default='postgres://localhost')
}
except:
# uh-oh, something went wrong but we don't know what.
print "Unexpected error creating DATABASES:", sys.exc_info()
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'America/Los_Angeles'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True

# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True

# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''

# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''

# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'

# Additional locations of static files
STATICFILES_DIRS = (
    # Put strings here, like "/home/html/static" or "C:/www/django/static".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PROJECT_PATH + "/static/",
)

# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    # 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)

# Make this unique, and don't share it with anybody.
# NOTE(review): committed to source control; rotate and move to an
# environment variable before any real deployment.
SECRET_KEY = 's1upu83yei)f#39&1473$atc63=80*q==jv*c%n#f03crfm68r'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    # Uncomment the next line for simple clickjacking protection:
    # 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

# this allows the messages app/framework to get messages into pages
TEMPLATE_CONTEXT_PROCESSORS = (
    'django.core.context_processors.request',
    'django.contrib.messages.context_processors.messages',
)

ROOT_URLCONF = 'mainsite.urls'

# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'paigow.wsgi.application'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    PAIGOW_PATH + '/templates/',
    PROJECT_PATH + '/templates/',
)

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    # Uncomment the next line to enable admin documentation:
    # 'django.contrib.admindocs',
    'paigow',
)

# For testing we get fixtures from here
FIXTURE_DIRS = (
    PAIGOW_PATH + '/fixtures/',
)

# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'filters': {
        'require_debug_false': {
            '()': 'django.utils.log.RequireDebugFalse'
        }
    },
    'handlers': {
        'mail_admins': {
            'level': 'ERROR',
            'filters': ['require_debug_false'],
            'class': 'django.utils.log.AdminEmailHandler'
        }
    },
    'loggers': {
        'django.request': {
            'handlers': ['mail_admins'],
            'level': 'ERROR',
            'propagate': True,
        },
    }
}
| apache-2.0 |
Sohamlad7/kernel-msm | tools/perf/scripts/python/failed-syscalls-by-pid.py | 11180 | 2058 | # failed system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide failed system call totals, broken down by pid.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os
import sys

# perf's embedded interpreter exposes its helper modules relative to
# PERF_EXEC_PATH; make them importable before the star imports below.
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

usage = "perf script -s syscall-counts-by-pid.py [comm|pid]\n"

# Optional filter: a numeric argument selects a pid, anything else a comm.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except ValueError:
        # Fixed: was a bare ``except:`` -- only a non-numeric argument should
        # fall through to the comm filter; bare except also caught
        # KeyboardInterrupt/SystemExit.
        for_comm = sys.argv[1]

# comm -> pid -> syscall id -> errno -> count (autodict from Core)
syscalls = autodict()
def trace_begin():
    # Called once by perf before event processing starts.
    print "Press control+C to stop and show the summary"
def trace_end():
    # Called once by perf after all events are processed (or on Ctrl+C).
    print_error_totals()
def raw_syscalls__sys_exit(event_name, context, common_cpu,
    common_secs, common_nsecs, common_pid, common_comm,
    id, ret):
    # perf callback for the raw_syscalls:sys_exit tracepoint.
    # Skip events not matching the optional comm/pid filter.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return

    # Negative return values are failed syscalls: tally per
    # comm -> pid -> syscall id -> errno.  The first hit on a fresh
    # autodict leaf raises TypeError, which seeds the counter.
    if ret < 0:
        try:
            syscalls[common_comm][common_pid][id][ret] += 1
        except TypeError:
            syscalls[common_comm][common_pid][id][ret] = 1
def print_error_totals():
    # Print the accumulated failed-syscall counts, grouped by comm/pid,
    # most frequent errno first.
    if for_comm is not None:
        print "\nsyscall errors for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall errors:\n\n",

    print "%-30s  %10s\n" % ("comm [pid]", "count"),
    print "%-30s  %10s\n" % ("------------------------------", \
                                 "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            for id in id_keys:
                print "  syscall: %-16s\n" % syscall_name(id),
                ret_keys = syscalls[comm][pid][id].keys()
                # sort errno entries by descending count
                for ret, val in sorted(syscalls[comm][pid][id].iteritems(), key = lambda(k, v): (v, k),  reverse = True):
                    print "    err = %-20s  %10d\n" % (strerror(ret), val),
| gpl-2.0 |
nkgilley/home-assistant | homeassistant/components/hunterdouglas_powerview/cover.py | 6 | 9380 | """Support for hunter douglas shades."""
import asyncio
import logging
from aiopvapi.helpers.constants import ATTR_POSITION1, ATTR_POSITION_DATA
from aiopvapi.resources.shade import (
ATTR_POSKIND1,
MAX_POSITION,
MIN_POSITION,
factory as PvShade,
)
import async_timeout
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_SHADE,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from .const import (
COORDINATOR,
DEVICE_INFO,
DEVICE_MODEL,
DOMAIN,
LEGACY_DEVICE_MODEL,
PV_API,
PV_ROOM_DATA,
PV_SHADE_DATA,
ROOM_ID_IN_SHADE,
ROOM_NAME_UNICODE,
SHADE_RESPONSE,
STATE_ATTRIBUTE_ROOM_NAME,
)
from .entity import ShadeEntity
_LOGGER = logging.getLogger(__name__)

# Estimated time it takes to complete a transition
# from one state to another (seconds, for a full 100-step travel)
TRANSITION_COMPLETE_DURATION = 30

# Serialize entity updates (presumably the PowerView hub handles one
# request at a time -- confirm).
PARALLEL_UPDATES = 1
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up the hunter douglas shades."""
    pv_data = hass.data[DOMAIN][entry.entry_id]
    room_data = pv_data[PV_ROOM_DATA]
    shade_data = pv_data[PV_SHADE_DATA]
    pv_request = pv_data[PV_API]
    coordinator = pv_data[COORDINATOR]
    device_info = pv_data[DEVICE_INFO]

    entities = []
    for raw_shade in shade_data.values():
        # The shade may be out of sync with the hub
        # so we force a refresh when we add it if
        # possible
        shade = PvShade(raw_shade, pv_request)
        name_before_refresh = shade.name
        try:
            async with async_timeout.timeout(1):
                await shade.refresh()
        except asyncio.TimeoutError:
            # Forced refresh is not required for setup
            pass
        # Without position data the entity could never report state; skip it.
        if ATTR_POSITION_DATA not in shade.raw_data:
            _LOGGER.info(
                "The %s shade was skipped because it is missing position data",
                name_before_refresh,
            )
            continue
        entities.append(
            PowerViewShade(
                shade, name_before_refresh, room_data, coordinator, device_info
            )
        )
    async_add_entities(entities)
def hd_position_to_hass(hd_position):
    """Convert hunter douglas position to hass position."""
    # Scale native 0..MAX_POSITION onto the 0..100 range Home Assistant uses.
    return round((hd_position / MAX_POSITION) * 100)
def hass_position_to_hd(hass_positon):
    """Convert hass position to hunter douglas position."""
    # Inverse of hd_position_to_hass.  NOTE(review): the parameter-name typo
    # "hass_positon" is pre-existing; kept for keyword-call compatibility.
    return int(hass_positon / 100 * MAX_POSITION)
class PowerViewShade(ShadeEntity, CoverEntity):
"""Representation of a powerview shade."""
def __init__(self, shade, name, room_data, coordinator, device_info):
"""Initialize the shade."""
room_id = shade.raw_data.get(ROOM_ID_IN_SHADE)
super().__init__(coordinator, device_info, shade, name)
self._shade = shade
self._device_info = device_info
self._is_opening = False
self._is_closing = False
self._last_action_timestamp = 0
self._scheduled_transition_update = None
self._room_name = room_data.get(room_id, {}).get(ROOM_NAME_UNICODE, "")
self._current_cover_position = MIN_POSITION
self._coordinator = coordinator
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {STATE_ATTRIBUTE_ROOM_NAME: self._room_name}
@property
def supported_features(self):
"""Flag supported features."""
supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
if self._device_info[DEVICE_MODEL] != LEGACY_DEVICE_MODEL:
supported_features |= SUPPORT_STOP
return supported_features
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._current_cover_position == MIN_POSITION
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def current_cover_position(self):
"""Return the current position of cover."""
return hd_position_to_hass(self._current_cover_position)
@property
def device_class(self):
"""Return device class."""
return DEVICE_CLASS_SHADE
@property
def name(self):
"""Return the name of the shade."""
return self._shade_name
async def async_close_cover(self, **kwargs):
"""Close the cover."""
await self._async_move(0)
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self._async_move(100)
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
# Cancel any previous updates
self._async_cancel_scheduled_transition_update()
self._async_update_from_command(await self._shade.stop())
await self._async_force_refresh_state()
async def async_set_cover_position(self, **kwargs):
"""Move the shade to a specific position."""
if ATTR_POSITION not in kwargs:
return
await self._async_move(kwargs[ATTR_POSITION])
async def _async_move(self, target_hass_position):
"""Move the shade to a position."""
current_hass_position = hd_position_to_hass(self._current_cover_position)
steps_to_move = abs(current_hass_position - target_hass_position)
if not steps_to_move:
return
self._async_schedule_update_for_transition(steps_to_move)
self._async_update_from_command(
await self._shade.move(
{
ATTR_POSITION1: hass_position_to_hd(target_hass_position),
ATTR_POSKIND1: 1,
}
)
)
self._is_opening = False
self._is_closing = False
if target_hass_position > current_hass_position:
self._is_opening = True
elif target_hass_position < current_hass_position:
self._is_closing = True
self.async_write_ha_state()
@callback
def _async_update_from_command(self, raw_data):
"""Update the shade state after a command."""
if not raw_data or SHADE_RESPONSE not in raw_data:
return
self._async_process_new_shade_data(raw_data[SHADE_RESPONSE])
@callback
def _async_process_new_shade_data(self, data):
"""Process new data from an update."""
self._shade.raw_data = data
self._async_update_current_cover_position()
@callback
def _async_update_current_cover_position(self):
"""Update the current cover position from the data."""
_LOGGER.debug("Raw data update: %s", self._shade.raw_data)
position_data = self._shade.raw_data[ATTR_POSITION_DATA]
if ATTR_POSITION1 in position_data:
self._current_cover_position = position_data[ATTR_POSITION1]
self._is_opening = False
self._is_closing = False
@callback
def _async_cancel_scheduled_transition_update(self):
"""Cancel any previous updates."""
if not self._scheduled_transition_update:
return
self._scheduled_transition_update()
self._scheduled_transition_update = None
@callback
def _async_schedule_update_for_transition(self, steps):
self.async_write_ha_state()
# Cancel any previous updates
self._async_cancel_scheduled_transition_update()
est_time_to_complete_transition = 1 + int(
TRANSITION_COMPLETE_DURATION * (steps / 100)
)
_LOGGER.debug(
"Estimated time to complete transition of %s steps for %s: %s",
steps,
self.name,
est_time_to_complete_transition,
)
# Schedule an update for when we expect the transition
# to be completed.
self._scheduled_transition_update = async_call_later(
self.hass,
est_time_to_complete_transition,
self._async_complete_schedule_update,
)
async def _async_complete_schedule_update(self, _):
    """Run the deferred refresh once the transition should have finished."""
    _LOGGER.debug("Processing scheduled update for %s", self.name)
    # Clear the handle first so the refresh below is not suppressed.
    self._scheduled_transition_update = None
    await self._async_force_refresh_state()
async def _async_force_refresh_state(self):
    """Bypass the device cache: poll the shade, then publish fresh state."""
    await self._shade.refresh()
    self._async_update_current_cover_position()
    self.async_write_ha_state()
async def async_added_to_hass(self):
    """Initialize state and subscribe to coordinator updates on add."""
    self._async_update_current_cover_position()
    remove_listener = self._coordinator.async_add_listener(
        self._async_update_shade_from_group
    )
    self.async_on_remove(remove_listener)
@callback
def _async_update_shade_from_group(self):
    """Handle a coordinator push with fresh data for this shade."""
    if self._scheduled_transition_update:
        # A transition is in progress; coordinator data would be stale.
        return
    new_data = self._coordinator.data[self._shade.id]
    self._async_process_new_shade_data(new_data)
    self.async_write_ha_state()
| apache-2.0 |
ethanfrey/aiocouchdb | aiocouchdb/v1/tests/test_server.py | 2 | 9931 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2014-2015 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
#
import asyncio
import aiocouchdb.client
import aiocouchdb.feeds
import aiocouchdb.v1.config
import aiocouchdb.v1.server
import aiocouchdb.v1.session
from . import utils
class ServerTestCase(utils.ServerTestCase):
    """Tests for :class:`aiocouchdb.v1.server.Server`.

    Requests are intercepted by the ``utils`` test harness; ``self.response``
    installs a canned HTTP response and ``assert_request_called_with``
    verifies the outgoing request.
    """

    def test_init_with_url(self):
        self.assertIsInstance(self.server.resource, aiocouchdb.client.Resource)

    def test_init_with_resource(self):
        res = aiocouchdb.client.Resource(self.url)
        server = aiocouchdb.v1.server.Server(res)
        self.assertIsInstance(server.resource, aiocouchdb.client.Resource)
        self.assertEqual(self.url, self.server.resource.url)

    def test_info(self):
        with self.response(data=b'{}'):
            result = yield from self.server.info()
            self.assert_request_called_with('GET')
        self.assertIsInstance(result, dict)

    def test_active_tasks(self):
        with self.response(data=b'[]'):
            result = yield from self.server.active_tasks()
            self.assert_request_called_with('GET', '_active_tasks')
        self.assertIsInstance(result, list)

    def test_all_dbs(self):
        with self.response(data=b'[]'):
            result = yield from self.server.all_dbs()
            self.assert_request_called_with('GET', '_all_dbs')
        self.assertIsInstance(result, list)

    def test_authdb(self):
        db = self.server.authdb
        # Accessing the property must not trigger any HTTP request.
        self.assertFalse(self.request.called)
        self.assertIsInstance(db, self.server.authdb_class)

    def test_authdb_custom_class(self):
        class CustomDatabase(object):
            def __init__(self, thing, **kwargs):
                self.resource = thing

        server = aiocouchdb.v1.server.Server(authdb_class=CustomDatabase)
        db = server.authdb
        self.assertFalse(self.request.called)
        self.assertIsInstance(db, server.authdb_class)

    def test_authdb_name(self):
        self.assertEqual(self.server.authdb.name, '_users')
        server = aiocouchdb.v1.server.Server(authdb_name='_authdb')
        self.assertEqual(server.authdb.name, '_authdb')

    def test_config(self):
        self.assertIsInstance(self.server.config,
                              aiocouchdb.v1.config.ServerConfig)

    def test_custom_config(self):
        class CustomConfig(object):
            def __init__(self, thing):
                self.resource = thing

        server = aiocouchdb.v1.server.Server(config_class=CustomConfig)
        self.assertIsInstance(server.config, CustomConfig)

    def test_database(self):
        result = yield from self.server.db('db')
        self.assert_request_called_with('HEAD', 'db')
        self.assertIsInstance(result, self.server.database_class)

    def test_database_custom_class(self):
        class CustomDatabase(object):
            def __init__(self, thing, **kwargs):
                self.resource = thing

        server = aiocouchdb.v1.server.Server(self.url,
                                             database_class=CustomDatabase)
        result = yield from server.db('db')
        self.assert_request_called_with('HEAD', 'db')
        self.assertIsInstance(result, CustomDatabase)
        self.assertIsInstance(result.resource, aiocouchdb.client.Resource)

    def test_database_get_item(self):
        # Item access returns a Database object without issuing a request.
        db = self.server['db']
        with self.assertRaises(AssertionError):
            self.assert_request_called_with('HEAD', 'db')
        self.assertIsInstance(db, self.server.database_class)

    def trigger_db_update(self, db):
        """Schedule a background document update to produce a _db_updates event."""
        @asyncio.coroutine
        def task():
            yield from asyncio.sleep(0.1)
            yield from db[utils.uuid()].update({})
        asyncio.Task(task())

    @utils.using_database()
    def test_db_updates(self, db):
        self.trigger_db_update(db)
        with self.response(data=('{"db_name": "%s"}' % db.name).encode()):
            event = yield from self.server.db_updates()
            self.assert_request_called_with('GET', '_db_updates')
        self.assertIsInstance(event, dict)
        self.assertEqual(event['db_name'], db.name, event)

    @utils.using_database()
    def test_db_updates_feed_continuous(self, db):
        self.trigger_db_update(db)
        with self.response(data=('{"db_name": "%s"}' % db.name).encode()):
            feed = yield from self.server.db_updates(feed='continuous',
                                                     timeout=1000,
                                                     heartbeat=False)
            self.assert_request_called_with('GET', '_db_updates',
                                            params={'feed': 'continuous',
                                                    'timeout': 1000,
                                                    'heartbeat': False})
            self.assertIsInstance(feed, aiocouchdb.feeds.JsonFeed)
            # Drain the feed; every event must belong to the test database.
            while True:
                event = yield from feed.next()
                if event is None:
                    break
                self.assertEqual(event['db_name'], db.name, event)

    @utils.using_database()
    def test_db_updates_feed_eventsource(self, db):
        self.trigger_db_update(db)
        with self.response(data=('data: {"db_name": "%s"}' % db.name).encode()):
            feed = yield from self.server.db_updates(feed='eventsource',
                                                     timeout=1000,
                                                     heartbeat=False)
            self.assert_request_called_with('GET', '_db_updates',
                                            params={'feed': 'eventsource',
                                                    'timeout': 1000,
                                                    'heartbeat': False})
            self.assertIsInstance(feed, aiocouchdb.feeds.EventSourceFeed)
            while True:
                event = yield from feed.next()
                if event is None:
                    break
                self.assertEqual(event['data']['db_name'], db.name, event)

    def test_log(self):
        result = yield from self.server.log()
        self.assert_request_called_with('GET', '_log')
        self.assertIsInstance(result, str)

    @utils.using_database('source')
    @utils.using_database('target')
    def test_replicate(self, source, target):
        with self.response(data=b'[]'):
            yield from utils.populate_database(source, 10)
        with self.response(data=b'{"history": [{"docs_written": 10}]}'):
            info = yield from self.server.replicate(source.name, target.name)
            self.assert_request_called_with(
                'POST', '_replicate', data={'source': source.name,
                                            'target': target.name})
        self.assertEqual(info['history'][0]['docs_written'], 10)

    @utils.run_for('mock')
    def test_replicate_kwargs(self):
        # Each supported keyword must be forwarded verbatim in the POST body.
        all_kwargs = {
            'cancel': True,
            'continuous': True,
            'create_target': False,
            'doc_ids': ['foo', 'bar', 'baz'],
            'filter': '_design/filter',
            'proxy': 'http://localhost:8080',
            'query_params': {'test': 'passed'},
            'since_seq': 0,
            'checkpoint_interval': 5000,
            'connection_timeout': 60000,
            'http_connections': 10,
            'retries_per_request': 10,
            'socket_options': '[]',
            'use_checkpoints': True,
            'worker_batch_size': 200,
            'worker_processes': 4
        }
        for key, value in all_kwargs.items():
            yield from self.server.replicate('source', 'target',
                                             **{key: value})
            data = {'source': 'source', 'target': 'target', key: value}
            self.assert_request_called_with('POST', '_replicate', data=data)

    @utils.run_for('mock')
    def test_restart(self):
        yield from self.server.restart()
        self.assert_request_called_with('POST', '_restart')

    def test_session(self):
        self.assertIsInstance(self.server.session,
                              aiocouchdb.v1.session.Session)

    def test_custom_session(self):
        class CustomSession(object):
            def __init__(self, thing):
                self.resource = thing

        server = aiocouchdb.v1.server.Server(session_class=CustomSession)
        self.assertIsInstance(server.session, CustomSession)

    def test_stats(self):
        yield from self.server.stats()
        self.assert_request_called_with('GET', '_stats')

    def test_stats_flush(self):
        yield from self.server.stats(flush=True)
        self.assert_request_called_with('GET', '_stats', params={'flush': True})

    def test_stats_range(self):
        yield from self.server.stats(range=60)
        self.assert_request_called_with('GET', '_stats', params={'range': 60})

    def test_stats_single_metric(self):
        yield from self.server.stats('httpd/requests')
        self.assert_request_called_with('GET', '_stats', 'httpd', 'requests')

    def test_stats_invalid_metric(self):
        # A bare section without a metric name is rejected client-side.
        with self.assertRaises(ValueError):
            yield from self.server.stats('httpd')

    def test_uuids(self):
        with self.response(data=b'{"uuids": ["..."]}'):
            result = yield from self.server.uuids()
            self.assert_request_called_with('GET', '_uuids')
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 1)

    def test_uuids_count(self):
        with self.response(data=b'{"uuids": ["...", "..."]}'):
            result = yield from self.server.uuids(count=2)
            self.assert_request_called_with('GET', '_uuids',
                                            params={'count': 2})
        self.assertIsInstance(result, list)
        self.assertEqual(len(result), 2)
| bsd-2-clause |
gameduell/duell | bin/win/python2.7.9/Lib/site-packages/pip/_vendor/requests/packages/chardet/sbcsgroupprober.py | 2936 | 3291 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .sbcharsetprober import SingleByteCharSetProber
from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel,
Latin5CyrillicModel, MacCyrillicModel,
Ibm866Model, Ibm855Model)
from .langgreekmodel import Latin7GreekModel, Win1253GreekModel
from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel
from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel
from .langthaimodel import TIS620ThaiModel
from .langhebrewmodel import Win1255HebrewModel
from .hebrewprober import HebrewProber
class SBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every single-byte charset model in turn."""

    def __init__(self):
        CharSetGroupProber.__init__(self)
        # One prober per language model; list order is preserved so earlier
        # entries win ties during detection.
        models = (
            Win1251CyrillicModel,
            Koi8rModel,
            Latin5CyrillicModel,
            MacCyrillicModel,
            Ibm866Model,
            Ibm855Model,
            Latin7GreekModel,
            Win1253GreekModel,
            Latin5BulgarianModel,
            Win1251BulgarianModel,
            Latin2HungarianModel,
            Win1250HungarianModel,
            TIS620ThaiModel,
        )
        self._mProbers = [SingleByteCharSetProber(model) for model in models]
        # Hebrew needs a dedicated prober that arbitrates between the
        # logical and visual variants of the same windows-1255 model.
        hebrew_prober = HebrewProber()
        logical_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                 False, hebrew_prober)
        visual_hebrew = SingleByteCharSetProber(Win1255HebrewModel,
                                                True, hebrew_prober)
        hebrew_prober.set_model_probers(logical_hebrew, visual_hebrew)
        self._mProbers.extend([hebrew_prober, logical_hebrew, visual_hebrew])
        self.reset()
| bsd-2-clause |
edx/lettuce | tests/integration/lib/Django-1.3/tests/regressiontests/forms/localflavor/ch.py | 89 | 2604 | from django.contrib.localflavor.ch.forms import (CHZipCodeField,
CHPhoneNumberField, CHIdentityCardNumberField, CHStateSelect)
from utils import LocalFlavorTestCase
class CHLocalFlavorTests(LocalFlavorTestCase):
    """Form-field tests for the Swiss (CH) localflavor helpers."""

    def test_CHStateSelect(self):
        # The widget must render one <option> per canton, with the bound
        # value ('AG') marked as selected.
        f = CHStateSelect()
        out = u'''<select name="state">
<option value="AG" selected="selected">Aargau</option>
<option value="AI">Appenzell Innerrhoden</option>
<option value="AR">Appenzell Ausserrhoden</option>
<option value="BS">Basel-Stadt</option>
<option value="BL">Basel-Land</option>
<option value="BE">Berne</option>
<option value="FR">Fribourg</option>
<option value="GE">Geneva</option>
<option value="GL">Glarus</option>
<option value="GR">Graubuenden</option>
<option value="JU">Jura</option>
<option value="LU">Lucerne</option>
<option value="NE">Neuchatel</option>
<option value="NW">Nidwalden</option>
<option value="OW">Obwalden</option>
<option value="SH">Schaffhausen</option>
<option value="SZ">Schwyz</option>
<option value="SO">Solothurn</option>
<option value="SG">St. Gallen</option>
<option value="TG">Thurgau</option>
<option value="TI">Ticino</option>
<option value="UR">Uri</option>
<option value="VS">Valais</option>
<option value="VD">Vaud</option>
<option value="ZG">Zug</option>
<option value="ZH">Zurich</option>
</select>'''
        self.assertEqual(f.render('state', 'AG'), out)

    def test_CHZipCodeField(self):
        # Swiss zip codes are exactly four digits.
        error_format = [u'Enter a zip code in the format XXXX.']
        valid = {
            '1234': '1234',
            '0000': '0000',
        }
        invalid = {
            '800x': error_format,
            '80 00': error_format,
        }
        self.assertFieldOutput(CHZipCodeField, valid, invalid)

    def test_CHPhoneNumberField(self):
        # Numbers are normalized to the '0XX XXX XX XX' display format.
        error_format = [u'Phone numbers must be in 0XX XXX XX XX format.']
        valid = {
            '012 345 67 89': '012 345 67 89',
            '0123456789': '012 345 67 89',
        }
        invalid = {
            '01234567890': error_format,
            '1234567890': error_format,
        }
        self.assertFieldOutput(CHPhoneNumberField, valid, invalid)

    def test_CHIdentityCardNumberField(self):
        # Accepts both the X1234567<0 and the all-numeric 1234567890 forms;
        # the trailing digit is a checksum.
        error_format = [u'Enter a valid Swiss identity or passport card number in X1234567<0 or 1234567890 format.']
        valid = {
            'C1234567<0': 'C1234567<0',
            '2123456700': '2123456700',
        }
        invalid = {
            'C1234567<1': error_format,
            '2123456701': error_format,
        }
        self.assertFieldOutput(CHIdentityCardNumberField, valid, invalid)
| gpl-3.0 |
wzhongyuan/BigDL | pyspark/test/dev/pep8-1.7.0.py | 15 | 82481 | #!/usr/bin/env python
# pep8.py - Check Python source code formatting, according to PEP 8
# Copyright (C) 2006-2009 Johann C. Rocholl <johann@rocholl.net>
# Copyright (C) 2009-2014 Florent Xicluna <florent.xicluna@gmail.com>
# Copyright (C) 2014-2016 Ian Lee <ianlee1521@gmail.com>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
r"""
Check Python source code formatting, according to PEP 8.
For usage and a list of options, try this:
$ python pep8.py -h
This program and its regression test suite live here:
https://github.com/pycqa/pep8
Groups of errors and warnings:
E errors
W warnings
100 indentation
200 whitespace
300 blank lines
400 imports
500 line length
600 deprecation
700 statements
900 syntax error
"""
from __future__ import with_statement
import os
import sys
import re
import time
import inspect
import keyword
import tokenize
from optparse import OptionParser
from fnmatch import fnmatch
try:
from configparser import RawConfigParser
from io import TextIOWrapper
except ImportError:
from ConfigParser import RawConfigParser
__version__ = '1.7.0'

# Comma-separated glob patterns skipped by default when walking directories.
DEFAULT_EXCLUDE = '.svn,CVS,.bzr,.hg,.git,__pycache__,.tox'
# Checks disabled by default.
DEFAULT_IGNORE = 'E121,E123,E126,E226,E24,E704'
try:
    if sys.platform == 'win32':
        USER_CONFIG = os.path.expanduser(r'~\.pep8')
    else:
        USER_CONFIG = os.path.join(
            os.getenv('XDG_CONFIG_HOME') or os.path.expanduser('~/.config'),
            'pep8'
        )
except ImportError:
    USER_CONFIG = None

PROJECT_CONFIG = ('setup.cfg', 'tox.ini', '.pep8')
TESTSUITE_PATH = os.path.join(os.path.dirname(__file__), 'testsuite')
# NOTE(review): upstream pep8 defaults to 79; this copy raises the limit.
MAX_LINE_LENGTH = 100
REPORT_FORMAT = {
    'default': '%(path)s:%(row)d:%(col)d: %(code)s %(text)s',
    'pylint': '%(path)s:%(row)d: [%(code)s] %(text)s',
}

PyCF_ONLY_AST = 1024
SINGLETONS = frozenset(['False', 'None', 'True'])
KEYWORDS = frozenset(keyword.kwlist + ['print']) - SINGLETONS
UNARY_OPERATORS = frozenset(['>>', '**', '*', '+', '-'])
ARITHMETIC_OP = frozenset(['**', '*', '/', '//', '+', '-'])
WS_OPTIONAL_OPERATORS = ARITHMETIC_OP.union(['^', '&', '|', '<<', '>>', '%'])
WS_NEEDED_OPERATORS = frozenset([
    '**=', '*=', '/=', '//=', '+=', '-=', '!=', '<>', '<', '>',
    '%=', '^=', '&=', '|=', '==', '<=', '>=', '<<=', '>>=', '='])
WHITESPACE = frozenset(' \t')
NEWLINE = frozenset([tokenize.NL, tokenize.NEWLINE])
SKIP_TOKENS = NEWLINE.union([tokenize.INDENT, tokenize.DEDENT])
# ERRORTOKEN is triggered by backticks in Python 3
SKIP_COMMENTS = SKIP_TOKENS.union([tokenize.COMMENT, tokenize.ERRORTOKEN])
BENCHMARK_KEYS = ['directories', 'files', 'logical lines', 'physical lines']

INDENT_REGEX = re.compile(r'([ \t]*)')
RAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,')
RERAISE_COMMA_REGEX = re.compile(r'raise\s+\w+\s*,.*,\s*\w+\s*$')
ERRORCODE_REGEX = re.compile(r'\b[A-Z]\d{3}\b')
DOCSTRING_REGEX = re.compile(r'u?r?["\']')
EXTRANEOUS_WHITESPACE_REGEX = re.compile(r'[[({] | []}),;:]')
# Two spaces (or a tab) after a comma/semicolon/colon (E24x checks).
WHITESPACE_AFTER_COMMA_REGEX = re.compile(r'[,;:]\s*(?:  |\t)')
COMPARE_SINGLETON_REGEX = re.compile(r'(\bNone|\bFalse|\bTrue)?\s*([=!]=)'
                                     r'\s*(?(1)|(None|False|True))\b')
COMPARE_NEGATIVE_REGEX = re.compile(r'\b(not)\s+[^][)(}{ ]+\s+(in|is)\s')
COMPARE_TYPE_REGEX = re.compile(r'(?:[=!]=|is(?:\s+not)?)\s*type(?:s.\w+Type'
                                r'|\s*\(\s*([^)]*[^ )])\s*\))')
KEYWORD_REGEX = re.compile(r'(\s*)\b(?:%s)\b(\s*)' % r'|'.join(KEYWORDS))
OPERATOR_REGEX = re.compile(r'(?:[^,\s])(\s*)(?:[-+*/|!<=>%&^]+)(\s*)')
LAMBDA_REGEX = re.compile(r'\blambda\b')
HUNK_REGEX = re.compile(r'^@@ -\d+(?:,\d+)? \+(\d+)(?:,(\d+))? @@.*$')

# Work around Python < 2.6 behaviour, which does not generate NL after
# a comment which is on a line by itself.
COMMENT_WITH_NL = tokenize.generate_tokens(['#\n'].pop).send(None)[1] == '#\n'
##############################################################################
# Plugins (check functions) for physical lines
##############################################################################
def tabs_or_spaces(physical_line, indent_char):
    r"""Never mix tabs and spaces.

    The most popular way of indenting Python is with spaces only.  The
    second-most popular way is with tabs only.  Code indented with a mixture
    of tabs and spaces should be converted to using spaces exclusively.  When
    invoking the Python command line interpreter with the -t option, it issues
    warnings about code that illegally mixes tabs and spaces.  When using -tt
    these warnings become errors.  These options are highly recommended!

    Okay: if a == 0:\n        a = 1\n        b = 1
    E101: if a == 0:\n        a = 1\n\tb = 1
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    # First indentation character that disagrees with the file's dominant one.
    mismatch = next(
        (pos for pos, ch in enumerate(indent) if ch != indent_char), None)
    if mismatch is not None:
        return mismatch, "E101 indentation contains mixed spaces and tabs"
def tabs_obsolete(physical_line):
    r"""For new projects, spaces-only are strongly recommended over tabs.

    Okay: if True:\n    return
    W191: if True:\n\treturn
    """
    indent = INDENT_REGEX.match(physical_line).group(1)
    tab_pos = indent.find('\t')
    if tab_pos != -1:
        return tab_pos, "W191 indentation contains tabs"
def trailing_whitespace(physical_line):
    r"""Trailing whitespace is superfluous.

    The warning returned varies on whether the line itself is blank, for easier
    filtering for those who want to indent their blank lines.

    Okay: spam(1)\n#
    W291: spam(1) \n#
    W293: class Foo(object):\n    \n    bang = 12
    """
    # Strip the line terminator(s) one class at a time; the order matters
    # so that an interior newline is not eaten by a combined rstrip.
    line = physical_line.rstrip('\n')    # chr(10), newline
    line = line.rstrip('\r')             # chr(13), carriage return
    line = line.rstrip('\x0c')           # chr(12), form feed, ^L
    stripped = line.rstrip(' \t\v')
    if stripped == line:
        return None
    if stripped:
        return len(stripped), "W291 trailing whitespace"
    return 0, "W293 blank line contains whitespace"
def trailing_blank_lines(physical_line, lines, line_number, total_lines):
    r"""Trailing blank lines are superfluous.

    Okay: spam(1)
    W391: spam(1)\n

    However the last line should end with a new line (warning W292).
    """
    # Only the very last physical line of the file is of interest here.
    if line_number != total_lines:
        return None
    stripped = physical_line.rstrip()
    if not stripped:
        return 0, "W391 blank line at end of file"
    if stripped == physical_line:
        # No line terminator at all on the final line.
        return len(physical_line), "W292 no newline at end of file"
    return None
def maximum_line_length(physical_line, max_line_length, multiline):
    r"""Limit all lines to a maximum of 79 characters.

    There are still many devices around that are limited to 80 character
    lines; plus, limiting windows to 80 characters makes it possible to have
    several windows side-by-side.  The default wrapping on such devices looks
    ugly.  Therefore, please limit all lines to a maximum of 79 characters.
    For flowing long blocks of text (docstrings or comments), limiting the
    length to 72 characters is recommended.

    Reports error E501.
    """
    line = physical_line.rstrip()
    length = len(line)
    if length > max_line_length and not noqa(line):
        # Special case for long URLs in multi-line docstrings or comments,
        # but still report the error when the 72 first chars are whitespaces.
        chunks = line.split()
        if ((len(chunks) == 1 and multiline) or
            (len(chunks) == 2 and chunks[0] == '#')) and \
                len(line) - len(chunks[-1]) < max_line_length - 7:
            return
        if hasattr(line, 'decode'):   # Python 2
            # The line could contain multi-byte characters; measure the
            # decoded length so wide text is not over-reported.
            try:
                length = len(line.decode('utf-8'))
            except UnicodeError:
                pass
        if length > max_line_length:
            return (max_line_length, "E501 line too long "
                    "(%d > %d characters)" % (length, max_line_length))
##############################################################################
# Plugins (check functions) for logical lines
##############################################################################
def blank_lines(logical_line, blank_lines, indent_level, line_number,
                blank_before, previous_logical, previous_indent_level):
    r"""Separate top-level function and class definitions with two blank lines.

    Method definitions inside a class are separated by a single blank line.

    Extra blank lines may be used (sparingly) to separate groups of related
    functions.  Blank lines may be omitted between a bunch of related
    one-liners (e.g. a set of dummy implementations).

    Use blank lines in functions, sparingly, to indicate logical sections.

    Okay: def a():\n    pass\n\n\ndef b():\n    pass
    Okay: def a():\n    pass\n\n\n# Foo\n# Bar\n\ndef b():\n    pass

    E301: class Foo:\n    b = 0\n    def bar():\n        pass
    E302: def a():\n    pass\n\ndef b(n):\n    pass
    E303: def a():\n    pass\n\n\n\ndef b(n):\n    pass
    E303: def a():\n\n\n\n    pass
    E304: @decorator\n\ndef a():\n    pass
    """
    if line_number < 3 and not previous_logical:
        return  # no blank-line checks apply before the first logical line
    if previous_logical.startswith('@'):
        # A decorator must be glued to the definition it decorates.
        if blank_lines:
            yield 0, "E304 blank lines found after function decorator"
    elif blank_lines > 2 or (indent_level and blank_lines == 2):
        yield 0, "E303 too many blank lines (%d)" % blank_lines
    elif logical_line.startswith(('def ', 'class ', '@')):
        if indent_level:
            # A nested definition needs one separating blank line unless it
            # directly follows its class header or a docstring.
            separated = (blank_before or
                         previous_indent_level < indent_level or
                         DOCSTRING_REGEX.match(previous_logical))
            if not separated:
                yield 0, "E301 expected 1 blank line, found 0"
        elif blank_before != 2:
            yield 0, "E302 expected 2 blank lines, found %d" % blank_before
def extraneous_whitespace(logical_line):
    r"""Avoid extraneous whitespace.

    Avoid extraneous whitespace in these situations:
    - Immediately inside parentheses, brackets or braces.
    - Immediately before a comma, semicolon, or colon.

    Okay: spam(ham[1], {eggs: 2})
    E201: spam( ham[1], {eggs: 2})
    E201: spam(ham[ 1], {eggs: 2})
    E201: spam(ham[1], { eggs: 2})
    E202: spam(ham[1], {eggs: 2} )
    E202: spam(ham[1 ], {eggs: 2})
    E202: spam(ham[1], {eggs: 2 })

    E203: if x == 4: print x, y; x, y = y , x
    E203: if x == 4: print x, y ; x, y = y, x
    E203: if x == 4 : print x, y; x, y = y, x
    """
    line = logical_line
    for match in EXTRANEOUS_WHITESPACE_REGEX.finditer(line):
        text = match.group()
        char = text.strip()
        found = match.start()
        if text.endswith(' '):
            # Matched the "open bracket followed by a space" alternative.
            yield found + 1, "E201 whitespace after '%s'" % char
        elif line[found - 1] != ',':
            # Space before a closer or separator (unless preceded by a comma).
            code = 'E202' if char in '}])' else 'E203'
            yield found, "%s whitespace before '%s'" % (code, char)
def whitespace_around_keywords(logical_line):
    r"""Avoid extraneous whitespace around keywords.

    Okay: True and False
    E271: True and  False
    E272: True  and False
    E273: True and\tFalse
    E274: True\tand False
    """
    for match in KEYWORD_REGEX.finditer(logical_line):
        # Group 1 is the whitespace before the keyword, group 2 the
        # whitespace after it; the two sides report different codes.
        for idx, tab_msg, space_msg in (
                (1, "E274 tab before keyword",
                 "E272 multiple spaces before keyword"),
                (2, "E273 tab after keyword",
                 "E271 multiple spaces after keyword")):
            ws = match.group(idx)
            if '\t' in ws:
                yield match.start(idx), tab_msg
            elif len(ws) > 1:
                yield match.start(idx), space_msg
def missing_whitespace(logical_line):
    r"""Each comma, semicolon or colon should be followed by whitespace.

    Okay: [a, b]
    Okay: (3,)
    Okay: a[1:4]
    Okay: a[:4]
    Okay: a[1:]
    Okay: a[1:4:2]
    E231: ['a','b']
    E231: foo(bar,baz)
    E231: [{'a':'b'}]
    """
    line = logical_line
    # Walk adjacent character pairs; the final character needs no check.
    for index, (char, nxt) in enumerate(zip(line, line[1:])):
        if char not in ',;:' or nxt in WHITESPACE:
            continue
        before = line[:index]
        if (char == ':' and before.count('[') > before.count(']') and
                before.rfind('{') < before.rfind('[')):
            continue  # Slice syntax, no space required
        if char == ',' and nxt == ')':
            continue  # Allow tuple with only one element: (3,)
        yield index, "E231 missing whitespace after '%s'" % char
def indentation(logical_line, previous_logical, indent_char,
                indent_level, previous_indent_level):
    r"""Use 4 spaces per indentation level.

    For really old code that you don't want to mess up, you can continue to
    use 8-space tabs.

    Okay: a = 1
    Okay: if a == 0:\n    a = 1
    E111:   a = 1
    E114:   # a = 1

    Okay: for item in items:\n    pass
    E112: for item in items:\npass
    E115: for item in items:\n# Hi\n    pass

    Okay: a = 1\nb = 2
    E113: a = 1\n    b = 2
    E116: a = 1\n    # b = 2
    """
    # Comment-only lines report the parallel E114-E116 codes instead.
    if logical_line:
        code_offset, tmpl = 0, "E11%d %s"
    else:
        code_offset, tmpl = 3, "E11%d %s (comment)"
    if indent_level % 4:
        yield 0, tmpl % (code_offset + 1,
                         "indentation is not a multiple of four")
    expect_indent = previous_logical.endswith(':')
    if expect_indent and indent_level <= previous_indent_level:
        yield 0, tmpl % (code_offset + 2, "expected an indented block")
    elif indent_level > previous_indent_level and not expect_indent:
        yield 0, tmpl % (code_offset + 3, "unexpected indentation")
def continued_indentation(logical_line, tokens, indent_level, hang_closing,
                          indent_char, noqa, verbose):
    r"""Continuation lines indentation.

    Continuation lines should align wrapped elements either vertically
    using Python's implicit line joining inside parentheses, brackets
    and braces, or using a hanging indent.

    When using a hanging indent these considerations should be applied:
    - there should be no arguments on the first line, and
    - further indentation should be used to clearly distinguish itself as a
      continuation line.

    Okay: a = (\n)
    E123: a = (\n    )

    Okay: a = (\n    42)
    E121: a = (\n   42)
    E122: a = (\n42)
    E123: a = (\n    42\n    )
    E124: a = (24,\n     42\n)
    E125: if (\n    b):\n    pass
    E126: a = (\n        42)
    E127: a = (24,\n      42)
    E128: a = (24,\n    42)
    E129: if (a or\n    b):\n    pass
    E131: a = (\n    42\n 24)
    """
    first_row = tokens[0][2][0]
    nrows = 1 + tokens[-1][2][0] - first_row
    if noqa or nrows == 1:
        return

    # indent_next tells us whether the next block is indented; assuming
    # that it is indented by 4 spaces, then we should not allow 4-space
    # indents on the final continuation line; in turn, some other
    # indents are allowed to have an extra 4 spaces.
    indent_next = logical_line.endswith(':')

    row = depth = 0
    valid_hangs = (4,) if indent_char != '\t' else (4, 8)
    # remember how many brackets were opened on each line
    parens = [0] * nrows
    # relative indents of physical lines
    rel_indent = [0] * nrows
    # for each depth, collect a list of opening rows
    open_rows = [[0]]
    # for each depth, memorize the hanging indentation
    hangs = [None]
    # visual indents
    indent_chances = {}
    last_indent = tokens[0][2]
    visual_indent = None
    last_token_multiline = False
    # for each depth, memorize the visual indent column
    indent = [last_indent[1]]
    if verbose >= 3:
        print(">>> " + tokens[0][4].rstrip())

    for token_type, text, start, end, line in tokens:

        newline = row < start[0] - first_row
        if newline:
            row = start[0] - first_row
            newline = not last_token_multiline and token_type not in NEWLINE

        if newline:
            # this is the beginning of a continuation line.
            last_indent = start
            if verbose >= 3:
                print("... " + line.rstrip())

            # record the initial indent.
            rel_indent[row] = expand_indent(line) - indent_level

            # identify closing bracket
            close_bracket = (token_type == tokenize.OP and text in ']})')

            # is the indent relative to an opening bracket line?
            for open_row in reversed(open_rows[depth]):
                hang = rel_indent[row] - rel_indent[open_row]
                hanging_indent = hang in valid_hangs
                if hanging_indent:
                    break
            if hangs[depth]:
                # once a hanging indent is established at this depth,
                # later lines must match it exactly
                hanging_indent = (hang == hangs[depth])
            # is there any chance of visual indent?
            visual_indent = (not close_bracket and hang > 0 and
                             indent_chances.get(start[1]))

            if close_bracket and indent[depth]:
                # closing bracket for visual indent
                if start[1] != indent[depth]:
                    yield (start, "E124 closing bracket does not match "
                           "visual indentation")
            elif close_bracket and not hang:
                # closing bracket matches indentation of opening bracket's line
                if hang_closing:
                    yield start, "E133 closing bracket is missing indentation"
            elif indent[depth] and start[1] < indent[depth]:
                if visual_indent is not True:
                    # visual indent is broken
                    yield (start, "E128 continuation line "
                           "under-indented for visual indent")
            elif hanging_indent or (indent_next and rel_indent[row] == 8):
                # hanging indent is verified
                if close_bracket and not hang_closing:
                    yield (start, "E123 closing bracket does not match "
                           "indentation of opening bracket's line")
                hangs[depth] = hang
            elif visual_indent is True:
                # visual indent is verified
                indent[depth] = start[1]
            elif visual_indent in (text, str):
                # ignore token lined up with matching one from a previous line
                pass
            else:
                # indent is broken
                if hang <= 0:
                    error = "E122", "missing indentation or outdented"
                elif indent[depth]:
                    error = "E127", "over-indented for visual indent"
                elif not close_bracket and hangs[depth]:
                    error = "E131", "unaligned for hanging indent"
                else:
                    hangs[depth] = hang
                    if hang > 4:
                        error = "E126", "over-indented for hanging indent"
                    else:
                        error = "E121", "under-indented for hanging indent"
                yield start, "%s continuation line %s" % error

        # look for visual indenting
        if (parens[row] and
                token_type not in (tokenize.NL, tokenize.COMMENT) and
                not indent[depth]):
            indent[depth] = start[1]
            indent_chances[start[1]] = True
            if verbose >= 4:
                print("bracket depth %s indent to %s" % (depth, start[1]))
        # deal with implicit string concatenation
        elif (token_type in (tokenize.STRING, tokenize.COMMENT) or
              text in ('u', 'ur', 'b', 'br')):
            indent_chances[start[1]] = str
        # special case for the "if" statement because len("if (") == 4
        elif not indent_chances and not row and not depth and text == 'if':
            indent_chances[end[1] + 1] = True
        elif text == ':' and line[end[1]:].isspace():
            open_rows[depth].append(row)

        # keep track of bracket depth
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
                indent.append(0)
                hangs.append(None)
                if len(open_rows) == depth:
                    open_rows.append([])
                open_rows[depth].append(row)
                parens[row] += 1
                if verbose >= 4:
                    print("bracket depth %s seen, col %s, visual min = %s" %
                          (depth, start[1], indent[depth]))
            elif text in ')]}' and depth > 0:
                # parent indents should not be more than this one
                prev_indent = indent.pop() or last_indent[1]
                hangs.pop()
                for d in range(depth):
                    if indent[d] > prev_indent:
                        indent[d] = 0
                for ind in list(indent_chances):
                    if ind >= prev_indent:
                        del indent_chances[ind]
                del open_rows[depth + 1:]
                depth -= 1
                if depth:
                    indent_chances[indent[depth]] = True
                for idx in range(row, -1, -1):
                    if parens[idx]:
                        parens[idx] -= 1
                        break
            assert len(indent) == depth + 1
            if start[1] not in indent_chances:
                # allow to line up tokens
                indent_chances[start[1]] = text

        last_token_multiline = (start[0] != end[0])
        if last_token_multiline:
            rel_indent[end[0] - first_row] = rel_indent[row]

    if indent_next and expand_indent(line) == indent_level + 4:
        pos = (start[0], indent[0] + 4)
        if visual_indent:
            code = "E129 visually indented line"
        else:
            code = "E125 continuation line"
        yield pos, "%s with same indent as next logical line" % code
def whitespace_before_parameters(logical_line, tokens):
    r"""Avoid extraneous whitespace.
    Avoid extraneous whitespace in the following situations:
    - before the open parenthesis that starts the argument list of a
    function call.
    - before the open parenthesis that starts an indexing or slicing.
    Okay: spam(1)
    E211: spam (1)
    Okay: dict['key'] = list[index]
    E211: dict ['key'] = list[index]
    E211: dict['key'] = list [index]
    """
    prev_type, prev_text, __, prev_end, __ = tokens[0]
    for index, token in enumerate(tokens):
        if not index:
            continue
        token_type, text, start, end, __ = token
        is_opener = token_type == tokenize.OP and text in '(['
        if is_opener and start != prev_end:
            follows_value = (prev_type == tokenize.NAME or prev_text in '}])')
            # "class A (B):" is legal syntax, so a name right after 'class'
            # is not reported here.
            after_class = index >= 2 and tokens[index - 2][1] == 'class'
            # Keywords may legitimately precede a parenthesis, e.g.
            # "return (a.foo for a in range(5))".
            if (follows_value and not after_class and
                    not keyword.iskeyword(prev_text)):
                yield prev_end, "E211 whitespace before '%s'" % text
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_operator(logical_line):
    r"""Avoid extraneous whitespace around an operator.
    Okay: a = 12 + 3
    E221: a = 4 + 5
    E222: a = 4 + 5
    E223: a = 4\t+ 5
    E224: a = 4 +\t5
    """
    # (regex group, tab message, multiple-spaces message) for each side
    # of the operator: group 1 is the whitespace before, group 2 after.
    sides = ((1, "E223 tab before operator",
              "E221 multiple spaces before operator"),
             (2, "E224 tab after operator",
              "E222 multiple spaces after operator"))
    for match in OPERATOR_REGEX.finditer(logical_line):
        for group, tab_msg, multi_msg in sides:
            whitespace = match.group(group)
            if '\t' in whitespace:
                yield match.start(group), tab_msg
            elif len(whitespace) > 1:
                yield match.start(group), multi_msg
def missing_whitespace_around_operator(logical_line, tokens):
    r"""Surround operators with a single space on either side.
    - Always surround these binary operators with a single space on
    either side: assignment (=), augmented assignment (+=, -= etc.),
    comparisons (==, <, >, !=, <=, >=, in, not in, is, is not),
    Booleans (and, or, not).
    - If operators with different priorities are used, consider adding
    whitespace around the operators with the lowest priorities.
    Okay: i = i + 1
    Okay: submitted += 1
    Okay: x = x * 2 - 1
    Okay: hypot2 = x * x + y * y
    Okay: c = (a + b) * (a - b)
    Okay: foo(bar, key='word', *args, **kwargs)
    Okay: alpha[:-i]
    E225: i=i+1
    E225: submitted +=1
    E225: x = x /2 - 1
    E225: z = x **y
    E226: c = (a+b) * (a-b)
    E226: hypot2 = x*x + y*y
    E227: c = a|b
    E228: msg = fmt%(errno, errmsg)
    """
    # need_space encodes what is expected around the operator just seen:
    #   False       -- nothing pending
    #   True        -- spaces are mandatory on both sides (WS_NEEDED_OPERATORS)
    #   None        -- transient marker: spacing is optional (resolved below
    #                  into the tuple form before the next iteration)
    #   (pos, bool) -- optional-space operator seen at pos; the bool records
    #                  whether a space preceded it, so the trailing side must
    #                  match for the spacing to be considered consistent.
    parens = 0
    need_space = False
    prev_type = tokenize.OP
    prev_text = prev_end = None
    for token_type, text, start, end, line in tokens:
        if token_type in SKIP_COMMENTS:
            continue
        # Track call/lambda nesting so '=' in "foo(bar=None)" is allowed.
        if text in ('(', 'lambda'):
            parens += 1
        elif text == ')':
            parens -= 1
        if need_space:
            if start != prev_end:
                # Found a (probably) needed space
                if need_space is not True and not need_space[1]:
                    # Space after but not before: inconsistent -> E225.
                    yield (need_space[0],
                           "E225 missing whitespace around operator")
                need_space = False
            elif text == '>' and prev_text in ('<', '-'):
                # Tolerate the "<>" operator, even if running Python 3
                # Deal with Python 3's annotated return value "->"
                pass
            else:
                if need_space is True or need_space[1]:
                    # A needed trailing space was not found
                    yield prev_end, "E225 missing whitespace around operator"
                elif prev_text != '**':
                    # No space on either side of an optional-space operator:
                    # report the operator-class-specific code instead.
                    code, optype = 'E226', 'arithmetic'
                    if prev_text == '%':
                        code, optype = 'E228', 'modulo'
                    elif prev_text not in ARITHMETIC_OP:
                        code, optype = 'E227', 'bitwise or shift'
                    yield (need_space[0], "%s missing whitespace "
                           "around %s operator" % (code, optype))
                need_space = False
        elif token_type == tokenize.OP and prev_end is not None:
            if text == '=' and parens:
                # Allow keyword args or defaults: foo(bar=None).
                pass
            elif text in WS_NEEDED_OPERATORS:
                need_space = True
            elif text in UNARY_OPERATORS:
                # Check if the operator is being used as a binary operator
                # Allow unary operators: -123, -x, +1.
                # Allow argument unpacking: foo(*args, **kwargs).
                if (prev_text in '}])' if prev_type == tokenize.OP
                        else prev_text not in KEYWORDS):
                    need_space = None
            elif text in WS_OPTIONAL_OPERATORS:
                need_space = None
            if need_space is None:
                # Surrounding space is optional, but ensure that
                # trailing space matches opening space
                need_space = (prev_end, start != prev_end)
            elif need_space and start == prev_end:
                # A needed opening space was not found
                yield prev_end, "E225 missing whitespace around operator"
                need_space = False
        prev_type = token_type
        prev_text = text
        prev_end = end
def whitespace_around_comma(logical_line):
    r"""Avoid extraneous whitespace after a comma or a colon.
    Note: these checks are disabled by default
    Okay: a = (1, 2)
    E241: a = (1, 2)
    E242: a = (1,\t2)
    """
    for m in WHITESPACE_AFTER_COMMA_REGEX.finditer(logical_line):
        # Report at the first whitespace character after the separator.
        template = ("E242 tab after '%s'" if '\t' in m.group()
                    else "E241 multiple spaces after '%s'")
        yield m.start() + 1, template % m.group()[0]
def whitespace_around_named_parameter_equals(logical_line, tokens):
    r"""Don't use spaces around the '=' sign in function arguments.
    Don't use spaces around the '=' sign when used to indicate a
    keyword argument or a default parameter value.
    Okay: def complex(real, imag=0.0):
    Okay: return magic(r=real, i=imag)
    Okay: boolean(a == b)
    Okay: boolean(a != b)
    Okay: boolean(a <= b)
    Okay: boolean(a >= b)
    Okay: def foo(arg: int = 42):
    E251: def complex(real, imag = 0.0):
    E251: return magic(r = real, i = imag)
    """
    message = "E251 unexpected spaces around keyword / parameter equals"
    depth = 0
    expect_tight = False   # the previous token was a keyword '='
    in_annotation = False  # inside "name: type" of an annotated def argument
    is_def = logical_line.startswith('def')
    last_end = None
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.NL:
            continue
        if expect_tight:
            # The token right after a keyword '=' must touch it.
            expect_tight = False
            if start != last_end:
                yield (last_end, message)
        if token_type == tokenize.OP:
            if text == '(':
                depth += 1
            elif text == ')':
                depth -= 1
            elif is_def and text == ':' and depth == 1:
                # Annotated argument: PEP 8 allows "arg: int = 42".
                in_annotation = True
            elif depth == 1 and text == ',':
                in_annotation = False
            elif depth and text == '=' and not in_annotation:
                expect_tight = True
                if start != last_end:
                    yield (last_end, message)
            if not depth:
                in_annotation = False
        last_end = end
def whitespace_before_comment(logical_line, tokens):
    r"""Separate inline comments by at least two spaces.
    An inline comment is a comment on the same line as a statement. Inline
    comments should be separated by at least two spaces from the statement.
    They should start with a # and a single space.
    Each line of a block comment starts with a # and a single space
    (unless it is indented text inside the comment).
    Okay: x = x + 1 # Increment x
    Okay: x = x + 1 # Increment x
    Okay: # Block comment
    E261: x = x + 1 # Increment x
    E262: x = x + 1 #Increment x
    E262: x = x + 1 # Increment x
    E265: #Block comment
    E266: ### Block comment
    """
    prev_end = (0, 0)
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            # Anything before the '#' on the physical line makes it inline.
            inline_comment = line[:start[1]].strip()
            if inline_comment and (prev_end[0] == start[0] and
                                   start[1] < prev_end[1] + 2):
                yield (prev_end,
                       "E261 at least two spaces before inline comment")
            symbol, _, comment = text.partition(' ')
            # bad_prefix is the first char after the leading '#'s when the
            # comment does not start with exactly "# " (or shebang-like "#:").
            bad_prefix = symbol not in '#:' and (symbol.lstrip('#')[:1] or '#')
            if inline_comment:
                if bad_prefix or comment[:1] in WHITESPACE:
                    yield start, "E262 inline comment should start with '# '"
            elif bad_prefix and (bad_prefix != '!' or start[0] > 1):
                # A shebang ('#!') is only tolerated on the first line.
                if bad_prefix != '#':
                    yield start, "E265 block comment should start with '# '"
                elif comment:
                    yield start, "E266 too many leading '#' for block comment"
        elif token_type != tokenize.NL:
            prev_end = end
def imports_on_separate_lines(logical_line):
    r"""Imports should usually be on separate lines.
    Okay: import os\nimport sys
    E401: import sys, os
    Okay: from subprocess import Popen, PIPE
    Okay: from myclas import MyClass
    Okay: from foo.bar.yourclass import YourClass
    Okay: import myclass
    Okay: import foo.bar.yourclass
    """
    if not logical_line.startswith('import '):
        return
    comma = logical_line.find(',')
    # A semicolon before the comma means the comma belongs to a later
    # statement; compound_statements reports that case instead.
    if -1 < comma and ';' not in logical_line[:comma]:
        yield comma, "E401 multiple imports on one line"
def module_imports_on_top_of_file(
        logical_line, indent_level, checker_state, noqa):
    r"""Imports are always put at the top of the file, just after any module
    comments and docstrings, and before module globals and constants.
    Okay: import os
    Okay: # this is a comment\nimport os
    Okay: '''this is a module docstring'''\nimport os
    Okay: r'''this is a module docstring'''\nimport os
    Okay: try:\n import x\nexcept:\n pass\nelse:\n pass\nimport y
    Okay: try:\n import x\nexcept:\n pass\nfinally:\n pass\nimport y
    E402: a=1\nimport os
    E402: 'One string'\n"Two string"\nimport os
    E402: a=1\nfrom sys import x
    Okay: if x:\n import os
    """
    def _looks_like_string(line):
        # Strip an optional u/b prefix, then an optional r prefix, and
        # check whether a quote character follows.
        if line[0] in 'uUbB':
            line = line[1:]
        if line and line[0] in 'rR':
            line = line[1:]
        return line and (line[0] == '"' or line[0] == "'")

    if indent_level:
        # Imports inside a function or conditional body are allowed.
        return
    if not logical_line or noqa:
        # Nothing to check on empty lines or suppressed ones.
        return
    line = logical_line
    if line.startswith('import ') or line.startswith('from '):
        if checker_state.get('seen_non_imports', False):
            yield 0, "E402 module level import not at top of file"
    elif line.startswith(('try', 'except', 'else', 'finally')):
        # try/except/else/finally may wrap conditional imports, so they
        # do not count as "code before the imports".
        return
    elif _looks_like_string(line):
        # The first string literal is the module docstring; any later
        # bare string counts as code before the imports.
        if checker_state.get('seen_docstring', False):
            checker_state['seen_non_imports'] = True
        else:
            checker_state['seen_docstring'] = True
    else:
        checker_state['seen_non_imports'] = True
def compound_statements(logical_line):
    r"""Compound statements (on the same line) are generally discouraged.
    While sometimes it's okay to put an if/for/while with a small body
    on the same line, never do this for multi-clause statements.
    Also avoid folding such long lines!
    Always use a def statement instead of an assignment statement that
    binds a lambda expression directly to a name.
    Okay: if foo == 'blah':\n do_blah_thing()
    Okay: do_one()
    Okay: do_two()
    Okay: do_three()
    E701: if foo == 'blah': do_blah_thing()
    E701: for x in lst: total += x
    E701: while t < 10: t = delay()
    E701: if foo == 'blah': do_blah_thing()
    E701: else: do_non_blah_thing()
    E701: try: something()
    E701: finally: cleanup()
    E701: if foo == 'blah': one(); two(); three()
    E702: do_one(); do_two(); do_three()
    E703: do_four(); # useless semicolon
    E704: def f(x): return 2*x
    E731: f = lambda x: 2*x
    """
    line = logical_line
    last_char = len(line) - 1
    found = line.find(':')
    while -1 < found < last_char:
        before = line[:found]
        # A colon only introduces a compound statement when every bracket
        # opened before it has been closed again; otherwise it is part of
        # a dict literal, a slice, or an annotation.
        if ((before.count('{') <= before.count('}') and  # {'a': 1} (dict)
             before.count('[') <= before.count(']') and  # [1:2] (slice)
             before.count('(') <= before.count(')'))):  # (annotation)
            lambda_kw = LAMBDA_REGEX.search(before)
            if lambda_kw:
                # Only "name = lambda ...:" (a direct binding) gets E731;
                # after any lambda, stop scanning this line for colons.
                before = line[:lambda_kw.start()].rstrip()
                if before[-1:] == '=' and isidentifier(before[:-1].strip()):
                    yield 0, ("E731 do not assign a lambda expression, use a "
                              "def")
                break
            if before.startswith('def '):
                yield 0, "E704 multiple statements on one line (def)"
            else:
                yield found, "E701 multiple statements on one line (colon)"
        found = line.find(':', found + 1)
    found = line.find(';')
    while -1 < found:
        if found < last_char:
            # Code follows the semicolon: multiple statements on one line.
            yield found, "E702 multiple statements on one line (semicolon)"
        else:
            # Trailing semicolon at end of line is useless.
            yield found, "E703 statement ends with a semicolon"
        found = line.find(';', found + 1)
def explicit_line_join(logical_line, tokens):
    r"""Avoid explicit line join between brackets.
    The preferred way of wrapping long lines is by using Python's implied line
    continuation inside parentheses, brackets and braces. Long lines can be
    broken over multiple lines by wrapping expressions in parentheses. These
    should be used in preference to using a backslash for line continuation.
    E502: aaa = [123, \\n 123]
    E502: aaa = ("bbb " \\n "ccc")
    Okay: aaa = [123,\n 123]
    Okay: aaa = ("bbb "\n "ccc")
    Okay: aaa = "bbb " \\n "ccc"
    Okay: aaa = 123 # \\
    """
    prev_start = prev_end = depth = 0
    saw_comment = False
    join_pos = None  # position of the trailing backslash, if any
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            saw_comment = True
        if start[0] != prev_start and depth and join_pos and not saw_comment:
            # New physical line, still inside brackets, and the previous
            # physical line ended with a redundant backslash.
            yield join_pos, "E502 the backslash is redundant between brackets"
        if end[0] != prev_end:
            # Remember whether this physical line ends with a backslash.
            if line.rstrip('\r\n').endswith('\\'):
                join_pos = (end[0], len(line.splitlines()[-1]) - 1)
            else:
                join_pos = None
            prev_start = prev_end = end[0]
        else:
            prev_start = start[0]
        if token_type == tokenize.OP:
            if text in '([{':
                depth += 1
            elif text in ')]}':
                depth -= 1
def break_around_binary_operator(logical_line, tokens):
    r"""
    Avoid breaks before binary operators.
    The preferred place to break around a binary operator is after the
    operator, not before it.
    W503: (width == 0\n + height == 0)
    W503: (width == 0\n and height == 0)
    Okay: (width == 0 +\n height == 0)
    Okay: foo(\n -x)
    Okay: foo(x\n [])
    Okay: x = '''\n''' + ''
    Okay: foo(x,\n -y)
    Okay: foo(x, # comment\n -y)
    """
    def _is_binary_operator(token_type, text):
        # The % character is strictly speaking a binary operator, but the
        # common usage seems to be to put it next to the format parameters,
        # after a line break.
        return ((token_type == tokenize.OP or text in ['and', 'or']) and
                text not in "()[]{},:.;@=%")

    saw_line_break = False
    unary_context = True  # the previous token makes an operator unary
    for token_type, text, start, end, line in tokens:
        if token_type == tokenize.COMMENT:
            continue
        if ('\n' in text or '\r' in text) and token_type != tokenize.STRING:
            saw_line_break = True
            continue
        if (saw_line_break and not unary_context and
                _is_binary_operator(token_type, text)):
            yield start, "W503 line break before binary operator"
        unary_context = text in '([{,;'
        saw_line_break = False
def comparison_to_singleton(logical_line, noqa):
    r"""Comparison to singletons should use "is" or "is not".
    Comparisons to singletons like None should always be done
    with "is" or "is not", never the equality operators.
    Okay: if arg is not None:
    E711: if arg != None:
    E711: if None == arg:
    E712: if arg == True:
    E712: if False == arg:
    Also, beware of writing if x when you really mean if x is not None --
    e.g. when testing whether a variable or argument that defaults to None was
    set to some other value. The other value might have a type (such as a
    container) that could be false in a boolean context!
    """
    match = not noqa and COMPARE_SINGLETON_REGEX.search(logical_line)
    if not match:
        return
    # The singleton may appear on either side of the comparison.
    singleton = match.group(1) or match.group(3)
    same = match.group(2) == '=='
    msg = "'if cond is %s:'" % (('' if same else 'not ') + singleton)
    if singleton == 'None':
        code = 'E711'
    else:
        code = 'E712'
        # For True/False also suggest the plain truthiness test.
        nonzero = ((singleton == 'True' and same) or
                   (singleton == 'False' and not same))
        msg += " or 'if %scond:'" % ('' if nonzero else 'not ')
    yield match.start(2), ("%s comparison to %s should be %s" %
                           (code, singleton, msg))
def comparison_negative(logical_line):
    r"""Negative comparison should be done using "not in" and "is not".
    Okay: if x not in y:\n pass
    Okay: assert (X in Y or X is Z)
    Okay: if not (X in Y):\n pass
    Okay: zz = x is not y
    E713: Z = not X in Y
    E713: if not X.B in Y:\n pass
    E714: if not X is Y:\n pass
    E714: Z = not X.B is Y
    """
    match = COMPARE_NEGATIVE_REGEX.search(logical_line)
    if not match:
        return
    # Group 2 is the comparison keyword that follows the "not".
    if match.group(2) == 'in':
        yield match.start(1), "E713 test for membership should be 'not in'"
    else:
        yield match.start(1), "E714 test for object identity should be 'is not'"
def comparison_type(logical_line, noqa):
    r"""Object type comparisons should always use isinstance().
    Do not compare types directly.
    Okay: if isinstance(obj, int):
    E721: if type(obj) is type(1):
    When checking if an object is a string, keep in mind that it might be a
    unicode string too! In Python 2.3, str and unicode have a common base
    class, basestring, so you can do:
    Okay: if isinstance(obj, basestring):
    Okay: if type(a1) is type(b1):
    """
    match = COMPARE_TYPE_REGEX.search(logical_line)
    if not match or noqa:
        return
    inst = match.group(1)
    if inst and isidentifier(inst) and inst not in SINGLETONS:
        # Allow comparison for types which are not obvious
        return
    yield match.start(), "E721 do not compare types, use 'isinstance()'"
def python_3000_has_key(logical_line, noqa):
    r"""The {}.has_key() method is removed in Python 3: use the 'in' operator.
    Okay: if "alph" in d:\n print d["alph"]
    W601: assert d.has_key('alph')
    """
    if noqa:
        return
    offset = logical_line.find('.has_key(')
    if offset >= 0:
        yield offset, "W601 .has_key() is deprecated, use 'in'"
def python_3000_raise_comma(logical_line):
    r"""When raising an exception, use "raise ValueError('message')".
    The older form is removed in Python 3.
    Okay: raise DummyError("Message")
    W602: raise DummyError, "Message"
    """
    match = RAISE_COMMA_REGEX.match(logical_line)
    if not match:
        return
    if RERAISE_COMMA_REGEX.match(logical_line):
        # Three-argument re-raise form is handled separately, don't flag it.
        return
    yield match.end() - 1, "W602 deprecated form of raising exception"
def python_3000_not_equal(logical_line):
    r"""New code should always use != instead of <>.
    The older syntax is removed in Python 3.
    Okay: if a != 'no':
    W603: if a <> 'no':
    """
    offset = logical_line.find('<>')
    if offset >= 0:
        yield offset, "W603 '<>' is deprecated, use '!='"
def python_3000_backticks(logical_line):
    r"""Backticks are removed in Python 3: use repr() instead.
    Okay: val = repr(1 + 2)
    W604: val = `1 + 2`
    """
    offset = logical_line.find('`')
    if offset >= 0:
        yield offset, "W604 backticks are deprecated, use 'repr()'"
##############################################################################
# Helper functions
##############################################################################
# Version-dependent helpers: reading source files, identifier testing and
# stdin access differ between Python 2 and Python 3.
if sys.version_info < (3,):
    # Python 2: implicit encoding.
    def readlines(filename):
        """Read the source code."""
        with open(filename, 'rU') as f:
            return f.readlines()
    # re-based identifier test (Python 2 has no str.isidentifier).
    isidentifier = re.compile(r'[a-zA-Z_]\w*$').match
    stdin_get_value = sys.stdin.read
else:
    # Python 3
    def readlines(filename):
        """Read the source code."""
        try:
            with open(filename, 'rb') as f:
                # Honour the PEP 263 coding declaration; detect_encoding
                # consumes up to two lines, which are decoded by hand and
                # prepended to the rest of the wrapped stream.
                (coding, lines) = tokenize.detect_encoding(f.readline)
                f = TextIOWrapper(f, coding, line_buffering=True)
                return [l.decode(coding) for l in lines] + f.readlines()
        except (LookupError, SyntaxError, UnicodeError):
            # Fall back if file encoding is improperly declared
            with open(filename, encoding='latin-1') as f:
                return f.readlines()
    isidentifier = str.isidentifier
    def stdin_get_value():
        """Read text from stdin, ignoring undecodable bytes."""
        return TextIOWrapper(sys.stdin.buffer, errors='ignore').read()
# Matches "# noqa" / "# nopep8" suppression comments (case-insensitive).
noqa = re.compile(r'# no(?:qa|pep8)\b', re.I).search
def expand_indent(line):
    r"""Return the amount of indentation.
    Tabs are expanded to the next multiple of 8.
    >>> expand_indent(' ')
    4
    >>> expand_indent('\t')
    8
    >>> expand_indent(' \t')
    8
    >>> expand_indent(' \t')
    16
    """
    if '\t' not in line:
        # Fast path: without tabs the indent is just the leading whitespace.
        return len(line) - len(line.lstrip())
    width = 0
    for ch in line:
        if ch == '\t':
            # A tab advances to the next multiple of 8.
            width = (width // 8 + 1) * 8
        elif ch == ' ':
            width += 1
        else:
            break
    return width
def mute_string(text):
    """Replace contents with 'xxx' to prevent syntax matching.
    >>> mute_string('"abc"')
    '"xxx"'
    >>> mute_string("'''abc'''")
    "'''xxx'''"
    >>> mute_string("r'abc'")
    "r'xxx'"
    """
    quote = text[-1]
    # Skip any string modifiers (e.g. u or r) before the opening quote.
    begin = text.index(quote) + 1
    stop = len(text) - 1
    # Widen the untouched region for triple-quoted strings.
    if text[-3:] in ('"""', "'''"):
        begin += 2
        stop -= 2
    return text[:begin] + 'x' * (stop - begin) + text[stop:]
def parse_udiff(diff, patterns=None, parent='.'):
    """Return a dictionary of matching lines."""
    # For each file of the diff, the entry key is the filename,
    # and the value is a set of row numbers to consider.
    rv = {}
    path = nrows = None
    for line in diff.splitlines():
        if nrows:
            # Still inside a hunk: every line that is not a deletion
            # accounts for one line of the new file.
            if line[:1] != '-':
                nrows -= 1
            continue
        if line[:3] == '@@ ':
            hunk_match = HUNK_REGEX.match(line)
            # A missing count in "@@ -R +R @@" defaults to one line.
            (row, nrows) = [int(g or '1') for g in hunk_match.groups()]
            rv[path].update(range(row, row + nrows))
        elif line[:3] == '+++':
            # Target file header; strip the "b/" prefix (git-style diffs).
            path = line[4:].split('\t', 1)[0]
            if path[:2] == 'b/':
                path = path[2:]
            rv[path] = set()
    # Keep only files that match the patterns and have changed rows.
    return dict([(os.path.join(parent, path), rows)
                 for (path, rows) in rv.items()
                 if rows and filename_match(path, patterns)])
def normalize_paths(value, parent=os.curdir):
    """Parse a comma-separated list of paths.
    Return a list of absolute paths.
    """
    if not value:
        return []
    if isinstance(value, list):
        # Already parsed: pass through untouched.
        return value
    normalized = []
    for raw in value.split(','):
        raw = raw.strip()
        if '/' in raw:
            # Multi-component paths are anchored relative to parent.
            raw = os.path.abspath(os.path.join(parent, raw))
        normalized.append(raw.rstrip('/'))
    return normalized
def filename_match(filename, patterns, default=True):
    """Check if patterns contains a pattern that matches filename.
    If patterns is unspecified, this always returns True.
    """
    if not patterns:
        return default
    for pattern in patterns:
        if fnmatch(filename, pattern):
            return True
    return False
def _is_eol_token(token):
    # True when the token ends a physical line: its type is in NEWLINE, or
    # the rest of the source line after the token is only a backslash join.
    return token[0] in NEWLINE or token[4][token[3][1]:].lstrip() == '\\\n'
if COMMENT_WITH_NL:
    def _is_eol_token(token, _eol_token=_is_eol_token):
        # On tokenizers that fold the trailing newline into a comment token,
        # a comment spanning the whole line also counts as end-of-line.
        return _eol_token(token) or (token[0] == tokenize.COMMENT and
                                     token[1] == token[4])
##############################################################################
# Framework to run all checks
##############################################################################
# Registry of check plugins, keyed by the kind of check: functions taking a
# 'physical_line' or 'logical_line' first argument, and 'tree' for AST-based
# checker classes.  Populated by register_check() below.
_checks = {'physical_line': {}, 'logical_line': {}, 'tree': {}}
def _get_parameters(function):
if sys.version_info >= (3, 3):
return [parameter.name
for parameter
in inspect.signature(function).parameters.values()
if parameter.kind == parameter.POSITIONAL_OR_KEYWORD]
else:
return inspect.getargspec(function)[0]
def register_check(check, codes=None):
    """Register a new check object."""
    def _record(kind, codes, args):
        # Registering the same check twice merges its code lists.
        entry = _checks[kind].get(check)
        if entry is not None:
            entry[0].extend(codes or [])
        else:
            _checks[kind][check] = (codes or [''], args)
    if inspect.isfunction(check):
        args = _get_parameters(check)
        if args and args[0] in ('physical_line', 'logical_line'):
            if codes is None:
                # Harvest the error codes from the docstring.
                codes = ERRORCODE_REGEX.findall(check.__doc__ or '')
            _record(args[0], codes, args)
    elif inspect.isclass(check):
        # AST checkers are classes constructed with (tree, filename).
        if _get_parameters(check.__init__)[:2] == ['self', 'tree']:
            _record('tree', codes, None)
def init_checks_registry():
    """Register all globally visible functions.
    The first argument name is either 'physical_line' or 'logical_line'.
    """
    mod = inspect.getmodule(register_check)
    for (name, function) in inspect.getmembers(mod, inspect.isfunction):
        # register_check itself ignores functions that are not checks.
        register_check(function)
# Populate the registry at import time.
init_checks_registry()
class Checker(object):
    """Load a Python source file, tokenize it, check coding style."""
    def __init__(self, filename=None, lines=None,
                 options=None, report=None, **kwargs):
        if options is None:
            options = StyleGuide(kwargs).options
        else:
            # Explicit options and loose keyword arguments are exclusive.
            assert not kwargs
        self._io_error = None
        self._physical_checks = options.physical_checks
        self._logical_checks = options.logical_checks
        self._ast_checks = options.ast_checks
        self.max_line_length = options.max_line_length
        self.multiline = False # in a multiline string?
        self.hang_closing = options.hang_closing
        self.verbose = options.verbose
        self.filename = filename
        # Dictionary where a checker can store its custom state.
        self._checker_states = {}
        if filename is None:
            self.filename = 'stdin'
            self.lines = lines or []
        elif filename == '-':
            self.filename = 'stdin'
            self.lines = stdin_get_value().splitlines(True)
        elif lines is None:
            try:
                self.lines = readlines(filename)
            except IOError:
                # Remember the error; it is reported later by generate_tokens.
                (exc_type, exc) = sys.exc_info()[:2]
                self._io_error = '%s: %s' % (exc_type.__name__, exc)
                self.lines = []
        else:
            self.lines = lines
        if self.lines:
            ord0 = ord(self.lines[0][0])
            if ord0 in (0xef, 0xfeff): # Strip the UTF-8 BOM
                if ord0 == 0xfeff:
                    # Decoded BOM character (text-mode read).
                    self.lines[0] = self.lines[0][1:]
                elif self.lines[0][:3] == '\xef\xbb\xbf':
                    # Raw BOM bytes (e.g. latin-1 fallback read).
                    self.lines[0] = self.lines[0][3:]
        self.report = report or options.report
        self.report_error = self.report.error
    def report_invalid_syntax(self):
        """Check if the syntax is valid."""
        (exc_type, exc) = sys.exc_info()[:2]
        if len(exc.args) > 1:
            # SyntaxError detail tuple is (filename, lineno, offset, text);
            # keep just (lineno, offset).
            offset = exc.args[1]
            if len(offset) > 2:
                offset = offset[1:3]
        else:
            offset = (1, 0)
        self.report_error(offset[0], offset[1] or 0,
                          'E901 %s: %s' % (exc_type.__name__, exc.args[0]),
                          self.report_invalid_syntax)
    def readline(self):
        """Get the next line from the input buffer."""
        if self.line_number >= self.total_lines:
            return ''
        line = self.lines[self.line_number]
        self.line_number += 1
        # The first indented line determines the file's indent character.
        if self.indent_char is None and line[:1] in WHITESPACE:
            self.indent_char = line[0]
        return line
    def run_check(self, check, argument_names):
        """Run a check plugin."""
        # Each named argument is looked up as an attribute of this checker.
        arguments = []
        for name in argument_names:
            arguments.append(getattr(self, name))
        return check(*arguments)
    def init_checker_state(self, name, argument_names):
        """ Prepares a custom state for the specific checker plugin."""
        if 'checker_state' in argument_names:
            self.checker_state = self._checker_states.setdefault(name, {})
    def check_physical(self, line):
        """Run all physical checks on a raw input line."""
        self.physical_line = line
        for name, check, argument_names in self._physical_checks:
            self.init_checker_state(name, argument_names)
            result = self.run_check(check, argument_names)
            if result is not None:
                (offset, text) = result
                self.report_error(self.line_number, offset, text, check)
                if text[:4] == 'E101':
                    # Indent style mismatch: adopt this line's indent char.
                    self.indent_char = line[0]
    def build_tokens_line(self):
        """Build a logical line from tokens."""
        logical = []
        comments = []
        length = 0
        prev_row = prev_col = mapping = None
        for token_type, text, start, end, line in self.tokens:
            if token_type in SKIP_TOKENS:
                continue
            if not mapping:
                mapping = [(0, start)]
            if token_type == tokenize.COMMENT:
                comments.append(text)
                continue
            if token_type == tokenize.STRING:
                # Hide string contents so logical checks don't match inside.
                text = mute_string(text)
            if prev_row:
                (start_row, start_col) = start
                if prev_row != start_row: # different row
                    prev_text = self.lines[prev_row - 1][prev_col - 1]
                    if prev_text == ',' or (prev_text not in '{[(' and
                                            text not in '}])'):
                        text = ' ' + text
                elif prev_col != start_col: # different column
                    text = line[prev_col:start_col] + text
            logical.append(text)
            length += len(text)
            # mapping pairs logical-line offsets with physical (row, col).
            mapping.append((length, end))
            (prev_row, prev_col) = end
        self.logical_line = ''.join(logical)
        self.noqa = comments and noqa(''.join(comments))
        return mapping
    def check_logical(self):
        """Build a line from tokens and run all logical checks on it."""
        self.report.increment_logical_line()
        mapping = self.build_tokens_line()
        if not mapping:
            return
        (start_row, start_col) = mapping[0][1]
        start_line = self.lines[start_row - 1]
        self.indent_level = expand_indent(start_line[:start_col])
        if self.blank_before < self.blank_lines:
            self.blank_before = self.blank_lines
        if self.verbose >= 2:
            print(self.logical_line[:80].rstrip())
        for name, check, argument_names in self._logical_checks:
            if self.verbose >= 4:
                print(' ' + name)
            self.init_checker_state(name, argument_names)
            for offset, text in self.run_check(check, argument_names) or ():
                if not isinstance(offset, tuple):
                    # Map a plain logical-line offset back to a physical
                    # (row, col) position using the token mapping.
                    for token_offset, pos in mapping:
                        if offset <= token_offset:
                            break
                    offset = (pos[0], pos[1] + offset - token_offset)
                self.report_error(offset[0], offset[1], text, check)
        if self.logical_line:
            self.previous_indent_level = self.indent_level
            self.previous_logical = self.logical_line
        self.blank_lines = 0
        self.tokens = []
    def check_ast(self):
        """Build the file's AST and run all AST checks."""
        try:
            tree = compile(''.join(self.lines), '', 'exec', PyCF_ONLY_AST)
        except (ValueError, SyntaxError, TypeError):
            return self.report_invalid_syntax()
        for name, cls, __ in self._ast_checks:
            checker = cls(tree, self.filename)
            for lineno, offset, text, check in checker.run():
                # Honour a "# noqa" comment on the offending source line.
                if not self.lines or not noqa(self.lines[lineno - 1]):
                    self.report_error(lineno, offset, text, check)
    def generate_tokens(self):
        """Tokenize the file, run physical line checks and yield tokens."""
        if self._io_error:
            self.report_error(1, 0, 'E902 %s' % self._io_error, readlines)
        tokengen = tokenize.generate_tokens(self.readline)
        try:
            for token in tokengen:
                if token[2][0] > self.total_lines:
                    return
                self.maybe_check_physical(token)
                yield token
        except (SyntaxError, tokenize.TokenError):
            self.report_invalid_syntax()
    def maybe_check_physical(self, token):
        """If appropriate (based on token), check current physical line(s)."""
        # Called after every token, but act only on end of line.
        if _is_eol_token(token):
            # Obviously, a newline token ends a single physical line.
            self.check_physical(token[4])
        elif token[0] == tokenize.STRING and '\n' in token[1]:
            # Less obviously, a string that contains newlines is a
            # multiline string, either triple-quoted or with internal
            # newlines backslash-escaped. Check every physical line in the
            # string *except* for the last one: its newline is outside of
            # the multiline string, so we consider it a regular physical
            # line, and will check it like any other physical line.
            #
            # Subtleties:
            # - we don't *completely* ignore the last line; if it contains
            # the magical "# noqa" comment, we disable all physical
            # checks for the entire multiline string
            # - have to wind self.line_number back because initially it
            # points to the last line of the string, and we want
            # check_physical() to give accurate feedback
            if noqa(token[4]):
                return
            self.multiline = True
            self.line_number = token[2][0]
            for line in token[1].split('\n')[:-1]:
                self.check_physical(line + '\n')
                self.line_number += 1
            self.multiline = False
    def check_all(self, expected=None, line_offset=0):
        """Run all checks on the input file."""
        self.report.init_file(self.filename, self.lines, expected, line_offset)
        self.total_lines = len(self.lines)
        if self._ast_checks:
            self.check_ast()
        # Reset the per-file tokenizer/checker state.
        self.line_number = 0
        self.indent_char = None
        self.indent_level = self.previous_indent_level = 0
        self.previous_logical = ''
        self.tokens = []
        self.blank_lines = self.blank_before = 0
        parens = 0
        for token in self.generate_tokens():
            self.tokens.append(token)
            token_type, text = token[0:2]
            if self.verbose >= 3:
                if token[2][0] == token[3][0]:
                    pos = '[%s:%s]' % (token[2][1] or '', token[3][1])
                else:
                    pos = 'l.%s' % token[3][0]
                print('l.%s\t%s\t%s\t%r' %
                      (token[2][0], pos, tokenize.tok_name[token[0]], text))
            if token_type == tokenize.OP:
                # Track bracket depth: logical lines only end outside brackets.
                if text in '([{':
                    parens += 1
                elif text in '}])':
                    parens -= 1
            elif not parens:
                if token_type in NEWLINE:
                    if token_type == tokenize.NEWLINE:
                        # A NEWLINE token ends the logical line.
                        self.check_logical()
                        self.blank_before = 0
                    elif len(self.tokens) == 1:
                        # The physical line contains only this token.
                        self.blank_lines += 1
                        del self.tokens[0]
                    else:
                        self.check_logical()
                elif COMMENT_WITH_NL and token_type == tokenize.COMMENT:
                    if len(self.tokens) == 1:
                        # The comment also ends a physical line
                        token = list(token)
                        token[1] = text.rstrip('\r\n')
                        token[3] = (token[2][0], token[2][1] + len(token[1]))
                        self.tokens = [tuple(token)]
                        self.check_logical()
        if self.tokens:
            # Flush any trailing tokens (e.g. file without final newline).
            self.check_physical(self.lines[-1])
            self.check_logical()
        return self.report.get_file_results()
class BaseReport(object):
    """Collect the results of the checks."""

    # Subclasses set this to echo the filename before the first error.
    print_filename = False

    def __init__(self, options):
        self._benchmark_keys = options.benchmark_keys
        self._ignore_code = options.ignore_code
        # Accumulated results.
        self.elapsed = 0
        self.total_errors = 0
        self.counters = dict.fromkeys(self._benchmark_keys, 0)
        self.messages = {}

    def start(self):
        """Start the timer."""
        self._start_time = time.time()

    def stop(self):
        """Stop the timer."""
        self.elapsed = time.time() - self._start_time

    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        self.filename = filename
        self.lines = lines
        self.expected = expected or ()
        self.line_offset = line_offset
        self.file_errors = 0
        self.counters['files'] += 1
        self.counters['physical lines'] += len(lines)

    def increment_logical_line(self):
        """Signal a new logical line."""
        self.counters['logical lines'] += 1

    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = text[:4]
        if self._ignore_code(code):
            return
        if code not in self.counters:
            self.counters[code] = 1
            # Keep the message text of the first occurrence only.
            self.messages[code] = text[5:]
        else:
            self.counters[code] += 1
        # Expected errors or warnings are counted but never reported.
        if code in self.expected:
            return
        if self.print_filename and not self.file_errors:
            print(self.filename)
        self.file_errors += 1
        self.total_errors += 1
        return code

    def get_file_results(self):
        """Return the count of errors and warnings for this file."""
        return self.file_errors

    def get_count(self, prefix=''):
        """Return the total count of errors and warnings."""
        return sum(self.counters[key]
                   for key in self.messages if key.startswith(prefix))

    def get_statistics(self, prefix=''):
        """Get statistics for message codes that start with the prefix.
        prefix='' matches all errors and warnings
        prefix='E' matches all errors
        prefix='W' matches all warnings
        prefix='E4' matches all errors that have to do with imports
        """
        return ['%-7s %s %s' % (self.counters[key], key, self.messages[key])
                for key in sorted(self.messages) if key.startswith(prefix)]

    def print_statistics(self, prefix=''):
        """Print overall statistics (number of errors and warnings)."""
        for line in self.get_statistics(prefix):
            print(line)

    def print_benchmark(self):
        """Print benchmark numbers."""
        print('%-7.2f %s' % (self.elapsed, 'seconds elapsed'))
        if not self.elapsed:
            return
        for key in self._benchmark_keys:
            print('%-7d %s per second (%d total)' %
                  (self.counters[key] / self.elapsed, key,
                   self.counters[key]))
class FileReport(BaseReport):
    """Collect the results of the checks and print only the filenames."""
    # Enables BaseReport.error()'s one-time filename print; every other
    # behavior is inherited unchanged.
    print_filename = True
class StandardReport(BaseReport):
    """Collect and print the results of the checks."""

    def __init__(self, options):
        super(StandardReport, self).__init__(options)
        # Resolve symbolic format names (e.g. 'default', 'pylint') to a
        # template; any unrecognized value is used verbatim as the template.
        self._fmt = REPORT_FORMAT.get(options.format.lower(),
                                      options.format)
        self._repeat = options.repeat
        self._show_source = options.show_source
        self._show_pep8 = options.show_pep8

    def init_file(self, filename, lines, expected, line_offset):
        """Signal a new file."""
        # Errors are buffered here and printed, sorted by position, in
        # get_file_results().
        self._deferred_print = []
        return super(StandardReport, self).init_file(
            filename, lines, expected, line_offset)

    def error(self, line_number, offset, text, check):
        """Report an error, according to options."""
        code = super(StandardReport, self).error(line_number, offset,
                                                 text, check)
        # Defer printing; only the first occurrence of a code is shown
        # unless --repeat is in effect.
        if code and (self.counters[code] == 1 or self._repeat):
            self._deferred_print.append(
                (line_number, offset, code, text[5:], check.__doc__))
        return code

    def get_file_results(self):
        """Print the result and return the overall count for this file."""
        self._deferred_print.sort()
        for line_number, offset, code, text, doc in self._deferred_print:
            print(self._fmt % {
                'path': self.filename,
                'row': self.line_offset + line_number, 'col': offset + 1,
                'code': code, 'text': text,
            })
            if self._show_source:
                if line_number > len(self.lines):
                    line = ''
                else:
                    line = self.lines[line_number - 1]
                print(line.rstrip())
                # Caret marks the offending column under the source line.
                print(re.sub(r'\S', ' ', line[:offset]) + '^')
            if self._show_pep8 and doc:
                print(' ' + doc.strip())
            # stdout is block buffered when not stdout.isatty().
            # line can be broken where buffer boundary since other processes
            # write to same file.
            # flush() after print() to avoid buffer boundary.
            # Typical buffer size is 8192. line written safely when
            # len(line) < 8192.
            sys.stdout.flush()
        return self.file_errors
class DiffReport(StandardReport):
    """Collect and print the results for the changed lines only."""

    def __init__(self, options):
        super(DiffReport, self).__init__(options)
        # Mapping of filename -> line numbers present in the unified diff.
        self._selected = options.selected_lines

    def error(self, line_number, offset, text, check):
        """Report the error only if it falls on a line changed by the diff."""
        if line_number not in self._selected[self.filename]:
            return
        return super(DiffReport, self).error(line_number, offset, text, check)
class StyleGuide(object):
    """Initialize a PEP-8 instance with few options."""

    def __init__(self, *args, **kwargs):
        """Build the options, pick the reporter and collect the checks.

        Recognized keyword extras (popped before option processing):
          checker_class -- class used to check each file (default: Checker)
          parse_argv    -- read options from sys.argv (default: False)
          config_file   -- path of a configuration file, or False
          parser        -- a pre-built OptionParser, or None
        Any remaining arguments form a dict of option overrides.
        """
        # build options from the command line
        self.checker_class = kwargs.pop('checker_class', Checker)
        parse_argv = kwargs.pop('parse_argv', False)
        config_file = kwargs.pop('config_file', False)
        parser = kwargs.pop('parser', None)
        # build options from dict
        options_dict = dict(*args, **kwargs)
        arglist = None if parse_argv else options_dict.get('paths', None)
        options, self.paths = process_options(
            arglist, parse_argv, config_file, parser)
        if options_dict:
            # Explicit keyword options take precedence over parsed ones.
            options.__dict__.update(options_dict)
            if 'paths' in options_dict:
                self.paths = options_dict['paths']
        self.runner = self.input_file
        self.options = options
        if not options.reporter:
            # --quiet suppresses per-error output; otherwise print each one.
            options.reporter = BaseReport if options.quiet else StandardReport
        options.select = tuple(options.select or ())
        if not (options.select or options.ignore or
                options.testsuite or options.doctest) and DEFAULT_IGNORE:
            # The default choice: ignore controversial checks
            options.ignore = tuple(DEFAULT_IGNORE.split(','))
        else:
            # Ignore all checks which are not explicitly selected
            options.ignore = ('',) if options.select else tuple(options.ignore)
        options.benchmark_keys = BENCHMARK_KEYS[:]
        options.ignore_code = self.ignore_code
        options.physical_checks = self.get_checks('physical_line')
        options.logical_checks = self.get_checks('logical_line')
        options.ast_checks = self.get_checks('tree')
        self.init_report()

    def init_report(self, reporter=None):
        """Initialize the report instance."""
        self.options.report = (reporter or self.options.reporter)(self.options)
        return self.options.report

    def check_files(self, paths=None):
        """Run all checks on the paths."""
        if paths is None:
            paths = self.paths
        report = self.options.report
        runner = self.runner
        report.start()
        try:
            for path in paths:
                if os.path.isdir(path):
                    self.input_dir(path)
                elif not self.excluded(path):
                    runner(path)
        except KeyboardInterrupt:
            # Allow Ctrl-C to abort a long run but still return the
            # partial report.
            print('... stopped')
        report.stop()
        return report

    def input_file(self, filename, lines=None, expected=None, line_offset=0):
        """Run all checks on a Python source file."""
        if self.options.verbose:
            print('checking %s' % filename)
        fchecker = self.checker_class(
            filename, lines=lines, options=self.options)
        return fchecker.check_all(expected=expected, line_offset=line_offset)

    def input_dir(self, dirname):
        """Check all files in this directory and all subdirectories."""
        dirname = dirname.rstrip('/')
        if self.excluded(dirname):
            return 0
        counters = self.options.report.counters
        verbose = self.options.verbose
        filepatterns = self.options.filename
        runner = self.runner
        for root, dirs, files in os.walk(dirname):
            if verbose:
                print('directory ' + root)
            counters['directories'] += 1
            # Prune excluded subdirectories in place so os.walk skips them.
            for subdir in sorted(dirs):
                if self.excluded(subdir, root):
                    dirs.remove(subdir)
            for filename in sorted(files):
                # contain a pattern that matches?
                if ((filename_match(filename, filepatterns) and
                     not self.excluded(filename, root))):
                    runner(os.path.join(root, filename))

    def excluded(self, filename, parent=None):
        """Check if the file should be excluded.

        Check if 'options.exclude' contains a pattern that matches filename.
        """
        if not self.options.exclude:
            return False
        basename = os.path.basename(filename)
        if filename_match(basename, self.options.exclude):
            return True
        if parent:
            filename = os.path.join(parent, filename)
        filename = os.path.abspath(filename)
        return filename_match(filename, self.options.exclude)

    def ignore_code(self, code):
        """Check if the error code should be ignored.

        If 'options.select' contains a prefix of the error code,
        return False. Else, if 'options.ignore' contains a prefix of
        the error code, return True.
        """
        # Short codes (e.g. 'E1') are category prefixes: keep them if any
        # selected code falls under the category.
        if len(code) < 4 and any(s.startswith(code)
                                 for s in self.options.select):
            return False
        return (code.startswith(self.options.ignore) and
                not code.startswith(self.options.select))

    def get_checks(self, argument_name):
        """Get all the checks for this category.

        Find all globally visible functions where the first argument name
        starts with argument_name and which contain selected tests.
        """
        checks = []
        for check, attrs in _checks[argument_name].items():
            (codes, args) = attrs
            # Keep the check if at least one of its codes is not ignored.
            if any(not (code and self.ignore_code(code)) for code in codes):
                checks.append((check.__name__, check, args))
        return sorted(checks)
def get_parser(prog='pep8', version=__version__):
    """Build and return the command-line OptionParser for pep8.

    `parser.config_options` lists the option names that may also be set
    from a configuration file (see read_config).
    """
    parser = OptionParser(prog=prog, version=version,
                          usage="%prog [options] input ...")
    parser.config_options = [
        'exclude', 'filename', 'select', 'ignore', 'max-line-length',
        'hang-closing', 'count', 'format', 'quiet', 'show-pep8',
        'show-source', 'statistics', 'verbose']
    parser.add_option('-v', '--verbose', default=0, action='count',
                      help="print status messages, or debug with -vv")
    parser.add_option('-q', '--quiet', default=0, action='count',
                      help="report only file names, or nothing with -qq")
    parser.add_option('-r', '--repeat', default=True, action='store_true',
                      help="(obsolete) show all occurrences of the same error")
    parser.add_option('--first', action='store_false', dest='repeat',
                      help="show first occurrence of each error")
    parser.add_option('--exclude', metavar='patterns', default=DEFAULT_EXCLUDE,
                      help="exclude files or directories which match these "
                           "comma separated patterns (default: %default)")
    parser.add_option('--filename', metavar='patterns', default='*.py',
                      help="when parsing directories, only check filenames "
                           "matching these comma separated patterns "
                           "(default: %default)")
    parser.add_option('--select', metavar='errors', default='',
                      help="select errors and warnings (e.g. E,W6)")
    parser.add_option('--ignore', metavar='errors', default='',
                      help="skip errors and warnings (e.g. E4,W) "
                           "(default: %s)" % DEFAULT_IGNORE)
    parser.add_option('--show-source', action='store_true',
                      help="show source code for each error")
    parser.add_option('--show-pep8', action='store_true',
                      help="show text of PEP 8 for each error "
                           "(implies --first)")
    parser.add_option('--statistics', action='store_true',
                      help="count errors and warnings")
    parser.add_option('--count', action='store_true',
                      help="print total number of errors and warnings "
                           "to standard error and set exit code to 1 if "
                           "total is not null")
    parser.add_option('--max-line-length', type='int', metavar='n',
                      default=MAX_LINE_LENGTH,
                      help="set maximum allowed line length "
                           "(default: %default)")
    parser.add_option('--hang-closing', action='store_true',
                      help="hang closing bracket instead of matching "
                           "indentation of opening bracket's line")
    parser.add_option('--format', metavar='format', default='default',
                      help="set the error format [default|pylint|<custom>]")
    parser.add_option('--diff', action='store_true',
                      help="report changes only within line number ranges in "
                           "the unified diff received on STDIN")
    # Testing options are only exposed when running from a source checkout
    # that ships the regression test suite.
    group = parser.add_option_group("Testing Options")
    if os.path.exists(TESTSUITE_PATH):
        group.add_option('--testsuite', metavar='dir',
                         help="run regression tests from dir")
        group.add_option('--doctest', action='store_true',
                         help="run doctest on myself")
    group.add_option('--benchmark', action='store_true',
                     help="measure processing speed")
    return parser
def read_config(options, args, arglist, parser):
    """Read and parse configurations

    If a config file is specified on the command line with the "--config"
    option, then only it is used for configuration.
    Otherwise, the user configuration (~/.config/pep8) and any local
    configurations in the current directory or above will be merged together
    (in that order) using the read method of ConfigParser.
    """
    config = RawConfigParser()
    cli_conf = options.config
    local_dir = os.curdir
    if USER_CONFIG and os.path.isfile(USER_CONFIG):
        if options.verbose:
            print('user configuration: %s' % USER_CONFIG)
        config.read(USER_CONFIG)
    # Walk up from the common prefix of the checked paths looking for a
    # project-level configuration file (tox.ini / setup.cfg).
    parent = tail = args and os.path.abspath(os.path.commonprefix(args))
    while tail:
        if config.read(os.path.join(parent, fn) for fn in PROJECT_CONFIG):
            local_dir = parent
            if options.verbose:
                print('local configuration: in %s' % parent)
            break
        (parent, tail) = os.path.split(parent)
    if cli_conf and os.path.isfile(cli_conf):
        if options.verbose:
            print('cli configuration: %s' % cli_conf)
        config.read(cli_conf)
    pep8_section = parser.prog
    if config.has_section(pep8_section):
        # Map each option's dest name to its type (or action, for flags)
        # so config values can be coerced appropriately below.
        option_list = dict([(o.dest, o.type or o.action)
                            for o in parser.option_list])
        # First, read the default values
        (new_options, __) = parser.parse_args([])
        # Second, parse the configuration
        for opt in config.options(pep8_section):
            if opt.replace('_', '-') not in parser.config_options:
                print(" unknown option '%s' ignored" % opt)
                continue
            if options.verbose > 1:
                print(" %s = %s" % (opt, config.get(pep8_section, opt)))
            normalized_opt = opt.replace('-', '_')
            opt_type = option_list[normalized_opt]
            if opt_type in ('int', 'count'):
                value = config.getint(pep8_section, opt)
            elif opt_type == 'string':
                value = config.get(pep8_section, opt)
                if normalized_opt == 'exclude':
                    # Exclude patterns are resolved relative to the directory
                    # the configuration file was found in.
                    value = normalize_paths(value, local_dir)
            else:
                assert opt_type in ('store_true', 'store_false')
                value = config.getboolean(pep8_section, opt)
            setattr(new_options, normalized_opt, value)
        # Third, overwrite with the command-line options
        (options, __) = parser.parse_args(arglist, values=new_options)
    options.doctest = options.testsuite = False
    return options
def process_options(arglist=None, parse_argv=False, config_file=None,
                    parser=None):
    """Process options passed either via arglist or via command line args.

    Passing in the ``config_file`` parameter allows other tools, such as flake8
    to specify their own options to be processed in pep8.

    Returns an (options, args) pair where args is the list of paths to check.
    """
    if not parser:
        parser = get_parser()
    if not parser.has_option('--config'):
        group = parser.add_option_group("Configuration", description=(
            "The project options are read from the [%s] section of the "
            "tox.ini file or the setup.cfg file located in any parent folder "
            "of the path(s) being processed. Allowed options are: %s." %
            (parser.prog, ', '.join(parser.config_options))))
        group.add_option('--config', metavar='path', default=config_file,
                         help="user config file location")
    # Don't read the command line if the module is used as a library.
    if not arglist and not parse_argv:
        arglist = []
    # If parse_argv is True and arglist is None, arguments are
    # parsed from the command line (sys.argv)
    (options, args) = parser.parse_args(arglist)
    options.reporter = None
    if options.ensure_value('testsuite', False):
        args.append(options.testsuite)
    elif not options.ensure_value('doctest', False):
        if parse_argv and not args:
            # No paths given: default to the current directory only when a
            # project config file (or --diff) makes that a sensible choice.
            if options.diff or any(os.path.exists(name)
                                   for name in PROJECT_CONFIG):
                args = ['.']
            else:
                parser.error('input not specified')
        options = read_config(options, args, arglist, parser)
        # -q on the command line means "filenames only".
        options.reporter = parse_argv and options.quiet == 1 and FileReport
    options.filename = _parse_multi_options(options.filename)
    options.exclude = normalize_paths(options.exclude)
    options.select = _parse_multi_options(options.select)
    options.ignore = _parse_multi_options(options.ignore)
    if options.diff:
        # In diff mode the checked paths are exactly those named in the
        # unified diff read from stdin.
        options.reporter = DiffReport
        stdin = stdin_get_value()
        options.selected_lines = parse_udiff(stdin, options.filename, args[0])
        args = sorted(options.selected_lines)
    return options, args
def _parse_multi_options(options, split_token=','):
r"""Split and strip and discard empties.
Turns the following:
A,
B,
into ["A", "B"]
"""
if options:
return [o.strip() for o in options.split(split_token) if o.strip()]
else:
return options
def _main():
    """Parse options and run checks on Python source."""
    import signal
    # Handle "Broken pipe" gracefully
    try:
        signal.signal(signal.SIGPIPE, lambda signum, frame: sys.exit(1))
    except AttributeError:
        pass    # not supported on Windows
    pep8style = StyleGuide(parse_argv=True)
    options = pep8style.options
    if options.doctest or options.testsuite:
        # Self-test mode: only available from a source checkout.
        from testsuite.support import run_tests
        report = run_tests(pep8style)
    else:
        report = pep8style.check_files()
    if options.statistics:
        report.print_statistics()
    if options.benchmark:
        report.print_benchmark()
    if options.testsuite and not options.quiet:
        report.print_results()
    if report.total_errors:
        # Exit status 1 signals that style violations were found.
        if options.count:
            sys.stderr.write(str(report.total_errors) + '\n')
        sys.exit(1)


if __name__ == '__main__':
    _main()
| apache-2.0 |
KhalidGit/flask | Work/Trivia - Module 5/env/Lib/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
# Keep a handle on the builtin: this module defines its own compile()
# for markers further below, which would otherwise shadow it.
_builtin_compile = compile
try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise
# restricted set of variables: the only names a marker expression may
# reference when it is evaluated (see PEP 345).
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None  # wheel extension
         }

# Mirror each dotted name under an underscore alias ('os.name' ->
# 'os_name'); ASTWhitelist.visit_Attribute rewrites dotted access into
# such flat names before evaluation.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]
def default_environment():
    """Return a copy of the default PEP 345 globals dictionary.

    A fresh dict is returned on every call, so callers may mutate it
    (e.g. via an ``override`` mapping) without affecting _VARS.
    """
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Reject any AST node that is not part of the marker mini-language."""

    # The complete set of node types a PEP 345 marker may contain:
    # comparisons, boolean combinations, (dotted) names, string literals,
    # and the associated operator/context nodes.
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load,
               ast.Str,
               ast.And, ast.Or,
               ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot,
               ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def __init__(self, statement):
        # Kept only so error messages can display the offending marker.
        self.statement = statement

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if isinstance(node, self.ALLOWED):
            return ast.NodeTransformer.visit(self, node)
        pointer = (' ' * node.col_offset) + '^'
        raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                          (self.statement, pointer))

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        flat = ast.Name("%s.%s" % (node.value.id, node.attr), node.ctx)
        return ast.copy_location(flat, node)
def parse_marker(marker):
    """Parse *marker* into an AST, validated against the node whitelist."""
    parsed = ast.parse(marker, mode='eval')
    return ASTWhitelist(marker).generic_visit(parsed)
def compile_marker(parsed_marker):
    """Compile a validated marker AST to a code object.

    Uses the saved builtin (this module shadows compile());
    dont_inherit=True keeps this module's __future__ flags from leaking
    into the compiled expression.
    """
    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)
# Weak cache of marker text -> marker function; an entry disappears once
# no caller holds a reference to the function anymore.
_cache = weakref.WeakValueDictionary()


def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty marker imposes no restriction: always true.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))
        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            return eval(compiled_marker, environment)
    # Expose the original marker text as the function's docstring.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile *marker* and evaluate it against *environment* in one step."""
    return compile(marker)(environment)
| apache-2.0 |
js0701/chromium-crosswalk | tools/perf/measurements/draw_properties.py | 7 | 1912 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from telemetry.page import page_test
from telemetry.timeline import model
from telemetry.timeline import tracing_config
from telemetry.value import scalar
class DrawProperties(page_test.PageTest):
    """Page test that traces and reports property-tree processing cost."""

    def __init__(self):
        super(DrawProperties, self).__init__()

    def CustomizeBrowserOptions(self, options):
        # Prefer compositing so the compositor code path under test is
        # actually exercised.
        options.AppendExtraBrowserArgs([
            '--enable-prefer-compositing-to-lcd-text',
        ])

    def WillNavigateToPage(self, page, tab):
        # Start tracing before navigation so every relevant compositor
        # event emitted during the page load is captured.
        config = tracing_config.TracingConfig()
        config.tracing_category_filter.AddDisabledByDefault(
            'disabled-by-default-cc.debug.cdp-perf')
        config.enable_chrome_trace = True
        tab.browser.platform.tracing_controller.StartTracing(config)

    def ComputeAverageOfDurations(self, timeline_model, name):
        """Return the mean duration of all trace events called *name*.

        Asserts if no matching event was recorded.
        """
        events = timeline_model.GetAllEventsOfName(name)
        event_durations = [d.duration for d in events]
        assert event_durations, 'Failed to find durations'
        duration_sum = sum(event_durations)
        duration_count = len(event_durations)
        duration_avg = duration_sum / duration_count
        return duration_avg

    def ValidateAndMeasurePage(self, page, tab, results):
        # Stop tracing (started in WillNavigateToPage) and report the
        # average cost of the property-tree visible-rect computation.
        timeline_data = tab.browser.platform.tracing_controller.StopTracing()
        timeline_model = model.TimelineModel(timeline_data)
        pt_avg = self.ComputeAverageOfDurations(
            timeline_model,
            'LayerTreeHostCommon::ComputeVisibleRectsWithPropertyTrees')
        results.AddValue(scalar.ScalarValue(
            results.current_page, 'PT_avg_cost', 'ms', pt_avg,
            description='Average time spent processing property trees'))

    def DidRunPage(self, platform):
        # Safety net: make sure tracing is stopped even if the page run
        # failed before ValidateAndMeasurePage could stop it.
        tracing_controller = platform.tracing_controller
        if tracing_controller.is_tracing_running:
            tracing_controller.StopTracing()
| bsd-3-clause |
puiterwijk/HttpCA | Signer/httpca_signer/database.py | 1 | 1817 | # Copyright (c) 2013, Patrick Uiterwijk <puiterwijk@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Patrick Uiterwijk nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Patrick Uiterwijk BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from configuration import config
# Module-level SQLAlchemy engine and Session factory, configured from the
# [database] section of the application configuration.
# NOTE(review): if config.get() returns a string here, bool() is True for
# ANY non-empty value -- including "False" or "0" -- so SQL echoing would
# effectively always be enabled; a boolean-aware getter would be safer.
# TODO confirm the type returned by config.get().
engine = create_engine(config.get('database', 'URI'), echo=bool(config.get('database', 'echo')))
# Callers instantiate session() to obtain a new Session bound to the engine.
session = sessionmaker(bind=engine)
| bsd-3-clause |
40223222/-2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/color.py | 603 | 4330 | ## pygame - Python Game Library
## Copyright (C) 2000-2003 Pete Shinners
##
## This library is free software; you can redistribute it and/or
## modify it under the terms of the GNU Library General Public
## License as published by the Free Software Foundation; either
## version 2 of the License, or (at your option) any later version.
##
## This library is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## Library General Public License for more details.
##
## You should have received a copy of the GNU Library General Public
## License along with this library; if not, write to the Free
## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##
## Pete Shinners
## pete@shinners.org
"""Manipulate colors"""
try:
from colordict import THECOLORS
except ImportError:
#the colordict module isn't available
THECOLORS = {}
def Color(colorname):
    """pygame.color.Color(colorname) -> RGBA

    Get RGB values from common color names.

    The name may be an english color name (case insensitive, whitespace
    ignored, as defined by the standard X11 'rgb' colors) or a web-style
    hex color: '0xRRGGBB' / '#RRGGBB', optionally with an alpha byte
    appended ('0xRRGGBBAA').  Raises ValueError for unknown names or
    malformed hex values.
    """
    if colorname[:2] == '0x' or colorname[0] == '#':
        # Web-style specification: strip the prefix, then read hex pairs.
        if colorname[0] == '#':
            hexdigits = colorname[1:]
        else:
            hexdigits = colorname[2:]
        alpha = 255
        try:
            red = int('0x' + hexdigits[0:2], 16)
            green = int('0x' + hexdigits[2:4], 16)
            blue = int('0x' + hexdigits[4:6], 16)
            if len(hexdigits) > 6:
                alpha = int('0x' + hexdigits[6:8], 16)
        except ValueError:
            raise ValueError("Illegal hex color")
        return red, green, blue, alpha
    # Named color: normalize by dropping spaces and lowercasing.
    name = colorname.replace(' ', '').lower()
    try:
        return THECOLORS[name]
    except KeyError:
        raise ValueError("Illegal color name, " + name)
def _splitcolor(color, defaultalpha=255):
try:
second = int(color)
r = g = b = color
a = defaultalpha
except TypeError:
if len(color) == 4:
r, g, b, a = color
elif len(color) == 3:
r, g, b = color
a = defaultalpha
return r, g, b, a
def add(color1, color2):
    """pygame.color.add(color1, color2) -> RGBA

    Add two colors component-wise.  A plain number is treated as a grey
    value applied to all three RGB channels of that argument.  Every
    resulting channel is clamped to the maximum color value of 255.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2)
    clamp = min
    return (clamp(int(r1 + r2), 255), clamp(int(g1 + g2), 255),
            clamp(int(b1 + b2), 255), clamp(int(a1 + a2), 255))
def subtract(color1, color2):
    """pygame.color.subtract(color1, color2) -> RGBA

    Subtract color2 from color1 component-wise.  A plain number is
    treated as a grey value applied to all three RGB channels of that
    argument.  Every resulting channel is clamped to the minimum color
    value of 0.  Note that color2's missing alpha defaults to 0, so an
    alpha-less second color leaves the first color's alpha untouched.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2, 0)
    clamp = max
    return (clamp(int(r1 - r2), 0), clamp(int(g1 - g2), 0),
            clamp(int(b1 - b2), 0), clamp(int(a1 - a2), 0))
def multiply(color1, color2):
    """pygame.color.multiply(color1, color2) -> RGBA

    multiply two colors

    Multiply the RGB values of two colors together. If one of the
    colors is only a single numeric value, it is applied to the
    RGB components of the first color.
    """
    r1, g1, b1, a1 = _splitcolor(color1)
    r2, g2, b2, a2 = _splitcolor(color2)
    m, i = min, int
    # Use floor division: this file ships with Brython (Python 3), where
    # the original "/ 255" produced floats, but color channels must stay
    # integers in the 0-255 range.
    return (m(i(r1 * r2) // 255, 255), m(i(g1 * g2) // 255, 255),
            m(i(b1 * b2) // 255, 255), m(i(a1 * a2) // 255, 255))
| agpl-3.0 |
flgiordano/netcash | +/google-cloud-sdk/lib/third_party/requests/__init__.py | 151 | 1861 | # -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2015 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.6.0'
__build__ = 0x020503
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2015 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
try:
    from .packages.urllib3.contrib import pyopenssl
    pyopenssl.inject_into_urllib3()
except ImportError:
    # pyOpenSSL (or one of its dependencies) is not installed; urllib3
    # falls back to the stdlib ssl module.
    pass
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:  # Python 2.7+
    from logging import NullHandler
except ImportError:
    # logging.NullHandler appeared in Python 2.7; provide a minimal
    # stand-in for older interpreters.
    class NullHandler(logging.Handler):
        def emit(self, record):
            # Deliberately discard every record.
            pass
logging.getLogger(__name__).addHandler(NullHandler())
| bsd-3-clause |
martingkelly/feserial | scripts/gdb/linux/lists.py | 630 | 2897 | #
# gdb helper commands and functions for Linux kernel debugging
#
# list tools
#
# Copyright (c) Thiebaud Weksteen, 2015
#
# Authors:
# Thiebaud Weksteen <thiebaud@weksteen.fr>
#
# This work is licensed under the terms of the GNU GPL version 2.
#
import gdb
from linux import utils
list_head = utils.CachedType("struct list_head")
def list_check(head):
    """Walk a kernel 'struct list_head' ring and verify its links.

    For each node the invariants prev.next == node and next.prev == node
    are checked.  The walk stops with a diagnostic on the first corrupt
    or inaccessible node, otherwise it reports the node count once it
    arrives back at *head*.
    """
    nb = 0
    # Accept either a (struct list_head *) or a struct list_head value.
    if (head.type == list_head.get_type().pointer()):
        head = head.dereference()
    elif (head.type != list_head.get_type()):
        raise gdb.GdbError('argument must be of type (struct list_head [*])')
    c = head
    try:
        gdb.write("Starting with: {}\n".format(c))
    except gdb.MemoryError:
        gdb.write('head is not accessible\n')
        return
    while True:
        p = c['prev'].dereference()
        n = c['next'].dereference()
        # Backward link: the previous node must point back at us.
        try:
            if p['next'] != c.address:
                gdb.write('prev.next != current: '
                          'current@{current_addr}={current} '
                          'prev@{p_addr}={p}\n'.format(
                              current_addr=c.address,
                              current=c,
                              p_addr=p.address,
                              p=p,
                          ))
                return
        except gdb.MemoryError:
            gdb.write('prev is not accessible: '
                      'current@{current_addr}={current}\n'.format(
                          current_addr=c.address,
                          current=c
                      ))
            return
        # Forward link: the next node must point back at us.
        try:
            if n['prev'] != c.address:
                gdb.write('next.prev != current: '
                          'current@{current_addr}={current} '
                          'next@{n_addr}={n}\n'.format(
                              current_addr=c.address,
                              current=c,
                              n_addr=n.address,
                              n=n,
                          ))
                return
        except gdb.MemoryError:
            gdb.write('next is not accessible: '
                      'current@{current_addr}={current}\n'.format(
                          current_addr=c.address,
                          current=c
                      ))
            return
        # Advance; the list is circular, so reaching head again means done.
        c = n
        nb += 1
        if c == head:
            gdb.write("list is consistent: {} node(s)\n".format(nb))
            return
class LxListChk(gdb.Command):
    """Verify a list consistency"""

    def __init__(self):
        # Register as the "lx-list-check" gdb command, completing on
        # expressions so a list_head symbol can be tab-completed.
        super(LxListChk, self).__init__("lx-list-check", gdb.COMMAND_DATA,
                                        gdb.COMPLETE_EXPRESSION)

    def invoke(self, arg, from_tty):
        """gdb entry point: expects exactly one list_head expression."""
        argv = gdb.string_to_argv(arg)
        if len(argv) != 1:
            raise gdb.GdbError("lx-list-check takes one argument")
        list_check(gdb.parse_and_eval(argv[0]))

# Instantiating the command registers it with gdb.
LxListChk()
| gpl-2.0 |
mola/qgis | python/plugins/GdalTools/tools/widgetPluginBase.py | 1 | 5107 | # -*- coding: utf-8 -*-
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from dialogBase import GdalToolsBaseDialog as BaseDialog
import GdalTools_utils as Utils
class GdalToolsBasePluginWidget:
def __init__(self, iface, commandName, helpFileBaseName = None, parent = None):
self.iface = iface
self.base = BaseDialog(parent, iface, self, self.windowTitle(), commandName)
self.connect(self.base, SIGNAL("processError(QProcess::ProcessError)"), self.onError)
self.connect(self.base, SIGNAL("processFinished(int, QProcess::ExitStatus)"), self.onFinished)
self.connect(self.base, SIGNAL("okClicked()"), self.onRun)
self.connect(self.base, SIGNAL("closeClicked()"), self.onClosing)
self.connect(self.base, SIGNAL("helpClicked()"), self.onHelp)
self.connect(self.base, SIGNAL("finished(bool)"), self.finished)
def someValueChanged(self):
self.emit(SIGNAL("valuesChanged(const QStringList &)"), self.getArguments())
def exec_(self):
self.someValueChanged()
return self.base.exec_()
def show_(self):
self.someValueChanged()
return self.base.show()
def setCommandViewerEnabled(self, enable):
self.base.setCommandViewerEnabled(enable)
self.someValueChanged()
def onRun(self):
self.base.onRun()
def onClosing(self):
self.base.onClosing()
def onHelp(self):
self.base.onHelp()
def onFinished(self, exitCode, status):
self.base.onFinished(exitCode, status)
def onError(self, error):
self.base.onError(error)
def getArguments(self):
pass
def getInputFileName(self):
pass
def getOutputFileName(self):
pass
def addLayerIntoCanvas(self, fileInfo):
pass
def finished(self, load):
outFn = self.getOutputFileName()
if outFn == None:
return
outFn = QString(outFn)
if outFn.isEmpty():
QMessageBox.warning(self, self.tr( "Warning" ), self.tr( "No output file created." ) )
return
fileInfo = QFileInfo(outFn)
if fileInfo.exists():
if load:
self.addLayerIntoCanvas(fileInfo)
QMessageBox.information(self, self.tr( "Finished" ), self.tr( "Processing completed." ) )
else:
QMessageBox.warning(self, self.tr( "Warning" ), self.tr( "%1 not created." ).arg( outFn ) )
# This method is useful to set up options for the command. It sets for each passed widget:
# 1. its passed signals to connect to the BasePluginWidget.someValueChanged() slot,
# 2. its enabler checkbox or enabled status,
# 3. its status as visible (hide) if the installed gdal version is greater or equal (lesser) then the passed version
#
# wdgts_sgnls_chk_ver_list: list of wdgts_sgnls_chk_ver
# wdgts_sgnls_chk_ver: tuple containing widgets, signals, enabler checkbox or enabled status, required version
def setParamsStatus(self, wdgts_sgnls_chk_ver_list):
    """Wire option widgets into the command-line preview machinery.

    Accepts one ``(widgets, signals, enabler, required_version)`` tuple or a
    list of such tuples; recurses until it operates on a single widget and a
    single signal. For each widget it:
      1. connects the signal(s) to :meth:`someValueChanged`,
      2. applies the enabler checkbox (or plain boolean enabled state),
      3. hides and disables the widget when the installed GDAL version is
         older than the required version.
    """
    # A list means several configurations: handle each entry recursively.
    if isinstance(wdgts_sgnls_chk_ver_list, list):
        for wdgts_sgnls_chk_ver in wdgts_sgnls_chk_ver_list:
            self.setParamsStatus(wdgts_sgnls_chk_ver)
        return
    wdgts_sgnls_chk_ver = wdgts_sgnls_chk_ver_list
    if not isinstance(wdgts_sgnls_chk_ver, tuple):
        return
    # Unpack the up-to-four positional slots, defaulting missing ones to None.
    if len(wdgts_sgnls_chk_ver) > 0:
        wdgts = wdgts_sgnls_chk_ver[0]
    else:
        wdgts = None
    if len(wdgts_sgnls_chk_ver) > 1:
        sgnls = wdgts_sgnls_chk_ver[1]
    else:
        sgnls = None
    if len(wdgts_sgnls_chk_ver) > 2:
        chk = wdgts_sgnls_chk_ver[2]
    else:
        chk = None
    if len(wdgts_sgnls_chk_ver) > 3:
        ver = wdgts_sgnls_chk_ver[3]
    else:
        ver = None
    # Several widgets sharing the same config: recurse once per widget.
    if isinstance(wdgts, list):
        for wdgt in wdgts:
            self.setParamsStatus((wdgt, sgnls, chk, ver))
        return
    wdgt = wdgts
    if not isinstance(wdgt, QWidget):
        return
    # if check version fails, disable the widget then hide both it and its enabler checkbox
    if ver != None:
        if not isinstance(ver, Utils.Version):
            ver = Utils.Version(ver)
        gdalVer = Utils.GdalConfig.version()
        if gdalVer != None and ver > gdalVer:
            wdgt.setVisible(False)
            if isinstance(chk, QWidget):
                chk.setVisible(False)
                chk.setChecked(False)
            # Version check failed: suppress signal hookup and force-disable below.
            sgnls = None
            chk = False
    # connects the passed signals to the BasePluginWidget.someValueChanged slot
    if isinstance(sgnls, list):
        for sgnl in sgnls:
            self.setParamsStatus((wdgt, sgnl, chk))
        return
    sgnl = sgnls
    if sgnl != None:
        self.connect(wdgt, sgnl, self.someValueChanged)
    # set the passed checkbox as widget enabler
    if isinstance(chk, bool):
        wdgt.setEnabled(chk)
    if ( isinstance(chk, QAbstractButton) or isinstance(chk, QGroupBox) ) and \
       chk.isCheckable():
        wdgt.setEnabled(chk.isChecked())
        self.connect(chk, SIGNAL("toggled(bool)"), wdgt.setEnabled)
        self.connect(chk, SIGNAL("toggled(bool)"), self.someValueChanged)
| gpl-2.0 |
ImpregnableProgrammer/Advent-of-Code | 2016/Day_10.py | 1 | 2375 | import re
# First Part
def First_Part(s):
    """Advent of Code 2016 day 10, part 1.

    Simulates the bot/chip exchange described by the newline-separated
    instructions in *s* and prints the id of the bot that ends up
    comparing chips 17 and 61, then stops.

    Fixes over the original: regex patterns are raw strings (``'\\d'`` in a
    plain string is an invalid escape in Python 3), and the opaque
    ``cond and [x] or []`` chains are written as explicit conditionals.
    """
    bots = {}
    g = 0
    lines = s.split('\n')
    while True:
        # Encode "output N" as pseudo-bot id -(N+1) so outputs and bots
        # share one dictionary; real bot ids stay non-negative.
        p = re.sub(r'(?<=output )\d+', lambda k: str(-int(k.group(0)) - 1),
                   lines[g % len(lines)])
        nums = re.findall(r'-?\d+', p)
        if p[:3] == 'bot' and nums[0] in bots and len(bots[nums[0]]) > 1:
            giver, low_t, high_t = nums[0], nums[1], nums[2]
            # Part 1 answer: the bot holding exactly chips 17 and 61.
            if sorted(bots[giver], key=int) == ['17', '61']:
                print(giver)
                break
            lines.pop(g % len(lines))
            bots.setdefault(low_t, [])
            bots.setdefault(high_t, [])
            nlow, nhigh = len(bots[low_t]), len(bots[high_t])
            lo = min(bots[giver], key=int)
            hi = max(bots[giver], key=int)
            # Outputs (negative ids) always accept; bots only while holding < 2 chips.
            if low_t[0] == '-' or nlow < 2:
                bots[low_t].append(lo)
            if high_t[0] == '-' or nhigh < 2:
                bots[high_t].append(hi)
            # A chip refused by a full target bot stays with the giver
            # (low chip wins if both were refused -- original behavior).
            if low_t[0] != '-' and nlow > 1:
                bots[giver] = [lo]
            elif high_t[0] != '-' and nhigh > 1:
                bots[giver] = [hi]
            else:
                bots[giver] = []
        elif p[:5] == 'value':
            # "value V goes to bot B": give chip V to bot B if it has room.
            lines.pop(g % len(lines))
            bots.setdefault(nums[1], [])
            if len(bots[nums[1]]) < 2:
                bots[nums[1]].append(nums[0])
        g += 1
# Second Part
def Second_Part(s):
    """Advent of Code 2016 day 10, part 2.

    Runs the same bot simulation as :func:`First_Part` until every
    instruction has been consumed, then prints the product of the single
    chips in outputs 0, 1 and 2 (stored under pseudo-ids -1, -2, -3).

    Fixes over the original: raw-string regex patterns and explicit
    conditionals instead of ``cond and [x] or []`` chains.
    """
    bots = {}
    g = 0
    lines = s.split('\n')
    while True:
        # Encode "output N" as pseudo-bot id -(N+1); see First_Part.
        p = re.sub(r'(?<=output )\d+', lambda k: str(-int(k.group(0)) - 1),
                   lines[g % len(lines)])
        nums = re.findall(r'-?\d+', p)
        if p[:3] == 'bot' and nums[0] in bots and len(bots[nums[0]]) > 1:
            giver, low_t, high_t = nums[0], nums[1], nums[2]
            lines.pop(g % len(lines))
            bots.setdefault(low_t, [])
            bots.setdefault(high_t, [])
            nlow, nhigh = len(bots[low_t]), len(bots[high_t])
            lo = min(bots[giver], key=int)
            hi = max(bots[giver], key=int)
            if low_t[0] == '-' or nlow < 2:
                bots[low_t].append(lo)
            if high_t[0] == '-' or nhigh < 2:
                bots[high_t].append(hi)
            if low_t[0] != '-' and nlow > 1:
                bots[giver] = [lo]
            elif high_t[0] != '-' and nhigh > 1:
                bots[giver] = [hi]
            else:
                bots[giver] = []
        elif p[:5] == 'value':
            lines.pop(g % len(lines))
            bots.setdefault(nums[1], [])
            if len(bots[nums[1]]) < 2:
                bots[nums[1]].append(nums[0])
        g += 1
        # All instructions consumed: multiply the chips in outputs 0..2.
        if len(lines) < 1:
            j = 1
            for o in bots:
                if 0 > int(o) > -4:
                    j *= int(bots[o][0])
            print(j)
            break
| gpl-3.0 |
yantrabuddhi/nativeclient | tools/scons_to_gn/merge_data_test.py | 7 | 2792 | # Copyright (c) 2014 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from collections import defaultdict
import pprint
import sys
from merge_data import CreateIndexedLookup, MergeRawTree
from print_data import PrintData
# The two independent condition axes used by the fixtures below.
CONDA = ['A1', 'A2', 'A3']
CONDB = ['B1', 'B2', 'B3']
# Cross product: all nine 'Ax_By' configuration names.
CONFIG_ALL = [a + '_' + b for a in CONDA for b in CONDB]
# Partial coverage slices used to exercise non-trivial merge cases.
CONFIG_A1 = [ 'A1_B1', 'A1_B2', 'A1_B3' ]
CONFIG_B1 = [ 'A1_B1', 'A2_B1', 'A3_B1' ]
CONFIG_SET1 = [ 'A1_B1', 'A1_B2', 'A2_B2' ]
CONFIG_SET2 = [ 'A1_B1']
CONFIG_SET3 = [ 'A1_B1', 'A2_B3']
class TestNode(object):
    """Minimal stand-in for a build target.

    Records only the set of 'Ax_By' configuration names the target is
    active for; the merge/transform tests need nothing else.
    """

    def __init__(self, configs):
        # List of configuration names this node applies to.
        self.configs = configs
# Fixtures: target name -> TestNode listing its active configurations.
# TEST_ALL: every target active everywhere (trivial merge).
TEST_ALL = {
    'target_1': TestNode(CONFIG_ALL),
    'target_2': TestNode(CONFIG_ALL),
    'target_3': TestNode(CONFIG_ALL),
}
# TEST_NOT3: one target restricted to a single configuration.
TEST_NOT3 = {
    'target_1': TestNode(CONFIG_ALL),
    'target_2': TestNode(CONFIG_ALL),
    'target_3': TestNode(CONFIG_SET2),
}
# TEST_A1_B1: targets restricted along each axis separately.
TEST_A1_B1 = {
    'target_1': TestNode(CONFIG_A1),
    'target_2': TestNode(CONFIG_B1),
    'target_3': TestNode(CONFIG_ALL),
}
def SameAsOriginal(table, hits):
    """Return True when 'hits' covers exactly the same configuration set per
    target as the original 'table'; prints a FAIL line on the first mismatch.

    Note: this is Python 2 source (print statement, see below).
    """
    for target in hits:
        table_set = set(table[target].configs)
        trans_set = set(hits[target])
        if table_set != trans_set:
            table_set_items = ' '.join(table_set)
            trans_set_items = ' '.join(trans_set)
            print 'FAIL %s: %s vs %s' % (target, table_set_items, trans_set_items)
            return False
    return True
def CompareOriginalToTransformed(table, transformed, name):
    """Flatten the indexed lookup back into target -> config lists and diff
    it against the original table. Returns an error count (0 or 1)."""
    trans_hits = defaultdict(list)
    for keya, hit_table in transformed.iteritems():
        for hitsb, values in hit_table.iteritems():
            for value in values:
                # hitsb is a space-separated string of B-axis conditions.
                trans_hits[value].extend([keya + '_' + b for b in hitsb.split(' ')])
    if not SameAsOriginal(table, trans_hits):
        # Dump both representations for debugging before reporting failure.
        PrintData(table)
        PrintData(transformed)
        return 1
    return 0
def CompareOriginalToMerged(table, merged, name):
    """Expand the merged (targets, A-set, B-set) triples back into per-target
    config lists and diff against the original table. Returns 0 or 1."""
    trans_hits = defaultdict(list)
    for targets, seta, setb in merged:
        for target in targets:
            # Each triple contributes the full cross product of its A and B sets.
            trans_hits[target].extend([a+'_'+b for a in seta for b in setb])
    if not SameAsOriginal(table, trans_hits):
        PrintData(table)
        PrintData(merged)
        return 1
    return 0
def TestTransform(table, name, verbose=False):
    """Run both transforms over 'table' and verify they are lossless.

    Returns 0 on PASS and 1 on FAIL, suitable for summing into an exit code.
    (Python 2 source: uses print statements.)
    """
    transformed = CreateIndexedLookup(table, CONDA, CONDB)
    cnt = CompareOriginalToTransformed(table, transformed, name)
    merged = MergeRawTree(table, CONDA, CONDA, CONDB)
    cnt += CompareOriginalToMerged(table, merged, name)
    if verbose:
        PrintData(merged)
    if not cnt:
        print 'PASS ' + name
        return 0
    print 'FAILED ' + name
    return 1
if __name__ == '__main__':
    # Exit status is the number of failing fixtures (0 means all passed).
    retval = 0
    retval += TestTransform(TEST_ALL, 'All')
    retval += TestTransform(TEST_NOT3, 'Not3')
    retval += TestTransform(TEST_A1_B1, 'A1B1')
    sys.exit(retval)
| bsd-3-clause |
endlessm/chromium-browser | third_party/catapult/third_party/gsutil/gslib/vendored/boto/tests/unit/vpc/test_customergateway.py | 114 | 4610 | from tests.compat import OrderedDict
from tests.unit import unittest
from tests.unit import AWSMockServiceTestCase
from boto.vpc import VPCConnection, CustomerGateway
class TestDescribeCustomerGateways(AWSMockServiceTestCase):
    """Verifies request marshalling and response parsing for
    DescribeCustomerGateways against a mocked EC2 endpoint."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned XML response served by the mock HTTP layer.
        return b"""
            <DescribeCustomerGatewaysResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
                <customerGatewaySet>
                    <item>
                        <customerGatewayId>cgw-b4dc3961</customerGatewayId>
                        <state>available</state>
                        <type>ipsec.1</type>
                        <ipAddress>12.1.2.3</ipAddress>
                        <bgpAsn>65534</bgpAsn>
                        <tagSet/>
                    </item>
                </customerGatewaySet>
            </DescribeCustomerGatewaysResponse>
        """

    def test_get_all_customer_gateways(self):
        self.set_http_response(status_code=200)
        # OrderedDict keeps the filter serialization order deterministic
        # so the expected Filter.N.* parameter names below are stable.
        api_response = self.service_connection.get_all_customer_gateways(
            'cgw-b4dc3961',
            filters=OrderedDict([('state', ['pending', 'available']),
                                 ('ip-address', '12.1.2.3')]))
        self.assert_request_parameters({
            'Action': 'DescribeCustomerGateways',
            'CustomerGatewayId.1': 'cgw-b4dc3961',
            'Filter.1.Name': 'state',
            'Filter.1.Value.1': 'pending',
            'Filter.1.Value.2': 'available',
            'Filter.2.Name': 'ip-address',
            'Filter.2.Value.1': '12.1.2.3'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        self.assertEquals(len(api_response), 1)
        self.assertIsInstance(api_response[0], CustomerGateway)
        self.assertEqual(api_response[0].id, 'cgw-b4dc3961')
class TestCreateCustomerGateway(AWSMockServiceTestCase):
    """Verifies request marshalling and response parsing for
    CreateCustomerGateway against a mocked EC2 endpoint."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned XML response served by the mock HTTP layer.
        return b"""
            <CreateCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
                <customerGateway>
                    <customerGatewayId>cgw-b4dc3961</customerGatewayId>
                    <state>pending</state>
                    <type>ipsec.1</type>
                    <ipAddress>12.1.2.3</ipAddress>
                    <bgpAsn>65534</bgpAsn>
                    <tagSet/>
                </customerGateway>
            </CreateCustomerGatewayResponse>
        """

    def test_create_customer_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.create_customer_gateway(
            'ipsec.1', '12.1.2.3', 65534)
        self.assert_request_parameters({
            'Action': 'CreateCustomerGateway',
            'Type': 'ipsec.1',
            'IpAddress': '12.1.2.3',
            'BgpAsn': 65534},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # Every field of the parsed CustomerGateway should round-trip.
        self.assertIsInstance(api_response, CustomerGateway)
        self.assertEquals(api_response.id, 'cgw-b4dc3961')
        self.assertEquals(api_response.state, 'pending')
        self.assertEquals(api_response.type, 'ipsec.1')
        self.assertEquals(api_response.ip_address, '12.1.2.3')
        self.assertEquals(api_response.bgp_asn, 65534)
class TestDeleteCustomerGateway(AWSMockServiceTestCase):
    """Verifies request marshalling and response parsing for
    DeleteCustomerGateway against a mocked EC2 endpoint."""

    connection_class = VPCConnection

    def default_body(self):
        # Canned XML response served by the mock HTTP layer.
        return b"""
            <DeleteCustomerGatewayResponse xmlns="http://ec2.amazonaws.com/doc/2013-10-01/">
                <requestId>7a62c49f-347e-4fc4-9331-6e8eEXAMPLE</requestId>
                <return>true</return>
            </DeleteCustomerGatewayResponse>
        """

    def test_delete_customer_gateway(self):
        self.set_http_response(status_code=200)
        api_response = self.service_connection.delete_customer_gateway('cgw-b4dc3961')
        self.assert_request_parameters({
            'Action': 'DeleteCustomerGateway',
            'CustomerGatewayId': 'cgw-b4dc3961'},
            ignore_params_values=['AWSAccessKeyId', 'SignatureMethod',
                                  'SignatureVersion', 'Timestamp',
                                  'Version'])
        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12); behavior is identical.
        self.assertEqual(api_response, True)
if __name__ == '__main__':
    # Allow running this test module directly via the unittest runner.
    unittest.main()
| bsd-3-clause |
garnertb/python-mapnik | test/python_tests/topojson_plugin_test.py | 3 | 3747 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function
import os
from nose.tools import assert_almost_equal, eq_
import mapnik
from .utilities import execution_path, run_all
def setup():
    # All of the paths used are relative, if we run the tests
    # from another directory we need to chdir()
    os.chdir(execution_path('.'))
# Only define the tests when the topojson input plugin was compiled in;
# otherwise run_all() below simply finds no test_* callables.
if 'topojson' in mapnik.DatasourceCache.plugin_names():

    def test_topojson_init():
        # topojson tests/data/json/escaped.geojson -o tests/data/json/escaped.topojson --properties
        # topojson version 1.4.2
        ds = mapnik.Datasource(
            type='topojson',
            file='../data/json/escaped.topojson')
        e = ds.envelope()
        # Single-point dataset: min and max coordinates coincide.
        assert_almost_equal(e.minx, -81.705583, places=7)
        assert_almost_equal(e.miny, 41.480573, places=6)
        assert_almost_equal(e.maxx, -81.705583, places=5)
        assert_almost_equal(e.maxy, 41.480573, places=3)

    def test_topojson_properties():
        # First lookup the feature by point query, then by full scan;
        # both paths must decode the same escaped/unicode attributes.
        ds = mapnik.Datasource(
            type='topojson',
            file='../data/json/escaped.topojson')
        f = ds.features_at_point(ds.envelope().center()).features[0]
        eq_(len(ds.fields()), 7)
        desc = ds.describe()
        eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
        eq_(f['name'], u'Test')
        eq_(f['int'], 1)
        eq_(f['description'], u'Test: \u005C')
        eq_(f['spaces'], u'this has spaces')
        eq_(f['double'], 1.1)
        eq_(f['boolean'], True)
        eq_(f['NOM_FR'], u'Qu\xe9bec')
        eq_(f['NOM_FR'], u'Québec')
        ds = mapnik.Datasource(
            type='topojson',
            file='../data/json/escaped.topojson')
        f = ds.all_features()[0]
        eq_(len(ds.fields()), 7)
        desc = ds.describe()
        eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
        eq_(f['name'], u'Test')
        eq_(f['int'], 1)
        eq_(f['description'], u'Test: \u005C')
        eq_(f['spaces'], u'this has spaces')
        eq_(f['double'], 1.1)
        eq_(f['boolean'], True)
        eq_(f['NOM_FR'], u'Qu\xe9bec')
        eq_(f['NOM_FR'], u'Québec')

    def test_geojson_from_in_memory_string():
        # Same dataset, but fed through the 'inline' parameter instead of a file.
        ds = mapnik.Datasource(
            type='topojson',
            inline=open(
                '../data/json/escaped.topojson',
                'r').read())
        f = ds.all_features()[0]
        eq_(len(ds.fields()), 7)
        desc = ds.describe()
        eq_(desc['geometry_type'], mapnik.DataGeometryType.Point)
        eq_(f['name'], u'Test')
        eq_(f['int'], 1)
        eq_(f['description'], u'Test: \u005C')
        eq_(f['spaces'], u'this has spaces')
        eq_(f['double'], 1.1)
        eq_(f['boolean'], True)
        eq_(f['NOM_FR'], u'Qu\xe9bec')
        eq_(f['NOM_FR'], u'Québec')

    # @raises(RuntimeError)
    def test_that_nonexistant_query_field_throws(**kwargs):
        ds = mapnik.Datasource(
            type='topojson',
            file='../data/json/escaped.topojson')
        eq_(len(ds.fields()), 7)
        # TODO - this sorting is messed up
        eq_(ds.fields(), ['name', 'int', 'description',
                          'spaces', 'double', 'boolean', 'NOM_FR'])
        eq_(ds.field_types(), ['str', 'int',
                               'str', 'str', 'float', 'bool', 'str'])
        # TODO - should topojson plugin throw like others?
        # query = mapnik.Query(ds.envelope())
        # for fld in ds.fields():
        #     query.add_property_name(fld)
        # # also add an invalid one, triggering throw
        # query.add_property_name('bogus')
        # fs = ds.features(query)
if __name__ == "__main__":
    setup()
    # Collect every test_* callable defined above and run them all;
    # the process exit code reflects the aggregate result.
    exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
| lgpl-2.1 |
ubear/interactive-tutorials | suds/xsd/schema.py | 192 | 14328 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides a intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
tranparent referenced type resolution and targeted denormalization.
"""
import suds.metrics
from suds import *
from suds.xsd import *
from suds.xsd.sxbuiltin import *
from suds.xsd.sxbasic import Factory as BasicFactory
from suds.xsd.sxbuiltin import Factory as BuiltinFactory
from suds.xsd.sxbase import SchemaObject
from suds.xsd.deplist import DepList
from suds.sax.element import Element
from suds.sax import splitPrefix, Namespace
from logging import getLogger
log = getLogger(__name__)
class SchemaCollection:
    """
    A collection of schema objects. This class is needed because WSDLs
    may contain more then one <schema/> node.
    @ivar wsdl: A wsdl object.
    @type wsdl: L{suds.wsdl.Definitions}
    @ivar children: A list contained schemas.
    @type children: [L{Schema},...]
    @ivar namespaces: A dictionary of contained schemas by namespace.
    @type namespaces: {str:L{Schema}}
    """

    def __init__(self, wsdl):
        """
        @param wsdl: A wsdl object.
        @type wsdl: L{suds.wsdl.Definitions}
        """
        self.wsdl = wsdl
        self.children = []
        self.namespaces = {}

    def add(self, schema):
        """
        Add a schema node to the collection. Schema(s) within the same target
        namespace are consolidated.
        @param schema: A schema object.
        @type schema: (L{Schema})
        """
        key = schema.tns[1]
        existing = self.namespaces.get(key)
        if existing is None:
            self.children.append(schema)
            self.namespaces[key] = schema
        else:
            # Same targetNamespace seen before: fold the new <schema/> node's
            # children and prefixes into the existing one instead of adding it.
            existing.root.children += schema.root.children
            existing.root.nsprefixes.update(schema.root.nsprefixes)

    def load(self, options):
        """
        Load the schema objects for the root nodes.
            - de-references schemas
            - merge schemas
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The merged schema.
        @rtype: L{Schema}
        """
        if options.autoblend:
            self.autoblend()
        # Three passes: build object graphs, resolve imports, then dereference.
        for child in self.children:
            child.build()
        for child in self.children:
            child.open_imports(options)
        for child in self.children:
            child.dereference()
        log.debug('loaded:\n%s', self)
        merged = self.merge()
        log.debug('MERGED:\n%s', merged)
        return merged

    def autoblend(self):
        """
        Ensure that all schemas within the collection
        import each other which has a blending effect.
        @return: self
        @rtype: L{SchemaCollection}
        """
        namespaces = self.namespaces.keys()
        for s in self.children:
            for ns in namespaces:
                tns = s.root.get('targetNamespace')
                if tns == ns:
                    continue
                # Skip if an <import/> for this namespace already exists.
                for imp in s.root.getChildren('import'):
                    if imp.get('namespace') == ns:
                        continue
                imp = Element('import', ns=Namespace.xsdns)
                imp.set('namespace', ns)
                s.root.append(imp)
        return self

    def locate(self, ns):
        """
        Find a schema by namespace.  Only the URI portion of
        the namespace is compared to each schema's I{targetNamespace}
        @param ns: A namespace.
        @type ns: (prefix,URI)
        @return: The schema matching the namesapce, else None.
        @rtype: L{Schema}
        """
        return self.namespaces.get(ns[1])

    def merge(self):
        """
        Merge the contained schemas into one.
        @return: The merged schema.
        @rtype: L{Schema}
        """
        if len(self):
            # Fold every schema into the first one, left to right.
            schema = self.children[0]
            for s in self.children[1:]:
                schema.merge(s)
            return schema
        else:
            return None

    def __len__(self):
        return len(self.children)

    def __str__(self):
        # Python 2 idiom: __unicode__ holds the text, __str__ encodes it.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        result = ['\nschema collection']
        for s in self.children:
            result.append(s.str(1))
        return '\n'.join(result)
class Schema:
    """
    The schema is an objectification of a <schema/> (xsd) definition.
    It provides inspection, lookup and type resolution.
    @ivar root: The root node.
    @type root: L{sax.element.Element}
    @ivar baseurl: The I{base} URL for this schema.
    @type baseurl: str
    @ivar container: A schema collection containing this schema.
    @type container: L{SchemaCollection}
    @ivar children: A list of direct top level children.
    @type children: [L{SchemaObject},...]
    @ivar all: A list of all (includes imported) top level children.
    @type all: [L{SchemaObject},...]
    @ivar types: A schema types cache.
    @type types: {name:L{SchemaObject}}
    @ivar imports: A list of import objects.
    @type imports: [L{SchemaObject},...]
    @ivar elements: A list of <element/> objects.
    @type elements: [L{SchemaObject},...]
    @ivar attributes: A list of <attribute/> objects.
    @type attributes: [L{SchemaObject},...]
    @ivar groups: A list of group objects.
    @type groups: [L{SchemaObject},...]
    @ivar agrps: A list of attribute group objects.
    @type agrps: [L{SchemaObject},...]
    @ivar form_qualified: The flag indicating:
        (@elementFormDefault).
    @type form_qualified: bool
    """

    # XML tag this class objectifies.
    Tag = 'schema'

    def __init__(self, root, baseurl, options, container=None):
        """
        @param root: The xml root.
        @type root: L{sax.element.Element}
        @param baseurl: The base url used for importing.
        @type baseurl: basestring
        @param options: An options dictionary.
        @type options: L{options.Options}
        @param container: An optional container.
        @type container: L{SchemaCollection}
        """
        self.root = root
        self.id = objid(self)
        self.tns = self.mktns()
        self.baseurl = baseurl
        self.container = container
        self.children = []
        self.all = []
        self.types = {}
        self.imports = []
        self.elements = {}
        self.attributes = {}
        self.groups = {}
        self.agrps = {}
        # Optional schema "doctor" may patch the raw tree before building.
        if options.doctor is not None:
            options.doctor.examine(root)
        form = self.root.get('elementFormDefault')
        if form is None:
            self.form_qualified = False
        else:
            self.form_qualified = ( form == 'qualified' )
        # Standalone schema (no containing collection): build immediately.
        # When in a collection, the collection drives build/import/dereference.
        if container is None:
            self.build()
            self.open_imports(options)
            log.debug('built:\n%s', self)
            self.dereference()
            log.debug('dereferenced:\n%s', self)

    def mktns(self):
        """
        Make the schema's target namespace.
        @return: The namespace representation of the schema's
            targetNamespace value.
        @rtype: (prefix, uri)
        """
        tns = [None, self.root.get('targetNamespace')]
        if tns[1] is not None:
            tns[0] = self.root.findPrefix(tns[1])
        return tuple(tns)

    def build(self):
        """
        Build the schema (object graph) using the root node
        using the factory.
            - Build the graph.
            - Collate the children.
        """
        self.children = BasicFactory.build(self.root, self)
        # collated: (children, imports, attributes, elements, types, groups, agrps)
        collated = BasicFactory.collate(self.children)
        self.children = collated[0]
        self.attributes = collated[2]
        self.imports = collated[1]
        self.elements = collated[3]
        self.types = collated[4]
        self.groups = collated[5]
        self.agrps = collated[6]

    def merge(self, schema):
        """
        Merge the contents from the schema.  Only objects not already contained
        in this schema's collections are merged.  This is to provide for bidirectional
        import which produce cyclic includes.
        @returns: self
        @rtype: L{Schema}
        """
        for item in schema.attributes.items():
            if item[0] in self.attributes:
                continue
            self.all.append(item[1])
            self.attributes[item[0]] = item[1]
        for item in schema.elements.items():
            if item[0] in self.elements:
                continue
            self.all.append(item[1])
            self.elements[item[0]] = item[1]
        for item in schema.types.items():
            if item[0] in self.types:
                continue
            self.all.append(item[1])
            self.types[item[0]] = item[1]
        for item in schema.groups.items():
            if item[0] in self.groups:
                continue
            self.all.append(item[1])
            self.groups[item[0]] = item[1]
        for item in schema.agrps.items():
            if item[0] in self.agrps:
                continue
            self.all.append(item[1])
            self.agrps[item[0]] = item[1]
        # Mark the donor so cyclic imports do not re-merge it.
        schema.merged = True
        return self

    def open_imports(self, options):
        """
        Instruct all contained L{sxbasic.Import} children to import
        the schema's which they reference.  The contents of the
        imported schema are I{merged} in.
        @param options: An options dictionary.
        @type options: L{options.Options}
        """
        for imp in self.imports:
            imported = imp.open(options)
            if imported is None:
                continue
            # Recurse so transitive imports are resolved before merging.
            imported.open_imports(options)
            log.debug('imported:\n%s', imported)
            self.merge(imported)

    def dereference(self):
        """
        Instruct all children to perform dereferencing.
        """
        # NOTE: 'all' intentionally shadows the builtin here (legacy code).
        all = []
        indexes = {}
        for child in self.children:
            child.content(all)
        # Sort objects by dependency so referenced types merge before referrers.
        deplist = DepList()
        for x in all:
            x.qualify()
            midx, deps = x.dependencies()
            item = (x, tuple(deps))
            deplist.add(item)
            indexes[x] = midx
        for x, deps in deplist.sort():
            midx = indexes.get(x)
            if midx is None: continue
            d = deps[midx]
            log.debug('(%s) merging %s <== %s', self.tns[1], Repr(x), Repr(d))
            x.merge(d)

    def locate(self, ns):
        """
        Find a schema by namespace.  Only the URI portion of
        the namespace is compared to each schema's I{targetNamespace}.
        The request is passed to the container.
        @param ns: A namespace.
        @type ns: (prefix,URI)
        @return: The schema matching the namesapce, else None.
        @rtype: L{Schema}
        """
        if self.container is not None:
            return self.container.locate(ns)
        else:
            return None

    def custom(self, ref, context=None):
        """
        Get whether the specified reference is B{not} an (xs) builtin.
        @param ref: A str or qref.
        @type ref: (str|qref)
        @return: True if B{not} a builtin, else False.
        @rtype: bool
        """
        if ref is None:
            return True
        else:
            return ( not self.builtin(ref, context) )

    def builtin(self, ref, context=None):
        """
        Get whether the specified reference is an (xs) builtin.
        @param ref: A str or qref.
        @type ref: (str|qref)
        @return: True if builtin, else False.
        @rtype: bool
        """
        w3 = 'http://www.w3.org'
        # NOTE(review): bare except deliberately treats any resolution
        # failure as "not builtin" -- kept as-is.
        try:
            if isqref(ref):
                ns = ref[1]
                return ( ref[0] in Factory.tags and ns.startswith(w3) )
            if context is None:
                context = self.root
            prefix = splitPrefix(ref)[0]
            prefixes = context.findPrefixes(w3, 'startswith')
            return ( prefix in prefixes and ref[0] in Factory.tags )
        except:
            return False

    def instance(self, root, baseurl, options):
        """
        Create and return an new schema object using the
        specified I{root} and I{url}.
        @param root: A schema root node.
        @type root: L{sax.element.Element}
        @param baseurl: A base URL.
        @type baseurl: str
        @param options: An options dictionary.
        @type options: L{options.Options}
        @return: The newly created schema object.
        @rtype: L{Schema}
        @note: This is only used by Import children.
        """
        return Schema(root, baseurl, options)

    def str(self, indent=0):
        """Render an indented text dump of the raw tree and the object model."""
        tab = '%*s'%(indent*3, '')
        result = []
        result.append('%s%s' % (tab, self.id))
        result.append('%s(raw)' % tab)
        result.append(self.root.str(indent+1))
        result.append('%s(model)' % tab)
        for c in self.children:
            result.append(c.str(indent+1))
        result.append('')
        return '\n'.join(result)

    def __repr__(self):
        myrep = '<%s tns="%s"/>' % (self.id, self.tns[1])
        return myrep.encode('utf-8')

    def __str__(self):
        # Python 2 idiom: __unicode__ holds the text, __str__ encodes it.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        return self.str()
| apache-2.0 |
Bam4d/neon | neon/util/batch_writer.py | 2 | 12809 | # ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Process macro batches of data in a pipelined fashion.
"""
import logging
from glob import glob
import functools
import gzip
from multiprocessing import Pool
import numpy as np
import os
import tarfile
import struct
from PIL import Image as PILImage
from neon.util.compat import range, StringIO
from neon.util.persist import load_obj, save_obj
from neon.data import load_i1kmeta
from neon.util.argparser import NeonArgparser
# NOTE: We have to leave this helper function out of the class to use multiprocess pool.map
def proc_img(target_size, squarecrop, is_string=False, imgfile=None):
    """Rescale (and optionally center-crop) one image; return JPEG bytes.

    target_size: length in pixels the image's shorter side is scaled to.
    squarecrop: when True, center-crop the scaled image to target_size square.
    is_string: when True, imgfile holds raw image bytes rather than a
        file-like object (assumes Python 2 StringIO semantics).
    imgfile: file-like object or raw bytes of the source image.
    """
    imgfile = StringIO(imgfile) if is_string else imgfile
    im = PILImage.open(imgfile)
    scale_factor = target_size / np.float32(min(im.size))
    # Fast path: already target-sized and square -- return the raw file bytes
    # untouched (avoids a lossy JPEG re-encode).
    if scale_factor == 1 and im.size[0] == im.size[1] and is_string is False:
        return np.fromfile(imgfile, dtype=np.uint8)
    (wnew, hnew) = map(lambda x: int(round(scale_factor * x)), im.size)
    if scale_factor != 1:
        # Bicubic when upscaling, antialias (Lanczos) when downscaling.
        filt = PILImage.BICUBIC if scale_factor > 1 else PILImage.ANTIALIAS
        im = im.resize((wnew, hnew), filt)
    if squarecrop is True:
        (cx, cy) = map(lambda x: (x - target_size) // 2, (wnew, hnew))
        im = im.crop((cx, cy, cx+target_size, cy+target_size))
    buf = StringIO()
    # subsampling=0 / quality=95 keeps chroma detail for training data.
    im.save(buf, format='JPEG', subsampling=0, quality=95)
    return buf.getvalue()
class BatchWriter(object):
    """Converts a directory tree of per-class images into binary macro
    batches (JPEG strings + labels) plus a pickled metadata file, for
    pipelined training-data loading."""

    def __init__(self, out_dir, image_dir, target_size=256, squarecrop=True, validation_pct=0.2,
                 class_samples_max=None, file_pattern='*.jpg', macro_size=3072):
        # Fixed seed so the train/val split and shuffle are reproducible.
        np.random.seed(0)
        self.out_dir = os.path.expanduser(out_dir)
        self.image_dir = os.path.expanduser(image_dir)
        self.macro_size = macro_size          # images per macro batch file
        self.num_workers = 8                  # processes used for JPEG encoding
        self.target_size = target_size
        self.squarecrop = squarecrop
        self.file_pattern = file_pattern
        self.class_samples_max = class_samples_max
        self.validation_pct = validation_pct
        self.train_file = os.path.join(self.out_dir, 'train_file.csv.gz')
        self.val_file = os.path.join(self.out_dir, 'val_file.csv.gz')
        self.meta_file = os.path.join(self.out_dir, 'dataset_cache.pkl')
        self.global_mean = np.array([0, 0, 0]).reshape((3, 1))
        self.batch_prefix = 'data_batch_'

    def write_csv_files(self):
        """Scan image_dir, build the label mapping, split train/val, and
        write one gzipped (filename,label) CSV per split."""
        # Get the labels as the subdirs
        subdirs = glob(os.path.join(self.image_dir, '*'))
        self.label_names = sorted(map(lambda x: os.path.basename(x), subdirs))
        indexes = range(len(self.label_names))
        self.label_dict = {k: v for k, v in zip(self.label_names, indexes)}
        tlines = []
        vlines = []
        for subdir in subdirs:
            subdir_label = self.label_dict[os.path.basename(subdir)]
            files = glob(os.path.join(subdir, self.file_pattern))
            if self.class_samples_max is not None:
                files = files[:self.class_samples_max]
            lines = [(filename, subdir_label) for filename in files]
            # First validation_pct of each class goes to validation.
            v_idx = int(self.validation_pct * len(lines))
            tlines += lines[v_idx:]
            vlines += lines[:v_idx]
        np.random.shuffle(tlines)
        if not os.path.exists(self.out_dir):
            os.makedirs(self.out_dir)
        for ff, ll in zip([self.train_file, self.val_file], [tlines, vlines]):
            with gzip.open(ff, 'wb') as f:
                f.write('filename,l_id\n')
                for tup in ll:
                    f.write('{},{}\n'.format(*tup))
        self.train_nrec = len(tlines)
        # -(-a // b) is ceiling division: number of macro batches needed.
        self.ntrain = -(-self.train_nrec // self.macro_size)
        self.train_start = 0
        self.val_nrec = len(vlines)
        self.nval = -(-self.val_nrec // self.macro_size)
        if self.ntrain == 0:
            self.val_start = 0
        else:
            # Offset validation batch indices past any possible train index.
            self.val_start = 10 ** int(np.log10(self.ntrain * 10))

    def parse_file_list(self, infile):
        """Read a (filename,l_id) CSV back into parallel lists and record
        the class count."""
        lines = np.loadtxt(infile, delimiter=',', skiprows=1, dtype={'names': ('fname', 'l_id'),
                                                                     'formats': (object, 'i4')})
        imfiles = [l[0] for l in lines]
        labels = {'l_id': [l[1] for l in lines]}
        self.nclass = {'l_id': (max(labels['l_id']) + 1)}
        return imfiles, labels

    def write_batches(self, name, offset, labels, imfiles):
        """Encode images in parallel and write them as macro batch files
        numbered from 'offset'."""
        pool = Pool(processes=self.num_workers)
        npts = -(-len(imfiles) // self.macro_size)
        starts = [i * self.macro_size for i in range(npts)]
        is_tar = isinstance(imfiles[0], tarfile.ExFileObject)
        proc_img_func = functools.partial(proc_img, self.target_size, self.squarecrop, is_tar)
        # Slice files and labels into macro_size chunks.
        imfiles = [imfiles[s:s + self.macro_size] for s in starts]
        labels = [{k: v[s:s + self.macro_size] for k, v in labels.iteritems()} for s in starts]
        print("Writing %s batches..." % (name))
        for i, jpeg_file_batch in enumerate(imfiles):
            if is_tar:
                # Tar members can't be pickled to workers; read bytes first.
                jpeg_file_batch = [j.read() for j in jpeg_file_batch]
            jpeg_strings = pool.map(proc_img_func, jpeg_file_batch)
            bfile = os.path.join(self.out_dir, '%s%d' % (self.batch_prefix, offset + i))
            self.write_binary(jpeg_strings, labels[i], bfile)
            print("Writing batch %d" % (i))
        pool.close()

    def write_binary(self, jpegs, labels, ofname):
        """Write one macro batch: header (counts, label keys), label arrays,
        then length-prefixed JPEG strings."""
        num_imgs = len(jpegs)
        keylist = ['l_id']
        with open(ofname, 'wb') as f:
            f.write(struct.pack('I', num_imgs))
            f.write(struct.pack('I', len(keylist)))
            for key in keylist:
                ksz = len(key)
                f.write(struct.pack('L' + 'B' * ksz, ksz, *bytearray(key)))
                f.write(struct.pack('I' * num_imgs, *labels[key]))
            for i in range(num_imgs):
                jsz = len(jpegs[i])
                bin = struct.pack('I' + 'B' * jsz, jsz, *bytearray(jpegs[i]))
                f.write(bin)

    def save_meta(self):
        """Pickle the dataset description consumed by the batch loader."""
        save_obj({'ntrain': self.ntrain,
                  'nval': self.nval,
                  'train_start': self.train_start,
                  'val_start': self.val_start,
                  'macro_size': self.macro_size,
                  'batch_prefix': self.batch_prefix,
                  'global_mean': self.global_mean,
                  'label_dict': self.label_dict,
                  'label_names': self.label_names,
                  'val_nrec': self.val_nrec,
                  'train_nrec': self.train_nrec,
                  'img_size': self.target_size,
                  'nclass': self.nclass}, self.meta_file)

    def run(self):
        """End-to-end driver: build CSVs, write batches for each requested
        split, then save metadata."""
        self.write_csv_files()
        # validation_pct 0/1 are degenerate single-split cases.
        if self.validation_pct == 0:
            namelist = ['train']
            filelist = [self.train_file]
            startlist = [self.train_start]
        elif self.validation_pct == 1:
            namelist = ['validation']
            filelist = [self.val_file]
            startlist = [self.val_start]
        else:
            namelist = ['train', 'validation']
            filelist = [self.train_file, self.val_file]
            startlist = [self.train_start, self.val_start]
        for sname, fname, start in zip(namelist, filelist, startlist):
            print("%s %s %s" % (sname, fname, start))
            if fname is not None and os.path.exists(fname):
                imgs, labels = self.parse_file_list(fname)
                self.write_batches(sname, start, labels, imgs)
            else:
                print("Skipping %s, file missing" % (sname))
        self.save_meta()
class BatchWriterImagenet(BatchWriter):
    """BatchWriter specialization that reads images straight out of the
    ILSVRC2012 train/validation tarballs instead of a directory tree."""

    # code below adapted from Alex Krizhevsky's cuda-convnet2 library,
    # make-data.py
    # Copyright 2014 Google Inc. All rights reserved.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #     http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
    ##########################################################################

    def run(self):
        """Unpack the ImageNet tarballs, shuffle the train set, and write
        train/val macro batches plus metadata."""
        load_dir = self.image_dir
        train_tar = os.path.join(load_dir, 'ILSVRC2012_img_train.tar')
        validation_tar = os.path.join(load_dir, 'ILSVRC2012_img_val.tar')
        for infile in (train_tar, validation_tar):
            if not os.path.exists(infile):
                raise IOError(infile + " not found. Please ensure you have ImageNet downloaded."
                              "More info here: http://www.image-net.org/download-imageurls")
        # download our version of the metadata
        meta_dir = load_i1kmeta(self.out_dir)
        meta_file = os.path.join(meta_dir, 'neon_ILSVRC2012_devmeta.pkl')
        self.meta = load_obj(meta_file)
        self.__dict__.update(self.meta)  # get label_dict, label_names, global_mean from meta
        # Collapse the per-pixel mean to one BGR triple (note [::-1] reversal).
        self.global_mean = np.mean(self.global_mean.reshape(3, -1), axis=1).reshape(3, 1)[::-1]
        np.random.seed(0)
        # The train tar contains one nested tar per synset (class).
        with tarfile.open(train_tar) as tf:
            s_sets = tf.getmembers()
            s_tars = [tarfile.open(fileobj=tf.extractfile(s)) for s in s_sets]
            print('Building trainset list from synset tars.')
            t_jpegfiles = []
            totalsz = len(s_tars)
            for i, st in enumerate(s_tars):
                if i % 100 == 0:
                    print("%d%% ..." % (int(round((100.0 * i) / totalsz))))
                t_jpegfiles += [st.extractfile(m) for m in st.getmembers()]
                st.close()
            print("Done loading")
            np.random.shuffle(t_jpegfiles)
            # First 9 chars of each member name are the synset id (label key).
            train_labels = [[self.label_dict[j.name[:9]]] for j in t_jpegfiles]
            self.train_nrec = len(t_jpegfiles)
            self.ntrain = -(-self.train_nrec // self.macro_size)
            self.nclass = {'l_id': 1000}
            self.train_start = 0
            train_labels = {'l_id': np.array(train_labels, dtype=np.int32)}
            self.write_batches('train', self.train_start, train_labels, t_jpegfiles)
        with tarfile.open(validation_tar) as tf:
            # Validation images are labeled by sorted filename order.
            jpegfiles = sorted([tf.extractfile(m) for m in tf.getmembers()], key=lambda x: x.name)
            self.val_nrec = len(jpegfiles)
            self.nval = -(-self.val_nrec // self.macro_size)
            self.val_start = 10 ** int(np.log10(self.ntrain) + 1)
            val_labels = {'l_id': np.array(self.val_ground_truth, dtype=np.int32)}
            self.write_batches('val', self.val_start, val_labels, jpegfiles)
        self.save_meta()
if __name__ == "__main__":
    # Command-line entry point: build macrobatches either from the ImageNet
    # tarballs ('i1k') or from a generic directory tree of images.
    parser = NeonArgparser(__doc__)
    parser.add_argument('--set_type', help='(i1k|directory)', required=True,
                        choices=['i1k', 'directory'])
    parser.add_argument('--image_dir', help='Directory to find images', required=True)
    parser.add_argument('--target_size', type=int, default=256,
                        help='Size in pixels to scale images (Must be 256 for i1k dataset)')
    parser.add_argument('--macro_size', type=int, default=5000, help='Images per processed batch')
    parser.add_argument('--file_pattern', default='*.jpg', help='Image extension to include in'
                        'directory crawl')
    args = parser.parse_args()
    logger = logging.getLogger(__name__)
    # Supply dataset type and location
    if args.set_type == 'i1k':
        # ImageNet writer ignores --target_size/--file_pattern (the i1k
        # pipeline assumes its own fixed geometry per the help text above).
        bw = BatchWriterImagenet(out_dir=args.data_dir, image_dir=args.image_dir,
                                 macro_size=args.macro_size)
    else:
        bw = BatchWriter(out_dir=args.data_dir, image_dir=args.image_dir,
                         target_size=args.target_size, macro_size=args.macro_size,
                         file_pattern=args.file_pattern)
    bw.run()
| apache-2.0 |
lyft/incubator-airflow | tests/providers/microsoft/azure/hooks/test_azure_data_lake.py | 4 | 5669 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import json
import unittest
import mock
from airflow.models import Connection
from airflow.utils import db
class TestAzureDataLakeHook(unittest.TestCase):
    """Unit tests for AzureDataLakeHook with the azure SDK pieces mocked."""

    def setUp(self):
        """Register a test connection carrying the ADL credentials."""
        db.merge_conn(
            Connection(
                conn_id='adl_test_key',
                conn_type='azure_data_lake',
                login='client_id',
                password='client secret',
                extra=json.dumps({"tenant": "tenant",
                                  "account_name": "accountname"})
            )
        )

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_conn(self, mock_lib):
        """The hook authenticates via lib.auth and exposes an ADL filesystem."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        from azure.datalake.store import core
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        self.assertEqual(hook.conn_id, 'adl_test_key')
        self.assertIsInstance(hook.connection, core.AzureDLFileSystem)
        assert mock_lib.auth.called

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem',
                autospec=True)
    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_check_for_blob(self, mock_lib, mock_filesystem):
        """check_for_file delegates to glob on the filesystem instance."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        hook.check_for_file('file_path')
        # BUG FIX: the original line was the bare expression
        # ``mock_filesystem.glob.called`` -- it evaluated an attribute and
        # discarded the result, asserting nothing.  Verify the call actually
        # happened on the mocked filesystem instance (return_value of the
        # patched class, as in test_list_glob below).
        self.assertTrue(mock_filesystem.return_value.glob.called)

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.multithread.ADLUploader',
                autospec=True)
    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_upload_file(self, mock_lib, mock_uploader):
        """upload_file forwards its arguments to multithread.ADLUploader."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        hook.upload_file(local_path='tests/hooks/test_adl_hook.py',
                         remote_path='/test_adl_hook.py',
                         nthreads=64, overwrite=True,
                         buffersize=4194304, blocksize=4194304)
        mock_uploader.assert_called_once_with(hook.connection,
                                              lpath='tests/hooks/test_adl_hook.py',
                                              rpath='/test_adl_hook.py',
                                              nthreads=64, overwrite=True,
                                              buffersize=4194304, blocksize=4194304)

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.multithread.ADLDownloader',
                autospec=True)
    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_download_file(self, mock_lib, mock_downloader):
        """download_file forwards its arguments to multithread.ADLDownloader."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        hook.download_file(local_path='test_adl_hook.py',
                           remote_path='/test_adl_hook.py',
                           nthreads=64, overwrite=True,
                           buffersize=4194304, blocksize=4194304)
        mock_downloader.assert_called_once_with(hook.connection,
                                                lpath='test_adl_hook.py',
                                                rpath='/test_adl_hook.py',
                                                nthreads=64, overwrite=True,
                                                buffersize=4194304, blocksize=4194304)

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem',
                autospec=True)
    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_list_glob(self, mock_lib, mock_fs):
        """list() with a wildcard path uses filesystem.glob."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        hook.list('file_path/*')
        mock_fs.return_value.glob.assert_called_once_with('file_path/*')

    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.core.AzureDLFileSystem',
                autospec=True)
    @mock.patch('airflow.providers.microsoft.azure.hooks.azure_data_lake.lib', autospec=True)
    def test_list_walk(self, mock_lib, mock_fs):
        """list() with a directory path uses filesystem.walk."""
        from airflow.providers.microsoft.azure.hooks.azure_data_lake import AzureDataLakeHook
        hook = AzureDataLakeHook(azure_data_lake_conn_id='adl_test_key')
        hook.list('file_path/some_folder/')
        mock_fs.return_value.walk.assert_called_once_with('file_path/some_folder/')
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
clld/clldfabric | clldfabric/varnish.py | 1 | 2713 | """
deploy with varnish:
- apt-get install varnish
- create /etc/default/varnish
- create /etc/varnish/main.vcl
- create /etc/varnish/sites.vcl
- create /etc/varnish/sites/
(and require it to contain the correct include!)
- create /etc/varnish/sites/{app.name}.vcl
- /etc/init.d/varnish restart
- adapt nginx site config
- /etc/init.d/nginx reload
"""
from fabric.contrib.files import append, exists
from fabtools import require
from fabtools import service
from clldfabric.util import (
create_file_as_root, upload_template_as_root, get_template_variables, http_auth,
)
from clldfabric.config import App
# /etc/default/varnish daemon startup configuration: listen on :6081,
# management interface on localhost:6082, default TTL 3600s, 10G
# file-backed cache storage.
DEFAULT = """
START=yes
NFILES=131072
MEMLOCK=82000
# Default varnish instance name is the local nodename. Can be overridden with
# the -n switch, to have more instances on a single server.
# INSTANCE=$(uname -n)
DAEMON_OPTS="-a :6081 \
-T localhost:6082 \
-t 3600 \
-f /etc/varnish/main.vcl \
-S /etc/varnish/secret \
-s file,/var/lib/varnish/$INSTANCE/varnish_storage.bin,10G"
"""

# Top-level VCL: normalize the Host header (strip a leading "www." and a
# trailing ":80") before dispatching to the per-site includes.
MAIN_VCL = """
sub vcl_recv {
set req.http.Host = regsub(req.http.Host, "^www\.", "");
set req.http.Host = regsub(req.http.Host, ":80$", "");
}
include "/etc/varnish/sites.vcl";
"""

# Per-app VCL fragment, filled in via str.format (doubled braces are VCL
# braces): defines the app's backend, routes matching hosts to it, and
# caches fetched responses for one hour.
SITE_VCL_TEMPLATE = """
backend {app.name} {{
.host = "127.0.0.1";
.port = "{app.port}";
}}
sub vcl_recv {{
if (req.http.host ~ "^{app.domain}$") {{ set req.backend = {app.name}; }}
}}
sub vcl_fetch {{
set beresp.ttl = 3600s;
return(deliver);
}}
"""
def cache(app):  # pragma: no cover
    """Require an app to be put behind varnish.

    Installs varnish, writes the daemon defaults and the main/site VCL
    files, restarts varnish, and finally rewrites the nginx site config to
    proxy to varnish on port 6081.
    """
    require.deb.package('varnish')
    create_file_as_root('/etc/default/varnish', DEFAULT)
    create_file_as_root('/etc/varnish/main.vcl', MAIN_VCL)
    sites_vcl = '/etc/varnish/sites.vcl'
    site_config_dir = '/etc/varnish/sites'
    # BUG FIX: str.join takes a single iterable argument; the original
    # passed two positional arguments ('/'.join(dir, name)), which raises
    # TypeError at runtime.  Join the path components as a list instead.
    site_config = '/'.join([site_config_dir, '{app.name}.vcl'.format(app=app)])
    include = 'include "%s";' % site_config
    if exists(sites_vcl):
        # Keep existing includes and append this site's.
        append(sites_vcl, include, use_sudo=True)
    else:
        create_file_as_root(sites_vcl, include + '\n')
    require.files.directory(site_config_dir, use_sudo=True)
    create_file_as_root(site_config, SITE_VCL_TEMPLATE.format(app=app))
    service.restart('varnish')
    # Point nginx at varnish (port 6081) instead of the app itself.
    template_vars = get_template_variables(App(app.name, 6081, domain=app.domain))
    template_vars['SITE'] = True
    upload_template_as_root(app.nginx_site, 'nginx-app.conf', template_vars)
    service.reload('nginx')
def uncache(app):  # pragma: no cover
    """Restore the app's direct nginx site config (reverse of ``cache``)."""
    tv = get_template_variables(app)
    tv['auth'] = http_auth(app)
    # NOTE(review): SITE_TEMPLATE is not defined anywhere in this module's
    # visible scope -- confirm it is provided by an import elsewhere,
    # otherwise this line raises NameError when executed.
    create_file_as_root(app.nginx_site, SITE_TEMPLATE.format(**tv))
    service.reload('nginx')
| apache-2.0 |
KiChjang/servo | tests/wpt/web-platform-tests/tools/wave/network/api/api_handler.py | 6 | 1850 | from __future__ import absolute_import
from __future__ import unicode_literals
import json
import sys
import traceback
import logging
try:
from urllib.parse import parse_qsl
except ImportError:
from urlparse import parse_qsl
global logger
logger = logging.getLogger("wave-api-handler")
class ApiHandler(object):
    """Base class for WAVE REST API handlers.

    Bundles the small shared helpers: header manipulation, JSON/file/zip
    response serialization, URI/query parsing relative to the configured
    web root, and uniform exception logging.
    """

    def __init__(self, web_root):
        self._web_root = web_root

    def set_headers(self, response, headers):
        """Append *headers* to the response, creating the list if absent."""
        if not isinstance(response.headers, list):
            response.headers = []
        response.headers.extend(headers)

    def send_json(self, data, response, status=None):
        """Serialize *data* as pretty-printed JSON onto *response*."""
        response.content = json.dumps(data, indent=4)
        self.set_headers(response, [("Content-Type", "application/json")])
        response.status = 200 if status is None else status

    def send_file(self, blob, file_name, response):
        """Send *blob* as a downloadable attachment named *file_name*."""
        disposition = ("Content-Disposition", "attachment;filename=" + file_name)
        self.set_headers(response, [disposition])
        response.content = blob

    def send_zip(self, data, file_name, response):
        """Send *data* as a zip attachment (resets any existing headers)."""
        response.headers = [("Content-Type", "application/x-compressed")]
        self.send_file(data, file_name, response)

    def parse_uri(self, request):
        """Split the request path (minus the web root) into segments."""
        path = request.url_parts.path
        if self._web_root is not None:
            path = path[len(self._web_root):]
        return [segment for segment in path.split("/") if segment]

    def parse_query_parameters(self, request):
        """Return the query string as a dict of key/value pairs."""
        return dict(parse_qsl(request.url_parts.query))

    def handle_exception(self, message):
        """Log the current exception with *message* and print its traceback."""
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_tb(exc_tb)
        logger.error("{}: {}: {}".format(message, exc_type.__name__, exc_value.args[0]))
| mpl-2.0 |
mcanthony/ace | static.py | 71 | 8436 | #!/usr/bin/env python
"""static - A stupidly simple WSGI way to serve static (or mixed) content.
(See the docstrings of the various functions and classes.)
Copyright (C) 2006-2009 Luke Arno - http://lukearno.com/
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to:
The Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
Luke Arno can be found at http://lukearno.com/
"""
import mimetypes
import rfc822
import time
import string
import sys
from os import path, stat, getcwd
from wsgiref import util
from wsgiref.headers import Headers
from wsgiref.simple_server import make_server
from optparse import OptionParser
try: from pkg_resources import resource_filename, Requirement
except: pass
try: import kid
except: pass
class MagicError(Exception): pass
class StatusApp:
    """A tiny WSGI app that answers every request with a fixed status.

    Cling uses instances of this for its 404/304/301/405 responses.
    When *message* is None the status line doubles as the response body;
    an explicit empty message (as for 304) yields an empty body and no
    Content-type header.
    """

    def __init__(self, status, message=None):
        self.status = status
        if message is None:
            self.message = status
        else:
            self.message = message

    def __call__(self, environ, start_response, headers=None):
        # BUG FIX: the original signature used a mutable default
        # (headers=[]) that Headers(...).add_header mutated in place, so
        # calls relying on the default accumulated a duplicate
        # 'Content-type' header on every request.  A None sentinel gives
        # each call a fresh list while keeping explicit-list callers
        # (e.g. Cling's redirect/304 paths) unchanged.
        if headers is None:
            headers = []
        if self.message:
            Headers(headers).add_header('Content-type', 'text/plain')
        start_response(self.status, headers)
        # HEAD responses must not carry a body.
        if environ['REQUEST_METHOD'] == 'HEAD':
            return [""]
        else:
            return [self.message]
class Cling(object):
    """A stupidly simple way to serve static content via WSGI.

    Serve the file at the path PATH_INFO resolves to under ``self.root``.
    The Content-type is guessed from the file extension via the stdlib
    ``mimetypes`` module, falling back to 'text/plain'.
    Serve up the contents of the file or delegate to self.not_found.

    NOTE(review): this is Python 2 code ('<>' operator, old except syntax,
    print statements, rfc822 module) and will not run unmodified on
    Python 3.
    """
    # Chunk size used when streaming file bodies.
    block_size = 16 * 4096
    # File served when a directory path is requested.
    index_file = 'index.html'
    # Stock status responders; instances are shared across requests.
    not_found = StatusApp('404 Not Found')
    not_modified = StatusApp('304 Not Modified', "")
    moved_permanently = StatusApp('301 Moved Permanently')
    method_not_allowed = StatusApp('405 Method Not Allowed')
    def __init__(self, root, **kw):
        """Just set the root and any other attribs passes via **kw."""
        self.root = root
        # Allow per-instance overrides of the class attributes above.
        for k, v in kw.iteritems():
            setattr(self, k, v)
    def __call__(self, environ, start_response):
        """Respond to a request when called in the usual WSGI way."""
        # Only safe, read-only methods are supported.
        if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
            headers = [('Allow', 'GET, HEAD')]
            return self.method_not_allowed(environ, start_response, headers)
        path_info = environ.get('PATH_INFO', '')
        full_path = self._full_path(path_info)
        # Reject paths that escape the document root (e.g. via '..').
        if not self._is_under_root(full_path):
            return self.not_found(environ, start_response)
        if path.isdir(full_path):
            # Directory without a trailing slash: redirect so relative
            # links resolve correctly ('<>' is Python 2 for '!=').
            if full_path[-1] <> '/' or full_path == self.root:
                location = util.request_uri(environ, include_query=False) + '/'
                if environ.get('QUERY_STRING'):
                    location += '?' + environ.get('QUERY_STRING')
                headers = [('Location', location)]
                return self.moved_permanently(environ, start_response, headers)
            else:
                # Directory with slash: serve its index file instead.
                full_path = self._full_path(path_info + self.index_file)
        content_type = self._guess_type(full_path)
        try:
            etag, last_modified = self._conditions(full_path, environ)
            headers = [('Date', rfc822.formatdate(time.time())),
                       ('Last-Modified', last_modified),
                       ('ETag', etag)]
            # Conditional GET: honour If-Modified-Since ...
            if_modified = environ.get('HTTP_IF_MODIFIED_SINCE')
            if if_modified and (rfc822.parsedate(if_modified)
                                >= rfc822.parsedate(last_modified)):
                return self.not_modified(environ, start_response, headers)
            # ... and If-None-Match (etag) validators.
            if_none = environ.get('HTTP_IF_NONE_MATCH')
            if if_none and (if_none == '*' or etag in if_none):
                return self.not_modified(environ, start_response, headers)
            file_like = self._file_like(full_path)
            headers.append(('Content-Type', content_type))
            start_response("200 OK", headers)
            if environ['REQUEST_METHOD'] == 'GET':
                return self._body(full_path, environ, file_like)
            else:
                # HEAD: headers only, empty body.
                return ['']
        except (IOError, OSError), e:
            # Missing/unreadable file maps to 404 (error is printed, not logged).
            print e
            return self.not_found(environ, start_response)
    def _full_path(self, path_info):
        """Return the full path from which to read."""
        return self.root + path_info
    def _is_under_root(self, full_path):
        """Guard against arbitrary file retrieval."""
        # Compare normalized absolute paths with a trailing separator so
        # '/root-evil' does not pass as being under '/root'.
        if (path.abspath(full_path) + path.sep)\
            .startswith(path.abspath(self.root) + path.sep):
            return True
        else:
            return False
    def _guess_type(self, full_path):
        """Guess the mime type using the mimetypes module."""
        return mimetypes.guess_type(full_path)[0] or 'text/plain'
    def _conditions(self, full_path, environ):
        """Return a tuple of etag, last_modified by mtime from stat."""
        mtime = stat(full_path).st_mtime
        return str(mtime), rfc822.formatdate(mtime)
    def _file_like(self, full_path):
        """Return the appropriate file object."""
        return open(full_path, 'rb')
    def _body(self, full_path, environ, file_like):
        """Return an iterator over the body of the response."""
        # Prefer the server-provided wsgi.file_wrapper (e.g. sendfile).
        way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
        return way_to_send(file_like, self.block_size)
def iter_and_close(file_like, block_size):
    """Yield file contents by block then close the file.

    Fallback body iterator used when the WSGI server supplies no
    ``wsgi.file_wrapper``.  (Python 2 generator style: raising
    StopIteration inside the generator terminates it.)
    """
    while 1:
        try:
            block = file_like.read(block_size)
            if block: yield block
            else: raise StopIteration
        except StopIteration, si:
            # Ensure the file handle is released when iteration ends.
            file_like.close()
            return
def cling_wrap(package_name, dir_name, **kw):
    """Return a Cling that serves from the given package and dir_name.

    This uses pkg_resources.resource_filename which is not the
    recommended way, since it extracts the files.

    I think this works fine unless you have some _very_ serious
    requirements for static content, in which case you probably
    shouldn't be serving it through a WSGI app, IMHO. YMMV.
    """
    # Extra **kw is forwarded to Cling (e.g. index_file, block_size).
    resource = Requirement.parse(package_name)
    return Cling(resource_filename(resource, dir_name), **kw)
def command():
    """CLI entry point: serve DIR on HOST:PORT (defaults 0.0.0.0:8888)."""
    parser = OptionParser(usage="%prog DIR [HOST][:][PORT]",
                          version="static 0.3.6")
    options, args = parser.parse_args()
    if len(args) in (1, 2):
        if len(args) == 2:
            # Second positional arg may be HOST, PORT, or HOST:PORT.
            parts = args[1].split(":")
            if len(parts) == 1:
                host = parts[0]
                port = None
            elif len(parts) == 2:
                host, port = parts
            else:
                sys.exit("Invalid host:port specification.")
        elif len(args) == 1:
            host, port = None, None
        # Fall back to listen-everywhere and the default port.
        if not host:
            host = '0.0.0.0'
        if not port:
            port = 8888
        try:
            port = int(port)
        except:
            sys.exit("Invalid host:port specification.")
        app = Cling(args[0])
        try:
            make_server(host, port, app).serve_forever()
        except KeyboardInterrupt, ki:
            print "Cio, baby!"
        except:
            sys.exit("Problem initializing server.")
    else:
        # Wrong argument count: show usage and fail.
        parser.print_help(sys.stderr)
        sys.exit(1)
def test():
    """Serve the current working directory on :8888 with WSGI validation."""
    from wsgiref.validate import validator
    # The validator wrapper checks WSGI-spec compliance of Cling's output.
    app = Cling(getcwd())
    try:
        print "Serving " + getcwd() + " to http://localhost:8888"
        make_server('0.0.0.0', 8888, validator(app)).serve_forever()
    except KeyboardInterrupt, ki:
        print ""
        print "Ciao, baby!"
# Running this module directly serves the current directory for testing.
if __name__ == '__main__':
    test()
| bsd-3-clause |
ledtvavs/repository.ledtv | script.module.urlresolver/lib/urlresolver/plugins/anyfiles.py | 7 | 2665 | """
Copyright (C) 2014 smokdpi
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class AnyFilesResolver(UrlResolver):
name = "anyfiles"
domains = ["anyfiles.pl"]
pattern = '(?://|\.)(anyfiles\.pl)/.*?(?:id=|v=|/)([0-9]+)'
def __init__(self):
self.net = common.Net()
self.user_agent = common.IE_USER_AGENT
self.headers = {'User-Agent': self.user_agent}
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
hostname = urlparse.urlparse(web_url).hostname
self.headers['Referer'] = web_url
response = self.net.http_GET(web_url, headers=self.headers)
response_headers = response.get_headers(as_dict=True)
cookie = response_headers.get('Set-Cookie')
if cookie:
self.headers.update({'Cookie': cookie.split(';')[0]})
html = response.content
for match in re.finditer('''<script[^>]*src=["']([^'"]+)''', html):
js_html = self.__get_js(match.group(1), self.headers, hostname)
match = re.search('''var\s+source\s*=\s*['"](http.*?mp4)''', js_html)
if match:
return match.group(1) + helpers.append_headers(self.headers)
else:
raise ResolverError('File not found')
def __get_js(self, js_url, headers, hostname):
js = ''
if not js_url.startswith('http'):
base_url = 'http://' + hostname
js_url = urlparse.urljoin(base_url, js_url)
if hostname in js_url:
js_url = js_url.replace('&', '&')
common.logger.log('Getting JS: |%s| - |%s|' % (js_url, headers))
js = self.net.http_GET(js_url, headers=headers).content
return js
def get_url(self, host, media_id):
return "http://anyfiles.pl/w.jsp?id=%s&width=640&height=360&start=0&skin=0&label=false&autostart=false" % (media_id)
| gpl-3.0 |
so0k/zulip | zproject/wsgi.py | 125 | 1178 | """
WSGI config for zulip project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "zproject.settings")
# Because import_module does not correctly handle safe circular imports we
# need to import zerver.models first before the middleware tries to import it.
import zerver.models
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| apache-2.0 |
daviddao/luminosity | sklearn-server/flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85, # 40
78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253, # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49, # 60
66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253, # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
9, 8, 20, 16, 3, 2, 24, 14, 22, 1, 25, 15, 4, 11, 6, 23,
12, 19, 13, 26, 18, 27, 21, 17, 7, 10, 5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Aggregate model descriptor: ties the character-order map and the bigram
# precedence matrix above to the charset's metadata (the statistics in the
# comments preceding the tables ground mTypicalPositiveRatio).
Win1255HebrewModel = {
    'charToOrderMap': win1255_CharToOrderMap,
    'precedenceMatrix': HebrewLangModel,
    'mTypicalPositiveRatio': 0.984004,
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
| bsd-3-clause |
LUTAN/tensorflow | tensorflow/python/training/moving_averages_test.py | 73 | 15366 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional test for moving_averages.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import moving_averages
class MovingAveragesTest(test.TestCase):
  """Tests for the free functions assign_moving_average and
  weighted_moving_average in moving_averages.py."""
  def testAssignMovingAverageWithoutZeroDebias(self):
    """zero_debias=False applies the raw update var = var*decay + val*(1-decay)."""
    with self.test_session():
      var = variables.Variable([10.0, 11.0])
      val = constant_op.constant([1.0, 2.0], dtypes.float32)
      decay = 0.25
      assign = moving_averages.assign_moving_average(
          var, val, decay, zero_debias=False)
      variables.global_variables_initializer().run()
      self.assertAllClose([10.0, 11.0], var.eval())
      assign.op.run()
      self.assertAllClose(
          [10.0 * 0.25 + 1.0 * (1.0 - 0.25), 11.0 * 0.25 + 2.0 * (1.0 - 0.25)],
          var.eval())
  def testAssignMovingAverage(self):
    """Default zero-debias: the first update of a zero-initialized average
    is scaled by 1/(1 - decay**t), as the expected expressions below show."""
    with self.test_session():
      var = variables.Variable([0.0, 0.0])
      val = constant_op.constant([1.0, 2.0], dtypes.float32)
      decay = 0.25
      assign = moving_averages.assign_moving_average(var, val, decay)
      variables.global_variables_initializer().run()
      self.assertAllClose([0.0, 0.0], var.eval())
      assign.op.run()
      self.assertAllClose([
          1.0 * (1.0 - 0.25) / (1 - 0.25), 2.0 * (1.0 - 0.25) / (1 - 0.25)
      ], var.eval())
  def testWeightedMovingAverage(self):
    """weighted_moving_average tracks separate EMAs of val*weight (numerator)
    and weight (denominator) and returns their ratio."""
    with self.test_session() as sess:
      decay = 0.5
      weight = array_ops.placeholder(dtypes.float32, [])
      val = array_ops.placeholder(dtypes.float32, [])
      wma = moving_averages.weighted_moving_average(val, decay, weight)
      variables.global_variables_initializer().run()
      # Get the first weighted moving average.
      val_1 = 3.0
      weight_1 = 4.0
      wma_array = sess.run(wma, feed_dict={val: val_1, weight: weight_1})
      numerator_1 = val_1 * weight_1 * (1.0 - decay)
      denominator_1 = weight_1 * (1.0 - decay)
      self.assertAllClose(numerator_1 / denominator_1, wma_array)
      # Get the second weighted moving average.
      val_2 = 11.0
      weight_2 = 22.0
      wma_array = sess.run(wma, feed_dict={val: val_2, weight: weight_2})
      numerator_2 = numerator_1 * decay + val_2 * weight_2 * (1.0 - decay)
      denominator_2 = denominator_1 * decay + weight_2 * (1.0 - decay)
      self.assertAllClose(numerator_2 / denominator_2, wma_array)
def _Repeat(value, dim):
if dim == 1:
return value
return [value] * dim
class ExponentialMovingAverageTest(test.TestCase):
  """Tests for the ExponentialMovingAverage class."""
  def _CheckDecay(self, ema, actual_decay, dim):
    """Applies `ema` to two variables and one plain tensor of width `dim`,
    then checks two consecutive updates against hand-computed EMA values.
    `actual_decay` is the effective decay after any num_updates adjustment."""
    def _Scale(dk, steps):
      # Zero-debias divides by (1 - decay**steps); otherwise the scale is 1.
      if ema._zero_debias:
        return 1 - dk**steps
      else:
        return 1
    tens = _Repeat(10.0, dim)
    thirties = _Repeat(30.0, dim)
    var0 = variables.Variable(tens, name="v0")
    var1 = variables.Variable(thirties, name="v1")
    variables.global_variables_initializer().run()
    # Note that tensor2 is not a Variable but just a plain Tensor resulting
    # from the sum operation.
    tensor2 = var0 + var1
    update = ema.apply([var0, var1, tensor2])
    avg0 = ema.average(var0)
    avg1 = ema.average(var1)
    avg2 = ema.average(tensor2)
    self.assertItemsEqual([var0, var1], variables.moving_average_variables())
    self.assertFalse(avg0 in variables.trainable_variables())
    self.assertFalse(avg1 in variables.trainable_variables())
    self.assertFalse(avg2 in variables.trainable_variables())
    variables.global_variables_initializer().run()
    self.assertEqual("v0/ExponentialMovingAverage:0", avg0.name)
    self.assertEqual("v1/ExponentialMovingAverage:0", avg1.name)
    self.assertEqual("add/ExponentialMovingAverage:0", avg2.name)
    # Check initial values.
    self.assertAllClose(tens, var0.eval())
    self.assertAllClose(thirties, var1.eval())
    self.assertAllClose(_Repeat(10.0 + 30.0, dim), tensor2.eval())
    # Check that averages are initialized correctly.
    self.assertAllClose(tens, avg0.eval())
    self.assertAllClose(thirties, avg1.eval())
    # Note that averages of Tensor's initialize to zeros_like since no value
    # of the Tensor is known because the Op has not been run (yet).
    self.assertAllClose(_Repeat(0.0, dim), avg2.eval())
    # Update the averages and check.
    update.run()
    dk = actual_decay
    expected = _Repeat(10.0 * dk + 10.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat(30.0 * dk + 30.0 * (1 - dk), dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(0.0 * dk + (10.0 + 30.0) * (1 - dk) / _Scale(dk, 1), dim)
    self.assertAllClose(expected, avg2.eval())
    # Again, update the averages and check.
    update.run()
    expected = _Repeat((10.0 * dk + 10.0 * (1 - dk)) * dk + 10.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg0.eval())
    expected = _Repeat((30.0 * dk + 30.0 * (1 - dk)) * dk + 30.0 * (1 - dk),
                       dim)
    self.assertAllClose(expected, avg1.eval())
    expected = _Repeat(((0.0 * dk + (10.0 + 30.0) * (1 - dk)) * dk +
                        (10.0 + 30.0) * (1 - dk)) / _Scale(dk, 2), dim)
    self.assertAllClose(expected, avg2.eval())
  def testAverageVariablesNoNumUpdates_Scalar(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25)
      self._CheckDecay(ema, actual_decay=0.25, dim=1)
  def testAverageVariablesNoNumUpdates_Scalar_Debias(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
      self._CheckDecay(ema, actual_decay=0.25, dim=1)
  def testAverageVariablesNoNumUpdates_Vector(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25)
      self._CheckDecay(ema, actual_decay=0.25, dim=5)
  def testAverageVariablesNoNumUpdates_Vector_Debias(self):
    with self.test_session():
      ema = moving_averages.ExponentialMovingAverage(0.25, zero_debias=True)
      self._CheckDecay(ema, actual_decay=0.25, dim=5)
  def testAverageVariablesNumUpdates_Scalar(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
      self._CheckDecay(ema, actual_decay=0.181818, dim=1)
  def testAverageVariablesNumUpdates_Scalar_Debias(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(
          0.25, num_updates=1, zero_debias=True)
      self._CheckDecay(ema, actual_decay=0.181818, dim=1)
  def testAverageVariablesNumUpdates_Vector(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(0.25, num_updates=1)
      self._CheckDecay(ema, actual_decay=0.181818, dim=5)
  def testAverageVariablesNumUpdates_Vector_Debias(self):
    with self.test_session():
      # With num_updates 1, the decay applied is 0.1818
      ema = moving_averages.ExponentialMovingAverage(
          0.25, num_updates=1, zero_debias=True)
      self._CheckDecay(ema, actual_decay=0.181818, dim=5)
  def testAverageVariablesWithControlDeps(self):
    """apply() inside a control_dependencies scope: the created average
    variable must not inherit those control inputs."""
    with self.test_session() as sess:
      v0 = variables.Variable(0, name="v0")
      add_to_v0 = v0.assign_add(1)
      v1 = variables.Variable([10.0], name="v1")
      assign_to_v1 = v1.assign([20.0])
      ema = moving_averages.ExponentialMovingAverage(0.25)
      with ops.control_dependencies([add_to_v0]):
        ema_op = ema.apply([v1])
      # the moving average of v1 should not have any control inputs
      v1_avg = ema.average(v1)
      self.assertEqual([], v1_avg.initializer.control_inputs)
      self.assertEqual([], v1_avg.value().op.control_inputs)
      # NOTE(review): the next line duplicates the previous assertion --
      # presumably one of the two was meant to check a different op.
      self.assertEqual([], v1_avg.value().op.control_inputs)
      # We should be able to initialize v1_avg before v0.
      sess.run(v1_avg.initializer)
      sess.run(v0.initializer)
      self.assertEqual([10.0], sess.run(v1_avg))
      # running ema_op should add to v0 (in addition to updating v1_avg)
      sess.run(assign_to_v1)
      sess.run(ema_op)
      self.assertEqual(1, sess.run(v0))
      self.assertEqual([17.5], sess.run(v1_avg))
  def averageVariablesNamesHelper(self, zero_debias):
    """Checks average_name()/variables_to_restore() naming for the given
    zero_debias setting."""
    with self.test_session():
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(30.0, name="v1")
      # Add a non-trainable variable.
      v2 = variables.Variable(20.0, name="v2", trainable=False)
      tensor2 = v0 + v1
      ema = moving_averages.ExponentialMovingAverage(
          0.25, zero_debias=zero_debias, name="foo")
      self.assertEqual("v0/foo", ema.average_name(v0))
      self.assertEqual("v1/foo", ema.average_name(v1))
      self.assertEqual("add/foo", ema.average_name(tensor2))
      ema.apply([v0, v1, tensor2])
      vars_to_restore = ema.variables_to_restore()
      # vars_to_restore should contain the following:
      # {v0/foo : v0,
      #  v1/foo : v1,
      #  add/foo : add/foo,
      #  v2 : v2}
      expected_names = [
          ema.average_name(v0), ema.average_name(v1), ema.average_name(tensor2),
          v2.op.name
      ]
      if zero_debias:
        # vars_to_restore should also contain the following:
        # {add/foo/biased: add/foo/biased,
        #  add/foo/local_step: add/foo/local_step}
        expected_names += [
            ema.average_name(tensor2) + "/biased",
            ema.average_name(tensor2) + "/local_step"
        ]
      self.assertEqual(sorted(vars_to_restore.keys()), sorted(expected_names))
      self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
      self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
      self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name)
  def testAverageVariablesNames(self):
    self.averageVariablesNamesHelper(zero_debias=True)
  def testAverageVariablesNamesNoDebias(self):
    self.averageVariablesNamesHelper(zero_debias=False)
  def averageVariablesNamesRespectScopeHelper(self, zero_debias):
    """Same as averageVariablesNamesHelper, but creates the EMA inside a
    variable scope so average names are prefixed with that scope."""
    # See discussion on #2740.
    with self.test_session():
      with variable_scope.variable_scope("scope1"):
        v0 = variables.Variable(10.0, name="v0")
        v1 = variables.Variable(30.0, name="v1")
        # Add a non-trainable variable.
        v2 = variables.Variable(20.0, name="v2", trainable=False)
        tensor2 = v0 + v1
      with variable_scope.variable_scope("scope2"):
        ema = moving_averages.ExponentialMovingAverage(
            0.25, zero_debias=zero_debias, name="foo")
        self.assertEqual("scope2/scope1/v0/foo", ema.average_name(v0))
        self.assertEqual("scope2/scope1/v1/foo", ema.average_name(v1))
        self.assertEqual("scope2/scope1/add/foo", ema.average_name(tensor2))
        ema.apply([v0, v1, tensor2])
        vars_to_restore = ema.variables_to_restore()
        # vars_to_restore should contain the following:
        # {scope2/scope1/v0/foo : v0,
        #  scope2/scope1/v1/foo : v1,
        #  scope2/scope1/add/foo : add/foo,
        #  scope1/v2 : v2}
        expected_names = [
            ema.average_name(v0), ema.average_name(v1),
            ema.average_name(tensor2), v2.op.name
        ]
        if zero_debias:
          # vars_to_restore should also contain the following:
          # {scope2/scope2/scope1/add/foo/biased: add/foo/biased,
          #  scope2/scope2/scope1/add/foo/local_step: add/foo/local_step}
          sc = "scope2/"
          expected_names += [
              sc + ema.average_name(tensor2) + "/biased",
              sc + ema.average_name(tensor2) + "/local_step"
          ]
        self.assertEqual(sorted(vars_to_restore.keys()), sorted(expected_names))
        self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
        self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
        self.assertEqual(
            ema.average_name(tensor2), ema.average(tensor2).op.name)
  def testAverageVariablesNamesRespectScope(self):
    self.averageVariablesNamesRespectScopeHelper(zero_debias=True)
  def testAverageVariablesNamesRespectScopeNoDebias(self):
    self.averageVariablesNamesRespectScopeHelper(zero_debias=False)
  def testSubsetAverageVariablesNames(self):
    """variables_to_restore(subset): non-averaged variables map under their
    own names."""
    with self.test_session():
      v0 = variables.Variable(10.0, name="v0")
      v1 = variables.Variable(30.0, name="v1")
      # Add a non-trainable variable.
      v2 = variables.Variable(20.0, name="v2", trainable=False)
      tensor2 = v0 + v1
      ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
      self.assertEqual("v0/foo_avg", ema.average_name(v0))
      self.assertEqual("v1/foo_avg", ema.average_name(v1))
      self.assertEqual("add/foo_avg", ema.average_name(tensor2))
      vars_to_restore = ema.variables_to_restore([v0, tensor2])
      # vars_to_restore should contain the following:
      # {v0/foo_avg : v0,
      #  add/foo_avg : add
      #  v1 : v1,
      #  v2 : v2}
      self.assertEqual(
          sorted(vars_to_restore.keys()),
          sorted([
              ema.average_name(v0), ema.average_name(tensor2), v1.op.name,
              v2.op.name
          ]))
      ema.apply([v0, v1, tensor2])
      self.assertEqual(ema.average_name(v0), ema.average(v0).op.name)
      self.assertEqual(ema.average_name(v1), ema.average(v1).op.name)
      self.assertEqual(ema.average_name(tensor2), ema.average(tensor2).op.name)
  def testAverageVariablesDeviceAssignment(self):
    """Averages are placed with their source variables, not with the device
    scope active when apply() is called."""
    with ops.device("/job:dev_v0"):
      v0 = variables.Variable(10.0, name="v0")
    with ops.device("/job:dev_v1"):
      v1 = gen_state_ops._variable(
          shape=[1],
          dtype=dtypes.float32,
          name="v1",
          container="",
          shared_name="")
      v1.set_shape([1])
    tensor2 = v0 + v1
    ema = moving_averages.ExponentialMovingAverage(0.25, name="foo_avg")
    with ops.device("/job:default"):
      ema.apply([v0, v1, tensor2])
    self.assertDeviceEqual("/job:dev_v0", ema.average(v0).device)
    self.assertDeviceEqual("/job:dev_v1", ema.average(v1).device)
    # However, the colocation property is maintained.
    self.assertEqual([b"loc:@v1"], ema.average(v1).op.colocation_groups())
    self.assertDeviceEqual("/job:default", ema.average(tensor2).device)
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
mach0/QGIS | python/gui/auto_additions/qgsadvanceddigitizingdockwidget.py | 10 | 1581 | # The following has been generated automatically from src/gui/qgsadvanceddigitizingdockwidget.h
QgsAdvancedDigitizingDockWidget.CadCapacities.baseClass = QgsAdvancedDigitizingDockWidget
CadCapacities = QgsAdvancedDigitizingDockWidget # dirty hack since SIP seems to introduce the flags in module
# monkey patching scoped based enum
QgsAdvancedDigitizingDockWidget.NoConstraint = QgsAdvancedDigitizingDockWidget.AdditionalConstraint.NoConstraint
QgsAdvancedDigitizingDockWidget.NoConstraint.is_monkey_patched = True
QgsAdvancedDigitizingDockWidget.AdditionalConstraint.NoConstraint.__doc__ = "No additional constraint"
QgsAdvancedDigitizingDockWidget.Perpendicular = QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Perpendicular
QgsAdvancedDigitizingDockWidget.Perpendicular.is_monkey_patched = True
QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Perpendicular.__doc__ = "Perpendicular"
QgsAdvancedDigitizingDockWidget.Parallel = QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Parallel
QgsAdvancedDigitizingDockWidget.Parallel.is_monkey_patched = True
QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Parallel.__doc__ = "Parallel"
QgsAdvancedDigitizingDockWidget.AdditionalConstraint.__doc__ = 'Additional constraints which can be enabled\n\n' + '* ``NoConstraint``: ' + QgsAdvancedDigitizingDockWidget.AdditionalConstraint.NoConstraint.__doc__ + '\n' + '* ``Perpendicular``: ' + QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Perpendicular.__doc__ + '\n' + '* ``Parallel``: ' + QgsAdvancedDigitizingDockWidget.AdditionalConstraint.Parallel.__doc__
# --
| gpl-2.0 |
mupi/tecsaladeaula | core/migrations/0013_auto__chg_field_course_publication.py | 3 | 10297 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: makes Course.publication nullable."""
    def forwards(self, orm):
        """Apply the migration: allow NULL in core_course.publication."""
        # Changing field 'Course.publication'
        db.alter_column(u'core_course', 'publication', self.gf('django.db.models.fields.DateField')(null=True))
    def backwards(self, orm):
        """Revert the migration: restore NOT NULL, backfilling existing NULL
        rows with the arbitrary default 1969-12-31."""
        # Changing field 'Course.publication'
        db.alter_column(u'core_course', 'publication', self.gf('django.db.models.fields.DateField')(default=datetime.datetime(1969, 12, 31, 0, 0)))
    # Frozen ORM snapshot generated by South; used to build the `orm` argument
    # above.  Machine-generated -- do not edit by hand.
    models = {
        u'accounts.timtecuser': {
            'Meta': {'object_name': 'TimtecUser'},
            'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'picture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'site': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'activities.activity': {
            'Meta': {'ordering': "['-id']", 'object_name': 'Activity'},
            'data': ('jsonfield.fields.JSONField', [], {}),
            'expected': ('jsonfield.fields.JSONField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'core.course': {
            'Meta': {'object_name': 'Course'},
            'abstract': ('django.db.models.fields.TextField', [], {}),
            'application': ('django.db.models.fields.TextField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'intro_video': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Video']", 'null': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'professors': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'professorcourse_set'", 'symmetrical': 'False', 'through': u"orm['core.CourseProfessor']", 'to': u"orm['accounts.TimtecUser']"}),
            'pronatec': ('django.db.models.fields.TextField', [], {}),
            'publication': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
            'requirement': ('django.db.models.fields.TextField', [], {}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '64'}),
            'structure': ('django.db.models.fields.TextField', [], {}),
            'students': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'studentcourse_set'", 'symmetrical': 'False', 'through': u"orm['core.CourseStudent']", 'to': u"orm['accounts.TimtecUser']"}),
            'workload': ('django.db.models.fields.TextField', [], {})
        },
        u'core.courseprofessor': {
            'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseProfessor'},
            'biography': ('django.db.models.fields.TextField', [], {}),
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Course']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'role': ('django.db.models.fields.CharField', [], {'default': "'instructor'", 'max_length': '128'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"})
        },
        u'core.coursestudent': {
            'Meta': {'unique_together': "(('user', 'course'),)", 'object_name': 'CourseStudent'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Course']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"})
        },
        u'core.lesson': {
            'Meta': {'ordering': "['position']", 'object_name': 'Lesson'},
            'course': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Course']"}),
            'desc': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'notes': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
            'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'draft'", 'max_length': '64'})
        },
        u'core.studentprogress': {
            'Meta': {'unique_together': "(('user', 'unit'),)", 'object_name': 'StudentProgress'},
            'complete': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_access': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'unit': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'progress'", 'to': u"orm['core.Unit']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.TimtecUser']"})
        },
        u'core.unit': {
            'Meta': {'ordering': "['lesson', 'position']", 'object_name': 'Unit'},
            'activity': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'units'", 'null': 'True', 'to': u"orm['activities.Activity']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'lesson': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'units'", 'to': u"orm['core.Lesson']"}),
            'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'side_notes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
            'video': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['core.Video']", 'null': 'True', 'blank': 'True'})
        },
        u'core.video': {
            'Meta': {'object_name': 'Video'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'youtube_id': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }
complete_apps = ['core'] | agpl-3.0 |
KyleAMoore/KanjiNani | Android/.buildozer/android/platform/build/build/python-installs/KanjiNani/kivy/core/audio/audio_gstplayer.py | 17 | 2653 | '''
Audio Gstplayer
===============
.. versionadded:: 1.8.0
Implementation of a VideoBase with Kivy :class:`~kivy.lib.gstplayer.GstPlayer`
This player is the preferred player, using Gstreamer 1.0, working on both
Python 2 and 3.
'''
from kivy.lib.gstplayer import GstPlayer, get_gst_version
from kivy.core.audio import Sound, SoundLoader
from kivy.logger import Logger
from kivy.compat import PY2
from kivy.clock import Clock
from os.path import realpath
if PY2:
from urllib import pathname2url
else:
from urllib.request import pathname2url
Logger.info('AudioGstplayer: Using Gstreamer {}'.format(
'.'.join(map(str, get_gst_version()))))
def _on_gstplayer_message(mtype, message):
    """Forward a GstPlayer bus message to the Kivy logger at the matching
    severity; messages with an unknown type are silently ignored."""
    log = {
        'error': Logger.error,
        'warning': Logger.warning,
        'info': Logger.info,
    }.get(mtype)
    if log is not None:
        log('AudioGstplayer: {}'.format(message))
class SoundGstplayer(Sound):
    """Sound provider that delegates playback to
    :class:`kivy.lib.gstplayer.GstPlayer` (GStreamer 1.0)."""
    @staticmethod
    def extensions():
        # File extensions this provider registers itself for.
        return ('wav', 'ogg', 'mp3', 'm4a', 'flac', 'mp4')
    def __init__(self, **kwargs):
        # `player` stays None until load() creates the GstPlayer instance.
        self.player = None
        super(SoundGstplayer, self).__init__(**kwargs)
    def _on_gst_eos_sync(self):
        # Invoked from the GStreamer thread; hop back to the Kivy main
        # thread before touching player state.
        Clock.schedule_once(self._on_gst_eos, 0)
    def _on_gst_eos(self, *dt):
        # End of stream: restart playback when looping, otherwise stop.
        if self.loop:
            self.player.stop()
            self.player.play()
        else:
            self.stop()
    def load(self):
        """(Re)create the underlying GstPlayer for the current filename."""
        self.unload()
        uri = self._get_uri()
        self.player = GstPlayer(uri, None, self._on_gst_eos_sync,
                                _on_gstplayer_message)
        self.player.load()
    def play(self):
        # we need to set the volume everytime, it seems that stopping + playing
        # the sound reset the volume.
        self.player.set_volume(self.volume)
        self.player.play()
        super(SoundGstplayer, self).play()
    def stop(self):
        self.player.stop()
        super(SoundGstplayer, self).stop()
    def unload(self):
        # Release the GstPlayer (if any) and forget it.
        if self.player:
            self.player.unload()
            self.player = None
    def seek(self, position):
        # GstPlayer.seek() takes a 0..1 fraction of the total duration.
        # NOTE(review): divides by self.length -- raises ZeroDivisionError if
        # the duration is reported as 0; confirm callers guard against that.
        self.player.seek(position / self.length)
    def get_pos(self):
        return self.player.get_position()
    def _get_length(self):
        return self.player.get_duration()
    def on_volume(self, instance, volume):
        # Property observer: keep the GstPlayer volume in sync.
        self.player.set_volume(volume)
    def _get_uri(self):
        """Return a GStreamer-compatible URI for self.filename, adding a
        'file:' scheme for plain local paths."""
        uri = self.filename
        if not uri:
            return
        if '://' not in uri:
            uri = 'file:' + pathname2url(realpath(uri))
        return uri
SoundLoader.register(SoundGstplayer)
| gpl-3.0 |
shoheietzel/proj5-maps | env/lib/python3.6/site-packages/pip/_vendor/requests/packages/chardet/mbcssm.py | 1783 | 19590 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart, eError, eItsMe
# BIG5
# Coding-state-machine model for Big5 (Traditional Chinese).
# BIG5_cls maps each of the 256 possible byte values to a character class
# that is fed into the state machine below.
BIG5_cls = (
    1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as legal value
    1,1,1,1,1,1,0,0, # 08 - 0f
    1,1,1,1,1,1,1,1, # 10 - 17
    1,1,1,0,1,1,1,1, # 18 - 1f
    1,1,1,1,1,1,1,1, # 20 - 27
    1,1,1,1,1,1,1,1, # 28 - 2f
    1,1,1,1,1,1,1,1, # 30 - 37
    1,1,1,1,1,1,1,1, # 38 - 3f
    2,2,2,2,2,2,2,2, # 40 - 47
    2,2,2,2,2,2,2,2, # 48 - 4f
    2,2,2,2,2,2,2,2, # 50 - 57
    2,2,2,2,2,2,2,2, # 58 - 5f
    2,2,2,2,2,2,2,2, # 60 - 67
    2,2,2,2,2,2,2,2, # 68 - 6f
    2,2,2,2,2,2,2,2, # 70 - 77
    2,2,2,2,2,2,2,1, # 78 - 7f
    4,4,4,4,4,4,4,4, # 80 - 87
    4,4,4,4,4,4,4,4, # 88 - 8f
    4,4,4,4,4,4,4,4, # 90 - 97
    4,4,4,4,4,4,4,4, # 98 - 9f
    4,3,3,3,3,3,3,3, # a0 - a7
    3,3,3,3,3,3,3,3, # a8 - af
    3,3,3,3,3,3,3,3, # b0 - b7
    3,3,3,3,3,3,3,3, # b8 - bf
    3,3,3,3,3,3,3,3, # c0 - c7
    3,3,3,3,3,3,3,3, # c8 - cf
    3,3,3,3,3,3,3,3, # d0 - d7
    3,3,3,3,3,3,3,3, # d8 - df
    3,3,3,3,3,3,3,3, # e0 - e7
    3,3,3,3,3,3,3,3, # e8 - ef
    3,3,3,3,3,3,3,3, # f0 - f7
    3,3,3,3,3,3,3,0 # f8 - ff
)
# State-transition table: next = BIG5_st[current_state * classFactor + class].
# eStart/eError/eItsMe come from .constants; bare integers are intermediate
# states.
BIG5_st = (
    eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
    eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,#08-0f
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart#10-17
)
# Character length in bytes for each byte class (index == class number).
Big5CharLenTable = (0, 1, 1, 2, 0)
# Model bundle consumed by the coding state machine for Big5 detection.
Big5SMModel = {'classTable': BIG5_cls,
               'classFactor': 5,
               'stateTable': BIG5_st,
               'charLenTable': Big5CharLenTable,
               'name': 'Big5'}
# CP949
# Coding-state-machine model for CP949 (Korean, a superset of EUC-KR).
# CP949_cls maps each of the 256 possible byte values to a character class.
CP949_cls = (
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,0,0, # 00 - 0f
    1,1,1,1,1,1,1,1, 1,1,1,0,1,1,1,1, # 10 - 1f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 20 - 2f
    1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1, # 30 - 3f
    1,4,4,4,4,4,4,4, 4,4,4,4,4,4,4,4, # 40 - 4f
    4,4,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 50 - 5f
    1,5,5,5,5,5,5,5, 5,5,5,5,5,5,5,5, # 60 - 6f
    5,5,5,5,5,5,5,5, 5,5,5,1,1,1,1,1, # 70 - 7f
    0,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 80 - 8f
    6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6, # 90 - 9f
    6,7,7,7,7,7,7,7, 7,7,7,7,7,8,8,8, # a0 - af
    7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7, # b0 - bf
    7,7,7,7,7,7,9,2, 2,3,2,2,2,2,2,2, # c0 - cf
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # d0 - df
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,2, # e0 - ef
    2,2,2,2,2,2,2,2, 2,2,2,2,2,2,2,0, # f0 - ff
)
# State-transition table: next = CP949_st[current_state * classFactor + class].
CP949_st = (
#cls= 0 1 2 3 4 5 6 7 8 9 # previous state =
    eError,eStart, 3,eError,eStart,eStart, 4, 5,eError, 6, # eStart
    eError,eError,eError,eError,eError,eError,eError,eError,eError,eError, # eError
    eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe, # eItsMe
    eError,eError,eStart,eStart,eError,eError,eError,eStart,eStart,eStart, # 3
    eError,eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 4
    eError,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart,eStart, # 5
    eError,eStart,eStart,eStart,eStart,eError,eError,eStart,eStart,eStart, # 6
)
# Character length in bytes for each byte class (index == class number).
CP949CharLenTable = (0, 1, 2, 0, 1, 1, 2, 2, 0, 2)
# Model bundle consumed by the coding state machine for CP949 detection.
CP949SMModel = {'classTable': CP949_cls,
                'classFactor': 10,
                'stateTable': CP949_st,
                'charLenTable': CP949CharLenTable,
                'name': 'CP949'}
# EUC-JP
EUCJP_cls = (
4,4,4,4,4,4,4,4, # 00 - 07
4,4,4,4,4,4,5,5, # 08 - 0f
4,4,4,4,4,4,4,4, # 10 - 17
4,4,4,5,4,4,4,4, # 18 - 1f
4,4,4,4,4,4,4,4, # 20 - 27
4,4,4,4,4,4,4,4, # 28 - 2f
4,4,4,4,4,4,4,4, # 30 - 37
4,4,4,4,4,4,4,4, # 38 - 3f
4,4,4,4,4,4,4,4, # 40 - 47
4,4,4,4,4,4,4,4, # 48 - 4f
4,4,4,4,4,4,4,4, # 50 - 57
4,4,4,4,4,4,4,4, # 58 - 5f
4,4,4,4,4,4,4,4, # 60 - 67
4,4,4,4,4,4,4,4, # 68 - 6f
4,4,4,4,4,4,4,4, # 70 - 77
4,4,4,4,4,4,4,4, # 78 - 7f
5,5,5,5,5,5,5,5, # 80 - 87
5,5,5,5,5,5,1,3, # 88 - 8f
5,5,5,5,5,5,5,5, # 90 - 97
5,5,5,5,5,5,5,5, # 98 - 9f
5,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,0,5 # f8 - ff
)
EUCJP_st = (
3, 4, 3, 5,eStart,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eStart,eError,eStart,eError,eError,eError,#10-17
eError,eError,eStart,eError,eError,eError, 3,eError,#18-1f
3,eError,eError,eError,eStart,eStart,eStart,eStart#20-27
)
EUCJPCharLenTable = (2, 2, 2, 3, 1, 0)
EUCJPSMModel = {'classTable': EUCJP_cls,
'classFactor': 6,
'stateTable': EUCJP_st,
'charLenTable': EUCJPCharLenTable,
'name': 'EUC-JP'}
# EUC-KR
EUCKR_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,3,3,3, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,3,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
2,2,2,2,2,2,2,2, # e0 - e7
2,2,2,2,2,2,2,2, # e8 - ef
2,2,2,2,2,2,2,2, # f0 - f7
2,2,2,2,2,2,2,0 # f8 - ff
)
EUCKR_st = (
eError,eStart, 3,eError,eError,eError,eError,eError,#00-07
eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,eStart #08-0f
)
EUCKRCharLenTable = (0, 1, 2, 0)
EUCKRSMModel = {'classTable': EUCKR_cls,
'classFactor': 4,
'stateTable': EUCKR_st,
'charLenTable': EUCKRCharLenTable,
'name': 'EUC-KR'}
# EUC-TW
EUCTW_cls = (
2,2,2,2,2,2,2,2, # 00 - 07
2,2,2,2,2,2,0,0, # 08 - 0f
2,2,2,2,2,2,2,2, # 10 - 17
2,2,2,0,2,2,2,2, # 18 - 1f
2,2,2,2,2,2,2,2, # 20 - 27
2,2,2,2,2,2,2,2, # 28 - 2f
2,2,2,2,2,2,2,2, # 30 - 37
2,2,2,2,2,2,2,2, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,2, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,6,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,3,4,4,4,4,4,4, # a0 - a7
5,5,1,1,1,1,1,1, # a8 - af
1,1,1,1,1,1,1,1, # b0 - b7
1,1,1,1,1,1,1,1, # b8 - bf
1,1,3,1,3,3,3,3, # c0 - c7
3,3,3,3,3,3,3,3, # c8 - cf
3,3,3,3,3,3,3,3, # d0 - d7
3,3,3,3,3,3,3,3, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,3,3,3, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,3,3,0 # f8 - ff
)
EUCTW_st = (
eError,eError,eStart, 3, 3, 3, 4,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eStart,eError,#10-17
eStart,eStart,eStart,eError,eError,eError,eError,eError,#18-1f
5,eError,eError,eError,eStart,eError,eStart,eStart,#20-27
eStart,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
EUCTWCharLenTable = (0, 0, 1, 2, 2, 2, 3)
EUCTWSMModel = {'classTable': EUCTW_cls,
'classFactor': 7,
'stateTable': EUCTW_st,
'charLenTable': EUCTWCharLenTable,
'name': 'x-euc-tw'}
# GB2312
GB2312_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
3,3,3,3,3,3,3,3, # 30 - 37
3,3,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,4, # 78 - 7f
5,6,6,6,6,6,6,6, # 80 - 87
6,6,6,6,6,6,6,6, # 88 - 8f
6,6,6,6,6,6,6,6, # 90 - 97
6,6,6,6,6,6,6,6, # 98 - 9f
6,6,6,6,6,6,6,6, # a0 - a7
6,6,6,6,6,6,6,6, # a8 - af
6,6,6,6,6,6,6,6, # b0 - b7
6,6,6,6,6,6,6,6, # b8 - bf
6,6,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
6,6,6,6,6,6,6,6, # e0 - e7
6,6,6,6,6,6,6,6, # e8 - ef
6,6,6,6,6,6,6,6, # f0 - f7
6,6,6,6,6,6,6,0 # f8 - ff
)
GB2312_st = (
eError,eStart,eStart,eStart,eStart,eStart, 3,eError,#00-07
eError,eError,eError,eError,eError,eError,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eError,eError,eStart,#10-17
4,eError,eStart,eStart,eError,eError,eError,eError,#18-1f
eError,eError, 5,eError,eError,eError,eItsMe,eError,#20-27
eError,eError,eStart,eStart,eStart,eStart,eStart,eStart #28-2f
)
# To be accurate, the length of class 6 can be either 2 or 4.
# But it is not necessary to discriminate between the two since
# it is used for frequency analysis only, and we are validing
# each code range there as well. So it is safe to set it to be
# 2 here.
GB2312CharLenTable = (0, 1, 1, 1, 1, 1, 2)
GB2312SMModel = {'classTable': GB2312_cls,
'classFactor': 7,
'stateTable': GB2312_st,
'charLenTable': GB2312CharLenTable,
'name': 'GB2312'}
# Shift_JIS
SJIS_cls = (
1,1,1,1,1,1,1,1, # 00 - 07
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
2,2,2,2,2,2,2,2, # 40 - 47
2,2,2,2,2,2,2,2, # 48 - 4f
2,2,2,2,2,2,2,2, # 50 - 57
2,2,2,2,2,2,2,2, # 58 - 5f
2,2,2,2,2,2,2,2, # 60 - 67
2,2,2,2,2,2,2,2, # 68 - 6f
2,2,2,2,2,2,2,2, # 70 - 77
2,2,2,2,2,2,2,1, # 78 - 7f
3,3,3,3,3,2,2,3, # 80 - 87
3,3,3,3,3,3,3,3, # 88 - 8f
3,3,3,3,3,3,3,3, # 90 - 97
3,3,3,3,3,3,3,3, # 98 - 9f
#0xa0 is illegal in sjis encoding, but some pages does
#contain such byte. We need to be more error forgiven.
2,2,2,2,2,2,2,2, # a0 - a7
2,2,2,2,2,2,2,2, # a8 - af
2,2,2,2,2,2,2,2, # b0 - b7
2,2,2,2,2,2,2,2, # b8 - bf
2,2,2,2,2,2,2,2, # c0 - c7
2,2,2,2,2,2,2,2, # c8 - cf
2,2,2,2,2,2,2,2, # d0 - d7
2,2,2,2,2,2,2,2, # d8 - df
3,3,3,3,3,3,3,3, # e0 - e7
3,3,3,3,3,4,4,4, # e8 - ef
3,3,3,3,3,3,3,3, # f0 - f7
3,3,3,3,3,0,0,0) # f8 - ff
SJIS_st = (
eError,eStart,eStart, 3,eError,eError,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe,eError,eError,eStart,eStart,eStart,eStart #10-17
)
SJISCharLenTable = (0, 1, 1, 2, 0, 0)
SJISSMModel = {'classTable': SJIS_cls,
'classFactor': 6,
'stateTable': SJIS_st,
'charLenTable': SJISCharLenTable,
'name': 'Shift_JIS'}
# UCS2-BE
UCS2BE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2BE_st = (
5, 7, 7,eError, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 6, 6, 6, 6,eError,eError,#10-17
6, 6, 6, 6, 6,eItsMe, 6, 6,#18-1f
6, 6, 6, 6, 5, 7, 7,eError,#20-27
5, 8, 6, 6,eError, 6, 6, 6,#28-2f
6, 6, 6, 6,eError,eError,eStart,eStart #30-37
)
UCS2BECharLenTable = (2, 2, 2, 0, 2, 2)
UCS2BESMModel = {'classTable': UCS2BE_cls,
'classFactor': 6,
'stateTable': UCS2BE_st,
'charLenTable': UCS2BECharLenTable,
'name': 'UTF-16BE'}
# UCS2-LE
UCS2LE_cls = (
0,0,0,0,0,0,0,0, # 00 - 07
0,0,1,0,0,2,0,0, # 08 - 0f
0,0,0,0,0,0,0,0, # 10 - 17
0,0,0,3,0,0,0,0, # 18 - 1f
0,0,0,0,0,0,0,0, # 20 - 27
0,3,3,3,3,3,0,0, # 28 - 2f
0,0,0,0,0,0,0,0, # 30 - 37
0,0,0,0,0,0,0,0, # 38 - 3f
0,0,0,0,0,0,0,0, # 40 - 47
0,0,0,0,0,0,0,0, # 48 - 4f
0,0,0,0,0,0,0,0, # 50 - 57
0,0,0,0,0,0,0,0, # 58 - 5f
0,0,0,0,0,0,0,0, # 60 - 67
0,0,0,0,0,0,0,0, # 68 - 6f
0,0,0,0,0,0,0,0, # 70 - 77
0,0,0,0,0,0,0,0, # 78 - 7f
0,0,0,0,0,0,0,0, # 80 - 87
0,0,0,0,0,0,0,0, # 88 - 8f
0,0,0,0,0,0,0,0, # 90 - 97
0,0,0,0,0,0,0,0, # 98 - 9f
0,0,0,0,0,0,0,0, # a0 - a7
0,0,0,0,0,0,0,0, # a8 - af
0,0,0,0,0,0,0,0, # b0 - b7
0,0,0,0,0,0,0,0, # b8 - bf
0,0,0,0,0,0,0,0, # c0 - c7
0,0,0,0,0,0,0,0, # c8 - cf
0,0,0,0,0,0,0,0, # d0 - d7
0,0,0,0,0,0,0,0, # d8 - df
0,0,0,0,0,0,0,0, # e0 - e7
0,0,0,0,0,0,0,0, # e8 - ef
0,0,0,0,0,0,0,0, # f0 - f7
0,0,0,0,0,0,4,5 # f8 - ff
)
UCS2LE_st = (
6, 6, 7, 6, 4, 3,eError,eError,#00-07
eError,eError,eError,eError,eItsMe,eItsMe,eItsMe,eItsMe,#08-0f
eItsMe,eItsMe, 5, 5, 5,eError,eItsMe,eError,#10-17
5, 5, 5,eError, 5,eError, 6, 6,#18-1f
7, 6, 8, 8, 5, 5, 5,eError,#20-27
5, 5, 5,eError,eError,eError, 5, 5,#28-2f
5, 5, 5,eError, 5,eError,eStart,eStart #30-37
)
UCS2LECharLenTable = (2, 2, 2, 2, 2, 2)
UCS2LESMModel = {'classTable': UCS2LE_cls,
'classFactor': 6,
'stateTable': UCS2LE_st,
'charLenTable': UCS2LECharLenTable,
'name': 'UTF-16LE'}
# UTF-8
UTF8_cls = (
1,1,1,1,1,1,1,1, # 00 - 07 #allow 0x00 as a legal value
1,1,1,1,1,1,0,0, # 08 - 0f
1,1,1,1,1,1,1,1, # 10 - 17
1,1,1,0,1,1,1,1, # 18 - 1f
1,1,1,1,1,1,1,1, # 20 - 27
1,1,1,1,1,1,1,1, # 28 - 2f
1,1,1,1,1,1,1,1, # 30 - 37
1,1,1,1,1,1,1,1, # 38 - 3f
1,1,1,1,1,1,1,1, # 40 - 47
1,1,1,1,1,1,1,1, # 48 - 4f
1,1,1,1,1,1,1,1, # 50 - 57
1,1,1,1,1,1,1,1, # 58 - 5f
1,1,1,1,1,1,1,1, # 60 - 67
1,1,1,1,1,1,1,1, # 68 - 6f
1,1,1,1,1,1,1,1, # 70 - 77
1,1,1,1,1,1,1,1, # 78 - 7f
2,2,2,2,3,3,3,3, # 80 - 87
4,4,4,4,4,4,4,4, # 88 - 8f
4,4,4,4,4,4,4,4, # 90 - 97
4,4,4,4,4,4,4,4, # 98 - 9f
5,5,5,5,5,5,5,5, # a0 - a7
5,5,5,5,5,5,5,5, # a8 - af
5,5,5,5,5,5,5,5, # b0 - b7
5,5,5,5,5,5,5,5, # b8 - bf
0,0,6,6,6,6,6,6, # c0 - c7
6,6,6,6,6,6,6,6, # c8 - cf
6,6,6,6,6,6,6,6, # d0 - d7
6,6,6,6,6,6,6,6, # d8 - df
7,8,8,8,8,8,8,8, # e0 - e7
8,8,8,8,8,9,8,8, # e8 - ef
10,11,11,11,11,11,11,11, # f0 - f7
12,13,13,13,14,15,0,0 # f8 - ff
)
UTF8_st = (
eError,eStart,eError,eError,eError,eError, 12, 10,#00-07
9, 11, 8, 7, 6, 5, 4, 3,#08-0f
eError,eError,eError,eError,eError,eError,eError,eError,#10-17
eError,eError,eError,eError,eError,eError,eError,eError,#18-1f
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#20-27
eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,eItsMe,#28-2f
eError,eError, 5, 5, 5, 5,eError,eError,#30-37
eError,eError,eError,eError,eError,eError,eError,eError,#38-3f
eError,eError,eError, 5, 5, 5,eError,eError,#40-47
eError,eError,eError,eError,eError,eError,eError,eError,#48-4f
eError,eError, 7, 7, 7, 7,eError,eError,#50-57
eError,eError,eError,eError,eError,eError,eError,eError,#58-5f
eError,eError,eError,eError, 7, 7,eError,eError,#60-67
eError,eError,eError,eError,eError,eError,eError,eError,#68-6f
eError,eError, 9, 9, 9, 9,eError,eError,#70-77
eError,eError,eError,eError,eError,eError,eError,eError,#78-7f
eError,eError,eError,eError,eError, 9,eError,eError,#80-87
eError,eError,eError,eError,eError,eError,eError,eError,#88-8f
eError,eError, 12, 12, 12, 12,eError,eError,#90-97
eError,eError,eError,eError,eError,eError,eError,eError,#98-9f
eError,eError,eError,eError,eError, 12,eError,eError,#a0-a7
eError,eError,eError,eError,eError,eError,eError,eError,#a8-af
eError,eError, 12, 12, 12,eError,eError,eError,#b0-b7
eError,eError,eError,eError,eError,eError,eError,eError,#b8-bf
eError,eError,eStart,eStart,eStart,eStart,eError,eError,#c0-c7
eError,eError,eError,eError,eError,eError,eError,eError #c8-cf
)
UTF8CharLenTable = (0, 1, 0, 0, 0, 0, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6)
UTF8SMModel = {'classTable': UTF8_cls,
'classFactor': 16,
'stateTable': UTF8_st,
'charLenTable': UTF8CharLenTable,
'name': 'UTF-8'}
| mit |
fpadoan/metasyntactic | protobuf-2.1.0/python/stubout.py | 671 | 4940 | #!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is used for testing. The original is at:
# http://code.google.com/p/pymox/
class StubOutForTesting:
  """Utility for temporarily replacing ("stubbing out") attributes in tests.

  Sample Usage:

    You want os.path.exists() to always return true during testing.

       stubs = StubOutForTesting()
       stubs.Set(os.path, 'exists', lambda x: 1)
         ...
       stubs.UnsetAll()

    The above changes os.path.exists into a lambda that returns 1.  Once
    the ... part of the code finishes, the UnsetAll() looks up the old
    value of os.path.exists and restores it.
  """

  def __init__(self):
    self.cache = []   # (parent, old_child, child_name) triples saved by Set()
    self.stubs = []   # (orig_obj, attr_name, orig_attr) triples saved by SmartSet()

  def __del__(self):
    # Best-effort restoration in case the test forgot to clean up.
    self.SmartUnsetAll()
    self.UnsetAll()

  def SmartSet(self, obj, attr_name, new_attr):
    """Replace obj.attr_name with new_attr.

    This method is smart and works at the module, class, and instance level
    while preserving proper inheritance. It will not stub out C types however
    unless that has been explicitly allowed by the type.

    This method supports the case where attr_name is a staticmethod or a
    classmethod of obj.

    Notes:
      - If obj is an instance, then it is its class that will actually be
        stubbed. Note that the method Set() does not do that: if obj is
        an instance, it (and not its class) will be stubbed.
      - The stubbing is using the builtin getattr and setattr. So, the __get__
        and __set__ will be called when stubbing (TODO: A better idea would
        probably be to manipulate obj.__dict__ instead of getattr() and
        setattr()).

    Raises AttributeError if the attribute cannot be found.
    """
    # 'attr_name in obj.__dict__' replaces the Python-2-only
    # dict.has_key(); behaviour is identical and it also runs on Python 3.
    if (inspect.ismodule(obj) or
        (not inspect.isclass(obj) and attr_name in obj.__dict__)):
      orig_obj = obj
      orig_attr = getattr(obj, attr_name)

    else:
      if not inspect.isclass(obj):
        mro = list(inspect.getmro(obj.__class__))
      else:
        mro = list(inspect.getmro(obj))

      mro.reverse()

      orig_attr = None

      # Walk the MRO from base to derived looking for the attribute; the
      # last class to provide it is recorded as the stubbing target.
      for cls in mro:
        try:
          orig_obj = cls
          orig_attr = getattr(obj, attr_name)
        except AttributeError:
          continue

      if orig_attr is None:
        raise AttributeError("Attribute not found.")

    # Calling getattr() on a staticmethod transforms it to a 'normal' function.
    # We need to ensure that we put it back as a staticmethod.
    old_attribute = obj.__dict__.get(attr_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      orig_attr = staticmethod(orig_attr)

    self.stubs.append((orig_obj, attr_name, orig_attr))
    setattr(orig_obj, attr_name, new_attr)

  def SmartUnsetAll(self):
    """Reverses all the SmartSet() calls, restoring things to their original
    definition.  Its okay to call SmartUnsetAll() repeatedly, as later calls
    have no effect if no SmartSet() calls have been made.
    """
    # Undo in reverse order so repeated stubs of one attribute unwind to the
    # original value.
    self.stubs.reverse()

    for args in self.stubs:
      setattr(*args)

    self.stubs = []

  def Set(self, parent, child_name, new_child):
    """Replace child_name's old definition with new_child, in the context
    of the given parent.  The parent could be a module when the child is a
    function at module scope.  Or the parent could be a class when a class'
    method is being replaced.  The named child is set to new_child, while
    the prior definition is saved away for later, when UnsetAll() is called.

    This method supports the case where child_name is a staticmethod or a
    classmethod of parent.
    """
    old_child = getattr(parent, child_name)

    # Preserve staticmethod wrapping (see the note in SmartSet()).
    old_attribute = parent.__dict__.get(child_name)
    if old_attribute is not None and isinstance(old_attribute, staticmethod):
      old_child = staticmethod(old_child)

    self.cache.append((parent, old_child, child_name))
    setattr(parent, child_name, new_child)

  def UnsetAll(self):
    """Reverses all the Set() calls, restoring things to their original
    definition.  Its okay to call UnsetAll() repeatedly, as later calls have
    no effect if no Set() calls have been made.
    """
    # Undo calls to Set() in reverse order, in case Set() was called on the
    # same arguments repeatedly (want the original call to be last one undone)
    self.cache.reverse()

    for (parent, old_child, child_name) in self.cache:
      setattr(parent, child_name, old_child)
    self.cache = []
| apache-2.0 |
bspink/django | tests/auth_tests/urls.py | 189 | 4614 | from django.conf.urls import url
from django.contrib import admin
from django.contrib.auth import views
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.urls import urlpatterns as auth_urlpatterns
from django.contrib.messages.api import info
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import RequestContext, Template
from django.views.decorators.cache import never_cache
class CustomRequestAuthenticationForm(AuthenticationForm):
    # Authentication form used by tests to verify that the login view
    # forwards the HttpRequest instance to its authentication form.
    def __init__(self, request, *args, **kwargs):
        # Fail loudly if the view did not pass the request object through.
        assert isinstance(request, HttpRequest)
        super(CustomRequestAuthenticationForm, self).__init__(request, *args, **kwargs)
@never_cache
def remote_user_auth_view(request):
    """Dummy view for remote user tests."""
    context = RequestContext(request, {})
    template = Template("Username is {{ user }}.")
    return HttpResponse(template.render(context))
def auth_processor_no_attr_access(request):
    """Render a template that never touches auth attributes, then report
    whether the session was accessed during that render."""
    render(request, 'context_processors/auth_attrs_no_access.html')
    # *After* rendering, we check whether the session was accessed
    context = {'session_accessed': request.session.accessed}
    return render(
        request, 'context_processors/auth_attrs_test_access.html', context)
def auth_processor_attr_access(request):
    """Render a template that touches auth attributes, then report whether
    the session was accessed during that render."""
    render(request, 'context_processors/auth_attrs_access.html')
    context = {'session_accessed': request.session.accessed}
    return render(
        request, 'context_processors/auth_attrs_test_access.html', context)
def auth_processor_user(request):
    # Exercise the ``user`` variable exposed by the auth context processor.
    template_name = 'context_processors/auth_attrs_user.html'
    return render(request, template_name)
def auth_processor_perms(request):
    # Exercise the ``perms`` object exposed by the auth context processor.
    template_name = 'context_processors/auth_attrs_perms.html'
    return render(request, template_name)
def auth_processor_perm_in_perms(request):
    # Exercise ``in`` containment checks against the ``perms`` object.
    template_name = 'context_processors/auth_attrs_perm_in_perms.html'
    return render(request, template_name)
def auth_processor_messages(request):
    """Queue one message, then render the template that displays messages."""
    info(request, "Message 1")
    template_name = 'context_processors/auth_attrs_messages.html'
    return render(request, template_name)
def userpage(request):
    # Placeholder target for the named "userpage" URL pattern; only the URL
    # reversal is exercised by the tests, the view is never invoked.
    pass
def custom_request_auth_login(request):
    # Delegate to the stock login view, swapping in a form class that
    # asserts it receives the HttpRequest instance.
    form_class = CustomRequestAuthenticationForm
    return views.login(request, authentication_form=form_class)
# special urls for auth test cases
urlpatterns = auth_urlpatterns + [
    # Logout variants exercising redirect_field_name / next_page handling.
    url(r'^logout/custom_query/$', views.logout, dict(redirect_field_name='follow')),
    url(r'^logout/next_page/$', views.logout, dict(next_page='/somewhere/')),
    url(r'^logout/next_page/named/$', views.logout, dict(next_page='password_reset')),
    url(r'^remote_user/$', remote_user_auth_view),
    # Password-reset variants: custom sender address, redirects, templates.
    url(r'^password_reset_from_email/$', views.password_reset, dict(from_email='staffmember@example.com')),
    url(r'^password_reset/custom_redirect/$', views.password_reset, dict(post_reset_redirect='/custom/')),
    url(r'^password_reset/custom_redirect/named/$', views.password_reset, dict(post_reset_redirect='password_reset')),
    url(r'^password_reset/html_email_template/$', views.password_reset,
        dict(html_email_template_name='registration/html_password_reset_email.html')),
    url(r'^reset/custom/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.password_reset_confirm,
        dict(post_reset_redirect='/custom/')),
    url(r'^reset/custom/named/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$',
        views.password_reset_confirm,
        dict(post_reset_redirect='password_reset')),
    url(r'^password_change/custom/$', views.password_change, dict(post_change_redirect='/custom/')),
    url(r'^password_change/custom/named/$', views.password_change, dict(post_change_redirect='password_reset')),
    url(r'^admin_password_reset/$', views.password_reset, dict(is_admin_site=True)),
    url(r'^login_required/$', login_required(views.password_reset)),
    url(r'^login_required_login_url/$', login_required(views.password_reset, login_url='/somewhere/')),
    # Context-processor test views defined above in this module.
    url(r'^auth_processor_no_attr_access/$', auth_processor_no_attr_access),
    url(r'^auth_processor_attr_access/$', auth_processor_attr_access),
    url(r'^auth_processor_user/$', auth_processor_user),
    url(r'^auth_processor_perms/$', auth_processor_perms),
    url(r'^auth_processor_perm_in_perms/$', auth_processor_perm_in_perms),
    url(r'^auth_processor_messages/$', auth_processor_messages),
    url(r'^custom_request_auth_login/$', custom_request_auth_login),
    url(r'^userpage/(.+)/$', userpage, name="userpage"),
    # This line is only required to render the password reset with is_admin=True
    url(r'^admin/', admin.site.urls),
]
| bsd-3-clause |
mikewiebe-ansible/ansible | lib/ansible/plugins/lookup/avi.py | 23 | 4760 | # python 3 headers, required if submitting to Ansible
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin documentation rendered by ansible-doc; the only code change below
# is fixing the "One ore more" typo in RETURN.
DOCUMENTATION = """
lookup: avi
author: Sandeep Bandi <sandeepb@avinetworks.com>
version_added: 2.9
short_description: Look up ``Avi`` objects.
description:
    - Given an object_type, fetch all the objects of that type or fetch
      the specific object that matches the name/uuid given via options.
    - For single object lookup. If you want the output to be a list, you may
      want to pass option wantlist=True to the plugin.
options:
    obj_type:
        description:
            - type of object to query
        required: True
    obj_name:
        description:
            - name of the object to query
    obj_uuid:
        description:
            - UUID of the object to query
extends_documentation_fragment: avi
"""

EXAMPLES = """
# Lookup query for all the objects of a specific type.
- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_type='virtualservice') }}"
# Lookup query for an object with the given name and type.
- debug: msg="{{ lookup('avi', avi_credentials=avi_credentials, obj_name='vs1', obj_type='virtualservice', wantlist=True) }}"
# Lookup query for an object with the given UUID and type.
- debug: msg="{{ lookup('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
# We can replace lookup with query function to always the get the output as list.
# This is helpful for looping.
- debug: msg="{{ query('avi', obj_uuid='virtualservice-5c0e183a-690a-45d8-8d6f-88c30a52550d', obj_type='virtualservice') }}"
"""

RETURN = """
_raw:
    description:
        - One or more objects returned from ``Avi`` API.
    type: list
    elements: dictionary
"""
from ansible.module_utils._text import to_native
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.plugins.lookup import LookupBase
from ansible.utils.display import Display
from ansible.module_utils.network.avi.avi_api import (ApiSession,
AviCredentials,
AviServerError,
ObjectNotFound,
APIError)
display = Display()
def _api(avi_session, path, **kwargs):
'''
Generic function to handle both /<obj_type>/<obj_uuid> and /<obj_type>
API resource endpoints.
'''
rsp = []
try:
rsp_data = avi_session.get(path, **kwargs).json()
if 'results' in rsp_data:
rsp = rsp_data['results']
else:
rsp.append(rsp_data)
except ObjectNotFound as e:
display.warning('Resource not found. Please check obj_name/'
'obj_uuid/obj_type are spelled correctly.')
display.v(to_native(e))
except (AviServerError, APIError) as e:
raise AnsibleError(to_native(e))
except Exception as e:
# Generic excption handling for connection failures
raise AnsibleError('Unable to communicate with controller'
'due to error: %s' % to_native(e))
return rsp
class LookupModule(LookupBase):
    """Lookup plugin that returns Avi controller objects of ``obj_type``,
    optionally narrowed down to a single name or UUID."""

    def run(self, terms, variables=None, avi_credentials=None, **kwargs):
        api_creds = AviCredentials(**avi_credentials)
        # Open the controller session first; any connection problem is
        # surfaced to Ansible as a lookup error.
        try:
            session = ApiSession(avi_credentials=api_creds)
        except Exception as e:
            raise AnsibleError(to_native(e))

        try:
            resource = kwargs.pop('obj_type')
        except KeyError:
            raise AnsibleError("Please pass the obj_type for lookup")

        # An empty list is returned when no matching object exists.
        results = []
        if kwargs.get('obj_name', None):
            target = kwargs.pop('obj_name')
            try:
                display.v("Fetching obj: %s of type: %s" % (target, resource))
                found = session.get_object_by_name(resource, target, **kwargs)
                # Append only when the named object actually exists.
                if found:
                    results.append(found)
            except AviServerError as e:
                raise AnsibleError(to_native(e))
        elif kwargs.get('obj_uuid', None):
            target = kwargs.pop('obj_uuid')
            display.v("Fetching obj: %s of type: %s" % (target, resource))
            results = _api(session, "%s/%s" % (resource, target), **kwargs)
        else:
            display.v("Fetching all objects of type: %s" % resource)
            results = _api(session, resource, **kwargs)

        return results
| gpl-3.0 |
rockyzhang/zhangyanhit-python-for-android-mips | python3-alpha/python3-src/Lib/xmlrpc/server.py | 45 | 35818 | """XML-RPC Servers.
This module can be used to create simple XML-RPC servers
by creating a server and either installing functions, a
class instance, or by extending the SimpleXMLRPCServer
class.
It can also be used to handle XML-RPC requests in a CGI
environment using CGIXMLRPCRequestHandler.
The Doc* classes can be used to create XML-RPC servers that
serve pydoc-style documentation in response to HTTP
GET requests. This documentation is dynamically generated
based on the functions and methods registered with the
server.
A list of possible usage patterns follows:
1. Install functions:
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_function(pow)
server.register_function(lambda x,y: x+y, 'add')
server.serve_forever()
2. Install an instance:
class MyFuncs:
def __init__(self):
# make all of the sys functions available through sys.func_name
import sys
self.sys = sys
def _listMethods(self):
# implement this method so that system.listMethods
# knows to advertise the sys methods
return list_public_methods(self) + \
['sys.' + method for method in list_public_methods(self.sys)]
def pow(self, x, y): return pow(x, y)
def add(self, x, y) : return x + y
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(MyFuncs())
server.serve_forever()
3. Install an instance with custom dispatch method:
class Math:
def _listMethods(self):
# this method must be present for system.listMethods
# to work
return ['add', 'pow']
def _methodHelp(self, method):
# this method must be present for system.methodHelp
# to work
if method == 'add':
return "add(2,3) => 5"
elif method == 'pow':
return "pow(x, y[, z]) => number"
else:
# By convention, return empty
# string if no help is available
return ""
def _dispatch(self, method, params):
if method == 'pow':
return pow(*params)
elif method == 'add':
return params[0] + params[1]
else:
raise ValueError('bad method')
server = SimpleXMLRPCServer(("localhost", 8000))
server.register_introspection_functions()
server.register_instance(Math())
server.serve_forever()
4. Subclass SimpleXMLRPCServer:
class MathServer(SimpleXMLRPCServer):
def _dispatch(self, method, params):
try:
# We are forcing the 'export_' prefix on methods that are
# callable through XML-RPC to prevent potential security
# problems
func = getattr(self, 'export_' + method)
except AttributeError:
raise Exception('method "%s" is not supported' % method)
else:
return func(*params)
def export_add(self, x, y):
return x + y
server = MathServer(("localhost", 8000))
server.serve_forever()
5. CGI script:
server = CGIXMLRPCRequestHandler()
server.register_function(pow)
server.handle_request()
"""
# Written by Brian Quinlan (brian@sweetapp.com).
# Based on code written by Fredrik Lundh.
from xmlrpc.client import Fault, dumps, loads, gzip_encode, gzip_decode
from http.server import BaseHTTPRequestHandler
import http.server
import socketserver
import sys
import os
import re
import pydoc
import inspect
import traceback
try:
import fcntl
except ImportError:
fcntl = None
def resolve_dotted_attribute(obj, attr, allow_dotted_names=True):
    """resolve_dotted_attribute(a, 'b.c.d') => a.b.c.d

    Resolve a dotted attribute name to an object, raising AttributeError
    if any attribute in the chain starts with a '_'.

    When the optional allow_dotted_names argument is false, dots are not
    treated specially and this function behaves like getattr(obj, attr).
    """
    names = attr.split('.') if allow_dotted_names else [attr]
    for name in names:
        # Refuse to traverse into private attributes.
        if name.startswith('_'):
            raise AttributeError(
                'attempt to access private attribute "%s"' % name
                )
        obj = getattr(obj, name)
    return obj
def list_public_methods(obj):
    """Returns a list of attribute strings, found in the specified
    object, which represent callable attributes"""

    # callable() is the idiomatic (and later stdlib) form of the original
    # hasattr(..., '__call__') check; private names are filtered out.
    return [member for member in dir(obj)
                if not member.startswith('_') and
                    callable(getattr(obj, member))]
class SimpleXMLRPCDispatcher:
    """Mix-in class that dispatches XML-RPC requests.
    This class is used to register XML-RPC method handlers
    and then to dispatch them. This class doesn't need to be
    instanced directly when used by SimpleXMLRPCServer but it
    can be instanced when used by the MultiPathXMLRPCServer
    """
    def __init__(self, allow_none=False, encoding=None):
        # name -> callable map populated by register_function()
        self.funcs = {}
        # optional object whose methods answer requests (register_instance())
        self.instance = None
        # whether marshalling of None (the <nil> extension) is permitted
        self.allow_none = allow_none
        # encoding used for marshalled responses; defaults to UTF-8
        self.encoding = encoding or 'utf-8'
    def register_instance(self, instance, allow_dotted_names=False):
        """Registers an instance to respond to XML-RPC requests.
        Only one instance can be installed at a time.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called. Methods beginning with an '_'
        are considered private and will not be called by
        SimpleXMLRPCServer.
        If a registered function matches a XML-RPC request, then it
        will be called instead of the registered instance.
        If the optional allow_dotted_names argument is true and the
        instance does not have a _dispatch method, method names
        containing dots are supported and resolved, as long as none of
        the name segments start with an '_'.
        *** SECURITY WARNING: ***
        Enabling the allow_dotted_names options allows intruders
        to access your module's global variables and may allow
        intruders to execute arbitrary code on your machine. Only
        use this option on a secure, closed network.
        """
        self.instance = instance
        self.allow_dotted_names = allow_dotted_names
    def register_function(self, function, name=None):
        """Registers a function to respond to XML-RPC requests.
        The optional name argument can be used to set a Unicode name
        for the function.
        """
        # default the public name to the function's own __name__
        if name is None:
            name = function.__name__
        self.funcs[name] = function
    def register_introspection_functions(self):
        """Registers the XML-RPC introspection methods in the system
        namespace.
        see http://xmlrpc.usefulinc.com/doc/reserved.html
        """
        self.funcs.update({'system.listMethods' : self.system_listMethods,
                      'system.methodSignature' : self.system_methodSignature,
                      'system.methodHelp' : self.system_methodHelp})
    def register_multicall_functions(self):
        """Registers the XML-RPC multicall method in the system
        namespace.
        see http://www.xmlrpc.com/discuss/msgReader$1208"""
        self.funcs.update({'system.multicall' : self.system_multicall})
    def _marshaled_dispatch(self, data, dispatch_method = None, path = None):
        """Dispatches an XML-RPC method from marshalled (XML) data.
        XML-RPC methods are dispatched from the marshalled (XML) data
        using the _dispatch method and the result is returned as
        marshalled data. For backwards compatibility, a dispatch
        function can be provided as an argument (see comment in
        SimpleXMLRPCRequestHandler.do_POST) but overriding the
        existing method through subclassing is the preferred means
        of changing method dispatch behavior.
        """
        try:
            params, method = loads(data)
            # generate response
            if dispatch_method is not None:
                response = dispatch_method(method, params)
            else:
                response = self._dispatch(method, params)
            # wrap response in a singleton tuple
            response = (response,)
            response = dumps(response, methodresponse=1,
                             allow_none=self.allow_none, encoding=self.encoding)
        except Fault as fault:
            # a Fault raised by the handler is itself a valid XML-RPC response
            response = dumps(fault, allow_none=self.allow_none,
                             encoding=self.encoding)
        except:
            # report exception back to server
            # NOTE(review): bare except also traps SystemExit and
            # KeyboardInterrupt raised inside a handler.
            exc_type, exc_value, exc_tb = sys.exc_info()
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none,
                )
        # callers (the request handlers) expect bytes
        return response.encode(self.encoding)
    def system_listMethods(self):
        """system.listMethods() => ['add', 'subtract', 'multiple']
        Returns a list of the methods supported by the server."""
        methods = set(self.funcs.keys())
        if self.instance is not None:
            # Instance can implement _listMethod to return a list of
            # methods
            if hasattr(self.instance, '_listMethods'):
                methods |= set(self.instance._listMethods())
            # if the instance has a _dispatch method then we
            # don't have enough information to provide a list
            # of methods
            elif not hasattr(self.instance, '_dispatch'):
                methods |= set(list_public_methods(self.instance))
        return sorted(methods)
    def system_methodSignature(self, method_name):
        """system.methodSignature('add') => [double, int, int]
        Returns a list describing the signature of the method. In the
        above example, the add method takes two integers as arguments
        and returns a double result.
        This server does NOT support system.methodSignature."""
        # See http://xmlrpc.usefulinc.com/doc/sysmethodsig.html
        return 'signatures not supported'
    def system_methodHelp(self, method_name):
        """system.methodHelp('add') => "Adds two integers together"
        Returns a string containing documentation for the specified method."""
        method = None
        if method_name in self.funcs:
            method = self.funcs[method_name]
        elif self.instance is not None:
            # Instance can implement _methodHelp to return help for a method
            if hasattr(self.instance, '_methodHelp'):
                return self.instance._methodHelp(method_name)
            # if the instance has a _dispatch method then we
            # don't have enough information to provide help
            elif not hasattr(self.instance, '_dispatch'):
                try:
                    method = resolve_dotted_attribute(
                                self.instance,
                                method_name,
                                self.allow_dotted_names
                                )
                except AttributeError:
                    pass
        # Note that we aren't checking that the method actually
        # be a callable object of some kind
        if method is None:
            return ""
        else:
            import pydoc
            return pydoc.getdoc(method)
    def system_multicall(self, call_list):
        """system.multicall([{'methodName': 'add', 'params': [2, 2]}, ...]) => \
[[4], ...]
        Allows the caller to package multiple XML-RPC calls into a single
        request.
        See http://www.xmlrpc.com/discuss/msgReader$1208
        """
        results = []
        for call in call_list:
            method_name = call['methodName']
            params = call['params']
            try:
                # XXX A marshalling error in any response will fail the entire
                # multicall. If someone cares they should fix this.
                # a successful call is reported as a one-element list...
                results.append([self._dispatch(method_name, params)])
            except Fault as fault:
                # ...while a failed call is reported as a fault struct
                results.append(
                    {'faultCode' : fault.faultCode,
                     'faultString' : fault.faultString}
                    )
            except:
                exc_type, exc_value, exc_tb = sys.exc_info()
                results.append(
                    {'faultCode' : 1,
                     'faultString' : "%s:%s" % (exc_type, exc_value)}
                    )
        return results
    def _dispatch(self, method, params):
        """Dispatches the XML-RPC method.
        XML-RPC calls are forwarded to a registered function that
        matches the called XML-RPC method name. If no such function
        exists then the call is forwarded to the registered instance,
        if available.
        If the registered instance has a _dispatch method then that
        method will be called with the name of the XML-RPC method and
        its parameters as a tuple
        e.g. instance._dispatch('add',(2,3))
        If the registered instance does not have a _dispatch method
        then the instance will be searched to find a matching method
        and, if found, will be called.
        Methods beginning with an '_' are considered private and will
        not be called.
        """
        func = None
        try:
            # check to see if a matching function has been registered
            func = self.funcs[method]
        except KeyError:
            if self.instance is not None:
                # check for a _dispatch method
                if hasattr(self.instance, '_dispatch'):
                    return self.instance._dispatch(method, params)
                else:
                    # call instance method directly
                    try:
                        func = resolve_dotted_attribute(
                            self.instance,
                            method,
                            self.allow_dotted_names
                            )
                    except AttributeError:
                        pass
        if func is not None:
            return func(*params)
        else:
            raise Exception('method "%s" is not supported' % method)
class SimpleXMLRPCRequestHandler(BaseHTTPRequestHandler):
    """Simple XML-RPC request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    """
    # Class attribute listing the accessible path components;
    # paths not on this list will result in a 404 error.
    rpc_paths = ('/', '/RPC2')
    #if not None, encode responses larger than this, if possible
    encode_threshold = 1400 #a common MTU
    #Override form StreamRequestHandler: full buffering of output
    #and no Nagle.
    wbufsize = -1
    disable_nagle_algorithm = True
    # a re to match a gzip Accept-Encoding
    aepattern = re.compile(r"""
                            \s* ([^\s;]+) \s*            #content-coding
                            (;\s* q \s*=\s* ([0-9\.]+))? #q
                            """, re.VERBOSE | re.IGNORECASE)
    def accept_encodings(self):
        # Parse the Accept-Encoding header into a {coding: q-value} dict.
        r = {}
        ae = self.headers.get("Accept-Encoding", "")
        for e in ae.split(","):
            match = self.aepattern.match(e)
            if match:
                v = match.group(3)
                # an omitted q parameter means fully acceptable (q=1.0)
                v = float(v) if v else 1.0
                r[match.group(1)] = v
        return r
    def is_rpc_path_valid(self):
        if self.rpc_paths:
            return self.path in self.rpc_paths
        else:
            # If .rpc_paths is empty, just assume all paths are legal
            return True
    def do_POST(self):
        """Handles the HTTP POST request.
        Attempts to interpret all HTTP POST requests as XML-RPC calls,
        which are forwarded to the server's _dispatch method for handling.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        try:
            # Get arguments by reading body of request.
            # We read this in chunks to avoid straining
            # socket.read(); around the 10 or 15Mb mark, some platforms
            # begin to have problems (bug #792570).
            max_chunk_size = 10*1024*1024
            size_remaining = int(self.headers["content-length"])
            L = []
            while size_remaining:
                chunk_size = min(size_remaining, max_chunk_size)
                L.append(self.rfile.read(chunk_size))
                # count what was actually read; reads may be short
                # NOTE(review): a zero-length read (client closed the
                # connection early) would loop forever here — confirm.
                size_remaining -= len(L[-1])
            data = b''.join(L)
            data = self.decode_request_content(data)
            if data is None:
                return #response has been sent
            # In previous versions of SimpleXMLRPCServer, _dispatch
            # could be overridden in this class, instead of in
            # SimpleXMLRPCDispatcher. To maintain backwards compatibility,
            # check to see if a subclass implements _dispatch and dispatch
            # using that method if present.
            response = self.server._marshaled_dispatch(
                data, getattr(self, '_dispatch', None), self.path
            )
        except Exception as e: # This should only happen if the module is buggy
            # internal error, report as HTTP server error
            self.send_response(500)
            # Send information about the exception if requested
            if hasattr(self.server, '_send_traceback_header') and \
                    self.server._send_traceback_header:
                self.send_header("X-exception", str(e))
                trace = traceback.format_exc()
                trace = str(trace.encode('ASCII', 'backslashreplace'), 'ASCII')
                self.send_header("X-traceback", trace)
            self.send_header("Content-length", "0")
            self.end_headers()
        else:
            self.send_response(200)
            self.send_header("Content-type", "text/xml")
            # gzip-compress large responses when the client accepts it
            if self.encode_threshold is not None:
                if len(response) > self.encode_threshold:
                    q = self.accept_encodings().get("gzip", 0)
                    if q:
                        try:
                            response = gzip_encode(response)
                            self.send_header("Content-Encoding", "gzip")
                        except NotImplementedError:
                            pass
            self.send_header("Content-length", str(len(response)))
            self.end_headers()
            self.wfile.write(response)
    def decode_request_content(self, data):
        #support gzip encoding of request
        # Returns the decoded body, or None after having already sent an
        # error response (callers must check for None).
        encoding = self.headers.get("content-encoding", "identity").lower()
        if encoding == "identity":
            return data
        if encoding == "gzip":
            try:
                return gzip_decode(data)
            except NotImplementedError:
                self.send_response(501, "encoding %r not supported" % encoding)
            except ValueError:
                self.send_response(400, "error decoding gzip content")
        else:
            self.send_response(501, "encoding %r not supported" % encoding)
        self.send_header("Content-length", "0")
        self.end_headers()
    def report_404 (self):
        # Report a 404 error
        self.send_response(404)
        response = b'No such page'
        self.send_header("Content-type", "text/plain")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
    def log_request(self, code='-', size='-'):
        """Selectively log an accepted request."""
        # honours the server's logRequests constructor flag
        if self.server.logRequests:
            BaseHTTPRequestHandler.log_request(self, code, size)
class SimpleXMLRPCServer(socketserver.TCPServer,
                         SimpleXMLRPCDispatcher):
    """Simple XML-RPC server.
    Simple XML-RPC server that allows functions and a single instance
    to be installed to handle requests. The default implementation
    attempts to dispatch XML-RPC calls to the functions or instance
    installed in the server. Override the _dispatch method inhereted
    from SimpleXMLRPCDispatcher to change this behavior.
    """
    allow_reuse_address = True
    # Warning: this is for debugging purposes only! Never set this to True in
    # production code, as will be sending out sensitive information (exception
    # and stack trace details) when exceptions are raised inside
    # SimpleXMLRPCRequestHandler.do_POST
    _send_traceback_header = False
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        # store first so it is available to request handlers (log_request)
        self.logRequests = logRequests
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
        socketserver.TCPServer.__init__(self, addr, requestHandler, bind_and_activate)
        # [Bug #1222790] If possible, set close-on-exec flag; if a
        # method spawns a subprocess, the subprocess shouldn't have
        # the listening socket open.
        if fcntl is not None and hasattr(fcntl, 'FD_CLOEXEC'):
            flags = fcntl.fcntl(self.fileno(), fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(self.fileno(), fcntl.F_SETFD, flags)
class MultiPathXMLRPCServer(SimpleXMLRPCServer):
    """Multipath XML-RPC Server
    This specialization of SimpleXMLRPCServer allows the user to create
    multiple Dispatcher instances and assign them to different
    HTTP request paths. This makes it possible to run two or more
    'virtual XML-RPC servers' at the same port.
    Make sure that the requestHandler accepts the paths in question.
    """
    def __init__(self, addr, requestHandler=SimpleXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None, bind_and_activate=True):
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests, allow_none,
                                    encoding, bind_and_activate)
        # maps an HTTP request path to the dispatcher responsible for it
        self.dispatchers = {}
        self.allow_none = allow_none
        self.encoding = encoding

    def add_dispatcher(self, path, dispatcher):
        """Register *dispatcher* to serve requests arriving at *path*."""
        self.dispatchers[path] = dispatcher
        return dispatcher

    def get_dispatcher(self, path):
        """Return the dispatcher registered for *path* (KeyError if none)."""
        return self.dispatchers[path]

    def _marshaled_dispatch(self, data, dispatch_method=None, path=None):
        """Route the marshalled request to the per-path dispatcher.

        Always returns the marshalled response as bytes.  Any exception
        escaping the selected dispatcher (each dispatcher is expected to
        handle its own errors) is reported back as an XML-RPC Fault.
        """
        try:
            response = self.dispatchers[path]._marshaled_dispatch(
                data, dispatch_method, path)
        except:
            # report low level exception back to server
            # (each dispatcher should have handled their own
            # exceptions)
            exc_type, exc_value = sys.exc_info()[:2]
            # BUG FIX: this previously called xmlrpclib.dumps/xmlrpclib.Fault
            # (a Python 2 module name that is not imported here) and returned
            # a str while the success path returns bytes.  Use the
            # module-level dumps/Fault and encode, mirroring
            # SimpleXMLRPCDispatcher._marshaled_dispatch.
            response = dumps(
                Fault(1, "%s:%s" % (exc_type, exc_value)),
                encoding=self.encoding, allow_none=self.allow_none)
            # self.encoding may be None (constructor default); fall back
            # to the XML-RPC default of UTF-8 for the byte encoding.
            response = response.encode(self.encoding or 'utf-8')
        return response
class CGIXMLRPCRequestHandler(SimpleXMLRPCDispatcher):
    """Simple handler for XML-RPC data passed through CGI."""
    def __init__(self, allow_none=False, encoding=None):
        SimpleXMLRPCDispatcher.__init__(self, allow_none, encoding)
    def handle_xmlrpc(self, request_text):
        """Handle a single XML-RPC request"""
        response = self._marshaled_dispatch(request_text)
        # CGI output goes to stdout: headers as text, body as bytes
        # through the underlying binary buffer.
        print('Content-Type: text/xml')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_get(self):
        """Handle a single HTTP GET request.
        Default implementation indicates an error because
        XML-RPC uses the POST method.
        """
        code = 400
        message, explain = BaseHTTPRequestHandler.responses[code]
        # render the standard HTML error page for the 400 response
        response = http.server.DEFAULT_ERROR_MESSAGE % \
            {
                'code' : code,
                'message' : message,
                'explain' : explain
            }
        response = response.encode('utf-8')
        print('Status: %d %s' % (code, message))
        print('Content-Type: %s' % http.server.DEFAULT_ERROR_CONTENT_TYPE)
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
    def handle_request(self, request_text=None):
        """Handle a single XML-RPC request passed through a CGI post method.
        If no XML data is given then it is read from stdin. The resulting
        XML-RPC response is printed to stdout along with the correct HTTP
        headers.
        """
        # GET is only honoured when no request body was handed in directly
        if request_text is None and \
            os.environ.get('REQUEST_METHOD', None) == 'GET':
            self.handle_get()
        else:
            # POST data is normally available through stdin
            try:
                # int(None) raises TypeError when CONTENT_LENGTH is unset
                length = int(os.environ.get('CONTENT_LENGTH', None))
            except (ValueError, TypeError):
                length = -1
            if request_text is None:
                request_text = sys.stdin.read(length)
            self.handle_xmlrpc(request_text)
# -----------------------------------------------------------------------------
# Self documenting XML-RPC Server.
class ServerHTMLDoc(pydoc.HTMLDoc):
    """Class used to generate pydoc HTML document for a server"""

    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                # BUG FIX: this replace() previously substituted '"' with
                # itself (a no-op).  Double quotes must become &quot; so
                # they cannot terminate the href attribute early.
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)

    def docroutine(self, object, name, mod=None,
                   funcs={}, classes={}, methods={}, cl=None):
        """Produce HTML documentation for a function or method object."""
        anchor = (cl and cl.__name__ or '') + '-' + name
        note = ''
        title = '<a name="%s"><strong>%s</strong></a>' % (
            self.escape(anchor), self.escape(name))

        if inspect.ismethod(object) or inspect.isfunction(object):
            # MODERNIZED: inspect.getargspec/formatargspec were removed in
            # Python 3.11.  inspect.signature covers both cases, already
            # excludes the bound 'self' argument for methods (which would
            # be confusing to the non-Python user), and also supports
            # keyword-only arguments which getargspec rejected.
            try:
                argspec = str(inspect.signature(object))
            except (TypeError, ValueError):
                argspec = '(...)'
        else:
            argspec = '(...)'

        # The doc generator may hand us an (argstring, docstring) tuple
        # instead of a real callable; use its contents verbatim.
        if isinstance(object, tuple):
            argspec = object[0] or argspec
            docstring = object[1] or ""
        else:
            docstring = pydoc.getdoc(object)

        decl = title + argspec + (note and self.grey(
            '<font face="helvetica, arial">%s</font>' % note))

        doc = self.markup(
            docstring, self.preformat, funcs, classes, methods)
        doc = doc and '<dd><tt>%s</tt></dd>' % doc
        return '<dl><dt>%s</dt>%s</dl>\n' % (decl, doc)

    def docserver(self, server_name, package_documentation, methods):
        """Produce HTML documentation for an XML-RPC server."""
        # map both the method name and its value to the same anchor so
        # markup() can hyperlink either form
        fdict = {}
        for key, value in methods.items():
            fdict[key] = '#-' + key
            fdict[value] = fdict[key]

        server_name = self.escape(server_name)
        head = '<big><big><strong>%s</strong></big></big>' % server_name
        result = self.heading(head, '#ffffff', '#7799ee')

        doc = self.markup(package_documentation, self.preformat, fdict)
        doc = doc and '<tt>%s</tt>' % doc
        result = result + '<p>%s</p>\n' % doc

        contents = []
        method_items = sorted(methods.items())
        for key, value in method_items:
            contents.append(self.docroutine(value, key, funcs=fdict))
        result = result + self.bigsection(
            'Methods', '#ffffff', '#eeaa77', ''.join(contents))

        return result
class XMLRPCDocGenerator:
    """Generates documentation for an XML-RPC server.
    This class is designed as mix-in and should not
    be constructed directly.
    """
    def __init__(self):
        # setup variables used for HTML documentation
        self.server_name = 'XML-RPC Server Documentation'
        self.server_documentation = \
            "This server exports the following methods through the XML-RPC "\
            "protocol."
        self.server_title = 'XML-RPC Server Documentation'
    def set_server_title(self, server_title):
        """Set the HTML title of the generated server documentation"""
        self.server_title = server_title
    def set_server_name(self, server_name):
        """Set the name of the generated HTML server documentation"""
        self.server_name = server_name
    def set_server_documentation(self, server_documentation):
        """Set the documentation string for the entire server."""
        self.server_documentation = server_documentation
    def generate_html_documentation(self):
        """generate_html_documentation() => html documentation for the server
        Generates HTML documentation for the server using introspection for
        installed functions and instances that do not implement the
        _dispatch method. Alternatively, instances can choose to implement
        the _get_method_argstring(method_name) method to provide the
        argument string used in the documentation and the
        _methodHelp(method_name) method to provide the help text used
        in the documentation."""
        methods = {}
        for method_name in self.system_listMethods():
            if method_name in self.funcs:
                # directly registered functions are documented as-is
                method = self.funcs[method_name]
            elif self.instance is not None:
                # build an (argstring, docstring) pair from the instance's
                # optional hooks; fall back to the attribute itself when
                # no hook supplied anything and introspection is allowed
                method_info = [None, None] # argspec, documentation
                if hasattr(self.instance, '_get_method_argstring'):
                    method_info[0] = self.instance._get_method_argstring(method_name)
                if hasattr(self.instance, '_methodHelp'):
                    method_info[1] = self.instance._methodHelp(method_name)
                method_info = tuple(method_info)
                if method_info != (None, None):
                    method = method_info
                elif not hasattr(self.instance, '_dispatch'):
                    try:
                        method = resolve_dotted_attribute(
                                    self.instance,
                                    method_name
                                    )
                    except AttributeError:
                        # keep the empty (None, None) placeholder
                        method = method_info
                else:
                    # _dispatch hides the real methods from introspection
                    method = method_info
            else:
                assert 0, "Could not find method in self.functions and no "\
                          "instance installed"
            methods[method_name] = method
        documenter = ServerHTMLDoc()
        documentation = documenter.docserver(
                                self.server_name,
                                self.server_documentation,
                                methods
                            )
        return documenter.page(self.server_title, documentation)
class DocXMLRPCRequestHandler(SimpleXMLRPCRequestHandler):
    """XML-RPC and documentation request handler class.
    Handles all HTTP POST requests and attempts to decode them as
    XML-RPC requests.
    Handles all HTTP GET requests and interprets them as requests
    for documentation.
    """
    def do_GET(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        # Check that the path is legal
        if not self.is_rpc_path_valid():
            self.report_404()
            return
        # the server mixes in XMLRPCDocGenerator, which provides this method
        response = self.server.generate_html_documentation().encode('utf-8')
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.send_header("Content-length", str(len(response)))
        self.end_headers()
        self.wfile.write(response)
class DocXMLRPCServer( SimpleXMLRPCServer,
                       XMLRPCDocGenerator):
    """XML-RPC and HTML documentation server.
    Adds the ability to serve server documentation to the capabilities
    of SimpleXMLRPCServer.
    """
    def __init__(self, addr, requestHandler=DocXMLRPCRequestHandler,
                 logRequests=True, allow_none=False, encoding=None,
                 bind_and_activate=True):
        # initialize the serving half first, then the doc-generator mix-in
        SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
                                    allow_none, encoding, bind_and_activate)
        XMLRPCDocGenerator.__init__(self)
class DocCGIXMLRPCRequestHandler(CGIXMLRPCRequestHandler,
                                 XMLRPCDocGenerator):
    """Handler for XML-RPC data and documentation requests passed through
    CGI"""

    def __init__(self):
        # initialize both mix-in halves
        CGIXMLRPCRequestHandler.__init__(self)
        XMLRPCDocGenerator.__init__(self)

    def handle_get(self):
        """Handles the HTTP GET request.
        Interpret all HTTP GET requests as requests for server
        documentation.
        """
        response = self.generate_html_documentation().encode('utf-8')

        print('Content-Type: text/html')
        print('Content-Length: %d' % len(response))
        print()
        sys.stdout.flush()
        sys.stdout.buffer.write(response)
        sys.stdout.buffer.flush()
if __name__ == '__main__':
    # Demo: serve the builtin pow() and an anonymous 'add' function
    # on localhost:8000 until interrupted.
    print('Running XML-RPC server on port 8000')
    server = SimpleXMLRPCServer(("localhost", 8000))
    server.register_function(pow)
    server.register_function(lambda x,y: x+y, 'add')
    server.serve_forever()
| apache-2.0 |
Chilledheart/chromium | tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_hwgrep.py | 159 | 1526 | #! python
#
# Python Serial Port Extension for Win32, Linux, BSD, Jython
# see __init__.py
#
# This module implements a special URL handler that uses the port listing to
# find ports by searching the string descriptions.
#
# (C) 2011 Chris Liechti <cliechti@gmx.net>
# this is distributed under a free software license, see license.txt
#
# URL format: hwgrep://regexp
import serial
import serial.tools.list_ports
class Serial(serial.Serial):
    """Just inherit the native Serial port implementation and patch the open function."""
    # NOTE: this module is Python 2 code (it relies on `basestring`).
    def setPort(self, value):
        """translate port name before storing it"""
        # hwgrep:// URLs are resolved to a concrete device name first
        if isinstance(value, basestring) and value.startswith('hwgrep://'):
            serial.Serial.setPort(self, self.fromURL(value))
        else:
            serial.Serial.setPort(self, value)
    def fromURL(self, url):
        """extract host and port from an URL string"""
        # strip the scheme prefix; matching is case-insensitive
        if url.lower().startswith("hwgrep://"): url = url[9:]
        # use a for loop to get the 1st element from the generator
        for port, desc, hwid in serial.tools.list_ports.grep(url):
            return port
        else:
            # for/else: reached only when grep() yielded no matches at all
            raise serial.SerialException('no ports found matching regexp %r' % (url,))
    # override property
    port = property(serial.Serial.getPort, setPort, doc="Port setting")
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
    # Basic manual test (Python 2 print syntax): create an unopened port,
    # then assign an hwgrep URL, which is resolved via setPort/fromURL.
    #~ s = Serial('hwgrep://ttyS0')
    s = Serial(None)
    s.port = 'hwgrep://ttyS0'
    print s
| bsd-3-clause |
dkarakats/edx-platform | lms/djangoapps/licenses/models.py | 150 | 2538 | import logging
from django.db import models, transaction
from student.models import User
from xmodule_django.models import CourseKeyField
log = logging.getLogger("edx.licenses")
class CourseSoftware(models.Model):
    """A piece of software licensed for use in a course."""
    # short name of the software package
    name = models.CharField(max_length=255)
    # full human-readable name of the software
    full_name = models.CharField(max_length=255)
    # URL associated with the software
    url = models.CharField(max_length=255)
    # course this software belongs to
    course_id = CourseKeyField(max_length=255)
    def __unicode__(self):
        return u'{0} for {1}'.format(self.name, self.course_id)
class UserLicense(models.Model):
    """A single serial number for a CourseSoftware, optionally assigned to a user."""
    software = models.ForeignKey(CourseSoftware, db_index=True)
    # null while the serial number is still unassigned (see _create_license)
    user = models.ForeignKey(User, null=True)
    serial = models.CharField(max_length=255)
def get_courses_licenses(user, courses):
    """Map every CourseSoftware of the given courses to the user's license.

    Software with no license assigned to *user* maps to None.
    """
    course_ids = {course.id for course in courses}
    all_software = CourseSoftware.objects.filter(course_id__in=course_ids)

    assigned_licenses = UserLicense.objects.filter(software__in=all_software,
                                                   user=user)

    licenses = {software: None for software in all_software}
    for user_license in assigned_licenses:
        licenses[user_license.software] = user_license

    log.info(assigned_licenses)
    log.info(licenses)

    return licenses
def get_license(user, software):
    """Return the UserLicense assigned to *user* for *software*, or None."""
    # TODO: temporary fix for when somehow a user got more than one license.
    # The proper fix should use Meta.unique_together in the UserLicense model.
    try:
        matches = UserLicense.objects.filter(user=user, software=software)
        if matches:
            return matches[0]
        return None
    except UserLicense.DoesNotExist:
        return None
def get_or_create_license(user, software):
    """Return the user's license for *software*, assigning a free one if needed.

    May still return None when no unassigned license is available.
    """
    existing = get_license(user, software)
    if existing is not None:
        return existing
    return _create_license(user, software)
def _create_license(user, software):
    """Assign a free serial number of *software* to *user*.

    Returns the updated UserLicense, or None when no unassigned
    license exists.
    """
    license = None
    try:
        # find one license that has not been assigned, locking the
        # table/rows with select_for_update to prevent race conditions
        # NOTE(review): transaction.commit_on_success is the pre-Django-1.8
        # API (replaced by transaction.atomic) — confirm the Django version.
        with transaction.commit_on_success():
            selected = UserLicense.objects.select_for_update()
            # IndexError below is how "no free license" is detected
            license = selected.filter(user__isnull=True, software=software)[0]
            license.user = user
            license.save()
    except IndexError:
        # there are no free licenses
        log.error('No serial numbers available for %s', software)
        license = None
    # TODO [rocha]look if someone has unenrolled from the class
    # and already has a serial number
    return license
| agpl-3.0 |
caveman-dick/ansible | lib/ansible/module_utils/cloudstack.py | 7 | 24378 | # -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import time
from ansible.module_utils._text import to_text
try:
from cs import CloudStack, CloudStackException, read_config
HAS_LIB_CS = True
except ImportError:
HAS_LIB_CS = False
# Hypervisor names accepted by CloudStack modules, in both the CamelCase
# spelling the API reports and the all-lowercase spelling users may write.
CS_HYPERVISORS = [
    'KVM', 'kvm',
    'VMware', 'vmware',
    'BareMetal', 'baremetal',
    'XenServer', 'xenserver',
    'LXC', 'lxc',
    'HyperV', 'hyperv',
    'UCS', 'ucs',
    'OVM', 'ovm',
    'Simulator', 'simulator',
]
# Python 3 compatibility: `long` no longer exists, alias it to int so the
# isinstance()/cast logic in AnsibleCloudStack.has_changed keeps working.
if sys.version_info > (3,):
    long = int
def cs_argument_spec():
    """Return the argument spec shared by all CloudStack modules.

    Each connection option falls back to the matching CLOUDSTACK_*
    environment variable, so playbooks need not repeat credentials.
    """
    env = os.environ.get
    return dict(
        api_key=dict(default=env('CLOUDSTACK_KEY')),
        api_secret=dict(default=env('CLOUDSTACK_SECRET'), no_log=True),
        api_url=dict(default=env('CLOUDSTACK_ENDPOINT')),
        api_http_method=dict(choices=['get', 'post'], default=env('CLOUDSTACK_METHOD') or 'get'),
        api_timeout=dict(type='int', default=env('CLOUDSTACK_TIMEOUT') or 10),
        api_region=dict(default=env('CLOUDSTACK_REGION') or 'cloudstack'),
    )
def cs_required_together():
    """Return the argument pairs that must be supplied together."""
    # the API key is useless without its secret and vice versa
    return [['api_key', 'api_secret']]
class AnsibleCloudStack(object):
    def __init__(self, module):
        """Base helper for CloudStack modules; *module* is the AnsibleModule."""
        # the `cs` client library is mandatory for any CloudStack module
        if not HAS_LIB_CS:
            module.fail_json(msg="python library cs required: pip install cs")
        # accumulated module result; 'diff' is filled by has_changed()
        self.result = {
            'changed': False,
            'diff': {
                'before': dict(),
                'after': dict()
            }
        }
        # Common returns, will be merged with self.returns
        # search_for_key: replace_with_key
        self.common_returns = {
            'id': 'id',
            'name': 'name',
            'created': 'created',
            'zonename': 'zone',
            'state': 'state',
            'project': 'project',
            'account': 'account',
            'domain': 'domain',
            'displaytext': 'display_text',
            'displayname': 'display_name',
            'description': 'description',
        }
        # Init returns dict for use in subclasses
        self.returns = {}
        # these values will be casted to int
        self.returns_to_int = {}
        # these keys will be compared case sensitive in self.has_changed()
        self.case_sensitive_keys = [
            'id',
            'displaytext',
            'displayname',
            'description',
        ]
        self.module = module
        # builds self.cs, the API client (may fail_json on missing creds)
        self._connect()
        # Helper for VPCs
        self._vpc_networks_ids = None
        # lazily-resolved, cached lookups used by the get_*() helpers below
        self.domain = None
        self.account = None
        self.project = None
        self.ip_address = None
        self.network = None
        self.vpc = None
        self.zone = None
        self.vm = None
        self.vm_default_nic = None
        self.os_type = None
        self.hypervisor = None
        self.capabilities = None
        self.network_acl = None
    def _connect(self):
        """Create the CloudStack API client from module params, the region
        section of the cs config (read_config) and environment fallbacks."""
        api_region = self.module.params.get('api_region') or os.environ.get('CLOUDSTACK_REGION')
        try:
            config = read_config(api_region)
        except KeyError:
            # no configuration for this region; rely on module params only
            config = {}
        # module params win over values from the cs config file
        api_config = {
            'endpoint': self.module.params.get('api_url') or config.get('endpoint'),
            'key': self.module.params.get('api_key') or config.get('key'),
            'secret': self.module.params.get('api_secret') or config.get('secret'),
            'timeout': self.module.params.get('api_timeout') or config.get('timeout'),
            'method': self.module.params.get('api_http_method') or config.get('method'),
        }
        # expose the effective connection settings in the module result
        # (the secret is not included here)
        self.result.update({
            'api_region': api_region,
            'api_url': api_config['endpoint'],
            'api_key': api_config['key'],
            'api_timeout': api_config['timeout'],
            'api_http_method': api_config['method'],
        })
        if not all([api_config['endpoint'], api_config['key'], api_config['secret']]):
            self.fail_json(msg="Missing api credentials: can not authenticate")
        self.cs = CloudStack(**api_config)
    def fail_json(self, **kwargs):
        """Fail the module run, merging *kwargs* into the accumulated result
        so the user still sees the api_* facts gathered so far."""
        self.result.update(kwargs)
        self.module.fail_json(**self.result)
def get_or_fallback(self, key=None, fallback_key=None):
value = self.module.params.get(key)
if not value:
value = self.module.params.get(fallback_key)
return value
    def has_changed(self, want_dict, current_dict, only_keys=None):
        """Compare desired values against the current API resource values.

        Returns True when at least one value differs; each differing key is
        recorded in self.result['diff'] (before/after) for diff mode.
        Keys in self.case_sensitive_keys are compared case-sensitively,
        all other string values case-insensitively.  None values in
        *want_dict* are ignored.  Note: numeric entries of *current_dict*
        are cast in place to the type of the wanted value.
        """
        result = False
        for key, value in want_dict.items():
            # Optionally limit by a list of keys
            if only_keys and key not in only_keys:
                continue
            # Skip None values
            if value is None:
                continue
            if key in current_dict:
                if isinstance(value, (int, float, long, complex)):
                    # ensure we compare the same type
                    if isinstance(value, int):
                        current_dict[key] = int(current_dict[key])
                    elif isinstance(value, float):
                        current_dict[key] = float(current_dict[key])
                    elif isinstance(value, long):
                        current_dict[key] = long(current_dict[key])
                    elif isinstance(value, complex):
                        current_dict[key] = complex(current_dict[key])
                    if value != current_dict[key]:
                        self.result['diff']['before'][key] = current_dict[key]
                        self.result['diff']['after'][key] = value
                        result = True
                else:
                    # everything else is compared as text
                    before_value = to_text(current_dict[key])
                    after_value = to_text(value)
                    if self.case_sensitive_keys and key in self.case_sensitive_keys:
                        if before_value != after_value:
                            self.result['diff']['before'][key] = before_value
                            self.result['diff']['after'][key] = after_value
                            result = True
                    # Test for diff in case insensitive way
                    elif before_value.lower() != after_value.lower():
                        self.result['diff']['before'][key] = before_value
                        self.result['diff']['after'][key] = after_value
                        result = True
            else:
                # key not present on the current resource counts as a change
                self.result['diff']['before'][key] = None
                self.result['diff']['after'][key] = to_text(value)
                result = True
        return result
def _get_by_key(self, key=None, my_dict=None):
if my_dict is None:
my_dict = {}
if key:
if key in my_dict:
return my_dict[key]
self.fail_json(msg="Something went wrong: %s not found" % key)
return my_dict
    def query_api(self, command, **args):
        """Call the named CloudStack API command and return its response dict.

        Fails the module when the response carries an 'errortext' or when the
        client raises CloudStackException. Relies on fail_json() not
        returning (it calls module.fail_json, which exits); otherwise 'res'
        could be unbound at the final return.
        """
        try:
            res = getattr(self.cs, command)(**args)
            if 'errortext' in res:
                self.fail_json(msg="Failed: '%s'" % res['errortext'])
        except CloudStackException as e:
            self.fail_json(msg='CloudStackException: %s' % str(e))
        return res
    def get_network_acl(self, key=None):
        """Return (and cache) the network ACL named by the 'network_acl' param.

        Returns the full ACL dict, or its ``key`` entry when given; fails the
        module when no matching ACL exists in the VPC.
        """
        if self.network_acl is None:
            args = {
                'name': self.module.params.get('network_acl'),
                'vpcid': self.get_vpc(key='id'),
            }
            network_acls = self.query_api('listNetworkACLLists', **args)
            if network_acls:
                # Listing is filtered by name and VPC; use the first match.
                self.network_acl = network_acls['networkacllist'][0]
                self.result['network_acl'] = self.network_acl['name']
        if self.network_acl:
            return self._get_by_key(key, self.network_acl)
        else:
            self.fail_json(msg="Network ACL %s not found" % self.module.params.get('network_acl'))
    def get_vpc(self, key=None):
        """Return (and cache) the VPC dict, or the value of the given key.

        The VPC identifier comes from the 'vpc' param or the CLOUDSTACK_VPC
        environment variable; returns None when neither is set. Fails when
        the identifier matches more than one VPC.
        """
        if self.vpc:
            return self._get_by_key(key, self.vpc)
        vpc = self.module.params.get('vpc')
        if not vpc:
            vpc = os.environ.get('CLOUDSTACK_VPC')
        if not vpc:
            return None
        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
        }
        vpcs = self.query_api('listVPCs', **args)
        if not vpcs:
            self.fail_json(msg="No VPCs available.")
        # The loop deliberately keeps going after a match so a second match
        # can be detected as ambiguous.
        for v in vpcs['vpc']:
            if vpc in [v['name'], v['displaytext'], v['id']]:
                # Fail if the identifier matches more than one VPC
                if self.vpc:
                    self.fail_json(msg="More than one VPC found with the provided identifyer '%s'" % vpc)
                else:
                    self.vpc = v
                    self.result['vpc'] = v['name']
        if self.vpc:
            return self._get_by_key(key, self.vpc)
        self.fail_json(msg="VPC '%s' not found" % vpc)
    def is_vpc_network(self, network_id):
        """Returns True if network is in VPC."""
        # This is an efficient way to query a lot of networks at a time:
        # fetch all VPCs once and cache the ids of their attached networks.
        if self._vpc_networks_ids is None:
            args = {
                'account': self.get_account(key='name'),
                'domainid': self.get_domain(key='id'),
                'projectid': self.get_project(key='id'),
                'zoneid': self.get_zone(key='id'),
            }
            vpcs = self.query_api('listVPCs', **args)
            self._vpc_networks_ids = []
            if vpcs:
                for vpc in vpcs['vpc']:
                    for n in vpc.get('network', []):
                        self._vpc_networks_ids.append(n['id'])
        return network_id in self._vpc_networks_ids
    def get_network(self, key=None):
        """Return (and cache) the network dict selected by the 'network'
        param (display text, name or id), or the value of the given key.

        Returns None when no network param is set and no VPC is in play;
        fails when a VPC is configured but no network is named.
        """
        if self.network:
            return self._get_by_key(key, self.network)
        network = self.module.params.get('network')
        if not network:
            vpc_name = self.get_vpc(key='name')
            if vpc_name:
                # Within a VPC the network cannot be guessed.
                self.fail_json(msg="Could not find network for VPC '%s' due missing argument: network" % vpc_name)
            return None
        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
            'vpcid': self.get_vpc(key='id')
        }
        networks = self.query_api('listNetworks', **args)
        if not networks:
            self.fail_json(msg="No networks available.")
        for n in networks['network']:
            # ignore any VPC network if vpc param is not given
            if 'vpcid' in n and not self.get_vpc(key='id'):
                continue
            if network in [n['displaytext'], n['name'], n['id']]:
                self.result['network'] = n['name']
                self.network = n
                return self._get_by_key(key, self.network)
        self.fail_json(msg="Network '%s' not found" % network)
    def get_project(self, key=None):
        """Return (and cache) the project dict selected by the 'project'
        param or the CLOUDSTACK_PROJECT environment variable, or the value
        of the given key. Returns None when no project is configured.
        """
        if self.project:
            return self._get_by_key(key, self.project)
        project = self.module.params.get('project')
        if not project:
            project = os.environ.get('CLOUDSTACK_PROJECT')
        if not project:
            return None
        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id')
        }
        projects = self.query_api('listProjects', **args)
        if projects:
            for p in projects['project']:
                # Match by name (case-insensitive) or by id.
                if project.lower() in [p['name'].lower(), p['id']]:
                    self.result['project'] = p['name']
                    self.project = p
                    return self._get_by_key(key, self.project)
        self.fail_json(msg="project '%s' not found" % project)
    def get_ip_address(self, key=None):
        """Return (and cache) the public IP address entry matching the
        required 'ip_address' param, or the value of the given key.
        """
        if self.ip_address:
            return self._get_by_key(key, self.ip_address)
        ip_address = self.module.params.get('ip_address')
        if not ip_address:
            self.fail_json(msg="IP address param 'ip_address' is required")
        args = {
            'ipaddress': ip_address,
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'vpcid': self.get_vpc(key='id'),
        }
        ip_addresses = self.query_api('listPublicIpAddresses', **args)
        if not ip_addresses:
            self.fail_json(msg="IP address '%s' not found" % args['ipaddress'])
        # Listing is filtered by the exact address; use the first entry.
        self.ip_address = ip_addresses['publicipaddress'][0]
        return self._get_by_key(key, self.ip_address)
def get_vm_guest_ip(self):
vm_guest_ip = self.module.params.get('vm_guest_ip')
default_nic = self.get_vm_default_nic()
if not vm_guest_ip:
return default_nic['ipaddress']
for secondary_ip in default_nic['secondaryip']:
if vm_guest_ip == secondary_ip['ipaddress']:
return vm_guest_ip
self.fail_json(msg="Secondary IP '%s' not assigned to VM" % vm_guest_ip)
def get_vm_default_nic(self):
if self.vm_default_nic:
return self.vm_default_nic
nics = self.query_api('listNics', virtualmachineid=self.get_vm(key='id'))
if nics:
for n in nics['nic']:
if n['isdefault']:
self.vm_default_nic = n
return self.vm_default_nic
self.fail_json(msg="No default IP address of VM '%s' found" % self.module.params.get('vm'))
    def get_vm(self, key=None):
        """Return (and cache) the VM matched by the required 'vm' param
        (name, display name or id), or the value of the given key.
        """
        if self.vm:
            return self._get_by_key(key, self.vm)
        vm = self.module.params.get('vm')
        if not vm:
            self.fail_json(msg="Virtual machine param 'vm' is required")
        args = {
            'account': self.get_account(key='name'),
            'domainid': self.get_domain(key='id'),
            'projectid': self.get_project(key='id'),
            'zoneid': self.get_zone(key='id'),
        }
        vms = self.query_api('listVirtualMachines', **args)
        if vms:
            for v in vms['virtualmachine']:
                # Case-insensitive match on name/displayname; note the id is
                # compared against the lowercased input as well.
                if vm.lower() in [v['name'].lower(), v['displayname'].lower(), v['id']]:
                    self.vm = v
                    return self._get_by_key(key, self.vm)
        self.fail_json(msg="Virtual machine '%s' not found" % vm)
def get_disk_offering(self, key=None):
disk_offering = self.module.params.get('disk_offering')
if not disk_offering:
return None
# Do not add domain filter for disk offering listing.
disk_offerings = self.query_api('listDiskOfferings')
if disk_offerings:
for d in disk_offerings['diskoffering']:
if disk_offering in [d['displaytext'], d['name'], d['id']]:
return self._get_by_key(key, d)
self.fail_json(msg="Disk offering '%s' not found" % disk_offering)
    def get_zone(self, key=None):
        """Return (and cache) the zone from the 'zone' param or the
        CLOUDSTACK_ZONE environment variable, or the value of the given key.
        Falls back to the first available zone when none is specified.
        """
        if self.zone:
            return self._get_by_key(key, self.zone)
        zone = self.module.params.get('zone')
        if not zone:
            zone = os.environ.get('CLOUDSTACK_ZONE')
        zones = self.query_api('listZones')
        if not zones:
            self.fail_json(msg="No zones available. Please create a zone first")
        # use the first zone if no zone param given
        if not zone:
            self.zone = zones['zone'][0]
            self.result['zone'] = self.zone['name']
            return self._get_by_key(key, self.zone)
        if zones:
            for z in zones['zone']:
                # Match by name (case-insensitive) or by id.
                if zone.lower() in [z['name'].lower(), z['id']]:
                    self.result['zone'] = z['name']
                    self.zone = z
                    return self._get_by_key(key, self.zone)
        self.fail_json(msg="zone '%s' not found" % zone)
def get_os_type(self, key=None):
if self.os_type:
return self._get_by_key(key, self.zone)
os_type = self.module.params.get('os_type')
if not os_type:
return None
os_types = self.query_api('listOsTypes')
if os_types:
for o in os_types['ostype']:
if os_type in [o['description'], o['id']]:
self.os_type = o
return self._get_by_key(key, self.os_type)
self.fail_json(msg="OS type '%s' not found" % os_type)
def get_hypervisor(self):
if self.hypervisor:
return self.hypervisor
hypervisor = self.module.params.get('hypervisor')
hypervisors = self.query_api('listHypervisors')
# use the first hypervisor if no hypervisor param given
if not hypervisor:
self.hypervisor = hypervisors['hypervisor'][0]['name']
return self.hypervisor
for h in hypervisors['hypervisor']:
if hypervisor.lower() == h['name'].lower():
self.hypervisor = h['name']
return self.hypervisor
self.fail_json(msg="Hypervisor '%s' not found" % hypervisor)
    def get_account(self, key=None):
        """Return (and cache) the account dict from the 'account' param or
        the CLOUDSTACK_ACCOUNT environment variable, or the value of the
        given key. Requires 'domain' to be set alongside; returns None when
        no account is configured.
        """
        if self.account:
            return self._get_by_key(key, self.account)
        account = self.module.params.get('account')
        if not account:
            account = os.environ.get('CLOUDSTACK_ACCOUNT')
        if not account:
            return None
        domain = self.module.params.get('domain')
        if not domain:
            self.fail_json(msg="Account must be specified with Domain")
        args = {
            'name': account,
            'domainid': self.get_domain(key='id'),
            'listall': True
        }
        accounts = self.query_api('listAccounts', **args)
        if accounts:
            # Filtered by exact name and domain; the first entry is the match.
            self.account = accounts['account'][0]
            self.result['account'] = self.account['name']
            return self._get_by_key(key, self.account)
        self.fail_json(msg="Account '%s' not found" % account)
    def get_domain(self, key=None):
        """Return (and cache) the domain dict from the 'domain' param or the
        CLOUDSTACK_DOMAIN environment variable, or the value of the given
        key. Returns None when no domain is configured.
        """
        if self.domain:
            return self._get_by_key(key, self.domain)
        domain = self.module.params.get('domain')
        if not domain:
            domain = os.environ.get('CLOUDSTACK_DOMAIN')
        if not domain:
            return None
        args = {
            'listall': True,
        }
        domains = self.query_api('listDomains', **args)
        if domains:
            for d in domains['domain']:
                # Accept the path as given or with a ROOT prefix.
                # NOTE(review): "root" + domain (no slash) only matches when
                # the given domain itself starts with '/' -- confirm intent.
                if d['path'].lower() in [domain.lower(), "root/" + domain.lower(), "root" + domain.lower()]:
                    self.domain = d
                    self.result['domain'] = d['path']
                    return self._get_by_key(key, self.domain)
        self.fail_json(msg="Domain '%s' not found" % domain)
def query_tags(self, resource, resource_type):
args = {
'resourceids': resource['id'],
'resourcetype': resource_type,
}
tags = self.query_api('listTags', **args)
return self.get_tags(resource=tags, key='tag')
def get_tags(self, resource=None, key='tags'):
existing_tags = []
for tag in resource.get(key) or []:
existing_tags.append({'key': tag['key'], 'value': tag['value']})
return existing_tags
    def _process_tags(self, resource, resource_type, tags, operation="create"):
        """Create or delete the given tags on a resource.

        Marks the result as changed and, unless running in check mode,
        issues the createTags/deleteTags API call and waits for the
        resulting async job to finish. No-op when ``tags`` is empty.
        """
        if tags:
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {
                    'resourceids': resource['id'],
                    'resourcetype': resource_type,
                    'tags': tags,
                }
                if operation == "create":
                    response = self.query_api('createTags', **args)
                else:
                    response = self.query_api('deleteTags', **args)
                self.poll_job(response)
def _tags_that_should_exist_or_be_updated(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in tags if tag not in existing_tags]
def _tags_that_should_not_exist(self, resource, tags):
existing_tags = self.get_tags(resource)
return [tag for tag in existing_tags if tag not in tags]
    def ensure_tags(self, resource, resource_type=None):
        """Reconcile the resource's tags with the 'tags' module param.

        Deletes tags that should no longer exist, creates missing ones, then
        refreshes the resource's tag list from the API. A 'tags' param of
        None means "leave tags untouched". Returns the (possibly updated)
        resource.
        """
        if not resource_type or not resource:
            self.fail_json(msg="Error: Missing resource or resource_type for tags.")
        if 'tags' in resource:
            tags = self.module.params.get('tags')
            if tags is not None:
                self._process_tags(resource, resource_type, self._tags_that_should_not_exist(resource, tags), operation="delete")
                self._process_tags(resource, resource_type, self._tags_that_should_exist_or_be_updated(resource, tags))
                resource['tags'] = self.query_tags(resource=resource, resource_type=resource_type)
        return resource
def get_capabilities(self, key=None):
if self.capabilities:
return self._get_by_key(key, self.capabilities)
capabilities = self.query_api('listCapabilities')
self.capabilities = capabilities['capability']
return self._get_by_key(key, self.capabilities)
    def poll_job(self, job=None, key=None):
        """Poll an async CloudStack job until it completes.

        Returns the job-result entry named by ``key`` when present,
        otherwise the original job dict. Fails the module when the job
        reports an error. Jobs without a 'jobid' are returned unchanged.
        NOTE(review): polls forever at 2s intervals -- no timeout.
        """
        if 'jobid' in job:
            while True:
                res = self.query_api('queryAsyncJobResult', jobid=job['jobid'])
                # A non-zero jobstatus means the job has finished.
                if res['jobstatus'] != 0 and 'jobresult' in res:
                    if 'errortext' in res['jobresult']:
                        self.fail_json(msg="Failed: '%s'" % res['jobresult']['errortext'])
                    if key and key in res['jobresult']:
                        job = res['jobresult'][key]
                    break
                time.sleep(2)
        return job
def get_result(self, resource):
if resource:
returns = self.common_returns.copy()
returns.update(self.returns)
for search_key, return_key in returns.items():
if search_key in resource:
self.result[return_key] = resource[search_key]
# Bad bad API does not always return int when it should.
for search_key, return_key in self.returns_to_int.items():
if search_key in resource:
self.result[return_key] = int(resource[search_key])
if 'tags' in resource:
self.result['tags'] = resource['tags']
return self.result
| gpl-3.0 |
AltSchool/django | tests/migrations/test_base.py | 292 | 4620 | import os
import shutil
import tempfile
from contextlib import contextmanager
from importlib import import_module
from django.apps import apps
from django.db import connection
from django.db.migrations.recorder import MigrationRecorder
from django.test import TransactionTestCase
from django.test.utils import extend_sys_path
from django.utils.module_loading import module_dir
class MigrationTestBase(TransactionTestCase):
    """
    TransactionTestCase subclass providing extra assertions for checking the
    effects of migrations and schema operations on the database.
    """
    available_apps = ["migrations"]
    def tearDown(self):
        # Reset applied-migrations state.
        MigrationRecorder(connection).migration_qs.filter(app='migrations').delete()
    def get_table_description(self, table):
        """Introspect and return the column descriptions of a table."""
        with connection.cursor() as cursor:
            return connection.introspection.get_table_description(cursor, table)
    def assertTableExists(self, table):
        with connection.cursor() as cursor:
            self.assertIn(table, connection.introspection.table_names(cursor))
    def assertTableNotExists(self, table):
        with connection.cursor() as cursor:
            self.assertNotIn(table, connection.introspection.table_names(cursor))
    def assertColumnExists(self, table, column):
        column_names = [c.name for c in self.get_table_description(table)]
        self.assertIn(column, column_names)
    def assertColumnNotExists(self, table, column):
        column_names = [c.name for c in self.get_table_description(table)]
        self.assertNotIn(column, column_names)
    def assertColumnNull(self, table, column):
        nullable = [c.null_ok for c in self.get_table_description(table) if c.name == column]
        self.assertEqual(nullable[0], True)
    def assertColumnNotNull(self, table, column):
        nullable = [c.null_ok for c in self.get_table_description(table) if c.name == column]
        self.assertEqual(nullable[0], False)
    def assertIndexExists(self, table, columns, value=True):
        with connection.cursor() as cursor:
            constraints = connection.introspection.get_constraints(cursor, table).values()
            found = any(
                c["index"]
                for c in constraints
                if c['columns'] == list(columns)
            )
            self.assertEqual(value, found)
    def assertIndexNotExists(self, table, columns):
        return self.assertIndexExists(table, columns, False)
    def assertFKExists(self, table, columns, to, value=True):
        with connection.cursor() as cursor:
            constraints = connection.introspection.get_constraints(cursor, table).values()
            found = any(
                c["foreign_key"] == to
                for c in constraints
                if c['columns'] == list(columns)
            )
            self.assertEqual(value, found)
    def assertFKNotExists(self, table, columns, to, value=True):
        # NOTE: 'value' is accepted for signature symmetry but always negated.
        return self.assertFKExists(table, columns, to, False)
    @contextmanager
    def temporary_migration_module(self, app_label='migrations', module=None):
        """
        Allows testing management commands in a temporary migrations module.
        Wrap all invocations to makemigrations and squashmigrations with this
        context manager to avoid creating migration files in your source tree
        inadvertently.
        Takes the application label that will be passed to makemigrations or
        squashmigrations and the Python path to a migrations module, which is
        used as a template for the temporary one. If it isn't provided, the
        application's migrations module is used, if it exists.
        Yields the filesystem path to the temporary migrations module.
        """
        outer_dir = tempfile.mkdtemp()
        try:
            package_dir = tempfile.mkdtemp(dir=outer_dir)
            with open(os.path.join(package_dir, '__init__.py'), 'w'):
                pass
            target_migrations_dir = os.path.join(package_dir, 'migrations')
            if module is None:
                module = apps.get_app_config(app_label).name + '.migrations'
            try:
                source_migrations_dir = module_dir(import_module(module))
            except (ImportError, ValueError):
                # No template module: start with an empty migrations package.
                pass
            else:
                shutil.copytree(source_migrations_dir, target_migrations_dir)
            with extend_sys_path(outer_dir):
                new_module = os.path.basename(package_dir) + '.migrations'
                with self.settings(MIGRATION_MODULES={app_label: new_module}):
                    yield target_migrations_dir
        finally:
            shutil.rmtree(outer_dir)
| bsd-3-clause |
Transtech/omim | 3party/protobuf/python/google/protobuf/service.py | 243 | 9144 | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# https://developers.google.com/protocol-buffers/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""DEPRECATED: Declares the RPC service interfaces.
This module declares the abstract interfaces underlying proto2 RPC
services. These are intended to be independent of any particular RPC
implementation, so that proto2 services can be used on top of a variety
of implementations. Starting with version 2.3.0, RPC implementations should
not try to build on these, but should instead provide code generator plugins
which generate code specific to the particular RPC implementation. This way
the generated code can be more appropriate for the implementation in use
and can avoid unnecessary layers of indirection.
"""
__author__ = 'petar@google.com (Petar Petrov)'
class RpcException(Exception):
    """Raised when a blocking RPC method call fails."""
class Service(object):
    """Abstract base interface for protocol-buffer-based RPC services.

    Concrete services (server implementations or client stubs) subclass this
    interface, which allows calling service methods generically without
    knowing the exact service type at compile time (analogous to the Message
    interface).
    """
    def GetDescriptor():
        """Retrieves this service's descriptor."""
        raise NotImplementedError
    def CallMethod(self, method_descriptor, rpc_controller,
                   request, done):
        """Calls a method of the service specified by method_descriptor.

        When "done" is None the call blocks and the response message is
        returned directly; RpcException is raised on error. Otherwise the
        call is asynchronous and "done" is invoked later with the response
        value, which is None when the RPC failed (query the RpcController
        for details).

        The request must be of the exact class returned by
        GetRequestClass(method) and must not be modified once the call has
        started; rpc_controller must be of the correct type for the RPC
        implementation used by this service (for stubs, that depends on the
        stub's RpcChannel).
        """
        raise NotImplementedError
    def GetRequestClass(self, method_descriptor):
        """Returns the class of the request message for the given method.

        CallMethod() requires the request to be of this exact class; build
        one with e.g.::

            method = service.GetDescriptor().FindMethodByName("Foo")
            request = stub.GetRequestClass(method)()
        """
        raise NotImplementedError
    def GetResponseClass(self, method_descriptor):
        """Returns the class of the response message for the given method.

        Provided for convenience when a caller wants to know the response
        type in advance; the RpcChannel constructs the response message
        itself.
        """
        raise NotImplementedError
class RpcController(object):
    """Mediates a single RPC method call.

    The controller's primary purpose is to manipulate settings specific to
    the RPC implementation and to report RPC-level errors. These methods are
    the "least common denominator" all implementations are expected to
    support; specific implementations may offer more (e.g. deadline
    propagation).
    """
    # Client-side methods below
    def Reset(self):
        """Resets the controller to its initial state so it may be reused in
        a new call; must not be called while an RPC is in progress."""
        raise NotImplementedError
    def Failed(self):
        """Returns true if the finished call failed; the failure reasons
        depend on the RPC implementation. Must not be called before the call
        has finished, and on failure the response contents are undefined."""
        raise NotImplementedError
    def ErrorText(self):
        """If Failed is true, returns a human-readable description of the error."""
        raise NotImplementedError
    def StartCancel(self):
        """Advises the RPC system that the caller wants the call canceled.

        The system may cancel immediately, later, or not at all; on cancel
        the "done" callback still runs and the controller then reports the
        call as failed."""
        raise NotImplementedError
    # Server-side methods below
    def SetFailed(self, reason):
        """Marks the call failed on the client side; "reason" is folded into
        the message returned by ErrorText().

        Machine-readable failure information belongs in the response
        protocol buffer, not in SetFailed()."""
        raise NotImplementedError
    def IsCanceled(self):
        """Returns true when the client canceled the RPC, in which case the
        server may give up on replying but should still invoke the final
        "done" callback."""
        raise NotImplementedError
    def NotifyOnCancel(self, callback):
        """Registers a callback invoked exactly once on cancel.

        If the RPC completes without cancellation the callback runs after
        completion; if already canceled it runs immediately. May be called
        at most once per request."""
        raise NotImplementedError
class RpcChannel(object):
    """Abstract interface for an RPC channel.

    An RpcChannel represents a communication line to a service, possibly on
    another machine. It is normally wrapped by a generated service stub
    rather than used directly::

        channel = rpcImpl.Channel("remotehost.example.com:1234")
        controller = rpcImpl.Controller()
        service = MyService_Stub(channel)
        service.MyMethod(controller, request, callback)
    """
    def CallMethod(self, method_descriptor, rpc_controller,
                   request, response_class, done):
        """Calls the method identified by the descriptor.

        The signature mirrors Service.CallMethod() but with a looser
        requirement: the request object need not be of a specific class as
        long as its descriptor is method.input_type.
        """
        raise NotImplementedError
| apache-2.0 |
gauribhoite/personfinder | env/site-packages/django/template/backends/jinja2.py | 91 | 2118 | # Since this package contains a "django" module, this is required on Python 2.
from __future__ import absolute_import
import sys
import jinja2
from django.conf import settings
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.utils import six
from django.utils.module_loading import import_string
from .base import BaseEngine
from .utils import csrf_input_lazy, csrf_token_lazy
class Jinja2(BaseEngine):
    """Django template engine backend wrapping a Jinja2 environment."""
    app_dirname = 'jinja2'
    def __init__(self, params):
        """Build the Jinja2 environment from a TEMPLATES setting dict."""
        params = params.copy()
        options = params.pop('OPTIONS').copy()
        super(Jinja2, self).__init__(params)
        environment_path = options.pop('environment', 'jinja2.Environment')
        environment_cls = import_string(environment_path)
        # Fill in Django-appropriate defaults unless explicitly configured.
        options.setdefault('autoescape', True)
        options.setdefault('loader', jinja2.FileSystemLoader(self.template_dirs))
        options.setdefault('auto_reload', settings.DEBUG)
        options.setdefault(
            'undefined',
            jinja2.DebugUndefined if settings.DEBUG else jinja2.Undefined)
        self.env = environment_cls(**options)
    def from_string(self, template_code):
        """Compile a template from a source string."""
        return Template(self.env.from_string(template_code))
    def get_template(self, template_name):
        """Load a template by name, translating Jinja2 errors into Django's
        TemplateDoesNotExist / TemplateSyntaxError (original traceback kept)."""
        try:
            return Template(self.env.get_template(template_name))
        except jinja2.TemplateNotFound as exc:
            six.reraise(
                TemplateDoesNotExist, TemplateDoesNotExist(exc.args),
                sys.exc_info()[2])
        except jinja2.TemplateSyntaxError as exc:
            six.reraise(
                TemplateSyntaxError, TemplateSyntaxError(exc.args),
                sys.exc_info()[2])
class Template(object):
    """Thin adapter giving a Jinja2 template Django's render() signature."""
    def __init__(self, template):
        self.template = template
    def render(self, context=None, request=None):
        """Render with an optional context dict; when a request is given,
        expose it plus lazy CSRF helpers to the template."""
        ctx = {} if context is None else context
        if request is not None:
            ctx['request'] = request
            ctx['csrf_input'] = csrf_input_lazy(request)
            ctx['csrf_token'] = csrf_token_lazy(request)
        return self.template.render(ctx)
| apache-2.0 |
udrg/kalibr | aslam_offline_calibration/kalibr/python/kalibr_imu_camera_calibration/IccCalibrator.py | 4 | 9778 | import aslam_backend as aopt
import aslam_splines as asp
import IccUtil as util
import incremental_calibration as inc
import kalibr_common as kc
import sm
import gc
import numpy as np
import multiprocessing
import sys
# make numpy print prettier
np.set_printoptions(suppress=True)
# Optimization group ids used throughout the calibrator.
CALIBRATION_GROUP_ID = 0
HELPER_GROUP_ID = 1
def addSplineDesignVariables(problem, dvc, setActive=True, group_id=HELPER_GROUP_ID):
    """Register every design variable of a spline container with the problem.

    Each design variable is (de)activated via setActive and added to the
    given optimization group.
    """
    for idx in range(dvc.numDesignVariables()):
        design_variable = dvc.designVariable(idx)
        design_variable.setActive(setActive)
        problem.addDesignVariable(design_variable, group_id)
class IccCalibrator(object):
def __init__(self):
self.ImuList = []
def initDesignVariables(self, problem, poseSpline, noTimeCalibration, noChainExtrinsics=True, \
estimateGravityLength=False, initialGravityEstimate=np.array([0.0,9.81,0.0])):
# Initialize the system pose spline (always attached to imu0)
self.poseDv = asp.BSplinePoseDesignVariable( poseSpline )
addSplineDesignVariables(problem, self.poseDv)
# Add the calibration target orientation design variable. (expressed as gravity vector in target frame)
if estimateGravityLength:
self.gravityDv = aopt.EuclideanPointDv( initialGravityEstimate )
else:
self.gravityDv = aopt.EuclideanDirection( initialGravityEstimate )
self.gravityExpression = self.gravityDv.toExpression()
self.gravityDv.setActive( True )
problem.addDesignVariable(self.gravityDv, HELPER_GROUP_ID)
#Add all DVs for all IMUs
for imu in self.ImuList:
imu.addDesignVariables( problem )
#Add all DVs for the camera chain
self.CameraChain.addDesignVariables( problem, noTimeCalibration, noChainExtrinsics )
def addPoseMotionTerms(self, problem, tv, rv):
wt = 1.0/tv;
wr = 1.0/rv
W = np.diag([wt,wt,wt,wr,wr,wr])
asp.addMotionErrorTerms(problem, self.poseDv, W, errorOrder)
#add camera to sensor list (create list if necessary)
def registerCamChain(self, sensor):
self.CameraChain = sensor
def registerImu(self, sensor):
self.ImuList.append( sensor )
def buildProblem( self,
splineOrder=6,
poseKnotsPerSecond=70,
biasKnotsPerSecond=70,
doPoseMotionError=False,
mrTranslationVariance=1e6,
mrRotationVariance=1e5,
doBiasMotionError=True,
blakeZisserCam=-1,
huberAccel=-1,
huberGyro=-1,
noTimeCalibration=False,
noChainExtrinsics=True,
maxIterations=20,
gyroNoiseScale=1.0,
accelNoiseScale=1.0,
timeOffsetPadding=0.02,
verbose=False ):
print "\tSpline order: %d" % (splineOrder)
print "\tPose knots per second: %d" % (poseKnotsPerSecond)
print "\tDo pose motion regularization: %s" % (doPoseMotionError)
print "\t\txddot translation variance: %f" % (mrTranslationVariance)
print "\t\txddot rotation variance: %f" % (mrRotationVariance)
print "\tBias knots per second: %d" % (biasKnotsPerSecond)
print "\tDo bias motion regularization: %s" % (doBiasMotionError)
print "\tBlake-Zisserman on reprojection errors %s" % blakeZisserCam
print "\tAcceleration Huber width (sigma): %f" % (huberAccel)
print "\tGyroscope Huber width (sigma): %f" % (huberGyro)
print "\tDo time calibration: %s" % (not noTimeCalibration)
print "\tMax iterations: %d" % (maxIterations)
print "\tTime offset padding: %f" % (timeOffsetPadding)
############################################
## initialize camera chain
############################################
#estimate the timeshift for all cameras to the main imu
self.noTimeCalibration = noTimeCalibration
if not noTimeCalibration:
for cam in self.CameraChain.camList:
cam.findTimeshiftCameraImuPrior(self.ImuList[0], verbose)
#obtain orientation prior between main imu and camera chain (if no external input provided)
#and initial estimate for the direction of gravity
self.CameraChain.findOrientationPriorCameraChainToImu(self.ImuList[0])
estimatedGravity = self.CameraChain.getEstimatedGravity()
############################################
## init optimization problem
############################################
#initialize a pose spline using the camera poses in the camera chain
poseSpline = self.CameraChain.initializePoseSplineFromCameraChain(splineOrder, poseKnotsPerSecond, timeOffsetPadding)
# Initialize bias splines for all IMUs
for imu in self.ImuList:
imu.initBiasSplines(poseSpline, splineOrder, biasKnotsPerSecond)
# Now I can build the problem
problem = inc.CalibrationOptimizationProblem()
# Initialize all design variables.
self.initDesignVariables(problem, poseSpline, noTimeCalibration, noChainExtrinsics, initialGravityEstimate = estimatedGravity)
############################################
## add error terms
############################################
#Add calibration target reprojection error terms for all camera in chain
self.CameraChain.addCameraChainErrorTerms(problem, self.poseDv, blakeZissermanDf=blakeZisserCam, timeOffsetPadding=timeOffsetPadding)
# Initialize IMU error terms.
for imu in self.ImuList:
imu.addAccelerometerErrorTerms(problem, self.poseDv, self.gravityExpression, mSigma=huberAccel, accelNoiseScale=accelNoiseScale)
imu.addGyroscopeErrorTerms(problem, self.poseDv, mSigma=huberGyro, gyroNoiseScale=gyroNoiseScale, g_w=self.gravityExpression)
# Add the bias motion terms.
if doBiasMotionError:
imu.addBiasMotionTerms(problem)
# Add the pose motion terms.
if doPoseMotionError:
self.addPoseMotionTerms(problem, mrTranslationVariance, mrRotationVariance)
# Add a gravity prior
self.problem = problem
def optimize(self, options=None, maxIterations=30, recoverCov=False):
if options is None:
options = aopt.Optimizer2Options()
options.verbose = True
options.doLevenbergMarquardt = True
options.levenbergMarquardtLambdaInit = 10.0
options.nThreads = max(1,multiprocessing.cpu_count()-1)
options.convergenceDeltaX = 1e-5
options.convergenceDeltaJ = 1e-2
options.maxIterations = maxIterations
options.trustRegionPolicy = aopt.LevenbergMarquardtTrustRegionPolicy(options.levenbergMarquardtLambdaInit)
options.linearSolver = aopt.BlockCholeskyLinearSystemSolver()
#run the optimization
self.optimizer = aopt.Optimizer2(options)
self.optimizer.setProblem(self.problem)
optimizationFailed=False
try:
retval = self.optimizer.optimize()
if retval.linearSolverFailure:
optimizationFailed = True
except:
optimizationFailed = True
if optimizationFailed:
sm.logError("Optimization failed!")
raise RuntimeError("Optimization failed!")
#free some memory
del self.optimizer
gc.collect()
if recoverCov:
self.recoverCovariance()
    def recoverCovariance(self):
        """Recover standard deviations of the calibrated parameters via an
        incremental estimator run over the solved problem.

        Results are stored on the instance:
          self.std_trafo_ic  -- std devs of the 6-DoF imu-cam0 transformation
          self.std_times     -- std devs of the per-camera time offsets
        """
        #Covariance ordering (=dv ordering)
        #ORDERING: N=num cams
        #  1. transformation imu-cam0 --> 6
        #  2. camera time2imu --> 1*numCams (only if enabled)
        print "Recovering covariance..."
        estimator = inc.IncrementalEstimator(CALIBRATION_GROUP_ID)
        rval = estimator.addBatch(self.problem, True)
        # Standard deviations = sqrt of the diagonal of the covariance.
        est_stds = np.sqrt(estimator.getSigma2Theta().diagonal())

        #split and store the variance
        self.std_trafo_ic = np.array(est_stds[0:6])
        self.std_times = np.array(est_stds[6:])
def saveImuSetParametersYaml(self, resultFile):
imuSetConfig = kc.ImuSetParameters(resultFile, True)
for imu in self.ImuList:
imuConfig = imu.getImuConfig()
imuSetConfig.addImuParameters(imu_parameters=imuConfig)
imuSetConfig.writeYaml(resultFile)
    def saveCamChainParametersYaml(self, resultFile):
        """Write the calibrated camera-chain parameters (cam-cam baselines,
        imu-cam extrinsics and, if estimated, time offsets) to *resultFile*."""
        chain = self.CameraChain.chainConfig
        nCams = len(self.CameraChain.camList)

        # Calibration results
        for camNr in range(0,nCams):
            #cam-cam baselines
            if camNr > 0:
                T_cB_cA, baseline = self.CameraChain.getResultBaseline(camNr-1, camNr)
                chain.setExtrinsicsLastCamToHere(camNr, T_cB_cA)

            #imu-cam trafos
            T_ci = self.CameraChain.getResultTrafoImuToCam(camNr)
            chain.setExtrinsicsImuToCam(camNr, T_ci)

            if not self.noTimeCalibration:
                #imu to cam timeshift
                timeshift = float(self.CameraChain.getResultTimeShift(camNr))
                chain.setTimeshiftCamImu(camNr, timeshift)

        try:
            chain.writeYaml(resultFile)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt, and the
            # failure is only printed, never re-raised -- confirm this
            # best-effort behavior is intended.
            print "ERROR: Could not write parameters to file: {0}\n".format(resultFile)
| bsd-3-clause |
bruckhaus/challenges | python_challenges/project_euler/p003_largest_prime_factor.py | 1 | 1769 | __author__ = 'tilmannbruckhaus'
# Largest prime factor
# Problem 3
# The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
class LargestPrimeFactor:
    """Largest prime factor of an integer (Project Euler problem 3)."""

    def __init__(self):
        pass

    @staticmethod
    def is_divisible(number, divisor):
        """Return True if *number* is evenly divisible by *divisor*."""
        return number % divisor == 0

    @staticmethod
    def is_prime(number):
        """Trial-division primality test.

        Fixed to short-circuit on the first divisor found; the original kept
        scanning every candidate up to sqrt(number) even after disproving
        primality.
        """
        candidate = 2
        while candidate * candidate <= number:
            if LargestPrimeFactor.is_divisible(number, candidate):
                return False
            candidate += 1
        return True

    @staticmethod
    def find(n):
        """Return the largest prime factor of *n* (for n >= 2).

        Fixes the original, which returned 1 when *n* itself is prime
        (e.g. find(2) == 1), relied on Python-2 integer division, and had
        debug printing entangled with the algorithm.
        """
        candidate = 2
        while candidate * candidate <= n:
            if n % candidate == 0:
                # Divide the factor out; repeated division guarantees that any
                # candidate dividing n here is prime.
                n //= candidate
            else:
                candidate += 1
        # Whatever remains after all smaller factors are removed is the
        # largest prime factor.
        return n
if __name__ == '__main__':
    # Exercise the solver only when run as a script; previously these were
    # unguarded Python-2 print statements that executed on every import.
    for value in (2, 3, 4, 7, 15, 44, 99, 111, 1577, 19 * 37 * 83, 600851475143):
        print(LargestPrimeFactor.find(value))
| mit |
jnns/wagtail | wagtail/wagtailsnippets/views/snippets.py | 4 | 10439 | from django.http import Http404
from django.shortcuts import get_object_or_404, render, redirect
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from wagtail.wagtailadmin.edit_handlers import ObjectList, extract_panel_definitions_from_model_class
from wagtail.wagtailadmin.utils import permission_denied
from wagtail.wagtailsnippets.models import get_snippet_content_types
from wagtail.wagtailsnippets.permissions import get_permission_name, user_can_edit_snippet_type
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.forms import SearchForm
from wagtail.wagtailsearch.index import class_is_indexed
from wagtail.wagtailsearch.backends import get_search_backend
# == Helper functions ==
def get_snippet_type_name(content_type):
    """ e.g. given the 'advert' content type, return ('Advert', 'Adverts') """
    # Both names live on the model's _meta options.
    meta = content_type.model_class()._meta
    singular = force_text(meta.verbose_name)
    plural = force_text(meta.verbose_name_plural)
    return singular, plural
def get_snippet_type_description(content_type):
    """Return the meta description of the class associated with the given
    content type, or '' when the model declares none."""
    opts = content_type.model_class()._meta
    try:
        return force_text(opts.description)
    except AttributeError:
        # Narrowed from a bare ``except:``: the only expected failure is the
        # model's Meta lacking a ``description`` attribute.
        return ''
def get_content_type_from_url_params(app_name, model_name):
    """
    Retrieve a content type from an app_name / model_name combo.

    Raises Http404 if the pair does not resolve, or resolves to a model that
    is not registered as a snippet.
    """
    try:
        content_type = ContentType.objects.get_by_natural_key(app_name, model_name)
    except ContentType.DoesNotExist:
        raise Http404
    if content_type not in get_snippet_content_types():
        # don't allow people to hack the URL to edit content types that aren't registered as snippets
        raise Http404
    return content_type
# Per-model cache of built edit handlers.
SNIPPET_EDIT_HANDLERS = {}


def get_snippet_edit_handler(model):
    """Return the edit handler for *model*, building and caching it on
    first use."""
    try:
        return SNIPPET_EDIT_HANDLERS[model]
    except KeyError:
        panels = extract_panel_definitions_from_model_class(model)
        handler = ObjectList(panels).bind_to_model(model)
        SNIPPET_EDIT_HANDLERS[model] = handler
        return handler
# == Views ==
def index(request):
    """List the snippet types the current user is allowed to edit, sorted by
    their plural name."""
    snippet_types = []
    for content_type in get_snippet_content_types():
        if not user_can_edit_snippet_type(request.user, content_type):
            continue
        plural_name = get_snippet_type_name(content_type)[1]
        description = get_snippet_type_description(content_type)
        snippet_types.append((plural_name, description, content_type))

    # Case-insensitive sort on the plural name.
    snippet_types.sort(key=lambda entry: entry[0].lower())

    return render(request, 'wagtailsnippets/snippets/index.html', {
        'snippet_types': snippet_types,
    })
def list(request, content_type_app_name, content_type_model_name):
    """Listing view for a single snippet type, with optional search and
    pagination.

    NOTE(review): the function name shadows the ``list`` builtin, but it is
    referenced by name from the URLconf, so it is left unchanged.
    """
    content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)

    model = content_type.model_class()

    # The listing is visible to anyone who can add, change or delete this model.
    permissions = [
        get_permission_name(action, model)
        for action in ['add', 'change', 'delete']
    ]
    if not any([request.user.has_perm(perm) for perm in permissions]):
        return permission_denied(request)

    snippet_type_name, snippet_type_name_plural = get_snippet_type_name(content_type)

    items = model.objects.all()

    # Search -- only offered for models registered with the search index.
    is_searchable = class_is_indexed(model)
    is_searching = False
    search_query = None
    if is_searchable and 'q' in request.GET:
        search_form = SearchForm(request.GET, placeholder=_("Search %(snippet_type_name)s") % {
            'snippet_type_name': snippet_type_name_plural
        })

        if search_form.is_valid():
            search_query = search_form.cleaned_data['q']

            search_backend = get_search_backend()
            items = search_backend.search(search_query, items)
            is_searching = True
    else:
        search_form = SearchForm(placeholder=_("Search %(snippet_type_name)s") % {
            'snippet_type_name': snippet_type_name_plural
        })

    # Pagination: bad page numbers fall back to page 1, overflow to the last page.
    p = request.GET.get('p', 1)
    paginator = Paginator(items, 20)

    try:
        paginated_items = paginator.page(p)
    except PageNotAnInteger:
        paginated_items = paginator.page(1)
    except EmptyPage:
        paginated_items = paginator.page(paginator.num_pages)

    # AJAX requests receive only the results partial; others the full page.
    if request.is_ajax():
        template = 'wagtailsnippets/snippets/results.html'
    else:
        template = 'wagtailsnippets/snippets/type_index.html'

    return render(request, template, {
        'content_type': content_type,
        'snippet_type_name': snippet_type_name,
        'snippet_type_name_plural': snippet_type_name_plural,
        'items': paginated_items,
        'can_add_snippet': request.user.has_perm(get_permission_name('add', model)),
        'is_searchable': is_searchable,
        'search_form': search_form,
        'is_searching': is_searching,
        'query_string': search_query,
    })
def create(request, content_type_app_name, content_type_model_name):
    """Create a new snippet of the given type.

    GET renders an empty form; a valid POST saves the instance and redirects
    back to the type's listing.
    """
    content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)

    model = content_type.model_class()

    permission = get_permission_name('add', model)
    if not request.user.has_perm(permission):
        return permission_denied(request)

    snippet_type_name = get_snippet_type_name(content_type)[0]

    instance = model()
    edit_handler_class = get_snippet_edit_handler(model)
    form_class = edit_handler_class.get_form_class(model)

    # NOTE(review): tests the truthiness of request.POST, not request.method,
    # so an empty POST body falls through to the GET branch -- confirm intended.
    if request.POST:
        form = form_class(request.POST, request.FILES, instance=instance)

        if form.is_valid():
            form.save()

            messages.success(
                request,
                _("{snippet_type} '{instance}' created.").format(
                    snippet_type=capfirst(get_snippet_type_name(content_type)[0]),
                    instance=instance
                ),
                buttons=[
                    messages.button(reverse('wagtailsnippets:edit', args=(content_type_app_name, content_type_model_name, instance.id)), _('Edit'))
                ]
            )
            return redirect('wagtailsnippets:list', content_type.app_label, content_type.model)
        else:
            messages.error(request, _("The snippet could not be created due to errors."))
            edit_handler = edit_handler_class(instance=instance, form=form)
    else:
        form = form_class(instance=instance)
        edit_handler = edit_handler_class(instance=instance, form=form)

    return render(request, 'wagtailsnippets/snippets/create.html', {
        'content_type': content_type,
        'snippet_type_name': snippet_type_name,
        'edit_handler': edit_handler,
    })
def edit(request, content_type_app_name, content_type_model_name, id):
    """Edit an existing snippet instance.

    GET renders the bound form; a valid POST saves and redirects back to the
    type's listing. 404s if the instance does not exist.
    """
    content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)

    model = content_type.model_class()

    permission = get_permission_name('change', model)
    if not request.user.has_perm(permission):
        return permission_denied(request)

    snippet_type_name = get_snippet_type_name(content_type)[0]

    instance = get_object_or_404(model, id=id)
    edit_handler_class = get_snippet_edit_handler(model)
    form_class = edit_handler_class.get_form_class(model)

    # NOTE(review): truthiness of request.POST rather than request.method --
    # an empty POST body falls through to the GET branch.
    if request.POST:
        form = form_class(request.POST, request.FILES, instance=instance)

        if form.is_valid():
            form.save()

            messages.success(
                request,
                _("{snippet_type} '{instance}' updated.").format(
                    snippet_type=capfirst(snippet_type_name),
                    instance=instance
                ),
                buttons=[
                    messages.button(reverse('wagtailsnippets:edit', args=(content_type_app_name, content_type_model_name, instance.id)), _('Edit'))
                ]
            )
            return redirect('wagtailsnippets:list', content_type.app_label, content_type.model)
        else:
            messages.error(request, _("The snippet could not be saved due to errors."))
            edit_handler = edit_handler_class(instance=instance, form=form)
    else:
        form = form_class(instance=instance)
        edit_handler = edit_handler_class(instance=instance, form=form)

    return render(request, 'wagtailsnippets/snippets/edit.html', {
        'content_type': content_type,
        'snippet_type_name': snippet_type_name,
        'instance': instance,
        'edit_handler': edit_handler
    })
def delete(request, content_type_app_name, content_type_model_name, id):
    """Delete a snippet instance.

    GET renders a confirmation page; POST performs the deletion and redirects
    back to the type's listing.
    """
    content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)

    model = content_type.model_class()

    permission = get_permission_name('delete', model)
    if not request.user.has_perm(permission):
        return permission_denied(request)

    snippet_type_name = get_snippet_type_name(content_type)[0]
    instance = get_object_or_404(model, id=id)

    if request.POST:
        instance.delete()

        messages.success(
            request,
            _("{snippet_type} '{instance}' deleted.").format(
                snippet_type=capfirst(snippet_type_name),
                instance=instance
            )
        )
        return redirect('wagtailsnippets:list', content_type.app_label, content_type.model)

    return render(request, 'wagtailsnippets/snippets/confirm_delete.html', {
        'content_type': content_type,
        'snippet_type_name': snippet_type_name,
        'instance': instance,
    })
def usage(request, content_type_app_name, content_type_model_name, id):
    """Paginated listing of the places where a snippet instance is used."""
    content_type = get_content_type_from_url_params(content_type_app_name, content_type_model_name)
    instance = get_object_or_404(content_type.model_class(), id=id)

    # Paginate the usage queryset: bad page numbers fall back to the first
    # page, overflow to the last.
    paginator = Paginator(instance.get_usage(), 20)
    page_number = request.GET.get('p', 1)
    try:
        used_by = paginator.page(page_number)
    except PageNotAnInteger:
        used_by = paginator.page(1)
    except EmptyPage:
        used_by = paginator.page(paginator.num_pages)

    return render(request, "wagtailsnippets/snippets/usage.html", {
        'instance': instance,
        'used_by': used_by
    })
| bsd-3-clause |
ingadhoc/surveyor | surveyor_payments/__openerp__.py | 5 | 1696 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# Odoo/OpenERP addon manifest: a single dict literal describing the module.
{
    'name': 'Surveyor Payments',
    'version': '8.0.1.0.0',
    'category': 'Warehouse Management',
    'sequence': 14,
    'summary': '',
    'description': """
Surveyor Payments
=================
Allow to register payments on taks and give availability to track them.
""",
    'author': 'ADHOC SA',
    'website': 'www.adhoc.com.ar',
    'license': 'AGPL-3',
    'images': [
    ],
    # Modules that must be installed before this one.
    'depends': [
        'project',
        'account',
    ],
    # Views and access-control data loaded on install/update.
    'data': [
        'project_task_view.xml',
        'security/ir.model.access.csv',
    ],
    'demo': [
    ],
    'test': [
    ],
    'installable': True,
    'auto_install': False,
    'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
javier3407/Plugin.Video.JAV.BRAR | resources/tools/update.py | 17 | 6005 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# PalcoTV - XBMC Add-on by Juarrox (juarrox@gmail.com)
# Version 0.2.9 (18.07.2014)
#------------------------------------------------------------
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
# Gracias a la librería plugintools de Jesús (www.mimediacenter.info)
import os
import sys
import urllib
import urllib2
import re
import shutil
import zipfile
import xbmc
import xbmcgui
import xbmcaddon
import xbmcplugin
import plugintools
# Resolve the add-on's key directories through XBMC's special:// protocol.
# os.path.join(path, '') guarantees a trailing path separator.
libdir = xbmc.translatePath(os.path.join('special://xbmc/system/players/dvdplayer/', ''))
home = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/', ''))
tools = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/resources/tools', ''))
addons = xbmc.translatePath(os.path.join('special://home/addons/', ''))
resources = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/resources', ''))
art = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/art', ''))
tmp = xbmc.translatePath(os.path.join('special://home/addons/plugin.video.palcotv/tmp', ''))
playlists = xbmc.translatePath(os.path.join('special://home/addons/playlists', ''))

# Artwork used in notifications and listings.
icon = art + 'icon.png'
fanart = 'fanart.jpg'
def bajalib(params, platform, libdir, filename):
    """Download and install the librtmp build for *platform*.

    The zip archive is fetched into the playlists directory, extracted there,
    and the resulting library file is copied to ``libdir + filename``.

    Returns 0 on success, -1 when the download fails.
    """
    plugintools.log("[PalcoTV-0.2.99].bajalib " + platform)

    # The original computed the URL twice; compute it once.
    librtmp_zipfile = "librtmp-" + platform + ".zip"
    url = "https://dl.dropboxusercontent.com/u/8036850/librtmp/" + librtmp_zipfile
    plugintools.log("librtmp_zipfile= " + librtmp_zipfile)
    plugintools.log("url= " + url)

    try:
        r = urllib2.urlopen(url)
        f = open(playlists + librtmp_zipfile, "wb")
        try:
            f.write(r.read())
        finally:
            # Close the output file even if the download aborts midway.
            f.close()
    except IOError:
        return -1

    # Extract every archive member; skip individual entries that fail.
    zfobj = zipfile.ZipFile(playlists + librtmp_zipfile)
    try:
        for name in zfobj.namelist():
            try:
                outfile = open(os.path.join(playlists, name), 'wb')
                try:
                    outfile.write(zfobj.read(name))
                finally:
                    outfile.close()
            except IOError:
                pass  # There was a problem. Continue...
    finally:
        zfobj.close()

    shutil.copyfile(playlists + filename, libdir + filename)

    # Clean up the downloaded archive and the extracted copy.
    try:
        os.remove(playlists + librtmp_zipfile)
        os.remove(playlists + filename)
    except OSError:
        # os.remove raises OSError, which the original "except IOError"
        # failed to catch on Python 2.
        pass

    xbmc.executebuiltin("Notification(%s,%s,%i,%s)" % ('PalcoTV', "Librería actualizada!", 3 , art+'icon.png'))
    return 0  # succesful
def get_system_platform(params):
    """Detect the platform XBMC is running on and install the matching
    librtmp build via bajalib().

    NOTE(review): several defects look present but are left untouched here:
      * the ipad/iphone/appletv/osx branches call ``bajalib(params, platform)``
        with only 2 of 4 required arguments -- this would raise TypeError;
      * the ipad branch sets platform = "linux";
      * several ``shutil.copyfile`` calls copy a file onto itself;
      * only the appletv check is chained with the following elif, so
        ipad/iphone are tested independently of the rest.
    Confirm intended behavior before relying on any of these paths.
    """
    plugintools.log("[PalcoTV-0.2.99].get_system_platform " + repr(params))
    if xbmc.getCondVisibility( "system.platform.ipad" ):
        platform = "linux"
        # Var / Stash /Applications/ XBMC.app / Frameworks / librtmp.0.dylib
        libdir = xbmc.translatePath(os.path.join('special://xbmc/Frameworks/', ''))
        plugintools.log("dir= "+libdir)
        filename = "librtmp.0.dylib"
        shutil.copyfile(libdir + 'librtmp.0.dylib', libdir + 'librtmp.0.dylib')
        # fh = open(libdir + filename, "wb")
        bajalib(params, platform)

    if xbmc.getCondVisibility( "system.platform.iphone" ):
        platform = "iphone"
        # Var / Stash /Applications/ XBMC.app / Frameworks / librtmp.0.dylib
        libdir = xbmc.translatePath(os.path.join('special://xbmc/Frameworks/', ''))
        plugintools.log("dir= "+libdir)
        filename = "librtmp.0.dylib"
        shutil.copyfile(libdir + 'librtmp.0.dylib', libdir + 'librtmp.0.dylib')
        # fh = open(libdir + filename, "wb")
        bajalib(params, platform)

    if xbmc.getCondVisibility( "system.platform.appletv" ):
        platform = "appletv"
        # Var / Stash /Applications/ XBMC.app / Frameworks / librtmp.0.dylib
        libdir = xbmc.translatePath(os.path.join('special://xbmc/Frameworks/', ''))
        plugintools.log("dir= "+libdir)
        filename = "librtmp.0.dylib"
        shutil.copyfile(libdir + 'librtmp.0.dylib', libdir + 'librtmp.0.dylib')
        # fh = open(libdir + filename, "wb")
        bajalib(params, platform)

    elif xbmc.getCondVisibility( "system.platform.linux" ):
        platform = "android"
        # /data / data / org.xbmc.xbmc / lib / librtmp.so
        libdir = xbmc.translatePath(os.path.join('special://data/data/org.xbmc.xbmc/lib/', ''))
        plugintools.log("dir= "+libdir)
        filename = "librtmp.so"
        shutil.copyfile(libdir + 'librtmp.so', libdir + 'librtmp.so')
        bajalib(params, platform, libdir, filename)

    elif xbmc.getCondVisibility( "system.platform.windows" ):
        platform = "windows"
        # Program Files (x86)/XBMC/system/players/dvdplayer/librtmp.dll
        # Archivos de Programa/XBMC/system/players/dvdplayer/librtmp.dll
        # Da igual porque special://xbmc/ apunta a la carpeta de instalación de XBMC
        libdir = xbmc.translatePath(os.path.join('special://xbmc/system/players/dvdplayer/', ''))
        filename = "librtmp.dll"
        plugintools.log("dir= "+libdir)
        filename = "librtmp.dll"
        # A backup of the existing DLL is kept before replacing it.
        shutil.copyfile(libdir + 'librtmp.dll', libdir + 'librtmp_bakup.dll')
        # fh = open(libdir + filename, "wb")
        bajalib(params, platform, libdir, filename)

    elif xbmc.getCondVisibility( "system.platform.osx" ):
        platform = "osx"
        # Var / Stash /Applications/ XBMC.app / Frameworks / librtmp.0.dylib
        libdir = xbmc.translatePath(os.path.join('special://xbmc/Frameworks/', ''))
        plugintools.log("dir= "+libdir)
        filename = "librtmp.0.dylib"
        shutil.copyfile(libdir + 'librtmp.0.dylib', libdir + 'librtmp.0.dylib')
        # fh = open(libdir + filename, "wb")
        bajalib(params, platform)

    else:
        platform = "unknow"
    plugintools.log("plataforma= "+platform)
| gpl-3.0 |
mfriesen/tentacle | src/tentacle/dht/dht_bucket_routing_table.py | 1 | 4577 | from tentacle.dht.routing_table import DHTRoutingTable, distance
from math import pow
# Maximum number of nodes a bucket holds before it is split or truncated.
MAX_BUCKET_SIZE = 8
class DHTBucket(object):
    """Container of DHT nodes keyed by node id, capped at MAX_BUCKET_SIZE."""

    def __init__(self):
        self._nodes = dict()

    def add_node(self, dhtNode):
        # Re-adding an existing id simply replaces the stored node.
        self._nodes[dhtNode._id] = dhtNode

    def is_bucket_full(self):
        return len(self._nodes) >= MAX_BUCKET_SIZE

    def is_empty(self):
        return not self._nodes

    def values(self):
        return self._nodes.values()

    def truncate(self, compare_node_id):
        """Shrink the bucket back to MAX_BUCKET_SIZE entries.

        NOTE(review): this removes the nodes whose XOR distance to
        *compare_node_id* is smallest, i.e. the *closest* nodes -- confirm
        that is intended. Nodes at equal distance collide on the same key
        and shadow each other.
        """
        excess = len(self._nodes) - MAX_BUCKET_SIZE
        if excess > 0:
            by_distance = dict()
            for node_id in self._nodes:
                by_distance[distance(compare_node_id, node_id)] = node_id
            for d in sorted(by_distance)[:excess]:
                del self._nodes[by_distance[d]]
class DHTBucketNode(object):
    """Binary-tree node covering the half-open id range (min, max]."""

    def __init__(self, min_, max_):
        self._bucket = DHTBucket()
        self._min = int(min_)
        self._max = int(max_)
        # Children are populated only when the node is split.
        self._left = None
        self._right = None

    def add_node(self, dhtNode):
        self._bucket.add_node(dhtNode)

    def is_bucket_full(self):
        return self._bucket.is_bucket_full()

    def is_node_id_within_bucket(self, node_id):
        # Exclusive at the lower bound, inclusive at the upper.
        return self._min < node_id <= self._max
class DHTBucketRoutingTable(DHTRoutingTable):
    """Routing table backed by a binary tree of id-range buckets.

    Buckets containing the table's own id are split when full; other buckets
    are truncated to keep only the closest nodes.
    """

    # Root of the bucket tree; replaced per-instance in __init__.
    _root = None

    def __init__(self, id_):
        # The root bucket spans the whole 160-bit id space.
        self._root = DHTBucketNode(min_ = 0, max_ = pow(2, 160))
        self._id = id_

    def __create_node__(self, min_, max_):
        bucketNode = DHTBucketNode(min_ = min_, max_ = max_)
        return bucketNode

    def add_node(self, dhtNode):
        """Insert *dhtNode* into the leaf bucket covering its id; split the
        bucket if it becomes full."""
        bucketNode = self.__find_bucket__(self._root, dhtNode)
        bucketNode.add_node(dhtNode)

        if bucketNode.is_bucket_full():
            self.__split_bucket__(bucketNode)

    def __find_bucket__(self, bucketNode, dhtNode):
        # Recursively descend to the leaf whose range contains the node's id.
        if bucketNode is not None and bucketNode.is_node_id_within_bucket(dhtNode._id):
            if bucketNode._left is not None and bucketNode._left.is_node_id_within_bucket(dhtNode._id):
                bucketNode = self.__find_bucket__(bucketNode._left, dhtNode)
            if bucketNode._right is not None and bucketNode._right.is_node_id_within_bucket(dhtNode._id):
                bucketNode = self.__find_bucket__(bucketNode._right, dhtNode)
        return bucketNode

    def __split_bucket__(self, bucketNode):
        """Split a full bucket in half, redistributing its nodes; only buckets
        containing our own id are split, others are truncated instead."""
        if bucketNode.is_bucket_full():
            # NOTE(review): '/' is Python-2 integer division here; under
            # Python 3 this produces a float range bound -- confirm before
            # porting.
            half = (bucketNode._max - bucketNode._min) / 2
            left_node = self.__create_node__(bucketNode._min, bucketNode._min + half)
            right_node = self.__create_node__(bucketNode._min + half + 1, bucketNode._max)

            for node_id in bucketNode._bucket._nodes:
                dhtNode = bucketNode._bucket._nodes[node_id]
                if right_node.is_node_id_within_bucket(dhtNode._id):
                    right_node.add_node(dhtNode)
                elif left_node.is_node_id_within_bucket(dhtNode._id):
                    left_node.add_node(dhtNode)

            # Only split further when both halves are populated and the
            # bucket covers our own id.
            if not left_node._bucket.is_empty() and not right_node._bucket.is_empty() and bucketNode.is_node_id_within_bucket(self._id):
                bucketNode._bucket = None
                bucketNode._left = left_node
                bucketNode._right = right_node

                self.__split_bucket__(left_node)
                self.__split_bucket__(right_node)
            else:  # only keep the closest nodes
                bucketNode._bucket.truncate(self._id)

    def find_closest_nodes(self, id_):
        """Return a DHTBucket with (up to MAX_BUCKET_SIZE) nodes closest to *id_*."""
        bucket = DHTBucket()
        self.__find_closest_nodes__(self._root, bucket, id_)
        return bucket

    def __find_closest_nodes__(self, bucketNode, bucket, id_):
        # Walk the subtree covering id_, collecting leaf nodes until full.
        if bucketNode is not None and bucketNode.is_node_id_within_bucket(id_):
            self.__find_closest_nodes__(bucketNode._left, bucket, id_)
            self.__find_closest_nodes__(bucketNode._right, bucket, id_)

            if bucketNode._bucket is not None and not bucket.is_bucket_full():
                for node_id in bucketNode._bucket._nodes:
                    dhtNode = bucketNode._bucket._nodes[node_id]
                    bucket.add_node(dhtNode)
                bucket.truncate(id_)
| apache-2.0 |
Alwnikrotikz/androguard | androguard/decompiler/decompiler.py | 38 | 16547 | # This file is part of Androguard.
#
# Copyright (C) 2013, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE, STDOUT
import tempfile
import os
from androguard.core.androconf import rrmdir
from androguard.decompiler.dad import decompile
# Syntax highlighting via pygments is optional; degrade gracefully when the
# package is not installed.
PYGMENTS = True
try:
    from pygments.filter import Filter
    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import TerminalFormatter
    from pygments.token import Token
except ImportError:
    PYGMENTS = False

    # Minimal stand-in so the name ``Filter`` stays defined (it is used as a
    # base class elsewhere in this module) when pygments is unavailable.
    class Filter:
        pass
class Dex2Jar:
    """Convert an in-memory dex to a jar by shelling out to dex2jar."""

    def __init__(self, vm, path_dex2jar="./decompiler/dex2jar/", bin_dex2jar="dex2jar.sh", tmp_dir="/tmp/"):
        pathtmp = tmp_dir
        if not os.path.exists(pathtmp):
            os.makedirs(pathtmp)

        # Dump the raw dex buffer to a temporary file for dex2jar to read.
        fd, fdname = tempfile.mkstemp(dir=pathtmp)
        dexfile = os.fdopen(fd, "w+b")
        dexfile.write(vm.get_buff())
        dexfile.flush()
        dexfile.close()

        # dex2jar writes its output next to the input as <input>_dex2jar.jar.
        proc = Popen([path_dex2jar + bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT)
        proc.communicate()
        os.unlink(fdname)

        self.jarfile = fdname + "_dex2jar.jar"

    def get_jar(self):
        """Return the path of the generated jar file."""
        return self.jarfile
class DecompilerDex2Jad:
    """Decompiler backend: dex -> jar (dex2jar), unzip the classes, then run
    jad on every .class file to obtain java source per class."""

    def __init__(self, vm, path_dex2jar="./decompiler/dex2jar/", bin_dex2jar="dex2jar.sh", path_jad="./decompiler/jad/", bin_jad="jad", tmp_dir="/tmp/"):
        # class name ("Lfoo/Bar;") -> decompiled java source
        self.classes = {}
        # class names for which no .jad output was produced
        self.classes_failed = []

        pathtmp = tmp_dir
        if not os.path.exists(pathtmp):
            os.makedirs(pathtmp)

        # Write the raw dex buffer to a temporary file for dex2jar.
        fd, fdname = tempfile.mkstemp(dir=pathtmp)
        fd = os.fdopen(fd, "w+b")
        fd.write(vm.get_buff())
        fd.flush()
        fd.close()

        compile = Popen([path_dex2jar + bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT)
        stdout, stderr = compile.communicate()
        os.unlink(fdname)

        # Unzip the produced jar and run jad over every extracted file.
        pathclasses = fdname + "dex2jar/"
        compile = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT)
        stdout, stderr = compile.communicate()
        os.unlink(fdname + "_dex2jar.jar")

        for root, dirs, files in os.walk(pathclasses, followlinks=True):
            if files != []:
                for f in files:
                    real_filename = root
                    if real_filename[-1] != "/":
                        real_filename += "/"
                    real_filename += f

                    compile = Popen([path_jad + bin_jad, "-o", "-d", root, real_filename], stdout=PIPE, stderr=STDOUT)
                    stdout, stderr = compile.communicate()

        # Collect the generated .jad sources, one per class in the dex.
        for i in vm.get_classes():
            fname = pathclasses + "/" + i.get_name()[1:-1] + ".jad"

            if os.path.isfile(fname) == True:
                fd = open(fname, "r")
                self.classes[i.get_name()] = fd.read()
                fd.close()
            else:
                self.classes_failed.append(i.get_name())

        rrmdir(pathclasses)

    def get_source_method(self, method):
        """Return the source of *method*'s class, highlighted and filtered to
        the method when pygments is available ('' if the class is unknown).

        NOTE(review): MethodFilter is defined elsewhere in this module.
        """
        class_name = method.get_class_name()
        method_name = method.get_name()

        if class_name not in self.classes:
            return ""

        if PYGMENTS:
            lexer = get_lexer_by_name("java", stripall=True)
            lexer.add_filter(MethodFilter(method_name=method_name))
            formatter = TerminalFormatter()
            result = highlight(self.classes[class_name], lexer, formatter)
            return result
        return self.classes[class_name]

    def display_source(self, method):
        print self.get_source_method(method)

    def get_source_class(self, _class):
        return self.classes[_class.get_name()]

    def get_all(self, class_name):
        """Return the full source of *class_name*, highlighted when pygments
        is available ('' if the class is unknown)."""
        if class_name not in self.classes:
            return ""

        if PYGMENTS:
            lexer = get_lexer_by_name("java", stripall=True)
            formatter = TerminalFormatter()
            result = highlight(self.classes[class_name], lexer, formatter)
            return result
        return self.classes[class_name]

    def display_all(self, _class):
        print self.get_all(_class.get_name())
class DecompilerDex2WineJad:
    """Same pipeline as DecompilerDex2Jad, but jad is executed under wine
    (for a Windows jad binary on a non-Windows host).

    NOTE(review): this class duplicates DecompilerDex2Jad almost verbatim;
    the only difference is the "wine" prefix on the jad invocation.
    """

    def __init__(self, vm, path_dex2jar="./decompiler/dex2jar/", bin_dex2jar="dex2jar.sh", path_jad="./decompiler/jad/", bin_jad="jad", tmp_dir="/tmp/"):
        # class name ("Lfoo/Bar;") -> decompiled java source
        self.classes = {}
        # class names for which no .jad output was produced
        self.classes_failed = []

        pathtmp = tmp_dir
        if not os.path.exists(pathtmp):
            os.makedirs(pathtmp)

        # Write the raw dex buffer to a temporary file for dex2jar.
        fd, fdname = tempfile.mkstemp(dir=pathtmp)
        fd = os.fdopen(fd, "w+b")
        fd.write(vm.get_buff())
        fd.flush()
        fd.close()

        compile = Popen([path_dex2jar + bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT)
        stdout, stderr = compile.communicate()
        os.unlink(fdname)

        # Unzip the produced jar and run jad (under wine) over every file.
        pathclasses = fdname + "dex2jar/"
        compile = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT)
        stdout, stderr = compile.communicate()
        os.unlink(fdname + "_dex2jar.jar")

        for root, dirs, files in os.walk(pathclasses, followlinks=True):
            if files != []:
                for f in files:
                    real_filename = root
                    if real_filename[-1] != "/":
                        real_filename += "/"
                    real_filename += f

                    compile = Popen(["wine", path_jad + bin_jad, "-o", "-d", root, real_filename], stdout=PIPE, stderr=STDOUT)
                    stdout, stderr = compile.communicate()

        # Collect the generated .jad sources, one per class in the dex.
        for i in vm.get_classes():
            fname = pathclasses + "/" + i.get_name()[1:-1] + ".jad"

            if os.path.isfile(fname) == True:
                fd = open(fname, "r")
                self.classes[i.get_name()] = fd.read()
                fd.close()
            else:
                self.classes_failed.append(i.get_name())

        rrmdir(pathclasses)

    def get_source_method(self, method):
        """Return the source of *method*'s class, highlighted and filtered to
        the method when pygments is available ('' if the class is unknown)."""
        class_name = method.get_class_name()
        method_name = method.get_name()

        if class_name not in self.classes:
            return ""

        if PYGMENTS:
            lexer = get_lexer_by_name("java", stripall=True)
            lexer.add_filter(MethodFilter(method_name=method_name))
            formatter = TerminalFormatter()
            result = highlight(self.classes[class_name], lexer, formatter)
            return result
        return self.classes[class_name]

    def display_source(self, method):
        print self.get_source_method(method)

    def get_source_class(self, _class):
        return self.classes[_class.get_name()]

    def get_all(self, class_name):
        """Return the full source of *class_name*, highlighted when pygments
        is available ('' if the class is unknown)."""
        if class_name not in self.classes:
            return ""

        if PYGMENTS:
            lexer = get_lexer_by_name("java", stripall=True)
            formatter = TerminalFormatter()
            result = highlight(self.classes[class_name], lexer, formatter)
            return result
        return self.classes[class_name]

    def display_all(self, _class):
        print self.get_all(_class.get_name())
class DecompilerDed:
    """Decompiler backend using the ded tool, which produces .java sources
    under an "optimized-decompiled/.../src" directory."""

    def __init__(self, vm, path="./decompiler/ded/", bin_ded="ded.sh", tmp_dir="/tmp/"):
        # class name ("Lfoo/Bar;") -> decompiled java source
        self.classes = {}
        # class names for which no .java output was produced
        self.classes_failed = []

        pathtmp = tmp_dir
        if not os.path.exists(pathtmp) :
            os.makedirs( pathtmp )

        # Write the raw dex buffer to a temporary file for ded.
        fd, fdname = tempfile.mkstemp( dir=pathtmp )
        fd = os.fdopen(fd, "w+b")
        fd.write( vm.get_buff() )
        fd.flush()
        fd.close()

        dirname = tempfile.mkdtemp(prefix=fdname + "-src")
        compile = Popen([ path + bin_ded, "-c", "-o", "-d", dirname, fdname ], stdout=PIPE, stderr=STDOUT)
        stdout, stderr = compile.communicate()
        os.unlink( fdname )

        # Locate the "src" directory ded created under optimized-decompiled/.
        findsrc = None
        for root, dirs, files in os.walk( dirname + "/optimized-decompiled/" ) :
            if dirs != [] :
                for f in dirs :
                    if f == "src" :
                        findsrc = root
                        if findsrc[-1] != "/" :
                            findsrc += "/"
                        findsrc += f
                        break
            if findsrc != None :
                break

        # Collect the generated .java sources, one per class in the dex.
        for i in vm.get_classes() :
            fname = findsrc + "/" + i.get_name()[1:-1] + ".java"
            #print fname
            if os.path.isfile(fname) == True :
                fd = open(fname, "r")
                self.classes[ i.get_name() ] = fd.read()
                fd.close()
            else :
                self.classes_failed.append( i.get_name() )

        rrmdir( dirname )

    def get_source_method(self, method):
        """Return the source of *method*'s class, highlighted and filtered to
        the method ('' if the class is unknown).

        NOTE(review): unlike the other backends, this method uses pygments
        unconditionally -- it would raise NameError when pygments is not
        installed (PYGMENTS is False). Confirm whether the guard was omitted
        intentionally.
        """
        class_name = method.get_class_name()
        method_name = method.get_name()

        if class_name not in self.classes:
            return ""

        lexer = get_lexer_by_name("java", stripall=True)
        lexer.add_filter(MethodFilter(method_name=method_name))
        formatter = TerminalFormatter()
        result = highlight(self.classes[class_name], lexer, formatter)
        return result

    def display_source(self, method):
        print self.get_source_method(method)

    def get_all(self, class_name):
        """Return the full source of *class_name*, highlighted ('' if the
        class is unknown). Also uses pygments unconditionally -- see note on
        get_source_method."""
        if class_name not in self.classes:
            return ""

        lexer = get_lexer_by_name("java", stripall=True)
        formatter = TerminalFormatter()
        result = highlight(self.classes[class_name], lexer, formatter)
        return result

    def get_source_class(self, _class):
        return self.classes[_class.get_name()]

    def display_all(self, _class):
        print self.get_all(_class.get_name())
class DecompilerDex2Fernflower:
def __init__(self,
vm,
path_dex2jar="./decompiler/dex2jar/",
bin_dex2jar="dex2jar.sh",
path_fernflower="./decompiler/fernflower/",
bin_fernflower="fernflower.jar",
options_fernflower={"dgs": '1', "asc": '1'},
tmp_dir="/tmp/"):
self.classes = {}
self.classes_failed = []
pathtmp = tmp_dir
if not os.path.exists(pathtmp):
os.makedirs(pathtmp)
fd, fdname = tempfile.mkstemp(dir=pathtmp)
fd = os.fdopen(fd, "w+b")
fd.write(vm.get_buff())
fd.flush()
fd.close()
compile = Popen([path_dex2jar + bin_dex2jar, fdname], stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
os.unlink(fdname)
pathclasses = fdname + "dex2jar/"
compile = Popen(["unzip", fdname + "_dex2jar.jar", "-d", pathclasses], stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
os.unlink(fdname + "_dex2jar.jar")
for root, dirs, files in os.walk(pathclasses, followlinks=True):
if files != []:
for f in files:
real_filename = root
if real_filename[-1] != "/":
real_filename += "/"
real_filename += f
l = ["java", "-jar", path_fernflower + bin_fernflower]
for option in options_fernflower:
l.append("-%s:%s" % (option, options_fernflower[option]))
l.append(real_filename)
l.append(root)
compile = Popen(l, stdout=PIPE, stderr=STDOUT)
stdout, stderr = compile.communicate()
for i in vm.get_classes():
fname = pathclasses + "/" + i.get_name()[1:-1] + ".java"
if os.path.isfile(fname) == True:
fd = open(fname, "r")
self.classes[i.get_name()] = fd.read()
fd.close()
else:
self.classes_failed.append(i.get_name())
rrmdir(pathclasses)
def get_source_method(self, method):
class_name = method.get_class_name()
method_name = method.get_name()
if class_name not in self.classes:
return ""
if PYGMENTS:
lexer = get_lexer_by_name("java", stripall=True)
lexer.add_filter(MethodFilter(method_name=method_name))
formatter = TerminalFormatter()
result = highlight(self.classes[class_name], lexer, formatter)
return result
return self.classes[class_name]
def display_source(self, method):
print self.get_source_method(method)
def get_source_class(self, _class):
return self.classes[_class.get_name()]
def get_all(self, class_name):
if class_name not in self.classes:
return ""
if PYGMENTS:
lexer = get_lexer_by_name("java", stripall=True)
formatter = TerminalFormatter()
result = highlight(self.classes[class_name], lexer, formatter)
return result
return self.classes[class_name]
def display_all(self, _class):
print self.get_all(_class.get_name())
class MethodFilter(Filter):
def __init__(self, **options):
Filter.__init__(self, **options)
self.method_name = options["method_name"]
#self.descriptor = options["descriptor"]
self.present = False
self.get_desc = True #False
def filter(self, lexer, stream) :
a = []
l = []
rep = []
for ttype, value in stream:
if self.method_name == value and (ttype is Token.Name.Function or ttype is Token.Name) :
#print ttype, value
item_decl = -1
for i in range(len(a)-1, 0, -1) :
if a[i][0] is Token.Keyword.Declaration :
if a[i][1] != "class" :
item_decl = i
break
if item_decl != -1 :
self.present = True
l.extend( a[item_decl:] )
if self.present and ttype is Token.Keyword.Declaration :
item_end = -1
for i in range(len(l)-1, 0, -1) :
if l[i][0] is Token.Operator and l[i][1] == "}" :
item_end = i
break
if item_end != -1 :
rep.extend( l[:item_end+1] )
l = []
self.present = False
if self.present :
l.append( (ttype, value) )
a.append( (ttype, value) )
if self.present :
nb = 0
item_end = -1
for i in range(len(l)-1, 0, -1) :
if l[i][0] is Token.Operator and l[i][1] == "}" :
nb += 1
if nb == 2 :
item_end = i
break
rep.extend( l[:item_end+1] )
return rep
class DecompilerDAD:
def __init__(self, vm, vmx):
self.vm = vm
self.vmx = vmx
def get_source_method(self, m):
mx = self.vmx.get_method(m)
z = decompile.DvMethod(mx)
z.process()
result = z.get_source()
return result
def display_source(self, m):
result = self.get_source_method(m)
if PYGMENTS:
lexer = get_lexer_by_name("java", stripall=True)
formatter = TerminalFormatter()
result = highlight(result, lexer, formatter)
print result
def get_source_class(self, _class):
c = decompile.DvClass(_class, self.vmx)
c.process()
result = c.get_source()
return result
def display_all(self, _class):
result = self.get_source_class(_class)
if PYGMENTS:
lexer = get_lexer_by_name("java", stripall=True)
formatter = TerminalFormatter()
result = highlight(result, lexer, formatter)
print result
def get_all(self, class_name):
pass
| apache-2.0 |
Alwnikrotikz/cortex-vfx | test/IECore/MemoryUsage.py | 9 | 2757 | ##########################################################################
#
# Copyright (c) 2007, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
from IECore import *
class TestMemoryUsage( unittest.TestCase ) :
def testMultipleReferences( self ) :
"""When an object has multiple references to the same child, that child
should not be counted multiple times in the memory usage total."""
c = CompoundObject()
d = IntVectorData( 10000 )
c["a"] = d
m = c.memoryUsage()
dm = d.memoryUsage()
c["b"] = d
self.assert_( c.memoryUsage() < m + dm )
def testCopiedDataReferences( self ) :
"""Copied data shouldn't use additional memory unless the copies have
been modified by writing."""
c = CompoundObject()
d = IntVectorData( 10000 )
c["a"] = d
c["b"] = d.copy()
c2 = CompoundObject()
c2["a"] = d
c2["b"] = d
self.assert_( abs( c.memoryUsage() - c2.memoryUsage() ) < 10 )
# writing to the copy should now increase the memory usage
m = c.memoryUsage()
c["b"][0] = 100
self.assert_( c.memoryUsage()!=m )
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
vipullakhani/mi-instrument | mi/dataset/parser/test/test_velpt_ab.py | 7 | 29210 | #!/usr/bin/env python
"""
@package mi.dataset.parser.test.test_velpt_ab
@file mi-dataset/mi/dataset/parser/test/test_velpt_ab_dcl.py
@author Chris Goodrich
@brief Test code for the velpt_ab parser
"""
__author__ = 'Chris Goodrich'
import os
import re
from nose.plugins.attrib import attr
from mi.core.exceptions import ConfigurationException
from mi.dataset.dataset_parser import DataSetDriverConfigKeys
from mi.dataset.driver.velpt_ab.resource import RESOURCE_PATH
from mi.dataset.parser.common_regexes import FLOAT_REGEX, END_OF_LINE_REGEX
from mi.dataset.parser.velpt_ab import VelptAbParser, VelptAbParticleClassKey
from mi.dataset.parser.velpt_ab_particles import VelptAbInstrumentDataParticle,\
VelptAbDiagnosticsHeaderParticle, VelptAbDiagnosticsDataParticle, VelptAbInstrumentMetadataParticle
from mi.dataset.test.test_parser import ParserUnitTestCase
from mi.logging import log
@attr('UNIT', group='mi')
class VelptAbParserUnitTestCase(ParserUnitTestCase):
"""
velpt_ab_dcl Parser unit test suite
"""
def setUp(self):
ParserUnitTestCase.setUp(self)
self._parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {
VelptAbParticleClassKey.METADATA_PARTICLE_CLASS: VelptAbDiagnosticsHeaderParticle,
VelptAbParticleClassKey.DIAGNOSTICS_PARTICLE_CLASS: VelptAbDiagnosticsDataParticle,
VelptAbParticleClassKey.INSTRUMENT_METADATA_PARTICLE_CLASS: VelptAbInstrumentMetadataParticle,
VelptAbParticleClassKey.INSTRUMENT_PARTICLE_CLASS: VelptAbInstrumentDataParticle
}
}
self._incomplete_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_dcl_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None
}
self._bad_parser_config = {
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.velpt_ab_particles',
DataSetDriverConfigKeys.PARTICLE_CLASS: None,
DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {}
}
def test_simple(self):
"""
Read files and verify that all expected particles can be read.
Verify that the contents of the particles are correct.
This is the happy path test.
"""
log.debug('===== START TEST SIMPLE =====')
# Test the telemetered version
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE =====')
def test_jumbled(self):
"""
Read files and verify that all expected particles can be read.
This particular data file has the velocity data records
preceded by the diagnostics records, a situation not likely
to occur on a deployed instrument but anything is possible!
The logic in the parser will not produce an instrument metadata
particle (configuration data) until it encounters a velocity or
a diagnostics record. Assumes that all the configuration records are
at the beginning of the file. This is reasonable as the instrument is
configured before being deployed. So the config records would be stored
first. Verify that the contents of the particles are correct.
There should be no exceptions generated.
"""
log.debug('===== START TEST SIMPLE NOT IN ORDER =====')
# Test the telemetered version
with open(os.path.join(RESOURCE_PATH, 'jumbled_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'jumbled_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST SIMPLE NOT IN ORDER =====')
def test_too_few_diagnostics_records(self):
"""
The file used in this test has only 19 diagnostics records in the second set.
Twenty are expected. The records are all still processed.
The error is simply noted.
"""
log.debug('===== START TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'too_few_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_few_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST NOT ENOUGH DIAGNOSTICS RECORDS =====')
def test_too_many_diagnostics_records(self):
"""
The file used in this test has 21 diagnostics records in the second set.
Twenty are expected. The records are all still processed.
The error is simply noted.
"""
log.debug('===== START TEST TOO MANY DIAGNOSTICS RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'too_many_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 73
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_many_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST TOO MANY DIAGNOSTICS RECORDS =====')
def test_invalid_sync_byte(self):
"""
The file used in this test has extra bytes between records which need to be skipped
in order to process the correct number of particles. All records are still processed.
"""
log.debug('===== START TEST INVALID SYNC BYTE =====')
with open(os.path.join(RESOURCE_PATH, 'extra_bytes_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST INVALID SYNC BYTE =====')
def test_invalid_record_id(self):
"""
The file used in this test has one record with an invalid ID byte.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST INVALID RECORD ID =====')
with open(os.path.join(RESOURCE_PATH, 'bad_id_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_id_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST INVALID RECORD ID =====')
def test_truncated_file(self):
"""
The file used in this test has a malformed (too short) record at
the end of the file.This results in 71 particles being retrieved
instead of 72.
"""
log.debug('===== START TEST FOUND TRUNCATED FILE =====')
with open(os.path.join(RESOURCE_PATH, 'truncated_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'truncated_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND TRUNCATED FILE =====')
def test_bad_velocity_checksum(self):
"""
The file used in this test has a record with a bad checksum.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST FOUND BAD VELOCITY CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_velocity_checksum_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as \
file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_velocity_checksum_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD VELOCITY CHECKSUM =====')
def test_bad_diagnostic_checksum(self):
"""
The file used in this test has a record with a bad checksum.
This results in 71 particles being retrieved instead of 72.
"""
log.debug('===== START TEST FOUND BAD DIAGNOSTICS CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_checksum_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'too_few_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAGNOSTICS CHECKSUM =====')
def test_missing_hardware_config(self):
"""
The file used in this test has no hardware configuration record.
Instrument metadata will still be produced but the fields from
the hardware config will NOT be included.
"""
log.debug('===== START TEST MISSING HARDWARE CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_hardware_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_hardware_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING HARDWARE CONFIG =====')
def test_missing_head_config(self):
"""
The file used in this test has no head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST MISSING HEAD CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_head_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_head_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING HEAD CONFIG =====')
def test_missing_user_config(self):
"""
The file used in this test has no user configuration record.
Instrument metadata will still be produced but the fields from
the user config will NOT be included.
"""
log.debug('===== START TEST MISSING USER CONFIG =====')
with open(os.path.join(RESOURCE_PATH, 'no_user_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_user_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING USER CONFIG =====')
def test_missing_all_config(self):
"""
The file used in this test has no user configuration record.
Instrument metadata will still be produced but the fields from
the user config will NOT be included.
"""
log.debug('===== START TEST MISSING ALL CONFIG RECORDS =====')
with open(os.path.join(RESOURCE_PATH, 'no_config_recs_VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_config_recs_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING ALL CONFIG RECORDS =====')
def test_head_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST HEAD CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_head_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_head_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST HEAD CONFIG BAD CHECKSUM =====')
def test_hardware_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the hardware configuration record.
Instrument metadata will still be produced but the fields from
the hardware config will NOT be included.
"""
log.debug('===== START TEST HARDWARE CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_hardware_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_hardware_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST HARDWARE CONFIG BAD CHECKSUM =====')
def test_user_config_bad_checksum(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST USER CONFIG BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_user_config_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_user_config_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST USER CONFIG BAD CHECKSUM =====')
def test_diag_header_bad_checksum(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST DIAGNOSTICS HEADER BAD CHECKSUM =====')
with open(os.path.join(RESOURCE_PATH, 'bad_checksum_in_diag_header_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_header_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST DIAGNOSTICS HEADER BAD CHECKSUM =====')
def test_missing_diag_header(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST MISSING DIAGNOSTICS HEADER =====')
with open(os.path.join(RESOURCE_PATH, 'no_diag_header_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 71
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_header_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST MISSING DIAGNOSTICS HEADER =====')
def test_random_diag_record(self):
"""
The file used in this test has a bad checksum in the head configuration record.
Instrument metadata will still be produced but the fields from
the head config will NOT be included.
"""
log.debug('===== START TEST RANDOM DIAGNOSTIC RECORD FOUND =====')
with open(os.path.join(RESOURCE_PATH, 'random_diag_record_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 72
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'random_diag_record_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST RANDOM DIAGNOSTIC RECORD FOUND =====')
def test_no_diag_recs(self):
"""
The file used in this test has a single diagnostic header record but no diagnostic
records. No diagnostic particles will be produced.
"""
log.debug('===== START TEST NO DIAGNOSTIC RECORDS FOUND =====')
with open(os.path.join(RESOURCE_PATH, 'no_diag_recs_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 51
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'no_diag_recs_VELPT_SN_11402_2014-07-02.yml', RESOURCE_PATH)
log.debug('===== END TEST NO DIAGNOSTIC RECORDS FOUND =====')
def test_bad_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST BAD CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
with self.assertRaises(ConfigurationException):
parser = VelptAbParser(self._bad_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST BAD CONFIGURATION =====')
def test_partial_configuration(self):
"""
Attempt to build a parser with a bad configuration.
"""
log.debug('===== START TEST PARTIAL CONFIGURATION =====')
with open(os.path.join(RESOURCE_PATH, 'VELPT_SN_11402_2014-07-02.aqd'), 'rb') as file_handle:
with self.assertRaises(ConfigurationException):
parser = VelptAbParser(self._incomplete_parser_config,
file_handle,
self.exception_callback)
log.debug('===== END TEST PARTIAL CONFIGURATION =====')
def test_bad_diag_checksum_19_recs(self):
"""
The file used in this test has a power record with a missing timestamp.
This results in 9 particles being retrieved instead of 10, and also result in the exception
callback being called.
"""
log.debug('===== START TEST FOUND BAD DIAG HDR CHECKSUM AND TOO FEW RECS =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_hdr_checksum_19_diag_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 116
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_diag_hdr_checksum_19_diag_VELPT_SN_11402_2014-07-02.yml',
RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAG HDR CHECKSUM AND TOO FEW RECS =====')
def test_bad_diag_checksum_21_recs(self):
"""
The file used in this test has a power record with a missing timestamp.
This results in 9 particles being retrieved instead of 10, and also result in the exception
callback being called.
"""
log.debug('===== START TEST FOUND BAD DIAG HDR CHECKSUM AND TOO MANY RECS =====')
with open(os.path.join(RESOURCE_PATH, 'bad_diag_hdr_checksum_21_diag_VELPT_SN_11402_2014-07-02.aqd'), 'rb')\
as file_handle:
num_particles_to_request = num_expected_particles = 118
parser = VelptAbParser(self._parser_config,
file_handle,
self.exception_callback)
particles = parser.get_records(num_particles_to_request)
self.assertEquals(len(particles), num_expected_particles)
self.assert_particles(particles, 'bad_diag_hdr_checksum_21_diag_VELPT_SN_11402_2014-07-02.yml',
RESOURCE_PATH)
log.debug('===== END TEST FOUND BAD DIAG HDR CHECKSUM AND TOO MANY RECS =====')
def fix_yml_pressure_params(self):
"""
This helper tool was used to modify the yml files in response to ticket #4341
"""
pressure_regex = r' pressure:\s+(0.\d+)'
for file_name in os.listdir(RESOURCE_PATH):
if file_name.endswith('.yml'):
with open(os.path.join(RESOURCE_PATH, file_name), 'rU') as in_file_id:
out_file_name = file_name + '.new'
log.info('fixing file %s', file_name)
log.info('creating file %s', out_file_name)
out_file_id = open(os.path.join(RESOURCE_PATH, out_file_name), 'w')
for line in in_file_id:
match = re.match(pressure_regex, line)
if match is not None:
new_value = float(match.group(1)) * 1000.0
new_line = ' pressure_mbar: ' + str(new_value)
out_file_id.write(new_line + '\n')
else:
out_file_id.write(line)
out_file_id.close()
def fix_yml_float_params(self):
"""
This helper tool was used to modify the yml files in response to ticket #8564
"""
param_change_table = [
('battery_voltage', 'battery_voltage_dV', 10),
('sound_speed_analog2', 'sound_speed_dms', 10),
('heading', 'heading_decidegree', 10),
('pitch', 'pitch_decidegree', 10),
('roll', 'roll_decidegree', 10),
('pressure_mbar', 'pressure_mbar', 1),
('temperature', 'temperature_centidegree', 100),
('velocity_beam1', 'velocity_beam1', 1),
('velocity_beam2', 'velocity_beam2', 1),
('velocity_beam3', 'velocity_beam3', 1)
]
for file_name in os.listdir(RESOURCE_PATH):
if file_name.endswith('.yml'):
with open(os.path.join(RESOURCE_PATH, file_name), 'rU') as in_file_id:
out_file_name = file_name + '.new'
log.info('fixing file %s', file_name)
log.info('creating file %s', out_file_name)
out_file_id = open(os.path.join(RESOURCE_PATH, out_file_name), 'w')
for line in in_file_id:
new_line = line
for param_name, new_name, mult in param_change_table:
param_regex = r'\s+' + param_name + r':\s+(' + FLOAT_REGEX + ')' + END_OF_LINE_REGEX
match = re.match(param_regex, line)
if match is not None:
new_value = int(float(match.group(1)) * mult)
new_line = ' ' + new_name + ': ' + str(new_value) + '\n'
log.info('%s', new_line)
out_file_id.write(new_line)
out_file_id.close()
| bsd-2-clause |
ylatuya/Flumotion | flumotion/test/test_component_providers.py | 1 | 11789 | # vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2004,2005,2006,2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
import os
import shutil
import tempfile
from twisted.internet import defer, reactor
from twisted.trial import unittest
import twisted.copyright
if twisted.copyright.version == "SVN-Trunk":
SKIP_MSG = "Twisted 2.0.1 thread pool is broken for tests"
else:
SKIP_MSG = None
from flumotion.common import testsuite
from flumotion.component.misc.httpserver import localpath
from flumotion.component.misc.httpserver import localprovider
from flumotion.component.misc.httpserver import cachedprovider
from flumotion.component.misc.httpserver.fileprovider \
import InsecureError, NotFoundError, CannotOpenError
attr = testsuite.attr
class LocalPath(testsuite.TestCase):
    """Traversal and path-security tests for localpath.LocalPath."""

    def setUp(self):
        # Fixture layout: <tmp>/a (file), <tmp>/B/ (dir), <tmp>/B/c (file).
        self.path = tempfile.mkdtemp(suffix=".flumotion.test")
        a = os.path.join(self.path, 'a')
        open(a, "w").write('test file a')
        B = os.path.join(self.path, 'B')
        os.mkdir(B)
        c = os.path.join(self.path, 'B', 'c')
        open(c, "w").write('test file c')

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def testExistingPath(self):
        local = localpath.LocalPath(self.path)
        self.failUnless(isinstance(local, localpath.LocalPath))

    def testChildExistingFile(self):
        child = localpath.LocalPath(self.path).child('a')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildExistingDir(self):
        child = localpath.LocalPath(self.path).child('B')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildTraversingDir(self):
        local = localpath.LocalPath(self.path)
        child = local.child('B').child('c')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildNonExistingFile(self):
        # child() of a missing name still yields a LocalPath object.
        child = localpath.LocalPath(self.path).child('foo')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildTraversingNonExistingDir(self):
        local = localpath.LocalPath(self.path)
        child = local.child('foo').child('bar')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildInsecurePathTooDeep(self):
        # child() expects a single path segment; embedded separators,
        # absolute paths and '..' must raise InsecureError.
        local = localpath.LocalPath(self.path)
        self.assertRaises(InsecureError, local.child, 'B/c')

    def testChildInsecurePathTooDeepAndNonExisting(self):
        local = localpath.LocalPath(self.path)
        self.assertRaises(InsecureError, local.child, 'foo/bar')

    def testChildInsecurePathRoot(self):
        local = localpath.LocalPath(self.path)
        self.assertRaises(InsecureError, local.child, '/foo')

    def testChildInsecurePathUp(self):
        local = localpath.LocalPath(self.path)
        self.assertRaises(InsecureError, local.child, '..')
class LocalPathCachedProvider(testsuite.TestCase):
    """Same traversal/security checks as LocalPath, but going through the
    cached HTTP file provider plug; also covers open() error behaviour."""

    # Skip the whole class when the installed Twisted's thread pool is
    # unusable under trial (see module-level SKIP_MSG).
    skip = SKIP_MSG

    def setUp(self):
        # Fixture layout: <tmp>/a (file), <tmp>/B/ (dir), <tmp>/B/c (file).
        self.path = tempfile.mkdtemp(suffix=".flumotion.test")
        a = os.path.join(self.path, 'a')
        open(a, "w").write('test file a')
        B = os.path.join(self.path, 'B')
        os.mkdir(B)
        c = os.path.join(self.path, 'B', 'c')
        open(c, "w").write('test file c')
        plugProps = {"properties": {"path": self.path}}
        self.fileProviderPlug = \
            cachedprovider.FileProviderLocalCachedPlug(plugProps)
        # start() may return a Deferred; trial waits for it before tests run.
        return self.fileProviderPlug.start(component=None)

    def tearDown(self):
        # Stop the plug first, then remove the fixture tree regardless of
        # whether the stop succeeded.
        d = defer.maybeDeferred(self.fileProviderPlug.stop, component=None)

        def _rmTempDir(result):
            shutil.rmtree(self.path, ignore_errors=True)
        d.addBoth(_rmTempDir)
        return d

    def testExistingPath(self):
        local = self.fileProviderPlug.getRootPath()
        self.failUnless(isinstance(local, cachedprovider.LocalPath))

    def testChildExistingFile(self):
        child = self.fileProviderPlug.getRootPath().child('a')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildExistingDir(self):
        child = self.fileProviderPlug.getRootPath().child('B')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildTraversingDir(self):
        local = self.fileProviderPlug.getRootPath()
        child = local.child('B').child('c')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildNonExistingFile(self):
        child = self.fileProviderPlug.getRootPath().child('foo')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildTraversingNonExistingDir(self):
        local = self.fileProviderPlug.getRootPath()
        child = local.child('foo').child('bar')
        self.failUnless(isinstance(child, localpath.LocalPath))

    def testChildInsecurePathTooDeep(self):
        # child() expects a single path segment; embedded separators,
        # absolute paths and '..' must raise InsecureError.
        local = self.fileProviderPlug.getRootPath()
        self.assertRaises(InsecureError, local.child, 'B/c')

    def testChildInsecurePathTooDeepAndNonExisting(self):
        local = self.fileProviderPlug.getRootPath()
        self.assertRaises(InsecureError, local.child, 'foo/bar')

    def testChildInsecurePathRoot(self):
        local = self.fileProviderPlug.getRootPath()
        self.assertRaises(InsecureError, local.child, '/foo')

    def testChildInsecurePathUp(self):
        local = self.fileProviderPlug.getRootPath()
        self.assertRaises(InsecureError, local.child, '..')

    def testOpenExisting(self):
        child = self.fileProviderPlug.getRootPath().child('a')
        child.open()

    def testOpenTraversingExistingDir(self):
        local = self.fileProviderPlug.getRootPath()
        child = local.child('B').child('c')
        child.open()

    def testOpendir(self):
        # Directories cannot be opened as files.
        local = self.fileProviderPlug.getRootPath()
        self.assertRaises(CannotOpenError, local.open)

    def testOpenNonExisting(self):
        local = self.fileProviderPlug.getRootPath()
        child = local.child('foo')
        self.assertRaises(NotFoundError, child.open)

    def testOpenTraversingNonExistingDir(self):
        local = self.fileProviderPlug.getRootPath()
        child = local.child('foo').child('bar')
        self.assertRaises(NotFoundError, child.open)
class LocalPathLocalProvider(testsuite.TestCase):
    """open() behaviour of the plain (non-caching) local file provider."""

    def setUp(self):
        # Fixture layout: <tmp>/a (file), <tmp>/B/ (dir), <tmp>/B/c (file).
        self.path = tempfile.mkdtemp(suffix=".flumotion.test")
        a = os.path.join(self.path, 'a')
        open(a, "w").write('test file a')
        B = os.path.join(self.path, 'B')
        os.mkdir(B)
        c = os.path.join(self.path, 'B', 'c')
        open(c, "w").write('test file c')
        self.local = localprovider.LocalPath(self.path)

    def tearDown(self):
        shutil.rmtree(self.path, ignore_errors=True)

    def testOpenExisting(self):
        child = self.local.child('a')
        child.open()

    def testOpenTraversingExistingDir(self):
        child = self.local.child('B').child('c')
        child.open()

    def testOpendir(self):
        # Directories cannot be opened as files.
        self.assertRaises(CannotOpenError, self.local.open)

    def testOpenNonExisting(self):
        child = self.local.child('foo')
        self.assertRaises(NotFoundError, child.open)

    def testOpenTraversingNonExistingDir(self):
        child = self.local.child('foo').child('bar')
        self.assertRaises(NotFoundError, child.open)
class CachedProviderFileTest(testsuite.TestCase):
    """Exercises reading, seeking and cache behaviour of the cached file
    provider against a real source/cache directory pair."""

    skip = SKIP_MSG

    def setUp(self):
        # The cached provider performs its I/O through the reactor thread
        # pool; install a fresh, working pool for the duration of the test.
        from twisted.python import threadpool
        reactor.threadpool = threadpool.ThreadPool(0, 10)
        reactor.threadpool.start()
        self.src_path = tempfile.mkdtemp(suffix=".src")
        self.cache_path = tempfile.mkdtemp(suffix=".cache")
        plugProps = {"properties": {"path": self.src_path,
                                    "cache-dir": self.cache_path}}
        self.fileProviderPlug = \
            cachedprovider.FileProviderLocalCachedPlug(plugProps)
        d = self.fileProviderPlug.start(None)
        self.dataSize = 7
        self.data = "foo bar"
        # the old parameter assures newer files will be taken into account
        # (avoid timing problems), like in testModifySrc
        self.testFileName = self.createFile('a', self.data, old=True)
        return d

    def _tearDown(self):
        shutil.rmtree(self.src_path, ignore_errors=True)
        shutil.rmtree(self.cache_path, ignore_errors=True)
        reactor.threadpool.stop()
        reactor.threadpool = None

    def tearDown(self):
        d = defer.maybeDeferred(self.fileProviderPlug.stop, None)
        d.addCallback(lambda _: self._tearDown())
        return d

    @attr('slow')
    def testModifySrc(self):
        # Re-reading after the source file changed must return new content.
        newData = "bar foo"
        d = self.openFile('a')
        d.addCallback(self.readFile, self.dataSize)
        d.addCallback(pass_through, self.cachedFile.close)
        d.addCallback(pass_through, self.createFile, 'a', newData)
        d.addCallback(lambda _: self.openFile('a'))
        d.addCallback(self.readFile, self.dataSize)
        d.addCallback(pass_through, self.cachedFile.close)
        d.addCallback(self.assertEqual, newData)
        return d

    def testSeekend(self):
        # Seeking near the end and reading must return the file's tail.
        d = self.openFile('a')
        d.addCallback(pass_through, self.cachedFile.seek, self.dataSize - 5)
        d.addCallback(self.readFile, 5)
        d.addCallback(pass_through, self.cachedFile.close)
        d.addCallback(self.assertEqual, self.data[-5:])
        return d

    @attr('slow')
    def testCachedFile(self):
        # After reading, a copy of the file must appear in the cache dir.
        d = self.openFile('a')
        d.addCallback(self.readFile, self.dataSize)
        d.addCallback(delay, 1)
        d.addCallback(pass_through, self.cachedFile.close)
        d.addCallback(lambda _: self.getCachePath(self.testFileName))
        d.addCallback(self.checkPathExists)
        return d

    def testSimpleIntegrity(self):
        d = self.openFile('a')
        d.addCallback(self.readFile, self.dataSize)
        d.addCallback(pass_through, self.cachedFile.close)
        d.addCallback(lambda data:
                      self.failUnlessEqual(self.data, data))
        return d

    def getCachePath(self, path):
        return self.fileProviderPlug.cache.getCachePath(path)

    def getTempPath(self, path):
        return self.fileProviderPlug.getTempPath(path)

    def checkPathExists(self, p):
        self.failUnless(os.path.exists(p))

    def createFile(self, name, data, old=False):
        """Write *data* to <src>/<name>; with old=True, backdate its mtime
        so a later rewrite is reliably seen as newer."""
        testFileName = os.path.join(self.src_path, name)
        testFile = open(testFileName, "w")
        testFile.write(data)
        testFile.close()
        if old:
            # The os.stat() call previously made here was unused; only the
            # utime backdating matters.
            os.utime(testFileName, (1, 1))
        return testFileName

    def openFile(self, name):
        self.cachedFile = \
            self.fileProviderPlug.getRootPath().child(name).open()
        return defer.succeed(self.cachedFile)

    def readFile(self, _, size):
        return self.cachedFile.read(size)
def pass_through(result, fun, *args, **kwargs):
    """Invoke *fun* for its side effect, then forward *result* unchanged.

    Handy in Deferred callback chains: it lets a side-effecting call be
    inserted without disturbing the value travelling down the chain.
    """
    fun(*args, **kwargs)
    return result
def delay(ret, t):
    """Return a Deferred that fires with ``ret`` after ``t`` seconds."""
    deferred = defer.Deferred()
    reactor.callLater(t, deferred.callback, ret)
    return deferred
| gpl-2.0 |
GTHubT/GTSever | src/cpp/GTServer/third_party/JSONParser/jsoncpp-1.8.1/devtools/licenseupdater.py | 3 | 3941 | """Updates the license text in source file.
"""
from __future__ import print_function
# An existing license is found if the file starts with the string below,
# and ends with the first blank line.
LICENSE_BEGIN = "// Copyright "
BRIEF_LICENSE = LICENSE_BEGIN + """2007-2010 The JsonCpp Authors
// Distributed under MIT license, or public domain if desired and
// recognized in your jurisdiction.
// See file LICENSE for detail or copy at http://jsoncpp.sourceforge.net/LICENSE
""".replace('\r\n','\n')
def update_license(path, dry_run, show_diff):
"""Update the license statement in the specified file.
Parameters:
path: path of the C++ source file to update.
dry_run: if True, just print the path of the file that would be updated,
but don't change it.
show_diff: if True, print the path of the file that would be modified,
as well as the change made to the file.
"""
with open(path, 'rt') as fin:
original_text = fin.read().replace('\r\n','\n')
newline = fin.newlines and fin.newlines[0] or '\n'
if not original_text.startswith(LICENSE_BEGIN):
# No existing license found => prepend it
new_text = BRIEF_LICENSE + original_text
else:
license_end_index = original_text.index('\n\n') # search first blank line
new_text = BRIEF_LICENSE + original_text[license_end_index+2:]
if original_text != new_text:
if not dry_run:
with open(path, 'wb') as fout:
fout.write(new_text.replace('\n', newline))
print('Updated', path)
if show_diff:
import difflib
print('\n'.join(difflib.unified_diff(original_text.split('\n'),
new_text.split('\n'))))
return True
return False
def update_license_in_source_directories(source_dirs, dry_run, show_diff):
    """Update the license text of C++ sources found under *source_dirs*.

    Parameters:
      source_dirs: list of directories to scan recursively for C++ sources.
      dry_run: if True, only report the files that would be updated.
      show_diff: if True, also print the change made to each file.
    """
    from devtools import antglob
    prune_dirs = antglob.prune_dirs + 'scons-local* ./build* ./libs ./dist'
    for source_dir in source_dirs:
        sources = antglob.glob(source_dir,
                               includes='''**/*.h **/*.cpp **/*.inl''',
                               prune_dirs=prune_dirs)
        for source_path in sources:
            update_license(source_path, dry_run, show_diff)
def main():
    """Parse command-line options and run the license updater on the
    directories given as positional arguments."""
    usage = """%prog DIR [DIR2...]
Updates license text in sources of the project in source files found
in the directory specified on the command-line.

Example of call:
python devtools\licenseupdater.py include src -n --diff
=> Show change that would be made to the sources.

python devtools\licenseupdater.py include src
=> Update license statement on all sources in directories include/ and src/.
"""
    from optparse import OptionParser
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-n', '--dry-run', dest="dry_run", action='store_true', default=False,
                      help="""Only show what files are updated, do not update the files""")
    parser.add_option('--diff', dest="show_diff", action='store_true', default=False,
                      help="""On update, show change made to the file.""")
    parser.enable_interspersed_args()
    options, args = parser.parse_args()
    update_license_in_source_directories(args, options.dry_run, options.show_diff)
    print('Done')
if __name__ == '__main__':
    import sys
    import os.path
    # Make the parent of the devtools package importable before running.
    sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    main()
| gpl-3.0 |
kadaradam/ScavengeSurvive | misc/coordheatmap.py | 2 | 1715 | import timeit
import os
import copy
import io
import re
import itertools as IT
from PIL import Image, ImageDraw, ImageColor, ImageFont
import heatmap
# Matches "CreateObject(model, x, y, z, rx, ry, rz);" lines; group 1 is the
# model id, groups 2-7 the six float coordinates/rotations.
o = re.compile(r'CreateObject\(([0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+),\s*([\-\+]?[0-9]*\.[0-9]+)\);')

# Radius (in pixels) of each plotted object marker.
DOT_RADIUS = 6
class Object:
    """A world-object position parsed from a CreateObject(...) line."""

    def __init__(self, x, y, z):
        self.x = x
        self.y = y
        self.z = z
def load_obj(filename):
    """Parse every CreateObject(...) line in *filename* into Object
    instances (x, y, z only); non-matching lines are ignored."""
    objects = []
    with io.open(filename) as handle:
        for line in handle:
            match = o.match(line)
            if match is None:
                continue
            objects.append(Object(
                float(match.group(2)),   # x
                float(match.group(3)),   # y
                float(match.group(4))))  # z
    return objects
def draw_obj(im, draw, objs):
    """Plot each object as a small outlined circle onto the 6000x6000 map.

    ``im`` is unused here but kept for signature parity with the other
    rendering helpers.
    """
    print(len(objs), "objs spawns being drawn")
    for obj in objs:
        # Translate game coordinates (-3000..3000, y up) into image
        # coordinates (0..6000, y down).
        px = obj.x + 3000
        py = 6000 - (obj.y + 3000)
        box = [px - DOT_RADIUS, py - DOT_RADIUS,
               px + DOT_RADIUS, py + DOT_RADIUS]
        draw.ellipse(box, outline=(255, 255, 255), fill=(0, 0, 0))
def generate_obj_heatmap(im, draw, objs,
                         libpath="C:\\Python34\\Lib\\site-packages\\heatmap\\cHeatmap-x86.dll"):
    """Render a density heat map of the object positions onto *im* and
    save it as object-heatmap.jpg.

    The native heatmap library location is now a parameter (defaulting to
    the previously hard-coded Windows path) so the script can be run on
    other machines without editing the source. ``draw`` is unused but kept
    for signature parity with draw_obj.
    """
    # Shift game coordinates (-3000..3000) into the 0..6000 heatmap area.
    points = [[int(obj.x + 3000), int(obj.y + 3000)] for obj in objs]
    hm = heatmap.Heatmap(libpath=libpath)
    hmimg = hm.heatmap(
        points,
        dotsize=150,
        size=(6000, 6000),
        scheme='classic',
        area=((0, 0), (6000, 6000)))
    # Composite the (transparent-background) heatmap over the base map.
    im.paste(hmimg, mask=hmimg)
    im.save("object-heatmap.jpg")
def core():
    """Build the object dot-map and heat-map images from the "in" file."""
    objs = list(load_obj("in"))

    # Initialise PIL stuff
    mapimg = Image.open("gtasa-blank-1.0.jpg")
    draw = ImageDraw.Draw(mapimg)

    # Generate dots
    draw_obj(mapimg, draw, objs)
    mapimg.save("object-map.jpg")

    # generate heatmap
    generate_obj_heatmap(copy.copy(mapimg), draw, objs)
if __name__ == '__main__':
    # BUG FIX: this guard called main(), which is not defined anywhere in
    # this script, raising NameError on startup; the entry point defined
    # above is core().
    core()
| gpl-3.0 |
dims/neutron | neutron/plugins/ml2/drivers/mech_sriov/agent/common/exceptions.py | 4 | 1249 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron._i18n import _
from neutron.common import exceptions as n_exc
class SriovNicError(n_exc.NeutronException):
    """Base exception for SR-IOV NIC switch agent errors."""
    pass
class InvalidDeviceError(SriovNicError):
    """Raised when a network device fails validation."""
    message = _("Invalid Device %(dev_name)s: %(reason)s")
class IpCommandError(SriovNicError):
    """Raised when an ``ip`` command invocation fails."""
    message = _("ip command failed: %(reason)s")
class IpCommandOperationNotSupportedError(SriovNicError):
    """Raised when an ``ip`` operation is unsupported by the device."""
    message = _("Operation not supported on device %(dev_name)s")
class InvalidPciSlotError(SriovNicError):
    """Raised when a PCI slot identifier is malformed or unknown."""
    message = _("Invalid pci slot %(pci_slot)s")
class IpCommandDeviceError(SriovNicError):
    """Raised when an ``ip`` command fails for a specific device."""
    message = _("ip command failed on device %(dev_name)s: %(reason)s")
| apache-2.0 |
lonnon/passphrase | passphrase.py | 1 | 1042 | #!/usr/bin/env python
# passphrase
#
# Generates a passphrase from random words (four by default, number may
# be specified as first argument on command line). By default, words are
# between 6 and 10 characters in length. Different minimum and maximum
# word lengths may be specified in the seconda and third command line
# arguments, respectively.
import random
import sys
import os
import glob
import re
# Optional positional arguments: word count, min length, max length.
try:
    length = int(sys.argv[1])
except IndexError:
    length = 4

try:
    minimum = int(sys.argv[2])
except IndexError:
    minimum = 6

try:
    maximum = int(sys.argv[3])
except IndexError:
    maximum = 10

# A minimum above the maximum widens the maximum instead of erroring out.
if minimum > maximum:
    maximum = minimum

# Map word length -> dictionary file name ("dictionary_letters_<n>").
dictionaries = {int(re.search(r'[0-9]+$', f).group()): f
                for f in glob.glob('dictionary_letters_*')}

# Pool together every word whose length lies within [minimum, maximum].
words = []
for i in range(minimum, maximum + 1):
    with open(dictionaries[i], mode='r', encoding='utf-8') as dictionary:
        for line in dictionary:
            words.append(line.strip())

r = random.Random()
output = [r.choice(words) for n in range(length)]
print(' '.join(output))
# BUG FIX: a bare ``print`` on Python 3 (this file already uses the py3-only
# ``open(..., encoding=...)``) is a no-op expression; call it to emit the
# intended trailing blank line.
print()
| mit |
wolfskaempf/ga_statistics | lib/python2.7/site-packages/django/contrib/gis/db/backends/postgis/base.py | 102 | 1620 | from django.conf import settings
from django.db.backends.base.base import NO_DB_ALIAS
from django.db.backends.postgresql_psycopg2.base import \
DatabaseWrapper as Psycopg2DatabaseWrapper
from django.utils.functional import cached_property
from .creation import PostGISCreation
from .features import DatabaseFeatures
from .introspection import PostGISIntrospection
from .operations import PostGISOperations
from .schema import PostGISSchemaEditor
class DatabaseWrapper(Psycopg2DatabaseWrapper):
    """PostGIS database backend: the plain psycopg2 backend extended with
    spatial features, creation, operations and introspection."""

    SchemaEditorClass = PostGISSchemaEditor

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)
        # Skip GIS setup for the special "no database" connection alias
        # used e.g. when creating/dropping databases.
        if kwargs.get('alias', '') != NO_DB_ALIAS:
            self.features = DatabaseFeatures(self)
            self.creation = PostGISCreation(self)
            self.ops = PostGISOperations(self)
            self.introspection = PostGISIntrospection(self)

    @cached_property
    def template_postgis(self):
        # Name of the template database with PostGIS pre-installed
        # (legacy installation style), or None when it doesn't exist.
        template_postgis = getattr(settings, 'POSTGIS_TEMPLATE', 'template_postgis')
        with self._nodb_connection.cursor() as cursor:
            cursor.execute('SELECT 1 FROM pg_database WHERE datname = %s LIMIT 1;', (template_postgis,))
            if cursor.fetchone():
                return template_postgis
        return None

    def prepare_database(self):
        super(DatabaseWrapper, self).prepare_database()
        if self.template_postgis is None:
            # Check that postgis extension is installed on PostGIS >= 2
            with self.cursor() as cursor:
                cursor.execute("CREATE EXTENSION IF NOT EXISTS postgis")
ZenDevelopmentSystems/scikit-learn | examples/applications/plot_tomography_l1_reconstruction.py | 204 | 5442 | """
======================================================================
Compressive sensing: tomography reconstruction with L1 prior (Lasso)
======================================================================
This example shows the reconstruction of an image from a set of parallel
projections, acquired along different angles. Such a dataset is acquired in
**computed tomography** (CT).
Without any prior information on the sample, the number of projections
required to reconstruct the image is of the order of the linear size
``l`` of the image (in pixels). For simplicity we consider here a sparse
image, where only pixels on the boundary of objects have a non-zero
value. Such data could correspond for example to a cellular material.
Note however that most images are sparse in a different basis, such as
the Haar wavelets. Only ``l/7`` projections are acquired, therefore it is
necessary to use prior information available on the sample (its
sparsity): this is an example of **compressive sensing**.
The tomography projection operation is a linear transformation. In
addition to the data-fidelity term corresponding to a linear regression,
we penalize the L1 norm of the image to account for its sparsity. The
resulting optimization problem is called the :ref:`lasso`. We use the
class :class:`sklearn.linear_model.Lasso`, that uses the coordinate descent
algorithm. Importantly, this implementation is more computationally efficient
on a sparse matrix, than the projection operator used here.
The reconstruction with L1 penalization gives a result with zero error
(all pixels are successfully labeled with 0 or 1), even if noise was
added to the projections. In comparison, an L2 penalization
(:class:`sklearn.linear_model.Ridge`) produces a large number of labeling
errors for the pixels. Important artifacts are observed on the
reconstructed image, contrary to the L1 penalization. Note in particular
the circular artifact separating the pixels in the corners, that have
contributed to fewer projections than the central disk.
"""
print(__doc__)
# Author: Emmanuelle Gouillart <emmanuelle.gouillart@nsup.org>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from scipy import ndimage
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
import matplotlib.pyplot as plt
def _weights(x, dx=1, orig=0):
x = np.ravel(x)
floor_x = np.floor((x - orig) / dx)
alpha = (x - orig - floor_x * dx) / dx
return np.hstack((floor_x, floor_x + 1)), np.hstack((1 - alpha, alpha))
def _generate_center_coordinates(l_x):
X, Y = np.mgrid[:l_x, :l_x]
center = l_x / 2.
X += 0.5 - center
Y += 0.5 - center
return X, Y
def build_projection_operator(l_x, n_dir):
    """ Compute the tomography design matrix.

    Parameters
    ----------
    l_x : int
        linear size of image array

    n_dir : int
        number of angles at which projections are acquired.

    Returns
    -------
    p : sparse matrix of shape (n_dir l_x, l_x**2)
    """
    X, Y = _generate_center_coordinates(l_x)
    angles = np.linspace(0, np.pi, n_dir, endpoint=False)
    data_inds, weights, camera_inds = [], [], []
    # Pixel indices duplicated once for each of the two interpolation
    # neighbours returned by _weights.
    data_unravel_indices = np.arange(l_x ** 2)
    data_unravel_indices = np.hstack((data_unravel_indices,
                                      data_unravel_indices))
    for i, angle in enumerate(angles):
        # Rotate pixel centers; each pixel contributes (with linear
        # interpolation weights) to the detector cells its rotated x
        # coordinate falls between.
        Xrot = np.cos(angle) * X - np.sin(angle) * Y
        inds, w = _weights(Xrot, dx=1, orig=X.min())
        # Discard contributions that fall outside the detector.
        mask = np.logical_and(inds >= 0, inds < l_x)
        weights += list(w[mask])
        camera_inds += list(inds[mask] + i * l_x)
        data_inds += list(data_unravel_indices[mask])
    proj_operator = sparse.coo_matrix((weights, (camera_inds, data_inds)))
    return proj_operator
def generate_synthetic_data():
    """Synthetic binary data: outlines of Gaussian blobs inside a disk.

    Uses the module-level image size ``l``. Returns a boolean (l, l)
    array that is True only on object boundary pixels.
    """
    rs = np.random.RandomState(0)
    # BUG FIX: the number of points must be an int; np.random.rand
    # rejects a float argument.
    n_pts = 36
    x, y = np.ogrid[0:l, 0:l]
    # Circular field-of-view mask.
    mask_outer = (x - l / 2) ** 2 + (y - l / 2) ** 2 < (l / 2) ** 2
    mask = np.zeros((l, l))
    points = l * rs.rand(2, n_pts)
    # BUG FIX: the np.int alias was removed from NumPy; the builtin int
    # is the documented replacement.
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndimage.gaussian_filter(mask, sigma=l / n_pts)
    res = np.logical_and(mask > mask.mean(), mask_outer)
    # BUG FIX: the boolean "-" operator was removed from NumPy; logical_xor
    # keeps exactly the boundary pixels that the subtraction used to.
    return np.logical_xor(res, ndimage.binary_erosion(res))
# Generate synthetic images, and projections
l = 128
# BUG FIX: the number of projection directions must be an integer (it is
# forwarded to np.linspace's ``num`` argument); use floor division.
proj_operator = build_projection_operator(l, l // 7)
data = generate_synthetic_data()
proj = proj_operator * data.ravel()[:, np.newaxis]
# Add measurement noise to the projections.
proj += 0.15 * np.random.randn(*proj.shape)

# Reconstruction with L2 (Ridge) penalization
rgr_ridge = Ridge(alpha=0.2)
rgr_ridge.fit(proj_operator, proj.ravel())
rec_l2 = rgr_ridge.coef_.reshape(l, l)

# Reconstruction with L1 (Lasso) penalization
# the best value of alpha was determined using cross validation
# with LassoCV
rgr_lasso = Lasso(alpha=0.001)
rgr_lasso.fit(proj_operator, proj.ravel())
rec_l1 = rgr_lasso.coef_.reshape(l, l)

# Plot original image and both reconstructions side by side.
plt.figure(figsize=(8, 3.3))
plt.subplot(131)
plt.imshow(data, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.title('original image')
plt.subplot(132)
plt.imshow(rec_l2, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L2 penalization')
plt.axis('off')
plt.subplot(133)
plt.imshow(rec_l1, cmap=plt.cm.gray, interpolation='nearest')
plt.title('L1 penalization')
plt.axis('off')

plt.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0,
                    right=1)
plt.show()
| bsd-3-clause |
MSusik/invenio | invenio/legacy/bibcatalog/task.py | 3 | 16234 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2013 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
BibCatalog task
Based on configured plug-ins this task will create tickets for records.
"""
import sys
import getopt
import os
import traceback
from invenio.legacy.bibsched.bibtask import \
task_init, \
task_set_option, \
task_get_option, write_message, \
task_update_progress, \
task_sleep_now_if_required
from invenio.config import \
CFG_VERSION, \
CFG_PYLIBDIR
from invenio.legacy.docextract.task import \
split_ids, \
fetch_last_updated, \
store_last_updated
from invenio.legacy.search_engine import \
get_collection_reclist, \
perform_request_search
from invenio.legacy.bibcatalog.api import BIBCATALOG_SYSTEM
from invenio.legacy.bibcatalog.utils import record_id_from_record
from invenio.legacy.bibcatalog.dblayer import \
get_all_new_records, \
get_all_modified_records
from invenio.legacy.bibedit.utils import get_bibrecord
from invenio.pluginutils import PluginContainer
class BibCatalogPluginException(Exception):
    """Raised when a ticket plugin is broken or missing required hooks."""
class BibCatalogTicket(object):
    """
    Represents a Ticket to create using BibCatalog API.
    """

    def __init__(self, subject="", body="", queue="", ticketid=None, recid=-1):
        self.subject = subject
        self.queue = queue
        self.body = body
        self.ticketid = ticketid
        self.recid = recid

    def __repr__(self):
        template = ("<BibCatalogTicket(subject=%(subject)s,"
                    "queue=%(queue)s,recid=%(recid)s)>")
        return template % {"subject": self.subject,
                           "queue": self.queue,
                           "recid": self.recid}

    def submit(self):
        """
        Submits the ticket using BibCatalog API.

        @raise Exception: if ticket creation is not successful.
        @return bool: True if created, False if not.
        """
        if self.exists():
            # Never duplicate an already-submitted ticket.
            return False
        self.ticketid = BIBCATALOG_SYSTEM.ticket_submit(subject=self.subject,
                                                        queue=self.queue,
                                                        text=self.body,
                                                        recordid=self.recid)
        return True

    def exists(self):
        """
        Does the ticket already exist in the RT system?

        @return results: Evaluates to True if it exists, False if not.
        """
        return BIBCATALOG_SYSTEM.ticket_search(None,
                                               recordid=self.recid,
                                               queue=self.queue,
                                               subject=self.subject)
def task_check_options():
    """ Reimplement this method for having the possibility to check options
    before submitting the task, in order for example to provide default
    values. It must return False if there are errors in the options.
    """
    # At least one record-selection option is required.
    if not task_get_option('new') \
            and not task_get_option('modified') \
            and not task_get_option('recids') \
            and not task_get_option('collections') \
            and not task_get_option('reportnumbers'):
        print >>sys.stderr, 'Error: No records specified, you need' \
            ' to specify which records to run on'
        return False

    ticket_plugins = {}
    all_plugins, error_messages = load_ticket_plugins()
    if error_messages:
        # We got broken plugins. We alert only for now.
        print >>sys.stderr, "\n".join(error_messages)

    if task_get_option('tickets'):
        # Tickets specified
        for ticket in task_get_option('tickets'):
            if ticket not in all_plugins.get_enabled_plugins():
                # BUG FIX: the message had a %s placeholder but was never
                # formatted with the ticket name (and a stray debug
                # "print ticket" preceded it).
                print >>sys.stderr, \
                    'Error: plugin %s is broken or does not exist' % (ticket,)
                return False
            ticket_plugins[ticket] = all_plugins[ticket]
    elif task_get_option('all-tickets'):
        ticket_plugins = all_plugins.get_enabled_plugins()
    else:
        print >>sys.stderr, 'Error: No tickets specified, you need' \
            ' to specify at least one ticket type to create'
        return False
    task_set_option('tickets', ticket_plugins)

    if not BIBCATALOG_SYSTEM:
        print >>sys.stderr, 'Error: no cataloging system defined'
        return False
    res = BIBCATALOG_SYSTEM.check_system()
    if res:
        # NOTE(review): a failing check only warns here and the task is
        # still accepted (True is returned) -- confirm this is intended.
        print >>sys.stderr, 'Error while checking cataloging system: %s' % \
            (res,)
    return True
def task_parse_options(key, value, opts, args):  # pylint: disable-msg=W0613
    """ Must be defined for bibtask to create a task """
    if args:
        # There should be no standalone arguments for any bibcatalog job
        # This will catch args before the job is shipped to Bibsched
        raise StandardError("Error: Unrecognised argument '%s'." % args[0])

    if key in ('-a', '--new'):
        task_set_option('new', True)
    elif key in ('-m', '--modified'):
        task_set_option('modified', True)
    elif key in ('-c', '--collections'):
        # Accumulate the record ids of every named collection into one set,
        # registering the set as the option value on first use.
        collections = task_get_option('collections')
        if not collections:
            collections = set()
            task_set_option('collections', collections)
        for v in value.split(","):
            collections.update(get_collection_reclist(v))
    elif key in ('-i', '--recids'):
        recids = task_get_option('recids')
        if not recids:
            recids = set()
            task_set_option('recids', recids)
        # split_ids understands comma-separated values and ranges.
        recids.update(split_ids(value))
    elif key in ('--tickets',):
        # Comma-separated list of ticket plug-in names to run.
        tickets = task_get_option('tickets')
        if not tickets:
            tickets = set()
            task_set_option('tickets', tickets)
        for item in value.split(','):
            tickets.add(item.strip())
    elif key in ('--all-tickets',):
        task_set_option('all-tickets', True)
    elif key in ('-q', '--query'):
        query = task_get_option('query')
        if not query:
            query = set()
            task_set_option('query', query)
        query.add(value)
    elif key in ('-r', '--reportnumbers'):
        reportnumbers = task_get_option('reportnumbers')
        if not reportnumbers:
            reportnumbers = set()
            task_set_option('reportnumbers', reportnumbers)
        reportnumbers.add(value)
    return True
def task_run_core():
    """
    Main daemon task.

    Returns True when run successfully. False otherwise.
    """
    # Dictionary of "plugin_name" -> func
    tickets_to_apply = task_get_option('tickets')
    write_message("Ticket plugins found: %s" %
                  (str(tickets_to_apply),), verbose=9)

    task_update_progress("Loading records")
    records_concerned = get_recids_to_load()
    write_message("%i record(s) found" %
                  (len(records_concerned),))

    records_processed = 0
    for record, last_date in load_records_from_id(records_concerned):
        records_processed += 1
        recid = record_id_from_record(record)
        task_update_progress("Processing records %s/%s (%i%%)"
                             % (records_processed, len(records_concerned),
                                int(float(records_processed) / len(records_concerned) * 100)))
        task_sleep_now_if_required(can_stop_too=True)
        # Run every requested ticket plug-in against the current record.
        for ticket_name, plugin in tickets_to_apply.items():
            if plugin:
                write_message("Running template %s for %s" % (ticket_name, recid),
                              verbose=5)
                try:
                    ticket = BibCatalogTicket(recid=int(recid))
                    if plugin['check_record'](ticket, record):
                        ticket = plugin['generate_ticket'](ticket, record)
                        write_message("Ticket to be generated: %s" % (ticket,), verbose=5)
                        # submit() returns False when an equivalent ticket
                        # already exists in the RT system.
                        res = ticket.submit()
                        if res:
                            write_message("Ticket #%s created for %s" %
                                          (ticket.ticketid, recid))
                        else:
                            write_message("Ticket already exists for %s" %
                                          (recid,))
                    else:
                        # NOTE(review): the message is never %-formatted here
                        # (comma instead of %) -- confirm write_message accepts
                        # extra positional args, otherwise (recid,) is dropped.
                        write_message("Skipping record %s", (recid,))
                except Exception, e:
                    write_message("Error submitting ticket for record %s:" % (recid,))
                    write_message(traceback.format_exc())
                    raise e
            else:
                raise BibCatalogPluginException("Plugin not valid in %s" % (ticket_name,))
        if last_date:
            # Remember how far we got, so --new/--modified runs can resume.
            store_last_updated(recid, last_date, name="bibcatalog")
    write_message("%i record(s) processed" %
                  (len(records_concerned),))
    return True
def load_ticket_plugins():
    """
    Will load all the ticket plugins found under CFG_BIBCATALOG_PLUGIN_DIR.

    Returns a tuple of plugin_object, list of errors.
    """
    # TODO add to configfile
    CFG_BIBCATALOG_PLUGIN_DIR = os.path.join(CFG_PYLIBDIR,
                                             "invenio",
                                             "bibcatalog_ticket_templates",
                                             "*.py")
    # Load plugins
    plugins = PluginContainer(CFG_BIBCATALOG_PLUGIN_DIR,
                              plugin_builder=_bibcatalog_plugin_builder)

    # Remove __init__ if applicable
    try:
        plugins.disable_plugin("__init__")
    except KeyError:
        pass

    # Collect a readable error message for every broken plug-in.
    error_messages = []
    for plugin, info in plugins.get_broken_plugins().items():
        error_messages.append(
            "Failed to load %s:\n %s"
            % (plugin, "".join(traceback.format_exception(*info))))
    return plugins, error_messages
def get_recids_to_load():
    """
    Generates the final list of record IDs to load.

    Returns a list of tuples like: (recid, date)
    """
    # NOTE(review): task_parse_options registers "recids" as a set, yet
    # .extend() is called on it below -- confirm the option is normalised
    # to a list before this runs.
    recids_given = task_get_option("recids", default=[])
    query_given = task_get_option("query")
    reportnumbers_given = task_get_option("reportnumbers")
    if query_given:
        write_message("Performing given search query: %s" % (query_given,))
        result = perform_request_search(p=query_given,
                                        of='id',
                                        rg=0,
                                        wl=0)
        recids_given.extend(result)

    if reportnumbers_given:
        write_message("Searching for records referring to given reportnumbers")
        for reportnumber in reportnumbers_given:
            result = perform_request_search(p='reportnumber:%s' % (reportnumber,),
                                            of='id',
                                            rg=0,
                                            wl=0)
            recids_given.extend(result)

    # Explicitly requested records carry no "last updated" date.
    recids_given = [(recid, None) for recid in recids_given]

    # Records selected via --new/--modified resume from the stored
    # checkpoint and keep their modification date for checkpointing.
    last_id, last_date = fetch_last_updated(name="bibcatalog")
    records_found = []
    if task_get_option("new", default=False):
        records_found.extend(get_all_new_records(since=last_date, last_id=last_id))
    if task_get_option("modified", default=False):
        records_found.extend(get_all_modified_records(since=last_date, last_id=last_id))

    for recid, date in records_found:
        recids_given.append((recid, date))
    return recids_given
def load_records_from_id(records):
    """
    Lazily resolve (recid, date) tuples into (record, date) tuples.

    @param records: iterable of (recid, date-string) tuples,
        e.g. (1, "2012-12-12 12:12:12")
    @type records: iterable
    @yield: tuple of (record structure (dict), date-string)
    """
    for recid, date in records:
        bibrecord = get_bibrecord(int(recid))
        if bibrecord:
            yield bibrecord, date
        else:
            write_message("Error: could not load record %s" % (recid,))
def _bibcatalog_plugin_builder(plugin_name, plugin_code):  # pylint: disable-msg=W0613
    """
    Custom builder for pluginutils.

    Picks the plugin API callables out of the loaded module, defaulting
    each to None when the module does not define it.

    @param plugin_name: the name of the plugin.
    @type plugin_name: string
    @param plugin_code: the code of the module as just read from
        filesystem.
    @type plugin_code: module
    @return: the plugin (dict with "check_record" and "generate_ticket")
    """
    return dict((field, getattr(plugin_code, field, None))
                for field in ("check_record", "generate_ticket"))
def main():
    """Constructs the BibCatalog bibtask."""
    # Runtime help text shown by the task; passed to task_init below.
    usage = """
  Non-daemon options:
  -l, --list-tickets       List available tickets.

  Scheduled (daemon) options:

  Selection of records (Required):
  -a, --new            Run on all newly inserted records.
  -m, --modified       Run on all newly modified records.
  -i, --recids=        Record id for extraction.
  -c, --collections=   Run on all records in a specific collection.
  -q, --query=         Specify a search query to fetch records to run on.
  -r, --reportnumbers= Run on all records related with specific arXiv ids.

  Selection of tickets (Required):
  --tickets=           Specify which tickets to run.
  --all-tickets        Run on all tickets

  Examples:
   (run a periodical daemon job on a given ticket template)
      bibcatalog -a --tickets metadata_curation -s1h

   (run all tickets on a set of records)
      bibcatalog --recids 1,2 -i 3 --all-tickets

   (run some tickets on a collection)
      bibcatalog --collections "Articles" --tickets metadata_curation,reference_curation
"""
    # Handle the non-daemon "-l/--list-tickets" mode locally, before
    # handing control over to the bibtask machinery.
    try:
        opts, dummy = getopt.getopt(sys.argv[1:], "l", ["list-tickets"])
    except getopt.GetoptError:
        # Other options are validated by task_init below, not here.
        opts = []
    for opt, dummy in opts:
        if opt in ["-l", "--list-tickets"]:
            all_plugins, error_messages = load_ticket_plugins()
            if error_messages:
                # We got broken plugins. We alert only for now.
                print >>sys.stderr, "\n".join(error_messages)
            print "Enabled tickets:"
            for plugin in all_plugins.get_enabled_plugins():
                print " " + plugin
            print "Run `$ bibcatalog --tickets=<ticket-name>` to select a ticket template."
            return

    # Build and submit the task
    task_init(authorization_action='runbibcatalog',
              authorization_msg="BibCatalog Task Submission",
              description="",
              help_specific_usage=usage,
              version="Invenio v%s" % CFG_VERSION,
              specific_params=("hVv:i:c:q:r:am",
                               ["help",
                                "version",
                                "verbose=",
                                "recids=",
                                "collections=",
                                "query=",
                                "reportnumbers=",
                                "new",
                                "modified",
                                "tickets=",
                                "all-tickets"]),
              task_submit_elaborate_specific_parameter_fnc=task_parse_options,
              task_submit_check_options_fnc=task_check_options,
              task_run_fnc=task_run_core)
| gpl-2.0 |
nkrim/passwordgen | src/worddict.py | 1 | 4612 | # Copyright 2017 Noah Krim
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
from shutil import copyfileobj
from tempfile import TemporaryFile, mkstemp
from .utils import *
class WordDictionary:
    """Length-indexed collection of words backed by a formatted words file."""

    class LengthSetMap:
        """Maps word length -> set of words of that length.

        Index 0 always holds the empty string, so the underlying list is
        never empty and slicing by length is safe.
        """

        def __init__(self):
            # _words[n] is the set of words with length n.
            self._words = [{''}]

        def __bool__(self):
            # Truthy once at least one real (length >= 1) word is present.
            return self.maxlength() > 0

        def __getitem__(self, length):
            # Accepts an int or a slice (slices pass through to the list).
            return self._words[length]

        def __iter__(self):
            return self._words.__iter__()

        def __len__(self):
            return len(self._words)

        def __str__(self):
            # One comma-separated, sorted line per length; the length-0
            # slot is skipped so line 1 corresponds to length 1.
            return '\n'.join(','.join(sorted(word_set)) for word_set in self._words[1:])

        def add(self, word, length=-1):
            """Add a word under the given length (negative = use len(word))."""
            if length < 0:
                length = len(word)
            # Grow the list until the target length slot exists.
            while length >= len(self._words):
                self._words.append(set())
            self._words[length].add(word)

        def maxlength(self):
            """Return the largest length with at least one word, or 0."""
            for i in reversed(range(len(self._words))):
                if len(self._words[i]) > 0:
                    return i
            return 0

    def __init__(self, words_file, wordmap=None):
        self.words_file = words_file
        if not wordmap:
            # No pre-parsed map supplied: parse the formatted words file.
            wordmap = WordDictionary.parse(self.words_file, formatted=True)
        self.wordmap = wordmap

    def getWordPool(self, length_lower=None, length_upper=None):
        """Return the set of words with length in [length_lower, length_upper].

        Falls back to all words (length >= 1), then to the length-0 slot,
        whenever the requested range yields nothing.
        """
        if not length_upper:
            # NOTE(review): a falsy upper bound (None or 0) collapses the
            # range to a single length -- confirm 0 is never a valid bound.
            length_upper = length_lower
        if length_lower != None:
            pool = {w for lenset in self.wordmap[length_lower:length_upper+1] for w in lenset}
        else:
            pool = set()
        if not pool:
            # Fallback 1: every word of length >= 1.
            pool = {w for lenset in self.wordmap[1:] for w in lenset}
        if not pool:
            # Fallback 2: the length-0 slot (the empty string).
            pool = {w for w in self.wordmap[0]}
        return pool

    @staticmethod
    def parse(file_path, formatted=False):
        """Parse a words file into a LengthSetMap.

        With formatted=True, line N of the file is a comma-separated list
        of words of length N. Otherwise words are extracted from free-form
        text by stripping hyphens/apostrophes and splitting on non-letters.
        """
        wordmap = WordDictionary.LengthSetMap()
        if formatted:
            length = 1
            with open(file_path, 'r') as f:
                for line in f:
                    for w in line.split(','):
                        w = w.strip()
                        if w:
                            wordmap.add(w, length)
                    length += 1
        else:
            # Remove intra-word punctuation, then split on non-letter runs.
            sub_re = re.compile(r'[\-\']')
            split_re = re.compile(r'[^a-zA-Z]+')
            with open(file_path, 'r') as f:
                for line in f:
                    for w in split_re.split(sub_re.sub('', line)):
                        if w:
                            wordmap.add(w)
        return wordmap

    @staticmethod
    def backup(words_file):
        """Best-effort copy of the words file to '<words_file>.old'.

        Returns True on success, False otherwise (errors are printed,
        never raised).
        """
        # Copy old `words.txt` to `words.txt.old`
        try:
            with open(words_file, 'r') as f:
                with open(words_file+'.old', 'w') as old:
                    copyfileobj(f, old)
        except IOError:
            printerr('No formatted words file could be found at %r, skipping backup' % words_file)
        except:
            # NOTE(review): bare except also swallows KeyboardInterrupt /
            # SystemExit -- consider `except Exception`.
            printerr('Could not backup words file from %r to %r' % (words_file, words_file+'.old'))
        else:
            return True
        return False

    @staticmethod
    def revert(words_file):
        """Restore '<words_file>.old' over the words file.

        The backup is staged through a temp file; the current words file
        is itself backed up first, and on a failed write the overwritten
        backup is restored from the temp copy. Returns True on success.
        """
        # Revert `words.txt.old` to `words.txt`
        _, temp_file = mkstemp()
        old_file = words_file+'.old'
        try:
            # Stage the backup into a temp file so backup() below can
            # safely overwrite old_file.
            with open(old_file, 'r') as old:
                with open(temp_file, 'w') as temp:
                    copyfileobj(old, temp)
        except IOError:
            printerr('No backup file found at %r' % old_file)
        except:
            # NOTE(review): bare except -- see backup().
            printerr('Could not load backup file %r' % old_file)
        else:
            if WordDictionary.backup(words_file):
                try:
                    with open(temp_file, 'r') as temp:
                        with open(words_file, 'w') as f:
                            copyfileobj(temp, f)
                except IOError:
                    printerr('No words file found at %r' % words_file)
                except:
                    printerr('Could not revert backup to %r, attempting to restore overwritten backup' % words_file)
                    try:
                        with open(temp_file, 'r') as temp:
                            with open(old_file, 'w') as old:
                                copyfileobj(temp, old)
                    except:
                        printerr('Could not restore the overwritten backup. Backup is lost.')
                else:
                    # Success: drop the staging file and report.
                    os.remove(temp_file)
                    return True
        os.remove(temp_file)
        return False

    @staticmethod
    def setWordsFile(words_file, file_path, backup=True, formatted=False):
        """Replace the words file with the parsed contents of file_path.

        Returns a new WordDictionary on success, None on any failure.
        """
        # Read input file
        try:
            wordmap = WordDictionary.parse(file_path, formatted)
        except FileNotFoundError:
            printerr('Could not find file %r' % file_path)
            return None
        # Backup words file
        if backup:
            WordDictionary.backup(words_file)
        # Write new words file
        try:
            with open(words_file, 'w') as f:
                f.write(str(wordmap))
        except Exception as e:
            printerr('Could not write new words file: %s' % e)
            return None
        # Return wordmap
        return WordDictionary(words_file, wordmap)
wireservice/agate | agate/aggregations/mad.py | 4 | 1347 | #!/usr/bin/env python
from agate.aggregations.base import Aggregation
from agate.aggregations.has_nulls import HasNulls
from agate.aggregations.median import Median
from agate.data_types import Number
from agate.exceptions import DataTypeError
from agate.utils import median
from agate.warns import warn_null_calculation
class MAD(Aggregation):
    """
    Calculate the `median absolute deviation <http://en.wikipedia.org/wiki/Median_absolute_deviation>`_
    of a column.

    :param column_name:
        The name of a column containing :class:`.Number` data.
    """
    def __init__(self, column_name):
        self._column_name = column_name
        self._median = Median(column_name)

    def get_aggregate_data_type(self, table):
        # The deviation of Number data is itself Number data.
        return Number()

    def validate(self, table):
        column = table.columns[self._column_name]

        if not isinstance(column.data_type, Number):
            raise DataTypeError('MAD can only be applied to columns containing Number data.')

        if HasNulls(self._column_name).run(table):
            warn_null_calculation(self, column)

    def run(self, table):
        column = table.columns[self._column_name]
        values = column.values_without_nulls_sorted()
        center = self._median.run(table)
        deviations = tuple(abs(value - center) for value in values)
        return median(deviations)
| mit |
robkinyon/python-oauth2 | oauth2/clients/smtp.py | 884 | 1680 | """
The MIT License
Copyright (c) 2007-2010 Leah Culver, Joe Stump, Mark Paschal, Vic Fryzel
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import oauth2
import smtplib
import base64
class SMTP(smtplib.SMTP):
    """SMTP wrapper for smtplib.SMTP that implements XOAUTH."""

    def authenticate(self, url, consumer, token):
        # Validate the credential objects before building the auth string.
        if not (consumer is None or isinstance(consumer, oauth2.Consumer)):
            raise ValueError("Invalid consumer.")
        if not (token is None or isinstance(token, oauth2.Token)):
            raise ValueError("Invalid token.")

        xoauth = base64.b64encode(oauth2.build_xoauth_string(url, consumer, token))
        self.docmd('AUTH', 'XOAUTH %s' % xoauth)
benspaulding/django | django/contrib/humanize/templatetags/humanize.py | 6 | 8164 | from __future__ import unicode_literals
import re
from datetime import date, datetime, timedelta
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_unicode
from django.utils.formats import number_format
from django.utils.translation import pgettext, ungettext, ugettext as _
from django.utils.timezone import is_aware, utc
register = template.Library()
@register.filter(is_safe=True)
def ordinal(value):
    """
    Converts an integer to its ordinal as a string. 1 is '1st', 2 is '2nd',
    3 is '3rd', etc. Works for any integer.
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        # Non-numeric input is passed through untouched.
        return value
    suffixes = (_('th'), _('st'), _('nd'), _('rd'), _('th'), _('th'), _('th'), _('th'), _('th'), _('th'))
    # 11, 12 and 13 take 'th' despite ending in 1, 2 and 3.
    index = 0 if value % 100 in (11, 12, 13) else value % 10
    return "%d%s" % (value, suffixes[index])
@register.filter(is_safe=True)
def intcomma(value, use_l10n=True):
    """
    Converts an integer to a string containing commas every three digits.
    For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
    """
    if settings.USE_L10N and use_l10n:
        try:
            if not isinstance(value, float):
                value = int(value)
        except (TypeError, ValueError):
            # Not a plain number; retry with localization disabled.
            return intcomma(value, False)
        else:
            return number_format(value, force_grouping=True)
    orig = force_unicode(value)
    # Insert one comma per call, recursing until no change is made.
    new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
    return new if orig == new else intcomma(new, use_l10n)
# A tuple of standard large number to their converters.
# Each entry maps a power of ten (exponent) to a factory returning the
# pair of (float-formatted, string-formatted) translated templates used
# by intword() below.
intword_converters = (
    (6, lambda number: (
        ungettext('%(value).1f million', '%(value).1f million', number),
        ungettext('%(value)s million', '%(value)s million', number),
    )),
    (9, lambda number: (
        ungettext('%(value).1f billion', '%(value).1f billion', number),
        ungettext('%(value)s billion', '%(value)s billion', number),
    )),
    (12, lambda number: (
        ungettext('%(value).1f trillion', '%(value).1f trillion', number),
        ungettext('%(value)s trillion', '%(value)s trillion', number),
    )),
    (15, lambda number: (
        ungettext('%(value).1f quadrillion', '%(value).1f quadrillion', number),
        ungettext('%(value)s quadrillion', '%(value)s quadrillion', number),
    )),
    (18, lambda number: (
        ungettext('%(value).1f quintillion', '%(value).1f quintillion', number),
        ungettext('%(value)s quintillion', '%(value)s quintillion', number),
    )),
    (21, lambda number: (
        ungettext('%(value).1f sextillion', '%(value).1f sextillion', number),
        ungettext('%(value)s sextillion', '%(value)s sextillion', number),
    )),
    (24, lambda number: (
        ungettext('%(value).1f septillion', '%(value).1f septillion', number),
        ungettext('%(value)s septillion', '%(value)s septillion', number),
    )),
    (27, lambda number: (
        ungettext('%(value).1f octillion', '%(value).1f octillion', number),
        ungettext('%(value)s octillion', '%(value)s octillion', number),
    )),
    (30, lambda number: (
        ungettext('%(value).1f nonillion', '%(value).1f nonillion', number),
        ungettext('%(value)s nonillion', '%(value)s nonillion', number),
    )),
    (33, lambda number: (
        ungettext('%(value).1f decillion', '%(value).1f decillion', number),
        ungettext('%(value)s decillion', '%(value)s decillion', number),
    )),
    (100, lambda number: (
        ungettext('%(value).1f googol', '%(value).1f googol', number),
        ungettext('%(value)s googol', '%(value)s googol', number),
    )),
)
@register.filter(is_safe=False)
def intword(value):
    """
    Converts a large integer to a friendly text representation. Works best
    for numbers over 1 million. For example, 1000000 becomes '1.0 million',
    1200000 becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
    """
    try:
        value = int(value)
    except (TypeError, ValueError):
        return value

    if value < 1000000:
        return value

    def _render(number, float_formatted, string_formatted):
        """Use the i18n enabled defaultfilters.floatformat if possible."""
        if settings.USE_L10N:
            return string_formatted % {'value': defaultfilters.floatformat(number, 1)}
        return float_formatted % {'value': number}

    # Walk the scale table until the value fits below the next scale.
    for exponent, converters in intword_converters:
        large_number = 10 ** exponent
        if value < large_number * 1000:
            new_value = value / float(large_number)
            return _render(new_value, *converters(new_value))
    return value
@register.filter(is_safe=True)
def apnumber(value):
    """
    For numbers 1-9, returns the number spelled out. Otherwise, returns the
    number. This follows Associated Press style.
    """
    try:
        number = int(value)
    except (TypeError, ValueError):
        return value
    if number < 1 or number > 9:
        return number
    words = (_('one'), _('two'), _('three'), _('four'), _('five'),
             _('six'), _('seven'), _('eight'), _('nine'))
    return words[number - 1]
@register.filter
def naturalday(value, arg=None):
    """
    For date values that are tomorrow, today or yesterday compared to
    present day returns representing string. Otherwise, returns a string
    formatted according to settings.DATE_FORMAT.
    """
    try:
        tzinfo = getattr(value, 'tzinfo', None)
        value = date(value.year, value.month, value.day)
    except AttributeError:
        # Passed value wasn't a date object
        return value
    except ValueError:
        # Date arguments out of range
        return value

    day_delta = (value - datetime.now(tzinfo).date()).days
    if day_delta == 0:
        return _('today')
    if day_delta == 1:
        return _('tomorrow')
    if day_delta == -1:
        return _('yesterday')
    return defaultfilters.date(value, arg)
@register.filter
def naturaltime(value):
    """
    For date and time values shows how many seconds, minutes or hours ago
    compared to current timestamp returns representing string.
    """
    if not isinstance(value, date):  # datetime is a subclass of date
        return value

    now = datetime.now(utc if is_aware(value) else None)
    if value < now:
        delta = now - value
        if delta.days != 0:
            # A day or more in the past: fall back to the verbose timesince.
            return pgettext(
                'naturaltime', '%(delta)s ago'
            ) % {'delta': defaultfilters.timesince(value)}
        elif delta.seconds == 0:
            return _('now')
        elif delta.seconds < 60:
            return ungettext(
                'a second ago', '%(count)s seconds ago', delta.seconds
            ) % {'count': delta.seconds}
        elif delta.seconds // 60 < 60:
            count = delta.seconds // 60
            return ungettext(
                'a minute ago', '%(count)s minutes ago', count
            ) % {'count': count}
        else:
            count = delta.seconds // 60 // 60
            return ungettext(
                'an hour ago', '%(count)s hours ago', count
            ) % {'count': count}
    else:
        # Mirror image of the branch above for future timestamps.
        delta = value - now
        if delta.days != 0:
            return pgettext(
                'naturaltime', '%(delta)s from now'
            ) % {'delta': defaultfilters.timeuntil(value)}
        elif delta.seconds == 0:
            return _('now')
        elif delta.seconds < 60:
            return ungettext(
                'a second from now', '%(count)s seconds from now', delta.seconds
            ) % {'count': delta.seconds}
        elif delta.seconds // 60 < 60:
            count = delta.seconds // 60
            return ungettext(
                'a minute from now', '%(count)s minutes from now', count
            ) % {'count': count}
        else:
            count = delta.seconds // 60 // 60
            return ungettext(
                'an hour from now', '%(count)s hours from now', count
            ) % {'count': count}
wavefrontHQ/python-client | wavefront_api_client/api/access_policy_api.py | 1 | 11678 | # coding: utf-8
"""
Wavefront REST API
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from wavefront_api_client.api_client import ApiClient
class AccessPolicyApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen

    Each endpoint is exposed as a pair of methods: a thin public wrapper
    (e.g. ``get_access_policy``) and a ``*_with_http_info`` variant that
    validates kwargs and performs the actual HTTP call.
    """

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def get_access_policy(self, **kwargs):  # noqa: E501
        """Get the access policy  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_access_policy(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: ResponseContainerAccessPolicy
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.get_access_policy_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.get_access_policy_with_http_info(**kwargs)  # noqa: E501
            return data

    def get_access_policy_with_http_info(self, **kwargs):  # noqa: E501
        """Get the access policy  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_access_policy_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :return: ResponseContainerAccessPolicy
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # This endpoint takes no API parameters, only framework kwargs.
        all_params = []  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Reject unknown keyword arguments early.
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_access_policy" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/accesspolicy', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResponseContainerAccessPolicy',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def update_access_policy(self, **kwargs):  # noqa: E501
        """Update the access policy  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_access_policy(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param AccessPolicy body: Example Body: <pre>{ \"policyRules\": [{ \"name\": \"rule name\", \"description\": \"desc\", \"action\": \"ALLOW\", \"subnet\": \"12.148.72.0/23\" }] }</pre>
        :return: ResponseContainerAccessPolicy
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.update_access_policy_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.update_access_policy_with_http_info(**kwargs)  # noqa: E501
            return data

    def update_access_policy_with_http_info(self, **kwargs):  # noqa: E501
        """Update the access policy  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.update_access_policy_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param AccessPolicy body: Example Body: <pre>{ \"policyRules\": [{ \"name\": \"rule name\", \"description\": \"desc\", \"action\": \"ALLOW\", \"subnet\": \"12.148.72.0/23\" }] }</pre>
        :return: ResponseContainerAccessPolicy
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['body']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method update_access_policy" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # The AccessPolicy payload is sent as the request body.
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/accesspolicy', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResponseContainerAccessPolicy',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def validate_url(self, **kwargs):  # noqa: E501
        """Validate a given url and ip address  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.validate_url(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str ip:
        :return: ResponseContainerAccessPolicyAction
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async_req'):
            return self.validate_url_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.validate_url_with_http_info(**kwargs)  # noqa: E501
            return data

    def validate_url_with_http_info(self, **kwargs):  # noqa: E501
        """Validate a given url and ip address  # noqa: E501

          # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.validate_url_with_http_info(async_req=True)
        >>> result = thread.get()

        :param async_req bool
        :param str ip:
        :return: ResponseContainerAccessPolicyAction
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['ip']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method validate_url" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        query_params = []
        # The ip to validate is passed as a query-string parameter.
        if 'ip' in params:
            query_params.append(('ip', params['ip']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/api/v2/accesspolicy/validate', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='ResponseContainerAccessPolicyAction',  # noqa: E501
            auth_settings=auth_settings,
            async_req=params.get('async_req'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| apache-2.0 |
ustramooner/CouchPotato | library/mako/pyparser.py | 21 | 16885 | # ast.py
# Copyright (C) Mako developers
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Handles parsing of Python code.
Parsing to AST is done via _ast on Python > 2.5, otherwise the compiler
module is used.
"""
from StringIO import StringIO
from mako import exceptions, util
import operator
if util.py3k:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None', 'print'])

    # the "id" attribute on a function node
    # (Python 3 names function arguments via the ``arg`` attribute)
    arg_id = operator.attrgetter('arg')
else:
    # words that cannot be assigned to (notably
    # smaller than the total keys in __builtins__)
    reserved = set(['True', 'False', 'None'])

    # the "id" attribute on a function node
    arg_id = operator.attrgetter('id')

try:
    import _ast
    util.restore__ast(_ast)
    import _ast_util
except ImportError:
    # Python < 2.5: _ast is unavailable; fall back to the deprecated
    # ``compiler`` package. ``_ast = None`` selects that code path below.
    _ast = None
    from compiler import parse as compiler_parse
    from compiler import visitor
def parse(code, mode='exec', **exception_kwargs):
    """Parse an expression into AST.

    Dispatches to ``_ast`` when available, otherwise to the legacy
    ``compiler`` package. Any parse failure is re-raised as a Mako
    ``SyntaxException`` carrying template position info from
    ``exception_kwargs``.
    """
    try:
        if _ast:
            return _ast_util.parse(code, '<unknown>', mode)
        else:
            if isinstance(code, unicode):
                # the compiler package cannot parse unicode source directly.
                code = code.encode('ascii', 'backslashreplace')
            return compiler_parse(code, mode)
    except Exception, e:
        raise exceptions.SyntaxException("(%s) %s (%s)" % (e.__class__.__name__, str(e), repr(code[0:50])), **exception_kwargs)
if _ast:
    class FindIdentifiers(_ast_util.NodeVisitor):
        """Walk an ``_ast`` tree, recording declared and undeclared
        identifiers onto the given listener object."""

        def __init__(self, listener, **exception_kwargs):
            self.in_function = False
            self.in_assign_targets = False
            self.local_ident_stack = {}
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def _add_declared(self, name):
            # names bound inside a function body are locals; only
            # top-level bindings count as "declared" for the template.
            if not self.in_function:
                self.listener.declared_identifiers.add(name)

        def visit_ClassDef(self, node):
            self._add_declared(node.name)

        def visit_Assign(self, node):
            # flip around the visiting of Assign so the expression gets evaluated first,
            # in the case of a clause like "x=x+5" (x is undeclared)
            self.visit(node.value)
            in_a = self.in_assign_targets
            self.in_assign_targets = True
            for n in node.targets:
                self.visit(n)
            self.in_assign_targets = in_a

        if util.py3k:
            # ExceptHandler is in Python 2, but this
            # block only works in Python 3 (and is required there)
            def visit_ExceptHandler(self, node):
                if node.name is not None:
                    self._add_declared(node.name)
                if node.type is not None:
                    self.listener.undeclared_identifiers.add(node.type.id)
                for statement in node.body:
                    self.visit(statement)

        def visit_FunctionDef(self, node):
            self._add_declared(node.name)
            # push function state onto stack. dont log any
            # more identifiers as "declared" until outside of the function,
            # but keep logging identifiers as "undeclared".
            # track argument names in each function header so they arent counted as "undeclared"
            saved = {}
            inf = self.in_function
            self.in_function = True
            for arg in node.args.args:
                if arg_id(arg) in self.local_ident_stack:
                    saved[arg_id(arg)] = True
                else:
                    self.local_ident_stack[arg_id(arg)] = True
            for n in node.body:
                self.visit(n)
            self.in_function = inf
            # pop only the argument names this function introduced.
            for arg in node.args.args:
                if arg_id(arg) not in saved:
                    del self.local_ident_stack[arg_id(arg)]

        def visit_For(self, node):
            # flip around visit: evaluate the iterable before the target
            # so "for x in x" reports x as undeclared.
            self.visit(node.iter)
            self.visit(node.target)
            for statement in node.body:
                self.visit(statement)
            for statement in node.orelse:
                self.visit(statement)

        def visit_Name(self, node):
            if isinstance(node.ctx, _ast.Store):
                self._add_declared(node.id)
            if node.id not in reserved and \
                node.id not in self.listener.declared_identifiers and \
                node.id not in self.local_ident_stack:
                self.listener.undeclared_identifiers.add(node.id)

        def visit_Import(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    # "import a.b" declares the top-level package name "a".
                    self._add_declared(name.name.split('.')[0])

        def visit_ImportFrom(self, node):
            for name in node.names:
                if name.asname is not None:
                    self._add_declared(name.asname)
                else:
                    if name.name == '*':
                        raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
                    self._add_declared(name.name)

    class FindTuple(_ast_util.NodeVisitor):
        """Collect each element expression of a Tuple node onto the
        listener, merging their declared/undeclared identifier sets."""

        def __init__(self, listener, code_factory, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs
            self.code_factory = code_factory

        def visit_Tuple(self, node):
            for n in node.elts:
                p = self.code_factory(n, **self.exception_kwargs)
                self.listener.codeargs.append(p)
                self.listener.args.append(ExpressionGenerator(n).value())
                self.listener.declared_identifiers = self.listener.declared_identifiers.union(p.declared_identifiers)
                self.listener.undeclared_identifiers = self.listener.undeclared_identifiers.union(p.undeclared_identifiers)

    class ParseFunc(_ast_util.NodeVisitor):
        """Extract name, argument names, defaults and star-args from a
        FunctionDef onto the listener."""

        def __init__(self, listener, **exception_kwargs):
            self.listener = listener
            self.exception_kwargs = exception_kwargs

        def visit_FunctionDef(self, node):
            self.listener.funcname = node.name
            argnames = [arg_id(arg) for arg in node.args.args]
            if node.args.vararg:
                argnames.append(node.args.vararg)
            if node.args.kwarg:
                argnames.append(node.args.kwarg)
            self.listener.argnames = argnames
            self.listener.defaults = node.args.defaults  # ast
            self.listener.varargs = node.args.vararg
            self.listener.kwargs = node.args.kwarg

    class ExpressionGenerator(object):
        """Render an AST node back into Python source text."""

        def __init__(self, astnode):
            self.generator = _ast_util.SourceGenerator(' ' * 4)
            self.generator.visit(astnode)

        def value(self):
            return ''.join(self.generator.result)
else:
class FindIdentifiers(object):
    # compiler-package (Python < 2.5) implementation: walks a compiler AST and
    # reports declared/undeclared identifiers to the listener.
    def __init__(self, listener, **exception_kwargs):
        self.in_function = False
        # argument names of the function(s) currently being visited; these
        # must not be reported as undeclared
        self.local_ident_stack = {}
        self.listener = listener
        self.exception_kwargs = exception_kwargs
    def _add_declared(self, name):
        # only names assigned at the outermost (template) level count as declared
        if not self.in_function:
            self.listener.declared_identifiers.add(name)
    def visitClass(self, node, *args):
        self._add_declared(node.name)
    def visitAssName(self, node, *args):
        self._add_declared(node.name)
    def visitAssign(self, node, *args):
        # flip around the visiting of Assign so the expression gets evaluated first,
        # in the case of a clause like "x=x+5" (x is undeclared)
        self.visit(node.expr, *args)
        for n in node.nodes:
            self.visit(n, *args)
    def visitFunction(self,node, *args):
        self._add_declared(node.name)
        # push function state onto stack. dont log any
        # more identifiers as "declared" until outside of the function,
        # but keep logging identifiers as "undeclared".
        # track argument names in each function header so they arent counted as "undeclared"
        saved = {}
        inf = self.in_function
        self.in_function = True
        for arg in node.argnames:
            if arg in self.local_ident_stack:
                # already registered by an enclosing function; remember that so
                # it is not removed when this function is popped
                saved[arg] = True
            else:
                self.local_ident_stack[arg] = True
        for n in node.getChildNodes():
            self.visit(n, *args)
        self.in_function = inf
        for arg in node.argnames:
            if arg not in saved:
                del self.local_ident_stack[arg]
    def visitFor(self, node, *args):
        # flip around visit: evaluate the iterable before the assignment target
        self.visit(node.list, *args)
        self.visit(node.assign, *args)
        self.visit(node.body, *args)
    def visitName(self, node, *args):
        # a bare name that is not reserved, not declared here and not a
        # function argument must come from the enclosing context
        if node.name not in reserved and node.name not in self.listener.declared_identifiers and node.name not in self.local_ident_stack:
            self.listener.undeclared_identifiers.add(node.name)
    def visitImport(self, node, *args):
        for (mod, alias) in node.names:
            if alias is not None:
                self._add_declared(alias)
            else:
                # "import a.b" binds the top-level name "a"
                self._add_declared(mod.split('.')[0])
    def visitFrom(self, node, *args):
        for (mod, alias) in node.names:
            if alias is not None:
                self._add_declared(alias)
            else:
                if mod == '*':
                    raise exceptions.CompileException("'import *' is not supported, since all identifier names must be explicitly declared. Please use the form 'from <modulename> import <name1>, <name2>, ...' instead.", **self.exception_kwargs)
                self._add_declared(mod)
    def visit(self, expr):
        visitor.walk(expr, self) #, walker=walker())
class FindTuple(object):
    """compiler-package variant of FindTuple: parse each tuple element and
    merge its identifier information into the listener."""
    def __init__(self, listener, code_factory, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs
        self.code_factory = code_factory
    def visitTuple(self, node, *args):
        for element in node.nodes:
            code = self.code_factory(element, **self.exception_kwargs)
            self.listener.codeargs.append(code)
            self.listener.args.append(ExpressionGenerator(element).value())
            # union() returns new sets, leaving outside references untouched
            self.listener.declared_identifiers = \
                self.listener.declared_identifiers.union(code.declared_identifiers)
            self.listener.undeclared_identifiers = \
                self.listener.undeclared_identifiers.union(code.undeclared_identifiers)
    def visit(self, expr):
        visitor.walk(expr, self) #, walker=walker())
class ParseFunc(object):
    """compiler-package variant of ParseFunc: copy a function definition's
    signature information onto the listener."""
    def __init__(self, listener, **exception_kwargs):
        self.listener = listener
        self.exception_kwargs = exception_kwargs
    def visitFunction(self, node, *args):
        # the compiler AST already stores the signature as plain attributes
        target = self.listener
        target.funcname = node.name
        target.argnames = node.argnames
        target.defaults = node.defaults
        target.varargs = node.varargs
        target.kwargs = node.kwargs
    def visit(self, expr):
        visitor.walk(expr, self)
class ExpressionGenerator(object):
    """given an AST node, generates an equivalent literal Python expression.

    compiler-package (Python < 2.5) implementation.  Output is accumulated in
    a StringIO buffer while the tree is walked; call value() to retrieve it.
    """
    def __init__(self, astnode):
        self.buf = StringIO()
        visitor.walk(astnode, self) #, walker=walker())
    def value(self):
        """Return the generated Python source text."""
        return self.buf.getvalue()
    def operator(self, op, node, *args):
        # binary operator; parenthesized to preserve the original grouping
        self.buf.write("(")
        self.visit(node.left, *args)
        self.buf.write(" %s " % op)
        self.visit(node.right, *args)
        self.buf.write(")")
    def booleanop(self, op, node, *args):
        # n-ary operator, e.g. "a and b and c" or "a | b | c"
        self.visit(node.nodes[0])
        for n in node.nodes[1:]:
            self.buf.write(" " + op + " ")
            self.visit(n, *args)
    def visitConst(self, node, *args):
        self.buf.write(repr(node.value))
    def visitAssName(self, node, *args):
        # TODO: figure out OP_ASSIGN, other OP_s
        self.buf.write(node.name)
    def visitName(self, node, *args):
        self.buf.write(node.name)
    def visitMul(self, node, *args):
        self.operator("*", node, *args)
    def visitAnd(self, node, *args):
        self.booleanop("and", node, *args)
    def visitOr(self, node, *args):
        self.booleanop("or", node, *args)
    def visitBitand(self, node, *args):
        self.booleanop("&", node, *args)
    def visitBitor(self, node, *args):
        self.booleanop("|", node, *args)
    def visitBitxor(self, node, *args):
        self.booleanop("^", node, *args)
    def visitAdd(self, node, *args):
        self.operator("+", node, *args)
    def visitGetattr(self, node, *args):
        self.visit(node.expr, *args)
        self.buf.write(".%s" % node.attrname)
    def visitSub(self, node, *args):
        self.operator("-", node, *args)
    def visitNot(self, node, *args):
        self.buf.write("not ")
        self.visit(node.expr)
    def visitDiv(self, node, *args):
        self.operator("/", node, *args)
    def visitFloorDiv(self, node, *args):
        self.operator("//", node, *args)
    def visitSubscript(self, node, *args):
        # NOTE(review): multiple subscripts are written without separating
        # commas - presumably unreachable for the expressions Mako generates;
        # confirm before relying on it.
        self.visit(node.expr)
        self.buf.write("[")
        [self.visit(x) for x in node.subs]
        self.buf.write("]")
    def visitUnarySub(self, node, *args):
        self.buf.write("-")
        self.visit(node.expr)
    def visitUnaryAdd(self, node, *args):
        # bugfix: unary plus was previously rendered as "-", which silently
        # negated expressions such as "+x"
        self.buf.write("+")
        self.visit(node.expr)
    def visitSlice(self, node, *args):
        self.visit(node.expr)
        self.buf.write("[")
        if node.lower is not None:
            self.visit(node.lower)
        self.buf.write(":")
        if node.upper is not None:
            self.visit(node.upper)
        self.buf.write("]")
    def visitDict(self, node):
        # children alternate key, value, key, value, ...
        self.buf.write("{")
        c = node.getChildren()
        for i in range(0, len(c), 2):
            self.visit(c[i])
            self.buf.write(": ")
            self.visit(c[i+1])
            if i < len(c) - 2:
                self.buf.write(", ")
        self.buf.write("}")
    def visitTuple(self, node):
        # NOTE(review): a one-element tuple is emitted as "(x)" rather than
        # "(x,)" - confirm callers never round-trip single-element tuples
        self.buf.write("(")
        c = node.getChildren()
        for i in range(0, len(c)):
            self.visit(c[i])
            if i < len(c) - 1:
                self.buf.write(", ")
        self.buf.write(")")
    def visitList(self, node):
        self.buf.write("[")
        c = node.getChildren()
        for i in range(0, len(c)):
            self.visit(c[i])
            if i < len(c) - 1:
                self.buf.write(", ")
        self.buf.write("]")
    def visitListComp(self, node):
        self.buf.write("[")
        self.visit(node.expr)
        self.buf.write(" ")
        for n in node.quals:
            self.visit(n)
        self.buf.write("]")
    def visitListCompFor(self, node):
        self.buf.write(" for ")
        self.visit(node.assign)
        self.buf.write(" in ")
        self.visit(node.list)
        for n in node.ifs:
            self.visit(n)
    def visitListCompIf(self, node):
        self.buf.write(" if ")
        self.visit(node.test)
    def visitCompare(self, node):
        # node.ops is a list of (operator-string, operand) pairs
        self.visit(node.expr)
        for tup in node.ops:
            self.buf.write(tup[0])
            self.visit(tup[1])
    def visitCallFunc(self, node, *args):
        self.visit(node.node)
        self.buf.write("(")
        if len(node.args):
            self.visit(node.args[0])
            for a in node.args[1:]:
                self.buf.write(", ")
                self.visit(a)
        self.buf.write(")")
class walker(visitor.ASTVisitor):
    # Debugging aid: prints every node as it is dispatched (Python 2 only,
    # uses the print statement).  Enable by passing walker=walker() to
    # visitor.walk() - see the commented-out call sites above.
    def dispatch(self, node, *args):
        print "Node:", str(node)
        #print "dir:", dir(node)
        return visitor.ASTVisitor.dispatch(self, node, *args)
| gpl-3.0 |
bac/horizon | openstack_dashboard/enabled/_1070_project_ng_details_panel.py | 7 | 1183 | # (c) Copyright 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# The slug of the dashboard the PANEL is associated with. Required.
PANEL_DASHBOARD = 'project'
# The slug of the panel group the PANEL is associated with.
# If you want the panel to show up without a panel group,
# use the panel group "default".
PANEL_GROUP = 'compute'
# The slug of the panel to be added to HORIZON_CONFIG. Required.
PANEL = 'ngdetails'
# If set to True, this settings file will not be added to the settings.
DISABLED = False
# Python panel class (dotted import path) of the PANEL to be added.
ADD_PANEL = 'openstack_dashboard.dashboards.project.ngdetails.panel.NGDetails'
| apache-2.0 |
777jon/g3_kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0  # index of the current value; used both for output and for spacing
while True:
    buf = sys.stdin.read(4)
    if len(buf) == 0:
        # clean EOF on a 4-byte boundary: all values read
        break
    elif len(buf) != 4:
        # trailing partial word: input is corrupt; terminate the output line
        # and report the short read on stderr
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)
    if i > 0:
        sys.stdout.write(" ")
    # each config entry is a packed little-endian unsigned 32-bit integer;
    # emit as "hex_index=decimal_value"
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1
sys.stdout.write("\n")
| gpl-2.0 |
Lujeni/ansible | lib/ansible/modules/network/files/net_get.py | 47 | 2165 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: net_get
version_added: "2.6"
author: "Deepak Agrawal (@dagrawal)"
short_description: Copy a file from a network device to Ansible Controller
description:
- This module provides functionality to copy file from network device to
ansible controller.
extends_documentation_fragment: network_agnostic
options:
src:
description:
- Specifies the source file. The path to the source file can either be
the full path on the network device or a relative path as per path
supported by destination network device.
required: true
protocol:
description:
- Protocol used to transfer file.
default: scp
choices: ['scp', 'sftp']
dest:
description:
- Specifies the destination file. The path to the destination file can
either be the full path on the Ansible control host or a relative
path from the playbook or role root directory.
default:
- Same filename as specified in I(src). The path will be playbook root
or role root directory if playbook is part of a role.
requirements:
- "scp"
notes:
- Some devices need specific configurations to be enabled before scp can work
These configuration should be pre-configured before using this module
e.g ios - C(ip scp server enable).
- User privilege to do scp on network device should be pre-configured
e.g. ios - need user privilege 15 by default for allowing scp.
- Default destination of source file.
"""
EXAMPLES = """
- name: copy file from the network device to Ansible controller
net_get:
src: running_cfg_ios1.txt
- name: copy file from ios to common location at /tmp
net_get:
src: running_cfg_sw1.txt
dest : /tmp/ios1.txt
"""
RETURN = """
"""
| gpl-3.0 |
RaoUmer/django | tests/modeltests/get_or_create/tests.py | 68 | 2499 | from __future__ import absolute_import
from datetime import date
import traceback
from django.db import IntegrityError
from django.test import TestCase
from .models import Person, ManualPrimaryKeyTest
class GetOrCreateTests(TestCase):
    """Tests for QuerySet.get_or_create()."""
    def test_get_or_create(self):
        # Seed one existing Person so the first get_or_create() finds a match.
        p = Person.objects.create(
            first_name='John', last_name='Lennon', birthday=date(1940, 10, 9)
        )
        p, created = Person.objects.get_or_create(
            first_name="John", last_name="Lennon", defaults={
                "birthday": date(1940, 10, 9)
            }
        )
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 1)
        # No match for these lookup kwargs, so a new Person is created.
        p, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )
        self.assertTrue(created)
        self.assertEqual(Person.objects.count(), 2)
        # If we execute the exact same statement, it won't create a Person.
        p, created = Person.objects.get_or_create(
            first_name='George', last_name='Harrison', defaults={
                'birthday': date(1943, 2, 25)
            }
        )
        self.assertFalse(created)
        self.assertEqual(Person.objects.count(), 2)
        # If you don't specify a value or default value for all required
        # fields, you will get an error.
        self.assertRaises(IntegrityError,
            Person.objects.get_or_create, first_name="Tom", last_name="Smith"
        )
        # If you specify an existing primary key, but different other fields,
        # then you will get an error and data will not be updated.
        m = ManualPrimaryKeyTest.objects.create(id=1, data="Original")
        self.assertRaises(IntegrityError,
            ManualPrimaryKeyTest.objects.get_or_create, id=1, data="Different"
        )
        self.assertEqual(ManualPrimaryKeyTest.objects.get(id=1).data, "Original")
        # get_or_create should raise IntegrityErrors with the full traceback.
        # This is tested by checking that a known method call is in the traceback.
        # We cannot use assertRaises here because we need to inspect
        # the actual traceback. Refs #16340.
        try:
            ManualPrimaryKeyTest.objects.get_or_create(id=1, data="Different")
        except IntegrityError as e:
            formatted_traceback = traceback.format_exc()
            self.assertIn('obj.save', formatted_traceback)
| bsd-3-clause |
back-to/streamlink | src/streamlink/plugins/crunchyroll.py | 1 | 13696 | import argparse
import datetime
import re
import logging
from uuid import uuid4
from streamlink.plugin import Plugin, PluginError, PluginArguments, PluginArgument
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
log = logging.getLogger(__name__)
STREAM_WEIGHTS = {
"low": 240,
"mid": 420,
"high": 720,
"ultra": 1080,
}
STREAM_NAMES = {
"120k": "low",
"328k": "mid",
"864k": "high"
}
def parse_timestamp(ts):
    """Takes ISO 8601 format(string) and converts into a utc datetime(naive)"""
    # Base time, with the trailing 7-character "?+HH:MM" offset suffix removed.
    base = datetime.datetime.strptime(ts[:-7], "%Y-%m-%dT%H:%M:%S")
    # Sign of the offset: "+1" or "-1" built from the suffix's sign character.
    sign = int(ts[-6:-5] + "1")
    offset = datetime.timedelta(hours=int(ts[-5:-3]), minutes=int(ts[-2:]))
    return base + sign * offset
_url_re = re.compile(r"""
http(s)?://(\w+\.)?crunchyroll\.
(?:
com|de|es|fr|co.jp
)
(?:/[^/&?]+)?
/[^/&?]+-(?P<media_id>\d+)
""", re.VERBOSE)
_api_schema = validate.Schema({
"error": bool,
validate.optional("code"): validate.text,
validate.optional("message"): validate.text,
validate.optional("data"): object,
})
_media_schema = validate.Schema(
{
"stream_data": validate.any(
None,
{
"streams": validate.all(
[{
"quality": validate.any(validate.text, None),
"url": validate.url(
scheme="http",
path=validate.endswith(".m3u8")
),
validate.optional("video_encode_id"): validate.text
}]
)
}
)
},
validate.get("stream_data")
)
_login_schema = validate.Schema({
"auth": validate.text,
"expires": validate.all(
validate.text,
validate.transform(parse_timestamp)
),
"user": {
"username": validate.any(validate.text, None),
"email": validate.text
}
})
_session_schema = validate.Schema(
{
"session_id": validate.text
},
validate.get("session_id")
)
class CrunchyrollAPIError(Exception):
    """Exception thrown by the Crunchyroll API when an error occurs"""
    def __init__(self, msg, code):
        super(CrunchyrollAPIError, self).__init__(msg)
        # keep the raw message and the API error code available to callers
        self.msg = msg
        self.code = code
class CrunchyrollAPI(object):
    """Minimal client for the Crunchyroll mobile (Android) JSON API."""
    _api_url = "https://api.crunchyroll.com/{0}.0.json"
    _default_locale = "en_US"
    # The API only answers requests that look like they come from the official
    # Android application, hence the hard-coded client identity below.
    _user_agent = "Dalvik/1.6.0 (Linux; U; Android 4.4.2; Android SDK built for x86 Build/KK)"
    _version_code = 444
    _version_name = "2.1.10"
    _access_token = "Scwg9PRRZ19iVwD"
    _access_type = "com.crunchyroll.crunchyroid"
    def __init__(self, cache, session, session_id=None, locale=_default_locale):
        """Abstract the API to access to Crunchyroll data.

        Can take saved credentials to use on it's calls to the API.

        :param cache: plugin cache; persists the auth token and device id
        :param session: Streamlink session used for HTTP requests
        :param session_id: optional externally supplied API session id
        :param locale: locale code such as "en_US"
        """
        self.cache = cache
        self.session = session
        self.session_id = session_id
        if self.session_id:  # if the session ID is setup don't use the cached auth token
            self.auth = None
        else:
            self.auth = cache.get("auth")
        self.device_id = cache.get("device_id") or self.generate_device_id()
        self.locale = locale
        self.headers = {
            "X-Android-Device-Is-GoogleTV": "0",
            "X-Android-Device-Product": "google_sdk_x86",
            "X-Android-Device-Model": "Android SDK built for x86",
            "Using-Brightcove-Player": "1",
            "X-Android-Release": "4.4.2",
            "X-Android-SDK": "19",
            "X-Android-Application-Version-Name": self._version_name,
            "X-Android-Application-Version-Code": str(self._version_code),
            'User-Agent': self._user_agent
        }
    def _api_call(self, entrypoint, params=None, schema=None):
        """Makes a call against the api.

        :param entrypoint: API method to call.
        :param params: parameters to include in the request data.
        :param schema: schema to use to validate the data
        :raises CrunchyrollAPIError: when the API reports an error
        """
        url = self._api_url.format(entrypoint)
        # Default params
        params = params or {}
        if self.session_id:
            params.update({
                "session_id": self.session_id
            })
        else:
            # without a session the device/application identity must be sent
            params.update({
                "device_id": self.device_id,
                "device_type": self._access_type,
                "access_token": self._access_token,
                "version": self._version_code
            })
        params.update({
            "locale": self.locale.replace('_', ''),
        })
        if self.session_id:
            params["session_id"] = self.session_id
        # The certificate used by Crunchyroll cannot be verified in some environments.
        res = self.session.http.post(url, data=params, headers=self.headers, verify=False)
        json_res = self.session.http.json(res, schema=_api_schema)
        if json_res["error"]:
            err_msg = json_res.get("message", "Unknown error")
            err_code = json_res.get("code", "unknown_error")
            raise CrunchyrollAPIError(err_msg, err_code)
        data = json_res.get("data")
        if schema:
            data = schema.validate(data, name="API response")
        return data
    def generate_device_id(self):
        """Create a random device ID and cache it for a year."""
        device_id = str(uuid4())
        # cache the device id so subsequent runs reuse the same identity
        # (bugfix: the id itself was previously not passed to cache.set(),
        # which stored the expiry time as the cached value instead)
        self.cache.set("device_id", device_id, 365 * 24 * 60 * 60)
        log.debug("Device ID: {0}".format(device_id))
        return device_id
    def start_session(self):
        """
        Starts a session against Crunchyroll's server.
        Is recommended that you call this method before making any other calls
        to make sure you have a valid session against the server.
        """
        params = {}
        if self.auth:
            params["auth"] = self.auth
        self.session_id = self._api_call("start_session", params, schema=_session_schema)
        log.debug("Session created with ID: {0}".format(self.session_id))
        return self.session_id
    def login(self, username, password):
        """
        Authenticates the session to be able to access restricted data from
        the server (e.g. premium restricted videos).
        """
        params = {
            "account": username,
            "password": password
        }
        login = self._api_call("login", params, schema=_login_schema)
        self.auth = login["auth"]
        # cache the token until the server-side expiry time
        self.cache.set("auth", login["auth"], expires_at=login["expires"])
        return login
    def authenticate(self):
        """Refresh an existing auth token and re-cache it."""
        data = self._api_call("authenticate", {"auth": self.auth}, schema=_login_schema)
        self.auth = data["auth"]
        self.cache.set("auth", data["auth"], expires_at=data["expires"])
        return data
    def get_info(self, media_id, fields=None, schema=None):
        """
        Returns the data for a certain media item.

        :param media_id: id that identifies the media item to be accessed.
        :param fields: list of the media"s field to be returned. By default the
        API returns some fields, but others are not returned unless they are
        explicity asked for. I have no real documentation on the fields, but
        they all seem to start with the "media." prefix (e.g. media.name,
        media.stream_data).
        :param schema: validation schema to use
        """
        params = {"media_id": media_id}
        if fields:
            params["fields"] = ",".join(fields)
        return self._api_call("info", params, schema=schema)
class Crunchyroll(Plugin):
    """Streamlink plugin for Crunchyroll VODs (crunchyroll.com and
    regional domains)."""
    arguments = PluginArguments(
        PluginArgument(
            "username",
            metavar="USERNAME",
            requires=["password"],
            help="A Crunchyroll username to allow access to restricted streams."
        ),
        PluginArgument(
            "password",
            sensitive=True,
            metavar="PASSWORD",
            nargs="?",
            const=None,
            default=None,
            help="""
            A Crunchyroll password for use with --crunchyroll-username.
            If left blank you will be prompted.
            """
        ),
        PluginArgument(
            "purge-credentials",
            action="store_true",
            help="""
            Purge cached Crunchyroll credentials to initiate a new session
            and reauthenticate.
            """
        ),
        PluginArgument(
            "session-id",
            sensitive=True,
            metavar="SESSION_ID",
            help="""
            Set a specific session ID for crunchyroll, can be used to bypass
            region restrictions. If using an authenticated session ID, it is
            recommended that the authentication parameters be omitted as the
            session ID is account specific.
            Note: The session ID will be overwritten if authentication is used
            and the session ID does not match the account.
            """
        ),
        # Deprecated, uses the general locale setting
        PluginArgument(
            "locale",
            metavar="LOCALE",
            help=argparse.SUPPRESS
        )
    )
    @classmethod
    def can_handle_url(self, url):
        # NOTE(review): first parameter of a classmethod is conventionally
        # named "cls"; behavior is unaffected.
        return _url_re.match(url)
    @classmethod
    def stream_weight(cls, key):
        # prefer the plugin's explicit quality table; fall back to the
        # generic Plugin weighting for unknown keys
        weight = STREAM_WEIGHTS.get(key)
        if weight:
            return weight, "crunchyroll"
        return Plugin.stream_weight(key)
    def _get_streams(self):
        # Resolve the media id from the URL and build the quality->stream map.
        api = self._create_api()
        match = _url_re.match(self.url)
        media_id = int(match.group("media_id"))
        try:
            # the media.stream_data field is required, no stream data is returned otherwise
            info = api.get_info(media_id, fields=["media.stream_data"], schema=_media_schema)
        except CrunchyrollAPIError as err:
            raise PluginError(u"Media lookup error: {0}".format(err.msg))
        if not info:
            return
        streams = {}
        # The adaptive quality stream is sometimes a subset of all the other streams listed; ultra is not included
        has_adaptive = any([s[u"quality"] == u"adaptive" for s in info[u"streams"]])
        if has_adaptive:
            self.logger.debug(u"Loading streams from adaptive playlist")
            for stream in filter(lambda x: x[u"quality"] == u"adaptive", info[u"streams"]):
                for q, s in HLSStream.parse_variant_playlist(self.session, stream[u"url"]).items():
                    # rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
                    name = STREAM_NAMES.get(q, q)
                    streams[name] = s
        # If there is no adaptive quality stream then parse each individual result
        for stream in info[u"streams"]:
            if stream[u"quality"] != u"adaptive":
                # the video_encode_id indicates that the stream is not a variant playlist
                if u"video_encode_id" in stream:
                    streams[stream[u"quality"]] = HLSStream(self.session, stream[u"url"])
                else:
                    # otherwise the stream url is actually a list of stream qualities
                    for q, s in HLSStream.parse_variant_playlist(self.session, stream[u"url"]).items():
                        # rename the bitrates to low, mid, or high. ultra doesn't seem to appear in the adaptive streams
                        name = STREAM_NAMES.get(q, q)
                        streams[name] = s
        return streams
    def _create_api(self):
        """Creates a new CrunchyrollAPI object, initiates it's session and
        tries to authenticate it either by using saved credentials or the
        user's username and password.
        """
        if self.options.get("purge_credentials"):
            self.cache.set("session_id", None, 0)
            self.cache.set("auth", None, 0)
            # NOTE(review): duplicate of the "session_id" purge two lines
            # above; harmless but redundant.
            self.cache.set("session_id", None, 0)
        # use the crunchyroll locale as an override, for backwards compatibility
        locale = self.get_option("locale") or self.session.localization.language_code
        api = CrunchyrollAPI(self.cache,
                             self.session,
                             session_id=self.get_option("session_id"),
                             locale=locale)
        if not self.get_option("session_id"):
            self.logger.debug("Creating session with locale: {0}", locale)
            api.start_session()
            if api.auth:
                # a cached auth token exists; refresh it instead of logging in
                self.logger.debug("Using saved credentials")
                login = api.authenticate()
                self.logger.info("Successfully logged in as '{0}'",
                                 login["user"]["username"] or login["user"]["email"])
            elif self.options.get("username"):
                try:
                    self.logger.debug("Attempting to login using username and password")
                    api.login(self.options.get("username"),
                              self.options.get("password"))
                    login = api.authenticate()
                    self.logger.info("Logged in as '{0}'",
                                     login["user"]["username"] or login["user"]["email"])
                except CrunchyrollAPIError as err:
                    raise PluginError(u"Authentication error: {0}".format(err.msg))
            else:
                self.logger.warning(
                    "No authentication provided, you won't be able to access "
                    "premium restricted content"
                )
        return api
__plugin__ = Crunchyroll
| bsd-2-clause |
chjost/clebsch_gordan | group/quat.py | 1 | 6929 | """Class for the quaternions with inversion."""
import numpy as np
import utils
# quaternion parameters for the group O from Table 71.1 in:
# Simon L. Altmann, Peter Herzig, "Point-Group Theory Tables",
# Second Edition (corrected), Wien (2011)
V12 = np.sqrt(0.5) # sqrt(1/2)
# [[ lambda, Lambda_1, Lambda_2, Lambda_3 ]]
qPar = np.asarray(
[[ 1.0, 0.0, 0.0, 0.0 ],
[ 0.0, 1.0, 0.0, 0.0 ],
[ 0.0, 0.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.0, 1.0 ],
[ 0.5, 0.5, 0.5, 0.5 ],
[ 0.5,-0.5,-0.5, 0.5 ],
[ 0.5, 0.5,-0.5,-0.5 ],
[ 0.5,-0.5, 0.5,-0.5 ],
[ 0.5,-0.5,-0.5,-0.5 ],
[ 0.5, 0.5, 0.5,-0.5 ],
[ 0.5,-0.5, 0.5, 0.5 ],
[ 0.5, 0.5,-0.5, 0.5 ],
[ V12, V12, 0.0, 0.0 ],
[ V12, 0.0, V12, 0.0 ],
[ V12, 0.0, 0.0, V12 ],
[ V12,-V12, 0.0, 0.0 ],
[ V12, 0.0,-V12, 0.0 ],
[ V12, 0.0, 0.0,-V12 ],
[ 0.0, V12, V12, 0.0 ],
[ 0.0,-V12, V12, 0.0 ],
[ 0.0, V12, 0.0, V12 ],
[ 0.0, 0.0,-V12,-V12 ],
[ 0.0, V12, 0.0,-V12 ],
[ 0.0, 0.0,-V12, V12 ]])
class QNew(object):
    """Quaternion augmented with an inversion flag.

    ``q`` holds the four real components (scalar part first); ``i`` is +1 or
    -1 and tracks spatial inversion, so an instance represents an element of
    the full (improper) rotation group.
    """
    def __init__(self):
        # zero quaternion with positive inversion; use create_from_vector()
        # to construct meaningful instances
        self.q = np.zeros((4,))
        self.i = int(1)
        # tolerance used for the norm checks in rotation_matrix()
        self.prec = 1e-6
    @classmethod
    def create_from_vector(cls, vector, inversion):
        """Build a QNew from a 4-component vector and an inversion flag."""
        tmp = cls()
        _vec = np.asarray(vector)
        tmp.q = _vec.copy()
        _inv = int(inversion)
        tmp.i = _inv
        return tmp
    def __eq__(self, other):
        if not isinstance(other, QNew):
            return False
        if np.allclose(self.q, other.q) and self.i == other.i:
            return True
        return False
    def __ne__(self, other):
        if not isinstance(other, QNew):
            return True
        if not np.allclose(self.q, other.q) or self.i != other.i:
            return True
        return False
    def __abs__(self):
        """Euclidean length of the quaternion components."""
        return np.sqrt(np.dot(self.q, self.q))
    def __neg__(self):
        # Bugfix: the previous implementation negated self *in place* and
        # returned self, so evaluating "-q" silently modified q.  Unary minus
        # must be side-effect free; return a negated copy instead.
        return QNew.create_from_vector(-self.q, -self.i)
    def __mul__(self, other):
        """Hamilton product; the inversion flags multiply."""
        q1 = self.q
        q2 = other.q
        tvec = np.zeros_like(q1)
        tvec[0] = q1[0]*q2[0] - q1[1]*q2[1] - q1[2]*q2[2] - q1[3]*q2[3]
        tvec[1] = q1[0]*q2[1] + q1[1]*q2[0] + q1[2]*q2[3] - q1[3]*q2[2]
        tvec[2] = q1[0]*q2[2] - q1[1]*q2[3] + q1[2]*q2[0] + q1[3]*q2[1]
        tvec[3] = q1[0]*q2[3] + q1[1]*q2[2] - q1[2]*q2[1] + q1[3]*q2[0]
        tinv = self.i * other.i
        return QNew.create_from_vector(tvec, tinv)
    def conj(self):
        """Quaternion conjugate; the inversion flag is unchanged."""
        tvec = self.q * np.asarray([1., -1., -1., -1.])
        return QNew.create_from_vector(tvec, self.i)
    def norm(self):
        """Squared norm q.q (not the length; see __abs__)."""
        return np.dot(self.q, self.q)
    def __str__(self):
        return "[%r, %r, %r, %r] with inversion %d" % (self.q[0], self.q[1], self.q[2], self.q[3], self.i)
    def comp(self, vec):
        """Return True if the components match vec within tolerance."""
        return np.allclose(self.q, vec)
    # code inspired by the quaternion package of moble
    # https://github.com/moble/quaternion
    def rotation_matrix(self, inv=False):
        """Return the 3x3 rotation matrix represented by this quaternion.

        If inv is True the matrix is multiplied by the inversion flag,
        producing the full O(3) matrix.
        Raises ZeroDivisionError for a (near-)zero quaternion.
        """
        n = self.norm()
        if np.abs(n) < self.prec:
            raise ZeroDivisionError("Norm of quaternion is zero.")
        _q = self.q
        if np.abs(1-n) < self.prec:
            # unit quaternion: skip the division by the norm
            res = np.array(
                [[1-2*(_q[2]**2 + _q[3]**2), 2*(_q[1]*_q[2] - _q[3]*_q[0]),
                  2*(_q[1]*_q[3] + _q[2]*_q[0])],
                 [2*(_q[1]*_q[2] + _q[3]*_q[0]), 1-2*(_q[1]**2 + _q[3]**2),
                  2*(_q[2]*_q[3] - _q[1]*_q[0])],
                 [2*(_q[1]*_q[3] - _q[2]*_q[0]), 2*(_q[2]*_q[3] + _q[1]*_q[0]),
                  1-2*(_q[1]**2 + _q[2]**2)]])
        else:
            res = np.array(
                [[1-2*(_q[2]**2 + _q[3]**2)/n, 2*(_q[1]*_q[2] - _q[3]*_q[0])/n,
                  2*(_q[1]*_q[3] + _q[2]*_q[0])/n],
                 [2*(_q[1]*_q[2] + _q[3]*_q[0])/n, 1-2*(_q[1]**2 + _q[3]**2)/n,
                  2*(_q[2]*_q[3] - _q[1]*_q[0])/n],
                 [2*(_q[1]*_q[3] - _q[2]*_q[0])/n, 2*(_q[2]*_q[3] + _q[1]*_q[0])/n,
                  1-2*(_q[1]**2 + _q[2]**2)/n]])
        if inv:
            res *= self.i
        return res
    def base(self, inv=False):
        # placeholder; intentionally not implemented yet
        pass
    def R(self, j, mp, m):
        """compute transformation matrix element
              j          j   0   1   2   3
             R   (Q) =  R  (Q , Q , Q , Q )
              m'm        m'm
        in
                       -j
                       __
                j      \     j   j
            [R.u]  =   /__  u   R    (Q) ,
                m      m'=j  m'  m'm
        according to the formula:
                  __     ___________________________
           j      \     /(j-m')(j+m' )(j-m )(j+m)       j+m-k    j-m'-k   m'-m+k      k
          R     = /__ \/ (  k )(m'-m+k)(m'-m+k)(  k ) (a)    (a*)      (b)      (-b*)
           m'm     k
                     0     3         2     1
        where a := Q  - i.Q  ; b := -Q  -i.Q  .
        first three arguments to be provided as multiplicities:
        [J] = 2j+1, [M] = 2m+1, [MP] = 2m'+1, these are always integer
        [-3/2] --> -2; [-1] --> -1; [-1/2] --> 0; [0] --> 1; [1/2] --> 2, etc.
        """
        a = complex( self.q[0], -self.q[3] )
        ac = complex( self.q[0], self.q[3] ) # complex conjugate of a
        b = complex(-self.q[2], -self.q[1] )
        mbc = complex( self.q[2], -self.q[1] ) # - complex conjugate of b
        res = complex( 0.0 )
        j_p_mp = ( j + mp - 2 ) // 2 # j+m'
        j_m_mp = ( j - mp ) // 2 # j-m'
        j_p_m = ( j + m - 2 ) // 2 # j+m
        j_m_m = ( j - m ) // 2 # j-m
        # out-of-range quantum numbers contribute nothing
        if j_p_mp < 0 or j_m_mp < 0 or j_p_m < 0 or j_m_m < 0:
            return res
        # prepare constant arrays for the binomial factors of the summation
        mp_m_m = j_p_mp - j_p_m
        n = np.asarray([j_m_mp, j_p_mp, j_m_m, j_p_m])
        kp = np.asarray([0, mp_m_m, mp_m_m, 0])
        _a = np.asarray([a, ac, b, mbc])
        aexp = np.asarray([j_p_m, j_m_mp, mp_m_m, 0])
        # get range for the summation index k
        k_mx = int(j_p_m if (j_p_m < j_m_mp) else j_m_mp)
        k_mn = int(-j_p_mp+j_p_m if (-j_p_mp+j_p_m > 0) else 0)
        for k in range(k_mn, k_mx+1):
            _k = kp + k
            factor = np.sqrt(np.prod(utils.binomial(n, _k))*complex(1.))
            _aexp = aexp + np.asarray([-k, -k, k, k])
            prod = np.prod(np.power(_a, _aexp))
            res += factor * prod
        return res
    def R_matrix(self, j):
        """Return the full (2j+1)x(2j+1) transformation matrix for spin j."""
        multi = int(2*j+1)
        res = np.zeros((multi, multi), dtype=complex)
        # the sorting is important, start at largest m
        # and decrease
        for im in range(multi):
            m = j - im
            for imp in range(multi):
                mp = j - imp
                res[im, imp] = self.R(multi, int(2*m+1), int(2*mp+1))
        # half-integer spin? no: odd integer j picks up the inversion flag
        if j%2:
            res *= self.i
        return res
    def omega(self):
        """Rotation angle encoded by the quaternion: 2*arccos(scalar part)."""
        return 2*np.arccos(self.q[0])
if __name__ == "__main__":
print("for checks execute the test script")
| gpl-3.0 |
mozilla/captain | vendor/lib/python/django_browserid/tests/test_auth.py | 10 | 7619 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from django.conf import settings
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.test import TestCase
from mock import ANY, Mock, patch
from django_browserid.auth import BrowserIDBackend, default_username_algo, verify
from django_browserid.tests import mock_browserid
try:
from django.contrib.auth import get_user_model
from django_browserid.tests.models import CustomUser
except ImportError:
get_user_model = False
def new_user(email, username=None):
    """Creates a user with the specified email for testing."""
    # derive a deterministic username from the email unless one was supplied
    chosen = username if username is not None else default_username_algo(email)
    return User.objects.create_user(chosen, email)
class BrowserIDBackendTests(TestCase):
def auth(self, verified_email=None, browserid_extra=None):
    """
    Attempt to authenticate a user with BrowserIDBackend.

    If verified_email is None, verification will fail, otherwise it will
    pass and return the specified email.
    """
    # mock_browserid patches the verification call, so no network is hit;
    # the assertion/audience values are dummies the mock never validates
    with mock_browserid(verified_email):
        backend = BrowserIDBackend()
        return backend.authenticate(assertion='asdf', audience='asdf', browserid_extra=browserid_extra)
def test_failed_verification(self):
# If verification fails, return None.
self.assertTrue(self.auth(None) is None)
def test_duplicate_emails(self):
# If there are two users with the same email address, return None.
new_user('a@example.com', 'test1')
new_user('a@example.com', 'test2')
self.assertTrue(self.auth('a@example.com') is None)
def test_auth_success(self):
# If a single user is found with the verified email, return an instance
# of their user object.
user = new_user('a@example.com')
self.assertEqual(self.auth('a@example.com'), user)
@patch.object(settings, 'BROWSERID_CREATE_USER', False)
def test_no_create_user(self):
# If user creation is disabled and no user is found, return None.
self.assertTrue(self.auth('a@example.com') is None)
@patch.object(settings, 'BROWSERID_CREATE_USER', True)
def test_create_user(self):
# If user creation is enabled and no user is found, return a new
# User.
user = self.auth('a@example.com')
self.assertTrue(user is not None)
self.assertTrue(isinstance(user, User))
self.assertEqual(user.email, 'a@example.com')
@patch.object(settings, 'BROWSERID_CREATE_USER',
'django_browserid.tests.test_auth.new_user')
@patch('django_browserid.tests.test_auth.new_user')
def test_custom_create_user(self, create_user):
# If user creation is enabled with a custom create function and no user
# is found, return the new user created with the custom function.
create_user.return_value = 'test'
self.assertEqual(self.auth('a@example.com'), 'test')
create_user.assert_called_with('a@example.com')
@patch.object(settings, 'BROWSERID_USERNAME_ALGO')
@patch.object(settings, 'BROWSERID_CREATE_USER', True)
def test_custom_username_algorithm(self, username_algo):
# If a custom username algorithm is specified, use it!
username_algo.return_value = 'test'
user = self.auth('a@b.com')
self.assertEqual(user.username, 'test')
@patch('django_browserid.auth.user_created')
@patch.object(settings, 'BROWSERID_CREATE_USER', True)
def test_user_created_signal(self, user_created):
# Test that the user_created signal is called when a new user is
# created.
user = self.auth('a@b.com')
user_created.send.assert_called_with(ANY, user=user)
@patch('django_browserid.auth.verify', wraps=verify)
def test_verify_called_with_browserid_extra(self, user_verify):
dic = {'a': 'AlphaA'}
self.auth('a@b.com', browserid_extra=dic)
user_verify.assert_called_with('asdf', 'asdf', extra_params=dic)
def test_get_user(self):
# If a user is retrieved by the BrowserIDBackend, it should have
# 'django_browserid.auth.BrowserIDBackend' for the backend attribute.
user = new_user('a@example.com')
backend = BrowserIDBackend()
self.assertEqual(backend.get_user(user.id).backend,
'django_browserid.auth.BrowserIDBackend')
def test_overriding_valid_email(self):
class PickyBackend(BrowserIDBackend):
def is_valid_email(self, email):
return email != 'a@example.com'
new_user('a@example.com', 'test1')
new_user('b@example.com', 'test2')
with mock_browserid('a@example.com'):
backend = PickyBackend()
result = backend.authenticate(assertion='asdf', audience='asdf')
self.assertTrue(not result)
with mock_browserid('b@example.com'):
backend = PickyBackend()
result = backend.authenticate(assertion='asdf', audience='asdf')
self.assertTrue(result)
@patch('django_browserid.auth.logger')
def test_create_user_integrity_error(self, logger):
# If an IntegrityError is raised during user creation, attempt to re-fetch the user in case
# the user was created since we checked for the existing account.
backend = BrowserIDBackend()
backend.User = Mock()
error = IntegrityError()
backend.User.objects.create_user.side_effect = error
backend.User.objects.get.return_value = 'asdf'
self.assertEqual(backend.create_user('a@example.com'), 'asdf')
# If get raises a DoesNotExist exception, re-raise the original exception.
backend.User.DoesNotExist = Exception
backend.User.objects.get.side_effect = backend.User.DoesNotExist
with self.assertRaises(IntegrityError) as e:
backend.create_user('a@example.com')
self.assertEqual(e.exception, error)
if get_user_model:
    # Only run custom user model tests if we're using a version of Django that
    # supports it.
    @patch.object(settings, 'AUTH_USER_MODEL', 'tests.CustomUser')
    class CustomUserModelTests(TestCase):
        """Tests that BrowserIDBackend honours a custom AUTH_USER_MODEL."""
        def _auth(self, backend=None, verified_email=None):
            # Helper mirroring BrowserIDBackendTests.auth, but allowing the
            # backend instance to be swapped out.
            if backend is None:
                backend = BrowserIDBackend()
            with mock_browserid(verified_email):
                return backend.authenticate(assertion='asdf', audience='asdf')
        def test_existing_user(self):
            """If a custom user exists with the given email, return them."""
            user = CustomUser.objects.create(email='a@test.com')
            authed_user = self._auth(verified_email='a@test.com')
            self.assertEqual(user, authed_user)
        @patch.object(settings, 'BROWSERID_CREATE_USER', True)
        def test_create_new_user(self):
            """
            If a custom user does not exist with the given email, create a new
            user and return them.
            """
            class CustomUserBrowserIDBackend(BrowserIDBackend):
                def create_user(self, email):
                    return CustomUser.objects.create(email=email)
            user = self._auth(backend=CustomUserBrowserIDBackend(),
                              verified_email='b@test.com')
            self.assertTrue(isinstance(user, CustomUser))
            self.assertEqual(user.email, 'b@test.com')
| mpl-2.0 |
kmee/odoo-brazil-banking | l10n_br_account_banking_payment_cnab/model/payment_mode.py | 1 | 2946 | # -*- coding: utf-8 -*-
# #############################################################################
#
#
# Copyright (C) 2012 KMEE (http://www.kmee.com.br)
# @author Fernando Marcato Rodrigues
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
from openerp.addons import decimal_precision as dp
from ..constantes import TIPO_SERVICO, FORMA_LANCAMENTO, \
COMPLEMENTO_TIPO_SERVICO, CODIGO_FINALIDADE_TED, AVISO_FAVORECIDO
class PaymentMode(models.Model):
    """CNAB-specific configuration added to Odoo's ``payment.mode``.

    Each field maps to a numbered field of the Brazilian CNAB bank file
    layout (the "Campo Gxxx"/"Campo Pxxx" references in the help texts).
    """
    _inherit = "payment.mode"
    # '1': the bank issues and processes the billing slip (papeleta);
    # '2': the customer issues it and the bank only processes it.
    condicao_emissao_papeleta = fields.Selection(
        [('1', 'Banco emite e Processa'),
         ('2', 'Cliente emite e banco processa'), ],
        u'Condição Emissão de Papeleta', default='1')
    # Interest percentage applied, using the standard account precision.
    cnab_percent_interest = fields.Float(string=u"Percentual de Juros",
                                         digits=dp.get_precision('Account'))
    # Free-text message forwarded to the drawer/guarantor.
    comunicacao_2 = fields.Char("Comunicação para o sacador avalista")
    tipo_servico = fields.Selection(
        selection=TIPO_SERVICO,
        string=u'Tipo de Serviço',
        help=u'Campo G025 do CNAB'
    )
    forma_lancamento = fields.Selection(
        selection=FORMA_LANCAMENTO,
        string=u'Forma Lançamento',
        help=u'Campo G029 do CNAB'
    )
    # NOTE(review): the hard-coded agreement code default looks
    # environment-specific rather than a sensible generic default —
    # confirm whether it should be blank out of the box.
    codigo_convenio = fields.Char(
        size=20,
        string=u'Código do Convênio no Banco',
        help=u'Campo G007 do CNAB',
        default=u'0001222130126',
    )
    codigo_finalidade_doc = fields.Selection(
        selection=COMPLEMENTO_TIPO_SERVICO,
        string=u'Complemento do Tipo de Serviço',
        help=u'Campo P005 do CNAB'
    )
    codigo_finalidade_ted = fields.Selection(
        selection=CODIGO_FINALIDADE_TED,
        string=u'Código Finalidade da TED',
        help=u'Campo P011 do CNAB'
    )
    codigo_finalidade_complementar = fields.Char(
        size=2,
        string=u'Código de finalidade complementar',
        help=u'Campo P013 do CNAB',
    )
    # NOTE(review): Selection keys are normally strings; an integer
    # default (0) may never match a key in AVISO_FAVORECIDO — verify
    # against the constant's definition in ..constantes.
    aviso_ao_favorecido = fields.Selection(
        selection=AVISO_FAVORECIDO,
        string=u'Aviso ao Favorecido',
        help=u'Campo P006 do CNAB',
        default=0,
    )
    # The CNAB export does not fit only into the debit and credit
    # parameters.
| agpl-3.0 |
cortesi/qtile | libqtile/widget/prompt.py | 1 | 27051 | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2011 Aldo Cortesi
# Copyright (c) 2010 Philip Kranz
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2011 Paul Colomiets
# Copyright (c) 2011-2012 roger
# Copyright (c) 2011-2012, 2014 Tycho Andersen
# Copyright (c) 2012 Dustin Lacewell
# Copyright (c) 2012 Laurie Clark-Michalek
# Copyright (c) 2012-2014 Craig Barnes
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2014 ramnes
# Copyright (c) 2014 Sean Vig
# Copyright (C) 2015, Juan Riquelme González
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import glob
import os
import pickle
import six
import string
from collections import OrderedDict, deque
from libqtile.log_utils import logger
from libqtile.command import _SelectError
from . import base
from .. import bar, command, hook, pangocffi, utils, xcbq, xkeysyms
class NullCompleter(object):
    """Completer that performs no completion at all.

    Used when a prompt is started without a completion type; input text is
    passed through untouched.
    """

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = ""

    def actual(self):
        """Return the final value, which is always the empty string."""
        return self.thisfinal

    def reset(self):
        """No state to reset."""
        pass

    def complete(self, txt):
        """Return *txt* unchanged."""
        return txt
class FileCompleter(object):
    """Tab-completer over filesystem paths, cycling through matches."""
    def __init__(self, qtile, _testing=False):
        # _testing is accepted for interface parity with CommandCompleter;
        # it is stored but not otherwise used here.
        self.qtile = qtile
        self.thisfinal = None
        self.reset()
    def actual(self):
        # The actual (full) path of the current completion.
        return self.thisfinal
    def reset(self):
        # Dropping the lookup table forces complete() to rebuild it.
        self.lookup = None
    def complete(self, txt):
        """Returns the next completion for txt, or None if there is no completion"""
        if not self.lookup:
            # First call for this input: build the (display, actual) table.
            self.lookup = []
            # Relative input is completed against the home directory.
            if txt == "" or txt[0] not in "~/":
                txt = "~/" + txt
            path = os.path.expanduser(txt)
            if os.path.isdir(path):
                files = glob.glob(os.path.join(path, "*"))
                prefix = txt
            else:
                files = glob.glob(path + "*")
                prefix = os.path.dirname(txt)
            prefix = prefix.rstrip("/") or "/"
            for f in files:
                display = os.path.join(prefix, os.path.basename(f))
                if os.path.isdir(f):
                    display += "/"
                self.lookup.append((display, f))
            self.lookup.sort()
            self.offset = -1
            # The raw input is appended last so cycling past all matches
            # returns to what the user typed.
            self.lookup.append((txt, txt))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = ret[1]
        return ret[0]
class QshCompleter(object):
    """Tab-completer for qshell command paths (e.g. ``layout.group.``)."""
    def __init__(self, qtile):
        self.qtile = qtile
        self.client = command.CommandRoot(self.qtile)
        self.thisfinal = None
        self.reset()
    def actual(self):
        # Full dotted path of the current completion.
        return self.thisfinal
    def reset(self):
        self.lookup = None
        self.path = ''
        self.offset = -1
    def complete(self, txt):
        # Complete the last dotted component of txt against the objects and
        # commands reachable from the command graph at the preceding path.
        txt = txt.lower()
        if not self.lookup:
            self.lookup = []
            path = txt.split('.')[:-1]
            self.path = '.'.join(path)
            term = txt.split('.')[-1]
            if len(self.path) > 0:
                self.path += '.'
            # The command graph is addressed dynamically via eval on a
            # string we construct ourselves; txt itself is only used for
            # prefix matching, not evaluated.
            contains_cmd = 'self.client.%s_contains' % self.path
            try:
                contains = eval(contains_cmd)
            except AttributeError:
                contains = []
            for obj in contains:
                if obj.lower().startswith(term):
                    self.lookup.append((obj, obj))
            commands_cmd = 'self.client.%scommands()' % self.path
            try:
                commands = eval(commands_cmd)
            except (command.CommandError, AttributeError):
                commands = []
            for cmd in commands:
                if cmd.lower().startswith(term):
                    self.lookup.append((cmd + '()', cmd + '()'))
            self.offset = -1
            # Raw term appended last so cycling wraps back to the input.
            self.lookup.append((term, term))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = self.path + ret[0]
        return self.path + ret[0]
class GroupCompleter(object):
    """Tab-completer that matches against qtile group names."""

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = None
        self.lookup = None
        self.offset = None

    def actual(self):
        """Return the current actual value."""
        return self.thisfinal

    def reset(self):
        """Discard any previously built completion state."""
        self.lookup = None
        self.offset = -1

    def complete(self, txt):
        """Return the next completion for *txt*, cycling through matches."""
        txt = txt.lower()
        if not self.lookup:
            # Build the sorted (display, actual) table once per input,
            # appending the raw input so cycling wraps back to it.
            matches = sorted(
                name for name in self.qtile.groupMap.keys()
                if name.lower().startswith(txt)
            )
            self.lookup = [(name, name) for name in matches]
            self.offset = -1
            self.lookup.append((txt, txt))
        self.offset = (self.offset + 1) % len(self.lookup)
        display, value = self.lookup[self.offset]
        self.thisfinal = value
        return display
class WindowCompleter(object):
    """Tab-completer that matches against client window names."""

    def __init__(self, qtile):
        self.qtile = qtile
        self.thisfinal = None
        self.lookup = None
        self.offset = None

    def actual(self):
        """Returns the current actual value (the matched window id)."""
        return self.thisfinal

    def reset(self):
        """Discard any previously built completion state."""
        self.lookup = None
        self.offset = -1

    def complete(self, txt):
        """Returns the next completion for txt, or None if there is no completion"""
        # Lower-case the input so matching is case-insensitive, mirroring
        # GroupCompleter.  Previously only the window name was lower-cased,
        # so input containing an upper-case letter could never match.
        txt = txt.lower()
        if not self.lookup:
            self.lookup = []
            # Only windows attached to a group are offered for completion.
            for wid, window in self.qtile.windowMap.items():
                if window.group and window.name.lower().startswith(txt):
                    self.lookup.append((window.name, wid))
            self.lookup.sort()
            self.offset = -1
            # Raw input appended last so cycling wraps back to it.
            self.lookup.append((txt, txt))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = ret[1]
        return ret[0]
class CommandCompleter(object):
    """
    Tab-completer over executable commands, searching $PATH or an explicit
    path prefix.

    Parameters
    ==========
    _testing :
        disables reloading of the lookup table to make testing possible.
    """

    # Fallback search path when $PATH is unset.
    DEFAULTPATH = "/bin:/usr/bin:/usr/local/bin"

    def __init__(self, qtile, _testing=False):
        self.lookup = None
        self.offset = -1
        self.thisfinal = None
        self._testing = _testing

    def actual(self):
        """Returns the current actual value"""
        return self.thisfinal

    def executable(self, fpath):
        # True if the current process may execute fpath.
        return os.access(fpath, os.X_OK)

    def reset(self):
        self.lookup = None
        self.offset = -1

    def complete(self, txt):
        """Returns the next completion for txt, or None if there is no completion"""
        if not self.lookup:
            # In testing mode the pre-seeded table is left untouched.
            if not self._testing:
                # Lookup is a set of (display value, actual value) tuples.
                self.lookup = []
                if txt and txt[0] in "~/":
                    # Explicit path prefix: complete against the filesystem.
                    path = os.path.expanduser(txt)
                    if os.path.isdir(path):
                        files = glob.glob(os.path.join(path, "*"))
                        prefix = txt
                    else:
                        files = glob.glob(path + "*")
                        prefix = os.path.dirname(txt)
                    prefix = prefix.rstrip("/") or "/"
                    for f in files:
                        if self.executable(f):
                            display = os.path.join(prefix, os.path.basename(f))
                            if os.path.isdir(f):
                                display += "/"
                            self.lookup.append((display, f))
                else:
                    # Bare command name: scan each directory on $PATH.
                    dirs = os.environ.get("PATH", self.DEFAULTPATH).split(":")
                    for d in dirs:
                        try:
                            d = os.path.expanduser(d)
                            for cmd in glob.iglob(os.path.join(d, "%s*" % txt)):
                                if self.executable(cmd):
                                    self.lookup.append(
                                        (
                                            os.path.basename(cmd),
                                            cmd
                                        ),
                                    )
                        except OSError:
                            pass
            self.lookup.sort()
            self.offset = -1
            # Raw input appended last so cycling wraps back to it.
            self.lookup.append((txt, txt))
        self.offset += 1
        if self.offset >= len(self.lookup):
            self.offset = 0
        ret = self.lookup[self.offset]
        self.thisfinal = ret[1]
        return ret[0]
class Prompt(base._TextBox):
    """A widget that prompts for user input

    Input should be started using the ``.startInput()`` method on this class.
    """
    completers = {
        "file": FileCompleter,
        "qshell": QshCompleter,
        "cmd": CommandCompleter,
        "group": GroupCompleter,
        "window": WindowCompleter,
        None: NullCompleter
    }
    orientations = base.ORIENTATION_HORIZONTAL
    defaults = [("cursor", True, "Show a cursor"),
                ("cursorblink", 0.5, "Cursor blink rate. 0 to disable."),
                ("cursor_color", "bef098",
                 "Color for the cursor and text over it."),
                ("prompt", "{prompt}: ", "Text displayed at the prompt"),
                ("record_history", True, "Keep a record of executed commands"),
                ("max_history", 100,
                 "Commands to keep in history. 0 for no limit."),
                ("ignore_dups_history", False,
                 "Don't store duplicates in history"),
                ("bell_style", "audible",
                 "Alert at the begin/end of the command history. " +
                 "Possible values: 'audible', 'visual' and None."),
                ("visual_bell_color", "ff0000",
                 "Color for the visual bell (changes prompt background)."),
                ("visual_bell_time", 0.2,
                 "Visual bell duration (in seconds).")]

    def __init__(self, name="prompt", **config):
        base._TextBox.__init__(self, "", bar.CALCULATED, **config)
        self.add_defaults(Prompt.defaults)
        self.name = name
        self.active = False
        self.completer = None
        # Define key handlers (action to do when hit an specific key)
        self.keyhandlers = {
            xkeysyms.keysyms['Tab']: self._trigger_complete,
            xkeysyms.keysyms['BackSpace']: self._delete_char(),
            xkeysyms.keysyms['Delete']: self._delete_char(False),
            xkeysyms.keysyms['KP_Delete']: self._delete_char(False),
            xkeysyms.keysyms['Escape']: self._unfocus,
            xkeysyms.keysyms['Return']: self._send_cmd,
            xkeysyms.keysyms['KP_Enter']: self._send_cmd,
            xkeysyms.keysyms['Up']: self._get_prev_cmd,
            xkeysyms.keysyms['KP_Up']: self._get_prev_cmd,
            xkeysyms.keysyms['Down']: self._get_next_cmd,
            xkeysyms.keysyms['KP_Down']: self._get_next_cmd,
            xkeysyms.keysyms['Left']: self._move_cursor(),
            xkeysyms.keysyms['KP_Left']: self._move_cursor(),
            xkeysyms.keysyms['Right']: self._move_cursor("right"),
            xkeysyms.keysyms['KP_Right']: self._move_cursor("right"),
        }
        # Every printable ASCII code point is handled by the generic
        # character writer.  (Replaces a redundant int(hex(x), 16)
        # round-trip, which is the identity on ints.)
        printables = {x: self._write_char
                      for x in range(127) if chr(x) in string.printable}
        self.keyhandlers.update(printables)
        if self.bell_style == "visual":
            self.original_background = self.background
        # If history record is on, get saved history or create history record
        if self.record_history:
            self.history_path = os.path.join(utils.get_cache_dir(),
                                             'prompt_history')
            if os.path.exists(self.history_path):
                with open(self.history_path, 'rb') as f:
                    try:
                        self.history = pickle.load(f)
                        if self.ignore_dups_history:
                            self._dedup_history()
                    except:  # noqa: E722
                        # unfortunately, pickle doesn't wrap its errors, so we
                        # can't detect what's a pickle error and what's not.
                        logger.exception("failed to load prompt history")
                        self.history = {x: deque(maxlen=self.max_history)
                                        for x in self.completers}
                    # self.history of size does not match.
                    if len(self.history) != len(self.completers):
                        self.history = {x: deque(maxlen=self.max_history)
                                        for x in self.completers}
                    if self.max_history != \
                            self.history[list(self.history)[0]].maxlen:
                        self.history = {x: deque(self.history[x],
                                                 self.max_history)
                                        for x in self.completers}
            else:
                self.history = {x: deque(maxlen=self.max_history)
                                for x in self.completers}

    def _configure(self, qtile, bar):
        """Hook into the bar and drop focus when another client takes it."""
        self.markup = True
        base._TextBox._configure(self, qtile, bar)

        def f(win):
            if self.active and not win == self.bar.window:
                self._unfocus()
        hook.subscribe.client_focus(f)

    def startInput(self, prompt, callback,
                   complete=None, strict_completer=False):
        """Run the prompt

        Displays a prompt and starts to take one line of keyboard input from
        the user. When done, calls the callback with the input string as
        argument. If history record is enabled, also allows to browse between
        previous commands with ↑ and ↓, and execute them (untouched or
        modified). When history is exhausted, fires an alert. It tries to
        mimic, in some way, the shell behavior.

        Parameters
        ==========
        complete :
            Tab-completion. Can be None, "cmd", "file", "group", "qshell" or
            "window".
        prompt :
            text displayed at the prompt, e.g. "spawn: "
        callback :
            function to call with returned value.
        complete :
            completer to use.
        strict_completer :
            When True the return value wil be the exact completer result where
            available.
        """
        if self.cursor and self.cursorblink and not self.active:
            self.timeout_add(self.cursorblink, self._blink)
        self.display = self.prompt.format(prompt=prompt)
        self.display = pangocffi.markup_escape_text(self.display)
        self.active = True
        self.userInput = ""
        self.archivedInput = ""
        self.show_cursor = self.cursor
        self.cursor_position = 0
        self.callback = callback
        self.completer = self.completers[complete](self.qtile)
        self.strict_completer = strict_completer
        self._update()
        self.bar.widget_grab_keyboard(self)
        if self.record_history:
            self.completer_history = self.history[complete]
            self.position = len(self.completer_history)

    def calculate_length(self):
        """Width the widget needs; zero when no text is shown."""
        if self.text:
            width = min(
                self.layout.width,
                self.bar.width
            ) + self.actual_padding * 2
            return width
        else:
            return 0

    def _blink(self):
        # Toggle cursor visibility and re-arm the timer while active.
        self.show_cursor = not self.show_cursor
        self._update()
        if self.active:
            self.timeout_add(self.cursorblink, self._blink)

    def _highlight_text(self, text):
        # Wrap text in Pango markup for the cursor color / underline.
        color = utils.hex(self.cursor_color)
        text = '<span foreground="{0}">{1}</span>'.format(color, text)
        if self.show_cursor:
            text = '<u>{}</u>'.format(text)
        return text

    def _update(self):
        if self.active:
            self.text = self.archivedInput or self.userInput
            cursor = pangocffi.markup_escape_text(" ")
            if self.cursor_position < len(self.text):
                txt1 = self.text[:self.cursor_position]
                txt2 = self.text[self.cursor_position]
                txt3 = self.text[self.cursor_position + 1:]
                # Escape each piece before injecting Pango markup.  The
                # previous loop only rebound its loop variable and never
                # escaped anything, so markup characters in the typed text
                # leaked into the layout.
                txt1, txt2, txt3 = [pangocffi.markup_escape_text(t)
                                    for t in (txt1, txt2, txt3)]
                txt2 = self._highlight_text(txt2)
                self.text = "{0}{1}{2}{3}".format(txt1, txt2, txt3, cursor)
            else:
                self.text = pangocffi.markup_escape_text(self.text)
                self.text += self._highlight_text(cursor)
            self.text = self.display + self.text
        else:
            self.text = ""
        self.bar.draw()

    def _trigger_complete(self):
        # Trigger the auto completion in user input
        self.userInput = self.completer.complete(self.userInput)
        self.cursor_position = len(self.userInput)

    def _history_to_input(self):
        # Move actual command (when exploring history) to user input and update
        # history position (right after the end)
        if self.archivedInput:
            self.userInput = self.archivedInput
            self.archivedInput = ""
            self.position = len(self.completer_history)

    def _insert_before_cursor(self, charcode):
        # Insert a character (given their charcode) in input, before the cursor
        txt1 = self.userInput[:self.cursor_position]
        txt2 = self.userInput[self.cursor_position:]
        self.userInput = txt1 + chr(charcode) + txt2
        self.cursor_position += 1

    def _delete_char(self, backspace=True):
        # Return a function that deletes character from the input text.
        # If backspace is True, function will emulate backspace, else Delete.
        def f():
            self._history_to_input()
            step = -1 if backspace else 0
            if not backspace and self.cursor_position == len(self.userInput):
                self._alert()
            elif len(self.userInput) > 0 and self.cursor_position + step > -1:
                txt1 = self.userInput[:self.cursor_position + step]
                txt2 = self.userInput[self.cursor_position + step + 1:]
                self.userInput = txt1 + txt2
                if step:
                    self.cursor_position += step
            else:
                self._alert()
        return f

    def _write_char(self):
        # Add pressed (legal) char key to user input.
        # No LookupString in XCB... oh, the shame! Unicode users beware!
        self._history_to_input()
        self._insert_before_cursor(self.key)

    def _unfocus(self):
        # Remove focus from the widget
        self.active = False
        self._update()
        self.bar.widget_ungrab_keyboard()

    def _send_cmd(self):
        # Send the prompted text for execution
        self._unfocus()
        if self.strict_completer:
            self.userInput = self.actual_value or self.userInput
            del self.actual_value
        self._history_to_input()
        if self.userInput:
            # If history record is activated, also save command in history
            if self.record_history:
                # ensure no dups in history
                if self.ignore_dups_history and (self.userInput in self.completer_history):
                    self.completer_history.remove(self.userInput)
                    self.position -= 1
                self.completer_history.append(self.userInput)
                if self.position < self.max_history:
                    self.position += 1
                if six.PY3:
                    os.makedirs(os.path.dirname(self.history_path), exist_ok=True)
                else:
                    try:
                        os.makedirs(os.path.dirname(self.history_path))
                    except OSError:  # file exists
                        pass
                with open(self.history_path, mode='wb') as f:
                    pickle.dump(self.history, f, protocol=2)
            self.callback(self.userInput)

    def _alert(self):
        # Fire an alert (audible or visual), if bell style is not None.
        if self.bell_style == "audible":
            self.qtile.conn.conn.core.Bell(0)
        elif self.bell_style == "visual":
            self.background = self.visual_bell_color
            self.timeout_add(self.visual_bell_time, self._stop_visual_alert)

    def _stop_visual_alert(self):
        self.background = self.original_background
        self._update()

    def _get_prev_cmd(self):
        # Get the previous command in history.
        # If there isn't more previous commands, ring system bell
        if self.record_history:
            if not self.position:
                self._alert()
            else:
                self.position -= 1
                self.archivedInput = self.completer_history[self.position]
                self.cursor_position = len(self.archivedInput)

    def _get_next_cmd(self):
        # Get the next command in history.
        # If the last command was already reached, ring system bell.
        if self.record_history:
            if self.position == len(self.completer_history):
                self._alert()
            elif self.position < len(self.completer_history):
                self.position += 1
                if self.position == len(self.completer_history):
                    self.archivedInput = ""
                else:
                    self.archivedInput = self.completer_history[self.position]
                self.cursor_position = len(self.archivedInput)

    def _cursor_to_left(self):
        # Move cursor to left, if possible
        if self.cursor_position:
            self.cursor_position -= 1
        else:
            self._alert()

    def _cursor_to_right(self):
        # move cursor to right, if possible
        command = self.archivedInput or self.userInput
        if self.cursor_position < len(command):
            self.cursor_position += 1
        else:
            self._alert()

    def _move_cursor(self, direction="left"):
        # Move the cursor to left or right, according to direction
        if direction == "left":
            return self._cursor_to_left
        elif direction == "right":
            return self._cursor_to_right

    def _get_keyhandler(self, k):
        # Return the action (a function) to do according the pressed key (k).
        self.key = k
        if k in self.keyhandlers:
            if k != xkeysyms.keysyms['Tab']:
                self.actual_value = self.completer.actual()
                self.completer.reset()
            return self.keyhandlers[k]

    def handle_KeyPress(self, e):
        """KeyPress handler for the minibuffer.

        Currently only supports ASCII characters.
        """
        mask = xcbq.ModMasks["shift"] | xcbq.ModMasks["lock"]
        state = 1 if e.state & mask else 0
        keysym = self.qtile.conn.code_to_syms[e.detail][state]
        handle_key = self._get_keyhandler(keysym)
        if handle_key:
            handle_key()
            del self.key
        self._update()

    def cmd_fake_keypress(self, key):
        """Simulate a keypress by name, mainly for testing."""
        class Dummy(object):
            pass
        d = Dummy()
        keysym = xcbq.keysyms[key]
        d.detail = self.qtile.conn.keysym_to_keycode(keysym)
        d.state = 0
        self.handle_KeyPress(d)

    def cmd_info(self):
        """Returns a dictionary of info for this object"""
        return dict(
            name=self.name,
            width=self.width,
            text=self.text,
            active=self.active,
        )

    def cmd_exec_general(
            self, prompt, object_name, cmd_name, selector=None, completer=None):
        """
        Execute a cmd of any object. For example layout, group, window, widget
        , etc with a string that is obtained from startInput.

        Parameters
        ==========
        prompt :
            Text displayed at the prompt.
        object_name :
            Name of a object in Qtile. This string has to be 'layout', 'widget',
            'bar', 'window' or 'screen'.
        cmd_name :
            Execution command of selected object using object_name and selector.
        selector :
            This value select a specific object within a object list that is
            obtained by object_name.
            If this value is None, current object is selected. e.g. current layout,
            current window and current screen.
        completer:
            Completer to use.

        config example:
            Key([alt, 'shift'], 'a',
                lazy.widget['prompt'].exec_general(
                    'section(add)',
                    'layout',
                    'add_section'))
        """
        try:
            obj = self.qtile.select([(object_name, selector)])
        except _SelectError:
            # logger.warn is deprecated in the stdlib logging module.
            logger.warning("cannot select a object")
            return
        cmd = obj.command(cmd_name)
        if not cmd:
            logger.warning("command not found")
            return

        def f(args):
            if args:
                cmd(args)
        self.startInput(prompt, f, completer)

    def _dedup_history(self):
        """Filter the history deque, clearing all duplicate values."""
        self.history = {x: self._dedup_deque(self.history[x])
                        for x in self.completers}

    def _dedup_deque(self, dq):
        # fromkeys preserves last-insertion order, dropping earlier dups.
        return deque(_LastUpdatedOrderedDict.fromkeys(dq))
class _LastUpdatedOrderedDict(OrderedDict):
"""Store items in the order the keys were last added."""
def __setitem__(self, key, value):
if key in self:
del self[key]
OrderedDict.__setitem__(self, key, value)
| mit |
paterson/servo | tests/wpt/harness/wptrunner/browsers/__init__.py | 134 | 1535 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
"""Subpackage where each product is defined. Each product is created by adding a
a .py file containing a __wptrunner__ variable in the global scope. This must be
a dictionary with the fields
"product": Name of the product, assumed to be unique.
"browser": String indicating the Browser implementation used to launch that
product.
"executor": Dictionary with keys as supported test types and values as the name
of the Executor implemantation that will be used to run that test
type.
"browser_kwargs": String naming function that takes product, binary,
prefs_root and the wptrunner.run_tests kwargs dict as arguments
and returns a dictionary of kwargs to use when creating the
Browser class.
"executor_kwargs": String naming a function that takes http server url and
timeout multiplier and returns kwargs to use when creating
the executor class.
"env_options": String naming a funtion of no arguments that returns the
arguments passed to the TestEnvironment.
All classes and functions named in the above dict must be imported into the
module global scope.
"""
product_list = ["b2g",
"chrome",
"firefox",
"servo",
"servodriver"]
| mpl-2.0 |
3nd0y/esp8266-dht22 | lib/ArduinoJson-master/third-party/gtest-1.7.0/test/gtest_output_test.py | 1733 | 12005 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# Each COMMAND_* below is an (extra environment variables, argv) pair that
# is used to invoke the test program.
# At least one command we exercise must not have the
# --gtest_internal_skip_environment_and_ad_hoc_tests flag.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          '--gtest_internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         '--gtest_internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     '--gtest_internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])
# Where the checked-in golden output lives.
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""
  # CRLF must be collapsed before bare CR so Windows endings do not
  # become double newlines.
  for ending in ('\r\n', '\r'):
    s = s.replace(ending, '\n')
  return s
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE_NAME:#: '.
  """
  location_re = re.compile(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ')
  return location_re.sub(r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces the body of every stack trace with a '(omitted)' placeholder."""
  # Non-greedy (.|\n)*? keeps each substitution confined to a single trace,
  # which ends at the first blank line.
  placeholder = 'Stack trace: (omitted)\n\n'
  return re.sub(r'Stack trace:(.|\n)*?\n\n', placeholder, output)
def RemoveStackTraces(output):
  """Removes all traces of stack traces from a Google Test program's output."""
  # Non-greedy match: delete from 'Stack trace:' up to the first blank line.
  trace_re = re.compile(r'Stack trace:(.|\n)*?\n\n')
  return trace_re.sub('', output)
def RemoveTime(output):
  """Replaces every '(N ms' duration with the stable form '(? ms'."""
  return re.sub(r'\(\d+ ms', '(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
       test_output:  the output of a Google Test program.

  Returns:
       output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as just 'unsigned';
  # normalize to the short form.
  return re.sub(r'unsigned int', 'unsigned', test_output)
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Non-Windows output is already in canonical form.
    return test_output
  # Removes the color information that is not present on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Changes failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Changes file(line_number) references to file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Removes test counts from a Google Test program's output."""
  # Applied in order; the broader patterns must run before the narrower ones.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output:       A string containing the test output.
    pattern:           A regex string that matches names of test cases or
                       tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First wipe each whole RUN..FAILED/OK section for a matching test, then
  # drop any stray single lines still mentioning the pattern.
  section_re = (
      r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % (
          pattern, pattern))
  test_output = re.sub(section_re, '', test_output)
  leftover_line_re = r'.*%s.*\n' % pattern
  return re.sub(leftover_line_re, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Apply each scrubber in sequence: line endings, file locations,
  # stack-trace bodies, then timing figures.
  for scrub in (ToUnixLineEnding, RemoveLocations,
                RemoveStackTraceDetails, RemoveTime):
    output = scrub(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  # NOTE(review): element 1 is in practice an argv list (see the COMMAND_*
  # constants above), despite the docstring saying "a string".

  # Spawns cmd in a sub-process, and gets its standard I/O file objects.
  # Set and save the environment properly.
  environ = os.environ.copy()
  environ.update(env_cmd[0])
  p = gtest_test_utils.Subprocess(env_cmd[1], env=environ)
  return p.output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its normalized output.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    The command's output with file-location info stripped off.
  """
  extra_env, cmdline = env_cmd
  # Work on a copy so the caller's dict is left untouched.
  patched_env = dict(extra_env)
  # Disables exception pop-ups on Windows.
  patched_env[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((patched_env, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  # COMMAND_LIST_TESTS is deliberately excluded: listing tests produces no
  # test output to compare against the golden file.
  return (GetCommandOutput(COMMAND_WITH_COLOR) +
          GetCommandOutput(COMMAND_WITH_TIME) +
          GetCommandOutput(COMMAND_WITH_DISABLED) +
          GetCommandOutput(COMMAND_WITH_SHARDING))
# Probe the binary once to learn which optional features it was built with.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# NOTE(review): hard-coded off -- the golden file is compared with stack
# traces stripped; confirm before enabling.
SUPPORTS_STACK_TRACES = False
# The golden file is only valid when produced by a binary that supports
# every optional feature.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares gtest_output_test_'s combined output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    # Strips golden-file sections for features this binary lacks so the
    # comparison stays meaningful on reduced builds.
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    output = GetOutputOfAllCommands()
    # NOTE(review): reads in 'rb' mode and treats the result as str --
    # assumes Python 2 byte/str equivalence.
    golden_file = open(GOLDEN_PATH, 'rb')
    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    golden = ToUnixLineEnding(golden_file.read())
    golden_file.close()
    # We want the test to pass regardless of certain features being
    # supported or not.
    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)
    if CAN_GENERATE_GOLDEN_FILE:
      # Full-featured binary: expect an exact (normalized) match.
      self.assertEqual(normalized_golden, normalized_actual)
    else:
      # Reduced binary: strip unsupported sections and volatile test counts
      # from both sides before comparing.
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))
      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb').write(
                normalized_actual)
        open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb').write(
                normalized_golden)
      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  # With --gengolden, (re)generate the golden file instead of running the
  # comparison test.
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      golden_file = open(GOLDEN_PATH, 'wb')
      golden_file.write(output)
      golden_file.close()
    else:
      # A reduced build cannot produce a golden file other builds can use.
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")
      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| unlicense |
TheGhostHuCodes/spy_dir | spy_dir.py | 1 | 2182 | #!/usr/bin/env python
import os
import os.path as pt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import argparse
#TODO: take decimal places as parameter for printing.
def sizeof_pp(num, precision=2):
    """Pretty-print a byte count using binary (IEC) units.

    Resolves the file's TODO by taking the number of decimal places as a
    parameter (defaults to 2, matching the previous output).

    Args:
        num: size in bytes; may be negative.
        precision: number of decimal places to print.

    Returns:
        A string such as '3.00 KiB' or '1.25 MiB'.
    """
    for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']:
        if abs(num) < 1024.0:
            return "%3.*f %s" % (precision, num, unit)
        num /= 1024.0
    # Fell through every smaller unit: report in yobibytes.
    # Bug fix: the original labeled this 'Yi' instead of 'YiB', breaking the
    # 'B'-suffix convention used by every other unit.
    return "%.*f %s" % (precision, num, 'YiB')
def xtic_formatter(num, tick_index):
    """Matplotlib FuncFormatter callback: renders a byte-count tick label.

    Args:
        num: tick value in bytes.
        tick_index: tick position index (unused, but required by the
            FuncFormatter callback signature).
    """
    return sizeof_pp(num)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='.')
    parser.add_argument('dir_path', metavar='Path', type=str, help='')
    parser.add_argument('-p', '--plot', action='store_true')
    args = parser.parse_args()

    # Collect the size of every regular file under dir_path. Symlinks are
    # counted separately and never followed, so a link cannot double-count
    # (or escape) the tree.
    sizes = []
    symlink_count = 0
    for root, dirs, files in os.walk(args.dir_path, followlinks=False):
        for name in files:
            fullpath = pt.join(root, name)
            if not os.path.islink(fullpath):
                sizes.append(pt.getsize(fullpath))
            else:
                symlink_count += 1
    sizes.sort()

    print("Searching in directory: {0}".format(args.dir_path))
    print("Files Inspected: {0}".format(len(sizes)))

    # Bug fix: an empty directory used to crash below on sizes[-1]
    # (IndexError); bail out cleanly instead.
    if not sizes:
        print("Symlinks found: {0}".format(symlink_count))
        raise SystemExit(0)

    print("Maxfilesize: " + sizeof_pp(sizes[-1]))
    print("Symlinks found: {0}".format(symlink_count))

    percentile = 95
    # int() truncation keeps the index in range for any percentile < 100.
    index = len(sizes) * (percentile / 100.)
    print("{0}% of files smaller than: ~".format(percentile) + sizeof_pp(
        sizes[int(index)]))

    sizesArray = np.asarray(sizes)
    if (args.plot):
        # Bug fix: use integer division -- under Python 3, len(sizes) / 10
        # yields a float, which matplotlib rejects as a bin count. Also
        # clamp to at least one bin so tiny directories still plot.
        bins = max(1, min(len(sizes) // 10, 200))
        plt.figure(figsize=(8, 8))
        ax = plt.subplot(111)
        # Adjust y-axis to show bins of height 1 and max bin height.
        n, _, _ = plt.hist(sizesArray, bins, log=True)
        plt.ylim(0.5, max(n) * 1.1)
        plt.xlabel("File Size (bytes)")
        plt.ylabel("Log(Number of Files)")
        plt.title("File size histogram for: {0}".format(args.dir_path))
        # Format x ticks as human-readable byte counts. (The unused
        # ScalarFormatter the original built here was dead code: it was
        # configured but never attached to the axis.)
        x_format = mpl.ticker.FuncFormatter(xtic_formatter)
        ax.xaxis.set_major_formatter(x_format)
        plt.show()
| apache-2.0 |
MichaelDoyle/Diamond | src/collectors/diskusage/diskusage.py | 15 | 10481 | # coding=utf-8
"""
Collect IO Stats
Note: You may need to artificially generate some IO load on a disk/partition
before graphite will generate the metrics.
* http://www.kernel.org/doc/Documentation/iostats.txt
#### Dependencies
* /proc/diskstats
"""
import diamond.collector
import diamond.convertor
import time
import os
import re
# psutil is optional: it is only needed as the fallback statistics source on
# systems without /proc/diskstats (see get_disk_statistics below).
try:
    import psutil
except ImportError:
    psutil = None
class DiskUsageCollector(diamond.collector.Collector):
    """Collects per-device disk I/O statistics and publishes iostat-style
    metrics, reading /proc/diskstats when available and falling back to
    psutil otherwise."""

    # /proc/diskstats counters are unsigned 32-bit and wrap at 2**32 - 1;
    # these maxima let self.derivative() handle counter rollover.
    MAX_VALUES = {
        'reads': 4294967295,
        'reads_merged': 4294967295,
        'reads_milliseconds': 4294967295,
        'writes': 4294967295,
        'writes_merged': 4294967295,
        'writes_milliseconds': 4294967295,
        'io_milliseconds': 4294967295,
        'io_milliseconds_weighted': 4294967295
    }

    # Timestamp of the previous collect() run; used to compute per-second
    # rates over the actual elapsed interval.
    LastCollectTime = None

    def get_default_config_help(self):
        """Returns help text for this collector's config options."""
        config_help = super(DiskUsageCollector, self).get_default_config_help()
        config_help.update({
            'devices': "A regex of which devices to gather metrics for." +
                       " Defaults to md, sd, xvd, disk, and dm devices",
            'sector_size': 'The size to use to calculate sector usage',
            'send_zero': 'Send io data even when there is no io',
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(DiskUsageCollector, self).get_default_config()
        config.update({
            'path': 'iostat',
            # Matches Windows PhysicalDrive names plus common Linux/BSD
            # block devices (md, sd*, vd*/xvd*, disk*, dm-*).
            'devices': ('PhysicalDrive[0-9]+$' +
                        '|md[0-9]+$' +
                        '|sd[a-z]+[0-9]*$' +
                        '|x?vd[a-z]+[0-9]*$' +
                        '|disk[0-9]+$' +
                        '|dm\-[0-9]+$'),
            'sector_size': 512,
            'send_zero': False,
        })
        return config

    def get_disk_statistics(self):
        """
        Create a map of disks in the machine.

        http://www.kernel.org/doc/Documentation/iostats.txt

        Returns:
          (major, minor) -> DiskStatistics(device, ...)
        """
        result = {}

        if os.access('/proc/diskstats', os.R_OK):
            # Remember which source fed us; collect() only derives the
            # merged/extended metrics when real diskstats are available.
            self.proc_diskstats = True
            fp = open('/proc/diskstats')

            try:
                for line in fp:
                    try:
                        columns = line.split()
                        # On early linux v2.6 versions, partitions have only 4
                        # output fields not 11. From linux 2.6.25 partitions
                        # have the full stats set.
                        if len(columns) < 14:
                            continue

                        major = int(columns[0])
                        minor = int(columns[1])
                        device = columns[2]

                        # Skip pseudo-devices (ramdisks and loop devices).
                        if ((device.startswith('ram') or
                             device.startswith('loop'))):
                            continue

                        result[(major, minor)] = {
                            'device': device,
                            'reads': float(columns[3]),
                            'reads_merged': float(columns[4]),
                            'reads_sectors': float(columns[5]),
                            'reads_milliseconds': float(columns[6]),
                            'writes': float(columns[7]),
                            'writes_merged': float(columns[8]),
                            'writes_sectors': float(columns[9]),
                            'writes_milliseconds': float(columns[10]),
                            'io_in_progress': float(columns[11]),
                            'io_milliseconds': float(columns[12]),
                            'io_milliseconds_weighted': float(columns[13])
                        }
                    except ValueError:
                        # Malformed line; skip it.
                        continue
            finally:
                fp.close()
        else:
            # No /proc/diskstats (non-Linux): fall back to psutil, which
            # exposes a reduced counter set (no merged/in-progress data).
            self.proc_diskstats = False

            if not psutil:
                self.log.error('Unable to import psutil')
                return None

            disks = psutil.disk_io_counters(True)
            sector_size = int(self.config['sector_size'])

            for disk in disks:
                # Synthetic (major, minor) keys: psutil does not expose them.
                result[(0, len(result))] = {
                    'device': disk,
                    'reads': disks[disk].read_count,
                    'reads_sectors': disks[disk].read_bytes / sector_size,
                    'reads_milliseconds': disks[disk].read_time,
                    'writes': disks[disk].write_count,
                    'writes_sectors': disks[disk].write_bytes / sector_size,
                    'writes_milliseconds': disks[disk].write_time,
                    'io_milliseconds':
                    disks[disk].read_time + disks[disk].write_time,
                    'io_milliseconds_weighted':
                    disks[disk].read_time + disks[disk].write_time
                }

        return result

    def collect(self):
        """Gathers disk statistics and publishes derived metrics for every
        device matching the configured 'devices' regex."""

        # Handle collection time intervals correctly
        CollectTime = time.time()
        time_delta = float(self.config['interval'])
        if self.LastCollectTime:
            time_delta = CollectTime - self.LastCollectTime
        if not time_delta:
            time_delta = float(self.config['interval'])
        self.LastCollectTime = CollectTime

        exp = self.config['devices']
        reg = re.compile(exp)

        results = self.get_disk_statistics()
        if not results:
            self.log.error('No diskspace metrics retrieved')
            return None

        # NOTE(review): the inner loop below reuses the name 'key',
        # shadowing the (major, minor) device key of this outer loop.
        # Harmless (the outer key is unused afterwards) but confusing.
        for key, info in results.iteritems():
            metrics = {}
            name = info['device']
            if not reg.match(name):
                continue

            for key, value in info.iteritems():
                if key == 'device':
                    continue
                oldkey = key

                # Publish the sector counters converted into each configured
                # byte unit, and compute counter derivatives.
                for unit in self.config['byte_unit']:
                    key = oldkey

                    if key.endswith('sectors'):
                        key = key.replace('sectors', unit)
                        value /= (1024 / int(self.config['sector_size']))
                        value = diamond.convertor.binary.convert(value=value,
                                                                 oldUnit='kB',
                                                                 newUnit=unit)
                        self.MAX_VALUES[key] = diamond.convertor.binary.convert(
                            value=diamond.collector.MAX_COUNTER,
                            oldUnit='byte',
                            newUnit=unit)

                    metric_name = '.'.join([info['device'], key])
                    # io_in_progress is a point in time counter, !derivative
                    if key != 'io_in_progress':
                        metric_value = self.derivative(
                            metric_name,
                            value,
                            self.MAX_VALUES[key],
                            time_delta=False)
                    else:
                        metric_value = value

                    metrics[key] = metric_value

            if self.proc_diskstats:
                # Merged-request rates only exist in /proc/diskstats data.
                metrics['read_requests_merged_per_second'] = (
                    metrics['reads_merged'] / time_delta)
                metrics['write_requests_merged_per_second'] = (
                    metrics['writes_merged'] / time_delta)

            metrics['reads_per_second'] = metrics['reads'] / time_delta
            metrics['writes_per_second'] = metrics['writes'] / time_delta

            for unit in self.config['byte_unit']:
                metric_name = 'read_%s_per_second' % unit
                key = 'reads_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                metric_name = 'write_%s_per_second' % unit
                key = 'writes_%s' % unit
                metrics[metric_name] = metrics[key] / time_delta

                # Set to zero so the nodes are valid even if we have 0 io for
                # the metric duration
                metric_name = 'average_request_size_%s' % unit
                metrics[metric_name] = 0

            metrics['io'] = metrics['reads'] + metrics['writes']

            metrics['average_queue_length'] = (
                metrics['io_milliseconds_weighted'] / time_delta / 1000.0)
            # io_milliseconds / elapsed-ms * 100 == percent busy.
            metrics['util_percentage'] = (
                metrics['io_milliseconds'] / time_delta / 10.0)

            # Mean latency per read / per write (guard against divide-by-0).
            if metrics['reads'] > 0:
                metrics['read_await'] = (
                    metrics['reads_milliseconds'] / metrics['reads'])
            else:
                metrics['read_await'] = 0

            if metrics['writes'] > 0:
                metrics['write_await'] = (
                    metrics['writes_milliseconds'] / metrics['writes'])
            else:
                metrics['write_await'] = 0

            for unit in self.config['byte_unit']:
                rkey = 'reads_%s' % unit
                wkey = 'writes_%s' % unit
                metric_name = 'average_request_size_%s' % unit
                if (metrics['io'] > 0):
                    metrics[metric_name] = (
                        metrics[rkey] + metrics[wkey]) / metrics['io']
                else:
                    metrics[metric_name] = 0

            metrics['iops'] = metrics['io'] / time_delta

            if (metrics['io'] > 0):
                metrics['service_time'] = (
                    metrics['io_milliseconds'] / metrics['io'])
                metrics['await'] = (
                    metrics['reads_milliseconds'] +
                    metrics['writes_milliseconds']) / metrics['io']
            else:
                metrics['service_time'] = 0
                metrics['await'] = 0

            # http://www.scribd.com/doc/15013525
            # Page 28
            metrics['concurrent_io'] = (
                (metrics['reads_per_second'] + metrics['writes_per_second']) *
                (metrics['service_time'] / 1000.0))

            # Only publish when we have io figures
            if (metrics['io'] > 0 or self.config['send_zero']):
                for key in metrics:
                    metric_name = '.'.join([info['device'], key]).replace(
                        '/', '_')
                    self.publish(metric_name, metrics[key], precision=3)
| mit |
hdinsight/hue | desktop/core/ext-py/boto-2.38.0/boto/ec2/instancestatus.py | 181 | 6854 | # Copyright (c) 2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates.
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class Details(dict):
    """
    A dict object that contains name/value pairs which provide
    more detailed information about the status of the system
    or the instance.
    """

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        if name == 'name':
            # Remember the detail name; the matching 'status' element
            # arrives next and becomes its value.
            self._name = value
            return
        if name == 'status':
            self[self._name] = value
            return
        # Anything else is stored as a plain attribute.
        setattr(self, name, value)
class Event(object):
    """
    A status event for an instance.

    :ivar code: A string indicating the event type.
    :ivar description: A string describing the reason for the event.
    :ivar not_before: A datestring describing the earliest time for
        the event.
    :ivar not_after: A datestring describing the latest time for
        the event.
    """

    # Maps XML element names to the attribute each one populates; unknown
    # elements fall back to an attribute of the same name.
    _XML_TO_ATTR = {'code': 'code',
                    'description': 'description',
                    'notBefore': 'not_before',
                    'notAfter': 'not_after'}

    def __init__(self, code=None, description=None,
                 not_before=None, not_after=None):
        self.code = code
        self.description = description
        self.not_before = not_before
        self.not_after = not_after

    def __repr__(self):
        return 'Event:%s' % self.code

    def startElement(self, name, attrs, connection):
        return None

    def endElement(self, name, value, connection):
        setattr(self, self._XML_TO_ATTR.get(name, name), value)
class Status(object):
    """
    A generic Status object used for system status and instance status.

    :ivar status: A string indicating overall status.
    :ivar details: A dict containing name-value pairs which provide
        more details about the current status.
    """

    def __init__(self, status=None, details=None):
        self.status = status
        # Any falsy details value (None, empty dict) is replaced with a
        # fresh Details mapping.
        self.details = details if details else Details()

    def __repr__(self):
        return 'Status:%s' % self.status

    def startElement(self, name, attrs, connection):
        # Hand nested 'details' elements off to the Details object.
        return self.details if name == 'details' else None

    def endElement(self, name, value, connection):
        if name == 'status':
            self.status = value
        else:
            setattr(self, name, value)
class EventSet(list):
    """A list of Event objects parsed from an eventsSet XML element."""

    def startElement(self, name, attrs, connection):
        if name != 'item':
            return None
        # Each 'item' element becomes a new Event appended to this set;
        # returning it lets the parser fill in its fields.
        event = Event()
        self.append(event)
        return event

    def endElement(self, name, value, connection):
        setattr(self, name, value)
class InstanceStatus(object):
    """
    Represents an EC2 Instance status as reported by
    DescribeInstanceStatus request.

    :ivar id: The instance identifier.
    :ivar zone: The availability zone of the instance.
    :ivar events: A list of events relevant to the instance.
    :ivar state_code: An integer representing the current state
        of the instance.
    :ivar state_name: A string describing the current state
        of the instance.
    :ivar system_status: A Status object that reports impaired
        functionality that stems from issues related to the systems
        that support an instance, such as such as hardware failures
        and network connectivity problems.
    :ivar instance_status: A Status object that reports impaired
        functionality that arises from problems internal to the instance.
    """

    def __init__(self, id=None, zone=None, events=None,
                 state_code=None, state_name=None):
        self.id = id
        self.zone = zone
        self.events = events
        self.state_code = state_code
        self.state_name = state_name
        self.system_status = Status()
        self.instance_status = Status()

    def __repr__(self):
        return 'InstanceStatus:%s' % self.id

    def startElement(self, name, attrs, connection):
        if name == 'eventsSet':
            # Replace any existing events with a fresh set for the parser
            # to populate.
            self.events = EventSet()
            return self.events
        if name == 'systemStatus':
            return self.system_status
        if name == 'instanceStatus':
            return self.instance_status
        return None

    def endElement(self, name, value, connection):
        if name == 'instanceId':
            self.id = value
            return
        if name == 'availabilityZone':
            self.zone = value
            return
        if name == 'code':
            # The state code arrives as text; store it as an integer.
            self.state_code = int(value)
            return
        if name == 'name':
            self.state_name = value
            return
        setattr(self, name, value)
class InstanceStatusSet(list):
    """
    A list object that contains the results of a call to
    DescribeInstanceStatus request.  Each element of the
    list will be an InstanceStatus object.

    :ivar next_token: If the response was truncated by
        the EC2 service, the next_token attribute of the
        object will contain the string that needs to be
        passed in to the next request to retrieve the next
        set of results.
    """

    def __init__(self, connection=None):
        list.__init__(self)
        self.connection = connection
        self.next_token = None

    def startElement(self, name, attrs, connection):
        if name != 'item':
            return None
        # Each 'item' element becomes a new InstanceStatus entry.
        status = InstanceStatus()
        self.append(status)
        return status

    def endElement(self, name, value, connection):
        if name == 'nextToken':
            # Pagination token for fetching the next page of results.
            self.next_token = value
        setattr(self, name, value)
| apache-2.0 |
tadebayo/myedge | myvenv/Lib/site-packages/setuptools/monkey.py | 80 | 5255 | """
Monkey patching of distutils.
"""
import sys
import distutils.filelist
import platform
import types
import functools
import inspect
from .py26compat import import_module
import six
import setuptools
# Deliberately export nothing: every name in this module is internal.
__all__ = []
"""
Everything is private. Contact the project team
if you think you need this functionality.
"""
def get_unpatched(item):
    """
    Return the original, unpatched object behind *item* (a class or a
    function), or None for anything else.
    """
    if isinstance(item, six.class_types):
        return get_unpatched_class(item)
    if isinstance(item, types.FunctionType):
        return get_unpatched_function(item)
    return None
def get_unpatched_class(cls):
    """Protect against re-patching the distutils if reloaded

    Also ensures that no other distutils extension monkeypatched the distutils
    first.
    """
    # The first MRO entry not provided by setuptools is the candidate
    # original class. (object's module never starts with 'setuptools',
    # so the loop always binds base.)
    for candidate in inspect.getmro(cls):
        if not candidate.__module__.startswith('setuptools'):
            base = candidate
            break
    # Anything other than a genuine distutils class means some other
    # package patched distutils before us.
    if not base.__module__.startswith('distutils'):
        msg = "distutils has already been patched by %r" % cls
        raise AssertionError(msg)
    return base
def patch_all():
    """Monkeypatch distutils throughout with setuptools' replacements:
    Command, findall, the PyPI upload URL, the metadata writers,
    Distribution, Extension, and the MSVC compiler helpers."""
    # we can't patch distutils.cmd, alas
    distutils.core.Command = setuptools.Command

    has_issue_12885 = sys.version_info <= (3, 5, 3)

    if has_issue_12885:
        # fix findall bug in distutils (http://bugs.python.org/issue12885)
        distutils.filelist.findall = setuptools.findall

    # Interpreters in these version ranges default to the retired PyPI
    # upload endpoint; point them at the warehouse URL instead.
    needs_warehouse = (
        sys.version_info < (2, 7, 13)
        or
        (3, 0) < sys.version_info < (3, 3, 7)
        or
        (3, 4) < sys.version_info < (3, 4, 6)
        or
        (3, 5) < sys.version_info <= (3, 5, 3)
    )

    if needs_warehouse:
        warehouse = 'https://upload.pypi.org/legacy/'
        distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse

    _patch_distribution_metadata_write_pkg_file()
    _patch_distribution_metadata_write_pkg_info()

    # Install Distribution throughout the distutils
    for module in distutils.dist, distutils.core, distutils.cmd:
        module.Distribution = setuptools.dist.Distribution

    # Install the patched Extension
    distutils.core.Extension = setuptools.extension.Extension
    distutils.extension.Extension = setuptools.extension.Extension
    if 'distutils.command.build_ext' in sys.modules:
        sys.modules['distutils.command.build_ext'].Extension = (
            setuptools.extension.Extension
        )

    patch_for_msvc_specialized_compiler()
def _patch_distribution_metadata_write_pkg_file():
    """Patch write_pkg_file to also write Requires-Python/Requires-External"""
    distutils.dist.DistributionMetadata.write_pkg_file = (
        setuptools.dist.write_pkg_file
    )
def _patch_distribution_metadata_write_pkg_info():
    """
    Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local
    encoding to save the pkg_info. Monkey-patch its write_pkg_info method to
    correct this undesirable behavior.
    """
    environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2)
    if not environment_local:
        # Only the affected interpreter range needs the replacement writer.
        return

    distutils.dist.DistributionMetadata.write_pkg_info = (
        setuptools.dist.write_pkg_info
    )
def patch_func(replacement, target_mod, func_name):
    """
    Patch func_name in target_mod with replacement

    Important - original must be resolved by name to avoid
    patching an already patched function.
    """
    original = getattr(target_mod, func_name)

    # Record the original on the replacement (first patch wins) so that
    # get_unpatched_function can recover it later.
    if 'unpatched' not in vars(replacement):
        vars(replacement)['unpatched'] = original

    # replace the function in the original module
    setattr(target_mod, func_name, replacement)
def get_unpatched_function(candidate):
    """Return the original function stashed on *candidate* by patch_func."""
    return candidate.unpatched
def patch_for_msvc_specialized_compiler():
    """
    Patch functions in distutils to use standalone Microsoft Visual C++
    compilers.
    """
    # import late to avoid circular imports on Python < 3.5
    msvc = import_module('setuptools.msvc')

    if platform.system() != 'Windows':
        # Compilers only availables on Microsoft Windows
        return

    def patch_params(mod_name, func_name):
        """
        Prepare the parameters for patch_func to patch indicated function.
        """
        # Replacements in setuptools.msvc are named msvc9_*/msvc14_* after
        # the compiler module they shadow.
        repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_'
        repl_name = repl_prefix + func_name.lstrip('_')
        repl = getattr(msvc, repl_name)
        mod = import_module(mod_name)
        if not hasattr(mod, func_name):
            raise ImportError(func_name)
        return repl, mod, func_name

    # Python 2.7 to 3.4
    msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler')

    # Python 3.5+
    msvc14 = functools.partial(patch_params, 'distutils._msvccompiler')

    # Each patch is best-effort: ImportError means the target module or
    # function does not exist on this interpreter, so it is skipped.
    try:
        # Patch distutils.msvc9compiler
        patch_func(*msvc9('find_vcvarsall'))
        patch_func(*msvc9('query_vcvarsall'))
    except ImportError:
        pass

    try:
        # Patch distutils._msvccompiler._get_vc_env
        patch_func(*msvc14('_get_vc_env'))
    except ImportError:
        pass

    try:
        # Patch distutils._msvccompiler.gen_lib_options for Numpy
        patch_func(*msvc14('gen_lib_options'))
    except ImportError:
        pass
| mit |
Thanu/stratos | components/org.apache.stratos.python.cartridge.agent/src/main/python/cartridge.agent/cartridge.agent/modules/databridge/thrift/thrift/protocol/TProtocol.py | 19 | 10843 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from ..Thrift import *
class TProtocolException(TException):
  """Custom Protocol Exception class"""

  # Error type codes carried in self.type.
  UNKNOWN = 0
  INVALID_DATA = 1
  NEGATIVE_SIZE = 2
  SIZE_LIMIT = 3
  BAD_VERSION = 4

  def __init__(self, type=UNKNOWN, message=None):
    # 'type' deliberately shadows the builtin to match the Thrift API.
    TException.__init__(self, message)
    self.type = type
class TProtocolBase:
  """Base class for Thrift protocol driver."""

  def __init__(self, trans):
    # trans: the TTransport this protocol reads from and writes to.
    self.trans = trans
  # --- Writer interface -------------------------------------------------
  # No-op hooks; concrete protocols (binary, compact, JSON, ...) override
  # these to serialize each element of the Thrift wire format.

  def writeMessageBegin(self, name, ttype, seqid):
    pass

  def writeMessageEnd(self):
    pass

  def writeStructBegin(self, name):
    pass

  def writeStructEnd(self):
    pass

  def writeFieldBegin(self, name, ttype, fid):
    pass

  def writeFieldEnd(self):
    pass

  def writeFieldStop(self):
    pass

  def writeMapBegin(self, ktype, vtype, size):
    pass

  def writeMapEnd(self):
    pass

  def writeListBegin(self, etype, size):
    pass

  def writeListEnd(self):
    pass

  def writeSetBegin(self, etype, size):
    pass

  def writeSetEnd(self):
    pass

  def writeBool(self, bool_val):
    pass

  def writeByte(self, byte):
    pass

  def writeI16(self, i16):
    pass

  def writeI32(self, i32):
    pass

  def writeI64(self, i64):
    pass

  def writeDouble(self, dub):
    pass

  def writeString(self, str_val):
    pass

  # --- Reader interface -------------------------------------------------
  # Mirror image of the writer hooks; overridden by concrete protocols to
  # deserialize each element.

  def readMessageBegin(self):
    pass

  def readMessageEnd(self):
    pass

  def readStructBegin(self):
    pass

  def readStructEnd(self):
    pass

  def readFieldBegin(self):
    pass

  def readFieldEnd(self):
    pass

  def readMapBegin(self):
    pass

  def readMapEnd(self):
    pass

  def readListBegin(self):
    pass

  def readListEnd(self):
    pass

  def readSetBegin(self):
    pass

  def readSetEnd(self):
    pass

  def readBool(self):
    pass

  def readByte(self):
    pass

  def readI16(self):
    pass

  def readI32(self):
    pass

  def readI64(self):
    pass

  def readDouble(self):
    pass

  def readString(self):
    pass
  def skip(self, ttype):
    """Reads and discards a single value of the given TType."""
    if ttype == TType.STOP:
      return
    elif ttype == TType.BOOL:
      self.readBool()
    elif ttype == TType.BYTE:
      self.readByte()
    elif ttype == TType.I16:
      self.readI16()
    elif ttype == TType.I32:
      self.readI32()
    elif ttype == TType.I64:
      self.readI64()
    elif ttype == TType.DOUBLE:
      self.readDouble()
    elif ttype == TType.STRING:
      self.readString()
    elif ttype == TType.STRUCT:
      # Recursively skip every field until the STOP marker.
      name = self.readStructBegin()
      while True:
        (name, ttype, id) = self.readFieldBegin()
        if ttype == TType.STOP:
          break
        self.skip(ttype)
        self.readFieldEnd()
      self.readStructEnd()
    elif ttype == TType.MAP:
      # Skip each key/value pair.
      (ktype, vtype, size) = self.readMapBegin()
      for i in xrange(size):
        self.skip(ktype)
        self.skip(vtype)
      self.readMapEnd()
    elif ttype == TType.SET:
      (etype, size) = self.readSetBegin()
      for i in xrange(size):
        self.skip(etype)
      self.readSetEnd()
    elif ttype == TType.LIST:
      (etype, size) = self.readListBegin()
      for i in xrange(size):
        self.skip(etype)
      self.readListEnd()
# tuple of: ( 'reader method' name, is_container bool, 'writer_method' name )
_TTYPE_HANDLERS = (
(None, None, False), # 0 TType.STOP
(None, None, False), # 1 TType.VOID # TODO: handle void?
('readBool', 'writeBool', False), # 2 TType.BOOL
('readByte', 'writeByte', False), # 3 TType.BYTE and I08
('readDouble', 'writeDouble', False), # 4 TType.DOUBLE
(None, None, False), # 5 undefined
('readI16', 'writeI16', False), # 6 TType.I16
(None, None, False), # 7 undefined
('readI32', 'writeI32', False), # 8 TType.I32
(None, None, False), # 9 undefined
('readI64', 'writeI64', False), # 10 TType.I64
('readString', 'writeString', False), # 11 TType.STRING and UTF7
('readContainerStruct', 'writeContainerStruct', True), # 12 *.STRUCT
('readContainerMap', 'writeContainerMap', True), # 13 TType.MAP
('readContainerSet', 'writeContainerSet', True), # 14 TType.SET
('readContainerList', 'writeContainerList', True), # 15 TType.LIST
(None, None, False), # 16 TType.UTF8 # TODO: handle utf8 types?
(None, None, False) # 17 TType.UTF16 # TODO: handle utf16 types?
)
  def readFieldByTType(self, ttype, spec):
    """Reads one field value, dispatching on ttype via _TTYPE_HANDLERS.

    Raises TProtocolException for type ids outside the table or with no
    reader registered.
    """
    try:
      (r_handler, w_handler, is_container) = self._TTYPE_HANDLERS[ttype]
    except IndexError:
      raise TProtocolException(type=TProtocolException.INVALID_DATA,
                               message='Invalid field type %d' % (ttype))
    if r_handler is None:
      raise TProtocolException(type=TProtocolException.INVALID_DATA,
                               message='Invalid field type %d' % (ttype))
    reader = getattr(self, r_handler)
    if not is_container:
      return reader()
    # Container readers additionally need the nested type spec.
    return reader(spec)
def readContainerList(self, spec):
    """Deserialize a thrift list and return it as a Python list.

    spec is (element_ttype, element_spec); element_spec is None for
    primitive elements, otherwise the nested container/struct spec.
    """
    elem_ttype, elem_spec = spec[0], spec[1]
    # Resolve the primitive reader up front (matches original lookup order).
    simple_reader = getattr(self, self._TTYPE_HANDLERS[elem_ttype][0])
    (wire_etype, length) = self.readListBegin()
    out = []
    if elem_spec is None:
        # Elements are simple types: read each one directly.
        for _ in xrange(length):
            out.append(simple_reader())
    else:
        # Nested containers/structs: dispatch on the wire-reported
        # element type, like an inlined readFieldByTType.
        nested_reader = getattr(self, self._TTYPE_HANDLERS[wire_etype][0])
        for _ in xrange(length):
            out.append(nested_reader(elem_spec))
    self.readListEnd()
    return out
def readContainerSet(self, spec):
    """Deserialize a thrift set and return it as a Python set.

    spec is (element_ttype, element_spec); element_spec is None for
    primitive members, otherwise the nested container/struct spec.
    """
    member_ttype, member_spec = spec[0], spec[1]
    # Resolve the primitive reader up front (matches original lookup order).
    simple_reader = getattr(self, self._TTYPE_HANDLERS[member_ttype][0])
    (wire_etype, length) = self.readSetBegin()
    out = set()
    if member_spec is None:
        # Members are simple types: read each one directly.
        for _ in xrange(length):
            out.add(simple_reader())
    else:
        # Nested containers/structs: dispatch on the wire-reported type.
        nested_reader = getattr(self, self._TTYPE_HANDLERS[wire_etype][0])
        for _ in xrange(length):
            out.add(nested_reader(member_spec))
    self.readSetEnd()
    return out
def readContainerStruct(self, spec):
    """Deserialize one struct element.

    spec is (struct_class, struct_spec); the class is instantiated and
    asked to read itself from this protocol.
    """
    (struct_class, _struct_spec) = spec
    instance = struct_class()
    instance.read(self)
    return instance
def readContainerMap(self, spec):
    """Deserialize a thrift map into a dict.

    spec is (key_ttype, key_spec, val_ttype, val_spec); a None spec
    marks a primitive key/value, otherwise readFieldByTType recurses.
    """
    key_ttype, key_spec = spec[0], spec[1]
    val_ttype, val_spec = spec[2], spec[3]
    (wire_ktype, wire_vtype, length) = self.readMapBegin()
    # TODO: compare types we just decoded with thrift_spec and
    # abort/skip if types disagree
    read_key = getattr(self, self._TTYPE_HANDLERS[key_ttype][0])
    read_val = getattr(self, self._TTYPE_HANDLERS[val_ttype][0])
    mapping = dict()
    for _ in xrange(length):
        k = read_key() if key_spec is None else \
            self.readFieldByTType(key_ttype, key_spec)
        v = read_val() if val_spec is None else \
            self.readFieldByTType(val_ttype, val_spec)
        # Unhashable key types raise TypeError here,
        # i.e. this fails: d=dict(); d[[0,1]] = 2
        mapping[k] = v
    self.readMapEnd()
    return mapping
def readStruct(self, obj, thrift_spec):
    """Populate obj's attributes from the stream.

    Fields whose id is not in thrift_spec, or whose wire type disagrees
    with the spec, are skipped; reading stops at the STOP field.
    """
    self.readStructBegin()
    while True:
        (_fname, ftype, fid) = self.readFieldBegin()
        if ftype == TType.STOP:
            break
        try:
            field = thrift_spec[fid]
        except IndexError:
            field = None  # unknown field id: fall through to skip
        if field is not None and ftype == field[1]:
            value = self.readFieldByTType(ftype, field[3])
            setattr(obj, field[2], value)
        else:
            self.skip(ftype)
        self.readFieldEnd()
    self.readStructEnd()
def writeContainerStruct(self, val, spec):
    # Struct elements serialize themselves; spec is unused here because
    # val.write() already knows its own thrift_spec layout.
    val.write(self)
def writeContainerList(self, val, spec):
    """Serialize a Python sequence as a thrift list.

    spec is (element_ttype, element_spec); container elements receive
    the nested spec, primitives are written directly.
    """
    elem_ttype = spec[0]
    self.writeListBegin(elem_ttype, len(val))
    _reader, writer_name, needs_spec = self._TTYPE_HANDLERS[elem_ttype]
    write_elem = getattr(self, writer_name)
    for item in val:
        if needs_spec:
            write_elem(item, spec[1])
        else:
            write_elem(item)
    self.writeListEnd()
def writeContainerSet(self, val, spec):
    """Serialize a Python set as a thrift set.

    spec is (member_ttype, member_spec); container members receive the
    nested spec, primitives are written directly.
    """
    member_ttype = spec[0]
    self.writeSetBegin(member_ttype, len(val))
    _reader, writer_name, needs_spec = self._TTYPE_HANDLERS[member_ttype]
    write_member = getattr(self, writer_name)
    for item in val:
        if needs_spec:
            write_member(item, spec[1])
        else:
            write_member(item)
    self.writeSetEnd()
def writeContainerMap(self, val, spec):
    """Serialize a dict as a thrift map.

    spec is (key_ttype, key_spec, val_ttype, val_spec); container keys
    and values receive their nested spec, primitives are written directly.
    """
    key_ttype, val_ttype = spec[0], spec[2]
    _r1, key_writer_name, key_is_container = self._TTYPE_HANDLERS[key_ttype]
    _r2, val_writer_name, val_is_container = self._TTYPE_HANDLERS[val_ttype]
    write_key = getattr(self, key_writer_name)
    write_val = getattr(self, val_writer_name)
    self.writeMapBegin(key_ttype, val_ttype, len(val))
    for m_key, m_val in val.iteritems():
        if key_is_container:
            write_key(m_key, spec[1])
        else:
            write_key(m_key)
        if val_is_container:
            write_val(m_val, spec[3])
        else:
            write_val(m_val)
    self.writeMapEnd()
def writeStruct(self, obj, thrift_spec):
    """Serialize obj's fields per thrift_spec, then write the STOP marker.

    Spec entries that are None (holes in sparse field ids) and fields
    whose attribute value is None (unset) are not written.
    """
    self.writeStructBegin(obj.__class__.__name__)
    for field in thrift_spec:
        if field is None:
            continue  # gap in the sparse field-id table
        attr_name = field[2]
        value = getattr(obj, attr_name)
        if value is None:
            continue  # skip writing out unset fields
        field_id, field_ttype, field_spec = field[0], field[1], field[3]
        self.writeFieldBegin(attr_name, field_ttype, field_id)
        # Dispatch to the type-appropriate writer method.
        self.writeFieldByTType(field_ttype, value, field_spec)
        self.writeFieldEnd()
    self.writeFieldStop()
    self.writeStructEnd()
def writeFieldByTType(self, ttype, val, spec):
    """Write one value through the writer registered for ttype.

    Container types receive the nested spec; primitives are written
    with the value alone.
    """
    _reader, writer_name, needs_spec = self._TTYPE_HANDLERS[ttype]
    write = getattr(self, writer_name)
    if needs_spec:
        write(val, spec)
    else:
        write(val)
class TProtocolFactory:
    """Abstract factory for protocol objects.

    Concrete subclasses override getProtocol to bind a protocol
    implementation to a transport.
    """

    def getProtocol(self, trans):
        # Abstract: subclasses return a protocol wrapping trans.
        # The base implementation is a no-op (returns None).
        pass
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.