repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
colelloa/python | duplicate_image.py | 1 | 1959 | from PIL import Image, ImageDraw
#image naming convention xcoord_ycoord_alpha.png
#where coords are those of left corner of box to fill
class ImageDuplicator:
    """Create variants of a base "grass" image with a white ball drawn on it.

    Image naming convention: ``<x>_<y>_<alpha>.png`` where ``(x, y)`` is the
    upper-left corner of the box the circle is drawn into and ``alpha`` is a
    blend percentage (0-100).
    """

    def __init__(self, orig_path, diameter, root_dir):
        """Record the ball diameter, output directory and base-image size.

        :param orig_path: path to the base (grass) image.
        :param diameter: diameter in pixels of the ball to draw.
        :param root_dir: directory where generated images are saved.
        """
        self.BALL_DIAMETER = diameter
        self.IMAGE_ROOT_DIRECTORY = root_dir
        self.grass_path = orig_path
        # Open the base image only long enough to read its dimensions;
        # it is too big to keep as a permanent instance attribute.  The
        # context manager closes the file handle promptly instead of
        # relying on `del` and the garbage collector.
        with Image.open(orig_path) as img:
            self.width, self.length = img.size

    def draw_circle(self, left_corner):
        """Draw a solid white circle on a copy of the base image and save it.

        :param left_corner: (x, y) upper-left corner of the circle's
            bounding box.
        :return: path of the saved image (handy for subsequent blending).
        """
        x0, y0 = left_corner
        # Bounding box for the ellipse: a square of BALL_DIAMETER pixels.
        x1 = x0 + self.BALL_DIAMETER
        y1 = y0 + self.BALL_DIAMETER
        # Alpha is 1.00 for a freshly drawn circle, represented as 100.
        new_path = "{0}/{1}_{2}_100.png".format(self.IMAGE_ROOT_DIRECTORY, x0, y0)
        with Image.open(self.grass_path) as img:
            draw_obj = ImageDraw.Draw(img)
            draw_obj.ellipse((x0, y0, x1, y1), fill='white', outline='white')
            img.save(new_path)
        return new_path

    def blend_images(self, background_path, ball_path, left_corner, percentage):
        """Alpha-blend two images and save the result.

        Per the PIL docs (http://effbot.org/imagingbook/image.htm):
        ``out = image1 * (1.0 - alpha) + image2 * alpha``.

        :param background_path: path of the background image (image1).
        :param ball_path: path of the ball image (image2).
        :param left_corner: (x, y) used only for the output file name.
        :param percentage: blend percentage 0-100; converted to alpha 0.0-1.0.
        :return: path of the saved blended image.
        """
        alpha = percentage / 100.0
        final_path = "{0}/{1}_{2}_{3}.png".format(
            self.IMAGE_ROOT_DIRECTORY, left_corner[0], left_corner[1], percentage)
        # Context managers close all three image file handles deterministically.
        with Image.open(background_path) as background, \
                Image.open(ball_path) as ball:
            out = Image.blend(background, ball, alpha)
            out.save(final_path)
        return final_path
| mit |
fpgaminer/listen-to-bitcoin-mining | pyglet/gl/wglext_nv.py | 9 | 71630 | # ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Wrapper for http://developer.download.nvidia.com/opengl/includes/wglext.h
Generated by tools/gengl.py.
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: gengl.py 601 2007-02-04 05:36:59Z Alex.Holkner $'
from ctypes import *
from pyglet.gl.lib import link_WGL as _link_function
from pyglet.gl.lib import c_ptrdiff_t, c_void
# BEGIN GENERATED CONTENT (do not edit below this line)
# This content is generated by tools/gengl.py.
# Wrapper for http://developer.download.nvidia.com/opengl/includes/wglext.h
# H (C:\cygwin\home\Alex\pyglet\tools\wgl.h:7)
# H (C:\cygwin\home\Alex\pyglet\tools\wgl.h:7)
WIN32_LEAN_AND_MEAN = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:40
GLAPI = 0 # http://developer.download.nvidia.com/opengl/includes/wglext.h:51
WGL_WGLEXT_VERSION = 6 # http://developer.download.nvidia.com/opengl/includes/wglext.h:60
# ARB_buffer_region (http://developer.download.nvidia.com/opengl/includes/wglext.h:62)
WGL_FRONT_COLOR_BUFFER_BIT_ARB = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:63
WGL_BACK_COLOR_BUFFER_BIT_ARB = 2 # http://developer.download.nvidia.com/opengl/includes/wglext.h:64
WGL_DEPTH_BUFFER_BIT_ARB = 4 # http://developer.download.nvidia.com/opengl/includes/wglext.h:65
WGL_STENCIL_BUFFER_BIT_ARB = 8 # http://developer.download.nvidia.com/opengl/includes/wglext.h:66
# ARB_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:69)
WGL_SAMPLE_BUFFERS_ARB = 8257 # http://developer.download.nvidia.com/opengl/includes/wglext.h:70
WGL_SAMPLES_ARB = 8258 # http://developer.download.nvidia.com/opengl/includes/wglext.h:71
# ARB_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:74)
# ARB_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:77)
WGL_NUMBER_PIXEL_FORMATS_ARB = 8192 # http://developer.download.nvidia.com/opengl/includes/wglext.h:78
WGL_DRAW_TO_WINDOW_ARB = 8193 # http://developer.download.nvidia.com/opengl/includes/wglext.h:79
WGL_DRAW_TO_BITMAP_ARB = 8194 # http://developer.download.nvidia.com/opengl/includes/wglext.h:80
WGL_ACCELERATION_ARB = 8195 # http://developer.download.nvidia.com/opengl/includes/wglext.h:81
WGL_NEED_PALETTE_ARB = 8196 # http://developer.download.nvidia.com/opengl/includes/wglext.h:82
WGL_NEED_SYSTEM_PALETTE_ARB = 8197 # http://developer.download.nvidia.com/opengl/includes/wglext.h:83
WGL_SWAP_LAYER_BUFFERS_ARB = 8198 # http://developer.download.nvidia.com/opengl/includes/wglext.h:84
WGL_SWAP_METHOD_ARB = 8199 # http://developer.download.nvidia.com/opengl/includes/wglext.h:85
WGL_NUMBER_OVERLAYS_ARB = 8200 # http://developer.download.nvidia.com/opengl/includes/wglext.h:86
WGL_NUMBER_UNDERLAYS_ARB = 8201 # http://developer.download.nvidia.com/opengl/includes/wglext.h:87
WGL_TRANSPARENT_ARB = 8202 # http://developer.download.nvidia.com/opengl/includes/wglext.h:88
WGL_TRANSPARENT_RED_VALUE_ARB = 8247 # http://developer.download.nvidia.com/opengl/includes/wglext.h:89
WGL_TRANSPARENT_GREEN_VALUE_ARB = 8248 # http://developer.download.nvidia.com/opengl/includes/wglext.h:90
WGL_TRANSPARENT_BLUE_VALUE_ARB = 8249 # http://developer.download.nvidia.com/opengl/includes/wglext.h:91
WGL_TRANSPARENT_ALPHA_VALUE_ARB = 8250 # http://developer.download.nvidia.com/opengl/includes/wglext.h:92
WGL_TRANSPARENT_INDEX_VALUE_ARB = 8251 # http://developer.download.nvidia.com/opengl/includes/wglext.h:93
WGL_SHARE_DEPTH_ARB = 8204 # http://developer.download.nvidia.com/opengl/includes/wglext.h:94
WGL_SHARE_STENCIL_ARB = 8205 # http://developer.download.nvidia.com/opengl/includes/wglext.h:95
# NOTE(review): both constants were corrupted to the truncated value 512
# (0x200) in this copy; restored to 0x200E/0x200F per wglext.h — they sit
# between WGL_SHARE_STENCIL_ARB (8205) and WGL_SUPPORT_OPENGL_ARB (8208).
WGL_SHARE_ACCUM_ARB = 8206 # http://developer.download.nvidia.com/opengl/includes/wglext.h:96
WGL_SUPPORT_GDI_ARB = 8207 # http://developer.download.nvidia.com/opengl/includes/wglext.h:97
WGL_SUPPORT_OPENGL_ARB = 8208 # http://developer.download.nvidia.com/opengl/includes/wglext.h:98
WGL_DOUBLE_BUFFER_ARB = 8209 # http://developer.download.nvidia.com/opengl/includes/wglext.h:99
WGL_STEREO_ARB = 8210 # http://developer.download.nvidia.com/opengl/includes/wglext.h:100
WGL_PIXEL_TYPE_ARB = 8211 # http://developer.download.nvidia.com/opengl/includes/wglext.h:101
WGL_COLOR_BITS_ARB = 8212 # http://developer.download.nvidia.com/opengl/includes/wglext.h:102
WGL_RED_BITS_ARB = 8213 # http://developer.download.nvidia.com/opengl/includes/wglext.h:103
WGL_RED_SHIFT_ARB = 8214 # http://developer.download.nvidia.com/opengl/includes/wglext.h:104
WGL_GREEN_BITS_ARB = 8215 # http://developer.download.nvidia.com/opengl/includes/wglext.h:105
WGL_GREEN_SHIFT_ARB = 8216 # http://developer.download.nvidia.com/opengl/includes/wglext.h:106
WGL_BLUE_BITS_ARB = 8217 # http://developer.download.nvidia.com/opengl/includes/wglext.h:107
WGL_BLUE_SHIFT_ARB = 8218 # http://developer.download.nvidia.com/opengl/includes/wglext.h:108
WGL_ALPHA_BITS_ARB = 8219 # http://developer.download.nvidia.com/opengl/includes/wglext.h:109
WGL_ALPHA_SHIFT_ARB = 8220 # http://developer.download.nvidia.com/opengl/includes/wglext.h:110
WGL_ACCUM_BITS_ARB = 8221 # http://developer.download.nvidia.com/opengl/includes/wglext.h:111
# NOTE(review): both constants were corrupted to the truncated value 513
# (0x201) in this copy; restored to 0x201E/0x201F per wglext.h — they sit
# between WGL_ACCUM_BITS_ARB (8221) and WGL_ACCUM_BLUE_BITS_ARB (8224).
WGL_ACCUM_RED_BITS_ARB = 8222 # http://developer.download.nvidia.com/opengl/includes/wglext.h:112
WGL_ACCUM_GREEN_BITS_ARB = 8223 # http://developer.download.nvidia.com/opengl/includes/wglext.h:113
WGL_ACCUM_BLUE_BITS_ARB = 8224 # http://developer.download.nvidia.com/opengl/includes/wglext.h:114
WGL_ACCUM_ALPHA_BITS_ARB = 8225 # http://developer.download.nvidia.com/opengl/includes/wglext.h:115
WGL_DEPTH_BITS_ARB = 8226 # http://developer.download.nvidia.com/opengl/includes/wglext.h:116
WGL_STENCIL_BITS_ARB = 8227 # http://developer.download.nvidia.com/opengl/includes/wglext.h:117
WGL_AUX_BUFFERS_ARB = 8228 # http://developer.download.nvidia.com/opengl/includes/wglext.h:118
WGL_NO_ACCELERATION_ARB = 8229 # http://developer.download.nvidia.com/opengl/includes/wglext.h:119
WGL_GENERIC_ACCELERATION_ARB = 8230 # http://developer.download.nvidia.com/opengl/includes/wglext.h:120
WGL_FULL_ACCELERATION_ARB = 8231 # http://developer.download.nvidia.com/opengl/includes/wglext.h:121
WGL_SWAP_EXCHANGE_ARB = 8232 # http://developer.download.nvidia.com/opengl/includes/wglext.h:122
WGL_SWAP_COPY_ARB = 8233 # http://developer.download.nvidia.com/opengl/includes/wglext.h:123
WGL_SWAP_UNDEFINED_ARB = 8234 # http://developer.download.nvidia.com/opengl/includes/wglext.h:124
WGL_TYPE_RGBA_ARB = 8235 # http://developer.download.nvidia.com/opengl/includes/wglext.h:125
WGL_TYPE_COLORINDEX_ARB = 8236 # http://developer.download.nvidia.com/opengl/includes/wglext.h:126
# ARB_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:129)
ERROR_INVALID_PIXEL_TYPE_ARB = 8259 # http://developer.download.nvidia.com/opengl/includes/wglext.h:130
ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB = 8276 # http://developer.download.nvidia.com/opengl/includes/wglext.h:131
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:134)
WGL_DRAW_TO_PBUFFER_ARB = 8237 # http://developer.download.nvidia.com/opengl/includes/wglext.h:135
# NOTE(review): both constants were corrupted to the truncated value 514
# (0x202) in this copy; restored to 0x202E/0x202F per wglext.h — they sit
# between WGL_DRAW_TO_PBUFFER_ARB (8237) and WGL_MAX_PBUFFER_HEIGHT_ARB (8240).
WGL_MAX_PBUFFER_PIXELS_ARB = 8238 # http://developer.download.nvidia.com/opengl/includes/wglext.h:136
WGL_MAX_PBUFFER_WIDTH_ARB = 8239 # http://developer.download.nvidia.com/opengl/includes/wglext.h:137
WGL_MAX_PBUFFER_HEIGHT_ARB = 8240 # http://developer.download.nvidia.com/opengl/includes/wglext.h:138
WGL_PBUFFER_LARGEST_ARB = 8243 # http://developer.download.nvidia.com/opengl/includes/wglext.h:139
WGL_PBUFFER_WIDTH_ARB = 8244 # http://developer.download.nvidia.com/opengl/includes/wglext.h:140
WGL_PBUFFER_HEIGHT_ARB = 8245 # http://developer.download.nvidia.com/opengl/includes/wglext.h:141
WGL_PBUFFER_LOST_ARB = 8246 # http://developer.download.nvidia.com/opengl/includes/wglext.h:142
# ARB_render_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:145)
WGL_BIND_TO_TEXTURE_RGB_ARB = 8304 # http://developer.download.nvidia.com/opengl/includes/wglext.h:146
WGL_BIND_TO_TEXTURE_RGBA_ARB = 8305 # http://developer.download.nvidia.com/opengl/includes/wglext.h:147
WGL_TEXTURE_FORMAT_ARB = 8306 # http://developer.download.nvidia.com/opengl/includes/wglext.h:148
WGL_TEXTURE_TARGET_ARB = 8307 # http://developer.download.nvidia.com/opengl/includes/wglext.h:149
WGL_MIPMAP_TEXTURE_ARB = 8308 # http://developer.download.nvidia.com/opengl/includes/wglext.h:150
WGL_TEXTURE_RGB_ARB = 8309 # http://developer.download.nvidia.com/opengl/includes/wglext.h:151
WGL_TEXTURE_RGBA_ARB = 8310 # http://developer.download.nvidia.com/opengl/includes/wglext.h:152
WGL_NO_TEXTURE_ARB = 8311 # http://developer.download.nvidia.com/opengl/includes/wglext.h:153
WGL_TEXTURE_CUBE_MAP_ARB = 8312 # http://developer.download.nvidia.com/opengl/includes/wglext.h:154
WGL_TEXTURE_1D_ARB = 8313 # http://developer.download.nvidia.com/opengl/includes/wglext.h:155
WGL_TEXTURE_2D_ARB = 8314 # http://developer.download.nvidia.com/opengl/includes/wglext.h:156
WGL_MIPMAP_LEVEL_ARB = 8315 # http://developer.download.nvidia.com/opengl/includes/wglext.h:157
WGL_CUBE_MAP_FACE_ARB = 8316 # http://developer.download.nvidia.com/opengl/includes/wglext.h:158
WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB = 8317 # http://developer.download.nvidia.com/opengl/includes/wglext.h:159
# NOTE(review): both constants were corrupted to the truncated value 519
# (0x207) in this copy; restored to 0x207E/0x207F per wglext.h — they sit
# between POSITIVE_X (8317) and NEGATIVE_Y (8320) in the cube-map face run.
WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB = 8318 # http://developer.download.nvidia.com/opengl/includes/wglext.h:160
WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB = 8319 # http://developer.download.nvidia.com/opengl/includes/wglext.h:161
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB = 8320 # http://developer.download.nvidia.com/opengl/includes/wglext.h:162
WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB = 8321 # http://developer.download.nvidia.com/opengl/includes/wglext.h:163
WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB = 8322 # http://developer.download.nvidia.com/opengl/includes/wglext.h:164
WGL_FRONT_LEFT_ARB = 8323 # http://developer.download.nvidia.com/opengl/includes/wglext.h:165
WGL_FRONT_RIGHT_ARB = 8324 # http://developer.download.nvidia.com/opengl/includes/wglext.h:166
WGL_BACK_LEFT_ARB = 8325 # http://developer.download.nvidia.com/opengl/includes/wglext.h:167
WGL_BACK_RIGHT_ARB = 8326 # http://developer.download.nvidia.com/opengl/includes/wglext.h:168
WGL_AUX0_ARB = 8327 # http://developer.download.nvidia.com/opengl/includes/wglext.h:169
WGL_AUX1_ARB = 8328 # http://developer.download.nvidia.com/opengl/includes/wglext.h:170
WGL_AUX2_ARB = 8329 # http://developer.download.nvidia.com/opengl/includes/wglext.h:171
WGL_AUX3_ARB = 8330 # http://developer.download.nvidia.com/opengl/includes/wglext.h:172
WGL_AUX4_ARB = 8331 # http://developer.download.nvidia.com/opengl/includes/wglext.h:173
WGL_AUX5_ARB = 8332 # http://developer.download.nvidia.com/opengl/includes/wglext.h:174
WGL_AUX6_ARB = 8333 # http://developer.download.nvidia.com/opengl/includes/wglext.h:175
# NOTE(review): both constants were corrupted to the truncated value 520
# (0x208) in this copy; restored to 0x208E/0x208F per wglext.h — they sit
# between WGL_AUX6_ARB (8333) and WGL_AUX9_ARB (8336) in the AUX buffer run.
WGL_AUX7_ARB = 8334 # http://developer.download.nvidia.com/opengl/includes/wglext.h:176
WGL_AUX8_ARB = 8335 # http://developer.download.nvidia.com/opengl/includes/wglext.h:177
WGL_AUX9_ARB = 8336 # http://developer.download.nvidia.com/opengl/includes/wglext.h:178
# ARB_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:181)
WGL_TYPE_RGBA_FLOAT_ARB = 8608 # http://developer.download.nvidia.com/opengl/includes/wglext.h:182
# EXT_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:185)
ERROR_INVALID_PIXEL_TYPE_EXT = 8259 # http://developer.download.nvidia.com/opengl/includes/wglext.h:186
# EXT_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:189)
WGL_NUMBER_PIXEL_FORMATS_EXT = 8192 # http://developer.download.nvidia.com/opengl/includes/wglext.h:190
WGL_DRAW_TO_WINDOW_EXT = 8193 # http://developer.download.nvidia.com/opengl/includes/wglext.h:191
WGL_DRAW_TO_BITMAP_EXT = 8194 # http://developer.download.nvidia.com/opengl/includes/wglext.h:192
WGL_ACCELERATION_EXT = 8195 # http://developer.download.nvidia.com/opengl/includes/wglext.h:193
WGL_NEED_PALETTE_EXT = 8196 # http://developer.download.nvidia.com/opengl/includes/wglext.h:194
WGL_NEED_SYSTEM_PALETTE_EXT = 8197 # http://developer.download.nvidia.com/opengl/includes/wglext.h:195
WGL_SWAP_LAYER_BUFFERS_EXT = 8198 # http://developer.download.nvidia.com/opengl/includes/wglext.h:196
WGL_SWAP_METHOD_EXT = 8199 # http://developer.download.nvidia.com/opengl/includes/wglext.h:197
WGL_NUMBER_OVERLAYS_EXT = 8200 # http://developer.download.nvidia.com/opengl/includes/wglext.h:198
WGL_NUMBER_UNDERLAYS_EXT = 8201 # http://developer.download.nvidia.com/opengl/includes/wglext.h:199
WGL_TRANSPARENT_EXT = 8202 # http://developer.download.nvidia.com/opengl/includes/wglext.h:200
WGL_TRANSPARENT_VALUE_EXT = 8203 # http://developer.download.nvidia.com/opengl/includes/wglext.h:201
WGL_SHARE_DEPTH_EXT = 8204 # http://developer.download.nvidia.com/opengl/includes/wglext.h:202
WGL_SHARE_STENCIL_EXT = 8205 # http://developer.download.nvidia.com/opengl/includes/wglext.h:203
# NOTE(review): both constants were corrupted to the truncated value 512
# (0x200) in this copy; restored to 0x200E/0x200F per wglext.h (EXT tokens
# mirror the ARB values) — they sit between WGL_SHARE_STENCIL_EXT (8205)
# and WGL_SUPPORT_OPENGL_EXT (8208).
WGL_SHARE_ACCUM_EXT = 8206 # http://developer.download.nvidia.com/opengl/includes/wglext.h:204
WGL_SUPPORT_GDI_EXT = 8207 # http://developer.download.nvidia.com/opengl/includes/wglext.h:205
WGL_SUPPORT_OPENGL_EXT = 8208 # http://developer.download.nvidia.com/opengl/includes/wglext.h:206
WGL_DOUBLE_BUFFER_EXT = 8209 # http://developer.download.nvidia.com/opengl/includes/wglext.h:207
WGL_STEREO_EXT = 8210 # http://developer.download.nvidia.com/opengl/includes/wglext.h:208
WGL_PIXEL_TYPE_EXT = 8211 # http://developer.download.nvidia.com/opengl/includes/wglext.h:209
WGL_COLOR_BITS_EXT = 8212 # http://developer.download.nvidia.com/opengl/includes/wglext.h:210
WGL_RED_BITS_EXT = 8213 # http://developer.download.nvidia.com/opengl/includes/wglext.h:211
WGL_RED_SHIFT_EXT = 8214 # http://developer.download.nvidia.com/opengl/includes/wglext.h:212
WGL_GREEN_BITS_EXT = 8215 # http://developer.download.nvidia.com/opengl/includes/wglext.h:213
WGL_GREEN_SHIFT_EXT = 8216 # http://developer.download.nvidia.com/opengl/includes/wglext.h:214
WGL_BLUE_BITS_EXT = 8217 # http://developer.download.nvidia.com/opengl/includes/wglext.h:215
WGL_BLUE_SHIFT_EXT = 8218 # http://developer.download.nvidia.com/opengl/includes/wglext.h:216
WGL_ALPHA_BITS_EXT = 8219 # http://developer.download.nvidia.com/opengl/includes/wglext.h:217
WGL_ALPHA_SHIFT_EXT = 8220 # http://developer.download.nvidia.com/opengl/includes/wglext.h:218
WGL_ACCUM_BITS_EXT = 8221 # http://developer.download.nvidia.com/opengl/includes/wglext.h:219
# NOTE(review): both constants were corrupted to the truncated value 513
# (0x201) in this copy; restored to 0x201E/0x201F per wglext.h — they sit
# between WGL_ACCUM_BITS_EXT (8221) and WGL_ACCUM_BLUE_BITS_EXT (8224).
WGL_ACCUM_RED_BITS_EXT = 8222 # http://developer.download.nvidia.com/opengl/includes/wglext.h:220
WGL_ACCUM_GREEN_BITS_EXT = 8223 # http://developer.download.nvidia.com/opengl/includes/wglext.h:221
WGL_ACCUM_BLUE_BITS_EXT = 8224 # http://developer.download.nvidia.com/opengl/includes/wglext.h:222
WGL_ACCUM_ALPHA_BITS_EXT = 8225 # http://developer.download.nvidia.com/opengl/includes/wglext.h:223
WGL_DEPTH_BITS_EXT = 8226 # http://developer.download.nvidia.com/opengl/includes/wglext.h:224
WGL_STENCIL_BITS_EXT = 8227 # http://developer.download.nvidia.com/opengl/includes/wglext.h:225
WGL_AUX_BUFFERS_EXT = 8228 # http://developer.download.nvidia.com/opengl/includes/wglext.h:226
WGL_NO_ACCELERATION_EXT = 8229 # http://developer.download.nvidia.com/opengl/includes/wglext.h:227
WGL_GENERIC_ACCELERATION_EXT = 8230 # http://developer.download.nvidia.com/opengl/includes/wglext.h:228
WGL_FULL_ACCELERATION_EXT = 8231 # http://developer.download.nvidia.com/opengl/includes/wglext.h:229
WGL_SWAP_EXCHANGE_EXT = 8232 # http://developer.download.nvidia.com/opengl/includes/wglext.h:230
WGL_SWAP_COPY_EXT = 8233 # http://developer.download.nvidia.com/opengl/includes/wglext.h:231
WGL_SWAP_UNDEFINED_EXT = 8234 # http://developer.download.nvidia.com/opengl/includes/wglext.h:232
WGL_TYPE_RGBA_EXT = 8235 # http://developer.download.nvidia.com/opengl/includes/wglext.h:233
WGL_TYPE_COLORINDEX_EXT = 8236 # http://developer.download.nvidia.com/opengl/includes/wglext.h:234
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:237)
WGL_DRAW_TO_PBUFFER_EXT = 8237 # http://developer.download.nvidia.com/opengl/includes/wglext.h:238
# NOTE(review): both constants were corrupted to the truncated value 514
# (0x202) in this copy; restored to 0x202E/0x202F per wglext.h — they sit
# between WGL_DRAW_TO_PBUFFER_EXT (8237) and WGL_MAX_PBUFFER_HEIGHT_EXT (8240).
WGL_MAX_PBUFFER_PIXELS_EXT = 8238 # http://developer.download.nvidia.com/opengl/includes/wglext.h:239
WGL_MAX_PBUFFER_WIDTH_EXT = 8239 # http://developer.download.nvidia.com/opengl/includes/wglext.h:240
WGL_MAX_PBUFFER_HEIGHT_EXT = 8240 # http://developer.download.nvidia.com/opengl/includes/wglext.h:241
WGL_OPTIMAL_PBUFFER_WIDTH_EXT = 8241 # http://developer.download.nvidia.com/opengl/includes/wglext.h:242
WGL_OPTIMAL_PBUFFER_HEIGHT_EXT = 8242 # http://developer.download.nvidia.com/opengl/includes/wglext.h:243
WGL_PBUFFER_LARGEST_EXT = 8243 # http://developer.download.nvidia.com/opengl/includes/wglext.h:244
WGL_PBUFFER_WIDTH_EXT = 8244 # http://developer.download.nvidia.com/opengl/includes/wglext.h:245
WGL_PBUFFER_HEIGHT_EXT = 8245 # http://developer.download.nvidia.com/opengl/includes/wglext.h:246
# EXT_depth_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:249)
WGL_DEPTH_FLOAT_EXT = 8256 # http://developer.download.nvidia.com/opengl/includes/wglext.h:250
# 3DFX_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:253)
WGL_SAMPLE_BUFFERS_3DFX = 8288 # http://developer.download.nvidia.com/opengl/includes/wglext.h:254
WGL_SAMPLES_3DFX = 8289 # http://developer.download.nvidia.com/opengl/includes/wglext.h:255
# EXT_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:258)
WGL_SAMPLE_BUFFERS_EXT = 8257 # http://developer.download.nvidia.com/opengl/includes/wglext.h:259
WGL_SAMPLES_EXT = 8258 # http://developer.download.nvidia.com/opengl/includes/wglext.h:260
# I3D_digital_video_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:263)
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D = 8272 # http://developer.download.nvidia.com/opengl/includes/wglext.h:264
WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D = 8273 # http://developer.download.nvidia.com/opengl/includes/wglext.h:265
WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D = 8274 # http://developer.download.nvidia.com/opengl/includes/wglext.h:266
WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D = 8275 # http://developer.download.nvidia.com/opengl/includes/wglext.h:267
# I3D_gamma (http://developer.download.nvidia.com/opengl/includes/wglext.h:270)
# NOTE(review): both constants were corrupted to the truncated value 516
# (0x204) in this copy — the same drop-last-hex-digit corruption seen in the
# other duplicated pairs of this file; restored to 0x204E/0x204F per wglext.h.
WGL_GAMMA_TABLE_SIZE_I3D = 8270 # http://developer.download.nvidia.com/opengl/includes/wglext.h:271
WGL_GAMMA_EXCLUDE_DESKTOP_I3D = 8271 # http://developer.download.nvidia.com/opengl/includes/wglext.h:272
# I3D_genlock (http://developer.download.nvidia.com/opengl/includes/wglext.h:275)
WGL_GENLOCK_SOURCE_MULTIVIEW_I3D = 8260 # http://developer.download.nvidia.com/opengl/includes/wglext.h:276
WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D = 8261 # http://developer.download.nvidia.com/opengl/includes/wglext.h:277
WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D = 8262 # http://developer.download.nvidia.com/opengl/includes/wglext.h:278
WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D = 8263 # http://developer.download.nvidia.com/opengl/includes/wglext.h:279
WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D = 8264 # http://developer.download.nvidia.com/opengl/includes/wglext.h:280
WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D = 8265 # http://developer.download.nvidia.com/opengl/includes/wglext.h:281
WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D = 8266 # http://developer.download.nvidia.com/opengl/includes/wglext.h:282
WGL_GENLOCK_SOURCE_EDGE_RISING_I3D = 8267 # http://developer.download.nvidia.com/opengl/includes/wglext.h:283
WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D = 8268 # http://developer.download.nvidia.com/opengl/includes/wglext.h:284
# I3D_image_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:287)
WGL_IMAGE_BUFFER_MIN_ACCESS_I3D = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:288
WGL_IMAGE_BUFFER_LOCK_I3D = 2 # http://developer.download.nvidia.com/opengl/includes/wglext.h:289
# I3D_swap_frame_lock (http://developer.download.nvidia.com/opengl/includes/wglext.h:292)
# NV_render_depth_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:295)
WGL_BIND_TO_TEXTURE_DEPTH_NV = 8355 # http://developer.download.nvidia.com/opengl/includes/wglext.h:296
WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV = 8356 # http://developer.download.nvidia.com/opengl/includes/wglext.h:297
WGL_DEPTH_TEXTURE_FORMAT_NV = 8357 # http://developer.download.nvidia.com/opengl/includes/wglext.h:298
WGL_TEXTURE_DEPTH_COMPONENT_NV = 8358 # http://developer.download.nvidia.com/opengl/includes/wglext.h:299
WGL_DEPTH_COMPONENT_NV = 8359 # http://developer.download.nvidia.com/opengl/includes/wglext.h:300
# NV_render_texture_rectangle (http://developer.download.nvidia.com/opengl/includes/wglext.h:303)
WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV = 8352 # http://developer.download.nvidia.com/opengl/includes/wglext.h:304
WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV = 8353 # http://developer.download.nvidia.com/opengl/includes/wglext.h:305
WGL_TEXTURE_RECTANGLE_NV = 8354 # http://developer.download.nvidia.com/opengl/includes/wglext.h:306
# ATI_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:309)
WGL_TYPE_RGBA_FLOAT_ATI = 8608 # http://developer.download.nvidia.com/opengl/includes/wglext.h:310
WGL_RGBA_FLOAT_MODE_ATI = 34848 # http://developer.download.nvidia.com/opengl/includes/wglext.h:311
WGL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI = 34869 # http://developer.download.nvidia.com/opengl/includes/wglext.h:312
# NV_float_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:315)
WGL_FLOAT_COMPONENTS_NV = 8368 # http://developer.download.nvidia.com/opengl/includes/wglext.h:316
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV = 8369 # http://developer.download.nvidia.com/opengl/includes/wglext.h:317
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV = 8370 # http://developer.download.nvidia.com/opengl/includes/wglext.h:318
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV = 8371 # http://developer.download.nvidia.com/opengl/includes/wglext.h:319
WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV = 8372 # http://developer.download.nvidia.com/opengl/includes/wglext.h:320
WGL_TEXTURE_FLOAT_R_NV = 8373 # http://developer.download.nvidia.com/opengl/includes/wglext.h:321
WGL_TEXTURE_FLOAT_RG_NV = 8374 # http://developer.download.nvidia.com/opengl/includes/wglext.h:322
WGL_TEXTURE_FLOAT_RGB_NV = 8375 # http://developer.download.nvidia.com/opengl/includes/wglext.h:323
WGL_TEXTURE_FLOAT_RGBA_NV = 8376 # http://developer.download.nvidia.com/opengl/includes/wglext.h:324
# NV_swap_group (http://developer.download.nvidia.com/opengl/includes/wglext.h:327)
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:330)
WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV = 8400 # http://developer.download.nvidia.com/opengl/includes/wglext.h:331
WGL_ERROR_MISSING_AFFINITY_MASK_NV = 8401 # http://developer.download.nvidia.com/opengl/includes/wglext.h:332
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:338)
HANDLE = POINTER(None) # C:\cygwin\home\Alex\pyglet\tools\wgl.h:58
HPBUFFERARB = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:339
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:341)
HPBUFFEREXT = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:342
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:345)
HGPUNV = HANDLE # http://developer.download.nvidia.com/opengl/includes/wglext.h:346
class struct__GPU_DEVICE(Structure):
    # ctypes mirror of the WGL _GPU_DEVICE struct (NV_gpu_affinity).
    # Only __slots__ is declared here; _fields_ is attached later in this
    # file, after the DWORD/CHAR/RECT ctypes aliases it needs are defined.
    __slots__ = [
    'cb',
    'DeviceName',
    'DeviceString',
    'Flags',
    'rcVirtualScreen',
    ]
DWORD = c_ulong # C:\cygwin\home\Alex\pyglet\tools\wgl.h:54
CHAR = c_char # C:\cygwin\home\Alex\pyglet\tools\wgl.h:47
class struct_tagRECT(Structure):
__slots__ = [
'left',
'top',
'right',
'bottom',
]
LONG = c_long # C:\cygwin\home\Alex\pyglet\tools\wgl.h:53
struct_tagRECT._fields_ = [
('left', LONG),
('top', LONG),
('right', LONG),
('bottom', LONG),
]
RECT = struct_tagRECT # C:\cygwin\home\Alex\pyglet\tools\wgl.h:200
struct__GPU_DEVICE._fields_ = [
('cb', DWORD),
('DeviceName', CHAR * 32),
('DeviceString', CHAR * 128),
('Flags', DWORD),
('rcVirtualScreen', RECT),
]
GPU_DEVICE = struct__GPU_DEVICE # http://developer.download.nvidia.com/opengl/includes/wglext.h:353
PGPU_DEVICE = POINTER(struct__GPU_DEVICE) # http://developer.download.nvidia.com/opengl/includes/wglext.h:353
# ===========================================================================
# ARB extension bindings.  Each entry point is bound via _link_function
# (defined earlier in this file; presumably resolves the symbol lazily
# through wglGetProcAddress the first time it is called -- confirm against
# the helper's definition).  The PFN*PROC CFUNCTYPE aliases mirror the
# function-pointer typedefs from wglext.h for callers that want to cast.
# ===========================================================================
# ARB_buffer_region (http://developer.download.nvidia.com/opengl/includes/wglext.h:356)
WGL_ARB_buffer_region = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:357
HDC = HANDLE # C:\cygwin\home\Alex\pyglet\tools\wgl.h:61
UINT = c_uint # C:\cygwin\home\Alex\pyglet\tools\wgl.h:50
# http://developer.download.nvidia.com/opengl/includes/wglext.h:359
wglCreateBufferRegionARB = _link_function('wglCreateBufferRegionARB', HANDLE, [HDC, c_int, UINT], 'ARB_buffer_region')
VOID = None # C:\cygwin\home\Alex\pyglet\tools\wgl.h:45
# http://developer.download.nvidia.com/opengl/includes/wglext.h:360
wglDeleteBufferRegionARB = _link_function('wglDeleteBufferRegionARB', VOID, [HANDLE], 'ARB_buffer_region')
BOOL = c_long # C:\cygwin\home\Alex\pyglet\tools\wgl.h:52
# http://developer.download.nvidia.com/opengl/includes/wglext.h:361
wglSaveBufferRegionARB = _link_function('wglSaveBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:362
wglRestoreBufferRegionARB = _link_function('wglRestoreBufferRegionARB', BOOL, [HANDLE, c_int, c_int, c_int, c_int, c_int, c_int], 'ARB_buffer_region')
PFNWGLCREATEBUFFERREGIONARBPROC = CFUNCTYPE(HANDLE, HDC, c_int, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:364
PFNWGLDELETEBUFFERREGIONARBPROC = CFUNCTYPE(VOID, HANDLE) # http://developer.download.nvidia.com/opengl/includes/wglext.h:365
PFNWGLSAVEBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:366
PFNWGLRESTOREBUFFERREGIONARBPROC = CFUNCTYPE(BOOL, HANDLE, c_int, c_int, c_int, c_int, c_int, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:367
# ARB_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:370)
WGL_ARB_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:371
# ARB_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:374)
WGL_ARB_extensions_string = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:375
# http://developer.download.nvidia.com/opengl/includes/wglext.h:377
wglGetExtensionsStringARB = _link_function('wglGetExtensionsStringARB', c_char_p, [HDC], 'ARB_extensions_string')
PFNWGLGETEXTENSIONSSTRINGARBPROC = CFUNCTYPE(c_char_p, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:379
# ARB_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:382)
WGL_ARB_pixel_format = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:383
# http://developer.download.nvidia.com/opengl/includes/wglext.h:385
wglGetPixelFormatAttribivARB = _link_function('wglGetPixelFormatAttribivARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'ARB_pixel_format')
FLOAT = c_float # C:\cygwin\home\Alex\pyglet\tools\wgl.h:55
# http://developer.download.nvidia.com/opengl/includes/wglext.h:386
wglGetPixelFormatAttribfvARB = _link_function('wglGetPixelFormatAttribfvARB', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'ARB_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:387
wglChoosePixelFormatARB = _link_function('wglChoosePixelFormatARB', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'ARB_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:389
PFNWGLGETPIXELFORMATATTRIBFVARBPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:390
PFNWGLCHOOSEPIXELFORMATARBPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:391
# ARB_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:394)
WGL_ARB_make_current_read = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:395
HGLRC = HANDLE # C:\cygwin\home\Alex\pyglet\tools\wgl.h:60
# http://developer.download.nvidia.com/opengl/includes/wglext.h:397
wglMakeContextCurrentARB = _link_function('wglMakeContextCurrentARB', BOOL, [HDC, HDC, HGLRC], 'ARB_make_current_read')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:398
wglGetCurrentReadDCARB = _link_function('wglGetCurrentReadDCARB', HDC, [], 'ARB_make_current_read')
PFNWGLMAKECONTEXTCURRENTARBPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:400
PFNWGLGETCURRENTREADDCARBPROC = CFUNCTYPE(HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:401
# ARB_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:404)
WGL_ARB_pbuffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:405
# http://developer.download.nvidia.com/opengl/includes/wglext.h:407
wglCreatePbufferARB = _link_function('wglCreatePbufferARB', HPBUFFERARB, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:408
wglGetPbufferDCARB = _link_function('wglGetPbufferDCARB', HDC, [HPBUFFERARB], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:409
wglReleasePbufferDCARB = _link_function('wglReleasePbufferDCARB', c_int, [HPBUFFERARB, HDC], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:410
wglDestroyPbufferARB = _link_function('wglDestroyPbufferARB', BOOL, [HPBUFFERARB], 'ARB_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:411
wglQueryPbufferARB = _link_function('wglQueryPbufferARB', BOOL, [HPBUFFERARB, c_int, POINTER(c_int)], 'ARB_pbuffer')
PFNWGLCREATEPBUFFERARBPROC = CFUNCTYPE(HPBUFFERARB, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:413
PFNWGLGETPBUFFERDCARBPROC = CFUNCTYPE(HDC, HPBUFFERARB) # http://developer.download.nvidia.com/opengl/includes/wglext.h:414
PFNWGLRELEASEPBUFFERDCARBPROC = CFUNCTYPE(c_int, HPBUFFERARB, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:415
PFNWGLDESTROYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB) # http://developer.download.nvidia.com/opengl/includes/wglext.h:416
PFNWGLQUERYPBUFFERARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:417
# ARB_render_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:420)
WGL_ARB_render_texture = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:421
# http://developer.download.nvidia.com/opengl/includes/wglext.h:423
wglBindTexImageARB = _link_function('wglBindTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:424
wglReleaseTexImageARB = _link_function('wglReleaseTexImageARB', BOOL, [HPBUFFERARB, c_int], 'ARB_render_texture')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:425
wglSetPbufferAttribARB = _link_function('wglSetPbufferAttribARB', BOOL, [HPBUFFERARB, POINTER(c_int)], 'ARB_render_texture')
PFNWGLBINDTEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:427
PFNWGLRELEASETEXIMAGEARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:428
PFNWGLSETPBUFFERATTRIBARBPROC = CFUNCTYPE(BOOL, HPBUFFERARB, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:429
# ARB_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:432)
WGL_ARB_pixel_format_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:433
# EXT_display_color_table (http://developer.download.nvidia.com/opengl/includes/wglext.h:436)
WGL_EXT_display_color_table = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:437
GLboolean = c_ubyte # C:\cygwin\home\Alex\pyglet\tools\wgl.h:18
GLushort = c_ushort # C:\cygwin\home\Alex\pyglet\tools\wgl.h:25
# http://developer.download.nvidia.com/opengl/includes/wglext.h:439
wglCreateDisplayColorTableEXT = _link_function('wglCreateDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
GLuint = c_uint # C:\cygwin\home\Alex\pyglet\tools\wgl.h:26
# http://developer.download.nvidia.com/opengl/includes/wglext.h:440
wglLoadDisplayColorTableEXT = _link_function('wglLoadDisplayColorTableEXT', GLboolean, [POINTER(GLushort), GLuint], 'EXT_display_color_table')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:441
wglBindDisplayColorTableEXT = _link_function('wglBindDisplayColorTableEXT', GLboolean, [GLushort], 'EXT_display_color_table')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:442
wglDestroyDisplayColorTableEXT = _link_function('wglDestroyDisplayColorTableEXT', VOID, [GLushort], 'EXT_display_color_table')
PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:444
PFNWGLLOADDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, POINTER(GLushort), GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:445
PFNWGLBINDDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(GLboolean, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:446
PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC = CFUNCTYPE(VOID, GLushort) # http://developer.download.nvidia.com/opengl/includes/wglext.h:447
# EXT_extensions_string (http://developer.download.nvidia.com/opengl/includes/wglext.h:450)
WGL_EXT_extensions_string = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:451
# http://developer.download.nvidia.com/opengl/includes/wglext.h:453
wglGetExtensionsStringEXT = _link_function('wglGetExtensionsStringEXT', c_char_p, [], 'EXT_extensions_string')
PFNWGLGETEXTENSIONSSTRINGEXTPROC = CFUNCTYPE(c_char_p) # http://developer.download.nvidia.com/opengl/includes/wglext.h:455
# EXT_make_current_read (http://developer.download.nvidia.com/opengl/includes/wglext.h:458)
WGL_EXT_make_current_read = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:459
# http://developer.download.nvidia.com/opengl/includes/wglext.h:461
wglMakeContextCurrentEXT = _link_function('wglMakeContextCurrentEXT', BOOL, [HDC, HDC, HGLRC], 'EXT_make_current_read')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:462
wglGetCurrentReadDCEXT = _link_function('wglGetCurrentReadDCEXT', HDC, [], 'EXT_make_current_read')
PFNWGLMAKECONTEXTCURRENTEXTPROC = CFUNCTYPE(BOOL, HDC, HDC, HGLRC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:464
PFNWGLGETCURRENTREADDCEXTPROC = CFUNCTYPE(HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:465
# EXT_pbuffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:468)
WGL_EXT_pbuffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:469
# http://developer.download.nvidia.com/opengl/includes/wglext.h:471
wglCreatePbufferEXT = _link_function('wglCreatePbufferEXT', HPBUFFEREXT, [HDC, c_int, c_int, c_int, POINTER(c_int)], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:472
wglGetPbufferDCEXT = _link_function('wglGetPbufferDCEXT', HDC, [HPBUFFEREXT], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:473
wglReleasePbufferDCEXT = _link_function('wglReleasePbufferDCEXT', c_int, [HPBUFFEREXT, HDC], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:474
wglDestroyPbufferEXT = _link_function('wglDestroyPbufferEXT', BOOL, [HPBUFFEREXT], 'EXT_pbuffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:475
wglQueryPbufferEXT = _link_function('wglQueryPbufferEXT', BOOL, [HPBUFFEREXT, c_int, POINTER(c_int)], 'EXT_pbuffer')
PFNWGLCREATEPBUFFEREXTPROC = CFUNCTYPE(HPBUFFEREXT, HDC, c_int, c_int, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:477
PFNWGLGETPBUFFERDCEXTPROC = CFUNCTYPE(HDC, HPBUFFEREXT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:478
PFNWGLRELEASEPBUFFERDCEXTPROC = CFUNCTYPE(c_int, HPBUFFEREXT, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:479
PFNWGLDESTROYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:480
PFNWGLQUERYPBUFFEREXTPROC = CFUNCTYPE(BOOL, HPBUFFEREXT, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:481
# EXT_pixel_format (http://developer.download.nvidia.com/opengl/includes/wglext.h:484)
WGL_EXT_pixel_format = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:485
# http://developer.download.nvidia.com/opengl/includes/wglext.h:487
wglGetPixelFormatAttribivEXT = _link_function('wglGetPixelFormatAttribivEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)], 'EXT_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:488
wglGetPixelFormatAttribfvEXT = _link_function('wglGetPixelFormatAttribfvEXT', BOOL, [HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)], 'EXT_pixel_format')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:489
wglChoosePixelFormatEXT = _link_function('wglChoosePixelFormatEXT', BOOL, [HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)], 'EXT_pixel_format')
PFNWGLGETPIXELFORMATATTRIBIVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:491
PFNWGLGETPIXELFORMATATTRIBFVEXTPROC = CFUNCTYPE(BOOL, HDC, c_int, c_int, UINT, POINTER(c_int), POINTER(FLOAT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:492
PFNWGLCHOOSEPIXELFORMATEXTPROC = CFUNCTYPE(BOOL, HDC, POINTER(c_int), POINTER(FLOAT), UINT, POINTER(c_int), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:493
# EXT_swap_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:496)
WGL_EXT_swap_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:497
# http://developer.download.nvidia.com/opengl/includes/wglext.h:499
wglSwapIntervalEXT = _link_function('wglSwapIntervalEXT', BOOL, [c_int], 'EXT_swap_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:500
wglGetSwapIntervalEXT = _link_function('wglGetSwapIntervalEXT', c_int, [], 'EXT_swap_control')
PFNWGLSWAPINTERVALEXTPROC = CFUNCTYPE(BOOL, c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:502
PFNWGLGETSWAPINTERVALEXTPROC = CFUNCTYPE(c_int) # http://developer.download.nvidia.com/opengl/includes/wglext.h:503
# EXT_depth_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:506)
WGL_EXT_depth_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:507
# NV_vertex_array_range (http://developer.download.nvidia.com/opengl/includes/wglext.h:510)
# wglAllocateMemoryNV returns a raw void* block of driver-managed memory;
# wglFreeMemoryNV releases it.  Bound lazily via _link_function like every
# other entry point in this file.
WGL_NV_vertex_array_range = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:511
GLsizei = c_int # C:\cygwin\home\Alex\pyglet\tools\wgl.h:23
GLfloat = c_float # C:\cygwin\home\Alex\pyglet\tools\wgl.h:27
# http://developer.download.nvidia.com/opengl/includes/wglext.h:513
# BUG FIX: the generator emitted POINTER(c_void), but ctypes has no c_void
# type, so evaluating that expression raised NameError.  POINTER(None) is
# ctypes' spelling for void* (it resolves to c_void_p) and is the convention
# already used elsewhere in this file (e.g. HANDLE, wglFreeMemoryNV).
wglAllocateMemoryNV = _link_function('wglAllocateMemoryNV', POINTER(None), [GLsizei, GLfloat, GLfloat, GLfloat], 'NV_vertex_array_range')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:514
wglFreeMemoryNV = _link_function('wglFreeMemoryNV', None, [POINTER(None)], 'NV_vertex_array_range')
# Same NameError fix (POINTER(c_void) -> POINTER(None)) for the typedef alias.
PFNWGLALLOCATEMEMORYNVPROC = CFUNCTYPE(POINTER(None), GLsizei, GLfloat, GLfloat, GLfloat) # http://developer.download.nvidia.com/opengl/includes/wglext.h:516
PFNWGLFREEMEMORYNVPROC = CFUNCTYPE(None, POINTER(None)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:517
# ===========================================================================
# 3DFX/EXT multisample presence flags, OML_sync_control (frame-timing /
# swap-scheduling), and the I3D family (digital video, gamma tables, genlock).
# Same binding pattern as the sections above.
# ===========================================================================
# 3DFX_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:520)
WGL_3DFX_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:521
# EXT_multisample (http://developer.download.nvidia.com/opengl/includes/wglext.h:524)
WGL_EXT_multisample = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:525
# OML_sync_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:528)
WGL_OML_sync_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:529
INT64 = c_longlong # C:\cygwin\home\Alex\pyglet\tools\wgl.h:42
# http://developer.download.nvidia.com/opengl/includes/wglext.h:531
wglGetSyncValuesOML = _link_function('wglGetSyncValuesOML', BOOL, [HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
INT32 = c_int # C:\cygwin\home\Alex\pyglet\tools\wgl.h:35
# http://developer.download.nvidia.com/opengl/includes/wglext.h:532
wglGetMscRateOML = _link_function('wglGetMscRateOML', BOOL, [HDC, POINTER(INT32), POINTER(INT32)], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:533
wglSwapBuffersMscOML = _link_function('wglSwapBuffersMscOML', INT64, [HDC, INT64, INT64, INT64], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:534
wglSwapLayerBuffersMscOML = _link_function('wglSwapLayerBuffersMscOML', INT64, [HDC, c_int, INT64, INT64, INT64], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:535
wglWaitForMscOML = _link_function('wglWaitForMscOML', BOOL, [HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:536
wglWaitForSbcOML = _link_function('wglWaitForSbcOML', BOOL, [HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)], 'OML_sync_control')
PFNWGLGETSYNCVALUESOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:538
PFNWGLGETMSCRATEOMLPROC = CFUNCTYPE(BOOL, HDC, POINTER(INT32), POINTER(INT32)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:539
PFNWGLSWAPBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, INT64, INT64, INT64) # http://developer.download.nvidia.com/opengl/includes/wglext.h:540
PFNWGLSWAPLAYERBUFFERSMSCOMLPROC = CFUNCTYPE(INT64, HDC, c_int, INT64, INT64, INT64) # http://developer.download.nvidia.com/opengl/includes/wglext.h:541
PFNWGLWAITFORMSCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, INT64, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:542
PFNWGLWAITFORSBCOMLPROC = CFUNCTYPE(BOOL, HDC, INT64, POINTER(INT64), POINTER(INT64), POINTER(INT64)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:543
# I3D_digital_video_control (http://developer.download.nvidia.com/opengl/includes/wglext.h:546)
WGL_I3D_digital_video_control = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:547
# http://developer.download.nvidia.com/opengl/includes/wglext.h:549
wglGetDigitalVideoParametersI3D = _link_function('wglGetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:550
wglSetDigitalVideoParametersI3D = _link_function('wglSetDigitalVideoParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_digital_video_control')
PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:552
PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:553
# I3D_gamma (http://developer.download.nvidia.com/opengl/includes/wglext.h:556)
WGL_I3D_gamma = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:557
# http://developer.download.nvidia.com/opengl/includes/wglext.h:559
wglGetGammaTableParametersI3D = _link_function('wglGetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:560
wglSetGammaTableParametersI3D = _link_function('wglSetGammaTableParametersI3D', BOOL, [HDC, c_int, POINTER(c_int)], 'I3D_gamma')
USHORT = c_ushort # C:\cygwin\home\Alex\pyglet\tools\wgl.h:49
# http://developer.download.nvidia.com/opengl/includes/wglext.h:561
wglGetGammaTableI3D = _link_function('wglGetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:562
wglSetGammaTableI3D = _link_function('wglSetGammaTableI3D', BOOL, [HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)], 'I3D_gamma')
PFNWGLGETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:564
PFNWGLSETGAMMATABLEPARAMETERSI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(c_int)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:565
PFNWGLGETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:566
PFNWGLSETGAMMATABLEI3DPROC = CFUNCTYPE(BOOL, HDC, c_int, POINTER(USHORT), POINTER(USHORT), POINTER(USHORT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:567
# I3D_genlock (http://developer.download.nvidia.com/opengl/includes/wglext.h:570)
WGL_I3D_genlock = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:571
# http://developer.download.nvidia.com/opengl/includes/wglext.h:573
wglEnableGenlockI3D = _link_function('wglEnableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:574
wglDisableGenlockI3D = _link_function('wglDisableGenlockI3D', BOOL, [HDC], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:575
wglIsEnabledGenlockI3D = _link_function('wglIsEnabledGenlockI3D', BOOL, [HDC, POINTER(BOOL)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:576
wglGenlockSourceI3D = _link_function('wglGenlockSourceI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:577
wglGetGenlockSourceI3D = _link_function('wglGetGenlockSourceI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:578
wglGenlockSourceEdgeI3D = _link_function('wglGenlockSourceEdgeI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:579
wglGetGenlockSourceEdgeI3D = _link_function('wglGetGenlockSourceEdgeI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:580
wglGenlockSampleRateI3D = _link_function('wglGenlockSampleRateI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:581
wglGetGenlockSampleRateI3D = _link_function('wglGetGenlockSampleRateI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:582
wglGenlockSourceDelayI3D = _link_function('wglGenlockSourceDelayI3D', BOOL, [HDC, UINT], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:583
wglGetGenlockSourceDelayI3D = _link_function('wglGetGenlockSourceDelayI3D', BOOL, [HDC, POINTER(UINT)], 'I3D_genlock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:584
wglQueryGenlockMaxSourceDelayI3D = _link_function('wglQueryGenlockMaxSourceDelayI3D', BOOL, [HDC, POINTER(UINT), POINTER(UINT)], 'I3D_genlock')
PFNWGLENABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:586
PFNWGLDISABLEGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:587
PFNWGLISENABLEDGENLOCKI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:588
PFNWGLGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:589
PFNWGLGETGENLOCKSOURCEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:590
PFNWGLGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:591
PFNWGLGETGENLOCKSOURCEEDGEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:592
PFNWGLGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:593
PFNWGLGETGENLOCKSAMPLERATEI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:594
PFNWGLGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:595
PFNWGLGETGENLOCKSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:596
PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(UINT), POINTER(UINT)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:597
# ===========================================================================
# I3D image-buffer / swap-frame extensions, ATI/NV pixel-format flags, and
# NV_swap_group (cross-window/cross-system swap synchronization).
# ===========================================================================
# I3D_image_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:600)
WGL_I3D_image_buffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:601
# NOTE: POINTER(None) is ctypes' spelling for void* (resolves to c_void_p).
LPVOID = POINTER(None) # C:\cygwin\home\Alex\pyglet\tools\wgl.h:45
# http://developer.download.nvidia.com/opengl/includes/wglext.h:603
wglCreateImageBufferI3D = _link_function('wglCreateImageBufferI3D', LPVOID, [HDC, DWORD, UINT], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:604
wglDestroyImageBufferI3D = _link_function('wglDestroyImageBufferI3D', BOOL, [HDC, LPVOID], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:605
wglAssociateImageBufferEventsI3D = _link_function('wglAssociateImageBufferEventsI3D', BOOL, [HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT], 'I3D_image_buffer')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:606
wglReleaseImageBufferEventsI3D = _link_function('wglReleaseImageBufferEventsI3D', BOOL, [HDC, POINTER(LPVOID), UINT], 'I3D_image_buffer')
PFNWGLCREATEIMAGEBUFFERI3DPROC = CFUNCTYPE(LPVOID, HDC, DWORD, UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:608
PFNWGLDESTROYIMAGEBUFFERI3DPROC = CFUNCTYPE(BOOL, HDC, LPVOID) # http://developer.download.nvidia.com/opengl/includes/wglext.h:609
PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(HANDLE), POINTER(LPVOID), POINTER(DWORD), UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:610
PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC = CFUNCTYPE(BOOL, HDC, POINTER(LPVOID), UINT) # http://developer.download.nvidia.com/opengl/includes/wglext.h:611
# I3D_swap_frame_lock (http://developer.download.nvidia.com/opengl/includes/wglext.h:614)
WGL_I3D_swap_frame_lock = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:615
# http://developer.download.nvidia.com/opengl/includes/wglext.h:617
wglEnableFrameLockI3D = _link_function('wglEnableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:618
wglDisableFrameLockI3D = _link_function('wglDisableFrameLockI3D', BOOL, [], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:619
wglIsEnabledFrameLockI3D = _link_function('wglIsEnabledFrameLockI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:620
wglQueryFrameLockMasterI3D = _link_function('wglQueryFrameLockMasterI3D', BOOL, [POINTER(BOOL)], 'I3D_swap_frame_lock')
PFNWGLENABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:622
PFNWGLDISABLEFRAMELOCKI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:623
PFNWGLISENABLEDFRAMELOCKI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:624
PFNWGLQUERYFRAMELOCKMASTERI3DPROC = CFUNCTYPE(BOOL, POINTER(BOOL)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:625
# I3D_swap_frame_usage (http://developer.download.nvidia.com/opengl/includes/wglext.h:628)
WGL_I3D_swap_frame_usage = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:629
# http://developer.download.nvidia.com/opengl/includes/wglext.h:631
wglGetFrameUsageI3D = _link_function('wglGetFrameUsageI3D', BOOL, [POINTER(c_float)], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:632
wglBeginFrameTrackingI3D = _link_function('wglBeginFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:633
wglEndFrameTrackingI3D = _link_function('wglEndFrameTrackingI3D', BOOL, [], 'I3D_swap_frame_usage')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:634
wglQueryFrameTrackingI3D = _link_function('wglQueryFrameTrackingI3D', BOOL, [POINTER(DWORD), POINTER(DWORD), POINTER(c_float)], 'I3D_swap_frame_usage')
PFNWGLGETFRAMEUSAGEI3DPROC = CFUNCTYPE(BOOL, POINTER(c_float)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:636
PFNWGLBEGINFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:637
PFNWGLENDFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL) # http://developer.download.nvidia.com/opengl/includes/wglext.h:638
PFNWGLQUERYFRAMETRACKINGI3DPROC = CFUNCTYPE(BOOL, POINTER(DWORD), POINTER(DWORD), POINTER(c_float)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:639
# ATI_pixel_format_float (http://developer.download.nvidia.com/opengl/includes/wglext.h:642)
WGL_ATI_pixel_format_float = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:643
# NV_render_depth_texture (http://developer.download.nvidia.com/opengl/includes/wglext.h:646)
WGL_NV_render_depth_texture = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:647
# NV_render_texture_rectangle (http://developer.download.nvidia.com/opengl/includes/wglext.h:650)
WGL_NV_render_texture_rectangle = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:651
# NV_float_buffer (http://developer.download.nvidia.com/opengl/includes/wglext.h:654)
WGL_NV_float_buffer = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:655
# NV_swap_group (http://developer.download.nvidia.com/opengl/includes/wglext.h:658)
WGL_NV_swap_group = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:659
# http://developer.download.nvidia.com/opengl/includes/wglext.h:661
wglJoinSwapGroupNV = _link_function('wglJoinSwapGroupNV', BOOL, [HDC, GLuint], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:662
wglBindSwapBarrierNV = _link_function('wglBindSwapBarrierNV', BOOL, [GLuint, GLuint], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:663
wglQuerySwapGroupNV = _link_function('wglQuerySwapGroupNV', BOOL, [HDC, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:664
wglQueryMaxSwapGroupsNV = _link_function('wglQueryMaxSwapGroupsNV', BOOL, [HDC, POINTER(GLuint), POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:665
wglQueryFrameCountNV = _link_function('wglQueryFrameCountNV', BOOL, [HDC, POINTER(GLuint)], 'NV_swap_group')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:666
wglResetFrameCountNV = _link_function('wglResetFrameCountNV', BOOL, [HDC], 'NV_swap_group')
PFNWGLJOINSWAPGROUPNVPROC = CFUNCTYPE(BOOL, HDC, GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:668
PFNWGLBINDSWAPBARRIERNVPROC = CFUNCTYPE(BOOL, GLuint, GLuint) # http://developer.download.nvidia.com/opengl/includes/wglext.h:669
PFNWGLQUERYSWAPGROUPNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint), POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:670
PFNWGLQUERYMAXSWAPGROUPSNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint), POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:671
PFNWGLQUERYFRAMECOUNTNVPROC = CFUNCTYPE(BOOL, HDC, POINTER(GLuint)) # http://developer.download.nvidia.com/opengl/includes/wglext.h:672
PFNWGLRESETFRAMECOUNTNVPROC = CFUNCTYPE(BOOL, HDC) # http://developer.download.nvidia.com/opengl/includes/wglext.h:673
# NV_gpu_affinity (http://developer.download.nvidia.com/opengl/includes/wglext.h:676)
WGL_NV_gpu_affinity = 1 # http://developer.download.nvidia.com/opengl/includes/wglext.h:677
# http://developer.download.nvidia.com/opengl/includes/wglext.h:679
wglEnumGpusNV = _link_function('wglEnumGpusNV', BOOL, [UINT, POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:680
wglEnumGpuDevicesNV = _link_function('wglEnumGpuDevicesNV', BOOL, [HGPUNV, UINT, PGPU_DEVICE], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:681
wglCreateAffinityDCNV = _link_function('wglCreateAffinityDCNV', HDC, [POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:682
wglEnumGpusFromAffinityDCNV = _link_function('wglEnumGpusFromAffinityDCNV', BOOL, [HDC, UINT, POINTER(HGPUNV)], 'NV_gpu_affinity')
# http://developer.download.nvidia.com/opengl/includes/wglext.h:683
wglDeleteDCNV = _link_function('wglDeleteDCNV', BOOL, [HDC], 'NV_gpu_affinity')
__all__ = ['WIN32_LEAN_AND_MEAN', 'GLAPI', 'WGL_WGLEXT_VERSION',
'WGL_FRONT_COLOR_BUFFER_BIT_ARB', 'WGL_BACK_COLOR_BUFFER_BIT_ARB',
'WGL_DEPTH_BUFFER_BIT_ARB', 'WGL_STENCIL_BUFFER_BIT_ARB',
'WGL_SAMPLE_BUFFERS_ARB', 'WGL_SAMPLES_ARB', 'WGL_NUMBER_PIXEL_FORMATS_ARB',
'WGL_DRAW_TO_WINDOW_ARB', 'WGL_DRAW_TO_BITMAP_ARB', 'WGL_ACCELERATION_ARB',
'WGL_NEED_PALETTE_ARB', 'WGL_NEED_SYSTEM_PALETTE_ARB',
'WGL_SWAP_LAYER_BUFFERS_ARB', 'WGL_SWAP_METHOD_ARB',
'WGL_NUMBER_OVERLAYS_ARB', 'WGL_NUMBER_UNDERLAYS_ARB', 'WGL_TRANSPARENT_ARB',
'WGL_TRANSPARENT_RED_VALUE_ARB', 'WGL_TRANSPARENT_GREEN_VALUE_ARB',
'WGL_TRANSPARENT_BLUE_VALUE_ARB', 'WGL_TRANSPARENT_ALPHA_VALUE_ARB',
'WGL_TRANSPARENT_INDEX_VALUE_ARB', 'WGL_SHARE_DEPTH_ARB',
'WGL_SHARE_STENCIL_ARB', 'WGL_SHARE_ACCUM_ARB', 'WGL_SUPPORT_GDI_ARB',
'WGL_SUPPORT_OPENGL_ARB', 'WGL_DOUBLE_BUFFER_ARB', 'WGL_STEREO_ARB',
'WGL_PIXEL_TYPE_ARB', 'WGL_COLOR_BITS_ARB', 'WGL_RED_BITS_ARB',
'WGL_RED_SHIFT_ARB', 'WGL_GREEN_BITS_ARB', 'WGL_GREEN_SHIFT_ARB',
'WGL_BLUE_BITS_ARB', 'WGL_BLUE_SHIFT_ARB', 'WGL_ALPHA_BITS_ARB',
'WGL_ALPHA_SHIFT_ARB', 'WGL_ACCUM_BITS_ARB', 'WGL_ACCUM_RED_BITS_ARB',
'WGL_ACCUM_GREEN_BITS_ARB', 'WGL_ACCUM_BLUE_BITS_ARB',
'WGL_ACCUM_ALPHA_BITS_ARB', 'WGL_DEPTH_BITS_ARB', 'WGL_STENCIL_BITS_ARB',
'WGL_AUX_BUFFERS_ARB', 'WGL_NO_ACCELERATION_ARB',
'WGL_GENERIC_ACCELERATION_ARB', 'WGL_FULL_ACCELERATION_ARB',
'WGL_SWAP_EXCHANGE_ARB', 'WGL_SWAP_COPY_ARB', 'WGL_SWAP_UNDEFINED_ARB',
'WGL_TYPE_RGBA_ARB', 'WGL_TYPE_COLORINDEX_ARB',
'ERROR_INVALID_PIXEL_TYPE_ARB', 'ERROR_INCOMPATIBLE_DEVICE_CONTEXTS_ARB',
'WGL_DRAW_TO_PBUFFER_ARB', 'WGL_MAX_PBUFFER_PIXELS_ARB',
'WGL_MAX_PBUFFER_WIDTH_ARB', 'WGL_MAX_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LARGEST_ARB', 'WGL_PBUFFER_WIDTH_ARB', 'WGL_PBUFFER_HEIGHT_ARB',
'WGL_PBUFFER_LOST_ARB', 'WGL_BIND_TO_TEXTURE_RGB_ARB',
'WGL_BIND_TO_TEXTURE_RGBA_ARB', 'WGL_TEXTURE_FORMAT_ARB',
'WGL_TEXTURE_TARGET_ARB', 'WGL_MIPMAP_TEXTURE_ARB', 'WGL_TEXTURE_RGB_ARB',
'WGL_TEXTURE_RGBA_ARB', 'WGL_NO_TEXTURE_ARB', 'WGL_TEXTURE_CUBE_MAP_ARB',
'WGL_TEXTURE_1D_ARB', 'WGL_TEXTURE_2D_ARB', 'WGL_MIPMAP_LEVEL_ARB',
'WGL_CUBE_MAP_FACE_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB', 'WGL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB',
'WGL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB', 'WGL_FRONT_LEFT_ARB',
'WGL_FRONT_RIGHT_ARB', 'WGL_BACK_LEFT_ARB', 'WGL_BACK_RIGHT_ARB',
'WGL_AUX0_ARB', 'WGL_AUX1_ARB', 'WGL_AUX2_ARB', 'WGL_AUX3_ARB',
'WGL_AUX4_ARB', 'WGL_AUX5_ARB', 'WGL_AUX6_ARB', 'WGL_AUX7_ARB',
'WGL_AUX8_ARB', 'WGL_AUX9_ARB', 'WGL_TYPE_RGBA_FLOAT_ARB',
'ERROR_INVALID_PIXEL_TYPE_EXT', 'WGL_NUMBER_PIXEL_FORMATS_EXT',
'WGL_DRAW_TO_WINDOW_EXT', 'WGL_DRAW_TO_BITMAP_EXT', 'WGL_ACCELERATION_EXT',
'WGL_NEED_PALETTE_EXT', 'WGL_NEED_SYSTEM_PALETTE_EXT',
'WGL_SWAP_LAYER_BUFFERS_EXT', 'WGL_SWAP_METHOD_EXT',
'WGL_NUMBER_OVERLAYS_EXT', 'WGL_NUMBER_UNDERLAYS_EXT', 'WGL_TRANSPARENT_EXT',
'WGL_TRANSPARENT_VALUE_EXT', 'WGL_SHARE_DEPTH_EXT', 'WGL_SHARE_STENCIL_EXT',
'WGL_SHARE_ACCUM_EXT', 'WGL_SUPPORT_GDI_EXT', 'WGL_SUPPORT_OPENGL_EXT',
'WGL_DOUBLE_BUFFER_EXT', 'WGL_STEREO_EXT', 'WGL_PIXEL_TYPE_EXT',
'WGL_COLOR_BITS_EXT', 'WGL_RED_BITS_EXT', 'WGL_RED_SHIFT_EXT',
'WGL_GREEN_BITS_EXT', 'WGL_GREEN_SHIFT_EXT', 'WGL_BLUE_BITS_EXT',
'WGL_BLUE_SHIFT_EXT', 'WGL_ALPHA_BITS_EXT', 'WGL_ALPHA_SHIFT_EXT',
'WGL_ACCUM_BITS_EXT', 'WGL_ACCUM_RED_BITS_EXT', 'WGL_ACCUM_GREEN_BITS_EXT',
'WGL_ACCUM_BLUE_BITS_EXT', 'WGL_ACCUM_ALPHA_BITS_EXT', 'WGL_DEPTH_BITS_EXT',
'WGL_STENCIL_BITS_EXT', 'WGL_AUX_BUFFERS_EXT', 'WGL_NO_ACCELERATION_EXT',
'WGL_GENERIC_ACCELERATION_EXT', 'WGL_FULL_ACCELERATION_EXT',
'WGL_SWAP_EXCHANGE_EXT', 'WGL_SWAP_COPY_EXT', 'WGL_SWAP_UNDEFINED_EXT',
'WGL_TYPE_RGBA_EXT', 'WGL_TYPE_COLORINDEX_EXT', 'WGL_DRAW_TO_PBUFFER_EXT',
'WGL_MAX_PBUFFER_PIXELS_EXT', 'WGL_MAX_PBUFFER_WIDTH_EXT',
'WGL_MAX_PBUFFER_HEIGHT_EXT', 'WGL_OPTIMAL_PBUFFER_WIDTH_EXT',
'WGL_OPTIMAL_PBUFFER_HEIGHT_EXT', 'WGL_PBUFFER_LARGEST_EXT',
'WGL_PBUFFER_WIDTH_EXT', 'WGL_PBUFFER_HEIGHT_EXT', 'WGL_DEPTH_FLOAT_EXT',
'WGL_SAMPLE_BUFFERS_3DFX', 'WGL_SAMPLES_3DFX', 'WGL_SAMPLE_BUFFERS_EXT',
'WGL_SAMPLES_EXT', 'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_FRAMEBUFFER_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_ALPHA_VALUE_I3D',
'WGL_DIGITAL_VIDEO_CURSOR_INCLUDED_I3D',
'WGL_DIGITAL_VIDEO_GAMMA_CORRECTED_I3D', 'WGL_GAMMA_TABLE_SIZE_I3D',
'WGL_GAMMA_EXCLUDE_DESKTOP_I3D', 'WGL_GENLOCK_SOURCE_MULTIVIEW_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_SYNC_I3D', 'WGL_GENLOCK_SOURCE_EXTENAL_FIELD_I3D',
'WGL_GENLOCK_SOURCE_EXTENAL_TTL_I3D', 'WGL_GENLOCK_SOURCE_DIGITAL_SYNC_I3D',
'WGL_GENLOCK_SOURCE_DIGITAL_FIELD_I3D', 'WGL_GENLOCK_SOURCE_EDGE_FALLING_I3D',
'WGL_GENLOCK_SOURCE_EDGE_RISING_I3D', 'WGL_GENLOCK_SOURCE_EDGE_BOTH_I3D',
'WGL_IMAGE_BUFFER_MIN_ACCESS_I3D', 'WGL_IMAGE_BUFFER_LOCK_I3D',
'WGL_BIND_TO_TEXTURE_DEPTH_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_DEPTH_NV',
'WGL_DEPTH_TEXTURE_FORMAT_NV', 'WGL_TEXTURE_DEPTH_COMPONENT_NV',
'WGL_DEPTH_COMPONENT_NV', 'WGL_BIND_TO_TEXTURE_RECTANGLE_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_RGBA_NV', 'WGL_TEXTURE_RECTANGLE_NV',
'WGL_TYPE_RGBA_FLOAT_ATI', 'WGL_RGBA_FLOAT_MODE_ATI',
'WGL_COLOR_CLEAR_UNCLAMPED_VALUE_ATI', 'WGL_FLOAT_COMPONENTS_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_R_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RG_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGB_NV',
'WGL_BIND_TO_TEXTURE_RECTANGLE_FLOAT_RGBA_NV', 'WGL_TEXTURE_FLOAT_R_NV',
'WGL_TEXTURE_FLOAT_RG_NV', 'WGL_TEXTURE_FLOAT_RGB_NV',
'WGL_TEXTURE_FLOAT_RGBA_NV', 'WGL_ERROR_INCOMPATIBLE_AFFINITY_MASKS_NV',
'WGL_ERROR_MISSING_AFFINITY_MASK_NV', 'HPBUFFERARB', 'HPBUFFEREXT', 'HGPUNV',
'GPU_DEVICE', 'PGPU_DEVICE', 'WGL_ARB_buffer_region',
'wglCreateBufferRegionARB', 'wglDeleteBufferRegionARB',
'wglSaveBufferRegionARB', 'wglRestoreBufferRegionARB',
'PFNWGLCREATEBUFFERREGIONARBPROC', 'PFNWGLDELETEBUFFERREGIONARBPROC',
'PFNWGLSAVEBUFFERREGIONARBPROC', 'PFNWGLRESTOREBUFFERREGIONARBPROC',
'WGL_ARB_multisample', 'WGL_ARB_extensions_string',
'wglGetExtensionsStringARB', 'PFNWGLGETEXTENSIONSSTRINGARBPROC',
'WGL_ARB_pixel_format', 'wglGetPixelFormatAttribivARB',
'wglGetPixelFormatAttribfvARB', 'wglChoosePixelFormatARB',
'PFNWGLGETPIXELFORMATATTRIBIVARBPROC', 'PFNWGLGETPIXELFORMATATTRIBFVARBPROC',
'PFNWGLCHOOSEPIXELFORMATARBPROC', 'WGL_ARB_make_current_read',
'wglMakeContextCurrentARB', 'wglGetCurrentReadDCARB',
'PFNWGLMAKECONTEXTCURRENTARBPROC', 'PFNWGLGETCURRENTREADDCARBPROC',
'WGL_ARB_pbuffer', 'wglCreatePbufferARB', 'wglGetPbufferDCARB',
'wglReleasePbufferDCARB', 'wglDestroyPbufferARB', 'wglQueryPbufferARB',
'PFNWGLCREATEPBUFFERARBPROC', 'PFNWGLGETPBUFFERDCARBPROC',
'PFNWGLRELEASEPBUFFERDCARBPROC', 'PFNWGLDESTROYPBUFFERARBPROC',
'PFNWGLQUERYPBUFFERARBPROC', 'WGL_ARB_render_texture', 'wglBindTexImageARB',
'wglReleaseTexImageARB', 'wglSetPbufferAttribARB',
'PFNWGLBINDTEXIMAGEARBPROC', 'PFNWGLRELEASETEXIMAGEARBPROC',
'PFNWGLSETPBUFFERATTRIBARBPROC', 'WGL_ARB_pixel_format_float',
'WGL_EXT_display_color_table', 'wglCreateDisplayColorTableEXT',
'wglLoadDisplayColorTableEXT', 'wglBindDisplayColorTableEXT',
'wglDestroyDisplayColorTableEXT', 'PFNWGLCREATEDISPLAYCOLORTABLEEXTPROC',
'PFNWGLLOADDISPLAYCOLORTABLEEXTPROC', 'PFNWGLBINDDISPLAYCOLORTABLEEXTPROC',
'PFNWGLDESTROYDISPLAYCOLORTABLEEXTPROC', 'WGL_EXT_extensions_string',
'wglGetExtensionsStringEXT', 'PFNWGLGETEXTENSIONSSTRINGEXTPROC',
'WGL_EXT_make_current_read', 'wglMakeContextCurrentEXT',
'wglGetCurrentReadDCEXT', 'PFNWGLMAKECONTEXTCURRENTEXTPROC',
'PFNWGLGETCURRENTREADDCEXTPROC', 'WGL_EXT_pbuffer', 'wglCreatePbufferEXT',
'wglGetPbufferDCEXT', 'wglReleasePbufferDCEXT', 'wglDestroyPbufferEXT',
'wglQueryPbufferEXT', 'PFNWGLCREATEPBUFFEREXTPROC',
'PFNWGLGETPBUFFERDCEXTPROC', 'PFNWGLRELEASEPBUFFERDCEXTPROC',
'PFNWGLDESTROYPBUFFEREXTPROC', 'PFNWGLQUERYPBUFFEREXTPROC',
'WGL_EXT_pixel_format', 'wglGetPixelFormatAttribivEXT',
'wglGetPixelFormatAttribfvEXT', 'wglChoosePixelFormatEXT',
'PFNWGLGETPIXELFORMATATTRIBIVEXTPROC', 'PFNWGLGETPIXELFORMATATTRIBFVEXTPROC',
'PFNWGLCHOOSEPIXELFORMATEXTPROC', 'WGL_EXT_swap_control',
'wglSwapIntervalEXT', 'wglGetSwapIntervalEXT', 'PFNWGLSWAPINTERVALEXTPROC',
'PFNWGLGETSWAPINTERVALEXTPROC', 'WGL_EXT_depth_float',
'WGL_NV_vertex_array_range', 'wglAllocateMemoryNV', 'wglFreeMemoryNV',
'PFNWGLALLOCATEMEMORYNVPROC', 'PFNWGLFREEMEMORYNVPROC',
'WGL_3DFX_multisample', 'WGL_EXT_multisample', 'WGL_OML_sync_control',
'wglGetSyncValuesOML', 'wglGetMscRateOML', 'wglSwapBuffersMscOML',
'wglSwapLayerBuffersMscOML', 'wglWaitForMscOML', 'wglWaitForSbcOML',
'PFNWGLGETSYNCVALUESOMLPROC', 'PFNWGLGETMSCRATEOMLPROC',
'PFNWGLSWAPBUFFERSMSCOMLPROC', 'PFNWGLSWAPLAYERBUFFERSMSCOMLPROC',
'PFNWGLWAITFORMSCOMLPROC', 'PFNWGLWAITFORSBCOMLPROC',
'WGL_I3D_digital_video_control', 'wglGetDigitalVideoParametersI3D',
'wglSetDigitalVideoParametersI3D', 'PFNWGLGETDIGITALVIDEOPARAMETERSI3DPROC',
'PFNWGLSETDIGITALVIDEOPARAMETERSI3DPROC', 'WGL_I3D_gamma',
'wglGetGammaTableParametersI3D', 'wglSetGammaTableParametersI3D',
'wglGetGammaTableI3D', 'wglSetGammaTableI3D',
'PFNWGLGETGAMMATABLEPARAMETERSI3DPROC',
'PFNWGLSETGAMMATABLEPARAMETERSI3DPROC', 'PFNWGLGETGAMMATABLEI3DPROC',
'PFNWGLSETGAMMATABLEI3DPROC', 'WGL_I3D_genlock', 'wglEnableGenlockI3D',
'wglDisableGenlockI3D', 'wglIsEnabledGenlockI3D', 'wglGenlockSourceI3D',
'wglGetGenlockSourceI3D', 'wglGenlockSourceEdgeI3D',
'wglGetGenlockSourceEdgeI3D', 'wglGenlockSampleRateI3D',
'wglGetGenlockSampleRateI3D', 'wglGenlockSourceDelayI3D',
'wglGetGenlockSourceDelayI3D', 'wglQueryGenlockMaxSourceDelayI3D',
'PFNWGLENABLEGENLOCKI3DPROC', 'PFNWGLDISABLEGENLOCKI3DPROC',
'PFNWGLISENABLEDGENLOCKI3DPROC', 'PFNWGLGENLOCKSOURCEI3DPROC',
'PFNWGLGETGENLOCKSOURCEI3DPROC', 'PFNWGLGENLOCKSOURCEEDGEI3DPROC',
'PFNWGLGETGENLOCKSOURCEEDGEI3DPROC', 'PFNWGLGENLOCKSAMPLERATEI3DPROC',
'PFNWGLGETGENLOCKSAMPLERATEI3DPROC', 'PFNWGLGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLGETGENLOCKSOURCEDELAYI3DPROC',
'PFNWGLQUERYGENLOCKMAXSOURCEDELAYI3DPROC', 'WGL_I3D_image_buffer',
'wglCreateImageBufferI3D', 'wglDestroyImageBufferI3D',
'wglAssociateImageBufferEventsI3D', 'wglReleaseImageBufferEventsI3D',
'PFNWGLCREATEIMAGEBUFFERI3DPROC', 'PFNWGLDESTROYIMAGEBUFFERI3DPROC',
'PFNWGLASSOCIATEIMAGEBUFFEREVENTSI3DPROC',
'PFNWGLRELEASEIMAGEBUFFEREVENTSI3DPROC', 'WGL_I3D_swap_frame_lock',
'wglEnableFrameLockI3D', 'wglDisableFrameLockI3D', 'wglIsEnabledFrameLockI3D',
'wglQueryFrameLockMasterI3D', 'PFNWGLENABLEFRAMELOCKI3DPROC',
'PFNWGLDISABLEFRAMELOCKI3DPROC', 'PFNWGLISENABLEDFRAMELOCKI3DPROC',
'PFNWGLQUERYFRAMELOCKMASTERI3DPROC', 'WGL_I3D_swap_frame_usage',
'wglGetFrameUsageI3D', 'wglBeginFrameTrackingI3D', 'wglEndFrameTrackingI3D',
'wglQueryFrameTrackingI3D', 'PFNWGLGETFRAMEUSAGEI3DPROC',
'PFNWGLBEGINFRAMETRACKINGI3DPROC', 'PFNWGLENDFRAMETRACKINGI3DPROC',
'PFNWGLQUERYFRAMETRACKINGI3DPROC', 'WGL_ATI_pixel_format_float',
'WGL_NV_render_depth_texture', 'WGL_NV_render_texture_rectangle',
'WGL_NV_float_buffer', 'WGL_NV_swap_group', 'wglJoinSwapGroupNV',
'wglBindSwapBarrierNV', 'wglQuerySwapGroupNV', 'wglQueryMaxSwapGroupsNV',
'wglQueryFrameCountNV', 'wglResetFrameCountNV', 'PFNWGLJOINSWAPGROUPNVPROC',
'PFNWGLBINDSWAPBARRIERNVPROC', 'PFNWGLQUERYSWAPGROUPNVPROC',
'PFNWGLQUERYMAXSWAPGROUPSNVPROC', 'PFNWGLQUERYFRAMECOUNTNVPROC',
'PFNWGLRESETFRAMECOUNTNVPROC', 'WGL_NV_gpu_affinity', 'wglEnumGpusNV',
'wglEnumGpuDevicesNV', 'wglCreateAffinityDCNV', 'wglEnumGpusFromAffinityDCNV',
'wglDeleteDCNV']
# END GENERATED CONTENT (do not edit above this line)
| gpl-3.0 |
OliverWalter/amdtk | recipes/wsj_segments/local/get_docs_from_prompts.py | 3 | 11222 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# author : Santosh
# e-mail : kcraj2[AT]gmail[DOT]com
# Date created : 08 Jul 2016
# Last modified : 11 Jul 2016
"""------------------------------------------------------------------------
Browse the WSJ0 and WSJ1 corpora, parse the prompts (transcriptions),
and categorize the utts into documents (documents are articles).
This article info is obtained from the <PROMPT_ID>.
Dumps the unique train/test keys in data/
------------------------------------------------------------------------"""
import os
import sys
# import socket
import string
import argparse
import re
def read_lexicon(lex_file):
    """Load a pronunciation lexicon into a word -> phoneme-sequence dict.

    Each line is "WORD PH1 PH2 ...". Stress digits are stripped from the
    phoneme symbols (e.g. "AE1" -> "ae"); everything is lower-cased.
    """
    lex = {}
    phs = {}  # raw phoneme symbol -> digit-stripped symbol (for the stats below)
    with open(lex_file, "r") as fpr:
        for raw_line in fpr:
            fields = raw_line.strip().lower().split(" ")
            pron = []
            for pos, field in enumerate(fields):
                if field.strip() == '':
                    continue
                if pos > 0:
                    bare = re.sub('[0-9]', '', field)
                    phs[field] = bare
                    pron.append(bare)
            lex[fields[0]] = " ".join(pron).strip()
    if VERBOSE:
        print('No. of phonemes:', len(phs), 'after mapping:',
              len(set(list(phs.values()))))
        print('No. of words in lexicon:', len(lex))
    return lex
def read_all_prompts(fpaths):
    """Concatenate the lines of every prompt file in *fpaths*.

    Lines whose stripped length is 1 or less are discarded.
    """
    lines = []
    for path in fpaths:
        with open(path, 'r') as fh:
            lines.extend(fh.read().split("\n"))
    return [line for line in lines if len(line.strip()) > 1]
def read_simple_flist(fpath):
    """Return the lines of *fpath* as a list, dropping one trailing empty line."""
    with open(fpath, 'r') as fh:
        entries = fh.read().split("\n")
    if entries and entries[-1].strip() == '':
        entries = entries[:-1]
    return entries
def get_ptx_fpaths(out_fpath_file):
    """Write the paths of every *.ptx prompt file under WSJ0 and WSJ1
    into *out_fpath_file* (WSJ0 truncates the file, WSJ1 appends)."""
    for corpus, redirect in ((WSJ0, ">"), (WSJ1, ">>")):
        os.system("find " + corpus + " -type f -name \"*.ptx\" "
                  + redirect + " " + out_fpath_file)
def get_docIDs_from_prompts(data, doc_d, utt_d, utt_txt_d, utt_ph_d, lex):
    """ Parse the prompts and get the utt to doc ID mappings.

    Mutates the caller-supplied dicts in place:
    * doc_d: doc (article) ID -> list of utt IDs
    * utt_d: utt ID -> doc ID
    * utt_txt_d: utt ID -> cleaned, lower-cased utterance text
    * utt_ph_d: utt ID -> phoneme sequence built by looking each token up in lex
    Returns txt_utt_d: cleaned utterance text -> list of utt IDs, used later
    to detect utterances that repeat verbatim.
    """
    found = 0
    not_found = 0 # tokens not found in lexicon
    incom = 0  # prompts whose PROMPT_ID does not have all 5 dot-separated fields
    txt_utt_d = {} # utt txt to utt ID mapping (to get the unique utts)
    # oth = {}
    for utt_line in data:
        utt_line = utt_line.strip()
        # Each prompt line ends with "(<UTT_ID> <PROMPT_ID>)"; split on "(" and
        # drop the trailing ")" to isolate that ID part.
        vals = utt_line.split("(")
        id_tmp = vals[-1][:-1]
        utt = utt_line[:-len(id_tmp)-2].strip().lower()
        """
        translator = str.maketrans({key: None for key in string.punctuation})
        clean_utt = utt.translate(translator)
        clean_utt = re.sub("\s\s+", " ", clean_utt) # remove multiple spaces
        utt = clean_utt
        """
        # Strip punctuation and map hyphens to spaces (apostrophes are kept).
        utt = re.sub("\.|,|\"|\?|\(|\)|;|\&|\$|\%|\{|\}|\[|\]|:|/|~|`|\!", "", utt)
        utt = re.sub("\-", " ", utt)
        # m = re.search("^\'[a-z]", utt)
        # if m is not None:
        #     utt = re.sub("\'", "", utt)
        pt_tmp = id_tmp.split(" ")[-1].split(".")
        utt_id = id_tmp.split(" ")[0].strip()
        # https://catalog.ldc.upenn.edu/docs/LDC93S6A/csrnov92.html
        # ptx format (<UTT_ID> <PROMPT_ID>)
        # PROMPT_ID = <YEAR>.<FILE-NUMBER>.<ARTICLE-NUMBER>.<PARAGRAPH-NUMBER>.<SENTENCE-NUMBER>
        # article ID as doc ID
        doc_id = ''
        if len(pt_tmp) == 5:
            doc_id = pt_tmp[2] # 2 => get article ID
        else:
            # Incomplete PROMPT_ID: the utt is filed under the empty doc ID ''.
            incom += 1
            # oth[pt_tmp[0]] = 1
        # update the doc_d dictionary
        if doc_id in doc_d:
            doc_d[doc_id].append(utt_id)
        else:
            doc_d[doc_id] = [utt_id]
        # check if the sentence is repeating
        if utt in txt_utt_d:
            txt_utt_d[utt].append(utt_id)
        else:
            txt_utt_d[utt] = [utt_id]
        # update the utt_d and utt_txt_d dictionaries
        if utt_id in utt_d:
            # Duplicate utt ID: first occurrence wins, skip the rest.
            continue
        else:
            utt_d[utt_id] = doc_id
            utt_txt_d[utt_id] = utt
            utt_ph = ""
            tokens = utt.split()
            for tok in tokens:
                try:
                    utt_ph += lex[tok] + " "
                    found += 1
                except KeyError:
                    # Out-of-vocabulary token: silently omitted from the
                    # phoneme sequence; only counted for the stats below.
                    not_found += 1
                    # m = re.search('[0-9]+', tok)
                    # if m is None:
                    #     print(tok) #, 'not found in lexicon.')
            utt_ph_d[utt_id] = utt_ph
    if VERBOSE:
        print('Utts with incomplete prompt IDs:', incom)
        print('No. of tokens not found in lexicon:', not_found,
              '({:.2f} %)'.format((float(not_found) * 100) / found))
    return txt_utt_d
def dump_utts_into_docs(utt_ids, doc_d, utt_txt_d, utt_ph_d, out_word_dir,
                        out_ph_dir, txt_utt_d, pwd, base):
    """ Dump the utts in utt_ids into corresponding documents and save them
    in out_word_dir/ out_ph_dir/.

    For every document (article) that contains at least one utt from
    *utt_ids*, writes <doc_id>.txt with the word transcriptions into
    out_word_dir and the phoneme transcriptions into out_ph_dir.
    Also writes data/<base>_unique.keys (utt IDs used) and
    data/<base>_docs.keys (doc IDs written); *base* is e.g. "training".
    """
    fpu = None
    if VERBOSE:
        # Log every utterance text together with all utt IDs that share it.
        fpu = open(pwd + '../data/repeating_utts_' + base + '.txt', 'w')
    count = 0
    uniq_utt = {}  # utterance text -> occurrence count (stats only)
    uniq_keys = []
    uniq_doc_ids = []
    for doc_id in sorted(list(doc_d.keys())):
        utt_l = sorted(doc_d[doc_id])
        out_word_f = out_word_dir + doc_id + ".txt"
        out_ph_f = out_ph_dir + doc_id + ".txt"
        utts_to_dump = []
        utts_to_dump_ph = []
        # Keep only this document's utts that are also in the requested split.
        utt_l2 = sorted(list(set(utt_ids) & set(utt_l)))
        count += len(utt_l2)
        for utt_id in utt_l2:
            try:
                utt_ids_l = txt_utt_d[utt_txt_d[utt_id]]
                if VERBOSE:
                    if len(utt_ids_l) > 0:
                        for uid in utt_ids_l:
                            fpu.write(utt_txt_d[utt_id] + ":" + uid + "\n")
            except KeyError:
                print('Cannot find sentence.')
            try:
                uniq_utt[utt_txt_d[utt_id]] += 1
            except KeyError:
                uniq_utt[utt_txt_d[utt_id]] = 1
            # utts_to_dump.append(utt_id + " " + utt_txt_d[utt_id])
            # utts_to_dump_ph.append(utt_id + " " + utt_ph_d[utt_id])
            utts_to_dump.append(utt_txt_d[utt_id])
            utts_to_dump_ph.append(utt_ph_d[utt_id])
            uniq_keys.append(utt_id)
        if len(utts_to_dump) > 0:
            uniq_doc_ids.append(doc_id)
            with open(out_word_f, 'w') as fpw, open(out_ph_f, 'w') as fpp:
                fpw.write("\n".join(utts_to_dump) + "\n")
                fpp.write("\n".join(utts_to_dump_ph) + "\n")
    uniq_keys = sorted(uniq_keys)
    uniq_key_f = pwd + "../data/" + base + "_unique.keys"
    with open(uniq_key_f, 'w') as fpw:
        fpw.write("\n".join(uniq_keys) + "\n")
    uniq_doc_ids = sorted(uniq_doc_ids)
    uniq_doc_f = pwd + "../data/" + base + "_docs.keys"
    with open(uniq_doc_f, 'w') as fpw:
        fpw.write("\n".join(uniq_doc_ids) + "\n")
    if VERBOSE:
        print("No. of utts used:", count)
        print("No. of unique utts:", len(uniq_utt))
        fpu.close()
def main():
    """ main method.

    Pipeline: collect *.ptx prompt files from the WSJ corpora, load the
    lexicon, group utterances into article-level documents, then dump the
    train/test splits as word- and phoneme-level document files.
    """
    pwd = os.path.dirname(os.path.realpath(__file__)) + "/"
    out_fpath_file = pwd + "../data/prompts.fpaths"
    get_ptx_fpaths(out_fpath_file)
    fpaths = read_simple_flist(out_fpath_file)
    # assumes data/lexicon.txt exists next to the data dir -- TODO confirm
    lex_file = pwd + "../data/lexicon.txt"
    lex = read_lexicon(lex_file)
    if VERBOSE:
        print('Total no. of prompt files:', len(fpaths))
    # data = read_ptx_file('all_ptx.txt')
    data = read_all_prompts(fpaths)
    if VERBOSE:
        print('Total no. of prompts:', len(data))
    utt_txt_d = {} # utt ID to text mapping
    utt_ph_d = {} # utt ID to phoneme seq mapping
    utt_d = {} # utt ID to docID mapping
    doc_d = {} # doc to utt [] mapping
    txt_utt_d = get_docIDs_from_prompts(data, doc_d, utt_d, utt_txt_d,
                                        utt_ph_d, lex)
    if VERBOSE:
        with open(pwd + '../data/unique_utt_IDs.txt', 'w') as fpw:
            for txt, uid_l in txt_utt_d.items():
                fpw.write(txt + " " + ",".join(uid_l) + "\n")
        print('No. of docs (articles):', len(doc_d))
        print('No. of utts with doc IDs:', len(utt_d))
        print('No. of utts with doc IDs and text:', len(utt_txt_d))
        print('No. of unique utts (based on text):', len(txt_utt_d))
    train_ids = read_simple_flist(pwd + '../data/training_si84.keys')
    train_ids += read_simple_flist(pwd + '../data/training_si284.keys')
    test_ids = read_simple_flist(pwd + '../data/test_eval92.keys')
    if VERBOSE:
        print('Train utt IDs:', len(train_ids))
        print('Test utt IDs:', len(test_ids))
    # Dump the utts in respective documents
    # NOTE(review): train and test share the same output dirs (words/,
    # phonemes/) -- confirm that doc IDs never collide across splits.
    out_dir = os.path.realpath(pwd + "/EVAL/topics/wsj/") + "/"
    train_out = out_dir + "words/"
    test_out = out_dir + "words/"
    train_ph_out = out_dir + "phonemes/"
    test_ph_out = out_dir + "phonemes/"
    os.makedirs(out_dir, exist_ok=True)
    os.system("mkdir -p " + train_out + " " + test_out + " " + \
              train_ph_out + " " + test_ph_out)
    if VERBOSE:
        print('Created the dirs:')
        print(out_dir + '\n' + train_out + '\n' + test_out + '\n' + \
              train_ph_out + '\n' + test_ph_out)
    dump_utts_into_docs(sorted(train_ids), doc_d, utt_txt_d, utt_ph_d,
                        train_out, train_ph_out, txt_utt_d, pwd, "training")
    dump_utts_into_docs(sorted(test_ids), doc_d, utt_txt_d, utt_ph_d,
                        test_out, test_ph_out, txt_utt_d, pwd, "test")
    print('Data preparation for topic based document clustering is done.')
if __name__ == "__main__":
    # Command line: get_docs_from_prompts.py WSJ0_DIR WSJ1_DIR [--verbose]
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('wsj0', help='path to wsj0')
    parser.add_argument('wsj1', help='path to wsj1')
    parser.add_argument('--verbose', action='store_true',
                        help='Display useless information while processing.')
    args = parser.parse_args()
    # Module-level globals read by the helper functions above.
    WSJ0 = os.path.realpath(args.wsj0) + "/"
    WSJ1 = os.path.realpath(args.wsj1) + "/"
    VERBOSE = args.verbose
    main()

# NOTE(review): dead code below, kept for reference -- the abandoned
# hostname-based corpus path selection.
"""
Lucas didn't like this automation

host_addr = socket.gethostbyaddr(socket.gethostname())[0]
host = host_addr.split(".")[1]
VERBOSE = False
WSJ0 = ''
WSJ1 = ''
# BUT cluster
if host == "fit":
    print('Host: BUT cluster')
    WSJ0 = "/mnt/matylda2/data/WSJ0/"
    WSJ1 = "/mnt/matylda2/data/WSJ1/"
# CLSP cluster
elif host == "clsp":
    print('Host: CLSP cluster')
    WSJ0 = "/export/corpora5/LDC/LDC93S6B/"
    WSJ1 = "/export/corpora5/LDC/LDC94S13B/"
else:
    print("Manually enter the path of WSJ0 and WSJ1 in the source file:",
          sys.argv[0])
    sys.exit()
"""
| bsd-2-clause |
polltooh/FineGrainedAction | nn/gen_test_file_fc7.py | 1 | 2206 | import os
import cv2
import random
import numpy as np
image_dir = "/home/mscvadmin/action/FineGrainedAction/data/test_image/"
frame_dir = "/home/mscvadmin/action/FineGrainedAction/data/test_video/"
def delete_last_empty_line(s):
    """Strip all trailing newline characters from *s*.

    The original while-loop removed trailing '\n' one at a time; that is
    exactly what str.rstrip('\n') does, including the empty-string case.
    """
    return s.rstrip('\n')
def get_image(label_name):
    """Return the .fc7 feature path of the query image under *label_name*.

    The label directory is assumed to hold exactly one image, so the first
    listed entry is returned.
    """
    folder = image_dir + label_name
    paths = [folder + "/" + entry.replace(".jpg", ".fc7")
             for entry in os.listdir(folder)]
    return paths[0]
def get_frame(label_name):
    """Collect the .fc7 feature paths of every frame under *label_name*.

    Each video subdirectory of frame_dir/<label_name> is expected to
    contain a file_list.txt naming its frame images; the .jpg names are
    mapped to .fc7 feature paths.
    """
    list_name = os.listdir(frame_dir + label_name)
    frame_list = list()
    for name in list_name:
        curr_path = frame_dir + label_name + "/" + name + "/"
        # Skip plain files; only per-video directories are scanned.
        if (os.path.isdir(curr_path)):
            file_list_name = curr_path + "/file_list.txt"
            with open (file_list_name, "r") as f:
                file_data = (f.read())
            file_data = delete_last_empty_line(file_data)
            data_list = file_data.split("\n")
            for d in data_list:
                frame_list.append(curr_path + d.replace(".jpg", ".fc7"))
    return frame_list
def get_list(label_name):
    """Return (query image feature path, list of frame feature paths)."""
    return get_image(label_name), get_frame(label_name)
def gen_list():
    """Write file_list_test_nba_dunk_fc7.txt pairing the dunk query image
    with every test frame, one "query frame -1" triple per line
    (-1 marks an unknown label)."""
    label_name = "nba_dunk"
    query_image, test_frame = get_list("nba_dunk")
    # query_image = get_image(label_name)
    # jumpshot_image = get_image("nba_jumpshot")
    # layup_image = get_image("nba_layup")
    with open("file_list_test_" + label_name + "_fc7.txt", "w") as f:
        for i in range(len(test_frame)):
            f.write(query_image)
            f.write(" ")
            f.write(test_frame[i])
            f.write(" ")
            f.write("-1")
            f.write("\n")
            # f.write(jumpshot_image)
            # f.write(" ")
            # f.write(test_frame[i])
            # f.write(" ")
            # f.write("-1")
            # f.write("\n")
            # f.write(layup_image)
            # f.write(" ")
            # f.write(test_frame[i])
            # f.write(" ")
            # f.write("-1")
            # f.write("\n")

if __name__ == "__main__":
    gen_list()
| mit |
drkitty/bitty | assembler/test.py | 1 | 1687 | #!/usr/bin/env python3
import ast
import difflib
import glob
import subprocess as sub
import sys
from sys import stdout, stderr
# NOTE(review): both names below appear unused in the visible code --
# confirm against history before removing.
TEMP = "/tmp/28d8vjsdfu239ur0z89fd8f23pq2038h8vc"
devnull = open("/dev/null", "w")
def _report_failure(message, output):
    """Write a failure banner and the assembler's captured output to stderr."""
    stderr.write(message)
    stderr.write("Output:\n")
    for line in output.splitlines():
        stderr.write(" " + line + "\n")


def _run_suite(asm_name, pattern, expect_success, fail_message):
    """Run the assembler once per file matching *pattern*.

    expect_success is True when lexing must succeed (exit status 0) and
    False when it must fail. Returns the list of test names whose exit
    status did not match the expectation.
    """
    failures = []
    for testname in sorted(glob.glob(pattern)):
        stdout.write(testname + "\n")
        with open(testname) as src:
            # stderr is folded into stdout so the report shows everything.
            asm = sub.Popen(
                (asm_name,), stdin=src, stdout=sub.PIPE, stderr=sub.STDOUT)
            out, _ = asm.communicate()
        out = out.decode('utf-8')
        if (asm.returncode == 0) != expect_success:
            _report_failure(fail_message, out)
            failures.append(testname)
    return failures


def main(asm_name):
    """Run the positive (test-lex) and negative (test-no-lex) suites against
    the assembler binary *asm_name* and print a PASSED/FAILED summary.

    The two suites previously duplicated the same loop; they now share
    _run_suite with only the expectation and message differing.
    """
    fail = []
    fail += _run_suite(asm_name, "test-lex/*", True,
                       "TEST FAILED: Can't lex\n")
    fail += _run_suite(asm_name, "test-no-lex/*", False,
                       "TEST FAILED: Lex should fail\n")
    if fail:
        stdout.write("FAILED: " + ", ".join(fail) + "\n")
    else:
        stdout.write("PASSED\n")
if __name__ == '__main__':
    # Require exactly one argument: the assembler binary to exercise.
    if len(sys.argv) != 2:
        stderr.write("Usage: test.py ./asm\n")
        exit(1)
    main(sys.argv[1])
| mit |
asm-products/movie-database-service | ani/lib/python2.7/site-packages/django/db/models/aggregates.py | 114 | 2601 | """
Classes to represent the definitions of aggregate functions.
"""
from django.db.models.constants import LOOKUP_SEP
def refs_aggregate(lookup_parts, aggregates):
    """
    Check whether any LOOKUP_SEP-joined prefix of lookup_parts names one
    of the given aggregates. Every prefix length (including zero) must be
    tried because the default annotation names themselves contain
    LOOKUP_SEP.
    """
    return any(
        LOOKUP_SEP.join(lookup_parts[:prefix_len]) in aggregates
        for prefix_len in range(len(lookup_parts) + 1)
    )
class Aggregate(object):
    """
    Default Aggregate definition: a backend-independent description of an
    aggregate function, converted to a backend-specific implementation by
    add_to_query(). Subclasses only set the class variable ``name``.
    """
    def __init__(self, lookup, **extra):
        """Instantiate a new aggregate.

        * lookup is the field on which the aggregate operates.
        * extra is a dictionary of additional data to provide for the
        aggregate definition

        Also utilizes the class variables:
        * name, the identifier for this aggregate function.
        """
        self.lookup = lookup
        self.extra = extra

    def _default_alias(self):
        # e.g. lookup "amount" with name "Sum" -> "amount__sum"
        return '%s__%s' % (self.lookup, self.name.lower())
    default_alias = property(_default_alias)

    def add_to_query(self, query, alias, col, source, is_summary):
        """Add the aggregate to the nominated query.

        This method is used to convert the generic Aggregate definition into a
        backend-specific definition.

        * query is the backend-specific query instance to which the aggregate
          is to be added.
        * col is a column reference describing the subject field
          of the aggregate. It can be an alias, or a tuple describing
          a table and column name.
        * source is the underlying field or aggregate definition for
          the column reference. If the aggregate is not an ordinal or
          computed type, this reference is used to determine the coerced
          output type of the aggregate.
        * is_summary is a boolean that is set True if the aggregate is a
          summary value rather than an annotation.
        """
        # Look up the backend implementation class by this aggregate's name.
        klass = getattr(query.aggregates_module, self.name)
        aggregate = klass(col, source=source, is_summary=is_summary, **self.extra)
        query.aggregates[alias] = aggregate
# Concrete aggregate definitions. Each ``name`` selects the matching
# backend-specific implementation class from query.aggregates_module.
class Avg(Aggregate):
    name = 'Avg'

class Count(Aggregate):
    name = 'Count'

class Max(Aggregate):
    name = 'Max'

class Min(Aggregate):
    name = 'Min'

class StdDev(Aggregate):
    name = 'StdDev'

class Sum(Aggregate):
    name = 'Sum'

class Variance(Aggregate):
    name = 'Variance'
| agpl-3.0 |
adrian-ionescu/apache-spark | examples/src/main/python/streaming/queue_stream.py | 150 | 1763 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Create a queue of RDDs that will be mapped/reduced one at a time in
1 second intervals.
To run this example use
`$ bin/spark-submit examples/src/main/python/streaming/queue_stream.py
"""
import time
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
if __name__ == "__main__":
    sc = SparkContext(appName="PythonStreamingQueueStream")
    ssc = StreamingContext(sc, 1)  # 1-second batch interval

    # Create the queue through which RDDs can be pushed to
    # a QueueInputDStream
    rddQueue = []
    for i in range(5):
        # Five identical RDDs of 1..1000, each split into 10 partitions.
        rddQueue += [ssc.sparkContext.parallelize([j for j in range(1, 1001)], 10)]

    # Create the QueueInputDStream and use it do some processing:
    # count how many of 1..1000 fall in each residue class mod 10.
    inputStream = ssc.queueStream(rddQueue)
    mappedStream = inputStream.map(lambda x: (x % 10, 1))
    reducedStream = mappedStream.reduceByKey(lambda a, b: a + b)
    reducedStream.pprint()

    ssc.start()
    time.sleep(6)  # let several 1-second batches run before stopping
    ssc.stop(stopSparkContext=True, stopGraceFully=True)
| apache-2.0 |
r-owen/stui | TUI/Inst/GuideMonitor/GuideMonitorWindow.py | 1 | 12690 | #!/usr/bin/env python
"""Seeing monitor
History:
2010-10-01 ROwen Initial version.
2010-11-17 ROwen Added measured and applied offsets for all guider corrections.
Split RA, Dec and rotator into separate graphs.
Added net rotator offset.
2010-11-19 ROwen Display scaleFac as "percent": (scaleFac - 1) * 100
2010-11-22 ROwen Changed Scale scaling from 1e2 to 1e6.
2010-12-10 ROwen Reduced the memory leak by increasing updateInterval from its default value of 0.9 sec
to 10 seconds. Return to the default value again once the matplotlib bug is fixed.
2011-01-03 ROwen Modified to use new version of StripChartWdg.
Added measured FWHM to the seeing plot.
Added preliminary display of measured FWHM of each in-focus probe (no labelling).
2011-01-18 ROwen Net values are shown as steps, since that properly reflects reality.
2012-06-04 ROwen Fix clear button.
2013-03-21 ROwen Modified to use guider keyword gprobeBits instead of synthetic keyword fullGProbeBits
now that ticket #433 is fixed!
2015-11-03 ROwen Replace "== None" with "is None" and "!= None" with "is not None" to modernize the code.
"""
import Tkinter
import matplotlib
import RO.CnvUtil
import RO.PhysConst
import RO.Wdg
import TUI.Base.StripChartWdg
import TUI.Models
WindowName = "Inst.Guide Monitor"
def addWindow(tlSet):
    """Create the window for TUI.

    Inputs:
    - tlSet: toplevel set to which the guide monitor window is added;
      the window starts hidden and uses GuideMonitorWdg as its widget.
    """
    tlSet.createToplevel(
        name = WindowName,
        defGeom = "+434+22",
        visible = False,
        resizable = True,
        wdgFunc = GuideMonitorWdg,
    )
class GuideMonitorWdg(Tkinter.Frame):
    """Monitor guide corrections

    Shows six strip-chart subplots stacked vertically:
    RA arc offset, Dec arc offset, rotator offset, seeing, focus and scale,
    each fed by keyword variables from the tcc and guider actor models.
    """
    def __init__(self, master, timeRange=1800, width=9, height=9):
        """Create a GuideMonitorWdg
        Inputs:
        - master: parent Tk widget
        - timeRange: range of time displayed (seconds)
        - width: width of plot (inches)
        - height: height of plot (inches)
        """
        Tkinter.Frame.__init__(self, master)
        self.tccModel = TUI.Models.getModel("tcc")
        self.guiderModel = TUI.Models.getModel("guider")
        self.probeInfoDict = dict() # dict of probe number (starting from 1): ProbeInfo
        self.stripChartWdg = TUI.Base.StripChartWdg.StripChartWdg(
            master = self,
            timeRange = timeRange,
            updateInterval = 10,
            numSubplots = 6,
            width = width,
            height = height,
            cnvTimeFunc = TUI.Base.StripChartWdg.TimeConverter(useUTC=True),
        )
        # Make the chart fill the frame and track resizes.
        self.stripChartWdg.grid(row=0, column=0, sticky="nwes")
        self.grid_rowconfigure(0, weight=1)
        self.grid_columnconfigure(0, weight=1)
        # the default ticks are not nice, so be explicit
        self.stripChartWdg.xaxis.set_major_locator(matplotlib.dates.MinuteLocator(byminute=range(0, 61, 5)))
        # subplotInd walks through the 6 subplots in display order:
        # RA, Dec, rotator, seeing, focus, scale.
        subplotInd = 0
        # RA/Dec arc offset subplot
        def arcsecFromPVT(val):
            # PVT positions are in degrees; plots show arcseconds.
            return 3600.0 * RO.CnvUtil.posFromPVT(val)
        # Each quantity plots three lines: net offset (blue, stepped since the
        # offset holds its value between updates), measured error (gray) and
        # applied correction (green).
        self.stripChartWdg.plotKeyVar(
            label="RA net offset",
            subplotInd=subplotInd,
            keyVar=self.tccModel.objArcOff,
            keyInd=0,
            func=arcsecFromPVT,
            color="blue",
            drawstyle="steps-post",
        )
        self.stripChartWdg.plotKeyVar(
            label="RA measured err",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisError,
            keyInd=0,
            color="gray",
        )
        self.stripChartWdg.plotKeyVar(
            label="RA applied corr",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisChange,
            keyInd=0,
            color="green",
        )
        self.stripChartWdg.showY(-0.5, 0.5, subplotInd=subplotInd)
        self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("RA Arc Off (\")")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # Dec subplot: same layout as RA, using index 1 of the same keywords.
        self.stripChartWdg.plotKeyVar(
            label="Dec net offset",
            subplotInd=subplotInd,
            keyVar=self.tccModel.objArcOff,
            keyInd=1,
            func=arcsecFromPVT,
            color="blue",
            drawstyle="steps-post",
        )
        self.stripChartWdg.plotKeyVar(
            label="Dec measured err",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisError,
            keyInd=1,
            color="gray",
        )
        self.stripChartWdg.plotKeyVar(
            label="Dec applied corr",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisChange,
            keyInd=1,
            color="green",
        )
        self.stripChartWdg.showY(-0.5, 0.5, subplotInd=subplotInd)
        self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Dec Arc Off (\")")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # rotator offset subplot
        self.stripChartWdg.plotKeyVar(
            label="Rot net offset",
            subplotInd=subplotInd,
            keyVar=self.tccModel.guideOff,
            keyInd=2,
            func=arcsecFromPVT,
            color="blue",
            drawstyle="steps-post",
        )
        self.stripChartWdg.plotKeyVar(
            label="Rot measured err",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisError,
            keyInd=2,
            color="gray",
        )
        self.stripChartWdg.plotKeyVar(
            label="Rot applied corr",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.axisChange,
            keyInd=2,
            color="green",
        )
        self.stripChartWdg.showY(-2.0, 2.0, subplotInd=subplotInd)
        self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Rot Off (\")")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # seeing subplot
        # Index remembered so ProbeInfo can add per-probe FWHM lines to it.
        self.seeingSubplotInd = subplotInd
        self.stripChartWdg.plotKeyVar(
            label="Measured",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.fwhm,
            keyInd=1,
            color="blue",
        )
        self.stripChartWdg.plotKeyVar(
            label="Theoretical",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.seeing,
            keyInd=0,
            color="green",
        )
        self.stripChartWdg.showY(1.0, 1.2, subplotInd=subplotInd)
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Seeing (\")")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # focus subplot
        self.stripChartWdg.plotKeyVar(
            label="Focus net offset",
            subplotInd=subplotInd,
            keyVar=self.tccModel.secFocus,
            color="blue",
            drawstyle="steps-post",
        )
        self.stripChartWdg.plotKeyVar(
            label="Focus measured err",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.focusError,
            color="gray",
        )
        self.stripChartWdg.plotKeyVar(
            label="Focus applied corr",
            subplotInd=subplotInd,
            keyVar=self.guiderModel.focusChange,
            color="green",
        )
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Focus (um)")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # scale subplot
        # scaleFac is an absolute factor near 1; plot (value - 1) * 1e6.
        def cnvAbsScale(val):
            return (val - 1.0) * 1.0e6
        # scaleError/scaleChange are already deltas; just scale by 1e6.
        def cnvDeltaScale(val):
            return val * 1.0e6
        self.stripChartWdg.plotKeyVar(
            label="Scale net",
            subplotInd=subplotInd,
            keyVar=self.tccModel.scaleFac,
            func=cnvAbsScale,
            color="blue",
            drawstyle="steps-post",
        )
        self.stripChartWdg.plotKeyVar(
            label="Scale measured err",
            subplotInd=subplotInd,
            func=cnvDeltaScale,
            keyVar=self.guiderModel.scaleError,
            color="gray",
        )
        self.stripChartWdg.plotKeyVar(
            label="Scale applied corr",
            subplotInd=subplotInd,
            func=cnvDeltaScale,
            keyVar=self.guiderModel.scaleChange,
            color="green",
        )
        self.stripChartWdg.addConstantLine(0.0, subplotInd=subplotInd, color="gray")
        self.stripChartWdg.subplotArr[subplotInd].yaxis.set_label_text("Scale 1e6")
        self.stripChartWdg.subplotArr[subplotInd].legend(loc=3, frameon=False)
        subplotInd += 1
        # Per-probe data arrives via guider.probe; see probeCallback.
        self.guiderModel.probe.addCallback(self.probeCallback)
        # Small "C" button overlaid in the lower-left corner clears all charts.
        self.clearWdg = RO.Wdg.Button(master = self, text = "C", callFunc = self.clearCharts)
        self.clearWdg.grid(row=0, column=0, sticky = "sw")
    def cartridgeLoadedCallback(self, keyVar):
        """guider.cartridgeLoaded keyvar callback
        When seen ditch all guide-probe-specific lines
        """
        # NOTE(review): this handler is not registered with
        # guiderModel.cartridgeLoaded anywhere in this file; confirm the
        # registration happens elsewhere or it will never fire.
        self.clearProbeInfo()
    def clearCharts(self, wdg=None):
        """Clear all strip charts

        wdg is ignored; it is accepted so this can be used as a button callback.
        """
        self.stripChartWdg.clear()
    def clearProbeInfo(self):
        """Clear self.probeInfoDict and remove associated lines from plots
        """
        # itervalues: this module is Python 2 code.
        for probeInfo in self.probeInfoDict.itervalues():
            probeInfo.remove()
        self.probeInfoDict = dict()
    def probeCallback(self, keyVar):
        """guider.probe callback
        If guide probe is broken, unused or out of focus do nothing. Otherwise:
        - If probeInfo does not exist, create it and the associated plot line
        - Plot data. If probe is disabled then plot "nan" so that no point shows
        and lines remain broken if the probe is re-enabled later.
        """
        # print "%s.probeCallback(%s)" % (self, keyVar)
        # Ignore stale or synthesized data, and data with no probe number.
        if (not keyVar.isCurrent) or (not keyVar.isGenuine) or (keyVar[1] is None):
            return
        # Cannot interpret probe state without current gprobeBits.
        if (not self.guiderModel.gprobeBits.isCurrent) or (self.guiderModel.gprobeBits[0] is None):
            return
        probeNum = keyVar[1]
        # Bits 0-1 of gprobeBits flag a broken or unused probe.
        if self.guiderModel.gprobeBits[probeNum - 1] & 3 > 0:
            # broken or unused
            return
        # keyVar[6] appears to be the probe focus offset -- TODO confirm
        # against the guider actor keyword documentation.
        if abs(keyVar[6]) > 50:
            # not an in-focus probe
            return
        # Lazily create one ProbeInfo (and its plot line) per probe number.
        probeInfo = self.probeInfoDict.get(probeNum)
        if probeInfo is None:
            probeInfo = ProbeInfo(num=probeNum, guideMonitorWdg=self)
            self.probeInfoDict[probeNum] = probeInfo
        probeInfo.plotData(keyVar)
class ProbeInfo(object):
    """Per-guide-probe plot state: one FWHM line on the seeing subplot."""
    def __init__(self, num, guideMonitorWdg):
        """Information about a guide probe, including lines on the strip chart

        Inputs:
        - num: guide probe number (starting from 1)
        - guideMonitorWdg: the parent GuideMonitorWdg (provides models and chart)
        """
        self.num = int(num)
        self.guiderModel = guideMonitorWdg.guiderModel
        self.stripChartWdg = guideMonitorWdg.stripChartWdg
        # Point markers only (no connecting line) so gaps stay visible.
        self.fwhmLine = self.stripChartWdg.addLine(
            subplotInd=guideMonitorWdg.seeingSubplotInd,
            color = "blue",
            linestyle = "",
            marker = ",",
        )
    def plotData(self, keyVar):
        """guider.probe callback
        Plot data. If probe is disabled then plot "nan" so that no point shows
        and lines remain broken if the probe is re-enabled later.
        """
        # print "%s.probeCallback(%s)" % (self, keyVar)
        # Bits 0-2 of gprobeBits flag broken, unused or disabled.
        if self.guiderModel.gprobeBits[self.num - 1] & 7 > 0:
            # broken, unused or disabled; testing broken or unused is paranoia
            # since this object should never have been created, but costs no extra time
            # print "%s.plotData(%s); plot NaN" % (self, keyVar)
            self.fwhmLine.addPoint(float("nan"))
        else:
            # print "%s.plotData(%s); plot %s" % (self, keyVar, keyVar[5])
            # keyVar[5] appears to be the measured FWHM -- TODO confirm
            # against the guider actor keyword documentation.
            self.fwhmLine.addPoint(keyVar[5])
    def remove(self):
        """Remove all associated plot lines
        """
        self.stripChartWdg.remove(self.fwhmLine)
    def __str__(self):
        return "ProbeInfo(%s)" % (self.num,)
if __name__ == "__main__":
    # Manual test: display the window against the TUI test-data harness.
    import TestData
    addWindow(TestData.tuiModel.tlSet)
    TestData.tuiModel.tlSet.makeVisible(WindowName)
    TestData.runTest()
    TestData.tuiModel.reactor.run()
| bsd-3-clause |
kanagasabapathi/python-for-android | python3-alpha/python3-src/Lib/test/test_hashlib.py | 47 | 14178 | # Test hashlib module
#
# $Id$
#
# Copyright (C) 2005-2010 Gregory P. Smith (greg@krypto.org)
# Licensed to PSF under a Contributor Agreement.
#
import array
import hashlib
import itertools
import sys
try:
import threading
except ImportError:
threading = None
import unittest
import warnings
from test import support
from test.support import _4G, precisionbigmemtest
# Were we compiled --with-pydebug or with #define Py_DEBUG?
# sys.gettotalrefcount only exists in debug builds of CPython.
COMPILED_WITH_PYDEBUG = hasattr(sys, 'gettotalrefcount')
def hexstr(s):
    """Return the lowercase hexadecimal representation of a bytes object."""
    assert isinstance(s, bytes), repr(s)
    digits = "0123456789abcdef"
    # Iterating bytes yields ints; emit two hex digits per byte.
    return ''.join(digits[(byte >> 4) & 0xF] + digits[byte & 0xF] for byte in s)
class HashLibTestCase(unittest.TestCase):
    """Exercise every available implementation of each hash algorithm:
    the hashlib constructors, hashlib.new(name), the OpenSSL-backed
    _hashlib constructors and the builtin _md5/_sha* modules when present.
    """
    supported_hash_names = ( 'md5', 'MD5', 'sha1', 'SHA1',
                             'sha224', 'SHA224', 'sha256', 'SHA256',
                             'sha384', 'SHA384', 'sha512', 'SHA512' )
    # In a debug build, warn (rather than silently skip) when an accelerator
    # extension fails to import: that usually indicates a build problem.
    _warn_on_extension_import = COMPILED_WITH_PYDEBUG
    def _conditional_import_module(self, module_name):
        """Import a module and return a reference to it or None on failure."""
        try:
            exec('import '+module_name)
        except ImportError as error:
            if self._warn_on_extension_import:
                warnings.warn('Did a C extension fail to compile? %s' % error)
        # exec bound the module in this frame's locals (or not, on failure).
        return locals().get(module_name)
    def __init__(self, *args, **kwargs):
        # Build self.constructors_to_test: algorithm name (lowercase) -> set
        # of callables that construct a hash object for that algorithm.
        algorithms = set()
        for algorithm in self.supported_hash_names:
            algorithms.add(algorithm.lower())
        self.constructors_to_test = {}
        for algorithm in algorithms:
            self.constructors_to_test[algorithm] = set()
        # For each algorithm, test the direct constructor and the use
        # of hashlib.new given the algorithm name.
        for algorithm, constructors in self.constructors_to_test.items():
            constructors.add(getattr(hashlib, algorithm))
            def _test_algorithm_via_hashlib_new(data=None, _alg=algorithm):
                if data is None:
                    return hashlib.new(_alg)
                return hashlib.new(_alg, data)
            constructors.add(_test_algorithm_via_hashlib_new)
        _hashlib = self._conditional_import_module('_hashlib')
        if _hashlib:
            # These two algorithms should always be present when this module
            # is compiled.  If not, something was compiled wrong.
            assert hasattr(_hashlib, 'openssl_md5')
            assert hasattr(_hashlib, 'openssl_sha1')
            for algorithm, constructors in self.constructors_to_test.items():
                constructor = getattr(_hashlib, 'openssl_'+algorithm, None)
                if constructor:
                    constructors.add(constructor)
        # The pure builtin (non-OpenSSL) modules are optional extensions.
        _md5 = self._conditional_import_module('_md5')
        if _md5:
            self.constructors_to_test['md5'].add(_md5.md5)
        _sha1 = self._conditional_import_module('_sha1')
        if _sha1:
            self.constructors_to_test['sha1'].add(_sha1.sha1)
        _sha256 = self._conditional_import_module('_sha256')
        if _sha256:
            self.constructors_to_test['sha224'].add(_sha256.sha224)
            self.constructors_to_test['sha256'].add(_sha256.sha256)
        _sha512 = self._conditional_import_module('_sha512')
        if _sha512:
            self.constructors_to_test['sha384'].add(_sha512.sha384)
            self.constructors_to_test['sha512'].add(_sha512.sha512)
        super(HashLibTestCase, self).__init__(*args, **kwargs)
    def test_hash_array(self):
        # Hash constructors must accept any buffer-API object, e.g. array.
        a = array.array("b", range(10))
        constructors = self.constructors_to_test.values()
        for cons in itertools.chain.from_iterable(constructors):
            c = cons(a)
            c.hexdigest()
    def test_algorithms_guaranteed(self):
        self.assertEqual(hashlib.algorithms_guaranteed,
                         set(_algo for _algo in self.supported_hash_names
                               if _algo.islower()))
    def test_algorithms_available(self):
        self.assertTrue(set(hashlib.algorithms_guaranteed).
                            issubset(hashlib.algorithms_available))
    def test_unknown_hash(self):
        # hashlib.new must reject names it does not recognize.
        try:
            hashlib.new('spam spam spam spam spam')
        except ValueError:
            pass
        else:
            self.assertTrue(0 == "hashlib didn't reject bogus hash name")
    def test_get_builtin_constructor(self):
        get_builtin_constructor = hashlib.__dict__[
                '__get_builtin_constructor']
        self.assertRaises(ValueError, get_builtin_constructor, 'test')
        try:
            import _md5
        except ImportError:
            pass
        # This forces an ImportError for "import _md5" statements
        sys.modules['_md5'] = None
        try:
            self.assertRaises(ValueError, get_builtin_constructor, 'md5')
        finally:
            # Restore the real module (or remove the None placeholder).
            if '_md5' in locals():
                sys.modules['_md5'] = _md5
            else:
                del sys.modules['_md5']
    def test_hexdigest(self):
        # hexdigest() must match the hex rendering of digest() bytes.
        for name in self.supported_hash_names:
            h = hashlib.new(name)
            assert isinstance(h.digest(), bytes), name
            self.assertEqual(hexstr(h.digest()), h.hexdigest())
    def test_large_update(self):
        # Incremental update() calls must equal one update() on the
        # concatenated data.
        aas = b'a' * 128
        bees = b'b' * 127
        cees = b'c' * 126
        for name in self.supported_hash_names:
            m1 = hashlib.new(name)
            m1.update(aas)
            m1.update(bees)
            m1.update(cees)
            m2 = hashlib.new(name)
            m2.update(aas + bees + cees)
            self.assertEqual(m1.digest(), m2.digest())
    def check(self, name, data, digest):
        """Assert every constructor for *name* hashes *data* to *digest*."""
        constructors = self.constructors_to_test[name]
        # 2 is for hashlib.name(...) and hashlib.new(name, ...)
        self.assertGreaterEqual(len(constructors), 2)
        for hash_object_constructor in constructors:
            computed = hash_object_constructor(data).hexdigest()
            self.assertEqual(
                    computed, digest,
                    "Hash algorithm %s constructed using %s returned hexdigest"
                    " %r for %d byte input data that should have hashed to %r."
                    % (name, hash_object_constructor,
                       computed, len(data), digest))
    def check_no_unicode(self, algorithm_name):
        # Unicode objects are not allowed as input.
        constructors = self.constructors_to_test[algorithm_name]
        for hash_object_constructor in constructors:
            self.assertRaises(TypeError, hash_object_constructor, 'spam')
    def test_no_unicode(self):
        self.check_no_unicode('md5')
        self.check_no_unicode('sha1')
        self.check_no_unicode('sha224')
        self.check_no_unicode('sha256')
        self.check_no_unicode('sha384')
        self.check_no_unicode('sha512')
    # MD5 known-answer tests (vectors from RFC 1321-style test suites).
    def test_case_md5_0(self):
        self.check('md5', b'', 'd41d8cd98f00b204e9800998ecf8427e')
    def test_case_md5_1(self):
        self.check('md5', b'abc', '900150983cd24fb0d6963f7d28e17f72')
    def test_case_md5_2(self):
        self.check('md5',
                   b'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789',
                   'd174ab98d277d9f5a5611c2c9f419d9f')
    # Inputs around the 4 GiB mark exercise 64-bit length handling; they can
    # legitimately overflow on 32-bit builds.
    @precisionbigmemtest(size=_4G + 5, memuse=1)
    def test_case_md5_huge(self, size):
        if size == _4G + 5:
            try:
                self.check('md5', b'A'*size, 'c9af2dff37468ce5dfee8f2cfc0a9c6d')
            except OverflowError:
                pass # 32-bit arch
    @precisionbigmemtest(size=_4G - 1, memuse=1)
    def test_case_md5_uintmax(self, size):
        if size == _4G - 1:
            try:
                self.check('md5', b'A'*size, '28138d306ff1b8281f1a9067e1a1a2b3')
            except OverflowError:
                pass # 32-bit arch
    # use the three examples from Federal Information Processing Standards
    # Publication 180-1, Secure Hash Standard,  1995 April 17
    # http://www.itl.nist.gov/div897/pubs/fip180-1.htm
    def test_case_sha1_0(self):
        self.check('sha1', b"",
                   "da39a3ee5e6b4b0d3255bfef95601890afd80709")
    def test_case_sha1_1(self):
        self.check('sha1', b"abc",
                   "a9993e364706816aba3e25717850c26c9cd0d89d")
    def test_case_sha1_2(self):
        self.check('sha1',
                   b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
                   "84983e441c3bd26ebaae4aa1f95129e5e54670f1")
    def test_case_sha1_3(self):
        self.check('sha1', b"a" * 1000000,
                   "34aa973cd4c4daa4f61eeb2bdbad27316534016f")
    # use the examples from Federal Information Processing Standards
    # Publication 180-2, Secure Hash Standard,  2002 August 1
    # http://csrc.nist.gov/publications/fips/fips180-2/fips180-2.pdf
    def test_case_sha224_0(self):
        self.check('sha224', b"",
          "d14a028c2a3a2bc9476102bb288234c415a2b01f828ea62ac5b3e42f")
    def test_case_sha224_1(self):
        self.check('sha224', b"abc",
          "23097d223405d8228642a477bda255b32aadbce4bda0b3f7e36c9da7")
    def test_case_sha224_2(self):
        self.check('sha224',
          b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
          "75388b16512776cc5dba5da1fd890150b0c6455cb4f58b1952522525")
    def test_case_sha224_3(self):
        self.check('sha224', b"a" * 1000000,
          "20794655980c91d8bbb4c1ea97618a4bf03f42581948b2ee4ee7ad67")
    def test_case_sha256_0(self):
        self.check('sha256', b"",
          "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855")
    def test_case_sha256_1(self):
        self.check('sha256', b"abc",
          "ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad")
    def test_case_sha256_2(self):
        self.check('sha256',
          b"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq",
          "248d6a61d20638b8e5c026930c3e6039a33ce45964ff2167f6ecedd419db06c1")
    def test_case_sha256_3(self):
        self.check('sha256', b"a" * 1000000,
          "cdc76e5c9914fb9281a1c7e284d73e67f1809a48a497200e046d39ccc7112cd0")
    def test_case_sha384_0(self):
        self.check('sha384', b"",
          "38b060a751ac96384cd9327eb1b1e36a21fdb71114be07434c0cc7bf63f6e1da"+
          "274edebfe76f65fbd51ad2f14898b95b")
    def test_case_sha384_1(self):
        self.check('sha384', b"abc",
          "cb00753f45a35e8bb5a03d699ac65007272c32ab0eded1631a8b605a43ff5bed"+
          "8086072ba1e7cc2358baeca134c825a7")
    def test_case_sha384_2(self):
        self.check('sha384',
                   b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
          "09330c33f71147e83d192fc782cd1b4753111b173b3b05d22fa08086e3b0f712"+
          "fcc7c71a557e2db966c3e9fa91746039")
    def test_case_sha384_3(self):
        self.check('sha384', b"a" * 1000000,
          "9d0e1809716474cb086e834e310a4a1ced149e9c00f248527972cec5704c2a5b"+
          "07b8b3dc38ecc4ebae97ddd87f3d8985")
    def test_case_sha512_0(self):
        self.check('sha512', b"",
          "cf83e1357eefb8bdf1542850d66d8007d620e4050b5715dc83f4a921d36ce9ce"+
          "47d0d13c5d85f2b0ff8318d2877eec2f63b931bd47417a81a538327af927da3e")
    def test_case_sha512_1(self):
        self.check('sha512', b"abc",
          "ddaf35a193617abacc417349ae20413112e6fa4e89a97ea20a9eeee64b55d39a"+
          "2192992a274fc1a836ba3c23a3feebbd454d4423643ce80e2a9ac94fa54ca49f")
    def test_case_sha512_2(self):
        self.check('sha512',
                   b"abcdefghbcdefghicdefghijdefghijkefghijklfghijklmghijklmn"+
                   b"hijklmnoijklmnopjklmnopqklmnopqrlmnopqrsmnopqrstnopqrstu",
          "8e959b75dae313da8cf4f72814fc143f8f7779c6eb9f7fa17299aeadb6889018"+
          "501d289e4900f7e4331b99dec4b5433ac7d329eeb6dd26545e96e55b874be909")
    def test_case_sha512_3(self):
        self.check('sha512', b"a" * 1000000,
          "e718483d0ce769644e2e42c7bc15b4638e1f98b13b2044285632a803afa973eb"+
          "de0ff244877ea60a4cb0432ce577c31beb009c5c2c49aa2e4eadb217ad8cc09b")
    def test_gil(self):
        # Check things work fine with an input larger than the size required
        # for multithreaded operation (which is hardwired to 2048).
        gil_minsize = 2048
        m = hashlib.md5()
        m.update(b'1')
        m.update(b'#' * gil_minsize)
        m.update(b'1')
        self.assertEqual(m.hexdigest(), 'cb1e1a2cbc80be75e19935d621fb9b21')
        m = hashlib.md5(b'x' * gil_minsize)
        self.assertEqual(m.hexdigest(), 'cfb767f225d58469c5de3632a8803958')
    @unittest.skipUnless(threading, 'Threading required for this test.')
    @support.reap_threads
    def test_threaded_hashing(self):
        # Updating the same hash object from several threads at once
        # using data chunk sizes containing the same byte sequences.
        #
        # If the internal locks are working to prevent multiple
        # updates on the same object from running at once, the resulting
        # hash will be the same as doing it single threaded upfront.
        hasher = hashlib.sha1()
        num_threads = 5
        smallest_data = b'swineflu'
        data = smallest_data*200000
        expected_hash = hashlib.sha1(data*num_threads).hexdigest()
        def hash_in_chunks(chunk_size, event):
            index = 0
            while index < len(data):
                hasher.update(data[index:index+chunk_size])
                index += chunk_size
            event.set()
        events = []
        for threadnum in range(num_threads):
            # Each thread uses a different chunk size so updates interleave
            # unevenly; chunk sizes stay multiples of the repeated unit.
            chunk_size = len(data) // (10**threadnum)
            assert chunk_size > 0
            assert chunk_size % len(smallest_data) == 0
            event = threading.Event()
            events.append(event)
            threading.Thread(target=hash_in_chunks,
                             args=(chunk_size, event)).start()
        for event in events:
            event.wait()
        self.assertEqual(expected_hash, hasher.hexdigest())
def test_main():
    # Delegate to the regrtest runner so this file behaves the same run
    # standalone or as part of the full test suite.
    support.run_unittest(HashLibTestCase)
if __name__ == "__main__":
    test_main()
| apache-2.0 |
WillGuan105/django | tests/view_tests/tests/test_csrf.py | 253 | 3203 | from django.test import Client, SimpleTestCase, override_settings
from django.utils.translation import override
@override_settings(ROOT_URLCONF="view_tests.urls")
class CsrfViewTests(SimpleTestCase):
    """Tests of the content of the default CSRF-failure (403) view."""
    def setUp(self):
        super(CsrfViewTests, self).setUp()
        # enforce_csrf_checks=True makes the test client perform real CSRF
        # validation, so failure responses are actually produced.
        self.client = Client(enforce_csrf_checks=True)
    @override_settings(
        USE_I18N=True,
        MIDDLEWARE_CLASSES=[
            'django.middleware.locale.LocaleMiddleware',
            'django.middleware.common.CommonMiddleware',
            'django.middleware.csrf.CsrfViewMiddleware',
        ],
    )
    def test_translation(self):
        """
        Test that an invalid request is rejected with a localized error message.
        """
        response = self.client.post('/')
        self.assertContains(response, "Forbidden", status_code=403)
        self.assertContains(response,
                            "CSRF verification failed. Request aborted.",
                            status_code=403)
        # Expect the Dutch translation when LANGUAGE_CODE='nl', even with an
        # 'en-us' translation override active.
        with self.settings(LANGUAGE_CODE='nl'), override('en-us'):
            response = self.client.post('/')
            self.assertContains(response, "Verboden", status_code=403)
            self.assertContains(response,
                                "CSRF-verificatie mislukt. Verzoek afgebroken.",
                                status_code=403)
    @override_settings(
        SECURE_PROXY_SSL_HEADER=('HTTP_X_FORWARDED_PROTO', 'https')
    )
    def test_no_referer(self):
        """
        Referer header is strictly checked for POST over HTTPS. Trigger the
        exception by sending an incorrect referer.
        """
        response = self.client.post('/', HTTP_X_FORWARDED_PROTO='https')
        self.assertContains(response,
                            "You are seeing this message because this HTTPS "
                            "site requires a 'Referer header' to be "
                            "sent by your Web browser, but none was sent.",
                            status_code=403)
    def test_no_cookies(self):
        """
        The CSRF cookie is checked for POST. Failure to send this cookie should
        provide a nice error message.
        """
        response = self.client.post('/')
        self.assertContains(response,
                            "You are seeing this message because this site "
                            "requires a CSRF cookie when submitting forms. "
                            "This cookie is required for security reasons, to "
                            "ensure that your browser is not being hijacked "
                            "by third parties.",
                            status_code=403)
    # In Django 1.10, this can be changed to TEMPLATES=[] because the code path
    # that reads the TEMPLATE_* settings in that case will have been removed.
    @override_settings(TEMPLATES=[{
        'BACKEND': 'django.template.backends.dummy.TemplateStrings',
    }])
    def test_no_django_template_engine(self):
        """
        The CSRF view doesn't depend on the TEMPLATES configuration (#24388).
        """
        response = self.client.post('/')
        self.assertContains(response, "Forbidden", status_code=403)
| bsd-3-clause |
glorizen/nupic | tests/unit/nupic/algorithms/nab_detector_test.py | 31 | 10843 | #! /usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Test the NuPIC imports run as expected in
nab/detectors/numenta/numenta_detector.py. They are
nupic/algorithms/anomaly_likelihood and
nupic/frameworks/opf/modelfactory.ModelFactory. The intent here is not to test
functionality but rather that the functions are able to run in NAB.
NAB repo: https://github.com/numenta/NAB
"""
import copy
import csv
import datetime
import os
import unittest
from nupic.algorithms import anomaly_likelihood as an
from nupic.frameworks.opf.modelfactory import ModelFactory
from nupic.frameworks.opf.clamodel import CLAModel
from nupic.support.unittesthelpers.testcasebase import TestCaseBase
def _getDateList(numSamples, startDatetime):
"""
Generate a sequence of sample dates starting at startDatetime and incrementing
every 5 minutes.
@param numSamples (int) number of datetimes to generate
@param startDatetime (datetime) the start (first) datetime
@return dateList (list) generated sequence of datetimes
"""
dateList = []
td = datetime.timedelta(minutes=5)
currentDate = startDatetime + td
for _ in xrange(numSamples):
dateList.append(currentDate)
currentDate = currentDate + td
return dateList
def _addSampleData(numSamples=20, spikeValue=1.0, spikePeriod=10):
  """
  Build a list of sample anomaly data entries. Values are a constant 0.0,
  with anomaly spikes of spikeValue every spikePeriod entries. The test
  data is trivial, as explicit testing of functions is done in other unit
  tests.

  @param numSamples   (int)    number of data entries to produce

  @param spikeValue   (float)  value of the anomaly spikes

  @param spikePeriod  (int)    periodicity of anomaly spikes, where one will
                               occur for every spikePeriod data entries

  @return entries     (list)   list of [datetime, index, value] rows
  """
  # Generate datetimes
  firstDate = datetime.datetime(2015, 4, 1)
  timestamps = _getDateList(numSamples, firstDate)
  # Generate data with anomaly spikes
  entries = []
  for position, timestamp in enumerate(timestamps):
    isSpike = (spikePeriod > 0) and ((position + 1) % spikePeriod == 0)
    entries.append([timestamp, position, spikeValue if isSpike else 0.0])
  return entries
def _writeToCSV(data, headers, fileName):
  """
  Write list of data to CSV.

  @param data     (list)  list of data entries, where each row is a list

  @param headers  (list)  column headers, where each entry in list is
                          a string

  @param fileName (str)   path of the CSV file to (over)write
  """
  # "wb" is the Python 2 convention for the csv module (the writer manages
  # line endings itself via lineterminator); under Python 3 csv expects a
  # text-mode file opened with newline="".
  with open(fileName, "wb") as f:
    writer = csv.writer(f, delimiter=",", lineterminator="\n")
    writer.writerow(headers)
    writer.writerows(data)
class NABTest(TestCaseBase):
  """Smoke tests that the NuPIC entry points used by NAB's numenta_detector
  (ModelFactory.create and the anomaly_likelihood helper functions) run
  end to end; functional correctness is covered by other unit tests.
  """
  def setUp(self):
    # Generate sample data, save to CSV (not used now, but put in place
    # for future NAB tests)
    self.data = _addSampleData()
    self.dataFileName = "temp_data.csv"
    _writeToCSV(self.data, ["datetime", "index", "value"], self.dataFileName)
  def tearDown(self):
    # Remove the temp CSV created in setUp.
    os.remove(self.dataFileName)
  def testModelCreator(self):
    """
    Tests the ModelFactory.create() method in
    "nupic/frameworks/opf/modelfactory.py" by creating a new model object, as
    in "NAB/detectors/numenta/numenta_detector.py".
    Model paramaters are same as in NAB v0.8.
    """
    # Create model as in NAB/.../numenta_detector.py
    modelParams = {
      "aggregationInfo": {
        "days": 0,
        "fields": [],
        "hours": 0,
        "microseconds": 0,
        "milliseconds": 0,
        "minutes": 0,
        "months": 0,
        "seconds": 0,
        "weeks": 0,
        "years": 0
      },
      "model": "CLA",
      "modelParams": {
        "anomalyParams": {
          "anomalyCacheRecords": None,
          "autoDetectThreshold": None,
          "autoDetectWaitRecords": 5030
        },
        "clEnable": False,
        "clParams": {
          "alpha": 0.035828933612158,
          "clVerbosity": 0,
          "regionName": "CLAClassifierRegion",
          "steps": "1"
        },
        "inferenceType": "TemporalAnomaly",
        "sensorParams": {
          "encoders": {
            "timestamp_timeOfDay": {
              "fieldname": "timestamp",
              "name": "timestamp_timeOfDay",
              "timeOfDay": [
                21,
                9.49122334747737
              ],
              "type": "DateEncoder"
            },
            "timestamp_dayOfWeek": None,
            "timestamp_weekend": None,
            "value": {
              "name": "value",
              "fieldname": "value",
              "numBuckets": 94.0,
              "seed": 42,
              "type": "RandomDistributedScalarEncoder"
            }
          },
          "sensorAutoReset": None,
          "verbosity": 0
        },
        "spEnable": True,
        "spParams": {
          "potentialPct": 0.8,
          "columnCount": 2048,
          "globalInhibition": 1,
          "inputWidth": 0,
          "maxBoost": 1.0,
          "numActiveColumnsPerInhArea": 40,
          "seed": 1956,
          "spVerbosity": 0,
          "spatialImp": "cpp",
          "synPermActiveInc": 0.0015,
          "synPermConnected": 0.1,
          "synPermInactiveDec": 0.0005
        },
        "tpEnable": True,
        "tpParams": {
          "activationThreshold": 13,
          "cellsPerColumn": 32,
          "columnCount": 2048,
          "globalDecay": 0.0,
          "initialPerm": 0.21,
          "inputWidth": 2048,
          "maxAge": 0,
          "maxSegmentsPerCell": 128,
          "maxSynapsesPerSegment": 32,
          "minThreshold": 10,
          "newSynapseCount": 20,
          "outputType": "normal",
          "pamLength": 3,
          "permanenceDec": 0.1,
          "permanenceInc": 0.1,
          "seed": 1960,
          "temporalImp": "cpp",
          "verbosity": 0
        },
        "trainSPNetOnlyIfRequested": False
      },
      "predictAheadTime": None,
      "version": 1
    }
    sensorParams = (modelParams["modelParams"]["sensorParams"]
                                              ["encoders"]["value"])
    # As in NAB: derive the encoder resolution from the data range
    # (1.2 - 0.2) and numBuckets, with a floor of 0.001.
    sensorParams["resolution"] = max(0.001,
                                     (1.2 - 0.2) / sensorParams.pop("numBuckets"))
    model = ModelFactory.create(modelParams)
    self.assertIs(type(model), CLAModel, msg="The created model is not a"
                  "CLAModel, but rather is of type %s" % type(model))
  def testNABAnomalyLikelihood(self):
    """
    Tests the specific calls to nupic/algorithms/anomaly_likelihood as they're
    made in "NAB/detectors/numenta/numenta_detector.py".
    Note "NAB/.../numenta_detector.py" has its own class AnomalyLikelihood,
    different from nupic/algorithms/anomaly_likelihood.AnomalyLikelihood, but
    which calls the functions estimateAnomalyLikelihoods() and
    updateAnomalyLikelihoods() from "nupic/algorithms/anomaly_likelihood.py".
    """
    # AnomalyLikelihood object initial values
    iteration = 0
    probationaryPeriod = 4
    historicalScores = []
    likelihoodList = []
    for dataPoint in self.data:
      # Ignore the first probationaryPeriod data points
      if len(historicalScores) < probationaryPeriod:
        likelihood = 0.5
      else:
        # Re-estimate the distribution every 4 iterations (as NAB does),
        # then update it incrementally with the new data point.
        if iteration % 4 == 0:
          _, _, distribution = an.estimateAnomalyLikelihoods(
            historicalScores,
            skipRecords = probationaryPeriod)
        likelihoods, _, distribution = an.updateAnomalyLikelihoods(
          [dataPoint], distribution)
        likelihood = 1.0 - likelihoods[0]
      historicalScores.append(dataPoint)
      iteration += 1
      likelihoodList.append(likelihood)
    # Regression values: the likelihoods this exact data/sequence produced.
    truthLikelihoodList = [0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
                           0.044565462999999972, 0.044565462999999972,
                           0.044565462999999972, 0.044565462999999972,
                           0.90319951499999995, 0.90319951499999995,
                           0.90319951499999995, 0.90319951499999995,
                           0.78814460099999994, 0.78814460099999994,
                           0.78814460099999994, 0.78814460099999994]
    for i in xrange(len(likelihoodList)):
      self.assertAlmostEqual(likelihoodList[i], truthLikelihoodList[i],
                             msg="unequal values are at index %i" % i)
if __name__ == "__main__":
  # Allow running this test file directly.
  unittest.main()
| agpl-3.0 |
liyongyue/dnsspider | build/lib/dns/rdtypes/ANY/CERT.py | 2 | 4317 | # Copyright (C) 2003-2005 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import cStringIO
import struct
import dns.exception
import dns.dnssec
import dns.rdata
import dns.tokenizer
# Mappings between CERT certificate-type numeric values and their mnemonics
# (see RFC 2538); kept as two dicts for O(1) lookup in both directions.
_ctype_by_value = {
    1 : 'PKIX',
    2 : 'SPKI',
    3 : 'PGP',
    253 : 'URI',
    254 : 'OID',
    }
_ctype_by_name = {
    'PKIX' : 1,
    'SPKI' : 2,
    'PGP' : 3,
    'URI' : 253,
    'OID' : 254,
    }
def _ctype_from_text(what):
    """Convert a certificate-type mnemonic (e.g. 'PGP') to its numeric value.

    Strings that are not known mnemonics are parsed as decimal integers;
    a non-numeric unknown string therefore raises ValueError.
    """
    v = _ctype_by_name.get(what)
    # "is not None" is the idiomatic identity test (PEP 8), replacing the
    # awkward "not v is None".
    if v is not None:
        return v
    return int(what)
def _ctype_to_text(what):
    """Convert a numeric certificate type to its mnemonic (e.g. 3 -> 'PGP').

    Values without a registered mnemonic are rendered as their decimal
    string representation.
    """
    v = _ctype_by_value.get(what)
    # "is not None" is the idiomatic identity test (PEP 8), replacing the
    # awkward "not v is None".
    if v is not None:
        return v
    return str(what)
class CERT(dns.rdata.Rdata):
    """CERT record

    Stores a certificate (or CRL) with its type, key tag and algorithm.
    Note: this module is Python 2 code (old-style raise, classmethod()
    applied after definition, str-based base64 codec).

    @ivar certificate_type: certificate type
    @type certificate_type: int
    @ivar key_tag: key tag
    @type key_tag: int
    @ivar algorithm: algorithm
    @type algorithm: int
    @ivar certificate: the certificate or CRL
    @type certificate: string
    @see: RFC 2538"""
    __slots__ = ['certificate_type', 'key_tag', 'algorithm', 'certificate']
    def __init__(self, rdclass, rdtype, certificate_type, key_tag, algorithm,
                 certificate):
        super(CERT, self).__init__(rdclass, rdtype)
        self.certificate_type = certificate_type
        self.key_tag = key_tag
        self.algorithm = algorithm
        self.certificate = certificate
    def to_text(self, origin=None, relativize=True, **kw):
        # Render as: <type mnemonic/number> <key tag> <algorithm> <base64 cert>
        certificate_type = _ctype_to_text(self.certificate_type)
        return "%s %d %s %s" % (certificate_type, self.key_tag,
                                dns.dnssec.algorithm_to_text(self.algorithm),
                                dns.rdata._base64ify(self.certificate))
    def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True):
        # Parse: certificate type, key tag and algorithm tokens, then base64
        # data split across any number of tokens up to end of line/file.
        certificate_type = _ctype_from_text(tok.get_string())
        key_tag = tok.get_uint16()
        algorithm = dns.dnssec.algorithm_from_text(tok.get_string())
        if algorithm < 0 or algorithm > 255:
            raise dns.exception.SyntaxError, "bad algorithm type"
        chunks = []
        while 1:
            t = tok.get()
            if t[0] == dns.tokenizer.EOL or t[0] == dns.tokenizer.EOF:
                break
            if t[0] != dns.tokenizer.IDENTIFIER:
                raise dns.exception.SyntaxError
            chunks.append(t[1])
        b64 = ''.join(chunks)
        # Python 2 str-to-str base64 decode.
        certificate = b64.decode('base64_codec')
        return cls(rdclass, rdtype, certificate_type, key_tag,
                   algorithm, certificate)
    from_text = classmethod(from_text)
    def to_wire(self, file, compress = None, origin = None):
        # Fixed 5-byte header (type, key tag, algorithm) then raw certificate.
        prefix = struct.pack("!HHB", self.certificate_type, self.key_tag,
                             self.algorithm)
        file.write(prefix)
        file.write(self.certificate)
    def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None):
        prefix = wire[current : current + 5]
        current += 5
        rdlen -= 5
        # rdlen shorter than the fixed header is malformed data.
        if rdlen < 0:
            raise dns.exception.FormError
        (certificate_type, key_tag, algorithm) = struct.unpack("!HHB", prefix)
        certificate = wire[current : current + rdlen]
        return cls(rdclass, rdtype, certificate_type, key_tag, algorithm,
                   certificate)
    from_wire = classmethod(from_wire)
    def _cmp(self, other):
        # Compare via canonical wire form, which gives a total ordering
        # without inspecting individual fields.
        f = cStringIO.StringIO()
        self.to_wire(f)
        wire1 = f.getvalue()
        f.seek(0)
        f.truncate()
        other.to_wire(f)
        wire2 = f.getvalue()
        f.close()
        return cmp(wire1, wire2)
| isc |
frankvdp/django | tests/auth_tests/test_remote_user.py | 70 | 10801 | from datetime import datetime
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.backends import RemoteUserBackend
from django.contrib.auth.middleware import RemoteUserMiddleware
from django.contrib.auth.models import User
from django.test import TestCase, modify_settings, override_settings
from django.utils import timezone
@override_settings(ROOT_URLCONF='auth_tests.urls')
class RemoteUserTest(TestCase):
    """
    Tests RemoteUserMiddleware + RemoteUserBackend driven by the REMOTE_USER
    header.  Subclasses override the class attributes below to exercise
    custom middleware/backend/header combinations.
    """
    middleware = 'django.contrib.auth.middleware.RemoteUserMiddleware'
    backend = 'django.contrib.auth.backends.RemoteUserBackend'
    header = 'REMOTE_USER'
    # Usernames to be passed in REMOTE_USER for the test_known_user test case.
    known_user = 'knownuser'
    known_user2 = 'knownuser2'
    def setUp(self):
        # Append the middleware/backend under test to the settings for the
        # duration of each test; undone in tearDown().
        self.patched_settings = modify_settings(
            AUTHENTICATION_BACKENDS={'append': self.backend},
            MIDDLEWARE={'append': self.middleware},
        )
        self.patched_settings.enable()
    def tearDown(self):
        self.patched_settings.disable()
    def test_no_remote_user(self):
        """
        Tests requests where no remote user is specified and ensures that no
        users get created.
        """
        num_users = User.objects.count()
        response = self.client.get('/remote_user/')
        self.assertTrue(response.context['user'].is_anonymous)
        self.assertEqual(User.objects.count(), num_users)
        response = self.client.get('/remote_user/', **{self.header: None})
        self.assertTrue(response.context['user'].is_anonymous)
        self.assertEqual(User.objects.count(), num_users)
        response = self.client.get('/remote_user/', **{self.header: ''})
        self.assertTrue(response.context['user'].is_anonymous)
        self.assertEqual(User.objects.count(), num_users)
    def test_unknown_user(self):
        """
        Tests the case where the username passed in the header does not exist
        as a User.
        """
        num_users = User.objects.count()
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertEqual(response.context['user'].username, 'newuser')
        self.assertEqual(User.objects.count(), num_users + 1)
        User.objects.get(username='newuser')
        # Another request with same user should not create any new users.
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertEqual(User.objects.count(), num_users + 1)
    def test_known_user(self):
        """
        Tests the case where the username passed in the header is a valid User.
        """
        User.objects.create(username='knownuser')
        User.objects.create(username='knownuser2')
        num_users = User.objects.count()
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        self.assertEqual(User.objects.count(), num_users)
        # A different user passed in the headers causes the new user
        # to be logged in.
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user2})
        self.assertEqual(response.context['user'].username, 'knownuser2')
        self.assertEqual(User.objects.count(), num_users)
    def test_last_login(self):
        """
        A user's last_login is set the first time they make a
        request but not updated in subsequent requests with the same session.
        """
        user = User.objects.create(username='knownuser')
        # Set last_login to something so we can determine if it changes.
        default_login = datetime(2000, 1, 1)
        if settings.USE_TZ:
            default_login = default_login.replace(tzinfo=timezone.utc)
        user.last_login = default_login
        user.save()
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertNotEqual(default_login, response.context['user'].last_login)
        user = User.objects.get(username='knownuser')
        user.last_login = default_login
        user.save()
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(default_login, response.context['user'].last_login)
    def test_header_disappears(self):
        """
        A logged in user is logged out automatically when
        the REMOTE_USER header disappears during the same browser session.
        """
        User.objects.create(username='knownuser')
        # Known user authenticates
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        # During the session, the REMOTE_USER header disappears. Should trigger logout.
        response = self.client.get('/remote_user/')
        self.assertTrue(response.context['user'].is_anonymous)
        # verify the remoteuser middleware will not remove a user
        # authenticated via another backend
        User.objects.create_user(username='modeluser', password='foo')
        self.client.login(username='modeluser', password='foo')
        authenticate(username='modeluser', password='foo')
        response = self.client.get('/remote_user/')
        self.assertEqual(response.context['user'].username, 'modeluser')
    def test_user_switch_forces_new_login(self):
        """
        If the username in the header changes between requests
        that the original user is logged out
        """
        User.objects.create(username='knownuser')
        # Known user authenticates
        response = self.client.get('/remote_user/',
                                   **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        # During the session, the REMOTE_USER changes to a different user.
        response = self.client.get('/remote_user/',
                                   **{self.header: "newnewuser"})
        # The current user is not the prior remote_user.
        # In backends that create a new user, username is "newnewuser"
        # In backends that do not create new users, it is '' (anonymous user)
        self.assertNotEqual(response.context['user'].username, 'knownuser')
    def test_inactive_user(self):
        # An inactive known user must not be authenticated by the middleware.
        User.objects.create(username='knownuser', is_active=False)
        response = self.client.get('/remote_user/', **{self.header: 'knownuser'})
        self.assertTrue(response.context['user'].is_anonymous)
class RemoteUserNoCreateBackend(RemoteUserBackend):
    """Backend that doesn't create unknown users."""
    # RemoteUserBackend consults this flag before auto-creating users.
    create_unknown_user = False
class RemoteUserNoCreateTest(RemoteUserTest):
    """
    Contains the same tests as RemoteUserTest, but using a custom auth backend
    class that doesn't create unknown users.
    """
    backend = 'auth_tests.test_remote_user.RemoteUserNoCreateBackend'
    def test_unknown_user(self):
        # Overridden: with create_unknown_user = False the request must stay
        # anonymous and no User row may be created.
        num_users = User.objects.count()
        response = self.client.get('/remote_user/', **{self.header: 'newuser'})
        self.assertTrue(response.context['user'].is_anonymous)
        self.assertEqual(User.objects.count(), num_users)
class AllowAllUsersRemoteUserBackendTest(RemoteUserTest):
    """Tests with a backend (AllowAllUsersRemoteUserBackend) that allows
    inactive users to authenticate."""
    backend = 'django.contrib.auth.backends.AllowAllUsersRemoteUserBackend'
    def test_inactive_user(self):
        # Overridden: unlike the base backend, an inactive user IS logged in.
        user = User.objects.create(username='knownuser', is_active=False)
        response = self.client.get('/remote_user/', **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, user.username)
class CustomRemoteUserBackend(RemoteUserBackend):
    """
    Backend that overrides the RemoteUserBackend extension hooks
    (clean_username / configure_user).
    """

    def clean_username(self, username):
        """
        Strip everything from the first @ onwards, keeping the local part.
        """
        local_part, _, _ = username.partition('@')
        return local_part

    def configure_user(self, user):
        """
        Give every newly created user a fixed email address.
        """
        user.email = 'user@example.com'
        user.save()
        return user
class RemoteUserCustomTest(RemoteUserTest):
    """
    Tests a custom RemoteUserBackend subclass that overrides the clean_username
    and configure_user methods.
    """
    backend = 'auth_tests.test_remote_user.CustomRemoteUserBackend'
    # REMOTE_USER strings with email addresses for the custom backend to
    # clean.
    known_user = 'knownuser@example.com'
    known_user2 = 'knownuser2@example.com'
    def test_known_user(self):
        """
        The strings passed in REMOTE_USER should be cleaned and the known users
        should not have been configured with an email address.
        """
        super().test_known_user()
        # configure_user only runs for newly created users, so the
        # pre-existing users keep their empty email.
        self.assertEqual(User.objects.get(username='knownuser').email, '')
        self.assertEqual(User.objects.get(username='knownuser2').email, '')
    def test_unknown_user(self):
        """
        The unknown user created should be configured with an email address.
        """
        super().test_unknown_user()
        newuser = User.objects.get(username='newuser')
        self.assertEqual(newuser.email, 'user@example.com')
class CustomHeaderMiddleware(RemoteUserMiddleware):
    """
    Middleware that overrides custom HTTP auth user header.
    """
    # WSGI name of the header the middleware reads the username from.
    header = 'HTTP_AUTHUSER'
class CustomHeaderRemoteUserTest(RemoteUserTest):
    """
    Tests a custom RemoteUserMiddleware subclass with custom HTTP auth user
    header.
    """
    # Note: the parentheses make this a plain string, not a tuple.
    middleware = (
        'auth_tests.test_remote_user.CustomHeaderMiddleware'
    )
    header = 'HTTP_AUTHUSER'
class PersistentRemoteUserTest(RemoteUserTest):
    """
    PersistentRemoteUserMiddleware keeps the user logged in even if the
    subsequent calls do not contain the header value.
    """
    middleware = 'django.contrib.auth.middleware.PersistentRemoteUserMiddleware'
    require_header = False
    def test_header_disappears(self):
        """
        A logged in user is kept logged in even if the REMOTE_USER header
        disappears during the same browser session.
        """
        User.objects.create(username='knownuser')
        # Known user authenticates
        response = self.client.get('/remote_user/', **{self.header: self.known_user})
        self.assertEqual(response.context['user'].username, 'knownuser')
        # Should stay logged in if the REMOTE_USER header disappears.
        response = self.client.get('/remote_user/')
        self.assertFalse(response.context['user'].is_anonymous)
        self.assertEqual(response.context['user'].username, 'knownuser')
| bsd-3-clause |
divio/djangocms-text-ckeditor | djangocms_text_ckeditor/migrations/0004_auto_20160706_1339.py | 1 | 1795 | import re
from django.db import migrations, models
def _replace_text_body(model, input_pattern, output_tag, id_format):
regex = re.compile(input_pattern)
def _do_replace(match):
before_id, plugin_id, after_id = match.groups()
if not plugin_id:
return ''
bits = []
if before_id:
bits.append(before_id.strip())
bits.append(id_format.format(plugin_id))
if after_id:
bits.append(after_id.strip())
# By using .join() we ensure the correct
# amount of spaces are used to separate the different
# attributes.
tag_attrs = ' '.join(bits)
return output_tag.format(tag_attrs)
lookup = model.objects.filter
for plugin in model.objects.all():
new_body, count = regex.subn(_do_replace, plugin.body)
if count >= 1:
# Only update body if there were plugins in the text
lookup(pk=plugin.pk).update(body=new_body)
def forwards(apps, schema_editor):
    # Convert legacy '<img id="plugin_obj_N">' placeholders in Text plugin
    # bodies into the newer '<cms-plugin id="N">' markup.
    _replace_text_body(
        model=apps.get_model('djangocms_text_ckeditor', 'Text'),
        input_pattern=r'<img ([^>]*)\bid="plugin_obj_(?P<pk>\d+)"([^>]*)/?>',
        output_tag='<cms-plugin {}></cms-plugin>',
        id_format='id="{}"',
    )
def backwards(apps, schema_editor):
    # Inverse of forwards(): turn '<cms-plugin id="N">' markup back into the
    # legacy '<img id="plugin_obj_N">' placeholders.
    _replace_text_body(
        model=apps.get_model('djangocms_text_ckeditor', 'Text'),
        input_pattern=r'<cms-plugin ([^>]*)\bid="(?P<pk>\d+)"([^>]*)/?></cms-plugin>',
        output_tag='<img {}>',
        id_format='id="plugin_obj_{}"',
    )
class Migration(migrations.Migration):
    # Reversible data migration rewriting the plugin markup stored in Text
    # bodies (see forwards/backwards above).
    dependencies = [
        ('djangocms_text_ckeditor', '0003_set_related_name_for_cmsplugin_ptr'),
    ]
    operations = [
        migrations.RunPython(forwards, backwards)
    ]
| bsd-3-clause |
criccomini/airflow | tests/contrib/hooks/test_aws_hook.py | 6 | 7498 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
import boto3
from airflow import configuration
from airflow.models import Connection
from airflow.contrib.hooks.aws_hook import AwsHook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
try:
from moto import mock_emr, mock_dynamodb2, mock_sts
except ImportError:
mock_emr = None
mock_dynamodb2 = None
mock_sts = None
class TestAwsHook(unittest.TestCase):
    """
    Tests for airflow's AwsHook using moto to mock the AWS services and
    mock.patch to stub out the Airflow connection lookup.  Each test is
    skipped when the corresponding moto mock is unavailable.
    """
    @mock_emr
    def setUp(self):
        configuration.load_test_config()
    @unittest.skipIf(mock_emr is None, 'mock_emr package not present')
    @mock_emr
    def test_get_client_type_returns_a_boto3_client_of_the_requested_type(self):
        # Sanity-check that moto is intercepting boto3 before trusting the
        # hook's result.
        client = boto3.client('emr', region_name='us-east-1')
        if len(client.list_clusters()['Clusters']):
            raise ValueError('AWS not properly mocked')
        hook = AwsHook(aws_conn_id='aws_default')
        client_from_hook = hook.get_client_type('emr')
        self.assertEqual(client_from_hook.list_clusters()['Clusters'], [])
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_resource_type_returns_a_boto3_resource_of_the_requested_type(self):
        hook = AwsHook(aws_conn_id='aws_default')
        resource_from_hook = hook.get_resource_type('dynamodb')
        # this table needs to be created in production
        table = resource_from_hook.create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 0)
    @unittest.skipIf(mock_dynamodb2 is None, 'mock_dynamo2 package not present')
    @mock_dynamodb2
    def test_get_session_returns_a_boto3_session(self):
        hook = AwsHook(aws_conn_id='aws_default')
        session_from_hook = hook.get_session()
        resource_from_session = session_from_hook.resource('dynamodb')
        table = resource_from_session.create_table(
            TableName='test_airflow',
            KeySchema=[
                {
                    'AttributeName': 'id',
                    'KeyType': 'HASH'
                },
            ],
            AttributeDefinitions=[
                {
                    'AttributeName': 'name',
                    'AttributeType': 'S'
                }
            ],
            ProvisionedThroughput={
                'ReadCapacityUnits': 10,
                'WriteCapacityUnits': 10
            }
        )
        table.meta.client.get_waiter(
            'table_exists').wait(TableName='test_airflow')
        self.assertEqual(table.item_count, 0)
    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_login(self, mock_get_connection):
        # Credentials supplied via the connection's login/password fields.
        mock_connection = Connection(login='aws_access_key_id',
                                     password='aws_secret_access_key')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        self.assertIsNone(credentials_from_hook.token)
    @mock.patch.object(AwsHook, 'get_connection')
    def test_get_credentials_from_extra(self, mock_get_connection):
        # Credentials supplied via the connection's JSON "extra" field.
        mock_connection = Connection(
            extra='{"aws_access_key_id": "aws_access_key_id",'
                  '"aws_secret_access_key": "aws_secret_access_key"}'
        )
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'aws_access_key_id')
        self.assertEqual(credentials_from_hook.secret_key, 'aws_secret_access_key')
        self.assertIsNone(credentials_from_hook.token)
    @unittest.skipIf(mock_sts is None, 'mock_sts package not present')
    @mock.patch.object(AwsHook, 'get_connection')
    @mock_sts
    def test_get_credentials_from_role_arn(self, mock_get_connection):
        # When only role_arn is given, the hook assumes the role via STS;
        # the expected values below are moto's canned STS response.
        mock_connection = Connection(
            extra='{"role_arn":"arn:aws:iam::123456:role/role_arn"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
        self.assertEqual(credentials_from_hook.secret_key,
                         'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
        self.assertEqual(credentials_from_hook.token,
                         'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
                         '3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
                         'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
                         'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')
    @unittest.skipIf(mock_sts is None, 'mock_sts package not present')
    @mock.patch.object(AwsHook, 'get_connection')
    @mock_sts
    def test_get_credentials_from_role_arn_with_external_id(self, mock_get_connection):
        # Same as above but the role is assumed with an external_id.
        mock_connection = Connection(
            extra='{"role_arn":"arn:aws:iam::123456:role/role_arn",'
                  ' "external_id":"external_id"}')
        mock_get_connection.return_value = mock_connection
        hook = AwsHook()
        credentials_from_hook = hook.get_credentials()
        self.assertEqual(credentials_from_hook.access_key, 'AKIAIOSFODNN7EXAMPLE')
        self.assertEqual(credentials_from_hook.secret_key,
                         'aJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY')
        self.assertEqual(credentials_from_hook.token,
                         'BQoEXAMPLEH4aoAH0gNCAPyJxz4BlCFFxWNE1OPTgk5TthT+FvwqnKwRcOIfrRh'
                         '3c/LTo6UDdyJwOOvEVPvLXCrrrUtdnniCEXAMPLE/IvU1dYUg2RVAJBanLiHb4I'
                         'gRmpRV3zrkuWJOgQs8IZZaIv2BXIa2R4OlgkBN9bkUDNCJiBeb/AXlzBBko7b15'
                         'fjrBs2+cTQtpZ3CYWFXG8C5zqx37wnOE49mRl/+OtkIKGO7fAE')
if __name__ == '__main__':
    # Allow running this test module directly (outside a test runner).
    unittest.main()
| apache-2.0 |
filipenf/ansible | lib/ansible/plugins/lookup/csvfile.py | 9 | 3502 | # (c) 2013, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import codecs
import csv
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.utils.unicode import to_bytes, to_str, to_unicode
class CSVRecoder:
    """
    Iterator that reads an encoded stream and reencodes the input to UTF-8,
    so the Python 2 csv module (which only handles bytes) can consume it.
    """
    def __init__(self, f, encoding='utf-8'):
        self.reader = codecs.getreader(encoding)(f)
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol only ('next', not '__next__').
        return self.reader.next().encode("utf-8")
class CSVReader:
    """
    A CSV reader which will iterate over lines in the CSV file "f",
    which is encoded in the given encoding.  Rows are yielded as lists of
    unicode strings.
    """
    def __init__(self, f, dialect=csv.excel, encoding='utf-8', **kwds):
        # Recode the stream to UTF-8 bytes for the Python 2 csv module.
        f = CSVRecoder(f, encoding)
        self.reader = csv.reader(f, dialect=dialect, **kwds)
    def next(self):
        row = self.reader.next()
        return [to_unicode(s) for s in row]
    def __iter__(self):
        return self
class LookupModule(LookupBase):
    """Ansible 'csvfile' lookup: fetch a column from the row of a CSV file
    whose first column matches a key."""

    def read_csv(self, filename, key, delimiter, encoding='utf-8', dflt=None, col=1):
        """
        Return column ``col`` of the first row of ``filename`` whose first
        column equals ``key``, or ``dflt`` when no row matches.

        Any failure (unreadable file, malformed CSV, bad column index, ...)
        is reported as an AnsibleError.
        """
        try:
            f = open(filename, 'r')
            try:
                creader = CSVReader(f, delimiter=to_bytes(delimiter), encoding=encoding)
                for row in creader:
                    if row[0] == key:
                        return row[int(col)]
            finally:
                # The previous implementation leaked the file handle (it was
                # never closed, and the early return skipped any cleanup).
                f.close()
        except Exception as e:
            raise AnsibleError("csvfile: %s" % to_str(e))

        return dflt

    def run(self, terms, variables=None, **kwargs):
        """
        For each term of the form ``key [name=value ...]`` look the key up in
        the configured CSV file and collect the results.
        """
        ret = []

        for term in terms:
            params = term.split()
            key = params[0]

            # Defaults for the optional name=value parameters.
            paramvals = {
                'col' : "1",          # column to return
                'default' : None,
                'delimiter' : "TAB",
                'file' : 'ansible.csv',
                'encoding' : 'utf-8',
            }

            # parameters specified?
            try:
                for param in params[1:]:
                    name, value = param.split('=')
                    if name not in paramvals:
                        # Explicit check instead of an assert, which would be
                        # stripped (and unknown keys silently accepted) when
                        # Python runs with -O.
                        raise AnsibleError('csvfile: unrecognized parameter "%s"' % name)
                    paramvals[name] = value
            except ValueError as e:
                raise AnsibleError(e)

            if paramvals['delimiter'] == 'TAB':
                paramvals['delimiter'] = "\t"

            lookupfile = self.find_file_in_search_path(variables, 'files', paramvals['file'])
            var = self.read_csv(lookupfile, key, paramvals['delimiter'], paramvals['encoding'], paramvals['default'], paramvals['col'])
            if var is not None:
                if isinstance(var, list):
                    for v in var:
                        ret.append(v)
                else:
                    ret.append(var)
        return ret
| gpl-3.0 |
jswope00/griffinx | common/lib/xmodule/xmodule/modulestore/tests/test_modulestore.py | 197 | 2393 | """
Defines a test function, check_has_course_method, useful in various modulestore tests.
This file should potentially be renamed "utilties" since this file contains no tests.
"""
from nose.tools import assert_equals, assert_true, assert_false # pylint: disable=no-name-in-module
def check_has_course_method(modulestore, locator, locator_key_fields):
    """
    Assert that ``modulestore.has_course`` finds the course for the exact
    ``locator``, does not find it for perturbed key values, and honors the
    ``ignore_case`` flag for case-only variations of each field named in
    ``locator_key_fields``.
    """
    error_message = "Called has_course with query {0} and ignore_case is {1}."
    for ignore_case in [True, False]:
        # should find the course with exact locator
        assert_true(modulestore.has_course(locator, ignore_case))
        for key_field in locator_key_fields:
            if getattr(locator, key_field):
                locator_changes_that_should_not_be_found = [  # pylint: disable=invalid-name
                    # replace value for one of the keys
                    {key_field: 'fake'},
                    # add a character at the end
                    {key_field: getattr(locator, key_field) + 'X'},
                    # add a character in the beginning
                    {key_field: 'X' + getattr(locator, key_field)},
                ]
                for changes in locator_changes_that_should_not_be_found:
                    search_locator = locator.replace(**changes)
                    assert_false(
                        modulestore.has_course(search_locator),
                        error_message.format(search_locator, ignore_case)
                    )
                # test case [in]sensitivity
                locator_case_changes = [
                    {key_field: getattr(locator, key_field).upper()},
                    {key_field: getattr(locator, key_field).capitalize()},
                    {key_field: getattr(locator, key_field).capitalize().swapcase()},
                ]
                for changes in locator_case_changes:
                    search_locator = locator.replace(**changes)
                    # if ignore_case is true, the course would be found with a different-cased course locator.
                    # if ignore_case is false, the course should NOT be found given an incorrectly-cased locator.
                    assert_equals(
                        modulestore.has_course(search_locator, ignore_case) is not None,
                        ignore_case,
                        error_message.format(search_locator, ignore_case)
                    )
| agpl-3.0 |
stewartsmith/drizzle | tests/qp_tests/randgen_basic/drizzledumpRestoreRandom_test.py | 4 | 1462 | #! /usr/bin/env python
# -*- mode: python; indent-tabs-mode: nil; -*-
# vim:expandtab:shiftwidth=2:tabstop=2:smarttab:
#
# Copyright (C) 2011 Patrick Crews
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
# Module-level slots filled in by the test harness before the test case runs
# (see tearDown below, which uses server_manager and test_executor).
# NOTE(review): presumably injected by the kewpie/dbqp runner — confirm.
server_requirements = [[]]
servers = []
server_manager = None
test_executor = None
class basicTest(mysqlBaseTestCase):
    def test_drizzledump1(self):
        # Run the randgen drizzledump validator grammar against server 0 and
        # require a clean (zero) exit.
        test_cmd = "./gentest.pl --gendata=conf/drizzle/drizzle.zz --grammar=conf/drizzle/drizzledump.yy --Validator=Drizzledump --queries=25 --seed=time --threads=1"
        retcode, output = self.execute_randgen(test_cmd, test_executor, servers[0])
        self.assertEqual(retcode, 0, msg =output)
    def tearDown(self):
        server_manager.reset_servers(test_executor.name)
| gpl-2.0 |
auag92/n2dm | Asap-3.8.4/Python/asap3/nanoparticle_mc/resizecluster.py | 1 | 1944 | from asap3 import CoordinationNumbers
import numpy as np
from asap3.MonteCarlo.Moves import SurfaceMove
from ase import Atom
import random as rd
def mychoice(a, n):
    "Replaces numpy.random.choice(a, n, False) as our numpy is ancient."
    pool = list(a)
    # Discard random elements one at a time until only n remain; this keeps
    # the surviving elements in their original relative order.
    while len(pool) > n:
        victim = np.random.randint(len(pool))
        del pool[victim]
    return np.array(pool)
def resizecluster(atoms, nwanted):
    """Grow or shrink the cluster in place until it contains nwanted atoms."""
    surplus = len(atoms) - nwanted
    if surplus > 0:
        removeatoms(atoms, surplus)
    elif surplus < 0:
        addatoms(atoms, -surplus)
    # surplus == 0: already the requested size, nothing to do.
def removeatoms(atoms, n):
    "Remove n atoms from the cluster."
    # Repeatedly strip atoms with the lowest coordination number (i.e. the
    # most weakly bound surface atoms) until n atoms have been removed.
    while n > 0:
        # We still have atoms to remove
        # Find the ones with lowest coordination number
        coords = CoordinationNumbers(atoms)
        coordination = coords.min()
        idx = np.arange(len(atoms))
        candidates = idx[np.less_equal(coords, coordination)]
        # candidates now contains the indices of the atoms with
        # low coordination number
        if len(candidates) > n:
            # We have too many candidates, must choose.
            candidates = mychoice(candidates, n)
        del atoms[candidates]
        n -= len(candidates)
def addatoms(atoms,n):
    # Add n atoms of the cluster's element at the most favorable (highest
    # coordinated) vacant surface sites, recursing until enough were added.
    element = atoms[0].symbol
    SM = SurfaceMove()
    SM.set_atoms(atoms)
    idx = SM.vacant_indexes()
    #Find the n highest coordinated sites(this is in ids of SM)
    coords = SM.vacant_coordinations[idx]
    candidates = idx[np.greater_equal(coords,coords.max())]
    vacPos = SM.vacant_positions
    if len(candidates)>=n: #If have sufficient no. candidates
        chosenIDS = rd.sample(range(0,len(candidates)),n) #Random ids
        id1 = candidates[chosenIDS] #the random ids
    else:
        id1 = candidates
    for i in id1:
        #print "Adding atom at site", id1
        atoms.append(Atom(element, position=vacPos[i]))
    if len(id1) < n:
        # We did not add enough atoms
        # NOTE(review): assumes SurfaceMove exposes new vacant sites on the
        # recursive call; otherwise this could recurse forever — confirm.
        addatoms(atoms, n - len(id1))
def objarraysize(arr):
    """Total element count over a 1-D object array whose entries are arrays."""
    return sum(arr[i].size for i in range(arr.size))
| mit |
tedder/ansible | lib/ansible/modules/remote_management/manageiq/manageiq_group.py | 14 | 23075 | #!/usr/bin/python
#
# (c) 2018, Evert Mulder <evertmulder@gmail.com> (base on manageiq_user.py by Daniel Korn <korndaniel1@gmail.com>)
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: manageiq_group
short_description: Management of groups in ManageIQ.
extends_documentation_fragment: manageiq
version_added: '2.8'
author: Evert Mulder (@evertmulder)
description:
- The manageiq_group module supports adding, updating and deleting groups in ManageIQ.
options:
state:
description:
- absent - group should not exist, present - group should be.
choices: ['absent', 'present']
default: 'present'
description:
description:
- The group description.
required: true
default: null
role_id:
description:
- The the group role id
required: false
default: null
role:
description:
- The the group role name
- The C(role_id) has precedence over the C(role) when supplied.
required: false
default: null
tenant_id:
description:
- The tenant for the group identified by the tenant id.
required: false
default: null
tenant:
description:
- The tenant for the group identified by the tenant name.
- The C(tenant_id) has precedence over the C(tenant) when supplied.
- Tenant names are case sensitive.
required: false
default: null
managed_filters:
description: The tag values per category
type: dict
required: false
default: null
managed_filters_merge_mode:
description:
- In merge mode existing categories are kept or updated, new categories are added.
- In replace mode all categories will be replaced with the supplied C(managed_filters).
choices: [ merge, replace ]
default: replace
belongsto_filters:
description: A list of strings with a reference to the allowed host, cluster or folder
type: list
required: false
default: null
belongsto_filters_merge_mode:
description:
- In merge mode existing settings are merged with the supplied C(belongsto_filters).
- In replace mode current values are replaced with the supplied C(belongsto_filters).
choices: [ merge, replace ]
default: replace
'''
EXAMPLES = '''
- name: Create a group in ManageIQ with the role EvmRole-user and tenant 'my_tenant'
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant: 'my_tenant'
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Create a group in ManageIQ with the role EvmRole-user and tenant with tenant_id 4
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant_id: 4
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name:
- Create or update a group in ManageIQ with the role EvmRole-user and tenant my_tenant.
- Apply 3 prov_max_cpu and 2 department tags to the group.
- Limit access to a cluster for the group.
manageiq_group:
description: 'MyGroup-user'
role: 'EvmRole-user'
tenant: my_tenant
managed_filters:
prov_max_cpu:
- '1'
- '2'
- '4'
department:
- defense
- engineering
managed_filters_merge_mode: replace
belongsto_filters:
- "/belongsto/ExtManagementSystem|ProviderName/EmsFolder|Datacenters/EmsFolder|dc_name/EmsFolder|host/EmsCluster|Cluster name"
belongsto_filters_merge_mode: merge
manageiq_connection:
url: 'https://manageiq_server'
username: 'admin'
password: 'smartvm'
verify_ssl: False
- name: Delete a group in ManageIQ
manageiq_group:
state: 'absent'
description: 'MyGroup-user'
manageiq_connection:
url: 'http://127.0.0.1:3000'
username: 'admin'
password: 'smartvm'
- name: Delete a group in ManageIQ using a token
manageiq_group:
state: 'absent'
description: 'MyGroup-user'
manageiq_connection:
url: 'http://127.0.0.1:3000'
token: 'sometoken'
'''
RETURN = '''
group:
description: The group.
returned: success
type: complex
contains:
description:
description: The group description
returned: success
type: str
id:
description: The group id
returned: success
type: int
group_type:
description: The group type, system or user
returned: success
type: str
role:
description: The group role name
returned: success
type: str
tenant:
description: The group tenant name
returned: success
type: str
managed_filters:
description: The tag values per category
returned: success
type: dict
belongsto_filters:
description: A list of strings with a reference to the allowed host, cluster or folder
returned: success
type: list
created_on:
description: Group creation date
returned: success
type: str
example: 2018-08-12T08:37:55+00:00
updated_on:
description: Group update date
returned: success
type: int
example: 2018-08-12T08:37:55+00:00
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.manageiq import ManageIQ, manageiq_argument_spec
class ManageIQgroup(object):
"""
Object to execute group management operations in manageiq.
"""
    def __init__(self, manageiq):
        # Keep direct references to the wrapper's module/url/client so the
        # methods below do not have to reach through self.manageiq each time.
        self.manageiq = manageiq
        self.module = self.manageiq.module
        self.api_url = self.manageiq.api_url
        self.client = self.manageiq.client
def group(self, description):
""" Search for group object by description.
Returns:
the group, or None if group was not found.
"""
groups = self.client.collections.groups.find_by(description=description)
if len(groups) == 0:
return None
else:
return groups[0]
def tenant(self, tenant_id, tenant_name):
""" Search for tenant entity by name or id
Returns:
the tenant entity, None if no id or name was supplied
"""
if tenant_id:
tenant = self.client.get_entity('tenants', tenant_id)
if not tenant:
self.module.fail_json(msg="Tenant with id '%s' not found in manageiq" % str(tenant_id))
return tenant
else:
if tenant_name:
tenant_res = self.client.collections.tenants.find_by(name=tenant_name)
if not tenant_res:
self.module.fail_json(msg="Tenant '%s' not found in manageiq" % tenant_name)
if len(tenant_res) > 1:
self.module.fail_json(msg="Multiple tenants found in manageiq with name '%s" % tenant_name)
tenant = tenant_res[0]
return tenant
else:
# No tenant name or tenant id supplied
return None
def role(self, role_id, role_name):
""" Search for a role object by name or id.
Returns:
the role entity, None no id or name was supplied
the role, or send a module Fail signal if role not found.
"""
if role_id:
role = self.client.get_entity('roles', role_id)
if not role:
self.module.fail_json(msg="Role with id '%s' not found in manageiq" % str(role_id))
return role
else:
if role_name:
role_res = self.client.collections.roles.find_by(name=role_name)
if not role_res:
self.module.fail_json(msg="Role '%s' not found in manageiq" % role_name)
if len(role_res) > 1:
self.module.fail_json(msg="Multiple roles found in manageiq with name '%s" % role_name)
return role_res[0]
else:
# No role name or role id supplied
return None
@staticmethod
def merge_dict_values(norm_current_values, norm_updated_values):
""" Create an merged update object for manageiq group filters.
The input dict contain the tag values per category.
If the new values contain the category, all tags for that category are replaced
If the new values do not contain the category, the existing tags are kept
Returns:
the nested array with the merged values, used in the update post body
"""
# If no updated values are supplied, in merge mode, the original values must be returned
# otherwise the existing tag filters will be removed.
if norm_current_values and (not norm_updated_values):
return norm_current_values
# If no existing tag filters exist, use the user supplied values
if (not norm_current_values) and norm_updated_values:
return norm_updated_values
# start with norm_current_values's keys and values
res = norm_current_values.copy()
# replace res with norm_updated_values's keys and values
res.update(norm_updated_values)
return res
def delete_group(self, group):
""" Deletes a group from manageiq.
Returns:
a dict of:
changed: boolean indicating if the entity was updated.
msg: a short message describing the operation executed.
"""
try:
url = '%s/groups/%s' % (self.api_url, group['id'])
self.client.post(url, action='delete')
except Exception as e:
self.module.fail_json(msg="failed to delete group %s: %s" % (group['description'], str(e)))
return dict(
changed=True,
msg="deleted group %s with id %i" % (group['description'], group['id']))
def edit_group(self, group, description, role, tenant, norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode):
""" Edit a manageiq group.
Returns:
a dict of:
changed: boolean indicating if the entity was updated.
msg: a short message describing the operation executed.
"""
if role or norm_managed_filters or belongsto_filters:
group.reload(attributes=['miq_user_role_name', 'entitlement'])
try:
current_role = group['miq_user_role_name']
except AttributeError:
current_role = None
changed = False
resource = {}
if description and group['description'] != description:
resource['description'] = description
changed = True
if tenant and group['tenant_id'] != tenant['id']:
resource['tenant'] = dict(id=tenant['id'])
changed = True
if role and current_role != role['name']:
resource['role'] = dict(id=role['id'])
changed = True
if norm_managed_filters or belongsto_filters:
# Only compare if filters are supplied
entitlement = group['entitlement']
if 'filters' not in entitlement:
# No existing filters exist, use supplied filters
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
changed = True
else:
current_filters = entitlement['filters']
new_filters = self.edit_group_edit_filters(current_filters,
norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode)
if new_filters:
resource['filters'] = new_filters
changed = True
if not changed:
return dict(
changed=False,
msg="group %s is not changed." % group['description'])
# try to update group
try:
self.client.post(group['href'], action='edit', resource=resource)
changed = True
except Exception as e:
self.module.fail_json(msg="failed to update group %s: %s" % (group['name'], str(e)))
return dict(
changed=changed,
msg="successfully updated the group %s with id %s" % (group['description'], group['id']))
def edit_group_edit_filters(self, current_filters, norm_managed_filters, managed_filters_merge_mode,
belongsto_filters, belongsto_filters_merge_mode):
""" Edit a manageiq group filters.
Returns:
None if no the group was not updated
If the group was updated the post body part for updating the group
"""
filters_updated = False
new_filters_resource = {}
# Process belongsto filters
if 'belongsto' in current_filters:
current_belongsto_set = set(current_filters['belongsto'])
else:
current_belongsto_set = set()
if belongsto_filters:
new_belongsto_set = set(belongsto_filters)
else:
new_belongsto_set = set()
if current_belongsto_set == new_belongsto_set:
new_filters_resource['belongsto'] = current_filters['belongsto']
else:
if belongsto_filters_merge_mode == 'merge':
current_belongsto_set.update(new_belongsto_set)
new_filters_resource['belongsto'] = list(current_belongsto_set)
else:
new_filters_resource['belongsto'] = list(new_belongsto_set)
filters_updated = True
# Process belongsto managed filter tags
# The input is in the form dict with keys are the categories and the tags are supplied string array
# ManageIQ, the current_managed, uses an array of arrays. One array of categories.
# We normalize the user input from a dict with arrays to a dict of sorted arrays
# We normalize the current manageiq array of arrays also to a dict of sorted arrays so we can compare
norm_current_filters = self.manageiq_filters_to_sorted_dict(current_filters)
if norm_current_filters == norm_managed_filters:
new_filters_resource['managed'] = current_filters['managed']
else:
if managed_filters_merge_mode == 'merge':
merged_dict = self.merge_dict_values(norm_current_filters, norm_managed_filters)
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(merged_dict)
else:
new_filters_resource['managed'] = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
filters_updated = True
if not filters_updated:
return None
return new_filters_resource
def create_group(self, description, role, tenant, norm_managed_filters, belongsto_filters):
""" Creates the group in manageiq.
Returns:
the created group id, name, created_on timestamp,
updated_on timestamp.
"""
# check for required arguments
for key, value in dict(description=description).items():
if value in (None, ''):
self.module.fail_json(msg="missing required argument: %s" % key)
url = '%s/groups' % self.api_url
resource = {'description': description}
if role is not None:
resource['role'] = dict(id=role['id'])
if tenant is not None:
resource['tenant'] = dict(id=tenant['id'])
if norm_managed_filters or belongsto_filters:
managed_tag_filters_post_body = self.normalized_managed_tag_filters_to_miq(norm_managed_filters)
resource['filters'] = {'managed': managed_tag_filters_post_body, "belongsto": belongsto_filters}
try:
result = self.client.post(url, action='create', resource=resource)
except Exception as e:
self.module.fail_json(msg="failed to create group %s: %s" % (description, str(e)))
return dict(
changed=True,
msg="successfully created group %s" % description,
group_id=result['results'][0]['id']
)
@staticmethod
def normalized_managed_tag_filters_to_miq(norm_managed_filters):
if not norm_managed_filters:
return None
return list(norm_managed_filters.values())
@staticmethod
def manageiq_filters_to_sorted_dict(current_filters):
if 'managed' not in current_filters:
return None
res = {}
for tag_list in current_filters['managed']:
tag_list.sort()
key = tag_list[0].split('/')[2]
res[key] = tag_list
return res
@staticmethod
def normalize_user_managed_filters_to_sorted_dict(managed_filters, module):
if not managed_filters:
return None
res = {}
for cat_key in managed_filters:
cat_array = []
if not isinstance(managed_filters[cat_key], list):
module.fail_json(msg='Entry "{0}" of managed_filters must be a list!'.format(cat_key))
for tags in managed_filters[cat_key]:
miq_managed_tag = "/managed/" + cat_key + "/" + tags
cat_array.append(miq_managed_tag)
# Do not add empty categories. ManageIQ will remove all categories that are not supplied
if cat_array:
cat_array.sort()
res[cat_key] = cat_array
return res
@staticmethod
def create_result_group(group):
""" Creates the ansible result object from a manageiq group entity
Returns:
a dict with the group id, description, role, tenant, filters, group_type, created_on, updated_on
"""
try:
role_name = group['miq_user_role_name']
except AttributeError:
role_name = None
managed_filters = None
belongsto_filters = None
if 'filters' in group['entitlement']:
filters = group['entitlement']['filters']
if 'belongsto' in filters:
belongsto_filters = filters['belongsto']
if 'managed' in filters:
managed_filters = {}
for tag_list in filters['managed']:
key = tag_list[0].split('/')[2]
tags = []
for t in tag_list:
tags.append(t.split('/')[3])
managed_filters[key] = tags
return dict(
id=group['id'],
description=group['description'],
role=role_name,
tenant=group['tenant']['name'],
managed_filters=managed_filters,
belongsto_filters=belongsto_filters,
group_type=group['group_type'],
created_on=group['created_on'],
updated_on=group['updated_on'],
)
def main():
    """Entry point: ensure a ManageIQ group is present (created/updated) or absent."""
    argument_spec = dict(
        description=dict(required=True, type='str'),
        state=dict(choices=['absent', 'present'], default='present'),
        role_id=dict(required=False, type='int'),
        role=dict(required=False, type='str'),
        tenant_id=dict(required=False, type='int'),
        tenant=dict(required=False, type='str'),
        managed_filters=dict(required=False, type='dict'),
        managed_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
        belongsto_filters=dict(required=False, type='list', elements='str'),
        belongsto_filters_merge_mode=dict(required=False, choices=['merge', 'replace'], default='replace'),
    )
    # fold in the shared ManageIQ connection arguments
    argument_spec.update(manageiq_argument_spec())

    module = AnsibleModule(argument_spec=argument_spec)
    params = module.params

    description = params['description']
    state = params['state']

    manageiq = ManageIQ(module)
    miq_group = ManageIQgroup(manageiq)

    group = miq_group.group(description)

    if state == "absent":
        # Delete the group if it exists; otherwise there is nothing to do.
        if group:
            res_args = miq_group.delete_group(group)
        else:
            res_args = dict(
                changed=False,
                msg="group %s: does not exist in manageiq" % description)

    if state == "present":
        tenant = miq_group.tenant(params['tenant_id'], params['tenant'])
        role = miq_group.role(params['role_id'], params['role'])
        norm_managed_filters = miq_group.normalize_user_managed_filters_to_sorted_dict(
            params['managed_filters'], module)

        if group:
            # Group exists: update it in place.
            res_args = miq_group.edit_group(
                group, description, role, tenant,
                norm_managed_filters, params['managed_filters_merge_mode'],
                params['belongsto_filters'], params['belongsto_filters_merge_mode'])
        else:
            # Group does not exist yet: create it and re-fetch the entity.
            res_args = miq_group.create_group(
                description, role, tenant, norm_managed_filters, params['belongsto_filters'])
            group = manageiq.client.get_entity('groups', res_args['group_id'])

        group.reload(expand='resources', attributes=['miq_user_role_name', 'tenant', 'entitlement'])
        res_args['group'] = miq_group.create_result_group(group)

    module.exit_json(**res_args)


if __name__ == "__main__":
    main()
| gpl-3.0 |
buguelos/odoo | addons/point_of_sale/wizard/pos_details.py | 225 | 2386 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import osv, fields
class pos_details(osv.osv_memory):
    """Transient wizard that collects a date range (and optional salespeople)
    and launches the point-of-sale 'details of sales' report."""
    _name = 'pos.details'
    _description = 'Sales Details'
    _columns = {
        'date_start': fields.date('Date Start', required=True),
        'date_end': fields.date('Date End', required=True),
        'user_ids': fields.many2many('res.users', 'pos_details_report_user_rel', 'user_id', 'wizard_id', 'Salespeople'),
    }
    _defaults = {
        'date_start': fields.date.context_today,
        'date_end': fields.date.context_today,
    }

    def print_report(self, cr, uid, ids, context=None):
        """Read the wizard values and return the report action.

        :param cr: database cursor
        :param uid: id of the current user
        :param ids: ids of the wizard record(s)
        :param context: optional context dictionary
        :return: the report action produced by the report engine
        """
        if context is None:
            context = {}
        data = {'ids': context.get('active_ids', [])}
        records = self.read(cr, uid, ids, ['date_start', 'date_end', 'user_ids'], context=context)
        form = records[0] if records else {}
        data['form'] = form
        if form.get('id', False):
            data['ids'] = [form['id']]
        return self.pool['report'].get_action(
            cr, uid, [], 'point_of_sale.report_detailsofsales', data=data, context=context)
| agpl-3.0 |
yausern/stlab | TimeDomain_v2/Tektronix_AWG520.py | 2 | 24579 | # Tektronix_AWG520.py class, to perform the communication between the Wrapper and the device
# Pieter de Groot <pieterdegroot@gmail.com>, 2008
# Martijn Schaafsma <qtlab@mcschaafsma.nl>, 2008
# Vishal Ranjan, 2012
# ron schutjens, 2012
# Modified by: Sarwan Peiter
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from stlab.devices.instrument import instrument
import types
import time
from datetime import datetime
import logging
import numpy as np
import struct
import os
class Tektronix_AWG520(instrument):
'''
This is the python driver for the Tektronix AWG520
Arbitrary Waveform Generator
Usage:
Initialize with
<name> = instruments.create('name', 'Tektronix_AWG520', address='<GPIB address>',
reset=<bool>, numpoints=<int>)
think about: clock, waveform length
TODO:
1) Get All
2) Remove test_send??
3) Add docstrings
'''
def __init__(self,
name,
addr='TCPIP::192.168.1.27::1234::SOCKET',
reset=False,
numpoints=1000,
awg_file_dir="D:\\AWG_sequences\\",
**kw):
'''
Initializes the AWG520.
Input:
name (string) : name of the instrument
address (string) : GPIB address
reset (bool) : resets to default values, default=false
numpoints (int) : sets the number of datapoints
Output:
None
'''
logging.debug(__name__ + ' : Initializing instrument')
super().__init__(
addr=addr, reset=False, verb=False, read_termination='\n\r')
self._address = addr
self._values = {}
self._values['files'] = {}
self.awg_file_dir = awg_file_dir
self._numpoints = numpoints
self.filename = ''
# self.init_dir()
if reset:
self.reset()
else:
self.get_all()
def sync_awg(self):
"""
Assert if the AWG is ready.
Returns:
bool: True, irrespective of anything.
"""
self.dev.write('*WAI')
#get state AWG
def get_state(self):
state = int(self.dev.query('AWGC:RSTATE?'))
if state == 0:
return 'Idle'
elif state == 1:
return 'Waiting for trigger'
elif state == 2:
return 'Running'
else:
logging.error(__name__ + ' : AWG in undefined state')
return 'error'
def start(self):
self.dev.write('AWGC:RUN')
return self.get_state()
def stop(self):
self.dev.write('AWGC:STOP')
def get_folder_contents(self):
return self.dev.query('mmem:cat?')
def get_current_folder_name(self):
return self.dev.query('mmem:cdir?')
def set_current_folder_name(self, file_path):
self.dev.write('mmem:cdir "%s"' % file_path)
def change_folder(self, dir):
self.dev.write('mmem:cdir "%s"' % dir)
def goto_root(self):
self.dev.write('mmem:cdir')
def make_directory(self, dir, root):
'''
makes a directory
if root = True, new dir in main folder
'''
if root == True:
self.goto_root()
self.dev.write('MMEMory:MDIRectory "%s"' % dir)
else:
self.dev.write('MMEMory:MDIRectory "%s"' % dir)
def get_all(self):
'''
Reads all implemented parameters from the instrument,
and updates the wrapper.
Input:
None
Output:
None
'''
logging.info(__name__ + ' : Reading all data from instrument')
print('Instrument State: ', self.get_state())
print('Mode:', self.get_trigger_mode())
print('Trigger impedance (Ohm):', self.get_trigger_impedance())
print('Trigger level (V): ', self.get_trigger_level())
print('Number of points: ', self.get_numpoints())
print('Sample rate (Hz): ', self.get_clock())
print('Reference Oscillator: ', self.get_refclock())
for i in range(1, 3):
print('Amplitude Channel{} (V): '.format(i), self.get_amplitude(i))
print('Offset Channel{} (V):'.format(i), self.get_offset(i))
print('Channel{} Marker1_low (V)'.format(i),
self.get_marker1_low(i))
print('Channel{} Marker1_high (V)'.format(i),
self.get_marker1_high(i))
print('Channel{} Marker2_low (V)'.format(i),
self.get_marker2_low(i))
print('Channel{} Marker2_high (V)'.format(i),
self.get_marker2_high(i))
print('Channel{} state: '.format(i), self.get_status(i))
def clear_waveforms(self):
'''
Clears the waveform on both channels.
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Clear waveforms from channels')
self.dev.write('SOUR1:FUNC:USER ""')
self.dev.write('SOUR2:FUNC:USER ""')
def set_trigger_mode_on(self):
'''
Sets the trigger mode to 'On'
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Set trigger mode tot TRIG')
self.dev.write('AWGC:RMOD TRIG')
def set_trigger_mode_off(self):
'''
Sets the trigger mode to 'Cont'
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Set trigger mode to CONT')
self.dev.write('AWGC:RMOD CONT')
def set_trigger_impedance_1e3(self):
'''
Sets the trigger impedance to 1 kOhm
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Set trigger impedance to 1e3 Ohm')
self.dev.write('TRIG:IMP 1e3')
def set_trigger_impedance_50(self):
'''
Sets the trigger impedance to 50 Ohm
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Set trigger impedance to 50 Ohm')
self.dev.write('TRIG:IMP 50')
# Parameters
def get_trigger_mode(self):
'''
Reads the trigger mode from the instrument
Input:
None
Output:
mode (string) : 'Trig' or 'Cont' depending on the mode
'''
logging.debug(__name__ + ' : Get trigger mode from instrument')
return self.dev.query('AWGC:RMOD?')
def set_trigger_mode(self, mod):
'''
Sets trigger mode of the instrument
Input:
mod (string) : Either 'Trig' or 'Cont' depending on the mode
Output:
None
'''
if (mod.upper() == 'TRIG'):
self.set_trigger_mode_on()
elif (mod.upper() == 'CONT'):
self.set_trigger_mode_off()
else:
logging.error(
__name__ +
' : Unable to set trigger mode to %s, expected "TRIG" or "CONT"'
% mod)
def get_trigger_impedance(self):
'''
Reads the trigger impedance from the instrument
Input:
None
Output:
impedance (??) : 1e3 or 50 depending on the mode
'''
logging.debug(__name__ + ' : Get trigger impedance from instrument')
return self.dev.query('TRIG:IMP?')
def set_trigger_impedance(self, mod):
'''
Sets the trigger impedance of the instrument
Input:
mod (int) : Either 1e3 of 50 depending on the mode
Output:
None
'''
if (mod == 1e3):
self.set_trigger_impedance_1e3()
elif (mod == 50):
self.set_trigger_impedance_50()
else:
logging.error(
__name__ +
' : Unable to set trigger impedance to %s, expected "1e3" or "50"'
% mod)
def get_trigger_level(self):
'''
Reads the trigger level from the instrument
Input:
None
Output:
None
'''
logging.debug(__name__ + ' : Get trigger level from instrument')
return float(self.dev.query('TRIG:LEV?'))
def set_trigger_level(self, level):
'''
Sets the trigger level of the instrument
Input:
level (float) : trigger level in volts
'''
logging.debug(__name__ + ' : Trigger level set to %.3f' % level)
self.dev.write('TRIG:LEV %.3f' % level)
def get_trigger_interval(self):
'''
the internal trigger interval in seconds.
'''
logging.debug(__name__ + ' : Get trigger interval from instrument')
return float(self.dev.query('TRIG:TIM?'))
def set_trigger_interval(self, interval):
'''
sets the internal trigger interval - effectively the repetition
rate of the experiment.
Input:
interval (float)
'''
if 1.e-6>interval or interval>10.:
print("out of range!")
return
logging.debug(__name__ + ' : Trigger interval set to %.3f' % interval)
self.dev.write('TRIG:TIM %.6fs' % interval)
def force_trigger(self):
'''
forces a trigger event (used for wait_trigger option in sequences)
Ron
'''
return self.dev.write('TRIG:SEQ:IMM')
def force_logicjump(self):
'''
forces a jumplogic event (used as a conditional event during waveform
executions)
note: jump_logic events&mode have to be set properly!
Ron
'''
return self.dev.write('AWGC:EVEN:SEQ:IMM')
def set_run_mode(self, mode):
'''
sets the run mode of the AWG.
mode can be: CONTinuous,TRIGgered,GATed,ENHanced
Ron
'''
return self.dev.write('AWGC:RMOD %s' % mode)
def get_run_mode(self):
'''
sets the run mode of the AWG
Ron
'''
return self.dev.query('AWGC:RMOD?')
def set_jumpmode(self, mode):
'''
sets the jump mode for jump logic events, possibilities:
LOGic,TABle,SOFTware
give mode as string
note: jump_logic events&mode have to be set properly!
Ron
'''
return self.dev.write('AWGC:ENH:SEQ:JMOD %s' % mode)
def get_jumpmode(self, mode):
'''
get the jump mode for jump logic events
Ron
'''
return self.dev.query('AWGC:ENH:SEQ:JMOD?')
def get_numpoints(self):
'''
Returns the number of datapoints in each wave
Input:
None
Output:
numpoints (int) : Number of datapoints in each wave
'''
return self._numpoints
def set_numpoints(self, numpts):
'''
Sets the number of datapoints in each wave.
This acts on both channels.
Input:
numpts (int) : The number of datapoints in each wave
Output:
None
'''
logging.debug(__name__ + ' : Trying to set numpoints to %s' % numpts)
if numpts != self._numpoints:
logging.warning(
__name__ +
' : changing numpoints. This will clear all waveforms!')
response = 'yes' #raw_input('type "yes" to continue')
if response is 'yes':
logging.debug(__name__ + ' : Setting numpoints to %s' % numpts)
self._numpoints = numpts
self.clear_waveforms()
else:
print('aborted')
def get_clock(self):
'''
Returns the clockfrequency, which is the rate at which the datapoints are
sent to the designated output
Input:
None
Output:
clock (int) : frequency in Hz
'''
return self.dev.query('SOUR:FREQ?')
def set_clock(self, clock):
'''
Sets the rate at which the datapoints are sent to the designated output channel
Input:
clock (int) : frequency in Hz
Output:
None
'''
logging.warning(
__name__ +
' : Clock set to %s. This is not fully functional yet. To avoid problems, it is better not to change the clock during operation'
% clock)
self._clock = clock
self.dev.write('SOUR:FREQ %f' % clock)
def get_refclock(self):
'''
Asks AWG whether the 10 MHz reference is set to the
internal source or an external one.
Input:
None
Output:
'INT' or 'EXT'
'''
return self.dev.query('SOUR1:ROSC:SOUR?')
def set_refclock_ext(self):
'''
Sets the reference clock to internal or external.
'''
self.dev.write('SOUR1:ROSC:SOUR EXT')
def set_refclock_int(self):
'''
Sets the reference clock to internal or external
'''
self.dev.write('SOUR1:ROSC:SOUR INT')
def get_amplitude(self, channel):
'''
Reads the amplitude of the designated channel from the instrument
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
amplitude (float) : the amplitude of the signal in Volts
'''
logging.debug(
__name__ +
' : Get amplitude of channel %s from instrument' % channel)
return float(self.dev.query('SOUR%s:VOLT:LEV:IMM:AMPL?' % channel))
def set_amplitude(self, amp, channel):
'''
Sets the amplitude of the designated channel of the instrument
Input:
amp (float) : amplitude in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(__name__ + ' : Set amplitude of channel %s to %.6f' %
(channel, amp))
self.dev.write('SOUR%s:VOLT:LEV:IMM:AMPL %.6f' % (channel, amp))
def get_offset(self, channel):
'''
Reads the offset of the designated channel of the instrument
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
offset (float) : offset of designated channel in Volts
'''
logging.debug(__name__ + ' : Get offset of channel %s' % channel)
return float(self.dev.query('SOUR%s:VOLT:LEV:IMM:OFFS?' % channel))
def set_offset(self, offset, channel):
'''
Sets the offset of the designated channel of the instrument
Input:
offset (float) : offset in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(__name__ + ' : Set offset of channel %s to %.6f' %
(channel, offset))
self.dev.write('SOUR%s:VOLT:LEV:IMM:OFFS %.6f' % (channel, offset))
def get_marker1_low(self, channel):
'''
Gets the low level for marker1 on the designated channel.
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
low (float) : low level in Volts
'''
logging.debug(
__name__ + ' : Get lower bound of marker1 of channel %s' % channel)
return float(
self.dev.query('SOUR%s:MARK1:VOLT:LEV:IMM:LOW?' % channel))
def set_marker1_low(self, low, channel):
'''
Sets the low level for marker1 on the designated channel.
Input:
low (float) : low level in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(
__name__ + ' : Set lower bound of marker1 of channel %s to %.3f' %
(channel, low))
self.dev.write('SOUR%s:MARK1:VOLT:LEV:IMM:LOW %.3f' % (channel, low))
def get_marker1_high(self, channel):
'''
Gets the high level for marker1 on the designated channel.
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
high (float) : high level in Volts
'''
logging.debug(
__name__ + ' : Get upper bound of marker1 of channel %s' % channel)
return float(
self.dev.query('SOUR%s:MARK1:VOLT:LEV:IMM:HIGH?' % channel))
def set_marker1_high(self, high, channel):
'''
Sets the high level for marker1 on the designated channel.
Input:
high (float) : high level in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(
__name__ + ' : Set upper bound of marker1 of channel %s to %.3f' %
(channel, high))
self.dev.write('SOUR%s:MARK1:VOLT:LEV:IMM:HIGH %.3f' % (channel, high))
def get_marker2_low(self, channel):
'''
Gets the low level for marker2 on the designated channel.
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
low (float) : low level in Volts
'''
logging.debug(
__name__ + ' : Get lower bound of marker2 of channel %s' % channel)
return float(
self.dev.query('SOUR%s:MARK2:VOLT:LEV:IMM:LOW?' % channel))
def set_marker2_low(self, low, channel):
'''
Sets the low level for marker2 on the designated channel.
Input:
low (float) : low level in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(
__name__ + ' : Set lower bound of marker2 of channel %s to %.3f' %
(channel, low))
self.dev.write('SOUR%s:MARK2:VOLT:LEV:IMM:LOW %.3f' % (channel, low))
def get_marker2_high(self, channel):
'''
Gets the high level for marker2 on the designated channel.
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
high (float) : high level in Volts
'''
logging.debug(
__name__ + ' : Get upper bound of marker2 of channel %s' % channel)
return float(
self.dev.query('SOUR%s:MARK2:VOLT:LEV:IMM:HIGH?' % channel))
def set_marker2_high(self, high, channel):
'''
Sets the high level for marker2 on the designated channel.
Input:
high (float) : high level in Volts
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(
__name__ + ' : Set upper bound of marker2 of channel %s to %.3f' %
(channel, high))
self.dev.write('SOUR%s:MARK2:VOLT:LEV:IMM:HIGH %.3f' % (channel, high))
def get_status(self, channel):
'''
Gets the status of the designated channel.
Input:
channel (int) : 1 or 2, the number of the designated channel
Output:
None
'''
logging.debug(__name__ + ' : Get status of channel %s' % channel)
outp = int(self.dev.query('OUTP%s?' % channel))
if (outp == 0):
return 'off'
elif (outp == 1):
return 'on'
else:
logging.debug(
__name__ + ' : Read invalid status from instrument %s' % outp)
return 'an error occurred while reading status from instrument'
def set_status(self, status, channel):
'''
Sets the status of designated channel.
Input:
status (string) : 'On' or 'Off'
channel (int) : channel number
Output:
None
'''
logging.debug(__name__ + ' : Set status of channel %s to %s' %
(channel, status))
if (status.upper() == 'ON'):
self.dev.write('OUTP%s ON' % channel)
elif (status.upper() == 'OFF'):
self.dev.write('OUTP%s OFF' % channel)
else:
logging.debug(
__name__ + ' : Try to set status to invalid value %s' % status)
print('Tried to set status to invalid value %s' % status)
# query for string with filenames
def get_filenames(self):
logging.debug(__name__ + ' : Read filenames from instrument')
return self.dev.query('MMEM:CAT? "MAIN"')
def init_dir(self):
print('Initializing directory for AWG file transfering......')
self.dir = os.path.join(
self.awg_file_dir,
'AwgFiles' + datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
try:
os.makedirs(self.dir)
except OSError as e:
if e.errno != errno.EEXIST:
raise # This was not a "directory exist" error..
def set_sequence(self, filename):
'''
loads a sequence file on all channels.
Waveforms/patterns to be executed on respective channel
must be defined inside the sequence file itself
make sure to send all waveforms before setting a seq
'''
self.dev.write('FUNC:USER "%s","MAIN"' % (filename))
# Send waveform to the device
def gen_waveform_files(self, w, m1, m2, filename, clock):
"""
Sends a complete waveform. All parameters need to be specified.
choose a file extension 'wfm' (must end with .pat)
See also: resend_waveform()
Input:
w (float[numpoints]) : waveform
m1 (int[numpoints]) : marker1
m2 (int[numpoints]) : marker2
filename (string) : filename
clock (int) : frequency (Hz)
Output:
None
"""
logging.debug(
__name__ + ' : Generating wfm files %s for instrument' % filename)
self.filename = filename
# Check for errors
dim = len(w)
if (not ((len(w) == len(m1)) and ((len(m1) == len(m2))))):
return 'error'
m = m1 + np.multiply(m2, 2)
ws = b''
for i in range(0, len(w)):
ws = ws + struct.pack('<fB', w[i], int(m[i]))
s1 = 'MAGIC 1000\r\n'
s3 = ws
s4 = 'CLOCK %.10e\r\n' % clock
s2 = '#' + str(len(str(len(s3)))) + str(len(s3))
mes = s1.encode('ASCII') + s2.encode('ASCII') + s3 + s4.encode('ASCII')
with open(os.path.join(self.dir, self.filename), 'wb') as d:
d.write(mes)
d.close()
def gen_sequence_file(self, wfs1, wfs2, rep, wait, goto, logic_jump,
filename):
'''
Sends a sequence file
Inputs (mandatory):
wfs1: list of filenames for ch1 (all must end with .pat)
wfs2: list of filenames for ch2 (all must end with .pat)
rep: list
wait: list
goto: list
logic_jump: list
filename: name of output file (must end with .seq)
Output:
None
'''
logging.debug(
__name__ + ' : Generating sequence %s for instrument' % filename)
self.filename = filename
N = str(len(rep))
s1 = 'MAGIC 3002\r\n'
s3 = 'LINES %s\n' % N
s4 = ''
for k in range(len(rep)):
s4 = s4 + '"%s","%s",%s,%s,%s,%s\r\n' % (
wfs1[k], wfs2[k], rep[k], wait[k], goto[k], logic_jump[k])
mes = s1.encode("ASCII") + s3.encode("ASCII") + s4.encode("ASCII")
with open(os.path.join(self.dir, self.filename), 'wb') as d:
d.write(mes)
d.close()
    def do_get_AWG_model(self):
        '''Return the instrument model identifier as a string.'''
        return 'AWG520'
    def GetMetadataString(self): # Should return a string of metadata adequate to write to a file
        # Not implemented yet: currently returns None instead of a string.
        pass
| gpl-3.0 |
inveniosoftware/invenio-previewer | tests/test_utils.py | 1 | 2173 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2016-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Test of utilities module."""
from __future__ import absolute_import, print_function
import pytest
from mock import patch
from six import BytesIO
from invenio_previewer import current_previewer
from invenio_previewer.utils import detect_encoding
def test_default_file_reader(app, record_with_file):
    """The default file factory resolves the record's file by key."""
    rec, uploaded = record_with_file
    resolved = current_previewer.record_file_factory(None, rec, uploaded.key)
    assert resolved.version_id == uploaded.version_id
@pytest.mark.parametrize('string, confidence, encoding, detect', [
    (u'Γκρήκ Στρίνγκ'.encode('utf-8'), 0.99000, 'UTF-8', 'UTF-8'),
    (u'dhǾk: kjd köd, ddȪj@dd.k'.encode('utf-8'), 0.87625, 'UTF-8', None),
    (u'क्या हाल तुम या कर रहे हो?'.encode('utf-8'), 0.99000, 'UTF-8', 'UTF-8'),
    (u'石原氏 移転は「既定路線」'.encode('euc-jp'), 0.46666, 'EUC-JP', None),
    (u'Hi bye sigh die'.encode('utf-8'), 1.00000, 'UTF-8', 'UTF-8'),
    (u'Monkey donkey cow crow'.encode('euc-jp'), 0.00000, 'ASCII', None),
    (u'Monkey donkey cow crow'.encode('euc-jp'), 0.90000, 'EUC-JP', None),
    (u'Monkey donkey cow crow'.encode('euc-jp'), 0.90001, 'EUC-JP', 'EUC-JP'),
    (u'Monkey donkey cow crow'.encode('euc-jp'), 0.50000, 'UTF-8', None),
])
def test_detect_encoding(app, string, confidence, encoding, detect):
    """Test encoding detection."""
    # NOTE(review): cchardet is mocked, so only detect_encoding's own
    # confidence-threshold logic is exercised; the parametrized cases
    # suggest results below a confidence cut-off are discarded — confirm
    # the exact thresholds against the implementation.
    f = BytesIO(string)
    initial_position = f.tell()  # the stream position must be restored
    with patch('cchardet.detect') as mock_detect:
        mock_detect.return_value = {'encoding': encoding,
                                    'confidence': confidence}
        assert detect_encoding(f) is detect
    assert f.tell() == initial_position
def test_detect_encoding_exception(app):
    """A failure inside cchardet must make detect_encoding return None."""
    stream = BytesIO(u'Γκρήκ Στρίνγκ'.encode('utf-8'))
    # Replacing cchardet.detect with the Exception class makes the call
    # raise; detect_encoding must swallow that and report None.
    with patch('cchardet.detect', Exception):
        result = detect_encoding(stream)
    assert result is None
| mit |
40323240/personal-cdw11 | users/a/g7/ag7_40123149_3.py | 2 | 5212 | # 各組分別在各自的 .py 程式中建立應用程式 (第1步/總共3步)
from flask import Blueprint, render_template, make_response
# Build the blueprint; the URL prefix is /ag7_40123149 and templates are
# looked up in the 'templates' directory.
ag7_40123149_3 = Blueprint('ag7_40123149_3', __name__, url_prefix='/ag7_40123149', template_folder='templates')
# The route below is a complete, independently runnable drawing program.
@ag7_40123149_3.route('/ag7_40123149_3')
def one():
    """Serve a self-contained HTML page that draws spur gears.

    The page loads Brython and the Cango drawing libraries from remote
    hosts and runs an embedded Python script in the browser, rendering
    three meshed spur gears on the ``gear1`` canvas.  The returned value
    is the raw HTML string.
    """
    outstring = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>網際 2D 繪圖</title>
<!-- IE 9: display inline SVG -->
<meta http-equiv="X-UA-Compatible" content="IE=9">
<script type="text/javascript" src="http://brython.info/src/brython_dist.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango-8v03.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/Cango2D-7v01-min.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/CangoAxes-1v33.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/flintlockPartDefs-02.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/CangoAnimation-4v01.js"></script>
<script type="text/javascript" src="http://2015fallhw.github.io/cptocadp/static/gearUtils-05.js"></script>
</head>
<body>
<script>
window.onload=function(){
brython(1);
}
</script>
<canvas id='gear1' width='800' height='750'></canvas>
<script type="text/python">
# 將 導入的 document 設為 doc 主要原因在於與舊程式碼相容
from browser import document as doc
# 由於 Python3 與 Javascript 程式碼已經不再混用, 因此來自 Javascript 的變數, 必須居中透過 window 物件轉換
from browser import window
# 針對 Javascript 既有的物件, 則必須透過 JSConstructor 轉換
from javascript import JSConstructor
import math
# 主要用來取得畫布大小
canvas = doc["gear1"]
# 此程式採用 Cango Javascript 程式庫繪圖, 因此無需 ctx
#ctx = canvas.getContext("2d")
# 針對類別的轉換, 將 Cango.js 中的 Cango 物件轉為 Python cango 物件
cango = JSConstructor(window.Cango)
# 針對變數的轉換, shapeDefs 在 Cango 中資料型別為變數, 可以透過 window 轉換
shapedefs = window.shapeDefs
# 目前 Cango 結合 Animation 在 Brython 尚無法運作, 此刻只能繪製靜態圖形
# in CangoAnimation.js
#interpolate1 = window.interpolate
# Cobi 與 createGearTooth 都是 Cango Javascript 程式庫中的物件
cobj = JSConstructor(window.Cobj)
creategeartooth = JSConstructor(window.createGearTooth)
# 經由 Cango 轉換成 Brython 的 cango, 指定將圖畫在 id="plotarea" 的 canvas 上
cgo = cango("gear1")
######################################
# 畫正齒輪輪廓
#####################################
def spur(cx, cy, m, n, pa):
    # n 為齒數
    #n = 25
    # pa 為壓力角
    #pa = 25
    # m 為模數, 根據畫布的寬度, 計算適合的模數大小
    # Module = mm of pitch diameter per tooth
    #m = 0.8*canvas.width/n
    # pr 為節圓半徑
    pr = n*m/2 # gear Pitch radius
    # generate gear
    data = creategeartooth(m, n, pa)
    # Brython 程式中的 print 會將資料印在 Browser 的 console 區
    #print(data)
    gearTooth = cobj(data, "SHAPE", {
            "fillColor":"#ddd0dd",
            "border": True,
            "strokeColor": "#606060" })
    gearTooth.rotate(180/n) # rotate gear 1/2 tooth to mesh
    # 單齒的齒形資料經過旋轉後, 將資料複製到 gear 物件中
    gear = gearTooth.dup()
    # gear 為單一齒的輪廓資料
    #cgo.render(gearTooth)
    # 利用單齒輪廓旋轉, 產生整個正齒輪外形
    for i in range(1, n):
        # 將 gearTooth 中的資料複製到 newTooth
        newTooth = gearTooth.dup()
        # 配合迴圈, newTooth 的齒形資料進行旋轉, 然後利用 appendPath 方法, 將資料併入 gear
        newTooth.rotate(360*i/n)
        # appendPath 為 Cango 程式庫中的方法, 第二個變數為 True, 表示要刪除最前頭的 Move to SVG Path 標註符號
        gear.appendPath(newTooth, True) # trim move command = True
    # 建立軸孔
    # add axle hole, hr 為 hole radius
    hr = 0.6*pr # diameter of gear shaft
    shaft = cobj(shapedefs.circle(hr), "PATH")
    shaft.revWinding()
    gear.appendPath(shaft) # retain the 'moveTo' command for shaft sub path
    #cx = canvas.width/2
    #cy = canvas.height/2
    gear.translate(cx, cy)
    # render 繪出靜態正齒輪輪廓
    cgo.render(gear)
    # 接著繪製齒輪的基準線
    deg = math.pi/180
    Line = cobj(['M', cx, cy, 'L', cx+pr*math.cos(180/n*deg), cy+pr*math.sin(180/n*deg)], "PARH", {'strokeColor':'blue' ,'linWidth':4})
    cgo.render(Line)
cx = canvas.width/2
cy = canvas.height/2
# n 為齒數
n = 70
# pa 為壓力角
pa = 25
# m 為模數, 根據畫布的寬度, 計算適合的模數大小
# Module = mm of pitch diameter per tooth
m = 0.8*canvas.width/n/4
spur(cx-93, cy, m, n, pa)
spur(cx, cy, m, 11, pa)
spur(cx+28, cy, m, 13, pa)
</script>
</body>
</html>
'''
    return outstring
| agpl-3.0 |
btabibian/scikit-learn | sklearn/feature_extraction/tests/test_text.py | 8 | 35969 | from __future__ import unicode_literals
import warnings
from sklearn.feature_extraction.text import strip_tags
from sklearn.feature_extraction.text import strip_accents_unicode
from sklearn.feature_extraction.text import strip_accents_ascii
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.base import clone
import numpy as np
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from numpy.testing import assert_raises
from sklearn.utils.testing import (assert_equal, assert_false, assert_true,
assert_not_equal, assert_almost_equal,
assert_in, assert_less, assert_greater,
assert_warns_message, assert_raise_message,
clean_warning_registry, SkipTest)
from collections import defaultdict, Mapping
from functools import partial
import pickle
from io import StringIO
# Toy corpora shared by the vectorizer tests: six "junk food" documents,
# five "healthy food" documents, and their concatenation.
JUNK_FOOD_DOCS = (
    "the pizza pizza beer copyright",
    "the pizza burger beer copyright",
    "the the pizza beer beer copyright",
    "the burger beer beer copyright",
    "the coke burger coke copyright",
    "the coke burger burger",
)
NOTJUNK_FOOD_DOCS = (
    "the salad celeri copyright",
    "the salad salad sparkling water copyright",
    "the the celeri celeri copyright",
    "the tomato tomato salad water",
    "the tomato salad water copyright",
)
ALL_FOOD_DOCS = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
def uppercase(s):
    """Strip accents from *s* and upper-case it (custom preprocessor)."""
    return strip_accents_unicode(s).upper()
def strip_eacute(s):
    """Map every e-acute character (U+00E9) to a plain ASCII 'e'."""
    return s.translate({0xe9: 'e'})
def split_tokenize(s):
    """Whitespace tokenizer, used as a custom ``tokenizer`` in tests."""
    return s.split()
def lazy_analyze(s):
    """Degenerate analyzer: every document maps to one fixed feature."""
    return ['the_ultimate_feature']
def test_strip_accents():
    """strip_accents_unicode handles Latin accents, Arabic, and mixed text."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_unicode(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_unicode(a), expected)
    # check some arabic
    a = '\u0625' # halef with a hamza below
    expected = '\u0627' # simple halef
    assert_equal(strip_accents_unicode(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_unicode(a), expected)
def test_to_ascii():
    """strip_accents_ascii folds Latin accents and drops non-ASCII chars."""
    # check some classical latin accentuated symbols
    a = '\xe0\xe1\xe2\xe3\xe4\xe5\xe7\xe8\xe9\xea\xeb'
    expected = 'aaaaaaceeee'
    assert_equal(strip_accents_ascii(a), expected)
    a = '\xec\xed\xee\xef\xf1\xf2\xf3\xf4\xf5\xf6\xf9\xfa\xfb\xfc\xfd'
    expected = 'iiiinooooouuuuy'
    assert_equal(strip_accents_ascii(a), expected)
    # check some arabic
    a = '\u0625' # halef with a hamza below
    expected = '' # halef has no direct ascii match
    assert_equal(strip_accents_ascii(a), expected)
    # mix letters accentuated and not
    a = "this is \xe0 test"
    expected = 'this is a test'
    assert_equal(strip_accents_ascii(a), expected)
def test_word_analyzer_unigrams():
    """Word analyzers tokenize, strip accents, honour custom hooks."""
    for Vectorizer in (CountVectorizer, HashingVectorizer):
        wa = Vectorizer(strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                    'etait', 'pas', 'tres', 'bon']
        assert_equal(wa(text), expected)
        text = "This is a test, really.\n\n I met Harry yesterday."
        expected = ['this', 'is', 'test', 'really', 'met', 'harry',
                    'yesterday']
        assert_equal(wa(text), expected)
        # file-like inputs are read before analysis
        wa = Vectorizer(input='file').build_analyzer()
        text = StringIO("This is a test with a file-like object!")
        expected = ['this', 'is', 'test', 'with', 'file', 'like',
                    'object']
        assert_equal(wa(text), expected)
        # with custom preprocessor
        wa = Vectorizer(preprocessor=uppercase).build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                " c'\xe9tait pas tr\xeas bon.")
        expected = ['AI', 'MANGE', 'DU', 'KANGOUROU', 'CE', 'MIDI',
                    'ETAIT', 'PAS', 'TRES', 'BON']
        assert_equal(wa(text), expected)
        # with custom tokenizer
        wa = Vectorizer(tokenizer=split_tokenize,
                        strip_accents='ascii').build_analyzer()
        text = ("J'ai mang\xe9 du kangourou ce midi, "
                "c'\xe9tait pas tr\xeas bon.")
        expected = ["j'ai", 'mange', 'du', 'kangourou', 'ce', 'midi,',
                    "c'etait", 'pas', 'tres', 'bon.']
        assert_equal(wa(text), expected)
def test_word_analyzer_unigrams_and_bigrams():
    """ngram_range=(1, 2) yields unigrams followed by adjacent bigrams."""
    wa = CountVectorizer(analyzer="word", strip_accents='unicode',
                         ngram_range=(1, 2)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    expected = ['ai', 'mange', 'du', 'kangourou', 'ce', 'midi',
                'etait', 'pas', 'tres', 'bon', 'ai mange', 'mange du',
                'du kangourou', 'kangourou ce', 'ce midi', 'midi etait',
                'etait pas', 'pas tres', 'tres bon']
    assert_equal(wa(text), expected)
def test_unicode_decode_error():
    """Decoding bytes with a wrong declared encoding raises."""
    # decode_error default to strict, so this should fail
    # First, encode (as bytes) a unicode string.
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon."
    text_bytes = text.encode('utf-8')
    # Then let the Analyzer try to decode it as ascii. It should fail,
    # because we have given it an incorrect encoding.
    wa = CountVectorizer(ngram_range=(1, 2), encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, wa, text_bytes)
    ca = CountVectorizer(analyzer='char', ngram_range=(3, 6),
                         encoding='ascii').build_analyzer()
    assert_raises(UnicodeDecodeError, ca, text_bytes)
def test_char_ngram_analyzer():
    """Character n-gram analyzer slides over the raw character stream."""
    cnga = CountVectorizer(analyzer='char', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "J'ai mang\xe9 du kangourou ce midi, c'\xe9tait pas tr\xeas bon"
    expected = ["j'a", "'ai", 'ai ', 'i m', ' ma']
    assert_equal(cnga(text)[:5], expected)
    expected = ['s tres', ' tres ', 'tres b', 'res bo', 'es bon']
    assert_equal(cnga(text)[-5:], expected)
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
    expected = [' yeste', 'yester', 'esterd', 'sterda', 'terday']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("This is a test with a file-like object!")
    expected = ['thi', 'his', 'is ', 's i', ' is']
    assert_equal(cnga(text)[:5], expected)
def test_char_wb_ngram_analyzer():
    """char_wb n-grams are padded at word boundaries with spaces."""
    cnga = CountVectorizer(analyzer='char_wb', strip_accents='unicode',
                           ngram_range=(3, 6)).build_analyzer()
    text = "This \n\tis a test, really.\n\n I met Harry yesterday"
    expected = [' th', 'thi', 'his', 'is ', ' thi']
    assert_equal(cnga(text)[:5], expected)
    expected = ['yester', 'esterd', 'sterda', 'terday', 'erday ']
    assert_equal(cnga(text)[-5:], expected)
    cnga = CountVectorizer(input='file', analyzer='char_wb',
                           ngram_range=(3, 6)).build_analyzer()
    text = StringIO("A test with a file-like object!")
    expected = [' a ', ' te', 'tes', 'est', 'st ', ' tes']
    assert_equal(cnga(text)[:6], expected)
def test_countvectorizer_custom_vocabulary():
    """A user vocabulary may be a dict, list, iterator or defaultdict."""
    vocab = {"pizza": 0, "beer": 1}
    terms = set(vocab.keys())
    # Try a few of the supported types.
    for typ in [dict, list, iter, partial(defaultdict, int)]:
        v = typ(vocab)
        vect = CountVectorizer(vocabulary=v)
        vect.fit(JUNK_FOOD_DOCS)
        if isinstance(v, Mapping):
            assert_equal(vect.vocabulary_, vocab)
        else:
            assert_equal(set(vect.vocabulary_), terms)
        X = vect.transform(JUNK_FOOD_DOCS)
        assert_equal(X.shape[1], len(terms))
def test_countvectorizer_custom_vocabulary_pipeline():
    """A fixed vocabulary restricts the features produced by a pipeline."""
    what_we_like = ["pizza", "beer"]
    pipe = Pipeline([
        ('count', CountVectorizer(vocabulary=what_we_like)),
        ('tfidf', TfidfTransformer())])
    X = pipe.fit_transform(ALL_FOOD_DOCS)
    assert_equal(set(pipe.named_steps['count'].vocabulary_),
                 set(what_we_like))
    assert_equal(X.shape[1], len(what_we_like))
def test_countvectorizer_custom_vocabulary_repeated_indeces():
    """A vocabulary mapping two terms to the same index must be rejected."""
    vocab = {"pizza": 0, "beer": 0}
    try:
        vect = CountVectorizer(vocabulary=vocab)
        # Vocabulary validation may happen lazily, so trigger it explicitly.
        vect.fit(["pasta_siziliana"])
    except ValueError as e:
        assert_in("vocabulary contains repeated indices", str(e).lower())
    else:
        # The original test passed vacuously when no exception was raised.
        raise AssertionError("Expected ValueError for repeated indices")
def test_countvectorizer_custom_vocabulary_gap_index():
    """A vocabulary with missing indices (gaps) must be rejected."""
    vocab = {"pizza": 1, "beer": 2}
    try:
        vect = CountVectorizer(vocabulary=vocab)
        # Vocabulary validation may happen lazily, so trigger it explicitly.
        vect.fit(["pasta_verdura"])
    except ValueError as e:
        assert_in("doesn't contain index", str(e).lower())
    else:
        # The original test passed vacuously when no exception was raised.
        raise AssertionError("Expected ValueError for vocabulary index gap")
def test_countvectorizer_stop_words():
    """stop_words accepts 'english', a custom list, and rejects junk."""
    cv = CountVectorizer()
    cv.set_params(stop_words='english')
    assert_equal(cv.get_stop_words(), ENGLISH_STOP_WORDS)
    cv.set_params(stop_words='_bad_str_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    cv.set_params(stop_words='_bad_unicode_stop_')
    assert_raises(ValueError, cv.get_stop_words)
    stoplist = ['some', 'other', 'words']
    cv.set_params(stop_words=stoplist)
    assert_equal(cv.get_stop_words(), set(stoplist))
def test_countvectorizer_empty_vocabulary():
    """Fitting with no usable terms raises an 'empty vocabulary' error."""
    try:
        vect = CountVectorizer(vocabulary=[])
        vect.fit(["foo"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
    try:
        v = CountVectorizer(max_df=1.0, stop_words="english")
        # fit on stopwords only
        v.fit(["to be or not to be", "and me too", "and so do you"])
        assert False, "we shouldn't get here"
    except ValueError as e:
        assert_in("empty vocabulary", str(e).lower())
def test_fit_countvectorizer_twice():
    """Refitting on a different corpus rebuilds the vocabulary."""
    cv = CountVectorizer()
    X1 = cv.fit_transform(ALL_FOOD_DOCS[:5])
    X2 = cv.fit_transform(ALL_FOOD_DOCS[5:])
    assert_not_equal(X1.shape[1], X2.shape[1])
def test_tf_idf_smoothing():
    """Smoothed IDF keeps tf-idf non-negative and l2-normalized."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # this is robust to features with only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=True, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
def test_tfidf_no_smoothing():
    """Unsmoothed IDF works but divides by zero on all-zero features."""
    X = [[1, 1, 1],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    tfidf = tr.fit_transform(X).toarray()
    assert_true((tfidf >= 0).all())
    # check normalization
    assert_array_almost_equal((tfidf ** 2).sum(axis=1), [1., 1., 1.])
    # the lack of smoothing make IDF fragile in the presence of feature with
    # only zeros
    X = [[1, 1, 0],
         [1, 1, 0],
         [1, 0, 0]]
    tr = TfidfTransformer(smooth_idf=False, norm='l2')
    clean_warning_registry()
    with warnings.catch_warnings(record=True) as w:
        1. / np.array([0.])
        numpy_provides_div0_warning = len(w) == 1
    in_warning_message = 'divide by zero'
    tfidf = assert_warns_message(RuntimeWarning, in_warning_message,
                                 tr.fit_transform, X).toarray()
    if not numpy_provides_div0_warning:
        raise SkipTest("Numpy does not provide div 0 warnings.")
def test_sublinear_tf():
    """sublinear_tf applies 1 + log(tf): monotonic but sub-linear growth."""
    X = [[1], [2], [3]]
    tr = TfidfTransformer(sublinear_tf=True, use_idf=False, norm=None)
    tfidf = tr.fit_transform(X).toarray()
    assert_equal(tfidf[0], 1)
    assert_greater(tfidf[1], tfidf[0])
    assert_greater(tfidf[2], tfidf[1])
    assert_less(tfidf[1], 2)
    assert_less(tfidf[2], 3)
def test_vectorizer():
    """End-to-end checks of Count/Tfidf vectorizers and transformers."""
    # raw documents as an iterator
    train_data = iter(ALL_FOOD_DOCS[:-1])
    test_data = [ALL_FOOD_DOCS[-1]]
    n_train = len(ALL_FOOD_DOCS) - 1
    # test without vocabulary
    v1 = CountVectorizer(max_df=0.5)
    counts_train = v1.fit_transform(train_data)
    if hasattr(counts_train, 'tocsr'):
        counts_train = counts_train.tocsr()
    assert_equal(counts_train[0, v1.vocabulary_["pizza"]], 2)
    # build a vectorizer v1 with the same vocabulary as the one fitted by v1
    v2 = CountVectorizer(vocabulary=v1.vocabulary_)
    # compare that the two vectorizer give the same output on the test sample
    for v in (v1, v2):
        counts_test = v.transform(test_data)
        if hasattr(counts_test, 'tocsr'):
            counts_test = counts_test.tocsr()
        vocabulary = v.vocabulary_
        assert_equal(counts_test[0, vocabulary["salad"]], 1)
        assert_equal(counts_test[0, vocabulary["tomato"]], 1)
        assert_equal(counts_test[0, vocabulary["water"]], 1)
        # stop word from the fixed list
        assert_false("the" in vocabulary)
        # stop word found automatically by the vectorizer DF thresholding
        # words that are high frequent across the complete corpus are likely
        # to be not informative (either real stop words of extraction
        # artifacts)
        assert_false("copyright" in vocabulary)
        # not present in the sample
        assert_equal(counts_test[0, vocabulary["coke"]], 0)
        assert_equal(counts_test[0, vocabulary["burger"]], 0)
        assert_equal(counts_test[0, vocabulary["beer"]], 0)
        assert_equal(counts_test[0, vocabulary["pizza"]], 0)
    # test tf-idf
    t1 = TfidfTransformer(norm='l1')
    tfidf = t1.fit(counts_train).transform(counts_train).toarray()
    assert_equal(len(t1.idf_), len(v1.vocabulary_))
    assert_equal(tfidf.shape, (n_train, len(v1.vocabulary_)))
    # test tf-idf with new data
    tfidf_test = t1.transform(counts_test).toarray()
    assert_equal(tfidf_test.shape, (len(test_data), len(v1.vocabulary_)))
    # test tf alone
    t2 = TfidfTransformer(norm='l1', use_idf=False)
    tf = t2.fit(counts_train).transform(counts_train).toarray()
    assert_false(hasattr(t2, "idf_"))
    # test idf transform with unlearned idf vector
    t3 = TfidfTransformer(use_idf=True)
    assert_raises(ValueError, t3.transform, counts_train)
    # test idf transform with incompatible n_features
    X = [[1, 1, 5],
         [1, 1, 0]]
    t3.fit(X)
    X_incompt = [[1, 3],
                 [1, 3]]
    assert_raises(ValueError, t3.transform, X_incompt)
    # L1-normalized term frequencies sum to one
    assert_array_almost_equal(np.sum(tf, axis=1), [1.0] * n_train)
    # test the direct tfidf vectorizer
    # (equivalent to term count vectorizer + tfidf transformer)
    train_data = iter(ALL_FOOD_DOCS[:-1])
    tv = TfidfVectorizer(norm='l1')
    tv.max_df = v1.max_df
    tfidf2 = tv.fit_transform(train_data).toarray()
    assert_false(tv.fixed_vocabulary_)
    assert_array_almost_equal(tfidf, tfidf2)
    # test the direct tfidf vectorizer with new data
    tfidf_test2 = tv.transform(test_data).toarray()
    assert_array_almost_equal(tfidf_test, tfidf_test2)
    # test transform on unfitted vectorizer with empty vocabulary
    v3 = CountVectorizer(vocabulary=None)
    assert_raises(ValueError, v3.transform, train_data)
    # ascii preprocessor?
    v3.set_params(strip_accents='ascii', lowercase=False)
    assert_equal(v3.build_preprocessor(), strip_accents_ascii)
    # error on bad strip_accents param
    v3.set_params(strip_accents='_gabbledegook_', preprocessor=None)
    assert_raises(ValueError, v3.build_preprocessor)
    # error with bad analyzer type
    # BUG FIX: the original line `v3.set_params = '...'` overwrote the
    # set_params method with a string instead of calling it, so the invalid
    # analyzer was never actually set.
    v3.set_params(analyzer='_invalid_analyzer_type_')
    assert_raises(ValueError, v3.build_analyzer)
def test_tfidf_vectorizer_setters():
    """Attribute assignment on TfidfVectorizer propagates to its _tfidf."""
    tv = TfidfVectorizer(norm='l2', use_idf=False, smooth_idf=False,
                         sublinear_tf=False)
    tv.norm = 'l1'
    assert_equal(tv._tfidf.norm, 'l1')
    tv.use_idf = True
    assert_true(tv._tfidf.use_idf)
    tv.smooth_idf = True
    assert_true(tv._tfidf.smooth_idf)
    tv.sublinear_tf = True
    assert_true(tv._tfidf.sublinear_tf)
def test_hashing_vectorizer():
    """HashingVectorizer output shape, sign, bounds and normalization."""
    v = HashingVectorizer()
    X = v.transform(ALL_FOOD_DOCS)
    token_nnz = X.nnz
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # By default the hashed values receive a random sign and l2 normalization
    # makes the feature values bounded
    assert_true(np.min(X.data) > -1)
    assert_true(np.min(X.data) < 0)
    assert_true(np.max(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized
    # BUG FIX: the original loop checked X[0] on every iteration instead of
    # X[i], so only the first row was ever verified.
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 2), 1.0)
    # Check vectorization with some non-default parameters
    v = HashingVectorizer(ngram_range=(1, 2), non_negative=True, norm='l1')
    X = v.transform(ALL_FOOD_DOCS)
    assert_equal(X.shape, (len(ALL_FOOD_DOCS), v.n_features))
    assert_equal(X.dtype, v.dtype)
    # ngrams generate more non zeros
    ngrams_nnz = X.nnz
    assert_true(ngrams_nnz > token_nnz)
    assert_true(ngrams_nnz < 2 * token_nnz)
    # makes the feature values bounded
    assert_true(np.min(X.data) > 0)
    assert_true(np.max(X.data) < 1)
    # Check that the rows are normalized (same X[0] -> X[i] fix as above)
    for i in range(X.shape[0]):
        assert_almost_equal(np.linalg.norm(X[i].data, 1), 1.0)
def test_feature_names():
    """get_feature_names matches the fitted vocabulary and its ordering."""
    cv = CountVectorizer(max_df=0.5)
    # test for Value error on unfitted/empty vocabulary
    assert_raises(ValueError, cv.get_feature_names)
    X = cv.fit_transform(ALL_FOOD_DOCS)
    n_samples, n_features = X.shape
    assert_equal(len(cv.vocabulary_), n_features)
    feature_names = cv.get_feature_names()
    assert_equal(len(feature_names), n_features)
    assert_array_equal(['beer', 'burger', 'celeri', 'coke', 'pizza',
                        'salad', 'sparkling', 'tomato', 'water'],
                       feature_names)
    for idx, name in enumerate(feature_names):
        assert_equal(idx, cv.vocabulary_.get(name))
def test_vectorizer_max_features():
    """max_features keeps the top terms and records the rest as stop words."""
    vec_factories = (
        CountVectorizer,
        TfidfVectorizer,
    )
    expected_vocabulary = set(['burger', 'beer', 'salad', 'pizza'])
    expected_stop_words = set([u'celeri', u'tomato', u'copyright', u'coke',
                               u'sparkling', u'water', u'the'])
    for vec_factory in vec_factories:
        # test bounded number of extracted features
        vectorizer = vec_factory(max_df=0.6, max_features=4)
        vectorizer.fit(ALL_FOOD_DOCS)
        assert_equal(set(vectorizer.vocabulary_), expected_vocabulary)
        assert_equal(vectorizer.stop_words_, expected_stop_words)
def test_count_vectorizer_max_features():
    """max_features selects the highest-frequency terms first."""
    # Regression test: max_features didn't work correctly in 0.14.
    cv_1 = CountVectorizer(max_features=1)
    cv_3 = CountVectorizer(max_features=3)
    cv_None = CountVectorizer(max_features=None)
    counts_1 = cv_1.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_3 = cv_3.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    counts_None = cv_None.fit_transform(JUNK_FOOD_DOCS).sum(axis=0)
    features_1 = cv_1.get_feature_names()
    features_3 = cv_3.get_feature_names()
    features_None = cv_None.get_feature_names()
    # The most common feature is "the", with frequency 7.
    assert_equal(7, counts_1.max())
    assert_equal(7, counts_3.max())
    assert_equal(7, counts_None.max())
    # The most common feature should be the same
    assert_equal("the", features_1[np.argmax(counts_1)])
    assert_equal("the", features_3[np.argmax(counts_3)])
    assert_equal("the", features_None[np.argmax(counts_None)])
def test_vectorizer_max_df():
    """max_df (float or int) prunes terms that appear in too many docs."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.max_df = 0.5  # 0.5 * 3 documents -> max_doc_count == 1.5
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)    # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
    vect.max_df = 1
    vect.fit(test_data)
    assert_true('a' not in vect.vocabulary_.keys())  # {ae} ignored
    assert_equal(len(vect.vocabulary_.keys()), 4)    # {bcdt} remain
    assert_true('a' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 2)
def test_vectorizer_min_df():
    """min_df (int or float) prunes terms that appear in too few docs."""
    test_data = ['abc', 'dea', 'eat']
    vect = CountVectorizer(analyzer='char', min_df=1)
    vect.fit(test_data)
    assert_true('a' in vect.vocabulary_.keys())
    assert_equal(len(vect.vocabulary_.keys()), 6)
    assert_equal(len(vect.stop_words_), 0)
    vect.min_df = 2
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdt} ignored
    assert_equal(len(vect.vocabulary_.keys()), 2)    # {ae} remain
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 4)
    vect.min_df = 0.8  # 0.8 * 3 documents -> min_doc_count == 2.4
    vect.fit(test_data)
    assert_true('c' not in vect.vocabulary_.keys())  # {bcdet} ignored
    assert_equal(len(vect.vocabulary_.keys()), 1)    # {a} remains
    assert_true('c' in vect.stop_words_)
    assert_equal(len(vect.stop_words_), 5)
def test_count_binary_occurrences():
    """binary=True records presence instead of counts; dtype is settable."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = CountVectorizer(analyzer='char', max_df=1.0)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal(['a', 'b', 'c', 'd', 'e'], vect.get_feature_names())
    assert_array_equal([[3, 1, 1, 0, 0],
                        [1, 2, 0, 1, 1]], X)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = CountVectorizer(analyzer='char', max_df=1.0, binary=True)
    X = vect.fit_transform(test_data).toarray()
    assert_array_equal([[1, 1, 1, 0, 0],
                        [1, 1, 0, 1, 1]], X)
    # check the ability to change the dtype
    vect = CountVectorizer(analyzer='char', max_df=1.0,
                           binary=True, dtype=np.float32)
    X_sparse = vect.fit_transform(test_data)
    assert_equal(X_sparse.dtype, np.float32)
def test_hashed_binary_occurrences():
    """Same binary/dtype behaviour for the hashing vectorizer."""
    # by default multiple occurrences are counted as longs
    test_data = ['aaabc', 'abbde']
    vect = HashingVectorizer(analyzer='char', non_negative=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X[0:1].data), 3)
    assert_equal(np.max(X[1:2].data), 2)
    assert_equal(X.dtype, np.float64)
    # using boolean features, we can fetch the binary occurrence info
    # instead.
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None)
    X = vect.transform(test_data)
    assert_equal(np.max(X.data), 1)
    assert_equal(X.dtype, np.float64)
    # check the ability to change the dtype
    vect = HashingVectorizer(analyzer='char', non_negative=True, binary=True,
                             norm=None, dtype=np.float64)
    X = vect.transform(test_data)
    assert_equal(X.dtype, np.float64)
def test_vectorizer_inverse_transform():
    """inverse_transform recovers each document's unique analyzed terms."""
    # raw documents
    data = ALL_FOOD_DOCS
    for vectorizer in (TfidfVectorizer(), CountVectorizer()):
        transformed_data = vectorizer.fit_transform(data)
        inversed_data = vectorizer.inverse_transform(transformed_data)
        analyze = vectorizer.build_analyzer()
        for doc, inversed_terms in zip(data, inversed_data):
            terms = np.sort(np.unique(analyze(doc)))
            inversed_terms = np.sort(np.unique(inversed_terms))
            assert_array_equal(terms, inversed_terms)
        # Test that inverse_transform also works with numpy arrays
        transformed_data = transformed_data.toarray()
        inversed_data2 = vectorizer.inverse_transform(transformed_data)
        for terms, terms2 in zip(inversed_data, inversed_data2):
            assert_array_equal(np.sort(terms), np.sort(terms2))
def test_count_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.2, random_state=0)
pipeline = Pipeline([('vect', CountVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'svc__loss': ('hinge', 'squared_hinge')
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
def test_vectorizer_pipeline_grid_selection():
# raw documents
data = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
# label junk food as -1, the others as +1
target = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
# split the dataset for model development and final evaluation
train_data, test_data, target_train, target_test = train_test_split(
data, target, test_size=.1, random_state=0)
pipeline = Pipeline([('vect', TfidfVectorizer()),
('svc', LinearSVC())])
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
'vect__norm': ('l1', 'l2'),
'svc__loss': ('hinge', 'squared_hinge'),
}
# find the best parameters for both the feature extraction and the
# classifier
grid_search = GridSearchCV(pipeline, parameters, n_jobs=1)
# Check that the best model found by grid search is 100% correct on the
# held out evaluation set.
pred = grid_search.fit(train_data, target_train).predict(test_data)
assert_array_equal(pred, target_test)
# on this toy dataset bigram representation which is used in the last of
# the grid_search is considered the best estimator since they all converge
# to 100% accuracy models
assert_equal(grid_search.best_score_, 1.0)
best_vectorizer = grid_search.best_estimator_.named_steps['vect']
assert_equal(best_vectorizer.ngram_range, (1, 1))
assert_equal(best_vectorizer.norm, 'l2')
assert_false(best_vectorizer.fixed_vocabulary_)
def test_vectorizer_pipeline_cross_validation():
    """A TfidfVectorizer + LinearSVC pipeline cross-validates perfectly."""
    documents = JUNK_FOOD_DOCS + NOTJUNK_FOOD_DOCS
    # Junk food is labeled -1, everything else +1.
    labels = [-1] * len(JUNK_FOOD_DOCS) + [1] * len(NOTJUNK_FOOD_DOCS)
    model = Pipeline([('vect', TfidfVectorizer()),
                      ('svc', LinearSVC())])
    scores = cross_val_score(model, documents, labels, cv=3)
    assert_array_equal(scores, [1., 1., 1.])
def test_vectorizer_unicode():
    # tests that the count vectorizer works with cyrillic.
    # The literal below is a UTF-8 byte sequence (escape-encoded) spelling a
    # Russian paragraph about machine learning; per the assertion further
    # down, it tokenizes into 15 distinct terms.
    document = (
        "\xd0\x9c\xd0\xb0\xd1\x88\xd0\xb8\xd0\xbd\xd0\xbd\xd0\xbe\xd0"
        "\xb5 \xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb5\xd0\xbd\xd0\xb8\xd0"
        "\xb5 \xe2\x80\x94 \xd0\xbe\xd0\xb1\xd1\x88\xd0\xb8\xd1\x80\xd0\xbd"
        "\xd1\x8b\xd0\xb9 \xd0\xbf\xd0\xbe\xd0\xb4\xd1\x80\xd0\xb0\xd0\xb7"
        "\xd0\xb4\xd0\xb5\xd0\xbb \xd0\xb8\xd1\x81\xd0\xba\xd1\x83\xd1\x81"
        "\xd1\x81\xd1\x82\xd0\xb2\xd0\xb5\xd0\xbd\xd0\xbd\xd0\xbe\xd0\xb3"
        "\xd0\xbe \xd0\xb8\xd0\xbd\xd1\x82\xd0\xb5\xd0\xbb\xd0\xbb\xd0"
        "\xb5\xd0\xba\xd1\x82\xd0\xb0, \xd0\xb8\xd0\xb7\xd1\x83\xd1\x87"
        "\xd0\xb0\xd1\x8e\xd1\x89\xd0\xb8\xd0\xb9 \xd0\xbc\xd0\xb5\xd1\x82"
        "\xd0\xbe\xd0\xb4\xd1\x8b \xd0\xbf\xd0\xbe\xd1\x81\xd1\x82\xd1\x80"
        "\xd0\xbe\xd0\xb5\xd0\xbd\xd0\xb8\xd1\x8f \xd0\xb0\xd0\xbb\xd0\xb3"
        "\xd0\xbe\xd1\x80\xd0\xb8\xd1\x82\xd0\xbc\xd0\xbe\xd0\xb2, \xd1\x81"
        "\xd0\xbf\xd0\xbe\xd1\x81\xd0\xbe\xd0\xb1\xd0\xbd\xd1\x8b\xd1\x85 "
        "\xd0\xbe\xd0\xb1\xd1\x83\xd1\x87\xd0\xb0\xd1\x82\xd1\x8c\xd1\x81\xd1"
        "\x8f.")
    vect = CountVectorizer()
    X_counted = vect.fit_transform([document])
    # 15 distinct tokens in the document.
    assert_equal(X_counted.shape, (1, 15))
    vect = HashingVectorizer(norm=None, non_negative=True)
    X_hashed = vect.transform([document])
    # 2 ** 20 hash buckets is the HashingVectorizer default n_features.
    assert_equal(X_hashed.shape, (1, 2 ** 20))
    # No collisions on such a small dataset
    assert_equal(X_counted.nnz, X_hashed.nnz)
    # When norm is None and non_negative, the tokens are counted up to
    # collisions
    assert_array_equal(np.sort(X_counted.data), np.sort(X_hashed.data))
def test_tfidf_vectorizer_with_fixed_vocabulary():
    """Non-regression smoke test for inheritance issues: with a fixed
    vocabulary, fit_transform and a subsequent transform must agree."""
    vocabulary = ['pizza', 'celeri']
    vectorizer = TfidfVectorizer(vocabulary=vocabulary)
    fitted = vectorizer.fit_transform(ALL_FOOD_DOCS)
    transformed = vectorizer.transform(ALL_FOOD_DOCS)
    assert_array_almost_equal(fitted.toarray(), transformed.toarray())
    assert_true(vectorizer.fixed_vocabulary_)
def test_pickling_vectorizer():
    """Vectorizers — fitted or not — must round-trip through pickle."""
    instances = [
        HashingVectorizer(),
        HashingVectorizer(norm='l1'),
        HashingVectorizer(binary=True),
        HashingVectorizer(ngram_range=(1, 2)),
        CountVectorizer(),
        CountVectorizer(preprocessor=strip_tags),
        CountVectorizer(analyzer=lazy_analyze),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS),
        TfidfVectorizer(),
        TfidfVectorizer(analyzer=lazy_analyze),
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
    ]
    for original in instances:
        restored = pickle.loads(pickle.dumps(original))
        # Same class, identical parameters, identical document-term matrix.
        assert_equal(type(restored), original.__class__)
        assert_equal(restored.get_params(), original.get_params())
        assert_array_equal(
            restored.fit_transform(JUNK_FOOD_DOCS).toarray(),
            original.fit_transform(JUNK_FOOD_DOCS).toarray())
def test_countvectorizer_vocab_sets_when_pickling():
    """A vocabulary given as a set must keep a stable feature order after a
    pickle round-trip (sets are coerced to lists internally)."""
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for _ in range(100):
        chosen = set(rng.choice(vocab_words, size=5, replace=False))
        original = CountVectorizer(vocabulary=chosen)
        restored = pickle.loads(pickle.dumps(original))
        original.fit(ALL_FOOD_DOCS)
        restored.fit(ALL_FOOD_DOCS)
        assert_equal(original.get_feature_names(),
                     restored.get_feature_names())
def test_countvectorizer_vocab_dicts_when_pickling():
    """A vocabulary given as a dict (term -> column index) must survive a
    pickle round-trip with the same feature names.

    Idiom cleanup: the index dict is built with a comprehension over
    enumerate() instead of a manual counter loop, and the unused loop
    variable uses ``_`` with ``range(100)`` instead of ``range(0, 100)``.
    Behavior is unchanged.
    """
    rng = np.random.RandomState(0)
    vocab_words = np.array(['beer', 'burger', 'celeri', 'coke', 'pizza',
                            'salad', 'sparkling', 'tomato', 'water'])
    for _ in range(100):
        words = rng.choice(vocab_words, size=5, replace=False)
        # Map each chosen word to its position, exactly as the original
        # index-assignment loop did.
        vocab_dict = {word: index for index, word in enumerate(words)}
        cv = CountVectorizer(vocabulary=vocab_dict)
        unpickled_cv = pickle.loads(pickle.dumps(cv))
        cv.fit(ALL_FOOD_DOCS)
        unpickled_cv.fit(ALL_FOOD_DOCS)
        assert_equal(cv.get_feature_names(), unpickled_cv.get_feature_names())
def test_stop_words_removal():
    """transform() must not depend on the fitted stop_words_ attribute."""
    fitted_vectorizers = (
        TfidfVectorizer().fit(JUNK_FOOD_DOCS),
        CountVectorizer(preprocessor=strip_tags).fit(JUNK_FOOD_DOCS),
        CountVectorizer(strip_accents=strip_eacute).fit(JUNK_FOOD_DOCS)
    )
    for vectorizer in fitted_vectorizers:
        reference = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        # Nulling the attribute must leave transform() unchanged...
        vectorizer.stop_words_ = None
        after_none = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        # ...and so must deleting it outright.
        delattr(vectorizer, 'stop_words_')
        after_delete = vectorizer.transform(JUNK_FOOD_DOCS).toarray()
        assert_array_equal(after_none, reference)
        assert_array_equal(after_delete, reference)
def test_pickling_transformer():
    """TfidfTransformer must round-trip through pickle unchanged."""
    counts = CountVectorizer().fit_transform(JUNK_FOOD_DOCS)
    original = TfidfTransformer().fit(counts)
    restored = pickle.loads(pickle.dumps(original))
    assert_equal(type(restored), original.__class__)
    assert_array_equal(
        restored.fit_transform(counts).toarray(),
        original.fit_transform(counts).toarray())
def test_non_unique_vocab():
    """Duplicate entries in a user-supplied vocabulary are rejected at fit."""
    vectorizer = CountVectorizer(vocabulary=['a', 'b', 'c', 'a', 'a'])
    assert_raises(ValueError, vectorizer.fit, [])
def test_hashingvectorizer_nan_in_docs():
    """np.nan documents (e.g. pandas missing CSV values) raise a clear
    ValueError instead of failing obscurely."""
    def fit_with_nan():
        HashingVectorizer().fit_transform(
            ['hello world', np.nan, 'hello hello'])
    assert_raise_message(
        ValueError,
        "np.nan is an invalid document, expected byte or unicode string.",
        fit_with_nan)
def test_tfidfvectorizer_binary():
    """Non-regression: TfidfVectorizer must honor its "binary" parameter."""
    vectorizer = TfidfVectorizer(binary=True, use_idf=False, norm=None)
    assert_true(vectorizer.binary)
    docs = ['hello world', 'hello hello']
    fitted = vectorizer.fit_transform(docs).toarray()
    assert_array_equal(fitted.ravel(), [1, 1, 1, 0])
    transformed = vectorizer.transform(docs).toarray()
    assert_array_equal(transformed.ravel(), [1, 1, 1, 0])
def test_tfidfvectorizer_export_idf():
    """The public idf_ must mirror the internal transformer's idf_."""
    vectorizer = TfidfVectorizer(use_idf=True)
    vectorizer.fit(JUNK_FOOD_DOCS)
    assert_array_almost_equal(vectorizer.idf_, vectorizer._tfidf.idf_)
def test_vectorizer_vocab_clone():
    """clone() must preserve a user-supplied vocabulary."""
    original = TfidfVectorizer(vocabulary=["the"])
    cloned = clone(original)
    original.fit(ALL_FOOD_DOCS)
    cloned.fit(ALL_FOOD_DOCS)
    assert_equal(cloned.vocabulary_, original.vocabulary_)
def test_vectorizer_string_object_as_input():
    """A single string instead of an iterable of documents must raise."""
    message = ("Iterable over raw text documents expected, "
               "string object received.")
    for vectorizer in (CountVectorizer(), TfidfVectorizer(),
                       HashingVectorizer()):
        # fit_transform, fit and transform must all reject a bare string.
        for method in (vectorizer.fit_transform, vectorizer.fit,
                       vectorizer.transform):
            assert_raise_message(ValueError, message, method, "hello world!")
| bsd-3-clause |
pombredanne/splash | splash/tests/stress.py | 3 | 8128 | from __future__ import print_function
import sys, requests, random, optparse, time, json
from itertools import islice
from threading import Thread
from collections import Counter
import requests
import six
from six.moves.queue import Queue
from .utils import SplashServer, MockServer
class StressTest():
    """Fire many render.html requests at a splash instance from a pool of
    worker threads and print a breakdown of response codes / errors.

    :param reqs: iterable of request-arg dicts (each passed as query params)
    :param host: splash host:port
    :param requests: maximum number of requests to take from ``reqs``
    :param concurrency: number of worker threads
    :param shuffle: randomize request order before sending
    :param verbose: per-request logging in the workers
    """
    def __init__(self, reqs, host="localhost:8050", requests=1000, concurrency=50, shuffle=False, verbose=False):
        self.reqs = reqs
        self.host = host
        self.requests = requests
        self.concurrency = concurrency
        self.shuffle = shuffle
        self.verbose = verbose

    def run(self):
        # Take at most self.requests items; ``reqs`` may yield fewer.
        args = list(islice(self.reqs, self.requests))
        if self.shuffle:
            random.shuffle(args)
        print("Total requests: %d" % len(args))
        print("Concurrency : %d" % self.concurrency)
        starttime = time.time()
        q, p = Queue(), Queue()
        for _ in six.moves.range(self.concurrency):
            t = Thread(target=worker, args=(self.host, q, p, self.verbose))
            t.daemon = True
            t.start()
        for a in args:
            q.put(a)
        q.join()
        # BUG FIX: collect exactly one result per request actually queued.
        # The original read self.requests results, which blocks forever on
        # p.get() whenever ``reqs`` yielded fewer than self.requests items.
        outputs = []
        for _ in six.moves.range(len(args)):
            outputs.append(p.get())
        elapsed = time.time() - starttime
        print()
        print("Total requests: %d" % len(args))
        print("Concurrency : %d" % self.concurrency)
        print("Elapsed time : %.3fs" % elapsed)
        # Guard against an empty request list (ZeroDivisionError before).
        if args:
            print("Avg time p/req: %.3fs" % (elapsed/len(args)))
        print("Received (per status code or error):")
        for c, n in Counter(outputs).items():
            print(" %s: %d" % (c, n))
def worker(host, q, p, verbose=False):
    """Thread target: pull request args from queue ``q``, render via splash
    at ``host``, and push the HTTP status code (or exception type) to ``p``.

    Runs forever; the owning thread is a daemon.
    """
    url = "http://%s/render.html" % host
    while True:
        # q.get() is outside the try so an empty/blocked get cannot trip the
        # exception path with unbound locals.
        args = q.get()
        start = time.time()
        try:
            r = requests.get(url, params=args)
            elapsed = time.time() - start
            p.put(r.status_code)
            if verbose:
                print(". %d %.3fs %s" % (r.status_code, elapsed, args))
            else:
                sys.stdout.write(".")
                sys.stdout.flush()
        except Exception as e:
            # BUG FIX: the original printed the raw start timestamp here
            # (``t`` was never converted to a duration on the error path).
            elapsed = time.time() - start
            p.put(type(e))
            if verbose:
                print("E %.3fs %s" % (elapsed, args))
            else:
                sys.stdout.write("E")
                sys.stdout.flush()
        finally:
            q.task_done()
class MockArgs(object):
    """Generate request args against the local mock server: a fixed mix of
    pages that succeed, fail to resolve, and time out."""

    # Fraction of the total request count assigned to each outcome.
    ok_urls = 0.5
    error_urls = 0.3
    timeout_urls = 0.2

    def __init__(self, requests=1000):
        self.requests = requests

    def _ok_urls(self):
        # Requests expected to return HTTP 200.
        return int(self.requests * self.ok_urls) * ["http://localhost:8998/jsrender"]

    def _error_urls(self):
        # Unresolvable host: expected to return HTTP 502.
        return int(self.requests * self.error_urls) * ["http://non-existent-host/"]

    def _timeout_urls(self):
        # Delay longer than the timeout: expected to return HTTP 504.
        return int(self.requests * self.timeout_urls) * ["http://localhost:8998/delay?n=10&timeout=0.5"]

    def __iter__(self):
        ok, err, slow = self._ok_urls(), self._error_urls(), self._timeout_urls()
        print("Expected codes: HTTP200x%d, HTTP502x%d, HTTP504x%d" % (
            len(ok), len(err), len(slow)))
        return ({"url": u} for u in ok + err + slow)
class ArgsFromUrlFile(object):
    """Yield splash request args from a file containing one URL per line.

    Lines without a scheme get ``http://`` prepended; every request uses a
    60 second timeout.
    """

    def __init__(self, urlfile):
        self.urlfile = urlfile

    def __iter__(self):
        # BUG FIX: use a context manager so the file handle is closed
        # deterministically (the original left it to the garbage collector).
        with open(self.urlfile) as f:
            for line in f:
                url = line.rstrip()
                if '://' not in url:
                    url = 'http://' + url
                yield {"url": url, "timeout": 60}
class ArgsFromLogfile(object):
    """Yield the 'args' dicts recorded in splash "[stats]" log lines,
    for replaying a previous run."""

    def __init__(self, logfile):
        self.logfile = logfile

    def __iter__(self):
        # BUG FIX: close the file deterministically via a context manager
        # (the original relied on garbage collection).
        with open(self.logfile) as f:
            for l in f:
                if "[stats]" in l:
                    # Log lines look like
                    # "<timestamp> [stats] {json}"; the JSON payload starts
                    # at fixed column 33.
                    d = json.loads(l[33:].rstrip())
                    yield d['args']
def lua_runonce(script, timeout=60., splash_args=None, **kwargs):
    """Start a splash server, execute *script* in it and return the output.

    :param script: Lua source to execute.
    :param timeout: timeout for the execution HTTP request.
    :param splash_args: extra command-line arguments for the splash server.
    :param kwargs: forwarded as request parameters, available to the script
        via ``splash.args``.

    A MockServer is started alongside splash; a ``url`` kwarg with scheme
    ``mock://`` (e.g. "mock://jsrender") is resolved to the corresponding
    mock server resource.
    """
    if splash_args is None:
        splash_args = ['--disable-lua-sandbox',
                       '--allowed-schemes=file,http,https', ]
    with SplashServer(extra_args=splash_args) as server, MockServer() as mock:
        target = kwargs.get('url', '')
        if target.startswith('mock://'):
            kwargs['url'] = mock.url(target[7:])
        params = {'lua_source': script}
        params.update(kwargs)
        resp = requests.get(server.url('execute'), params=params,
                            timeout=timeout)
        if not resp.ok:
            raise RuntimeError(resp.text)
        return resp.content
def benchmark_png(url, viewport=None, wait=0.5, render_all=1,
                  width=None, height=None, nrepeats=3, timeout=60.):
    # Render `url` to PNG `nrepeats` times inside splash and return a dict
    # with averaged wall-clock / CPU seconds, peak RSS and the PNG itself
    # (decoded from the JSON body returned by the Lua script below).
    # NOTE(review): the string is Lua source executed by splash via
    # lua_runonce(), not Python.
    f = """
    function main(splash)
        local resp, err = splash:go(splash.args.url)
        assert(resp, err)
        assert(splash:wait(tonumber(splash.args.wait)))
        -- if viewport is 'full' it should be set only after waiting
        if splash.args.viewport ~= nil and splash.args.viewport ~= "full" then
            local w, h = string.match(splash.args.viewport, '^(%d+)x(%d+)')
            if w == nil or h == nil then
                error('Invalid viewport size format: ' .. splash.args.viewport)
            end
            splash:set_viewport_size(tonumber(w), tonumber(h))
        end
        local susage = splash:get_perf_stats()
        local nrepeats = tonumber(splash.args.nrepeats)
        local render_all = splash.args.render_all or splash.args.viewport == 'full'
        local png, err
        for i = 1, nrepeats do
            png, err = splash:png{width=splash.args.width,
                                  height=splash.args.height,
                                  render_all=render_all}
            assert(png, err)
        end
        local eusage = splash:get_perf_stats()
        return {
            wallclock_secs=(eusage.walltime - susage.walltime) / nrepeats,
            maxrss=eusage.maxrss,
            cpu_secs=(eusage.cputime - susage.cputime) / nrepeats,
            png=png,
        }
    end
    """
    return json.loads(lua_runonce(
        f, url=url, width=width, height=height, render_all=render_all,
        nrepeats=nrepeats, wait=wait, viewport=viewport, timeout=timeout))
def parse_opts():
    """Build the command-line parser and return (options, leftover args)."""
    parser = optparse.OptionParser()
    add = parser.add_option
    add("-H", dest="host", default="localhost:8050",
        help="splash hostname & port (default: %default)")
    add("-u", dest="urlfile", metavar="FILE",
        help="read urls from FILE instead of using mock server ones")
    add("-l", dest="logfile", metavar="FILE",
        help="read urls from splash log file (useful for replaying)")
    add("-s", dest="shuffle", action="store_true", default=False,
        help="shuffle (randomize) requests (default: %default)")
    add("-v", dest="verbose", action="store_true", default=False,
        help="verbose mode (default: %default)")
    add("-c", dest="concurrency", type="int", default=50,
        help="concurrency (default: %default)")
    add("-n", dest="requests", type="int", default=1000,
        help="number of requests (default: %default)")
    return parser.parse_args()
def main():
    """Entry point: pick the request source from the CLI options and run
    the stress test against it."""
    opts, _ = parse_opts()
    if opts.urlfile:
        source = ArgsFromUrlFile(opts.urlfile)
    elif opts.logfile:
        source = ArgsFromLogfile(opts.logfile)
    else:
        source = MockArgs(opts.requests)
    StressTest(source, opts.host, opts.requests, opts.concurrency,
               opts.shuffle, opts.verbose).run()
if __name__ == "__main__":
main()
| bsd-3-clause |
qnub/django-cms | cms/south_migrations/0067_auto__add_field_aliaspluginmodel_alias_placeholder__chg_field_aliasplu.py | 59 | 18849 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    # Add the new nullable FK 'AliasPluginModel.alias_placeholder'
    # pointing at cms.Placeholder (reverse name 'alias_placeholder').
    db.add_column(u'cms_aliaspluginmodel', 'alias_placeholder',
                  self.gf('django.db.models.fields.related.ForeignKey')(related_name='alias_placeholder', null=True, to=orm['cms.Placeholder']),
                  keep_default=False)

    # Relax 'AliasPluginModel.plugin' to allow NULL.
    db.alter_column(u'cms_aliaspluginmodel', 'plugin_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['cms.CMSPlugin']))
def backwards(self, orm):
    # Reverse of forwards(): drop the added 'alias_placeholder' column...
    db.delete_column(u'cms_aliaspluginmodel', 'alias_placeholder_id')

    # ...and restore 'plugin' to NOT NULL (default=1 backfills existing
    # NULL rows with pk 1, as South requires a default here).
    db.alter_column(u'cms_aliaspluginmodel', 'plugin_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['cms.CMSPlugin']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.aliaspluginmodel': {
'Meta': {'object_name': 'AliasPluginModel', '_ormbases': ['cms.CMSPlugin']},
'alias_placeholder': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_placeholder'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'plugin': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'alias_reference'", 'null': 'True', 'to': "orm['cms.CMSPlugin']"})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
endlessm/chromium-browser | third_party/skia/infra/bots/recipe_modules/run/api.py | 12 | 3526 | # Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=W0201
from recipe_engine import recipe_api
TEST_DEFAULT_ASSET_VERSION = '42'
class SkiaStepApi(recipe_api.RecipeApi):
    # Recipe module wrapping step execution with failure tracking, retries
    # and small file-handling conveniences.
    # NOTE: this is Python 2 recipe-engine code (see xrange below).

    def __init__(self, *args, **kwargs):
        """Initialize the recipe module."""
        super(SkiaStepApi, self).__init__(*args, **kwargs)
        # Cache of results from functions executed via run_once(), keyed by
        # function name.
        self._already_ran = {}
        self._ccache = None
        self._checked_for_ccache = False
        # StepFailure exceptions recorded by __call__ when
        # fail_build_on_failure is set.
        self._failed = []

    def check_failure(self):
        """Raise an exception if any step failed."""
        if self._failed:
            raise self.m.step.StepFailure('Failed build steps: %s' %
                                          ', '.join([f.name for f in self._failed]))

    @property
    def failed_steps(self):
        # Return a copy so callers cannot mutate the internal failure list.
        return self._failed[:]

    def run_once(self, fn, *args, **kwargs):
        # Run fn only the first time it is seen; later calls return the
        # cached result. NOTE(review): keyed on fn.__name__ alone, so two
        # distinct functions sharing a name would collide.
        if not fn.__name__ in self._already_ran:
            self._already_ran[fn.__name__] = fn(*args, **kwargs)
        return self._already_ran[fn.__name__]

    def readfile(self, filename, *args, **kwargs):
        """Convenience function for reading files."""
        # Step name defaults to "read <basename>" unless overridden.
        name = kwargs.pop('name', 'read %s' % self.m.path.basename(filename))
        return self.m.file.read_text(name, filename, *args, **kwargs)

    def writefile(self, filename, contents):
        """Convenience function for writing files."""
        return self.m.file.write_text('write %s' % self.m.path.basename(filename),
                                      filename, contents)

    def rmtree(self, path):
        """Wrapper around api.file.rmtree."""
        self.m.file.rmtree('rmtree %s' % self.m.path.basename(path), path)

    def asset_version(self, asset_name, skia_dir, test_data=None):
        """Return the contents of VERSION for the given asset as a string.

        If test_data is not specified, reads the property
        'test_<asset_name>_version' or if not present, uses
        TEST_DEFAULT_ASSET_VERSION."""
        version_file = skia_dir.join(
            'infra', 'bots', 'assets', asset_name, 'VERSION')
        if not test_data:
            test_data = self.m.properties.get(
                'test_%s_version' % asset_name, TEST_DEFAULT_ASSET_VERSION)
        # rstrip() drops the trailing newline of the VERSION file.
        return self.m.file.read_text('Get %s VERSION' % asset_name,
                                     version_file,
                                     test_data=test_data).rstrip()

    def __call__(self, steptype, name, abort_on_failure=True,
                 fail_build_on_failure=True, **kwargs):
        """Run a step. If it fails, keep going but mark the build status failed."""
        try:
            with self.m.env(self.m.vars.default_env):
                return steptype(name=name, **kwargs)
        except self.m.step.StepFailure as e:
            if fail_build_on_failure:
                self._failed.append(e)
            if abort_on_failure:
                raise

    def with_retry(self, steptype, name, attempts, between_attempts_fn=None,
                   abort_on_failure=True, fail_build_on_failure=True, **kwargs):
        # Run the step up to `attempts` times, renaming retries
        # "name (attempt N)". between_attempts_fn, if given, is called with
        # the 1-based attempt number after each failed attempt except the
        # last.
        for attempt in xrange(attempts):
            step_name = name
            if attempt > 0:
                step_name += ' (attempt %d)' % (attempt + 1)
            try:
                res = self(steptype, name=step_name, abort_on_failure=True,
                           fail_build_on_failure=fail_build_on_failure, **kwargs)
                if attempt > 0 and fail_build_on_failure:
                    # A retry succeeded: forget the failures recorded for
                    # the earlier attempts of this same step.
                    del self._failed[-attempt:]
                return res
            except self.m.step.StepFailure:
                if attempt == attempts - 1:
                    if abort_on_failure:
                        raise
                elif between_attempts_fn:
                    between_attempts_fn(attempt+1)
| bsd-3-clause |
razr1/mx | cocos2d/download-deps.py | 63 | 12254 | #!/usr/bin/env python
#coding=utf-8
#
# ./download-deps.py
#
# Download Cocos2D-X resources from github (https://github.com/cocos2d/cocos2d-x-3rd-party-libs-bin) and extract from ZIP
#
# Helps prevent repo bloat due to large binary files since they can
# be hosted separately.
#
"""****************************************************************************
Copyright (c) 2014 cocos2d-x.org
Copyright (c) 2014 Chukong Technologies Inc.
http://www.cocos2d-x.org
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
****************************************************************************"""
import os.path,zipfile
import shutil
import sys
import traceback
import distutils
import fileinput
import json
from optparse import OptionParser
from time import time
from sys import stdout
from distutils.errors import DistutilsError
from distutils.dir_util import copy_tree, remove_tree
class UnrecognizedFormat(Exception):
    """Raised when a downloaded file is not a valid zip archive.

    Fix: the original was a plain (old-style) class, and raising such an
    instance only works on Python 2; deriving from Exception keeps the
    ``raise UnrecognizedFormat(...)`` call sites working everywhere while
    preserving the str() behaviour.
    """

    def __init__(self, prompt):
        super(UnrecognizedFormat, self).__init__(prompt)
        # Human-readable description of what was wrong with the file.
        self._prompt = prompt

    def __str__(self):
        return self._prompt
class CocosZipInstaller(object):
    """Downloads a release zip of a cocos2d-x dependencies repo from GitHub,
    extracts it, and copies the contents into a target folder.

    The repo name, version and expected zip size come from a JSON config
    file; the locally installed version is tracked in a separate JSON
    version file so up-to-date installations can skip the download.
    """

    def __init__(self, workpath, config_path, version_path, remote_version_key=None):
        """
        workpath           -- directory used for downloading and extracting
        config_path        -- JSON config describing what to download
        version_path       -- JSON file recording the installed version
        remote_version_key -- optional key in the version file holding the
                              installed version (defaults to "version")
        """
        self._workpath = workpath
        self._config_path = config_path
        self._version_path = version_path

        data = self.load_json_file(config_path)
        self._current_version = data["version"]
        self._repo_name = data["repo_name"]
        self._filename = self._current_version + '.zip'
        self._url = data["repo_parent"] + self._repo_name + '/archive/' + self._filename
        # Fallback size for progress reporting when the server omits the
        # Content-Length header.
        self._zip_file_size = int(data["zip_file_size"])
        # 'v' letter was swallowed by github, so we need to substring it from the 2nd letter
        self._extracted_folder_name = os.path.join(self._workpath, self._repo_name + '-' + self._current_version[1:])

        try:
            data = self.load_json_file(version_path)
            if remote_version_key is None:
                self._remote_version = data["version"]
            else:
                self._remote_version = data[remote_version_key]
        except Exception:
            # A missing/unreadable version file just means no installed
            # version is recorded; need_to_update() handles that case.
            print("==> version file doesn't exist")

    def get_input_value(self, prompt):
        """Prompt the user and return the answer without trailing spaces/tabs.

        Fix: the original called ret.rstrip(" \t") and discarded the result
        (str.rstrip returns a new string).
        """
        return raw_input(prompt).rstrip(" \t")

    def download_file(self):
        """Download self._url to self._filename, printing a progress line."""
        print("==> Ready to download '%s' from '%s'" % (self._filename, self._url))
        import urllib2
        try:
            u = urllib2.urlopen(self._url)
        except urllib2.HTTPError as e:
            if e.code == 404:
                print("==> Error: Could not find the file from url: '%s'" % (self._url))
            print("==> Http request failed, error code: " + str(e.code) + ", reason: " + e.read())
            sys.exit(1)

        f = open(self._filename, 'wb')
        meta = u.info()
        content_len = meta.getheaders("Content-Length")
        file_size = 0
        if content_len and len(content_len) > 0:
            file_size = int(content_len[0])
        else:
            # github server may not reponse a header information which contains `Content-Length`,
            # therefore, the size needs to be written hardcode here. While server doesn't return
            # `Content-Length`, use it instead
            print("==> WARNING: Couldn't grab the file size from remote, use 'zip_file_size' section in '%s'" % self._config_path)
            file_size = self._zip_file_size

        print("==> Start to download, please wait ...")

        file_size_dl = 0
        block_sz = 8192
        block_size_per_second = 0
        old_time = time()

        while True:
            buffer = u.read(block_sz)
            if not buffer:
                break

            file_size_dl += len(buffer)
            block_size_per_second += len(buffer)
            f.write(buffer)
            new_time = time()
            # Refresh the progress line at most once per second.
            if (new_time - old_time) > 1:
                speed = block_size_per_second / (new_time - old_time) / 1000.0
                status = ""
                if file_size != 0:
                    percent = file_size_dl * 100. / file_size
                    status = r"Downloaded: %6dK / Total: %dK, Percent: %3.2f%%, Speed: %6.2f KB/S " % (file_size_dl / 1000, file_size / 1000, percent, speed)
                else:
                    status = r"Downloaded: %6dK, Speed: %6.2f KB/S " % (file_size_dl / 1000, speed)

                # Backspaces rewind the cursor so the next status overwrites
                # this one (Python 2 print statement, comma suppresses \n).
                status = status + chr(8)*(len(status)+1)
                print(status),
                sys.stdout.flush()
                block_size_per_second = 0
                old_time = new_time

        print("==> Downloading finished!")
        f.close()

    def ensure_directory(self, target):
        """Create `target` if it does not exist yet (single level)."""
        if not os.path.exists(target):
            os.mkdir(target)

    def unpack_zipfile(self, extract_dir):
        """Unpack zip `filename` to `extract_dir`

        Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined
        by ``zipfile.is_zipfile()``).
        """

        if not zipfile.is_zipfile(self._filename):
            raise UnrecognizedFormat("%s is not a zip file" % (self._filename))

        print("==> Extracting files, please wait ...")
        z = zipfile.ZipFile(self._filename)
        try:
            for info in z.infolist():
                name = info.filename

                # don't extract absolute paths or ones with .. in them
                if name.startswith('/') or '..' in name:
                    continue

                target = os.path.join(extract_dir, *name.split('/'))
                if not target:
                    continue
                if name.endswith('/'):
                    # directory
                    self.ensure_directory(target)
                else:
                    # file
                    data = z.read(info.filename)
                    f = open(target, 'wb')
                    try:
                        f.write(data)
                    finally:
                        f.close()
                        del data
                # Restore Unix permission bits stored in the zip entry.
                unix_attributes = info.external_attr >> 16
                if unix_attributes:
                    os.chmod(target, unix_attributes)
        finally:
            z.close()
            print("==> Extraction done!")

    def ask_to_delete_downloaded_zip_file(self):
        """Ask the user whether to delete the zip; returns True for 'yes'."""
        ret = self.get_input_value("==> Whether to delete '%s' file? It may be reused when you execute this script next time! (yes/no): " % self._filename)
        ret = ret.strip()
        if ret not in ('yes', 'no'):
            print("==> Invalid answer, please answer 'yes' or 'no'!")
            # Re-prompt until a valid answer is given.
            return self.ask_to_delete_downloaded_zip_file()
        else:
            return True if ret == 'yes' else False

    def download_zip_file(self):
        """Ensure a valid zip is present locally, re-downloading if corrupt."""
        if not os.path.isfile(self._filename):
            self.download_file()
        try:
            if not zipfile.is_zipfile(self._filename):
                raise UnrecognizedFormat("%s is not a zip file" % (self._filename))
        except UnrecognizedFormat as e:
            print("==> Unrecognized zip format from your local '%s' file!" % (self._filename))
            if os.path.isfile(self._filename):
                os.remove(self._filename)
            print("==> Download it from internet again, please wait...")
            # NOTE: recurses until a valid zip is obtained.
            self.download_zip_file()

    def need_to_update(self):
        """Return True when the configured version differs from the installed one."""
        if not os.path.isfile(self._version_path):
            return True
        # Fix: the original re-read the version file into an unused variable,
        # and crashed with AttributeError when the file existed but __init__
        # had failed to parse it (so _remote_version was never set).  A
        # missing/unparsable recorded version is treated as "needs update".
        return getattr(self, '_remote_version', None) != self._current_version

    def load_json_file(self, file_path):
        """Load and return the JSON document at `file_path`."""
        if not os.path.isfile(file_path):
            raise Exception("Could not find (%s)" % (file_path))

        with open(file_path) as data_file:
            data = json.load(data_file)
        return data

    def run(self, folder_for_extracting, remove_downloaded, force_update, download_only):
        """Download, extract and install the dependencies.

        folder_for_extracting -- destination directory for the contents
        remove_downloaded     -- 'yes'/'no'/None; None means ask the user
        force_update          -- install even when versions already match
        download_only         -- stop after downloading the zip
        """
        if not force_update and not self.need_to_update():
            print("==> Not need to update!")
            return

        if os.path.exists(self._extracted_folder_name):
            shutil.rmtree(self._extracted_folder_name)

        self.download_zip_file()

        if not download_only:
            self.unpack_zipfile(self._workpath)
            print("==> Copying files...")
            if not os.path.exists(folder_for_extracting):
                os.mkdir(folder_for_extracting)
            distutils.dir_util.copy_tree(self._extracted_folder_name, folder_for_extracting)
            print("==> Cleaning...")
            if os.path.exists(self._extracted_folder_name):
                shutil.rmtree(self._extracted_folder_name)
            if os.path.isfile(self._filename):
                if remove_downloaded != None:
                    if remove_downloaded == 'yes':
                        os.remove(self._filename)
                elif self.ask_to_delete_downloaded_zip_file():
                    os.remove(self._filename)
        else:
            print("==> Download (%s) finish!" % self._filename)
def _check_python_version():
major_ver = sys.version_info[0]
if major_ver > 2:
print ("The python version is %d.%d. But python 2.x is required. (Version 2.7 is well tested)\n"
"Download it here: https://www.python.org/" % (major_ver, sys.version_info[1]))
return False
return True
def main():
    """Download the prebuilt external libraries and the lua runtime binaries
    into the cocos2d-x source tree next to this script."""
    workpath = os.path.dirname(os.path.realpath(__file__))

    # Abort early on unsupported interpreters (this script is Python 2 only).
    if not _check_python_version():
        exit()

    parser = OptionParser()
    parser.add_option('-r', '--remove-download',
                      action="store", type="string", dest='remove_downloaded', default=None,
                      help="Whether to remove downloaded zip file, 'yes' or 'no'")
    parser.add_option("-f", "--force-update",
                      action="store_true", dest="force_update", default=False,
                      help="Whether to force update the third party libraries")
    parser.add_option("-d", "--download-only",
                      action="store_true", dest="download_only", default=False,
                      help="Only download zip file of the third party libraries, will not extract it")

    (opts, args) = parser.parse_args()

    print("=======================================================")
    print("==> Prepare to download external libraries!")
    external_path = os.path.join(workpath, 'external')
    # The external-libs version file uses a dedicated key for the prebuilt libs.
    installer = CocosZipInstaller(workpath, os.path.join(workpath, 'external', 'config.json'), os.path.join(workpath, 'external', 'version.json'), "prebuilt_libs_version")
    installer.run(external_path, opts.remove_downloaded, opts.force_update, opts.download_only)

    print("=======================================================")
    print("==> Prepare to download lua runtime binaries")
    runtime_path = os.path.join(workpath, 'templates', 'lua-template-runtime', 'runtime')
    installer = CocosZipInstaller(workpath, os.path.join(runtime_path, 'config.json'), os.path.join(runtime_path, 'version.json'))
    installer.run(runtime_path, opts.remove_downloaded, opts.force_update, opts.download_only)
# -------------- main --------------
# Script entry point: any unexpected failure is reported with a full
# traceback and a non-zero exit status so callers/CI can detect it.
if __name__ == '__main__':
    try:
        main()
    except Exception as e:
        traceback.print_exc()
        sys.exit(1)
| mit |
NcLang/vimrc | sources_non_forked/YouCompleteMe/third_party/pythonfutures/concurrent/futures/_compat.py | 179 | 4645 | from keyword import iskeyword as _iskeyword
from operator import itemgetter as _itemgetter
import sys as _sys
def namedtuple(typename, field_names):
    """Returns a new subclass of tuple with named fields.

    >>> Point = namedtuple('Point', 'x y')
    >>> Point.__doc__                   # docstring for the new class
    'Point(x, y)'
    >>> p = Point(11, y=22)             # instantiate with positional args or keywords
    >>> p[0] + p[1]                     # indexable like a plain tuple
    33
    >>> x, y = p                        # unpack like a regular tuple
    >>> x, y
    (11, 22)
    >>> p.x + p.y                       # fields also accessable by name
    33
    >>> d = p._asdict()                 # convert to a dictionary
    >>> d['x']
    11
    >>> Point(**d)                      # convert from a dictionary
    Point(x=11, y=22)
    >>> p._replace(x=100)               # _replace() is like str.replace() but targets named fields
    Point(x=100, y=22)

    """

    # Parse and validate the field names.  Validation serves two purposes,
    # generating informative error messages and preventing template injection attacks.
    # (Python 2 backport: `basestring` covers both str and unicode.)
    if isinstance(field_names, basestring):
        field_names = field_names.replace(',', ' ').split() # names separated by whitespace and/or commas
    field_names = tuple(map(str, field_names))
    for name in (typename,) + field_names:
        if not all(c.isalnum() or c=='_' for c in name):
            raise ValueError('Type names and field names can only contain alphanumeric characters and underscores: %r' % name)
        if _iskeyword(name):
            raise ValueError('Type names and field names cannot be a keyword: %r' % name)
        if name[0].isdigit():
            raise ValueError('Type names and field names cannot start with a number: %r' % name)
    seen_names = set()
    for name in field_names:
        if name.startswith('_'):
            raise ValueError('Field names cannot start with an underscore: %r' % name)
        if name in seen_names:
            raise ValueError('Encountered duplicate field name: %r' % name)
        seen_names.add(name)

    # Create and fill-in the class template.  Note the doubled %% inside the
    # template: they survive the locals() substitution below and become
    # single % placeholders in the generated class source.
    numfields = len(field_names)
    argtxt = repr(field_names).replace("'", "")[1:-1] # tuple repr without parens or quotes
    reprtxt = ', '.join('%s=%%r' % name for name in field_names)
    dicttxt = ', '.join('%r: t[%d]' % (name, pos) for pos, name in enumerate(field_names))
    template = '''class %(typename)s(tuple):
        '%(typename)s(%(argtxt)s)' \n
        __slots__ = () \n
        _fields = %(field_names)r \n
        def __new__(_cls, %(argtxt)s):
            return _tuple.__new__(_cls, (%(argtxt)s)) \n
        @classmethod
        def _make(cls, iterable, new=tuple.__new__, len=len):
            'Make a new %(typename)s object from a sequence or iterable'
            result = new(cls, iterable)
            if len(result) != %(numfields)d:
                raise TypeError('Expected %(numfields)d arguments, got %%d' %% len(result))
            return result \n
        def __repr__(self):
            return '%(typename)s(%(reprtxt)s)' %% self \n
        def _asdict(t):
            'Return a new dict which maps field names to their values'
            return {%(dicttxt)s} \n
        def _replace(_self, **kwds):
            'Return a new %(typename)s object replacing specified fields with new values'
            result = _self._make(map(kwds.pop, %(field_names)r, _self))
            if kwds:
                raise ValueError('Got unexpected field names: %%r' %% kwds.keys())
            return result \n
        def __getnewargs__(self):
            return tuple(self) \n\n''' % locals()
    # One read-only property per field, reading the tuple slot by index.
    for i, name in enumerate(field_names):
        template += '        %s = _property(_itemgetter(%d))\n' % (name, i)

    # Execute the template string in a temporary namespace and
    # support tracing utilities by setting a value for frame.f_globals['__name__']
    namespace = dict(_itemgetter=_itemgetter, __name__='namedtuple_%s' % typename,
                     _property=property, _tuple=tuple)
    try:
        exec(template, namespace)
    except SyntaxError:
        e = _sys.exc_info()[1]
        # Include the generated source to make the failure debuggable.
        raise SyntaxError(e.message + ':\n' + template)
    result = namespace[typename]

    # For pickling to work, the __module__ variable needs to be set to the frame
    # where the named tuple is created.  Bypass this step in enviroments where
    # sys._getframe is not defined (Jython for example).
    if hasattr(_sys, '_getframe'):
        result.__module__ = _sys._getframe(1).f_globals.get('__name__', '__main__')

    return result
| mit |
jundongl/PyFeaST | skfeature/example/test_CIFE.py | 3 | 1485 | import scipy.io
from sklearn.metrics import accuracy_score
from sklearn import cross_validation
from sklearn import svm
from skfeature.function.information_theoretical_based import CIFE
def main():
    """Evaluate CIFE feature selection on the colon dataset with 10-fold CV.

    NOTE: Python 2 module (print statement) using the deprecated
    sklearn.cross_validation API.
    """
    # load data
    mat = scipy.io.loadmat('../data/colon.mat')
    X = mat['X']    # data
    X = X.astype(float)
    y = mat['Y']    # label
    y = y[:, 0]
    n_samples, n_features = X.shape    # number of samples and number of features

    # split data into 10 folds
    ss = cross_validation.KFold(n_samples, n_folds=10, shuffle=True)

    # perform evaluation on classification task
    num_fea = 10    # number of selected features
    clf = svm.LinearSVC()    # linear SVM

    correct = 0
    for train, test in ss:
        # obtain the index of each feature on the training set
        idx,_,_ = CIFE.cife(X[train], y[train], n_selected_features=num_fea)

        # obtain the dataset on the selected features
        features = X[:, idx[0:num_fea]]

        # train a classification model with the selected features on the training dataset
        clf.fit(features[train], y[train])

        # predict the class labels of test data
        y_predict = clf.predict(features[test])

        # obtain the classification accuracy on the test data
        acc = accuracy_score(y[test], y_predict)
        correct = correct + acc

    # output the average classification accuracy over all 10 folds
    print 'Accuracy:', float(correct)/10
# Script entry point.
if __name__ == '__main__':
    main()
| gpl-2.0 |
brunoflores/tapiriik | tapiriik/services/tcx.py | 9 | 22210 | from lxml import etree
from pytz import UTC
import copy
import dateutil.parser
from datetime import timedelta
from .interchange import WaypointType, Activity, ActivityStatistic, ActivityStatistics, ActivityStatisticUnit, ActivityType, Waypoint, Location, Lap, LapIntensity, LapTriggerMethod
from .devices import DeviceIdentifier, DeviceIdentifierType, Device
class TCXIO:
    """Serializer/deserializer between Garmin TCX documents and Activity objects.

    Parse and Dump are written without `self` and are intended to be called
    directly on the class (TCXIO.Parse(...) / TCXIO.Dump(...)), which works
    on Python 3 where functions accessed through a class are plain functions.
    """

    # Prefix -> URI map used both for parsing and for serializing.  The
    # None key is the document's default namespace.
    Namespaces = {
        None: "http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2",
        "ns2": "http://www.garmin.com/xmlschemas/UserProfile/v2",
        "tpx": "http://www.garmin.com/xmlschemas/ActivityExtension/v2",
        "ns4": "http://www.garmin.com/xmlschemas/ProfileExtension/v1",
        "ns5": "http://www.garmin.com/xmlschemas/ActivityGoals/v1",
        "xsi": "http://www.w3.org/2001/XMLSchema-instance"
    }

    def Parse(tcxData, act=None):
        """Parse a TCX document into an Activity.

        tcxData -- the raw TCX XML (bytes or str)
        act     -- optional existing Activity to merge the parsed data into

        Returns the populated Activity; raises ValueError on structurally
        invalid documents (missing activity, lap time/distance, timestamps).
        """
        ns = copy.deepcopy(TCXIO.Namespaces)
        # ElementPath cannot search in the default (None-keyed) namespace;
        # expose it under the "tcx" prefix instead.
        ns["tcx"] = ns[None]
        del ns[None]
        act = act if act else Activity()

        act.GPS = False

        try:
            root = etree.XML(tcxData)
        except Exception:
            # Presumably a str/bytes or encoding-declaration fallback;
            # narrowed from a bare except so KeyboardInterrupt etc. escape.
            root = etree.fromstring(tcxData)

        xacts = root.find("tcx:Activities", namespaces=ns)
        if xacts is None:
            raise ValueError("No activities element in TCX")

        xact = xacts.find("tcx:Activity", namespaces=ns)
        if xact is None:
            raise ValueError("No activity element in TCX")

        # Files we generated ourselves are tagged via the Author name.
        xauthor = root.find("tcx:Author", namespaces=ns)
        if xauthor is not None:
            xauthorname = xauthor.find("tcx:Name", namespaces=ns)
            if xauthorname is not None:
                if xauthorname.text == "tapiriik":
                    act.OriginatedFromTapiriik = True

        if not act.Type or act.Type == ActivityType.Other:
            if xact.attrib["Sport"] == "Biking":
                act.Type = ActivityType.Cycling
            elif xact.attrib["Sport"] == "Running":
                act.Type = ActivityType.Running

        # Dump() stores "<name>\n<notes...>" in Notes; split it back apart.
        xnotes = xact.find("tcx:Notes", namespaces=ns)
        if xnotes is not None and xnotes.text:
            xnotes_lines = xnotes.text.splitlines()
            act.Name = xnotes_lines[0]
            if len(xnotes_lines) > 1:
                act.Notes = '\n'.join(xnotes_lines[1:])

        xcreator = xact.find("tcx:Creator", namespaces=ns)
        if xcreator is not None and xcreator.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] == "Device_t":
            devId = DeviceIdentifier.FindMatchingIdentifierOfType(DeviceIdentifierType.TCX, {"ProductID": int(xcreator.find("tcx:ProductID", namespaces=ns).text)}) # Who knows if this is unique in the TCX ecosystem? We'll find out!
            xver = xcreator.find("tcx:Version", namespaces=ns)
            verMaj = None
            verMin = None
            if xver is not None:
                verMaj = int(xver.find("tcx:VersionMajor", namespaces=ns).text)
                verMin = int(xver.find("tcx:VersionMinor", namespaces=ns).text)
            act.Device = Device(devId, int(xcreator.find("tcx:UnitId", namespaces=ns).text), verMaj=verMaj, verMin=verMin) # ID vs Id: ???

        xlaps = xact.findall("tcx:Lap", namespaces=ns)
        startTime = None
        endTime = None
        for xlap in xlaps:
            lap = Lap()
            act.Laps.append(lap)
            lap.StartTime = dateutil.parser.parse(xlap.attrib["StartTime"])

            totalTimeEL = xlap.find("tcx:TotalTimeSeconds", namespaces=ns)
            if totalTimeEL is None:
                raise ValueError("Missing lap TotalTimeSeconds")
            lap.Stats.TimerTime = ActivityStatistic(ActivityStatisticUnit.Seconds, float(totalTimeEL.text))
            # Provisional end time; refined from the last trackpoint below.
            lap.EndTime = lap.StartTime + timedelta(seconds=float(totalTimeEL.text))

            distEl = xlap.find("tcx:DistanceMeters", namespaces=ns)
            energyEl = xlap.find("tcx:Calories", namespaces=ns)
            triggerEl = xlap.find("tcx:TriggerMethod", namespaces=ns)
            intensityEl = xlap.find("tcx:Intensity", namespaces=ns)

            # Some applications slack off and omit these, despite the fact that they're required in the spec.
            # I will, however, require lap distance, because, seriously.
            if distEl is None:
                raise ValueError("Missing lap DistanceMeters")

            lap.Stats.Distance = ActivityStatistic(ActivityStatisticUnit.Meters, float(distEl.text))

            if energyEl is not None and energyEl.text:
                lap.Stats.Energy = ActivityStatistic(ActivityStatisticUnit.Kilocalories, float(energyEl.text))
                if lap.Stats.Energy.Value == 0:
                    lap.Stats.Energy.Value = None # It's dumb to make this required, but I digress.

            if intensityEl is not None:
                lap.Intensity = LapIntensity.Active if intensityEl.text == "Active" else LapIntensity.Rest
            else:
                lap.Intensity = LapIntensity.Active

            if triggerEl is not None:
                lap.Trigger = ({
                    "Manual": LapTriggerMethod.Manual,
                    "Distance": LapTriggerMethod.Distance,
                    "Location": LapTriggerMethod.PositionMarked,
                    "Time": LapTriggerMethod.Time,
                    "HeartRate": LapTriggerMethod.Manual # I guess - no equivalent in FIT
                    })[triggerEl.text]
            else:
                lap.Trigger = LapTriggerMethod.Manual # One would presume

            maxSpdEl = xlap.find("tcx:MaximumSpeed", namespaces=ns)
            if maxSpdEl is not None:
                lap.Stats.Speed = ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, max=float(maxSpdEl.text))

            avgHREl = xlap.find("tcx:AverageHeartRateBpm", namespaces=ns)
            if avgHREl is not None:
                lap.Stats.HR = ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, avg=float(avgHREl.find("tcx:Value", namespaces=ns).text))

            maxHREl = xlap.find("tcx:MaximumHeartRateBpm", namespaces=ns)
            if maxHREl is not None:
                lap.Stats.HR.update(ActivityStatistic(ActivityStatisticUnit.BeatsPerMinute, max=float(maxHREl.find("tcx:Value", namespaces=ns).text)))

            # WF fills these in with invalid values.
            lap.Stats.HR.Max = lap.Stats.HR.Max if lap.Stats.HR.Max and lap.Stats.HR.Max > 10 else None
            lap.Stats.HR.Average = lap.Stats.HR.Average if lap.Stats.HR.Average and lap.Stats.HR.Average > 10 else None

            cadEl = xlap.find("tcx:Cadence", namespaces=ns)
            if cadEl is not None:
                lap.Stats.Cadence = ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, avg=float(cadEl.text))

            # Garmin ActivityExtension ("LX") lap-level statistics.
            extsEl = xlap.find("tcx:Extensions", namespaces=ns)
            if extsEl is not None:
                lxEls = extsEl.findall("tpx:LX", namespaces=ns)
                for lxEl in lxEls:
                    avgSpeedEl = lxEl.find("tpx:AvgSpeed", namespaces=ns)
                    if avgSpeedEl is not None:
                        lap.Stats.Speed.update(ActivityStatistic(ActivityStatisticUnit.MetersPerSecond, avg=float(avgSpeedEl.text)))
                    maxBikeCadEl = lxEl.find("tpx:MaxBikeCadence", namespaces=ns)
                    if maxBikeCadEl is not None:
                        lap.Stats.Cadence.update(ActivityStatistic(ActivityStatisticUnit.RevolutionsPerMinute, max=float(maxBikeCadEl.text)))
                    maxPowerEl = lxEl.find("tpx:MaxWatts", namespaces=ns)
                    if maxPowerEl is not None:
                        lap.Stats.Power.update(ActivityStatistic(ActivityStatisticUnit.Watts, max=float(maxPowerEl.text)))
                    avgPowerEl = lxEl.find("tpx:AvgWatts", namespaces=ns)
                    if avgPowerEl is not None:
                        lap.Stats.Power.update(ActivityStatistic(ActivityStatisticUnit.Watts, avg=float(avgPowerEl.text)))
                    maxRunCadEl = lxEl.find("tpx:MaxRunCadence", namespaces=ns)
                    if maxRunCadEl is not None:
                        lap.Stats.RunCadence.update(ActivityStatistic(ActivityStatisticUnit.StepsPerMinute, max=float(maxRunCadEl.text)))
                    avgRunCadEl = lxEl.find("tpx:AvgRunCadence", namespaces=ns)
                    if avgRunCadEl is not None:
                        lap.Stats.RunCadence.update(ActivityStatistic(ActivityStatisticUnit.StepsPerMinute, avg=float(avgRunCadEl.text)))
                    stepsEl = lxEl.find("tpx:Steps", namespaces=ns)
                    if stepsEl is not None:
                        lap.Stats.Strides.update(ActivityStatistic(ActivityStatisticUnit.Strides, value=float(stepsEl.text)))

            xtrkseg = xlap.find("tcx:Track", namespaces=ns)
            if xtrkseg is None:
                # Some TCX files have laps with no track - not sure if it's valid or not.
                continue
            for xtrkpt in xtrkseg.findall("tcx:Trackpoint", namespaces=ns):
                wp = Waypoint()

                tsEl = xtrkpt.find("tcx:Time", namespaces=ns)
                if tsEl is None:
                    raise ValueError("Trackpoint without timestamp")

                wp.Timestamp = dateutil.parser.parse(tsEl.text)
                if wp.Timestamp.tzinfo is None:
                    # Fix: datetime.replace() returns a *new* datetime - the
                    # previous code discarded the result, leaving naive
                    # timestamps naive.  Only naive timestamps are coerced to
                    # UTC so explicit offsets from the file are preserved.
                    wp.Timestamp = wp.Timestamp.replace(tzinfo=UTC)
                if startTime is None or wp.Timestamp < startTime:
                    startTime = wp.Timestamp
                if endTime is None or wp.Timestamp > endTime:
                    endTime = wp.Timestamp
                xpos = xtrkpt.find("tcx:Position", namespaces=ns)
                if xpos is not None:
                    act.GPS = True
                    wp.Location = Location(float(xpos.find("tcx:LatitudeDegrees", namespaces=ns).text), float(xpos.find("tcx:LongitudeDegrees", namespaces=ns).text), None)
                eleEl = xtrkpt.find("tcx:AltitudeMeters", namespaces=ns)
                if eleEl is not None:
                    wp.Location = wp.Location if wp.Location else Location(None, None, None)
                    wp.Location.Altitude = float(eleEl.text)
                distEl = xtrkpt.find("tcx:DistanceMeters", namespaces=ns)
                if distEl is not None:
                    wp.Distance = float(distEl.text)

                hrEl = xtrkpt.find("tcx:HeartRateBpm", namespaces=ns)
                if hrEl is not None:
                    wp.HR = float(hrEl.find("tcx:Value", namespaces=ns).text)
                cadEl = xtrkpt.find("tcx:Cadence", namespaces=ns)
                if cadEl is not None:
                    wp.Cadence = float(cadEl.text)

                # Garmin ActivityExtension ("TPX") per-trackpoint values.
                extsEl = xtrkpt.find("tcx:Extensions", namespaces=ns)
                if extsEl is not None:
                    tpxEl = extsEl.find("tpx:TPX", namespaces=ns)
                    if tpxEl is not None:
                        powerEl = tpxEl.find("tpx:Watts", namespaces=ns)
                        if powerEl is not None:
                            wp.Power = float(powerEl.text)
                        speedEl = tpxEl.find("tpx:Speed", namespaces=ns)
                        if speedEl is not None:
                            wp.Speed = float(speedEl.text)
                        runCadEl = tpxEl.find("tpx:RunCadence", namespaces=ns)
                        if runCadEl is not None:
                            wp.RunCadence = float(runCadEl.text)
                lap.Waypoints.append(wp)
                # Free parsed XML as we go - TCX files can be large.
                xtrkpt.clear()
                del xtrkpt
            if len(lap.Waypoints):
                lap.EndTime = lap.Waypoints[-1].Timestamp

        act.StartTime = act.Laps[0].StartTime if len(act.Laps) else act.StartTime
        act.EndTime = act.Laps[-1].EndTime if len(act.Laps) else act.EndTime

        if act.CountTotalWaypoints():
            act.Stationary = False
            act.GetFlatWaypoints()[0].Type = WaypointType.Start
            act.GetFlatWaypoints()[-1].Type = WaypointType.End
        else:
            act.Stationary = True

        if len(act.Laps) == 1:
            act.Laps[0].Stats.update(act.Stats) # External source is authorative
            act.Stats = act.Laps[0].Stats
        else:
            sum_stats = ActivityStatistics() # Blank
            for lap in act.Laps:
                sum_stats.sumWith(lap.Stats)
            sum_stats.update(act.Stats)
            act.Stats = sum_stats

        act.CalculateUID()
        return act

    def Dump(activity):
        """Serialize an Activity to a TCX XML string (str, UTF-8 declared)."""

        root = etree.Element("TrainingCenterDatabase", nsmap=TCXIO.Namespaces)
        activities = etree.SubElement(root, "Activities")
        act = etree.SubElement(activities, "Activity")

        # Tag the file so Parse() can recognise round-tripped documents.
        author = etree.SubElement(root, "Author")
        author.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "Application_t"
        etree.SubElement(author, "Name").text = "tapiriik"
        build = etree.SubElement(author, "Build")
        version = etree.SubElement(build, "Version")
        etree.SubElement(version, "VersionMajor").text = "0"
        etree.SubElement(version, "VersionMinor").text = "0"
        etree.SubElement(version, "BuildMajor").text = "0"
        etree.SubElement(version, "BuildMinor").text = "0"
        etree.SubElement(author, "LangID").text = "en"
        etree.SubElement(author, "PartNumber").text = "000-00000-00"

        dateFormat = "%Y-%m-%dT%H:%M:%S.000Z"

        if activity.Type == ActivityType.Cycling:
            act.attrib["Sport"] = "Biking"
        elif activity.Type == ActivityType.Running:
            act.attrib["Sport"] = "Running"
        else:
            act.attrib["Sport"] = "Other"

        etree.SubElement(act, "Id").text = activity.StartTime.astimezone(UTC).strftime(dateFormat)

        def _writeStat(parent, elName, value, wrapValue=False, naturalValue=False, default=None):
            # Emit <elName>value</elName> (optionally wrapped in <Value>,
            # optionally truncated to an integer) when value or default is set.
            if value is not None or default is not None:
                xstat = etree.SubElement(parent, elName)
                if wrapValue:
                    xstat = etree.SubElement(xstat, "Value")
                value = value if value is not None else default
                xstat.text = str(value) if not naturalValue else str(int(value))

        xlaps = []
        for lap in activity.Laps:
            xlap = etree.SubElement(act, "Lap")
            xlaps.append(xlap)

            xlap.attrib["StartTime"] = lap.StartTime.astimezone(UTC).strftime(dateFormat)

            _writeStat(xlap, "TotalTimeSeconds", lap.Stats.TimerTime.asUnits(ActivityStatisticUnit.Seconds).Value if lap.Stats.TimerTime.Value else None, default=(lap.EndTime - lap.StartTime).total_seconds())
            _writeStat(xlap, "DistanceMeters", lap.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value)
            _writeStat(xlap, "MaximumSpeed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Max)
            _writeStat(xlap, "Calories", lap.Stats.Energy.asUnits(ActivityStatisticUnit.Kilocalories).Value, default=0, naturalValue=True)
            _writeStat(xlap, "AverageHeartRateBpm", lap.Stats.HR.Average, naturalValue=True, wrapValue=True)
            _writeStat(xlap, "MaximumHeartRateBpm", lap.Stats.HR.Max, naturalValue=True, wrapValue=True)
            etree.SubElement(xlap, "Intensity").text = "Resting" if lap.Intensity == LapIntensity.Rest else "Active"
            _writeStat(xlap, "Cadence", lap.Stats.Cadence.Average, naturalValue=True)
            # Fix: the original dict listed LapTriggerMethod.PositionMarked
            # twice (with the same value); the duplicate key is removed.
            etree.SubElement(xlap, "TriggerMethod").text = ({
                LapTriggerMethod.Manual: "Manual",
                LapTriggerMethod.Distance: "Distance",
                LapTriggerMethod.Time: "Time",
                LapTriggerMethod.PositionStart: "Location",
                LapTriggerMethod.PositionLap: "Location",
                LapTriggerMethod.PositionMarked: "Location",
                LapTriggerMethod.SessionEnd: "Manual",
                LapTriggerMethod.FitnessEquipment: "Manual"
                })[lap.Trigger]

            if len([x for x in [lap.Stats.Cadence.Max, lap.Stats.RunCadence.Max, lap.Stats.RunCadence.Average, lap.Stats.Strides.Value, lap.Stats.Power.Max, lap.Stats.Power.Average, lap.Stats.Speed.Average] if x is not None]):
                exts = etree.SubElement(xlap, "Extensions")
                lapext = etree.SubElement(exts, "LX")
                lapext.attrib["xmlns"] = "http://www.garmin.com/xmlschemas/ActivityExtension/v2"
                _writeStat(lapext, "MaxBikeCadence", lap.Stats.Cadence.Max, naturalValue=True)
                # This dividing-by-two stuff is getting silly
                _writeStat(lapext, "MaxRunCadence", lap.Stats.RunCadence.Max if lap.Stats.RunCadence.Max is not None else None, naturalValue=True)
                _writeStat(lapext, "AvgRunCadence", lap.Stats.RunCadence.Average if lap.Stats.RunCadence.Average is not None else None, naturalValue=True)
                _writeStat(lapext, "Steps", lap.Stats.Strides.Value, naturalValue=True)
                _writeStat(lapext, "MaxWatts", lap.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Max, naturalValue=True)
                _writeStat(lapext, "AvgWatts", lap.Stats.Power.asUnits(ActivityStatisticUnit.Watts).Average, naturalValue=True)
                _writeStat(lapext, "AvgSpeed", lap.Stats.Speed.asUnits(ActivityStatisticUnit.MetersPerSecond).Average)

        inPause = False
        for lap in activity.Laps:
            xlap = xlaps[activity.Laps.index(lap)]
            track = None
            for wp in lap.Waypoints:
                if wp.Type == WaypointType.Pause:
                    if inPause:
                        continue # this used to be an exception, but I don't think that was merited
                    inPause = True
                if inPause and wp.Type != WaypointType.Pause:
                    inPause = False
                if track is None: # Defer creating the track until there are points
                    track = etree.SubElement(xlap, "Track") # TODO - pauses should create new tracks instead of new laps?
                trkpt = etree.SubElement(track, "Trackpoint")
                if wp.Timestamp.tzinfo is None:
                    raise ValueError("TCX export requires TZ info")
                etree.SubElement(trkpt, "Time").text = wp.Timestamp.astimezone(UTC).strftime(dateFormat)
                if wp.Location:
                    if wp.Location.Latitude is not None and wp.Location.Longitude is not None:
                        pos = etree.SubElement(trkpt, "Position")
                        etree.SubElement(pos, "LatitudeDegrees").text = str(wp.Location.Latitude)
                        etree.SubElement(pos, "LongitudeDegrees").text = str(wp.Location.Longitude)

                    if wp.Location.Altitude is not None:
                        etree.SubElement(trkpt, "AltitudeMeters").text = str(wp.Location.Altitude)

                if wp.Distance is not None:
                    etree.SubElement(trkpt, "DistanceMeters").text = str(wp.Distance)

                if wp.HR is not None:
                    xhr = etree.SubElement(trkpt, "HeartRateBpm")
                    xhr.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "HeartRateInBeatsPerMinute_t"
                    etree.SubElement(xhr, "Value").text = str(int(wp.HR))
                if wp.Cadence is not None:
                    etree.SubElement(trkpt, "Cadence").text = str(int(wp.Cadence))
                if wp.Power is not None or wp.RunCadence is not None or wp.Speed is not None:
                    exts = etree.SubElement(trkpt, "Extensions")
                    gpxtpxexts = etree.SubElement(exts, "TPX")
                    gpxtpxexts.attrib["xmlns"] = "http://www.garmin.com/xmlschemas/ActivityExtension/v2"
                    if wp.Speed is not None:
                        etree.SubElement(gpxtpxexts, "Speed").text = str(wp.Speed)
                    if wp.RunCadence is not None:
                        etree.SubElement(gpxtpxexts, "RunCadence").text = str(int(wp.RunCadence))
                    if wp.Power is not None:
                        etree.SubElement(gpxtpxexts, "Watts").text = str(int(wp.Power))
            if track is not None:
                # Schema requires the lap's Extensions element after Track;
                # move it if it was created before the track existed.
                exts = xlap.find("Extensions")
                if exts is not None:
                    track.addnext(exts)

        # Name and Notes share the single TCX Notes element; Parse() treats
        # the first line as the name and the remainder as notes.
        if activity.Name is not None and activity.Notes is not None:
            etree.SubElement(act, "Notes").text = '\n'.join((activity.Name, activity.Notes))
        elif activity.Name is not None:
            etree.SubElement(act, "Notes").text = activity.Name
        elif activity.Notes is not None:
            etree.SubElement(act, "Notes").text = '\n' + activity.Notes

        if activity.Device and activity.Device.Identifier:
            devId = DeviceIdentifier.FindEquivalentIdentifierOfType(DeviceIdentifierType.TCX, activity.Device.Identifier)
            if devId:
                xcreator = etree.SubElement(act, "Creator")
                xcreator.attrib["{" + TCXIO.Namespaces["xsi"] + "}type"] = "Device_t"
                etree.SubElement(xcreator, "Name").text = devId.Name
                etree.SubElement(xcreator, "UnitId").text = str(activity.Device.Serial) if activity.Device.Serial else "0"
                etree.SubElement(xcreator, "ProductID").text = str(devId.ProductID)
                xver = etree.SubElement(xcreator, "Version")
                etree.SubElement(xver, "VersionMajor").text = str(activity.Device.VersionMajor) if activity.Device.VersionMajor else "0" # Blegh.
                etree.SubElement(xver, "VersionMinor").text = str(activity.Device.VersionMinor) if activity.Device.VersionMinor else "0"
                etree.SubElement(xver, "BuildMajor").text = "0"
                etree.SubElement(xver, "BuildMinor").text = "0"

        return etree.tostring(root, pretty_print=True, xml_declaration=True, encoding="UTF-8").decode("UTF-8")
| apache-2.0 |
j-carpentier/nova | nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py | 35 | 3017 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import floating_ip_pools \
as fipp_v21
from nova.api.openstack.compute.legacy_v2.contrib import floating_ip_pools \
as fipp_v2
from nova import context
from nova import exception
from nova import network
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_get_floating_ip_pools(self, context):
    """Stub for network.api.API.get_floating_ip_pools; returns fixed pool names."""
    return ['nova', 'other']
class FloatingIpPoolTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-floating-ip-pools API controller."""

    # Controller module under test; overridden by the legacy-v2 subclass.
    floating_ip_pools = fipp_v21

    def setUp(self):
        super(FloatingIpPoolTestV21, self).setUp()
        # Replace the real network API call with a deterministic stub.
        self.stubs.Set(network.api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)
        self.context = context.RequestContext('fake', 'fake')
        self.controller = self.floating_ip_pools.FloatingIPPoolsController()
        self.req = fakes.HTTPRequest.blank('')

    def test_translate_floating_ip_pools_view(self):
        """The private view translator wraps each pool name in a dict."""
        pools = fake_get_floating_ip_pools(None, self.context)
        view = self.floating_ip_pools._translate_floating_ip_pools_view(pools)
        self.assertIn('floating_ip_pools', view)
        self.assertEqual(view['floating_ip_pools'][0]['name'],
                         pools[0])
        self.assertEqual(view['floating_ip_pools'][1]['name'],
                         pools[1])

    def test_floating_ips_pools_list(self):
        """GET index returns every pool reported by the network API."""
        res_dict = self.controller.index(self.req)
        pools = fake_get_floating_ip_pools(None, self.context)
        response = {'floating_ip_pools': [{'name': name} for name in pools]}
        self.assertEqual(res_dict, response)
class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
    """Re-run the v2.1 test cases against the legacy v2 extension module."""
    floating_ip_pools = fipp_v2
class FloatingIPPoolsPolicyEnforcementV21(test.NoDBTestCase):
    """Policy-enforcement tests for the v2.1 floating-ip-pools API."""

    def setUp(self):
        super(FloatingIPPoolsPolicyEnforcementV21, self).setUp()
        self.controller = fipp_v21.FloatingIPPoolsController()
        self.req = fakes.HTTPRequest.blank('')

    def test_change_password_policy_failed(self):
        # NOTE(review): the method name says "change_password" but the body
        # exercises the floating-ip-pools index policy -- likely a
        # copy/paste name from another test module.
        rule_name = "os_compute_api:os-floating-ip-pools"
        rule = {rule_name: "project:non_fake"}
        self.policy.set_rules(rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, self.controller.index, self.req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." %
            rule_name, exc.format_message())
| apache-2.0 |
Ivoz/pip | pip/_vendor/requests/packages/charade/sjisprober.py | 1182 | 3734 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import SJISDistributionAnalysis
from .jpcntx import SJISContextAnalysis
from .mbcssm import SJISSMModel
from . import constants
class SJISProber(MultiByteCharSetProber):
    """Charset prober for Shift_JIS.

    Drives a byte-level coding state machine and feeds every completed
    character to both a distribution analyser and a context analyser;
    the reported confidence is the maximum of the two.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(SJISSMModel)
        self._mDistributionAnalyzer = SJISDistributionAnalysis()
        self._mContextAnalyzer = SJISContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "SHIFT_JIS"

    def feed(self, aBuf):
        # Walk the buffer one byte at a time through the state machine.
        aLen = len(aBuf)
        for i in range(0, aLen):
            codingState = self._mCodingSM.next_state(aBuf[i])
            if codingState == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(i)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            elif codingState == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            elif codingState == constants.eStart:
                # A complete character was consumed; hand it to the analysers.
                # NOTE(review): the slice arithmetic assumes charLen is 1 or 2
                # (single-byte vs. double-byte Shift_JIS) -- verify against
                # SJISSMModel. _mLastChar is the 2-byte carry buffer inherited
                # from MultiByteCharSetProber.
                charLen = self._mCodingSM.get_current_charlen()
                if i == 0:
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar[2 - charLen:],
                                                charLen)
                    self._mDistributionAnalyzer.feed(self._mLastChar, charLen)
                else:
                    self._mContextAnalyzer.feed(aBuf[i + 1 - charLen:i + 3
                                                     - charLen], charLen)
                    self._mDistributionAnalyzer.feed(aBuf[i - 1:i + 1],
                                                     charLen)
        # Remember the final byte so a character split across feed() calls
        # can be reassembled on the next call.
        self._mLastChar[0] = aBuf[aLen - 1]
        if self.get_state() == constants.eDetecting:
            # Shortcut: stop probing early once confidence is high enough.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt
        return self.get_state()

    def get_confidence(self):
        contxtCf = self._mContextAnalyzer.get_confidence()
        distribCf = self._mDistributionAnalyzer.get_confidence()
        return max(contxtCf, distribCf)
| mit |
Aristocles/CouchPotatoServer | libs/gntp/core.py | 92 | 13975 | # Copyright: 2013 Paul Traylor
# These sources are released under the terms of the MIT license: see LICENSE
import hashlib
import re
import time
import gntp.shim
import gntp.errors as errors
__all__ = [
'GNTPRegister',
'GNTPNotice',
'GNTPSubscribe',
'GNTPOK',
'GNTPError',
'parse_gntp',
]
#GNTP/<version> <messagetype> <encryptionAlgorithmID>[:<ivValue>][ <keyHashAlgorithmID>:<keyHash>.<salt>]
GNTP_INFO_LINE = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)' +
' (?P<encryptionAlgorithmID>[A-Z0-9]+(:(?P<ivValue>[A-F0-9]+))?) ?' +
'((?P<keyHashAlgorithmID>[A-Z0-9]+):(?P<keyHash>[A-F0-9]+).(?P<salt>[A-F0-9]+))?\r\n',
re.IGNORECASE
)
GNTP_INFO_LINE_SHORT = re.compile(
'GNTP/(?P<version>\d+\.\d+) (?P<messagetype>REGISTER|NOTIFY|SUBSCRIBE|\-OK|\-ERROR)',
re.IGNORECASE
)
GNTP_HEADER = re.compile('([\w-]+):(.+)')
GNTP_EOL = gntp.shim.b('\r\n')
GNTP_SEP = gntp.shim.b(': ')
class _GNTPBuffer(gntp.shim.StringIO):
    """Byte buffer for assembling GNTP wire messages with CRLF line endings."""

    def writeln(self, value=None):
        # An empty/None value emits a bare CRLF (section separator).
        if value:
            self.write(gntp.shim.b(value))
        self.write(GNTP_EOL)

    def writeheader(self, key, value):
        # Headers are serialized as "Key: Value\r\n"; non-string values
        # are stringified first.
        text = value if isinstance(value, str) else str(value)
        self.write(gntp.shim.b(key))
        self.write(GNTP_SEP)
        self.write(gntp.shim.b(text))
        self.write(GNTP_EOL)
class _GNTPBase(object):
    """Base initilization

    :param string messagetype: GNTP Message type
    :param string version: GNTP Protocol version
    :param string encription: Encryption protocol
    """

    def __init__(self, messagetype=None, version='1.0', encryption=None):
        # Info-line fields (first line of every GNTP message).
        self.info = {
            'version': version,
            'messagetype': messagetype,
            'encryptionAlgorithmID': encryption
        }
        # Key-hash algorithms supported for password validation, keyed by
        # their GNTP identifier.
        self.hash_algo = {
            'MD5': hashlib.md5,
            'SHA1': hashlib.sha1,
            'SHA256': hashlib.sha256,
            'SHA512': hashlib.sha512,
        }
        self.headers = {}
        self.resources = {}

    def __str__(self):
        # The string form of a message is its full wire encoding.
        return self.encode()

    def _parse_info(self, data):
        """Parse the first line of a GNTP message to get security and other info values

        :param string data: GNTP Message
        :return dict: Parsed GNTP Info line
        """
        match = GNTP_INFO_LINE.match(data)
        if not match:
            raise errors.ParseError('ERROR_PARSING_INFO_LINE')
        info = match.groupdict()
        # 'NONE' on the wire means "no encryption"; normalize to None.
        if info['encryptionAlgorithmID'] == 'NONE':
            info['encryptionAlgorithmID'] = None
        return info

    def set_password(self, password, encryptAlgo='MD5'):
        """Set a password for a GNTP Message

        :param string password: Null to clear password
        :param string encryptAlgo: Supports MD5, SHA1, SHA256, SHA512
        """
        if not password:
            # Clearing the password disables both encryption and key hashing.
            self.info['encryptionAlgorithmID'] = None
            self.info['keyHashAlgorithm'] = None
            return
        self.password = gntp.shim.b(password)
        self.encryptAlgo = encryptAlgo.upper()
        if not self.encryptAlgo in self.hash_algo:
            raise errors.UnsupportedError('INVALID HASH "%s"' % self.encryptAlgo)
        hashfunction = self.hash_algo.get(self.encryptAlgo)
        password = password.encode('utf8')
        # The current time seeds the per-message salt.
        seed = time.ctime().encode('utf8')
        salt = hashfunction(seed).hexdigest()
        saltHash = hashfunction(seed).digest()
        # keyHash = H(H(password + salt)) per the GNTP security scheme.
        keyBasis = password + saltHash
        key = hashfunction(keyBasis).digest()
        keyHash = hashfunction(key).hexdigest()
        self.info['keyHashAlgorithmID'] = self.encryptAlgo
        self.info['keyHash'] = keyHash.upper()
        self.info['salt'] = salt.upper()

    def _decode_hex(self, value):
        """Helper function to decode hex string to `proper` hex string

        :param string value: Human readable hex string
        :return string: Hex string
        """
        # NOTE(review): builds a native str via chr(); on Python 3 this is
        # text, not bytes -- confirm callers tolerate that.
        result = ''
        for i in range(0, len(value), 2):
            tmp = int(value[i:i + 2], 16)
            result += chr(tmp)
        return result

    def _decode_binary(self, rawIdentifier, identifier):
        # Locate the binary payload that follows this resource's header
        # section in the raw message and slice out exactly Length bytes.
        rawIdentifier += '\r\n\r\n'
        dataLength = int(identifier['Length'])
        pointerStart = self.raw.find(rawIdentifier) + len(rawIdentifier)
        pointerEnd = pointerStart + dataLength
        data = self.raw[pointerStart:pointerEnd]
        if not len(data) == dataLength:
            raise errors.ParseError('INVALID_DATA_LENGTH Expected: %s Recieved %s' % (dataLength, len(data)))
        return data

    def _validate_password(self, password):
        """Validate GNTP Message against stored password"""
        self.password = password
        if password is None:
            raise errors.AuthError('Missing password')
        keyHash = self.info.get('keyHash', None)
        # No hash in the message and no local password: nothing to check.
        if keyHash is None and self.password is None:
            return True
        if keyHash is None:
            raise errors.AuthError('Invalid keyHash')
        if self.password is None:
            raise errors.AuthError('Missing password')
        keyHashAlgorithmID = self.info.get('keyHashAlgorithmID','MD5')
        # Recompute the key hash from the local password and the message's
        # salt; it must match the hash the sender put on the info line.
        # NOTE(review): self.password.encode('utf8') assumes a text password;
        # confirm it is never already bytes here.
        password = self.password.encode('utf8')
        saltHash = self._decode_hex(self.info['salt'])
        keyBasis = password + saltHash
        self.key = self.hash_algo[keyHashAlgorithmID](keyBasis).digest()
        keyHash = self.hash_algo[keyHashAlgorithmID](self.key).hexdigest()
        if not keyHash.upper() == self.info['keyHash'].upper():
            raise errors.AuthError('Invalid Hash')
        return True

    def validate(self):
        """Verify required headers"""
        for header in self._requiredHeaders:
            if not self.headers.get(header, False):
                raise errors.ParseError('Missing Notification Header: ' + header)

    def _format_info(self):
        """Generate info line for GNTP Message

        :return string:
        """
        info = 'GNTP/%s %s' % (
            self.info.get('version'),
            self.info.get('messagetype'),
        )
        if self.info.get('encryptionAlgorithmID', None):
            info += ' %s:%s' % (
                self.info.get('encryptionAlgorithmID'),
                self.info.get('ivValue'),
            )
        else:
            info += ' NONE'
        if self.info.get('keyHashAlgorithmID', None):
            info += ' %s:%s.%s' % (
                self.info.get('keyHashAlgorithmID'),
                self.info.get('keyHash'),
                self.info.get('salt')
            )
        return info

    def _parse_dict(self, data):
        """Helper function to parse blocks of GNTP headers into a dictionary

        :param string data:
        :return dict: Dictionary of parsed GNTP Headers
        """
        d = {}
        for line in data.split('\r\n'):
            match = GNTP_HEADER.match(line)
            if not match:
                continue
            key = match.group(1).strip()
            val = match.group(2).strip()
            d[key] = val
        return d

    def add_header(self, key, value):
        # Later calls with the same key overwrite earlier ones.
        self.headers[key] = value

    def add_resource(self, data):
        """Add binary resource

        :param string data: Binary Data
        """
        # Resources are addressed by the MD5 of their content, as required
        # by the x-growl-resource URI scheme.
        data = gntp.shim.b(data)
        identifier = hashlib.md5(data).hexdigest()
        self.resources[identifier] = data
        return 'x-growl-resource://%s' % identifier

    def decode(self, data, password=None):
        """Decode GNTP Message

        :param string data:
        """
        # Generic decoder: only the info line and the first header section
        # are parsed; subclasses override to handle extra sections.
        self.password = password
        self.raw = gntp.shim.u(data)
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self.headers = self._parse_dict(parts[0])

    def encode(self):
        """Encode a generic GNTP Message

        :return string: GNTP Message ready to be sent. Returned as a byte string
        """
        buff = _GNTPBuffer()
        buff.writeln(self._format_info())
        #Headers
        for k, v in self.headers.items():
            buff.writeheader(k, v)
        buff.writeln()
        #Resources
        for resource, data in self.resources.items():
            buff.writeheader('Identifier', resource)
            buff.writeheader('Length', len(data))
            buff.writeln()
            buff.write(data)
            buff.writeln()
            buff.writeln()
        return buff.getvalue()
class GNTPRegister(_GNTPBase):
    """Represents a GNTP Registration Command

    :param string data: (Optional) See decode()
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Application-Name',
        'Notifications-Count'
    ]
    _requiredNotificationHeaders = ['Notification-Name']

    def __init__(self, data=None, password=None):
        _GNTPBase.__init__(self, 'REGISTER')
        self.notifications = []
        if data:
            # Parse an existing message off the wire.
            self.decode(data, password)
        else:
            # Build a fresh registration with default headers.
            self.set_password(password)
            self.add_header('Application-Name', 'pygntp')
            self.add_header('Notifications-Count', 0)

    def validate(self):
        '''Validate required headers and validate notification headers'''
        for header in self._requiredHeaders:
            if not self.headers.get(header, False):
                raise errors.ParseError('Missing Registration Header: ' + header)
        for notice in self.notifications:
            for header in self._requiredNotificationHeaders:
                if not notice.get(header, False):
                    raise errors.ParseError('Missing Notification Header: ' + header)

    def decode(self, data, password):
        """Decode existing GNTP Registration message

        :param string data: Message to decode
        """
        self.raw = gntp.shim.u(data)
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self._validate_password(password)
        self.headers = self._parse_dict(parts[0])
        for i, part in enumerate(parts):
            if i == 0:
                continue  # Skip Header
            if part.strip() == '':
                continue
            # Sections with a Notification-Name define notification types;
            # sections with an Identifier carry binary resources.
            notice = self._parse_dict(part)
            if notice.get('Notification-Name', False):
                self.notifications.append(notice)
            elif notice.get('Identifier', False):
                notice['Data'] = self._decode_binary(part, notice)
                #open('register.png','wblol').write(notice['Data'])
                self.resources[notice.get('Identifier')] = notice

    def add_notification(self, name, enabled=True):
        """Add new Notification to Registration message

        :param string name: Notification Name
        :param boolean enabled: Enable this notification by default
        """
        notice = {}
        notice['Notification-Name'] = name
        notice['Notification-Enabled'] = enabled
        self.notifications.append(notice)
        # Keep the header in sync with the actual notification count.
        self.add_header('Notifications-Count', len(self.notifications))

    def encode(self):
        """Encode a GNTP Registration Message

        :return string: Encoded GNTP Registration message. Returned as a byte string
        """
        buff = _GNTPBuffer()
        buff.writeln(self._format_info())
        #Headers
        for k, v in self.headers.items():
            buff.writeheader(k, v)
        buff.writeln()
        #Notifications
        if len(self.notifications) > 0:
            for notice in self.notifications:
                for k, v in notice.items():
                    buff.writeheader(k, v)
                buff.writeln()
        #Resources
        for resource, data in self.resources.items():
            buff.writeheader('Identifier', resource)
            buff.writeheader('Length', len(data))
            buff.writeln()
            buff.write(data)
            buff.writeln()
            buff.writeln()
        return buff.getvalue()
class GNTPNotice(_GNTPBase):
    """Represents a GNTP Notification Command

    :param string data: (Optional) See decode()
    :param string app: (Optional) Set Application-Name
    :param string name: (Optional) Set Notification-Name
    :param string title: (Optional) Set Notification Title
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Application-Name',
        'Notification-Name',
        'Notification-Title'
    ]

    def __init__(self, data=None, app=None, name=None, title=None, password=None):
        _GNTPBase.__init__(self, 'NOTIFY')
        if data:
            # Parse an existing message off the wire.
            self.decode(data, password)
        else:
            # Build a fresh notification from the given fields.
            self.set_password(password)
            if app:
                self.add_header('Application-Name', app)
            if name:
                self.add_header('Notification-Name', name)
            if title:
                self.add_header('Notification-Title', title)

    def decode(self, data, password):
        """Decode existing GNTP Notification message

        :param string data: Message to decode.
        """
        self.raw = gntp.shim.u(data)
        parts = self.raw.split('\r\n\r\n')
        self.info = self._parse_info(self.raw)
        self._validate_password(password)
        self.headers = self._parse_dict(parts[0])
        for i, part in enumerate(parts):
            if i == 0:
                continue  # Skip Header
            if part.strip() == '':
                continue
            # Any section carrying an Identifier is a binary resource.
            notice = self._parse_dict(part)
            if notice.get('Identifier', False):
                notice['Data'] = self._decode_binary(part, notice)
                #open('notice.png','wblol').write(notice['Data'])
                self.resources[notice.get('Identifier')] = notice
class GNTPSubscribe(_GNTPBase):
    """Represents a GNTP Subscribe Command

    :param string data: (Optional) See decode()
    :param string password: (Optional) Password to use while encoding/decoding messages
    """
    _requiredHeaders = [
        'Subscriber-ID',
        'Subscriber-Name',
    ]

    def __init__(self, data=None, password=None):
        _GNTPBase.__init__(self, 'SUBSCRIBE')
        if not data:
            # Building a fresh message: just arm the (optional) password.
            self.set_password(password)
        else:
            # Parsing an existing message off the wire.
            self.decode(data, password)
class GNTPOK(_GNTPBase):
    """Represents a GNTP OK Response

    :param string data: (Optional) See _GNTPResponse.decode()
    :param string action: (Optional) Set type of action the OK Response is for
    """
    _requiredHeaders = ['Response-Action']

    def __init__(self, data=None, action=None):
        _GNTPBase.__init__(self, '-OK')
        if data:
            self.decode(data)
        if action:
            # Echo back which request type this -OK acknowledges.
            self.add_header('Response-Action', action)
class GNTPError(_GNTPBase):
    """Represents a GNTP Error response

    :param string data: (Optional) See _GNTPResponse.decode()
    :param string errorcode: (Optional) Error code
    :param string errordesc: (Optional) Error Description
    """
    _requiredHeaders = ['Error-Code', 'Error-Description']

    def __init__(self, data=None, errorcode=None, errordesc=None):
        _GNTPBase.__init__(self, '-ERROR')
        if data:
            self.decode(data)
        if errorcode:
            self.add_header('Error-Code', errorcode)
            self.add_header('Error-Description', errordesc)

    def error(self):
        """Return the ``(code, description)`` pair carried by this response."""
        return (self.headers.get('Error-Code', None),
                self.headers.get('Error-Description', None))
def parse_gntp(data, password=None):
    """Attempt to parse a message as a GNTP message

    :param string data: Message to be parsed
    :param string password: Optional password to be used to verify the message
    """
    data = gntp.shim.u(data)
    match = GNTP_INFO_LINE_SHORT.match(data)
    if not match:
        raise errors.ParseError('INVALID_GNTP_INFO')
    # Dispatch on the message type captured from the info line.  Request
    # types take a password; response types (-OK / -ERROR) do not.
    factories = {
        'REGISTER': lambda: GNTPRegister(data, password=password),
        'NOTIFY': lambda: GNTPNotice(data, password=password),
        'SUBSCRIBE': lambda: GNTPSubscribe(data, password=password),
        '-OK': lambda: GNTPOK(data),
        '-ERROR': lambda: GNTPError(data),
    }
    messagetype = match.groupdict()['messagetype']
    if messagetype not in factories:
        raise errors.ParseError('INVALID_GNTP_MESSAGE')
    return factories[messagetype]()
| gpl-3.0 |
DirkHoffmann/indico | indico/util/console.py | 3 | 4266 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import fcntl
import re
import struct
import sys
import termios
import time
from datetime import timedelta
import click
from click.types import convert_type
from colorclass import Color
from termcolor import colored
from indico.util.date_time import format_human_timedelta
from indico.util.string import validate_email
def prompt_email(prompt='Email', default=None, confirm=False):
    """Interactively prompt for a syntactically valid email address."""
    conv = convert_type(None)

    def _proc_email(val):
        # Normalize whitespace, then reject anything that fails validation
        # so click re-prompts with a usage error.
        stripped = conv(val).strip()
        if not validate_email(stripped):
            raise click.UsageError('invalid email')
        return stripped

    return click.prompt(prompt, default=default, confirmation_prompt=confirm,
                        value_proc=_proc_email)
def prompt_pass(prompt='Password', min_length=8, confirm=True):
    """Prompt for a password until a non-empty, long-enough one is entered."""
    while True:
        pwd = click.prompt(prompt, hide_input=True,
                           confirmation_prompt=confirm).strip()
        if not pwd:
            # Blank entry: silently re-prompt.
            continue
        if min_length and len(pwd) < min_length:
            click.echo(f"Password is too short (must be at least {min_length} chars)")
            continue
        return pwd
def terminal_size():
    """Return the terminal size as ``(width, height)`` in characters."""
    # Ask the controlling terminal (fd 0) for its window size via the
    # TIOCGWINSZ ioctl; the kernel fills a struct winsize of four unsigned
    # shorts: rows, columns, x-pixels, y-pixels.
    h, w, hp, wp = struct.unpack(b'HHHH', fcntl.ioctl(0, termios.TIOCGWINSZ, struct.pack(b'HHHH', 0, 0, 0, 0)))
    return w, h
def clear_line():
    """Clear the current line in the terminal."""
    # Overwrite the whole line with spaces, then return to column 0.
    width = terminal_size()[0]
    print('\r' + ' ' * width + '\r', end='')
def verbose_iterator(iterable, total, get_id, get_title=None, print_every=10, print_total_time=False):
    """Iterate large iterables verbosely.

    :param iterable: An iterable
    :param total: The number of items in `iterable`
    :param get_id: callable to retrieve the ID of an item
    :param get_title: callable to retrieve the title of an item
    :param print_every: after which number of items to update the progress
    :param print_total_time: whether to print the total time spent at the end
    """
    term_width = terminal_size()[0]
    start_time = time.time()
    # Progress line template: [current/total percent ETA] id title
    fmt = cformat(
        '[%{cyan!}{:6}%{reset}/%{cyan}{}%{reset} %{yellow!}{:.3f}%{reset}% %{green!}{}%{reset}] {:>8} %{grey!}{}'
    )
    end_fmt = cformat(
        '[%{cyan!}{:6}%{reset}/%{cyan}{}%{reset} %{yellow!}{:.3f}%{reset}% %{green!}{}%{reset}] '
        'Total duration: %{green}{}'
    )

    def _print_text(text):
        # Blank the line, then redraw the progress text in place.
        print('\r', ' ' * term_width, end='', sep='')
        # terminal width + ansi control code length - trailing reset code (4)
        print('\r', text[:term_width + len(text.value_colors) - len(text.value_no_colors) - 4], cformat('%{reset}'),
              end='', sep='')
        sys.stdout.flush()

    for i, item in enumerate(iterable, 1):
        if i % print_every == 0 or i == total:
            # Linear ETA based on the average time per processed item so far.
            remaining_seconds = int((time.time() - start_time) / i * (total - i))
            minutes, seconds = divmod(remaining_seconds, 60)
            remaining = f'{minutes:02}:{seconds:02}'
            id_ = get_id(item)
            title = get_title(item).replace('\n', ' ') if get_title else ''
            text = fmt.format(i, total, (i / total * 100.0), remaining, id_, title)
            _print_text(text)
        yield item

    if print_total_time:
        total_duration = timedelta(seconds=(time.time() - start_time))
        _print_text(end_fmt.format(total, total, 100, '00:00', format_human_timedelta(total_duration)))
        print()
def _cformat_sub(m):
    """Regex-replacement helper: turn a ``%{fg[!][,bg]}`` match into the
    corresponding ANSI escape prefix."""
    bg = 'on_{}'.format(m.group('bg')) if m.group('bg') else None
    attrs = ['bold'] if m.group('fg_bold') else None
    # colored('', ...) yields the escape codes followed by the 4-char reset
    # sequence; strip the reset so only the "turn on" codes remain.
    return colored('', m.group('fg'), bg, attrs=attrs)[:-4]
def cformat(string):
    """Replace %{color} and %{color,bgcolor} with ansi colors.

    Bold foreground can be achieved by suffixing the color with a '!'.
    """
    # A bare colored() call produces just the ANSI reset sequence.
    reset = colored('')
    string = string.replace('%{reset}', reset)
    string = re.sub(r'%\{(?P<fg>[a-z]+)(?P<fg_bold>!?)(?:,(?P<bg>[a-z]+))?}', _cformat_sub, string)
    # Always restore the terminal state at the end of the string.
    if not string.endswith(reset):
        string += reset
    return Color(string)
| gpl-3.0 |
MoKee/android_kernel_samsung_piranha | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return an infinitely nested defaultdict (auto-vivifying mapping)."""
    return defaultdict(autodict)
# Per-event field metadata registered by the generated trace handlers:
#   flag_fields[event][field] -> {'delim': str, 'values': {bit: name}}
#   symbolic_fields[event][field] -> {'values': {value: name}}
flag_fields = autodict()
symbolic_fields = autodict()


def define_flag_field(event_name, field_name, delim):
    # Record the separator used when joining multiple flag names.
    flag_fields[event_name][field_name]['delim'] = delim


def define_flag_value(event_name, field_name, value, field_str):
    flag_fields[event_name][field_name]['values'][value] = field_str


def define_symbolic_field(event_name, field_name):
    # nothing to do, really
    pass


def define_symbolic_value(event_name, field_name, value, field_str):
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Translate a flag-field bitmask into its delimited symbolic names.

    Unknown bits are silently dropped; a 0 value maps to the name
    registered for bit 0, if any.
    """
    string = ""
    if flag_fields[event_name][field_name]:
        print_delim = 0
        # Iterate flag bits in ascending order.  sorted() replaces the old
        # "keys = d.keys(); keys.sort()" idiom, which fails on Python 3
        # where dict views have no sort() method; behavior on Python 2 is
        # unchanged.
        for idx in sorted(flag_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string += flag_fields[event_name][field_name]['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and flag_fields[event_name][field_name]['delim']:
                    string += " " + flag_fields[event_name][field_name]['delim'] + " "
                string += flag_fields[event_name][field_name]['values'][idx]
                print_delim = 1
                # Clear the bit so leftover bits don't re-match.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Translate a symbolic-field value into its registered name.

    Returns "" when the value has no registered name.
    """
    string = ""
    if symbolic_fields[event_name][field_name]:
        # sorted() replaces the old "keys = d.keys(); keys.sort()" idiom,
        # which fails on Python 3 where dict views have no sort() method;
        # Python 2 behavior is unchanged.
        for idx in sorted(symbolic_fields[event_name][field_name]['values'].keys()):
            if not value and not idx:
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
            if (value == idx):
                string = symbolic_fields[event_name][field_name]['values'][idx]
                break
    return string
# Ftrace common_flags bits and their display names.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}


def trace_flag_str(value):
    """Render the common_flags bitmask as '|'-separated flag names."""
    names = []
    remaining = value
    for bit in trace_flags.keys():
        if not remaining and not bit:
            # Nothing (left) to decode: emit the bit-0 name ("NONE").
            names.append(trace_flags[bit])
            break
        if bit and (remaining & bit) == bit:
            names.append(trace_flags[bit])
            remaining &= ~bit
    return " | ".join(names)
return string
def taskState(state):
    """Map a scheduler task-state code to its short display name."""
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Bundle of the common_* fields present on every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        self.cpu = common_cpu
        self.secs = common_secs
        self.nsecs = common_nsecs
        self.pid = common_pid
        self.comm = common_comm

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.secs * (10 ** 9) + self.nsecs

    def ts_format(self):
        """Timestamp formatted as '<seconds>.<microseconds>'."""
        usecs = int(self.nsecs / 1000)
        return "%d.%d" % (self.secs, usecs)
| gpl-2.0 |
cloud9ers/gurumate | environment/lib/python2.7/site-packages/IPython/utils/growl.py | 3 | 1872 | # encoding: utf-8
"""
Utilities using Growl on OS X for notifications.
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2008-2011 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
class IPythonGrowlError(Exception):
    """Raised for Growl-related errors (e.g. registering the process twice)."""
    pass
class Notifier(object):
    """Thin wrapper around the Growl bindings.

    When the ``Growl`` package is not importable, every notification call
    becomes a silent no-op instead of raising.
    """

    def __init__(self, app_name):
        try:
            import Growl
        except ImportError:
            # Growl bindings unavailable: degrade to a no-op notifier.
            self.g_notifier = None
            return
        self.g_notifier = Growl.GrowlNotifier(app_name, ['kernel', 'core'])
        self.g_notifier.register()

    def _notify(self, title, msg):
        if self.g_notifier is None:
            return
        self.g_notifier.notify('core', title, msg)

    def notify(self, title, msg):
        self._notify(title, msg)

    def notify_deferred(self, r, msg):
        # Show the deferred result alongside the caller's message, then
        # pass the result through unchanged for further callbacks.
        body = '\n'.join([msg, repr(r)])
        self._notify("Deferred Result", body)
        return r
# Module-level singleton; replaced by a real Notifier in start().
_notifier = None


def notify(title, msg):
    # No-op until start() rebinds this name to the Notifier's bound method.
    pass


def notify_deferred(r, msg):
    # Pass-through until start() rebinds this name; returning r keeps
    # deferred/callback chains intact even when Growl was never started.
    return r


def start(app_name):
    """Register this process with Growl under *app_name* (once only).

    Rebinds the module-level notify/notify_deferred functions to the new
    Notifier's methods; raises IPythonGrowlError on a second call.
    """
    global _notifier, notify, notify_deferred
    if _notifier is not None:
        raise IPythonGrowlError("this process is already registered with Growl")
    else:
        _notifier = Notifier(app_name)
        notify = _notifier.notify
        notify_deferred = _notifier.notify_deferred
| lgpl-3.0 |
fkolacek/FIT-VUT | bp-revok/python/lib/python2.7/encodings/iso8859_14.py | 593 | 13908 | """ Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless ISO 8859-14 codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental ISO 8859-14 encoder (charmap codecs keep no state)."""

    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental ISO 8859-14 decoder (charmap codecs keep no state)."""

    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream wrapper: inherits encode() from Codec, I/O from StreamWriter.
    pass

class StreamReader(Codec,codecs.StreamReader):
    # Stream wrapper: inherits decode() from Codec, I/O from StreamReader.
    pass
### encodings module API
def getregentry():
    # Registration hook: the `encodings` package calls this to obtain the
    # codec's entry points when the codec is looked up as 'iso8859-14'.
    return codecs.CodecInfo(
        name='iso8859-14',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> NULL
u'\x01' # 0x01 -> START OF HEADING
u'\x02' # 0x02 -> START OF TEXT
u'\x03' # 0x03 -> END OF TEXT
u'\x04' # 0x04 -> END OF TRANSMISSION
u'\x05' # 0x05 -> ENQUIRY
u'\x06' # 0x06 -> ACKNOWLEDGE
u'\x07' # 0x07 -> BELL
u'\x08' # 0x08 -> BACKSPACE
u'\t' # 0x09 -> HORIZONTAL TABULATION
u'\n' # 0x0A -> LINE FEED
u'\x0b' # 0x0B -> VERTICAL TABULATION
u'\x0c' # 0x0C -> FORM FEED
u'\r' # 0x0D -> CARRIAGE RETURN
u'\x0e' # 0x0E -> SHIFT OUT
u'\x0f' # 0x0F -> SHIFT IN
u'\x10' # 0x10 -> DATA LINK ESCAPE
u'\x11' # 0x11 -> DEVICE CONTROL ONE
u'\x12' # 0x12 -> DEVICE CONTROL TWO
u'\x13' # 0x13 -> DEVICE CONTROL THREE
u'\x14' # 0x14 -> DEVICE CONTROL FOUR
u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
u'\x16' # 0x16 -> SYNCHRONOUS IDLE
u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
u'\x18' # 0x18 -> CANCEL
u'\x19' # 0x19 -> END OF MEDIUM
u'\x1a' # 0x1A -> SUBSTITUTE
u'\x1b' # 0x1B -> ESCAPE
u'\x1c' # 0x1C -> FILE SEPARATOR
u'\x1d' # 0x1D -> GROUP SEPARATOR
u'\x1e' # 0x1E -> RECORD SEPARATOR
u'\x1f' # 0x1F -> UNIT SEPARATOR
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> DELETE
u'\x80' # 0x80 -> <control>
u'\x81' # 0x81 -> <control>
u'\x82' # 0x82 -> <control>
u'\x83' # 0x83 -> <control>
u'\x84' # 0x84 -> <control>
u'\x85' # 0x85 -> <control>
u'\x86' # 0x86 -> <control>
u'\x87' # 0x87 -> <control>
u'\x88' # 0x88 -> <control>
u'\x89' # 0x89 -> <control>
u'\x8a' # 0x8A -> <control>
u'\x8b' # 0x8B -> <control>
u'\x8c' # 0x8C -> <control>
u'\x8d' # 0x8D -> <control>
u'\x8e' # 0x8E -> <control>
u'\x8f' # 0x8F -> <control>
u'\x90' # 0x90 -> <control>
u'\x91' # 0x91 -> <control>
u'\x92' # 0x92 -> <control>
u'\x93' # 0x93 -> <control>
u'\x94' # 0x94 -> <control>
u'\x95' # 0x95 -> <control>
u'\x96' # 0x96 -> <control>
u'\x97' # 0x97 -> <control>
u'\x98' # 0x98 -> <control>
u'\x99' # 0x99 -> <control>
u'\x9a' # 0x9A -> <control>
u'\x9b' # 0x9B -> <control>
u'\x9c' # 0x9C -> <control>
u'\x9d' # 0x9D -> <control>
u'\x9e' # 0x9E -> <control>
u'\x9f' # 0x9F -> <control>
u'\xa0' # 0xA0 -> NO-BREAK SPACE
u'\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE
u'\u1e03' # 0xA2 -> LATIN SMALL LETTER B WITH DOT ABOVE
u'\xa3' # 0xA3 -> POUND SIGN
u'\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
u'\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE
u'\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE
u'\xa7' # 0xA7 -> SECTION SIGN
u'\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE
u'\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE
u'\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE
u'\xad' # 0xAD -> SOFT HYPHEN
u'\xae' # 0xAE -> REGISTERED SIGN
u'\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS
u'\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE
u'\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE
u'\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
u'\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE
u'\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE
u'\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE
u'\xb6' # 0xB6 -> PILCROW SIGN
u'\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE
u'\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE
u'\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE
u'\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE
u'\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE
u'\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE
u'\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS
u'\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS
u'\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE
u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
u'\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX
u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE
u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
u'\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX
u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
u'\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE
u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX
u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| apache-2.0 |
adamgreen/mbed | tools/host_tests/host_tests_plugins/host_test_plugins.py | 92 | 4881 | """
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from os import access, F_OK
from sys import stdout
from time import sleep
from subprocess import call
class HostTestPluginBase:
""" Base class for all plug-ins used with host tests.
"""
###########################################################################
# Interface:
###########################################################################
###########################################################################
# Interface attributes defining plugin name, type etc.
###########################################################################
name = "HostTestPluginBase" # Plugin name, can be plugin class name
type = "BasePlugin" # Plugin type: ResetMethod, Copymethod etc.
capabilities = [] # Capabilities names: what plugin can achieve
# (e.g. reset using some external command line tool)
stable = False # Determine if plugin is stable and can be used
###########################################################################
# Interface methods
###########################################################################
def setup(self, *args, **kwargs):
""" Configure plugin, this function should be called before plugin execute() method is used.
"""
return False
def execute(self, capabilitity, *args, **kwargs):
""" Executes capability by name.
Each capability e.g. may directly just call some command line
program or execute building pythonic function
"""
return False
###########################################################################
# Interface helper methods - overload only if you need to have custom behaviour
###########################################################################
def print_plugin_error(self, text):
""" Function prints error in console and exits always with False
"""
print "Plugin error: %s::%s: %s"% (self.name, self.type, text)
return False
def print_plugin_info(self, text, NL=True):
""" Function prints notification in console and exits always with True
"""
if NL:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text)
else:
print "Plugin info: %s::%s: %s"% (self.name, self.type, text),
return True
def print_plugin_char(self, char):
""" Function prints char on stdout
"""
stdout.write(char)
stdout.flush()
return True
def check_mount_point_ready(self, destination_disk, init_delay=0.2, loop_delay=0.25):
""" Checks if destination_disk is ready and can be accessed by e.g. copy commands
@init_delay - Initial delay time before first access check
@loop_delay - pooling delay for access check
"""
if not access(destination_disk, F_OK):
self.print_plugin_info("Waiting for mount point '%s' to be ready..."% destination_disk, NL=False)
sleep(init_delay)
while not access(destination_disk, F_OK):
sleep(loop_delay)
self.print_plugin_char('.')
def check_parameters(self, capabilitity, *args, **kwargs):
""" This function should be ran each time we call execute()
to check if none of the required parameters is missing.
"""
missing_parameters = []
for parameter in self.required_parameters:
if parameter not in kwargs:
missing_parameters.append(parameter)
if len(missing_parameters) > 0:
self.print_plugin_error("execute parameter(s) '%s' missing!"% (', '.join(parameter)))
return False
return True
def run_command(self, cmd, shell=True):
""" Runs command from command line.
"""
result = True
ret = 0
try:
ret = call(cmd, shell=shell)
if ret:
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
return False
except Exception as e:
result = False
self.print_plugin_error("[ret=%d] Command: %s"% (int(ret), cmd))
self.print_plugin_error(str(e))
return result
| apache-2.0 |
vitan/hue | desktop/core/ext-py/tablib-develop/tablib/packages/odf/dr3d.py | 99 | 1445 | # -*- coding: utf-8 -*-
# Copyright (C) 2006-2007 Søren Roug, European Environment Agency
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
#
from namespaces import DR3DNS
from element import Element
from draw import StyleRefElement
# Autogenerated
# Autogenerated element constructors for the dr3d:* ODF elements.
# Each returns a style-referencing element in the DR3D namespace, passing
# all keyword arguments (attributes, children) straight through.
def Cube(**args):
    return StyleRefElement(qname = (DR3DNS,'cube'), **args)

def Extrude(**args):
    return StyleRefElement(qname = (DR3DNS,'extrude'), **args)

def Light(**args):
    # Bug fix: this was `def Light(Element):`, which both broke the pattern
    # of the sibling constructors and left `**args` in the body unbound
    # (NameError on every call).
    return StyleRefElement(qname = (DR3DNS,'light'), **args)

def Rotate(**args):
    return StyleRefElement(qname = (DR3DNS,'rotate'), **args)

def Scene(**args):
    return StyleRefElement(qname = (DR3DNS,'scene'), **args)

def Sphere(**args):
    return StyleRefElement(qname = (DR3DNS,'sphere'), **args)
| apache-2.0 |
kubernetes-client/python | kubernetes/client/models/v1_config_map_node_config_source.py | 1 | 8452 | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.18
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ConfigMapNodeConfigSource(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'kubelet_config_key': 'str',
'name': 'str',
'namespace': 'str',
'resource_version': 'str',
'uid': 'str'
}
attribute_map = {
'kubelet_config_key': 'kubeletConfigKey',
'name': 'name',
'namespace': 'namespace',
'resource_version': 'resourceVersion',
'uid': 'uid'
}
def __init__(self, kubelet_config_key=None, name=None, namespace=None, resource_version=None, uid=None, local_vars_configuration=None): # noqa: E501
"""V1ConfigMapNodeConfigSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._kubelet_config_key = None
self._name = None
self._namespace = None
self._resource_version = None
self._uid = None
self.discriminator = None
self.kubelet_config_key = kubelet_config_key
self.name = name
self.namespace = namespace
if resource_version is not None:
self.resource_version = resource_version
if uid is not None:
self.uid = uid
@property
def kubelet_config_key(self):
"""Gets the kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
:return: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._kubelet_config_key
@kubelet_config_key.setter
def kubelet_config_key(self, kubelet_config_key):
"""Sets the kubelet_config_key of this V1ConfigMapNodeConfigSource.
KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases. # noqa: E501
:param kubelet_config_key: The kubelet_config_key of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and kubelet_config_key is None: # noqa: E501
raise ValueError("Invalid value for `kubelet_config_key`, must not be `None`") # noqa: E501
self._kubelet_config_key = kubelet_config_key
@property
def name(self):
"""Gets the name of this V1ConfigMapNodeConfigSource. # noqa: E501
Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:return: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this V1ConfigMapNodeConfigSource.
Name is the metadata.name of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:param name: The name of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and name is None: # noqa: E501
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def namespace(self):
"""Gets the namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:return: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._namespace
@namespace.setter
def namespace(self, namespace):
"""Sets the namespace of this V1ConfigMapNodeConfigSource.
Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases. # noqa: E501
:param namespace: The namespace of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and namespace is None: # noqa: E501
raise ValueError("Invalid value for `namespace`, must not be `None`") # noqa: E501
self._namespace = namespace
@property
def resource_version(self):
"""Gets the resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:return: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._resource_version
@resource_version.setter
def resource_version(self, resource_version):
"""Sets the resource_version of this V1ConfigMapNodeConfigSource.
ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:param resource_version: The resource_version of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
self._resource_version = resource_version
@property
def uid(self):
"""Gets the uid of this V1ConfigMapNodeConfigSource. # noqa: E501
UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:return: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
:rtype: str
"""
return self._uid
@uid.setter
def uid(self, uid):
"""Sets the uid of this V1ConfigMapNodeConfigSource.
UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status. # noqa: E501
:param uid: The uid of this V1ConfigMapNodeConfigSource. # noqa: E501
:type: str
"""
self._uid = uid
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ConfigMapNodeConfigSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ConfigMapNodeConfigSource):
return True
return self.to_dict() != other.to_dict()
| apache-2.0 |
sndnvaps/git_repo | command.py | 18 | 3745 | #
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import optparse
import sys
import manifest_loader
from error import NoSuchProjectError
class Command(object):
    """Base class for any command line action in repo.
    """
    # True if the command should appear in the common help summary.
    common = False
    # Lazily-created parser; see the OptionParser property.
    _optparse = None

    def WantPager(self, opt):
        """Return True if this command's output should go through a pager."""
        return False

    @property
    def OptionParser(self):
        """The optparse parser for this command, built on first access.

        Uses the subclass's helpUsage (with '%prog' substituted) when
        present, otherwise a plain 'repo NAME' usage line.
        """
        if self._optparse is None:
            try:
                me = 'repo %s' % self.NAME
                usage = self.helpUsage.strip().replace('%prog', me)
            except AttributeError:
                usage = 'repo %s' % self.NAME
            self._optparse = optparse.OptionParser(usage = usage)
            self._Options(self._optparse)
        return self._optparse

    def _Options(self, p):
        """Initialize the option parser.  Subclasses add their options here.
        """

    def Usage(self):
        """Display usage and terminate.
        """
        self.OptionParser.print_usage()
        sys.exit(1)

    def Execute(self, opt, args):
        """Perform the action, after option parsing is complete.
        """
        raise NotImplementedError

    @property
    def manifest(self):
        return self.GetManifest()

    def GetManifest(self, reparse=False, type=None):
        # NOTE: `type` shadows the builtin but is part of the public
        # keyword interface of this method, so the name is kept.
        return manifest_loader.GetManifest(self.repodir,
                                           reparse=reparse,
                                           type=type)

    def GetProjects(self, args, missing_ok=False):
        """A list of projects that match the arguments.

        With no args, every (existing) project is returned.  Each arg is
        matched first as a project name, then as a filesystem path inside
        a project's worktree.  Raises NoSuchProjectError on no match, or
        on a match that does not exist locally unless missing_ok is set.
        """
        # Renamed from `all`, which shadowed the builtin.
        projects = self.manifest.projects
        mp = self.manifest.manifestProject

        if mp.relpath == '.':
            # Include the manifest project itself when it lives at the root.
            projects = dict(projects)
            projects[mp.name] = mp

        result = []

        if not args:
            for project in projects.values():
                if missing_ok or project.Exists:
                    result.append(project)
        else:
            # Worktree-path lookup table, built lazily on first path arg.
            by_path = None

            for arg in args:
                project = projects.get(arg)

                if not project:
                    path = os.path.abspath(arg).replace('\\', '/')

                    if by_path is None:
                        by_path = {}
                        for p in projects.values():
                            by_path[p.worktree] = p

                    try:
                        project = by_path[path]
                    except KeyError:
                        # Walk up parent directories until we find a
                        # worktree, hit the manifest top, or stop moving.
                        oldpath = None
                        while path \
                              and path != oldpath \
                              and path != self.manifest.topdir:
                            try:
                                project = by_path[path]
                                break
                            except KeyError:
                                oldpath = path
                                path = os.path.dirname(path)

                if not project:
                    raise NoSuchProjectError(arg)
                if not missing_ok and not project.Exists:
                    raise NoSuchProjectError(arg)

                result.append(project)

        def _getpath(x):
            return x.relpath
        result.sort(key=_getpath)
        return result
class InteractiveCommand(Command):
    """Command which requires user interaction on the tty and
    must not run within a pager, even if the user asks to.
    """
    def WantPager(self, opt):
        # A pager would capture the tty this command needs for interaction.
        return False
class PagedCommand(Command):
    """Command which defaults to output in a pager, as its
    display tends to be larger than one screen full.
    """
    def WantPager(self, opt):
        # Default to paging; subclasses/opt handling elsewhere may override.
        return True
class MirrorSafeCommand(object):
    """Command permits itself to run within a mirror,
    and does not require a working directory.
    """
    # Marker mixin: carries no behaviour of its own.  NOTE(review):
    # presumably tested via isinstance() by the repo driver - confirm
    # against the entry point, which is outside this chunk.
| apache-2.0 |
RadioFreeAsia/RDacity | lib-src/lv2/lilv/waflib/TaskGen.py | 177 | 11938 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import copy,re,os
from waflib import Task,Utils,Logs,Errors,ConfigSet,Node
feats=Utils.defaultdict(set)
class task_gen(object):
mappings={}
prec=Utils.defaultdict(list)
def __init__(self,*k,**kw):
self.source=''
self.target=''
self.meths=[]
self.prec=Utils.defaultdict(list)
self.mappings={}
self.features=[]
self.tasks=[]
if not'bld'in kw:
self.env=ConfigSet.ConfigSet()
self.idx=0
self.path=None
else:
self.bld=kw['bld']
self.env=self.bld.env.derive()
self.path=self.bld.path
try:
self.idx=self.bld.idx[id(self.path)]=self.bld.idx.get(id(self.path),0)+1
except AttributeError:
self.bld.idx={}
self.idx=self.bld.idx[id(self.path)]=1
for key,val in kw.items():
setattr(self,key,val)
def __str__(self):
return"<task_gen %r declared in %s>"%(self.name,self.path.abspath())
def __repr__(self):
lst=[]
for x in self.__dict__.keys():
if x not in['env','bld','compiled_tasks','tasks']:
lst.append("%s=%s"%(x,repr(getattr(self,x))))
return"bld(%s) in %s"%(", ".join(lst),self.path.abspath())
def get_name(self):
try:
return self._name
except AttributeError:
if isinstance(self.target,list):
lst=[str(x)for x in self.target]
name=self._name=','.join(lst)
else:
name=self._name=str(self.target)
return name
def set_name(self,name):
self._name=name
name=property(get_name,set_name)
def to_list(self,val):
if isinstance(val,str):return val.split()
else:return val
def post(self):
if getattr(self,'posted',None):
return False
self.posted=True
keys=set(self.meths)
self.features=Utils.to_list(self.features)
for x in self.features+['*']:
st=feats[x]
if not st:
if not x in Task.classes:
Logs.warn('feature %r does not exist - bind at least one method to it'%x)
keys.update(list(st))
prec={}
prec_tbl=self.prec or task_gen.prec
for x in prec_tbl:
if x in keys:
prec[x]=prec_tbl[x]
tmp=[]
for a in keys:
for x in prec.values():
if a in x:break
else:
tmp.append(a)
tmp.sort()
out=[]
while tmp:
e=tmp.pop()
if e in keys:out.append(e)
try:
nlst=prec[e]
except KeyError:
pass
else:
del prec[e]
for x in nlst:
for y in prec:
if x in prec[y]:
break
else:
tmp.append(x)
if prec:
raise Errors.WafError('Cycle detected in the method execution %r'%prec)
out.reverse()
self.meths=out
Logs.debug('task_gen: posting %s %d'%(self,id(self)))
for x in out:
try:
v=getattr(self,x)
except AttributeError:
raise Errors.WafError('%r is not a valid task generator method'%x)
Logs.debug('task_gen: -> %s (%d)'%(x,id(self)))
v()
Logs.debug('task_gen: posted %s'%self.name)
return True
def get_hook(self,node):
name=node.name
for k in self.mappings:
if name.endswith(k):
return self.mappings[k]
for k in task_gen.mappings:
if name.endswith(k):
return task_gen.mappings[k]
raise Errors.WafError("File %r has no mapping in %r (did you forget to load a waf tool?)"%(node,task_gen.mappings.keys()))
def create_task(self,name,src=None,tgt=None):
task=Task.classes[name](env=self.env.derive(),generator=self)
if src:
task.set_inputs(src)
if tgt:
task.set_outputs(tgt)
self.tasks.append(task)
return task
def clone(self,env):
newobj=self.bld()
for x in self.__dict__:
if x in['env','bld']:
continue
elif x in['path','features']:
setattr(newobj,x,getattr(self,x))
else:
setattr(newobj,x,copy.copy(getattr(self,x)))
newobj.posted=False
if isinstance(env,str):
newobj.env=self.bld.all_envs[env].derive()
else:
newobj.env=env.derive()
return newobj
def declare_chain(name='',rule=None,reentrant=None,color='BLUE',ext_in=[],ext_out=[],before=[],after=[],decider=None,scan=None,install_path=None,shell=False):
ext_in=Utils.to_list(ext_in)
ext_out=Utils.to_list(ext_out)
if not name:
name=rule
cls=Task.task_factory(name,rule,color=color,ext_in=ext_in,ext_out=ext_out,before=before,after=after,scan=scan,shell=shell)
def x_file(self,node):
ext=decider and decider(self,node)or cls.ext_out
if ext_in:
_ext_in=ext_in[0]
tsk=self.create_task(name,node)
cnt=0
keys=list(self.mappings.keys())+list(self.__class__.mappings.keys())
for x in ext:
k=node.change_ext(x,ext_in=_ext_in)
tsk.outputs.append(k)
if reentrant!=None:
if cnt<int(reentrant):
self.source.append(k)
else:
for y in keys:
if k.name.endswith(y):
self.source.append(k)
break
cnt+=1
if install_path:
self.bld.install_files(install_path,tsk.outputs)
return tsk
for x in cls.ext_in:
task_gen.mappings[x]=x_file
return x_file
def taskgen_method(func):
setattr(task_gen,func.__name__,func)
return func
def feature(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for name in k:
feats[name].update([func.__name__])
return func
return deco
def before_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not func.__name__ in task_gen.prec[fun_name]:
task_gen.prec[fun_name].append(func.__name__)
return func
return deco
before=before_method
def after_method(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for fun_name in k:
if not fun_name in task_gen.prec[func.__name__]:
task_gen.prec[func.__name__].append(fun_name)
return func
return deco
after=after_method
def extension(*k):
def deco(func):
setattr(task_gen,func.__name__,func)
for x in k:
task_gen.mappings[x]=func
return func
return deco
@taskgen_method
def to_nodes(self,lst,path=None):
tmp=[]
path=path or self.path
find=path.find_resource
if isinstance(lst,self.path.__class__):
lst=[lst]
for x in Utils.to_list(lst):
if isinstance(x,str):
node=find(x)
else:
node=x
if not node:
raise Errors.WafError("source not found: %r in %r"%(x,self))
tmp.append(node)
return tmp
@feature('*')
def process_source(self):
 """
 Process each node in ``self.source`` by calling the extension hook
 matching its file name (hooks are registered with :py:func:`extension`).
 """
 self.source=self.to_nodes(getattr(self,'source',[]))
 for node in self.source:
  self.get_hook(node)(self,node)
@feature('*')
@before_method('process_source')
def process_rule(self):
 """
 Process the attribute 'rule' on a task generator: create a task class on
 the fly (cached on the build context when possible) and instantiate one
 task bound to the given source and target nodes.
 """
 if not getattr(self,'rule',None):
  return
 name=str(getattr(self,'name',None)or self.target or getattr(self.rule,'__name__',self.rule))
 # task classes created here are cached on the build context
 try:
  cache=self.bld.cache_rule_attr
 except AttributeError:
  cache=self.bld.cache_rule_attr={}
 cls=None
 if getattr(self,'cache_rule','True'):
  try:
   cls=cache[(name,self.rule)]
  except KeyError:
   pass
 if not cls:
  cls=Task.task_factory(name,self.rule,getattr(self,'vars',[]),shell=getattr(self,'shell',True),color=getattr(self,'color','BLUE'),scan=getattr(self,'scan',None))
  if getattr(self,'scan',None):
   cls.scan=self.scan
  elif getattr(self,'deps',None):
   # no scanner given: build one from the literal 'deps' file list
   def scan(self):
    nodes=[]
    for x in self.generator.to_list(getattr(self.generator,'deps',None)):
     node=self.generator.path.find_resource(x)
     if not node:
      self.generator.bld.fatal('Could not find %r (was it declared?)'%x)
     nodes.append(node)
    return[nodes,[]]
   cls.scan=scan
  if getattr(self,'update_outputs',None):
   Task.update_outputs(cls)
  if getattr(self,'always',None):
   Task.always_run(cls)
  for x in['after','before','ext_in','ext_out']:
   setattr(cls,x,getattr(self,x,[]))
  if getattr(self,'cache_rule','True'):
   cache[(name,self.rule)]=cls
 tsk=self.create_task(name)
 if getattr(self,'target',None):
  if isinstance(self.target,str):
   self.target=self.target.split()
  if not isinstance(self.target,list):
   self.target=[self.target]
  for x in self.target:
   if isinstance(x,str):
    tsk.outputs.append(self.path.find_or_declare(x))
   else:
    # assume a node object: make sure its output directory exists
    x.parent.mkdir()
    tsk.outputs.append(x)
  if getattr(self,'install_path',None):
   self.bld.install_files(self.install_path,tsk.outputs)
 if getattr(self,'source',None):
  tsk.inputs=self.to_nodes(self.source)
  # clear the source list so that process_source does not run the hooks again
  self.source=[]
 if getattr(self,'cwd',None):
  tsk.cwd=self.cwd
@feature('seq')
def sequence_order(self):
 """
 Force a build order: tasks of a 'seq' task generator run only after all
 tasks of the previous 'seq' generator have completed.
 """
 if self.meths and self.meths[-1]!='sequence_order':
  # postpone this method so that it executes after all the others
  self.meths.append('sequence_order')
  return
 if getattr(self,'seq_start',None):
  return
 # chain the tasks of this generator after those of the previous one
 if getattr(self.bld,'prev',None):
  self.bld.prev.post()
  for x in self.bld.prev.tasks:
   for y in self.tasks:
    y.set_run_after(x)
 self.bld.prev=self
# Matches m4-style @VAR@ placeholders substituted by subst_pc.run.
# The raw string avoids the DeprecationWarning/SyntaxWarning for the
# invalid escape sequence '\w' in a plain string literal.
re_m4=re.compile(r'@(\w+)@',re.M)
class subst_pc(Task.Task):
 """
 Create output files by substituting m4-style @VAR@ placeholders in the
 input file, using attributes of the task generator or configuration
 values (used for *.pc.in* -> *.pc* pkg-config files, among others).
 """
 def run(self):
  """Perform the substitution (or copy/custom function) and write the output."""
  if getattr(self.generator,'is_copy',None):
   # plain byte-for-byte copy requested
   self.outputs[0].write(self.inputs[0].read('rb'),'wb')
   if getattr(self.generator,'chmod',None):
    os.chmod(self.outputs[0].abspath(),self.generator.chmod)
   return None
  if getattr(self.generator,'fun',None):
   # fully custom substitution function takes over
   return self.generator.fun(self)
  code=self.inputs[0].read(encoding=getattr(self.generator,'encoding','ISO8859-1'))
  if getattr(self.generator,'subst_fun',None):
   code=self.generator.subst_fun(self,code)
   if code is not None:
    self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
   return
  # escape existing % signs, then turn each @VAR@ into %(VAR)s for the format below
  code=code.replace('%','%%')
  lst=[]
  def repl(match):
   g=match.group
   if g(1):
    lst.append(g(1))
    return"%%(%s)s"%g(1)
   return''
  global re_m4
  code=getattr(self.generator,'re_m4',re_m4).sub(repl,code)
  try:
   d=self.generator.dct
  except AttributeError:
   # no explicit dict given: look each variable up on the generator or the env
   d={}
   for x in lst:
    tmp=getattr(self.generator,x,'')or self.env.get_flat(x)or self.env.get_flat(x.upper())
    d[x]=str(tmp)
  code=code%d
  self.outputs[0].write(code,encoding=getattr(self.generator,'encoding','ISO8859-1'))
  self.generator.bld.raw_deps[self.uid()]=self.dep_vars=lst
  # invalidate any cached signature since dep_vars just changed
  try:delattr(self,'cache_sig')
  except AttributeError:pass
  if getattr(self.generator,'chmod',None):
   os.chmod(self.outputs[0].abspath(),self.generator.chmod)
 def sig_vars(self):
  """Hash the substitution functions and variable values into the task signature."""
  bld=self.generator.bld
  env=self.env
  upd=self.m.update
  if getattr(self.generator,'fun',None):
   upd(Utils.h_fun(self.generator.fun))
  if getattr(self.generator,'subst_fun',None):
   upd(Utils.h_fun(self.generator.subst_fun))
  # the substituted variable names were stored in raw_deps by run()
  vars=self.generator.bld.raw_deps.get(self.uid(),[])
  act_sig=bld.hash_env_vars(env,vars)
  upd(act_sig)
  lst=[getattr(self.generator,x,'')for x in vars]
  upd(Utils.h_list(lst))
  return self.m.digest()
@extension('.pc.in')
def add_pcfile(self,node):
 """
 Process a *.pc.in* file into a pkg-config *.pc* file and install it
 (default installation directory: ``${LIBDIR}/pkgconfig/``).
 """
 tsk=self.create_task('subst_pc',node,node.change_ext('.pc','.pc.in'))
 self.bld.install_files(getattr(self,'install_path','${LIBDIR}/pkgconfig/'),tsk.outputs)
class subst(subst_pc):
 """Generic substitution task (identical behaviour to subst_pc)."""
 pass
@feature('subst')
@before_method('process_source','process_rule')
def process_subst(self):
 """
 Create one 'subst' task per source/target pair on task generators that
 use the 'subst' feature; source and target lists must have equal length.
 """
 src=Utils.to_list(getattr(self,'source',[]))
 if isinstance(src,Node.Node):
  src=[src]
 tgt=Utils.to_list(getattr(self,'target',[]))
 if isinstance(tgt,Node.Node):
  tgt=[tgt]
 if len(src)!=len(tgt):
  raise Errors.WafError('invalid number of source/target for %r'%self)
 for x,y in zip(src,tgt):
  if not x or not y:
   raise Errors.WafError('null source or target for %r'%self)
  a,b=None,None
  if isinstance(x,str)and isinstance(y,str)and x==y:
   # same file name: substitute from the source dir into the build dir
   a=self.path.find_node(x)
   b=self.path.get_bld().make_node(y)
   if not os.path.isfile(b.abspath()):
    b.sig=None
    b.parent.mkdir()
  else:
   if isinstance(x,str):
    a=self.path.find_resource(x)
   elif isinstance(x,Node.Node):
    a=x
   if isinstance(y,str):
    b=self.path.find_or_declare(y)
   elif isinstance(y,Node.Node):
    b=y
  if not a:
   # fix: error message previously read "cound not find"
   raise Errors.WafError('could not find %r for %r'%(x,self))
  has_constraints=False
  tsk=self.create_task('subst',a,b)
  for k in('after','before','ext_in','ext_out'):
   val=getattr(self,k,None)
   if val:
    has_constraints=True
    setattr(tsk,k,val)
  # substituted headers must exist before any compilation starts
  if not has_constraints and b.name.endswith('.h'):
   tsk.before=[k for k in('c','cxx')if k in Task.classes]
  inst_to=getattr(self,'install_path',None)
  if inst_to:
   self.bld.install_files(inst_to,b,chmod=getattr(self,'chmod',Utils.O644))
 # clear the source list so that process_source does not run the hooks again
 self.source=[]
| gpl-2.0 |
Ctrip-DI/Hue-Ctrip-DI | sparksql/setup.py | 1 | 1746 | # Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
import os
def expand_package_data(src_dirs, strip=""):
    """Recursively collect every file path under the given directories.

    The substring `strip` is removed from each collected path (used to turn
    absolute/prefixed paths into package-relative ones).
    """
    collected = []
    for root_dir in src_dirs:
        for dirpath, _dirnames, filenames in os.walk(root_dir):
            collected.extend(
                os.path.join(dirpath, name).replace(strip, "")
                for name in filenames)
    return collected
# Run from the directory containing this script so that the relative paths
# below ('src', 'src/sparksql/...') resolve correctly regardless of the cwd.
os.chdir(os.path.dirname(os.path.abspath(__file__)))
setup(
  name = "sparksql",
  version = "0.1",
  url = 'TODO',
  description = 'TODO',
  author = 'TODO',
  packages = find_packages('src'),
  package_dir = {'': 'src'},
  install_requires = ['setuptools', 'desktop'],
  # registers this app with the Hue desktop SDK
  entry_points = { 'desktop.sdk.application': 'sparksql=sparksql' },
  zip_safe = False,
  package_data = {
    # Include static resources. Package_data doesn't
    # deal well with directory globs, so we enumerate
    # the files manually.
    'sparksql': expand_package_data(
      ["src/sparksql/templates", "src/sparksql/static"],
      "src/sparksql/")
  }
)
| mit |
mltsp/mltsp | cesium/features/cadence_features.py | 5 | 3420 | import numpy as np
import scipy.stats as stats
__all__ = ['double_to_single_step', 'cad_prob', 'delta_t_hist',
'normalize_hist', 'find_sorted_peaks', 'peak_ratio', 'peak_bin']
def double_to_single_step(cads):
    """Ratios (t[i+2] - t[i]) / (t[i+1] - t[i]).

    Parameters
    ----------
    cads : array-like
        Sequence of observation times t.

    Returns
    -------
    np.ndarray
        Element i is (t[i+2] - t[i]) / (t[i+1] - t[i]); length is
        ``len(cads) - 2``.
    """
    cads = np.asarray(cads)
    # The numerator is a difference of times, matching the documented
    # formula; the previous `cads[2:] + cads[:-2]` summed them instead.
    return (cads[2:] - cads[:-2]) / (cads[1:-1] - cads[:-2])
def cad_prob(cads, time):
    """Given the observed distribution of time lags `cads`, compute the
    probability that the next observation occurs within `time` minutes of
    an arbitrary epoch.
    """
    # convert minutes to days, the unit of the cadence values
    days = float(time) / (24.0 * 60.0)
    return stats.percentileofscore(cads, days) / 100.0
def delta_t_hist(t, nbins=50, conv_oversample=50):
    """Build histogram of all possible |t_i - t_j|'s.

    For efficiency, the histogram is obtained by convolving the binned
    counts with their reverse rather than computing every pairwise
    difference. The convolution runs on `conv_oversample * nbins` fine bins
    which are then aggregated down to `nbins` values.
    """
    counts, _edges = np.histogram(t, bins=conv_oversample * nbins)
    # autocorrelation of the counts; keep only the non-negative lags
    pair_counts = np.convolve(counts, counts[::-1])[len(counts) - 1:]
    # lag zero double-counts the i == j self-pairs
    pair_counts[0] -= len(t)
    # aggregate the oversampled bins back down to nbins values
    return pair_counts.reshape((-1, conv_oversample)).sum(axis=1)
def normalize_hist(hist, total_time):
    """Normalize histogram such that integral from t_min to t_max equals 1.
    cf. np.histogram(..., density=True).
    """
    norm_constant = total_time * np.mean(hist)
    return hist / norm_constant
def find_sorted_peaks(x):
    """Find peaks (local maxima) of an array.

    Interior points are peaks if greater than both neighbors; edge points
    if greater than their single neighbor. For a run of tied values, the
    first index of the run is the peak (and only if the run eventually
    drops, or reaches the end of the array).

    Returns a list of (index, x[index]) tuples sorted in decreasing order
    by peak value.
    """
    n = len(x)
    peak_inds = []
    for i in range(n):
        rising_from_left = (i == 0) or (x[i] > x[i - 1])
        if not rising_from_left:
            continue
        if i == n - 1 or x[i] > x[i + 1]:
            # strictly higher than the right neighbor (or at the right edge)
            peak_inds.append(i)
        elif x[i] == x[i + 1]:
            # tied with the right neighbor: scan forward for the first
            # non-equal value to decide whether this plateau is a peak
            for j in range(i + 1, n):
                if x[j] != x[i]:
                    if x[j] < x[i]:
                        peak_inds.append(i)
                    break
                if j == n - 1 and x[i] == x[j]:
                    # plateau runs to the end of the array
                    peak_inds.append(i)
    order = sorted(peak_inds, key=lambda idx: x[idx], reverse=True)
    return list(zip(order, x[order]))
def peak_ratio(peaks, i, j):
    """Compute the ratio of the values of the ith and jth largest peaks.

    `peaks` is a list of (index, value) tuples sorted in decreasing order
    by value; NaN is returned when either requested peak does not exist.
    """
    if len(peaks) <= i or len(peaks) <= j:
        return np.nan
    return peaks[i][1] / peaks[j][1]
def peak_bin(peaks, i):
    """Return the (bin) index of the ith largest peak, or NaN when there is
    no such peak.

    `peaks` is a list of (index, value) tuples sorted in decreasing order
    by value.
    """
    if len(peaks) <= i:
        return np.nan
    return peaks[i][0]
| bsd-3-clause |
lisa-lab/pylearn2 | pylearn2/models/gsn.py | 44 | 36324 | """
Generative Stochastic Networks
This is described in:
- "Generalized Denoising Auto-Encoders as Generative Models" Bengio, Yao, Alain,
Vincent. arXiv:1305.6663
- "Deep Generative Stochastic Networks Trainable by Backprop" Bengio,
Thibodeau-Laufer. arXiv:1306.1091
There is an example of training both unsupervised and supervised GSNs on MNIST
in pylearn2/scripts/gsn_example.py
"""
__authors__ = "Eric Martin"
__copyright__ = "Copyright 2013, Universite de Montreal"
__license__ = "3-clause BSD"
import copy
import functools
import warnings
import numpy as np
from theano.compat.six.moves import xrange
import theano
T = theano.tensor
from pylearn2.blocks import StackedBlocks
from pylearn2.expr.activations import identity
from pylearn2.models.autoencoder import Autoencoder
from pylearn2.models.model import Model
from pylearn2.utils import safe_zip
# Enforce correct restructured text list format.
# Be sure to re-run docgen.py and make sure there are no warnings if you
# modify the module-level docstring.
assert """:
- """ in __doc__
class GSN(StackedBlocks, Model):
"""
.. todo::
WRITEME
Parameters
----------
autoencoders : list
A list of autoencoder objects. As of now, only the functionality
from the base Autoencoder class is used.
preact_cors : list
A list of length len(autoencoders) + 1 where each element is a
callable (which includes Corruptor objects). The callable at
index i is called before activating the ith layer. Name stands
for "preactivation corruptors".
postact_cors : list
A list of length len(autoencoders) + 1 where each element is a
callable (which includes Corruptor objects). The callable at
index i is called directly after activating the ith layer. Name
stands for "postactivation corruptors". The valid values for this
parameter are the same as that for preact_cors.
layer_samplers: list
Describes how to sample from each layer. Sampling occurs directly
before the post activation corruption is applied. Valid values
for this argument are of the same form as valid parameters for
preact_cor and postact_cor (and if an element in the list is
None, no sampling will be applied at that layer). Note: as of
right now, we've only experimented with sampling at the visible
layer.
Notes
-----
Most of the time it will be much easier to construct a GSN using
GSN.new rather than GSN.__init__. This method exists to make the GSN
class very easy to modify.
The activation function for the visible layer is the "act_dec" function
on the first autoencoder, and the activation function for the i_th
hidden layer is the "act_enc" function on the (i - 1)th autoencoder.
"""
def __init__(self, autoencoders, preact_cors=None, postact_cors=None,
layer_samplers=None):
super(GSN, self).__init__(autoencoders)
# only for convenience
self.aes = self._layers
# easy way to turn off corruption (True => corrupt, False => don't)
self._corrupt_switch = True
# easy way to turn off sampling
self._sample_switch = True
# easy way to not use bias (True => use bias, False => don't)
self._bias_switch = True
# check that autoencoders are the correct sizes by looking at previous
# layer. We can't do this for the first ae, so we skip it.
for i in xrange(1, len(self.aes)):
assert (self.aes[i].weights.get_value().shape[0] ==
self.aes[i - 1].nhid)
# do some type checking and convert None's to identity function
def _make_callable_list(previous):
"""
.. todo::
WRITEME
"""
if len(previous) != self.nlayers:
raise ValueError("Need same number of corruptors/samplers as layers")
if not all(map(lambda x: callable(x) or x is None, previous)):
raise ValueError("All elements must either be None or be a callable")
return map(lambda x: identity if x is None else x, previous)
self._preact_cors = _make_callable_list(preact_cors)
self._postact_cors = _make_callable_list(postact_cors)
self._layer_samplers = _make_callable_list(layer_samplers)
    @staticmethod
    def _make_aes(layer_sizes, activation_funcs, tied=True):
        """
        Creates the Autoencoder objects needed by the GSN.
        Parameters
        ----------
        layer_sizes : list of int
            Size of each layer; one autoencoder is created per adjacent
            pair of layers.
        activation_funcs : list
            One activation per layer: element 0 is the visible (decoding)
            activation, element i + 1 is the encoding activation of layer
            i + 1.
        tied : bool
            Whether each autoencoder uses tied weights.
        Returns
        -------
        list of Autoencoder
        """
        aes = []
        assert len(activation_funcs) == len(layer_sizes)
        for i in xrange(len(layer_sizes) - 1):
            # activation for visible layer is aes[0].act_dec
            act_enc = activation_funcs[i + 1]
            act_dec = act_enc if i != 0 else activation_funcs[0]
            aes.append(
                Autoencoder(layer_sizes[i], layer_sizes[i + 1],
                            act_enc, act_dec, tied_weights=tied)
            )
        return aes
    @classmethod
    def new(cls,
            layer_sizes,
            activation_funcs,
            pre_corruptors,
            post_corruptors,
            layer_samplers,
            tied=True):
        """
        An easy (and recommended) way to initialize a GSN.
        Parameters
        ----------
        layer_sizes : list
            A list of integers. The i_th element in the list is the size of
            the i_th layer of the network, and the network will have
            len(layer_sizes) layers.
        activation_funcs : list
            activation_funcs must be a list of the same length as layer_sizes
            where the i_th element is the activation function for the i_th
            layer. Each component of the list must refer to an activation
            function in such a way that the Autoencoder class recognizes the
            function. Valid values include a callable (which takes a symbolic
            tensor), a string that refers to a Theano activation function, or
            None (which gives the identity function).
        preact_corruptors : list
            preact_corruptors follows exactly the same format as the
            activations_func argument.
        postact_corruptors : list
            postact_corruptors follows exactly the same format as the
            activations_func argument.
        layer_samplers : list
            layer_samplers follows exactly the same format as the
            activations_func argument.
        tied : bool
            Indicates whether the network should use tied weights.
        Notes
        -----
        The GSN classes applies functions in the following order:
        - pre-activation corruption
        - activation
        - clamping applied
        - sampling
        - post-activation corruption
        All setting and returning of values occurs after applying the
        activation function (or clamping if clamping is used) but before
        applying sampling.
        """
        # every per-layer argument must supply exactly one element per layer
        args = [layer_sizes, pre_corruptors, post_corruptors, layer_samplers]
        if not all(isinstance(arg, list) for arg in args):
            raise TypeError("All arguments except for tied must be lists")
        if not all(len(arg) == len(args[0]) for arg in args):
            lengths = map(len, args)
            raise ValueError("All list arguments must be of the same length. " +
                             "Current lengths are %s" % lengths)
        aes = cls._make_aes(layer_sizes, activation_funcs, tied=tied)
        return cls(aes,
                   preact_cors=pre_corruptors,
                   postact_cors=post_corruptors,
                   layer_samplers=layer_samplers)
@functools.wraps(Model.get_params)
def get_params(self):
"""
.. todo::
WRITEME
"""
params = set()
for ae in self.aes:
params.update(ae.get_params())
return list(params)
    @property
    def nlayers(self):
        """
        Returns how many layers the GSN has.
        """
        # the visible layer plus one hidden layer per autoencoder
        return len(self.aes) + 1
    def _run(self, minibatch, walkback=0, clamped=None):
        """
        This runs the GSN on input 'minibatch' and returns all of the activations
        at every time step.
        Parameters
        ----------
        minibatch : see parameter description in _set_activations
        walkback : int
            How many walkback steps to perform.
        clamped : list of theano tensors or None.
            clamped must be None or a list of len(minibatch) where each
            element is a Theano tensor or None. Each Theano tensor should be
            1 for indices where the value should be clamped and 0 for where
            the value should not be clamped.
        Returns
        -------
        steps : list of list of tensor_likes
            A list of the activations at each time step. The activations
            themselves are lists of tensor_like symbolic variables.
            A time step consists of a call to the _update function
            (so updating both the even and odd layers). When there is no
            walkback, the GSN runs long enough for signal from the bottom
            layer to propogate to the top layer and then back to the bottom.
            The walkback parameter adds single steps on top of the default.
        """
        # the indices which are being set
        set_idxs = safe_zip(*minibatch)[0]
        if self.nlayers == 2 and len(set_idxs) == 2:
            if clamped is None:
                raise ValueError("Setting both layers of 2 layer GSN without " +
                                 "clamping causes one layer to overwrite the " +
                                 "other. The value for layer 0 will not be used.")
            else:
                warnings.warn("Setting both layers of 2 layer GSN with clamping " +
                              "may not be valid, depending on what clamping is " +
                              "done")
        # detect whether any two adjacent layer indices were both set
        diff = lambda L: [L[i] - L[i - 1] for i in xrange(1, len(L))]
        if 1 in diff(sorted(set_idxs)):
            # currently doing an odd step at first. If this warning appears, you
            # should remember that the odd step (ie calculating the odd activations)
            # is done first (so all setting of odd layers is valid) and that for
            # an even layer to have an effect it must be used to compute either the
            # (odd) layer below or above it.
            warnings.warn("Adjacent layers in the GSN are being set. There is a" +
                          " significant possibility that some of the set values" +
                          " are not being used and are just overwriting each " +
                          "other. This is dependent on both the ordering of the " +
                          "even and odd steps as well as the proximity to the " +
                          "edge of the network.\n It is recommended to read the " +
                          "source to ensure the behavior is understood if setting " +
                          "adjacent layers.")
        self._set_activations(minibatch)
        # initialize steps: steps[0] is a snapshot taken before corruption
        steps = [self.activations[:]]
        self.apply_postact_corruption(self.activations,
                                      xrange(self.nlayers))
        if clamped is not None:
            # pair each clamping mask with its layer index and initial value
            vals = safe_zip(*minibatch)[1]
            clamped = safe_zip(set_idxs, vals, clamped)
        # main loop
        for _ in xrange(len(self.aes) + walkback):
            steps.append(self._update(self.activations, clamped=clamped))
        return steps
    def _make_or_get_compiled(self, indices, clamped=False):
        """
        Compiles, wraps, and caches Theano functions for non-symbolic calls
        to get_samples.
        Parameters
        ----------
        indices : list of int
            Indices of the layers that receive input data; f_init takes one
            matrix per index.
        clamped : bool
            Whether the compiled step function should accept additional
            initial-value and clamping-mask matrices.
        Returns
        -------
        (f_init, f_step) : pair of callables
            f_init initializes the network from the input matrices; f_step
            performs one update step. Both return a
            (precorruption, activations) pair of lists.
        """
        def compile_f_init():
            mb = T.matrices(len(indices))
            zipped = safe_zip(indices, mb)
            f_init = theano.function(mb,
                                     self._set_activations(zipped, corrupt=True),
                                     allow_input_downcast=True)
            # handle splitting of concatenated data
            def wrap_f_init(*args):
                data = f_init(*args)
                length = len(data) / 2
                return data[:length], data[length:]
            return wrap_f_init
        def compile_f_step():
            prev = T.matrices(self.nlayers)
            if clamped:
                _initial = T.matrices(len(indices))
                _clamps = T.matrices(len(indices))
                z = self._update(copy.copy(prev),
                                 clamped=safe_zip(indices, _initial, _clamps),
                                 return_activations=True)
                f = theano.function(prev + _initial + _clamps, z,
                                    on_unused_input='ignore',
                                    allow_input_downcast=True)
            else:
                z = self._update(copy.copy(prev), return_activations=True)
                f = theano.function(prev, z, on_unused_input='ignore',
                                    allow_input_downcast=True)
            # the compiled function concatenates both lists; split them again
            def wrapped(*args):
                data = f(*args)
                length = len(data) / 2
                return data[:length], data[length:]
            return wrapped
        # things that require re-compiling everything
        # cache layout: (state, indices, f_init, f_step)
        state = (self._corrupt_switch, self._sample_switch, self._bias_switch)
        if hasattr(self, '_compiled_cache') and state == self._compiled_cache[0]:
            # already have some cached functions
            if indices == self._compiled_cache[1]:
                # everything is cached, return all but state and indices
                return self._compiled_cache[2:]
            else:
                # indices have changed, need to recompile f_init
                f_init = compile_f_init()
                cc = self._compiled_cache
                self._compiled_cache = (state, indices, f_init, cc[3])
                return self._compiled_cache[2:]
        else:
            # have no cached function (or incorrect state)
            f_init = compile_f_init()
            f_step = compile_f_step()
            self._compiled_cache = (state, indices, f_init, f_step)
            return self._compiled_cache[2:]
    def get_samples(self, minibatch, walkback=0, indices=None, symbolic=True,
                    include_first=False, clamped=None):
        """
        Runs minibatch through GSN and returns reconstructed data.
        Parameters
        ----------
        minibatch : see parameter description in _set_activations
            In addition to the description in get_samples, the tensor_likes
            in the list should be replaced by numpy matrices if symbolic=False.
        walkback : int
            How many walkback steps to perform. This is both how many extra
            samples to take as well as how many extra reconstructed points
            to train off of. See description in _run.
            This parameter controls how many samples you get back.
        indices : None or list of ints, optional
            Indices of the layers that should be returned for each time step.
            If indices is None, then get_samples returns the values for all
            of the layers which were initially specified (by minibatch).
        symbolic : bool, optional
            Whether the input (minibatch) contains a Theano (symbolic)
            tensors or actual (numpy) arrays. This flag is needed because
            Theano cannot compile the large computational graphs that
            walkback creates.
        include_first : bool, optional
            Whether to include the initial activations (ie just the input) in
            the output. This is useful for visualization, but can screw up
            training due to some cost functions failing on perfect
            reconstruction.
        clamped : list of tensor_likes, optional
            See description on _run. Theano symbolics should be replaced by
            numpy matrices if symbolic=False. Length must be the same as
            length of minibatch.
        Returns
        -------
        reconstructions : list of tensor_likes
            A list of length 1 + number of layers + walkback that contains
            the samples generated by the GSN. The layers returned at each
            time step is decided by the indices parameter (and defaults to
            the layers specified in minibatch). If include_first is True,
            then the list will be 1 element longer (inserted at beginning)
            than specified above.
        """
        if walkback > 8 and symbolic:
            warnings.warn(("Running GSN in symbolic mode (needed for training) " +
                           "with a lot of walkback. Theano may take a very long " +
                           "time to compile this computational graph. If " +
                           "compiling is taking too long, then reduce the amount " +
                           "of walkback."))
        input_idxs = safe_zip(*minibatch)[0]
        if indices is None:
            indices = input_idxs
        if not symbolic:
            # numeric path: drive pre-compiled step functions instead of
            # building one large symbolic graph
            vals = safe_zip(*minibatch)[1]
            f_init, f_step = self._make_or_get_compiled(input_idxs,
                                                        clamped=clamped is not None)
            if clamped is None:
                get_args = lambda x: x
            else:
                # clamped steps also need the initial values and the masks
                mb_values = [mb[1] for mb in minibatch]
                get_args = lambda x: x + mb_values + clamped
            precor, activations = f_init(*vals)
            results = [precor]
            for _ in xrange(len(self.aes) + walkback):
                precor, activations = f_step(*get_args(activations))
                results.append(precor)
        else:
            results = self._run(minibatch, walkback=walkback, clamped=clamped)
        # leave out the first time step
        if not include_first:
            results = results[1:]
        return [[step[i] for i in indices] for step in results]
    @functools.wraps(Autoencoder.reconstruct)
    def reconstruct(self, minibatch):
        """
        Run the GSN once (no walkback) and return the samples for the single
        layer the minibatch was set on, mimicking the Autoencoder API.
        """
        # included for compatibility with cost functions for autoencoders,
        # so assumes model is in unsupervised mode
        assert len(minibatch) == 1
        idx = minibatch[0][0]
        return self.get_samples(minibatch, walkback=0, indices=[idx])
    def __call__(self, minibatch):
        """
        As specified by StackedBlocks, this returns the output representation of
        all layers. This occurs at the final time step.
        Parameters
        ----------
        minibatch : see parameter description in _set_activations
        Returns
        -------
        list of tensor_likes
            The activations of every layer at the final time step.
        """
        return self._run(minibatch)[-1]
"""
NOTE: The following methods contain the algorithmic content of the GSN class.
All of these methods are written in a way such that they can be run without
modifying the state of the GSN object. This primary visible consequence of this
is that the methods take an "activations parameter", which is generally just
self.activations.
Although this style is a bit odd, it is completely necessary. Theano can handle
small amounts of walkback (which allows us to train for walkback), but for
many sampling iterations (ie more than 10) Theano struggles to compile these
large computational graphs. Making all of these methods below take
activations as an explicit parameter (which they then modify in place,
which allows calling with self.activations) allows one to create smaller
external Theano functions that allow many sampling iterations.
See pylearn2.models.tests.test_gsn.sampling_test for an example.
"""
    def _set_activations(self, minibatch, set_val=True, corrupt=False):
        """
        Initializes the GSN as specified by minibatch.
        Parameters
        ----------
        minibatch : list of (int, tensor_like)
            The minibatch parameter must be a list of tuples of form
            (int, tensor_like), where the int component represents the index
            of the layer (so 0 for visible, -1 for top/last layer) and the
            tensor_like represents the activation at that level. Layer
            indices not included in the minibatch will be set to 0. For
            tuples included in the minibatch, the tensor_like component can
            actually be None; this will result in that layer getting set to 0
            initially.
        set_val : bool, optional
            Determines whether the method sets self.activations.
        corrupt : bool, optional
            Instructs the method to return both a non-corrupted and corrupted
            set of activations rather than just non-corrupted.
        Notes
        -----
        This method creates a new list, not modifying an existing list.
        This method also does the first odd step in the network.
        """
        activations = [None] * self.nlayers
        mb_size = minibatch[0][1].shape[0]
        first_layer_size = self.aes[0].weights.shape[0]
        # zero out activations to start
        activations[0] = T.alloc(0, mb_size, first_layer_size)
        # propagate the zero shape upward so each layer gets the right size
        for i in xrange(1, len(activations)):
            activations[i] = T.zeros_like(
                T.dot(activations[i - 1], self.aes[i - 1].weights)
            )
        # set minibatch
        for i, val in minibatch:
            if val is not None:
                activations[i] = val
        indices = [t[0] for t in minibatch if t[1] is not None]
        # first odd step; skip the layers that were explicitly set above
        self._update_odds(activations, skip_idxs=indices, corrupt=False)
        if set_val:
            self.activations = activations
        if corrupt:
            # return both the clean and the corrupted activations concatenated
            return (activations +
                    self.apply_postact_corruption(activations[:],
                                                  xrange(len(activations))))
        else:
            return activations
    def _update_odds(self, activations, skip_idxs=frozenset(), corrupt=True,
                     clamped=None):
        """
        Updates just the odd layers of the network.
        Parameters
        ----------
        activations : list
            List of symbolic tensors representing the current activations.
        skip_idxs : list
            List of integers representing which odd indices should not be
            updated. This parameter exists so that _set_activations can solve
            the tricky problem of initializing the network when both even and
            odd layers are being assigned.
        corrupt : bool, optional
            Whether or not to apply post-activation corruption to the odd
            layers. This parameter does not alter the return value of this
            method but does modify the activations parameter in place.
        clamped : list, optional
            See description for _apply_clamping.
        """
        # Update and corrupt all of the odd layers (which we aren't skipping)
        odds = filter(lambda i: i not in skip_idxs,
                      range(1, len(activations), 2))
        # NOTE(review): `odds` is iterated twice below, so it must be a real
        # sequence (Python 2 `filter` returns a list); under Python 3 this
        # would need list(filter(...)).
        self._update_activations(activations, odds)
        if clamped is not None:
            self._apply_clamping(activations, clamped)
        # snapshot the pre-corruption odd activations for the caller
        odds_copy = [(i, activations[i]) for i in xrange(1, len(activations), 2)]
        if corrupt:
            self.apply_postact_corruption(activations, odds)
        return odds_copy
    def _update_evens(self, activations, clamped=None):
        """
        Updates just the even layers of the network.
        Parameters
        ----------
        See the parameter descriptions on _update_odds.
        """
        evens = xrange(0, len(activations), 2)
        self._update_activations(activations, evens)
        if clamped is not None:
            self._apply_clamping(activations, clamped)
        # snapshot the pre-corruption even activations for the caller
        evens_copy = [(i, activations[i]) for i in evens]
        self.apply_postact_corruption(activations, evens)
        return evens_copy
    def _update(self, activations, clamped=None, return_activations=False):
        """
        See Figure 1 in "Deep Generative Stochastic Networks as Generative
        Models" by Bengio, Thibodeau-Laufer.
        This and _update_activations implement exactly that, which is essentially
        forward propogating the neural network in both directions.
        Parameters
        ----------
        activations : list of tensors
            List of activations at time step t - 1.
        clamped : list
            See description on _apply_clamping
        return_activations : bool
            If true, then this method returns both the activation values
            after the activation function has been applied and the values
            after the sampling + post-activation corruption has been applied.
            If false, then only return the values after the activation
            function has been applied (no corrupted version).
            This parameter is only set to True when compiling the functions
            needed by get_samples. Regardless of this parameter setting, the
            sampling/post-activation corruption noise is still added in-place
            to activations.
        Returns
        -------
        y : list of tensors
            List of activations at time step t (prior to adding postact noise).
        Notes
        -----
        The return value is generally not equal to the value of activations at
        the the end of this method. The return value contains all layers
        without sampling/post-activation noise, but the activations value
        contains noise on the odd layers (necessary to compute the even
        layers).
        """
        evens_copy = self._update_evens(activations, clamped=clamped)
        odds_copy = self._update_odds(activations, clamped=clamped)
        # precor is before sampling + postactivation corruption (after preactivation
        # corruption and activation)
        # NOTE(review): sized from len(self.activations) rather than the
        # `activations` argument; the two agree in practice — confirm before
        # calling with a differently-sized list.
        precor = [None] * len(self.activations)
        for idx, val in evens_copy + odds_copy:
            assert precor[idx] is None
            precor[idx] = val
        assert None not in precor
        if return_activations:
            return precor + activations
        else:
            return precor
@staticmethod
def _apply_clamping(activations, clamped, symbolic=True):
"""
Resets the value of some layers within the network.
Parameters
----------
activations : list
List of symbolic tensors representing the current activations.
clamped : list of (int, matrix, matrix or None) tuples
The first component of each tuple is an int representing the
index of the layer to clamp.
The second component is a matrix of the initial values for that
layer (ie what we are resetting the values to).
The third component is a matrix mask indicated which indices in
the minibatch to clamp (1 indicates clamping, 0 indicates not).
The value of None is equivalent to the 0 matrix (so no clamping).
If symbolic is true then matrices are Theano tensors, otherwise
they should be numpy matrices.
symbolic : bool, optional
Whether to execute with symbolic Theano tensors or numpy matrices.
"""
for idx, initial, clamp in clamped:
if clamp is None:
continue
# take values from initial
clamped_val = clamp * initial
# zero out values in activations
if symbolic:
activations[idx] = T.switch(clamp, initial, activations[idx])
else:
activations[idx] = np.switch(clamp, initial, activations[idx])
return activations
@staticmethod
def _apply_corruption(activations, corruptors, idx_iter):
"""
Applies a list of corruptor functions to all layers.
Parameters
----------
activations : list of tensor_likes
Generally gsn.activations
corruptors : list of callables
Generally gsn.postact_cors or gsn.preact_cors
idx_iter : iterable
An iterable of indices into self.activations. The indexes
indicate which layers the post activation corruptors should be
applied to.
"""
assert len(corruptors) == len(activations)
for i in idx_iter:
activations[i] = corruptors[i](activations[i])
return activations
def apply_sampling(self, activations, idx_iter):
    """
    Samples the layers listed in ``idx_iter`` when sampling is enabled.

    A no-op (beyond returning ``activations``) when ``self._sample_switch``
    is off.
    """
    if not self._sample_switch:
        return activations
    # The samplers reuse the corruption-application machinery: each
    # layer sampler is just applied like a corruptor.
    self._apply_corruption(activations, self._layer_samplers, idx_iter)
    return activations
def apply_postact_corruption(self, activations, idx_iter, sample=True):
    """
    Optionally samples, then applies post-activation corruption, to the
    layers listed in ``idx_iter``.

    Sampling runs first (when ``sample`` is true); corruption only runs
    when ``self._corrupt_switch`` is on. ``activations`` is mutated in
    place and returned.
    """
    if sample:
        # Sampling happens before the post-activation noise is injected.
        self.apply_sampling(activations, idx_iter)
    if self._corrupt_switch:
        self._apply_corruption(activations, self._postact_cors, idx_iter)
    return activations
def apply_preact_corruption(self, activations, idx_iter):
    """
    Applies pre-activation corruption to the layers listed in
    ``idx_iter`` when ``self._corrupt_switch`` is on.

    ``activations`` is mutated in place and returned.
    """
    if self._corrupt_switch:
        self._apply_corruption(activations, self._preact_cors, idx_iter)
    return activations
def _update_activations(self, activations, idx_iter):
    """
    Recomputes, in place, the activations of the layers named in
    ``idx_iter``.

    Each selected layer is set to the linear contribution of its
    neighboring layers (plus the autoencoder biases when
    ``self._bias_switch`` is on), then pre-activation corruption is
    applied, and finally the layer's activation function (borrowed from
    the adjacent autoencoder) is run.

    Parameters
    ----------
    activations : list of tensor_likes
        The activations to update (could be self.activations). Updated
        in place.
    idx_iter : iterable
        Indices into self.activations of the layers to recompute.
        Must be iterable multiple times.
    """
    top = len(activations) - 1

    def contribution_from_above(i):
        bias = self.aes[i].visbias if self._bias_switch else 0
        return bias + T.dot(activations[i + 1], self.aes[i].w_prime)

    def contribution_from_below(i):
        bias = self.aes[i - 1].hidbias if self._bias_switch else 0
        return bias + T.dot(activations[i - 1], self.aes[i - 1].weights)

    # First pass: linear pre-activations from the neighboring layers.
    # The bottom layer only has a neighbor above it; the top layer only
    # has one below; interior layers combine both.
    for i in idx_iter:
        if i == 0:
            activations[i] = contribution_from_above(i)
        elif i == top:
            activations[i] = contribution_from_below(i)
        else:
            activations[i] = (contribution_from_below(i) +
                              contribution_from_above(i))

    self.apply_preact_corruption(activations, idx_iter)

    # Second pass: nonlinearity. Layer 0 uses the decoder activation of
    # the bottom autoencoder; layer i > 0 uses the encoder activation of
    # autoencoder i - 1. None means linear (identity).
    for i in idx_iter:
        if i == 0:
            act_func = self.aes[0].act_dec
        else:
            act_func = self.aes[i - 1].act_enc
        if act_func is not None:
            activations[i] = act_func(activations[i])
class JointGSN(GSN):
    """
    This class only provides a few convenient methods on top of the GSN class
    above. This class should be used when learning the joint distribution
    between 2 vectors.
    """

    @classmethod
    def convert(cls, gsn, input_idx=0, label_idx=None):
        """
        'convert' essentially serves as the constructor for JointGSN.

        Parameters
        ----------
        gsn : GSN
            The trained network to wrap; shallow-copied, not mutated.
        input_idx : int
            The index of the layer which serves as the "input" to the
            network. During classification, this layer will be given.
            Defaults to 0.
        label_idx : int
            The index of the layer which serves as the "output" of the
            network. This label is predicted during classification.
            Defaults to the top layer of the network.
        """
        gsn = copy.copy(gsn)
        gsn.__class__ = cls
        gsn.input_idx = input_idx
        # Bug fix: the original `label_idx or (gsn.nlayers - 1)` silently
        # discarded an explicit label_idx of 0 (0 is falsy). Test for
        # None explicitly so any layer index can be used as the label.
        gsn.label_idx = label_idx if label_idx is not None else gsn.nlayers - 1
        return gsn

    def calc_walkback(self, trials):
        """
        Utility method that calculates how much walkback is needed to get
        at least 'trials' samples.

        Parameters
        ----------
        trials : int
            Desired number of samples.

        Returns
        -------
        int
            Extra walkback steps needed (0 when the network depth already
            yields enough samples).
        """
        return max(0, trials - len(self.aes))

    def _get_aggregate_classification(self, minibatch, trials=10, skip=0):
        """
        See classify method.

        Returns the prediction vector aggregated over all time steps where
        axis 0 is the minibatch item and axis 1 is the output for the label.
        """
        # Clamp the whole input layer for every minibatch element.
        clamped = np.ones(minibatch.shape, dtype=np.float32)

        data = self.get_samples([(self.input_idx, minibatch)],
                                walkback=self.calc_walkback(trials + skip),
                                indices=[self.label_idx],
                                clamped=[clamped],
                                symbolic=False)

        # 3d tensor: axis 0 is time step, axis 1 is minibatch item,
        # axis 2 is softmax output for label (after slicing)
        data = np.asarray(data[skip:skip + trials])[:, 0, :, :]
        return data.mean(axis=0)

    def classify(self, minibatch, trials=10, skip=0):
        """
        Classifies a minibatch.

        This method clamps minibatch at self.input_idx and then runs the
        GSN. The first 'skip' predictions are skipped and the next
        'trials' predictions are averaged and then arg-maxed to make a
        final prediction. The prediction vectors are the activations at
        self.label_idx.

        Parameters
        ----------
        minibatch : numpy matrix
            Input data to classify, clamped at self.input_idx.
        trials : int
            Number of time steps to average over.
        skip : int
            Number of initial time steps to discard (burn-in).

        Returns
        -------
        numpy matrix
            One-hot predictions, one row per minibatch item.

        Notes
        -----
        A fairly large 3D tensor is built during classification, so one
        should watch memory use. The easiest way to limit memory
        consumption is to classify just minibatches rather than the whole
        test set at once. The large tensor is of size
        (skip + trials) * mb_size * num labels.

        .. warning::
            This method does not directly control whether or not
            corruption and sampling is applied during classification.
            These are decided by self._corrupt_switch and
            self._sample_switch.
        """
        mean = self._get_aggregate_classification(minibatch, trials=trials,
                                                  skip=skip)
        am = np.argmax(mean, axis=1)

        # convert argmax's to one-hot format
        labels = np.zeros_like(mean)
        labels[np.arange(labels.shape[0]), am] = 1.0
        return labels

    def get_samples_from_labels(self, labels, trials=5):
        """
        Clamps labels and generates samples from the input layer.

        Parameters
        ----------
        labels : numpy matrix
            Label activations to clamp at self.label_idx.
        trials : int
            Minimum number of samples to generate.

        Returns
        -------
        numpy array
            Samples of the input layer, one slab per time step.
        """
        clamped = np.ones(labels.shape, dtype=np.float32)

        data = self.get_samples([(self.label_idx, labels)],
                                walkback=self.calc_walkback(trials),
                                indices=[self.input_idx],
                                clamped=[clamped],
                                symbolic=False)

        return np.array(data)[:, 0, :, :]
| bsd-3-clause |
ristvan/googletest | test/gtest_filter_unittest.py | 2826 | 21261 | #!/usr/bin/env python
#
# Copyright 2005 Google Inc. All Rights Reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test test filters.
A user can specify which test(s) in a Google Test program to run via either
the GTEST_FILTER environment variable or the --gtest_filter flag.
This script tests such functionality by invoking
gtest_filter_unittest_ (a program written with Google Test) with different
environments and command line flags.
Note that test sharding may also influence which tests are filtered. Therefore,
we test that here also.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
import sets
import sys
import gtest_test_utils
# Constants.

# NOTE: the two probes below spawn a Python subprocess at import time to
# detect platform capabilities; their results gate the empty-filter tests.

# Checks if this platform can pass empty environment variables to child
# processes. We set an env variable to an empty string and invoke a python
# script in a subprocess to print whether the variable is STILL in
# os.environ. We then use 'eval' to parse the child's output so that an
# exception is thrown if the input is anything other than 'True' nor 'False'.
os.environ['EMPTY_VAR'] = ''
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ'])
CAN_PASS_EMPTY_ENV = eval(child.output)

# Check if this platform can unset environment variables in child processes.
# We set an env variable to a non-empty string, unset it, and invoke
# a python script in a subprocess to print whether the variable
# is NO LONGER in os.environ.
# We use 'eval' to parse the child's output so that an exception
# is thrown if the input is neither 'True' nor 'False'.
os.environ['UNSET_VAR'] = 'X'
del os.environ['UNSET_VAR']
child = gtest_test_utils.Subprocess(
    [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ'])
CAN_UNSET_ENV = eval(child.output)

# Checks if we should test with an empty filter. This doesn't
# make sense on platforms that cannot pass empty env variables (Win32)
# and on platforms that cannot unset variables (since we cannot tell
# the difference between "" and NULL -- Borland and Solaris < 5.10)
CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV)

# The environment variable for specifying the test filters.
FILTER_ENV_VAR = 'GTEST_FILTER'

# The environment variables for test sharding.
TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS'
SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX'
SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE'

# The command line flag for specifying the test filters.
FILTER_FLAG = 'gtest_filter'

# The command line flag for including disabled tests.
ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests'

# Command to run the gtest_filter_unittest_ program.
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_')

# Regex for determining whether parameterized tests are enabled in the binary.
PARAM_TEST_REGEX = re.compile(r'/ParamTest')

# Regex for parsing test case names from Google Test's output.
TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)')

# Regex for parsing test names from Google Test's output.
TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)')

# The command line flag to tell Google Test to output the list of tests it
# will run.
LIST_TESTS_FLAG = '--gtest_list_tests'

# Indicates whether Google Test supports death tests.
SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess(
    [COMMAND, LIST_TESTS_FLAG]).output

# Full names of all tests in gtest_filter_unittests_.
PARAM_TESTS = [
    'SeqP/ParamTest.TestX/0',
    'SeqP/ParamTest.TestX/1',
    'SeqP/ParamTest.TestY/0',
    'SeqP/ParamTest.TestY/1',
    'SeqQ/ParamTest.TestX/0',
    'SeqQ/ParamTest.TestX/1',
    'SeqQ/ParamTest.TestY/0',
    'SeqQ/ParamTest.TestY/1',
    ]

DISABLED_TESTS = [
    'BarTest.DISABLED_TestFour',
    'BarTest.DISABLED_TestFive',
    'BazTest.DISABLED_TestC',
    'DISABLED_FoobarTest.Test1',
    'DISABLED_FoobarTest.DISABLED_Test2',
    'DISABLED_FoobarbazTest.TestA',
    ]

if SUPPORTS_DEATH_TESTS:
  DEATH_TESTS = [
    'HasDeathTest.Test1',
    'HasDeathTest.Test2',
    ]
else:
  DEATH_TESTS = []

# All the non-disabled tests.
ACTIVE_TESTS = [
    'FooTest.Abc',
    'FooTest.Xyz',

    'BarTest.TestOne',
    'BarTest.TestTwo',
    'BarTest.TestThree',

    'BazTest.TestOne',
    'BazTest.TestA',
    'BazTest.TestB',
    ] + DEATH_TESTS + PARAM_TESTS

# Lazily initialized in GTestFilterUnitTest.setUp; True iff the test binary
# was built with value-parameterized tests enabled.
param_tests_present = None

# Utilities.

# Private, mutable copy of the environment; mutated by SetEnvVar and
# InvokeWithModifiedEnv so the real os.environ stays untouched.
environ = os.environ.copy()
def SetEnvVar(env_var, value):
  """Sets env_var to the given value; a value of None unsets the variable."""

  if value is None:
    # Removing an absent key is fine; pop with a default never raises.
    environ.pop(env_var, None)
  else:
    environ[env_var] = value
def RunAndReturnOutput(args=None):
  """Runs the test program and returns its output."""

  command_line = [COMMAND] + (args or [])
  return gtest_test_utils.Subprocess(command_line, env=environ).output
def RunAndExtractTestList(args=None):
  """Runs the test program; returns (list of tests run, exit code)."""

  process = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ)
  tests_run = []
  current_case = ''
  for line in process.output.split('\n'):
    # A "---- N tests from Case" banner switches the current test case.
    case_match = TEST_CASE_REGEX.match(line)
    if case_match is not None:
      current_case = case_match.group(1)
      continue
    # A "[ RUN ] Case.Test" line records one executed test.
    test_match = TEST_REGEX.match(line)
    if test_match is not None:
      tests_run.append(current_case + '.' + test_match.group(1))
  return (tests_run, process.exit_code)
def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs):
  """Calls function(*args, **kwargs) with extra_env merged into the
  module-level environment, restoring the environment afterwards."""

  saved_env = environ.copy()
  try:
    environ.update(extra_env)
    return function(*args, **kwargs)
  finally:
    # Restore the exact pre-call environment, including removed keys.
    environ.clear()
    environ.update(saved_env)
def RunWithSharding(total_shards, shard_index, command):
  """Runs one shard of the test program; returns (tests run, exit code)."""

  sharding_env = {
      SHARD_INDEX_ENV_VAR: str(shard_index),
      TOTAL_SHARDS_ENV_VAR: str(total_shards),
      }
  return InvokeWithModifiedEnv(sharding_env, RunAndExtractTestList, command)
# The unit test.
class GTestFilterUnitTest(gtest_test_utils.TestCase):
  """Tests the env variable or the command line flag to filter tests."""

  # Utilities.

  def AssertSetEqual(self, lhs, rhs):
    """Asserts that two sets are equal."""

    for elem in lhs:
      self.assert_(elem in rhs, '%s in %s' % (elem, rhs))

    for elem in rhs:
      self.assert_(elem in lhs, '%s in %s' % (elem, lhs))

  def AssertPartitionIsValid(self, set_var, list_of_sets):
    """Asserts that list_of_sets is a valid partition of set_var."""

    full_partition = []
    for slice_var in list_of_sets:
      full_partition.extend(slice_var)
    # Same total count and same members => no element is duplicated or
    # dropped across the slices.
    self.assertEqual(len(set_var), len(full_partition))
    self.assertEqual(sets.Set(set_var), sets.Set(full_partition))

  def AdjustForParameterizedTests(self, tests_to_run):
    """Adjust tests_to_run in case value parameterized tests are disabled."""

    global param_tests_present
    if not param_tests_present:
      return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS))
    else:
      return tests_to_run

  def RunAndVerify(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for a given filter."""

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # First, tests using the environment variable.

    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      tests_run = RunAndExtractTestList()[0]
      SetEnvVar(FILTER_ENV_VAR, None)
      self.AssertSetEqual(tests_run, tests_to_run)
    # pylint: enable-msg=C6403

    # Next, tests using the command line flag.

    if gtest_filter is None:
      args = []
    else:
      args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)]

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run,
                               args=None, check_exit_0=False):
    """Checks that binary runs correct tests for the given filter and shard.

    Runs all shards of gtest_filter_unittest_ with the given filter, and
    verifies that the right set of tests were run. The union of tests run
    on each shard should be identical to tests_to_run, without duplicates.

    Args:
      gtest_filter: A filter to apply to the tests.
      total_shards: A total number of shards to split test run into.
      tests_to_run: A set of tests expected to run.
      args       :  Arguments to pass to the to the test binary.
      check_exit_0: When set to a true value, make sure that all shards
                    return 0.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Windows removes empty variables from the environment when passing it
    # to a new process. This means it is impossible to pass an empty filter
    # into a process using the environment variable. However, we can still
    # test the case when the variable is not supplied (i.e., gtest_filter is
    # None).
    # pylint: disable-msg=C6403
    if CAN_TEST_EMPTY_FILTER or gtest_filter != '':
      SetEnvVar(FILTER_ENV_VAR, gtest_filter)
      partition = []
      for i in range(0, total_shards):
        (tests_run, exit_code) = RunWithSharding(total_shards, i, args)
        if check_exit_0:
          self.assertEqual(0, exit_code)
        partition.append(tests_run)

      self.AssertPartitionIsValid(tests_to_run, partition)
      SetEnvVar(FILTER_ENV_VAR, None)
    # pylint: enable-msg=C6403

  def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run):
    """Checks that the binary runs correct set of tests for the given filter.

    Runs gtest_filter_unittest_ with the given filter, and enables
    disabled tests. Verifies that the right set of tests were run.

    Args:
      gtest_filter: A filter to apply to the tests.
      tests_to_run: A set of tests expected to run.
    """

    tests_to_run = self.AdjustForParameterizedTests(tests_to_run)

    # Construct the command line.
    args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG]
    if gtest_filter is not None:
      args.append('--%s=%s' % (FILTER_FLAG, gtest_filter))

    tests_run = RunAndExtractTestList(args)[0]
    self.AssertSetEqual(tests_run, tests_to_run)

  def setUp(self):
    """Sets up test case.

    Determines whether value-parameterized tests are enabled in the binary and
    sets the flags accordingly.
    """

    global param_tests_present
    if param_tests_present is None:
      # Probed once and cached for the whole suite.
      param_tests_present = PARAM_TEST_REGEX.search(
          RunAndReturnOutput()) is not None

  def testDefaultBehavior(self):
    """Tests the behavior of not specifying the filter."""

    self.RunAndVerify(None, ACTIVE_TESTS)

  def testDefaultBehaviorWithShards(self):
    """Tests the behavior without the filter, with sharding enabled."""

    self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS)
    self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS)

  def testEmptyFilter(self):
    """Tests an empty filter."""

    self.RunAndVerify('', [])
    self.RunAndVerifyWithSharding('', 1, [])
    self.RunAndVerifyWithSharding('', 2, [])

  def testBadFilter(self):
    """Tests a filter that matches nothing."""

    self.RunAndVerify('BadFilter', [])
    self.RunAndVerifyAllowingDisabled('BadFilter', [])

  def testFullName(self):
    """Tests filtering by full name."""

    self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz'])
    self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz'])

  def testUniversalFilters(self):
    """Tests filters that match everything."""

    self.RunAndVerify('*', ACTIVE_TESTS)
    self.RunAndVerify('*.*', ACTIVE_TESTS)
    self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS)
    self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS)
    self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS)

  def testFilterByTestCase(self):
    """Tests filtering by test case name."""

    self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz'])

    BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB']
    self.RunAndVerify('BazTest.*', BAZ_TESTS)
    self.RunAndVerifyAllowingDisabled('BazTest.*',
                                      BAZ_TESTS + ['BazTest.DISABLED_TestC'])

  def testFilterByTest(self):
    """Tests filtering by test name."""

    self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne'])

  def testFilterDisabledTests(self):
    """Select only the disabled tests to run."""

    self.RunAndVerify('DISABLED_FoobarTest.Test1', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1',
                                      ['DISABLED_FoobarTest.Test1'])

    self.RunAndVerify('*DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS)

    self.RunAndVerify('*.DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [
        'BarTest.DISABLED_TestFour',
        'BarTest.DISABLED_TestFive',
        'BazTest.DISABLED_TestC',
        'DISABLED_FoobarTest.DISABLED_Test2',
        ])

    self.RunAndVerify('DISABLED_*', [])
    self.RunAndVerifyAllowingDisabled('DISABLED_*', [
        'DISABLED_FoobarTest.Test1',
        'DISABLED_FoobarTest.DISABLED_Test2',
        'DISABLED_FoobarbazTest.TestA',
        ])

  def testWildcardInTestCaseName(self):
    """Tests using wildcard in the test case name."""

    self.RunAndVerify('*a*.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS)

  def testWildcardInTestName(self):
    """Tests using wildcard in the test name."""

    self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testFilterWithoutDot(self):
    """Tests a filter that has no '.' in it."""

    self.RunAndVerify('*z*', [
        'FooTest.Xyz',

        'BazTest.TestOne',
        'BazTest.TestA',
        'BazTest.TestB',
        ])

  def testTwoPatterns(self):
    """Tests filters that consist of two patterns."""

    self.RunAndVerify('Foo*.*:*A*', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BazTest.TestA',
        ])

    # An empty pattern + a non-empty one
    self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA'])

  def testThreePatterns(self):
    """Tests filters that consist of three patterns."""

    self.RunAndVerify('*oo*:*A*:*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        'BazTest.TestA',
        ])

    # The 2nd pattern is empty.
    self.RunAndVerify('*oo*::*One', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',

        'BazTest.TestOne',
        ])

    # The last 2 patterns are empty.
    self.RunAndVerify('*oo*::', [
        'FooTest.Abc',
        'FooTest.Xyz',
        ])

  def testNegativeFilters(self):
    """Tests filters with negative (excluded) patterns after a '-'."""

    self.RunAndVerify('*-BazTest.TestOne', [
        'FooTest.Abc',
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',

        'BazTest.TestA',
        'BazTest.TestB',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('*-FooTest.Abc:BazTest.*', [
        'FooTest.Xyz',

        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    self.RunAndVerify('BarTest.*-BarTest.TestOne', [
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ])

    # Tests without leading '*'.
    self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [
        'BarTest.TestOne',
        'BarTest.TestTwo',
        'BarTest.TestThree',
        ] + DEATH_TESTS + PARAM_TESTS)

    # Value parameterized tests.
    self.RunAndVerify('*/*', PARAM_TESTS)

    # Value parameterized tests filtering by the sequence name.
    self.RunAndVerify('SeqP/*', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestX/1',
        'SeqP/ParamTest.TestY/0',
        'SeqP/ParamTest.TestY/1',
        ])

    # Value parameterized tests filtering by the test name.
    self.RunAndVerify('*/0', [
        'SeqP/ParamTest.TestX/0',
        'SeqP/ParamTest.TestY/0',
        'SeqQ/ParamTest.TestX/0',
        'SeqQ/ParamTest.TestY/0',
        ])

  def testFlagOverridesEnvVar(self):
    """Tests that the filter flag overrides the filtering env. variable."""

    SetEnvVar(FILTER_ENV_VAR, 'Foo*')
    args = ['--%s=%s' % (FILTER_FLAG, '*One')]
    tests_run = RunAndExtractTestList(args)[0]
    SetEnvVar(FILTER_ENV_VAR, None)

    # The flag value ('*One') wins over the env var ('Foo*').
    self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne'])

  def testShardStatusFileIsCreated(self):
    """Tests that the shard file is created if specified in the environment."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      InvokeWithModifiedEnv(extra_env, RunAndReturnOutput)
    finally:
      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  def testShardStatusFileIsCreatedWithListTests(self):
    """Tests that the shard file is created with the "list_tests" flag."""

    shard_status_file = os.path.join(gtest_test_utils.GetTempDir(),
                                     'shard_status_file2')
    self.assert_(not os.path.exists(shard_status_file))

    extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file}
    try:
      output = InvokeWithModifiedEnv(extra_env,
                                     RunAndReturnOutput,
                                     [LIST_TESTS_FLAG])
    finally:
      # This assertion ensures that Google Test enumerated the tests as
      # opposed to running them.
      self.assert_('[==========]' not in output,
                   'Unexpected output during test enumeration.\n'
                   'Please ensure that LIST_TESTS_FLAG is assigned the\n'
                   'correct flag value for listing Google Test tests.')

      self.assert_(os.path.exists(shard_status_file))
      os.remove(shard_status_file)

  # Death tests are only exercised when the binary was built with them.
  if SUPPORTS_DEATH_TESTS:
    def testShardingWorksWithDeathTests(self):
      """Tests integration with death tests and sharding."""

      gtest_filter = 'HasDeathTest.*:SeqP/*'
      expected_tests = [
          'HasDeathTest.Test1',
          'HasDeathTest.Test2',

          'SeqP/ParamTest.TestX/0',
          'SeqP/ParamTest.TestX/1',
          'SeqP/ParamTest.TestY/0',
          'SeqP/ParamTest.TestY/1',
          ]

      for flag in ['--gtest_death_test_style=threadsafe',
                   '--gtest_death_test_style=fast']:
        self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests,
                                      check_exit_0=True, args=[flag])
        self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests,
                                      check_exit_0=True, args=[flag])
# Script entry point: delegate to Google Test's Python test driver.
if __name__ == '__main__':
  gtest_test_utils.Main()
| bsd-3-clause |
Arvedui/i3pystatus | i3pystatus/alsa.py | 11 | 5178 | from alsaaudio import Mixer, ALSAAudioError
from math import exp, log, log10, ceil, floor
from i3pystatus import IntervalModule
class ALSA(IntervalModule):
    """
    Shows volume of ALSA mixer. You can also use this for inputs, btw.

    Requires pyalsaaudio

    .. rubric:: Available formatters

    * `{volume}` — the current volume in percent
    * `{muted}` — the value of one of the `muted` or `unmuted` settings
    * `{card}` — the associated soundcard
    * `{mixer}` — the associated ALSA mixer
    * `{db}` — the current volume in decibels (populated by ``run``)

    """

    interval = 1

    settings = (
        "format",
        ("format_muted", "optional format string to use when muted"),
        ("mixer", "ALSA mixer"),
        ("mixer_id", "ALSA mixer id"),
        ("card", "ALSA sound card"),
        ("increment", "integer percentage of max volume to in/decrement volume on mousewheel"),
        "muted", "unmuted",
        "color_muted", "color",
        "channel",
        ("map_volume", "volume display/setting as in AlsaMixer. increment option is ignored then.")
    )

    # Default setting values (overridable via the `settings` mechanism).
    muted = "M"
    unmuted = ""
    color_muted = "#AAAAAA"
    color = "#FFFFFF"
    format = "♪: {volume}"
    format_muted = None
    mixer = "Master"
    mixer_id = 0
    card = 0
    channel = 0
    increment = 5
    map_volume = False

    alsamixer = None
    # Flipped to False in init() if the mixer has no mute switch.
    has_mute = True

    on_upscroll = "increase_volume"
    on_downscroll = "decrease_volume"
    on_leftclick = "switch_mute"
    on_rightclick = on_leftclick

    def init(self):
        """Open the mixer, probe mute support and cache the dB range."""
        self.create_mixer()

        # Capture-only or switchless controls raise on getmute(); remember
        # that so run()/switch_mute() can skip mute handling.
        try:
            self.alsamixer.getmute()
        except ALSAAudioError:
            self.has_mute = False

        self.fdict = {
            "card": self.alsamixer.cardname(),
            "mixer": self.mixer,
        }
        # getrange() returns (min dB, max dB) for this control.
        self.dbRng = self.alsamixer.getrange()
        self.dbMin = self.dbRng[0]
        self.dbMax = self.dbRng[1]

    def create_mixer(self):
        # A Mixer handle caches state, so it is re-created on every poll
        # (see run()) to observe changes made by other programs.
        self.alsamixer = Mixer(
            control=self.mixer, id=self.mixer_id, cardindex=self.card)

    def run(self):
        """Poll the mixer and build the status-bar output dict."""
        self.create_mixer()

        muted = False
        if self.has_mute:
            muted = self.alsamixer.getmute()[self.channel] == 1

        self.fdict["volume"] = self.get_cur_volume()
        self.fdict["muted"] = self.muted if muted else self.unmuted
        self.fdict["db"] = self.get_db()

        # Fall back to the normal format when no muted format is set.
        if muted and self.format_muted is not None:
            output_format = self.format_muted
        else:
            output_format = self.format

        self.data = self.fdict
        self.output = {
            "full_text": output_format.format(**self.fdict),
            "color": self.color_muted if muted else self.color,
        }

    def switch_mute(self):
        # Toggle the mute switch, if this control has one.
        if self.has_mute:
            muted = self.alsamixer.getmute()[self.channel]
            self.alsamixer.setmute(not muted)

    def get_cur_volume(self):
        """Return the current volume as an integer percentage.

        With map_volume enabled the raw percentage is converted through
        an exponential dB mapping; the 6000.0 factor corresponds to a
        60 dB mapping range in centi-dB hundredths — presumably matching
        alsamixer's own volume mapping (TODO confirm against alsa-utils).
        """
        if self.map_volume:
            dbCur = self.get_db() * 100.0
            dbMin = self.dbMin * 100.0
            dbMax = self.dbMax * 100.0
            dbCur_norm = self.exp10((dbCur - dbMax) / 6000.0)
            dbMin_norm = self.exp10((dbMin - dbMax) / 6000.0)
            vol = (dbCur_norm - dbMin_norm) / (1 - dbMin_norm)
            vol = int(round(vol * 100, 0))
            return vol
        else:
            return self.alsamixer.getvolume()[self.channel]

    def get_new_volume(self, direction):
        """Compute the next mapped volume one mapped-percent step away.

        direction is "inc" or "dec" (any other value is unsupported).
        NOTE(review): reads self.fdict["volume"], so run() must have
        populated fdict at least once before this is called.
        """
        if direction == "inc":
            volume = (self.fdict["volume"] + 1) / 100
        elif direction == "dec":
            volume = (self.fdict["volume"] - 1) / 100
        dbMin = self.dbMin * 100
        dbMax = self.dbMax * 100
        dbMin_norm = self.exp10((dbMin - dbMax) / 6000.0)
        vol = volume * (1 - dbMin_norm) + dbMin_norm
        # ceil on the way up / floor on the way down guarantees the step
        # actually moves the (rounded) dB value, then clamp to the range.
        if direction == "inc":
            dbNew = min(self.dbMax, ceil(((6000.0 * log10(vol)) + dbMax) / 100))
        elif direction == "dec":
            dbNew = max(self.dbMin, floor(((6000.0 * log10(vol)) + dbMax) / 100))
        volNew = int(round(self.map_db(dbNew, self.dbMin, self.dbMax, 0, 100), 0))
        return volNew

    def increase_volume(self, delta=None):
        # Mousewheel-up handler; `delta` overrides the increment setting.
        if self.map_volume:
            vol = self.get_new_volume("inc")
            self.alsamixer.setvolume(vol)
        else:
            vol = self.alsamixer.getvolume()[self.channel]
            self.alsamixer.setvolume(min(100, vol + (delta if delta else self.increment)))

    def decrease_volume(self, delta=None):
        # Mousewheel-down handler; `delta` overrides the increment setting.
        if self.map_volume:
            vol = self.get_new_volume("dec")
            self.alsamixer.setvolume(vol)
        else:
            vol = self.alsamixer.getvolume()[self.channel]
            self.alsamixer.setvolume(max(0, vol - (delta if delta else self.increment)))

    def get_db(self):
        # Linear interpolation of the raw 0-100 volume into the dB range,
        # rounded to the nearest whole dB.
        db = (((self.dbMax - self.dbMin) / 100) * self.alsamixer.getvolume()[self.channel]) + self.dbMin
        db = int(round(db, 0))
        return db

    def map_db(self, value, dbMin, dbMax, volMin, volMax):
        # Linearly map `value` from [dbMin, dbMax] onto [volMin, volMax].
        dbRange = dbMax - dbMin
        volRange = volMax - volMin
        volScaled = float(value - dbMin) / float(dbRange)
        return volMin + (volScaled * volRange)

    def exp10(self, x):
        # 10**x via exp/log (base-10 exponential).
        return exp(x * log(10))
| mit |
dch312/scipy | scipy/optimize/zeros.py | 55 | 19069 | from __future__ import division, print_function, absolute_import
import warnings
from . import _zeros
from numpy import finfo, sign, sqrt
# Default parameters shared by the root finders in this module.
_iter = 100
_xtol = 1e-12
# Smallest sensible relative tolerance: twice the float64 machine epsilon.
_rtol = finfo(float).eps * 2

__all__ = ['newton', 'bisect', 'ridder', 'brentq', 'brenth']
# Termination-status strings reported back to the user.
CONVERGED = 'converged'
SIGNERR = 'sign error'
CONVERR = 'convergence error'

# Integer flag returned by the C solvers -> human-readable description.
flag_map = {0: CONVERGED, -1: SIGNERR, -2: CONVERR}


class RootResults(object):
    """Container for the outcome of a root-finding run.

    Attributes
    ----------
    root : float
        Estimated root location.
    iterations : int
        Number of iterations needed to find the root.
    function_calls : int
        Number of times the function was called.
    converged : bool
        True if the routine converged.
    flag : str
        Description of the cause of termination.

    """

    def __init__(self, root, iterations, function_calls, flag):
        self.root = root
        self.iterations = iterations
        self.function_calls = function_calls
        self.converged = (flag == 0)
        # Unrecognized flags get a generic description instead of raising.
        self.flag = flag_map.get(flag, 'unknown error %d' % (flag,))
def results_c(full_output, r):
    """Repackage a C solver's return value.

    When ``full_output`` is false the raw result ``r`` passes through
    untouched; otherwise ``r`` is unpacked into the root estimate plus a
    ``RootResults`` summary object, returned as a 2-tuple.
    """
    if not full_output:
        return r
    x, funcalls, iterations, flag = r
    summary = RootResults(root=x,
                          iterations=iterations,
                          function_calls=funcalls,
                          flag=flag)
    return x, summary
# Newton-Raphson method
def newton(func, x0, fprime=None, args=(), tol=1.48e-8, maxiter=50,
           fprime2=None):
    """Find a zero of `func` near the starting point `x0`.

    Newton-Raphson iteration is used when `fprime` is supplied, parabolic
    Halley's method when `fprime2` is supplied as well, and the secant
    method otherwise.

    Parameters
    ----------
    func : function
        Function of a single variable of the form ``f(x, a, b, c...)``,
        where ``a, b, c...`` are extra arguments passed via `args`.
    x0 : float
        Initial estimate, assumed to be somewhere near the actual zero.
    fprime : function, optional
        Derivative of `func`; if None (default) the secant method is used.
    args : tuple, optional
        Extra arguments used in the function call.
    tol : float, optional
        Allowable error of the zero value.
    maxiter : int, optional
        Maximum number of iterations.
    fprime2 : function, optional
        Second derivative of `func`; when given, parabolic Halley's
        method is used.

    Returns
    -------
    zero : float
        Estimated location where the function is zero.

    See Also
    --------
    brentq, brenth, ridder, bisect
    fsolve : find zeroes in n dimensions.

    Notes
    -----
    Convergence is quadratic for Newton-Raphson, cubic for Halley, and
    sub-quadratic for the secant method.  The stopping criterion is the
    step size, not the residual, so there is no guarantee a zero has been
    found and the result should be verified.  The bracketing solvers
    (`brentq`, `brenth`, `ridder`, `bisect`) are safer when a
    sign-changing interval is known.
    """
    if tol <= 0:
        raise ValueError("tol too small (%g <= 0)" % tol)
    if fprime is not None:
        # --- Newton-Raphson (or Halley, when fprime2 is given) ---
        # Multiplying by 1.0 promotes to floating point while still
        # working for a complex x0.
        p0 = 1.0 * x0
        fder2 = 0
        for _ in range(maxiter):
            myargs = (p0,) + args
            fder = fprime(*myargs)
            if fder == 0:
                warnings.warn("derivative was zero.", RuntimeWarning)
                return p0
            fval = func(*myargs)
            if fprime2 is not None:
                fder2 = fprime2(*myargs)
            if fder2 == 0:
                # Plain Newton step.
                p = p0 - fval / fder
            else:
                # Parabolic Halley step; fall back to a derivative-ratio
                # step when the discriminant goes negative.
                discr = fder ** 2 - 2 * fval * fder2
                if discr < 0:
                    p = p0 - fder / fder2
                else:
                    p = p0 - 2 * fval / (fder + sign(fder) * sqrt(discr))
            if abs(p - p0) < tol:
                return p
            p0 = p
    else:
        # --- Secant method: start from x0 and a nearby second point ---
        p0 = x0
        if x0 >= 0:
            p1 = x0 * (1 + 1e-4) + 1e-4
        else:
            p1 = x0 * (1 + 1e-4) - 1e-4
        q0 = func(*((p0,) + args))
        q1 = func(*((p1,) + args))
        for _ in range(maxiter):
            if q1 == q0:
                # Flat secant: cannot take a step; warn (unless the two
                # points coincide) and return the midpoint.
                if p1 != p0:
                    warnings.warn("Tolerance of %s reached" % (p1 - p0),
                                  RuntimeWarning)
                return (p1 + p0) / 2.0
            p = p1 - q1 * (p1 - p0) / (q1 - q0)
            if abs(p - p1) < tol:
                return p
            p0, q0 = p1, q1
            p1 = p
            q1 = func(*((p1,) + args))
    raise RuntimeError("Failed to converge after %d iterations, value is %s"
                       % (maxiter, p))
def bisect(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of `f` in the sign-changing interval [a, b] by bisection.

    Slow but sure: basic bisection, with the work done in the compiled
    ``_zeros._bisect`` routine.

    Parameters
    ----------
    f : function
        Continuous function with ``f(a)`` and ``f(b)`` of opposite signs.
    a, b : number
        Ends of the bracketing interval [a, b].
    xtol : number, optional
        Absolute tolerance on the root location; must be positive.
    rtol : number, optional
        Relative tolerance on the root location; must be at least
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised.
    args : tuple, optional
        Extra arguments passed to `f`.
    full_output : bool, optional
        If True, return ``(x, r)`` where ``r`` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError when the algorithm fails to converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Convergence information; ``r.converged`` is True on success.

    See Also
    --------
    brentq, brenth, ridder, newton
    fixed_point : scalar fixed-point finder
    fsolve : n-dimensional root-finding
    """
    args = args if isinstance(args, tuple) else (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._bisect(f, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw)
def ridder(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of `f` in the sign-changing interval [a, b] using
    Ridders' method.

    Ridders' method is faster than bisection but generally not as fast as
    the Brent routines.  The implementation (in ``_zeros._ridder``)
    diverges slightly from standard presentations in order to be a bit
    more careful of tolerance.

    Parameters
    ----------
    f : function
        Continuous function with ``f(a)`` and ``f(b)`` of opposite signs.
    a, b : number
        Ends of the bracketing interval [a, b].
    xtol : number, optional
        Absolute tolerance on the root location; must be positive.
    rtol : number, optional
        Relative tolerance on the root location; must be at least
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised.
    args : tuple, optional
        Extra arguments passed to `f`.
    full_output : bool, optional
        If True, return ``(x, r)`` where ``r`` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError when the algorithm fails to converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Convergence information; ``r.converged`` is True on success.

    See Also
    --------
    brentq, brenth, bisect, newton : one-dimensional root-finding
    fixed_point : scalar fixed-point finder

    References
    ----------
    Ridders, C. F. J. "A New Algorithm for Computing a Single Root of a
    Real Continuous Function."  IEEE Trans. Circuits Systems 26, 979-980,
    1979.
    """
    args = args if isinstance(args, tuple) else (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._ridder(f, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw)
def brentq(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of `f` in the sign-changing interval [a, b] using the
    classic Brent (1973) method.

    Generally considered the best of the rootfinding routines here.  It is
    a safe version of the secant method that combines root bracketing,
    interval bisection, and inverse quadratic interpolation, and is
    sometimes known as the van Wijngaarden-Dekker-Brent method.  Brent
    (1973) claims convergence is guaranteed for functions computable
    within [a, b].  This implementation (in ``_zeros._brentq``) diverges a
    bit from standard presentations: a different formula is used for the
    extrapolation step.

    Parameters
    ----------
    f : function
        Continuous function with ``f(a)`` and ``f(b)`` of opposite signs.
    a, b : number
        Ends of the bracketing interval [a, b].
    xtol : number, optional
        Absolute tolerance on the root location; must be positive.
    rtol : number, optional
        Relative tolerance on the root location; must be at least
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised.
    args : tuple, optional
        Extra arguments passed to `f`.
    full_output : bool, optional
        If True, return ``(x, r)`` where ``r`` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError when the algorithm fails to converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Convergence information; ``r.converged`` is True on success.

    See Also
    --------
    brenth, ridder, bisect, newton : one-dimensional root-finding
    fsolve : n-dimensional root-finding
    fixed_point : scalar fixed-point finder

    References
    ----------
    Brent, R. P., *Algorithms for Minimization Without Derivatives*.
    Englewood Cliffs, NJ: Prentice-Hall, 1973. Ch. 3-4.

    Press, W. H. et al., *Numerical Recipes in FORTRAN*, 2nd ed.,
    Cambridge University Press, pp. 352-355, 1992.  Section 9.3:
    "Van Wijngaarden-Dekker-Brent Method."
    """
    args = args if isinstance(args, tuple) else (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._brentq(f, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw)
def brenth(f, a, b, args=(),
           xtol=_xtol, rtol=_rtol, maxiter=_iter,
           full_output=False, disp=True):
    """Find a root of `f` in the sign-changing interval [a, b] with a
    Brent-style routine using hyperbolic extrapolation.

    A variation on the classic Brent routine that uses hyperbolic
    extrapolation instead of inverse quadratic extrapolation.  Generally
    on a par with `brentq`, but not as heavily tested.  It is a safe
    version of the secant method; the version here (``_zeros._brenth``)
    is by Chuck Harris.

    Parameters
    ----------
    f : function
        Continuous function with ``f(a)`` and ``f(b)`` of opposite signs.
    a, b : number
        Ends of the bracketing interval [a, b].
    xtol : number, optional
        Absolute tolerance on the root location; must be positive.
    rtol : number, optional
        Relative tolerance on the root location; must be at least
        ``np.finfo(float).eps * 2`` (the default).
    maxiter : number, optional
        Maximum number of iterations before an error is raised.
    args : tuple, optional
        Extra arguments passed to `f`.
    full_output : bool, optional
        If True, return ``(x, r)`` where ``r`` is a `RootResults` object.
    disp : bool, optional
        If True, raise RuntimeError when the algorithm fails to converge.

    Returns
    -------
    x0 : float
        Zero of `f` between `a` and `b`.
    r : RootResults (present if ``full_output = True``)
        Convergence information; ``r.converged`` is True on success.

    See Also
    --------
    brentq, ridder, bisect, newton : one-dimensional root-finding
    fsolve : n-dimensional root-finding
    fixed_point : scalar fixed-point finder
    """
    args = args if isinstance(args, tuple) else (args,)
    if xtol <= 0:
        raise ValueError("xtol too small (%g <= 0)" % xtol)
    if rtol < _rtol:
        raise ValueError("rtol too small (%g < %g)" % (rtol, _rtol))
    raw = _zeros._brenth(f, a, b, xtol, rtol, maxiter, args,
                         full_output, disp)
    return results_c(full_output, raw)
| bsd-3-clause |
raymondxyang/tensorflow | tensorflow/python/training/adam_test.py | 51 | 12346 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Adam."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
def adam_update_numpy(param,
                      g_t,
                      t,
                      m,
                      v,
                      alpha=0.001,
                      beta1=0.9,
                      beta2=0.999,
                      epsilon=1e-8):
  """Reference NumPy implementation of a single Adam update step.

  Computes the update for timestep `t` (1-based) given the current
  parameters, the gradient `g_t`, and the running first/second moment
  estimates `m` and `v`, and returns the updated
  (parameters, first moment, second moment) triple.
  """
  # Bias-corrected effective step size for timestep t.
  lr_t = alpha * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
  # Exponential moving averages of the gradient and its elementwise square.
  m_t = beta1 * m + (1 - beta1) * g_t
  v_t = beta2 * v + (1 - beta2) * g_t * g_t
  new_param = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
  return new_param, m_t, v_t
class AdamOptimizerTest(test.TestCase):
  """Tests tf.train.AdamOptimizer against the NumPy reference
  implementation in adam_update_numpy() for dense, sparse, and resource
  variables across float dtypes."""

  def doTestSparse(self, use_resource=False):
    """Compare 3 sparse (IndexedSlices) Adam steps against NumPy."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0_np_indices = np.array([0, 1], dtype=np.int32)
        grads0 = ops.IndexedSlices(
            constant_op.constant(grads0_np),
            constant_op.constant(grads0_np_indices), constant_op.constant([2]))
        grads1_np_indices = np.array([0, 1], dtype=np.int32)
        grads1 = ops.IndexedSlices(
            constant_op.constant(grads1_np),
            constant_op.constant(grads1_np_indices), constant_op.constant([2]))
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSparse(self):
    """Sparse updates with ref variables."""
    self.doTestSparse(use_resource=False)

  def testResourceSparse(self):
    """Sparse updates with resource variables."""
    self.doTestSparse(use_resource=True)

  def testSparseDevicePlacement(self):
    """All optimizer ops must be placeable on GPU when one is present."""
    for index_dtype in [dtypes.int32, dtypes.int64]:
      with self.test_session(force_gpu=test.is_gpu_available()):
        # If a GPU is available, tests that all optimizer ops can be placed on
        # it (i.e. they have GPU kernels).
        var = variables.Variable([[1.0], [2.0]])
        indices = constant_op.constant([0, 1], dtype=index_dtype)
        gathered_sum = math_ops.reduce_sum(array_ops.gather(var, indices))
        optimizer = adam.AdamOptimizer(3.0)
        minimize_op = optimizer.minimize(gathered_sum)
        variables.global_variables_initializer().run()
        minimize_op.run()

  def testSparseRepeatedIndices(self):
    """Repeated sparse indices must aggregate like a single summed update."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        repeated_index_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        aggregated_update_var = variables.Variable(
            [[1.0], [2.0]], dtype=dtype)
        grad_repeated_index = ops.IndexedSlices(
            constant_op.constant(
                [0.1, 0.1], shape=[2, 1], dtype=dtype),
            constant_op.constant([1, 1]),
            constant_op.constant([2, 1]))
        grad_aggregated = ops.IndexedSlices(
            constant_op.constant(
                [0.2], shape=[1, 1], dtype=dtype),
            constant_op.constant([1]),
            constant_op.constant([2, 1]))
        repeated_update = adam.AdamOptimizer().apply_gradients(
            [(grad_repeated_index, repeated_index_update_var)])
        aggregated_update = adam.AdamOptimizer().apply_gradients(
            [(grad_aggregated, aggregated_update_var)])
        variables.global_variables_initializer().run()
        self.assertAllClose(aggregated_update_var.eval(),
                            repeated_index_update_var.eval())
        for _ in range(3):
          repeated_update.run()
          aggregated_update.run()
          self.assertAllClose(aggregated_update_var.eval(),
                              repeated_index_update_var.eval())

  def doTestBasic(self, use_resource=False):
    """Compare 3 dense Adam steps against NumPy."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        if use_resource:
          var0 = resource_variable_ops.ResourceVariable(var0_np)
          var1 = resource_variable_ops.ResourceVariable(var1_np)
        else:
          var0 = variables.Variable(var0_np)
          var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.AdamOptimizer()
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testBasic(self):
    """Dense updates with ref variables."""
    self.doTestBasic(use_resource=False)

  def testResourceBasic(self):
    """Dense updates with resource variables."""
    self.doTestBasic(use_resource=True)

  def testTensorLearningRate(self):
    """Adam must accept the learning rate as a Tensor, not only a float."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.AdamOptimizer(constant_op.constant(0.001))
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Run 3 steps of Adam
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          update.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testSharing(self):
    """Two apply_gradients ops from one optimizer must share slot state."""
    for dtype in [dtypes.half, dtypes.float32, dtypes.float64]:
      with self.test_session():
        # Initialize variables for numpy implementation.
        m0, v0, m1, v1 = 0.0, 0.0, 0.0, 0.0
        var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
        grads0_np = np.array([0.1, 0.1], dtype=dtype.as_numpy_dtype)
        var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
        grads1_np = np.array([0.01, 0.01], dtype=dtype.as_numpy_dtype)
        var0 = variables.Variable(var0_np)
        var1 = variables.Variable(var1_np)
        grads0 = constant_op.constant(grads0_np)
        grads1 = constant_op.constant(grads1_np)
        opt = adam.AdamOptimizer()
        update1 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        update2 = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        variables.global_variables_initializer().run()
        beta1_power, beta2_power = opt._get_beta_accumulators()
        # Fetch params to validate initial values
        self.assertAllClose([1.0, 2.0], var0.eval())
        self.assertAllClose([3.0, 4.0], var1.eval())
        # Run 3 steps of intertwined Adam1 and Adam2.
        for t in range(1, 4):
          self.assertAllCloseAccordingToType(0.9**t, beta1_power.eval())
          self.assertAllCloseAccordingToType(0.999**t, beta2_power.eval())
          if t % 2 == 0:
            update1.run()
          else:
            update2.run()
          var0_np, m0, v0 = adam_update_numpy(var0_np, grads0_np, t, m0, v0)
          var1_np, m1, v1 = adam_update_numpy(var1_np, grads1_np, t, m1, v1)
          # Validate updated params
          self.assertAllCloseAccordingToType(var0_np, var0.eval())
          self.assertAllCloseAccordingToType(var1_np, var1.eval())

  def testTwoSessions(self):
    """One optimizer reused across two graphs must not leak graph state."""
    optimizer = adam.AdamOptimizer()
    g = ops.Graph()
    with g.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        optimizer.apply_gradients([(grads0, var0)])
    gg = ops.Graph()
    with gg.as_default():
      with session.Session():
        var0 = variables.Variable(np.array([1.0, 2.0]), name="v0")
        grads0 = constant_op.constant(np.array([0.1, 0.1]))
        # If the optimizer saves any state not keyed by graph the following line
        # fails.
        optimizer.apply_gradients([(grads0, var0)])
if __name__ == "__main__":
test.main()
| apache-2.0 |
w3nd1go/android_external_skia | platform_tools/android/tests/android_framework_gyp_tests.py | 145 | 2418 | #!/usr/bin/python
# Copyright 2014 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Test gyp_to_android.py
"""
import os
import shutil
import sys
import tempfile
import test_variables
import unittest
sys.path.append(test_variables.ANDROID_DIR)
import gyp_gen.android_framework_gyp
GYPD_SUFFIX = ".gypd"
GYP_SUFFIX = ".gyp"
GYPI_SUFFIX = ".gypi"
OTHER_SUFFIX = ".txt"
class CleanGypdTest(unittest.TestCase):
  """Tests for gyp_gen.android_framework_gyp.clean_gypd_files()."""

  def setUp(self):
    self.__tmp_dir = tempfile.mkdtemp()
    self.__num_files = 10
    # Fill the dir with four types of files. .gypd files should be deleted by
    # clean_gypd_files(), while the rest should be left alone.
    for i in range(self.__num_files):
      self.create_file('%s%s' % (str(i), GYPD_SUFFIX))
      self.create_file('%s%s' % (str(i), GYPI_SUFFIX))
      self.create_file('%s%s' % (str(i), GYP_SUFFIX))
      self.create_file('%s%s' % (str(i), OTHER_SUFFIX))

  def create_file(self, basename):
    """Create a file named 'basename' in self.__tmp_dir."""
    fd, tmp_name = tempfile.mkstemp(dir=self.__tmp_dir)
    # mkstemp returns an *open* OS-level file descriptor; close it right
    # away, otherwise every created file leaks a descriptor for the
    # lifetime of the test process (40 leaks per run with the old code,
    # which discarded the descriptor without closing it).
    os.close(fd)
    os.rename(tmp_name, os.path.join(self.__tmp_dir, basename))
    self.assert_file_exists(basename)

  def assert_file_exists(self, basename):
    """Assert that 'basename' exists in self.__tmp_dir."""
    full_name = os.path.join(self.__tmp_dir, basename)
    self.assertTrue(os.path.exists(full_name))

  def assert_file_does_not_exist(self, basename):
    """Assert that 'basename' does not exist in self.__tmp_dir."""
    full_name = os.path.join(self.__tmp_dir, basename)
    self.assertFalse(os.path.exists(full_name))

  def test_clean(self):
    """Test that clean_gypd_files() deletes .gypd files, and leaves others."""
    gyp_gen.android_framework_gyp.clean_gypd_files(self.__tmp_dir)
    for i in range(self.__num_files):
      self.assert_file_exists('%s%s' % (str(i), GYPI_SUFFIX))
      self.assert_file_exists('%s%s' % (str(i), GYP_SUFFIX))
      self.assert_file_exists('%s%s' % (str(i), OTHER_SUFFIX))
      # Only the GYPD files should have been deleted.
      self.assert_file_does_not_exist('%s%s' % (str(i), GYPD_SUFFIX))

  def tearDown(self):
    shutil.rmtree(self.__tmp_dir)
def main():
  """Run the CleanGypdTest suite with a verbose text runner."""
  suite = unittest.TestLoader().loadTestsFromTestCase(CleanGypdTest)
  unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == "__main__":
main()
| bsd-3-clause |
djeraseit/eucalyptus | clc/eucadmin/eucadmin/deregisterobjectstoragegateway.py | 6 | 1507 | # Copyright 2011-2012 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import eucadmin.deregisterrequest
class DeregisterObjectStorageGateway(eucadmin.deregisterrequest.DeregisterRequest):
    """Admin request that deregisters an Object Storage Gateway component.

    All request plumbing is inherited from DeregisterRequest; this class
    only supplies the component's service name.
    """
    # Component type used by the inherited deregistration request.
    ServiceName = 'ObjectStorageGateway'
| gpl-3.0 |
KohlsTechnology/ansible | lib/ansible/modules/network/cloudengine/ce_vlan.py | 13 | 20876 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_vlan
version_added: "2.4"
short_description: Manages VLAN resources and attributes on Huawei CloudEngine switches.
description:
- Manages VLAN configurations on Huawei CloudEngine switches.
author: QijunPan (@CloudEngine-Ansible)
options:
vlan_id:
description:
- Single VLAN ID, in the range from 1 to 4094.
vlan_range:
description:
- Range of VLANs such as C(2-10) or C(2,5,10-15), etc.
name:
description:
- Name of VLAN, in the range from 1 to 31.
description:
description:
- Specify VLAN description, in the range from 1 to 80.
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: vlan module test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: Ensure a range of VLANs are not present on the switch
ce_vlan:
vlan_range: "2-10,20,50,55-60,100-150"
state: absent
provider: "{{ cli }}"
- name: Ensure VLAN 50 exists with the name WEB
ce_vlan:
vlan_id: 50
name: WEB
state: absent
provider: "{{ cli }}"
- name: Ensure VLAN is NOT on the device
ce_vlan:
vlan_id: 50
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed_vlans_list:
description: list of VLANs being proposed
returned: always
type: list
sample: ["100"]
existing_vlans_list:
description: list of existing VLANs on the switch prior to making changes
returned: always
type: list
sample: ["1", "2", "3", "4", "5", "20"]
end_state_vlans_list:
description: list of VLANs after the module is executed
returned: always
type: list
sample: ["1", "2", "3", "4", "5", "20", "100"]
proposed:
description: k/v pairs of parameters passed into module (does not include
vlan_id or vlan_range)
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" }
existing:
description: k/v pairs of existing vlan or null when using vlan_range
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "" }
end_state:
description: k/v pairs of the VLAN after executing module or null
when using vlan_range
returned: always
type: dict
sample: {"vlan_id":"20", "name": "VLAN_APP", "description": "vlan for app" }
updates:
description: command string sent to the device
returned: always
type: list
sample: ["vlan 20", "name VLAN20"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, execute_nc_action, ce_argument_spec
CE_NC_CREATE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="create">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_DELETE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="delete">
<vlanId>%s</vlanId>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN_DES = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN_NAME = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_MERGE_VLAN = """
<config>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan operation="merge">
<vlanId>%s</vlanId>
<vlanName>%s</vlanName>
<vlanDesc>%s</vlanDesc>
<vlanType></vlanType>
<subVlans/>
</vlan>
</vlans>
</vlan>
</config>
"""
CE_NC_GET_VLAN = """
<filter type="subtree">
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan>
<vlanId>%s</vlanId>
<vlanDesc/>
<vlanName/>
</vlan>
</vlans>
</vlan>
</filter>
"""
CE_NC_GET_VLANS = """
<filter type="subtree">
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<vlans>
<vlan>
<vlanId/>
<vlanName/>
</vlan>
</vlans>
</vlan>
</filter>
"""
CE_NC_CREATE_VLAN_BATCH = """
<action>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<shVlanBatchCrt>
<vlans>%s:%s</vlans>
</shVlanBatchCrt>
</vlan>
</action>
"""
CE_NC_DELETE_VLAN_BATCH = """
<action>
<vlan xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<shVlanBatchDel>
<vlans>%s:%s</vlans>
</shVlanBatchDel>
</vlan>
</action>
"""
class Vlan(object):
"""
Manages VLAN resources and attributes
"""
    def __init__(self, argument_spec):
        """Bind playbook parameters and initialize change-tracking state.

        Args:
            argument_spec: dict handed straight to AnsibleModule.
        """
        self.spec = argument_spec
        self.module = None
        self.init_module()
        # vlan config info
        self.vlan_id = self.module.params['vlan_id']
        self.vlan_range = self.module.params['vlan_range']
        self.name = self.module.params['name']
        self.description = self.module.params['description']
        self.state = self.module.params['state']
        # state
        self.changed = False
        self.vlan_exist = False          # True once vlan_id is found on the device
        self.vlan_attr_exist = None      # attrs of vlan_id before any change (dict or None)
        self.vlans_list_exist = list()   # every vlan id present before the run
        self.vlans_list_change = list()  # vlan ids this run will add/remove
        self.updates_cmd = list()        # CLI-equivalent commands reported in the result
        self.results = dict()
        self.vlan_attr_end = dict()      # attrs of vlan_id after the run
    def init_module(self):
        """Build the AnsibleModule.

        vlan_id and vlan_range are mutually exclusive and exactly one of
        them is required; check mode is supported.
        """
        required_one_of = [["vlan_id", "vlan_range"]]
        mutually_exclusive = [["vlan_id", "vlan_range"]]
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_one_of=required_one_of,
            mutually_exclusive=mutually_exclusive,
            supports_check_mode=True)
def check_response(self, xml_str, xml_name):
"""Check if response message is already succeed."""
if "<ok/>" not in xml_str:
self.module.fail_json(msg='Error: %s failed.' % xml_name)
def config_vlan(self, vlan_id, name='', description=''):
"""Create vlan."""
if name is None:
name = ''
if description is None:
description = ''
conf_str = CE_NC_CREATE_VLAN % (vlan_id, name, description)
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "CREATE_VLAN")
self.changed = True
def merge_vlan(self, vlan_id, name, description):
"""Merge vlan."""
conf_str = None
if not name and description:
conf_str = CE_NC_MERGE_VLAN_DES % (vlan_id, description)
if not description and name:
conf_str = CE_NC_MERGE_VLAN_NAME % (vlan_id, name)
if description and name:
conf_str = CE_NC_MERGE_VLAN % (vlan_id, name, description)
if not conf_str:
return
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "MERGE_VLAN")
self.changed = True
def create_vlan_batch(self, vlan_list):
"""Create vlan batch."""
if not vlan_list:
return
vlan_bitmap = self.vlan_list_to_bitmap(vlan_list)
xmlstr = CE_NC_CREATE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap)
recv_xml = execute_nc_action(self.module, xmlstr)
self.check_response(recv_xml, "CREATE_VLAN_BATCH")
self.updates_cmd.append('vlan batch %s' % (
self.vlan_range.replace(',', ' ').replace('-', ' to ')))
self.changed = True
def delete_vlan_batch(self, vlan_list):
"""Delete vlan batch."""
if not vlan_list:
return
vlan_bitmap = self.vlan_list_to_bitmap(vlan_list)
xmlstr = CE_NC_DELETE_VLAN_BATCH % (vlan_bitmap, vlan_bitmap)
recv_xml = execute_nc_action(self.module, xmlstr)
self.check_response(recv_xml, "DELETE_VLAN_BATCH")
self.updates_cmd.append('undo vlan batch %s' % (
self.vlan_range.replace(',', ' ').replace('-', ' to ')))
self.changed = True
def undo_config_vlan(self, vlanid):
"""Delete vlan."""
conf_str = CE_NC_DELETE_VLAN % vlanid
recv_xml = set_nc_config(self.module, conf_str)
self.check_response(recv_xml, "DELETE_VLAN")
self.changed = True
self.updates_cmd.append('undo vlan %s' % self.vlan_id)
def get_vlan_attr(self, vlan_id):
""" get vlan attributes."""
conf_str = CE_NC_GET_VLAN % vlan_id
xml_str = get_nc_config(self.module, conf_str)
attr = dict()
if "<data/>" in xml_str:
return attr
else:
re_find = re.findall(r'.*<vlanId>(.*)</vlanId>.*\s*'
r'<vlanName>(.*)</vlanName>.*\s*'
r'<vlanDesc>(.*)</vlanDesc>.*', xml_str)
if re_find:
attr = dict(vlan_id=re_find[0][0], name=re_find[0][1],
description=re_find[0][2])
return attr
def get_vlans_name(self):
""" get all vlan vid and its name list,
sample: [ ("20", "VLAN_NAME_20"), ("30", "VLAN_NAME_30") ]"""
conf_str = CE_NC_GET_VLANS
xml_str = get_nc_config(self.module, conf_str)
vlan_list = list()
if "<data/>" in xml_str:
return vlan_list
else:
vlan_list = re.findall(
r'.*<vlanId>(.*)</vlanId>.*\s*<vlanName>(.*)</vlanName>.*', xml_str)
return vlan_list
def get_vlans_list(self):
""" get all vlan vid list, sample: [ "20", "30", "31" ]"""
conf_str = CE_NC_GET_VLANS
xml_str = get_nc_config(self.module, conf_str)
vlan_list = list()
if "<data/>" in xml_str:
return vlan_list
else:
vlan_list = re.findall(
r'.*<vlanId>(.*)</vlanId>.*', xml_str)
return vlan_list
def vlan_series(self, vlanid_s):
""" convert vlan range to list """
vlan_list = []
peerlistlen = len(vlanid_s)
if peerlistlen != 2:
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
for num in range(peerlistlen):
if not vlanid_s[num].isdigit():
self.module.fail_json(
msg='Error: Format of vlanid is invalid.')
if int(vlanid_s[0]) > int(vlanid_s[1]):
self.module.fail_json(msg='Error: Format of vlanid is invalid.')
elif int(vlanid_s[0]) == int(vlanid_s[1]):
vlan_list.append(str(vlanid_s[0]))
return vlan_list
for num in range(int(vlanid_s[0]), int(vlanid_s[1])):
vlan_list.append(str(num))
vlan_list.append(vlanid_s[1])
return vlan_list
def vlan_region(self, vlanid_list):
""" convert vlan range to vlan list """
vlan_list = []
peerlistlen = len(vlanid_list)
for num in range(peerlistlen):
if vlanid_list[num].isdigit():
vlan_list.append(vlanid_list[num])
else:
vlan_s = self.vlan_series(vlanid_list[num].split('-'))
vlan_list.extend(vlan_s)
return vlan_list
def vlan_range_to_list(self, vlan_range):
""" convert vlan range to vlan list """
vlan_list = self.vlan_region(vlan_range.split(','))
return vlan_list
def vlan_list_to_bitmap(self, vlanlist):
""" convert vlan list to vlan bitmap """
vlan_bit = ['0'] * 1024
bit_int = [0] * 1024
vlan_list_len = len(vlanlist)
for num in range(vlan_list_len):
tagged_vlans = int(vlanlist[num])
if tagged_vlans <= 0 or tagged_vlans > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
j = tagged_vlans / 4
bit_int[j] |= 0x8 >> (tagged_vlans % 4)
vlan_bit[j] = hex(bit_int[j])[2]
vlan_xml = ''.join(vlan_bit)
return vlan_xml
def check_params(self):
"""Check all input params"""
if not self.vlan_id and self.description:
self.module.fail_json(
msg='Error: Vlan description could be set only at one vlan.')
if not self.vlan_id and self.name:
self.module.fail_json(
msg='Error: Vlan name could be set only at one vlan.')
# check vlan id
if self.vlan_id:
if not self.vlan_id.isdigit():
self.module.fail_json(
msg='Error: Vlan id is not digit.')
if int(self.vlan_id) <= 0 or int(self.vlan_id) > 4094:
self.module.fail_json(
msg='Error: Vlan id is not in the range from 1 to 4094.')
# check vlan description
if self.description:
if len(self.description) > 81 or len(self.description.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: vlan description is not in the range from 1 to 80.')
# check vlan name
if self.name:
if len(self.name) > 31 or len(self.name.replace(' ', '')) < 1:
self.module.fail_json(
msg='Error: Vlan name is not in the range from 1 to 31.')
def get_proposed(self):
"""
get proposed config.
"""
if self.vlans_list_change:
if self.state == 'present':
proposed_vlans_tmp = list(self.vlans_list_change)
proposed_vlans_tmp.extend(self.vlans_list_exist)
self.results['proposed_vlans_list'] = list(
set(proposed_vlans_tmp))
else:
self.results['proposed_vlans_list'] = list(
set(self.vlans_list_exist) - set(self.vlans_list_change))
self.results['proposed_vlans_list'].sort()
else:
self.results['proposed_vlans_list'] = self.vlans_list_exist
if self.vlan_id:
if self.state == "present":
self.results['proposed'] = dict(
vlan_id=self.vlan_id,
name=self.name,
description=self.description
)
else:
self.results['proposed'] = None
else:
self.results['proposed'] = None
def get_existing(self):
"""
get existing config.
"""
self.results['existing_vlans_list'] = self.vlans_list_exist
if self.vlan_id:
if self.vlan_attr_exist:
self.results['existing'] = dict(
vlan_id=self.vlan_attr_exist['vlan_id'],
name=self.vlan_attr_exist['name'],
description=self.vlan_attr_exist['description']
)
else:
self.results['existing'] = None
else:
self.results['existing'] = None
def get_end_state(self):
"""
get end state config.
"""
self.results['end_state_vlans_list'] = self.get_vlans_list()
if self.vlan_id:
if self.vlan_attr_end:
self.results['end_state'] = dict(
vlan_id=self.vlan_attr_end['vlan_id'],
name=self.vlan_attr_end['name'],
description=self.vlan_attr_end['description']
)
else:
self.results['end_state'] = None
else:
self.results['end_state'] = None
    def work(self):
        """Main worker: validate input, diff desired vs. device state,
        apply NETCONF changes, then populate self.results and exit.
        """
        # check param
        self.check_params()
        # get all vlan info
        self.vlans_list_exist = self.get_vlans_list()
        # get vlan attributes for the single-vlan case
        if self.vlan_id:
            self.vlans_list_change.append(self.vlan_id)
            self.vlan_attr_exist = self.get_vlan_attr(self.vlan_id)
            if self.vlan_attr_exist:
                self.vlan_exist = True
        # For a range, reduce the change set to what actually differs.
        if self.vlan_range:
            new_vlans_tmp = self.vlan_range_to_list(self.vlan_range)
            if self.state == 'present':
                # vlans to add: requested but not on the device
                self.vlans_list_change = list(
                    set(new_vlans_tmp) - set(self.vlans_list_exist))
            else:
                # vlans to delete: requested and on the device
                self.vlans_list_change = [
                    val for val in new_vlans_tmp if val in self.vlans_list_exist]
        if self.state == 'present':
            if self.vlan_id:
                if not self.vlan_exist:
                    # create a new vlan
                    self.config_vlan(self.vlan_id, self.name, self.description)
                elif self.description and self.description != self.vlan_attr_exist['description']:
                    # merge vlan description
                    self.merge_vlan(self.vlan_id, self.name, self.description)
                elif self.name and self.name != self.vlan_attr_exist['name']:
                    # merge vlan name
                    self.merge_vlan(self.vlan_id, self.name, self.description)
                # record the CLI-equivalent commands for the result
                if self.changed:
                    self.updates_cmd.append('vlan %s' % self.vlan_id)
                    if self.name:
                        self.updates_cmd.append('name %s' % self.name)
                    if self.description:
                        self.updates_cmd.append(
                            'description %s' % self.description)
            elif self.vlan_range and self.vlans_list_change:
                self.create_vlan_batch(self.vlans_list_change)
        else:  # absent
            if self.vlan_id:
                if self.vlan_exist:
                    # delete the vlan
                    self.undo_config_vlan(self.vlan_id)
            elif self.vlan_range and self.vlans_list_change:
                self.delete_vlan_batch(self.vlans_list_change)
        # re-read device state so end_state reflects the changes
        if self.vlan_id:
            self.vlan_attr_end = self.get_vlan_attr(self.vlan_id)
        self.get_existing()
        self.get_proposed()
        self.get_end_state()
        self.results['changed'] = self.changed
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()
        self.module.exit_json(**self.results)
def main():
    """Module entry point: build the argument spec and run the worker."""
    spec = dict(
        vlan_id=dict(required=False),
        vlan_range=dict(required=False, type='str'),
        name=dict(required=False, type='str'),
        description=dict(required=False, type='str'),
        state=dict(choices=['absent', 'present'],
                   default='present', required=False),
    )
    # Merge in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    Vlan(spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
AstromechZA/validoot | setup.py | 1 | 1644 | import os
import sys
from setuptools import setup
from setuptools.command.test import test
here = os.path.abspath(os.path.dirname(__file__))
class PyTest(test):
    """setuptools 'test' command that delegates to py.test."""
    # Forwarded verbatim to pytest.main(), e.g. --pytest-args="-k foo".
    user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
    def initialize_options(self):
        """Set option defaults before the command line is parsed."""
        test.initialize_options(self)
        self.pytest_args = []
    def finalize_options(self):
        """Finish option parsing and mark the command as having a suite."""
        test.finalize_options(self)
        self.test_args = []
        self.test_suite = True
    def run_tests(self):
        """Run pytest and exit the process with its status code."""
        # import here, cause outside the eggs aren't loaded
        import pytest
        errno = pytest.main(self.pytest_args)
        sys.exit(errno)
setup(
    # package info
    name='validoot',
    version='1.3',
    author='Ben Meier',
    author_email='benmeier@fastmail.com',
    url='http://github.com/AstromechZA/validoot',
    download_url='https://github.com/AstromechZA/validoot/tarball/1.3',
    description='Simple validation for function arguments using a decorator.',
    # README.rst doubles as the PyPI long description.
    long_description=open(os.path.join(here, 'README.rst')).read(),
    keywords=[
        'validate',
        'function arguments',
        'decorator'
    ],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Topic :: Software Development'
    ],
    license='MIT',
    # packages
    packages=['validoot'],
    # runtime scripts
    scripts=[],
    # requirements
    install_requires=['wrapt'],
    # tests: `python setup.py test` invokes the PyTest command above
    tests_require=['pytest-cov', 'pytest', 'mock'],
    cmdclass={'test': PyTest}
)
| mit |
valentin-krasontovitsch/ansible | test/units/modules/network/f5/test_bigip_gtm_virtual_server.py | 21 | 5644 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_virtual_server import ApiParameters
from library.modules.bigip_gtm_virtual_server import ModuleParameters
from library.modules.bigip_gtm_virtual_server import ModuleManager
from library.modules.bigip_gtm_virtual_server import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_virtual_server import ApiParameters
from ansible.modules.network.f5.bigip_gtm_virtual_server import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_virtual_server import ModuleManager
from ansible.modules.network.f5.bigip_gtm_virtual_server import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, parsing JSON when possible, with caching."""
    path = os.path.join(fixture_path, name)
    if path not in fixture_data:
        with open(path) as handle:
            raw = handle.read()
        try:
            parsed = json.loads(raw)
        except Exception:
            # Not JSON: keep the raw text.
            parsed = raw
        fixture_data[path] = parsed
    return fixture_data[path]
class TestParameters(unittest.TestCase):
    """Unit tests for the parameter adapter classes of bigip_gtm_virtual_server."""
    def test_module_parameters(self):
        """Playbook-style args should map onto ModuleParameters attributes."""
        args = dict(
            name='foo',
            server_name='server1',
            address='1.1.1.1',
            port=22,
            translation_address='2.2.2.2',
            translation_port=443,
            availability_requirements=dict(
                type='at_least',
                at_least=2,
            ),
            monitors=['http', 'tcp', 'gtp'],
            virtual_server_dependencies=[
                dict(
                    server='server2',
                    virtual_server='vs2'
                )
            ],
            link='link1',
            limits=dict(
                bits_enabled=True,
                packets_enabled=True,
                connections_enabled=True,
                bits_limit=100,
                packets_limit=200,
                connections_limit=300
            ),
            state='present'
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.server_name == 'server1'
        assert p.address == '1.1.1.1'
        assert p.port == 22
        assert p.translation_address == '2.2.2.2'
        assert p.translation_port == 443
        assert p.availability_requirement_type == 'at_least'
        assert p.at_least == 2
        # monitors are rendered as a BIG-IP monitor-rule expression
        assert p.monitors == 'min 2 of { /Common/http /Common/tcp /Common/gtp }'
        assert len(p.virtual_server_dependencies) == 1
        assert p.link == '/Common/link1'
        assert p.bits_enabled == 'enabled'
        assert p.bits_limit == 100
        assert p.packets_enabled == 'enabled'
        assert p.packets_limit == 200
        assert p.connections_enabled == 'enabled'
        assert p.connections_limit == 300
    def test_api_parameters(self):
        """A recorded REST payload should map onto ApiParameters attributes."""
        args = load_fixture('load_gtm_server_virtual_2.json')
        p = ApiParameters(params=args)
        assert p.name == 'vs2'
        assert p.address == '6.6.6.6'
        assert p.port == 8080
        assert p.translation_address == 'none'
        assert p.translation_port == 0
        assert p.availability_requirement_type == 'all'
        assert p.monitors == '/Common/gtp'
        assert p.bits_enabled == 'enabled'
        assert p.bits_limit == 100
        assert p.packets_enabled == 'enabled'
        assert p.packets_limit == 200
        assert p.connections_enabled == 'enabled'
        assert p.connections_limit == 300
class TestManager(unittest.TestCase):
    """Unit tests for ModuleManager with device I/O mocked out."""
    def setUp(self):
        """Patch module_provisioned so GTM always appears provisioned.

        The patch target differs between the standalone 'library' layout
        and the in-tree ansible layout, hence the try/except.
        """
        self.spec = ArgumentSpec()
        try:
            self.p1 = patch('library.modules.bigip_gtm_virtual_server.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
        except Exception:
            self.p1 = patch('ansible.modules.network.f5.bigip_gtm_virtual_server.module_provisioned')
            self.m1 = self.p1.start()
            self.m1.return_value = True
    def tearDown(self):
        """Undo the patch started in setUp."""
        self.p1.stop()
    def test_create_datacenter(self, *args):
        """Creating a new virtual server should report changed=True."""
        set_module_args(dict(
            server_name='foo',
            name='vs1',
            address='1.1.1.1',
            state='present',
            password='admin',
            server='localhost',
            user='admin'
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)
        results = mm.exec_module()
        assert results['changed'] is True
RichIsMyName/PicklingToolsRepo | Xm/ptools161/python/XMTime.py | 3 | 26838 | """
Python class that wraps the X-Midas time facilities. The XMTime
is represented as two floats, one which contains the integral number
of seconds since the epoch, the other which contains the fractional number.
The following is always true of an XMTime:
self.integral == round(self.integral)
0.0 <= self.fractional < 1.0
"""
# $Id: xmtime.py 3442 2006-12-14 23:49:09Z dtj $
#import fixdlopenflags
#import xmpyapi
# Format "enumeration": one-letter codes understood by the time string
# conversion routines (string layouts per the TFORMAT table below).
STANDARD = "_"  # yyyy:mm:dd::hh:mm:ss.sss
ACQ = "A"       # yy.DOY:hh:mm:ss.sss
EPOCH = "E"     # yyyy:sec_of_year.ssss
NORAD = "N"     # yyDOY:frac_day
TCR = "T"       # DOY:hh:mm:ss.sss
VAX = "V"       # dd-MON-yyyy::hh:mm:ss.sss
# Warning level "enumeration" for invalid time strings (see XMTime.__init__)
IGNORE = 0      # suppress warnings
WARNING = 1     # print a warning (default)
EXCEPTION = 2   # raise an exception
class XMTimeInvalidTC_PREC(Exception):
    """Raised when a header's TC_PREC keyword is not float-convertible."""
    def __init__(self, hdr):
        message = ('TC_PREC main keyword in file %s does not '
                   'evaluate to a float: %s' %
                   (hdr.get('file_name', ''),
                    repr(hdr['keywords']['TC_PREC'])))
        Exception.__init__(self, message)
class XMTime(object):
# Slots make construction and look-up of attributes in a Python
# class more efficient and catches attempts to assign to
# misspelled attribute references (e.g. XMTime.ingetral = ...)
__slots__ = ('integral', 'fractional')
# __setstate__/__getstate__ are required for pickling/copying when
# __slots__ is defined. To maintain pre-__slots__ backwards
# compatibility use the former __dict__ contents as the state.
    def __getstate__(self):
        """Return pickle state as a dict (pre-__slots__ compatible form)."""
        return dict(integral=self.integral, fractional=self.fractional)
    def __setstate__(self, state):
        """Restore from a pre-__slots__ pickle state dict."""
        self.integral = state['integral']
        self.fractional = state['fractional']
    # __reduce__ introduces a more compact and efficient pickle/copy
    # method for post-__slots__ pickled XMTimes. The __get/setstate__
    # methods will only be called for pre-__slots__ pickled XMTimes.
    def __reduce__(self):
        """helper for pickle: rebuild via XMTime(integral, fractional)"""
        return (XMTime, self.two_floats())
    def __init__(self, *args):
        """
        Create an XMTime.
        If no arguments are given, create an XMTime representing the
        current time of day.
        XMTime(seconds): If one float is given, assume it is a number of
        seconds, and create a time from it relative to J1950.
        XMTime(integral, fractional): If two floats are given, assume
        it is the integral and fractional parts of an XMTime, and
        construct this XMTime directly from that.
        XMTime(string [,format] [,warning level]): If a string is given,
        convert it to a time according to the formats documented in the
        help given by xm.help('time_format').  If the 'format' argument
        is not supplied, the standard format is assumed.  The optional
        'warning level' argument allows the user to specify the desired
        behavior if an invalid time string is given.  Allowable warning
        levels are as follows:
                      0 IGNORE     Warnings are suppressed
        Default -->   1 WARNING    A warning is printed to the screen
                      2 EXCEPTION  An exception is thrown
        If 'warning level' is omitted or is a value other than 0, 1, or
        2, it defaults to warning level 1.
        XMTime(hcb): If a dictionary is given, assume it is a
        dictionary representation of a BLUE header, such as that
        returned by bluefile.readheader(), and extract any precision
        timecode contained within it.  Note that the header's 'xstart'
        field is not taken into account, for symmetry with the X-Midas
        STATUS command.  To get the time of the first sample in the
        file described by 'hcb', use this expression:
            XMTime(hcb) + hcb['xstart']
        If the hcb['keywords']['TC_PREC'] main header keyword is present
        and it is not a string (all main header keywords default to a
        string type) or a float an XMTimeInvalidTC_PREC exception is
        raised.  If a string is present but it cannot be converted to
        a float via float() the standard ValueError exception will
        be raised.
        """
        if len(args) == 0:
            self.integral, self.fractional = xmpyapi.now()
        else:
            if isinstance(args[0], str):
                # string, optional format, optional warn flag
                time_str = args[0]
                # Default values
                warn = WARNING
                fmt = STANDARD
                if len(args) == 2:
                    # User entered time string and one optional argument
                    if isinstance(args[1], str):
                        # Optional argument must be the format
                        fmt = args[1]
                    else:
                        # Optional argument must be the warning level
                        warn = args[1]
                elif len(args) >= 3:
                    # User supplied all arguments
                    fmt = args[1]
                    warn = args[2]
                if warn not in [IGNORE, WARNING, EXCEPTION]:
                    warn = WARNING
                junk, self.integral, self.fractional = \
                    xmpyapi.time_format(time_str, fmt.upper(), warn)
            elif isinstance(args[0], XMTime):
                # Copy constructor.
                self.integral, self.fractional = \
                    args[0].integral, args[0].fractional
            # NOTE(review): dict.has_key is Python 2 only; this module
            # predates Python 3.
            elif isinstance(args[0], dict) and args[0].has_key('timecode'):
                # Assume a header from bluefile
                hdr = args[0]
                tc = hdr['timecode']
                tc_prec = hdr.get('keywords', {}).get('TC_PREC', 0.0)
                if isinstance(tc_prec, str):
                    tc_prec = float(tc_prec)
                elif not isinstance(tc_prec, float):
                    raise XMTimeInvalidTC_PREC(hdr)
                self.integral = round(tc)
                # We're getting this from hcb.timecode, so we know
                # that it has precisely 6 digits after the decimal.
                self.fractional = round((tc - self.integral), 6) + tc_prec
            else:
                if len(args) == 1:
                    # Single number of seconds relative to J1950.
                    t = args[0]
                    tint = round(t)
                    self.integral, self.fractional = tint, t - tint
                else:
                    # Two floats: integral and fractional parts directly.
                    self.integral, self.fractional = args
        self._normalize()
    def _fracstr(self):
        """
        Return the fractional part of the time as a string (including the
        leading '.', or '' when there is no fractional part).  Used to be
        done as '%.12f', but Python's string conversion uses the
        rint() function, not Fortran NINT() as X-Midas does.  The
        underlying X-Midas time conversion utilities are used for
        consistency.  _fracstr is called by hash() and cmp(), so that
        times that look the same compare and hash the same.
        """
        # times2tod() is used in order to avoid involving the date
        # conversion utilities.
        s = xmpyapi.times2tod(0, self.fractional)
        ii = s.find('.')
        if ii >= 0:
            return s[ii:]
        else:
            return ''
def put_epoch(self, hcb):
"""
Given a dictionary representation of a BLUE header (such
as that returned from bluefile.readheader), fill in its
'timecode' field and 'TC_PREC' main keyword according
to the BLUE timecode standard.
The inverse of this function is accomplished by constructing
an XMTime from a dictionary rep. of a BLUE header.
The value of the 'TC_PREC' keyword added to the dictionary
passed in will be a string representation of a float value;
all main header keywords can only have string values.
"""
timeu = round(self.fractional, 6)
timep = round((self.fractional - timeu), 12)
hcb['timecode'] = self.integral + timeu
if timep != 0.0:
if not hcb.has_key('keywords') or hcb['keywords'] is None:
hcb['keywords'] = {}
hcb['keywords']['TC_PREC'] = str(timep)
else:
try:
del(hcb['keywords']['TC_PREC'])
except:
pass
def eqt(self, other_time, tol):
"""
Return whether the given XMTime is equal to this XMTime
within some tolerance (i.e. is abs(difference) <= tol).
"""
diff = self.__sub__(other_time)
return abs(diff.seconds()) <= tol
    def two_floats(self):
        """
        Return the implementation of this XMTime, that is, two
        floating point numbers representing the integral and
        fractional number of seconds, respectively, past the
        J1950 epoch.
        """
        return self.integral, self.fractional
    def seconds(self):
        """
        Add the integral and fractional number of seconds to
        return a single floating point value representing the
        whole time.  Some precision will be lost in the result;
        only the time up to about the microsecond level will be
        accurate.
        """
        return self.integral + self.fractional
    def date(self):
        """
        Return the date portion of an XMTime (yyyy:mm:dd per the
        '_DATE' entry of the format table).
        """
        return xmpyapi.times2str(self.integral, 0.0, '_DATE')
def hms(self):
"""
Return the time portion of an XMTime, in hms format
(hh:mm:ss).
"""
return str(self).split('::')[-1].split('.')[0]
def round(self, digits=0):
"""
Round an XMTime to a given precision in decimal digits
(default 0 digits) and return as an XMTime. Precision may be
negative and is applied to the full J1950 seconds.
The built-in Python round() method can round XMTimes
incorrectly when digits >= 0. Internally the XMTime is
converted to a float before rounding, i.e. (XMTime.seconds()),
and in doing so the full precision required to round
accurately is lost. Furthermore, the returned float value
converted back to an XMTime often contains sub-microsecond
floating point noise.
Note: J1950 seconds are rounded to match the behavior of
Python's built-in round() method. Hence, when digits is < -1
(i.e. 100's, 1000's, etc.) the resulting XMTime will not
necessarily fall on minute, hour, day, etc. boundaries.
"""
if digits < 0:
# Rounding boundaries are at integral values (5, 50, 500, etc)
# hence the fractional portion is irrelevant.
return XMTime(round(self.integral, digits))
else:
# If the fractional portion rounds up, the XMTime constructor
# will normalize it.
return XMTime(self.integral, round(self.fractional, digits))
    def __float__(self):
        """
        Same as self.seconds(); precision beyond roughly the
        microsecond level is lost in the single float.
        """
        return self.seconds()
    def __str__(self):
        """
        Create a standard-format string representation of this XMTime.
        """
        return self.asString()
def __sub__(self, t):
"""
Subtract a float or another XMTime from this XMTime.
The result is another XMTime.
"""
if not isinstance(t, XMTime):
t = XMTime(t)
temp = XMTime(self)
temp.integral -= t.integral
temp.fractional -= t.fractional
temp._normalize()
return temp
def __add__(self, t):
"""
Add a floating point number or another XMTime to an XMTime to
produce a new XMTime.
"""
temp = XMTime(self)
if isinstance(t, XMTime):
temp.integral += t.integral
temp.fractional += t.fractional
else:
tint = round(t)
temp.integral += tint
temp.fractional += (t - tint)
temp._normalize()
return temp
    def __radd__(self, t):
        """
        Reflected add (float + XMTime); delegates to __add__ since
        addition is commutative here.
        """
        return self.__add__(t)
    def asString(self, format=STANDARD):
        """
        Return a string representation of this time, according to
        the given one-letter format code (see the format enumeration
        at the top of this module).
        """
        return xmpyapi.times2str(self.integral, self.fractional, format)
    def __repr__(self):
        """
        Return the standard string representation of an XMTime object,
        in Python-object syntax: <xmtime.XMTime YYYY:MM:DD::hh::mm::ss.ssss>
        """
        return "<xmtime.XMTime %s>" % str(self)
    def __hash__(self):
        """
        Return a hash value for this instance that is determined
        by its value (picosecond precision).
        """
        # Cannot use two_floats(), because the floats may
        # have accumulated sub-picosecond precision, and yet XMTime is
        # only intended to be exact to picoseconds.  Sub-picosecond precision
        # is not stored in BLUE headers, is not written out in the
        # str() method, and so two XMTimes that differ by less than a
        # picosecond should be considered the same for this function.
        return hash((self.integral, self._fracstr()))
# ##### Relational operators
    def __cmp__(self, t):
        """
        XMTimes are only intended to record time to the nearest picosecond.
        Common sense suggests that times that have the same string
        representation will compare as equal, as well as the converse.
        Thus, when times are within a second of each other, only the
        first twelve digits to the right of the decimal, with rounding
        from the thirteenth, are considered in the comparison.
        """
        # NOTE(review): __cmp__ and the cmp() builtin are Python 2 only;
        # rich comparison methods would be required under Python 3.
        tt = XMTime(t)
        icmp = cmp(self.integral, tt.integral)
        if icmp:
            return icmp
        else:
            # Same whole second: compare picosecond-rounded fraction strings.
            return cmp(self._fracstr(), tt._fracstr())
def _normalize(self):
"""
Makes sure that self.integral has all of its fractional parts
moved into self.fractional, and that self.fractional has all
of its integral parts moved into self.integral.
Ensure that self.fractional is not storing
more than picoseconds.
Also make sure that self.fractional is positive.
"""
iint = round(self.integral)
self.fractional = round(self.fractional, 12)
if iint != self.integral:
self.fractional += (self.integral - iint)
self.integral = iint
if abs(self.fractional) >= 1.0:
ifrac = round(self.fractional)
self.integral += ifrac
self.fractional -= ifrac
assert round(self.integral) == self.integral
assert abs(self.fractional) < 1.0
if self.fractional < 0.0:
self.fractional += 1
self.integral -= 1
assert 0.0 <= self.fractional < 1.0
# The following class is a replacement for the methods from the xmpyapi
# module the XMPY xmtime module relies on.
from struct import calcsize as _calcsize
class xmpyapi(object):
    """Pure-Python stand-in for the X-Midas ``xmpyapi`` primitives that the
    XMPY ``xmtime`` module relies on.

    Times are represented as an (integral, fractional) pair of seconds
    relative to the J1950 epoch.  The Unix C-library time utilities are used
    underneath, with EPOCH_DIFF applied to convert between the J1950 and
    Unix 1970 epochs.  (Python 2 module: uses ``print`` statements,
    ``except E, e`` and ``sys.maxint``.)
    """

    # The start of the Unix 1970 epoch, expressed in terms of the
    # J1950 epoch. To be added to time.time() in order to obtain the
    # right value, or to be subtracted from self.integral before using
    # Unix time utilities for string time representation.
    EPOCH_DIFF = 631152000.0

    # time.strftime() overflows on 32 bit machines past this time
    LONG_IS_32_BIT = (_calcsize('l') == 4)
    UNIX_32BIT_EPOCH_MAX = 2777068800.0

    SECS_PER_MIN = 60.0
    SECS_PER_HOUR = 3600.0
    SECS_PER_DAY = 86400.0
    SECS_PER_NON_LEAP_YEAR = 31536000.0  # J1950 is a not a leap year

    # Multiplier for each colon-separated field, keyed by the number of
    # colons present in a time-of-day string.
    NCOLONS_MAP = {
        0 : [1],                                 # ss.sss
        1 : [SECS_PER_HOUR, SECS_PER_MIN],       # hh:mm
        2 : [SECS_PER_HOUR, SECS_PER_MIN, 1],    # hh:mm:ss
    }

    # _parse_date() assumes that any instance of '%y/%Y' are at the
    # beginning of the format string to handle overflow/underflow
    # problems as well as date wrapping around 1950 vs 1970 for 2
    # digit years.  Currently VAX is out-of-luck for under/overflow
    # issues.
    TFORMAT = { STANDARD:('%Y:%m:%d::%H:%M:%S', 'yyyy:mm:dd::hh:mm:ss.sss'),
                VAX:('%d-%b-%Y:%H:%M:%S', 'dd-MON-yyyy::hh:mm:ss.sss'),
                EPOCH:('%Y:', 'yyyy:sec_of_year.ssss'),
                NORAD:('%y%j', 'yyDOY:frac_day'),
                TCR:('%j:%H:%M:%S', 'DOY:hh:mm:ss.sss'),
                ACQ:('%y.%j:%H:%M:%S', 'yy.DOY:hh:mm:ss.sss'),
                '_DATE':('%Y:%m:%d', 'yyyy:mm:dd')}

    @staticmethod
    def now():
        """Return the current wall-clock time as (integral, fractional)
        seconds since the J1950 epoch."""
        import time, math
        fractional, integral = math.modf(time.time())
        # System time does not have more than microsecond precision.
        # Drop the bits that suggest otherwise
        return integral + xmpyapi.EPOCH_DIFF, round(fractional, 6)

    @staticmethod
    def time_format(time_str, fmt, warn):
        """Parse *time_str* according to format *fmt*.

        Returns a (None, integral, fractional) triple.  On a parse failure
        the behavior depends on *warn*: EXCEPTION re-raises, WARNING prints
        the error, anything else is silent; in the last two cases the time
        is reported as (0.0, 0.0).
        """
        try:
            integral, fractional = xmpyapi._parse_time(time_str, fmt)
        except Exception, e:
            if warn == EXCEPTION:
                raise
            elif warn == WARNING:
                print 'WARNING:', e
            integral, fractional = 0.0, 0.0
        return None, integral, fractional

    @staticmethod
    def _parse_time(time_str, fmt):
        """Convert *time_str* (in format *fmt*) to (integral, fractional)
        seconds relative to J1950.  Raises XMTimeParseError on bad input."""
        from math import floor
        # Pull off the fractional part separately
        integral = 0.0
        idec = time_str.rfind('.')
        # In ACQ format a '.' also separates year from day-of-year, so the
        # final '.' only starts the fraction if it follows the last ':'.
        if idec >= 0 and (fmt != ACQ or idec > time_str.rfind(':')):
            fractional = float(time_str[idec:])
            time_whole = time_str[:idec].strip()
        else:
            time_whole = time_str.strip()
            fractional = 0.0

        time_fmt = xmpyapi.TFORMAT.get(fmt, (None, None))[0]
        if not time_fmt:
            raise Exception('XMTime format "%s" not yet implemented' % fmt)

        if fmt == STANDARD:
            # We need to be much more forgiving than time.strptime() will
            # be below, so we parse out the HH:MM:SS ourselves below
            date_time = time_whole.split('::')
            if len(date_time) > 2 or not date_time[0]:
                raise XMTimeParseError(fmt, time_str)
            if date_time[-1]:
                # Manually compute time string integral, map empty strings
                # to 0  Either SS, HH:MM or HH:MM:SS
                vals = [int(_ or 0) for _ in date_time[-1].split(':')]
                if len(vals) > 3:
                    raise XMTimeParseError(fmt, time_str)
                if len(vals) == 1:
                    integral = float(vals[0])  # SS
                elif len(vals) > 1:
                    # HH:MM[:SS]
                    integral = (float(vals[0]) * xmpyapi.SECS_PER_HOUR +
                                float(vals[1]) * xmpyapi.SECS_PER_MIN)
                    if len(vals) > 2:
                        integral += vals[2]  # [:SS]
            if len(date_time) == 1:
                # No year, we're done
                return integral, fractional
            elif len(date_time[0].split(':')) == 1:
                # Date portion has no ':' separators, interpret as offset
                # days (i.e. a duration)
                integral += float(date_time[0]) * xmpyapi.SECS_PER_DAY
                return integral, fractional
            # Just the YEAR part needs to be parsed
            time_whole = date_time[0]
            time_fmt = time_fmt.split('::')[0]
        elif fmt == NORAD:
            # Fractional part is fractional day
            fractional *= xmpyapi.SECS_PER_DAY
            integral = floor(fractional)
            fractional -= integral
        elif fmt == EPOCH:
            # %Y:%<soy> is second of year. time.strptime() does not handle
            # second-of-year, so unstringize this part ourselves.
            parts = time_whole.split(':')
            if len(parts) != 2:
                raise XMTimeParseError(fmt, time_str)
            time_whole, integral = parts[0] + ':', float(parts[1])

        if time_fmt:
            try:
                integral += xmpyapi._parse_date(time_whole, fmt, time_fmt)
            except:
                raise XMTimeParseError(fmt, time_str)
        return integral, fractional

    @staticmethod
    def _parse_date(time_whole, fmt, time_fmt):
        """Parse the date portion *time_whole* into seconds relative to
        J1950, working around 32 bit time.mktime() range limits."""
        import time, sys
        # Account for the J1950 -> J1970 epoch diff
        integral = xmpyapi.EPOCH_DIFF
        if time_fmt.startswith('%y'):
            # time.strptime() interprets splits 19xx/20xx 2 digit
            # years at 1969, xmtime.XMTime splits them at 1950.
            yy = int(time_whole[:2])
            if yy < 69:
                time_fmt = '%Y' + time_fmt[2:]
                if yy > 49:
                    time_whole = '19' + time_whole
                elif sys.maxint < 4e9 and yy > 37:
                    # On 32 bit systems, time.mktime() wll
                    # overflow if not 1901 < year < 2038, so we
                    # need to do some extra magic knowing the
                    # EPOCH_DIFF between 2030 and 2050 is the same
                    # as between 1950 and 1970
                    integral += xmpyapi.EPOCH_DIFF
                    time_whole = str(2000+yy-20) + time_whole[2:]
                else:
                    time_whole = '20' + time_whole
        elif time_fmt.startswith('%Y') and sys.maxint < 4e9:
            # Shift out-of-range 4 digit years into mktime()'s window in
            # 20-year (EPOCH_DIFF sized) steps, compensating in integral.
            yy = int(time_whole[:4])
            while yy > 2037:
                integral += xmpyapi.EPOCH_DIFF
                yy -= 20
            while yy < 1902:
                integral -= xmpyapi.EPOCH_DIFF
                yy += 20
            time_whole = str(yy) + time_whole[4:]
        unix_time = time.strptime(time_whole, time_fmt)
        # In TCR, the year is always the current year
        if fmt == TCR:
            unix_time = (time.gmtime()[0],) + unix_time[1:]
        return integral + time.mktime(unix_time)

    @staticmethod
    def times2str(integral, fractional, format):
        """Render (integral, fractional) J1950 seconds as a string in the
        requested *format*."""
        import time
        fmt = xmpyapi.TFORMAT.get(format, (None, None))[0]
        if not fmt:
            raise Exception("Unhandled time format: " + format)

        if (format == STANDARD and
            abs(integral) < xmpyapi.SECS_PER_NON_LEAP_YEAR):
            # If integral is within a year of the J1950 epoch
            # year then assume it's a duration.  We are taking
            # advantage of the fact here that the UNIX epoch year
            # 1970 is identical in number of days to J1950.
            unix_time = time.gmtime(integral)
            istr = time.strftime('%H:%M:%S', unix_time)
            doy = unix_time.tm_yday - 1
            if integral < 0: doy -= 365
            if doy: istr = '%d::%s' % (doy, istr)
        else:
            import sys
            unix_epoch = integral - xmpyapi.EPOCH_DIFF
            if abs(unix_epoch) > sys.maxint and fmt.upper().find('%Y') >= 0:
                # Handle 32 bit overflow by calculating at an
                # equivalent year and manually replacing %Y and %y in
                # the fmt string before calling time.strftime().
                #
                # xmtime.XMTime does not properly handle leap year at
                # centuries that are not divisible by 400 (i.e. 1900,
                # 2100) so we don't handle it here either.
                eoff = int(unix_epoch / xmpyapi.EPOCH_DIFF)
                unix_time = time.gmtime(unix_epoch - eoff * xmpyapi.EPOCH_DIFF)
                year = str(unix_time[0] + (eoff * 20))
                fmt = fmt.replace('%Y', year).replace('%y', year[2:])
            else:
                unix_time = time.gmtime(unix_epoch)
            istr = time.strftime(fmt, unix_time)

        if format == NORAD:
            # Now generate <frac_of_day> by combining the <sec_of_day>
            # and the fractional seconds.
            fractional += (integral % xmpyapi.SECS_PER_DAY)
            fractional /= xmpyapi.SECS_PER_DAY
            if fractional:
                # Get as much precision as possible since we can only keep
                # up to .1 nanosecond precision near the end of the day.
                ifrac = ('%.16f' % fractional).rstrip('0')
                if ifrac.startswith('1.'):
                    # Too close... we rounded up, can't change the integral
                    # part, so indicate just below 1.0
                    return istr + '.9999999999999999'
                elif not ifrac.endswith('.'):
                    return istr + ifrac[1:]
            return istr
        elif format == EPOCH:
            # %Y:%<soy> is second of year. time.strftime() does not handle
            # second-of-year, so stringize this part ourselves.
            istr += str(int(integral - float(XMTime(istr + "01:01::"))))

        # Common tail for all non-NORAD formats: append fractional seconds.
        ifrac = xmpyapi.times2tod(0, fractional)
        if ifrac == '0':
            return istr
        elif ifrac.endswith('000000'):
            # To match X-Midas, show sub-microsecond only when present
            return istr + ifrac[1:-6]
        else:
            return istr + ifrac[1:]

    @staticmethod
    def times2tod(unused, fractional):
        """Stringize *fractional* seconds rounded to picoseconds; returns
        '0' when the rounded value is zero."""
        # This method is called by the XMPY xmtime module only to hash
        # and compare XMTime instances.  The underlying X-Midas routine
        # called uses Fortran NINT() for rounding the fractional
        # seconds which rounds up at .5.  The '%.12f' string conversion
        # uses the rint() (as does printf()) which rounds 0.5 down.  To
        # keep the comparisons consistent with the Fortran, they
        # called into X-Midas.  However, Python's round(val, 12)
        # function also gives the desired NINT rounding behavior
        # without needing to call into X-Midas.

        # According to the original author of xmtime.XMTime, the
        # resulting rounded value is converted to a string because it
        # was found the sub-picosecond bits of the rounded value were
        # not always consistent.  This would cause unpredictable
        # behavior when hashing or comparing two XMTimes that should
        # otherwise be equal.  When this port of X-Midas-free XMTime
        # was written in 11/2009, this behavior could not be
        # reproduced using round(fractional, 12) in limited
        # testing.  It is possible this may now be enough and no
        # conversion to string is necessary, in which case the need
        # for the XMTime._fracstr() method goes away all-together.
        v = round(fractional, 12)
        if v:
            return '%.12f' % v
        else:
            return '0'
class XMTimeParseError(Exception):
    """Raised when a time string does not match its declared XMTime format."""

    def __init__(self, fmt, got):
        # TFORMAT[fmt][-1] is the human-readable template, e.g.
        # 'yyyy:mm:dd::hh:mm:ss.sss' for STANDARD.
        expected = xmpyapi.TFORMAT[fmt][-1]
        message = ('"%s" time expected %s, got "%s". Failed to create '
                   'time formatted string.' % (fmt, expected, got))
        Exception.__init__(self, message)
| bsd-3-clause |
android-ia/platform_external_chromium_org_third_party_WebKit | Source/core/inspector/CodeGeneratorInstrumentation.py | 9 | 18313 | #!/usr/bin/env python
# Copyright (c) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import optparse
import re
import string
import sys
template_h = string.Template("""// Code generated from InspectorInstrumentation.idl
#ifndef ${file_name}_h
#define ${file_name}_h
${includes}
namespace blink {
${forward_declarations}
namespace InspectorInstrumentation {
$methods
} // namespace InspectorInstrumentation
} // namespace blink
#endif // !defined(${file_name}_h)
""")
template_inline = string.Template("""
inline void ${name}(${params_public})
{ ${fast_return}
if (${condition})
${name}Impl(${params_impl});
}
""")
template_inline_forward = string.Template("""
inline void ${name}(${params_public})
{ ${fast_return}
${name}Impl(${params_impl});
}
""")
template_inline_returns_value = string.Template("""
inline ${return_type} ${name}(${params_public})
{ ${fast_return}
if (${condition})
return ${name}Impl(${params_impl});
return ${default_return_value};
}
""")
template_cpp = string.Template("""// Code generated from InspectorInstrumentation.idl
#include "config.h"
${includes}
namespace blink {
${extra_definitions}
namespace InspectorInstrumentation {
$methods
} // namespace InspectorInstrumentation
} // namespace blink
""")
template_outofline = string.Template("""
${return_type} ${name}Impl(${params_impl})
{${impl_lines}
}""")
template_agent_call = string.Template("""
if (${agent_class}* agent = ${agent_fetch})
${maybe_return}agent->${name}(${params_agent});""")
template_agent_call_timeline_returns_cookie = string.Template("""
int timelineAgentId = 0;
if (InspectorTimelineAgent* agent = agents->inspectorTimelineAgent()) {
if (agent->${name}(${params_agent}))
timelineAgentId = agent->id();
}""")
template_instrumenting_agents_h = string.Template("""// Code generated from InspectorInstrumentation.idl
#ifndef InstrumentingAgentsInl_h
#define InstrumentingAgentsInl_h
#include "platform/heap/Handle.h"
#include "wtf/FastAllocBase.h"
#include "wtf/Noncopyable.h"
#include "wtf/PassRefPtr.h"
#include "wtf/RefCounted.h"
namespace blink {
${forward_list}
class InstrumentingAgents : public RefCountedWillBeGarbageCollectedFinalized<InstrumentingAgents> {
WTF_MAKE_NONCOPYABLE(InstrumentingAgents);
WTF_MAKE_FAST_ALLOCATED_WILL_BE_REMOVED;
public:
static PassRefPtrWillBeRawPtr<InstrumentingAgents> create()
{
return adoptRefWillBeNoop(new InstrumentingAgents());
}
~InstrumentingAgents() { }
void trace(Visitor*);
void reset();
${accessor_list}
private:
InstrumentingAgents();
${member_list}
};
}
#endif // !defined(InstrumentingAgentsInl_h)
""")
template_instrumenting_agent_accessor = string.Template("""
${class_name}* ${getter_name}() const { return ${member_name}; }
void set${class_name}(${class_name}* agent) { ${member_name} = agent; }""")
template_instrumenting_agents_cpp = string.Template("""
InstrumentingAgents::InstrumentingAgents()
: $init_list
{
}
void InstrumentingAgents::trace(Visitor* visitor)
{
$trace_list
}
void InstrumentingAgents::reset()
{
$reset_list
}""")
def match_and_consume(pattern, source):
    """Try to match *pattern* at the start of *source*.

    Returns (match, remainder) where *remainder* is the stripped text after
    the match; on failure returns (None, source) unchanged.
    """
    found = re.match(pattern, source)
    if not found:
        return None, source
    remainder = source[found.end():].strip()
    return found, remainder
def load_model_from_idl(source):
    """Parse the InspectorInstrumentation IDL text into a list of File
    objects (one per ``interface ... { ... }`` block).

    Exits the process with status 1 if the source cannot be parsed.
    """
    source = re.sub("//.*", "", source)  # Remove line comments
    # Remove block comments.
    # BUG FIX: the previous code passed re.MULTILINE as re.sub()'s third
    # positional argument, which is *count*, not *flags* — re.MULTILINE == 8,
    # so only the first 8 block comments were stripped and the flag was never
    # applied.  The pattern already matches across newlines via (.|\n), so no
    # flag is needed at all.
    source = re.sub("/\*(.|\n)*?\*/", "", source)
    source = re.sub("\]\s*?\n\s*", "] ", source)  # Merge the method annotation with the next line
    source = source.strip()

    model = []

    while len(source):
        match, source = match_and_consume("interface\s(\w*)\s?\{([^\{]*)\}", source)
        if not match:
            sys.stderr.write("Cannot parse %s\n" % source[:100])
            sys.exit(1)
        model.append(File(match.group(1), match.group(2)))

    return model
class File:
    """One generated header: the parsed contents of a single IDL interface.

    Collects #include lines, C++ forward declarations and Method objects
    from the interface body.
    """

    def __init__(self, name, source):
        self.name = name
        # Generated headers are named <Interface>Inl.h.
        self.header_name = self.name + "Inl"
        self.includes = [include_inspector_header("InspectorInstrumentation")]
        self.forward_declarations = []
        self.declarations = []
        for line in map(str.strip, source.split("\n")):
            line = re.sub("\s{2,}", " ", line).strip()  # Collapse whitespace
            if len(line) == 0:
                continue
            if line[0] == "#":
                # Verbatim preprocessor line from the IDL.
                self.includes.append(line)
            elif line.startswith("class "):
                self.forward_declarations.append(line)
            else:
                self.declarations.append(Method(line))
        # Sort for deterministic generated output.
        self.includes.sort()
        self.forward_declarations.sort()

    def generate(self, cpp_lines, used_agents):
        """Emit this file's header text; appends Impl bodies to *cpp_lines*
        and records every referenced agent into the *used_agents* set."""
        header_lines = []
        for declaration in self.declarations:
            for agent in set(declaration.agents):
                used_agents.add(agent)
            declaration.generate_header(header_lines)
            declaration.generate_cpp(cpp_lines)
        return template_h.substitute(None,
                                     file_name=self.header_name,
                                     includes="\n".join(self.includes),
                                     forward_declarations="\n".join(self.forward_declarations),
                                     methods="\n".join(header_lines))
class Method:
    """One instrumentation hook parsed from an IDL method declaration.

    Understands the ``[Option,...] ReturnType name(params);`` grammar and
    generates both the inline wrapper (header) and the out-of-line Impl
    (cpp).  Note: relies on Python 2 ``map``/``filter`` returning lists.
    """

    def __init__(self, source):
        # Groups: 1 = optional [annotation list], 2 = return type,
        # 3 = method name, 4 = raw parameter list.
        match = re.match("(\[[\w|,|=|\s]*\])?\s?(\w*\*?) (\w*)\((.*)\)\s?;", source)
        if not match:
            sys.stderr.write("Cannot parse %s\n" % source)
            sys.exit(1)

        self.options = []
        if match.group(1):
            options_str = re.sub("\s", "", match.group(1)[1:-1])
            if len(options_str) != 0:
                self.options = options_str.split(",")

        self.return_type = match.group(2)

        self.name = match.group(3)

        # Splitting parameters by a comma, assuming that attribute lists contain no more than one attribute.
        self.params = map(Parameter, map(str.strip, match.group(4).split(",")))

        # Cookie-accepting hooks are the "did..." halves of timed pairs.
        self.accepts_cookie = len(self.params) and self.params[0].type == "const InspectorInstrumentationCookie&"
        self.returns_cookie = self.return_type == "InspectorInstrumentationCookie"

        self.returns_value = self.return_type != "void"

        # C++ literal to return when no agents are attached.
        if self.return_type == "bool":
            self.default_return_value = "false"
        elif self.return_type == "int":
            self.default_return_value = "0"
        elif self.return_type == "String":
            self.default_return_value = "\"\""
        else:
            self.default_return_value = self.return_type + "()"

        for param in self.params:
            if "DefaultReturn" in param.options:
                self.default_return_value = param.name

        # The Impl signature replaces the leading parameter with the
        # InstrumentingAgents* resolved by the inline wrapper (unless the
        # leading parameter is marked [Keep] or the call is forwarded).
        self.params_impl = self.params
        if not self.accepts_cookie and not "Inline=Forward" in self.options:
            if not "Keep" in self.params_impl[0].options:
                self.params_impl = self.params_impl[1:]
            self.params_impl = [Parameter("InstrumentingAgents* agents")] + self.params_impl

        # Agent names are the annotations without '=' (e.g. "Timeline").
        self.agents = filter(lambda option: not "=" in option, self.options)

    def generate_header(self, header_lines):
        """Append the Impl prototype and the inline wrapper to *header_lines*."""
        if "Inline=Custom" in self.options:
            return

        header_lines.append("%s %sImpl(%s);" % (
            self.return_type, self.name, ", ".join(map(Parameter.to_str_class, self.params_impl))))

        if "Inline=FastReturn" in self.options or "Inline=Forward" in self.options:
            fast_return = "\n    FAST_RETURN_IF_NO_FRONTENDS(%s);" % self.default_return_value
        else:
            fast_return = ""

        for param in self.params:
            if "FastReturn" in param.options:
                fast_return += "\n    if (!%s)\n        return %s;" % (param.name, self.default_return_value)

        if self.accepts_cookie:
            condition = "%s.isValid()" % self.params_impl[0].name
            template = template_inline
        elif "Inline=Forward" in self.options:
            condition = ""
            template = template_inline_forward
        else:
            condition = "InstrumentingAgents* agents = instrumentingAgentsFor(%s)" % self.params[0].name

            if self.returns_value:
                template = template_inline_returns_value
            else:
                template = template_inline

        header_lines.append(template.substitute(
            None,
            name=self.name,
            fast_return=fast_return,
            return_type=self.return_type,
            default_return_value=self.default_return_value,
            params_public=", ".join(map(Parameter.to_str_full, self.params)),
            params_impl=", ".join(map(Parameter.to_str_name, self.params_impl)),
            condition=condition))

    def generate_cpp(self, cpp_lines):
        """Append the out-of-line Impl definition to *cpp_lines* (no-op for
        methods with no agent annotations)."""
        if len(self.agents) == 0:
            return

        body_lines = map(self.generate_ref_ptr, self.params)
        body_lines += map(self.generate_agent_call, self.agents)

        if self.returns_cookie:
            if "Timeline" in self.agents:
                timeline_agent_id = "timelineAgentId"
            else:
                timeline_agent_id = "0"
            body_lines.append("\n    return InspectorInstrumentationCookie(agents, %s);" % timeline_agent_id)
        elif self.returns_value:
            body_lines.append("\n    return %s;" % self.default_return_value)

        cpp_lines.append(template_outofline.substitute(
            None,
            return_type=self.return_type,
            name=self.name,
            params_impl=", ".join(map(Parameter.to_str_class_and_name, self.params_impl)),
            impl_lines="".join(body_lines)))

    def generate_agent_call(self, agent):
        """Return the C++ snippet that dispatches this hook to *agent*."""
        agent_class, agent_getter = agent_getter_signature(agent)

        leading_param_name = self.params_impl[0].name
        if not self.accepts_cookie:
            agent_fetch = "%s->%s()" % (leading_param_name, agent_getter)
        elif agent == "Timeline":
            agent_fetch = "retrieveTimelineAgent(%s)" % leading_param_name
        else:
            agent_fetch = "%s.instrumentingAgents()->%s()" % (leading_param_name, agent_getter)

        if agent == "Timeline" and self.returns_cookie:
            template = template_agent_call_timeline_returns_cookie
        else:
            template = template_agent_call

        if not self.returns_value or self.returns_cookie:
            maybe_return = ""
        else:
            maybe_return = "return "

        return template.substitute(
            None,
            name=self.name,
            agent_class=agent_class,
            agent_fetch=agent_fetch,
            maybe_return=maybe_return,
            params_agent=", ".join(map(Parameter.to_str_value, self.params_impl)[1:]))

    def generate_ref_ptr(self, param):
        """Return a local RefPtr declaration for PassRefPtr parameters so the
        Impl body can pass a raw reference to each agent; '' otherwise."""
        if param.is_prp:
            return "\n    RefPtr<%s> %s = %s;" % (param.inner_type, param.value, param.name)
        else:
            return ""
class Parameter:
    """One parameter from an IDL method signature.

    Parses an optional leading ``[Option]``, an optional ``= default``
    suffix, the C++ type, and a name (synthesized from the type when
    absent).  PassRefPtr<T> parameters get a ``prp``-prefixed name so the
    Impl body can unwrap them into a local RefPtr.
    """

    def __init__(self, source):
        self.options = []
        match, source = match_and_consume("\[(\w*)\]", source)
        if match:
            self.options.append(match.group(1))

        # Split off a "= default" suffix, if any.
        parts = map(str.strip, source.split("="))
        if len(parts) == 1:
            self.default_value = None
        else:
            self.default_value = parts[1]

        param_decl = parts[0]

        # "const T" / "unsigned long" count as two tokens of type, so only
        # a third token is a parameter name.
        if re.match("(const|unsigned long) ", param_decl):
            min_type_tokens = 2
        else:
            min_type_tokens = 1

        if len(param_decl.split(" ")) > min_type_tokens:
            parts = param_decl.split(" ")
            self.type = " ".join(parts[:-1])
            self.name = parts[-1]
        else:
            self.type = param_decl
            self.name = generate_param_name(self.type)

        if re.match("PassRefPtr<", param_decl):
            self.is_prp = True
            # 'value' keeps the plain name used inside the Impl body;
            # 'name' becomes the prp-prefixed incoming parameter name.
            self.value = self.name
            self.name = "prp" + self.name[0].upper() + self.name[1:]
            self.inner_type = re.match("PassRefPtr<(.+)>", param_decl).group(1)
        else:
            self.is_prp = False
            self.value = self.name

    def to_str_full(self):
        """Type, name and default value (public wrapper signature)."""
        if self.default_value is None:
            return self.to_str_class_and_name()
        return "%s %s = %s" % (self.type, self.name, self.default_value)

    def to_str_class_and_name(self):
        """Type and name (Impl definition signature)."""
        return "%s %s" % (self.type, self.name)

    def to_str_class(self):
        """Type only (Impl prototype)."""
        return self.type

    def to_str_name(self):
        """Name only (argument forwarding)."""
        return self.name

    def to_str_value(self):
        """Value expression passed to the agent (unwrapped for PassRefPtr)."""
        return self.value
def generate_param_name(param_type):
    """Synthesize a parameter name from a C++ type, e.g. 'const String&'
    becomes 'paramString'."""
    base = re.match(r"(const |PassRefPtr<)?(\w*)", param_type)
    return "param%s" % base.group(2)
def agent_class_name(agent):
    """Map a short agent name from the IDL to its C++ class name.

    Most agents follow the Inspector<Name>Agent convention; a few carry
    their own prefix and only get the Agent suffix.
    """
    if agent in ("PageDebugger", "PageRuntime", "WorkerRuntime"):
        return agent + "Agent"
    return "Inspector" + agent + "Agent"
def agent_getter_signature(agent):
    """Return (class name, accessor name) for *agent*; the accessor is the
    class name with its first letter lower-cased."""
    cls = agent_class_name(agent)
    getter = cls[0].lower() + cls[1:]
    return cls, getter
def include_header(name):
    """Format a C++ #include line for header *name*."""
    return '#include "{0}.h"'.format(name)
def include_inspector_header(name):
    """Format a #include line for a header under core/inspector/."""
    return include_header("core/inspector/%s" % name)
def generate_instrumenting_agents(used_agents):
    """Build the InstrumentingAgents class from the set of agents referenced
    by the IDL.

    Returns (header_text, cpp_text): the full InstrumentingAgentsInl.h
    contents and the constructor/trace()/reset() definitions, with one
    forward declaration, accessor pair, member, initializer, trace call and
    reset assignment per agent.
    """
    agents = list(used_agents)

    forward_list = []
    accessor_list = []
    member_list = []
    init_list = []
    trace_list = []
    reset_list = []
    for agent in agents:
        class_name, getter_name = agent_getter_signature(agent)
        member_name = "m_" + getter_name

        forward_list.append("class %s;" % class_name)
        accessor_list.append(template_instrumenting_agent_accessor.substitute(
            None,
            class_name=class_name,
            getter_name=getter_name,
            member_name=member_name))
        member_list.append("    RawPtrWillBeMember<%s> %s;" % (class_name, member_name))
        init_list.append("%s(nullptr)" % member_name)
        trace_list.append("visitor->trace(%s);" % member_name)
        reset_list.append("%s = nullptr;" % member_name)

    # Sort every list so the generated output is deterministic regardless
    # of set iteration order.
    forward_list.sort()
    accessor_list.sort()
    member_list.sort()
    init_list.sort()
    trace_list.sort()
    reset_list.sort()

    header_lines = template_instrumenting_agents_h.substitute(
        None,
        forward_list="\n".join(forward_list),
        accessor_list="\n".join(accessor_list),
        member_list="\n".join(member_list))

    cpp_lines = template_instrumenting_agents_cpp.substitute(
        None,
        init_list="\n    , ".join(init_list),
        trace_list="\n    ".join(trace_list),
        reset_list="\n    ".join(reset_list))

    return header_lines, cpp_lines
def generate(input_path, output_dir):
    """Drive code generation: parse the IDL at *input_path* and write the
    per-interface *Inl.h headers, InstrumentingAgentsInl.h and
    InspectorInstrumentationImpl.cpp into *output_dir*."""
    fin = open(input_path, "r")
    files = load_model_from_idl(fin.read())
    fin.close()

    cpp_includes = []
    cpp_lines = []
    used_agents = set()
    for f in files:
        cpp_includes.append(include_header(f.header_name))

        # Each interface gets its own generated header; generating it also
        # accumulates the cpp bodies and the set of referenced agents.
        fout = open(output_dir + "/" + f.header_name + ".h", "w")
        fout.write(f.generate(cpp_lines, used_agents))
        fout.close()

    for agent in used_agents:
        cpp_includes.append(include_inspector_header(agent_class_name(agent)))
    cpp_includes.append(include_header("InstrumentingAgentsInl"))
    cpp_includes.sort()

    instrumenting_agents_header, instrumenting_agents_cpp = generate_instrumenting_agents(used_agents)

    fout = open(output_dir + "/" + "InstrumentingAgentsInl.h", "w")
    fout.write(instrumenting_agents_header)
    fout.close()

    fout = open(output_dir + "/InspectorInstrumentationImpl.cpp", "w")
    fout.write(template_cpp.substitute(None,
                                       includes="\n".join(cpp_includes),
                                       extra_definitions=instrumenting_agents_cpp,
                                       methods="\n".join(cpp_lines)))
    fout.close()
# Command-line entry point: expects exactly one positional argument (the
# IDL file) and a mandatory --output_dir option.
cmdline_parser = optparse.OptionParser()
cmdline_parser.add_option("--output_dir")

try:
    arg_options, arg_values = cmdline_parser.parse_args()
    if (len(arg_values) != 1):
        raise Exception("Exactly one plain argument expected (found %s)" % len(arg_values))
    input_path = arg_values[0]
    output_dirpath = arg_options.output_dir
    if not output_dirpath:
        raise Exception("Output directory must be specified")
except Exception:
    # Work with python 2 and 3 http://docs.python.org/py3k/howto/pyporting.html
    exc = sys.exc_info()[1]
    sys.stderr.write("Failed to parse command-line arguments: %s\n\n" % exc)
    sys.stderr.write("Usage: <script> --output_dir <output_dir> InspectorInstrumentation.idl\n")
    exit(1)

generate(input_path, output_dirpath)
| bsd-3-clause |
Beyond-Imagination/BlubBlub | RaspberryPI/django-env/lib/python3.4/site-packages/django/views/generic/detail.py | 109 | 6720 | from __future__ import unicode_literals
from django.core.exceptions import ImproperlyConfigured
from django.db import models
from django.http import Http404
from django.utils.translation import ugettext as _
from django.views.generic.base import ContextMixin, TemplateResponseMixin, View
class SingleObjectMixin(ContextMixin):
    """
    Provides the ability to retrieve a single object for further manipulation.
    """
    model = None                  # Model class to query when queryset is not set.
    queryset = None               # Explicit QuerySet; takes precedence over model.
    slug_field = 'slug'           # Model field matched against the slug URL kwarg.
    context_object_name = None    # Extra context key for the object, if set.
    slug_url_kwarg = 'slug'       # URLconf kwarg holding the slug.
    pk_url_kwarg = 'pk'           # URLconf kwarg holding the primary key.
    query_pk_and_slug = False     # When True, filter on both pk AND slug.

    def get_object(self, queryset=None):
        """
        Returns the object the view is displaying.

        By default this requires `self.queryset` and a `pk` or `slug` argument
        in the URLconf, but subclasses can override this to return any object.

        Raises Http404 when no match exists and AttributeError when neither
        pk nor slug was captured by the URLconf.
        """
        # Use a custom queryset if provided; this is required for subclasses
        # like DateDetailView
        if queryset is None:
            queryset = self.get_queryset()

        # Next, try looking up by primary key.
        pk = self.kwargs.get(self.pk_url_kwarg)
        slug = self.kwargs.get(self.slug_url_kwarg)
        if pk is not None:
            queryset = queryset.filter(pk=pk)

        # Next, try looking up by slug.
        if slug is not None and (pk is None or self.query_pk_and_slug):
            slug_field = self.get_slug_field()
            queryset = queryset.filter(**{slug_field: slug})

        # If none of those are defined, it's an error.
        if pk is None and slug is None:
            raise AttributeError("Generic detail view %s must be called with "
                                 "either an object pk or a slug."
                                 % self.__class__.__name__)

        try:
            # Get the single item from the filtered queryset
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            raise Http404(_("No %(verbose_name)s found matching the query") %
                          {'verbose_name': queryset.model._meta.verbose_name})
        return obj

    def get_queryset(self):
        """
        Return the `QuerySet` that will be used to look up the object.

        Note that this method is called by the default implementation of
        `get_object` and may not be called if `get_object` is overridden.
        """
        if self.queryset is None:
            if self.model:
                return self.model._default_manager.all()
            else:
                raise ImproperlyConfigured(
                    "%(cls)s is missing a QuerySet. Define "
                    "%(cls)s.model, %(cls)s.queryset, or override "
                    "%(cls)s.get_queryset()." % {
                        'cls': self.__class__.__name__
                    }
                )
        # .all() returns a fresh copy so per-request filtering never
        # mutates the class-level queryset.
        return self.queryset.all()

    def get_slug_field(self):
        """
        Get the name of a slug field to be used to look up by slug.
        """
        return self.slug_field

    def get_context_object_name(self, obj):
        """
        Get the name to use for the object.
        """
        if self.context_object_name:
            return self.context_object_name
        elif isinstance(obj, models.Model):
            # Fall back to the lowercased model name, e.g. 'article'.
            return obj._meta.model_name
        else:
            return None

    def get_context_data(self, **kwargs):
        """
        Insert the single object into the context dict.

        The object is exposed both as 'object' and, when determinable, under
        its model-specific context name.
        """
        context = {}
        if self.object:
            context['object'] = self.object
            context_object_name = self.get_context_object_name(self.object)
            if context_object_name:
                context[context_object_name] = self.object
        context.update(kwargs)
        return super(SingleObjectMixin, self).get_context_data(**context)
class BaseDetailView(SingleObjectMixin, View):
    """
    A base view for displaying a single object
    """
    def get(self, request, *args, **kwargs):
        # Resolve the target object first so get_context_data() can rely on
        # self.object being populated.
        self.object = self.get_object()
        return self.render_to_response(self.get_context_data(object=self.object))
class SingleObjectTemplateResponseMixin(TemplateResponseMixin):
    """Template selection for single-object views, deriving candidate
    template names from the object or its model."""
    template_name_field = None        # Object field holding a template name override.
    template_name_suffix = '_detail'  # Appended to '<app>/<model>' default names.

    def get_template_names(self):
        """
        Return a list of template names to be used for the request. May not be
        called if render_to_response is overridden. Returns the following list:

        * the value of ``template_name`` on the view (if provided)
        * the contents of the ``template_name_field`` field on the
          object instance that the view is operating upon (if available)
        * ``<app_label>/<model_name><template_name_suffix>.html``
        """
        try:
            names = super(SingleObjectTemplateResponseMixin, self).get_template_names()
        except ImproperlyConfigured:
            # If template_name isn't specified, it's not a problem --
            # we just start with an empty list.
            names = []

            # If self.template_name_field is set, grab the value of the field
            # of that name from the object; this is the most specific template
            # name, if given.
            if self.object and self.template_name_field:
                name = getattr(self.object, self.template_name_field, None)
                if name:
                    names.insert(0, name)

            # The least-specific option is the default <app>/<model>_detail.html;
            # only use this if the object in question is a model.
            if isinstance(self.object, models.Model):
                object_meta = self.object._meta
                names.append("%s/%s%s.html" % (
                    object_meta.app_label,
                    object_meta.model_name,
                    self.template_name_suffix
                ))
            elif hasattr(self, 'model') and self.model is not None and issubclass(self.model, models.Model):
                names.append("%s/%s%s.html" % (
                    self.model._meta.app_label,
                    self.model._meta.model_name,
                    self.template_name_suffix
                ))

            # If we still haven't managed to find any template names, we should
            # re-raise the ImproperlyConfigured to alert the user.
            if not names:
                raise

        return names
class DetailView(SingleObjectTemplateResponseMixin, BaseDetailView):
    """
    Render a "detail" view of an object.
    Combines the template-name resolution of
    SingleObjectTemplateResponseMixin with the GET handling of
    BaseDetailView.
    By default this is a model instance looked up from `self.queryset`, but the
    view will support display of *any* object by overriding `self.get_object()`.
    """
| gpl-3.0 |
sauloal/cufflinksviewer | venvwin/Lib/encodings/aliases.py | 84 | 15375 | """ Encoding Aliases Support
This module is used by the encodings package search function to
map encodings names to module names.
Note that the search function normalizes the encoding names before
doing the lookup, so the mapping will have to map normalized
encoding names to module names.
Contents:
The following aliases dictionary contains mappings of all IANA
character set names for which the Python core library provides
codecs. In addition to these, a few Python specific codec
aliases have also been added.
"""
aliases = {
# Please keep this list sorted alphabetically by value !
# ascii codec
'646' : 'ascii',
'ansi_x3.4_1968' : 'ascii',
'ansi_x3_4_1968' : 'ascii', # some email headers use this non-standard name
'ansi_x3.4_1986' : 'ascii',
'cp367' : 'ascii',
'csascii' : 'ascii',
'ibm367' : 'ascii',
'iso646_us' : 'ascii',
'iso_646.irv_1991' : 'ascii',
'iso_ir_6' : 'ascii',
'us' : 'ascii',
'us_ascii' : 'ascii',
# base64_codec codec
'base64' : 'base64_codec',
'base_64' : 'base64_codec',
# big5 codec
'big5_tw' : 'big5',
'csbig5' : 'big5',
# big5hkscs codec
'big5_hkscs' : 'big5hkscs',
'hkscs' : 'big5hkscs',
# bz2_codec codec
'bz2' : 'bz2_codec',
# cp037 codec
'037' : 'cp037',
'csibm037' : 'cp037',
'ebcdic_cp_ca' : 'cp037',
'ebcdic_cp_nl' : 'cp037',
'ebcdic_cp_us' : 'cp037',
'ebcdic_cp_wt' : 'cp037',
'ibm037' : 'cp037',
'ibm039' : 'cp037',
# cp1026 codec
'1026' : 'cp1026',
'csibm1026' : 'cp1026',
'ibm1026' : 'cp1026',
# cp1140 codec
'1140' : 'cp1140',
'ibm1140' : 'cp1140',
# cp1250 codec
'1250' : 'cp1250',
'windows_1250' : 'cp1250',
# cp1251 codec
'1251' : 'cp1251',
'windows_1251' : 'cp1251',
# cp1252 codec
'1252' : 'cp1252',
'windows_1252' : 'cp1252',
# cp1253 codec
'1253' : 'cp1253',
'windows_1253' : 'cp1253',
# cp1254 codec
'1254' : 'cp1254',
'windows_1254' : 'cp1254',
# cp1255 codec
'1255' : 'cp1255',
'windows_1255' : 'cp1255',
# cp1256 codec
'1256' : 'cp1256',
'windows_1256' : 'cp1256',
# cp1257 codec
'1257' : 'cp1257',
'windows_1257' : 'cp1257',
# cp1258 codec
'1258' : 'cp1258',
'windows_1258' : 'cp1258',
# cp424 codec
'424' : 'cp424',
'csibm424' : 'cp424',
'ebcdic_cp_he' : 'cp424',
'ibm424' : 'cp424',
# cp437 codec
'437' : 'cp437',
'cspc8codepage437' : 'cp437',
'ibm437' : 'cp437',
# cp500 codec
'500' : 'cp500',
'csibm500' : 'cp500',
'ebcdic_cp_be' : 'cp500',
'ebcdic_cp_ch' : 'cp500',
'ibm500' : 'cp500',
# cp775 codec
'775' : 'cp775',
'cspc775baltic' : 'cp775',
'ibm775' : 'cp775',
# cp850 codec
'850' : 'cp850',
'cspc850multilingual' : 'cp850',
'ibm850' : 'cp850',
# cp852 codec
'852' : 'cp852',
'cspcp852' : 'cp852',
'ibm852' : 'cp852',
# cp855 codec
'855' : 'cp855',
'csibm855' : 'cp855',
'ibm855' : 'cp855',
# cp857 codec
'857' : 'cp857',
'csibm857' : 'cp857',
'ibm857' : 'cp857',
# cp858 codec
'858' : 'cp858',
'csibm858' : 'cp858',
'ibm858' : 'cp858',
# cp860 codec
'860' : 'cp860',
'csibm860' : 'cp860',
'ibm860' : 'cp860',
# cp861 codec
'861' : 'cp861',
'cp_is' : 'cp861',
'csibm861' : 'cp861',
'ibm861' : 'cp861',
# cp862 codec
'862' : 'cp862',
'cspc862latinhebrew' : 'cp862',
'ibm862' : 'cp862',
# cp863 codec
'863' : 'cp863',
'csibm863' : 'cp863',
'ibm863' : 'cp863',
# cp864 codec
'864' : 'cp864',
'csibm864' : 'cp864',
'ibm864' : 'cp864',
# cp865 codec
'865' : 'cp865',
'csibm865' : 'cp865',
'ibm865' : 'cp865',
# cp866 codec
'866' : 'cp866',
'csibm866' : 'cp866',
'ibm866' : 'cp866',
# cp869 codec
'869' : 'cp869',
'cp_gr' : 'cp869',
'csibm869' : 'cp869',
'ibm869' : 'cp869',
# cp932 codec
'932' : 'cp932',
'ms932' : 'cp932',
'mskanji' : 'cp932',
'ms_kanji' : 'cp932',
# cp949 codec
'949' : 'cp949',
'ms949' : 'cp949',
'uhc' : 'cp949',
# cp950 codec
'950' : 'cp950',
'ms950' : 'cp950',
# euc_jis_2004 codec
'jisx0213' : 'euc_jis_2004',
'eucjis2004' : 'euc_jis_2004',
'euc_jis2004' : 'euc_jis_2004',
# euc_jisx0213 codec
'eucjisx0213' : 'euc_jisx0213',
# euc_jp codec
'eucjp' : 'euc_jp',
'ujis' : 'euc_jp',
'u_jis' : 'euc_jp',
# euc_kr codec
'euckr' : 'euc_kr',
'korean' : 'euc_kr',
'ksc5601' : 'euc_kr',
'ks_c_5601' : 'euc_kr',
'ks_c_5601_1987' : 'euc_kr',
'ksx1001' : 'euc_kr',
'ks_x_1001' : 'euc_kr',
# gb18030 codec
'gb18030_2000' : 'gb18030',
# gb2312 codec
'chinese' : 'gb2312',
'csiso58gb231280' : 'gb2312',
'euc_cn' : 'gb2312',
'euccn' : 'gb2312',
'eucgb2312_cn' : 'gb2312',
'gb2312_1980' : 'gb2312',
'gb2312_80' : 'gb2312',
'iso_ir_58' : 'gb2312',
# gbk codec
'936' : 'gbk',
'cp936' : 'gbk',
'ms936' : 'gbk',
# hex_codec codec
'hex' : 'hex_codec',
# hp_roman8 codec
'roman8' : 'hp_roman8',
'r8' : 'hp_roman8',
'csHPRoman8' : 'hp_roman8',
# hz codec
'hzgb' : 'hz',
'hz_gb' : 'hz',
'hz_gb_2312' : 'hz',
# iso2022_jp codec
'csiso2022jp' : 'iso2022_jp',
'iso2022jp' : 'iso2022_jp',
'iso_2022_jp' : 'iso2022_jp',
# iso2022_jp_1 codec
'iso2022jp_1' : 'iso2022_jp_1',
'iso_2022_jp_1' : 'iso2022_jp_1',
# iso2022_jp_2 codec
'iso2022jp_2' : 'iso2022_jp_2',
'iso_2022_jp_2' : 'iso2022_jp_2',
# iso2022_jp_2004 codec
'iso_2022_jp_2004' : 'iso2022_jp_2004',
'iso2022jp_2004' : 'iso2022_jp_2004',
# iso2022_jp_3 codec
'iso2022jp_3' : 'iso2022_jp_3',
'iso_2022_jp_3' : 'iso2022_jp_3',
# iso2022_jp_ext codec
'iso2022jp_ext' : 'iso2022_jp_ext',
'iso_2022_jp_ext' : 'iso2022_jp_ext',
# iso2022_kr codec
'csiso2022kr' : 'iso2022_kr',
'iso2022kr' : 'iso2022_kr',
'iso_2022_kr' : 'iso2022_kr',
# iso8859_10 codec
'csisolatin6' : 'iso8859_10',
'iso_8859_10' : 'iso8859_10',
'iso_8859_10_1992' : 'iso8859_10',
'iso_ir_157' : 'iso8859_10',
'l6' : 'iso8859_10',
'latin6' : 'iso8859_10',
# iso8859_11 codec
'thai' : 'iso8859_11',
'iso_8859_11' : 'iso8859_11',
'iso_8859_11_2001' : 'iso8859_11',
# iso8859_13 codec
'iso_8859_13' : 'iso8859_13',
'l7' : 'iso8859_13',
'latin7' : 'iso8859_13',
# iso8859_14 codec
'iso_8859_14' : 'iso8859_14',
'iso_8859_14_1998' : 'iso8859_14',
'iso_celtic' : 'iso8859_14',
'iso_ir_199' : 'iso8859_14',
'l8' : 'iso8859_14',
'latin8' : 'iso8859_14',
# iso8859_15 codec
'iso_8859_15' : 'iso8859_15',
'l9' : 'iso8859_15',
'latin9' : 'iso8859_15',
# iso8859_16 codec
'iso_8859_16' : 'iso8859_16',
'iso_8859_16_2001' : 'iso8859_16',
'iso_ir_226' : 'iso8859_16',
'l10' : 'iso8859_16',
'latin10' : 'iso8859_16',
# iso8859_2 codec
'csisolatin2' : 'iso8859_2',
'iso_8859_2' : 'iso8859_2',
'iso_8859_2_1987' : 'iso8859_2',
'iso_ir_101' : 'iso8859_2',
'l2' : 'iso8859_2',
'latin2' : 'iso8859_2',
# iso8859_3 codec
'csisolatin3' : 'iso8859_3',
'iso_8859_3' : 'iso8859_3',
'iso_8859_3_1988' : 'iso8859_3',
'iso_ir_109' : 'iso8859_3',
'l3' : 'iso8859_3',
'latin3' : 'iso8859_3',
# iso8859_4 codec
'csisolatin4' : 'iso8859_4',
'iso_8859_4' : 'iso8859_4',
'iso_8859_4_1988' : 'iso8859_4',
'iso_ir_110' : 'iso8859_4',
'l4' : 'iso8859_4',
'latin4' : 'iso8859_4',
# iso8859_5 codec
'csisolatincyrillic' : 'iso8859_5',
'cyrillic' : 'iso8859_5',
'iso_8859_5' : 'iso8859_5',
'iso_8859_5_1988' : 'iso8859_5',
'iso_ir_144' : 'iso8859_5',
# iso8859_6 codec
'arabic' : 'iso8859_6',
'asmo_708' : 'iso8859_6',
'csisolatinarabic' : 'iso8859_6',
'ecma_114' : 'iso8859_6',
'iso_8859_6' : 'iso8859_6',
'iso_8859_6_1987' : 'iso8859_6',
'iso_ir_127' : 'iso8859_6',
# iso8859_7 codec
'csisolatingreek' : 'iso8859_7',
'ecma_118' : 'iso8859_7',
'elot_928' : 'iso8859_7',
'greek' : 'iso8859_7',
'greek8' : 'iso8859_7',
'iso_8859_7' : 'iso8859_7',
'iso_8859_7_1987' : 'iso8859_7',
'iso_ir_126' : 'iso8859_7',
# iso8859_8 codec
'csisolatinhebrew' : 'iso8859_8',
'hebrew' : 'iso8859_8',
'iso_8859_8' : 'iso8859_8',
'iso_8859_8_1988' : 'iso8859_8',
'iso_ir_138' : 'iso8859_8',
# iso8859_9 codec
'csisolatin5' : 'iso8859_9',
'iso_8859_9' : 'iso8859_9',
'iso_8859_9_1989' : 'iso8859_9',
'iso_ir_148' : 'iso8859_9',
'l5' : 'iso8859_9',
'latin5' : 'iso8859_9',
# johab codec
'cp1361' : 'johab',
'ms1361' : 'johab',
# koi8_r codec
'cskoi8r' : 'koi8_r',
# latin_1 codec
#
# Note that the latin_1 codec is implemented internally in C and a
# lot faster than the charmap codec iso8859_1 which uses the same
# encoding. This is why we discourage the use of the iso8859_1
# codec and alias it to latin_1 instead.
#
'8859' : 'latin_1',
'cp819' : 'latin_1',
'csisolatin1' : 'latin_1',
'ibm819' : 'latin_1',
'iso8859' : 'latin_1',
'iso8859_1' : 'latin_1',
'iso_8859_1' : 'latin_1',
'iso_8859_1_1987' : 'latin_1',
'iso_ir_100' : 'latin_1',
'l1' : 'latin_1',
'latin' : 'latin_1',
'latin1' : 'latin_1',
# mac_cyrillic codec
'maccyrillic' : 'mac_cyrillic',
# mac_greek codec
'macgreek' : 'mac_greek',
# mac_iceland codec
'maciceland' : 'mac_iceland',
# mac_latin2 codec
'maccentraleurope' : 'mac_latin2',
'maclatin2' : 'mac_latin2',
# mac_roman codec
'macroman' : 'mac_roman',
# mac_turkish codec
'macturkish' : 'mac_turkish',
# mbcs codec
'dbcs' : 'mbcs',
# ptcp154 codec
'csptcp154' : 'ptcp154',
'pt154' : 'ptcp154',
'cp154' : 'ptcp154',
'cyrillic_asian' : 'ptcp154',
# quopri_codec codec
'quopri' : 'quopri_codec',
'quoted_printable' : 'quopri_codec',
'quotedprintable' : 'quopri_codec',
# rot_13 codec
'rot13' : 'rot_13',
# shift_jis codec
'csshiftjis' : 'shift_jis',
'shiftjis' : 'shift_jis',
'sjis' : 'shift_jis',
's_jis' : 'shift_jis',
# shift_jis_2004 codec
'shiftjis2004' : 'shift_jis_2004',
'sjis_2004' : 'shift_jis_2004',
's_jis_2004' : 'shift_jis_2004',
# shift_jisx0213 codec
'shiftjisx0213' : 'shift_jisx0213',
'sjisx0213' : 'shift_jisx0213',
's_jisx0213' : 'shift_jisx0213',
# tactis codec
'tis260' : 'tactis',
# tis_620 codec
'tis620' : 'tis_620',
'tis_620_0' : 'tis_620',
'tis_620_2529_0' : 'tis_620',
'tis_620_2529_1' : 'tis_620',
'iso_ir_166' : 'tis_620',
# utf_16 codec
'u16' : 'utf_16',
'utf16' : 'utf_16',
# utf_16_be codec
'unicodebigunmarked' : 'utf_16_be',
'utf_16be' : 'utf_16_be',
# utf_16_le codec
'unicodelittleunmarked' : 'utf_16_le',
'utf_16le' : 'utf_16_le',
# utf_32 codec
'u32' : 'utf_32',
'utf32' : 'utf_32',
# utf_32_be codec
'utf_32be' : 'utf_32_be',
# utf_32_le codec
'utf_32le' : 'utf_32_le',
# utf_7 codec
'u7' : 'utf_7',
'utf7' : 'utf_7',
'unicode_1_1_utf_7' : 'utf_7',
# utf_8 codec
'u8' : 'utf_8',
'utf' : 'utf_8',
'utf8' : 'utf_8',
'utf8_ucs2' : 'utf_8',
'utf8_ucs4' : 'utf_8',
# uu_codec codec
'uu' : 'uu_codec',
# zlib_codec codec
'zip' : 'zlib_codec',
'zlib' : 'zlib_codec',
}
| mit |
gilestrolab/ethoscope | prototypes/automask_and_tracking.py | 1 | 1063 | __author__ = 'quentin'
from ethoscope.tracking.roi_builders import SleepDepROIBuilder
from ethoscope.tracking.cameras import MovieVirtualCamera
from ethoscope.tracking.monitor import Monitor
from ethoscope.tracking.trackers import AdaptiveBGModel
from ethoscope.tracking.interactors import SystemPlaySoundOnStop
from ethoscope.tracking.interactors import SleepDepInteractor
from ethoscope.hardware_control.arduino_api import SleepDepriverInterface
# Replay a sample recording through a virtual camera (no hardware needed).
cam = MovieVirtualCamera("/stk/pysolo_video_samples/motion_in_dark_one_tube_at_a_time.avi")
#cam = MovieVirtualCamera("/stk/pysolo_video_samples/long_realistic_recording_with_motion.avi")
#cam = MovieVirtualCamera("/stk/pysolo_video_samples/long_realistic_recording.avi")
# sdi = SleepDepriverInterface()
# Automatically derive the regions of interest (ROIs) from the video frames.
roi_builder = SleepDepROIBuilder()
rois = roi_builder(cam)
# One interactor per ROI; each plays a distinct tone (500 + 30*i) --
# presumably so individual ROIs can be told apart by ear; confirm.
inters = [SystemPlaySoundOnStop(500 + i * 30) for i in range(len(rois))]
# inters = [SleepDepInteractor(i, sdi) for i in range(len(rois))]
# Track every ROI with the adaptive background model and show live output.
monit = Monitor(cam, AdaptiveBGModel, rois, interactors= inters, draw_results=True)
monit.run()
# | gpl-3.0 |
drkitty/cyder | cyder/cydhcp/range/utils.py | 4 | 6674 | from django.db.models import get_model, Q
from cyder.cydhcp.utils import start_end_filter, two_to_one, one_to_two
from django.http import HttpResponse
from cyder.base.constants import STATIC, DYNAMIC
import json
import ipaddr
def find_range(ip_str):
    """Return the first Range whose bounds contain *ip_str*, or None."""
    range_model = get_model('cyder', 'range')
    upper, lower = one_to_two(int(ipaddr.IPAddress(ip_str)))
    # The range starts at or below the address ...
    starts_at_or_below = (Q(start_upper__lt=upper) |
                          Q(start_upper=upper, start_lower__lte=lower))
    # ... and ends at or above it.
    ends_at_or_above = (Q(end_upper__gt=upper) |
                        Q(end_upper=upper, end_lower__gte=lower))
    matches = range_model.objects.filter(starts_at_or_below, ends_at_or_above)
    try:
        return matches[0]
    except IndexError:
        return None
def ip_taken(ip, records):
    """Return the record in ``records`` whose IP equals the integer ``ip``.

    ``records`` is intended to be a queryset (or list) of PTRs, address
    records, or static interfaces, each storing its IP split into
    ``ip_upper``/``ip_lower`` halves.  Returns None when nothing matches.
    """
    # one_to_two() yields the (upper, lower) halves -- the same unpacking
    # order used by find_range() above.  BUG FIX: the original unpacked
    # into (ip_low, ip_high) and compared the halves crosswise, and it used
    # ``is`` (object identity) instead of ``==`` for integer comparison,
    # so matches could silently fail outside CPython's small-int cache.
    ip_upper, ip_lower = one_to_two(ip)
    for record in records:
        if record.ip_lower == ip_lower and record.ip_upper == ip_upper:
            return record
    return None
def range_usage(ip_start, ip_end, ip_type, get_objects=True):
    """Returns ip usage statistics about the range starting at ip_start and
    ending at ip_end.
    Given an inclusive contiguous range of positive integers (IP addresses)
    between `a` and `b` and a list of lists where each sublist contains
    integers (IP addresses) that are within the range, how many integers
    between `a` and `b` do not exist in any of the lists; this is what this
    function calculates.
    For example:
    ```
    Start = 0
    End = 9
    Lists = [[1,2,3], [2,3,4]]
    ```
    The integers that do not occur in `Lists` are `0`, `5`, `6`, `7`, `8`, and
    `9`, so there are 6 integers that do not exist in Lists that satisfy `Start
    <= n <= End`.
    Start can be small and End can be very large (the range may be
    larger than you would want to itterate over). Due to the size of IPv6
    ranges, we should not use recursion.
    There are three types of objects (that we care about) that have IP's
    associated with them: AddressRecord, PTR, StaticInterface. Because we get
    objects back as Queryset's that are hard to merge, we have to do this
    algorithm while retaining all three lists. The gist of the algoritm is as
    follows::
        # Assume the lists are sorted
        while lists:
            note the start number (ip)
            lowest =: of the things in list (PTR, A, INTR), find the lowest
            difference =: start - lowest.ip
            total_free +=: difference
            start =: lowest.ip + 1
            if any PTR, A, or INTR has the same IP as lowest:
                remove those items from their lists
    """
    # NOTE(review): the ``get_objects`` parameter is never read in this body.
    StaticInterface = get_model('cyder', 'staticinterface')
    PTR = get_model('cyder', 'ptr')
    AddressRecord = get_model('cyder', 'addressrecord')
    istart, iend, ipf_q = start_end_filter(ip_start, ip_end, ip_type)
    # Collapse a record's two 64-bit halves back into one integer for sorting.
    def get_ip(rec):
        return two_to_one(rec.ip_upper, rec.ip_lower)
    lists = [sorted(AddressRecord.objects.filter(ipf_q), key=get_ip),
             sorted(PTR.objects.filter(ipf_q), key=get_ip),
             sorted(StaticInterface.objects.filter(ipf_q), key=get_ip)]
    free_ranges = []
    # NOTE(review): cmp_ip_upper_lower is defined but never called anywhere
    # in this function -- candidate for removal.
    def cmp_ip_upper_lower(a, b):
        if a.ip_upper > b.ip_upper:
            return a
        elif a.ip_upper < b.ip_upper:
            return b
        elif a.ip_lower > b.ip_lower:
            return a
        elif a.ip_lower < b.ip_lower:
            return b
        else:
            return a  # redundant, maybe?
    unused = 0
    # minimum_i is reassigned every loop iteration; this initial 0 is never read.
    minimum_i = 0
    rel_start = int(istart)
    end = int(iend)
    # This is translated directly from a recursive implementation.
    while True:
        if rel_start > end:
            break
        # Drop lists that have been exhausted.
        lists = [l for l in lists if l]
        if not lists:
            # Nothing left to subtract: everything from rel_start to end is free.
            free_ranges.append((rel_start, end))
            unused += end - rel_start + 1
            break
        # Find the list whose head holds the smallest remaining IP.
        min_list = min(lists, key=lambda x: two_to_one(x[0].ip_upper,
                                                       x[0].ip_lower))
        minimum = min_list[0]
        minimum_i = two_to_one(minimum.ip_upper, minimum.ip_lower)
        # Everything between rel_start and the next used IP is unused.
        unused += minimum_i - rel_start
        if minimum_i != rel_start:
            free_ranges.append((rel_start, minimum_i - 1))
        # Pop every record (of any type) that shares this IP.
        for l in lists:
            while (l and l[0].ip_upper == minimum.ip_upper and
                   l[0].ip_lower == minimum.ip_lower):
                l.pop(0)
        rel_start = minimum_i + 1
    return {
        'unused': unused,
        'used': int(iend) - int(istart) - unused + 1,
        'free_ranges': free_ranges,
    }
def range_wizard_get_ip(request):
    """AJAX helper: JSON-encode an IP (or IP prefix) for the posted range.

    With ``freeIp=true`` and an IPv4 range, the next unused address is
    returned (or a "range is full" message); otherwise the range's starting
    address minus its last octet is returned as a prefix.
    """
    if not request.POST:
        return None
    wants_free_ip = request.POST.get('freeIp', '') == 'true'
    range_model = get_model('cyder', 'range')
    rng = range_model.objects.get(id=request.POST.get('range', ''))
    if wants_free_ip and rng and rng.ip_type == '4':
        ip_str = rng.get_next_ip() or 'This range is full!'
    else:
        ip_str = '.'.join(rng.start_str.split('.')[:-1])
    payload = {
        'ip_type': rng.ip_type,
        'ip_str': str(ip_str), }
    return HttpResponse(json.dumps(payload))
def range_wizard_get_ranges(request):
    """AJAX helper: return the ranges matching the posted vrf/site/rangeType
    filters as JSON ``{'ranges': [[display strings], [ids]]}``.
    """
    if not request.POST:
        return None
    # Imported here rather than at module level -- presumably to avoid a
    # circular import with cyder.cydhcp.network; confirm before moving.
    from cyder.cydhcp.network.utils import get_ranges
    Network = get_model('cyder', 'network')
    vrf_networks = None
    site_networks = None
    networks = []
    all_ranges = False
    data = request.POST
    # Restrict to a single range type when one was posted, else use both.
    if data.get('rangeType', None):
        range_types = [data.get('rangeType')]
    else:
        range_types = [STATIC, DYNAMIC]
    if data.get('vrf', None):
        Vrf = get_model('cyder', 'vrf')
        vrf = Vrf.objects.get(id=data['vrf'])
        vrf_networks = Network.objects.filter(vrf=vrf)
    if data.get('site', None):
        Site = get_model('cyder', 'site')
        site = Site.objects.get(id=data['site'])
        site_networks = Network.objects.filter(site=site)
    # Both filters -> intersection of the querysets; one filter -> that
    # queryset; neither -> fall back to every range.
    if data.get('site', None) and data.get('vrf', None):
        networks = vrf_networks & site_networks
    elif data.get('site', None) or data.get('vrf', None):
        networks = vrf_networks or site_networks
    else:
        all_ranges = True
    if networks:
        networks = list(networks)
    else:
        networks = []
    ranges = get_ranges(
        networks, ctnr=request.session['ctnr'],
        range_types=range_types, all_ranges=all_ranges)
    # Two parallel lists: human-readable names and matching primary keys.
    ranges = [([r.get_str() for r in ranges]),
              ([r.id for r in ranges])]
    return HttpResponse(json.dumps({'ranges': ranges}))
| bsd-3-clause |
kennetanti/lolcoin | contrib/testgen/gen_base58_test_vectors.py | 1064 | 4344 | #!/usr/bin/env python
'''
Generate valid and invalid base58 address and private key test vectors.
Usage:
gen_base58_test_vectors.py valid 50 > ../../src/test/data/base58_keys_valid.json
gen_base58_test_vectors.py invalid 50 > ../../src/test/data/base58_keys_invalid.json
'''
# 2012 Wladimir J. van der Laan
# Released under MIT License
import os
from itertools import islice
from base58 import b58encode, b58decode, b58encode_chk, b58decode_chk, b58chars
import random
from binascii import b2a_hex
# key types
PUBKEY_ADDRESS = 48
SCRIPT_ADDRESS = 5
PUBKEY_ADDRESS_TEST = 111
SCRIPT_ADDRESS_TEST = 196
PRIVKEY = 176
PRIVKEY_TEST = 239
metadata_keys = ['isPrivkey', 'isTestnet', 'addrType', 'isCompressed']
# templates for valid sequences
templates = [
# prefix, payload_size, suffix, metadata
# None = N/A
((PUBKEY_ADDRESS,), 20, (), (False, False, 'pubkey', None)),
((SCRIPT_ADDRESS,), 20, (), (False, False, 'script', None)),
((PUBKEY_ADDRESS_TEST,), 20, (), (False, True, 'pubkey', None)),
((SCRIPT_ADDRESS_TEST,), 20, (), (False, True, 'script', None)),
((PRIVKEY,), 32, (), (True, False, None, False)),
((PRIVKEY,), 32, (1,), (True, False, None, True)),
((PRIVKEY_TEST,), 32, (), (True, True, None, False)),
((PRIVKEY_TEST,), 32, (1,), (True, True, None, True))
]
def is_valid(v):
    '''Check vector v for validity.

    A vector is valid when its base58check payload decodes successfully and
    matches one of the known (prefix, payload_size, suffix) templates.
    '''
    result = b58decode_chk(v)
    if result is None:
        # Malformed base58 or failed checksum.
        return False
    # Removed the dead local ``valid = False`` from the original -- it was
    # assigned once and never read.
    for template in templates:
        prefix = str(bytearray(template[0]))
        suffix = str(bytearray(template[2]))
        if result.startswith(prefix) and result.endswith(suffix):
            if (len(result) - len(prefix) - len(suffix)) == template[1]:
                return True
    return False
def gen_valid_vectors():
    '''Generate valid test vectors.

    Infinite generator -- the caller takes a prefix with itertools.islice.
    Each item is (base58check string, hex payload, metadata dict).
    '''
    while True:
        for template in templates:
            prefix = str(bytearray(template[0]))
            # Random payload of the exact size the template demands.
            payload = os.urandom(template[1])
            suffix = str(bytearray(template[2]))
            rv = b58encode_chk(prefix + payload + suffix)
            # Sanity check: everything we emit as "valid" must round-trip.
            assert is_valid(rv)
            # Keep only the metadata fields that apply to this template.
            metadata = dict([(x,y) for (x,y) in zip(metadata_keys,template[3]) if y is not None])
            yield (rv, b2a_hex(payload), metadata)
def gen_invalid_vector(template, corrupt_prefix, randomize_payload_size, corrupt_suffix):
    '''Generate possibly invalid vector'''
    # Each flag independently swaps one component for random bytes.
    prefix = os.urandom(1) if corrupt_prefix else str(bytearray(template[0]))
    if randomize_payload_size:
        payload = os.urandom(max(int(random.expovariate(0.5)), 50))
    else:
        payload = os.urandom(template[1])
    suffix = (os.urandom(len(template[2])) if corrupt_suffix
              else str(bytearray(template[2])))
    return b58encode_chk(prefix + payload + suffix)
def randbool(p=0.5):
    '''Return True with probability ``p`` (default: fair coin flip).'''
    draw = random.random()
    return draw < p
def gen_invalid_vectors():
    '''Generate invalid test vectors.

    Infinite generator of 1-tuples (note the trailing commas on the yields);
    the caller takes a prefix with itertools.islice.
    '''
    # start with some manual edge-cases
    yield "",
    yield "x",
    while True:
        # kinds of invalid vectors:
        # invalid prefix
        # invalid payload length
        # invalid (randomized) suffix (add random data)
        # corrupt checksum
        for template in templates:
            val = gen_invalid_vector(template, randbool(0.2), randbool(0.2), randbool(0.2))
            if random.randint(0,10)<1: # line corruption
                if randbool(): # add random character to end
                    val += random.choice(b58chars)
                else: # replace random character in the middle
                    n = random.randint(0, len(val))
                    val = val[0:n] + random.choice(b58chars) + val[n+1:]
            # Corruption is probabilistic, so re-check: only emit vectors
            # that really are invalid.
            if not is_valid(val):
                yield val,
if __name__ == '__main__':
    import sys, json
    # argv[1] selects the generator; argv[2] the number of vectors to emit.
    iters = {'valid':gen_valid_vectors, 'invalid':gen_invalid_vectors}
    try:
        uiter = iters[sys.argv[1]]
    except IndexError:
        # No mode given: default to valid vectors.
        # NOTE(review): an *unknown* mode raises KeyError here (only
        # IndexError is caught) -- confirm whether that is intended.
        uiter = gen_valid_vectors
    try:
        count = int(sys.argv[2])
    except IndexError:
        count = 0
    # Both generators are infinite; islice bounds them to `count` items.
    data = list(islice(uiter(), count))
    json.dump(data, sys.stdout, sort_keys=True, indent=4)
    sys.stdout.write('\n')
| mit |
KevZho/buffbot | kol/request/ItemInformationRequest.py | 4 | 2392 | from ApiRequest import ApiRequest
from kol.database import ItemDatabase
class ItemInformationRequest(ApiRequest):
    "This class is used to get information about a particular item."

    def __init__(self, session, itemId):
        """Prepare an API request asking for the item with the given id."""
        super(ItemInformationRequest, self).__init__(session)
        self.requestData["what"] = "item"
        self.requestData["id"] = itemId
        self.itemId = itemId

    def parseResponse(self):
        """Normalize the raw JSON payload into an item dict.

        Optional attributes are added only when present (and truthy) in the
        response.  The result is stored in self.responseData["item"].
        """
        super(ItemInformationRequest, self).parseResponse()
        item = {}
        data = self.jsonData
        item["id"] = self.itemId
        item["descId"] = int(data["descid"])
        item["name"] = data["name"]
        if "plural" in data and len(data["plural"]) > 0:
            item["plural"] = data["plural"]
        if "picture" in data and len(data["picture"]) > 0:
            item["image"] = "%s.gif" % data["picture"]
        if "type" in data:
            item["type"] = data["type"]
            # BUG FIX: this check was previously outside the ``if "type"``
            # block and raised KeyError whenever "type" was absent.
            if item["type"] == "gift":
                item["type"] = "gift package"
        # BUG FIX: was ``int(data["sellvalue"] > 0)`` -- the comparison ran
        # before the int() conversion, so a string value was compared as a
        # string instead of a number.  Same for "hands" below.
        if "sellvalue" in data and int(data["sellvalue"]) > 0:
            item["autosell"] = int(data["sellvalue"])
        if "power" in data:
            item["power"] = int(data["power"])
        if "hands" in data and int(data["hands"]) > 0:
            item["numHands"] = int(data["hands"])
        if "cantransfer" in data and data["cantransfer"] == "1":
            item["canTransfer"] = True
        if "cook" in data and data["cook"] == "1":
            item["isCookingIngredient"] = True
        if "cocktail" in data and data["cocktail"] == "1":
            item["isCocktailcraftingIngredient"] = True
        if "jewelry" in data and data["jewelry"] == "1":
            item["isJewelrymakingComponent"] = True
        if "smith" in data and data["smith"] == "1":
            item["isMeatsmithingComponent"] = True
        if "combine" in data and data["combine"] == "1":
            item["isMeatpastingComponent"] = True
        if "fancy" in data and data["fancy"] == "1":
            item["isFancy"] = True
        if "quest" in data and data["quest"] == "1":
            item["isQuestItem"] = True
        if "candiscard" in data and data["candiscard"] == "1":
            item["isDiscardable"] = True
        if "unhardcore" in data and data["unhardcore"] == "1":
            item["isHardcoreDenied"] = True
        self.responseData["item"] = item
| mit |
FreeOpcUa/freeopcua | python/examples/event_client.py | 9 | 2312 |
import sys
import time
sys.path.append(".")
from IPython import embed
import opcua
class SubHandler(opcua.SubscriptionHandler):
    """
    Client subscription handler: receives data-change and event
    notifications pushed by the server and remembers the last event.
    """

    def __init__(self, *args):
        opcua.SubscriptionHandler.__init__(self, *args)
        # BUG FIX: was ``self.ev = MessageSecurityMode::None`` -- C++ scope
        # syntax that is a SyntaxError in Python.  ``ev`` just caches the
        # most recent event (assigned in event() below), so it starts None.
        self.ev = None

    def data_change(self, handle, node, val, attr):
        """Called by the subscription on a monitored data change."""
        print("Python: New data change event", handle, node, val, attr)

    def event(self, handle, event):
        """Called by the subscription on a server event; cache it."""
        print("Python: New event", handle, event)
        self.ev = event
if __name__ == "__main__":
# create our client object
client = opcua.Client(False)
client.connect("opc.tcp://localhost:4841/freeopcua/server/")
#s.connect("opc.tcp://192.168.56.101:48030")
try:
# get server namespace. You may want to get all namespaces
# with client.get_server_namespaces()
uri = "http://examples.freeopcua.github.io"
idx = client.get_namespace_index(uri)
# read a node from standard opcua address space
statenode = client.get_node(opcua.ObjectId.Server_ServerStatus_State)
print("Server state is: ", statenode.get_value())
# get root node of server and browse it
root = client.get_root_node()
print("I got root: ", root)
print("Childs are: ", root.get_children())
# get objects node of server
# this is where the interesting data from server should be
print("Objects is: ", client.get_objects_node())
objects = client.get_objects_node()
print("Children of objects are: ", objects.get_children())
# get child using browse path
myvar = objects.get_child(["{}:NewObject".format(idx), "MyVariable"])
print("yvar is: ", myvar)
# create a subsription we will use to subscribe to nodes or events
sclt = SubHandler()
sub = client.create_subscription(100, sclt)
# subscribe to a specific node
#handle = sub.subscribe_data_change(myvar)
#print("Subscribe handle is: ", handle)
# subscribe to events from server
evhandle = sub.subscribe_events()
print("Subscribe handle is: ", evhandle)
embed()
finally:
client.disconnect()
| lgpl-3.0 |
synergeticsedx/deployment-wipro | lms/djangoapps/django_comment_client/tests/test_models.py | 17 | 2866 | """
Tests for the django comment client integration models
"""
from django.test.testcases import TestCase
from nose.plugins.attrib import attr
from opaque_keys.edx.keys import CourseKey
import django_comment_common.models as models
from xmodule.modulestore.tests.django_utils import (
TEST_DATA_MIXED_MODULESTORE, ModuleStoreTestCase
)
from xmodule.modulestore.tests.factories import ToyCourseFactory
@attr(shard=1)
class RoleClassTestCase(ModuleStoreTestCase):
    """
    Tests for roles of the comment client service integration
    """
    MODULESTORE = TEST_DATA_MIXED_MODULESTORE
    def setUp(self):
        """Create two courses and several roles to exercise permissions."""
        super(RoleClassTestCase, self).setUp()
        # For course ID, syntax edx/classname/classdate is important
        # because xmodel.course_module.id_to_location looks for a string to split
        self.course_id = ToyCourseFactory.create().id
        self.student_role = models.Role.objects.get_or_create(name="Student",
                                                              course_id=self.course_id)[0]
        self.student_role.add_permission("delete_thread")
        # Same (name, course) pair -- get_or_create returns the same role row.
        self.student_2_role = models.Role.objects.get_or_create(name="Student",
                                                                course_id=self.course_id)[0]
        self.TA_role = models.Role.objects.get_or_create(name="Community TA",
                                                         course_id=self.course_id)[0]
        self.course_id_2 = CourseKey.from_string("edX/6.002x/2012_Fall")
        self.TA_role_2 = models.Role.objects.get_or_create(name="Community TA",
                                                           course_id=self.course_id_2)[0]
    def test_has_permission(self):
        """A permission added to one role is visible via the shared role row."""
        # Whenever you add a permission to student_role,
        # Roles with the same FORUM_ROLE in same class also receives the same
        # permission.
        # Is this desirable behavior?
        self.assertTrue(self.student_role.has_permission("delete_thread"))
        self.assertTrue(self.student_2_role.has_permission("delete_thread"))
        self.assertFalse(self.TA_role.has_permission("delete_thread"))
    def test_inherit_permission(self):
        """inherit_permissions copies permissions, even across courses."""
        self.TA_role.inherit_permissions(self.student_role)
        self.assertTrue(self.TA_role.has_permission("delete_thread"))
        # Despite being from 2 different courses, TA_role_2 can still inherit
        # permissions from TA_role without error
        self.TA_role_2.inherit_permissions(self.TA_role)
@attr(shard=1)
class PermissionClassTestCase(TestCase):
    """
    Tests for permissions of the comment client service integration
    """
    def setUp(self):
        """Create (or fetch) a permission row named "test"."""
        super(PermissionClassTestCase, self).setUp()
        self.permission = models.Permission.objects.get_or_create(name="test")[0]
    def test_unicode(self):
        """The string representation of a Permission is its name."""
        self.assertEqual(str(self.permission), "test")
| agpl-3.0 |
SerialShadow/SickRage | lib/sqlalchemy/dialects/mssql/adodbapi.py | 79 | 2509 | # mssql/adodbapi.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mssql+adodbapi
:name: adodbapi
:dbapi: adodbapi
:connectstring: mssql+adodbapi://<username>:<password>@<dsnname>
:url: http://adodbapi.sourceforge.net/
.. note::
The adodbapi dialect is not implemented SQLAlchemy versions 0.6 and
above at this time.
"""
import datetime
from sqlalchemy import types as sqltypes, util
from sqlalchemy.dialects.mssql.base import MSDateTime, MSDialect
import sys
class MSDateTime_adodbapi(MSDateTime):
    """DATETIME result handling specific to the adodbapi driver."""

    def result_processor(self, dialect, coltype):
        def process(value):
            # adodbapi returns DATETIME values whose time component is
            # empty as plain datetime.date objects; promote those back to
            # full datetime.datetime (at midnight).  The exact-type check
            # is deliberate: datetime.datetime subclasses datetime.date and
            # must be passed through untouched.
            if type(value) is not datetime.date:
                return value
            return datetime.datetime(value.year, value.month, value.day)
        return process
class MSDialect_adodbapi(MSDialect):
    supports_sane_rowcount = True
    supports_sane_multi_rowcount = True
    # True only when sys.maxunicode == 65535 -- presumably a narrow (UCS-2)
    # Python build; confirm the intent before changing.
    supports_unicode = sys.maxunicode == 65535
    supports_unicode_statements = True
    driver = 'adodbapi'
    @classmethod
    def import_dbapi(cls):
        # Import lazily so the dialect module loads even without the driver.
        import adodbapi as module
        return module
    # Override DateTime handling with the adodbapi-specific processor.
    colspecs = util.update_copy(
        MSDialect.colspecs,
        {
            sqltypes.DateTime: MSDateTime_adodbapi
        }
    )
    def create_connect_args(self, url):
        """Assemble the ADO connection string from the SQLAlchemy URL query."""
        keys = url.query
        connectors = ["Provider=SQLOLEDB"]
        if 'port' in keys:
            connectors.append("Data Source=%s, %s" %
                              (keys.get("host"), keys.get("port")))
        else:
            connectors.append("Data Source=%s" % keys.get("host"))
        connectors.append("Initial Catalog=%s" % keys.get("database"))
        user = keys.get("user")
        if user:
            connectors.append("User Id=%s" % user)
            connectors.append("Password=%s" % keys.get("password", ""))
        else:
            # No credentials supplied: fall back to Windows authentication.
            connectors.append("Integrated Security=SSPI")
        return [[";".join(connectors)], {}]
    def is_disconnect(self, e, connection, cursor):
        """Treat adodbapi 'connection failure' DatabaseErrors as disconnects."""
        return isinstance(e, self.dbapi.adodbapi.DatabaseError) and \
            "'connection failure'" in str(e)
dialect = MSDialect_adodbapi
| gpl-3.0 |
cjellick/rancher | tests/validation/tests/v3_api/test_cluster_templates.py | 1 | 34974 | import copy
import os
import pytest
import requests
from rancher import ApiError
from .common import * # NOQA
from .test_monitoring import cluster_query_template
from .test_monitoring import validate_cluster_graph
from .test_rbac import create_user
from .test_rke_cluster_provisioning import engine_install_url
DO_ACCESSKEY = os.environ.get('DO_ACCESSKEY', "None")
RANCHER_S3_BUCKETNAME = os.environ.get('RANCHER_S3_BUCKETNAME', "None")
RANCHER_S3_ENDPOINT = os.environ.get('RANCHER_S3_ENDPOINT', "None")
AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID', "None")
AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY', "None")
user_token = {"stduser_with_createrketemplate_role": {"user": None,
"token": None},
"standard_user": {"user": None, "token": None}}
CLUSTER_MONITORING_APP = "cluster-monitoring"
MONITORING_OPERATOR_APP = "monitoring-operator"
@pytest.fixture(scope='module', autouse="True")
def setup(request):
    """Module-scoped fixture: create the two test users and grant one of
    them the clustertemplates-create global role.

    NOTE(review): ``autouse="True"`` is a truthy *string*, not the boolean
    True pytest documents -- it works, but confirm it is intentional.
    The ``request`` parameter is unused.
    """
    client = get_admin_client()
    # create users (populates the module-level user_token mapping)
    user_token["stduser_with_createrketemplate_role"]["user"], \
        user_token["stduser_with_createrketemplate_role"]["token"] = \
        create_user(client)
    user_token["standard_user"]["user"], \
        user_token["standard_user"]["token"] = create_user(client)
    stduser_with_createrketemplate_role_id = \
        user_token["stduser_with_createrketemplate_role"]["user"].id
    # Add clustertemplates-create global role binding to the standard user
    client.create_global_role_binding(
        globalRoleId="clustertemplates-create",
        subjectKind="User",
        userId=stduser_with_createrketemplate_role_id)
def get_k8s_versionlist():
    """Return the list of k8s versions the Rancher server currently supports.

    Queries the server's k8s-versions-current setting and splits its
    comma-separated value.  Asserts that more than one version is offered.
    """
    request_headers = {
        "Content-Type": "application/json",
        "Accept": "application/json",
        "Authorization": "Bearer " + ADMIN_TOKEN,
    }
    response = requests.get(
        CATTLE_TEST_URL + "/v3/settings/k8s-versions-current",
        json={'responseType': 'json'},
        verify=False,
        headers=request_headers)
    setting = json.loads(response.content)
    versions = setting['value'].split(",")
    assert len(versions) > 1
    return versions
def get_cluster_config(k8sversion, enableMonitoring="false"):
    """Build a basic cluster spec for *k8sversion*.

    Monitoring is toggled by *enableMonitoring* (string "true"/"false",
    matching the API's expectations).
    """
    return {
        "dockerRootDir": "/var/lib/docker123",
        "enableClusterAlerting": "false",
        "enableClusterMonitoring": enableMonitoring,
        "enableNetworkPolicy": "false",
        "type": "clusterSpecBase",
        "localClusterAuthEndpoint": {
            "enabled": "true",
            "type": "localClusterAuthEndpoint"
        },
        "rancherKubernetesEngineConfig": getRKEConfig(k8sversion)
    }
def get_cisscan_enabled_clusterconfig(k8sversion):
    """Build a clusterSpecBase config with a scheduled CIS scan enabled.

    Uses the same base settings as get_cluster_config (monitoring stays
    off) plus a permissive CIS scan that runs hourly and retains 24
    reports.
    """
    scan_config = {
        "cisScanConfig": {
            "debugMaster": "false",
            "debugWorker": "false",
            "overrideBenchmarkVersion": CIS_SCAN_PROFILE,
            "overrideSkip": "None",
            "profile": "permissive",
            "type": "/v3/schemas/cisScanConfig"
        },
        "type": "/v3/schemas/clusterScanConfig"
    }
    schedule_config = {
        "cronSchedule": "0 */1 * * *",
        "retention": 24,
        "type": "/v3/schemas/scheduledClusterScanConfig"
    }
    return {
        "dockerRootDir": "/var/lib/docker123",
        "enableClusterAlerting": "false",
        "enableClusterMonitoring": "false",
        "enableNetworkPolicy": "false",
        "type": "clusterSpecBase",
        "localClusterAuthEndpoint": {
            "enabled": "true",
            "type": "localClusterAuthEndpoint"
        },
        "scheduledClusterScan": {
            "enabled": "true",
            "scanConfig": scan_config,
            "scheduleConfig": schedule_config,
            "type": "/v3/schemas/scheduledClusterScan"
        },
        "rancherKubernetesEngineConfig": getRKEConfig(k8sversion)
    }
def test_cluster_template_create_with_questions():
    """Create a template revision that exposes questions, create a cluster
    answering them, and verify the answers took effect.

    The questions override the kubernetes version, the network plugin and
    the S3 etcd-backup settings. The test checks the cluster's applied
    spec, the flannel pods in kube-system, and that an on-demand etcd
    backup to S3 goes active.
    """
    k8sversionlist = get_k8s_versionlist()
    cluster_config = get_cluster_config(k8sversionlist[0])
    questions = [{
        "variable": "rancherKubernetesEngineConfig.kubernetesVersion",
        "required": "true",
        "type": "string",
        "default": k8sversionlist[0]
    }, {
        "variable": "rancherKubernetesEngineConfig.network.plugin",
        "required": "true",
        "type": "string",
        "default": "canal"
    }, {
        "variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
                    "s3BackupConfig.bucketName",
        "required": "true",
        "type": "string",
        "default": ""
    }, {
        "variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
                    "s3BackupConfig.endpoint",
        "required": "true",
        "type": "string",
        "default": ""
    }, {
        "variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
                    "s3BackupConfig.accessKey",
        "required": "true",
        "type": "string",
        "default": ""
    }, {
        "variable": "rancherKubernetesEngineConfig.services.etcd.backupConfig."
                    "s3BackupConfig.secretKey",
        "required": "true",
        "type": "string",
        "default": ""
    }]
    answers = {
        "values": {
            "rancherKubernetesEngineConfig.kubernetesVersion":
                k8sversionlist[1],
            "rancherKubernetesEngineConfig.network.plugin": "flannel",
            "rancherKubernetesEngineConfig.services.etcd.backupConfig."
            "s3BackupConfig.bucketName": RANCHER_S3_BUCKETNAME,
            "rancherKubernetesEngineConfig.services.etcd.backupConfig."
            "s3BackupConfig.endpoint": RANCHER_S3_ENDPOINT,
            "rancherKubernetesEngineConfig.services.etcd.backupConfig."
            "s3BackupConfig.accessKey": AWS_ACCESS_KEY_ID,
            "rancherKubernetesEngineConfig.services.etcd.backupConfig."
            "s3BackupConfig.secretKey": AWS_SECRET_ACCESS_KEY
        }
    }
    standard_user_client = \
        get_client_for_token(
            user_token["stduser_with_createrketemplate_role"]["token"])
    cluster_template = \
        standard_user_client.create_cluster_template(
            name=random_test_name("template"),
            description="test-template")
    clusterTemplateId = cluster_template.id
    revision_name = random_test_name("revision")
    cluster_template_revision = \
        standard_user_client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config,
            clusterTemplateId=clusterTemplateId,
            enabled="true", questions=questions)
    # Give the server a moment before reloading the freshly created revision
    time.sleep(2)
    cluster_template_revision = standard_user_client.reload(
        cluster_template_revision)
    userToken = user_token["stduser_with_createrketemplate_role"]["token"]
    cluster = create_node_cluster(
        standard_user_client, name=random_test_name("test-auto"),
        clusterTemplateRevisionId=cluster_template_revision.id,
        answers=answers, userToken=userToken)
    # Verify that the cluster's applied spec has the parameters set as expected
    assert cluster.appliedSpec.dockerRootDir == "/var/lib/docker123"
    assert cluster.appliedSpec.localClusterAuthEndpoint.enabled is True
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.\
        kubernetesVersion == k8sversionlist[1]
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
        backupConfig.s3BackupConfig.bucketName == RANCHER_S3_BUCKETNAME
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.\
        etcd.backupConfig.s3BackupConfig.endpoint == RANCHER_S3_ENDPOINT
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
        backupConfig.s3BackupConfig.accessKey == AWS_ACCESS_KEY_ID
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.services.etcd.\
        backupConfig.s3BackupConfig.type == "/v3/schemas/s3BackupConfig"
    assert cluster.appliedSpec.rancherKubernetesEngineConfig.network.plugin ==\
        "flannel"
    check_cluster_version(cluster, k8sversionlist[1])
    # Verify flannel pod in the kube-system namespace
    cmd = "get pods -l k8s-app=flannel --namespace kube-system"
    pod_result = execute_kubectl_cmd(cmd)
    # BUG FIX: the original asserted len(["items"]) == 1 which is always
    # true; check the number of flannel pods actually returned instead.
    assert len(pod_result["items"]) == 1
    for pod in pod_result["items"]:
        print(pod["metadata"]["name"])
        assert "flannel" in (pod["metadata"]["name"])
    # Perform an on-demand etcd backup and wait for it to become active
    backup = cluster.backupEtcd()
    backupname = backup['metadata']['name']
    etcdbackups = cluster.etcdBackups(name=backupname)
    etcdbackupdata = etcdbackups['data']
    s3backupconfig = etcdbackupdata[0]['backupConfig']['s3BackupConfig']
    assert s3backupconfig['type'] == '/v3/schemas/s3BackupConfig'
    backupId = etcdbackupdata[0]['id']
    print("BackupId", backupId)
    wait_for_backup_to_active(cluster, backupname)
    cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_edit_adminuser():
    """As an admin, create an RKE template with revisions R1 and R2,
    create a cluster from R1, then edit the cluster over to R2."""
    cluster_template_create_edit(ADMIN_TOKEN)
def test_cluster_template_create_edit_stduser():
    """As a standard user holding the clustertemplates-create role, create
    an RKE template with revisions R1 and R2, create a cluster from R1,
    then edit the cluster over to R2."""
    userToken = user_token["stduser_with_createrketemplate_role"]["token"]
    cluster_template_create_edit(userToken)
def test_cluster_template_add_owner():
    """Tests the "owner" member role of a cluster template.

    An admin shares a template with a standard user as owner; the owner
    can then add revisions and create clusters from it, but gets no access
    (404 NotFound) to a second template the admin did not share.
    """
    k8sversionlist = get_k8s_versionlist()
    cluster_config1 = get_cluster_config(k8sversionlist[0])
    cluster_config2 = get_cluster_config(k8sversionlist[1])
    client = get_admin_client()
    # As an Admin, create a cluster template and update the members
    # list with the new user as owner
    template_name = random_test_name("template")
    cluster_template = client.create_cluster_template(
        name=template_name, description="test-template")
    principalid = user_token["standard_user"]["user"]["principalIds"]
    # NOTE(review): principalIds is a list; confirm the API accepts a list
    # for userPrincipalId or whether a single id is intended here.
    members = [{
        "type": "member",
        "accessType": "owner",
        "userPrincipalId": principalid
    }]
    cluster_template = client.update(cluster_template,
                                     name=template_name,
                                     members=members)
    standard_user_client = \
        get_client_for_token(user_token["standard_user"]["token"])
    # As an owner of the template, create a revision using the template
    # and also create a cluster using the template revision
    revision_name = random_test_name("revision1")
    cluster_template_revision = \
        standard_user_client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config1,
            clusterTemplateId=cluster_template.id)
    time.sleep(2)
    cluster_template_revision = standard_user_client.reload(
        cluster_template_revision)
    userToken = user_token["standard_user"]["token"]
    cluster = create_node_cluster(
        standard_user_client, name=random_test_name("test-auto"),
        clusterTemplateRevisionId=cluster_template_revision.id,
        userToken=userToken)
    # As an admin, create another template and a revision.
    cluster_template_new = client.create_cluster_template(
        name="new_template", description="newtest-template")
    newrevision_name = random_test_name("revision2")
    cluster_template_newrevision = \
        client.create_cluster_template_revision(
            name=newrevision_name,
            clusterConfig=cluster_config2,
            clusterTemplateId=cluster_template_new.id)
    time.sleep(2)
    cluster_template_newrevision = client.reload(
        cluster_template_newrevision)
    # Verify that the existing standard user cannot create a new revision using
    # this template
    with pytest.raises(ApiError) as e:
        standard_user_client.create_cluster_template_revision(
            name=random_test_name("userrevision"),
            clusterConfig=cluster_config2,
            clusterTemplateId=cluster_template_new.id)
    print(e.value.error.status)
    print(e.value.error.code)
    assert e.value.error.status == 404
    assert e.value.error.code == "NotFound"
    userToken = user_token["standard_user"]["token"]
    # Verify that the existing standard user cannot create a cluster
    # using the new revision
    with pytest.raises(ApiError) as e:
        create_node_cluster(
            standard_user_client, name=random_test_name("test-auto"),
            clusterTemplateRevisionId=cluster_template_newrevision.id,
            userToken=userToken)
    print(e)
    assert e.value.error.status == 404
    assert e.value.error.code == "NotFound"
    cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_add_readonly_member():
    """Tests the "read-only" member role of a cluster template.

    The read-only member may create clusters from existing revisions of
    the shared template, but cannot add new revisions to it (403
    PermissionDenied) and cannot use revisions of an unshared template
    (404 NotFound).
    """
    k8sversionlist = get_k8s_versionlist()
    cluster_config1 = get_cluster_config(k8sversionlist[0])
    client = get_admin_client()
    # As an Admin, create a cluster template and update the members
    # list with the new standard user as read-only user
    template_name = random_test_name("usertemplate")
    cluster_template = client.create_cluster_template(
        name=template_name, description="test-template")
    principalid = user_token["standard_user"]["user"]["principalIds"]
    # NOTE(review): principalIds is a list; confirm the API accepts a list
    # for userPrincipalId or whether a single id is intended here.
    members = [{
        "type": "member",
        "accessType": "read-only",
        "userPrincipalId": principalid
    }]
    cluster_template = client.update(cluster_template,
                                     name=template_name, members=members)
    revision_name = random_test_name("revision1")
    cluster_template_revision1 = client.create_cluster_template_revision(
        name=revision_name,
        clusterConfig=cluster_config1,
        clusterTemplateId=cluster_template.id)
    time.sleep(2)
    cluster_template_revision1 = client.reload(
        cluster_template_revision1)
    standard_user_client = \
        get_client_for_token(user_token["standard_user"]["token"])
    # As a read-only member of the rke template, verify that
    # adding another revision to the template fails
    revision_name = "userrevision"
    with pytest.raises(ApiError) as e:
        standard_user_client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config1,
            clusterTemplateId=cluster_template.id)
    assert e.value.error.status == 403
    assert e.value.error.code == 'PermissionDenied'
    userToken = user_token["standard_user"]["token"]
    # Verify that the read-only user can create a cluster with the existing
    # template revision
    cluster = create_node_cluster(
        standard_user_client, name=random_test_name("test-auto"),
        clusterTemplateRevisionId=cluster_template_revision1.id,
        userToken=userToken)
    # As an admin, create another template and a revision.
    cluster_template_new = client.create_cluster_template(
        name="new_template", description="newtest-template")
    revision_name = random_test_name("revision2")
    cluster_template_newrevision = \
        client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config1,
            clusterTemplateId=cluster_template_new.id)
    # Verify that the existing standard user cannot create a cluster
    # using the new revision
    with pytest.raises(ApiError) as e:
        create_node_cluster(
            standard_user_client, name=random_test_name("test-auto"),
            clusterTemplateRevisionId=cluster_template_newrevision.id,
            userToken=userToken)
    print(e)
    assert e.value.error.status == 404
    assert e.value.error.code == "NotFound"
    cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_export():
    """Create a DO cluster using a raw RKE config, save it as an RKE
    template (revision "v1"), then create another cluster from that
    exported revision and verify the new cluster's applied spec references
    the same template and revision ids."""
    k8sversionlist = get_k8s_versionlist()
    standard_user_client = \
        get_client_for_token(
            user_token["stduser_with_createrketemplate_role"]["token"])
    rke_config = getRKEConfig(k8sversionlist[0])
    cluster_name = random_test_name("test-auto-export")
    userToken = user_token["stduser_with_createrketemplate_role"]["token"]
    cluster = create_node_cluster(standard_user_client, cluster_name,
                                  rancherKubernetesEngineConfig=rke_config,
                                  userToken=userToken)
    # Export a Template
    cluster.saveAsTemplate(clusterTemplateName="testnewrketemplate",
                           clusterTemplateRevisionName="v1")
    # Reload so the cluster object carries the new template/revision ids
    cluster = standard_user_client.reload(cluster)
    templateid = cluster.clusterTemplateId
    revisionid = cluster.clusterTemplateRevisionId
    # Create a new cluster using the template revision just exported
    newcluster = create_node_cluster(
        standard_user_client, name=random_test_name("test-auto"),
        clusterTemplateRevisionId=revisionid, userToken=userToken)
    newcluster = standard_user_client.reload(newcluster)
    assert newcluster.appliedSpec.clusterTemplateId == templateid
    assert newcluster.appliedSpec.clusterTemplateRevisionId == revisionid
    cluster_cleanup(standard_user_client, cluster)
    cluster_cleanup(standard_user_client, newcluster)
def test_cluster_template_enforcement_on_admin(request):
    """With cluster-template enforcement turned ON, an admin must still be
    able to create clusters both from a raw RKE config and from an RKE
    template.

    Enforcement is flipped on through the settings API and is always reset
    to "false" in the finally block.
    """
    # Define URL/headers/client before the try so the finally block can
    # always reset the setting and clean up: previously these names could
    # be unbound if the try body failed early, and the resulting NameError
    # in finally masked the real failure.
    enforcement_settings_url = CATTLE_TEST_URL + \
        "/v3/settings/cluster-template-enforcement"
    headers = {"Content-Type": "application/json",
               "Accept": "application/json",
               "Authorization": "Bearer " + ADMIN_TOKEN}
    client = get_admin_client()
    cluster = None
    rkecluster = None
    try:
        data_test = {
            "name": "cluster-template-enforcement",
            "value": "true"
        }
        response = requests.put(enforcement_settings_url, json=data_test,
                                verify=False, headers=headers)
        print(response.content)
        k8sversionlist = get_k8s_versionlist()
        cluster_config1 = get_cluster_config(k8sversionlist[0])
        rke_config = getRKEConfig(k8sversionlist[0])
        # Verify creating cluster using rkeconfig succeeds
        cluster_name = random_test_name("test-auto-rkeconfig")
        rkecluster = \
            create_node_cluster(client, cluster_name,
                                rancherKubernetesEngineConfig=rke_config,
                                userToken=ADMIN_TOKEN)
        # Verify creating cluster using rke template succeeds
        cluster_template = client.create_cluster_template(
            name=random_test_name("template"), description="test-template")
        revision_name = random_test_name("revision1")
        cluster_template_revision1 = client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config1,
            clusterTemplateId=cluster_template.id)
        time.sleep(2)
        cluster_template_revision1 = client.reload(
            cluster_template_revision1)
        cluster_name = random_test_name("test-auto")
        cluster = create_node_cluster(
            client, name=cluster_name,
            clusterTemplateRevisionId=cluster_template_revision1.id,
            userToken=ADMIN_TOKEN)
        check_cluster_version(cluster, k8sversionlist[0])
    finally:
        # Reset the enforcement flag to false
        data_test = {
            "name": "cluster-template-enforcement",
            "value": "false"
        }
        requests.put(enforcement_settings_url, json=data_test,
                     verify=False, headers=headers)
        # Only clean up the clusters that were actually created
        if cluster is not None:
            cluster_cleanup(client, cluster)
        if rkecluster is not None:
            cluster_cleanup(client, rkecluster)
def test_cluster_template_enforcement_on_stduser():
    """With cluster-template enforcement turned ON, a standard user may
    only create clusters from an RKE template; creating a cluster from a
    plain RKE config must be rejected with 422 MissingRequired.

    Enforcement is always reset to "false" in the finally block.
    """
    standard_user_client = \
        get_client_for_token(
            user_token["stduser_with_createrketemplate_role"]["token"])
    k8sversionlist = get_k8s_versionlist()
    cluster_config1 = get_cluster_config(k8sversionlist[0])
    rke_config = getRKEConfig(k8sversionlist[0])
    # Define URL/headers before the try so the finally block can always
    # reset the setting even if the try body fails early (previously these
    # names could be unbound in finally, masking the real failure).
    enforcement_settings_url = CATTLE_TEST_URL + \
        "/v3/settings/cluster-template-enforcement"
    headers = {"Content-Type": "application/json",
               "Accept": "application/json",
               "Authorization": "Bearer " + ADMIN_TOKEN}
    cluster = None
    try:
        data_test = {
            "name": "cluster-template-enforcement",
            "value": "true"
        }
        response = requests.put(enforcement_settings_url, json=data_test,
                                verify=False, headers=headers)
        print(response.content)
        # Verify creating cluster using rke template succeeds
        cluster_template = standard_user_client.create_cluster_template(
            name=random_test_name("template"), description="test-template")
        revision_name = random_test_name("revision1")
        cluster_template_revision1 = \
            standard_user_client.create_cluster_template_revision(
                name=revision_name,
                clusterConfig=cluster_config1,
                clusterTemplateId=cluster_template.id)
        time.sleep(2)
        cluster_template_revision1 = standard_user_client.reload(
            cluster_template_revision1)
        cluster_name = random_test_name("test-auto")
        userToken = user_token["stduser_with_createrketemplate_role"]["token"]
        cluster = create_node_cluster(
            standard_user_client, name=cluster_name,
            clusterTemplateRevisionId=cluster_template_revision1.id,
            userToken=userToken)
        check_cluster_version(cluster, k8sversionlist[0])
        # Verify creating cluster using rkeconfig fails. API returns error as:
        # "MissingRequired : A clusterTemplateRevision to create a cluster"
        cluster_name = random_test_name("test-auto-rkeconfig")
        with pytest.raises(ApiError) as e:
            create_node_cluster(standard_user_client, cluster_name,
                                rancherKubernetesEngineConfig=rke_config,
                                userToken=userToken)
        print(e)
        assert e.value.error.status == 422
        assert e.value.error.code == "MissingRequired"
    finally:
        # Reset the enforcement flag to false
        data_test = {
            "name": "cluster-template-enforcement",
            "value": "false"
        }
        requests.put(enforcement_settings_url, json=data_test,
                     verify=False, headers=headers)
        # Only clean up if the cluster was actually created
        if cluster is not None:
            cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_with_cisscan_enabled():
    """Create a cluster from a template revision that schedules hourly CIS
    scans and verify the scan settings land in the cluster's applied spec.
    """
    k8sversionlist = get_k8s_versionlist()
    # Obtain cluster config with cisscan enabled
    cluster_config = get_cisscan_enabled_clusterconfig(k8sversionlist[0])
    standard_user_client = \
        get_client_for_token(
            user_token["stduser_with_createrketemplate_role"]["token"])
    userToken = user_token["stduser_with_createrketemplate_role"]["token"]
    # Create a cluster template
    cluster_template = standard_user_client.create_cluster_template(
        name=random_test_name("template"), description="cis-enabled-template")
    revision_name = random_test_name("revision1")
    # Create a cluster template revision with the cis enabled cluster config
    cluster_template_revision = \
        standard_user_client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config,
            clusterTemplateId=cluster_template.id)
    time.sleep(2)
    cluster_template_revision = standard_user_client.reload(
        cluster_template_revision)
    cluster_name = random_test_name("test-auto")
    # Create a cluster using the cluster template revision
    cluster = create_node_cluster(
        standard_user_client, name=cluster_name,
        clusterTemplateRevisionId=cluster_template_revision.id,
        userToken=userToken)
    check_cluster_version(cluster, k8sversionlist[0])
    # Verify that the cluster's applied spec has the cis scan parameters
    # set as expected
    assert cluster.appliedSpec. \
        scheduledClusterScan.enabled == True
    assert cluster.appliedSpec.scheduledClusterScan.\
        scanConfig.type == "/v3/schemas/clusterScanConfig"
    # CONSISTENCY FIX: compare against the configured CIS_SCAN_PROFILE
    # rather than a hard-coded "rke-cis-1.4", so this assert always
    # matches what get_cisscan_enabled_clusterconfig requested.
    assert cluster.appliedSpec. \
        scheduledClusterScan.scanConfig.\
        cisScanConfig.overrideBenchmarkVersion == CIS_SCAN_PROFILE
    assert cluster.appliedSpec. \
        scheduledClusterScan.scanConfig.cisScanConfig.profile == "permissive"
    assert cluster.appliedSpec.scheduledClusterScan.scheduleConfig.\
        cronSchedule == "0 */1 * * *"
    cluster_cleanup(standard_user_client, cluster)
def test_cluster_template_create_with_monitoring():
    """Create a cluster from a monitoring-enabled template revision and
    verify the monitoring apps deploy and cluster graphs render."""
    k8sversionlist = get_k8s_versionlist()
    # Obtain cluster config with monitoring enabled
    cluster_config = get_cluster_config(k8sversionlist[0],
                                        enableMonitoring="true")
    standard_user_client = \
        get_client_for_token(
            user_token["stduser_with_createrketemplate_role"]["token"])
    userToken = user_token["stduser_with_createrketemplate_role"]["token"]
    # Create a cluster template
    cluster_template = standard_user_client.\
        create_cluster_template(name=random_test_name("template"),
                                description="test-template")
    revision_name = random_test_name("revision1")
    # Create cluster template revision with monitoring enabled cluster config
    cluster_template_revision = \
        standard_user_client.create_cluster_template_revision(
            name=revision_name,
            clusterConfig=cluster_config,
            clusterTemplateId=cluster_template.id)
    time.sleep(2)
    cluster_template_revision = standard_user_client.reload(
        cluster_template_revision)
    cluster_name = random_test_name("test-auto")
    # Monitoring needs more capacity, hence 3 nodes of the 8gb droplet size
    cluster = create_node_cluster(
        standard_user_client, name=cluster_name, nodecount=3, nodesize="8gb",
        clusterTemplateRevisionId=cluster_template_revision.id,
        userToken=userToken)
    check_cluster_version(cluster, k8sversionlist[0])
    assert cluster.appliedSpec.enableClusterMonitoring == True
    # Verify the monitoring apps are deployed and active
    system_project = \
        standard_user_client.list_project(clusterId=cluster.id,
                                          name="System").data[0]
    # CONSISTENCY FIX: use the token of the user that owns this cluster
    # instead of the unrelated global USER_TOKEN, matching the rest of
    # this test.
    sys_proj_client = get_project_client_for_token(system_project, userToken)
    wait_for_app_to_active(sys_proj_client, CLUSTER_MONITORING_APP, 1000)
    wait_for_app_to_active(sys_proj_client, MONITORING_OPERATOR_APP, 1000)
    # wait for all graphs to be available
    time.sleep(60 * 3)
    cluster_monitoring_obj = standard_user_client.list_clusterMonitorGraph()
    # generate the request payload
    query1 = copy.deepcopy(cluster_query_template)
    query1["obj"] = cluster_monitoring_obj
    query1["filters"]["clusterId"] = cluster.id
    query1["filters"]["resourceType"] = "cluster"
    # Verify graphs are generated
    validate_cluster_graph(query1, "cluster")
    cluster_cleanup(standard_user_client, cluster)
def cluster_template_create_edit(userToken):
    """Create cluster template revisions R1 and R2, create a cluster with
    revision R1, then edit the cluster to use R2.

    Verifies the cluster reports the R1 kubernetes version first and the
    R2 version after the update completes. ``userToken`` decides which
    user performs every step (admin or standard user).
    """
    k8sversionlist = get_k8s_versionlist()
    cluster_config1 = get_cluster_config(k8sversionlist[0])
    cluster_config2 = get_cluster_config(k8sversionlist[1])
    client = get_client_for_token(userToken)
    cluster_template = client.create_cluster_template(
        name=random_test_name("template"), description="test-template")
    revision1_name = random_test_name("revision1")
    cluster_template_revision1 = client.create_cluster_template_revision(
        name=revision1_name,
        clusterConfig=cluster_config1,
        clusterTemplateId=cluster_template.id)
    # Give the server a moment before reloading the freshly created revision
    time.sleep(2)
    cluster_template_revision1 = client.reload(
        cluster_template_revision1)
    cluster_name = random_test_name("test-auto")
    cluster = create_node_cluster(
        client, name=cluster_name,
        clusterTemplateRevisionId=cluster_template_revision1.id,
        userToken=userToken)
    check_cluster_version(cluster, k8sversionlist[0])
    revision2_name = random_test_name("revision2")
    cluster_template_revision2 = client.create_cluster_template_revision(
        name=revision2_name,
        clusterConfig=cluster_config2,
        clusterTemplateId=cluster_template.id)
    time.sleep(2)
    cluster_template_revision2 = client.reload(
        cluster_template_revision2)
    # Switch the cluster over to revision R2 and wait for the upgrade
    cluster = \
        client.update(
            cluster, name=cluster_name,
            clusterTemplateRevisionId=cluster_template_revision2.id)
    cluster = validate_cluster(client,
                               cluster,
                               intermediate_state="updating",
                               userToken=userToken)
    check_cluster_version(cluster, k8sversionlist[1])
    cluster_cleanup(client, cluster)
def node_template_digocean(userclient, nodesize):
    """Create a DigitalOcean node template (nyc3, Ubuntu 18.04) of the
    given droplet size, backed by a freshly created cloud credential."""
    cloud_credential = userclient.create_cloud_credential(
        digitaloceancredentialConfig={"accessToken": DO_ACCESSKEY})
    # Give the credential a moment to become usable
    time.sleep(3)
    do_config = {"region": "nyc3",
                 "size": nodesize,
                 "image": "ubuntu-18-04-x64"}
    node_template = userclient.create_node_template(
        digitaloceanConfig=do_config,
        name=random_name(),
        driver="digitalocean",
        namespaceId="dig",
        cloudCredentialId=cloud_credential.id,
        engineInstallURL=engine_install_url,
        useInternalIpAddress=True)
    return userclient.wait_success(node_template)
def create_node_cluster(userclient, name, nodecount=1, nodesize="4gb",
                        clusterTemplateRevisionId=None,
                        rancherKubernetesEngineConfig=None, answers=None,
                        userToken=None):
    """Create a DigitalOcean node-driver cluster and wait until active.

    The cluster is created either from a raw RKE config or from a cluster
    template revision (with optional question answers). A single node pool
    of ``nodecount`` all-role nodes is attached.

    Returns the active cluster object.
    """
    client = userclient
    if rancherKubernetesEngineConfig is not None:
        cluster = client.create_cluster(
            name=name,
            rancherKubernetesEngineConfig=rancherKubernetesEngineConfig)
    else:
        cluster = \
            client.create_cluster(
                name=name,
                clusterTemplateRevisionId=clusterTemplateRevisionId,
                answers=answers)
    nodetemplate = node_template_digocean(client, nodesize)
    node_spec = {"hostnamePrefix": random_test_name("test-auto"),
                 "nodeTemplateId": nodetemplate.id,
                 "requestedHostname": "test-auto-template",
                 "controlPlane": True,
                 "etcd": True,
                 "worker": True,
                 "quantity": nodecount,
                 "clusterId": cluster.id}
    # Creating the node pool can race with the cluster-owner role binding
    # being granted to the user, so retry for up to 10 seconds.
    success = False
    start = time.time()
    while not success:
        if time.time() - start > 10:
            raise AssertionError(
                "Timed out waiting for cluster owner global Roles")
        try:
            time.sleep(1)
            node_pool = client.create_node_pool(**node_spec)
            success = True
        except ApiError:
            success = False
    node_pool = client.wait_success(node_pool)
    cluster = validate_cluster(client, cluster, userToken=userToken)
    nodes = client.list_node(clusterId=cluster.id).data
    # BUG FIX: the original asserted len(nodes) == len(nodes), which is
    # always true; verify the requested number of nodes registered.
    assert len(nodes) == nodecount
    for node in nodes:
        assert node.state == "active"
    return cluster
def getRKEConfig(k8sversion):
    """Return the baseline rancherKubernetesEngineConfig used by these
    tests: canal networking, nginx ingress, metrics-server monitoring and
    S3-backed etcd snapshots every 12 hours."""
    kube_api_service = {
        "alwaysPullImages": "false",
        "podSecurityPolicy": "false",
        "serviceNodePortRange": "30000-32767",
        "type": "kubeAPIService"
    }
    etcd_service = {
        "creation": "12h",
        "extraArgs": {
            "heartbeat-interval": 500,
            "election-timeout": 5000
        },
        "retention": "72h",
        "snapshot": "false",
        "type": "etcdService",
        "backupConfig": {
            "enabled": "true",
            "intervalHours": 12,
            "retention": 6,
            "type": "backupConfig",
            "s3BackupConfig": {
                "type": "s3BackupConfig",
                "accessKey": AWS_ACCESS_KEY_ID,
                "secretKey": AWS_SECRET_ACCESS_KEY,
                "bucketName": "test-auto-s3",
                "endpoint": "s3.amazonaws.com"
            }
        }
    }
    return {
        "addonJobTimeout": 30,
        "ignoreDockerVersion": "true",
        "sshAgentAuth": "false",
        "type": "rancherKubernetesEngineConfig",
        "kubernetesVersion": k8sversion,
        "authentication": {
            "strategy": "x509",
            "type": "authnConfig"
        },
        "network": {
            "plugin": "canal",
            "type": "networkConfig",
            "options": {
                "flannel_backend_type": "vxlan"
            }
        },
        "ingress": {
            "provider": "nginx",
            "type": "ingressConfig"
        },
        "monitoring": {
            "provider": "metrics-server",
            "type": "monitoringConfig"
        },
        "services": {
            "type": "rkeConfigServices",
            "kubeApi": kube_api_service,
            "etcd": etcd_service
        }
    }
| apache-2.0 |
vikramsunkara/PyME | pyme/lazy_dict.py | 2 | 3974 | """
Dictionary with lazy evaluation on access, via a supplied update function
"""
import itertools
class LazyDict(dict):
    """
    A dictionary type that lazily updates values when they are accessed.

    All the usual dictionary methods work as expected, with automatic lazy
    updates occuring behind the scenes whenever values are read from the
    dictionary.

    The optional ``items`` argument, if specified, is a mapping instance used
    to initialise the items in the :class:`LazyDict`.

    The ``update_value`` argument required by the :class:`LazyDict` constructor
    must be a function of the form:

        update_value(k, existing_value, member) -> updated_value

    This function is called whenever an item with the key ``k`` is read
    from the :class:`LazyDict`. The second argument ``existing_value``, is
    the value corresponding to the key ``k`` stored in the :class:`LazyDict`,
    or ``None``, if the key ``k`` is not contained in the :class:`LazyDict`.
    The third argument ``member`` is a boolean value indicating if there is
    an existing value stored under the key ``k``.

    Accessing a stored value first updates it in place via ``update_value``
    and then returns the updated value.
    """

    def __init__(self, update_value, items = None):
        """
        Returns a LazyDict using the specified ``update_value`` function
        and optional initial dictionary arguments.
        """
        self.update_value = update_value
        if items is None:
            dict.__init__(self)
        else:
            # BUG FIX: the unbound call must receive ``self`` explicitly.
            # The original ``dict.__init__(items)`` treated ``items`` as
            # the instance, so the initial items were silently dropped
            # (which also made copy() return an empty LazyDict).
            dict.__init__(self, items)

    def __getitem__(self, key):
        member = dict.__contains__(self, key)
        if member:
            existing_value = dict.__getitem__(self, key)
        else:
            existing_value = None
        # ensure measurement is up to date
        updated_value = self.update_value(key, existing_value, member)
        self[key] = updated_value
        return updated_value

    def copy(self):
        # New LazyDict sharing the same update function and current items.
        return LazyDict(self.update_value, dict.copy(self))

    def itervalues(self):
        # Python 2 iterator protocol; each value is lazily updated on read.
        return itertools.imap((lambda k : self[k]), dict.iterkeys(self))

    def iteritems(self):
        return itertools.imap((lambda k : (k, self[k])), dict.iterkeys(self))

    def pop(self, *args):
        # Mimics dict.pop(k[, default]) but returns the lazily-updated value.
        n_args = len(args)
        if n_args < 1:
            raise TypeError('pop expected at least 1 argument, got %d' % n_args)
        if n_args > 2:
            raise TypeError('pop expected at most 2 arguments, got %d' % n_args)
        k = args[0]
        if k in self:
            value = self[k]
            del self[k]
            return value
        else:
            if n_args == 2:
                return args[1]
            else:
                raise KeyError(str(k))

    def popitem(self):
        # Pop an arbitrary item, returning it with its lazily-updated value.
        key, value = dict.popitem(self)
        self[key] = value
        updated_value = self[key]
        del self[key]
        return key, updated_value

    def setdefault(self, k, x=None):
        if k in self:
            return self[k]
        else:
            self[k] = x
            return x

    def get(self, k, x=None):
        if k in self:
            return self[k]
        else:
            return x

    def values(self):
        return list(self.itervalues())

    def items(self):
        return list(self.iteritems())
ArthurGarnier/SickRage | lib/hachoir_core/field/static_field_set.py | 93 | 1870 | from hachoir_core.field import FieldSet, ParserError
class StaticFieldSet(FieldSet):
    """
    Static field set: format class attribute is a tuple of all fields
    in syntax like:
       format = (
            (TYPE1, ARG1, ARG2, ...),
            (TYPE2, ARG1, ARG2, ..., {KEY1=VALUE1, ...}),
            ...
       )

    Types with dynamic size are forbidden, eg. CString, PascalString8, etc.
    """
    format = None  # Subclasses must redefine this tuple of field specs
    _class = None  # Name of the class the cached static_size belongs to

    def __new__(cls, *args, **kw):
        """Compute and cache static_size the first time an instance of
        each (sub)class is created, then delegate to object.__new__."""
        assert cls.format is not None, "Class attribute 'format' is not set"
        # BUG FIX: use equality, not identity; ``is not`` on strings only
        # works by interning accident and must not guard the per-class
        # size cache.
        if cls._class != cls.__name__:
            cls._class = cls.__name__
            cls.static_size = cls._computeStaticSize()
        # BUG FIX: object.__new__ must not receive the constructor
        # arguments (deprecated on Python 2, TypeError on Python 3).
        return object.__new__(cls)

    @staticmethod
    def _computeItemSize(item):
        """Return the size in bits of one ``format`` entry, calling a
        callable static_size with the entry's arguments when needed."""
        item_class = item[0]
        if item_class.static_size is None:
            raise ParserError("Unable to get static size of field type: %s"
                % item_class.__name__)
        if callable(item_class.static_size):
            if isinstance(item[-1], dict):
                return item_class.static_size(*item[1:-1], **item[-1])
            else:
                return item_class.static_size(*item[1:])
        else:
            assert isinstance(item_class.static_size, (int, long))
            return item_class.static_size

    def createFields(self):
        """Instantiate each field described by ``format``, expanding a
        trailing dict entry into keyword arguments."""
        for item in self.format:
            if isinstance(item[-1], dict):
                yield item[0](self, *item[1:-1], **item[-1])
            else:
                yield item[0](self, *item[1:])

    @classmethod
    def _computeStaticSize(cls, *args):
        return sum(cls._computeItemSize(item) for item in cls.format)

    # Initial value of static_size, it changes when first instance
    # is created (see __new__)
    static_size = _computeStaticSize
overtherain/scriptfile | software/googleAppEngine/lib/django_1_2/django/contrib/messages/tests/base.py | 44 | 16343 | from django import http
from django.test import TestCase
from django.conf import settings
from django.utils.translation import ugettext_lazy
from django.contrib.messages import constants, utils, get_level, set_level
from django.contrib.messages.api import MessageFailure
from django.contrib.messages.storage import default_storage, base
from django.contrib.messages.storage.base import Message
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
def add_level_messages(storage):
    """
    Adds 6 messages from different levels (including a custom one) to a storage
    instance.
    """
    level_messages = [
        (constants.INFO, 'A generic info message', {}),
        (29, 'Some custom level', {}),
        (constants.DEBUG, 'A debugging message', {'extra_tags': 'extra-tag'}),
        (constants.WARNING, 'A warning', {}),
        (constants.ERROR, 'An error', {}),
        (constants.SUCCESS, 'This was a triumph.', {}),
    ]
    for level, message, extra in level_messages:
        storage.add(level, message, **extra)
class BaseTest(TestCase):
storage_class = default_storage
restore_settings = ['MESSAGE_LEVEL', 'MESSAGE_TAGS']
urls = 'django.contrib.messages.tests.urls'
levels = {
'debug': constants.DEBUG,
'info': constants.INFO,
'success': constants.SUCCESS,
'warning': constants.WARNING,
'error': constants.ERROR,
}
def setUp(self):
self._remembered_settings = {}
for setting in self.restore_settings:
if hasattr(settings, setting):
self._remembered_settings[setting] = getattr(settings, setting)
delattr(settings._wrapped, setting)
# Backup these manually because we do not want them deleted.
self._middleware_classes = settings.MIDDLEWARE_CLASSES
self._template_context_processors = \
settings.TEMPLATE_CONTEXT_PROCESSORS
self._installed_apps = settings.INSTALLED_APPS
self._message_storage = settings.MESSAGE_STORAGE
settings.MESSAGE_STORAGE = '%s.%s' % (self.storage_class.__module__,
self.storage_class.__name__)
self.old_TEMPLATE_DIRS = settings.TEMPLATE_DIRS
settings.TEMPLATE_DIRS = ()
def tearDown(self):
for setting in self.restore_settings:
self.restore_setting(setting)
# Restore these manually (see above).
settings.MIDDLEWARE_CLASSES = self._middleware_classes
settings.TEMPLATE_CONTEXT_PROCESSORS = \
self._template_context_processors
settings.INSTALLED_APPS = self._installed_apps
settings.MESSAGE_STORAGE = self._message_storage
settings.TEMPLATE_DIRS = self.old_TEMPLATE_DIRS
def restore_setting(self, setting):
if setting in self._remembered_settings:
value = self._remembered_settings.pop(setting)
setattr(settings, setting, value)
elif hasattr(settings, setting):
delattr(settings._wrapped, setting)
def get_request(self):
return http.HttpRequest()
def get_response(self):
return http.HttpResponse()
def get_storage(self, data=None):
"""
Returns the storage backend, setting its loaded data to the ``data``
argument.
This method avoids the storage ``_get`` method from getting called so
that other parts of the storage backend can be tested independent of
the message retrieval logic.
"""
storage = self.storage_class(self.get_request())
storage._loaded_data = data or []
return storage
def test_add(self):
storage = self.get_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 1')
self.assert_(storage.added_new)
storage.add(constants.INFO, 'Test message 2', extra_tags='tag')
self.assertEqual(len(storage), 2)
def test_add_lazy_translation(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, ugettext_lazy('lazy message'))
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_no_update(self):
storage = self.get_storage()
response = self.get_response()
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_add_update(self):
storage = self.get_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 1')
storage.add(constants.INFO, 'Test message 1', extra_tags='tag')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 2)
def test_existing_add_read_update(self):
storage = self.get_existing_storage()
response = self.get_response()
storage.add(constants.INFO, 'Test message 3')
list(storage) # Simulates a read
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 0)
def test_existing_read_add_update(self):
storage = self.get_existing_storage()
response = self.get_response()
list(storage) # Simulates a read
storage.add(constants.INFO, 'Test message 3')
storage.update(response)
storing = self.stored_messages_count(storage, response)
self.assertEqual(storing, 1)
def test_full_request_response_cycle(self):
"""
With the message middleware enabled, tests that messages are properly
stored and then retrieved across the full request/redirect/response
cycle.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
messages = [Message(self.levels[level], msg) for msg in
data['messages']]
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_multiple_posts(self):
"""
Tests that messages persist properly when multiple POSTs are made
before a GET.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
messages = []
for level in ('debug', 'info', 'success', 'warning', 'error'):
messages.extend([Message(self.levels[level], msg) for msg in
data['messages']])
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.client.post(add_url, data)
response = self.client.get(show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), messages)
for msg in data['messages']:
self.assertContains(response, msg)
def test_middleware_disabled_auth_user(self):
"""
Tests that the messages API successfully falls back to using
user.message_set to store messages directly when the middleware is
disabled.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
user = User.objects.create_user('test', 'test@example.com', 'test')
self.client.login(username='test', password='test')
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
context_messages = list(response.context['messages'])
for msg in data['messages']:
self.assertTrue(msg in context_messages)
self.assertContains(response, msg)
def test_middleware_disabled_anon_user(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is raised when one attempts to store a message.
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
self.assertRaises(MessageFailure, self.client.post, add_url,
data, follow=True)
def test_middleware_disabled_anon_user_fail_silently(self):
"""
Tests that, when the middleware is disabled and a user is not logged
in, an exception is not raised if 'fail_silently' = True
"""
settings.MESSAGE_LEVEL = constants.DEBUG
settings.INSTALLED_APPS = list(settings.INSTALLED_APPS)
settings.INSTALLED_APPS.remove(
'django.contrib.messages',
)
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.remove(
'django.contrib.messages.middleware.MessageMiddleware',
)
settings.TEMPLATE_CONTEXT_PROCESSORS = \
list(settings.TEMPLATE_CONTEXT_PROCESSORS)
settings.TEMPLATE_CONTEXT_PROCESSORS.remove(
'django.contrib.messages.context_processors.messages',
)
data = {
'messages': ['Test message %d' % x for x in xrange(10)],
'fail_silently': True,
}
show_url = reverse('django.contrib.messages.tests.urls.show')
for level in ('debug', 'info', 'success', 'warning', 'error'):
add_url = reverse('django.contrib.messages.tests.urls.add',
args=(level,))
response = self.client.post(add_url, data, follow=True)
self.assertRedirects(response, show_url)
self.assertTrue('messages' in response.context)
self.assertEqual(list(response.context['messages']), [])
def stored_messages_count(self, storage, response):
"""
Returns the number of messages being stored after a
``storage.update()`` call.
"""
raise NotImplementedError('This method must be set by a subclass.')
def test_get(self):
raise NotImplementedError('This method must be set by a subclass.')
def get_existing_storage(self):
return self.get_storage([Message(constants.INFO, 'Test message 1'),
Message(constants.INFO, 'Test message 2',
extra_tags='tag')])
def test_existing_read(self):
"""
Tests that reading the existing storage doesn't cause the data to be
lost.
"""
storage = self.get_existing_storage()
self.assertFalse(storage.used)
# After iterating the storage engine directly, the used flag is set.
data = list(storage)
self.assert_(storage.used)
# The data does not disappear because it has been iterated.
self.assertEqual(data, list(storage))
def test_existing_add(self):
storage = self.get_existing_storage()
self.assertFalse(storage.added_new)
storage.add(constants.INFO, 'Test message 3')
self.assert_(storage.added_new)
def test_default_level(self):
# get_level works even with no storage on the request.
request = self.get_request()
self.assertEqual(get_level(request), constants.INFO)
# get_level returns the default level if it hasn't been set.
storage = self.get_storage()
request._messages = storage
self.assertEqual(get_level(request), constants.INFO)
# Only messages of sufficient level get recorded.
add_level_messages(storage)
self.assertEqual(len(storage), 5)
def test_low_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 5))
self.assertEqual(get_level(request), 5)
add_level_messages(storage)
self.assertEqual(len(storage), 6)
def test_high_level(self):
request = self.get_request()
storage = self.storage_class(request)
request._messages = storage
self.assert_(set_level(request, 30))
self.assertEqual(get_level(request), 30)
add_level_messages(storage)
self.assertEqual(len(storage), 2)
def test_settings_level(self):
request = self.get_request()
storage = self.storage_class(request)
settings.MESSAGE_LEVEL = 29
self.assertEqual(get_level(request), 29)
add_level_messages(storage)
self.assertEqual(len(storage), 3)
def test_tags(self):
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', '', 'extra-tag debug', 'warning', 'error',
'success'])
def test_custom_tags(self):
settings.MESSAGE_TAGS = {
constants.INFO: 'info',
constants.DEBUG: '',
constants.WARNING: '',
constants.ERROR: 'bad',
29: 'custom',
}
# LEVEL_TAGS is a constant defined in the
# django.contrib.messages.storage.base module, so after changing
# settings.MESSAGE_TAGS, we need to update that constant too.
base.LEVEL_TAGS = utils.get_level_tags()
try:
storage = self.get_storage()
storage.level = 0
add_level_messages(storage)
tags = [msg.tags for msg in storage]
self.assertEqual(tags,
['info', 'custom', 'extra-tag', '', 'bad', 'success'])
finally:
# Ensure the level tags constant is put back like we found it.
self.restore_setting('MESSAGE_TAGS')
base.LEVEL_TAGS = utils.get_level_tags()
| mit |
Simran-B/arangodb | 3rdParty/V8-4.3.61/third_party/python_26/Lib/site-packages/win32/Demos/win32comport_demo.py | 34 | 5761 | # This is a simple serial port terminal demo.
#
# Its primary purpose is to demonstrate the native serial port access offered via
# win32file.
# It uses 3 threads:
# - The main thread, which cranks up the other 2 threads, then simply waits for them to exit.
# - The user-input thread - blocks waiting for a keyboard character, and when found sends it
# out the COM port. If the character is Ctrl+C, it stops, signalling the COM port thread to stop.
# - The COM port thread is simply listening for input on the COM port, and prints it to the screen.
# This demo uses userlapped IO, so that none of the read or write operations actually block (however,
# in this sample, the very next thing we do _is_ block - so it shows off the concepts even though it
# doesnt exploit them.
from win32file import * # The base COM port and file IO functions.
from win32event import * # We use events and the WaitFor[Multiple]Objects functions.
import win32con # constants.
import msvcrt # For the getch() function.
import threading
import sys
def FindModem():
# Snoop over the comports, seeing if it is likely we have a modem.
for i in range(1,5):
port = "COM%d" % (i,)
try:
handle = CreateFile(port,
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32con.OPEN_EXISTING,
win32con.FILE_ATTRIBUTE_NORMAL,
None)
# It appears that an available COM port will always success here,
# just return 0 for the status flags. We only care that it has _any_ status
# flags (and therefore probably a real modem)
if GetCommModemStatus(handle) != 0:
return port
except error:
pass # No port, or modem status failed.
return None
# A basic synchronous COM port file-like object
class SerialTTY:
def __init__(self, port):
if type(port)==type(0):
port = "COM%d" % (port,)
self.handle = CreateFile(port,
win32con.GENERIC_READ | win32con.GENERIC_WRITE,
0, # exclusive access
None, # no security
win32con.OPEN_EXISTING,
win32con.FILE_ATTRIBUTE_NORMAL | win32con.FILE_FLAG_OVERLAPPED,
None)
# Tell the port we want a notification on each char.
SetCommMask(self.handle, EV_RXCHAR)
# Setup a 4k buffer
SetupComm(self.handle, 4096, 4096)
# Remove anything that was there
PurgeComm(self.handle, PURGE_TXABORT | PURGE_RXABORT | PURGE_TXCLEAR | PURGE_RXCLEAR )
# Setup for overlapped IO.
timeouts = 0xFFFFFFFF, 0, 1000, 0, 1000
SetCommTimeouts(self.handle, timeouts)
# Setup the connection info.
dcb = GetCommState( self.handle )
dcb.BaudRate = CBR_115200
dcb.ByteSize = 8
dcb.Parity = NOPARITY
dcb.StopBits = ONESTOPBIT
SetCommState(self.handle, dcb)
print "Connected to %s at %s baud" % (port, dcb.BaudRate)
def _UserInputReaderThread(self):
overlapped = OVERLAPPED()
overlapped.hEvent = CreateEvent(None, 1, 0, None)
try:
while 1:
ch = msvcrt.getch()
if ord(ch)==3:
break
WriteFile(self.handle, ch, overlapped)
# Wait for the write to complete.
WaitForSingleObject(overlapped.hEvent, INFINITE)
finally:
SetEvent(self.eventStop)
def _ComPortThread(self):
overlapped = OVERLAPPED()
overlapped.hEvent = CreateEvent(None, 1, 0, None)
while 1:
# XXX - note we could _probably_ just use overlapped IO on the win32file.ReadFile() statement
# XXX but this tests the COM stuff!
rc, mask = WaitCommEvent(self.handle, overlapped)
if rc == 0: # Character already ready!
SetEvent(overlapped.hEvent)
rc = WaitForMultipleObjects([overlapped.hEvent, self.eventStop], 0, INFINITE)
if rc == WAIT_OBJECT_0:
# Some input - read and print it
flags, comstat = ClearCommError( self.handle )
rc, data = ReadFile(self.handle, comstat.cbInQue, overlapped)
WaitForSingleObject(overlapped.hEvent, INFINITE)
sys.stdout.write(data)
else:
# Stop the thread!
# Just incase the user input thread uis still going, close it
sys.stdout.close()
break
def Run(self):
self.eventStop = CreateEvent(None, 0, 0, None)
# Start the reader and writer threads.
user_thread = threading.Thread(target = self._UserInputReaderThread)
user_thread.start()
com_thread = threading.Thread(target = self._ComPortThread)
com_thread.start()
user_thread.join()
com_thread.join()
if __name__=='__main__':
print "Serial port terminal demo - press Ctrl+C to exit"
if len(sys.argv)<=1:
port = FindModem()
if port is None:
print "No COM port specified, and no modem could be found"
print "Please re-run this script with the name of a COM port (eg COM3)"
sys.exit(1)
else:
port = sys.argv[1]
tty = SerialTTY(port)
tty.Run()
| apache-2.0 |
Dany3R9/ns-3-dev-ndnSIM | src/buildings/doc/source/conf.py | 175 | 7083 | # -*- coding: utf-8 -*-
#
# ns-3 documentation build configuration file, created by
# sphinx-quickstart on Tue Dec 14 09:00:39 2010.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.pngmath',
'sphinxcontrib.seqdiag']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'buildings'
# General information about the project.
project = u'LENA'
copyright = u'2011-2012, CTTC'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'M2'
# The full version, including alpha/beta/rc tags.
release = 'M2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
#htmlhelp_basename = 'ns-3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('buildings', 'buildings.tex', u'Buildings Module Documentation', u'Centre Tecnologic de Telecomunicacions de Catalunya (CTTC)', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'ns-3-model-library', u'ns-3 Model Library',
[u'ns-3 project'], 1)
]
| gpl-2.0 |
JPJPJPOPOP/zulip | api/integrations/asana/zulip_asana_config.py | 27 | 2139 | # -*- coding: utf-8 -*-
#
# Copyright © 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
### REQUIRED CONFIGURATION ###
# Change these values to your Asana credentials.
ASANA_API_KEY = "0123456789abcdef0123456789abcdef"
# Change these values to the credentials for your Asana bot.
ZULIP_USER = "asana-bot@example.com"
ZULIP_API_KEY = "0123456789abcdef0123456789abcdef"
# The Zulip stream that will receive Asana task updates.
ZULIP_STREAM_NAME = "asana"
### OPTIONAL CONFIGURATION ###
# Set to None for logging to stdout when testing, and to a file for
# logging in production.
#LOG_FILE = "/var/tmp/zulip_asana.log"
LOG_FILE = None
# This file is used to resume this mirror in case the script shuts down.
# It is required and needs to be writeable.
RESUME_FILE = "/var/tmp/zulip_asana.state"
# When initially started, how many hours of messages to include.
ASANA_INITIAL_HISTORY_HOURS = 1
# Set this to your Zulip API server URI
ZULIP_SITE = "https://zulip.example.com"
# If properly installed, the Zulip API should be in your import
# path, but if not, set a custom path below
ZULIP_API_PATH = None
| apache-2.0 |
getsentry/freight | tests/checks/test_cloudbuilder.py | 1 | 9438 | import json
from textwrap import dedent
import pytest
import responses
from freight import checks
from freight.exceptions import CheckFailed, CheckPending
from freight.testutils import TestCase
class CloudbuilderCheckBase(TestCase):
def setUp(self):
self.check = checks.get("cloudbuilder")
self.user = self.create_user()
self.repo = self.create_repo()
self.app = self.create_app(repository=self.repo)
self.test_project = "mycoolproject"
self.test_sha = "0987654321"
self.test_token = "mysuperfaketoken"
class CloudbuilderContextCheckTest(CloudbuilderCheckBase):
@responses.activate
def test_build_success(self):
test_id = "successful_build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "SUCCESS",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_fail(self):
test_id = "failed_build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "FAILURE",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
failed_log_text = """\
LOG TEXT HERE
THIS HERE IS A MOCK OF LOG.TEXT THAT WILL BE PRINTED
build build build build build build steps
MORE LOGS HERE.
"""
build_logs = "mycoolproject.cloudbuild-logs.googleusercontent.com"
build_id = "failed_build_id"
responses.add(
responses.GET,
f"https://storage.googleapis.com/{build_logs}/log-{build_id}.txt",
body=dedent(failed_log_text),
)
with pytest.raises(CheckFailed):
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_in_progress(self):
test_id = "WIP_build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "WORKING",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckPending):
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_status_unknown(self):
""" "STATUS_UNKNOWN": "Status of the build is unknown."""
test_id = "unknown_build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "STATUS_UNKNOWN",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckFailed) as exception_info:
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_status_queued(self):
"""QUEUED": "Build or step is queued; work has not yet begun."""
test_id = "build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "QUEUED",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckPending) as exception_info:
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_status_internal_error(self):
"""INTERNAL_ERROR": "Build or step failed due to an internal cause."""
test_id = "build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "INTERNAL_ERROR",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckFailed) as exception_info:
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_status_timeout(self):
"""[summary]
"TIMEOUT": "Build or step took longer than was allowed.",
Arguments:
self {[type]} -- [description]
"""
test_id = "build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "TIMEOUT",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckFailed) as exception_info:
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_build_status_cancelled(self):
"""[summary]
"CANCELLED": "Build or step was canceled by a user.",
"""
test_id = "build_id"
body = json.dumps(
{
"builds": [
{
"id": test_id,
"logUrl": f"https://console.cloud.google.com/gcr/builds/{test_id}?project={self.test_project}",
"logsBucket": f"gs://{self.test_project}.cloudbuild-logs.googleusercontent.com",
"status": "CANCELLED",
}
]
}
)
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
body=body,
)
config = {"project": self.test_project, "oauth_token": self.test_token}
with pytest.raises(CheckFailed) as exception_info:
self.check.check(self.app, self.test_sha, config)
@responses.activate
def test_missing_body(self):
config = {"project": self.test_project, "oauth_token": self.test_token}
responses.add(
responses.GET,
f"https://cloudbuild.googleapis.com/v1/projects/{self.test_project}/builds",
status=400,
)
with pytest.raises(CheckFailed):
self.check.check(self.app, self.test_sha, config)
| apache-2.0 |
Sealos/Sarcasm | Proyecto Final/prueba.py | 1 | 2337 | import numpy as np
# Demo script (Python 2): trains four SVM variants on the first two Iris
# features and plots each classifier's decision boundary in a 2x2 grid.
# NOTE(review): uses Python 2 `print` statements; run through 2to3 for Python 3.
from sklearn import datasets, svm
"""
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target
np.unique(iris_y)
np.random.seed(0)
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]
svc = svm.SVC(kernel='rbf')
svc.fit(iris_X_train, iris_y_train)
print svc.predict(iris_X)
"""
# The triple-quoted string above is a disabled train/test-split experiment
# kept for reference; it is never executed.
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets

# import some data to play with
iris = datasets.load_iris()

# we create an instance of SVM and fit out data. We do not scale our
# data since we want to plot the support vectors
C = 1.0  # SVM regularization parameter
# Debug print of a single raw feature value.
print iris.data[1,1]
X = iris.data[:, :2]  # we only take the first two features. We could
                      # avoid this ugly slicing by using a two-dim dataset
y = iris.target
h = .02  # step size in the mesh

# Fit the four classifiers that will be compared below.
svc = svm.SVC(kernel='linear', C=C).fit(X, y)
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(X, y)
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(X, y)
lin_svc = svm.LinearSVC(C=C).fit(X, y)

# create a mesh to plot in (one grid point every `h` units, padded by 1)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
                     np.arange(y_min, y_max, h))

# title for the plots
titles = ['SVC with linear kernel',
          'LinearSVC (linear kernel)',
          'SVC with RBF kernel',
          'SVC with polynomial (degree 3) kernel']

for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
    # Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, m_max]x[y_min, y_max].
    plt.subplot(2, 2, i + 1)
    plt.subplots_adjust(wspace=0.4, hspace=0.4)
    Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
    # Put the result into a color plot
    Z = Z.reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
    # Plot also the training points
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
    plt.xlabel('Sepal length')
    plt.ylabel('Sepal width')
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
    plt.xticks(())
    plt.yticks(())
    plt.title(titles[i])

plt.show()
| gpl-2.0 |
lunchpy/fspider | fspider/pipelines.py | 1 | 1917 | # -*- coding: utf-8 -*-
import logging
import pymysql
from scrapy.conf import settings
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
class FspiderPipeline(object):
    """No-op pipeline: passes every scraped item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform; hand the item to the next pipeline stage.
        return item
class MariadbPipeline(object):
    """Upserts scraped items into a MariaDB/MySQL ``item`` table."""

    @staticmethod
    def process_item(item, spider):
        """Insert the item if unseen, otherwise update it; always return the item.

        Connection parameters come from the Scrapy settings (DB_HOST, DB_PORT,
        DB_NAME, DB_USER, DB_PASSWORD, DB_CHARSET). Database errors are rolled
        back and logged instead of aborting the crawl.
        """
        logger = logging.getLogger(__name__)
        host = settings['DB_HOST']
        port = settings['DB_PORT']
        db = settings['DB_NAME']
        user = settings['DB_USER']
        password = settings['DB_PASSWORD']
        charset = settings['DB_CHARSET']
        conn = pymysql.connect(host=host, port=port, user=user, passwd=password, db=db, charset=charset)
        # Initialize so the finally block can close safely even if cursor
        # creation itself raised (the original code hit NameError here and
        # masked the real exception).
        cursor = None
        try:
            cursor = conn.cursor()
            # TODO: handle brand and category levels 1-3 (placeholders in the
            # original source, never implemented).
            # Upsert keyed on item_id: probe for existence, then insert or update.
            sql = 'select count(*) from item where item_id=%s'
            cursor.execute(sql, (item['item_id'],))
            result = cursor.fetchone()
            if result[0] == 0:
                sql = 'insert into item(item_id, item_name, item_price, item_url) values(%s, %s, %s, %s)'
                param = (item['item_id'], item['item_name'], item['item_price'], item['item_url'])
            else:
                sql = 'update item set item_name=%s, item_price=%s, item_url=%s where item_id=%s'
                param = (item['item_name'], item['item_price'], item['item_url'], item['item_id'])
            cursor.execute(sql, param)
            conn.commit()
        except Exception as e:
            conn.rollback()
            logger.error(e)
        finally:
            if cursor is not None:
                cursor.close()
            conn.close()
        return item
| gpl-3.0 |
GarySparrow/mFlaskWeb | venv/Lib/site-packages/pip/_vendor/requests/cookies.py | 355 | 18208 | # -*- coding: utf-8 -*-
"""
requests.cookies
~~~~~~~~~~~~~~~~
Compatibility code to be able to use `cookielib.CookieJar` with requests.
requests.utils imports from here, so be careful with imports.
"""
import copy
import time
import calendar
import collections
from .compat import cookielib, urlparse, urlunparse, Morsel
try:
import threading
# grr, pyflakes: this fixes "redefinition of unused 'threading'"
threading
except ImportError:
import dummy_threading as threading
class MockRequest(object):
    """Wraps a `requests.Request` to mimic a `urllib2.Request`.

    The code in `cookielib.CookieJar` expects this interface in order to correctly
    manage cookie policies, i.e., determine whether a cookie can be set, given the
    domains of the request and the cookie.

    The original request object is read-only. The client is responsible for collecting
    the new headers via `get_new_headers()` and interpreting them appropriately. You
    probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        # URL scheme of the wrapped request (e.g. 'http' / 'https').
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        # Only return the response's URL if the user hadn't set the Host
        # header
        if not self._r.headers.get('Host'):
            return self._r.url
        # If they did set it, retrieve it and reconstruct the expected domain
        host = self._r.headers['Host']
        parsed = urlparse(self._r.url)
        # Reconstruct the URL as we expect it
        return urlunparse([
            parsed.scheme, host, parsed.path, parsed.params, parsed.query,
            parsed.fragment
        ])

    def is_unverifiable(self):
        # Always True here; cookielib consults this when applying third-party
        # cookie policy. NOTE(review): presumably intentional because requests
        # drives redirects itself — confirm against cookielib's policy docs.
        return True

    def has_header(self, name):
        # A header counts as present whether it came from the original request
        # or was added by cookielib via add_unredirected_header().
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))

    def add_header(self, key, val):
        """cookielib has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError("Cookie headers should be added with add_unredirected_header()")

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    # The properties below mirror the get_* methods; presumably newer
    # cookielib versions read these as attributes instead of calling the
    # accessor methods.
    @property
    def unverifiable(self):
        return self.is_unverifiable()

    @property
    def origin_req_host(self):
        return self.get_origin_req_host()

    @property
    def host(self):
        return self.get_host()
class MockResponse(object):
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `cookielib` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookielib` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        # cookielib calls info() and then queries headers on the result.
        return self._headers

    def getheaders(self, name):
        # Bug fix: the original dropped the return value, so callers always
        # received None instead of the header list.
        return self._headers.getheaders(name)
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: cookielib.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # Only responses that carry the wrapped httplib.HTTPResponse can be
    # parsed for cookies; bail out quietly otherwise.
    original = getattr(response, '_original_response', None)
    if not original:
        return
    # Adapt both sides to the duck-typed interfaces cookielib expects and
    # let it do the actual extraction.
    jar.extract_cookies(MockResponse(original.msg), MockRequest(request))
def get_cookie_header(jar, request):
    """
    Produce an appropriate Cookie header string to be sent with `request`, or None.

    :rtype: str
    """
    # Let cookielib write its Cookie header onto a mock request, then read
    # it back out of the headers the mock collected.
    mock_request = MockRequest(request)
    jar.add_cookie_header(mock_request)
    return mock_request.get_new_headers().get('Cookie')
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect the matches first: clear() mutates the jar, so we must not
    # remove entries while iterating over it.
    doomed = [
        (cookie.domain, cookie.path, cookie.name)
        for cookie in cookiejar
        if cookie.name == name
        and (domain is None or cookie.domain == domain)
        and (path is None or cookie.path == path)
    ]
    for cookie_domain, cookie_path, cookie_name in doomed:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
# Raised by RequestsCookieJar lookups when a bare name is ambiguous.
class CookieConflictError(RuntimeError):
    """There are two cookies that meet the criteria specified in the cookie jar.

    Use .get and .set and include domain and path args in order to be more specific.
    """
# NOTE(review): `collections.MutableMapping` moved to `collections.abc` in
# Python 3.3 and was removed from `collections` in 3.10; this vendored copy
# targets older interpreters.
class RequestsCookieJar(cookielib.CookieJar, collections.MutableMapping):
    """Compatibility class; is a cookielib.CookieJar, but exposes a dict
    interface.

    This is the CookieJar we create by default for requests and sessions that
    don't specify one, since some clients may expect response.cookies and
    session.cookies to support dict operations.

    Requests does not use the dict interface internally; it's just for
    compatibility with external client code. All requests code should work
    out of the box with externally provided instances of ``CookieJar``, e.g.
    ``LWPCookieJar`` and ``FileCookieJar``.

    Unlike a regular CookieJar, this class is pickleable.

    .. warning:: dictionary operations that are normally O(1) may be O(n).
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.

        .. warning:: operation is O(n), not O(1).
        """
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.
        """
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path'))
            return

        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c

    def iterkeys(self):
        """Dict-like iterkeys() that returns an iterator of names of cookies
        from the jar.

        .. seealso:: itervalues() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.name

    def keys(self):
        """Dict-like keys() that returns a list of names of cookies from the
        jar.

        .. seealso:: values() and items().
        """
        return list(self.iterkeys())

    def itervalues(self):
        """Dict-like itervalues() that returns an iterator of values of cookies
        from the jar.

        .. seealso:: iterkeys() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.value

    def values(self):
        """Dict-like values() that returns a list of values of cookies from the
        jar.

        .. seealso:: keys() and items().
        """
        return list(self.itervalues())

    def iteritems(self):
        """Dict-like iteritems() that returns an iterator of name-value tuples
        from the jar.

        .. seealso:: iterkeys() and itervalues().
        """
        for cookie in iter(self):
            yield cookie.name, cookie.value

    def items(self):
        """Dict-like items() that returns a list of name-value tuples from the
        jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
        vanilla python dict of key value pairs.

        .. seealso:: keys() and values().
        """
        return list(self.iteritems())

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        domains = []
        for cookie in iter(self):
            if cookie.domain not in domains:
                domains.append(cookie.domain)
        return domains

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        paths = []
        for cookie in iter(self):
            if cookie.path not in paths:
                paths.append(cookie.path)
        return paths

    def multiple_domains(self):
        """Returns True if there are multiple domains in the jar.
        Returns False otherwise.

        :rtype: bool
        """
        domains = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in domains:
                return True
            domains.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Takes as an argument an optional domain and path and returns a plain
        old Python dict of name-value pairs of cookies that meet the
        requirements.

        :rtype: dict
        """
        dictionary = {}
        for cookie in iter(self):
            if (domain is None or cookie.domain == domain) and (path is None
                                                or cookie.path == path):
                dictionary[cookie.name] = cookie.value
        return dictionary

    def __contains__(self, name):
        # A name that matches several cookies still "is in" the jar, even
        # though __getitem__ would raise CookieConflictError for it.
        try:
            return super(RequestsCookieJar, self).__contains__(name)
        except CookieConflictError:
            return True

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws
        exception if there are more than one cookie with name. In that case,
        use the more explicit get() method instead.

        .. warning:: operation is O(n), not O(1).
        """
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Throws
        exception if there is already a cookie of that name in the jar. In that
        case, use the more explicit set() method instead.
        """
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps ``cookielib.CookieJar``'s
        ``remove_cookie_by_name()``.
        """
        remove_cookie_by_name(self, name)

    def set_cookie(self, cookie, *args, **kwargs):
        # Values that arrived wrapped in quotes: drop escaped embedded quotes
        # before handing the cookie to the base class.
        if hasattr(cookie.value, 'startswith') and cookie.value.startswith('"') and cookie.value.endswith('"'):
            cookie.value = cookie.value.replace('\\"', '')
        return super(RequestsCookieJar, self).set_cookie(cookie, *args, **kwargs)

    def update(self, other):
        """Updates this jar with cookies from another CookieJar or dict-like"""
        if isinstance(other, cookielib.CookieJar):
            for cookie in other:
                self.set_cookie(copy.copy(cookie))
        else:
            super(RequestsCookieJar, self).update(other)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values.

        If there are conflicting cookies, _find arbitrarily chooses one.
        See _find_no_duplicates if you want an exception thrown if there are
        conflicting cookies.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :return: cookie.value
        """
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        return cookie.value

        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__get_item__`` and ``get`` call this function: it's never
        used elsewhere in Requests.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :raises KeyError: if cookie is not found
        :raises CookieConflictError: if there are multiple cookies
            that match name and optionally domain and path
        :return: cookie.value
        """
        toReturn = None
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        if toReturn is not None:  # if there are multiple cookies that meet passed in criteria
                            raise CookieConflictError('There are multiple cookies with name, %r' % (name))
                        toReturn = cookie.value  # we will eventually return this as long as no cookie conflict

        # NOTE(review): truthiness test means a matched cookie whose value is
        # falsy (e.g. '') still falls through to KeyError — confirm intent.
        if toReturn:
            return toReturn
        raise KeyError('name=%r, domain=%r, path=%r' % (name, domain, path))

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # remove the unpickleable RLock object
        state.pop('_cookies_lock')
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        if '_cookies_lock' not in self.__dict__:
            self._cookies_lock = threading.RLock()

    def copy(self):
        """Return a copy of this RequestsCookieJar."""
        new_cj = RequestsCookieJar()
        new_cj.update(self)
        return new_cj
def _copy_cookie_jar(jar):
if jar is None:
return None
if hasattr(jar, 'copy'):
# We're dealing with an instance of RequestsCookieJar
return jar.copy()
# We're dealing with a generic CookieJar instance
new_jar = copy.copy(jar)
new_jar.clear()
for cookie in jar:
new_jar.set_cookie(copy.copy(cookie))
return new_jar
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    defaults = {
        'version': 0,
        'name': name,
        'value': value,
        'port': None,
        'domain': '',
        'path': '/',
        'secure': False,
        'expires': None,
        'discard': True,
        'comment': None,
        'comment_url': None,
        'rest': {'HttpOnly': None},
        'rfc2109': False,
    }

    # Reject keyword arguments that cookielib.Cookie does not understand.
    unknown = set(kwargs) - set(defaults)
    if unknown:
        raise TypeError(
            'create_cookie() got unexpected keyword arguments: %s'
            % list(unknown))

    defaults.update(kwargs)
    # The *_specified flags tell cookielib which attributes were explicit.
    defaults['port_specified'] = bool(defaults['port'])
    defaults['domain_specified'] = bool(defaults['domain'])
    defaults['domain_initial_dot'] = defaults['domain'].startswith('.')
    defaults['path_specified'] = bool(defaults['path'])

    return cookielib.Cookie(**defaults)
def morsel_to_cookie(morsel):
    """Convert a Morsel object into a Cookie containing the one k/v pair."""
    expires = None
    max_age = morsel['max-age']
    if max_age:
        # max-age is a relative lifetime; turn it into an absolute epoch
        # timestamp, which is what cookielib.Cookie stores.
        try:
            expires = int(time.time() + int(max_age))
        except ValueError:
            raise TypeError('max-age: %s must be integer' % max_age)
    elif morsel['expires']:
        # expires is an absolute HTTP-date string.
        expires = calendar.timegm(
            time.strptime(morsel['expires'], '%a, %d-%b-%Y %H:%M:%S GMT'))

    return create_cookie(
        comment=morsel['comment'],
        comment_url=bool(morsel['comment']),
        discard=False,
        domain=morsel['domain'],
        expires=expires,
        name=morsel.key,
        path=morsel['path'],
        port=None,
        rest={'HttpOnly': morsel['httponly']},
        rfc2109=False,
        secure=bool(morsel['secure']),
        value=morsel.value,
        version=morsel['version'] or 0,
    )
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
    """Returns a CookieJar from a key/value dictionary.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) A cookiejar to add the cookies to.
    :param overwrite: (optional) If False, will not replace cookies
        already in the jar with new ones.
    """
    if cookiejar is None:
        cookiejar = RequestsCookieJar()

    if cookie_dict is not None:
        # Snapshot existing names once so the overwrite check is O(1) per key.
        existing = {cookie.name for cookie in cookiejar}
        for name, value in cookie_dict.items():
            if overwrite or name not in existing:
                cookiejar.set_cookie(create_cookie(name, value))

    return cookiejar
def merge_cookies(cookiejar, cookies):
    """Add cookies to cookiejar and returns a merged CookieJar.

    :param cookiejar: CookieJar object to add the cookies to.
    :param cookies: Dictionary or CookieJar object to be added.
    """
    if not isinstance(cookiejar, cookielib.CookieJar):
        raise ValueError('You can only merge into CookieJar')

    if isinstance(cookies, dict):
        # Existing cookies win: merge without overwriting.
        return cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)

    if isinstance(cookies, cookielib.CookieJar):
        try:
            cookiejar.update(cookies)
        except AttributeError:
            # Plain CookieJar has no update(); add the cookies one at a time.
            for cookie_in_jar in cookies:
                cookiejar.set_cookie(cookie_in_jar)
    return cookiejar
| mit |
camptocamp/c2c-rd-addons | c2c_account_payment_extension/payment_order.py | 4 | 16221 | # -*- coding: utf-8 -*-
##############################################
#
# Copyright (C) ChriCar Beteiligungs- und Beratungs- GmbH
# all rights reserved
# this work is based on
# Copyright (c) 2008 Zikzakmedia S.L. (http://zikzakmedia.com) All Rights Reserved.
# Jordi Esteve <jesteve@zikzakmedia.com>
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs.
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company.
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/> or
# write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1.17, USA.
#
###############################################
from openerp.osv import osv, fields
from openerp.tools.translate import _
import openerp.addons.one2many_sorted as one2many_sorted
# Extends the base OpenERP payment.order model with a type, a configurable
# account-move creation strategy and direct-payment accounting on set_done().
class payment_order(osv.osv):
    _name = 'payment.order'
    _inherit = 'payment.order'

#    def _get_type(self, cr, uid, context=None):
#        if context is None:
#            context = {}
#        return context.get('type', 'payable')

    def _get_reference(self, cr, uid, context=None):
        # Default for the order reference: next number of the payment.order
        # sequence. The context-driven type/model selection is disabled.
        if context is None:
            context = {}
        #type = context.get('type', 'payable')
        type = 'payable'
        #model = type == 'payable' and 'payment.order' or 'rec.payment.order'
        model = 'payment.order'
        return self.pool.get('ir.sequence').get(cr, uid, model)

    def _get_period(self, cr, uid, context=None):
        # Default accounting period: the one covering today's date, or False.
        try:
            # find() function will throw an exception if no period can be found for
            # current date. That should not be a problem because user would be notified
            # but as this model inherits an existing one, once installed it will create
            # the new field and try to update existing records (even if there are no records yet)
            # So we must ensure no exception is thrown, otherwise the module can only be installed
            # once periods are created.
            periods = self.pool.get('account.period').find(cr, uid)
            return periods[0]
        except Exception, e:
            return False

#    def _payment_type_name_get(self, cr, uid, ids, field_name, arg, context=None):
#        result = {}
#        for rec in self.browse(cr, uid, ids, context):
#            result[rec.id] = rec.mode and rec.mode.type.name or ""
#        return result

    def _name_get(self, cr, uid, ids, field_name, arg, context=None):
        # Functional "name": mirrors the order's reference.
        result = {}
        for rec in self.browse(cr, uid, ids, context):
            result[rec.id] = rec.reference
        return result

    _columns = {
        'type': fields.selection([
            ('payable','Payable'),
            ('receivable','Receivable'),
            ],'Type', readonly=True, select=True),
        # invisible field to filter payment order lines by payment type
        #'payment_type_name': fields.function(_payment_type_name_get, method=True, type="char", size=64, string="Payment type name"),
        # The field name is necessary to add attachement documents to payment orders
        # 'name': fields.function(_name_get, method=True, type="char", size=64, string="Name"),
        'create_account_moves': fields.selection([('bank-statement','Bank Statement'),('direct-payment','Direct Payment')],
            'Create Account Moves',
            required=True,
            states={'done':[('readonly',True)]},
            help='Indicates when account moves should be created for order payment lines. "Bank Statement" '\
                 'will wait until user introduces those payments in bank a bank statement. "Direct Payment" '\
                 'will mark all payment lines as payied once the order is done.'),
        'period_id': fields.many2one('account.period', 'Period', states={'done':[('readonly',True)]}),
        # Payment lines kept sorted by partner name then invoice number.
        'line_ids': one2many_sorted.one2many_sorted('payment.line', 'order_id', 'Payment lines', states={'done': [('readonly', True)]}, \
            order = ' partner_id.name, ml_inv_ref.number'),
    }
    _defaults = {
#        'type': _get_type,
        'reference': _get_reference,
        'create_account_moves': lambda *a: 'bank-statement',
        'period_id': _get_period,
    }

    def cancel_from_done(self, cr, uid, ids, context=None):
        # Revert a done order: cancel and delete the account moves that were
        # generated for its lines, then flag the order as cancelled.
        if context is None:
            context = {}
        #Search for account_moves
        remove = []
        for move in self.browse(cr,uid,ids,context):
            #Search for any line
            for line in move.line_ids:
                if line.payment_move_id:
                    remove += [ line.payment_move_id.id ]
        self.pool.get('account.move').button_cancel( cr, uid, remove, context=context)
        self.pool.get('account.move').unlink(cr, uid, remove, context)
        self.write( cr, uid, ids, {
            'state':'cancel'
        },context=context)
        return True

    def unlink(self, cr, uid, ids, context=None):
        # Guard: only draft/cancelled orders may be deleted.
        pay_orders = self.read(cr, uid, ids, ['state'], context=context)
        unlink_ids = []
        for t in pay_orders:
            if t['state'] in ('draft', 'cancel'):
                unlink_ids.append(t['id'])
            else:
                raise osv.except_osv(_('Invalid action!'), _('You cannot delete payment order(s) which are already confirmed or done!'))
        result = super(payment_order, self).unlink(cr, uid, unlink_ids, context=context)
        return result

    def set_done(self, cr, uid, ids, context=None):
        # On confirmation, for 'direct-payment' orders only: create one
        # account move per order, with a debit/credit line pair per payment
        # line, then (partially) reconcile against the originating move line.
        result = super(payment_order, self).set_done(cr, uid, ids, context)
        company_currency_id = self.pool.get('res.users').browse(cr, uid, uid, context).company_id.currency_id.id
        for order in self.browse(cr, uid, ids, context):
            if order.create_account_moves != 'direct-payment':
                continue
            # This process creates a simple account move with bank and line accounts and line's amount. At the end
            # it will reconcile or partial reconcile both entries if that is possible.
            move_id = self.pool.get('account.move').create(cr, uid, {
                'name': '/',
                'journal_id': order.mode.journal.id,
                'period_id': order.period_id.id,
            }, context)
            for line in order.line_ids:
                if not line.amount:
                    continue
                if not line.account_id:
                    raise osv.except_osv(_('Error!'), _('Payment order should create account moves but line with amount %.2f for partner "%s" has no account assigned.') % (line.amount, line.partner_id.name ) )
                currency_id = order.mode.journal.currency and order.mode.journal.currency.id or company_currency_id
#                if line.type == 'payable':
                line_amount = line.amount_currency or line.amount
#                else:
#                    line_amount = -line.amount_currency or -line.amount
                # Sign decides which side of the journal the bank entry hits.
                if line_amount >= 0:
                    account_id = order.mode.journal.default_credit_account_id.id
                else:
                    account_id = order.mode.journal.default_debit_account_id.id
                acc_cur = ((line_amount<=0) and order.mode.journal.default_debit_account_id) or line.account_id
                ctx = context.copy()
                ctx['res.currency.compute.account'] = acc_cur
                amount = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency_id, line_amount, context=ctx)
                # Partner-side move line (the debt account).
                val = {
                    'name': line.move_line_id and line.move_line_id.name or '/',
                    'move_id': move_id,
                    'date': order.date_done,
                    'ref': line.move_line_id and line.move_line_id.ref or False,
                    'partner_id': line.partner_id and line.partner_id.id or False,
                    'account_id': line.account_id.id,
                    'debit': ((amount>0) and amount) or 0.0,
                    'credit': ((amount<0) and -amount) or 0.0,
                    'journal_id': order.mode.journal.id,
                    'period_id': order.period_id.id,
                    'currency_id': currency_id,
                }
                # NOTE(review): this recomputation duplicates the one above
                # with identical arguments — presumably redundant; confirm.
                amount = self.pool.get('res.currency').compute(cr, uid, currency_id, company_currency_id, line_amount, context=ctx)
                if currency_id != company_currency_id:
                    amount_cur = self.pool.get('res.currency').compute(cr, uid, company_currency_id, currency_id, amount, context=ctx)
                    val['amount_currency'] = -amount_cur
                if line.account_id and line.account_id.currency_id and line.account_id.currency_id.id != company_currency_id:
                    val['currency_id'] = line.account_id.currency_id.id
                    if company_currency_id == line.account_id.currency_id.id:
                        amount_cur = line_amount
                    else:
                        amount_cur = self.pool.get('res.currency').compute(cr, uid, company_currency_id, line.account_id.currency_id.id, amount, context=ctx)
                    val['amount_currency'] = amount_cur
                partner_line_id = self.pool.get('account.move.line').create(cr, uid, val, context, check=False)
                # Fill the secondary amount/currency
                # if currency is not the same than the company
                if currency_id != company_currency_id:
                    amount_currency = line_amount
                    move_currency_id = currency_id
                else:
                    amount_currency = False
                    move_currency_id = False
                # Bank-side counterpart line on the journal default account.
                self.pool.get('account.move.line').create(cr, uid, {
                    'name': line.move_line_id and line.move_line_id.name or '/',
                    'move_id': move_id,
                    'date': order.date_done,
                    'ref': line.move_line_id and line.move_line_id.ref or False,
                    'partner_id': line.partner_id and line.partner_id.id or False,
                    'account_id': account_id,
                    'debit': ((amount < 0) and -amount) or 0.0,
                    'credit': ((amount > 0) and amount) or 0.0,
                    'journal_id': order.mode.journal.id,
                    'period_id': order.period_id.id,
                    'amount_currency': amount_currency,
                    'currency_id': move_currency_id,
                }, context)
                # Sanity check: every line of the move must be valid.
                aml_ids = [x.id for x in self.pool.get('account.move').browse(cr, uid, move_id, context).line_id]
                for x in self.pool.get('account.move.line').browse(cr, uid, aml_ids, context):
                    if x.state != 'valid':
                        raise osv.except_osv(_('Error !'), _('Account move line "%s" is not valid') % x.name)
                if line.move_line_id and not line.move_line_id.reconcile_id:
                    # If payment line has a related move line, we try to reconcile it with the move we just created.
                    lines_to_reconcile = [
                        partner_line_id,
                    ]
                    # Check if payment line move is already partially reconciled and use those moves in that case.
                    if line.move_line_id.reconcile_partial_id:
                        for rline in line.move_line_id.reconcile_partial_id.line_partial_ids:
                            lines_to_reconcile.append( rline.id )
                    else:
                        lines_to_reconcile.append( line.move_line_id.id )
                    amount = 0.0
                    for rline in self.pool.get('account.move.line').browse(cr, uid, lines_to_reconcile, context):
                        amount += rline.debit - rline.credit
                    currency = self.pool.get('res.users').browse(cr, uid, uid, context).company_id.currency_id
                    # Full reconcile only if the candidate lines net to zero.
                    if self.pool.get('res.currency').is_zero(cr, uid, currency, amount):
                        self.pool.get('account.move.line').reconcile(cr, uid, lines_to_reconcile, 'payment', context=context)
                    else:
                        self.pool.get('account.move.line').reconcile_partial(cr, uid, lines_to_reconcile, 'payment', context)
                if order.mode.journal.entry_posted:
                    self.pool.get('account.move').write(cr, uid, [move_id], {
                        'state':'posted',
                    }, context)
                # Remember which move pays this line (used by cancel_from_done).
                self.pool.get('payment.line').write(cr, uid, [line.id], {
                    'payment_move_id': move_id,
                }, context)
        return result

payment_order()
# Extends payment.line with a back-reference to the paying account move and a
# stored related 'type' field, plus a one-off sign migration on install.
class payment_line(osv.osv):
    _name = 'payment.line'
    _inherit = 'payment.line'

    def _auto_init(self, cr, context=None):
        # Detect a first install of this extension: the 'type' column does not
        # exist yet, so the stored related field must be backfilled and the
        # sign convention of receivable lines migrated.
        cr.execute("SELECT column_name FROM information_schema.columns WHERE table_name = 'payment_line' and column_name='type'")
        if cr.fetchone():
            update_sign = False
        else:
            update_sign = True
        result = super(payment_line, self)._auto_init(cr, context=context)
        if update_sign:
            # Ensure related store value of field 'type' is updated in the database.
            # Note that by forcing the update here we also ensure everything is done in the same transaction.
            # Because addons/__init__.py will execute a commit just after creating table fields.
            result.sort()
            for item in result:
                item[1](cr, *item[2])
            # Change sign of 'receivable' payment lines
            cr.execute("UPDATE payment_line SET amount_currency = -amount_currency WHERE type='receivable'")
        return result

    _columns = {
        'move_line_id': fields.many2one('account.move.line', 'Entry line', domain="[('reconcile_id','=', False), ('amount_to_pay','!=',0), ('account_id.type','=',parent.type),('payment_type','ilike',parent.payment_type_name or '%')]", help='This Entry Line will be referred for the information of the ordering customer.'),
        'payment_move_id': fields.many2one('account.move', 'Payment Move', readonly=True, help='Account move that pays this debt.'),
        'account_id': fields.many2one('account.account', 'Account'),
        # Mirrors the parent order's type so it can be used in domains/SQL.
        'type': fields.related('order_id','type', type='selection', selection=[('payable','Payable'),('receivable','Receivable')], readonly=True, store=True, string='Type'),
    }

    def onchange_move_line(self, cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency=False, company_currency=False, context=None):
        # Adds account.move.line name to the payment line communication
        res = super(payment_line, self).onchange_move_line(cr, uid, ids, move_line_id, payment_type, date_prefered, date_scheduled, currency, company_currency, context)
        if move_line_id:
            line = self.pool.get('account.move.line').browse(cr, uid, move_line_id)
            # '/' is the placeholder name OpenERP uses for unnamed lines.
            if line.name != '/':
                res['value']['communication'] = res['value']['communication'] + '. ' + line.name
            res['value']['account_id'] = line.account_id.id
        return res

payment_line()
| agpl-3.0 |
thomasmoelhave/tpie | scripts/check_missing_ctests.py | 1 | 1860 | #!/usr/bin/env python3
# Copyright 2016, The TPIE development team
#
# This file is part of TPIE.
#
# TPIE is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# TPIE is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with TPIE. If not, see <http://www.gnu.org/licenses/>
import os
import os.path
import sys
import subprocess
from glob import glob
from cmakeast import ast as cmakeast
os.chdir(os.path.dirname(sys.argv[0]))
os.chdir('..')
ctests = []
test_decs = ['add_unittest', 'add_fulltest']
with open('test/unit/CMakeLists.txt', 'r') as f:
ast = cmakeast.parse(f.read())
for s in ast.statements:
if isinstance(s, cmakeast.FunctionCall):
if s.name in test_decs:
args = list(map(lambda x: x.contents, s.arguments))
suit = args[0]
tests = args[1:]
for t in tests:
ctests.append(suit + '_' + t)
os.chdir('build')
uttests = []
for suit in glob('test/unit/ut-*'):
suitname = suit.split('/ut-')[1]
out = subprocess.check_output([suit, '--help'])
out = out.split(b'Available tests:')[1]
out = out.split(b'\n\n')[0]
for test in out.strip().split(b'\n'):
testname = str(test.strip().split()[0], 'utf-8')
uttests.append(suitname + '_' + testname)
sc = set(ctests)
sut = set(uttests)
missing = sorted(sut - sc)
if missing:
print('Tests in ut-*, but not in ctest:')
for t in missing:
print(' ' * 4 + t)
sys.exit(1)
else:
print('All tests in ut-* found in ctest.')
| gpl-3.0 |
lociii/googleads-python-lib | examples/adspygoogle/dfp/v201302/creative_wrapper_service/update_creative_wrappers.py | 3 | 2479 | #!/usr/bin/python
#
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This code example updates a creative wrapper to the 'OUTER' wrapping order.
To determine which creative wrappers exist, run get_all_creative_wrappers.py.
Tags: CreativeWrapperService.getCreativeWrapper
Tags: CreativeWrapperService.updateCreativeWrappers
"""
__author__ = 'api.shamjeff@gmail.com (Jeff Sham)'
# Locate the client library. If module was installed via "setup.py" script, then
# the following two lines are not needed.
import os
import sys
sys.path.insert(0, os.path.join('..', '..', '..', '..', '..'))
# Import appropriate classes from the client library.
from adspygoogle import DfpClient
# Set the ID of the creative wrapper to get.
CREATIVE_WRAPPER_ID = 'INSERT_CREATIVE_WRAPPER_ID_HERE'
def main(client, creative_wrapper_id):
# Initialize appropriate service.
creative_wrapper_service = client.GetService('CreativeWrapperService',
version='v201302')
# Get creative wrapper.
creative_wrapper = creative_wrapper_service.GetCreativeWrapper(
creative_wrapper_id)[0]
if creative_wrapper:
creative_wrapper['ordering'] = 'OUTER'
# Update the creative wrappers on the server.
creative_wrappers = creative_wrapper_service.UpdateCreativeWrappers(
[creative_wrapper])
# Display results.
if creative_wrappers:
for creative_wrapper in creative_wrappers:
print (('Creative wrapper with ID \'%s\' and wrapping order \'%s\' '
'was updated.') % (creative_wrapper['id'],
creative_wrapper['ordering']))
else:
print 'No creative wrappers were updated.'
else:
print 'No creative wrappers found to update.'
if __name__ == '__main__':
# Initialize client object.
dfp_client = DfpClient(path=os.path.join('..', '..', '..', '..', '..'))
main(dfp_client, CREATIVE_WRAPPER_ID)
| apache-2.0 |
neonatura/crotalus | lib/libxml2/genUnicode.py | 337 | 12985 | #!/usr/bin/python -u
#
# Original script modified in November 2003 to take advantage of
# the character-validation range routines, and updated to the
# current Unicode information (Version 4.0.1)
#
# NOTE: there is an 'alias' facility for blocks which are not present in
# the current release, but are needed for ABI compatibility. This
# must be accomplished MANUALLY! Please see the comments below under
# 'blockAliases'
#
import sys
import string
import time
webpage = "http://www.unicode.org/Public/4.0-Update1/UCD-4.0.1.html"
sources = "Blocks-4.0.1.txt UnicodeData-4.0.1.txt"
#
# blockAliases is a small hack - it is used for mapping block names which
# were were used in the 3.1 release, but are missing or changed in the current
# release. The format is "OldBlockName:NewBlockName1[,NewBlockName2[,...]]"
blockAliases = []
blockAliases.append("CombiningMarksforSymbols:CombiningDiacriticalMarksforSymbols")
blockAliases.append("Greek:GreekandCoptic")
blockAliases.append("PrivateUse:PrivateUseArea,SupplementaryPrivateUseArea-A," +
"SupplementaryPrivateUseArea-B")
# minTableSize gives the minimum number of ranges which must be present
# before a range table is produced. If there are less than this
# number, inline comparisons are generated
minTableSize = 8
(blockfile, catfile) = string.split(sources)
#
# Now process the "blocks" file, reducing it to a dictionary
# indexed by blockname, containing a tuple with the applicable
# block range
#
BlockNames = {}
try:
blocks = open(blockfile, "r")
except:
print "Missing %s, aborting ..." % blockfile
sys.exit(1)
for line in blocks.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
range = string.strip(fields[0])
(start, end) = string.split(range, "..")
name = string.strip(fields[1])
name = string.replace(name, ' ', '')
except:
print "Failed to process line: %s" % (line)
continue
start = "0x" + start
end = "0x" + end
try:
BlockNames[name].append((start, end))
except:
BlockNames[name] = [(start, end)]
blocks.close()
print "Parsed %d blocks descriptions" % (len(BlockNames.keys()))
for block in blockAliases:
alias = string.split(block,':')
alist = string.split(alias[1],',')
for comp in alist:
if BlockNames.has_key(comp):
if alias[0] not in BlockNames:
BlockNames[alias[0]] = []
for r in BlockNames[comp]:
BlockNames[alias[0]].append(r)
else:
print "Alias %s: %s not in Blocks" % (alias[0], comp)
continue
#
# Next process the Categories file. This is more complex, since
# the file is in code sequence, and we need to invert it. We use
# a dictionary with index category-name, with each entry containing
# all the ranges (codepoints) of that category. Note that category
# names comprise two parts - the general category, and the "subclass"
# within that category. Therefore, both "general category" (which is
# the first character of the 2-character category-name) and the full
# (2-character) name are entered into this dictionary.
#
try:
data = open(catfile, "r")
except:
print "Missing %s, aborting ..." % catfile
sys.exit(1)
nbchar = 0;
Categories = {}
for line in data.readlines():
if line[0] == '#':
continue
line = string.strip(line)
if line == '':
continue
try:
fields = string.split(line, ';')
point = string.strip(fields[0])
value = 0
while point != '':
value = value * 16
if point[0] >= '0' and point[0] <= '9':
value = value + ord(point[0]) - ord('0')
elif point[0] >= 'A' and point[0] <= 'F':
value = value + 10 + ord(point[0]) - ord('A')
elif point[0] >= 'a' and point[0] <= 'f':
value = value + 10 + ord(point[0]) - ord('a')
point = point[1:]
name = fields[2]
except:
print "Failed to process line: %s" % (line)
continue
nbchar = nbchar + 1
# update entry for "full name"
try:
Categories[name].append(value)
except:
try:
Categories[name] = [value]
except:
print "Failed to process line: %s" % (line)
# update "general category" name
try:
Categories[name[0]].append(value)
except:
try:
Categories[name[0]] = [value]
except:
print "Failed to process line: %s" % (line)
blocks.close()
print "Parsed %d char generating %d categories" % (nbchar, len(Categories.keys()))
#
# The data is now all read. Time to process it into a more useful form.
#
# reduce the number list into ranges
for cat in Categories.keys():
list = Categories[cat]
start = -1
prev = -1
end = -1
ranges = []
for val in list:
if start == -1:
start = val
prev = val
continue
elif val == prev + 1:
prev = val
continue
elif prev == start:
ranges.append((prev, prev))
start = val
prev = val
continue
else:
ranges.append((start, prev))
start = val
prev = val
continue
if prev == start:
ranges.append((prev, prev))
else:
ranges.append((start, prev))
Categories[cat] = ranges
#
# Assure all data is in alphabetic order, since we will be doing binary
# searches on the tables.
#
bkeys = BlockNames.keys()
bkeys.sort()
ckeys = Categories.keys()
ckeys.sort()
#
# Generate the resulting files
#
try:
header = open("include/libxml/xmlunicode.h", "w")
except:
print "Failed to open include/libxml/xmlunicode.h"
sys.exit(1)
try:
output = open("xmlunicode.c", "w")
except:
print "Failed to open xmlunicode.c"
sys.exit(1)
date = time.asctime(time.localtime(time.time()))
header.write(
"""/*
* Summary: Unicode character APIs
* Description: API for the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Author: Daniel Veillard
*/
#ifndef __XML_UNICODE_H__
#define __XML_UNICODE_H__
#include <libxml/xmlversion.h>
#ifdef LIBXML_UNICODE_ENABLED
#ifdef __cplusplus
extern "C" {
#endif
""" % (webpage, date, sources));
output.write(
"""/*
* xmlunicode.c: this module implements the Unicode character APIs
*
* This file is automatically generated from the
* UCS description files of the Unicode Character Database
* %s
* using the genUnicode.py Python script.
*
* Generation date: %s
* Sources: %s
* Daniel Veillard <veillard@redhat.com>
*/
#define IN_LIBXML
#include "libxml.h"
#ifdef LIBXML_UNICODE_ENABLED
#include <string.h>
#include <libxml/xmlversion.h>
#include <libxml/xmlunicode.h>
#include <libxml/chvalid.h>
typedef int (xmlIntFunc)(int); /* just to keep one's mind untwisted */
typedef struct {
const char *rangename;
xmlIntFunc *func;
} xmlUnicodeRange;
typedef struct {
xmlUnicodeRange *table;
int numentries;
} xmlUnicodeNameTable;
static xmlIntFunc *xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname);
static xmlUnicodeRange xmlUnicodeBlocks[] = {
""" % (webpage, date, sources));
flag = 0
for block in bkeys:
name = string.replace(block, '-', '')
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIs%s}' % (block, name))
output.write('};\n\n')
output.write('static xmlUnicodeRange xmlUnicodeCats[] = {\n')
flag = 0;
for name in ckeys:
if flag:
output.write(',\n')
else:
flag = 1
output.write(' {"%s", xmlUCSIsCat%s}' % (name, name))
output.write('};\n\n')
#
# For any categories with more than minTableSize ranges we generate
# a range table suitable for xmlCharInRange
#
for name in ckeys:
if len(Categories[name]) > minTableSize:
numshort = 0
numlong = 0
ranges = Categories[name]
sptr = "NULL"
lptr = "NULL"
for range in ranges:
(low, high) = range
if high < 0x10000:
if numshort == 0:
pline = "static const xmlChSRange xml%sS[] = {" % name
sptr = "xml%sS" % name
else:
pline += ", "
numshort += 1
else:
if numlong == 0:
if numshort > 0:
output.write(pline + " };\n")
pline = "static const xmlChLRange xml%sL[] = {" % name
lptr = "xml%sL" % name
else:
pline += ", "
numlong += 1
if len(pline) > 60:
output.write(pline + "\n")
pline = " "
pline += "{%s, %s}" % (hex(low), hex(high))
output.write(pline + " };\nstatic xmlChRangeGroup xml%sG = {%s,%s,%s,%s};\n\n"
% (name, numshort, numlong, sptr, lptr))
output.write(
"""static xmlUnicodeNameTable xmlUnicodeBlockTbl = {xmlUnicodeBlocks, %s};
static xmlUnicodeNameTable xmlUnicodeCatTbl = {xmlUnicodeCats, %s};
/**
* xmlUnicodeLookup:
* @tptr: pointer to the name table
* @name: name to be found
*
* binary table lookup for user-supplied name
*
* Returns pointer to range function if found, otherwise NULL
*/
static xmlIntFunc
*xmlUnicodeLookup(xmlUnicodeNameTable *tptr, const char *tname) {
int low, high, mid, cmp;
xmlUnicodeRange *sptr;
if ((tptr == NULL) || (tname == NULL)) return(NULL);
low = 0;
high = tptr->numentries - 1;
sptr = tptr->table;
while (low <= high) {
mid = (low + high) / 2;
if ((cmp=strcmp(tname, sptr[mid].rangename)) == 0)
return (sptr[mid].func);
if (cmp < 0)
high = mid - 1;
else
low = mid + 1;
}
return (NULL);
}
""" % (len(BlockNames), len(Categories)) )
for block in bkeys:
name = string.replace(block, '-', '')
header.write("XMLPUBFUN int XMLCALL xmlUCSIs%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIs%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Block\n"%
(block))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIs%s(int code) {\n return(" % name)
flag = 0
for (start, end) in BlockNames[block]:
if flag:
output.write(" ||\n ")
else:
flag = 1
output.write("((code >= %s) && (code <= %s))" % (start, end))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsBlock\t(int code, const char *block);\n\n")
output.write(
"""/**
* xmlUCSIsBlock:
* @code: UCS code point
* @block: UCS block name
*
* Check whether the character is part of the UCS Block
*
* Returns 1 if true, 0 if false and -1 on unknown block
*/
int
xmlUCSIsBlock(int code, const char *block) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeBlockTbl, block);
if (func == NULL)
return (-1);
return (func(code));
}
""")
for name in ckeys:
ranges = Categories[name]
header.write("XMLPUBFUN int XMLCALL xmlUCSIsCat%s\t(int code);\n" % name)
output.write("/**\n * xmlUCSIsCat%s:\n * @code: UCS code point\n" % (name))
output.write(" *\n * Check whether the character is part of %s UCS Category\n"%
(name))
output.write(" *\n * Returns 1 if true 0 otherwise\n */\n");
output.write("int\nxmlUCSIsCat%s(int code) {\n" % name)
if len(Categories[name]) > minTableSize:
output.write(" return(xmlCharInRange((unsigned int)code, &xml%sG)"
% name)
else:
start = 1
for range in ranges:
(begin, end) = range;
if start:
output.write(" return(");
start = 0
else:
output.write(" ||\n ");
if (begin == end):
output.write("(code == %s)" % (hex(begin)))
else:
output.write("((code >= %s) && (code <= %s))" % (
hex(begin), hex(end)))
output.write(");\n}\n\n")
header.write("\nXMLPUBFUN int XMLCALL xmlUCSIsCat\t(int code, const char *cat);\n")
output.write(
"""/**
* xmlUCSIsCat:
* @code: UCS code point
* @cat: UCS Category name
*
* Check whether the character is part of the UCS Category
*
* Returns 1 if true, 0 if false and -1 on unknown category
*/
int
xmlUCSIsCat(int code, const char *cat) {
xmlIntFunc *func;
func = xmlUnicodeLookup(&xmlUnicodeCatTbl, cat);
if (func == NULL)
return (-1);
return (func(code));
}
#define bottom_xmlunicode
#include "elfgcchack.h"
#endif /* LIBXML_UNICODE_ENABLED */
""")
header.write("""
#ifdef __cplusplus
}
#endif
#endif /* LIBXML_UNICODE_ENABLED */
#endif /* __XML_UNICODE_H__ */
""");
header.close()
output.close()
| gpl-2.0 |
erdincay/youtube-dl | youtube_dl/extractor/radiobremen.py | 128 | 2471 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import parse_duration
class RadioBremenIE(InfoExtractor):
_VALID_URL = r'http?://(?:www\.)?radiobremen\.de/mediathek/(?:index\.html)?\?id=(?P<id>[0-9]+)'
IE_NAME = 'radiobremen'
_TEST = {
'url': 'http://www.radiobremen.de/mediathek/index.html?id=114720',
'info_dict': {
'id': '114720',
'ext': 'mp4',
'duration': 1685,
'width': 512,
'title': 'buten un binnen vom 22. Dezember',
'thumbnail': 're:https?://.*\.jpg$',
'description': 'Unter anderem mit diesen Themen: 45 Flüchtlinge sind in Worpswede angekommen +++ Freies Internet für alle: Bremer arbeiten an einem flächendeckenden W-Lan-Netzwerk +++ Aktivisten kämpfen für das Unibad +++ So war das Wetter 2014 +++',
},
}
def _real_extract(self, url):
video_id = self._match_id(url)
meta_url = "http://www.radiobremen.de/apps/php/mediathek/metadaten.php?id=%s" % video_id
meta_doc = self._download_webpage(
meta_url, video_id, 'Downloading metadata')
title = self._html_search_regex(
r"<h1.*>(?P<title>.+)</h1>", meta_doc, "title")
description = self._html_search_regex(
r"<p>(?P<description>.*)</p>", meta_doc, "description", fatal=False)
duration = parse_duration(self._html_search_regex(
r"Länge:</td>\s+<td>(?P<duration>[0-9]+:[0-9]+)</td>",
meta_doc, "duration", fatal=False))
page_doc = self._download_webpage(
url, video_id, 'Downloading video information')
mobj = re.search(
r"ardformatplayerclassic\(\'playerbereich\',\'(?P<width>[0-9]+)\',\'.*\',\'(?P<video_id>[0-9]+)\',\'(?P<secret>[0-9]+)\',\'(?P<thumbnail>.+)\',\'\'\)",
page_doc)
video_url = (
"http://dl-ondemand.radiobremen.de/mediabase/%s/%s_%s_%s.mp4" %
(video_id, video_id, mobj.group("secret"), mobj.group('width')))
formats = [{
'url': video_url,
'ext': 'mp4',
'width': int(mobj.group("width")),
}]
return {
'id': video_id,
'title': title,
'description': description,
'duration': duration,
'formats': formats,
'thumbnail': mobj.group('thumbnail'),
}
| unlicense |
TEAM-Gummy/platform_external_chromium_org | tools/python/google/gethash_timer.py | 182 | 4366 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Issue a series of GetHash requests to the SafeBrowsing servers and measure
the response times.
Usage:
$ ./gethash_timer.py --period=600 --samples=20 --output=resp.csv
--period (or -p): The amount of time (in seconds) to wait between GetHash
requests. Using a value of more than 300 (5 minutes) to
include the effect of DNS.
--samples (or -s): The number of requests to issue. If this parameter is not
specified, the test will run indefinitely.
--output (or -o): The path to a file where the output will be written in
CSV format: sample_number,response_code,elapsed_time_ms
"""
import getopt
import httplib
import sys
import time
_GETHASH_HOST = 'safebrowsing.clients.google.com'
_GETHASH_REQUEST = (
'/safebrowsing/gethash?client=googleclient&appver=1.0&pver=2.1')
# Global logging file handle.
g_file_handle = None
def IssueGetHash(prefix):
'''Issue one GetHash request to the safebrowsing servers.
Args:
prefix: A 4 byte value to look up on the server.
Returns:
The HTTP response code for the GetHash request.
'''
body = '4:4\n' + prefix
h = httplib.HTTPConnection(_GETHASH_HOST)
h.putrequest('POST', _GETHASH_REQUEST)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
response_code = h.getresponse().status
h.close()
return response_code
def TimedGetHash(prefix):
'''Measure the amount of time it takes to receive a GetHash response.
Args:
prefix: A 4 byte value to look up on the the server.
Returns:
A tuple of HTTP resonse code and the response time (in milliseconds).
'''
start = time.time()
response_code = IssueGetHash(prefix)
return response_code, (time.time() - start) * 1000
def RunTimedGetHash(period, samples=None):
'''Runs an experiment to measure the amount of time it takes to receive
multiple responses from the GetHash servers.
Args:
period: A floating point value that indicates (in seconds) the delay
between requests.
samples: An integer value indicating the number of requests to make.
If 'None', the test continues indefinitely.
Returns:
None.
'''
global g_file_handle
prefix = '\x50\x61\x75\x6c'
sample_count = 1
while True:
response_code, elapsed_time = TimedGetHash(prefix)
LogResponse(sample_count, response_code, elapsed_time)
sample_count += 1
if samples is not None and sample_count == samples:
break
time.sleep(period)
def LogResponse(sample_count, response_code, elapsed_time):
'''Output the response for one GetHash query.
Args:
sample_count: The current sample number.
response_code: The HTTP response code for the GetHash request.
elapsed_time: The round-trip time (in milliseconds) for the
GetHash request.
Returns:
None.
'''
global g_file_handle
output_list = (sample_count, response_code, elapsed_time)
print 'Request: %d, status: %d, elapsed time: %f ms' % output_list
if g_file_handle is not None:
g_file_handle.write(('%d,%d,%f' % output_list) + '\n')
g_file_handle.flush()
def SetupOutputFile(file_name):
'''Open a file for logging results.
Args:
file_name: A path to a file to store the output.
Returns:
None.
'''
global g_file_handle
g_file_handle = open(file_name, 'w')
def main():
period = 10
samples = None
options, args = getopt.getopt(sys.argv[1:],
's:p:o:',
['samples=', 'period=', 'output='])
for option, value in options:
if option == '-s' or option == '--samples':
samples = int(value)
elif option == '-p' or option == '--period':
period = float(value)
elif option == '-o' or option == '--output':
file_name = value
else:
print 'Bad option: %s' % option
return 1
try:
print 'Starting Timed GetHash ----------'
SetupOutputFile(file_name)
RunTimedGetHash(period, samples)
except KeyboardInterrupt:
pass
print 'Timed GetHash complete ----------'
g_file_handle.close()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause |
schwarz/youtube-dl | youtube_dl/extractor/facebook.py | 74 | 7339 | from __future__ import unicode_literals
import json
import re
import socket
from .common import InfoExtractor
from ..compat import (
compat_http_client,
compat_str,
compat_urllib_error,
compat_urllib_parse_unquote,
compat_urllib_request,
)
from ..utils import (
ExtractorError,
int_or_none,
limit_length,
urlencode_postdata,
get_element_by_id,
clean_html,
)
class FacebookIE(InfoExtractor):
_VALID_URL = r'''(?x)
https?://(?:\w+\.)?facebook\.com/
(?:[^#]*?\#!/)?
(?:
(?:video/video\.php|photo\.php|video\.php|video/embed)\?(?:.*?)
(?:v|video_id)=|
[^/]+/videos/(?:[^/]+/)?
)
(?P<id>[0-9]+)
(?:.*)'''
_LOGIN_URL = 'https://www.facebook.com/login.php?next=http%3A%2F%2Ffacebook.com%2Fhome.php&login_attempt=1'
_CHECKPOINT_URL = 'https://www.facebook.com/checkpoint/?next=http%3A%2F%2Ffacebook.com%2Fhome.php&_fb_noscript=1'
_NETRC_MACHINE = 'facebook'
IE_NAME = 'facebook'
_TESTS = [{
'url': 'https://www.facebook.com/video.php?v=637842556329505&fref=nf',
'md5': '6a40d33c0eccbb1af76cf0485a052659',
'info_dict': {
'id': '637842556329505',
'ext': 'mp4',
'title': 're:Did you know Kei Nishikori is the first Asian man to ever reach a Grand Slam',
'uploader': 'Tennis on Facebook',
}
}, {
'note': 'Video without discernible title',
'url': 'https://www.facebook.com/video.php?v=274175099429670',
'info_dict': {
'id': '274175099429670',
'ext': 'mp4',
'title': 'Facebook video #274175099429670',
'uploader': 'Asif Nawab Butt',
},
'expected_warnings': [
'title'
]
}, {
'url': 'https://www.facebook.com/video.php?v=10204634152394104',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/amogood/videos/1618742068337349/?fref=nf',
'only_matching': True,
}, {
'url': 'https://www.facebook.com/ChristyClarkForBC/videos/vb.22819070941/10153870694020942/?type=2&theater',
'only_matching': True,
}]
def _login(self):
(useremail, password) = self._get_login_info()
if useremail is None:
return
login_page_req = compat_urllib_request.Request(self._LOGIN_URL)
login_page_req.add_header('Cookie', 'locale=en_US')
login_page = self._download_webpage(login_page_req, None,
note='Downloading login page',
errnote='Unable to download login page')
lsd = self._search_regex(
r'<input type="hidden" name="lsd" value="([^"]*)"',
login_page, 'lsd')
lgnrnd = self._search_regex(r'name="lgnrnd" value="([^"]*?)"', login_page, 'lgnrnd')
login_form = {
'email': useremail,
'pass': password,
'lsd': lsd,
'lgnrnd': lgnrnd,
'next': 'http://facebook.com/home.php',
'default_persistent': '0',
'legacy_return': '1',
'timezone': '-60',
'trynum': '1',
}
request = compat_urllib_request.Request(self._LOGIN_URL, urlencode_postdata(login_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
try:
login_results = self._download_webpage(request, None,
note='Logging in', errnote='unable to fetch login page')
if re.search(r'<form(.*)name="login"(.*)</form>', login_results) is not None:
self._downloader.report_warning('unable to log in: bad username/password, or exceded login rate limit (~3/min). Check credentials or wait.')
return
check_form = {
'fb_dtsg': self._search_regex(r'name="fb_dtsg" value="(.+?)"', login_results, 'fb_dtsg'),
'h': self._search_regex(
r'name="h"\s+(?:\w+="[^"]+"\s+)*?value="([^"]+)"', login_results, 'h'),
'name_action_selected': 'dont_save',
}
check_req = compat_urllib_request.Request(self._CHECKPOINT_URL, urlencode_postdata(check_form))
check_req.add_header('Content-Type', 'application/x-www-form-urlencoded')
check_response = self._download_webpage(check_req, None,
note='Confirming login')
if re.search(r'id="checkpointSubmitButton"', check_response) is not None:
self._downloader.report_warning('Unable to confirm login, you have to login in your brower and authorize the login.')
except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
self._downloader.report_warning('unable to log in: %s' % compat_str(err))
return
def _real_initialize(self):
self._login()
def _real_extract(self, url):
video_id = self._match_id(url)
url = 'https://www.facebook.com/video/video.php?v=%s' % video_id
webpage = self._download_webpage(url, video_id)
BEFORE = '{swf.addParam(param[0], param[1]);});\n'
AFTER = '.forEach(function(variable) {swf.addVariable(variable[0], variable[1]);});'
m = re.search(re.escape(BEFORE) + '(.*?)' + re.escape(AFTER), webpage)
if not m:
m_msg = re.search(r'class="[^"]*uiInterstitialContent[^"]*"><div>(.*?)</div>', webpage)
if m_msg is not None:
raise ExtractorError(
'The video is not available, Facebook said: "%s"' % m_msg.group(1),
expected=True)
else:
raise ExtractorError('Cannot parse data')
data = dict(json.loads(m.group(1)))
params_raw = compat_urllib_parse_unquote(data['params'])
params = json.loads(params_raw)
video_data = params['video_data'][0]
formats = []
for quality in ['sd', 'hd']:
src = video_data.get('%s_src' % quality)
if src is not None:
formats.append({
'format_id': quality,
'url': src,
})
if not formats:
raise ExtractorError('Cannot find video formats')
video_title = self._html_search_regex(
r'<h2\s+[^>]*class="uiHeaderTitle"[^>]*>([^<]*)</h2>', webpage, 'title',
default=None)
if not video_title:
video_title = self._html_search_regex(
r'(?s)<span class="fbPhotosPhotoCaption".*?id="fbPhotoPageCaption"><span class="hasCaption">(.*?)</span>',
webpage, 'alternative title', fatal=False)
video_title = limit_length(video_title, 80)
if not video_title:
video_title = 'Facebook video #%s' % video_id
uploader = clean_html(get_element_by_id('fbPhotoPageAuthorName', webpage))
return {
'id': video_id,
'title': video_title,
'formats': formats,
'duration': int_or_none(video_data.get('video_duration')),
'thumbnail': video_data.get('thumbnail_src'),
'uploader': uploader,
}
| unlicense |
reviewboard/ReviewBot | bot/reviewbot/utils/process.py | 1 | 5223 | from __future__ import unicode_literals
import logging
import os
import subprocess
import sys
def execute(command,
env=None,
split_lines=False,
ignore_errors=False,
extra_ignore_errors=(),
translate_newlines=True,
with_errors=True,
return_errors=False,
none_on_ignored_error=False):
"""Execute a command and return the output.
Args:
command (list of unicode):
The command to run.
env (dict, optional):
The environment variables to use when running the process.
split_lines (bool, optional):
Whether to return the output as a list (split on newlines) or a
single string.
ignore_errors (bool, optional):
Whether to ignore non-zero return codes from the command.
extra_ignore_errors (tuple of int, optional):
Process return codes to ignore.
translate_newlines (bool, optional):
Whether to convert platform-specific newlines (such as \\r\\n) to
the regular newline (\\n) character.
with_errors (bool, optional):
Whether the stderr output should be merged in with the stdout
output or just ignored.
return_errors (bool, optional)
Whether to return the content of the stderr stream. If set, this
argument takes precedence over the ``with_errors`` argument.
none_on_ignored_error (bool, optional):
Whether to return ``None`` if there was an ignored error (instead
of the process output).
Returns:
object:
This returns a single value or 2-tuple, depending on the arguments.
If ``return_errors`` is ``True``, this will return the standard output
and standard errors as strings in a tuple. Otherwise, this will just
result the standard output as a string.
If ``split_lines`` is ``True``, those strings will instead be lists
of lines (preserving newlines).
"""
if isinstance(command, list):
logging.debug(subprocess.list2cmdline(command))
else:
logging.debug(command)
if env:
env.update(os.environ)
else:
env = os.environ.copy()
env['LC_ALL'] = 'en_US.UTF-8'
env['LANGUAGE'] = 'en_US.UTF-8'
if with_errors and not return_errors:
errors_output = subprocess.STDOUT
else:
errors_output = subprocess.PIPE
if sys.platform.startswith('win'):
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=errors_output,
shell=False,
universal_newlines=translate_newlines,
env=env)
else:
p = subprocess.Popen(command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=errors_output,
shell=False,
close_fds=True,
universal_newlines=translate_newlines,
env=env)
data, errors = p.communicate()
if split_lines:
data = data.splitlines(True)
if return_errors:
if split_lines:
errors = errors.splitlines(True)
else:
errors = None
rc = p.wait()
if rc and not ignore_errors and rc not in extra_ignore_errors:
raise Exception('Failed to execute command: %s\n%s' % (command, data))
if rc and none_on_ignored_error:
data = None
if return_errors:
return data, errors
else:
return data
def is_exe_in_path(name, cache={}):
"""Check whether an executable is in the user's search path.
If the provided filename is an absolute path, it will be checked
directly without looking in the search path.
Version Changed:
3.0:
Added the ``cache`` parameter.
Args:
name (unicode):
The name of the executable, without any platform-specific
executable extension. The extension will be appended if necessary.
cache (dict, optional):
A result cache, to avoid repeated lookups.
This will store the paths to any files that are found (or ``None``
if not found).
By default, the cache is shared across all calls. A custom cache
can be provided instead.
Returns:
boolean:
True if the executable can be found in the execution path.
"""
if sys.platform == 'win32' and not name.endswith('.exe'):
name += '.exe'
if name in cache:
return cache[name]
path = None
if os.path.isabs(name):
if os.path.exists(name):
path = name
else:
for dirname in os.environ['PATH'].split(os.pathsep):
temp_path = os.path.abspath(os.path.join(dirname, name))
if os.path.exists(temp_path):
path = temp_path
break
cache[name] = path
return path is not None
| mit |
tychon/physics | physics/datacursors.py | 1 | 6645 | # This module offers two Cursors:
# * DataCursor,
# where you have to click the data point, and
# * FollowDotCursor,
# where the bubble is always on the point
# nearest to your pointer.
#
# All the code was copied from
# http://stackoverflow.com/a/13306887
# DataCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# scat = ax.scatter(x, y)
# DataCursor(scat, x, y)
# plt.show()
# FollowDotCursor Example
# x=[1,2,3,4,5]
# y=[6,7,8,9,10]
# fig = plt.figure()
# ax = fig.add_subplot(1, 1, 1)
# ax.scatter(x, y)
# cursor = FollowDotCursor(ax, x, y)
# plt.show()
import numpy as np
import matplotlib.pyplot as plt
import scipy.spatial as spatial
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
def fmt(x, y):
    """Default annotation formatter: a two-line 'x: ... / y: ...' label
    with two decimal places."""
    return 'x: %0.2f\ny: %0.2f' % (x, y)
# http://stackoverflow.com/a/4674445/190597
class DataCursor(object):
    """A simple data cursor widget that displays the x,y location of a
    matplotlib artist when it is selected."""
    def __init__(self, artists, x = [], y = [], tolerance = 5, offsets = (-20, 20),
        formatter = fmt, display_all = False):
        """Create the data cursor and connect it to the relevant figure.

        "artists" is the matplotlib artist or sequence of artists that will be
        selected.
        "tolerance" is the radius (in points) that the mouse click must be
        within to select the artist.
        "offsets" is a tuple of (x,y) offsets in points from the selected
        point to the displayed annotation box
        "formatter" is a callback function which takes 2 numeric arguments and
        returns a string
        "display_all" controls whether more than one annotation box will
        be shown if there are multiple axes. Only one will be shown
        per-axis, regardless.
        """
        # The default-argument lists x/y are only read here (never mutated),
        # so the shared mutable default is harmless.
        self._points = np.column_stack((x,y))
        self.formatter = formatter
        self.offsets = offsets
        self.display_all = display_all
        # Accept a single artist or a sequence of artists.
        # NOTE(review): cbook.iterable was removed in newer matplotlib
        # releases — verify against the matplotlib version in use.
        if not cbook.iterable(artists):
            artists = [artists]
        self.artists = artists
        # De-duplicate: several artists may share an axes / figure.
        self.axes = tuple(set(art.axes for art in self.artists))
        self.figures = tuple(set(ax.figure for ax in self.axes))
        # One (initially hidden) annotation box per axes.
        self.annotations = {}
        for ax in self.axes:
            self.annotations[ax] = self.annotate(ax)
        # Make each artist pickable within `tolerance` points of a click.
        for artist in self.artists:
            artist.set_picker(tolerance)
        # This object is callable; it is the pick-event handler itself.
        for fig in self.figures:
            fig.canvas.mpl_connect('pick_event', self)
    def annotate(self, ax):
        """Draws and hides the annotation box for the given axis "ax"."""
        annotation = ax.annotate(self.formatter, xy = (0, 0), ha = 'right',
                xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
                bbox = dict(boxstyle = 'round,pad=0.5', fc = 'yellow', alpha = 0.5),
                arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0')
                )
        annotation.set_visible(False)
        return annotation
    def snap(self, x, y):
        """Return the value in self._points closest to (x, y).

        Closest by squared Euclidean distance; nanargmin skips points
        containing NaNs.
        """
        idx = np.nanargmin(((self._points - (x,y))**2).sum(axis = -1))
        return self._points[idx]
    def __call__(self, event):
        """Intended to be called through "mpl_connect"."""
        # Rather than trying to interpolate, just display the clicked coords
        # This will only be called if it's within "tolerance", anyway.
        x, y = event.mouseevent.xdata, event.mouseevent.ydata
        annotation = self.annotations[event.artist.axes]
        # xdata is None when the click landed outside the data area.
        if x is not None:
            if not self.display_all:
                # Hide any other annotation boxes...
                for ann in self.annotations.values():
                    ann.set_visible(False)
            # Update the annotation in the current axis..
            x, y = self.snap(x, y)
            annotation.xy = x, y
            annotation.set_text(self.formatter(x, y))
            annotation.set_visible(True)
            event.canvas.draw()
class FollowDotCursor(object):
    """Display the x,y location of the nearest data point.

    A green dot is drawn on the data point closest to the mouse pointer,
    and an annotation bubble with its coordinates follows the pointer as
    it moves over the axes.
    """
    def __init__(self, ax, x, y, tolerance=5, formatter=fmt, offsets=(-20, 20)):
        try:
            x = np.asarray(x, dtype='float')
        except (TypeError, ValueError):
            # Fall back to treating x as date-like values. Imported locally
            # since it is only needed on this path; the original code
            # referenced an undefined name `mdates` here, so this branch
            # always raised NameError.
            import matplotlib.dates as mdates
            x = np.asarray(mdates.date2num(x), dtype='float')
        y = np.asarray(y, dtype='float')
        self._points = np.column_stack((x, y))
        self.offsets = offsets
        # self.scale ends up as y-extent / x-extent; scaled() multiplies the
        # x column by it so both dimensions span comparable ranges in the
        # KD-tree's distance metric.
        self.scale = x.ptp()
        self.scale = y.ptp() / self.scale if self.scale else 1
        self.tree = spatial.cKDTree(self.scaled(self._points))
        self.formatter = formatter
        self.tolerance = tolerance
        self.ax = ax
        self.fig = ax.figure
        self.ax.xaxis.set_label_position('top')
        # Marker dot, initially parked at the data minimum corner.
        self.dot = ax.scatter(
            [x.min()], [y.min()], s=130, color='green', alpha=0.7)
        self.annotation = self.setup_annotation()
        plt.connect('motion_notify_event', self)
    def scaled(self, points):
        """Return *points* with the x column multiplied by self.scale."""
        points = np.asarray(points)
        return points * (self.scale, 1)
    def __call__(self, event):
        """Handle motion_notify_event: snap dot and annotation to the
        nearest data point."""
        ax = self.ax
        # event.inaxes is always the current axis. If you use twinx, ax could be
        # a different axis.
        if event.inaxes == ax:
            x, y = event.xdata, event.ydata
        elif event.inaxes is None:
            return
        else:
            # Convert device pixels to this axes' data coordinates.
            inv = ax.transData.inverted()
            x, y = inv.transform([(event.x, event.y)]).ravel()
        annotation = self.annotation
        x, y = self.snap(x, y)
        annotation.xy = x, y
        annotation.set_text(self.formatter(x, y))
        self.dot.set_offsets((x, y))
        event.canvas.draw()
    def setup_annotation(self):
        """Draw and hide the annotation box."""
        annotation = self.ax.annotate(
            '', xy=(0, 0), ha = 'right',
            xytext = self.offsets, textcoords = 'offset points', va = 'bottom',
            bbox = dict(
                boxstyle='round,pad=0.5', fc='yellow', alpha=0.75),
            arrowprops = dict(
                arrowstyle='->', connectionstyle='arc3,rad=0'))
        return annotation
    def snap(self, x, y):
        """Return the value in self.tree closest to x, y.

        Nearest neighbor under the Manhattan metric (p=1) in scaled
        coordinates.
        """
        dist, idx = self.tree.query(self.scaled((x, y)), k=1, p=1)
        try:
            return self._points[idx]
        except IndexError:
            # IndexError: index out of bounds
            return self._points[0]
| mit |
skycucumber/restful | python/venv/lib/python2.7/site-packages/sqlalchemy/dialects/sqlite/pysqlcipher.py | 80 | 4129 | # sqlite/pysqlcipher.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: sqlite+pysqlcipher
:name: pysqlcipher
:dbapi: pysqlcipher
:connectstring: sqlite+pysqlcipher://:passphrase/file_path[?kdf_iter=<iter>]
:url: https://pypi.python.org/pypi/pysqlcipher
``pysqlcipher`` is a fork of the standard ``pysqlite`` driver to make
use of the `SQLCipher <https://www.zetetic.net/sqlcipher>`_ backend.
.. versionadded:: 0.9.9
Driver
------
The driver here is the `pysqlcipher <https://pypi.python.org/pypi/pysqlcipher>`_
driver, which makes use of the SQLCipher engine. This system essentially
introduces new PRAGMA commands to SQLite which allows the setting of a
passphrase and other encryption parameters, allowing the database
file to be encrypted.
Connect Strings
---------------
The format of the connect string is in every way the same as that
of the :mod:`~sqlalchemy.dialects.sqlite.pysqlite` driver, except that the
"password" field is now accepted, which should contain a passphrase::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db')
For an absolute file path, two leading slashes should be used for the
database name::
e = create_engine('sqlite+pysqlcipher://:testing@//path/to/foo.db')
A selection of additional encryption-related pragmas supported by SQLCipher
as documented at https://www.zetetic.net/sqlcipher/sqlcipher-api/ can be passed
in the query string, and will result in that PRAGMA being called for each
new connection. Currently, ``cipher``, ``kdf_iter``
``cipher_page_size`` and ``cipher_use_hmac`` are supported::
e = create_engine('sqlite+pysqlcipher://:testing@/foo.db?cipher=aes-256-cfb&kdf_iter=64000')
Pooling Behavior
----------------
The driver makes a change to the default pool behavior of pysqlite
as described in :ref:`pysqlite_threading_pooling`. The pysqlcipher driver
has been observed to be significantly slower on connection than the
pysqlite driver, most likely due to the encryption overhead, so the
dialect here defaults to using the :class:`.SingletonThreadPool`
implementation,
instead of the :class:`.NullPool` pool used by pysqlite. As always, the pool
implementation is entirely configurable using the
:paramref:`.create_engine.poolclass` parameter; the :class:`.StaticPool` may
be more feasible for single-threaded use, or :class:`.NullPool` may be used
to prevent unencrypted connections from being held open for long periods of
time, at the expense of slower startup time for new connections.
"""
from __future__ import absolute_import
from .pysqlite import SQLiteDialect_pysqlite
from ...engine import url as _url
from ... import pool
class SQLiteDialect_pysqlcipher(SQLiteDialect_pysqlite):
    driver = 'pysqlcipher'

    # Encryption-related PRAGMAs that may be supplied in the URL query
    # string; each is emitted once per new DBAPI connection in connect().
    pragmas = ('kdf_iter', 'cipher', 'cipher_page_size', 'cipher_use_hmac')

    @classmethod
    def dbapi(cls):
        # Deferred import so the dialect module can load without the
        # pysqlcipher package installed.
        from pysqlcipher import dbapi2 as sqlcipher
        return sqlcipher

    @classmethod
    def get_pool_class(cls, url):
        # SQLCipher connections are slow to open (key derivation), so keep
        # one connection per thread instead of pysqlite's NullPool.
        return pool.SingletonThreadPool

    def connect(self, *cargs, **cparams):
        # Open the DBAPI connection, then apply the key and any cipher
        # PRAGMAs before handing the connection back.
        passphrase = cparams.pop('passphrase', '')

        # Collect the supported pragma values (None means "not given").
        pragmas = dict(
            (key, cparams.pop(key, None)) for key in
            self.pragmas
        )

        conn = super(SQLiteDialect_pysqlcipher, self).\
            connect(*cargs, **cparams)
        # NOTE(review): the passphrase is interpolated unescaped into the
        # PRAGMA string; a passphrase containing a double quote would break
        # the statement — confirm whether quoting/escaping is needed.
        conn.execute('pragma key="%s"' % passphrase)
        for prag, value in pragmas.items():
            if value is not None:
                conn.execute('pragma %s=%s' % (prag, value))

        return conn

    def create_connect_args(self, url):
        # Rebuild the URL without the password so the base dialect does not
        # fold it into the database file path; the passphrase is forwarded
        # separately via the connect() options instead.
        super_url = _url.URL(
            url.drivername, username=url.username,
            host=url.host, database=url.database, query=url.query)
        c_args, opts = super(SQLiteDialect_pysqlcipher, self).\
            create_connect_args(super_url)
        opts['passphrase'] = url.password
        return c_args, opts

# Module-level hook: SQLAlchemy looks up `dialect` when resolving the
# sqlite+pysqlcipher:// driver name.
dialect = SQLiteDialect_pysqlcipher
phoebusliang/parallel-lettuce | tests/integration/lib/Django-1.3/django/contrib/localflavor/kw/forms.py | 310 | 1988 | """
Kuwait-specific Form helpers
"""
import re
from datetime import date
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, RegexField
from django.utils.translation import gettext as _
id_re = re.compile(r'^(?P<initial>\d{1})(?P<yy>\d\d)(?P<mm>\d\d)(?P<dd>\d\d)(?P<mid>\d{4})(?P<checksum>\d{1})')
class KWCivilIDNumberField(Field):
    """
    Kuwaiti Civil ID numbers are 12 digits, second to seventh digits
    represents the person's birthdate.

    Checks the following rules to determine the validity of the number:
        * The number consist of 12 digits.
        * The birthdate of the person is a valid date.
        * The calculated checksum equals to the last digit of the Civil ID.
    """
    default_error_messages = {
        'invalid': _('Enter a valid Kuwaiti Civil ID number'),
    }
    def has_valid_checksum(self, value):
        # Weighted sum of the first 11 digits, modulo 11; the 12th digit is
        # the check digit.
        weight = (2, 1, 6, 3, 7, 9, 10, 5, 8, 4, 2)
        calculated_checksum = 0
        for i in range(11):
            calculated_checksum += int(value[i]) * weight[i]
        remainder = calculated_checksum % 11
        checkdigit = 11 - remainder
        # NOTE(review): when remainder is 0 or 1, checkdigit is 11 or 10 and
        # can never equal a single digit, so such numbers are always
        # rejected — confirm this matches the official algorithm.
        if checkdigit != int(value[11]):
            return False
        return True
    def clean(self, value):
        # Run the base Field validation (required/empty handling) first.
        super(KWCivilIDNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return u''
        # Must be exactly 12 digits.
        if not re.match(r'^\d{12}$', value):
            raise ValidationError(self.error_messages['invalid'])
        # Split into initial / yy / mm / dd / mid / checksum groups.
        match = re.match(id_re, value)
        if not match:
            raise ValidationError(self.error_messages['invalid'])
        gd = match.groupdict()
        # Validate the embedded birthdate. NOTE(review): 'yy' is two digits,
        # so the year passed to date() is 0-99; this checks day/month
        # validity but not the century.
        try:
            d = date(int(gd['yy']), int(gd['mm']), int(gd['dd']))
        except ValueError:
            raise ValidationError(self.error_messages['invalid'])
        if not self.has_valid_checksum(value):
            raise ValidationError(self.error_messages['invalid'])
        return value
| gpl-3.0 |
cainmatt/django | tests/max_lengths/tests.py | 380 | 1589 | from __future__ import unicode_literals
import unittest
from .models import PersonWithCustomMaxLengths, PersonWithDefaultMaxLengths
class MaxLengthArgumentsTests(unittest.TestCase):
    """Verify the max_length attribute of each model field, for both the
    framework defaults and explicitly supplied values."""

    def verify_max_length(self, model, field, length):
        self.assertEqual(model._meta.get_field(field).max_length, length)

    def test_default_max_lengths(self):
        # Framework-default max_length per field type.
        expectations = (('email', 254), ('vcard', 100),
                        ('homepage', 200), ('avatar', 100))
        for field, expected in expectations:
            self.verify_max_length(PersonWithDefaultMaxLengths, field, expected)

    def test_custom_max_lengths(self):
        # All fields on this model were declared with max_length=250.
        for field in ('email', 'vcard', 'homepage', 'avatar'):
            self.verify_max_length(PersonWithCustomMaxLengths, field, 250)
class MaxLengthORMTests(unittest.TestCase):
    """Verify that values up to the custom max_length survive a round
    trip through the ORM."""

    def test_custom_max_lengths(self):
        base_args = {
            "email": "someone@example.com",
            "vcard": "vcard",
            "homepage": "http://example.com/",
            "avatar": "me.jpg",
        }
        # Longer than any of the default fields could hold.
        long_value = "X" * 250
        for field in ("email", "vcard", "homepage", "avatar"):
            create_args = dict(base_args)
            create_args[field] = long_value
            person = PersonWithCustomMaxLengths.objects.create(**create_args)
            self.assertEqual(getattr(person, field), long_value)
| bsd-3-clause |
sharad/calibre | src/cherrypy/process/plugins.py | 81 | 25628 | """Site services for use with a Web Site Process Bus."""
import os
import re
import signal as _signal
import sys
import time
import threading
from cherrypy._cpcompat import basestring, get_daemon, get_thread_ident, ntob, set
# _module__file__base is used by Autoreload to make
# absolute any filenames retrieved from sys.modules which are not
# already absolute paths. This is to work around Python's quirk
# of importing the startup script and using a relative filename
# for it in sys.modules.
#
# Autoreload examines sys.modules afresh every time it runs. If an application
# changes the current directory by executing os.chdir(), then the next time
# Autoreload runs, it will not be able to find any filenames which are
# not absolute paths, because the current directory is not the same as when the
# module was first imported. Autoreload will then wrongly conclude the file has
# "changed", and initiate the shutdown/re-exec sequence.
# See ticket #917.
# For this workaround to have a decent probability of success, this module
# needs to be imported as early as possible, before the app has much chance
# to change the working directory.
_module__file__base = os.getcwd()
class SimplePlugin(object):
    """Plugin base class which auto-subscribes methods for known channels."""

    bus = None
    """A :class:`Bus <cherrypy.process.wspbus.Bus>`, usually cherrypy.engine."""

    def __init__(self, bus):
        self.bus = bus

    def subscribe(self):
        """Register this object as a (multi-channel) listener on the bus."""
        for channel in self.bus.listeners:
            # A method named after the channel (start, stop, exit, ...)
            # becomes that channel's listener, if the subclass defines it.
            handler = getattr(self, channel, None)
            if handler is None:
                continue
            self.bus.subscribe(channel, handler)

    def unsubscribe(self):
        """Unregister this object as a listener on the bus."""
        for channel in self.bus.listeners:
            handler = getattr(self, channel, None)
            if handler is None:
                continue
            self.bus.unsubscribe(channel, handler)
class SignalHandler(object):
    """Register bus channels (and listeners) for system signals.

    You can modify what signals your application listens for, and what it does
    when it receives signals, by modifying :attr:`SignalHandler.handlers`,
    a dict of {signal name: callback} pairs. The default set is::

        handlers = {'SIGTERM': self.bus.exit,
                    'SIGHUP': self.handle_SIGHUP,
                    'SIGUSR1': self.bus.graceful,
                   }

    The :func:`SignalHandler.handle_SIGHUP`` method calls
    :func:`bus.restart()<cherrypy.process.wspbus.Bus.restart>`
    if the process is daemonized, but
    :func:`bus.exit()<cherrypy.process.wspbus.Bus.exit>`
    if the process is attached to a TTY. This is because Unix window
    managers tend to send SIGHUP to terminal windows when the user closes them.

    Feel free to add signals which are not available on every platform. The
    :class:`SignalHandler` will ignore errors raised from attempting to register
    handlers for unknown signals.
    """

    handlers = {}
    """A map from signal names (e.g. 'SIGTERM') to handlers (e.g. bus.exit)."""

    signals = {}
    """A map from signal numbers to names."""

    # Build the number -> name map once, at class-definition time.
    # 'SIG_' entries (SIG_DFL, SIG_IGN) are handler constants, not signals.
    for k, v in vars(_signal).items():
        if k.startswith('SIG') and not k.startswith('SIG_'):
            signals[v] = k
    # Don't leave the class-body loop variables behind as class attributes.
    del k, v

    def __init__(self, bus):
        self.bus = bus
        # Set default handlers
        self.handlers = {'SIGTERM': self.bus.exit,
                         'SIGHUP': self.handle_SIGHUP,
                         'SIGUSR1': self.bus.graceful,
                        }
        if sys.platform[:4] == 'java':
            # Jython cannot bind SIGUSR1; substitute SIGUSR2 and install a
            # custom SIGINT handler to work around a Jython bug.
            del self.handlers['SIGUSR1']
            self.handlers['SIGUSR2'] = self.bus.graceful
            self.bus.log("SIGUSR1 cannot be set on the JVM platform. "
                         "Using SIGUSR2 instead.")
            self.handlers['SIGINT'] = self._jython_SIGINT_handler
        # {signal number: previous handler}, filled by set_handler() so
        # unsubscribe() can restore the original handlers.
        self._previous_handlers = {}

    def _jython_SIGINT_handler(self, signum=None, frame=None):
        # See http://bugs.jython.org/issue1313
        self.bus.log('Keyboard Interrupt: shutting down bus')
        self.bus.exit()

    def subscribe(self):
        """Subscribe self.handlers to signals."""
        for sig, func in self.handlers.items():
            try:
                self.set_handler(sig, func)
            except ValueError:
                # Signal unknown on this platform; ignored by design (see
                # the class docstring).
                pass

    def unsubscribe(self):
        """Unsubscribe self.handlers from signals."""
        for signum, handler in self._previous_handlers.items():
            signame = self.signals[signum]
            if handler is None:
                self.bus.log("Restoring %s handler to SIG_DFL." % signame)
                handler = _signal.SIG_DFL
            else:
                self.bus.log("Restoring %s handler %r." % (signame, handler))
            try:
                our_handler = _signal.signal(signum, handler)
                if our_handler is None:
                    # signal() returned None: someone else replaced our
                    # handler behind our back.
                    self.bus.log("Restored old %s handler %r, but our "
                                 "handler was not registered." %
                                 (signame, handler), level=30)
            except ValueError:
                self.bus.log("Unable to restore %s handler %r." %
                             (signame, handler), level=40, traceback=True)

    def set_handler(self, signal, listener=None):
        """Subscribe a handler for the given signal (number or name).

        If the optional 'listener' argument is provided, it will be
        subscribed as a listener for the given signal's channel.

        If the given signal name or number is not available on the current
        platform, ValueError is raised.
        """
        # Normalize to both a number (for signal.signal) and a name (for
        # the bus channel), whichever form was passed in.
        if isinstance(signal, basestring):
            signum = getattr(_signal, signal, None)
            if signum is None:
                raise ValueError("No such signal: %r" % signal)
            signame = signal
        else:
            try:
                signame = self.signals[signal]
            except KeyError:
                raise ValueError("No such signal: %r" % signal)
            signum = signal
        # Remember the previous handler so unsubscribe() can restore it.
        prev = _signal.signal(signum, self._handle_signal)
        self._previous_handlers[signum] = prev
        if listener is not None:
            self.bus.log("Listening for %s." % signame)
            self.bus.subscribe(signame, listener)

    def _handle_signal(self, signum=None, frame=None):
        """Python signal handler (self.set_handler subscribes it for you)."""
        signame = self.signals[signum]
        self.bus.log("Caught signal %s." % signame)
        # Fan the OS signal out to bus listeners on the same-named channel.
        self.bus.publish(signame)

    def handle_SIGHUP(self):
        """Restart if daemonized, else exit."""
        if os.isatty(sys.stdin.fileno()):
            # not daemonized (may be foreground or background)
            self.bus.log("SIGHUP caught but not daemonized. Exiting.")
            self.bus.exit()
        else:
            self.bus.log("SIGHUP caught while daemonized. Restarting.")
            self.bus.restart()
try:
import pwd, grp
except ImportError:
pwd, grp = None, None
class DropPrivileges(SimplePlugin):
    """Drop privileges. uid/gid arguments not available on Windows.

    Special thanks to Gavin Baker: http://antonym.org/node/100.
    """

    def __init__(self, bus, umask=None, uid=None, gid=None):
        SimplePlugin.__init__(self, bus)
        # True once start() has dropped privileges; subsequent starts
        # (e.g. after bus.graceful) must not try to drop them again.
        self.finalized = False
        self.uid = uid
        self.gid = gid
        self.umask = umask

    def _get_uid(self):
        return self._uid
    def _set_uid(self, val):
        if val is not None:
            if pwd is None:
                # Not a Unix platform; uid cannot be applied.
                self.bus.log("pwd module not available; ignoring uid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Resolve a user name to its numeric uid.
                val = pwd.getpwnam(val)[2]
        self._uid = val
    uid = property(_get_uid, _set_uid,
        doc="The uid under which to run. Availability: Unix.")

    def _get_gid(self):
        return self._gid
    def _set_gid(self, val):
        if val is not None:
            if grp is None:
                # Not a Unix platform; gid cannot be applied.
                self.bus.log("grp module not available; ignoring gid.",
                             level=30)
                val = None
            elif isinstance(val, basestring):
                # Resolve a group name to its numeric gid.
                val = grp.getgrnam(val)[2]
        self._gid = val
    gid = property(_get_gid, _set_gid,
        doc="The gid under which to run. Availability: Unix.")

    def _get_umask(self):
        return self._umask
    def _set_umask(self, val):
        if val is not None:
            try:
                os.umask
            except AttributeError:
                self.bus.log("umask function not available; ignoring umask.",
                             level=30)
                val = None
        self._umask = val
    umask = property(_get_umask, _set_umask,
        doc="""The default permission mode for newly created files and directories.

        Usually expressed in octal format, for example, ``0644``.

        Availability: Unix, Windows.
        """)

    def start(self):
        # uid/gid
        def current_ids():
            """Return the current (uid, gid) if available."""
            name, group = None, None
            if pwd:
                name = pwd.getpwuid(os.getuid())[0]
            if grp:
                group = grp.getgrgid(os.getgid())[0]
            return name, group

        if self.finalized:
            if not (self.uid is None and self.gid is None):
                self.bus.log('Already running as uid: %r gid: %r' %
                             current_ids())
        else:
            if self.uid is None and self.gid is None:
                if pwd or grp:
                    self.bus.log('uid/gid not set', level=30)
            else:
                self.bus.log('Started as uid: %r gid: %r' % current_ids())
                # Drop the group first: once the uid is dropped, the process
                # may no longer have permission to change groups.
                if self.gid is not None:
                    os.setgid(self.gid)
                    os.setgroups([])
                if self.uid is not None:
                    os.setuid(self.uid)
                self.bus.log('Running as uid: %r gid: %r' % current_ids())

        # umask
        if self.finalized:
            if self.umask is not None:
                self.bus.log('umask already set to: %03o' % self.umask)
        else:
            if self.umask is None:
                self.bus.log('umask not set', level=30)
            else:
                old_umask = os.umask(self.umask)
                self.bus.log('umask old: %03o, new: %03o' %
                             (old_umask, self.umask))

        self.finalized = True

    # This is slightly higher than the priority for server.start
    # in order to facilitate the most common use: starting on a low
    # port (which requires root) and then dropping to another user.
    start.priority = 77
class Daemonizer(SimplePlugin):
    """Daemonize the running script.

    Use this with a Web Site Process Bus via::

        Daemonizer(bus).subscribe()

    When this component finishes, the process is completely decoupled from
    the parent environment. Please note that when this component is used,
    the return code from the parent process will still be 0 if a startup
    error occurs in the forked children. Errors in the initial daemonizing
    process still return proper exit codes. Therefore, if you use this
    plugin to daemonize, don't use the return code as an accurate indicator
    of whether the process fully started. In fact, that return code only
    indicates if the process successfully finished the first fork.
    """

    def __init__(self, bus, stdin='/dev/null', stdout='/dev/null',
                 stderr='/dev/null'):
        SimplePlugin.__init__(self, bus)
        self.stdin = stdin
        self.stdout = stdout
        self.stderr = stderr
        self.finalized = False

    def start(self):
        if self.finalized:
            self.bus.log('Already deamonized.')

        # forking has issues with threads:
        # http://www.opengroup.org/onlinepubs/000095399/functions/fork.html
        # "The general problem with making fork() work in a multi-threaded
        # world is what to do with all of the threads..."
        # So we check for active threads:
        if threading.activeCount() != 1:
            self.bus.log('There are %r active threads. '
                         'Daemonizing now may cause strange failures.' %
                         threading.enumerate(), level=30)

        # This is the classic Unix double-fork daemonization recipe.
        # See http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
        # (or http://www.faqs.org/faqs/unix-faq/programmer/faq/ section 1.7)
        # and http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012

        # Finish up with the current stdout/stderr
        sys.stdout.flush()
        sys.stderr.flush()

        # Do first fork.
        try:
            pid = os.fork()
            if pid == 0:
                # This is the child process. Continue.
                pass
            else:
                # This is the first parent. Exit, now that we've forked.
                self.bus.log('Forking once.')
                os._exit(0)
        except OSError:
            # Python raises OSError rather than returning negative numbers.
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #1 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        # Become session leader, detaching from the controlling terminal.
        os.setsid()

        # Do second fork
        # (so the daemon can never re-acquire a controlling terminal).
        try:
            pid = os.fork()
            if pid > 0:
                self.bus.log('Forking twice.')
                os._exit(0) # Exit second parent
        except OSError:
            exc = sys.exc_info()[1]
            sys.exit("%s: fork #2 failed: (%d) %s\n"
                     % (sys.argv[0], exc.errno, exc.strerror))

        # Don't pin any directory; clear any inherited umask.
        os.chdir("/")
        os.umask(0)

        si = open(self.stdin, "r")
        so = open(self.stdout, "a+")
        se = open(self.stderr, "a+")

        # os.dup2(fd, fd2) will close fd2 if necessary,
        # so we don't explicitly close stdin/out/err.
        # See http://docs.python.org/lib/os-fd-ops.html
        os.dup2(si.fileno(), sys.stdin.fileno())
        os.dup2(so.fileno(), sys.stdout.fileno())
        os.dup2(se.fileno(), sys.stderr.fileno())

        self.bus.log('Daemonized to PID: %s' % os.getpid())
        self.finalized = True
    start.priority = 65
class PIDFile(SimplePlugin):
    """Maintain a PID file via a WSPBus.

    The current process ID is written to ``pidfile`` on the bus 'start'
    channel (once) and the file is removed on 'exit'.
    """

    def __init__(self, bus, pidfile):
        SimplePlugin.__init__(self, bus)
        self.pidfile = pidfile
        # True once the PID has been written; repeated starts are no-ops.
        self.finalized = False

    def start(self):
        pid = os.getpid()
        if self.finalized:
            self.bus.log('PID %r already written to %r.' % (pid, self.pidfile))
        else:
            # Use a context manager so the file handle is closed
            # deterministically; the original leaked the handle until GC.
            with open(self.pidfile, "wb") as pidfile:
                pidfile.write(ntob("%s" % pid, 'utf8'))
            self.bus.log('PID %r written to %r.' % (pid, self.pidfile))
            self.finalized = True
    start.priority = 70

    def exit(self):
        # Best-effort cleanup: the file may never have been written, or may
        # already have been removed. Only filesystem errors are swallowed
        # (the original used a bare except that hid all failures).
        try:
            os.remove(self.pidfile)
        except OSError:
            pass
        else:
            self.bus.log('PID file removed: %r.' % self.pidfile)
class PerpetualTimer(threading._Timer):
    """A responsive subclass of threading._Timer whose run() method repeats.

    Use this timer only when you really need a very interruptible timer;
    this checks its 'finished' condition up to 20 times a second, which can
    result in pretty high CPU usage.
    """

    def __init__(self, *args, **kwargs):
        # Bug fix: run() logs through self.bus when the callback raises, but
        # threading._Timer's constructor never sets that attribute, so any
        # callback error previously died with AttributeError instead of
        # being logged. Accept an optional 'bus' keyword here.
        self.bus = kwargs.pop('bus', None)
        threading._Timer.__init__(self, *args, **kwargs)

    def run(self):
        while True:
            self.finished.wait(self.interval)
            if self.finished.isSet():
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log("Error in perpetual timer thread function %r." %
                                 self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise
class BackgroundTask(threading.Thread):
    """A subclass of threading.Thread whose run() method repeats.

    Use this class for most repeating tasks. It uses time.sleep() to wait
    for each interval, which isn't very responsive; that is, even if you call
    self.cancel(), you'll have to wait until the sleep() call finishes before
    the thread stops. To compensate, it defaults to being daemonic, which means
    it won't delay stopping the whole process.
    """

    def __init__(self, interval, function, args=[], kwargs={}, bus=None):
        threading.Thread.__init__(self)
        self.interval = interval
        self.function = function
        self.args = args
        self.kwargs = kwargs
        self.running = False
        # Optional bus used only for logging callback errors in run().
        self.bus = bus

    def cancel(self):
        # Signal run() to exit after its current sleep completes.
        self.running = False

    def run(self):
        self.running = True
        while self.running:
            # Sleep first so the initial callback fires one interval after
            # start(), not immediately.
            time.sleep(self.interval)
            # Re-check after waking: cancel() may have been called mid-sleep.
            if not self.running:
                return
            try:
                self.function(*self.args, **self.kwargs)
            except Exception:
                if self.bus:
                    self.bus.log("Error in background task thread function %r."
                                 % self.function, level=40, traceback=True)
                # Quit on first error to avoid massive logs.
                raise

    def _set_daemon(self):
        # NOTE(review): this overrides Python 2's Thread._set_daemon hook to
        # make the thread daemonic by default; Python 3 does not call this
        # method — verify daemon behavior if running under Python 3.
        return True
class Monitor(SimplePlugin):
    """WSPBus listener to periodically run a callback in its own thread."""

    callback = None
    """The function to call at intervals."""

    frequency = 60
    """The time in seconds between callback runs."""

    thread = None
    """A :class:`BackgroundTask<cherrypy.process.plugins.BackgroundTask>` thread."""

    def __init__(self, bus, callback, frequency=60, name=None):
        SimplePlugin.__init__(self, bus)
        self.callback = callback
        self.frequency = frequency
        self.thread = None
        self.name = name

    def start(self):
        """Start our callback in its own background thread."""
        # frequency <= 0 disables the monitor entirely.
        if self.frequency > 0:
            threadname = self.name or self.__class__.__name__
            if self.thread is None:
                self.thread = BackgroundTask(self.frequency, self.callback,
                                             bus = self.bus)
                self.thread.setName(threadname)
                self.thread.start()
                self.bus.log("Started monitor thread %r." % threadname)
            else:
                self.bus.log("Monitor thread %r already started." % threadname)
    start.priority = 70

    def stop(self):
        """Stop our callback's background task thread."""
        if self.thread is None:
            # Bug fix: '%' binds tighter than 'or', so the original
            # ("... %s." % self.name or self.__class__.__name__) always
            # interpolated first and the class-name fallback was dead code.
            self.bus.log("No thread running for %s." %
                         (self.name or self.__class__.__name__))
        else:
            # Never join the current thread (stop() may be invoked from
            # within the monitor's own callback).
            if self.thread is not threading.currentThread():
                name = self.thread.getName()
                self.thread.cancel()
                if not get_daemon(self.thread):
                    self.bus.log("Joining %r" % name)
                    self.thread.join()
                self.bus.log("Stopped thread %r." % name)
            self.thread = None

    def graceful(self):
        """Stop the callback's background task thread and restart it."""
        self.stop()
        self.start()
class Autoreloader(Monitor):
    """Monitor which re-executes the process when files change.

    This :ref:`plugin<plugins>` restarts the process (via :func:`os.execv`)
    if any of the files it monitors change (or is deleted). By default, the
    autoreloader monitors all imported modules; you can add to the
    set by adding to ``autoreload.files``::

        cherrypy.engine.autoreload.files.add(myFile)

    If there are imported files you do *not* wish to monitor, you can adjust the
    ``match`` attribute, a regular expression. For example, to stop monitoring
    cherrypy itself::

        cherrypy.engine.autoreload.match = r'^(?!cherrypy).+'

    Like all :class:`Monitor<cherrypy.process.plugins.Monitor>` plugins,
    the autoreload plugin takes a ``frequency`` argument. The default is
    1 second; that is, the autoreloader will examine files once each second.
    """

    files = None
    """The set of files to poll for modifications."""

    frequency = 1
    """The interval in seconds at which to poll for modified files."""

    match = '.*'
    """A regular expression by which to match filenames."""

    def __init__(self, bus, frequency=1, match='.*'):
        # {filename: last seen mtime (or None for files with no .py source)}
        self.mtimes = {}
        self.files = set()
        self.match = match
        # The Monitor periodically invokes self.run on its own thread.
        Monitor.__init__(self, bus, self.run, frequency)

    def start(self):
        """Start our own background task thread for self.run."""
        if self.thread is None:
            # Reset the baseline so pre-existing changes don't trigger an
            # immediate restart.
            self.mtimes = {}
        Monitor.start(self)
    start.priority = 70

    def sysfiles(self):
        """Return a Set of sys.modules filenames to monitor."""
        files = set()
        for k, m in sys.modules.items():
            if re.match(self.match, k):
                if hasattr(m, '__loader__') and hasattr(m.__loader__, 'archive'):
                    # Zip-imported module: watch the archive file itself.
                    f = m.__loader__.archive
                else:
                    f = getattr(m, '__file__', None)
                    if f is not None and not os.path.isabs(f):
                        # ensure absolute paths so a os.chdir() in the app doesn't break me
                        f = os.path.normpath(os.path.join(_module__file__base, f))
                files.add(f)
        return files

    def run(self):
        """Reload the process if registered files have been modified."""
        for filename in self.sysfiles() | self.files:
            if filename:
                # Watch the source file, not the compiled bytecode.
                if filename.endswith('.pyc'):
                    filename = filename[:-1]

                oldtime = self.mtimes.get(filename, 0)
                if oldtime is None:
                    # Module with no .py file. Skip it.
                    continue

                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # Either a module with no .py file, or it's been deleted.
                    mtime = None

                if filename not in self.mtimes:
                    # If a module has no .py file, this will be None.
                    self.mtimes[filename] = mtime
                else:
                    if mtime is None or mtime > oldtime:
                        # The file has been deleted or modified.
                        self.bus.log("Restarting because %s changed." % filename)
                        # Stop our own monitor thread before the process
                        # re-execs itself.
                        self.thread.cancel()
                        self.bus.log("Stopped thread %r." % self.thread.getName())
                        self.bus.restart()
                        return
class ThreadManager(SimplePlugin):
    """Manager for HTTP request threads.

    If you have control over thread creation and destruction, publish to
    the 'acquire_thread' and 'release_thread' channels (for each thread).
    This will register/unregister the current thread and publish to
    'start_thread' and 'stop_thread' listeners in the bus as needed.

    If threads are created and destroyed by code you do not control
    (e.g., Apache), then, at the beginning of every HTTP request,
    publish to 'acquire_thread' only. You should not publish to
    'release_thread' in this case, since you do not know whether
    the thread will be re-used or not. The bus will call
    'stop_thread' listeners for you when it stops.
    """

    threads = None
    """A map of {thread ident: index number} pairs."""

    def __init__(self, bus):
        self.threads = {}
        SimplePlugin.__init__(self, bus)
        # Make sure all four channels exist on the bus, even before any
        # listener subscribes to them.
        for channel in ('acquire_thread', 'start_thread',
                        'release_thread', 'stop_thread'):
            self.bus.listeners.setdefault(channel, set())

    def acquire_thread(self):
        """Run 'start_thread' listeners for the current thread.

        If the current thread has already been seen, any 'start_thread'
        listeners will not be run again.
        """
        ident = get_thread_ident()
        if ident in self.threads:
            return
        # Assign a small sequential index rather than using the raw ident
        # as the ID, because some platforms reuse thread idents.
        index = len(self.threads) + 1
        self.threads[ident] = index
        self.bus.publish('start_thread', index)

    def release_thread(self):
        """Release the current thread and run 'stop_thread' listeners."""
        index = self.threads.pop(get_thread_ident(), None)
        if index is not None:
            self.bus.publish('stop_thread', index)

    def stop(self):
        """Release all threads and run all 'stop_thread' listeners."""
        for index in list(self.threads.values()):
            self.bus.publish('stop_thread', index)
        self.threads.clear()
    graceful = stop
| gpl-3.0 |
reeshupatel/demo | keystone/openstack/common/threadgroup.py | 6 | 4806 | # Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
import eventlet
from eventlet import greenpool
from keystone.openstack.common import log as logging
from keystone.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
def _thread_done(gt, *args, **kwargs):
"""Callback function to be passed to GreenThread.link() when we spawn()
Calls the :class:`ThreadGroup` to notify if.
"""
kwargs['group'].thread_done(kwargs['thread'])
class Thread(object):
    """Thin wrapper around a greenthread.

    Registers a link callback on construction so the owning
    :class:`ThreadGroup` is told when the greenthread finishes and can
    drop this wrapper from its thread list.
    """

    def __init__(self, thread, group):
        self.thread = thread
        # Arrange for _thread_done() to fire on exit so the group's
        # bookkeeping stays accurate.
        self.thread.link(_thread_done, group=group, thread=self)

    def stop(self):
        """Kill the underlying greenthread."""
        self.thread.kill()

    def wait(self):
        """Block until the greenthread exits and return its result."""
        return self.thread.wait()

    def link(self, func, *args, **kwargs):
        """Attach an additional exit callback to the greenthread."""
        self.thread.link(func, *args, **kwargs)
class ThreadGroup(object):
    """The point of the ThreadGroup class is to:

    * keep track of timers and greenthreads (making it easier to stop them
      when need be).
    * provide an easy API to add timers.
    """

    def __init__(self, thread_pool_size=10):
        # Pool bounds how many greenthreads may run concurrently.
        self.pool = greenpool.GreenPool(thread_pool_size)
        # Thread wrappers currently alive; entries are removed by the
        # thread_done() callback when a greenthread exits.
        self.threads = []
        # LoopingCall timers started via add_timer()/add_dynamic_timer().
        self.timers = []

    def add_dynamic_timer(self, callback, initial_delay=None,
                          periodic_interval_max=None, *args, **kwargs):
        """Start a DynamicLoopingCall for ``callback`` and track it."""
        timer = loopingcall.DynamicLoopingCall(callback, *args, **kwargs)
        timer.start(initial_delay=initial_delay,
                    periodic_interval_max=periodic_interval_max)
        self.timers.append(timer)

    def add_timer(self, interval, callback, initial_delay=None,
                  *args, **kwargs):
        """Start a FixedIntervalLoopingCall for ``callback`` and track it."""
        pulse = loopingcall.FixedIntervalLoopingCall(callback, *args, **kwargs)
        pulse.start(interval=interval,
                    initial_delay=initial_delay)
        self.timers.append(pulse)

    def add_thread(self, callback, *args, **kwargs):
        """Spawn ``callback`` in the pool and return its Thread wrapper."""
        gt = self.pool.spawn(callback, *args, **kwargs)
        th = Thread(gt, self)
        self.threads.append(th)
        return th

    def thread_done(self, thread):
        """Drop a finished thread; invoked via the _thread_done() link."""
        self.threads.remove(thread)

    def _stop_threads(self):
        """Kill all tracked threads, skipping the calling thread."""
        current = threading.current_thread()

        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                # don't kill the current thread.
                # NOTE(review): ``x`` is a Thread wrapper while ``current``
                # comes from threading.current_thread(); an identity match
                # between the two looks impossible as written -- confirm the
                # intended behavior under eventlet monkey-patching.
                continue
            try:
                x.stop()
            except Exception as ex:
                # Best-effort teardown: log and keep stopping the rest.
                LOG.exception(ex)

    def stop_timers(self):
        """Stop all timers, logging (not raising) any errors."""
        for x in self.timers:
            try:
                x.stop()
            except Exception as ex:
                LOG.exception(ex)
        self.timers = []

    def stop(self, graceful=False):
        """stop function has the option of graceful=True/False.

        * In case of graceful=True, wait for all threads to be finished.
          Never kill threads.
        * In case of graceful=False, kill threads immediately.
        """
        self.stop_timers()
        if graceful:
            # In case of graceful=True, wait for all threads to be
            # finished, never kill threads
            self.wait()
        else:
            # In case of graceful=False(Default), kill threads
            # immediately
            self._stop_threads()

    def wait(self):
        """Block until all timers and all other threads have finished."""
        for x in self.timers:
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                # Normal greenthread termination; not an error.
                pass
            except Exception as ex:
                LOG.exception(ex)

        current = threading.current_thread()

        # Iterate over a copy of self.threads so thread_done doesn't
        # modify the list while we're iterating
        for x in self.threads[:]:
            if x is current:
                continue
            try:
                x.wait()
            except eventlet.greenlet.GreenletExit:
                pass
            except Exception as ex:
                LOG.exception(ex)
| apache-2.0 |
jscott413/maidsinharlem | flask/lib/python2.7/site-packages/whoosh/highlight.py | 22 | 33812 | # Copyright 2008 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""The highlight module contains classes and functions for displaying short
excerpts from hit documents in the search results you present to the user, with
query terms highlighted.
The highlighting system has four main elements.
* **Fragmenters** chop up the original text into __fragments__, based on the
locations of matched terms in the text.
* **Scorers** assign a score to each fragment, allowing the system to rank the
best fragments by whatever criterion.
* **Order functions** control in what order the top-scoring fragments are
presented to the user. For example, you can show the fragments in the order
they appear in the document (FIRST) or show higher-scoring fragments first
(SCORE)
* **Formatters** turn the fragment objects into human-readable output, such as
an HTML string.
See :doc:`/highlight` for more information.
"""
from __future__ import division
from collections import deque
from heapq import nlargest
from itertools import groupby
from whoosh.compat import htmlescape
from whoosh.analysis import Token
# The default value for the maximum chars to examine when fragmenting
DEFAULT_CHARLIMIT = 2 ** 15
# Fragment object
def mkfrag(text, tokens, startchar=None, endchar=None,
           charsbefore=0, charsafter=0):
    """Build a :class:`Fragment` covering the given ``tokens``.

    When ``startchar``/``endchar`` are not supplied they default to the
    span of the first and last token (or the whole text when there are
    no tokens). The span is then widened by ``charsbefore`` and
    ``charsafter`` and clamped to the bounds of ``text``.
    """

    if startchar is None:
        startchar = tokens[0].startchar if tokens else 0
    if endchar is None:
        endchar = tokens[-1].endchar if tokens else len(text)

    # Widen by the requested context, clamped to the text bounds
    left = max(0, startchar - charsbefore)
    right = min(len(text), endchar + charsafter)
    return Fragment(text, tokens, left, right)
class Fragment(object):
    """A single excerpt of a hit document.

    Tracks where the excerpt starts and ends in the original text and
    which term matches fall inside it; it does not copy the excerpt text
    or do much else.

    Useful attributes:

    ``text``
        The entire original text this fragment was taken from.
    ``matches``
        Ordered list of match objects, each with ``startchar`` and
        ``endchar`` attributes.
    ``startchar`` / ``endchar``
        Indices of the first and last character of the fragment.
    ``matched_terms``
        A ``set`` of the ``text`` of the matched terms, when available.
    """

    def __init__(self, text, matches, startchar=0, endchar=-1):
        """
        :param text: the source text of the fragment.
        :param matches: objects with ``startchar`` and ``endchar``
            attributes, and optionally a ``text`` attribute.
        :param startchar: index into ``text`` where the fragment starts
            (default 0).
        :param endchar: index into ``text`` where the fragment ends;
            -1 means "the end of ``text``".
        """
        self.text = text
        self.matches = matches
        self.startchar = startchar
        self.endchar = len(text) if endchar == -1 else endchar
        # Collect the matched term strings when the match objects carry them
        self.matched_terms = set(m.text for m in matches
                                 if hasattr(m, "text"))

    def __repr__(self):
        return "<Fragment %d:%d %d>" % (self.startchar, self.endchar,
                                        len(self.matches))

    def __len__(self):
        return self.endchar - self.startchar

    def overlaps(self, fragment):
        # True when either endpoint of the other fragment falls strictly
        # inside this one
        start, end = self.startchar, self.endchar
        return (start < fragment.startchar < end
                or start < fragment.endchar < end)

    def overlapped_length(self, fragment):
        # Length of the union of the two character spans
        return (max(self.endchar, fragment.endchar)
                - min(self.startchar, fragment.startchar))

    def __lt__(self, other):
        # Arbitrary but consistent ordering so fragments can serve as
        # tie-breakers in sorts/heaps
        return id(self) < id(other)
# Tokenizing
def set_matched_filter(tokens, termset):
    """Yield each token, flagging it with a ``matched`` attribute that is
    True when the token's text is in ``termset``."""
    for token in tokens:
        token.matched = token.text in termset
        yield token
# Fragmenters
class Fragmenter(object):
    """Base class for fragmenters, which split highlighted text into
    excerpt :class:`Fragment` objects."""

    def must_retokenize(self):
        """Whether this fragmenter needs the text re-tokenized.

        True means :meth:`fragment_tokens` will be called with an iterator
        of ALL tokens from the text (matched tokens flagged via their
        ``matched`` attribute). False means :meth:`fragment_matches` will
        be called with a list of only the matching tokens.
        """
        return True

    def fragment_tokens(self, text, all_tokens):
        """Yield :class:`Fragment` objects based on the tokenized text.

        :param text: the string being highlighted.
        :param all_tokens: an iterator of :class:`analysis.Token` objects
            from the string.
        """
        raise NotImplementedError

    def fragment_matches(self, text, matched_tokens):
        """Yield :class:`Fragment` objects based on the matched terms only.

        :param text: the string being highlighted.
        :param matched_tokens: a list of :class:`analysis.Token` objects
            representing the term matches in the string.
        """
        raise NotImplementedError
class WholeFragmenter(Fragmenter):
    """Non-fragmenting fragmenter: returns the entire token stream as one
    "fragment", which is useful when you want to highlight the whole text.

    Note that the highlight machinery still returns no fragment when no
    terms matched in the field; pass ``minscore=0`` to ``highlights()`` to
    get the whole text back even in that case.
    """

    def __init__(self, charlimit=DEFAULT_CHARLIMIT):
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        limit = self.charlimit
        matched = []
        for token in tokens:
            # Stop scanning once past the character limit (when set)
            if limit and token.endchar > limit:
                break
            if token.matched:
                matched.append(token.copy())
        return [Fragment(text, matched)]
# Backwards compatibility: ``NullFragmeter`` is the historical (misspelled)
# public alias; keep it, and also provide the correctly spelled name.
NullFragmeter = WholeFragmenter
NullFragmenter = WholeFragmenter
class SentenceFragmenter(Fragmenter):
    """Breaks the text up on sentence end punctuation characters
    (".", "!", or "?"). This object works by looking in the original text for a
    sentence end as the next character after each token's 'endchar'.

    When highlighting with this fragmenter, you should use an analyzer that
    does NOT remove stop words, for example::

        sa = StandardAnalyzer(stoplist=None)
    """

    def __init__(self, maxchars=200, sentencechars=".!?",
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param sentencechars: characters treated as end-of-sentence
            punctuation.
        :param charlimit: stop examining the text after this many
            characters (falsy value disables the limit).
        """
        self.maxchars = maxchars
        self.sentencechars = frozenset(sentencechars)
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        maxchars = self.maxchars
        sentencechars = self.sentencechars
        charlimit = self.charlimit
        textlen = len(text)
        # startchar of first token in the current sentence
        first = None
        # Buffer for matched tokens in the current sentence
        tks = []
        endchar = None
        # Number of chars in the current sentence
        # (sums token lengths only; gaps between tokens are not counted)
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            if charlimit and endchar > charlimit:
                break

            if first is None:
                # Remember the startchar of the first token in a sentence
                first = startchar
                currentlen = 0

            tlength = endchar - startchar
            currentlen += tlength

            if t.matched:
                tks.append(t.copy())

            # If the character after the current token is end-of-sentence
            # punctuation, finish the sentence and reset
            if endchar < textlen and text[endchar] in sentencechars:
                # Don't break for two periods in a row (e.g. ignore "...")
                if endchar + 1 < textlen and text[endchar + 1] in sentencechars:
                    continue

                # If the sentence had matches and it's not too long, yield it
                # as a token
                if tks and currentlen <= maxchars:
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                # Reset the counts
                tks = []
                first = None
                currentlen = 0

        # If we get to the end of the text and there's still a sentence
        # in the buffer, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class ContextFragmenter(Fragmenter):
    """Looks for matched terms and aggregates them with their surrounding
    context.

    Works as a countdown state machine: each matched token (re)arms a
    countdown of ``surround`` characters; the fragment is emitted when the
    countdown runs out or the fragment would exceed ``maxchars``.
    """

    def __init__(self, maxchars=200, surround=20, charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param charlimit: stop examining the text after this many
            characters (falsy value disables the limit).
        """
        self.maxchars = maxchars
        self.surround = surround
        self.charlimit = charlimit

    def fragment_tokens(self, text, tokens):
        maxchars = self.maxchars
        surround = self.surround
        charlimit = self.charlimit

        # startchar of the first token in the fragment
        first = None
        # Stack of startchars
        firsts = deque()
        # Each time we see a matched token, we reset the countdown to finishing
        # the fragment. This also indicates whether we're currently inside a
        # fragment (< 0 not in fragment, >= 0 in fragment)
        countdown = -1
        # Tokens in current fragment
        tks = []
        endchar = None
        # Number of chars in the current fragment
        currentlen = 0

        for t in tokens:
            startchar = t.startchar
            endchar = t.endchar
            tlength = endchar - startchar
            if charlimit and endchar > charlimit:
                break

            if countdown < 0 and not t.matched:
                # We're not in a fragment currently, so just maintain the
                # "charsbefore" buffer
                firsts.append(startchar)
                while firsts and endchar - firsts[0] > surround:
                    firsts.popleft()
            elif currentlen + tlength > maxchars:
                # We're in a fragment, but adding this token would put us past
                # the maximum size. Zero the countdown so the code below will
                # cause the fragment to be emitted
                countdown = 0
            elif t.matched:
                # Start/restart the countdown
                countdown = surround
                # Remember the first char of this fragment
                if first is None:
                    if firsts:
                        first = firsts[0]
                    else:
                        first = startchar

                    # Add on unused front context
                    countdown += surround
                tks.append(t.copy())

            # If we're in a fragment...
            if countdown >= 0:
                # Update the counts
                currentlen += tlength
                countdown -= tlength

                # If the countdown is expired
                if countdown <= 0:
                    # Finish the fragment
                    yield mkfrag(text, tks, startchar=first, endchar=endchar)
                    # Reset the counts
                    tks = []
                    firsts = deque()
                    first = None
                    currentlen = 0

        # If there's a fragment left over at the end, yield it
        if tks:
            yield mkfrag(text, tks, startchar=first, endchar=endchar)
class PinpointFragmenter(Fragmenter):
    """This is a NON-RETOKENIZING fragmenter. It builds fragments from the
    positions of the matched terms.
    """

    def __init__(self, maxchars=200, surround=20, autotrim=False,
                 charlimit=DEFAULT_CHARLIMIT):
        """
        :param maxchars: The maximum number of characters allowed in a
            fragment.
        :param surround: The number of extra characters of context to add both
            before the first matched term and after the last matched term.
        :param autotrim: automatically trims text before the first space and
            after the last space in the fragments, to try to avoid truncated
            words at the start and end. For short fragments or fragments with
            long runs between spaces this may give strange results.
        :param charlimit: stop examining the text after this many
            characters (falsy value disables the limit).
        """
        self.maxchars = maxchars
        self.surround = surround
        self.autotrim = autotrim
        self.charlimit = charlimit

    def must_retokenize(self):
        # This fragmenter works directly from stored match positions
        return False

    def fragment_tokens(self, text, tokens):
        # Retokenized entry point: keep only the matched tokens and defer
        # to fragment_matches()
        matched = [t for t in tokens if t.matched]
        return self.fragment_matches(text, matched)

    @staticmethod
    def _autotrim(fragment):
        # Snap the fragment edges to the nearest interior spaces, then
        # re-expand so no actual match is cut off.
        # NOTE(review): when the fragment already starts on a word boundary,
        # find() still locates the first space AFTER the first word, so the
        # first word gets trimmed -- presumably accepted per the "strange
        # results" caveat in the class docstring; confirm before relying on it.
        text = fragment.text
        startchar = fragment.startchar
        endchar = fragment.endchar
        firstspace = text.find(" ", startchar, endchar)
        if firstspace > 0:
            startchar = firstspace + 1
        lastspace = text.rfind(" ", startchar, endchar)
        if lastspace > 0:
            endchar = lastspace

        if fragment.matches:
            # Never trim past the first/last match positions
            startchar = min(startchar, fragment.matches[0].startchar)
            endchar = max(endchar, fragment.matches[-1].endchar)
        fragment.startchar = startchar
        fragment.endchar = endchar

    def fragment_matches(self, text, tokens):
        maxchars = self.maxchars
        surround = self.surround
        autotrim = self.autotrim
        charlimit = self.charlimit

        # j is the index of the last token already consumed into a fragment
        j = -1

        for i, t in enumerate(tokens):
            if j >= i:
                # Token was already absorbed into the previous fragment
                continue
            j = i
            left = t.startchar
            right = t.endchar
            if charlimit and right > charlimit:
                break

            currentlen = right - left
            # Greedily absorb following tokens while they stay within
            # `surround` of the fragment edge and under `maxchars`
            while j < len(tokens) - 1 and currentlen < maxchars:
                next = tokens[j + 1]
                ec = next.endchar
                if ec - right <= surround and ec - left <= maxchars:
                    j += 1
                    right = ec
                    currentlen += (ec - next.startchar)
                else:
                    break

            # Pad with surrounding context, clamped to the text bounds
            left = max(0, left - surround)
            right = min(len(text), right + surround)

            fragment = Fragment(text, tokens[i:j + 1], left, right)
            if autotrim:
                self._autotrim(fragment)
            yield fragment
# Fragment scorers
class FragmentScorer(object):
    """Marker base class for callables that assign a ranking score to a
    :class:`Fragment`."""
    pass
class BasicFragmentScorer(FragmentScorer):
    """Default scorer: sums the boosts of the matched terms and rewards
    fragments matching several distinct terms."""

    def __call__(self, f):
        # Total boost of all term matches in this fragment
        total = sum(match.boost for match in f.matches)
        # Favor diversity: multiply by 100 per distinct matched term
        # (fall back to 1 so an empty term set doesn't zero the score)
        total *= (len(f.matched_terms) * 100) or 1
        return total
# Fragment sorters
def SCORE(fragment):
    """Sorts higher scored passages first (constant key keeps the
    already-scored ordering)."""
    return 1
def FIRST(fragment):
    """Sort key: passages from earlier in the document come first."""
    return fragment.startchar
def LONGER(fragment):
    """Sort key: longer passages come first."""
    return -len(fragment)
def SHORTER(fragment):
    """Sort key: shorter passages come first."""
    return len(fragment)
# Formatters
def get_text(original, token, replace):
    """Return the text to display for a match when formatting.

    With ``replace`` False, the slice of ``original`` between the token's
    ``startchar`` and ``endchar``; with ``replace`` True, the token's own
    ``text`` attribute.
    """
    if replace:
        return token.text
    return original[token.startchar:token.endchar]
class Formatter(object):
    """Base class for formatters, which turn :class:`Fragment` objects
    into display output (usually strings).

    Formatters that produce strings normally only need to override
    :meth:`format_token`, e.g.::

        class MyFormatter(Formatter):
            def format_token(self, text, token, replace=False):
                return "[%s]" % get_text(text, token, replace)
    """

    # Separator inserted between formatted fragments
    between = "..."

    def _text(self, text):
        # Hook for escaping/transforming plain (non-match) text
        return text

    def format_token(self, text, token, replace=False):
        """Return the formatted output for one matched token.

        :param text: the original fragment text being highlighted.
        :param token: an object with ``startchar`` and ``endchar``
            attributes, and a ``text`` attribute when ``replace`` is True.
        :param replace: if True, the original slice between the token's
            ``startchar`` and ``endchar`` is replaced by ``token.text``.
        """
        raise NotImplementedError

    def format_fragment(self, fragment, replace=False):
        """Return the formatted output for a whole fragment, interleaving
        plain text (run through :meth:`_text`) with formatted tokens.

        :param fragment: a :class:`Fragment` of matches in the text.
        :param replace: passed through to :meth:`format_token`.
        """
        pieces = []
        pos = fragment.startchar
        source = fragment.text
        for match in fragment.matches:
            start = match.startchar
            # Skip matches without a position or ones that would rewind
            if start is None or start < pos:
                continue
            if start > pos:
                pieces.append(self._text(source[pos:start]))
            pieces.append(self.format_token(source, match, replace))
            pos = match.endchar
        pieces.append(self._text(source[pos:fragment.endchar]))
        return "".join(pieces)

    def format(self, fragments, replace=False):
        """Format a sequence of fragments, joined by ``between``."""
        return self.between.join(self.format_fragment(f, replace=replace)
                                 for f in fragments)

    def __call__(self, text, fragments):
        # Backwards-compatible entry point; ``text`` is ignored
        return self.format(fragments)
class NullFormatter(Formatter):
    """Formatter that returns the matched text without any decoration."""

    def format_token(self, text, token, replace=False):
        # Equivalent to get_text(): token.text when replacing, otherwise
        # the original slice
        return token.text if replace else text[token.startchar:token.endchar]
class UppercaseFormatter(Formatter):
    """Formatter that renders the matched terms in UPPERCASE."""

    def __init__(self, between="..."):
        """
        :param between: the text to add between fragments.
        """
        self.between = between

    def format_token(self, text, token, replace=False):
        return get_text(text, token, replace).upper()
class HtmlFormatter(Formatter):
    """Returns a string containing HTML formatting around the matched terms.

    This formatter wraps matched terms in an HTML element with two class names.
    The first class name (set with the constructor argument ``classname``) is
    the same for each match. The second class name (set with the constructor
    argument ``termclass``) is different depending on which term matched. This
    allows you to give different formatting (for example, different background
    colors) to the different terms in the excerpt.

    >>> hf = HtmlFormatter(tagname="span", classname="match", termclass="term")
    >>> hf(mytext, myfragments)
    "The <span class="match term0">template</span> <span class="match term1">geometry</span> is..."

    This object maintains a dictionary mapping terms to HTML class names (e.g.
    ``term0`` and ``term1`` above), so that multiple excerpts will use the same
    class for the same term. If you want to re-use the same HtmlFormatter
    object with different searches, you should call HtmlFormatter.clear()
    between searches to clear the mapping.
    """

    template = '<%(tag)s class=%(q)s%(cls)s%(tn)s%(q)s>%(t)s</%(tag)s>'

    def __init__(self, tagname="strong", between="...",
                 classname="match", termclass="term", maxclasses=5,
                 attrquote='"'):
        """
        :param tagname: the tag to wrap around matching terms.
        :param between: the text to add between fragments.
        :param classname: the class name to add to the elements wrapped around
            matching terms.
        :param termclass: the class name prefix for the second class which is
            different for each matched term.
        :param maxclasses: the maximum number of term classes to produce. This
            limits the number of classes you have to define in CSS by recycling
            term class names. For example, if you set maxclasses to 3 and have
            5 terms, the 5 terms will use the CSS classes ``term0``, ``term1``,
            ``term2``, ``term0``, ``term1``.
        :param attrquote: the quote character used around the class attribute
            value in the generated markup.
        """
        self.between = between
        self.tagname = tagname
        self.classname = classname
        self.termclass = termclass
        self.attrquote = attrquote
        self.maxclasses = maxclasses
        # term text -> class number, shared across excerpts so the same
        # term always gets the same CSS class
        self.seen = {}
        self.htmlclass = " ".join((self.classname, self.termclass))

    def _text(self, text):
        # Escape HTML special characters in plain (non-match) text
        return htmlescape(text, quote=False)

    def format_token(self, text, token, replace=False):
        seen = self.seen
        ttext = self._text(get_text(text, token, replace))
        if ttext in seen:
            termnum = seen[ttext]
        else:
            # Assign the next term class number, recycling past maxclasses
            termnum = len(seen) % self.maxclasses
            seen[ttext] = termnum

        return self.template % {"tag": self.tagname, "q": self.attrquote,
                                "cls": self.htmlclass, "t": ttext,
                                "tn": termnum}

    def clean(self):
        """Clears the dictionary mapping terms to HTML classnames.
        """
        self.seen = {}

    # The class docstring directs users to call ``clear()``, but the method
    # was historically named ``clean()``; provide both spellings so either
    # works.
    clear = clean
class GenshiFormatter(Formatter):
    """Returns a Genshi event stream containing HTML formatting around the
    matched terms.
    """

    def __init__(self, qname="strong", between="..."):
        """
        :param qname: the QName for the tag to wrap around matched terms.
        :param between: the text to add between fragments.
        """
        self.qname = qname
        self.between = between

        # Imported lazily so genshi is only required when this formatter
        # is actually used
        from genshi.core import START, END, TEXT  # @UnresolvedImport
        from genshi.core import Attrs, Stream  # @UnresolvedImport
        self.START, self.END, self.TEXT = START, END, TEXT
        self.Attrs, self.Stream = Attrs, Stream

    def _add_text(self, text, output):
        # Coalesce consecutive TEXT events into a single event
        if output and output[-1][0] == self.TEXT:
            output[-1] = (self.TEXT, output[-1][1] + text, output[-1][2])
        else:
            output.append((self.TEXT, text, (None, -1, -1)))

    def format_token(self, text, token, replace=False):
        """Return a small Stream wrapping the matched text in ``qname``."""
        qn = self.qname
        txt = get_text(text, token, replace)
        return self.Stream([(self.START, (qn, self.Attrs()), (None, -1, -1)),
                            (self.TEXT, txt, (None, -1, -1)),
                            (self.END, qn, (None, -1, -1))])

    def format_fragment(self, fragment, replace=False):
        output = []
        index = fragment.startchar
        text = fragment.text

        for t in fragment.matches:
            if t.startchar > index:
                self._add_text(text[index:t.startchar], output)
            # NOTE(review): this appends a raw (text, token, replace) tuple
            # to the event list instead of the events produced by
            # format_token(text, t, replace) -- it looks like format_token
            # was intended here; confirm against genshi Stream semantics
            # before changing.
            output.append((text, t, replace))
            index = t.endchar
        if index < len(text):
            self._add_text(text[index:], output)
        return self.Stream(output)

    def format(self, fragments, replace=False):
        """Concatenate the fragments into one Stream, separated by the
        ``between`` text."""
        output = []
        first = True
        for fragment in fragments:
            if not first:
                self._add_text(self.between, output)
            output += self.format_fragment(fragment, replace=replace)
            first = False
        return self.Stream(output)
# Highlighting
def top_fragments(fragments, count, scorer, order, minscore=1):
    """Score the fragments, keep the ``count`` best ones scoring at least
    ``minscore``, and return them sorted by the ``order`` key function."""
    scored = nlargest(count, ((scorer(f), f) for f in fragments))
    best = [frag for score, frag in scored if score >= minscore]
    best.sort(key=order)
    return best
def highlight(text, terms, analyzer, fragmenter, formatter, top=3,
              scorer=None, minscore=1, order=FIRST, mode="query"):
    """One-shot convenience function: tokenize ``text``, mark the matched
    ``terms``, fragment, score, and format the best fragments.

    :param text: the original text to excerpt from.
    :param terms: the matched term strings.
    :param analyzer: analyzer used to (re)tokenize ``text``.
    :param fragmenter: a :class:`Fragmenter` instance or class.
    :param formatter: a :class:`Formatter` instance or class.
    :param top: maximum number of fragments to return.
    :param scorer: fragment scorer instance or class (defaults to
        :class:`BasicFragmentScorer`).
    :param minscore: minimum score for a fragment to be kept.
    :param order: sort key function for the returned fragments.
    :param mode: analysis mode passed to the analyzer.
    """

    # Allow passing classes instead of instances for convenience
    if type(fragmenter) is type:
        fragmenter = fragmenter()
    if type(formatter) is type:
        formatter = formatter()
    if type(scorer) is type:
        scorer = scorer()
    # Single default-scorer check (the original redundantly checked this
    # both before and after the class-instantiation block)
    if scorer is None:
        scorer = BasicFragmentScorer()

    termset = frozenset(terms)
    tokens = analyzer(text, chars=True, mode=mode, removestops=False)
    tokens = set_matched_filter(tokens, termset)
    fragments = fragmenter.fragment_tokens(text, tokens)
    fragments = top_fragments(fragments, top, scorer, order, minscore)
    return formatter(text, fragments)
class Highlighter(object):
    """Reusable highlighting pipeline: pairs a fragmenter, scorer,
    formatter, and ordering, and knows how to highlight a search Hit via
    :meth:`highlight_hit` -- using stored character data ("pinpoint"
    highlighting) when possible, and re-tokenizing the text otherwise.
    """

    def __init__(self, fragmenter=None, scorer=None, formatter=None,
                 always_retokenize=False, order=FIRST):
        # Defaults: context fragments, basic scoring, <b> HTML tags,
        # fragments in document order
        self.fragmenter = fragmenter or ContextFragmenter()
        self.scorer = scorer or BasicFragmentScorer()
        self.formatter = formatter or HtmlFormatter(tagname="b")
        self.order = order
        self.always_retokenize = always_retokenize

    def can_load_chars(self, results, fieldname):
        # Is it possible to build a mapping between the matched terms/docs and
        # their start and end chars for "pinpoint" highlighting (ie not require
        # re-tokenizing text)?
        if self.always_retokenize:
            # No, we've been configured to always retokenize some text
            return False
        if not results.has_matched_terms():
            # No, we don't know what the matched terms are yet
            return False
        if self.fragmenter.must_retokenize():
            # No, the configured fragmenter doesn't support it
            return False

        # Maybe, if the field was configured to store characters
        field = results.searcher.schema[fieldname]
        return field.supports("characters")

    @staticmethod
    def _load_chars(results, fieldname, texts, to_bytes):
        # For each docnum, create a mapping of text -> [(startchar, endchar)]
        # for the matched terms

        results._char_cache[fieldname] = cache = {}
        sorted_ids = sorted(docnum for _, docnum in results.top_n)

        for docnum in sorted_ids:
            cache[docnum] = {}

        for text in texts:
            btext = to_bytes(text)
            m = results.searcher.postings(fieldname, btext)
            docset = set(results.termdocs[(fieldname, btext)])
            for docnum in sorted_ids:
                if docnum in docset:
                    m.skip_to(docnum)
                    assert m.id() == docnum
                    cache[docnum][text] = m.value_as("characters")

    @staticmethod
    def _merge_matched_tokens(tokens):
        # Merges consecutive matched tokens together, so they are highlighted
        # as one

        # `token` holds the in-progress merge candidate (None = no candidate)
        token = None

        for t in tokens:
            if not t.matched:
                if token is not None:
                    yield token
                    token = None
                yield t
                continue

            if token is None:
                token = t.copy()
            elif t.startchar <= token.endchar:
                # Overlapping/adjacent match: extend the candidate.
                # The negative slice appends only the part of t.text past
                # the candidate's endchar (assumes the token text aligns
                # with its character span -- TODO confirm).
                if t.endchar > token.endchar:
                    token.text += t.text[token.endchar-t.endchar:]
                    token.endchar = t.endchar
            else:
                yield token
                token = None
                # t was not merged, also has to be yielded
                yield t

        if token is not None:
            yield token

    def highlight_hit(self, hitobj, fieldname, text=None, top=3, minscore=1):
        """Return formatted highlights for one field of a Hit.

        :param hitobj: the search Hit to highlight.
        :param fieldname: the field to excerpt from.
        :param text: override text (otherwise the stored field value is
            used; raises KeyError if the field is not stored).
        :param top: maximum number of fragments to return.
        :param minscore: minimum fragment score to keep.
        """
        results = hitobj.results
        schema = results.searcher.schema
        field = schema[fieldname]
        to_bytes = field.to_bytes
        from_bytes = field.from_bytes

        if text is None:
            if fieldname not in hitobj:
                raise KeyError("Field %r is not stored." % fieldname)
            text = hitobj[fieldname]

        # Get the terms searched for/matched in this field
        if results.has_matched_terms():
            bterms = (term for term in results.matched_terms()
                      if term[0] == fieldname)
        else:
            bterms = results.query_terms(expand=True, fieldname=fieldname)
        # Convert bytes to unicode
        words = frozenset(from_bytes(term[1]) for term in bterms)

        # If we can do "pinpoint" highlighting...
        if self.can_load_chars(results, fieldname):
            # Build the docnum->[(startchar, endchar),] map
            if fieldname not in results._char_cache:
                self._load_chars(results, fieldname, words, to_bytes)

            hitterms = (from_bytes(term[1]) for term in hitobj.matched_terms()
                        if term[0] == fieldname)

            # Grab the word->[(startchar, endchar)] map for this docnum
            cmap = results._char_cache[fieldname][hitobj.docnum]
            # A list of Token objects for matched words
            tokens = []
            charlimit = self.fragmenter.charlimit
            for word in hitterms:
                chars = cmap[word]
                for pos, startchar, endchar in chars:
                    if charlimit and endchar > charlimit:
                        break
                    tokens.append(Token(text=word, pos=pos,
                                        startchar=startchar, endchar=endchar))
            # Sort by position, then keep only the longest token starting
            # at each startchar
            tokens.sort(key=lambda t: t.startchar)
            tokens = [max(group, key=lambda t: t.endchar - t.startchar)
                      for key, group in groupby(tokens, lambda t: t.startchar)]
            fragments = self.fragmenter.fragment_matches(text, tokens)
        else:
            # Retokenize the text
            analyzer = results.searcher.schema[fieldname].analyzer
            tokens = analyzer(text, positions=True, chars=True, mode="index",
                              removestops=False)
            # Set Token.matched attribute for tokens that match a query term
            tokens = set_matched_filter(tokens, words)
            tokens = self._merge_matched_tokens(tokens)
            fragments = self.fragmenter.fragment_tokens(text, tokens)

        fragments = top_fragments(fragments, top, self.scorer, self.order,
                                  minscore=minscore)
        output = self.formatter.format(fragments)
        return output
| bsd-3-clause |
trondeau/gnuradio | gr-uhd/examples/python/freq_hopping.py | 36 | 9337 | #!/usr/bin/env python
#
# Copyright 2014 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
"""
TXs a waveform (either from a file, or a sinusoid) in a frequency-hopping manner.
"""
import numpy
import argparse
import pmt
from gnuradio import gr
from gnuradio import blocks
from gnuradio import uhd
def setup_parser():
    """Build and return the argparse parser for the frequency hopper.

    Only parsing happens here; validation (e.g. burst duration vs. hop
    time) is done by the caller in main().

    Returns:
        argparse.ArgumentParser: configured parser.
    """
    parser = argparse.ArgumentParser(
        description="Transmit a signal in a frequency-hopping manner, using tx_freq tags."
    )
    # argparse.FileType('r') does what the Python-2-only builtin `file` did
    # here (opens the given path for reading), but also works on Python 3.
    parser.add_argument('-i', '--input-file', type=argparse.FileType('r'), default=None,
                        help="File with samples to transmit. If left out, will transmit a sinusoid.")
    parser.add_argument("-a", "--args", default="",
                        help="UHD device address args.")
    parser.add_argument("--spec", default="",
                        help="UHD subdev spec.")
    parser.add_argument("--antenna", default="",
                        help="UHD antenna settings.")
    parser.add_argument("--gain", default=None, type=float,
                        help="USRP gain (defaults to mid-point in dB).")
    parser.add_argument("-r", "--rate", type=float, default=1e6,
                        help="Sampling rate")
    parser.add_argument("-N", "--samp-per-burst", type=int, default=10000,
                        help="Samples per burst")
    parser.add_argument("-t", "--hop-time", type=float, default=1000,
                        help="Time between hops in milliseconds. This must be larger than or equal to the burst duration as set by --samp-per-burst")
    parser.add_argument("-f", "--freq", type=float, default=2.45e9,
                        help="Base frequency. This is the middle channel frequency at which the USRP will Tx.")
    parser.add_argument("--dsp", action='store_true',
                        help="DSP tuning only.")
    parser.add_argument("-d", "--freq-delta", type=float, default=1e6,
                        help="Channel spacing.")
    parser.add_argument("-c", "--num-channels", type=int, default=5,
                        help="Number of channels.")
    parser.add_argument("-B", "--num-bursts", type=int, default=30,
                        help="Number of bursts to transmit before terminating.")
    parser.add_argument("-p", "--post-tuning", action='count',
                        help="Tune after transmitting. Default is to tune immediately before transmitting.")
    parser.add_argument("-v", "--verbose", action='count',
                        help="Print more information. The morer the printier.")
    return parser
class FrequencyHopperSrc(gr.hier_block2):
    """ Provides tags for frequency hopping.

    A hier block (complex in -> complex out) that multiplies the input
    stream by a stream of ones carrying the stream tags needed for
    tagged-stream bursts on a USRP sink: one gain tag (tx_command), and
    per burst a tune tag (tx_freq or tx_command), a packet_len tag and an
    absolute tx_time tag.
    """
    def __init__(
            self,
            n_bursts, n_channels,
            freq_delta, base_freq, dsp_tuning,
            burst_length, base_time, hop_time,
            post_tuning=False,
            tx_gain=0,
            verbose=False
    ):
        """
        Args:
            n_bursts: total number of bursts to tag.
            n_channels: number of hop channels.
            freq_delta: channel spacing in Hz.
            base_freq: middle channel frequency in Hz.
            dsp_tuning: if True, keep the LO fixed and hop with the DSP
                frequency shift (tx_command) instead of retuning (tx_freq).
            burst_length: samples per burst.
            base_time: time (in seconds) of the first burst.
            hop_time: time between bursts, in seconds.
            post_tuning: if True, place the tune tag on the last sample of
                the previous burst instead of the first of the current one.
            tx_gain: gain in dB, sent once as a tx_command tag at offset 0.
            verbose: print the hop frequency table.
        """
        gr.hier_block2.__init__(self,
            "FrequencyHopperSrc",
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
            gr.io_signature(1, 1, gr.sizeof_gr_complex),
        )
        n_samples_total = n_bursts * burst_length
        # Channels are centered around base_freq, spaced freq_delta apart.
        lowest_frequency = base_freq - numpy.floor(n_channels/2) * freq_delta
        self.hop_sequence = [lowest_frequency + n * freq_delta for n in xrange(n_channels)]
        # Randomize the channel order once...
        numpy.random.shuffle(self.hop_sequence)
        # Repeat that:
        self.hop_sequence = [self.hop_sequence[x % n_channels] for x in xrange(n_bursts)]
        if verbose:
            print "Hop Frequencies | Hop Pattern"
            print "=================|================================"
            for f in self.hop_sequence:
                print "{:6.3f} MHz | ".format(f/1e6),
                if n_channels < 50:
                    # ASCII-art column: a '#' marker at the channel's index.
                    print " " * int((f - base_freq) / freq_delta) + "#"
                else:
                    print "\n"
            print "=================|================================"
        # There's no real point in setting the gain via tag for this application,
        # but this is an example to show you how to do it.
        gain_tag = gr.tag_t()
        gain_tag.offset = 0
        gain_tag.key = pmt.string_to_symbol('tx_command')
        gain_tag.value = pmt.to_pmt({'gain': tx_gain})
        tag_list = [gain_tag,]
        # Per burst: one tune tag, one packet-length tag and one tx_time tag,
        # all anchored at the burst's first sample (offset i * burst_length).
        for i in xrange(len(self.hop_sequence)):
            tune_tag = gr.tag_t()
            tune_tag.offset = i * burst_length
            if i > 0 and post_tuning and not dsp_tuning: # TODO dsp_tuning should also be able to do post_tuning
                tune_tag.offset -= 1 # Move it to last sample of previous burst
            if dsp_tuning:
                # LO stays at base_freq; only the DSP shift changes per hop.
                tune_tag.key = pmt.string_to_symbol('tx_command')
                tune_tag.value = pmt.to_pmt({'lo_freq': base_freq, 'dsp_freq': base_freq - self.hop_sequence[i]})
            else:
                tune_tag.key = pmt.string_to_symbol('tx_freq')
                tune_tag.value = pmt.to_pmt(self.hop_sequence[i])
            tag_list.append(tune_tag)
            length_tag = gr.tag_t()
            length_tag.offset = i * burst_length
            length_tag.key = pmt.string_to_symbol('packet_len')
            length_tag.value = pmt.from_long(burst_length)
            tag_list.append(length_tag)
            time_tag = gr.tag_t()
            time_tag.offset = i * burst_length
            time_tag.key = pmt.string_to_symbol('tx_time')
            # Time is a (whole seconds, fractional seconds) tuple.
            time_tag.value = pmt.make_tuple(
                pmt.from_uint64(int(base_time + i * hop_time)),
                pmt.from_double((base_time + i * hop_time) % 1),
            )
            tag_list.append(time_tag)
        # The tags ride on a stream of ones, which is multiplied onto the
        # input so the tags propagate downstream with the samples.
        tag_source = blocks.vector_source_c((1.0,) * n_samples_total, repeat=False, tags=tag_list)
        mult = blocks.multiply_cc()
        self.connect(self, mult, self)
        self.connect(tag_source, (mult, 1))
class FlowGraph(gr.top_block):
    """ Flow graph that does the frequency hopping. """
    def __init__(self, options):
        """Wire up: sample source -> frequency hopper -> USRP sink.

        Args:
            options: parsed argparse namespace from setup_parser().
        """
        gr.top_block.__init__(self)
        if options.input_file is not None:
            # Bug fix: the parser stores this option as `input_file` (an
            # already-open file object), not `filename` -- the old code
            # raised AttributeError. file_source() wants a path, so pass
            # the open file's name.
            src = blocks.file_source(gr.sizeof_gr_complex, options.input_file.name, repeat=True)
        else:
            # No input file: transmit a constant (DC) waveform.
            src = blocks.vector_source_c((.5,) * int(1e6) * 2, repeat=True)
        # Setup USRP
        self.u = uhd.usrp_sink(options.args, uhd.stream_args('fc32'), "packet_len")
        if(options.spec):
            self.u.set_subdev_spec(options.spec, 0)
        if(options.antenna):
            self.u.set_antenna(options.antenna, 0)
        self.u.set_samp_rate(options.rate)
        # Gain is set in the hopper block (via a tx_command tag); only the
        # default mid-range value is computed here.
        if options.gain is None:
            g = self.u.get_gain_range()
            options.gain = float(g.start()+g.stop())/2.0
        print("-- Setting gain to {} dB".format(options.gain))
        r = self.u.set_center_freq(options.freq)
        if not r:
            print('[ERROR] Failed to set base frequency.')
            # Call form works on both Python 2 and 3 (the old
            # `raise SystemExit, 1` was Python-2-only syntax).
            raise SystemExit(1)
        hopper_block = FrequencyHopperSrc(
            options.num_bursts, options.num_channels,
            options.freq_delta, options.freq, options.dsp,
            options.samp_per_burst, 1.0, options.hop_time / 1000.,
            options.post_tuning,
            options.gain,
            options.verbose,
        )
        self.connect(src, hopper_block, self.u)
def print_hopper_stats(args):
    """ Nothing to do with Grace Hopper.

    Print a human-readable table of the hop parameters derived from the
    parsed command-line arguments.
    """
    # Python 2 print statement; the whole table is one template string
    # filled in by .format() below.
    print """
Parameter | Value
===================+=========================
Hop Interval | {hop_time} ms
Burst duration | {hop_duration} ms
Lowest Frequency | {lowest_freq:6.3f} MHz
Highest Frequency | {highest_freq:6.3f} MHz
Frequency spacing | {freq_delta:6.4f} MHz
Number of channels | {num_channels}
Sampling rate | {rate} Msps
Transmit Gain | {gain} dB
===================+=========================
""".format(
        hop_time=args.hop_time,
        # samples / (samples per second) -> seconds; *1000 -> milliseconds
        hop_duration=1000.0/args.rate*args.samp_per_burst,
        gain=args.gain,
        lowest_freq=args.freq/1e6,
        # Highest channel: base + (n-1) spacings above the lowest... note the
        # channels are centered on args.freq elsewhere, so this prints the
        # span upward from the base frequency.
        highest_freq=(args.freq + (args.num_channels-1) * args.freq_delta)/1e6,
        freq_delta=args.freq_delta/1e6,
        num_channels=args.num_channels,
        rate=args.rate/1e6,
    )
def main():
    """ Go, go, go!

    Parse arguments, validate them, build the flow graph and run it
    until all bursts are sent.
    """
    args = setup_parser().parse_args()
    # A burst must fit inside one hop interval (hop-time is in milliseconds).
    if (1.0 * args.samp_per_burst / args.rate) > args.hop_time * 1e-3:
        print "Burst duration must be smaller than hop time."
        exit(1)
    if args.verbose:
        print_hopper_stats(args)
    top_block = FlowGraph(args)
    print "Starting to hop, skip and jump... press Ctrl+C to exit."
    # NOTE(review): presumably resets device time so the absolute tx_time
    # tags line up with the USRP clock -- confirm against the hopper block.
    top_block.u.set_time_now(uhd.time_spec(0.0))
    top_block.run()
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Swallow Ctrl+C so the script exits quietly instead of printing
        # a traceback.
        pass
| gpl-3.0 |
fly19890211/edx-platform | lms/djangoapps/certificates/migrations/0002_auto__add_field_generatedcertificate_download_url.py | 188 | 6807 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: add the nullable `download_url` CharField
        column to the `certificates_generatedcertificate` table."""
        # Adding field 'GeneratedCertificate.download_url'
        db.add_column('certificates_generatedcertificate', 'download_url',
                      self.gf('django.db.models.fields.CharField')(max_length=128, null=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse the migration: drop the `download_url` column again."""
        # Deleting field 'GeneratedCertificate.download_url'
        db.delete_column('certificates_generatedcertificate', 'download_url')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'avatar_type': ('django.db.models.fields.CharField', [], {'default': "'n'", 'max_length': '1'}),
'bronze': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'consecutive_days_visit_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'country': ('django_countries.fields.CountryField', [], {'max_length': '2', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'display_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'email_isvalid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'email_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'email_tag_filter_strategy': ('django.db.models.fields.SmallIntegerField', [], {'default': '1'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'gold': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'gravatar': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ignored_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'interesting_tags': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'location': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'new_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'questions_per_page': ('django.db.models.fields.SmallIntegerField', [], {'default': '10'}),
'real_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'reputation': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'seen_response_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'show_country': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'silver': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'w'", 'max_length': '2'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'certificates.generatedcertificate': {
'Meta': {'object_name': 'GeneratedCertificate'},
'certificate_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'download_url': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['certificates']
| agpl-3.0 |
matichorvat/pydelphin | mrs.py | 1 | 2166 | #!/usr/bin/env python3
import sys
import argparse
from delphin.mrs import simplemrs, mrx, dmrx, eds, simpledmrs
from delphin.extra.latex import dmrs_tikz_dependency
# Maps the format names accepted on the command line to the delphin codec
# modules that implement them (each used via load/loads/dumps below).
mrsformats = {
    'simplemrs': simplemrs,
    'mrx': mrx,
    'dmrx': dmrx,
    'eds': eds,
    'simpledmrs': simpledmrs
}

# Output-only "formats": plain rendering functions rather than codec
# modules, so they can be conversion targets but not sources.
extraformats = {
    'dmrs-tikz': dmrs_tikz_dependency
}
# Top-level CLI: two sub-commands, `convert` (alias `c`) and `paths`
# (alias `p`), dispatched on below via args.command.
parser = argparse.ArgumentParser(description="Utility for manipulating MRSs")
subparsers = parser.add_subparsers(dest='command')

convert_parser = subparsers.add_parser('convert', aliases=['c'])
convert_parser.add_argument(
    '--from', '-f',
    dest='srcfmt',  # `from` is a Python keyword, so store under `srcfmt`
    choices=list(mrsformats.keys())
)
convert_parser.add_argument(
    '--to', '-t',
    dest='tgtfmt',
    # Conversion targets may also be the render-only extra formats.
    choices=list(mrsformats.keys()) + list(extraformats.keys())
)
convert_parser.add_argument('--pretty-print', '-p', action='store_true')
convert_parser.add_argument('--color', '-c', action='store_true')
# Optional input path; stdin is used when omitted.
convert_parser.add_argument('infile', metavar='PATH', nargs='?')

path_parser = subparsers.add_parser('paths', aliases=['p'])
path_parser.add_argument('--format', '-f', choices=list(mrsformats.keys()))
# NOTE(review): presumably -1 means "no depth limit" -- confirm against
# delphin.mrs.path.get_paths.
path_parser.add_argument('--depth', '-d', default=-1)
path_parser.add_argument('infile', metavar='PATH', nargs='?')

args = parser.parse_args()
if args.command in ('convert', 'c'):
    # Read the MRSs in the source format, from the given file or stdin.
    srcfmt = mrsformats[args.srcfmt]
    if args.infile is not None:
        # NOTE(review): the file handle is never closed; acceptable for a
        # short-lived CLI, but a `with` block would be tidier.
        ms = srcfmt.load(open(args.infile, 'r'))
    else:
        ms = srcfmt.loads(sys.stdin.read())
    output = ''
    if args.tgtfmt in mrsformats:
        # Codec modules serialize via dumps().
        output = mrsformats[args.tgtfmt].dumps(
            ms,
            pretty_print=args.pretty_print,
            color=args.color
        )
    elif args.tgtfmt in extraformats:
        # Extra formats are plain rendering functions taking the MRS list.
        output = extraformats[args.tgtfmt](ms)
    print(output)
elif args.command in ('paths', 'p'):
    # Imported lazily so the `convert` path does not pay for it.
    from delphin.mrs import path as mrspath
    if args.infile is not None:
        instream = open(args.infile, 'r')
    else:
        instream = sys.stdin
    outstream = sys.stdout  # NOTE(review): unused -- output goes via print()
    ms = mrsformats[args.format].load(instream)
    for m in ms:
        # One tab-separated line of paths per MRS.
        paths = list(mrspath.get_paths(m, max_depth=int(args.depth)))
        print('\t'.join(paths))
| mit |
renyi533/tensorflow | tensorflow/python/keras/engine/base_layer_test.py | 2 | 52630 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for TensorFlow 2.0 layer behavior."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import sys
import traceback
import numpy as np
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import test_util
from tensorflow.python.keras import backend
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import base_layer
from tensorflow.python.keras.mixed_precision.experimental import policy
from tensorflow.python.keras.optimizer_v2 import rmsprop
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.layers import core as legacy_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
from tensorflow.python.platform import tf_logging
from tensorflow.python.summary import summary_iterator
from tensorflow.python.util import nest
class DynamicLayer(base_layer.Layer):
    """Test fixture: a layer whose `call` uses Python control flow.

    Squares every sample of the input. The per-sample `enumerate` loop
    over a tensor cannot be traced into a static graph, so the layer is
    only usable when constructed with `dynamic=True` (the tests below
    exercise both the working and the failing configuration).
    """

    def __init__(self, dynamic=False, **kwargs):
        super(DynamicLayer, self).__init__(dynamic=dynamic, **kwargs)

    def call(self, inputs):
        # Write each squared sample into a TensorArray sized to the batch
        # dimension, then stack back into a single tensor.
        samples = tensor_array_ops.TensorArray(
            dtype=dtypes.float32, size=array_ops.shape(inputs)[0])
        for idx, sample in enumerate(inputs):
            samples = samples.write(idx, math_ops.square(sample))
        return samples.stack()

    def compute_output_shape(self, input_shape):
        # Element-wise squaring preserves the input shape.
        return input_shape
class InvalidLayer(base_layer.Layer):
    """Test fixture: a layer that always raises from `call`, used to check
    that errors raised during the forward pass surface to the caller."""

    def call(self, inputs):
        raise ValueError('You did something wrong!')
class BaseLayerTest(keras_parameterized.TestCase):
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer(self):
model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
input_shape=(3,))
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer_error(self):
with self.assertRaisesRegexp(TypeError,
'attempting to use Python control flow'):
model = testing_utils.get_model_from_layers([DynamicLayer()],
input_shape=(3,))
model.compile(rmsprop.RMSprop(0.001), loss='mse')
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
@keras_parameterized.run_with_all_model_types
def test_dynamic_layer_error_running_in_graph_mode(self):
with ops.get_default_graph().as_default():
model = testing_utils.get_model_from_layers([DynamicLayer(dynamic=True)],
input_shape=(3,))
self.assertEqual(model.dynamic, True)
# But then you cannot run the model since you're in a graph scope.
with self.assertRaisesRegexp(
ValueError, 'You must enable eager execution'):
model.compile(rmsprop.RMSprop(0.001), loss='mse')
def test_manual_compute_output_shape(self):
class BuildCounter(keras.layers.Layer):
def __init__(self, *args, **kwargs): # pylint: disable=redefined-outer-name
super(BuildCounter, self).__init__(*args, **kwargs)
self.build_counter = 0
def build(self, input_shape):
self.build_counter += 1
def call(self, inputs):
return inputs
with context.eager_mode():
layer = BuildCounter(dtype=dtypes.float64)
output_shape = layer.compute_output_shape((None, 10))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(output_shape.as_list(), [None, 10])
output_signature = layer.compute_output_signature(
tensor_spec.TensorSpec(dtype=dtypes.float64, shape=[None, 10]))
self.assertEqual(layer.build_counter, 1)
self.assertEqual(output_signature.dtype, dtypes.float64)
self.assertEqual(output_signature.shape.as_list(), [None, 10])
layer(np.ones((5, 10)))
self.assertEqual(layer.build_counter, 1)
def test_eager_switch_case_input(self):
with context.eager_mode():
task = keras.Input(shape=(), dtype=dtypes.int32)
control_flow_ops.switch_case(
task[0], [lambda: constant_op.constant(1.0) for _ in range(10)])
def test_dynamic_layer_with_deferred_sequential_model(self):
model = keras.Sequential(
[DynamicLayer(dynamic=True),
keras.layers.Dense(3)])
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_nested_dynamic_layers_in_eager_mode(self):
inputs = keras.Input((3,))
outputs = DynamicLayer(dynamic=True)(inputs)
inner_model = keras.Model(inputs, outputs)
self.assertEqual(inner_model.dynamic, True)
inputs = keras.Input((3,))
x = DynamicLayer(dynamic=True)(inputs)
outputs = inner_model(x)
model = keras.Model(inputs, outputs)
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
def test_dynamic_subclassed_model_no_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
self.assertEqual(model.run_eagerly, True)
model.train_on_batch(np.random.random((2, 3)), np.random.random((2, 3)))
self.assertEqual(model.outputs, None)
def test_dynamic_subclassed_model_with_shape_inference(self):
class MyModel(keras.Model):
def __init__(self):
super(MyModel, self).__init__(dynamic=True)
self.layer1 = keras.layers.Dense(3)
self.layer2 = keras.layers.Dense(3)
def call(self, inputs):
if math_ops.reduce_sum(inputs) > 0:
return self.layer1(inputs)
else:
return self.layer2(inputs)
def compute_output_shape(self, input_shape):
return tuple(input_shape[:-1].as_list()) + (3,)
model = MyModel()
self.assertEqual(model.dynamic, True)
model.compile(rmsprop.RMSprop(0.001), loss='mse')
x, y = np.random.random((2, 3)), np.random.random((2, 3))
model.train_on_batch(x, y)
outputs = model(x)
self.assertEqual(outputs.shape.as_list(), [2, 3])
def test_deepcopy(self):
with context.eager_mode():
bias_reg = lambda x: 1e-3 * math_ops.reduce_sum(x)
layer = keras.layers.Conv2D(32, (3, 3), bias_regularizer=bias_reg)
# Call the Layer on data to generate regularize losses.
layer(array_ops.ones((1, 10, 10, 3)))
self.assertLen(layer.losses, 1)
new_layer = copy.deepcopy(layer)
self.assertEqual(new_layer.bias_regularizer, bias_reg)
self.assertEqual(layer.get_config(), new_layer.get_config())
@test_util.run_in_graph_and_eager_modes
def test_invalid_forward_pass(self):
inputs = keras.Input((3,))
with self.assertRaisesRegexp(ValueError, 'You did something wrong!'):
_ = InvalidLayer()(inputs)
def test_no_legacy_model(self):
inputs = keras.Input((1,))
legacy_dense_0 = legacy_core.Dense(1, name='legacy_dense_0')
legacy_dense_1 = legacy_core.Dense(1, name='legacy_dense_1')
layer = legacy_dense_0(inputs)
layer = keras.layers.Dense(1)(layer)
layer = legacy_dense_1(layer)
expected_regex = (r'The following are legacy tf\.layers\.Layers:\n '
'{}\n {}'.format(legacy_dense_0, legacy_dense_1))
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Model(inputs=[inputs], outputs=[layer])
model = keras.models.Model(inputs=[inputs], outputs=[inputs])
with self.assertRaisesRegexp(TypeError, expected_regex):
model._insert_layers([legacy_dense_0, legacy_dense_1])
def test_no_legacy_sequential(self):
layers = [
keras.layers.Dense(1),
legacy_core.Dense(1, name='legacy_dense_0')
]
expected_regex = r'legacy tf\.layers\.Layers:\n {}'.format(layers[1])
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Sequential(layers)
with self.assertRaisesRegexp(TypeError, expected_regex):
_ = keras.models.Sequential([keras.layers.Input(shape=(4,))] + layers)
model = keras.models.Sequential()
with self.assertRaisesRegexp(TypeError, expected_regex):
for l in layers:
model.add(l)
@keras_parameterized.run_with_all_model_types
@test_util.run_in_graph_and_eager_modes
def test_build_with_numpy_data(self):
model_layers = [
keras.layers.Dense(3, activation='relu', kernel_initializer='ones'),
keras.layers.Dense(1, activation='sigmoid', kernel_initializer='ones')
]
model = testing_utils.get_model_from_layers(model_layers, input_shape=(4,))
model(np.zeros((2, 4), dtype='float32'))
self.assertTrue(model.built)
@test_util.run_in_graph_and_eager_modes
def test_default_add_weight(self):
class TestLayer(keras.layers.Layer):
def __init__(self):
super(TestLayer, self).__init__()
self.default_weight = self.add_weight()
self.weight_without_name = self.add_weight(shape=(3, 4))
self.regularized_weight_without_name = self.add_weight(
shape=(3, 4), regularizer='l2')
layer = TestLayer()
self.assertEqual(layer.default_weight.shape.as_list(), [])
self.assertEqual(layer.weight_without_name.shape.as_list(), [3, 4])
self.assertEqual(layer.default_weight.dtype.name, 'float32')
self.assertEqual(layer.weight_without_name.dtype.name, 'float32')
self.assertEqual(len(layer.losses), 1)
if not context.executing_eagerly():
# Cannot access tensor.name in eager execution.
self.assertTrue('Variable_2/Regularizer' in layer.losses[0].name)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_learning_phase_freezing_for_layers(self):
class LearningPhaseLayer(keras.layers.Layer):
def call(self, inputs):
return keras.backend.in_train_phase(
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
def get_learning_phase_value():
model = keras.models.Sequential([LearningPhaseLayer(input_shape=(1,))])
model._run_eagerly = testing_utils.should_run_eagerly()
return np.sum(model(np.ones((1, 1))))
self.assertEqual(get_learning_phase_value(), 0)
# Test scope.
with keras.backend.learning_phase_scope(1):
self.assertEqual(get_learning_phase_value(), 1)
# The effects of the scope end after exiting it.
self.assertEqual(get_learning_phase_value(), 0)
# Test setting.
keras.backend.set_learning_phase(1)
self.assertEqual(get_learning_phase_value(), 1)
keras.backend.set_learning_phase(0)
self.assertEqual(get_learning_phase_value(), 0)
# Cannot be enabled with `run_eagerly=True`, see b/123904578
@test_util.run_all_in_graph_and_eager_modes
def test_layer_can_return_variable(self):
class ComputeSum(keras.layers.Layer):
def __init__(self):
super(ComputeSum, self).__init__()
self.total = variables.Variable(
initial_value=array_ops.zeros((1, 1)), trainable=False)
if not context.executing_eagerly():
keras.backend.get_session().run(self.total.initializer)
def call(self, inputs):
self.total.assign_add(inputs)
return self.total
inputs = keras.Input(shape=(1,))
model = keras.Model(inputs, ComputeSum()(inputs))
model.predict(np.ones((1, 1)))
def _get_layer_with_training_arg(self):
class TrainingLayer(keras.layers.Layer):
"""A layer with a `training` argument in a defuned `call`."""
@def_function.function
def call(self, inputs, training=None):
if training is None:
training = keras.backend.learning_phase()
return tf_utils.smart_cond(training,
lambda: array_ops.ones_like(inputs),
lambda: array_ops.zeros_like(inputs))
return TrainingLayer()
@keras_parameterized.run_with_all_model_types
# b/124459427: can't test with `run_eagerly=True` for now.
@test_util.run_in_graph_and_eager_modes
def test_training_arg_in_defun(self):
layer = self._get_layer_with_training_arg()
model = testing_utils.get_model_from_layers([layer], input_shape=(1,))
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 1.)
loss = model.evaluate(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(loss, 0.)
# Test that the argument injection performed in `call` is not active
# when the argument is passed explicitly.
layer = self._get_layer_with_training_arg()
inputs = keras.Input(shape=(1,))
# Pass `training` by name
outputs = layer(inputs, training=False)
model = keras.Model(inputs, outputs)
model.compile(rmsprop.RMSprop(0.),
loss='mae')
history = model.fit(np.zeros((1, 1)), np.zeros((1, 1)))
self.assertEqual(history.history['loss'][0], 0.)
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_raw_variable_assignment(self):
class RawVariableLayer(keras.layers.Layer):
def __init__(self, **kwargs):
super(RawVariableLayer, self).__init__(**kwargs)
# Test variables in nested structure.
self.var_list = [variables.Variable(1.), {'a': variables.Variable(2.)}]
def call(self, inputs):
return inputs * self.var_list[0] * self.var_list[1]['a']
model = testing_utils.get_model_from_layers([RawVariableLayer()],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
x, y = np.ones((10, 10)), np.ones((10, 10))
# Checks that variables get initialized.
model.fit(x, y, batch_size=2, epochs=2)
@test_util.run_in_graph_and_eager_modes
def test_layer_names(self):
inputs = keras.layers.Input(shape=[2])
add1 = inputs + inputs
add2 = keras.layers.Add()([inputs, inputs])
add3 = inputs + inputs
add4 = keras.layers.Add()([inputs, inputs])
model = keras.models.Model(
inputs=[inputs], outputs=[add1, add2, add3, add4])
actual_names = [l.name for l in model.layers]
graph_names = [
'input_1', 'tf_op_layer_AddV2', 'add', 'tf_op_layer_AddV2_1', 'add_1'
]
eager_names = [
'input_1', 'tf_op_layer_add', 'add', 'tf_op_layer_add_2', 'add_1'
]
for actual, eager, graph in zip(actual_names, graph_names, eager_names):
self.assertIn(actual, {eager, graph})
def test_add_trainable_weight_on_frozen_layer(self):
class TestLayer(keras.layers.Layer):
def build(self, input_shape):
self.w = self.add_weight(shape=(), trainable=True)
def call(self, inputs):
return self.w * inputs
layer = TestLayer()
layer.trainable = False
layer.build(None)
layer.trainable = True
self.assertListEqual(layer.trainable_weights, [layer.w])
@keras_parameterized.run_with_all_model_types
@keras_parameterized.run_all_keras_modes
def test_passing_initial_weights_values(self):
kernel_value = np.random.random((10, 2))
layer_with_weights = keras.layers.Dense(
2, use_bias=False, weights=[kernel_value])
model = testing_utils.get_model_from_layers([layer_with_weights],
input_shape=(10,))
model.compile(
'sgd',
'mse',
run_eagerly=testing_utils.should_run_eagerly())
inputs = np.random.random((3, 10))
out = model.predict(inputs)
self.assertAllClose(model.layers[-1].get_weights()[0], kernel_value)
self.assertAllClose(out, np.dot(inputs, kernel_value))
@test_util.run_in_graph_and_eager_modes
def test_set_weights_and_get_weights(self):
layer = keras.layers.Dense(2)
layer.build((None, 10))
kernel = np.random.random((10, 2))
bias = np.random.random((2,))
layer.set_weights([kernel, bias])
weights = layer.get_weights()
self.assertEqual(len(weights), 2)
self.assertAllClose(weights[0], kernel)
self.assertAllClose(weights[1], bias)
with self.assertRaisesRegexp(
ValueError, 'but the layer was expecting 2 weights'):
layer.set_weights([1, 2, 3])
with self.assertRaisesRegexp(
ValueError, 'not compatible with provided weight shape'):
layer.set_weights([kernel.T, bias])
  def test_get_config_error(self):
    """`get_config` must raise for extra ctor kwargs unless it is overridden."""

    class MyLayer(keras.layers.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayer, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

    # `__init__` includes kwargs but `get_config` is not overridden, so
    # an error should be thrown:
    with self.assertRaisesRegexp(NotImplementedError, 'Layer MyLayer has'):
      MyLayer('custom').get_config()

    class MyLayerNew(keras.layers.Layer):

      def __init__(self, my_kwarg='default', **kwargs):
        super(MyLayerNew, self).__init__(**kwargs)
        self.my_kwarg = my_kwarg

      def get_config(self):
        config = super(MyLayerNew, self).get_config()
        config['my_kwarg'] = self.my_kwarg
        return config

    # Test to make sure that error is not raised if the method call is
    # from an overridden `get_config`:
    self.assertEqual(MyLayerNew('custom').get_config()['my_kwarg'], 'custom')

    class MyLayerNew2(keras.layers.Layer):

      def __init__(self, name='MyLayerName', dtype=None, **kwargs):  # pylint:disable=redefined-outer-name
        super(MyLayerNew2, self).__init__(name=name, dtype=dtype, **kwargs)

    # Check that if the kwargs in `__init__` are base layer constructor
    # arguments, no error is thrown:
    self.assertEqual(MyLayerNew2(name='New').get_config()['name'], 'New')
  @test_util.run_in_graph_and_eager_modes
  def test_count_params(self):
    """`count_params` works once built and raises before weights exist."""
    dense = keras.layers.Dense(16)
    dense.build((None, 4))
    self.assertEqual(dense.count_params(), 16 * 4 + 16)  # kernel + bias

    # Unbuilt layer: there are no weights to count yet.
    dense = keras.layers.Dense(16)
    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
      dense.count_params()

    # Unbuilt model behaves the same.
    model = keras.Sequential(keras.layers.Dense(16))
    with self.assertRaisesRegexp(ValueError, 'call `count_params`'):
      model.count_params()

    # Supplying input_dim builds the layer eagerly, so counting works.
    dense = keras.layers.Dense(16, input_dim=4)
    model = keras.Sequential(dense)
    self.assertEqual(model.count_params(), 16 * 4 + 16)
def test_super_not_called(self):
class CustomLayerNotCallingSuper(keras.layers.Layer):
def __init__(self):
pass
layer = CustomLayerNotCallingSuper()
with self.assertRaisesRegexp(RuntimeError, 'You must call `super()'):
layer(np.random.random((10, 2)))
  @test_util.run_in_graph_and_eager_modes
  def test_first_arg_not_called_inputs(self):
    """`call` signatures need not name their first argument `inputs`."""
    x, y = array_ops.ones((10, 1)), array_ops.ones((10, 1))

    class ArgLayer(keras.layers.Layer):

      def call(self, x, y):
        return x + y

    layer = ArgLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))

    class KwargLayer(keras.layers.Layer):

      def call(self, x=None, y=None):
        return x + y

    layer = KwargLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))

    # The first positional argument is still mandatory even if it has a
    # default in the signature.
    with self.assertRaisesRegexp(ValueError, 'must always be passed'):
      layer(y=y)

    class TFFunctionLayer(keras.layers.Layer):

      @def_function.function
      def call(self, x, y=None):
        if y is None:
          return x
        return x + y

    layer = TFFunctionLayer()
    out = self.evaluate(layer(x=x, y=y))
    self.assertAllClose(out, 2 * np.ones((10, 1)))
  def test_build_input_shape(self):
    """The shape passed to `build` is recorded in `_build_input_shape`."""

    class CustomLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.add_weight('w', shape=input_shape[1:])
        super(CustomLayer, self).build(input_shape)

    # Explicit build().
    layer = CustomLayer()
    self.assertFalse(layer.built)
    layer.build([None, 1, 2, 3])
    self.assertTrue(layer.built)
    self.assertEqual([None, 1, 2, 3], layer._build_input_shape)

    # Implicit build via __call__ on a symbolic input.
    layer = CustomLayer()
    layer(keras.Input((3,)))
    self.assertTrue(layer.built)
    self.assertEqual([None, 3], layer._build_input_shape.as_list())
class SymbolicSupportTest(test.TestCase):
  """Interop between Keras symbolic tensors, graph tensors and eager tensors."""

  def test_using_symbolic_tensors_with_tf_ops(self):
    """Raw TF ops applied to Keras symbolic tensors build into the Keras graph."""
    # Single-input.
    x = keras.Input((3,))
    y = math_ops.square(x)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Multi-inputs.
    x1, x2 = keras.Input((3,)), keras.Input((3,))
    y = array_ops.concat([x1, x2], axis=1)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Mixing Keras symbolic tensors and graph tensors from the same graph works.
    with keras.backend.get_graph().as_default():
      x1 = keras.Input((3,))
    x2 = keras.Input((3,))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())

    # Creating same op type (matmul) multiple times in the Keras graph works.
    x1 = keras.Input((3,))
    x2 = keras.Input((3,))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())

  def test_mixing_eager_and_graph_tensors(self):
    """Combining a graph tensor with an eager tensor must raise TypeError."""
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = array_ops.ones((3, 3))  # created outside the graph -> eager
    self.assertIsInstance(x2, ops.EagerTensor)
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)

  def test_mixing_numpy_arrays_and_graph_tensors(self):
    """Combining a graph tensor with a NumPy array must raise TypeError."""
    with ops.Graph().as_default():
      x1 = array_ops.ones((3, 3))
    x2 = np.ones((3, 3), dtype='float32')
    with self.assertRaisesRegexp(TypeError, 'Graph tensors'):
      math_ops.matmul(x1, x2)

  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_eager_tensors(self):
    """Eager tensors mixed with Keras symbolic tensors become graph constants."""
    x1 = keras.Input((3,))
    x2 = array_ops.ones((3, 3))
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_mixing_keras_symbolic_tensors_and_numpy_arrays(self):
    """NumPy arrays mixed with Keras symbolic tensors become graph constants."""
    x1 = keras.Input((3,))
    x2 = np.ones((3, 3), dtype='float32')
    y = math_ops.matmul(x1, x2)
    self.assertEqual(y.graph, keras.backend.get_graph())
    fn = keras.backend.function(inputs=[x1], outputs=[y])
    x_val = np.random.random((3, 3))
    y_val = np.ones((3, 3))
    self.assertAllClose(fn([x_val])[0],
                        np.matmul(x_val, y_val),
                        atol=1e-5)

  @test_util.run_in_graph_and_eager_modes
  def test_reraising_exception(self):
    """An exception raised inside `call` keeps its originating stack trace."""
    # When layer is not dynamic, we have some pattern matching during exception
    # handling to detect when the user is trying to use python control flow.
    # When an exception is thrown but the pattern doesn't match, we want to
    # preserve the originating stack trace. An early implementation of this
    # logic lost the stack trace. We test the correct behavior here.

    class TypeErrorLayer(base_layer.Layer):

      def call(self, inputs):
        def easily_identifiable_name():
          raise TypeError('Non-matching TypeError message.')
        easily_identifiable_name()

    inputs = keras.Input((3,))

    try:
      _ = TypeErrorLayer()(inputs)
    except TypeError as e:
      # Depending on whether autograph rewrote the call, the frame name is
      # recovered from the autograph metadata or from the raw traceback.
      if hasattr(e, 'ag_error_metadata'):
        self.assertIn('easily_identifiable_name', str(e))
        # See ErrorMetadataBase in autograph/pyct/errors.py
        function_name = e.ag_error_metadata.translated_stack[-1].function_name
      else:
        tb = traceback.extract_tb(sys.exc_info()[2])
        last_entry = tb[-1]
        function_name = last_entry[2]
      self.assertEqual(function_name, 'easily_identifiable_name')

  @test_util.run_in_graph_and_eager_modes
  def test_summaries_in_tf_function(self):
    """Summaries written inside a layer's `call` reach the event file."""
    if not context.executing_eagerly():
      return

    class MyLayer(keras.layers.Layer):

      def call(self, inputs):
        summary_ops_v2.scalar('mean', math_ops.reduce_mean(inputs))
        return inputs

    tmp_dir = self.get_temp_dir()
    writer = summary_ops_v2.create_file_writer_v2(tmp_dir)
    with writer.as_default(), summary_ops_v2.always_record_summaries():
      my_layer = MyLayer()
      x = array_ops.ones((10, 10))

      def my_fn(x):
        return my_layer(x)

      _ = my_fn(x)

    # Exactly one event file should have been produced, tagged with the
    # layer-scoped summary name.
    event_file = gfile.Glob(os.path.join(tmp_dir, 'events*'))
    self.assertLen(event_file, 1)
    event_file = event_file[0]
    tags = set()
    for e in summary_iterator.summary_iterator(event_file):
      for val in e.summary.value:
        tags.add(val.tag)
    self.assertEqual(set(['my_layer/mean']), tags)
@test_util.run_all_in_graph_and_eager_modes
class NestedTrackingTest(test.TestCase):
  """Automatic tracking of nested sublayers, variables, updates and losses."""

  def test_nested_layer_variable_tracking(self):
    """Variables of sublayers and raw tf.Variables appear in `weights`."""
    # Test that variables from nested sublayers are
    # being tracked by subclassed layers.

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer, self).__init__()
        self.dense1 = keras.layers.Dense(1)
        self.dense2 = keras.layers.BatchNormalization()

      def build(self, input_shape):
        self.v1 = self.add_weight('v1', shape=input_shape[1:].as_list())
        self.v2 = variables.Variable(
            name='v2',
            initial_value=np.zeros(input_shape[1:].as_list(), dtype='float32'),
            trainable=False)

      def call(self, inputs):
        x = self.dense1(inputs) + self.dense2(inputs)
        return x + self.v1 + self.v2

    layer = MyLayer()
    inputs = keras.Input((1,))
    _ = layer(inputs)

    # Dense: 2 weights; BatchNorm: 4 (2 trainable + 2 moving stats);
    # plus v1 (trainable) and v2 (non-trainable) => 8 total, 5 trainable.
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 5)
    self.assertEqual(len(layer.non_trainable_weights), 3)

    # Freezing a sublayer moves its 2 weights to the non-trainable bucket.
    layer.dense1.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 3)
    self.assertEqual(len(layer.non_trainable_weights), 5)

    # Freezing the outer layer freezes everything.
    layer.trainable = False
    self.assertEqual(len(layer.weights), 8)
    self.assertEqual(len(layer.trainable_weights), 0)
    self.assertEqual(len(layer.non_trainable_weights), 8)
    # All four tracked attributes must show up as checkpoint dependencies.
    self.assertEqual(
        {id(v) for v in [layer.dense1, layer.dense2, layer.v1, layer.v2]},
        {id(v) for _, v in layer._checkpoint_dependencies})

  def test_nested_layer_updates_losses_tracking(self):
    """Updates and losses added by sublayers propagate to the outer layer."""
    # Test that updates and losses from nested sublayers are
    # being tracked by subclassed layers.

    class UpdateAndLossLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        return inputs + 1

    class MyLayer(keras.layers.Layer):

      def build(self, _):
        self.v1 = self.add_weight('v1', shape=())

      def __init__(self):
        super(MyLayer, self).__init__()
        self.ul1 = UpdateAndLossLayer()
        self.ul2 = UpdateAndLossLayer()

      def call(self, inputs):
        self.add_loss(math_ops.reduce_sum(inputs))
        self.add_update(state_ops.assign_add(self.v1, 1))
        x = self.ul1(inputs)
        return self.ul2(x)

    layer = MyLayer()

    # One loss/update from the outer layer plus one from each sublayer => 3.
    if context.executing_eagerly():
      inputs = array_ops.ones((3, 1))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertLen(layer.get_losses_for(None), 3)
    else:
      inputs = keras.Input((1,))
      _ = layer(inputs)
      self.assertEqual(len(layer.losses), 3)
      self.assertEqual(len(layer.updates), 3)
      self.assertLen(layer.get_losses_for(None), 3)

  def test_attribute_reassignment(self):
    """Reassigning/deleting attributes keeps tracking lists consistent."""
    l = keras.layers.Layer()
    l.a = keras.layers.Layer()
    l.a = []
    l.a = variables.Variable(1.)
    l.a = keras.layers.Layer()
    last_assignment = keras.layers.Layer()
    l.a = last_assignment

    l.b = variables.Variable(1.)
    del l.b
    l.c = keras.layers.Layer()
    del l.c
    l.d = last_assignment
    del l.d

    # Only the last value bound to `a` should remain tracked.
    self.assertEqual([last_assignment], l._layers)
    self.assertEqual([], l.trainable_weights)
    self.assertEqual([], l.non_trainable_weights)
    self.assertEqual([], l.weights)
    del l.a
    self.assertEqual([], l._layers)

  def test_assign_op_not_tracked_as_variable(self):
    """The tensor returned by `assign_add` must not be tracked as a weight."""

    class LayerWithAssignAttr(keras.layers.Layer):

      def build(self, input_shape):
        self.v = variables.Variable(1.)
        self.v_assign = self.v.assign_add(2.)

    layer = LayerWithAssignAttr()
    layer.build((10, 10))

    self.assertEqual([layer.v], layer.variables)

  def test_layer_class_not_tracked_as_sublayer(self):
    """Storing a layer *class* (not instance) must not create a sublayer."""
    # See https://github.com/tensorflow/tensorflow/issues/27431 for details.

    class LayerWithClassAttribute(keras.layers.Layer):

      def __init__(self):
        super(LayerWithClassAttribute, self).__init__()
        self.layer_fn = keras.layers.Dense

    layer = LayerWithClassAttribute()
    self.assertEmpty(layer.variables)
    self.assertEmpty(layer.submodules)

  def test_layer_call_fn_args(self):
    """`_call_fn_args` reflects the user signature, defun-wrapped or not."""

    class NonDefunLayer(keras.layers.Layer):

      def call(self, inputs, a, mask, b=None, training=None):
        return inputs

    class DefunLayer(keras.layers.Layer):

      @def_function.function
      def call(self, x, mask, a, training=None, b=None):
        return x

    nondefun_layer = NonDefunLayer()
    self.assertEqual(nondefun_layer._call_fn_args,
                     ['inputs', 'a', 'mask', 'b', 'training'])
    defun_layer = DefunLayer()
    self.assertEqual(defun_layer._call_fn_args,
                     ['x', 'mask', 'a', 'training', 'b'])

  def test_sequential_model(self):
    """Layer lists are tracked both in Sequential and in subclassed models."""
    model = keras.Sequential([keras.layers.Dense(10, input_shape=(10,)),
                              keras.layers.Dense(5)])
    self.assertLen(model.layers, 2)
    self.assertLen(model.weights, 4)

    # Make sure a subclass model also works when it is called 'Sequential'.

    class Sequential(keras.Model):

      def __init__(self):
        super(Sequential, self).__init__()
        self.dense_layers = [keras.layers.Dense(10),
                             keras.layers.Dense(5)]

      def call(self, inputs):
        x = inputs
        for d in self.dense_layers:
          x = d(x)
        return x

    s = Sequential()
    self.assertLen(s.layers, 2)
    self.assertLen(s.weights, 0)

    # Weights only exist after the model has been built by a call.
    s(keras.Input((10,)))
    self.assertLen(s.weights, 4)
@test_util.run_all_in_graph_and_eager_modes
class NameScopingTest(keras_parameterized.TestCase):
  """Variable and op names must be scoped under the owning layer's name."""

  def test_name_scope_layer(self):
    """Weights of a named layer are prefixed with the layer name."""
    x = keras.backend.placeholder(shape=(10, 10))
    layer = keras.layers.Dense(10, name='MyName')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName/kernel:0')

  def test_name_scope_sublayer(self):
    """A sublayer (here used as activation) runs inside the parent's scope."""

    class NameScopeTracker(keras.layers.Layer):

      def call(self, inputs):
        # Record the active name scope at call time for later inspection.
        self.active_name_scope = ops.get_name_scope()
        return inputs

    x = keras.backend.placeholder(shape=(10, 10))
    sublayer = NameScopeTracker(name='Sublayer')
    layer = keras.layers.Dense(10, activation=sublayer, name='MyName2')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName2/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName2/kernel:0')
    self.assertEqual(sublayer.active_name_scope, 'MyName2/Sublayer')

  def test_name_scope_tf_tensor(self):
    """Name scoping also applies when the layer is called on a concrete tensor."""
    x = ops.convert_to_tensor_v2(np.ones((10, 10)))
    layer = keras.layers.Dense(
        10, activation=keras.layers.ReLU(name='MyAct'), name='MyName3')
    layer(x)
    self.assertEqual(layer.bias.name, 'MyName3/bias:0')
    self.assertEqual(layer.kernel.name, 'MyName3/kernel:0')
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
class AutographControlFlowTest(keras_parameterized.TestCase):
  """Python control flow in `call` (autograph conversion and its limits)."""

  def test_disabling_in_context_is_matched(self):
    """With autograph off, `if` on a symbolic tensor raises the plain error."""
    test_obj = self

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        with test_obj.assertRaisesRegex(TypeError, 'Tensor.*as.*bool'):
          if constant_op.constant(False):
            return inputs * 1.
        return inputs * 0.

    @def_function.function(autograph=False)
    def test_fn():
      return MyLayer()(constant_op.constant([[1., 2., 3.]]))

    test_fn()

  def test_if_training_pattern_output(self):
    """`if training:` branching on the returned value is converted correctly."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          return inputs * 1.
        return inputs * 0.

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    # Training branch passes inputs through => zero MSE against themselves.
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 0.)
    # Inference branch zeroes the output => MSE of 1 against ones.
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 1.)

  def test_if_training_pattern_loss(self):
    """`if training:` guarding an added loss only applies during training."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          loss = math_ops.reduce_sum(inputs)
        else:
          loss = 0.
        self.add_loss(loss)
        return inputs

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    train_loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(train_loss, 2 * 3)
    test_loss = model.test_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(test_loss, 0)

  def test_if_training_pattern_metric(self):
    """`if training:` guarding an added metric only applies during training."""

    class MyLayer(keras.layers.Layer):

      def call(self, inputs, training=None):
        if training:
          metric = math_ops.reduce_sum(inputs)
        else:
          metric = 0.
        self.add_metric(metric, name='my_metric', aggregation='mean')
        return inputs

    inputs = keras.Input((3,))
    outputs = MyLayer()(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    # Repeat to check the metric state resets correctly between phases.
    for _ in range(3):
      _, train_metric = model.train_on_batch(np.ones((2, 3)),
                                             np.ones((2, 3)))

      self.assertEqual(train_metric, 2 * 3)
      _, test_metric = model.test_on_batch(np.ones((2, 3)),
                                           np.ones((2, 3)))
      self.assertEqual(test_metric, 0)

  def test_if_training_pattern_update(self):
    """`if training:` guarding a variable update only applies during training."""

    class MyLayer(keras.layers.Layer):

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          increment = 1.
        else:
          increment = 0.
        self.counter.assign_add(increment)
        return inputs

    inputs = keras.Input((3,))
    layer = MyLayer()
    outputs = layer(inputs)
    model = keras.Model(inputs, outputs)
    model.compile(
        'sgd',
        'mse',
        run_eagerly=testing_utils.should_run_eagerly())
    model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
    self.assertEqual(keras.backend.get_value(layer.counter), 1.)

  def test_conditional_updates_in_call(self):
    """add_update inside a branch: works eagerly, raises in graph mode."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def build(self, input_shape):
        self.counter = self.add_weight(
            shape=(), trainable=False, initializer='zeros')

      def call(self, inputs, training=None):
        if training:
          z = math_ops.reduce_sum(inputs)
          self.add_update(lambda: self.counter.assign_add(z))
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly())
      model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(keras.backend.get_value(layer.counter), 6.)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_update` in a control flow branch'):
        layer = MyLayer()
        layer(keras.Input((3,)))
        _ = layer.updates

  def test_conditional_losses_in_call(self):
    """add_loss inside a branch: works eagerly, raises in graph mode."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_loss(math_ops.reduce_sum(inputs))
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly())
      loss = model.train_on_batch(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(loss, 2 * 3)
    else:
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_loss` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))

  def test_conditional_callable_losses(self):
    """Callable (regularizer) losses remain usable inside tf.function branches."""
    model = keras.Sequential([
        keras.layers.Dense(
            1, kernel_regularizer=keras.regularizers.l2(1e-4), input_shape=(1,))
    ])
    model._run_eagerly = testing_utils.should_run_eagerly()

    def assert_graph(t):
      if not context.executing_eagerly():
        self.assertEqual(t.graph, ops.get_default_graph())

    @def_function.function
    def get_losses(t):
      if t < 0:
        return math_ops.reduce_sum(model.losses) * t
      else:
        return math_ops.reduce_sum(model.losses)

    assert_graph(get_losses(constant_op.constant(2.)))
    assert_graph(get_losses(constant_op.constant(0.5)))

  def test_conditional_metrics_in_call(self):
    """add_metric inside a branch: works eagerly, raises in graph mode."""

    class MyLayer(keras.layers.Layer):

      def __init__(self):
        super(MyLayer,
              self).__init__(dynamic=testing_utils.should_run_eagerly())

      def call(self, inputs, training=None):
        if training:
          self.add_metric(math_ops.reduce_sum(inputs),
                          name='sum',
                          aggregation='mean')
        return inputs

      def compute_output_shape(self, input_shape):
        return input_shape

    if testing_utils.should_run_eagerly():
      inputs = keras.Input((3,))
      layer = MyLayer()
      outputs = layer(inputs)
      model = keras.Model(inputs, outputs)
      model.compile(
          'sgd',
          'mse',
          run_eagerly=testing_utils.should_run_eagerly())
      history = model.fit(np.ones((2, 3)), np.ones((2, 3)))
      self.assertEqual(history.history['sum'][-1], 2 * 3)
    else:
      # TODO(fchollet): support the same workflow in graph mode.
      with self.assertRaisesRegexp(RuntimeError,
                                   '`add_metric` in a control flow branch'):
        layer = MyLayer()(keras.Input((3,)))

  def test_conditional_activity_regularizer_in_call(self):
    """Activity regularizer in a tensor-dependent branch only works eagerly."""

    class TestModel(keras.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.Dense(2, activity_regularizer='l2')

      def call(self, x, training=None):
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly())

    x = np.ones(shape=(10, 1))
    y = np.ones(shape=(10, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)

  def test_conditional_activity_regularizer_with_wrappers_in_call(self):
    """Same as above, with the regularized layer inside a TimeDistributed wrapper."""

    class TestModel(keras.Model):

      def __init__(self):
        super(TestModel, self).__init__(
            name='test_model', dynamic=testing_utils.should_run_eagerly())
        self.layer = keras.layers.TimeDistributed(
            keras.layers.Dense(2, activity_regularizer='l2'),
            input_shape=(3, 4))

      def call(self, x, training=None):
        if math_ops.greater(math_ops.reduce_sum(x), 0.0):
          return self.layer(x)
        else:
          return self.layer(x)

    model = TestModel()
    model.compile(
        loss='mse',
        optimizer='sgd',
        run_eagerly=testing_utils.should_run_eagerly())

    x = np.ones(shape=(10, 3, 4))
    y = np.ones(shape=(10, 3, 2))

    if testing_utils.should_run_eagerly():
      model.fit(x, y, epochs=2, batch_size=5)
    else:
      with self.assertRaisesRegexp(
          RuntimeError, '`activity_regularizer` in a control flow branch'):
        model.fit(x, y, epochs=2, batch_size=5)
class AddLayer(keras.layers.Layer):
  """Adds a scalar weight (initialized to one) to its input.

  Handy in tests that need a layer owning exactly one variable.
  """

  def build(self, _):
    self.v = self.add_weight('v', (), initializer='ones')
    self.built = True

  def call(self, inputs):
    shifted = inputs + self.v
    return shifted
class IdentityLayer(keras.layers.Layer):
  """Passes its input through unchanged.

  Handy in tests that need a layer without any variables.
  """

  def call(self, inputs):
    return inputs
@test_util.run_all_in_graph_and_eager_modes
class DTypeTest(keras_parameterized.TestCase):
  # This class only have tests relating to layer.dtype. Tests for dtype policies
  # are in mixed_precision/experimental/keras_test.py

  # TODO(reedwm): Maybe have a separate test file for input casting tests.

  def _const(self, dtype):
    """Returns a scalar constant tensor of the given dtype."""
    return array_ops.constant(1, dtype=dtype)

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_defaults_to_floatx(self):
    """A layer's dtype defaults to the global floatx and never changes after."""
    layer = AddLayer()
    self.assertEqual(layer.dtype, 'float32')
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float32')  # dtype should not change

    try:
      backend.set_floatx('float64')
      layer = AddLayer()
      self.assertEqual(layer.dtype, 'float64')
    finally:
      # Restore the global floatx so other tests are unaffected.
      backend.set_floatx('float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_dtype_to_constructor(self):
    """An explicit `dtype` constructor argument overrides floatx."""
    layer = IdentityLayer(dtype='float64')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

    layer = IdentityLayer(dtype='int32')
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'int32')

    # A DType object is accepted as well as a string.
    layer = IdentityLayer(dtype=dtypes.float64)
    layer(self._const('float32'))
    self.assertEqual(layer.dtype, 'float64')

  # TODO(review): missing the `test_` prefix, so the test runner never
  # executes this method; renaming it would enable it. Left as-is to avoid
  # changing which tests run.
  @testing_utils.enable_v2_dtype_behavior
  def input_cast_to_dtype(self):
    """Floating-point inputs are cast to the layer dtype; others are not."""
    layer = AddLayer()

    # Input should be cast to layer.dtype, so output should also be layer.dtype
    self.assertEqual(layer(self._const('float64')).dtype, 'float32')

    layer = AddLayer(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float64')

    # Test inputs are not casted if layer.dtype is not floating-point
    layer = IdentityLayer(dtype='int32')
    self.assertEqual(layer(self._const('float64')).dtype, 'float64')

    # Test inputs are not casted if the inputs are not floating-point
    layer = IdentityLayer(dtype='float32')
    self.assertEqual(layer(self._const('int32')).dtype, 'int32')

    # Test Numpy arrays are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(np.array(1, dtype='float32')).dtype, 'float64')

    # Test Python floats are casted
    layer = IdentityLayer(dtype='float64')
    self.assertEqual(layer(1.).dtype, 'float64')

  # TODO(review): missing the `test_` prefix, so the test runner never
  # executes this method (which is why the `_const` typo below went
  # unnoticed); renaming it would enable it.
  @testing_utils.enable_v2_dtype_behavior
  def multiple_inputs_cast_to_dtype(self):
    """Casting applies per-element to a list input, skipping non-float dtypes."""

    class MultiIdentityLayer(keras.layers.Layer):

      def call(self, inputs):
        return [array_ops.identity(x) for x in inputs]

    # Testing layer with default dtype of float32
    layer = MultiIdentityLayer()
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float32')
    self.assertEqual(y.dtype, 'float32')

    # Test passing dtype to the constructor
    layer = MultiIdentityLayer(dtype='float64')
    x, y = layer([self._const('float16'), self._const('float32')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float64')

    # Test several non-floating point types
    layer = MultiIdentityLayer(dtype='float64')
    # Bug fix: was `self._constant('complex64')` — the helper is `_const`,
    # so this raised AttributeError whenever the method actually ran.
    x, y, z, w = layer([self._const('float16'), self._const('bool'),
                        self._const('float64'), self._const('complex64')])
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'bool')
    self.assertEqual(z.dtype, 'float64')
    self.assertEqual(w.dtype, 'complex64')

  @testing_utils.enable_v2_dtype_behavior
  def test_extra_args_and_kwargs_not_casted(self):
    """Only the first (inputs) argument is auto-cast; *args/**kwargs are not."""

    class IdentityLayerWithArgs(keras.layers.Layer):

      def call(self, inputs, *args, **kwargs):
        return nest.flatten([inputs, args, kwargs])

    layer = IdentityLayerWithArgs(dtype='float64')
    x, y, z = layer(self._const('float16'), self._const('float16'),
                    kwarg=self._const('float16'))
    self.assertEqual(x.dtype, 'float64')
    self.assertEqual(y.dtype, 'float16')
    self.assertEqual(z.dtype, 'float16')

  @testing_utils.enable_v2_dtype_behavior
  def test_layer_without_autocast(self):
    """`autocast=False` disables input casting entirely."""

    class IdentityLayerWithoutAutocast(IdentityLayer):

      def __init__(self, *args, **kwargs):
        kwargs['autocast'] = False
        super(IdentityLayerWithoutAutocast, self).__init__(*args, **kwargs)

    layer = IdentityLayerWithoutAutocast(dtype='float64')
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')

  @testing_utils.enable_v2_dtype_behavior
  def test_dtype_warnings(self):
    """A layer warns exactly once when it implicitly casts inputs."""
    # Test a layer warns when it casts inputs.
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn a second time
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a new layer can warn even if a different layer already warned
    layer = IdentityLayer()
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      self.assertRegexpMatches(
          str(mock_warn.call_args),
          ".*from dtype float64 to the layer's dtype of float32.*"
          "The layer has dtype float32 because.*")

    # Test a layer does not warn if a dtype is passed
    layer = IdentityLayer(dtype='float32')
    with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
      layer(self._const('float64'))
      mock_warn.assert_not_called()

    # Test a layer does not warn if a Policy is set:
    with policy.policy_scope('float32'):
      layer = IdentityLayer()
      with test.mock.patch.object(tf_logging, 'warn') as mock_warn:
        layer(self._const('float64'))
        mock_warn.assert_not_called()

  @testing_utils.enable_v2_dtype_behavior
  def test_compute_output_signature(self):
    """`compute_output_signature` reports the layer dtype, not the input dtype."""

    class IdentityLayerWithOutputShape(IdentityLayer):

      def compute_output_shape(self, input_shape):
        return input_shape

    layer = IdentityLayerWithOutputShape(dtype='float64')
    output_signature = layer.compute_output_signature(
        tensor_spec.TensorSpec(shape=(), dtype='float32'))
    self.assertEqual(output_signature.shape, ())
    self.assertEqual(output_signature.dtype, 'float64')

  @testing_utils.enable_v2_dtype_behavior
  def test_composite_tensors_input_casting(self):
    """Sparse and ragged inputs are cast while preserving their composite type."""
    sparse = sparse_tensor.SparseTensor(
        indices=array_ops.constant([[0, 1], [2, 3]], dtype='int64'),
        values=array_ops.constant([0., 1.], dtype='float32'),
        dense_shape=array_ops.constant([4, 4], dtype='int64'))
    ragged = ragged_tensor.RaggedTensor.from_row_splits(
        values=array_ops.constant([1., 2., 3.], dtype='float32'),
        row_splits=array_ops.constant([0, 2, 2, 3], dtype='int64'))

    layer = IdentityLayer(dtype='float16')
    layer._supports_ragged_inputs = True

    for x in sparse, ragged:
      self.assertEqual(x.dtype, 'float32')
      y = layer(x)
      self.assertEqual(y.dtype, 'float16')
      self.assertEqual(type(x), type(y))

  def test_supports_ragged_inputs_attribute_error(self):
    """Feeding ragged input to a layer that doesn't support it raises."""
    with self.assertRaisesRegexp(ValueError,
                                 'does not support RaggedTensors'):
      ragged = ragged_tensor.RaggedTensor.from_row_splits(
          values=array_ops.constant([1., 2., 3.], dtype='float32'),
          row_splits=array_ops.constant([0, 2, 2, 3], dtype='int64'))
      model = keras.Sequential([
          keras.layers.InputLayer(input_shape=(None,), ragged=True),
          IdentityLayer()])
      model.compile(rmsprop.RMSprop(0.001), loss='mse')
      model.train_on_batch(ragged)

  @testing_utils.enable_v2_dtype_behavior
  def test_passing_non_tensor(self):
    """Non-tensor inputs must be passed through untouched."""
    layer = IdentityLayer()
    x = object()
    y = layer(x)  # Layer should not cast 'x', as it's not a tensor
    self.assertIs(x, y)

  @testing_utils.disable_v2_dtype_behavior
  def test_v1_behavior(self):
    """Under V1 behavior, dtype is inferred from the first input and not cast."""
    # Test dtype defaults to None and inferred from input
    layer = IdentityLayer()
    self.assertIsNone(layer.dtype)
    layer(self._const('float64'))
    self.assertEqual(layer.dtype, 'float64')

    # Test layer does not cast to dtype
    self.assertEqual(layer(self._const('float32')).dtype, 'float32')
if __name__ == '__main__':
  # Run the suite with eager execution enabled (TF 2.x default behavior).
  ops.enable_eager_execution()
  test.main()
| apache-2.0 |
ZdenekM/ar-table-itable | art_projected_gui/tests/test_ui_core.py | 6 | 4485 | #!/usr/bin/env python
import unittest
import rostest
import sys
from art_projected_gui.gui import UICore
from art_projected_gui.items import ObjectItem, PlaceItem
from PyQt4.QtGui import QApplication
from art_msgs.msg import ObjectType
import rospy
from shape_msgs.msg import SolidPrimitive
from geometry_msgs.msg import PoseStamped
app = QApplication(sys.argv)
class TestUICore(unittest.TestCase):
def setUp(self):
self.ui_core = UICore(0, 0, 2, 1, 1000)
self.type1 = ObjectType()
self.type1.name = "type1"
self.type1.bbox.type = SolidPrimitive.BOX
self.type1.bbox.dimensions = [0.1, 0.1, 0.1]
self.type2 = ObjectType()
self.type2.name = "type2"
self.type2.bbox.type = SolidPrimitive.BOX
self.type2.bbox.dimensions = [0.1, 0.1, 0.1]
self.ps = PoseStamped()
self.ps.pose.orientation.w = 1.0
def test_select_object_type(self):
self.ui_core.add_object("id1", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.add_object("id2", self.type2, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.add_object("id3", self.type2, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.select_object_type("type1")
self.assertEquals(self.ui_core.get_object("id1").selected, True, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id2").selected, False, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id3").selected, False, "test_select_object_type")
self.ui_core.select_object_type("type2")
self.assertEquals(self.ui_core.get_object("id1").selected, False, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id2").selected, True, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id3").selected, True, "test_select_object_type")
self.ui_core.select_object_type("type1", unselect_others=False)
self.assertEquals(self.ui_core.get_object("id1").selected, True, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id2").selected, True, "test_select_object_type")
self.assertEquals(self.ui_core.get_object("id3").selected, True, "test_select_object_type")
def test_get_object(self):
self.ui_core.add_object("id1", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.add_object("id2", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.assertIsNotNone(self.ui_core.get_object("id1"), "test_get_object")
self.assertEquals(self.ui_core.get_object("id1").object_id, "id1", "test_get_object")
self.assertIsNone(self.ui_core.get_object("non_existent_id"), "test_get_object")
def test_remove_object(self):
self.ui_core.add_object("id1", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.add_object("id2", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.assertEquals(len(list(self.ui_core.get_scene_items_by_type(ObjectItem))), 2, "test_remove_object")
self.assertEquals(self.ui_core.remove_object("id1"), True, "test_remove_object")
self.assertEquals(len(list(self.ui_core.get_scene_items_by_type(ObjectItem))), 1, "test_remove_object")
self.assertEquals(self.ui_core.remove_object("id2"), True, "test_remove_object")
self.assertEquals(len(list(self.ui_core.get_scene_items_by_type(ObjectItem))), 0, "test_remove_object")
self.assertEquals(self.ui_core.remove_object("id1"), False, "test_remove_object")
def test_get_by_type(self):
self.ui_core.add_object("id1", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
self.ui_core.add_place("caption", self.ps, self.type1)
self.assertEquals(len(list(self.ui_core.get_scene_items_by_type(ObjectItem))), 1, "test_get_by_type")
self.assertEquals(len(list(self.ui_core.get_scene_items_by_type(PlaceItem))), 1, "test_get_by_type")
def test_clear_places(self):
    """clear_places removes only PlaceItem entries, leaving objects intact."""
    self.ui_core.add_object("id1", self.type1, 0.5, 0.5, 0.0, [0, 0, 0, 1])
    self.ui_core.add_place("caption", self.ps, self.type1)
    self.ui_core.clear_places()
    # assertEqual, not the deprecated assertEquals alias (removed in Python 3.12)
    self.assertEqual(len(list(self.ui_core.get_scene_items_by_type(ObjectItem))), 1, "test_clear_places")
    self.assertEqual(len(list(self.ui_core.get_scene_items_by_type(PlaceItem))), 0, "test_clear_places")
if __name__ == '__main__':
    # Standalone entry point: bring up a ROS node so the UI core under test
    # can use ROS facilities, then hand control to the rostest runner.
    rospy.init_node('test_node')
    rostest.run('art_projected_gui', 'test_ui_core', TestUICore, sys.argv)
| lgpl-2.1 |
MobinRanjbar/hue | desktop/core/ext-py/requests-2.6.0/requests/packages/chardet/eucjpprober.py | 2919 | 3678 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
import sys
from . import constants
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCJPDistributionAnalysis
from .jpcntx import EUCJPContextAnalysis
from .mbcssm import EUCJPSMModel
class EUCJPProber(MultiByteCharSetProber):
    """Charset prober for EUC-JP.

    Combines three signals: a coding state machine that validates byte
    sequences against the EUC-JP model, a character-distribution analysis,
    and a two-character context analysis. The reported confidence is the
    larger of the two analyses' confidences.
    """

    def __init__(self):
        MultiByteCharSetProber.__init__(self)
        self._mCodingSM = CodingStateMachine(EUCJPSMModel)
        self._mDistributionAnalyzer = EUCJPDistributionAnalysis()
        self._mContextAnalyzer = EUCJPContextAnalysis()
        self.reset()

    def reset(self):
        MultiByteCharSetProber.reset(self)
        # The base class resets the state machine and distribution analyzer;
        # the context analyzer is specific to this prober.
        self._mContextAnalyzer.reset()

    def get_charset_name(self):
        return "EUC-JP"

    def feed(self, aBuf):
        """Feed a chunk of bytes through the state machine and analyzers.

        Returns the prober's detection state after consuming the chunk.
        """
        buf_len = len(aBuf)
        for pos in range(buf_len):
            # PY3K: aBuf is a byte array, so indexing yields an int, not a byte
            sm_state = self._mCodingSM.next_state(aBuf[pos])
            if sm_state == constants.eError:
                if constants._debug:
                    sys.stderr.write(self.get_charset_name()
                                     + ' prober hit error at byte ' + str(pos)
                                     + '\n')
                self._mState = constants.eNotMe
                break
            if sm_state == constants.eItsMe:
                self._mState = constants.eFoundIt
                break
            if sm_state == constants.eStart:
                char_len = self._mCodingSM.get_current_charlen()
                if pos == 0:
                    # First byte of the chunk: pair it with the carried-over
                    # last byte of the previous chunk.
                    self._mLastChar[1] = aBuf[0]
                    self._mContextAnalyzer.feed(self._mLastChar, char_len)
                    self._mDistributionAnalyzer.feed(self._mLastChar, char_len)
                else:
                    window = aBuf[pos - 1:pos + 1]
                    self._mContextAnalyzer.feed(window, char_len)
                    self._mDistributionAnalyzer.feed(window, char_len)

        # Remember the final byte so the next chunk can form a pair with it.
        self._mLastChar[0] = aBuf[buf_len - 1]

        if self.get_state() == constants.eDetecting:
            # Shortcut: declare success early once we have enough context data
            # and the confidence is already above the threshold.
            if (self._mContextAnalyzer.got_enough_data() and
                    (self.get_confidence() > constants.SHORTCUT_THRESHOLD)):
                self._mState = constants.eFoundIt

        return self.get_state()

    def get_confidence(self):
        # Take the stronger of the two independent analyses.
        return max(self._mContextAnalyzer.get_confidence(),
                   self._mDistributionAnalyzer.get_confidence())
| apache-2.0 |
MathieuR/ardupilot | Tools/LogAnalyzer/tests/TestVibration.py | 261 | 3069 | from LogAnalyzer import Test,TestResult
import DataflashLog
import numpy
class TestVibration(Test):
    '''test for accelerometer vibration (accX/accY/accZ) within recommendations'''

    def __init__(self):
        Test.__init__(self)
        self.name = "Vibration"

    def run(self, logdata, verbose):
        """Grade 2-sigma accelerometer noise measured during stable LOITER.

        Sets self.result to GOOD/WARN/FAIL based on per-axis thresholds, or
        NA (non-copter logs) / UNKNOWN (no usable IMU or LOITER data).
        """
        self.result = TestResult()
        # Thresholds below are copter-specific; skip other vehicle types.
        if logdata.vehicleType != "ArduCopter":
            self.result.status = TestResult.StatusType.NA
            return

        # Aim ranges in g for 2x standard deviation on each axis; Z sits on
        # gravity (~ -9.81) so it gets a wider band than X/Y.
        aimRangeWarnXY = 1.5
        aimRangeFailXY = 3.0
        aimRangeWarnZ = 2.0  # gravity +/- aim range
        aimRangeFailZ = 5.0  # gravity +/- aim range

        if "IMU" not in logdata.channels:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No IMU log data"
            return

        # find some stable LOITER data to analyze, at least 10 seconds
        chunks = DataflashLog.DataflashLogHelper.findLoiterChunks(logdata, minLengthSeconds=10, noRCInputs=True)
        if not chunks:
            self.result.status = TestResult.StatusType.UNKNOWN
            self.result.statusMessage = "No stable LOITER log data found"
            return

        # for now we'll just use the first (largest) chunk of LOITER data
        # TODO: ignore the first couple of secs to avoid bad data during transition - or can we check more analytically that we're stable?
        # TODO: accumulate all LOITER chunks over min size, or just use the largest one?
        startLine = chunks[0][0]
        endLine = chunks[0][1]

        def getStdDevIMU(logdata, channelName, startLine, endLine):
            # standard deviation of one accel channel over the LOITER segment
            loiterData = logdata.channels["IMU"][channelName].getSegment(startLine, endLine)
            # list() makes this work on Python 3 too, where dict.values()
            # returns a view that numpy.array would not flatten into numbers
            numpyData = numpy.array(list(loiterData.dictData.values()))
            return numpy.std(numpyData)

        # use 2x standard deviations as the metric, so if 95% of samples lie within the aim range we're good
        stdDevX = abs(2 * getStdDevIMU(logdata, "AccX", startLine, endLine))
        stdDevY = abs(2 * getStdDevIMU(logdata, "AccY", startLine, endLine))
        stdDevZ = abs(2 * getStdDevIMU(logdata, "AccZ", startLine, endLine))

        if (stdDevX > aimRangeFailXY) or (stdDevY > aimRangeFailXY) or (stdDevZ > aimRangeFailZ):
            self.result.status = TestResult.StatusType.FAIL
            self.result.statusMessage = "Vibration too high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX, stdDevY, stdDevZ)
        elif (stdDevX > aimRangeWarnXY) or (stdDevY > aimRangeWarnXY) or (stdDevZ > aimRangeWarnZ):
            self.result.status = TestResult.StatusType.WARN
            self.result.statusMessage = "Vibration slightly high (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX, stdDevY, stdDevZ)
        else:
            self.result.status = TestResult.StatusType.GOOD
            self.result.statusMessage = "Good vibration values (X:%.2fg, Y:%.2fg, Z:%.2fg)" % (stdDevX, stdDevY, stdDevZ)
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.