repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
mrtenda/CoilSnake | coilsnake/modules/eb/TitleScreenModule.py | 2 | 26582 | import logging
from coilsnake.model.common.blocks import Block
from coilsnake.model.eb.blocks import EbCompressibleBlock
from coilsnake.model.eb.graphics import EbGraphicTileset, EbTileArrangement
from coilsnake.model.eb.palettes import EbPalette
from coilsnake.model.eb.title_screen import TitleScreenLayoutEntry, \
CHARS_NUM_TILES
from coilsnake.modules.eb.EbModule import EbModule
from coilsnake.util.common.image import open_indexed_image
from coilsnake.util.common.yml import yml_dump, yml_load
from coilsnake.util.eb.pointer import from_snes_address, read_asm_pointer, \
write_asm_pointer, to_snes_address
log = logging.getLogger(__name__)

# Background data pointers (ASM-pointer locations inside the ROM)
BG_TILESET_POINTER = 0xEBF2
BG_ARRANGEMENT_POINTER = 0xEC1D
BG_ANIM_PALETTE_POINTER = 0xEC9D
BG_PALETTE_POINTER = 0xECC6
BG_PALETTE_POINTER_SECONDARY = 0xED6B

# Kirby debug menu sprite assembly
KIRBY_INDEX_AND_ASM_POINTER = 0x2FFB8
KIRBY_INDEX_AND_ASM_POINTER_SIZE = 3
KIRBY_INDEX_SIZE = 2
KIRBY_INDEX_AND_ASM_SIZE = KIRBY_INDEX_SIZE + 45

# Background data parameters
BG_ARRANGEMENT_WIDTH = 32
BG_ARRANGEMENT_HEIGHT = 32
BG_SUBPALETTE_LENGTH = 256
BG_NUM_ANIM_SUBPALETTES = 20
BG_NUM_TILES = 256
BG_TILESET_BPP = 8

# Characters data pointers
CHARS_TILESET_POINTER = 0xEC49
CHARS_ANIM_PALETTE_POINTER = 0xEC83
CHARS_PALETTE_POINTER = 0x3F492

# Characters data parameters
CHARS_SUBPALETTE_LENGTH = 16
CHARS_NUM_ANIM_SUBPALETTES = 14
CHARS_TILESET_BPP = 4

# Common parameters
ANIM_SUBPALETTE_LENGTH = 16
NUM_ANIM_FRAMES = BG_NUM_ANIM_SUBPALETTES + CHARS_NUM_ANIM_SUBPALETTES
NUM_CHARS = 9
NUM_SUBPALETTES = 1
TILE_WIDTH = 8
TILE_HEIGHT = 8

# Special palette slices: regions of the 256-color background palette that
# are replaced on each animation frame
CHARS_ANIM_SLICE = slice(0x80, 0x80 + ANIM_SUBPALETTE_LENGTH)
BG_ANIM_SLICE = slice(0x70, 0x70 + ANIM_SUBPALETTE_LENGTH)

# Animation data bank offsets
CHARS_LAYOUT_BANK = 0xA0FE
CHARS_LAYOUT_TABLE = 0x21CF9D
CHARS_LAYOUT_POINTER_OFFSET_DEFAULT = 0x210000

# Project file paths
BG_REFERENCE_PATH = "TitleScreen/Background/Reference"
BG_FRAMES_PATH = "TitleScreen/Background/{:02d}"
BG_INITIAL_FLASH_PATH = "TitleScreen/Background/InitialFlash"
CHARS_FRAMES_PATH = "TitleScreen/Chars/{:02d}"
CHARS_INITIAL_PATH = "TitleScreen/Chars/Initial"
CHARS_POSITIONS_PATH = "TitleScreen/Chars/positions"
class TitleScreenModule(EbModule):
    """Extracts the title screen data from EarthBound.

    This module allows for the editing of the background and characters
    of the title screen. The slide-in animation for the characters is
    controlled through assembly, while the rest of the animation works
    by changing between several palettes (one for each new frame of
    animation) and keeping the same tileset for each frame.
    """

    NAME = "Title Screen"
    # ROM address ranges freed for reallocation; each range holds the
    # original copy of data that this module rewrites elsewhere.
    FREE_RANGES = [
        (0x21B211, 0x21C6E4),  # Background Tileset
        (0x21AF7D, 0x21B210),  # Background Arrangement
        (0x21CDE1, 0x21CE07),  # Background Palette
        (0x21AEFD, 0x21AF7C),  # Background Animated Palette
        (0x21C6E5, 0x21CDE0),  # Characters Tileset
        (0x21AE7C, 0x21AE82),  # Characters Palette
        (0x21AE83, 0x21AEFC),  # Characters Animated Palette
        (0x21CE08, 0x21CF9C)   # Animation Data
    ]
    def __init__(self):
        """Initialize empty graphics structures for all title screen data."""
        super(TitleScreenModule, self).__init__()
        # Background data (includes the central "B", the copyright
        # notice and the glow around the letters)
        self.bg_tileset = EbGraphicTileset(
            num_tiles=BG_NUM_TILES, tile_width=TILE_WIDTH,
            tile_height=TILE_HEIGHT
        )
        self.bg_arrangement = EbTileArrangement(
            width=BG_ARRANGEMENT_WIDTH, height=BG_ARRANGEMENT_HEIGHT
        )
        # One subpalette per background animation frame.
        self.bg_anim_palette = EbPalette(
            num_subpalettes=BG_NUM_ANIM_SUBPALETTES,
            subpalette_length=ANIM_SUBPALETTE_LENGTH
        )
        self.bg_palette = EbPalette(
            num_subpalettes=NUM_SUBPALETTES,
            subpalette_length=BG_SUBPALETTE_LENGTH
        )
        # Characters data (the title screen's animated letters)
        self.chars_tileset = EbGraphicTileset(
            num_tiles=CHARS_NUM_TILES, tile_width=TILE_WIDTH,
            tile_height=TILE_HEIGHT
        )
        self.chars_anim_palette = EbPalette(
            num_subpalettes=CHARS_NUM_ANIM_SUBPALETTES,
            subpalette_length=ANIM_SUBPALETTE_LENGTH
        )
        self.chars_palette = EbPalette(
            num_subpalettes=NUM_SUBPALETTES,
            subpalette_length=CHARS_SUBPALETTE_LENGTH
        )
        # One list of TitleScreenLayoutEntry per character.
        self.chars_layouts = [[] for _ in range(NUM_CHARS)]
    def read_from_rom(self, rom):
        """Read all title screen data (background, characters, layouts)
        from the ROM."""
        self.read_background_data_from_rom(rom)
        self.read_chars_data_from_rom(rom)
        self.read_chars_layouts_from_rom(rom)
        # Add the characters palette to the background data, taken from
        # the last characters animation subpalette.
        self.bg_palette[0, CHARS_ANIM_SLICE] = \
            self.chars_anim_palette.get_subpalette(
                CHARS_NUM_ANIM_SUBPALETTES - 1
            )[0, :]
    def read_background_data_from_rom(self, rom):
        """Read the background tileset, arrangement and palettes from the
        ROM, decompressing each data block in turn."""
        with EbCompressibleBlock() as block:
            # Read the background tileset data
            self._decompress_block(rom, block, BG_TILESET_POINTER)
            self.bg_tileset.from_block(
                block=block, offset=0, bpp=BG_TILESET_BPP
            )
            # Read the background tile arrangement data
            self._decompress_block(rom, block, BG_ARRANGEMENT_POINTER)
            self.bg_arrangement.from_block(block=block, offset=0)
            # Read the background palette data
            # The decompressed data is smaller than the expected value,
            # so it is extended with black entries (2 bytes per color).
            self._decompress_block(rom, block, BG_PALETTE_POINTER)
            block.from_array(
                block.to_array() + [0]*(BG_SUBPALETTE_LENGTH*2 - len(block))
            )
            self.bg_palette.from_block(block=block, offset=0)
            # Read the background animated palette data
            # Each subpalette corresponds to an animation frame.
            self._decompress_block(rom, block, BG_ANIM_PALETTE_POINTER)
            self.bg_anim_palette.from_block(block=block, offset=0)
    def read_chars_data_from_rom(self, rom):
        """Read the characters tileset and palettes from the ROM,
        decompressing each data block in turn."""
        with EbCompressibleBlock() as block:
            # Read the characters tileset data
            self._decompress_block(rom, block, CHARS_TILESET_POINTER)
            self.chars_tileset.from_block(
                block=block, offset=0, bpp=CHARS_TILESET_BPP
            )
            # Read the characters palette data
            self._decompress_block(rom, block, CHARS_PALETTE_POINTER)
            self.chars_palette.from_block(block=block, offset=0)
            # Read the characters animated palette data
            # Each subpalette corresponds to an animation frame.
            self._decompress_block(rom, block, CHARS_ANIM_PALETTE_POINTER)
            self.chars_anim_palette.from_block(block=block, offset=0)
def read_chars_layouts_from_rom(self, rom):
lda_instruction = rom[CHARS_LAYOUT_BANK]
chars_layout_pointer_offset = CHARS_LAYOUT_POINTER_OFFSET_DEFAULT
# Check if we are dealing with the modified Rom,
# If we are, we need to recalculate the offset to the
# character layouts
if lda_instruction == 0xA9:
bank = rom[CHARS_LAYOUT_BANK + 1]
chars_layout_pointer_offset = from_snes_address(bank << 16)
self.chars_layouts = [[] for _ in range(NUM_CHARS)]
for char in range(NUM_CHARS):
# Get the location of a character's data
offset = chars_layout_pointer_offset + rom.read_multi(
CHARS_LAYOUT_TABLE + char*2, 2
)
# Read entries until a final entry is encountered
while True:
entry = TitleScreenLayoutEntry()
entry.from_block(rom, offset)
self.chars_layouts[char].append(entry)
offset += 5
if entry.is_final():
break
    def write_to_rom(self, rom):
        """Write all title screen data back into the ROM."""
        self.write_background_data_to_rom(rom)
        self.write_chars_data_to_rom(rom)
        self.write_chars_layouts_and_kirby_data_to_rom(rom)
    def write_background_data_to_rom(self, rom):
        """Compress and write the background tileset, arrangement and
        palettes into newly allocated ROM space, updating the pointers."""
        # Write the background tileset data
        block_size = self.bg_tileset.block_size(bpp=BG_TILESET_BPP)
        with EbCompressibleBlock(block_size) as block:
            self.bg_tileset.to_block(block=block, offset=0, bpp=BG_TILESET_BPP)
            self._write_compressed_block(rom, block, BG_TILESET_POINTER)
        # Write the background tile arrangement data
        block_size = self.bg_arrangement.block_size()
        with EbCompressibleBlock(block_size) as block:
            self.bg_arrangement.to_block(block=block, offset=0)
            self._write_compressed_block(rom, block, BG_ARRANGEMENT_POINTER)
        # Write the background palette data
        # There is an additional pointer to this location, so change that
        # one too
        block_size = self.bg_palette.block_size()
        with EbCompressibleBlock(block_size) as block:
            self.bg_palette.to_block(block=block, offset=0)
            new_offset = self._write_compressed_block(
                rom, block, BG_PALETTE_POINTER
            )
            write_asm_pointer(
                block=rom, offset=BG_PALETTE_POINTER_SECONDARY,
                pointer=to_snes_address(new_offset)
            )
        # Write the background animated palette data
        block_size = self.bg_anim_palette.block_size()
        with EbCompressibleBlock(block_size) as block:
            self.bg_anim_palette.to_block(block=block, offset=0)
            self._write_compressed_block(rom, block, BG_ANIM_PALETTE_POINTER)
    def write_chars_data_to_rom(self, rom):
        """Compress and write the characters tileset and palettes into
        newly allocated ROM space, updating the pointers."""
        # Write the characters tileset data
        block_size = self.chars_tileset.block_size(bpp=CHARS_TILESET_BPP)
        with EbCompressibleBlock(block_size) as block:
            self.chars_tileset.to_block(
                block=block, offset=0, bpp=CHARS_TILESET_BPP
            )
            self._write_compressed_block(rom, block, CHARS_TILESET_POINTER)
        # Write the characters palette data
        block_size = self.chars_palette.block_size()
        with EbCompressibleBlock(block_size) as block:
            self.chars_palette.to_block(block=block, offset=0)
            self._write_compressed_block(rom, block, CHARS_PALETTE_POINTER)
        # Write the characters animation palette data
        block_size = self.chars_anim_palette.block_size()
        with EbCompressibleBlock(block_size) as block:
            self.chars_anim_palette.to_block(block=block, offset=0)
            self._write_compressed_block(
                rom, block, CHARS_ANIM_PALETTE_POINTER
            )
    def write_chars_layouts_and_kirby_data_to_rom(self, rom):
        """Write the character layouts plus the relocated Kirby debug menu
        sprite assembly into one newly allocated, single-bank ROM region,
        then patch all pointers that reference them."""
        # Total size: every layout entry plus the Kirby index+assembly blob.
        block_size = sum(
            TitleScreenLayoutEntry.block_size()*len(c)
            for c in self.chars_layouts
        ) + KIRBY_INDEX_AND_ASM_SIZE

        # Ensure the new data is located in only one bank
        # Spreading it across two banks might make part of it inaccessible.
        def can_write_to(begin):
            return begin >> 16 == (begin + block_size) >> 16

        with Block(block_size) as block:
            # Write the character animation data to the ROM
            offset = 0
            for layout in self.chars_layouts:
                for entry in layout:
                    entry.to_block(block=block, offset=offset)
                    offset += entry.block_size()
            # Move the Kirby debug menu sprite assembly to its new location.
            # Its current location is read from a 3-byte little-endian
            # SNES address stored at KIRBY_INDEX_AND_ASM_POINTER.
            kirby_index_and_asm_offset = from_snes_address(
                rom[KIRBY_INDEX_AND_ASM_POINTER] |
                (rom[KIRBY_INDEX_AND_ASM_POINTER + 1] << 8) |
                (rom[KIRBY_INDEX_AND_ASM_POINTER + 2] << 16)
            )
            block[offset:offset + KIRBY_INDEX_AND_ASM_SIZE] = \
                rom[kirby_index_and_asm_offset:kirby_index_and_asm_offset + KIRBY_INDEX_AND_ASM_SIZE]
            rom_offset = rom.allocate(
                data=block,
                size=block_size,
                can_write_to=can_write_to
            )
            new_offset = to_snes_address(rom_offset)
        # Write the offsets to the layouts to the ROM
        new_bank = new_offset >> 16
        new_data_start = new_offset & 0xFFFF
        data_offset = new_data_start
        for c, layout in enumerate(self.chars_layouts):
            # Each table slot is a 2-byte little-endian in-bank offset.
            rom[CHARS_LAYOUT_TABLE + c*2:CHARS_LAYOUT_TABLE + c*2 + 2] = [
                data_offset & 0xFF, data_offset >> 8
            ]
            data_offset += len(layout)*TitleScreenLayoutEntry.block_size()
        # Fix pointers for the Kirby sprite assembly
        # (data_offset now points just past the layout data, i.e. at the
        # relocated Kirby blob)
        kirby_index_offset = (rom_offset & 0xFF0000) + data_offset
        kirby_asm_offset = kirby_index_offset + KIRBY_INDEX_SIZE
        rom[KIRBY_INDEX_AND_ASM_POINTER:KIRBY_INDEX_AND_ASM_POINTER + KIRBY_INDEX_AND_ASM_POINTER_SIZE] = \
            [kirby_index_offset & 0xFF, (kirby_index_offset & 0xFF00) >> 8, new_bank]
        rom[kirby_index_offset:kirby_asm_offset] = \
            [kirby_asm_offset & 0xFF, (kirby_asm_offset & 0xFF00) >> 8]
        # Change the offset for the character layouts
        # The way this normally works is that EarthBound stores the address
        # of the bank holding the data (0xE1 by default, hence the 0x210000
        # offset); the offsets in the table are then prefixed with that
        # address. However, reallocating the data may have changed its
        # bank, so we need to manually set it to the new bank address.
        # In order to change the offset, we are replacing a LDA instruction
        # which addresses a direct page (0xA5) with a LDA instruction
        # that treats its operand as the constant to load (0xA9)
        # See https://wiki.superfamicom.org/snes/show/65816+Reference#instructions.
        rom[CHARS_LAYOUT_BANK:CHARS_LAYOUT_BANK + 2] = [0xA9, new_bank]
    def read_from_project(self, resource_open):
        """Read all title screen data from the project files."""
        self.read_background_data_from_project(resource_open)
        self.read_chars_data_from_project(resource_open)
def read_background_data_from_project(self, resource_open):
# Load the background reference image
# The image's arrangement, tileset and palette will be used for the
# animation frames
with resource_open(BG_REFERENCE_PATH, "png") as f:
image = open_indexed_image(f)
self.bg_arrangement.from_image(
image, self.bg_tileset, self.bg_palette
)
# Read the background animated frames
for frame in range(NUM_ANIM_FRAMES):
# Create temporary structures used to check consistency between
# frames
tileset = EbGraphicTileset(BG_NUM_TILES, TILE_WIDTH, TILE_HEIGHT)
arrangement = EbTileArrangement(
BG_ARRANGEMENT_WIDTH, BG_ARRANGEMENT_HEIGHT
)
palette = EbPalette(NUM_SUBPALETTES, BG_SUBPALETTE_LENGTH)
# Read one frame's image data
with resource_open(BG_FRAMES_PATH.format(frame), "png") as f:
image = open_indexed_image(f)
arrangement.from_image(image, tileset, palette)
# Make sure each frame's tileset and arrangement is identical
# The background palette is checked only if it isn't the fake
# palette used for the first few frames
if frame >= CHARS_NUM_ANIM_SUBPALETTES:
# Get the background animated subpalette from the background
# palette
colors = palette[0, BG_ANIM_SLICE]
self.bg_anim_palette.subpalettes[
frame - CHARS_NUM_ANIM_SUBPALETTES
] = colors
palette[0, BG_ANIM_SLICE] = self.bg_palette[
0, BG_ANIM_SLICE
]
if self.bg_palette != palette:
log.warn(
"Palette from background frame {} does not match "
"reference.".format(frame)
)
if self.bg_tileset != tileset:
log.warn(
"Tileset from background frame {} does not match "
"reference.".format(frame)
)
if self.bg_arrangement != arrangement:
log.warn(
"Arrangement from background frame {} does not match "
"reference.".format(frame)
)
def read_chars_data_from_project(self, resource_open):
# Read the characters positions
with resource_open(CHARS_POSITIONS_PATH, "yml", True) as f:
chars_positions = yml_load(f)
# Read the characters animated frames
self.chars_tileset = None
self.chars_anim_palette = EbPalette(
CHARS_NUM_ANIM_SUBPALETTES, ANIM_SUBPALETTE_LENGTH
)
original_tileset = None
for p in range(CHARS_NUM_ANIM_SUBPALETTES):
# Read one of the animation frames
with resource_open(CHARS_FRAMES_PATH.format(p), "png") as f:
# Create temporary structures to hold the data
image = open_indexed_image(f)
arrangement = EbTileArrangement(
image.width // TILE_WIDTH, image.height // TILE_HEIGHT
)
tileset = EbGraphicTileset(
CHARS_NUM_TILES, TILE_WIDTH, TILE_HEIGHT
)
anim_subpalette = EbPalette(
NUM_SUBPALETTES, ANIM_SUBPALETTE_LENGTH
)
arrangement.from_image(image, tileset, anim_subpalette, True)
# Add the characters animation subpalette
for i in range(ANIM_SUBPALETTE_LENGTH):
self.chars_anim_palette[p, i] = anim_subpalette[0, i]
# Add the characters tileset if not already set, otherwise
# ensure that it the current tileset is identical
if not self.chars_tileset:
original_tileset = tileset
self.chars_tileset = EbGraphicTileset(
CHARS_NUM_TILES, TILE_WIDTH, TILE_HEIGHT
)
self.chars_tileset.tiles = [
[[0 for _ in range(TILE_HEIGHT)]
for _ in range(TILE_WIDTH)]
for _ in range(CHARS_NUM_TILES)
]
unused_tiles = set(range(CHARS_NUM_TILES))
# Set the new character layouts
self.chars_layouts = [[] for _ in range(NUM_CHARS)]
for c, data in chars_positions.items():
# Get the data from the YAML file
x = int(data['x'] // TILE_WIDTH)
y = int(data['y'] // TILE_HEIGHT)
width = int(data['width'] // TILE_WIDTH)
height = int(data['height'] // TILE_HEIGHT)
x_offset = data['top_left_offset']['x']
y_offset = data['top_left_offset']['y']
unknown = data['unknown']
# Generate a list of all tiles must be visited
# Where possible, we try to generate a multi tile (4 tiles
# stored as one); otherwise, bordering tiles that are
# visited will all be single tiles.
l = [
(i, j) for i in range(0, width, 2)
for j in range(0, height, 2)
]
if width % 2 == 1:
l.extend([(width-1, j) for j in range(1, height, 2)])
if height % 2 == 1:
l.extend([(i, height-1) for i in range(1, width, 2)])
# Generate the new reduced tileset
for i, j in l:
# Put the tile in the new tileset
o_tile = arrangement[x + i, y + j].tile
n_tile = unused_tiles.pop()
self.chars_tileset.tiles[n_tile] = tileset[o_tile]
entry = TitleScreenLayoutEntry(
i*8 + x_offset, j*8 + y_offset, n_tile, 0, unknown
)
# Create a multi entry if possible to save space
if i < width - 1 and j < height - 1:
entry.set_single(True)
o_tile_r = arrangement[x+i+1, y+j].tile
o_tile_d = arrangement[x+i, y+j+1].tile
o_tile_dr = arrangement[x+i+1, y+j+1].tile
n_tile_r = n_tile + 1
n_tile_d = n_tile + 16
n_tile_dr = n_tile + 17
unused_tiles.difference_update(
(n_tile_r, n_tile_d, n_tile_dr)
)
self.chars_tileset.tiles[n_tile_r] = \
tileset[o_tile_r]
self.chars_tileset.tiles[n_tile_d] = \
tileset[o_tile_d]
self.chars_tileset.tiles[n_tile_dr] = \
tileset[o_tile_dr]
self.chars_layouts[c].append(entry)
self.chars_layouts[c][-1].set_final(True)
elif original_tileset != tileset:
log.warn(
"Tileset from characters frame {} does not match "
"tileset from characters frame 0.".format(p)
)
# Read the initial characters palette
with resource_open(CHARS_INITIAL_PATH, "png") as f:
image = open_indexed_image(f)
arrangement = EbTileArrangement(
image.width // TILE_WIDTH, image.height // TILE_HEIGHT
)
tileset = EbGraphicTileset(
CHARS_NUM_TILES, TILE_WIDTH, TILE_HEIGHT
)
self.chars_palette = EbPalette(
NUM_SUBPALETTES, ANIM_SUBPALETTE_LENGTH
)
arrangement.from_image(image, tileset, self.chars_palette)
    def write_to_project(self, resource_open):
        """Write all title screen data to the project files."""
        self.write_background_data_to_project(resource_open)
        self.write_chars_data_to_project(resource_open)
    def write_background_data_to_project(self, resource_open):
        """Write the background reference image and per-frame images to
        the project files."""
        # Write out the reference background image
        # This image is used to get the arrangement, tileset and static
        # palette that will be used by all background images.
        with resource_open(
            BG_REFERENCE_PATH, "png"
        ) as f:
            image = self.bg_arrangement.image(self.bg_tileset, self.bg_palette)
            image.save(f)
        # Write out the background's animated frames
        for frame in range(NUM_ANIM_FRAMES):
            palette = EbPalette(NUM_SUBPALETTES, BG_SUBPALETTE_LENGTH)
            if frame < CHARS_NUM_ANIM_SUBPALETTES:
                # Early frames only carry the characters animation colors.
                palette[0, CHARS_ANIM_SLICE] = \
                    self.chars_anim_palette.get_subpalette(frame)[0, :]
            else:
                # Later frames use the full background palette with the
                # animated slice replaced by this frame's colors.
                palette[0, :] = self.bg_palette.get_subpalette(0)[0, :]
                palette[0, BG_ANIM_SLICE] = \
                    self.bg_anim_palette.get_subpalette(
                        frame - CHARS_NUM_ANIM_SUBPALETTES
                    )[0, :]
            with resource_open(BG_FRAMES_PATH.format(frame), "png") as f:
                image = self.bg_arrangement.image(self.bg_tileset, palette)
                image.save(f)
    def write_chars_data_to_project(self, resource_open):
        """Write the characters frames, initial palette and positions to
        the project files."""
        # Build an arrangement combining every character for convenience
        chars_positions = {}
        # 3x6 tiles per character, NUM_CHARS (9) characters side by side.
        arrangement = EbTileArrangement(3*9, 6)
        for c, layout in enumerate(self.chars_layouts):
            top_left = {'x': 128, 'y': 128}
            for e, entry in enumerate(layout):
                # Mask the tile index into the characters tileset range.
                tile = entry.tile & (CHARS_NUM_TILES - 1)
                top_left['x'] = min(top_left['x'], int(entry.x))
                top_left['y'] = min(top_left['y'], int(entry.y))
                # Shift the entry's signed coordinates into this
                # character's 3x6-tile slot of the combined arrangement.
                x = c*3 + (entry.x + 16) // 8
                y = (entry.y + 24) // 8
                arrangement[x, y].tile = tile
                if not entry.is_single():
                    # Non-single entries cover a 2x2 square of tiles.
                    arrangement[x+1, y].tile = tile + 1
                    arrangement[x, y+1].tile = tile + 16
                    arrangement[x+1, y+1].tile = tile + 17
            chars_positions[c] = {
                'x': c*3*8,
                'y': 0,
                'width': 3*8,
                'height': 6*8,
                'top_left_offset': top_left,
                'unknown': layout[0].unknown
            }
        # Write the characters animation frames
        for p in range(CHARS_NUM_ANIM_SUBPALETTES):
            with resource_open(CHARS_FRAMES_PATH.format(p), "png") as f:
                image = arrangement.image(
                    self.chars_tileset,
                    self.chars_anim_palette.get_subpalette(p)
                )
                image.save(f)
        # Write out the initial characters palette
        with resource_open(CHARS_INITIAL_PATH, "png") as f:
            image = arrangement.image(
                self.chars_tileset,
                self.chars_palette
            )
            image.save(f)
        # Write out the positions of the characters
        with resource_open(CHARS_POSITIONS_PATH, "yml", True) as f:
            yml_dump(chars_positions, f, False)
    def upgrade_project(
            self, old_version, new_version, rom, resource_open_r,
            resource_open_w, resource_delete):
        """Upgrade a project from an older format version.

        For projects older than format version 9 the title screen data is
        (re-)extracted from the ROM and written out to the project.
        """
        if old_version < 9:
            self.read_from_rom(rom)
            self.write_to_project(resource_open_w)
    @staticmethod
    def _decompress_block(rom, block, pointer):
        """Decompress into ``block`` the data at the ROM location
        referenced by the ASM pointer at ``pointer``."""
        block.from_compressed_block(
            block=rom,
            offset=from_snes_address(read_asm_pointer(rom, pointer))
        )
    @staticmethod
    def _write_compressed_block(rom, compressed_block, pointer):
        """Compress ``compressed_block``, allocate ROM space for it, point
        the ASM pointer at ``pointer`` to it, and return the new file
        offset."""
        compressed_block.compress()
        new_offset = rom.allocate(data=compressed_block)
        write_asm_pointer(
            block=rom, offset=pointer, pointer=to_snes_address(new_offset)
        )
        return new_offset
| gpl-3.0 |
divyang4481/photivo | scons-local-2.2.0/SCons/Scanner/Fortran.py | 14 | 14381 | """SCons.Scanner.Fortran
This module implements the dependency scanner for Fortran code.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
__revision__ = "src/engine/SCons/Scanner/Fortran.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import re
import SCons.Node
import SCons.Node.FS
import SCons.Scanner
import SCons.Util
import SCons.Warnings
class F90Scanner(SCons.Scanner.Classic):
    """
    A Classic Scanner subclass for Fortran source files which takes
    into account both USE and INCLUDE statements. This scanner will
    work for both F77 and F90 (and beyond) compilers.

    Currently, this scanner assumes that the include files do not contain
    USE statements. To enable the ability to deal with USE statements
    in include files, add logic right after the module names are found
    to loop over each include file, search for and locate each USE
    statement, and append each module name to the list of dependencies.
    Caching the search results in a common dictionary somewhere so that
    the same include file is not searched multiple times would be a
    smart thing to do.
    """
    def __init__(self, name, suffixes, path_variable,
                 use_regex, incl_regex, def_regex, *args, **kw):
        """Build a scanner from the three regexes (USE statements,
        INCLUDE statements, MODULE definitions)."""
        # Pre-compile the regexes in multi-line mode.
        self.cre_use = re.compile(use_regex, re.M)
        self.cre_incl = re.compile(incl_regex, re.M)
        self.cre_def = re.compile(def_regex, re.M)

        def _scan(node, env, path, self=self):
            # Resolve the node to its repository file; a file that does
            # not exist contributes no dependencies.
            node = node.rfile()
            if not node.exists():
                return []
            return self.scan(node, env, path)

        kw['function'] = _scan
        kw['path_function'] = SCons.Scanner.FindPathDirs(path_variable)
        # Recursive: included files are themselves scanned for dependencies.
        kw['recursive'] = 1
        kw['skeys'] = suffixes
        kw['name'] = name

        SCons.Scanner.Current.__init__(self, *args, **kw)
def scan(self, node, env, path=()):
# cache the includes list in node so we only scan it once:
if node.includes != None:
mods_and_includes = node.includes
else:
# retrieve all included filenames
includes = self.cre_incl.findall(node.get_text_contents())
# retrieve all USE'd module names
modules = self.cre_use.findall(node.get_text_contents())
# retrieve all defined module names
defmodules = self.cre_def.findall(node.get_text_contents())
# Remove all USE'd module names that are defined in the same file
# (case-insensitively)
d = {}
for m in defmodules:
d[m.lower()] = 1
modules = [m for m in modules if m.lower() not in d]
# Convert module name to a .mod filename
suffix = env.subst('$FORTRANMODSUFFIX')
modules = [x.lower() + suffix for x in modules]
# Remove unique items from the list
mods_and_includes = SCons.Util.unique(includes+modules)
node.includes = mods_and_includes
# This is a hand-coded DSU (decorate-sort-undecorate, or
# Schwartzian transform) pattern. The sort key is the raw name
# of the file as specifed on the USE or INCLUDE line, which lets
# us keep the sort order constant regardless of whether the file
# is actually found in a Repository or locally.
nodes = []
source_dir = node.get_dir()
if callable(path):
path = path()
for dep in mods_and_includes:
n, i = self.find_include(dep, source_dir, path)
if n is None:
SCons.Warnings.warn(SCons.Warnings.DependencyWarning,
"No dependency generated for file: %s (referenced by: %s) -- file not found" % (i, node))
else:
sortkey = self.sort_key(dep)
nodes.append((sortkey, n))
return [pair[1] for pair in sorted(nodes)]
def FortranScan(path_variable="FORTRANPATH"):
    """Return a prototype Scanner instance for scanning source files
    for Fortran USE & INCLUDE statements"""

    # The USE statement regex matches the following:
    #
    #     USE module_name
    #     USE :: module_name
    #     USE, INTRINSIC :: module_name
    #     USE, NON_INTRINSIC :: module_name
    #
    # Breakdown:
    #   (?i)        regex is case insensitive
    #   (?:^|;)     start of line, or a semicolon (statement separator),
    #               which lets multiple USE statements per line match
    #   \s*USE      the USE keyword
    #   (?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))
    #               either whitespace, or an optional
    #               ", INTRINSIC"/", NON_INTRINSIC" attribute followed by
    #               a double colon
    #   \s*(\w+)    the USE'd module name, captured
    #
    # Limitations:
    #
    # -- While the regex can handle multiple USE statements on one line,
    #    it cannot properly handle them if they are commented out: in
    #    "! USE mod_a ; USE mod_b" or "USE mod_a ! ; USE mod_b" the
    #    second module name is still picked up as a dependency. Fixing
    #    this would require reading the file line by line, treating the
    #    comment character as an end-of-line, instead of re.findall.
    #
    # -- It is questionable whether INTRINSIC modules should be reported
    #    at all, since those are built in to the compiler.
    #
    # The pattern is a raw string so that \s and \w reach the regex
    # engine verbatim instead of being treated as string escapes.
    use_regex = r"(?i)(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)"

    # The INCLUDE statement regex matches the following:
    #
    #     INCLUDE 'some_Text'
    #     INCLUDE "some_Text"
    #     INCLUDE "some_Text" ; INCLUDE "some_Text"
    #     INCLUDE kind_"some_Text"
    #
    # where some_Text can include any alphanumeric and/or special
    # character as defined by the Fortran 2003 standard.
    #
    # Breakdown:
    #   (?i)             regex is case insensitive
    #   (?:^|['">]\s*;)  start of line, or a semicolon that follows a
    #                    closing quote / greater-than symbol -- this
    #                    allows multiple INCLUDE statements per line while
    #                    properly ignoring ones inside in-line comments,
    #                    e.g. "INCLUDE 'f' ! ; INCLUDE 'f2'"
    #   \s*INCLUDE\s+    the INCLUDE keyword
    #   (?:\w+_)?        the optional "kind-param _" prefix allowed by
    #                    the standard
    #   [<"']            the opening include delimiter
    #   (.+?)            the included path/file name, captured
    #                    non-greedily so it stops at the next delimiter
    #   (?=["'>])        positive lookahead for the closing delimiter;
    #                    deliberately not consumed so that
    #                    semicolon-separated INCLUDE statements chain
    #
    # Limitations:
    #
    # -- A doubled "" or '' inside the quoted string (the standard's
    #    escape for a literal quote) is not reduced to a single
    #    character; in practice quotes rarely appear in file names.
    #
    # -- When an entire line of multiple INCLUDE statements is commented
    #    out ("! INCLUDE 'a' ; INCLUDE 'b'"), the first file is properly
    #    ignored but the second is still picked up. Each INCLUDE would
    #    need to be commented separately to avoid this.
    include_regex = r"""(?i)(?:^|['">]\s*;)\s*INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])"""

    # The MODULE statement regex finds module definitions by matching
    #
    #     MODULE module_name
    #
    # but *not* "MODULE PROCEDURE procedure_name":
    #
    #   (?i)           regex is case insensitive
    #   ^\s*MODULE\s+  the MODULE keyword at the start of a line
    #   (?!PROCEDURE)  negative lookahead rejecting MODULE PROCEDURE
    #   (\w+)          the defined module name, captured
    def_regex = r"""(?i)^\s*MODULE\s+(?!PROCEDURE)(\w+)"""

    scanner = F90Scanner("FortranScan",
                         "$FORTRANSUFFIXES",
                         path_variable,
                         use_regex,
                         include_regex,
                         def_regex)
    return scanner
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
simone/django-gb | django/utils/log.py | 2 | 5464 | import logging
import sys
import warnings
from django.conf import settings
from django.core import mail
from django.core.mail import get_connection
from django.utils.deprecation import RemovedInNextVersionWarning
from django.utils.module_loading import import_string
from django.views.debug import ExceptionReporter, get_exception_reporter_filter
# Imports kept for backwards-compatibility in Django 1.7.
from logging import NullHandler # NOQA
from logging.config import dictConfig # NOQA
getLogger = logging.getLogger
# Default logging for Django. This sends an email to the site admins on every
# HTTP 500 error. Depending on DEBUG, all other log records are either sent to
# the console (DEBUG=True) or discarded by mean of the NullHandler (DEBUG=False).
DEFAULT_LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'null': {
'class': 'logging.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
def configure_logging(logging_config, logging_settings):
    """Set up warning routing and apply the project's logging configuration.

    ``logging_config`` is the dotted path of a callable accepting a logging
    config dict; it is applied first to ``DEFAULT_LOGGING`` and then, if
    given, to ``logging_settings``.
    """
    if not sys.warnoptions:
        # No -W flags were passed on the command line: route warnings
        # through the logging framework and un-hide deprecation warnings
        # for the upcoming Django version.
        logging.captureWarnings(True)
        warnings.simplefilter("default", RemovedInNextVersionWarning)
    if not logging_config:
        return
    # Resolve the dotted path to the configuration callable, apply the
    # defaults, then layer the user-supplied settings on top.
    configurator = import_string(logging_config)
    configurator(DEFAULT_LOGGING)
    if logging_settings:
        configurator(logging_settings)
class AdminEmailHandler(logging.Handler):
    """An exception log handler that emails log entries to site admins.
    If the request is passed as the first argument to the log record,
    request data will be provided in the email report.
    """
    def __init__(self, include_html=False, email_backend=None):
        logging.Handler.__init__(self)
        # Whether to attach the full HTML traceback page to the email.
        self.include_html = include_html
        # Optional dotted path of a mail backend; None means the default.
        self.email_backend = email_backend
    def emit(self, record):
        # Build the subject/request description; the broad except is
        # deliberate: a logging handler must never raise, so any failure
        # (no record.request, broken META, filter error) falls back to a
        # minimal subject with no request data.
        try:
            request = record.request
            # Note the asymmetric casing ('internal' vs 'EXTERNAL') makes
            # external hits stand out in the subject line.
            subject = '%s (%s IP): %s' % (
                record.levelname,
                ('internal' if request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS
                 else 'EXTERNAL'),
                record.getMessage()
            )
            filter = get_exception_reporter_filter(request)
            request_repr = '\n{0}'.format(filter.get_request_repr(request))
        except Exception:
            subject = '%s: %s' % (
                record.levelname,
                record.getMessage()
            )
            request = None
            request_repr = "unavailable"
        subject = self.format_subject(subject)
        # Without exc_info, synthesize a pseudo-exception triple so the
        # ExceptionReporter still renders the log message.
        if record.exc_info:
            exc_info = record.exc_info
        else:
            exc_info = (None, record.getMessage(), None)
        message = "%s\n\nRequest repr(): %s" % (self.format(record), request_repr)
        reporter = ExceptionReporter(request, is_email=True, *exc_info)
        html_message = reporter.get_traceback_html() if self.include_html else None
        # fail_silently=True: mailing problems must not crash the caller.
        mail.mail_admins(subject, message, fail_silently=True,
                         html_message=html_message,
                         connection=self.connection())
    def connection(self):
        # Fresh connection per email, using the configured backend.
        return get_connection(backend=self.email_backend, fail_silently=True)
    def format_subject(self, subject):
        """
        Escape CR and LF characters, and limit length.
        RFC 2822's hard limit is 998 characters per line. So, minus "Subject: "
        the actual subject must be no longer than 989 characters.
        """
        formatted_subject = subject.replace('\n', '\\n').replace('\r', '\\r')
        return formatted_subject[:989]
class CallbackFilter(logging.Filter):
    """A logging filter that delegates the accept/reject decision to a
    callable.

    The callable receives the record about to be logged; its truthiness
    decides whether the record passes the filter.
    """

    def __init__(self, callback):
        # Decision function, invoked once per record.
        self.callback = callback

    def filter(self, record):
        # The logging framework historically uses 1/0 rather than
        # True/False; preserve those exact return values.
        return 1 if self.callback(record) else 0
class RequireDebugFalse(logging.Filter):
    # Passes records only when settings.DEBUG is False (i.e. production);
    # used above to gate the mail_admins handler.
    def filter(self, record):
        return not settings.DEBUG
class RequireDebugTrue(logging.Filter):
    # Passes records only when settings.DEBUG is True (development);
    # used above to gate the console handler.
    def filter(self, record):
        return settings.DEBUG
| bsd-3-clause |
CSC591ADBI-TeamProjects/Bitcoin-Price-Prediction | bitcoin.py | 1 | 5370 | import statsmodels.formula.api as smf
import sklearn.metrics as sm
import pandas as pd
import numpy as np
import math
import sys
# The path to the data folder should be given as input
if len(sys.argv) != 2:
print('bitcoin.py <path to data folder>')
sys.exit(1)
data_path = sys.argv[1]
# Reading the vectors from the given csv files
train1_90 = pd.read_csv(data_path+'/train1_90.csv')
train1_180 = pd.read_csv(data_path+'/train1_180.csv')
train1_360 = pd.read_csv(data_path+'/train1_360.csv')
train2_90 = pd.read_csv(data_path+'/train2_90.csv')
train2_180 = pd.read_csv(data_path+'/train2_180.csv')
train2_360 = pd.read_csv(data_path+'/train2_360.csv')
test_90 = pd.read_csv(data_path+'/test_90.csv')
test_180 = pd.read_csv(data_path+'/test_180.csv')
test_360 = pd.read_csv(data_path+'/test_360.csv')
def computeDelta(wt, X, Xi):
    """
    This function computes equation 6 of the paper, but with the euclidean distance
    replaced by the similarity function given in Equation 9.
    Parameters
    ----------
    wt : int
        This is the constant c at the top of the right column on page 4.
    X : A row of Panda Dataframe
        Corresponds to (x, y) in Equation 6.
    Xi : Panda Dataframe
        Corresponds to a dataframe of (xi, yi) in Equation 6.
    Returns
    -------
    float
        The output of equation 6, a prediction of the average price change.
    """
    # YOUR CODE GOES HERE
    # Weighted average of the Yi values: each training row contributes
    # exp(wt * similarity(X, xi)) -- a softmax-style kernel weighting.
    num = 0.0
    den = 0.0
    # NOTE: xrange is Python-2 only; the rest of this script also uses
    # Python 2 syntax (print statements), so it is kept here.
    for i in xrange(0,len(Xi)):
        # Last column of each row is Yi (the price change); the rest is xi.
        Yi = Xi.iloc[i][-1]
        xi = Xi.iloc[i][0:-1]
        s_X_xi = similarity(X[0:-1],xi)
        #shouldn't it be "wt" instead of "weight" in following 2 lines?
        num += float(Yi*math.exp(wt*s_X_xi))
        den += float(math.exp(wt*s_X_xi))
    return float(num)/den
def similarity(a, b):
    """Return the Pearson correlation between two equal-length vectors.

    Implements the similarity measure of Equation 9: the sum of elementwise
    products of deviations of ``a`` and ``b`` divided by
    ``M * std(a) * std(b)`` where ``M = len(b)``.  Population (ddof=0)
    standard deviations are used, matching ``np.std``'s default, so the
    result lies in [-1, 1].

    Vectorized with NumPy instead of the original element-by-element loop,
    which also relied on the Python-2-only ``xrange``; positional order is
    preserved via ``np.asarray`` exactly as the old index loop did.
    """
    a = np.asarray(a, dtype=float)
    b = np.asarray(b, dtype=float)
    # Population statistics (ddof=0), same as the original implementation.
    std_a = np.std(a)
    std_b = np.std(b)
    mu_a = np.mean(a)
    mu_b = np.mean(b)
    M = len(b)
    # Sum over z of (a[z] - mu_a) * (b[z] - mu_b), now as one C-level pass.
    sumab = np.sum((a - mu_a) * (b - mu_b))
    return float(sumab) / (M * std_a * std_b)
#i think this is for variance, sqrt missing
#def std(a):
# suma = 0
# mu = float(sum(a))/len(a)
# for ai in a:
# suma += (ai - mu)**2
# return float(suma)/len(a)
# Perform the Bayesian Regression to predict the average price change for each dataset of train2 using train1 as input.
# These will be used to estimate the coefficients (w0, w1, w2, and w3) in equation 8.
weight = 2 # This constant was not specified in the paper, but we will use 2.
trainDeltaP90 = np.empty(0)
trainDeltaP180 = np.empty(0)
trainDeltaP360 = np.empty(0)
for i in xrange(0,len(train1_90.index)) :
trainDeltaP90 = np.append(trainDeltaP90, computeDelta(weight,train2_90.iloc[i],train1_90))
for i in xrange(0,len(train1_180.index)) :
trainDeltaP180 = np.append(trainDeltaP180, computeDelta(weight,train2_180.iloc[i],train1_180))
for i in xrange(0,len(train1_360.index)) :
trainDeltaP360 = np.append(trainDeltaP360, computeDelta(weight,train2_360.iloc[i],train1_360))
# Actual deltaP values for the train2 data.
trainDeltaP = np.asarray(train2_360[['Yi']])
trainDeltaP = np.reshape(trainDeltaP, -1)
# Combine all the training data
d = {'deltaP': trainDeltaP,
'deltaP90': trainDeltaP90,
'deltaP180': trainDeltaP180,
'deltaP360': trainDeltaP360 }
trainData = pd.DataFrame(d)
# Feed the data: [deltaP, deltaP90, deltaP180, deltaP360] to train the linear model.
# Use the statsmodels ols function.
# Use the variable name model for your fitted model
# YOUR CODE HERE
model = smf.ols(formula = 'deltaP ~ deltaP90 + deltaP180 + deltaP360', data = trainData).fit()
# Print the weights from the model
print model.params
# Perform the Bayesian Regression to predict the average price change for each dataset of test using train1 as input.
# This should be similar to above where it was computed for train2.
# YOUR CODE HERE
testDeltaP90 = np.empty(0)
testDeltaP180 = np.empty(0)
testDeltaP360 = np.empty(0)
for i in xrange(0,len(train1_90.index)) :
testDeltaP90 = np.append(testDeltaP90, computeDelta(weight,test_90.iloc[i],train1_90))
for i in xrange(0,len(train1_180.index)) :
testDeltaP180 = np.append(testDeltaP180, computeDelta(weight,test_180.iloc[i],train1_180))
for i in xrange(0,len(train1_360.index)) :
testDeltaP360 = np.append(testDeltaP360, computeDelta(weight,test_360.iloc[i],train1_360))
# Actual deltaP values for test data.
# YOUR CODE HERE (use the right variable names so the below code works)
testDeltaP = np.asarray(test_360[['Yi']])
testDeltaP = np.reshape(testDeltaP, -1)
# Combine all the test data
d = {'deltaP': testDeltaP,
'deltaP90': testDeltaP90,
'deltaP180': testDeltaP180,
'deltaP360': testDeltaP360}
testData = pd.DataFrame(d)
# Predict price variation on the test data set.
result = model.predict(testData)
compare = { 'Actual': testDeltaP,
'Predicted': result }
compareDF = pd.DataFrame(compare)
# Compute the MSE and print the result
# HINT: consider using the sm.mean_squared_error function
MSE = 0.0
# YOUR CODE HERE
print "The MSE is %f" % (sm.mean_squared_error(compareDF['Actual'], compareDF['Predicted']))
| mit |
Fusion-Rom/android_external_chromium_org | tools/telemetry/telemetry/value/skip_unittest.py | 29 | 1532 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry import value
from telemetry.page import page_set
from telemetry.value import skip
class TestBase(unittest.TestCase):
    # Shared fixture for the value tests below: a one-page PageSet rooted
    # at this file's directory.
    def setUp(self):
        self.page_set = page_set.PageSet(file_path=os.path.dirname(__file__))
        self.page_set.AddPageWithDefaultRunNavigate("http://www.bar.com/")
    @property
    def pages(self):
        # Convenience accessor so tests can write self.pages[0].
        return self.page_set.pages
class ValueTest(TestBase):
    def testBuildbotAndRepresentativeValue(self):
        # A SkipValue carries no data, so every buildbot / representative
        # accessor must return None.
        v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
        self.assertIsNone(v.GetBuildbotValue())
        self.assertIsNone(v.GetBuildbotDataType(
            value.COMPUTED_PER_PAGE_SUMMARY_OUTPUT_CONTEXT))
        self.assertIsNone(v.GetChartAndTraceNameForPerPageResult())
        self.assertIsNone(v.GetRepresentativeNumber())
        self.assertIsNone(v.GetRepresentativeString())
    def testAsDict(self):
        # Serialization should expose the skip reason.
        v = skip.SkipValue(self.pages[0], 'page skipped for testing reason')
        d = v.AsDictWithoutBaseClassEntries()
        self.assertEquals(d['reason'], 'page skipped for testing reason')
    def testFromDict(self):
        # Round-trip: a dict with type 'skip' must deserialize to a
        # SkipValue with the same reason.
        d = {
            'type': 'skip',
            'name': 'skip',
            'units': '',
            'reason': 'page skipped for testing reason'
        }
        v = value.Value.FromDict(d, {})
        self.assertTrue(isinstance(v, skip.SkipValue))
        self.assertEquals(v.reason, 'page skipped for testing reason')
| bsd-3-clause |
jlegendary/youtube-dl | youtube_dl/extractor/eporner.py | 129 | 2307 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..utils import (
parse_duration,
str_to_int,
)
class EpornerIE(InfoExtractor):
    # youtube-dl extractor: resolves a video page URL to playable formats
    # by scraping the page title and the player config JavaScript.
    _VALID_URL = r'https?://(?:www\.)?eporner\.com/hd-porn/(?P<id>\d+)/(?P<display_id>[\w-]+)'
    _TEST = {
        'url': 'http://www.eporner.com/hd-porn/95008/Infamous-Tiffany-Teen-Strip-Tease-Video/',
        'md5': '39d486f046212d8e1b911c52ab4691f8',
        'info_dict': {
            'id': '95008',
            'display_id': 'Infamous-Tiffany-Teen-Strip-Tease-Video',
            'ext': 'mp4',
            'title': 'Infamous Tiffany Teen Strip Tease Video',
            'duration': 1838,
            'view_count': int,
            'age_limit': 18,
        }
    }
    def _real_extract(self, url):
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group('id')
        display_id = mobj.group('display_id')
        webpage = self._download_webpage(url, display_id)
        title = self._html_search_regex(
            r'<title>(.*?) - EPORNER', webpage, 'title')
        # The player config lives at a separate /config5/<id> endpoint.
        redirect_url = 'http://www.eporner.com/config5/%s' % video_id
        player_code = self._download_webpage(
            redirect_url, display_id, note='Downloading player config')
        sources = self._search_regex(
            r'(?s)sources\s*:\s*\[\s*({.+?})\s*\]', player_code, 'sources')
        formats = []
        # Each source entry looks like: file: "<url>", label: "<quality>".
        for video_url, format_id in re.findall(r'file\s*:\s*"([^"]+)",\s*label\s*:\s*"([^"]+)"', sources):
            fmt = {
                'url': video_url,
                'format_id': format_id,
            }
            # A leading number in the label (e.g. "720p") is the height.
            m = re.search(r'^(\d+)', format_id)
            if m:
                fmt['height'] = int(m.group(1))
            formats.append(fmt)
        self._sort_formats(formats)
        duration = parse_duration(self._html_search_meta('duration', webpage))
        # View count is optional; fatal=False yields None when missing.
        view_count = str_to_int(self._search_regex(
            r'id="cinemaviews">\s*([0-9,]+)\s*<small>views',
            webpage, 'view count', fatal=False))
        return {
            'id': video_id,
            'display_id': display_id,
            'title': title,
            'duration': duration,
            'view_count': view_count,
            'formats': formats,
            'age_limit': 18,
        }
| unlicense |
astropy/astropy | astropy/time/tests/test_custom_formats.py | 8 | 7556 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from datetime import date
from itertools import count
import pytest
import numpy as np
from erfa import DJM0
from astropy.time import Time, TimeFormat
from astropy.time.utils import day_frac
class SpecificException(ValueError):
    # Sentinel exception: tests raise it from custom TimeFormat hooks and
    # assert it surfaces unchanged (possibly as __cause__).
    pass
@pytest.fixture
def custom_format_name():
    """Yield a format name not yet registered in ``Time.FORMATS``.

    Tries "custom_format_name", then "custom_format_name_1", ... until a
    free name is found, and removes whatever the test registered under that
    name during teardown.
    """
    for i in count():
        # The f-string is only needed once a numeric suffix is appended;
        # the original wrapped the bare literal in a pointless f-string.
        custom = "custom_format_name" if not i else f"custom_format_name_{i}"
        if custom not in Time.FORMATS:
            break
    yield custom
    # Teardown: unregister the format so later tests start clean.
    Time.FORMATS.pop(custom, None)
def test_custom_time_format_set_jds_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_val_type_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
raise SpecificException
try:
Time(7.0, format=custom_format_name)
except ValueError as e:
assert hasattr(e, "__cause__") and isinstance(e.__cause__, SpecificException)
def test_custom_time_format_value_exception(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
raise SpecificException
t = Time.now()
with pytest.raises(SpecificException):
getattr(t, custom_format_name)
def test_custom_time_format_fine(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
return self.jd1 + self.jd2
t = Time.now()
getattr(t, custom_format_name)
t2 = Time(7, 9, format=custom_format_name)
getattr(t2, custom_format_name)
def test_custom_time_format_forgot_property(custom_format_name):
with pytest.raises(ValueError):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
def value(self):
return self.jd1, self.jd2
def test_custom_time_format_problematic_name():
assert "sort" not in Time.FORMATS, "problematic name in default FORMATS!"
assert hasattr(Time, "sort")
try:
class Custom(TimeFormat):
name = "sort"
_dtype = np.dtype([('jd1', 'f8'), ('jd2', 'f8')])
def set_jds(self, val, val2):
self.jd1, self.jd2 = val, val2
@property
def value(self):
result = np.empty(self.jd1.shape, self._dtype)
result['jd1'] = self.jd1
result['jd2'] = self.jd2
return result
t = Time.now()
assert t.sort() == t, "bogus time format clobbers everyone's Time objects"
t.format = "sort"
assert t.value.dtype == Custom._dtype
t2 = Time(7, 9, format="sort")
assert t2.value == np.array((7, 9), Custom._dtype)
finally:
Time.FORMATS.pop("sort", None)
def test_mjd_longdouble_preserves_precision(custom_format_name):
class CustomMJD(TimeFormat):
name = custom_format_name
def _check_val_type(self, val, val2):
val = np.longdouble(val)
if val2 is not None:
raise ValueError("Only one value permitted")
return val, 0
def set_jds(self, val, val2):
mjd1 = np.float64(np.floor(val))
mjd2 = np.float64(val - mjd1)
self.jd1, self.jd2 = day_frac(mjd1 + DJM0, mjd2)
@property
def value(self):
mjd1, mjd2 = day_frac(self.jd1 - DJM0, self.jd2)
return np.longdouble(mjd1) + np.longdouble(mjd2)
m = 58000.0
t = Time(m, format=custom_format_name)
# Pick a different long double (ensuring it will give a different jd2
# even when long doubles are more precise than Time, as on arm64).
m2 = np.longdouble(m) + max(2. * m * np.finfo(np.longdouble).eps,
np.finfo(float).eps)
assert m2 != m, 'long double is weird!'
t2 = Time(m2, format=custom_format_name)
assert t != t2
assert isinstance(getattr(t, custom_format_name), np.longdouble)
assert getattr(t, custom_format_name) != getattr(t2, custom_format_name)
@pytest.mark.parametrize(
"jd1, jd2",
[
("foo", None),
(np.arange(3), np.arange(4)),
("foo", "bar"),
(1j, 2j),
pytest.param(
np.longdouble(3), np.longdouble(5),
marks=pytest.mark.skipif(
np.longdouble().itemsize == np.dtype(float).itemsize,
reason="long double == double on this platform")),
({1: 2}, {3: 4}),
({1, 2}, {3, 4}),
([1, 2], [3, 4]),
(lambda: 4, lambda: 7),
(np.arange(3), np.arange(4)),
],
)
def test_custom_format_cannot_make_bogus_jd1(custom_format_name, jd1, jd2):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = jd1, jd2
@property
def value(self):
return self.jd1 + self.jd2
with pytest.raises((ValueError, TypeError)):
Time(5, format=custom_format_name)
def test_custom_format_scalar_jd1_jd2_okay(custom_format_name):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 7.0, 3.0
@property
def value(self):
return self.jd1 + self.jd2
getattr(Time(5, format=custom_format_name), custom_format_name)
@pytest.mark.parametrize(
"thing",
[
1,
1.0,
np.longdouble(1),
1.0j,
"foo",
b"foo",
Time(5, format="mjd"),
lambda: 7,
np.datetime64('2005-02-25'),
date(2006, 2, 25),
],
)
def test_custom_format_can_return_any_scalar(custom_format_name, thing):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 2., 0.
@property
def value(self):
return np.array(thing)
assert type(getattr(Time(5, format=custom_format_name),
custom_format_name)) == type(thing)
assert np.all(getattr(Time(5, format=custom_format_name),
custom_format_name) == thing)
@pytest.mark.parametrize(
"thing",
[
(1, 2),
[1, 2],
np.array([2, 3]),
np.array([2, 3, 5, 7]),
{6: 7},
{1, 2},
],
)
def test_custom_format_can_return_any_iterable(custom_format_name, thing):
class Custom(TimeFormat):
name = custom_format_name
def set_jds(self, val, val2):
self.jd1, self.jd2 = 2., 0.
@property
def value(self):
return thing
assert type(getattr(Time(5, format=custom_format_name),
custom_format_name)) == type(thing)
assert np.all(getattr(Time(5, format=custom_format_name),
custom_format_name) == thing)
| bsd-3-clause |
joshjh/WTR-CLU | openbook.py | 1 | 2383 | __author__ = 'josh'
""" Opens a excel workbook and indexes the respective rows """
import xlrd
class SP:
    """Service-person record identified by (last_name, position).

    Arbitrary spreadsheet columns are attached as attributes via
    setvalues().
    """

    def __init__(self, last_name, position):
        # Identity tuple used to recognise this record elsewhere.
        self.whois = (last_name, position)

    def setvalues(self, values):
        """Attach every entry of the mapping as an attribute on this object."""
        for name, value in values.items():
            setattr(self, name, value)
class AP:
    """Person record identified by (NAME, Service_No).

    Mirrors SP: extra spreadsheet columns are attached as attributes via
    setvalues().
    """

    def __init__(self, NAME, Service_No):
        # Identity tuple kept for later lookup.
        self.whois = (NAME, Service_No)

    def setvalues(self, values):
        """Copy each mapping entry onto the instance as an attribute."""
        for key in values.keys():
            setattr(self, key, values[key])
def openbook(workbook, sheet_type='USR'):
    """
    opens the workbook and creates class SP. Returns SP with all attributes for each line in the Excel Sheet
    """
    openedbook = xlrd.open_workbook(workbook)
    # Select the worksheet matching the requested report type.
    if sheet_type == 'USR':
        sheet = openedbook.sheet_by_name('Full USR')
    elif sheet_type == 'ALW':
        sheet = openedbook.sheet_by_name('Faslane')
    elif sheet_type == 'LVE':
        sheet = openedbook.sheet_by_name('Absence Details')
    # Row 0 holds the column titles; normalise them into valid attribute
    # names (spaces/parentheses -> underscores) so they can be setattr'd.
    header = sheet.row_values(0)
    for index in range(len(header)):
        if sheet_type == 'ALW':
            header[index] = header[index].replace(' ', '_')
            header[index] = header[index].replace('(', '_')
            header[index] = header[index].replace(')', '_')
            header[index] = header[index].replace('__', '_')
            # print (header[index])
        else:
            header[index] = header[index].replace(' ', '_')
    unit = []
    # Build one record object per data row (rows 1..nrows-1), zipping the
    # normalised header with that row's cell values.
    if sheet_type == 'USR':
        for x in range(1, sheet.nrows):
            sp_dictionary = dict(zip(header, sheet.row_values(x)))
            SP_object = SP(sp_dictionary['Last_Name'], sp_dictionary['Position'])
            SP_object.setvalues(sp_dictionary)
            unit.append(SP_object)
    elif sheet_type == 'ALW':
        for x in range(1, sheet.nrows):
            al_dictionary = dict(zip(header, sheet.row_values(x)))
            # NOTE(review): this branch uses SP even though an AP class is
            # defined above -- presumably intentional, but worth confirming.
            AL_object = SP(al_dictionary['NAME'], al_dictionary['Service_No'])
            AL_object.setvalues(al_dictionary)
            unit.append(AL_object)
    elif sheet_type == 'LVE':
        for x in range(1, sheet.nrows):
            lve_dictionary = dict(zip(header, sheet.row_values(x)))
            lve_object = SP(lve_dictionary['Full_Name'], lve_dictionary['Employee_Number'])
            lve_object.setvalues(lve_dictionary)
            unit.append(lve_object)
    return unit
| gpl-2.0 |
takis/django | django/core/cache/backends/locmem.py | 586 | 4287 | "Thread-safe in-memory cache backend."
import time
from contextlib import contextmanager
from django.core.cache.backends.base import DEFAULT_TIMEOUT, BaseCache
from django.utils.synch import RWLock
try:
from django.utils.six.moves import cPickle as pickle
except ImportError:
import pickle
# Global in-memory store of cache data. Keyed by name, to provide
# multiple named local memory caches.
_caches = {}
_expire_info = {}
_locks = {}
class dummy(object):
    """A no-op context manager, used where a real lock is not required."""

    def __enter__(self):
        # Nothing to acquire; yield None like the original generator did.
        return None

    def __exit__(self, exc_type, exc_value, tb):
        # Returning False propagates any exception, matching the original.
        return False
class LocMemCache(BaseCache):
    """Thread-safe, process-local cache backend.

    Data lives in the module-level dicts keyed by cache name, guarded by a
    reader/writer lock; values are stored pickled, so cached objects are
    isolated copies rather than shared references.
    """
    def __init__(self, name, params):
        BaseCache.__init__(self, params)
        # setdefault shares the store/expiry/lock for a given name across
        # all instances created with that name.
        self._cache = _caches.setdefault(name, {})
        self._expire_info = _expire_info.setdefault(name, {})
        self._lock = _locks.setdefault(name, RWLock())
    def add(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            # add() only writes when the key is missing or expired.
            if self._has_expired(key):
                self._set(key, pickled, timeout)
                return True
            return False
    def get(self, key, default=None, version=None, acquire_lock=True):
        # acquire_lock=False is used internally by incr(), which already
        # holds the writer lock (RWLock is not reentrant).
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = None
        with (self._lock.reader() if acquire_lock else dummy()):
            if not self._has_expired(key):
                pickled = self._cache[key]
        if pickled is not None:
            try:
                return pickle.loads(pickled)
            except pickle.PickleError:
                return default
        with (self._lock.writer() if acquire_lock else dummy()):
            # Expired entry: evict it under the write lock before
            # returning the default.
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return default
    def _set(self, key, value, timeout=DEFAULT_TIMEOUT):
        # Caller must already hold the write lock.
        if len(self._cache) >= self._max_entries:
            self._cull()
        self._cache[key] = value
        self._expire_info[key] = self.get_backend_timeout(timeout)
    def set(self, key, value, timeout=DEFAULT_TIMEOUT, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        pickled = pickle.dumps(value, pickle.HIGHEST_PROTOCOL)
        with self._lock.writer():
            self._set(key, pickled, timeout)
    def incr(self, key, delta=1, version=None):
        # Atomic read-modify-write under the writer lock.
        with self._lock.writer():
            value = self.get(key, version=version, acquire_lock=False)
            if value is None:
                raise ValueError("Key '%s' not found" % key)
            new_value = value + delta
            key = self.make_key(key, version=version)
            pickled = pickle.dumps(new_value, pickle.HIGHEST_PROTOCOL)
            self._cache[key] = pickled
        return new_value
    def has_key(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.reader():
            if not self._has_expired(key):
                return True
        # Key missing or expired: upgrade to the write lock and evict.
        with self._lock.writer():
            try:
                del self._cache[key]
                del self._expire_info[key]
            except KeyError:
                pass
            return False
    def _has_expired(self, key):
        # Missing keys get exp == -1, which always counts as expired;
        # exp is None means "cache forever".
        exp = self._expire_info.get(key, -1)
        if exp is None or exp > time.time():
            return False
        return True
    def _cull(self):
        # Evict every _cull_frequency-th key; frequency 0 means wipe all.
        if self._cull_frequency == 0:
            self.clear()
        else:
            doomed = [k for (i, k) in enumerate(self._cache) if i % self._cull_frequency == 0]
            for k in doomed:
                self._delete(k)
    def _delete(self, key):
        # Best-effort removal from both dicts; caller holds the write lock.
        try:
            del self._cache[key]
        except KeyError:
            pass
        try:
            del self._expire_info[key]
        except KeyError:
            pass
    def delete(self, key, version=None):
        key = self.make_key(key, version=version)
        self.validate_key(key)
        with self._lock.writer():
            self._delete(key)
    def clear(self):
        self._cache.clear()
        self._expire_info.clear()
| bsd-3-clause |
ajgallegog/gem5_arm | src/arch/power/PowerISA.py | 71 | 2254 | # Copyright (c) 2012 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from m5.SimObject import SimObject
class PowerISA(SimObject):
    # SimObject wrapper exposing the C++ PowerISA::ISA class
    # (arch/power/isa.hh) to the gem5/m5 Python configuration layer.
    type = 'PowerISA'
    cxx_class = 'PowerISA::ISA'
    cxx_header = "arch/power/isa.hh"
| bsd-3-clause |
asajeffrey/servo | tests/wpt/web-platform-tests/webdriver/tests/is_element_selected/selected.py | 4 | 1995 | from tests.support.asserts import assert_error, assert_success
from tests.support.inline import inline
check_doc = inline("""
<input id=checked type=checkbox checked>
<input id=notChecked type=checkbox>
""")
option_doc = inline("""
<select>
<option id=notSelected>r-
<option id=selected selected>r+
</select>
""")
def is_element_selected(session, element_id):
    # Issue the WebDriver "Is Element Selected" command for the given
    # element id and return the raw transport response for the asserts.
    return session.transport.send(
        "GET", "session/{session_id}/element/{element_id}/selected".format(
            session_id=session.session_id,
            element_id=element_id))
def test_no_top_browsing_context(session, closed_window):
response = is_element_selected(session, "foo")
assert_error(response, "no such window")
def test_no_browsing_context(session, closed_frame):
response = is_element_selected(session, "foo")
assert_error(response, "no such window")
def test_element_stale(session):
session.url = check_doc
element = session.find.css("#checked", all=False)
session.refresh()
result = is_element_selected(session, element.id)
assert_error(result, "stale element reference")
def test_element_checked(session):
session.url = check_doc
element = session.find.css("#checked", all=False)
result = is_element_selected(session, element.id)
assert_success(result, True)
def test_checkbox_not_selected(session):
session.url = check_doc
element = session.find.css("#notChecked", all=False)
result = is_element_selected(session, element.id)
assert_success(result, False)
def test_element_selected(session):
session.url = option_doc
element = session.find.css("#selected", all=False)
result = is_element_selected(session, element.id)
assert_success(result, True)
def test_element_not_selected(session):
session.url = option_doc
element = session.find.css("#notSelected", all=False)
result = is_element_selected(session, element.id)
assert_success(result, False)
| mpl-2.0 |
miguelparaiso/PracticaOdoo | addons/crm_helpdesk/__init__.py | 442 | 1081 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import crm_helpdesk
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
kouaw/CouchPotatoServer | libs/suds/sax/parser.py | 180 | 4461 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuative interface for managing XML
documents. Although, the term, DOM, is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
    """ SAX handler building a suds Document/Element tree via a node stack. """
    def __init__(self):
        # Stack of open nodes; the root Document stays at index 0.
        self.nodes = [Document()]
    def startElement(self, name, attrs):
        top = self.top()
        node = Element(unicode(name), parent=top)
        for a in attrs.getNames():
            n = unicode(a)
            v = unicode(attrs.getValue(a))
            attribute = Attribute(n,v)
            # xmlns declarations become namespace mappings, not attributes.
            if self.mapPrefix(node, attribute):
                continue
            node.append(attribute)
        # Buffer for character data collected until endElement.
        node.charbuffer = []
        top.append(node)
        self.push(node)
    def mapPrefix(self, node, attribute):
        # Returns True when the attribute was an xmlns declaration and has
        # been absorbed into the node's namespace tables.
        skip = False
        if attribute.name == 'xmlns':
            # Default namespace (xmlns="...").
            if len(attribute.value):
                node.expns = unicode(attribute.value)
            skip = True
        elif attribute.prefix == 'xmlns':
            # Prefixed namespace (xmlns:prefix="...").
            prefix = attribute.name
            node.nsprefixes[prefix] = unicode(attribute.value)
            skip = True
        return skip
    def endElement(self, name):
        name = unicode(name)
        current = self.top()
        # Flush buffered character data into the node's text.
        if len(current.charbuffer):
            current.text = Text(u''.join(current.charbuffer))
        del current.charbuffer
        if len(current):
            current.trim()
        currentqname = current.qname()
        if name == currentqname:
            self.pop()
        else:
            # Close tag does not match the open element.
            raise Exception('malformed document')
    def characters(self, content):
        text = unicode(content)
        node = self.top()
        node.charbuffer.append(text)
    def push(self, node):
        self.nodes.append(node)
        return node
    def pop(self):
        return self.nodes.pop()
    def top(self):
        # Current innermost open node.
        return self.nodes[len(self.nodes)-1]
class Parser:
    """SAX parser facade that produces a suds DOM tree."""

    @classmethod
    def saxparser(cls):
        """Build and return a configured (parser, handler) pair."""
        saxer = make_parser()
        # Do not fetch external general entities.
        saxer.setFeature(feature_external_ges, 0)
        content_handler = Handler()
        saxer.setContentHandler(content_handler)
        return (saxer, content_handler)

    def parse(self, file=None, string=None):
        """
        SAX parse XML text.
        @param file: Parse a python I{file-like} object.
        @type file: I{file-like} object.
        @param string: Parse string XML.
        @type string: str
        """
        timer = metrics.Timer()
        timer.start()
        saxer, content_handler = self.saxparser()
        if file is not None:
            saxer.parse(file)
            timer.stop()
            metrics.log.debug('sax (%s) duration: %s', file, timer)
            return content_handler.nodes[0]
        if string is not None:
            source = InputSource(None)
            source.setByteStream(StringIO(string))
            saxer.parse(source)
            timer.stop()
            metrics.log.debug('%s\nsax duration: %s', string, timer)
            return content_handler.nodes[0]
michael-chi/jumbo | v1/robot-node/stt/sphinxbase/doc/doxy2swig.py | 1 | 16587 | #!/usr/bin/env python
"""Doxygen XML to SWIG docstring converter.
Usage:
doxy2swig.py [options] input.xml output.i
Converts Doxygen generated XML files into a file containing docstrings
that can be used by SWIG-1.3.x. Note that you need to get SWIG
version > 1.3.23 or use Robin Dunn's docstring patch to be able to use
the resulting output.
input.xml is your doxygen generated XML file and output.i is where the
output will be written (the file will be clobbered).
"""
######################################################################
#
# This code is implemented using Mark Pilgrim's code as a guideline:
# http://www.faqs.org/docs/diveintopython/kgp_divein.html
#
# Author: Prabhu Ramachandran
# License: BSD style
#
# Thanks:
# Johan Hake: the include_function_definition feature
# Bill Spotz: bug reports and testing.
# Sebastian Henschel: Misc. enhancements.
#
######################################################################
from xml.dom import minidom
import re
import textwrap
import sys
import types
import os.path
import optparse
# TODO: do not process unnecessary files
# Maps a C object type name to a (SWIG class alias, C function prefix)
# pair; do_memberdef strips the prefix and attaches the function to the
# aliased class (e.g. cmd_ln_* functions become Config methods).
TYPEMAP = {
    'cmd_ln_t': ('Config', 'cmd_ln_'),
    'fe_t': ('FrontEnd', 'fe_'),
    'feat_t': ('Feature', 'feat_'),
    'fsg_model_t': ('FsgModel', 'fsg_model_'),
    'jsgf_t': ('Jsgf', 'jsgf_'),
    'ngram_model_set_t': ('NGramModelSet', 'ngram_model_set_'),
    'ngram_model_t': ('NGramModel', 'ngram_model_'),
}

# Only doxygen XML files whose names start with one of these prefixes
# are processed by do_doxygenindex; doxygen escapes '_' in source file
# names as '__' and '.' as '_8', hence the unusual spellings.
USE_PREFIXES = [
    'cmd__ln_8',
    'fe_8',
    'feat_',
    'fsg__model_',
    'jsgf_8',
    'ngram__model_'
]
def my_open_read(source):
    """Return *source* unchanged if it is file-like, else open it for reading."""
    if hasattr(source, "read"):
        return source
    return open(source)
def my_open_write(dest):
    """Return *dest* unchanged if it is file-like, else open it for writing."""
    if hasattr(dest, "write"):
        return dest
    return open(dest, 'w')
class Doxy2SWIG:
    """Converts Doxygen generated XML files into a file containing
    docstrings that can be used by SWIG-1.3.x that have support for
    feature("docstring"). Once the data is parsed it is stored in
    self.pieces.
    """

    def __init__(self, src, include_function_definition=True, quiet=False):
        """Initialize the instance given a source object. `src` can
        be a file or filename. If you do not want to include function
        definitions from doxygen then set
        `include_function_definition` to `False`. This is handy since
        this allows you to use the swig generated function definition
        using %feature("autodoc", [0,1]).
        """
        f = my_open_read(src)
        self.my_dir = os.path.dirname(f.name)
        self.xmldoc = minidom.parse(f).documentElement
        f.close()
        self.pieces = []
        self.pieces.append('\n// File: %s\n' %
                           os.path.basename(f.name))
        # Matches a run of whitespace; used to skip whitespace-only text nodes.
        self.space_re = re.compile(r'\s+')
        # Strips the stray space right after the opening quote of a
        # %feature("docstring") line.
        self.lead_spc = re.compile(r'^(%feature\S+\s+\S+\s*?)"\s+(\S)')
        self.multi = 0
        # Tag names whose subtrees contribute nothing to the docstrings.
        self.ignores = ['inheritancegraph', 'param', 'listofallmembers',
                        'innerclass', 'name', 'declname', 'incdepgraph',
                        'invincdepgraph', 'programlisting', 'type',
                        'references', 'referencedby', 'location',
                        'collaborationgraph', 'reimplements',
                        'reimplementedby', 'derivedcompoundref',
                        'basecompoundref']
        self.include_function_definition = include_function_definition
        if not include_function_definition:
            self.ignores.append('argsstring')
        self.quiet = quiet

    def generate(self):
        """Parses the file set in the initialization. The resulting
        data is stored in `self.pieces`.
        """
        self.parse(self.xmldoc)

    def parse(self, node):
        """Parse a given node. This function in turn calls the
        `parse_<nodeType>` functions which handle the respective
        nodes.
        """
        pm = getattr(self, "parse_%s" % node.__class__.__name__)
        pm(node)

    def parse_Document(self, node):
        """Parse the top-level DOM Document by descending to its root element."""
        self.parse(node.documentElement)

    def parse_Text(self, node):
        """Escape and emit a text node; whitespace-only text is dropped."""
        txt = node.data
        # Escape backslashes and quotes so the text survives inside the
        # double-quoted %feature("docstring") literal.
        txt = txt.replace('\\', r'\\\\')
        txt = txt.replace('"', r'\"')
        # Ignore pure whitespace.
        m = self.space_re.match(txt)
        if m and len(m.group()) == len(txt):
            pass
        else:
            self.add_text(textwrap.fill(txt, break_long_words=False))

    def parse_Element(self, node):
        """Parse an `ELEMENT_NODE`. This calls specific
        `do_<tagName>` handlers for different elements. If no handler
        is available the `generic_parse` method is called. All
        tagNames specified in `self.ignores` are simply ignored.
        """
        name = node.tagName
        ignores = self.ignores
        if name in ignores:
            return
        attr = "do_%s" % name
        if hasattr(self, attr):
            handlerMethod = getattr(self, attr)
            handlerMethod(node)
        else:
            self.generic_parse(node)

    def parse_Comment(self, node):
        """Parse a `COMMENT_NODE`. This does nothing for now."""
        return

    def add_text(self, value):
        """Adds text corresponding to `value` into `self.pieces`."""
        if isinstance(value, (tuple, list)):
            self.pieces.extend(value)
        else:
            self.pieces.append(value)

    def get_specific_nodes(self, node, names):
        """Given a node and a sequence of strings in `names`, return a
        dictionary containing the names as keys and child
        `ELEMENT_NODEs`, that have a `tagName` equal to the name.
        """
        nodes = [(x.tagName, x) for x in node.childNodes
                 if x.nodeType == x.ELEMENT_NODE and
                 x.tagName in names]
        return dict(nodes)

    def generic_parse(self, node, pad=0):
        """A Generic parser for arbitrary tags in a node.
        Parameters:
         - node: A node in the DOM.
         - pad: `int` (default: 0)
           If 0 the node data is not padded with newlines. If 1 it
           appends a newline after parsing the childNodes. If 2 it
           pads before and after the nodes are processed. Defaults to
           0.
        """
        npiece = 0
        if pad:
            npiece = len(self.pieces)
            if pad == 2:
                self.add_text('\n')
        for n in node.childNodes:
            self.parse(n)
        if pad:
            # Only emit the trailing newline when the children actually
            # produced output.
            if len(self.pieces) > npiece:
                self.add_text('\n')

    def space_parse(self, node):
        """Emit a separating space, then parse the node generically."""
        self.add_text(' ')
        self.generic_parse(node)

    # Inline markup elements are all rendered as plain text preceded by
    # a single separating space.
    do_ref = space_parse
    do_emphasis = space_parse
    do_bold = space_parse
    do_computeroutput = space_parse
    do_formula = space_parse

    def do_compoundname(self, node):
        """Open a %feature("docstring") entry for a compound (class/struct)."""
        self.add_text('\n\n')
        data = node.firstChild.data
        self.add_text('%%feature("docstring") %s "\n' % data)

    def do_compounddef(self, node):
        """Emit docstrings for a compound; only public classes/structs and
        the sectiondefs of files/namespaces are processed."""
        kind = node.attributes['kind'].value
        if kind in ('class', 'struct'):
            prot = node.attributes['prot'].value
            if prot != 'public':
                return
            names = ('compoundname', 'briefdescription',
                     'detaileddescription', 'includes')
            first = self.get_specific_nodes(node, names)
            for n in names:
                # BUGFIX: has_key() is Python-2-only; use the 'in' operator.
                if n in first:
                    self.parse(first[n])
            self.add_text(['";', '\n'])
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
        elif kind in ('file', 'namespace'):
            nodes = node.getElementsByTagName('sectiondef')
            for n in nodes:
                self.parse(n)

    def do_includes(self, node):
        self.add_text('C++ includes: ')
        self.generic_parse(node, pad=1)

    def do_parameterlist(self, node):
        """Emit a titled parameter/exception list."""
        text = 'unknown'
        for key, val in node.attributes.items():
            if key == 'kind':
                if val == 'param':
                    text = 'Parameters'
                elif val == 'exception':
                    text = 'Exceptions'
                else:
                    text = val
                break
        self.add_text(['\n', '\n', text, ':', '\n'])
        self.generic_parse(node, pad=1)

    def do_para(self, node):
        self.add_text('\n')
        self.generic_parse(node, pad=1)

    def do_parametername(self, node):
        self.add_text('\n')
        try:
            data = node.firstChild.data
        except AttributeError:  # perhaps a <ref> tag in it
            data = node.firstChild.firstChild.data
        if data.find('Exception') != -1:
            self.add_text(data)
        else:
            self.add_text("%s: " % data)

    def do_parameterdefinition(self, node):
        self.generic_parse(node, pad=1)

    def do_detaileddescription(self, node):
        self.generic_parse(node, pad=1)

    def do_briefdescription(self, node):
        self.generic_parse(node, pad=1)

    def do_memberdef(self, node):
        """Emit a %feature("docstring") entry for a public member
        function, mapping it onto its SWIG class via TYPEMAP."""
        prot = node.attributes['prot'].value
        kind = node.attributes['kind'].value
        tmp = node.parentNode.parentNode.parentNode
        compdef = tmp.getElementsByTagName('compounddef')[0]
        cdef_kind = compdef.attributes['kind'].value
        if prot == 'public':
            first = self.get_specific_nodes(node, ('definition', 'name'))
            name = first['name'].firstChild.data
            for n in node.getElementsByTagName('param'):
                arg_type = n.getElementsByTagName('type')[0]
                # BUGFIX: ('ref') is just the string 'ref', which made the
                # tagName test a substring match; a one-element tuple
                # restricts it to exact matches.
                ref = self.get_specific_nodes(arg_type, ('ref',))
                if 'ref' in ref:
                    type_name = ref['ref'].firstChild.data
                    # TODO: check argument position
                    if type_name in TYPEMAP:
                        alias, prefix = TYPEMAP[type_name]
                        short_name = name.replace(prefix, '')
                        if not re.match(r'^\d', short_name):
                            name = alias + '::' + name.replace(prefix, '')
                    break
            if name[:8] == 'operator':  # Don't handle operators yet.
                return
            if 'definition' not in first or \
                    kind in ['variable', 'typedef']:
                return
            if self.include_function_definition:
                defn = first['definition'].firstChild.data
            else:
                defn = ""
            self.add_text('\n')
            self.add_text('%feature("docstring") ')
            anc = node.parentNode.parentNode
            if cdef_kind in ('file', 'namespace'):
                ns_node = anc.getElementsByTagName('innernamespace')
                if not ns_node and cdef_kind == 'namespace':
                    ns_node = anc.getElementsByTagName('compoundname')
                if ns_node:
                    ns = ns_node[0].firstChild.data
                    self.add_text(' %s::%s "\n%s' % (ns, name, defn))
                else:
                    self.add_text(' %s "\n%s' % (name, defn))
            elif cdef_kind in ('class', 'struct'):
                # Get the full function name.
                anc_node = anc.getElementsByTagName('compoundname')
                cname = anc_node[0].firstChild.data
                self.add_text(' %s::%s "\n%s' % (cname, name, defn))
            for n in node.childNodes:
                if n not in first.values():
                    self.parse(n)
            self.add_text(['";', '\n'])

    def do_definition(self, node):
        data = node.firstChild.data
        self.add_text('%s "\n%s' % (data, data))

    def do_sectiondef(self, node):
        kind = node.attributes['kind'].value
        if kind in ('public-func', 'func', 'user-defined', ''):
            self.generic_parse(node)

    def do_header(self, node):
        """For a user defined section def a header field is present
        which should not be printed as such, so we comment it in the
        output."""
        data = node.firstChild.data
        self.add_text('\n/*\n %s \n*/\n' % data)
        # If our immediate sibling is a 'description' node then we
        # should comment that out also and remove it from the parent
        # node's children.
        parent = node.parentNode
        idx = parent.childNodes.index(node)
        # BUGFIX: indexing childNodes[idx+2] requires the list to be
        # strictly longer than idx+2; the original '>=' comparison could
        # raise IndexError.
        if len(parent.childNodes) > idx + 2:
            nd = parent.childNodes[idx + 2]
            if nd.nodeName == 'description':
                nd = parent.removeChild(nd)
                self.add_text('\n/*')
                self.generic_parse(nd)
                self.add_text('\n*/\n')

    def do_simplesect(self, node):
        kind = node.attributes['kind'].value
        if kind in ('date', 'rcs', 'version'):
            # Version-control noise is dropped from the docstrings.
            pass
        elif kind == 'warning':
            self.add_text(['\n', 'WARNING: '])
            self.generic_parse(node)
        elif kind == 'see':
            self.add_text('\n')
            self.add_text('See: ')
            self.generic_parse(node)
        else:
            self.generic_parse(node)

    def do_argsstring(self, node):
        self.generic_parse(node, pad=1)

    def do_member(self, node):
        kind = node.attributes['kind'].value
        refid = node.attributes['refid'].value
        if kind == 'function' and refid[:9] == 'namespace':
            self.generic_parse(node)

    def do_doxygenindex(self, node):
        """Recursively process every indexed compound file whose name
        starts with one of USE_PREFIXES."""
        self.multi = 1
        comps = node.getElementsByTagName('compound')
        for c in comps:
            refid = c.attributes['refid'].value
            fname = refid + '.xml'
            for prefix in USE_PREFIXES:
                if fname.startswith(prefix):
                    if not os.path.exists(fname):
                        fname = os.path.join(self.my_dir, fname)
                    if not self.quiet:
                        print("parsing file: %s" % fname)
                    p = Doxy2SWIG(fname, self.include_function_definition,
                                  self.quiet)
                    p.generate()
                    self.pieces.extend(self.clean_pieces(p.pieces))
                    break

    def write(self, fname):
        """Write the accumulated pieces to `fname` (file or filename)."""
        o = my_open_write(fname)
        if self.multi:
            # Pieces from sub-files were already cleaned in do_doxygenindex.
            o.write("".join(self.pieces))
        else:
            o.write("".join(self.clean_pieces(self.pieces)))
        o.close()

    def clean_pieces(self, pieces):
        """Cleans the list of strings given as `pieces`. It replaces
        multiple newlines by a maximum of 2 and returns a new list.
        It also wraps the paragraphs nicely.
        """
        ret = []
        count = 0
        for i in pieces:
            if i == '\n':
                count = count + 1
            else:
                if i == '";':
                    if count:
                        ret.append('\n')
                elif count > 2:
                    ret.append('\n\n')
                elif count:
                    ret.append('\n' * count)
                count = 0
                ret.append(i)
        _data = "".join(ret)
        ret = []
        for i in _data.split('\n\n'):
            if i == 'Parameters:' or i == 'Exceptions:':
                ret.extend([i, '\n-----------', '\n\n'])
            elif i.find('// File:') > -1:  # leave comments alone.
                ret.extend([i, '\n'])
            else:
                _tmp = textwrap.fill(i.strip(), break_long_words=False)
                _tmp = self.lead_spc.sub(r'\1"\2', _tmp)
                ret.extend([_tmp, '\n\n'])
        return ret
def convert(input, output, include_function_definition=True, quiet=False):
    """Translate a doxygen XML file *input* into a SWIG docstring file *output*."""
    converter = Doxy2SWIG(input, include_function_definition, quiet)
    converter.generate()
    converter.write(output)
def main():
    """Command-line entry point: parse options, then run the converter."""
    parser = optparse.OptionParser(__doc__)
    parser.add_option("-n", '--no-function-definition',
                      action='store_true',
                      default=False,
                      dest='func_def',
                      help='do not include doxygen function definitions')
    parser.add_option("-q", '--quiet',
                      action='store_true',
                      default=False,
                      dest='quiet',
                      help='be quiet and minimize output')
    options, args = parser.parse_args()
    # Exactly two positional arguments are required: input.xml output.i
    if len(args) != 2:
        parser.error("error: no input and output specified")
    convert(args[0], args[1], not options.func_def, options.quiet)


if __name__ == '__main__':
    main()
| gpl-3.0 |
ppizarror/Hero-of-Antair | bin/simplejson/tests/test_bitsize_int_as_string.py | 121 | 2297 | from unittest import TestCase
import simplejson as json
class TestBitSizeIntAsString(TestCase):
    # NOTE: some Python 2.5 builds (e.g. the one shipped on Mac OS X)
    # computed 2 ** 31 as 0 while 1 << 31 was correct, so the boundary
    # values below are expressed with shifts.
    values = [
        (200, 200),
        ((1 << 31) - 1, (1 << 31) - 1),
        ((1 << 31), str(1 << 31)),
        ((1 << 31) + 1, str((1 << 31) + 1)),
        (-100, -100),
        ((-1 << 31), str(-1 << 31)),
        ((-1 << 31) - 1, str((-1 << 31) - 1)),
        ((-1 << 31) + 1, (-1 << 31) + 1),
    ]

    def _roundtrip(self, obj, **kw):
        """Serialize *obj* with the given dumps options and parse it back."""
        return json.loads(json.dumps(obj, **kw))

    def test_invalid_counts(self):
        # Bit counts must be positive integers.
        for bad in ('foo', -1, 0, 1.0):
            self.assertRaises(
                TypeError,
                json.dumps, 0, int_as_string_bitcount=bad)

    def test_ints_outside_range_fails(self):
        # A 16-bit cutoff must not stringify a value needing 16 bits.
        self.assertNotEqual(
            str(1 << 15),
            self._roundtrip(1 << 15, int_as_string_bitcount=16),
        )

    def test_ints(self):
        for value, expected in self.values:
            self.assertEqual(
                value,
                self._roundtrip(value))
            self.assertEqual(
                expected,
                self._roundtrip(value, int_as_string_bitcount=31),
            )

    def test_lists(self):
        for value, expected in self.values:
            self.assertEqual(
                [value, value],
                self._roundtrip([value, value]))
            self.assertEqual(
                [expected, expected],
                self._roundtrip([value, value], int_as_string_bitcount=31))

    def test_dicts(self):
        for value, expected in self.values:
            self.assertEqual(
                {'k': value},
                self._roundtrip({'k': value}))
            self.assertEqual(
                {'k': expected},
                self._roundtrip({'k': value}, int_as_string_bitcount=31))

    def test_dict_keys(self):
        for value, _ in self.values:
            expected = {str(value): 'value'}
            self.assertEqual(
                expected,
                self._roundtrip({value: 'value'}))
            self.assertEqual(
                expected,
                self._roundtrip({value: 'value'}, int_as_string_bitcount=31))
| gpl-2.0 |
jeanlinux/calibre | src/calibre/ebooks/rtf2xml/process_tokens.py | 24 | 42386 | #########################################################################
# #
# #
# copyright 2002 Paul Henry Tremblay #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU #
# General Public License for more details. #
# #
# #
#########################################################################
import os, re
from calibre.ebooks.rtf2xml import copy, check_brackets
from calibre.ptempfile import better_mktemp
class ProcessTokens:
"""
Process each token on a line and add information that will be useful for
later processing. Information will be put on one line, delimited by "<"
for main fields, and ">" for sub fields
"""
def __init__(self,
             in_file,
             exception_handler,
             bug_handler,
             copy=None,
             run_level=1,
             ):
    """Store configuration and build the token dictionary.

    Required:
        'in_file' -- the RTF token stream to process
        'exception_handler' -- handler for user-level errors
        'bug_handler' -- handler for internal errors
    Optional:
        'copy' -- presumably controls copying of intermediate output for
            debugging; TODO confirm against the rtf2xml copy module
        'run_level' -- verbosity/strictness of error reporting
    """
    self.__file = in_file
    # NOTE: the original assigned self.__bug_handler twice; once suffices.
    self.__bug_handler = bug_handler
    self.__exception_handler = exception_handler
    self.__copy = copy
    self.__run_level = run_level
    # Results are written to a fresh temporary file.
    self.__write_to = better_mktemp()
    self.initiate_token_dict()
    self.compile_expressions()
    self.__bracket_count = 0
def compile_expressions(self):
self.__num_exp = re.compile(r"([a-zA-Z]+)(.*)")
self.__utf_exp = re.compile(r'(&.*?;)')
def initiate_token_dict(self):
self.__return_code = 0
self.dict_token={
# unicode
'mshex' : ('nu', '__________', self.__ms_hex_func),
# brackets
'{' : ('nu', '{', self.ob_func),
'}' : ('nu', '}', self.cb_func),
# microsoft characters
'ldblquote' : ('mc', 'ldblquote', self.ms_sub_func),
'rdblquote' : ('mc', 'rdblquote', self.ms_sub_func),
'rquote' : ('mc', 'rquote', self.ms_sub_func),
'lquote' : ('mc', 'lquote', self.ms_sub_func),
'emdash' : ('mc', 'emdash', self.ms_sub_func),
'endash' : ('mc', 'endash', self.ms_sub_func),
'bullet' : ('mc', 'bullet', self.ms_sub_func),
'~' : ('mc', '~', self.ms_sub_func),
'tab' : ('mc', 'tab', self.ms_sub_func),
'_' : ('mc', '_', self.ms_sub_func),
';' : ('mc', ';', self.ms_sub_func),
# this must be wrong
'-' : ('mc', '-', self.ms_sub_func),
'line' : ('mi', 'hardline-break', self.direct_conv_func), #calibre
# misc => ml
'*' : ('ml', 'asterisk__', self.default_func),
':' : ('ml', 'colon_____', self.default_func),
# text
'backslash' : ('nu', '\\', self.text_func),
'ob' : ('nu', '{', self.text_func),
'cb' : ('nu', '}', self.text_func),
# paragraph formatting => pf
'page' : ('pf', 'page-break', self.default_func),
'par' : ('pf', 'par-end___', self.default_func),
'pard' : ('pf', 'par-def___', self.default_func),
'keepn' : ('pf', 'keep-w-nex', self.bool_st_func),
'widctlpar' : ('pf', 'widow-cntl', self.bool_st_func),
'adjustright' : ('pf', 'adjust-rgt', self.bool_st_func),
'lang' : ('pf', 'language__', self.__language_func),
'ri' : ('pf', 'right-inde', self.divide_by_20),
'fi' : ('pf', 'fir-ln-ind', self.divide_by_20),
'li' : ('pf', 'left-inden', self.divide_by_20),
'sb' : ('pf', 'space-befo', self.divide_by_20),
'sa' : ('pf', 'space-afte', self.divide_by_20),
'sl' : ('pf', 'line-space', self.divide_by_20),
'deftab' : ('pf', 'default-ta', self.divide_by_20),
'ql' : ('pf', 'align_____<left', self.two_part_func),
'qc' : ('pf', 'align_____<cent', self.two_part_func),
'qj' : ('pf', 'align_____<just', self.two_part_func),
'qr' : ('pf', 'align_____<right', self.two_part_func),
'nowidctlpar' : ('pf', 'widow-cntr<false', self.two_part_func),
'tx' : ('pf', 'tab-stop__', self.divide_by_20),
'tb' : ('pf', 'tab-bar-st', self.divide_by_20),
'tqr' : ('pf', 'tab-right_', self.default_func),
'tqdec' : ('pf', 'tab-dec___', self.default_func),
'tqc' : ('pf', 'tab-center', self.default_func),
'tlul' : ('pf', 'leader-und', self.default_func),
'tlhyph' : ('pf', 'leader-hyp', self.default_func),
'tldot' : ('pf', 'leader-dot', self.default_func),
# stylesheet = > ss
'stylesheet' : ('ss', 'style-shet', self.default_func),
'sbasedon' : ('ss', 'based-on__', self.default_func),
'snext' : ('ss', 'next-style', self.default_func),
'cs' : ('ss', 'char-style', self.default_func),
's' : ('ss', 'para-style', self.default_func),
# graphics => gr
'pict' : ('gr', 'picture___', self.default_func),
'objclass' : ('gr', 'obj-class_', self.default_func),
'macpict' : ('gr', 'mac-pic___', self.default_func),
# section => sc
'sect' : ('sc', 'section___', self.default_func),
'sectd' : ('sc', 'sect-defin', self.default_func),
'endhere' : ('sc', 'sect-note_', self.default_func),
# list=> ls
'pntext' : ('ls', 'list-text_', self.default_func),
# this line must be wrong because it duplicates an earlier one
'listtext' : ('ls', 'list-text_', self.default_func),
'pn' : ('ls', 'list______', self.default_func),
'pnseclvl' : ('ls', 'list-level', self.default_func),
'pncard' : ('ls', 'list-cardi', self.bool_st_func),
'pndec' : ('ls', 'list-decim', self.bool_st_func),
'pnucltr' : ('ls', 'list-up-al', self.bool_st_func),
'pnucrm' : ('ls', 'list-up-ro', self.bool_st_func),
'pnord' : ('ls', 'list-ord__', self.bool_st_func),
'pnordt' : ('ls', 'list-ordte', self.bool_st_func),
'pnlvlblt' : ('ls', 'list-bulli', self.bool_st_func),
'pnlvlbody' : ('ls', 'list-simpi', self.bool_st_func),
'pnlvlcont' : ('ls', 'list-conti', self.bool_st_func),
'pnhang' : ('ls', 'list-hang_', self.bool_st_func),
'pntxtb' : ('ls', 'list-tebef', self.bool_st_func),
'ilvl' : ('ls', 'list-level', self.default_func),
'ls' : ('ls', 'list-id___', self.default_func),
'pnstart' : ('ls', 'list-start', self.default_func),
'itap' : ('ls', 'nest-level', self.default_func),
'leveltext' : ('ls', 'level-text', self.default_func),
'levelnumbers' : ('ls', 'level-numb', self.default_func),
'list' : ('ls', 'list-in-tb', self.default_func),
'listlevel' : ('ls', 'list-tb-le', self.default_func),
'listname' : ('ls', 'list-name_', self.default_func),
'listtemplateid' : ('ls', 'ls-tem-id_', self.default_func),
'leveltemplateid' : ('ls', 'lv-tem-id_', self.default_func),
'listhybrid' : ('ls', 'list-hybri', self.default_func),
'levelstartat' : ('ls', 'level-star', self.default_func),
'levelspace' : ('ls', 'level-spac', self.divide_by_20),
'levelindent' : ('ls', 'level-inde', self.default_func),
'levelnfc' : ('ls', 'level-type', self.__list_type_func),
'levelnfcn' : ('ls', 'level-type', self.__list_type_func),
'listid' : ('ls', 'lis-tbl-id', self.default_func),
'listoverride' : ('ls', 'lis-overid', self.default_func),
# duplicate
'pnlvl' : ('ls', 'list-level', self.default_func),
# root info => ri
'rtf' : ('ri', 'rtf_______', self.default_func),
'deff' : ('ri', 'deflt-font', self.default_func),
'mac' : ('ri', 'macintosh_', self.default_func),
'pc' : ('ri', 'pc________', self.default_func),
'pca' : ('ri', 'pca_______', self.default_func),
'ansi' : ('ri', 'ansi______', self.default_func),
'ansicpg' : ('ri', 'ansi-codpg', self.default_func),
# notes => nt
'footnote' : ('nt', 'footnote__', self.default_func),
'ftnalt' : ('nt', 'type______<endnote', self.two_part_func),
# anchor => an
'tc' : ('an', 'toc_______', self.default_func),
'bkmkstt' : ('an', 'book-mk-st', self.default_func),
'bkmkstart' : ('an', 'book-mk-st', self.default_func),
'bkmkend' : ('an', 'book-mk-en', self.default_func),
'xe' : ('an', 'index-mark', self.default_func),
'rxe' : ('an', 'place_____', self.default_func),
# index => in
'bxe' : ('in', 'index-bold', self.default_func),
'ixe' : ('in', 'index-ital', self.default_func),
'txe' : ('in', 'index-see_', self.default_func),
# table of contents => tc
'tcl' : ('tc', 'toc-level_', self.default_func),
'tcn' : ('tc', 'toc-sup-nu', self.default_func),
# field => fd
'field' : ('fd', 'field_____', self.default_func),
'fldinst' : ('fd', 'field-inst', self.default_func),
'fldrslt' : ('fd', 'field-rslt', self.default_func),
'datafield' : ('fd', 'datafield_', self.default_func),
# info-tables => it
'fonttbl' : ('it', 'font-table', self.default_func),
'colortbl' : ('it', 'colr-table', self.default_func),
'listoverridetable' : ('it', 'lovr-table', self.default_func),
'listtable' : ('it', 'listtable_', self.default_func),
'revtbl' : ('it', 'revi-table', self.default_func),
# character info => ci
'b' : ('ci', 'bold______', self.bool_st_func),
'blue' : ('ci', 'blue______', self.color_func),
'caps' : ('ci', 'caps______', self.bool_st_func),
'cf' : ('ci', 'font-color', self.colorz_func),
'chftn' : ('ci', 'footnot-mk', self.bool_st_func),
'dn' : ('ci', 'font-down_', self.divide_by_2),
'embo' : ('ci', 'emboss____', self.bool_st_func),
'f' : ('ci', 'font-style', self.default_func),
'fs' : ('ci', 'font-size_', self.divide_by_2),
'green' : ('ci', 'green_____', self.color_func),
'i' : ('ci', 'italics___', self.bool_st_func),
'impr' : ('ci', 'engrave___', self.bool_st_func),
'outl' : ('ci', 'outline___', self.bool_st_func),
'plain' : ('ci', 'plain_____', self.bool_st_func),
'red' : ('ci', 'red_______', self.color_func),
'scaps' : ('ci', 'small-caps', self.bool_st_func),
'shad' : ('ci', 'shadow____', self.bool_st_func),
'strike' : ('ci', 'strike-thr', self.bool_st_func),
'striked' : ('ci', 'dbl-strike', self.bool_st_func),
'sub' : ('ci', 'subscript_', self.bool_st_func),
'super' : ('ci', 'superscrip', self.bool_st_func),
'nosupersub' : ('ci', 'no-su-supe', self.__no_sup_sub_func),
'up' : ('ci', 'font-up___', self.divide_by_2),
'v' : ('ci', 'hidden____', self.default_func),
# underline
# can't see why it isn't a char info: 'ul'=>'ci'
'ul' : ('ci', 'underlined<continous', self.two_part_func),
'uld' : ('ci', 'underlined<dotted', self.two_part_func),
'uldash' : ('ci', 'underlined<dash', self.two_part_func),
'uldashd' : ('ci', 'underlined<dash-dot', self.two_part_func),
'uldashdd' : ('ci', 'underlined<dash-dot-dot', self.two_part_func),
'uldb' : ('ci', 'underlined<double', self.two_part_func),
'ulhwave' : ('ci', 'underlined<heavy-wave', self.two_part_func),
'ulldash' : ('ci', 'underlined<long-dash', self.two_part_func),
'ulth' : ('ci', 'underlined<thich', self.two_part_func),
'ulthd' : ('ci', 'underlined<thick-dotted', self.two_part_func),
'ulthdash' : ('ci', 'underlined<thick-dash', self.two_part_func),
'ulthdashd' : ('ci', 'underlined<thick-dash-dot', self.two_part_func),
'ulthdashdd' : ('ci', 'underlined<thick-dash-dot-dot', self.two_part_func),
'ulthldash' : ('ci', 'underlined<thick-long-dash', self.two_part_func),
'ululdbwave' : ('ci', 'underlined<double-wave', self.two_part_func),
'ulw' : ('ci', 'underlined<word', self.two_part_func),
'ulwave' : ('ci', 'underlined<wave', self.two_part_func),
'ulnone' : ('ci', 'underlined<false', self.two_part_func),
# table => tb
'trowd' : ('tb', 'row-def___', self.default_func),
'cell' : ('tb', 'cell______', self.default_func),
'row' : ('tb', 'row_______', self.default_func),
'intbl' : ('tb', 'in-table__', self.default_func),
'cols' : ('tb', 'columns___', self.default_func),
'trleft' : ('tb', 'row-pos-le', self.divide_by_20),
'cellx' : ('tb', 'cell-posit', self.divide_by_20),
'trhdr' : ('tb', 'row-header', self.default_func),
# preamble => pr
# document information => di
# TODO integrate \userprops
'info' : ('di', 'doc-info__', self.default_func),
'title' : ('di', 'title_____', self.default_func),
'author' : ('di', 'author____', self.default_func),
'operator' : ('di', 'operator__', self.default_func),
'manager' : ('di', 'manager___', self.default_func),
'company' : ('di', 'company___', self.default_func),
'keywords' : ('di', 'keywords__', self.default_func),
'category' : ('di', 'category__', self.default_func),
'doccomm' : ('di', 'doc-notes_', self.default_func),
'comment' : ('di', 'doc-notes_', self.default_func),
'subject' : ('di', 'subject___', self.default_func),
'creatim' : ('di', 'create-tim', self.default_func),
'yr' : ('di', 'year______', self.default_func),
'mo' : ('di', 'month_____', self.default_func),
'dy' : ('di', 'day_______', self.default_func),
'min' : ('di', 'minute____', self.default_func),
'sec' : ('di', 'second____', self.default_func),
'revtim' : ('di', 'revis-time', self.default_func),
'edmins' : ('di', 'edit-time_', self.default_func),
'printim' : ('di', 'print-time', self.default_func),
'buptim' : ('di', 'backuptime', self.default_func),
'nofwords' : ('di', 'num-of-wor', self.default_func),
'nofchars' : ('di', 'num-of-chr', self.default_func),
'nofcharsws' : ('di', 'numofchrws', self.default_func),
'nofpages' : ('di', 'num-of-pag', self.default_func),
'version' : ('di', 'version___', self.default_func),
'vern' : ('di', 'intern-ver', self.default_func),
'hlinkbase' : ('di', 'linkbase__', self.default_func),
'id' : ('di', 'internalID', self.default_func),
# headers and footers => hf
'headerf' : ('hf', 'head-first', self.default_func),
'headerl' : ('hf', 'head-left_', self.default_func),
'headerr' : ('hf', 'head-right', self.default_func),
'footerf' : ('hf', 'foot-first', self.default_func),
'footerl' : ('hf', 'foot-left_', self.default_func),
'footerr' : ('hf', 'foot-right', self.default_func),
'header' : ('hf', 'header____', self.default_func),
'footer' : ('hf', 'footer____', self.default_func),
# page => pa
'margl' : ('pa', 'margin-lef', self.divide_by_20),
'margr' : ('pa', 'margin-rig', self.divide_by_20),
'margb' : ('pa', 'margin-bot', self.divide_by_20),
'margt' : ('pa', 'margin-top', self.divide_by_20),
'gutter' : ('pa', 'gutter____', self.divide_by_20),
'paperw' : ('pa', 'paper-widt', self.divide_by_20),
'paperh' : ('pa', 'paper-hght', self.divide_by_20),
# annotation => an
'annotation' : ('an', 'annotation', self.default_func),
# border => bd
'trbrdrh' : ('bd', 'bor-t-r-hi', self.default_func),
'trbrdrv' : ('bd', 'bor-t-r-vi', self.default_func),
'trbrdrt' : ('bd', 'bor-t-r-to', self.default_func),
'trbrdrl' : ('bd', 'bor-t-r-le', self.default_func),
'trbrdrb' : ('bd', 'bor-t-r-bo', self.default_func),
'trbrdrr' : ('bd', 'bor-t-r-ri', self.default_func),
'clbrdrb' : ('bd', 'bor-cel-bo', self.default_func),
'clbrdrt' : ('bd', 'bor-cel-to', self.default_func),
'clbrdrl' : ('bd', 'bor-cel-le', self.default_func),
'clbrdrr' : ('bd', 'bor-cel-ri', self.default_func),
'brdrb' : ('bd', 'bor-par-bo', self.default_func),
'brdrt' : ('bd', 'bor-par-to', self.default_func),
'brdrl' : ('bd', 'bor-par-le', self.default_func),
'brdrr' : ('bd', 'bor-par-ri', self.default_func),
'box' : ('bd', 'bor-par-bx', self.default_func),
'chbrdr' : ('bd', 'bor-par-bo', self.default_func),
'brdrbtw' : ('bd', 'bor-for-ev', self.default_func),
'brdrbar' : ('bd', 'bor-outsid', self.default_func),
'brdrnone' : ('bd', 'bor-none__<false', self.two_part_func),
# border type => bt
'brdrs' : ('bt', 'bdr-single', self.default_func),
'brdrth' : ('bt', 'bdr-doubtb', self.default_func),
'brdrsh' : ('bt', 'bdr-shadow', self.default_func),
'brdrdb' : ('bt', 'bdr-double', self.default_func),
'brdrdot' : ('bt', 'bdr-dotted', self.default_func),
'brdrdash' : ('bt', 'bdr-dashed', self.default_func),
'brdrhair' : ('bt', 'bdr-hair__', self.default_func),
'brdrinset' : ('bt', 'bdr-inset_', self.default_func),
'brdrdashsm' : ('bt', 'bdr-das-sm', self.default_func),
'brdrdashd' : ('bt', 'bdr-dot-sm', self.default_func),
'brdrdashdd' : ('bt', 'bdr-dot-do', self.default_func),
'brdroutset' : ('bt', 'bdr-outset', self.default_func),
'brdrtriple' : ('bt', 'bdr-trippl', self.default_func),
'brdrtnthsg' : ('bt', 'bdr-thsm__', self.default_func),
'brdrthtnsg' : ('bt', 'bdr-htsm__', self.default_func),
'brdrtnthtnsg' : ('bt', 'bdr-hthsm_', self.default_func),
'brdrtnthmg' : ('bt', 'bdr-thm___', self.default_func),
'brdrthtnmg' : ('bt', 'bdr-htm___', self.default_func),
'brdrtnthtnmg' : ('bt', 'bdr-hthm__', self.default_func),
'brdrtnthlg' : ('bt', 'bdr-thl___', self.default_func),
'brdrtnthtnlg' : ('bt', 'bdr-hthl__', self.default_func),
'brdrwavy' : ('bt', 'bdr-wavy__', self.default_func),
'brdrwavydb' : ('bt', 'bdr-d-wav_', self.default_func),
'brdrdashdotstr' : ('bt', 'bdr-strip_', self.default_func),
'brdremboss' : ('bt', 'bdr-embos_', self.default_func),
'brdrengrave' : ('bt', 'bdr-engra_', self.default_func),
'brdrframe' : ('bt', 'bdr-frame_', self.default_func),
'brdrw' : ('bt', 'bdr-li-wid', self.divide_by_20),
'brsp' : ('bt', 'bdr-sp-wid', self.divide_by_20),
'brdrcf' : ('bt', 'bdr-color_', self.default_func),
# comments
# 'comment' : ('cm', 'comment___', self.default_func),
}
self.__number_type_dict = {
0: 'Arabic',
1: 'uppercase Roman numeral',
2: 'lowercase Roman numeral',
3: 'uppercase letter',
4: 'lowercase letter',
5: 'ordinal number',
6: 'cardianl text number',
7: 'ordinal text number',
10: 'Kanji numbering without the digit character',
11: 'Kanji numbering with the digit character',
1246: 'phonetic Katakana characters in aiueo order',
1346: 'phonetic katakana characters in iroha order',
14: 'double byte character',
15: 'single byte character',
16: 'Kanji numbering 3',
17: 'Kanji numbering 4',
18: 'Circle numbering' ,
19: 'double-byte Arabic numbering',
2046: 'phonetic double-byte Katakana characters',
2146: 'phonetic double-byte katakana characters',
22: 'Arabic with leading zero',
23: 'bullet',
24: 'Korean numbering 2',
25: 'Korean numbering 1',
26: 'Chinese numbering 1',
27: 'Chinese numbering 2',
28: 'Chinese numbering 3',
29: 'Chinese numbering 4',
30: 'Chinese Zodiac numbering 1',
31: 'Chinese Zodiac numbering 2',
32: 'Chinese Zodiac numbering 3',
33: 'Taiwanese double-byte numbering 1',
34: 'Taiwanese double-byte numbering 2',
35: 'Taiwanese double-byte numbering 3',
36: 'Taiwanese double-byte numbering 4',
37: 'Chinese double-byte numbering 1',
38: 'Chinese double-byte numbering 2',
39: 'Chinese double-byte numbering 3',
40: 'Chinese double-byte numbering 4',
41: 'Korean double-byte numbering 1',
42: 'Korean double-byte numbering 2',
43: 'Korean double-byte numbering 3',
44: 'Korean double-byte numbering 4',
45: 'Hebrew non-standard decimal',
46: 'Arabic Alif Ba Tah',
47: 'Hebrew Biblical standard',
48: 'Arabic Abjad style',
255: 'No number',
}
self.__language_dict = {
1078 : 'Afrikaans',
1052 : 'Albanian',
1025 : 'Arabic',
5121 : 'Arabic Algeria',
15361 : 'Arabic Bahrain',
3073 : 'Arabic Egypt',
1 : 'Arabic General',
2049 : 'Arabic Iraq',
11265 : 'Arabic Jordan',
13313 : 'Arabic Kuwait',
12289 : 'Arabic Lebanon',
4097 : 'Arabic Libya',
6145 : 'Arabic Morocco',
8193 : 'Arabic Oman',
16385 : 'Arabic Qatar',
10241 : 'Arabic Syria',
7169 : 'Arabic Tunisia',
14337 : 'Arabic U.A.E.',
9217 : 'Arabic Yemen',
1067 : 'Armenian',
1101 : 'Assamese',
2092 : 'Azeri Cyrillic',
1068 : 'Azeri Latin',
1069 : 'Basque',
1093 : 'Bengali',
4122 : 'Bosnia Herzegovina',
1026 : 'Bulgarian',
1109 : 'Burmese',
1059 : 'Byelorussian',
1027 : 'Catalan',
2052 : 'Chinese China',
4 : 'Chinese General',
3076 : 'Chinese Hong Kong',
4100 : 'Chinese Singapore',
1028 : 'Chinese Taiwan',
1050 : 'Croatian',
1029 : 'Czech',
1030 : 'Danish',
2067 : 'Dutch Belgium',
1043 : 'Dutch Standard',
3081 : 'English Australia',
10249 : 'English Belize',
2057 : 'English British',
4105 : 'English Canada',
9225 : 'English Caribbean',
9 : 'English General',
6153 : 'English Ireland',
8201 : 'English Jamaica',
5129 : 'English New Zealand',
13321 : 'English Philippines',
7177 : 'English South Africa',
11273 : 'English Trinidad',
1033 : 'English United States',
1061 : 'Estonian',
1080 : 'Faerose',
1065 : 'Farsi',
1035 : 'Finnish',
1036 : 'French',
2060 : 'French Belgium',
11276 : 'French Cameroon',
3084 : 'French Canada',
12300 : 'French Cote d\'Ivoire',
5132 : 'French Luxembourg',
13324 : 'French Mali',
6156 : 'French Monaco',
8204 : 'French Reunion',
10252 : 'French Senegal',
4108 : 'French Swiss',
7180 : 'French West Indies',
9228 : 'French Democratic Republic of the Congo',
1122 : 'Frisian',
1084 : 'Gaelic',
2108 : 'Gaelic Ireland',
1110 : 'Galician',
1079 : 'Georgian',
1031 : 'German',
3079 : 'German Austrian',
5127 : 'German Liechtenstein',
4103 : 'German Luxembourg',
2055 : 'German Switzerland',
1032 : 'Greek',
1095 : 'Gujarati',
1037 : 'Hebrew',
1081 : 'Hindi',
1038 : 'Hungarian',
1039 : 'Icelandic',
1057 : 'Indonesian',
1040 : 'Italian',
2064 : 'Italian Switzerland',
1041 : 'Japanese',
1099 : 'Kannada',
1120 : 'Kashmiri',
2144 : 'Kashmiri India',
1087 : 'Kazakh',
1107 : 'Khmer',
1088 : 'Kirghiz',
1111 : 'Konkani',
1042 : 'Korean',
2066 : 'Korean Johab',
1108 : 'Lao',
1062 : 'Latvian',
1063 : 'Lithuanian',
2087 : 'Lithuanian Classic',
1086 : 'Malay',
2110 : 'Malay Brunei Darussalam',
1100 : 'Malayalam',
1082 : 'Maltese',
1112 : 'Manipuri',
1102 : 'Marathi',
1104 : 'Mongolian',
1121 : 'Nepali',
2145 : 'Nepali India',
1044 : 'Norwegian Bokmal',
2068 : 'Norwegian Nynorsk',
1096 : 'Oriya',
1045 : 'Polish',
1046 : 'Portuguese (Brazil)',
2070 : 'Portuguese (Portugal)',
1094 : 'Punjabi',
1047 : 'Rhaeto-Romanic',
1048 : 'Romanian',
2072 : 'Romanian Moldova',
1049 : 'Russian',
2073 : 'Russian Moldova',
1083 : 'Sami Lappish',
1103 : 'Sanskrit',
3098 : 'Serbian Cyrillic',
2074 : 'Serbian Latin',
1113 : 'Sindhi',
1051 : 'Slovak',
1060 : 'Slovenian',
1070 : 'Sorbian',
11274 : 'Spanish Argentina',
16394 : 'Spanish Bolivia',
13322 : 'Spanish Chile',
9226 : 'Spanish Colombia',
5130 : 'Spanish Costa Rica',
7178 : 'Spanish Dominican Republic',
12298 : 'Spanish Ecuador',
17418 : 'Spanish El Salvador',
4106 : 'Spanish Guatemala',
18442 : 'Spanish Honduras',
2058 : 'Spanish Mexico',
3082 : 'Spanish Modern',
19466 : 'Spanish Nicaragua',
6154 : 'Spanish Panama',
15370 : 'Spanish Paraguay',
10250 : 'Spanish Peru',
20490 : 'Spanish Puerto Rico',
1034 : 'Spanish Traditional',
14346 : 'Spanish Uruguay',
8202 : 'Spanish Venezuela',
1072 : 'Sutu',
1089 : 'Swahili',
1053 : 'Swedish',
2077 : 'Swedish Finland',
1064 : 'Tajik',
1097 : 'Tamil',
1092 : 'Tatar',
1098 : 'Telugu',
1054 : 'Thai',
1105 : 'Tibetan',
1073 : 'Tsonga',
1074 : 'Tswana',
1055 : 'Turkish',
1090 : 'Turkmen',
1058 : 'Ukranian',
1056 : 'Urdu',
2080 : 'Urdu India',
2115 : 'Uzbek Cyrillic',
1091 : 'Uzbek Latin',
1075 : 'Venda',
1066 : 'Vietnamese',
1106 : 'Welsh',
1076 : 'Xhosa',
1085 : 'Yiddish',
1077 : 'Zulu',
1024 : 'Unkown',
255 : 'Unkown',
}
"""
# unknown
# These must get passed on because they occure after \*
'do' : ('un', 'unknown___', self.default_func),
'company' : ('un', 'company___', self.default_func),
'shpinst' : ('un', 'unknown___', self.default_func),
'panose' : ('un', 'unknown___', self.default_func),
'falt' : ('un', 'unknown___', self.default_func),
'listoverridetable' : ('un', 'unknown___', self.default_func),
'category' : ('un', 'unknown___', self.default_func),
'template' : ('un', 'unknown___', self.default_func),
'ud' : ('un', 'unknown___', self.default_func),
'formfield' : ('un', 'unknown___', self.default_func),
'ts' : ('un', 'unknown___', self.default_func),
'rsidtbl' : ('un', 'unknown___', self.default_func),
'generator' : ('un', 'unknown___', self.default_func),
'ftnsep' : ('un', 'unknown___', self.default_func),
'aftnsep' : ('un', 'unknown___', self.default_func),
'aftnsepc' : ('un', 'unknown___', self.default_func),
'aftncn' : ('un', 'unknown___', self.default_func),
'objclass' : ('un', 'unknown___', self.default_func),
'objdata' : ('un', 'unknown___', self.default_func),
'picprop' : ('un', 'unknown___', self.default_func),
'blipuid' : ('un', 'unknown___', self.default_func),
"""
def __ms_hex_func(self, pre, token, num):
num = num[1:] # chop off leading 0, which I added
num = num.upper() # the mappings store hex in caps
return 'tx<hx<__________<\'%s\n' % num # add an ' for the mappings
def ms_sub_func(self, pre, token, num):
return 'tx<mc<__________<%s\n' % token
def direct_conv_func(self, pre, token, num):
return 'mi<tg<empty_____<%s\n' % token
def default_func(self, pre, token, num):
if num is None:
num = 'true'
return 'cw<%s<%s<nu<%s\n' % (pre, token, num)
def colorz_func(self, pre, token, num):
if num is None:
num = '0'
return 'cw<%s<%s<nu<%s\n' % (pre, token, num)
    def __list_type_func(self, pre, token, num):
        """Translate a numeric list-type argument into a descriptive name.

        Looks the integer argument up in self.__number_type_dict (e.g.
        0 -> 'Arabic', 23 -> 'bullet'); falls back to 'Arabic' when the
        argument is missing or unknown.
        """
        # NOTE(review): `type` shadows the builtin, and this initial
        # lowercase 'arabic' is always overwritten before use.
        type = 'arabic'
        if num is None:
            type = 'Arabic'
        else:
            try:
                num = int(num)
            except ValueError:
                # In debug mode (run level > 3) a malformed number is fatal.
                if self.__run_level > 3:
                    msg = 'Number "%s" cannot be converted to integer\n' % num
                    raise self.__bug_handler, msg
            type = self.__number_type_dict.get(num)
            if type is None:
                if self.__run_level > 3:
                    # NOTE(review): msg is never formatted with num and is
                    # not passed to the raise below — looks like a latent bug.
                    msg = 'No type for "%s" in self.__number_type_dict\n'
                    raise self.__bug_handler
                type = 'Arabic'
        return 'cw<%s<%s<nu<%s\n' % (pre, token, type)
    def __language_func(self, pre, token, num):
        """Translate a numeric language code into a language name.

        The first run of digits in *num* is looked up in
        self.__language_dict (e.g. 1033 -> 'English United States');
        unknown codes become "not defined" (fatal in debug mode).
        """
        lang_name = self.__language_dict.get(int(re.search('[0-9]+', num).group()))
        if not lang_name:
            lang_name = "not defined"
            if self.__run_level > 3:
                msg = 'No entry for number "%s"' % num
                raise self.__bug_handler, msg
        return 'cw<%s<%s<nu<%s\n' % (pre, token, lang_name)
def two_part_func(self, pre, token, num):
list = token.split("<")
token = list[0]
num = list[1]
return 'cw<%s<%s<nu<%s\n' % (pre, token, num)
##return 'cw<nu<nu<nu<%s>num<%s\n' % (token, num)
def divide_by_2(self, pre, token, num):
num = self.divide_num(num, 2)
return 'cw<%s<%s<nu<%s\n' % (pre, token, num)
##return 'cw<nu<nu<nu<%s>%s<%s\n' % (token, num, token)
def divide_by_20(self, pre, token, num):
num = self.divide_num(num, 20)
return 'cw<%s<%s<nu<%s\n' % (pre, token, num)
##return 'cw<nu<nu<nu<%s>%s<%s\n' % (token, num, token)
def text_func(self, pre, token, num=None):
return 'tx<nu<__________<%s\n' % token
def ob_func(self, pre, token, num=None):
self.__bracket_count += 1
return 'ob<nu<open-brack<%04d\n' % self.__bracket_count
def cb_func(self, pre, token, num=None):
line = 'cb<nu<clos-brack<%04d\n' % self.__bracket_count
self.__bracket_count -= 1
return line
def color_func(self, pre, token, num):
third_field = 'nu'
if num[-1] == ';':
num = num[:-1]
third_field = 'en'
num = str('%X' % int(num))
if len(num) != 2:
num = "0" + num
return 'cw<%s<%s<%s<%s\n' % (pre, token, third_field, num)
##return 'cw<cl<%s<nu<nu<%s>%s<%s\n' % (third_field, token, num, token)
    def bool_st_func(self, pre, token, num):
        """Translate a boolean control word into 'true'/'false'.

        RTF booleans: no argument, '' or '1' mean true; '0' means false.
        Any other argument is a bug and is reported via the bug handler.
        """
        if num is None or num == '' or num == '1':
            return 'cw<%s<%s<nu<true\n' % (pre, token)
            ##return 'cw<nu<nu<nu<%s>true<%s\n' % (token, token)
        elif num == '0':
            return 'cw<%s<%s<nu<false\n' % (pre, token)
            ##return 'cw<nu<nu<nu<%s>false<%s\n' % (token, token)
        else:
            msg = "boolean should have some value module process tokens\ntoken is %s\n'%s'\n" % (token, num)
            raise self.__bug_handler, msg
def __no_sup_sub_func(self, pre, token, num):
the_string = 'cw<ci<subscript_<nu<false\n'
the_string += 'cw<ci<superscrip<nu<false\n'
return the_string
    def divide_num(self, numerator, denominator):
        """Divide the numeric part of *numerator* by *denominator*.

        Returns a string rounded to two decimals, or 0 when no number can
        be extracted (fatal in debug mode, otherwise sets return code 5).
        """
        try:
            #calibre why ignore negative number? Wrong in case of \fi
            numerator = float(re.search('[0-9.\-]+', numerator).group())
        except TypeError, msg:
            # re.search returned None (no digits) -> .group() on None.
            if self.__run_level > 3:
                msg = ('No number to process?\nthis indicates that the token \(\\li\) \
should have a number and does not\nnumerator is \
"%s"\ndenominator is "%s"\n') % (numerator, denominator)
                raise self.__bug_handler, msg
            if 5 > self.__return_code:
                self.__return_code = 5
            return 0
        num = '%0.2f' % round(numerator/denominator, 2)
        return num
        # NOTE(review): everything below is unreachable dead code (the
        # function already returned); it looks like an older ".0"-stripping
        # variant that was never removed.
        string_num = str(num)
        if string_num[-2:] == ".0":
            string_num = string_num[:-2]
        return string_num
    def split_let_num(self, token):
        """Split a control word into its letter part and numeric argument.

        Returns (letters, number). When the token has no numeric part (or
        does not match self.__num_exp at all) the number defaults to 0;
        both situations are fatal in debug mode (run level > 3).
        """
        match_obj = re.search(self.__num_exp,token)
        if match_obj is not None:
            first = match_obj.group(1)
            second = match_obj.group(2)
            if not second:
                if self.__run_level > 3:
                    msg = "token is '%s' \n" % token
                    raise self.__bug_handler, msg
                return first, 0
        else:
            if self.__run_level > 3:
                msg = "token is '%s' \n" % token
                # NOTE(review): msg is built but not passed to the raise here.
                raise self.__bug_handler
            return token, 0
        return first, second
def convert_to_hex(self,number):
"""Convert a string to uppercase hexidecimal"""
num = int(number)
try:
hex_num = "%X" % num
return hex_num
except:
raise self.__bug_handler
    def process_cw(self, token):
        """Change the value of the control word by determining what dictionary
        it belongs to.

        Strips the leading backslash, separates a trailing numeric argument
        (unless the token is purely alphabetic or one of the special
        single-character controls), then dispatches to the handler stored in
        self.dict_token. Returns the handler's output line, or None when the
        control word is unknown.
        """
        special = [ '*', ':', '}', '{', '~', '_', '-', ';' ]
        ##if token != "{" or token != "}":
        token = token[1:] # strip off leading \
        token = token.replace(" ", "")
        ##if not token: return
        only_alpha = token.isalpha()
        num = None
        if not only_alpha and token not in special:
            token, num = self.split_let_num(token)
        # Unknown tokens map to (None, None, None) and fall through,
        # implicitly returning None to the caller.
        pre, token, action = self.dict_token.get(token, (None, None, None))
        if action:
            return action(pre, token, num)
    def __check_brackets(self, in_file):
        """Verify that braces in *in_file* balance.

        Returns 1 when the brackets do NOT match, None otherwise (callers
        treat the return value as truthy-on-error).
        """
        self.__check_brack_obj = check_brackets.CheckBrackets\
            (file = in_file)
        good_br = self.__check_brack_obj.check_brackets()[0]
        if not good_br:
            return 1
    def process_tokens(self):
        """Main method for handling other methods.

        Reads the token-per-line intermediate file, validates it as RTF
        (must open with '{' then '\\rtf', ascii-only control words, no
        '\\ ' tokens), converts each control word via process_cw and each
        text run into tx<...> lines, then replaces the input file with the
        result and checks bracket balance.
        """
        line_count = 0
        with open(self.__file, 'r') as read_obj:
            with open(self.__write_to, 'wb') as write_obj:
                for line in read_obj:
                    token = line.replace("\n","")
                    line_count += 1
                    if line_count == 1 and token != '\\{':
                        msg = '\nInvalid RTF: document doesn\'t start with {\n'
                        raise self.__exception_handler, msg
                    elif line_count == 2 and token[0:4] != '\\rtf':
                        msg = '\nInvalid RTF: document doesn\'t start with \\rtf \n'
                        raise self.__exception_handler, msg
                    the_index = token.find('\\ ')
                    if token is not None and the_index > -1:
                        msg = '\nInvalid RTF: token "\\ " not valid.\nError at line %d'\
                            % line_count
                        raise self.__exception_handler, msg
                    elif token[:1] == "\\":
                        try:
                            # Python 2 str.decode: control words must be ascii.
                            token.decode('us-ascii')
                        except UnicodeError, msg:
                            msg = '\nInvalid RTF: Tokens not ascii encoded.\n%s\nError at line %d'\
                                % (str(msg), line_count)
                            raise self.__exception_handler, msg
                        line = self.process_cw(token)
                        if line is not None:
                            write_obj.write(line)
                    else:
                        # Plain text: split out &-entities (utf markers) and
                        # tag them 'ut', everything else 'nu'.
                        fields = re.split(self.__utf_exp, token)
                        for field in fields:
                            if not field:
                                continue
                            if field[0:1] == '&':
                                write_obj.write('tx<ut<__________<%s\n' % field)
                            else:
                                write_obj.write('tx<nu<__________<%s\n' % field)
        if not line_count:
            msg = '\nInvalid RTF: file appears to be empty.\n'
            raise self.__exception_handler, msg
        copy_obj = copy.Copy(bug_handler = self.__bug_handler)
        if self.__copy:
            copy_obj.copy_file(self.__write_to, "processed_tokens.data")
        copy_obj.rename(self.__write_to, self.__file)
        os.remove(self.__write_to)
        bad_brackets = self.__check_brackets(self.__file)
        if bad_brackets:
            msg = '\nInvalid RTF: document does not have matching brackets.\n'
            raise self.__exception_handler, msg
        else:
            return self.__return_code
| gpl-3.0 |
spatialaudio/sweep | lin_sweep_kaiser_window_script3/merge_scripts.py | 2 | 1658 | #!/usr/bin/env python3
""" Script to merge scripts"""
import numpy as np
import matplotlib.pyplot as plt
script3 = np.genfromtxt('lin_sweep_kaiser_window_script3.txt')
script3_1 = np.genfromtxt('lin_sweep_kaiser_window_script3_1.txt')
fade_in_list = script3[:, 0]
# Script3
pnr_list = script3[:, 1]
spectrum_distance_list = script3[:, 2]
# Script3_1 (unwindowed deconvolution)
pnr_unwindowed_deconvolution_list = script3_1[:, 1]
spectrum_distance_unwindowed_deconvolution_list = script3_1[:, 2]
plt.plot(fade_in_list, pnr_list, label='Deconvolution: Excitation windowed')
plt.plot(
fade_in_list,
pnr_unwindowed_deconvolution_list,
label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Peak to noise ratio depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('Peak to noise ratio / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='center right')
plt.xlim([-10, 1000])
plt.savefig('pnr.png')
plt.close()
NFFT_bandstop = 88201
max_measurement = 7.09207671865
plt.plot(fade_in_list, -10 * np.log10(1 / NFFT_bandstop *
np.asarray(spectrum_distance_list) / max_measurement), label='Deconvolution: Excitation windowed')
plt.plot(fade_in_list,
-10 * np.log10(1 / NFFT_bandstop * np.asarray(spectrum_distance_unwindowed_deconvolution_list) /
max_measurement), label='Deconvolution: Excitation unwindowed')
plt.grid()
plt.title('Spectrum Distance depending on Fade in')
plt.xlabel('Fade in / ms')
plt.ylabel('(Spectrum Distance / max(Spectrum Distance)) / dB')
plt.ticklabel_format(useOffset=False)
plt.legend(loc='lower left')
plt.savefig('spectral_distance.png')
| mit |
ravindrapanda/tensorflow | tensorflow/contrib/eager/python/metrics_impl.py | 5 | 12951 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Metrics classes for computing the output of an evaluation."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
from tensorflow.contrib.summary import summary_ops
from tensorflow.python.eager import context
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
_to_replace = re.compile("[^A-Za-z0-9.]")
class Metric(object):
  """A metric holds state for aggregating statistics over an evaluation run.

  Example use with eager execution:

  ```python
  m = SomeMetric(...)
  for input in ...:
    m(input)
  print(m.result())
  ```

  Example use with graph execution:

  ```python
  m = SomeMetric(...)
  inputs = ... # Some tensors to compute the metric on.
  m_update = m(inputs)
  # Variables defined in first call, so get the initialization op afterwards.
  m_init = m.init_variables()  # or tf.global_variables_initializer()
  m_result = m.result()
  with tf.Session() as sess:
    sess.run(m_init)
    for input in ...:
      sess.run(m_update)
    print(sess.run(m_result))
  ```

  Example use with graph execution with placeholders and feed_dict:

  ```python
  m = SomeMetric(...)
  m_placeholder = tf.placeholder(...)
  m_update = m(m_placeholder)
  # Variables defined in first call, so get the initialization op afterwards.
  m_init = m.init_variables()  # or tf.global_variables_initializer()
  m_result = m.result()
  with tf.Session() as sess:
    sess.run(m_init)
    for input in ...:
      sess.run(m_update, feed_dict={m_placeholder: input})
    print(sess.run(m_result))
  ```

  Descendants will implement:
  * `build()`: All variables should be created in this method, by calling
    `self.add_variable()` as in: `self.var = self.add_variable(...)`
    build() will be called in the first invocation of `__call__()`, with
    the same arguments passed `call()`.
  * `call()`: Has all updates to variables, as in:
      self.var.assign_add(...)
  * `result()`: Computes and returns a final value for the metric
    from the variables in `self`.

  Descendants may override `aggregate()`, but usually won't need to.  It
  adds in the state from a list of metrics of the same type as `self`.
  (Default is to sum all the variables.) Note that users should not call
  `aggregate()`, it is for use by TensorFlow infrastructure.
  """

  def __init__(self, name=None):
    # Variables are created lazily on the first __call__ (via build()).
    self._built = False
    self._vars = []
    self._initial_values = {}
    self._updates = []
    # Default the metric name to the class name (e.g. "Mean").
    name = name or self.__class__.__name__
    # Replace things like spaces in name to create a valid scope name.
    scope_name = _to_replace.sub("_", name)
    # We create the variable scope now to get the unique name that will
    # be used as a variable prefix when build() calls add_variable().
    with variable_scope.variable_scope(
        scope_name, use_resource=True, reuse=False) as scope:
      pos = scope.name.rfind(scope_name)
      self._name = name + scope.name[pos + len(scope_name):]
      self._scope = scope
    if context.in_graph_mode():
      # We make self.call() into a graph callable here, so that we can
      # return a single op that performs all of the variable updates.
      self._construction_scope = ops.get_default_graph().as_default
      self.call = function.defun(self.call)
    else:
      self._construction_scope = context.eager_mode

  # ---- API for users ----
  def __call__(self, *args, **kwargs):
    """Returns op to execute to update this metric for these inputs.

    Returns None if eager execution is enabled.
    Returns a graph-mode function if graph execution is enabled.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric, passed on to `call()`.
    """
    if not self._built:
      # First invocation: create the variables with the same arguments,
      # inside this metric's dedicated variable scope.
      with variable_scope.variable_scope(
          self._scope), self._construction_scope():
        self.build(*args, **kwargs)
      self._built = True
    return self.call(*args, **kwargs)

  @property
  def name(self):
    return self._name

  @property
  def variables(self):
    return self._vars

  def init_variables(self):
    """Initializes this Metric's variables.

    Should be called after variables are created in the first execution
    of `__call__()`. If using graph execution, the return value should be
    `run()` in a session before running the op returned by `__call__()`.
    (See example above.)

    Returns:
      If using graph execution, this returns an op to perform the
      initialization. Under eager execution, the variables are reset to their
      initial values as a side effect and this function returns None.
    """
    if context.in_graph_mode():
      return control_flow_ops.group([v.initializer for v in self._vars])
    # Eager mode: reset each variable to the value captured in add_variable().
    for v in self._vars:
      v.assign(self._initial_values[v])

  # ---- To be implemented by descendants ---
  def build(self, *args, **kwargs):
    """Method to create variables.

    Called by `__call__()` before `call()` for the first time.

    Args:
      *args:
      **kwargs: The arguments to the first invocation of `__call__()`.
       `build()` may use the shape and/or dtype of these arguments
       when deciding how to create variables.
    """
    raise NotImplementedError("Metrics must define a build() member function")

  def call(self, *args, **kwargs):
    """Accumulates statistics for the metric. Users should use __call__ instead.

    Note: This function is executed as a graph function in graph mode.
    This means:
    a) Operations on the same resource are executed in textual order.
       This should make it easier to do things like add the updated
       value of a variable to another, for example.
    b) You don't need to worry about collecting the update ops to execute.
       All update ops added to the graph by this function will be executed.
    As a result, code should generally work the same way with graph or
    eager execution.

    Args:
      *args:
      **kwargs: A mini-batch of inputs to the Metric, as passed to
        `__call__()`.
    """
    raise NotImplementedError("Metrics must define a call() member function")

  def result(self):  # TODO(josh11b): Add an optional summary_writer parameter.
    """Computes and returns a final value for the metric."""
    raise NotImplementedError("Metrics must define a result() member function")

  def value(self):
    """In graph mode returns the result Tensor while in eager the callable."""
    if context.in_graph_mode():
      return self.result()
    else:
      return self.result

  # We can support two different strategies of for doing data-parallel
  # distributed metric computations:
  # * Put metric variables on the first device and rely on small
  #   bandwidth needed to do updates. (Doesn't require any particular
  #   code in Metric implementations.)
  # * Ask each type of metric to define an aggregation method to run
  #   at the end of eval to merge across devices. Note: this is good
  #   for the use case where they want to record the metric's state
  #   for each example and then later decide which examples they want
  #   to aggregate over. (Recommended -- not too much harder and adds
  #   flexibility over previous option.)
  # I'm going with the second strategy since we can define a default
  # implementation of aggregate() that will work for most descendants.
  def aggregate(self, metrics):
    """Adds in the state from a list of metrics.

    Default implementation sums all the metric variables.

    Args:
      metrics: A list of metrics with the same type as `self`.

    Raises:
      ValueError: If metrics contains invalid data.
    """
    for m in metrics:
      if type(self) != type(m):  # pylint: disable=unidiomatic-typecheck
        raise TypeError("All metrics must be the same type, '%s' != '%s'." %
                        (type(self), type(m)))
    # pylint: disable=protected-access
    for i in range(len(self._vars)):
      if any(m._vars[i].name != self._vars[i].name for m in metrics):
        raise ValueError("All metrics must have variables in the same order.")
      self._vars[i].assign_add(math_ops.add_n([m._vars[i] for m in metrics]))
    # pylint: enable=protected-access

  # ---- For use by descendants ---
  def add_variable(self, name, shape=None, dtype=None, initializer=None):
    """***Only for use by descendants of Metric***."""
    if self._built:
      raise RuntimeError("Can't call add_variable() except in build().")
    # Metric variables are local (not trainable, not saved by default).
    collections = None if context.in_eager_mode() else [
        ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES
    ]
    v = variable_scope.get_variable(
        name,
        shape,
        dtype,
        initializer,
        trainable=False,
        collections=collections,
        use_resource=True)
    self._vars.append(v)
    if context.in_eager_mode():
      # Remember the initial value so init_variables() can reset it.
      self._initial_values[v] = v.value()
    return v
class Mean(Metric):
  """Computes the (weighted) mean of the given values."""
  # TODO(josh11b): Maybe have a dtype argument that defaults to tf.float64?
  # Or defaults to type of the input if it is tf.float32, else tf.float64?

  def __init__(self, name=None, dtype=dtypes.float64):
    super(Mean, self).__init__(name=name)
    self.dtype = dtype

  def build(self, *args, **kwargs):
    # build() does not use call's arguments, by using *args, **kwargs
    # we make it easier to inherit from Mean().
    del args, kwargs
    # mean = numer / denom; both are scalar accumulators.
    self.numer = self.add_variable(name="numer", shape=(),
                                   dtype=self.dtype,
                                   initializer=init_ops.zeros_initializer)
    self.denom = self.add_variable(name="denom", shape=(),
                                   dtype=self.dtype,
                                   initializer=init_ops.zeros_initializer)

  def call(self, values, weights=None):
    """Accumulate statistics for computing the mean.

    For example, if values is [1, 3, 5, 7] then the mean is 4.
    If the weights were specified as [1, 1, 0, 0] then the mean would be 2.

    Args:
      values: Tensor with the per-example value.
      weights: Optional weighting of each example. Defaults to 1.

    Returns:
      The arguments, for easy chaining.
    """
    if weights is None:
      self.denom.assign_add(
          math_ops.cast(array_ops.identity(array_ops.size(values)), self.dtype))
      # NOTE(review): `values` is rebound to its reduced sum here, so the
      # unweighted path returns the scalar sum rather than the original
      # argument, contradicting the "arguments, for easy chaining" promise
      # above — confirm whether any caller relies on the return value.
      values = math_ops.reduce_sum(values)
      self.numer.assign_add(math_ops.cast(values, self.dtype))
    else:
      weights = math_ops.cast(weights, self.dtype)
      self.denom.assign_add(math_ops.reduce_sum(weights))
      values = math_ops.cast(values, self.dtype) * weights
      self.numer.assign_add(math_ops.reduce_sum(values))
    if weights is None:
      return values
    return values, weights

  def result(self):
    t = self.numer / self.denom
    # Also record the result as a summary scalar under the metric's name.
    summary_ops.scalar(name=self.name, tensor=t)
    return t
class Accuracy(Mean):
  """Calculates how often `predictions` matches `labels`."""

  def __init__(self, name=None, dtype=dtypes.float64):
    super(Accuracy, self).__init__(name=name, dtype=dtype)

  def call(self, labels, predictions, weights=None):
    """Accumulate accuracy statistics.

    For example, if labels is [1, 2, 3, 4] and predictions is [0, 2, 3, 4]
    then the accuracy is 3/4 or .75.  If the weights were specified as
    [1, 1, 0, 0] then the accuracy would be 1/2 or .5.

    `labels` and `predictions` should have the same shape and type.

    Args:
      labels: Tensor with the true labels for each example.  One example
        per element of the Tensor.
      predictions: Tensor with the predicted label for each example.
      weights: Optional weighting of each example. Defaults to 1.

    Returns:
      The arguments, for easy chaining.
    """
    matches = math_ops.equal(labels, predictions)
    # NOTE(review): the match indicators are cast to float64 here rather
    # than self.dtype, so a non-default dtype is ignored — confirm intent.
    matches = math_ops.cast(matches, dtypes.float64)
    # Accuracy is the (weighted) mean of the 0/1 match indicators.
    super(Accuracy, self).call(matches, weights=weights)
    if weights is None:
      return labels, predictions
    return labels, predictions, weights
| apache-2.0 |
binhqnguyen/lena | .waf-1.7.13-5a064c2686fe54de4e11018d22148cfc/waflib/Scripting.py | 85 | 10612 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
# Optional override for the build directory (set by tools, not here).
build_dir_override=None
# Commands that must run from the launch directory without climbing up
# the tree looking for an existing lock file.
no_climb_commands=['configure']
# Command executed when none is given on the command line.
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
	"""Main entry point of a waf invocation.

	Verifies the script/library version match, locates the project root
	(by climbing up looking for a lock file or a wscript), loads the main
	wscript module and runs the requested commands. Exits the process on
	any error.
	"""
	Logs.init_log()
	if Context.WAFVERSION!=version:
		Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
		sys.exit(1)
	if'--version'in sys.argv:
		Context.run_dir=current_directory
		ctx=Context.create_context('options')
		ctx.curdir=current_directory
		ctx.parse_args()
		sys.exit(0)
	Context.waf_dir=wafdir
	Context.launch_dir=current_directory
	# Some commands (e.g. configure) must not climb the directory tree.
	no_climb=os.environ.get('NOCLIMB',None)
	if not no_climb:
		for k in no_climb_commands:
			if k in sys.argv:
				no_climb=True
				break
	# Walk up from the current directory looking for a lock file that
	# still points at valid run/top/out directories, or for a wscript.
	cur=current_directory
	while cur:
		lst=os.listdir(cur)
		if Options.lockfile in lst:
			env=ConfigSet.ConfigSet()
			try:
				env.load(os.path.join(cur,Options.lockfile))
				ino=os.stat(cur)[stat.ST_INO]
			except Exception:
				pass
			else:
				# The lock file is only trusted if one of its recorded
				# directories is this directory (compared by inode on
				# POSIX, by path on win32).
				for x in[env.run_dir,env.top_dir,env.out_dir]:
					if Utils.is_win32:
						if cur==x:
							load=True
							break
					else:
						try:
							ino2=os.stat(x)[stat.ST_INO]
						except OSError:
							pass
						else:
							if ino==ino2:
								load=True
								break
				else:
					# for/else: no directory matched -> stale lock file.
					Logs.warn('invalid lock file in %s'%cur)
					load=False
				if load:
					Context.run_dir=env.run_dir
					Context.top_dir=env.top_dir
					Context.out_dir=env.out_dir
					break
		if not Context.run_dir:
			if Context.WSCRIPT_FILE in lst:
				Context.run_dir=cur
		next=os.path.dirname(cur)
		if next==cur:
			break
		cur=next
		if no_climb:
			break
	if not Context.run_dir:
		if'-h'in sys.argv or'--help'in sys.argv:
			Logs.warn('No wscript file found: the help message may be incomplete')
			Context.run_dir=current_directory
			ctx=Context.create_context('options')
			ctx.curdir=current_directory
			ctx.parse_args()
			sys.exit(0)
		Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
		sys.exit(1)
	try:
		os.chdir(Context.run_dir)
	except OSError:
		Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
		sys.exit(1)
	try:
		set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
	except Errors.WafError ,e:
		Logs.pprint('RED',e.verbose_msg)
		Logs.error(str(e))
		sys.exit(1)
	except Exception ,e:
		Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
		traceback.print_exc(file=sys.stdout)
		sys.exit(2)
	try:
		run_commands()
	except Errors.WafError ,e:
		if Logs.verbose>1:
			Logs.pprint('RED',e.verbose_msg)
		Logs.error(e.msg)
		sys.exit(1)
	except SystemExit:
		raise
	except Exception ,e:
		traceback.print_exc(file=sys.stdout)
		sys.exit(2)
	except KeyboardInterrupt:
		Logs.pprint('RED','Interrupted')
		sys.exit(68)
def set_main_module(file_path):
	"""Load the top-level wscript as Context.g_module.

	Missing standard entry points (dist, distclean, init, shutdown,
	options, ...) are filled in with defaults so later code can call
	them unconditionally.
	"""
	Context.g_module=Context.load_module(file_path)
	Context.g_module.root_path=file_path
	def set_def(obj):
		# Install *obj* only when the wscript did not define it itself.
		name=obj.__name__
		if not name in Context.g_module.__dict__:
			setattr(Context.g_module,name,obj)
	# NOTE(review): `update` and `distcheck` are defined elsewhere in this
	# module; `update` appears twice in this list (harmless but redundant).
	for k in[update,dist,distclean,distcheck,update]:
		set_def(k)
	if not'init'in Context.g_module.__dict__:
		Context.g_module.init=Utils.nada
	if not'shutdown'in Context.g_module.__dict__:
		Context.g_module.shutdown=Utils.nada
	if not'options'in Context.g_module.__dict__:
		Context.g_module.options=Utils.nada
def parse_options():
	"""Parse the command line and configure logging verbosity/zones."""
	Context.create_context('options').execute()
	if not Options.commands:
		# No command given -> run the default one ("build").
		Options.commands=[default_cmd]
	Options.commands=[x for x in Options.commands if x!='options']
	Logs.verbose=Options.options.verbose
	Logs.init_log()
	if Options.options.zones:
		Logs.zones=Options.options.zones.split(',')
		if not Logs.verbose:
			Logs.verbose=1
	elif Logs.verbose>0:
		Logs.zones=['runner']
	if Logs.verbose>2:
		Logs.zones=['*']
def run_command(cmd_name):
	"""Create, configure and execute the context for *cmd_name*; return it."""
	ctx=Context.create_context(cmd_name)
	ctx.log_timer=Utils.Timer()
	ctx.options=Options.options
	ctx.cmd=cmd_name
	ctx.execute()
	return ctx
def run_commands():
	"""Run 'init', then each requested command in order, then 'shutdown'."""
	parse_options()
	run_command('init')
	while Options.commands:
		cmd_name=Options.commands.pop(0)
		ctx=run_command(cmd_name)
		Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
	run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
	"""Remove build artefacts under *dirname* plus the cache files.

	Deletes every file _can_distclean() accepts, the database file,
	config.log and the c4che directory; failures are warned about or
	silently ignored.
	"""
	for(root,dirs,files)in os.walk(dirname):
		for f in files:
			if _can_distclean(f):
				fname=root+os.sep+f
				try:
					os.remove(fname)
				except OSError:
					Logs.warn('Could not remove %r'%fname)
	for x in[Context.DBFILE,'config.log']:
		try:
			os.remove(x)
		except OSError:
			pass
	try:
		shutil.rmtree('c4che')
	except OSError:
		pass
def distclean(ctx):
	'''removes the build directory'''
	lst=os.listdir('.')
	for f in lst:
		if f==Options.lockfile:
			try:
				proj=ConfigSet.ConfigSet(f)
			except IOError:
				Logs.warn('Could not read %r'%f)
				continue
			if proj['out_dir']!=proj['top_dir']:
				# Out-of-tree build: the whole build directory can go.
				try:
					shutil.rmtree(proj['out_dir'])
				except IOError:
					pass
				except OSError ,e:
					# ENOENT (already gone) is fine; anything else is warned.
					if e.errno!=errno.ENOENT:
						Logs.warn('project %r cannot be removed'%proj[Context.OUT])
			else:
				# In-tree build: delete artefacts selectively.
				distclean_dir(proj['out_dir'])
			for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
				try:
					os.remove(os.path.join(k,Options.lockfile))
				except OSError ,e:
					if e.errno!=errno.ENOENT:
						Logs.warn('file %r cannot be removed'%f)
		# Also drop the unpacked .waf-* library folder unless more
		# commands still need it.
		if f.startswith('.waf')and not Options.commands:
			shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
    '''creates an archive containing the project source code'''
    cmd='dist'
    fun='dist'
    # Archive format; 'tar.bz2', 'tar.gz' or 'zip'.
    algo='tar.bz2'
    # Optional mapping from algo to a custom file extension.
    ext_algo={}
    def execute(self):
        """Recurse into the project script, then build the archive."""
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()
    def archive(self):
        """Create the source archive in base_path using the selected algo."""
        import tarfile
        arch_name=self.get_arch_name()
        # base_path may have been set by the project script; default to path.
        try:
            self.base_path
        except AttributeError:
            self.base_path=self.path
        node=self.base_path.make_node(arch_name)
        # Remove any stale archive from a previous run.
        try:
            node.delete()
        except Exception:
            pass
        files=self.get_files()
        if self.algo.startswith('tar.'):
            tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
            for x in files:
                self.add_tar_file(x,tar)
            tar.close()
        elif self.algo=='zip':
            import zipfile
            zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
            for x in files:
                archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
                zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
            zip.close()
        else:
            self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
        # hashlib is preferred; the bare 'sha' module is the py2.4 fallback.
        try:
            from hashlib import sha1 as sha
        except ImportError:
            from sha import sha
        # Digest of the archive, shown for convenience; failure is harmless.
        try:
            digest=" (sha=%r)"%sha(node.read()).hexdigest()
        except Exception:
            digest=''
        Logs.info('New archive created: %s%s'%(self.arch_name,digest))
    def get_tar_path(self,node):
        """Return the on-disk path used to read *node* into the tarball."""
        return node.abspath()
    def add_tar_file(self,x,tar):
        """Add one file to *tar* with normalized root ownership metadata."""
        p=self.get_tar_path(x)
        tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
        # Neutral ownership so archives are reproducible across users.
        tinfo.uid=0
        tinfo.gid=0
        tinfo.uname='root'
        tinfo.gname='root'
        fu=None
        try:
            fu=open(p,'rb')
            tar.addfile(tinfo,fileobj=fu)
        finally:
            if fu:
                fu.close()
    def get_tar_prefix(self):
        """Directory prefix inside the tarball (defaults to name-version)."""
        try:
            return self.tar_prefix
        except AttributeError:
            return self.get_base_name()
    def get_arch_name(self):
        """Archive filename, computed once and cached on the instance."""
        try:
            self.arch_name
        except AttributeError:
            self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
        return self.arch_name
    def get_base_name(self):
        """'appname-version' taken from the project script, cached."""
        try:
            self.base_name
        except AttributeError:
            appname=getattr(Context.g_module,Context.APPNAME,'noname')
            version=getattr(Context.g_module,Context.VERSION,'1.0')
            self.base_name=appname+'-'+version
        return self.base_name
    def get_excl(self):
        """Exclusion patterns for the file scan (waf files, editor backups,
        bytecode, plus the build directory when it lies under base_path)."""
        try:
            return self.excl
        except AttributeError:
            self.excl=Node.exclude_regs+' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
            nd=self.root.find_node(Context.out_dir)
            if nd:
                self.excl+=' '+nd.path_from(self.base_path)
            return self.excl
    def get_files(self):
        """Files to archive: self.files if the project set it, else a glob."""
        try:
            files=self.files
        except AttributeError:
            files=self.base_path.ant_glob('**/*',excl=self.get_excl())
        return files
def dist(ctx):
    '''makes a tarball for redistributing the sources'''
    # Placeholder: the real work happens in the Dist context class bound to
    # cmd='dist'; project scripts may override this function to customize it.
    pass
class DistCheck(Dist):
    """Create the source archive, then verify that the unpacked tree can be
    configured, installed and uninstalled (like automake's distcheck)."""
    fun='distcheck'
    cmd='distcheck'
    def execute(self):
        self.recurse([os.path.dirname(Context.g_module.root_path)])
        self.archive()
        self.check()
    def check(self):
        """Extract the archive and run configure/install/uninstall in it."""
        import tempfile,tarfile
        t=None
        try:
            t=tarfile.open(self.get_arch_name())
            for x in t:
                t.extract(x)
        finally:
            if t:
                t.close()
        # Extra configure arguments: explicit --distcheck-args wins, else
        # reuse whatever flags were passed to the current invocation.
        cfg=[]
        if Options.options.distcheck_args:
            cfg=shlex.split(Options.options.distcheck_args)
        else:
            cfg=[x for x in sys.argv if x.startswith('-')]
        # Install into a throwaway destdir so uninstall can be verified.
        instdir=tempfile.mkdtemp('.inst',self.get_base_name())
        ret=Utils.subprocess.Popen([sys.executable,sys.argv[0],'configure','install','uninstall','--destdir='+instdir]+cfg,cwd=self.get_base_name()).wait()
        if ret:
            raise Errors.WafError('distcheck failed with code %i'%ret)
        # After uninstall the destdir must be gone, else files were leaked.
        if os.path.exists(instdir):
            raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
        shutil.rmtree(self.get_base_name())
def distcheck(ctx):
    '''checks if the project compiles (tarball from 'dist')'''
    # Placeholder: the real work happens in the DistCheck context class
    # bound to cmd='distcheck'; project scripts may override this function.
    pass
def update(ctx):
    '''updates the plugins from the *waflib/extras* directory'''
    # BUG FIX: ''.split(',') returns [''], not [], so the "no files given"
    # fallback below could never trigger and an empty tool name was passed
    # to download_tool. Discard empty entries so the fallback works.
    lst=[x for x in Options.options.files.split(',') if x]
    if not lst:
        # No explicit list: refresh every extra tool currently present.
        lst=[x for x in Utils.listdir(Context.waf_dir+'/waflib/extras')if x.endswith('.py')]
    for x in lst:
        tool=x.replace('.py','')
        try:
            Configure.download_tool(tool,force=True,ctx=ctx)
        except Errors.WafError:
            Logs.error('Could not find the tool %s in the remote repository'%x)
def autoconfigure(execute_method):
    """Decorator for Context.execute: when Configure.autoconfig is enabled,
    transparently (re-)run 'configure' before the wrapped command whenever
    the lock file is missing, was created elsewhere, or its tracked files
    changed since configuration."""
    def execute(self):
        if not Configure.autoconfig:
            return execute_method(self)
        env=ConfigSet.ConfigSet()
        do_config=False
        try:
            env.load(os.path.join(Context.top_dir,Options.lockfile))
        except Exception:
            # No readable lock file: the project was never configured here.
            Logs.warn('Configuring the project')
            do_config=True
        else:
            if env.run_dir!=Context.run_dir:
                # Configured from a different directory: reconfigure.
                do_config=True
            else:
                # Hash the contents of the files recorded at configure time
                # and compare against the stored hash to detect changes.
                h=0
                for f in env['files']:
                    h=hash((h,Utils.readf(f,'rb')))
                do_config=h!=env.hash
        if do_config:
            # Queue 'configure' followed by the original command, then bail
            # out; the command loop will pick both up in order.
            Options.commands.insert(0,self.cmd)
            Options.commands.insert(0,'configure')
            return
        return execute_method(self)
    return execute
# Monkey-patch BuildContext.execute so builds trigger automatic
# (re-)configuration when Configure.autoconfig is enabled.
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| gpl-2.0 |
ioram7/keystone-federado-pgid2013 | build/sqlalchemy-migrate/migrate/tests/changeset/test_constraint.py | 30 | 10924 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from sqlalchemy import *
from sqlalchemy.util import *
from sqlalchemy.exc import *
from migrate.exceptions import *
from migrate.changeset import *
from migrate.tests import fixture
class CommonTestConstraint(fixture.DB):
    """helper functions to test constraints.

    we just create a fresh new table and make sure everything is
    as required.
    """
    def _setup(self, url):
        super(CommonTestConstraint, self)._setup(url)
        self._create_table()

    def _teardown(self):
        # Drop the scratch table if this test run actually created it.
        if hasattr(self, 'table') and self.engine.has_table(self.table.name):
            self.table.drop()
        super(CommonTestConstraint, self)._teardown()

    def _create_table(self):
        """Create a fresh two-column table with no constraints at all."""
        self._connect(self.url)
        self.meta = MetaData(self.engine)
        self.tablename = 'mytable'
        self.table = Table(self.tablename, self.meta,
            Column(u'id', Integer, nullable=False),
            Column(u'fkey', Integer, nullable=False),
            mysql_engine='InnoDB')
        # Drop any leftover from a previous (failed) run before creating.
        if self.engine.has_table(self.table.name):
            self.table.drop()
        self.table.create()

        # make sure we start at zero
        self.assertEquals(len(self.table.primary_key), 0)
        self.assert_(isinstance(self.table.primary_key,
            schema.PrimaryKeyConstraint), self.table.primary_key.__class__)
class TestConstraint(CommonTestConstraint):
    """Lifecycle tests (define, create, drop) for PK, FK and CHECK
    constraints against the scratch table built by the base class."""
    level = fixture.DB.CONNECT

    def _define_pk(self, *cols):
        """Create then drop a PrimaryKeyConstraint over *cols*, asserting
        the table's reflected primary key follows along."""
        # Add a pk by creating a PK constraint
        if (self.engine.name in ('oracle', 'firebird')):
            # Can't drop Oracle PKs without an explicit name
            pk = PrimaryKeyConstraint(table=self.table, name='temp_pk_key', *cols)
        else:
            pk = PrimaryKeyConstraint(table=self.table, *cols)
        self.compare_columns_equal(pk.columns, cols)
        pk.create()
        self.refresh_table()
        if not self.url.startswith('sqlite'):
            self.compare_columns_equal(self.table.primary_key, cols, ['type', 'autoincrement'])

        # Drop the PK constraint
        #if (self.engine.name in ('oracle', 'firebird')):
        #    # Apparently Oracle PK names aren't introspected
        #    pk.name = self.table.primary_key.name
        pk.drop()
        self.refresh_table()
        self.assertEquals(len(self.table.primary_key), 0)
        self.assert_(isinstance(self.table.primary_key, schema.PrimaryKeyConstraint))
        return pk

    @fixture.usedb(not_supported='sqlite')
    def test_define_fk(self):
        """FK constraints can be defined, created, and dropped"""
        # FK target must be unique
        pk = PrimaryKeyConstraint(self.table.c.id, table=self.table, name="pkid")
        pk.create()

        # Add a FK by creating a FK constraint
        # SQLA_07 switches between the 0.7+ set API and the older _list API.
        if SQLA_07:
            self.assertEquals(list(self.table.c.fkey.foreign_keys), [])
        else:
            self.assertEquals(self.table.c.fkey.foreign_keys._list, [])
        fk = ForeignKeyConstraint([self.table.c.fkey],
            [self.table.c.id],
            name="fk_id_fkey",
            ondelete="CASCADE")
        if SQLA_07:
            self.assert_(list(self.table.c.fkey.foreign_keys) is not [])
        else:
            self.assert_(self.table.c.fkey.foreign_keys._list is not [])
        for key in fk.columns:
            self.assertEquals(key, self.table.c.fkey.name)
        self.assertEquals([e.column for e in fk.elements], [self.table.c.id])
        self.assertEquals(list(fk.referenced), [self.table.c.id])

        if self.url.startswith('mysql'):
            # MySQL FKs need an index
            index = Index('index_name', self.table.c.fkey)
            index.create()
        fk.create()

        # test for ondelete/onupdate
        if SQLA_07:
            fkey = list(self.table.c.fkey.foreign_keys)[0]
        else:
            fkey = self.table.c.fkey.foreign_keys._list[0]
        self.assertEquals(fkey.ondelete, "CASCADE")
        # TODO: test on real db if it was set

        self.refresh_table()
        if SQLA_07:
            self.assert_(list(self.table.c.fkey.foreign_keys) is not [])
        else:
            self.assert_(self.table.c.fkey.foreign_keys._list is not [])

        fk.drop()
        self.refresh_table()
        if SQLA_07:
            self.assertEquals(list(self.table.c.fkey.foreign_keys), [])
        else:
            self.assertEquals(self.table.c.fkey.foreign_keys._list, [])

    @fixture.usedb()
    def test_define_pk(self):
        """PK constraints can be defined, created, and dropped"""
        self._define_pk(self.table.c.fkey)

    @fixture.usedb()
    def test_define_pk_multi(self):
        """Multicolumn PK constraints can be defined, created, and dropped"""
        self._define_pk(self.table.c.id, self.table.c.fkey)

    @fixture.usedb(not_supported=['firebird'])
    def test_drop_cascade(self):
        """Drop constraint cascaded"""
        pk = PrimaryKeyConstraint('fkey', table=self.table, name="id_pkey")
        pk.create()
        self.refresh_table()
        # Drop the PK constraint forcing cascade
        pk.drop(cascade=True)
        # TODO: add real assertion if it was added

    @fixture.usedb(supported=['mysql'])
    def test_fail_mysql_check_constraints(self):
        """Check constraints raise NotSupported for mysql on drop"""
        cons = CheckConstraint('id > 3', name="id_check", table=self.table)
        cons.create()
        self.refresh_table()
        try:
            cons.drop()
        except NotSupportedError:
            pass
        else:
            self.fail()

    @fixture.usedb(not_supported=['sqlite', 'mysql'])
    def test_named_check_constraints(self):
        """Check constraints can be defined, created, and dropped"""
        # A CheckConstraint without a name or table must be rejected.
        self.assertRaises(InvalidConstraintError, CheckConstraint, 'id > 3')
        cons = CheckConstraint('id > 3', name="id_check", table=self.table)
        cons.create()
        self.refresh_table()

        # id=4 satisfies the check; id=1 must be rejected by the database.
        self.table.insert(values={'id': 4, 'fkey': 1}).execute()
        try:
            self.table.insert(values={'id': 1, 'fkey': 1}).execute()
        except (IntegrityError, ProgrammingError):
            pass
        else:
            self.fail()

        # Remove the name, drop the constraint; it should succeed
        cons.drop()
        self.refresh_table()
        self.table.insert(values={'id': 2, 'fkey': 2}).execute()
        self.table.insert(values={'id': 1, 'fkey': 2}).execute()
class TestAutoname(CommonTestConstraint):
    """Every method tests for a type of constraint wether it can autoname
    itself and if you can pass object instance and names to classes.
    """
    level = fixture.DB.CONNECT

    @fixture.usedb(not_supported=['oracle', 'firebird'])
    def test_autoname_pk(self):
        """PrimaryKeyConstraints can guess their name if None is given"""
        # Don't supply a name; it should create one
        cons = PrimaryKeyConstraint(self.table.c.id)
        cons.create()
        self.refresh_table()
        if not self.url.startswith('sqlite'):
            # TODO: test for index for sqlite
            self.compare_columns_equal(cons.columns, self.table.primary_key, ['autoincrement', 'type'])

        # Remove the name, drop the constraint; it should succeed
        cons.name = None
        cons.drop()
        self.refresh_table()
        self.assertEquals(list(), list(self.table.primary_key))

        # test string names
        cons = PrimaryKeyConstraint('id', table=self.table)
        cons.create()
        self.refresh_table()
        if not self.url.startswith('sqlite'):
            # TODO: test for index for sqlite
            self.compare_columns_equal(cons.columns, self.table.primary_key)
        cons.name = None
        cons.drop()

    @fixture.usedb(not_supported=['oracle', 'sqlite', 'firebird'])
    def test_autoname_fk(self):
        """ForeignKeyConstraints can guess their name if None is given"""
        cons = PrimaryKeyConstraint(self.table.c.id)
        cons.create()

        cons = ForeignKeyConstraint([self.table.c.fkey], [self.table.c.id])
        cons.create()
        self.refresh_table()
        # BUG FIX: these identity checks were bare expressions (no-ops);
        # wrap them in assertions so a wrong FK target fails the test.
        if SQLA_07:
            self.assert_(list(self.table.c.fkey.foreign_keys)[0].column is self.table.c.id)
        else:
            self.assert_(self.table.c.fkey.foreign_keys[0].column is self.table.c.id)

        # Remove the name, drop the constraint; it should succeed
        cons.name = None
        cons.drop()
        self.refresh_table()
        if SQLA_07:
            self.assertEquals(list(self.table.c.fkey.foreign_keys), list())
        else:
            self.assertEquals(self.table.c.fkey.foreign_keys._list, list())

        # test string names
        cons = ForeignKeyConstraint(['fkey'], ['%s.id' % self.tablename], table=self.table)
        cons.create()
        self.refresh_table()
        # BUG FIX: same as above — previously unasserted expressions.
        if SQLA_07:
            self.assert_(list(self.table.c.fkey.foreign_keys)[0].column is self.table.c.id)
        else:
            self.assert_(self.table.c.fkey.foreign_keys[0].column is self.table.c.id)

        # Remove the name, drop the constraint; it should succeed
        cons.name = None
        cons.drop()

    @fixture.usedb(not_supported=['oracle', 'sqlite', 'mysql'])
    def test_autoname_check(self):
        """CheckConstraints can guess their name if None is given"""
        cons = CheckConstraint('id > 3', columns=[self.table.c.id])
        cons.create()
        self.refresh_table()

        if not self.engine.name == 'mysql':
            self.table.insert(values={'id': 4, 'fkey': 1}).execute()
            try:
                self.table.insert(values={'id': 1, 'fkey': 2}).execute()
            except (IntegrityError, ProgrammingError):
                pass
            else:
                self.fail()

        # Remove the name, drop the constraint; it should succeed
        cons.name = None
        cons.drop()
        self.refresh_table()
        self.table.insert(values={'id': 2, 'fkey': 2}).execute()
        self.table.insert(values={'id': 1, 'fkey': 3}).execute()

    @fixture.usedb(not_supported=['oracle', 'sqlite'])
    def test_autoname_unique(self):
        """UniqueConstraints can guess their name if None is given"""
        cons = UniqueConstraint(self.table.c.fkey)
        cons.create()
        self.refresh_table()

        # A duplicate fkey value must now be rejected by the database.
        self.table.insert(values={'fkey': 4, 'id': 1}).execute()
        try:
            self.table.insert(values={'fkey': 4, 'id': 2}).execute()
        except (sqlalchemy.exc.IntegrityError,
            sqlalchemy.exc.ProgrammingError):
            pass
        else:
            self.fail()

        # Remove the name, drop the constraint; it should succeed
        cons.name = None
        cons.drop()
        self.refresh_table()
        self.table.insert(values={'fkey': 4, 'id': 2}).execute()
        self.table.insert(values={'fkey': 4, 'id': 1}).execute()
| apache-2.0 |
hejuna/bite-project | deps/gdata-python-client/tests/all_tests_cached.py | 41 | 1091 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
__author__ = 'j.s@google.com (Jeff Scudder)'
import unittest
import all_tests
import gdata.test_config as conf
# Run the live tests and cache the server responses, without clearing any
# previously cached responses first.
conf.options.set_value('runlive', 'true')
conf.options.set_value('savecache', 'true')
conf.options.set_value('clearcache', 'false')
def suite():
    """Return the aggregated test suite.

    BUG FIX: the original body referenced ``atom_tests.core_test``, a module
    never imported here, so calling suite() raised NameError. Delegate to
    the imported ``all_tests`` aggregate instead, consistent with the
    ``__main__`` block of this script.
    """
    return all_tests.suite()
if __name__ == '__main__':
    # Run the aggregated suite when this module is executed as a script.
    unittest.TextTestRunner().run(all_tests.suite())
| apache-2.0 |
windyuuy/opera | chromium/src/third_party/WebKit/Tools/Scripts/webkitpy/tool/multicommandtool.py | 4 | 13191 | # Copyright (c) 2009 Google Inc. All rights reserved.
# Copyright (c) 2009 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# MultiCommandTool provides a framework for writing svn-like/git-like tools
# which are called with the following format:
# tool-name [global options] command-name [command options]
import logging
import sys
from optparse import OptionParser, IndentedHelpFormatter, SUPPRESS_USAGE, make_option
from webkitpy.tool.grammar import pluralize
# Module-level logger shared by the command classes below.
_log = logging.getLogger(__name__)
class TryAgain(Exception):
    """Raised by a command to ask MultiCommandTool.main to re-run it."""
    pass
class Command(object):
    """Base class for a single subcommand of a MultiCommandTool.

    Subclasses set `name`, optionally `show_in_main_help`, and implement
    execute(). NOTE: this module uses Python 2 syntax (e.g. the
    `raise NotImplementedError, msg` form below).
    """
    name = None
    show_in_main_help = False

    def __init__(self, help_text, argument_names=None, options=None, long_help=None, requires_local_commits=False):
        self.help_text = help_text
        self.long_help = long_help
        self.argument_names = argument_names
        self.required_arguments = self._parse_required_arguments(argument_names)
        self.options = options
        self.requires_local_commits = requires_local_commits
        self._tool = None
        # option_parser can be overriden by the tool using set_option_parser
        # This default parser will be used for standalone_help printing.
        self.option_parser = HelpPrintingOptionParser(usage=SUPPRESS_USAGE, add_help_option=False, option_list=self.options)

    def _exit(self, code):
        sys.exit(code)

    # This design is slightly awkward, but we need the
    # the tool to be able to create and modify the option_parser
    # before it knows what Command to run.
    def set_option_parser(self, option_parser):
        self.option_parser = option_parser
        self._add_options_to_parser()

    def _add_options_to_parser(self):
        options = self.options or []
        for option in options:
            self.option_parser.add_option(option)

    # The tool calls bind_to_tool on each Command after adding it to its list.
    def bind_to_tool(self, tool):
        # Command instances can only be bound to one tool at a time.
        if self._tool and tool != self._tool:
            raise Exception("Command already bound to tool!")
        self._tool = tool

    @staticmethod
    def _parse_required_arguments(argument_names):
        """Return the arguments not wrapped in [] (i.e. the required ones)."""
        required_args = []
        if not argument_names:
            return required_args
        split_args = argument_names.split(" ")
        for argument in split_args:
            if argument[0] == '[':
                # For now our parser is rather dumb. Do some minimal validation that
                # we haven't confused it.
                if argument[-1] != ']':
                    raise Exception("Failure to parse argument string %s. Argument %s is missing ending ]" % (argument_names, argument))
            else:
                required_args.append(argument)
        return required_args

    def name_with_arguments(self):
        """Usage-line form of the command, e.g. 'name [options] ARGS'."""
        usage_string = self.name
        if self.options:
            usage_string += " [options]"
        if self.argument_names:
            usage_string += " " + self.argument_names
        return usage_string

    def parse_args(self, args):
        return self.option_parser.parse_args(args)

    def check_arguments_and_execute(self, options, args, tool=None):
        """Validate the required-argument count, then run execute().

        Returns the command's exit code (0 on success, 1 on bad usage)."""
        if len(args) < len(self.required_arguments):
            _log.error("%s required, %s provided.  Provided: %s  Required: %s\nSee '%s help %s' for usage." % (
                pluralize("argument", len(self.required_arguments)),
                pluralize("argument", len(args)),
                "'%s'" % " ".join(args),
                " ".join(self.required_arguments),
                tool.name(),
                self.name))
            return 1
        return self.execute(options, args, tool) or 0

    def standalone_help(self):
        """Full help text for this command alone (usage + options)."""
        help_text = self.name_with_arguments().ljust(len(self.name_with_arguments()) + 3) + self.help_text + "\n\n"
        if self.long_help:
            help_text += "%s\n\n" % self.long_help
        help_text += self.option_parser.format_option_help(IndentedHelpFormatter())
        return help_text

    def execute(self, options, args, tool):
        raise NotImplementedError, "subclasses must implement"

    # main() exists so that Commands can be turned into stand-alone scripts.
    # Other parts of the code will likely require modification to work stand-alone.
    def main(self, args=sys.argv):
        (options, args) = self.parse_args(args)
        # Some commands might require a dummy tool
        return self.check_arguments_and_execute(options, args)
# FIXME: This should just be rolled into Command.  help_text and argument_names do not need to be instance variables.
class AbstractDeclarativeCommand(Command):
    """Command variant configured via class attributes instead of
    constructor arguments; subclasses declare help_text/argument_names."""
    help_text = None
    argument_names = None
    long_help = None
    def __init__(self, options=None, **kwargs):
        Command.__init__(self, self.help_text, self.argument_names, options=options, long_help=self.long_help, **kwargs)
class HelpPrintingOptionParser(OptionParser):
    """OptionParser that points users at --help on errors and supports a
    lazily-computed epilog (so it can be context sensitive)."""
    def __init__(self, epilog_method=None, *args, **kwargs):
        self.epilog_method = epilog_method
        OptionParser.__init__(self, *args, **kwargs)

    def error(self, msg):
        self.print_usage(sys.stderr)
        error_message = "%s: error: %s\n" % (self.get_prog_name(), msg)
        # This method is overriden to add this one line to the output:
        error_message += "\nType \"%s --help\" to see usage.\n" % self.get_prog_name()
        self.exit(1, error_message)

    # We override format_epilog to avoid the default formatting which would paragraph-wrap the epilog
    # and also to allow us to compute the epilog lazily instead of in the constructor (allowing it to be context sensitive).
    def format_epilog(self, epilog):
        if self.epilog_method:
            return "\n%s\n" % self.epilog_method()
        return ""
class HelpCommand(AbstractDeclarativeCommand):
    """Built-in 'help' command: prints tool-level or per-command help.

    NOTE: uses the Python 2 print statement."""
    name = "help"
    help_text = "Display information about this program or its subcommands"
    argument_names = "[COMMAND]"

    def __init__(self):
        options = [
            make_option("-a", "--all-commands", action="store_true", dest="show_all_commands", help="Print all available commands"),
        ]
        AbstractDeclarativeCommand.__init__(self, options)
        self.show_all_commands = False # A hack used to pass --all-commands to _help_epilog even though it's called by the OptionParser.

    def _help_epilog(self):
        """Build the command listing shown after the option help."""
        # Only show commands which are relevant to this checkout's SCM system.  Might this be confusing to some users?
        if self.show_all_commands:
            epilog = "All %prog commands:\n"
            relevant_commands = self._tool.commands[:]
        else:
            epilog = "Common %prog commands:\n"
            relevant_commands = filter(self._tool.should_show_in_main_help, self._tool.commands)
        longest_name_length = max(map(lambda command: len(command.name), relevant_commands))
        relevant_commands.sort(lambda a, b: cmp(a.name, b.name))
        command_help_texts = map(lambda command: "   %s   %s\n" % (command.name.ljust(longest_name_length), command.help_text), relevant_commands)
        epilog += "%s\n" % "".join(command_help_texts)
        epilog += "See '%prog help --all-commands' to list all commands.\n"
        epilog += "See '%prog help COMMAND' for more information on a specific command.\n"
        return epilog.replace("%prog", self._tool.name()) # Use of %prog here mimics OptionParser.expand_prog_name().

    # FIXME: This is a hack so that we don't show --all-commands as a global option:
    def _remove_help_options(self):
        for option in self.options:
            self.option_parser.remove_option(option.get_opt_string())

    def execute(self, options, args, tool):
        # With an argument, show that command's standalone help instead.
        if args:
            command = self._tool.command_by_name(args[0])
            if command:
                print command.standalone_help()
                return 0

        self.show_all_commands = options.show_all_commands
        self._remove_help_options()
        self.option_parser.print_help()
        return 0
class MultiCommandTool(object):
    """Driver for svn/git-style tools invoked as:
    tool-name [global options] command-name [command options].

    NOTE: uses Python 2 syntax (e.g. 'except TryAgain, e' and the
    'raise X, msg' form)."""
    global_options = None

    def __init__(self, name=None, commands=None):
        self._name = name or OptionParser(prog=name).get_prog_name() # OptionParser has nice logic for fetching the name.
        # Allow the unit tests to disable command auto-discovery.
        self.commands = commands or [cls() for cls in self._find_all_commands() if cls.name]
        self.help_command = self.command_by_name(HelpCommand.name)
        # Require a help command, even if the manual test list doesn't include one.
        if not self.help_command:
            self.help_command = HelpCommand()
            self.commands.append(self.help_command)
        for command in self.commands:
            command.bind_to_tool(self)

    @classmethod
    def _add_all_subclasses(cls, class_to_crawl, seen_classes):
        # Recursively collect every subclass of class_to_crawl.
        for subclass in class_to_crawl.__subclasses__():
            if subclass not in seen_classes:
                seen_classes.add(subclass)
                cls._add_all_subclasses(subclass, seen_classes)

    @classmethod
    def _find_all_commands(cls):
        """Auto-discover commands: every (transitive) subclass of Command."""
        commands = set()
        cls._add_all_subclasses(Command, commands)
        return sorted(commands)

    def name(self):
        return self._name

    def _create_option_parser(self):
        usage = "Usage: %prog [options] COMMAND [ARGS]"
        return HelpPrintingOptionParser(epilog_method=self.help_command._help_epilog, prog=self.name(), usage=usage)

    @staticmethod
    def _split_command_name_from_args(args):
        # Assume the first argument which doesn't start with "-" is the command name.
        command_index = 0
        for arg in args:
            if arg[0] != "-":
                break
            command_index += 1
        else:
            return (None, args[:])

        command = args[command_index]
        return (command, args[:command_index] + args[command_index + 1:])

    def command_by_name(self, command_name):
        """Return the Command with the given name, or None."""
        for command in self.commands:
            if command_name == command.name:
                return command
        return None

    def path(self):
        raise NotImplementedError, "subclasses must implement"

    def command_completed(self):
        # Hook called after a command finishes; subclasses may override.
        pass

    def should_show_in_main_help(self, command):
        return command.show_in_main_help

    def should_execute_command(self, command):
        # Subclasses may veto commands; returns (should_execute, reason).
        return True

    def _add_global_options(self, option_parser):
        global_options = self.global_options or []
        for option in global_options:
            option_parser.add_option(option)

    def handle_global_options(self, options):
        # Hook for subclasses to process tool-wide options.
        pass

    def main(self, argv=sys.argv):
        """Parse argv, dispatch to the selected command and return its
        exit code; unknown commands fall back to the help command."""
        (command_name, args) = self._split_command_name_from_args(argv[1:])

        option_parser = self._create_option_parser()
        self._add_global_options(option_parser)

        command = self.command_by_name(command_name) or self.help_command
        if not command:
            option_parser.error("%s is not a recognized command" % command_name)

        command.set_option_parser(option_parser)
        (options, args) = command.parse_args(args)
        self.handle_global_options(options)

        (should_execute, failure_reason) = self.should_execute_command(command)
        if not should_execute:
            _log.error(failure_reason)
            return 0 # FIXME: Should this really be 0?

        # Re-run the command whenever it signals TryAgain.
        while True:
            try:
                result = command.check_arguments_and_execute(options, args, self)
                break
            except TryAgain, e:
                pass

        self.command_completed()
        return result
| bsd-3-clause |
jamesraul/google_pyquick | babynames/solution/babynames.py | 212 | 3852 | #!/usr/bin/python
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
import sys
import re
"""Baby Names exercise
Define the extract_names() function below and change main()
to call it.
For writing regex, it's nice to include a copy of the target
text for inspiration.
Here's what the html looks like in the baby.html files:
...
<h3 align="center">Popularity in 1990</h3>
....
<tr align="right"><td>1</td><td>Michael</td><td>Jessica</td>
<tr align="right"><td>2</td><td>Christopher</td><td>Ashley</td>
<tr align="right"><td>3</td><td>Matthew</td><td>Brittany</td>
...
Suggested milestones for incremental development:
-Extract the year and print it
-Extract the names and rank numbers and just print them
-Get the names data into a dict and print it
-Build the [year, 'name rank', ... ] list and print it
-Fix main() to use the extract_names list
"""
def extract_names(filename):
    """
    Given a file name for baby.html, returns a list starting with the year string
    followed by the name-rank strings in alphabetical order.
    ['2006', 'Aaliyah 91', 'Aaron 57', 'Abagail 895', ...]

    Exits the process with status 1 if no year header can be found.
    """
    # FIXES vs. the original: the 'rU' open mode was deprecated and removed
    # in Python 3.11 (plain text mode does universal newlines anyway); the
    # file handle was never closed; and the regex contained a stray '\<'
    # escape (invalid-escape warning) that added nothing.
    names = []

    # Regex over the whole text at once is simpler than line-by-line parsing.
    with open(filename, 'r') as f:
        text = f.read()

    # Get the year from the "<h3 ...>Popularity in YYYY</h3>" header.
    year_match = re.search(r'Popularity\sin\s(\d\d\d\d)', text)
    if not year_match:
        # We didn't find a year, so we'll exit with an error message.
        sys.stderr.write('Couldn\'t find the year!\n')
        sys.exit(1)
    names.append(year_match.group(1))

    # Extract all the data tuples; each tuple is (rank, boy-name, girl-name).
    tuples = re.findall(r'<td>(\d+)</td><td>(\w+)</td><td>(\w+)</td>', text)

    # Map each name to its best (first-seen, i.e. smallest) rank. A name can
    # appear as both a boy and a girl name; the earlier rank wins.
    names_to_rank = {}
    for rank, boyname, girlname in tuples:
        if boyname not in names_to_rank:
            names_to_rank[boyname] = rank
        if girlname not in names_to_rank:
            names_to_rank[girlname] = rank

    # Build up the result list, one "name rank" element per line.
    for name in sorted(names_to_rank):
        names.append(name + " " + names_to_rank[name])

    return names
def main():
    """Command-line driver: parse flags, then print or summarize each file.

    NOTE: uses the Python 2 print statement."""
    # Make a list of command line arguments, omitting the [0] element
    # which is the script itself.
    args = sys.argv[1:]
    if not args:
        print 'usage: [--summaryfile] file [file ...]'
        sys.exit(1)

    # Notice the summary flag and remove it from args if it is present.
    summary = False
    if args[0] == '--summaryfile':
        summary = True
        del args[0]

    # For each filename, get the names, then either print the text output
    # or write it to a <filename>.summary file.
    for filename in args:
        names = extract_names(filename)

        # Make text out of the whole list
        text = '\n'.join(names)

        if summary:
            outf = open(filename + '.summary', 'w')
            outf.write(text + '\n')
            outf.close()
        else:
            print text
if __name__ == '__main__':
    # Allow the module to be run directly as a script.
    main()
| apache-2.0 |
karllessard/tensorflow | tensorflow/lite/python/lite_v2_test_util.py | 4 | 3842 | # Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from six.moves import zip
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.eager import def_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
class ModelTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  """Base test class for TensorFlow Lite 2.x model tests."""

  def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
    """Evaluates the model on the `input_data`.

    Args:
      tflite_model: TensorFlow Lite model.
      input_data: List of EagerTensor const ops containing the input data for
        each input tensor.
      input_shapes: List of tuples representing the `shape_signature` and the
        new shape of each input tensor that has unknown dimensions.

    Returns:
      [np.ndarray]
    """
    interpreter = Interpreter(model_content=tflite_model)
    input_details = interpreter.get_input_details()
    if input_shapes:
      # Resize any tensors with unknown dimensions before allocation,
      # verifying the recorded shape signature matches the expectation.
      for idx, (shape_signature, final_shape) in enumerate(input_shapes):
        self.assertTrue(
            (input_details[idx]['shape_signature'] == shape_signature).all())
        index = input_details[idx]['index']
        interpreter.resize_tensor_input(index, final_shape, strict=True)
    interpreter.allocate_tensors()

    output_details = interpreter.get_output_details()
    # Re-fetch input details: resizing may have changed them.
    input_details = interpreter.get_input_details()

    for input_tensor, tensor_data in zip(input_details, input_data):
      interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
    interpreter.invoke()
    return [
        interpreter.get_tensor(details['index']) for details in output_details
    ]

  def _getSimpleVariableModel(self):
    """Returns a trackable with f(x) = v1 * v2 * x (v1=3, v2=2)."""
    root = tracking.AutoTrackable()
    root.v1 = variables.Variable(3.)
    root.v2 = variables.Variable(2.)
    root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
    return root

  def _getMultiFunctionModel(self):
    """Returns a model exposing two tf.functions (add and sub) whose
    variables are created lazily on first call."""

    class BasicModel(tracking.AutoTrackable):
      """Basic model with multiple functions."""

      def __init__(self):
        self.y = None
        self.z = None

      @def_function.function
      def add(self, x):
        if self.y is None:
          self.y = variables.Variable(2.)
        return x + self.y

      @def_function.function
      def sub(self, x):
        if self.z is None:
          self.z = variables.Variable(3.)
        return x - self.z

    return BasicModel()

  def _assertValidDebugInfo(self, debug_info):
    """Verify the DebugInfo is valid."""
    file_names = set()
    for file_path in debug_info.files:
      file_names.add(os.path.basename(file_path))
    # To make the test independent on how the nodes are created, we only assert
    # the name of this test file.
    self.assertIn('lite_v2_test.py', file_names)
    self.assertNotIn('lite_test.py', file_names)
| apache-2.0 |
TNT-Samuel/Coding-Projects | DNS Server/Source/Lib/site-packages/dask/bytes/pyarrow.py | 4 | 1258 | from __future__ import print_function, division, absolute_import
import posixpath
from .glob import generic_glob
from ..base import tokenize
import pyarrow as pa
class HDFS3Wrapper(pa.filesystem.DaskFileSystem):
    """Wrapper around `hdfs3.HDFileSystem` that allows it to be passed to
    pyarrow methods"""
    # The pyarrow adapter base class does not forward these predicates, so
    # delegate them explicitly to the wrapped hdfs3 filesystem.
    def isdir(self, path):
        # True if `path` exists and is a directory on HDFS.
        return self.fs.isdir(path)
    def isfile(self, path):
        # True if `path` exists and is a regular file on HDFS.
        return self.fs.isfile(path)
_MIN_PYARROW_VERSION_SUPPORTED = '0.8.1.dev81'
class PyArrowHadoopFileSystem(object):
    """Dask-style filesystem interface backed by ``pyarrow.hdfs``."""
    sep = "/"
    def __init__(self, **kwargs):
        # All keyword arguments are forwarded to pyarrow's HDFS client.
        self.fs = pa.hdfs.HadoopFileSystem(**kwargs)
    @classmethod
    def from_pyarrow(cls, fs):
        # Wrap an already-connected pyarrow filesystem without invoking
        # __init__ (which would open a new connection).
        out = object.__new__(cls)
        out.fs = fs
        return out
    def open(self, path, mode='rb', **kwargs):
        # Open a file on HDFS; returns a file-like object.
        return self.fs.open(path, mode=mode, **kwargs)
    def glob(self, path):
        # Expand a glob pattern using POSIX path semantics (HDFS paths).
        return sorted(generic_glob(self.fs, posixpath, path))
    def mkdirs(self, path):
        # Create the directory, including any missing parents.
        return self.fs.mkdir(path, create_parents=True)
    def ukey(self, path):
        # Unique token for the current state of the file; changes whenever
        # the file's last-modified timestamp changes (cache invalidation).
        return tokenize(path, self.fs.info(path)['last_modified'])
    def size(self, path):
        # Size of the file at `path`, in bytes.
        return self.fs.info(path)['size']
    def _get_pyarrow_filesystem(self):
        # Hook used by dask to obtain the underlying pyarrow filesystem.
        return self.fs
| gpl-3.0 |
shakamunyi/neutron-vrrp | neutron/tests/unit/nec/test_trema_driver.py | 8 | 13473 | # Copyright 2012 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Ryota MIBU
import random
import mock
from six import moves
from neutron.openstack.common import uuidutils
from neutron.plugins.nec.common import ofc_client
from neutron.plugins.nec.db import models as nmodels
from neutron.plugins.nec import drivers
from neutron.tests import base
class TestConfig(object):
    """Configuration for this test."""
    # Address and port of the (mocked) Trema OFC REST API endpoint.
    host = '127.0.0.1'
    port = 8888
class TremaDriverTestBase(base.BaseTestCase):
    """Common fixture for the Trema OFC driver tests."""
    # Name under which the driver variant is registered; subclasses override.
    driver_name = "trema"
    def setUp(self):
        super(TremaDriverTestBase, self).setUp()
        self.driver = drivers.get_driver(self.driver_name)(TestConfig)
        # Stub out the REST client so no HTTP requests are made; the tests
        # assert on the recorded calls instead.
        self.do_request = mock.patch.object(ofc_client.OFCClient,
                                            'do_request').start()
    def get_ofc_item_random_params(self):
        """create random parameters for ofc_item test."""
        tenant_id = uuidutils.generate_uuid()
        network_id = uuidutils.generate_uuid()
        port_id = uuidutils.generate_uuid()
        # Random MAC address in colon-separated hex form.
        mac = ':'.join(['%x' % random.randint(0, 255)
                        for i in moves.xrange(6)])
        portinfo = nmodels.PortInfo(id=port_id, datapath_id="0x123456789",
                                    port_no=1234, vlan_id=321,
                                    mac=mac)
        return tenant_id, network_id, portinfo
class TremaDriverNetworkTestBase(TremaDriverTestBase):
    """Tenant/network operations shared by the network-based Trema drivers."""
    # NOTE: the unusual "testa_"/"testc_" prefixes look intentional — they fix
    # the alphabetical execution order of the test methods (create before
    # delete); presumably relied on by the original suite — verify before
    # renaming.
    def test_create_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        ret = self.driver.create_tenant('dummy_desc', t)
        ofc_t_path = "/tenants/%s" % t
        self.assertEqual(ofc_t_path, ret)
        # There is no API call.
        self.assertEqual(0, self.do_request.call_count)
    def test_update_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        path = "/tenants/%s" % t
        self.driver.update_tenant(path, 'dummy_desc')
        # There is no API call.
        self.assertEqual(0, self.do_request.call_count)
    def testc_delete_tenant(self):
        t, n, p = self.get_ofc_item_random_params()
        path = "/tenants/%s" % t
        self.driver.delete_tenant(path)
        # There is no API call.
        self.assertEqual(0, self.do_request.call_count)
    def testa_create_network(self):
        t, n, p = self.get_ofc_item_random_params()
        description = "desc of %s" % n
        body = {'id': n, 'description': description}
        ret = self.driver.create_network(t, description, n)
        self.do_request.assert_called_once_with("POST", "/networks", body=body)
        self.assertEqual(ret, '/networks/%s' % n)
    def testc_delete_network(self):
        t, n, p = self.get_ofc_item_random_params()
        net_path = "/networks/%s" % n
        self.driver.delete_network(net_path)
        self.do_request.assert_called_once_with("DELETE", net_path)
class TremaPortBaseDriverTest(TremaDriverNetworkTestBase):
    """Tests for the port-based Trema driver (one OFC port per port)."""
    driver_name = "trema_port"
    def test_filter_supported(self):
        self.assertTrue(self.driver.filter_supported())
    def testd_create_port(self):
        _t, n, p = self.get_ofc_item_random_params()
        net_path = "/networks/%s" % n
        # Port number and VLAN id are sent as strings in the REST body.
        body = {'id': p.id,
                'datapath_id': p.datapath_id,
                'port': str(p.port_no),
                'vid': str(p.vlan_id)}
        ret = self.driver.create_port(net_path, p, p.id)
        self.do_request.assert_called_once_with(
            "POST", "/networks/%s/ports" % n, body=body)
        self.assertEqual(ret, '/networks/%s/ports/%s' % (n, p.id))
    def testd_delete_port(self):
        t, n, p = self.get_ofc_item_random_params()
        p_path = "/networks/%s/ports/%s" % (n, p.id)
        self.driver.delete_port(p_path)
        self.do_request.assert_called_once_with("DELETE", p_path)
class TremaPortMACBaseDriverTest(TremaDriverNetworkTestBase):
    """Tests for the port-MAC Trema driver (dummy port plus MAC attachment)."""
    driver_name = "trema_portmac"
    def test_filter_supported(self):
        self.assertTrue(self.driver.filter_supported())
    def testd_create_port(self):
        t, n, p = self.get_ofc_item_random_params()
        dummy_port = "dummy-%s" % p.id
        net_path = "/networks/%s" % n
        # Creating a port is expected to issue three REST calls: create a
        # temporary port, attach the MAC to it, then delete the temporary
        # port.
        path_1 = "/networks/%s/ports" % n
        body_1 = {'id': dummy_port,
                  'datapath_id': p.datapath_id,
                  'port': str(p.port_no),
                  'vid': str(p.vlan_id)}
        path_2 = "/networks/%s/ports/%s/attachments" % (n, dummy_port)
        body_2 = {'id': p.id, 'mac': p.mac}
        path_3 = "/networks/%s/ports/%s" % (n, dummy_port)
        ret = self.driver.create_port(net_path, p, p.id)
        self.do_request.assert_has_calls([
            mock.call("POST", path_1, body=body_1),
            mock.call("POST", path_2, body=body_2),
            mock.call("DELETE", path_3)
        ])
        port_path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port,
                                                              p.id)
        self.assertEqual(ret, port_path)
    def testd_delete_port(self):
        t, n, p = self.get_ofc_item_random_params()
        dummy_port = "dummy-%s" % p.id
        path = "/networks/%s/ports/%s/attachments/%s" % (n, dummy_port, p.id)
        self.driver.delete_port(path)
        self.do_request.assert_called_once_with("DELETE", path)
class TremaMACBaseDriverTest(TremaDriverNetworkTestBase):
    """Tests for the MAC-based Trema driver (no packet filter support)."""
    driver_name = "trema_mac"
    def test_filter_supported(self):
        self.assertFalse(self.driver.filter_supported())
    def testd_create_port(self):
        t, n, p = self.get_ofc_item_random_params()
        net_path = "/networks/%s" % n
        # Ports are represented as MAC attachments directly on the network.
        path = "/networks/%s/attachments" % n
        body = {'id': p.id, 'mac': p.mac}
        ret = self.driver.create_port(net_path, p, p.id)
        self.do_request.assert_called_once_with("POST", path, body=body)
        self.assertEqual(ret, '/networks/%s/attachments/%s' % (n, p.id))
    def testd_delete_port(self):
        t, n, p = self.get_ofc_item_random_params()
        path = "/networks/%s/attachments/%s" % (n, p.id)
        self.driver.delete_port(path)
        self.do_request.assert_called_once_with("DELETE", path)
class TremaFilterDriverTest(TremaDriverTestBase):
    """Tests for packet-filter handling in the Trema OFC driver."""
    def _test_create_filter(self, filter_dict=None, filter_post=None,
                            filter_wildcards=None, no_portinfo=False):
        """Common driver for the create_filter tests.

        Builds a neutron-style filter dict (optionally overridden by
        filter_dict), computes the REST body create_filter is expected to
        POST (overridden by filter_post, minus filter_wildcards), then
        asserts the driver issued exactly that request.
        """
        t, n, p = self.get_ofc_item_random_params()
        src_mac = ':'.join(['%x' % random.randint(0, 255)
                            for i in moves.xrange(6)])
        if filter_wildcards is None:
            filter_wildcards = []
        f = {'tenant_id': t,
             'id': uuidutils.generate_uuid(),
             'network_id': n,
             'priority': 123,
             'action': "ACCEPT",
             'in_port': p.id,
             'src_mac': src_mac,
             'dst_mac': "",
             'eth_type': 0,
             'src_cidr': "",
             'dst_cidr': "",
             'src_port': 0,
             'dst_port': 0,
             'protocol': "TCP",
             'admin_state_up': True,
             'status': "ACTIVE"}
        if filter_dict:
            f.update(filter_dict)
        net_path = "/networks/%s" % n
        all_wildcards_ofp = ['dl_vlan', 'dl_vlan_pcp', 'nw_tos',
                             'in_port', 'dl_src', 'dl_dst',
                             'nw_src', 'nw_dst',
                             'dl_type', 'nw_proto',
                             'tp_src', 'tp_dst']
        all_wildcards_non_ofp = ['in_datapath_id', 'slice']
        body = {'id': f['id'],
                'action': 'ALLOW',
                'priority': 123,
                'slice': n,
                'in_datapath_id': '0x123456789',
                'in_port': 1234,
                'nw_proto': '0x6',
                'dl_type': '0x800',
                'dl_src': src_mac}
        if filter_post:
            body.update(filter_post)
        if no_portinfo:
            # Without port info the datapath/port match fields are unknown
            # and must be declared as wildcards.
            filter_wildcards += ['in_datapath_id', 'in_port']
            p = None
        for field in filter_wildcards:
            if field in body:
                del body[field]
        # Every match field absent from the body must appear in the wildcard
        # lists; IP address fields carry an explicit /32 mask length.
        ofp_wildcards = ["%s:32" % _f if _f in ['nw_src', 'nw_dst'] else _f
                         for _f in all_wildcards_ofp if _f not in body]
        body['ofp_wildcards'] = set(ofp_wildcards)
        non_ofp_wildcards = [_f for _f in all_wildcards_non_ofp
                             if _f not in body]
        if non_ofp_wildcards:
            body['wildcards'] = set(non_ofp_wildcards)
        ret = self.driver.create_filter(net_path, f, p, f['id'])
        # The content of 'body' is checked below.
        self.do_request.assert_called_once_with("POST", "/filters",
                                                body=mock.ANY)
        self.assertEqual(ret, '/filters/%s' % f['id'])
        # ofp_wildcards and wildcards in body are comma-separated
        # string but the order of elements are not considered,
        # so we check these fields as set.
        actual_body = self.do_request.call_args[1]['body']
        if 'ofp_wildcards' in actual_body:
            ofp_wildcards = actual_body['ofp_wildcards'].split(',')
            actual_body['ofp_wildcards'] = set(ofp_wildcards)
        if 'wildcards' in actual_body:
            actual_body['wildcards'] = set(actual_body['wildcards'].split(','))
        self.assertEqual(body, actual_body)
    # ACCEPT and ALLOW are synonyms on the neutron side; both map to ALLOW.
    def test_create_filter_accept(self):
        self._test_create_filter(filter_dict={'action': 'ACCEPT'})
    def test_create_filter_allow(self):
        self._test_create_filter(filter_dict={'action': 'ALLOW'})
    # DENY and DROP both map to the OFC action DENY.
    def test_create_filter_deny(self):
        self._test_create_filter(filter_dict={'action': 'DENY'},
                                 filter_post={'action': 'DENY'})
    def test_create_filter_drop(self):
        self._test_create_filter(filter_dict={'action': 'DROP'},
                                 filter_post={'action': 'DENY'})
    def test_create_filter_no_port(self):
        self._test_create_filter(no_portinfo=True)
    def test_create_filter_src_mac_wildcard(self):
        self._test_create_filter(filter_dict={'src_mac': ''},
                                 filter_wildcards=['dl_src'])
    def test_create_filter_dst_mac(self):
        dst_mac = ':'.join(['%x' % random.randint(0, 255)
                            for i in moves.xrange(6)])
        self._test_create_filter(filter_dict={'dst_mac': dst_mac},
                                 filter_post={'dl_dst': dst_mac})
    def test_create_filter_src_cidr(self):
        src_cidr = '10.2.0.0/24'
        self._test_create_filter(filter_dict={'src_cidr': src_cidr},
                                 filter_post={'nw_src': src_cidr})
    def test_create_filter_dst_cidr(self):
        dst_cidr = '192.168.10.0/24'
        self._test_create_filter(filter_dict={'dst_cidr': dst_cidr},
                                 filter_post={'nw_dst': dst_cidr})
    # Protocol names are translated to dl_type/nw_proto hex values.
    def test_create_filter_proto_icmp(self):
        self._test_create_filter(
            filter_dict={'protocol': 'icmp'},
            filter_post={'dl_type': '0x800', 'nw_proto': '0x1'})
    def test_create_filter_proto_tcp(self):
        self._test_create_filter(
            filter_dict={'protocol': 'tcp'},
            filter_post={'dl_type': '0x800', 'nw_proto': '0x6'})
    def test_create_filter_proto_udp(self):
        self._test_create_filter(
            filter_dict={'protocol': 'udp'},
            filter_post={'dl_type': '0x800', 'nw_proto': '0x11'})
    def test_create_filter_proto_arp(self):
        self._test_create_filter(
            filter_dict={'protocol': 'arp'},
            filter_post={'dl_type': '0x806'},
            filter_wildcards=['nw_proto'])
    def test_create_filter_proto_misc(self):
        self._test_create_filter(
            filter_dict={'protocol': '0x33', 'eth_type': '0x900'},
            filter_post={'dl_type': '0x900', 'nw_proto': '0x33'})
    def test_create_filter_proto_misc_dl_type_wildcard(self):
        self._test_create_filter(
            filter_dict={'protocol': '0x33', 'ether_type': ''},
            filter_post={'nw_proto': '0x33'},
            filter_wildcards=['dl_type'])
    def test_create_filter_proto_wildcard(self):
        self._test_create_filter(
            filter_dict={'protocol': ''},
            filter_wildcards=['dl_type', 'nw_proto'])
    def test_create_filter_src_dst_port(self):
        self._test_create_filter(filter_dict={'src_port': 8192,
                                              'dst_port': 4096},
                                 filter_post={'tp_src': '0x2000',
                                              'tp_dst': '0x1000'})
    def testb_delete_filter(self):
        t, n, p = self.get_ofc_item_random_params()
        f_path = "/filters/%s" % uuidutils.generate_uuid()
        self.driver.delete_filter(f_path)
        self.do_request.assert_called_once_with("DELETE", f_path)
| apache-2.0 |
SnakeJenny/TensorFlow | tensorflow/python/framework/random_seed_test.py | 36 | 2150 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.python.framework.ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import random_seed
from tensorflow.python.platform import test
class RandomSeedTest(test.TestCase):
  def testRandomSeed(self):
    """Exercises get_seed() across graph-/op-level seed combinations."""
    test_cases = [
        # Each test case is a tuple with input to get_seed:
        # (input_graph_seed, input_op_seed)
        # and output from get_seed:
        # (output_graph_seed, output_op_seed)
        ((None, None), (None, None)),
        ((None, 1), (random_seed.DEFAULT_GRAPH_SEED, 1)),
        ((1, None), (1, 0)),  # 0 will be the default_graph._lastid.
        ((1, 1), (1, 1)),
        ((0, 0), (0, 2**31 - 1)),  # Avoid nondeterministic (0, 0) output
        ((2**31 - 1, 0), (0, 2**31 - 1)),  # Don't wrap to (0, 0) either
        ((0, 2**31 - 1), (0, 2**31 - 1)),  # Wrapping for the other argument
    ]
    for tc in test_cases:
      tinput, toutput = tc[0], tc[1]
      random_seed.set_random_seed(tinput[0])
      g_seed, op_seed = random_seed.get_seed(tinput[1])
      msg = 'test_case = {0}, got {1}, want {2}'.format(tinput,
                                                        (g_seed, op_seed),
                                                        toutput)
      self.assertEqual((g_seed, op_seed), toutput, msg=msg)
      # Clear the graph-level seed so cases don't leak into each other.
      random_seed.set_random_seed(None)
# Runs the test suite when this file is executed directly.
if __name__ == '__main__':
  test.main()
| apache-2.0 |
TrossSoftwareAndTech/webvt | lib/node-v7.2.0/deps/v8/tools/testrunner/local/perfdata.py | 23 | 4657 | # Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shelve
import threading
class PerfDataEntry(object):
  """Approximate running average of the most recent results for one test."""
  def __init__(self):
    self.avg = 0.0   # Current average duration estimate.
    self.count = 0   # Effective number of samples folded in (capped).
  def AddResult(self, result):
    kLearnRateLimiter = 99  # Greater value means slower learning.
    # Weight the existing average as if it covered at most
    # kLearnRateLimiter earlier samples, then fold in the new result,
    # approximating the average of the last 100 results.
    prior_weight = min(self.count, kLearnRateLimiter)
    total = self.avg * prior_weight + result
    self.count = prior_weight + 1
    self.avg = total / self.count
class PerfDataStore(object):
  """Shelve-backed persistent store of observed test durations for one
  architecture/mode combination."""
  def __init__(self, datadir, arch, mode):
    filename = os.path.join(datadir, "%s.%s.perfdata" % (arch, mode))
    self.database = shelve.open(filename, protocol=2)
    self.closed = False
    self.lock = threading.Lock()
  def __del__(self):
    self.close()
  def close(self):
    # Idempotent so that an explicit close() and __del__ can both run.
    if self.closed:
      return
    self.database.close()
    self.closed = True
  def GetKey(self, test):
    """Computes the key used to access data for the given testcase."""
    joined_flags = "".join(test.flags)
    return str("%s.%s.%s" % (test.suitename(), test.path, joined_flags))
  def FetchPerfData(self, test):
    """Returns the observed duration for |test| as read from the store."""
    entry = self.database.get(self.GetKey(test))
    return entry.avg if entry is not None else None
  def UpdatePerfData(self, test):
    """Updates the persisted value in the store with test.duration."""
    self.RawUpdatePerfData(self.GetKey(test), test.duration)
  def RawUpdatePerfData(self, testkey, duration):
    # Serialized with a lock: concurrent read-modify-write on the shelf
    # would lose updates.
    with self.lock:
      entry = self.database.get(testkey)
      if entry is None:
        entry = PerfDataEntry()
      entry.AddResult(duration)
      self.database[testkey] = entry
class PerfDataManager(object):
  """Lazily creates and caches PerfDataStores, keyed by arch then mode."""
  def __init__(self, datadir):
    self.datadir = os.path.abspath(datadir)
    if not os.path.exists(self.datadir):
      os.makedirs(self.datadir)
    self.stores = {}  # Keyed by arch, then mode.
    self.closed = False
    self.lock = threading.Lock()
  def __del__(self):
    self.close()
  def close(self):
    # Idempotent; closes every store that was handed out.
    if self.closed:
      return
    for per_arch in self.stores.values():
      for store in per_arch.values():
        store.close()
    self.closed = True
  def GetStore(self, arch, mode):
    # Lock guards concurrent creation of the same store.
    with self.lock:
      per_arch = self.stores.setdefault(arch, {})
      if mode not in per_arch:
        per_arch[mode] = PerfDataStore(self.datadir, arch, mode)
      return per_arch[mode]
class NullPerfDataStore(object):
  """Do-nothing stand-in for PerfDataStore when perf data is disabled."""
  def UpdatePerfData(self, test):
    """Discards the measurement."""
    return None
  def FetchPerfData(self, test):
    """Reports that no duration is known."""
    return None
class NullPerfDataManager(object):
  """Do-nothing stand-in for PerfDataManager when perf data is disabled."""
  def __init__(self):
    pass
  def GetStore(self, *args, **kwargs):
    """Returns a fresh null store; all arguments are ignored."""
    return NullPerfDataStore()
  def close(self):
    """Nothing to release."""
    return None
def GetPerfDataManager(context, datadir):
  """Factory: a real PerfDataManager when perf-data collection is enabled
  in `context`, otherwise a no-op null manager."""
  if not context.use_perf_data:
    return NullPerfDataManager()
  return PerfDataManager(datadir)
| gpl-3.0 |
DEVELByte/incubator-airflow | tests/ti_deps/deps/not_running_dep.py | 20 | 1310 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from datetime import datetime
from airflow.ti_deps.deps.not_running_dep import NotRunningDep
from airflow.utils.state import State
from fake_models import FakeTI
class NotRunningDepTest(unittest.TestCase):
    """Tests for the NotRunningDep task-instance scheduling dependency."""
    def test_ti_running(self):
        """
        Running task instances should fail this dep
        """
        ti = FakeTI(state=State.RUNNING, start_date=datetime(2016, 1, 1))
        self.assertFalse(NotRunningDep().is_met(ti=ti, dep_context=None))
    def test_ti_not_running(self):
        """
        Non-running task instances should pass this dep
        """
        ti = FakeTI(state=State.NONE, start_date=datetime(2016, 1, 1))
        self.assertTrue(NotRunningDep().is_met(ti=ti, dep_context=None))
| apache-2.0 |
ericjpj/ns-3-dev | .waf-1.7.13-5a064c2686fe54de4e11018d22148cfc/waflib/ansiterm.py | 149 | 7136 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import sys,os
# Probe for a usable Windows console.  On any failure (not a tty, ctypes or
# the kernel32 console API unavailable, console too small) the except clause
# below leaves the standard streams untouched and this module is a no-op.
try:
	if not(sys.stderr.isatty()and sys.stdout.isatty()):
		raise ValueError('not a tty')
	from ctypes import*
	# ctypes mirrors of the Win32 console structures.
	class COORD(Structure):
		_fields_=[("X",c_short),("Y",c_short)]
	class SMALL_RECT(Structure):
		_fields_=[("Left",c_short),("Top",c_short),("Right",c_short),("Bottom",c_short)]
	class CONSOLE_SCREEN_BUFFER_INFO(Structure):
		_fields_=[("Size",COORD),("CursorPosition",COORD),("Attributes",c_short),("Window",SMALL_RECT),("MaximumWindowSize",COORD)]
	class CONSOLE_CURSOR_INFO(Structure):
		_fields_=[('dwSize',c_ulong),('bVisible',c_int)]
	sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
	csinfo=CONSOLE_CURSOR_INFO()
	# -11 is the Win32 STD_OUTPUT_HANDLE constant.
	hconsole=windll.kernel32.GetStdHandle(-11)
	windll.kernel32.GetConsoleScreenBufferInfo(hconsole,byref(sbinfo))
	if sbinfo.Size.X<9 or sbinfo.Size.Y<9:raise ValueError('small console')
	windll.kernel32.GetConsoleCursorInfo(hconsole,byref(csinfo))
except Exception:
	pass
else:
	# Reached only on a real, big-enough Windows console: install an adapter
	# that translates ANSI/VT100 escape sequences into Win32 console calls.
	import re,threading
	is_vista=getattr(sys,"getwindowsversion",None)and sys.getwindowsversion()[0]>=6
	try:
		_type=unicode
	except NameError:
		# Python 3: no `unicode` type; text strings are `str`.
		_type=str
	# NOTE(review): returns `default` for any falsy input, including '0'.
	to_int=lambda number,default:number and int(number)or default
	wlock=threading.Lock()
	STD_OUTPUT_HANDLE=-11
	STD_ERROR_HANDLE=-12
	class AnsiTerm(object):
		"""File-like object translating ANSI escape codes to Win32 console
		API calls; installed as both sys.stdout and sys.stderr below."""
		def __init__(self):
			self.encoding=sys.stdout.encoding
			self.hconsole=windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)
			self.cursor_history=[]
			# Saved so attributes can be restored on SGR reset (ESC[0m).
			self.orig_sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			self.orig_csinfo=CONSOLE_CURSOR_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(self.orig_sbinfo))
			windll.kernel32.GetConsoleCursorInfo(hconsole,byref(self.orig_csinfo))
		def screen_buffer_info(self):
			# Fresh snapshot of buffer size, cursor position and attributes.
			sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
			return sbinfo
		def clear_line(self,param):
			# ESC[K handler: mode 1 fills the full line width starting at
			# column 0; mode 2 and the default fill from the cursor to the
			# end of the line.  Attributes are refilled along with spaces.
			mode=param and int(param)or 0
			sbinfo=self.screen_buffer_info()
			if mode==1:
				line_start=COORD(0,sbinfo.CursorPosition.Y)
				line_length=sbinfo.Size.X
			elif mode==2:
				line_start=COORD(sbinfo.CursorPosition.X,sbinfo.CursorPosition.Y)
				line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
			else:
				line_start=sbinfo.CursorPosition
				line_length=sbinfo.Size.X-sbinfo.CursorPosition.X
			chars_written=c_int()
			windll.kernel32.FillConsoleOutputCharacterA(self.hconsole,c_wchar(' '),line_length,line_start,byref(chars_written))
			windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,line_length,line_start,byref(chars_written))
		def clear_screen(self,param):
			# ESC[J handler: mode 1 clears from the origin, mode 2 clears
			# the whole buffer and homes the cursor, default clears from
			# the cursor to the end of the buffer.
			mode=to_int(param,0)
			sbinfo=self.screen_buffer_info()
			if mode==1:
				clear_start=COORD(0,0)
				clear_length=sbinfo.CursorPosition.X*sbinfo.CursorPosition.Y
			elif mode==2:
				clear_start=COORD(0,0)
				clear_length=sbinfo.Size.X*sbinfo.Size.Y
				windll.kernel32.SetConsoleCursorPosition(self.hconsole,clear_start)
			else:
				clear_start=sbinfo.CursorPosition
				clear_length=((sbinfo.Size.X-sbinfo.CursorPosition.X)+sbinfo.Size.X*(sbinfo.Size.Y-sbinfo.CursorPosition.Y))
			chars_written=c_int()
			windll.kernel32.FillConsoleOutputCharacterA(self.hconsole,c_wchar(' '),clear_length,clear_start,byref(chars_written))
			windll.kernel32.FillConsoleOutputAttribute(self.hconsole,sbinfo.Attributes,clear_length,clear_start,byref(chars_written))
		def push_cursor(self,param):
			# ESC[s: save the current cursor position.
			sbinfo=self.screen_buffer_info()
			self.cursor_history.append(sbinfo.CursorPosition)
		def pop_cursor(self,param):
			# ESC[u: restore the most recently saved cursor position.
			if self.cursor_history:
				old_pos=self.cursor_history.pop()
				windll.kernel32.SetConsoleCursorPosition(self.hconsole,old_pos)
		def set_cursor(self,param):
			# ESC[H / ESC[f: "row;col" (1-based), clamped to the buffer.
			y,sep,x=param.partition(';')
			x=to_int(x,1)-1
			y=to_int(y,1)-1
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,x),sbinfo.Size.X),min(max(0,y),sbinfo.Size.Y))
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def set_column(self,param):
			# ESC[G: move to the given column (1-based), same row.
			x=to_int(param,1)-1
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,x),sbinfo.Size.X),sbinfo.CursorPosition.Y)
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def move_cursor(self,x_offset=0,y_offset=0):
			# Relative cursor move, clamped to the buffer bounds.
			sbinfo=self.screen_buffer_info()
			new_pos=COORD(min(max(0,sbinfo.CursorPosition.X+x_offset),sbinfo.Size.X),min(max(0,sbinfo.CursorPosition.Y+y_offset),sbinfo.Size.Y))
			windll.kernel32.SetConsoleCursorPosition(self.hconsole,new_pos)
		def move_up(self,param):
			self.move_cursor(y_offset=-to_int(param,1))
		def move_down(self,param):
			self.move_cursor(y_offset=to_int(param,1))
		def move_left(self,param):
			self.move_cursor(x_offset=-to_int(param,1))
		def move_right(self,param):
			self.move_cursor(x_offset=to_int(param,1))
		def next_line(self,param):
			# ESC[E: down N lines, back to column 0.
			sbinfo=self.screen_buffer_info()
			self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=to_int(param,1))
		def prev_line(self,param):
			# ESC[F: up N lines, back to column 0.
			sbinfo=self.screen_buffer_info()
			self.move_cursor(x_offset=-sbinfo.CursorPosition.X,y_offset=-to_int(param,1))
		def rgb2bgr(self,c):
			# ANSI color bits are RGB; Win32 attributes are BGR: swap bits 0 and 2.
			return((c&1)<<2)|(c&2)|((c&4)>>2)
		def set_color(self,param):
			# ESC[...m (SGR): foreground 30-37, background 40-47, reset 0,
			# bold 1, underline 4, reverse video 7.
			cols=param.split(';')
			sbinfo=CONSOLE_SCREEN_BUFFER_INFO()
			windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole,byref(sbinfo))
			attr=sbinfo.Attributes
			for c in cols:
				if is_vista:
					c=int(c)
				else:
					c=to_int(c,0)
				if c in range(30,38):
					attr=(attr&0xfff0)|self.rgb2bgr(c-30)
				elif c in range(40,48):
					attr=(attr&0xff0f)|(self.rgb2bgr(c-40)<<4)
				elif c==0:
					attr=self.orig_sbinfo.Attributes
				elif c==1:
					attr|=0x08
				elif c==4:
					attr|=0x80
				elif c==7:
					# Swap the foreground and background nibbles.
					attr=(attr&0xff88)|((attr&0x70)>>4)|((attr&0x07)<<4)
			windll.kernel32.SetConsoleTextAttribute(self.hconsole,attr)
		def show_cursor(self,param):
			csinfo.bVisible=1
			windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
		def hide_cursor(self,param):
			csinfo.bVisible=0
			windll.kernel32.SetConsoleCursorInfo(self.hconsole,byref(csinfo))
		# Dispatch from the final byte of a CSI sequence to its handler.
		ansi_command_table={'A':move_up,'B':move_down,'C':move_right,'D':move_left,'E':next_line,'F':prev_line,'G':set_column,'H':set_cursor,'f':set_cursor,'J':clear_screen,'K':clear_line,'h':show_cursor,'l':hide_cursor,'m':set_color,'s':push_cursor,'u':pop_cursor,}
		# Tokenizes output into (CSI params, CSI command) pairs or plain text runs.
		ansi_tokens=re.compile('(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))')
		def write(self,text):
			# Serialized with a module-level lock: the parse/act sequence
			# below is not atomic across threads.
			try:
				wlock.acquire()
				for param,cmd,txt in self.ansi_tokens.findall(text):
					if cmd:
						cmd_func=self.ansi_command_table.get(cmd)
						if cmd_func:
							cmd_func(self,param)
					else:
						self.writeconsole(txt)
			finally:
				wlock.release()
		def writeconsole(self,txt):
			chars_written=c_int()
			writeconsole=windll.kernel32.WriteConsoleA
			if isinstance(txt,_type):
				writeconsole=windll.kernel32.WriteConsoleW
			# Write in chunks of 3000 characters — presumably to stay under
			# WriteConsole size limits; not verified here.
			TINY_STEP=3000
			for x in range(0,len(txt),TINY_STEP):
				tiny=txt[x:x+TINY_STEP]
				writeconsole(self.hconsole,tiny,len(tiny),byref(chars_written),None)
		def flush(self):
			# Console writes are unbuffered; nothing to flush.
			pass
		def isatty(self):
			return True
	sys.stderr=sys.stdout=AnsiTerm()
	# Advertise VT100 so tools emit the ANSI sequences handled above.
	os.environ['TERM']='vt100'
| gpl-2.0 |
jamison904/N920T | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and proudce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
	# Load the kernel symbol table from /proc/kallsyms into the global
	# `kallsyms` list as (address, name) tuples, sorted by address so that
	# get_sym() can binary-search it.
	global kallsyms
	try:
		f = open("/proc/kallsyms", "r")
	except:
		# /proc/kallsyms may be unreadable; symbol resolution is then
		# unavailable and raw addresses get printed instead.
		return
	for line in f:
		loc = int(line.split()[0], 16)
		name = line.split()[2]
		kallsyms.append((loc, name))
	kallsyms.sort()
def get_sym(sloc):
	# Resolve a raw kernel address to (symbol_name, offset_into_symbol).
	# Returns (None, 0) when the address precedes every known symbol or
	# the table is empty.
	addr = int(sloc)
	# Binary search for the greatest index whose symbol start is <= addr.
	# Invariant: kallsyms[lo][0] <= addr for all reachable lo >= 0, and
	# kallsyms[hi][0] > addr for all hi < len(kallsyms).
	lo, hi = -1, len(kallsyms)
	while hi - lo > 1:
		mid = (lo + hi) // 2
		if kallsyms[mid][0] <= addr:
			lo = mid
		else:
			hi = mid
	if lo < 0:
		return (None, 0)
	base, name = kallsyms[lo]
	return (name, addr - base)
def print_drop_table():
	# Emit a header followed by one row per drop location, resolving raw
	# addresses to symbol+offset where the kallsyms table allows it.
	print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
	for i in drop_log.keys():
		(sym, off) = get_sym(i)
		if sym == None:
			# Unresolvable address: print it verbatim.
			sym = i
		print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
	# Called by perf before event processing starts.
	print "Starting trace (Ctrl-C to dump results)"
def trace_end():
	# Called by perf when tracing stops: load symbols, then report.
	print "Gathering kallsyms data"
	get_kallsyms_table()
	print_drop_table()
# called from perf, when it finds a correspoinding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
		   skbaddr, location, protocol):
	# perf callback for the skb:kfree_skb tracepoint: count one dropped
	# packet against the kernel address at which the drop happened.
	slocation = str(location)
	# dict.get replaces the previous try/bare-except increment; the bare
	# "except:" swallowed *any* exception, not just the expected KeyError
	# for a first-seen location.
	drop_log[slocation] = drop_log.get(slocation, 0) + 1
| gpl-2.0 |
KokareIITP/django | django/db/backends/sqlite3/features.py | 151 | 2625 | from __future__ import unicode_literals
from django.db import utils
from django.db.backends.base.features import BaseDatabaseFeatures
from django.utils import six
from django.utils.functional import cached_property
from .base import Database
try:
import pytz
except ImportError:
pytz = None
class DatabaseFeatures(BaseDatabaseFeatures):
    """Feature flags describing what Django's SQLite backend supports."""
    # SQLite cannot handle us only partially reading from a cursor's result set
    # and then writing the same rows to the database in another cursor. This
    # setting ensures we always read result sets fully into memory all in one
    # go.
    can_use_chunked_reads = False
    test_db_allows_multiple_connections = False
    supports_unspecified_pk = True
    supports_timezones = False
    # SQLite limits the number of bound parameters per statement.
    supports_1000_query_parameters = False
    supports_mixed_date_datetime_comparisons = False
    has_bulk_insert = True
    can_combine_inserts_with_and_without_auto_increment_pk = False
    supports_foreign_keys = False
    supports_column_check_constraints = False
    autocommits_when_autocommit_is_off = True
    can_introspect_decimal_field = False
    can_introspect_positive_integer_field = True
    can_introspect_small_integer_field = True
    supports_transactions = True
    atomic_transactions = False
    can_rollback_ddl = True
    supports_paramstyle_pyformat = False
    supports_sequence_reset = False
    @cached_property
    def uses_savepoints(self):
        # SAVEPOINT support requires SQLite >= 3.6.8.
        return Database.sqlite_version_info >= (3, 6, 8)
    @cached_property
    def can_release_savepoints(self):
        # RELEASE follows the same version requirement as SAVEPOINT.
        return self.uses_savepoints
    @cached_property
    def can_share_in_memory_db(self):
        # Sharing an in-memory database between connections requires the
        # Python 3 sqlite3 module and SQLite >= 3.7.13.
        return (
            six.PY3 and
            Database.__name__ == 'sqlite3.dbapi2' and
            Database.sqlite_version_info >= (3, 7, 13)
        )
    @cached_property
    def supports_stddev(self):
        """Confirm support for STDDEV and related stats functions
        SQLite supports STDDEV as an extension package; so
        connection.ops.check_expression_support() can't unilaterally
        rule out support for STDDEV. We need to manually check
        whether the call works.
        """
        with self.connection.cursor() as cursor:
            cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')
            try:
                cursor.execute('SELECT STDDEV(*) FROM STDDEV_TEST')
                has_support = True
            except utils.DatabaseError:
                has_support = False
            cursor.execute('DROP TABLE STDDEV_TEST')
        return has_support
    @cached_property
    def has_zoneinfo_database(self):
        # pytz supplies the timezone definitions; its import was attempted
        # at module load time.
        return pytz is not None
| bsd-3-clause |
smvv/trs | tests/test_possibilities.py | 1 | 3246 | # This file is part of TRS (http://math.kompiler.org)
#
# TRS is free software: you can redistribute it and/or modify it under the
# terms of the GNU Affero General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option) any
# later version.
#
# TRS is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Affero General Public License for more
# details.
#
# You should have received a copy of the GNU Affero General Public License
# along with TRS. If not, see <http://www.gnu.org/licenses/>.
import unittest
from src.possibilities import MESSAGES, Possibility as P, flatten_mult
from tests.rulestestcase import tree
from src.parser import Parser
from tests.parser import ParserWrapper
def dummy_handler(root, args):  # pragma: nocover
    """No-op possibility handler used as a test fixture (no MESSAGES entry)."""
    return None
def dummy_handler_msg(root, args):  # pragma: nocover
    """No-op possibility handler that gets a MESSAGES template registered."""
    return None
MESSAGES[dummy_handler_msg] = 'foo {1} + {2} bar'
class TestPossibilities(unittest.TestCase):
    """Tests for Possibility string forms, equality and parser integration."""

    def setUp(self):
        # "1 + 2" parses into a root node whose children are the two
        # numeric leaves; wrap it in one unregistered and one registered
        # handler possibility.
        self.l1, self.l2 = self.n = tree('1 + 2')
        self.p0 = P(self.n, dummy_handler, (self.l1, self.l2))
        self.p1 = P(self.n, dummy_handler_msg, (self.l1, self.l2))

    def test___str__(self):
        # A handler without a MESSAGES entry falls back to the repr form;
        # a registered handler renders its message template.
        self.assertEqual(str(self.p0),
                '<Possibility root="1 + 2" handler=dummy_handler args=(1, 2)>')
        self.assertEqual(str(self.p1), 'foo `1` + `2` bar')

    def test___repr__(self):
        self.assertEqual(repr(self.p0),
                '<Possibility root="1 + 2" handler=dummy_handler args=(1, 2)>')

    def test___eq__(self):
        # Equal when root, handler and args all match.
        assert self.p0 == P(self.n, dummy_handler, (self.l1, self.l2))
        assert self.p0 != self.p1

    def test_multiple_input(self):
        # Entering a second expression replaces the possibilities of the
        # first one.
        parser = ParserWrapper(Parser)
        parser.run(['1+2', '?', '3+4', '?'])
        possibilities = parser.parser.possibilities
        self.assertEqual('\n'.join([repr(pos) for pos in possibilities]),
                '<Possibility root="3 + 4" handler=add_numerics' \
                ' args=(<Scope of "3 + 4">, 3, 4)>')

    def test_multiple_runs(self):
        parser = ParserWrapper(Parser)
        parser.run(['1+2', '?'])
        possibilities = parser.parser.possibilities
        self.assertEqual('\n'.join([repr(pos) for pos in possibilities]),
                '<Possibility root="1 + 2" handler=add_numerics' \
                ' args=(<Scope of "1 + 2">, 1, 2)>')

        # Remove previous possibilities after second run() call.
        parser.run(['', ' '])
        possibilities = parser.parser.possibilities
        self.assertEqual(possibilities, None)

        # Overwrite previous possibilities with new ones
        parser.run(['3+4', '?'])
        possibilities = parser.parser.possibilities
        self.assertEqual('\n'.join([repr(pos) for pos in possibilities]),
                '<Possibility root="3 + 4" handler=add_numerics' \
                ' args=(<Scope of "3 + 4">, 3, 4)>')

    def test_flatten_mult(self):
        self.assertEqual(flatten_mult(tree('2(xx)')), tree('2xx'))
        self.assertEqual(flatten_mult(tree('2(xx) + 1')), tree('2xx + 1'))
| agpl-3.0 |
tux-00/ansible | lib/ansible/plugins/cache/memory.py | 44 | 1691 | # (c) 2014, Brian Coca, Josh Drake, et al
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
'''
DOCUMENTATION:
cache: memory
short_description: RAM backed, non persistent
description:
- RAM backed cache that is not persistent.
version_added: historical
author: core team (@ansible-core)
'''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.plugins.cache import BaseCacheModule
class CacheModule(BaseCacheModule):
    """In-memory, per-process fact cache backed by a plain dict.

    Nothing is persisted: contents vanish when the process exits.
    """

    def __init__(self, *args, **kwargs):
        self._cache = {}

    def get(self, key):
        # Like dict.get(): an unknown key yields None instead of raising.
        return self._cache.get(key, None)

    def set(self, key, value):
        self._cache[key] = value

    def keys(self):
        return self._cache.keys()

    def contains(self, key):
        return key in self._cache

    def delete(self, key):
        # Propagates KeyError for unknown keys, like the underlying dict.
        del self._cache[key]

    def flush(self):
        # Rebind a fresh dict rather than mutating the old one in place.
        self._cache = {}

    def copy(self):
        # Shallow copy; values are shared with the live cache.
        return dict(self._cache)

    def __getstate__(self):
        return self.copy()

    def __setstate__(self, data):
        self._cache = data
| gpl-3.0 |
paulmouzas/blogodrone | autofixture/placeholder.py | 4 | 1389 | # coding=utf-8
import io
from PIL import Image
from PIL import ImageDraw
from PIL import ImageColor
from PIL import ImageFont
from PIL import ImageOps
get_color = lambda name: ImageColor.getrgb(name)
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
                          bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
                          fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
    """Little spin-off from https://github.com/Visgean/python-placeholder
    that not saves an image and instead returns it.

    Returns the encoded image bytes (default PNG) showing ``text`` — or
    "<width>x<height>" when no text is given — centered on a solid
    ``bg_color`` background.
    """
    size = (width, height)
    text = text if text else '{0}x{1}'.format(width, height)

    try:
        font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
    except IOError:
        # Requested font file unavailable: fall back to PIL's builtin font.
        font = ImageFont.load_default()

    result_img = Image.new(mode, size, bg_color)

    text_size = font.getsize(text)
    text_img = Image.new("RGBA", size, bg_color)

    # Position for the text: centered in both dimensions.
    left = size[0] / 2 - text_size[0] / 2
    top = size[1] / 2 - text_size[1] / 2

    drawing = ImageDraw.Draw(text_img)
    drawing.text((left, top),
                 text,
                 font=font,
                 fill=fg_color)

    txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))
    result_img.paste(txt_img)

    file_obj = io.BytesIO()
    # FIX: previously the fitted text layer (txt_img) was saved while the
    # composed result_img was built and then discarded; save the composed
    # image (in the requested ``mode``) instead.
    result_img.save(file_obj, fmt)

    return file_obj.getvalue()
robinro/ansible-modules-extras | cloud/vmware/vmware_cluster.py | 71 | 9862 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Joseph Callen <jcallen () csc.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: vmware_cluster
short_description: Create VMware vSphere Cluster
description:
- Create VMware vSphere Cluster
version_added: 2.0
author: Joseph Callen (@jcpowermac)
notes:
requirements:
- Tested on ESXi 5.5
- PyVmomi installed
options:
datacenter_name:
description:
- The name of the datacenter the cluster will be created in.
required: True
cluster_name:
description:
- The name of the cluster that will be created
required: True
enable_ha:
description:
- If set to True will enable HA when the cluster is created.
required: False
default: False
enable_drs:
description:
- If set to True will enable DRS when the cluster is created.
required: False
default: False
enable_vsan:
description:
- If set to True will enable vSAN when the cluster is created.
required: False
default: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
# Example vmware_cluster command from Ansible Playbooks
- name: Create Cluster
local_action: >
vmware_cluster
hostname="{{ ansible_ssh_host }}" username=root password=vmware
datacenter_name="datacenter"
cluster_name="cluster"
enable_ha=True
enable_drs=True
enable_vsan=True
'''
# pyVmomi is an optional dependency; record its availability so main() can
# fail with a clear message instead of an ImportError at import time.
try:
    from pyVmomi import vim, vmodl
    HAS_PYVMOMI = True
except ImportError:
    HAS_PYVMOMI = False
class VMwareCluster(object):
    """Drives a vSphere cluster toward the requested HA/DRS/vSAN state.

    Reads the desired configuration from the Ansible module parameters,
    inspects the live cluster, and creates/updates/destroys it as needed.
    """

    def __init__(self, module):
        self.module = module
        self.enable_ha = module.params['enable_ha']
        self.enable_drs = module.params['enable_drs']
        self.enable_vsan = module.params['enable_vsan']
        self.cluster_name = module.params['cluster_name']
        self.desired_state = module.params['state']
        self.datacenter = None
        self.cluster = None
        self.content = connect_to_api(module)
        self.datacenter_name = module.params['datacenter_name']

    def process_state(self):
        """Dispatch to the handler matching desired vs. current state."""
        cluster_states = {
            'absent': {
                'present': self.state_destroy_cluster,
                'absent': self.state_exit_unchanged,
            },
            'present': {
                'update': self.state_update_cluster,
                'present': self.state_exit_unchanged,
                'absent': self.state_create_cluster,
            }
        }

        current_state = self.check_cluster_configuration()
        # Based on the desired_state and the current_state call
        # the appropriate method from the dictionary
        cluster_states[self.desired_state][current_state]()

    def configure_ha(self):
        """Build the HA (DAS) config spec from the requested settings."""
        das_config = vim.cluster.DasConfigInfo()
        das_config.enabled = self.enable_ha
        das_config.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy()
        das_config.admissionControlPolicy.failoverLevel = 2
        return das_config

    def configure_drs(self):
        """Build the DRS config spec from the requested settings."""
        drs_config = vim.cluster.DrsConfigInfo()
        drs_config.enabled = self.enable_drs
        # Set to partially automated
        drs_config.vmotionRate = 3
        return drs_config

    def configure_vsan(self):
        """Build the vSAN config spec from the requested settings."""
        vsan_config = vim.vsan.cluster.ConfigInfo()
        vsan_config.enabled = self.enable_vsan
        vsan_config.defaultConfig = vim.vsan.cluster.ConfigInfo.HostDefaultInfo()
        vsan_config.defaultConfig.autoClaimStorage = False
        return vsan_config

    def state_create_cluster(self):
        """Create the cluster inside the datacenter's host folder."""
        try:
            cluster_config_spec = vim.cluster.ConfigSpecEx()
            cluster_config_spec.dasConfig = self.configure_ha()
            cluster_config_spec.drsConfig = self.configure_drs()
            if self.enable_vsan:
                cluster_config_spec.vsanConfig = self.configure_vsan()
            if not self.module.check_mode:
                self.datacenter.hostFolder.CreateClusterEx(self.cluster_name, cluster_config_spec)
            self.module.exit_json(changed=True)
        except vim.fault.DuplicateName:
            self.module.fail_json(msg="A cluster with the name %s already exists" % self.cluster_name)
        except vmodl.fault.InvalidArgument:
            self.module.fail_json(msg="Cluster configuration specification parameter is invalid")
        except vim.fault.InvalidName:
            self.module.fail_json(msg="%s is an invalid name for a cluster" % self.cluster_name)
        except vmodl.fault.NotSupported:
            # This should never happen
            self.module.fail_json(msg="Trying to create a cluster on an incorrect folder object")
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            # This should never happen either
            self.module.fail_json(msg=method_fault.msg)

    def state_destroy_cluster(self):
        """Destroy the existing cluster and wait for the task to finish."""
        changed = True
        result = None

        try:
            if not self.module.check_mode:
                task = self.cluster.Destroy_Task()
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vim.fault.VimFault as vim_fault:
            self.module.fail_json(msg=vim_fault.msg)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)

    def state_exit_unchanged(self):
        """Report that nothing needed to change."""
        self.module.exit_json(changed=False)

    def state_update_cluster(self):
        """Reconfigure the cluster for whichever features diverge."""
        cluster_config_spec = vim.cluster.ConfigSpecEx()
        changed = True
        result = None

        # Only include the sub-specs whose enabled flag actually differs.
        if self.cluster.configurationEx.dasConfig.enabled != self.enable_ha:
            cluster_config_spec.dasConfig = self.configure_ha()
        if self.cluster.configurationEx.drsConfig.enabled != self.enable_drs:
            cluster_config_spec.drsConfig = self.configure_drs()
        if self.cluster.configurationEx.vsanConfigInfo.enabled != self.enable_vsan:
            cluster_config_spec.vsanConfig = self.configure_vsan()

        try:
            if not self.module.check_mode:
                task = self.cluster.ReconfigureComputeResource_Task(cluster_config_spec, True)
                changed, result = wait_for_task(task)
            self.module.exit_json(changed=changed, result=result)
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
        except TaskError as task_e:
            self.module.fail_json(msg=str(task_e))

    def check_cluster_configuration(self):
        """Return the current state: 'absent', 'present' or 'update'."""
        try:
            self.datacenter = find_datacenter_by_name(self.content, self.datacenter_name)
            if self.datacenter is None:
                self.module.fail_json(msg="Datacenter %s does not exist, "
                                          "please create first with Ansible Module vmware_datacenter or manually."
                                          % self.datacenter_name)
            self.cluster = find_cluster_by_name_datacenter(self.datacenter, self.cluster_name)

            if self.cluster is None:
                return 'absent'
            else:
                desired_state = (self.enable_ha,
                                 self.enable_drs,
                                 self.enable_vsan)

                current_state = (self.cluster.configurationEx.dasConfig.enabled,
                                 self.cluster.configurationEx.drsConfig.enabled,
                                 self.cluster.configurationEx.vsanConfigInfo.enabled)

                # FIX: was `cmp(desired_state, current_state) != 0`; the cmp()
                # builtin was removed in Python 3 and plain tuple inequality
                # is equivalent here.
                if desired_state != current_state:
                    return 'update'
                else:
                    return 'present'
        except vmodl.RuntimeFault as runtime_fault:
            self.module.fail_json(msg=runtime_fault.msg)
        except vmodl.MethodFault as method_fault:
            self.module.fail_json(msg=method_fault.msg)
def main():
    """Module entry point: build the argument spec and run the workflow."""
    argument_spec = vmware_argument_spec()
    argument_spec.update(dict(
        datacenter_name=dict(required=True, type='str'),
        cluster_name=dict(required=True, type='str'),
        enable_ha=dict(default=False, required=False, type='bool'),
        enable_drs=dict(default=False, required=False, type='bool'),
        enable_vsan=dict(default=False, required=False, type='bool'),
        state=dict(default='present', choices=['present', 'absent'], type='str'),
    ))

    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    # Fail early with a clear message when the optional SDK is missing.
    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    vmware_cluster = VMwareCluster(module)
    vmware_cluster.process_state()
from ansible.module_utils.vmware import *
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
leewinder/ng2-google-recaptcha | automation/publish_release_package.py | 4 | 2442 | """ Publishes an NMP package using the output generated from 'prepare_distribution_package.py'
Note that this script relies on the use of Travis CI environment variables, meaning
this script can only be run as part of a Travis CI build process
It also requires npm-cli-login and NPM_USER, NPM_PASS and NPM_EMAIL present in the envvars
npm install -g npm-cli-login """
#!/usr/bin/python
# Imports
import os
import cli
#
# Gets the main package folder
#
def get_package_folder():
    """ Gets the main package folder """
    # The distribution package lives under <project root>/release/package/
    root = cli.get_project_root()
    return root + '/release/package/'
#
# Verifies the branch we're on
#
def verify_branch_name():
    """ Verifies the branch we're on

    Returns True only for a push build on the 'master' branch; otherwise
    prints why publishing is skipped and returns False. """
    # Do we have the Environment variable to detect the branch?
    # We only run this script on the master branch
    branch_name = 'unknown'
    try:
        branch_name = os.environ['TRAVIS_BRANCH']
    except KeyError:
        print "Unable to access the 'TRAVIS_BRANCH' environment variable\n"

    # Check it's a push and nothing else
    event_type = 'unknown'
    try:
        event_type = os.environ['TRAVIS_EVENT_TYPE']
    except KeyError:
        print "Unable to access the 'TRAVIS_EVENT_TYPE' environment variable\n"

    # We only run on master when it's a push
    # NOTE: a missing env var leaves the 'unknown' default, which also fails
    # this check, so the function degrades to "don't publish".
    if branch_name.lower() != 'master' or event_type.lower() != 'push':
        # Output the message
        print "Publishing is only carried out on the 'master' branch when a push occurs"
        print "A '{}:{}' is running so this step will be skipped".format(branch_name, event_type)
        return False

    # We're good
    return True
#
# Main entry function
#
def main():
    """ Main entry function

    Skips silently unless this is a master push build, then logs in with
    npm-cli-login and runs 'npm publish' in the package folder; exits with
    the failing command's return code on error. """
    # Check we're on a branch we can run on
    branch_valid = verify_branch_name()
    if branch_valid is False:
        exit(0)

    # Get our folder and remove existing packages
    # NOTE(review): no removal happens here - presumably it is done by the
    # prepare_distribution_package.py step; confirm.
    package_path = get_package_folder()

    # Log in
    return_code, _, _ = cli.run_command_line(package_path, "npm-cli-login", None)
    if return_code != 0:
        exit(return_code)

    # Publish our package
    return_code, _, _ = cli.run_command_line(package_path, "npm", ["publish"])
    if return_code != 0:
        exit(return_code)

    # Done
    print "Successfully published package file"
#
# Main entry point
#
if __name__ == "__main__":
main()
| mit |
olibre/doxygen | src/version.py | 13 | 1805 | #
# script to read the version information from `../configure`
# relevant lines are starting with:
# `doxygen_version_major`
# `doxygen_version_minor`
# `doxygen_version_revision`
# `doxygen_version_mmn`
# the collected information is written to: `../VERSION` and `../src/version.cpp`
#
import sys
import os
#
# set 'default' values
#
# FIX: the defaults must be strings, not ints - every value is later
# concatenated with '.' separators, and an int default would raise a
# TypeError whenever a field is missing from the configure script.
major = '0'
minor = '0'
revision = '0'
mnt = 'NO'

configure = '../configure'
if len(sys.argv) > 2:
    configure = sys.argv[2]

#
# read the configure script and pick out the version fields
# (context manager guarantees the file is closed)
#
with open(configure, 'r') as f:
    for line in f:
        # check if line can match (saves 3 comparisons)
        if line.startswith('doxygen_version'):
            if line.startswith('doxygen_version_major'):
                major = line.replace('doxygen_version_major=', '')
            elif line.startswith('doxygen_version_minor'):
                minor = line.replace('doxygen_version_minor=', '')
            elif line.startswith('doxygen_version_revision'):
                revision = line.replace('doxygen_version_revision=', '')
            elif line.startswith('doxygen_version_mmn'):
                mnt = line.replace('doxygen_version_mmn=', '')

# strip superfluous '\n'
major = major.replace('\n', '')
minor = minor.replace('\n', '')
revision = revision.replace('\n', '')
mnt = mnt.replace('\n', '')

#
# write the collected version to ../VERSION and <argv[1]>/version.cpp;
# 'NO' means no maintenance suffix
#
version = major + '.' + minor + '.' + revision
if mnt != 'NO':
    version = version + '-' + mnt

with open('../VERSION', 'w') as f1:
    f1.write(version)
with open(os.path.join(sys.argv[1], 'version.cpp'), 'w') as f2:
    f2.write('char versionString[]="' + version + '";')
| gpl-2.0 |
creasyw/IMTAphy | documentation/doctools/tags/0.3/sphinx/ext/ifconfig.py | 4 | 1425 | # -*- coding: utf-8 -*-
"""
sphinx.ext.ifconfig
~~~~~~~~~~~~~~~~~~~
Provides the ``ifconfig`` directive that allows to write documentation
that is included depending on configuration variables.
Usage::
.. ifconfig:: releaselevel in ('alpha', 'beta', 'rc')
This stuff is only included in the built docs for unstable versions.
The argument for ``ifconfig`` is a plain Python expression, evaluated in the
namespace of the project configuration (that is, all variables from ``conf.py``
are available.)
:copyright: 2008 by Georg Brandl.
:license: BSD.
"""
from docutils import nodes
class ifconfig(nodes.Element): pass
def ifconfig_directive(name, arguments, options, content, lineno,
                       content_offset, block_text, state, state_machine):
    """Parse the directive body into an ``ifconfig`` node, stashing the
    condition expression on the node for later evaluation."""
    cond_node = ifconfig()
    cond_node['expr'] = arguments[0]
    state.nested_parse(content, content_offset, cond_node)
    return [cond_node]
def process_ifconfig_nodes(app, doctree, docname):
    """Replace each ifconfig node with its children (or with nothing),
    depending on how its expression evaluates against the configuration."""
    # Expressions see every conf.py value plus the builder name.
    ns = app.config.__dict__.copy()
    ns['builder'] = app.builder.name
    for node in doctree.traverse(ifconfig):
        # NOTE: eval() of the directive argument - conf.py and the documents
        # are trusted input by design here; never feed this untrusted text.
        if not eval(node['expr'], ns):
            node.replace_self([])
        else:
            node.replace_self(node.children)
def setup(app):
    """Register the ifconfig node, its directive, and the resolution hook."""
    app.add_node(ifconfig)
    app.add_directive('ifconfig', ifconfig_directive, 1, (1, 0, 1))
    app.connect('doctree-resolved', process_ifconfig_nodes)
| gpl-2.0 |
prakritish/ansible | lib/ansible/modules/network/a10/a10_virtual_server.py | 16 | 11685 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Ansible module to manage A10 Networks slb virtual server objects
(c) 2014, Mischa Peters <mpeters@a10networks.com>,
Eric Chou <ericc@a10networks.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: a10_virtual_server
version_added: 1.8
short_description: Manage A10 Networks AX/SoftAX/Thunder/vThunder devices' virtual servers.
description:
- Manage SLB (Server Load Balancing) virtual server objects on A10 Networks devices via aXAPIv2.
author: "Eric Chou (@ericchou) 2016, Mischa Peters (@mischapeters) 2014"
notes:
- Requires A10 Networks aXAPI 2.1.
extends_documentation_fragment: a10
options:
partition:
version_added: "2.3"
description:
- set active-partition
required: false
default: null
virtual_server:
description:
- The SLB (Server Load Balancing) virtual server name.
required: true
default: null
aliases: ['vip', 'virtual']
virtual_server_ip:
description:
- The SLB virtual server IPv4 address.
required: false
default: null
aliases: ['ip', 'address']
virtual_server_status:
description:
- The SLB virtual server status, such as enabled or disabled.
required: false
default: enable
aliases: ['status']
choices: ['enabled', 'disabled']
virtual_server_ports:
description:
- A list of ports to create for the virtual server. Each list item should be a
dictionary which specifies the C(port:) and C(type:), but can also optionally
specify the C(service_group:) as well as the C(status:). See the examples
below for details. This parameter is required when C(state) is C(present).
required: false
validate_certs:
description:
- If C(no), SSL certificates will not be validated. This should only be used
on personally controlled devices using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
'''
# FIX: removed a stub RETURN = ''' # ''' assignment here; it was dead code,
# immediately superseded by the real RETURN documentation string defined
# after the EXAMPLES block below.
EXAMPLES = '''
# Create a new virtual server
- a10_virtual_server:
host: a10.mydomain.com
username: myadmin
password: mypassword
partition: mypartition
virtual_server: vserver1
virtual_server_ip: 1.1.1.1
virtual_server_ports:
- port: 80
protocol: TCP
service_group: sg-80-tcp
- port: 443
protocol: HTTPS
service_group: sg-443-https
- port: 8080
protocol: http
status: disabled
'''
RETURN = '''
content:
description: the full info regarding the slb_virtual
returned: success
type: string
sample: "mynewvirtualserver"
'''
VALID_PORT_FIELDS = ['port', 'protocol', 'service_group', 'status']
def validate_ports(module, ports):
    """Validate and normalize virtual-server port definitions in place.

    Each entry must carry ``port`` (coerced to int) and ``protocol``
    (translated to the internal aXAPI integer value); ``status`` defaults
    to enabled (1) and ``service_group`` to ''. Any violation aborts the
    module run via module.fail_json().
    """
    for item in ports:
        for key in item:
            if key not in VALID_PORT_FIELDS:
                module.fail_json(msg="invalid port field (%s), must be one of: %s" % (key, ','.join(VALID_PORT_FIELDS)))

        # validate the port number is present and an integer
        if 'port' in item:
            try:
                item['port'] = int(item['port'])
            # FIX: catch only the errors int() can raise for bad input
            # instead of a bare `except:` that would also swallow
            # KeyboardInterrupt/SystemExit and hide unrelated bugs.
            except (TypeError, ValueError):
                module.fail_json(msg="port definitions must be integers")
        else:
            module.fail_json(msg="port definitions must define the port field")

        # validate the port protocol is present, and convert it to
        # the internal API integer value (and validate it)
        if 'protocol' in item:
            protocol = axapi_get_vport_protocol(item['protocol'])
            if not protocol:
                module.fail_json(msg="invalid port protocol, must be one of: %s" % ','.join(AXAPI_VPORT_PROTOCOLS))
            else:
                item['protocol'] = protocol
        else:
            module.fail_json(msg="port definitions must define the port protocol (%s)" % ','.join(AXAPI_VPORT_PROTOCOLS))

        # convert the status to the internal API integer value
        if 'status' in item:
            item['status'] = axapi_enabled_disabled(item['status'])
        else:
            item['status'] = 1

        # ensure the service_group field is at least present
        if 'service_group' not in item:
            item['service_group'] = ''
def main():
    """Entry point: ensure the SLB virtual server matches the requested state.

    Builds the argument spec, authenticates against the aXAPI endpoint,
    selects the partition, then creates/updates/deletes the virtual server
    as needed, optionally persists the device config, and exits via
    module.exit_json with the change status and server info.
    """
    argument_spec = a10_argument_spec()
    argument_spec.update(url_argument_spec())
    argument_spec.update(
        dict(
            state=dict(type='str', default='present', choices=['present', 'absent']),
            virtual_server=dict(type='str', aliases=['vip', 'virtual'], required=True),
            virtual_server_ip=dict(type='str', aliases=['ip', 'address'], required=True),
            virtual_server_status=dict(type='str', default='enabled', aliases=['status'], choices=['enabled', 'disabled']),
            virtual_server_ports=dict(type='list', required=True),
            partition=dict(type='str', default=[]),
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=False
    )

    host = module.params['host']
    username = module.params['username']
    password = module.params['password']
    partition = module.params['partition']
    state = module.params['state']
    write_config = module.params['write_config']
    slb_virtual = module.params['virtual_server']
    slb_virtual_ip = module.params['virtual_server_ip']
    slb_virtual_status = module.params['virtual_server_status']
    slb_virtual_ports = module.params['virtual_server_ports']

    if slb_virtual is None:
        module.fail_json(msg='virtual_server is required')

    # Normalize/validate the port list before touching the device.
    validate_ports(module, slb_virtual_ports)

    axapi_base_url = 'https://%s/services/rest/V2.1/?format=json' % host
    session_url = axapi_authenticate(module, axapi_base_url, username, password)

    # Activate the partition, then check whether the VIP already exists.
    slb_server_partition = axapi_call(module, session_url + '&method=system.partition.active', json.dumps({'name': partition}))
    slb_virtual_data = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
    slb_virtual_exists = not axapi_failure(slb_virtual_data)

    changed = False
    if state == 'present':
        json_post = {
            'virtual_server': {
                'name': slb_virtual,
                'address': slb_virtual_ip,
                'status': axapi_enabled_disabled(slb_virtual_status),
                'vport_list': slb_virtual_ports,
            }
        }

        # before creating/updating we need to validate that any
        # service groups defined in the ports list exist since
        # since the API will still create port definitions for
        # them while indicating a failure occurred
        checked_service_groups = []
        for port in slb_virtual_ports:
            if 'service_group' in port and port['service_group'] not in checked_service_groups:
                # skip blank service group entries
                if port['service_group'] == '':
                    continue
                result = axapi_call(module, session_url + '&method=slb.service_group.search', json.dumps({'name': port['service_group']}))
                if axapi_failure(result):
                    module.fail_json(msg="the service group %s specified in the ports list does not exist" % port['service_group'])
                checked_service_groups.append(port['service_group'])

        if not slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.create', json.dumps(json_post))
            if axapi_failure(result):
                module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
            changed = True
        else:
            def needs_update(src_ports, dst_ports):
                '''
                Checks to determine if the port definitions of the src_ports
                array are in or different from those in dst_ports. If there is
                a difference, this function returns true, otherwise false.
                '''
                for src_port in src_ports:
                    found = False
                    different = False
                    for dst_port in dst_ports:
                        if src_port['port'] == dst_port['port']:
                            found = True
                            for valid_field in VALID_PORT_FIELDS:
                                if src_port[valid_field] != dst_port[valid_field]:
                                    different = True
                                    break
                        if found or different:
                            break
                    if not found or different:
                        return True
                # every port from the src exists in the dst, and none of them were different
                return False

            defined_ports = slb_virtual_data.get('virtual_server', {}).get('vport_list', [])

            # we check for a needed update both ways, in case ports
            # are missing from either the ones specified by the user
            # or from those on the device
            if needs_update(defined_ports, slb_virtual_ports) or needs_update(slb_virtual_ports, defined_ports):
                result = axapi_call(module, session_url + '&method=slb.virtual_server.update', json.dumps(json_post))
                if axapi_failure(result):
                    module.fail_json(msg="failed to create the virtual server: %s" % result['response']['err']['msg'])
                changed = True

        # if we changed things, get the full info regarding
        # the service group for the return data below
        if changed:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.search', json.dumps({'name': slb_virtual}))
        else:
            result = slb_virtual_data
    elif state == 'absent':
        if slb_virtual_exists:
            result = axapi_call(module, session_url + '&method=slb.virtual_server.delete', json.dumps({'name': slb_virtual}))
            changed = True
        else:
            result = dict(msg="the virtual server was not present")

    # if the config has changed, save the config unless otherwise requested
    if changed and write_config:
        write_result = axapi_call(module, session_url + '&method=system.action.write_memory')
        if axapi_failure(write_result):
            module.fail_json(msg="failed to save the configuration: %s" % write_result['response']['err']['msg'])

    # log out of the session nicely and exit
    axapi_call(module, session_url + '&method=session.close')
    module.exit_json(changed=changed, content=result)
# standard ansible module imports
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import url_argument_spec
from ansible.module_utils.a10 import axapi_call, a10_argument_spec, axapi_authenticate, axapi_failure, axapi_enabled_disabled, axapi_get_vport_protocol
if __name__ == '__main__':
main()
| gpl-3.0 |
h3llrais3r/SickRage | lib/unidecode/x01d.py | 240 | 3608 | data = (
'', # 0x00
'', # 0x01
'', # 0x02
'', # 0x03
'', # 0x04
'', # 0x05
'', # 0x06
'', # 0x07
'', # 0x08
'', # 0x09
'', # 0x0a
'', # 0x0b
'', # 0x0c
'', # 0x0d
'', # 0x0e
'', # 0x0f
'', # 0x10
'', # 0x11
'', # 0x12
'', # 0x13
'', # 0x14
'', # 0x15
'', # 0x16
'', # 0x17
'', # 0x18
'', # 0x19
'', # 0x1a
'', # 0x1b
'', # 0x1c
'', # 0x1d
'', # 0x1e
'', # 0x1f
'', # 0x20
'', # 0x21
'', # 0x22
'', # 0x23
'', # 0x24
'', # 0x25
'', # 0x26
'', # 0x27
'', # 0x28
'', # 0x29
'', # 0x2a
'', # 0x2b
'', # 0x2c
'', # 0x2d
'', # 0x2e
'', # 0x2f
'', # 0x30
'', # 0x31
'', # 0x32
'', # 0x33
'', # 0x34
'', # 0x35
'', # 0x36
'', # 0x37
'', # 0x38
'', # 0x39
'', # 0x3a
'', # 0x3b
'', # 0x3c
'', # 0x3d
'', # 0x3e
'', # 0x3f
'', # 0x40
'', # 0x41
'', # 0x42
'', # 0x43
'', # 0x44
'', # 0x45
'', # 0x46
'', # 0x47
'', # 0x48
'', # 0x49
'', # 0x4a
'', # 0x4b
'', # 0x4c
'', # 0x4d
'', # 0x4e
'', # 0x4f
'', # 0x50
'', # 0x51
'', # 0x52
'', # 0x53
'', # 0x54
'', # 0x55
'', # 0x56
'', # 0x57
'', # 0x58
'', # 0x59
'', # 0x5a
'', # 0x5b
'', # 0x5c
'', # 0x5d
'', # 0x5e
'', # 0x5f
'', # 0x60
'', # 0x61
'', # 0x62
'', # 0x63
'', # 0x64
'', # 0x65
'', # 0x66
'', # 0x67
'', # 0x68
'', # 0x69
'', # 0x6a
'', # 0x6b
'b', # 0x6c
'd', # 0x6d
'f', # 0x6e
'm', # 0x6f
'n', # 0x70
'p', # 0x71
'r', # 0x72
'r', # 0x73
's', # 0x74
't', # 0x75
'z', # 0x76
'g', # 0x77
'', # 0x78
'', # 0x79
'', # 0x7a
'', # 0x7b
'', # 0x7c
'p', # 0x7d
'', # 0x7e
'', # 0x7f
'b', # 0x80
'd', # 0x81
'f', # 0x82
'g', # 0x83
'k', # 0x84
'l', # 0x85
'm', # 0x86
'n', # 0x87
'p', # 0x88
'r', # 0x89
's', # 0x8a
'', # 0x8b
'v', # 0x8c
'x', # 0x8d
'z', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
'', # 0xa0
'', # 0xa1
'', # 0xa2
'', # 0xa3
'', # 0xa4
'', # 0xa5
'', # 0xa6
'', # 0xa7
'', # 0xa8
'', # 0xa9
'', # 0xaa
'', # 0xab
'', # 0xac
'', # 0xad
'', # 0xae
'', # 0xaf
'', # 0xb0
'', # 0xb1
'', # 0xb2
'', # 0xb3
'', # 0xb4
'', # 0xb5
'', # 0xb6
'', # 0xb7
'', # 0xb8
'', # 0xb9
'', # 0xba
'', # 0xbb
'', # 0xbc
'', # 0xbd
'', # 0xbe
'', # 0xbf
'', # 0xc0
'', # 0xc1
'', # 0xc2
'', # 0xc3
'', # 0xc4
'', # 0xc5
'', # 0xc6
'', # 0xc7
'', # 0xc8
'', # 0xc9
'', # 0xca
'', # 0xcb
'', # 0xcc
'', # 0xcd
'', # 0xce
'', # 0xcf
'', # 0xd0
'', # 0xd1
'', # 0xd2
'', # 0xd3
'', # 0xd4
'', # 0xd5
'', # 0xd6
'', # 0xd7
'', # 0xd8
'', # 0xd9
'', # 0xda
'', # 0xdb
'', # 0xdc
'', # 0xdd
'', # 0xde
'', # 0xdf
'', # 0xe0
'', # 0xe1
'', # 0xe2
'', # 0xe3
'', # 0xe4
'', # 0xe5
'', # 0xe6
'', # 0xe7
'', # 0xe8
'', # 0xe9
'', # 0xea
'', # 0xeb
'', # 0xec
'', # 0xed
'', # 0xee
'', # 0xef
'', # 0xf0
'', # 0xf1
'', # 0xf2
'', # 0xf3
'', # 0xf4
'', # 0xf5
'', # 0xf6
'', # 0xf7
'', # 0xf8
'', # 0xf9
'', # 0xfa
'', # 0xfb
'', # 0xfc
'', # 0xfd
'', # 0xfe
)
| gpl-3.0 |
shashank971/edx-platform | lms/djangoapps/course_wiki/tests/test_access.py | 90 | 8482 | """
Tests for wiki permissions
"""
from django.contrib.auth.models import Group
from nose.plugins.attrib import attr
from student.tests.factories import UserFactory
from xmodule.modulestore.tests.factories import CourseFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from courseware.tests.factories import InstructorFactory, StaffFactory
from wiki.models import URLPath
from course_wiki.views import get_or_create_root
from course_wiki.utils import user_is_article_course_staff, course_wiki_slug
from course_wiki import settings
class TestWikiAccessBase(ModuleStoreTestCase):
    """Base class for testing wiki access."""
    def setUp(self):
        super(TestWikiAccessBase, self).setUp()

        self.wiki = get_or_create_root()

        # Course with use_unique_wiki_id=false: its wiki slug comes from
        # course_wiki_slug(). Build a three-level article hierarchy
        # (root -> Child -> Grandchild) plus instructor/staff users.
        self.course_math101 = CourseFactory.create(org='org', number='math101', display_name='Course', metadata={'use_unique_wiki_id': 'false'})
        self.course_math101_staff = self.create_staff_for_course(self.course_math101)

        wiki_math101 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101))
        wiki_math101_page = self.create_urlpath(wiki_math101, 'Child')
        wiki_math101_page_page = self.create_urlpath(wiki_math101_page, 'Grandchild')
        self.wiki_math101_pages = [wiki_math101, wiki_math101_page, wiki_math101_page_page]

        # Parallel fixture with use_unique_wiki_id=true for comparison.
        self.course_math101b = CourseFactory.create(org='org', number='math101b', display_name='Course', metadata={'use_unique_wiki_id': 'true'})
        self.course_math101b_staff = self.create_staff_for_course(self.course_math101b)

        wiki_math101b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101b))
        wiki_math101b_page = self.create_urlpath(wiki_math101b, 'Child')
        wiki_math101b_page_page = self.create_urlpath(wiki_math101b_page, 'Grandchild')
        self.wiki_math101b_pages = [wiki_math101b, wiki_math101b_page, wiki_math101b_page_page]

    def create_urlpath(self, parent, slug):
        """Creates an article at /parent/slug and returns its URLPath"""
        return URLPath.create_article(parent, slug, title=slug)

    def create_staff_for_course(self, course):
        """Creates and returns users with instructor and staff access to course."""

        return [
            InstructorFactory(course_key=course.id),  # Creates instructor_org/number/run role name
            StaffFactory(course_key=course.id),  # Creates staff_org/number/run role name
        ]
@attr('shard_1')
class TestWikiAccess(TestWikiAccessBase):
    """Test wiki access for course staff."""
    def setUp(self):
        super(TestWikiAccess, self).setUp()
        # Two courses whose numbers ('310b' vs '310b_') yield similar wiki
        # slugs — used to verify staff of one cannot touch the other's wiki.
        self.course_310b = CourseFactory.create(org='org', number='310b', display_name='Course')
        self.course_310b_staff = self.create_staff_for_course(self.course_310b)
        self.course_310b2 = CourseFactory.create(org='org', number='310b_', display_name='Course')
        self.course_310b2_staff = self.create_staff_for_course(self.course_310b2)
        self.wiki_310b = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b))
        self.wiki_310b2 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_310b2))
    def test_no_one_is_root_wiki_staff(self):
        # Course staff roles must not grant privileges on the root article.
        all_course_staff = self.course_math101_staff + self.course_310b_staff + self.course_310b2_staff
        for course_staff in all_course_staff:
            self.assertFalse(user_is_article_course_staff(course_staff, self.wiki.article))
    def test_course_staff_is_course_wiki_staff(self):
        # Staff access applies at every depth of their own course wiki tree.
        for page in self.wiki_math101_pages:
            for course_staff in self.course_math101_staff:
                self.assertTrue(user_is_article_course_staff(course_staff, page.article))
        for page in self.wiki_math101b_pages:
            for course_staff in self.course_math101b_staff:
                self.assertTrue(user_is_article_course_staff(course_staff, page.article))
    def test_settings(self):
        # Course staff get the full set of django-wiki permission callbacks.
        for page in self.wiki_math101_pages:
            for course_staff in self.course_math101_staff:
                self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
                self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
                self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
                self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
                self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
        for page in self.wiki_math101b_pages:
            for course_staff in self.course_math101b_staff:
                self.assertTrue(settings.CAN_DELETE(page.article, course_staff))
                self.assertTrue(settings.CAN_MODERATE(page.article, course_staff))
                self.assertTrue(settings.CAN_CHANGE_PERMISSIONS(page.article, course_staff))
                self.assertTrue(settings.CAN_ASSIGN(page.article, course_staff))
                self.assertTrue(settings.CAN_ASSIGN_OWNER(page.article, course_staff))
    def test_other_course_staff_is_not_course_wiki_staff(self):
        # Staff of one course must not be staff on another course's pages,
        # including the near-collision slug pair 310b / 310b_.
        for page in self.wiki_math101_pages:
            for course_staff in self.course_math101b_staff:
                self.assertFalse(user_is_article_course_staff(course_staff, page.article))
        for page in self.wiki_math101_pages:
            for course_staff in self.course_310b_staff:
                self.assertFalse(user_is_article_course_staff(course_staff, page.article))
        for course_staff in self.course_310b_staff:
            self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b2.article))
        for course_staff in self.course_310b2_staff:
            self.assertFalse(user_is_article_course_staff(course_staff, self.wiki_310b.article))
@attr('shard_1')
class TestWikiAccessForStudent(TestWikiAccessBase):
    """Test access for students."""
    def setUp(self):
        super(TestWikiAccessForStudent, self).setUp()
        # A plain user with no staff role on any course.
        self.student = UserFactory.create()
    def test_student_is_not_root_wiki_staff(self):
        self.assertFalse(user_is_article_course_staff(self.student, self.wiki.article))
    def test_student_is_not_course_wiki_staff(self):
        # Students get no staff privileges anywhere in a course wiki tree.
        for page in self.wiki_math101_pages:
            self.assertFalse(user_is_article_course_staff(self.student, page.article))
@attr('shard_1')
class TestWikiAccessForNumericalCourseNumber(TestWikiAccessBase):
    """Test staff has access if course number is numerical and wiki slug has an underscore appended."""
    def setUp(self):
        super(TestWikiAccessForNumericalCourseNumber, self).setUp()
        # Purely numeric course numbers get special slug treatment (the
        # wiki slug differs from the raw number); staff lookups must still
        # resolve despite the modified slug.
        self.course_200 = CourseFactory.create(org='org', number='200', display_name='Course')
        self.course_200_staff = self.create_staff_for_course(self.course_200)
        wiki_200 = self.create_urlpath(self.wiki, course_wiki_slug(self.course_200))
        wiki_200_page = self.create_urlpath(wiki_200, 'Child')
        wiki_200_page_page = self.create_urlpath(wiki_200_page, 'Grandchild')
        self.wiki_200_pages = [wiki_200, wiki_200_page, wiki_200_page_page]
    def test_course_staff_is_course_wiki_staff_for_numerical_course_number(self):  # pylint: disable=invalid-name
        for page in self.wiki_200_pages:
            for course_staff in self.course_200_staff:
                self.assertTrue(user_is_article_course_staff(course_staff, page.article))
@attr('shard_1')
class TestWikiAccessForOldFormatCourseStaffGroups(TestWikiAccessBase):
    """Test staff has access if course group has old format."""
    def setUp(self):
        super(TestWikiAccessForOldFormatCourseStaffGroups, self).setUp()
        self.course_math101c = CourseFactory.create(org='org', number='math101c', display_name='Course')
        # Pre-create a legacy-format ('instructor_<number>') auth group so
        # the role machinery exercises its old-format code path.
        Group.objects.get_or_create(name='instructor_math101c')
        self.course_math101c_staff = self.create_staff_for_course(self.course_math101c)
        wiki_math101c = self.create_urlpath(self.wiki, course_wiki_slug(self.course_math101c))
        wiki_math101c_page = self.create_urlpath(wiki_math101c, 'Child')
        wiki_math101c_page_page = self.create_urlpath(wiki_math101c_page, 'Grandchild')
        self.wiki_math101c_pages = [wiki_math101c, wiki_math101c_page, wiki_math101c_page_page]
    def test_course_staff_is_course_wiki_staff(self):
        for page in self.wiki_math101c_pages:
            for course_staff in self.course_math101c_staff:
                self.assertTrue(user_is_article_course_staff(course_staff, page.article))
| agpl-3.0 |
#!/usr/bin/python
#
# Copyright (c) 2008--2016 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import os
import sys
from spacewalk.common import usix
# Make the directory containing this script importable so that modules
# named on the command line resolve regardless of the caller's CWD.
_topdir = os.path.abspath(os.path.dirname(sys.argv[0]))
if _topdir not in sys.path:
    sys.path.append(_topdir)
from spacewalk.server import rhnSQL
def main():
    """Prepare every rhnSQL.Statement found in the modules named on the
    command line, reporting any statement the database refuses to parse.

    Returns 0 when no modules were given; otherwise returns None (the
    caller maps that to exit status 0).
    """
    rhnSQL.initDB()
    # BUG FIX: 'args' was never defined, so the function raised NameError
    # before doing any work; take the module list from the command line.
    args = sys.argv[1:]
    if not args:
        print("No module specified")
        return 0
    if '.' not in sys.path:
        sys.path.append('.')
    g = globals()
    for module_name in args:
        print("Checking module %s" % module_name)
        pmn = proper_module_name(module_name)
        try:
            m = __import__(pmn)
            # Keep a global reference to the imported top-level package.
            g[module_name] = m
        except ImportError:
            e = sys.exc_info()[1]
            print("Unable to import module %s: %s" % (module_name, e))
            continue
        # __import__ returns the top-level package; walk down to the leaf
        # module the user actually named.
        comps = pmn.split('.')
        for c in comps[1:]:
            m = getattr(m, c)
        for mod, name, statement in get_class_instances(m, rhnSQL.Statement):
            try:
                rhnSQL.prepare(statement)
            except rhnSQL.SQLStatementPrepareError:
                e = sys.exc_info()[1]
                print("Error: %s.%s: %s" % (mod.__name__, name, e))
def proper_module_name(module_name):
    """Convert a file path such as 'a/b/mod.py' into a dotted module name."""
    suffix = '.py'
    stripped = module_name[:-len(suffix)] if module_name.endswith(suffix) else module_name
    return os.path.normpath(stripped).replace('/', '.')
# Identity-keyed memo of objects already visited, preventing repeated and
# cyclic traversal of the same module/class graph.
_objs_seen = {}
def get_class_instances(obj, class_obj):
    """Recursively collect (owner, attr_name, value) triples for every
    attribute of *obj* (and of nested old-style classes) that is an
    instance of *class_obj*."""
    if not hasattr(obj, "__dict__"):
        return []
    marker = id(obj)
    if marker in _objs_seen:
        return []
    _objs_seen[marker] = None
    found = []
    for name, value in obj.__dict__.items():
        if isinstance(value, class_obj):
            found.append((obj, name, value))
        elif isinstance(value, usix.ClassType):
            # Descend into nested classes looking for more instances.
            found.extend(get_class_instances(value, class_obj))
    return found
if __name__ == '__main__':
    sys.exit(main() or 0)
| gpl-2.0 |
"""Test result object"""
import sys
import traceback
import unittest
from StringIO import StringIO
from django.utils.unittest import util
from django.utils.unittest.compatibility import wraps
__unittest = True
def failfast(method):
    """Decorator: when ``self.failfast`` is set, stop the test run before
    recording the failure/error via *method*."""
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        stop_wanted = getattr(self, 'failfast', False)
        if stop_wanted:
            self.stop()
        return method(self, *args, **kwargs)
    return wrapper
# Templates used to append captured stdout/stderr to failure reports.
STDOUT_LINE = '\nStdout:\n%s'
STDERR_LINE = '\nStderr:\n%s'
class TestResult(unittest.TestResult):
    """Holder for test result information.
    Test results are automatically managed by the TestCase and TestSuite
    classes, and do not need to be explicitly manipulated by writers of tests.
    Each instance holds the total number of tests run, and collections of
    failures and errors that occurred among those test runs. The collections
    contain tuples of (testcase, exceptioninfo), where exceptioninfo is the
    formatted traceback of the error that occurred.
    """
    # Class/module fixture bookkeeping used by the TestSuite machinery.
    _previousTestClass = None
    _moduleSetUpFailed = False
    def __init__(self):
        self.failfast = False
        self.failures = []
        self.errors = []
        self.testsRun = 0
        self.skipped = []
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        self.shouldStop = False
        # When True, stdout/stderr are captured per test and replayed only
        # for failing tests (see startTest/stopTest).
        self.buffer = False
        self._stdout_buffer = None
        self._stderr_buffer = None
        self._original_stdout = sys.stdout
        self._original_stderr = sys.stderr
        # Set by addError/addFailure so stopTest knows to replay output.
        self._mirrorOutput = False
    def startTest(self, test):
        "Called when the given test is about to be run"
        self.testsRun += 1
        self._mirrorOutput = False
        if self.buffer:
            # Redirect the real streams into in-memory buffers; the buffers
            # are created lazily and reused between tests.
            if self._stderr_buffer is None:
                self._stderr_buffer = StringIO()
                self._stdout_buffer = StringIO()
            sys.stdout = self._stdout_buffer
            sys.stderr = self._stderr_buffer
    def startTestRun(self):
        """Called once before any tests are executed.
        See startTest for a method called before each test.
        """
    def stopTest(self, test):
        """Called when the given test has been run"""
        if self.buffer:
            if self._mirrorOutput:
                # The test failed/errored: replay what it printed onto the
                # real streams so the output appears in the report.
                output = sys.stdout.getvalue()
                error = sys.stderr.getvalue()
                if output:
                    if not output.endswith('\n'):
                        output += '\n'
                    self._original_stdout.write(STDOUT_LINE % output)
                if error:
                    if not error.endswith('\n'):
                        error += '\n'
                    self._original_stderr.write(STDERR_LINE % error)
            # Restore the real streams and reset the buffers for reuse.
            sys.stdout = self._original_stdout
            sys.stderr = self._original_stderr
            self._stdout_buffer.seek(0)
            self._stdout_buffer.truncate()
            self._stderr_buffer.seek(0)
            self._stderr_buffer.truncate()
        self._mirrorOutput = False
    def stopTestRun(self):
        """Called once after all tests are executed.
        See stopTest for a method called after each test.
        """
    @failfast
    def addError(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info().
        """
        self.errors.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    @failfast
    def addFailure(self, test, err):
        """Called when an error has occurred. 'err' is a tuple of values as
        returned by sys.exc_info()."""
        self.failures.append((test, self._exc_info_to_string(err, test)))
        self._mirrorOutput = True
    def addSuccess(self, test):
        "Called when a test has completed successfully"
        pass
    def addSkip(self, test, reason):
        """Called when a test is skipped."""
        self.skipped.append((test, reason))
    def addExpectedFailure(self, test, err):
        """Called when an expected failure/error occured."""
        self.expectedFailures.append(
            (test, self._exc_info_to_string(err, test)))
    @failfast
    def addUnexpectedSuccess(self, test):
        """Called when a test was expected to fail, but succeed."""
        self.unexpectedSuccesses.append(test)
    def wasSuccessful(self):
        "Tells whether or not this result was a success"
        return (len(self.failures) + len(self.errors) == 0)
    def stop(self):
        "Indicates that the tests should be aborted"
        self.shouldStop = True
    def _exc_info_to_string(self, err, test):
        """Converts a sys.exc_info()-style tuple of values into a string."""
        exctype, value, tb = err
        # Skip test runner traceback levels
        while tb and self._is_relevant_tb_level(tb):
            tb = tb.tb_next
        if exctype is test.failureException:
            # Skip assert*() traceback levels
            length = self._count_relevant_tb_levels(tb)
            msgLines = traceback.format_exception(exctype, value, tb, length)
        else:
            msgLines = traceback.format_exception(exctype, value, tb)
        if self.buffer:
            # Append any output captured during the test to the traceback.
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                msgLines.append(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                msgLines.append(STDERR_LINE % error)
        return ''.join(msgLines)
    def _is_relevant_tb_level(self, tb):
        # unittest machinery frames mark themselves via a module-level
        # '__unittest' global (see the top of this file).
        return '__unittest' in tb.tb_frame.f_globals
    def _count_relevant_tb_levels(self, tb):
        length = 0
        while tb and not self._is_relevant_tb_level(tb):
            length += 1
            tb = tb.tb_next
        return length
    def __repr__(self):
        return "<%s run=%i errors=%i failures=%i>" % \
            (util.strclass(self.__class__), self.testsRun, len(self.errors),
             len(self.failures))
| apache-2.0 |
# Transliterations for the Hangul syllables covered by this page
# (U+CF00..U+CFFF).  The table is perfectly regular: each onset+vowel
# base is followed by the 28 standard final-consonant (jongseong)
# romanizations, so the 256 entries are derived instead of hand-written,
# eliminating transcription errors.
_FINALS = ('', 'g', 'gg', 'gs', 'n', 'nj', 'nh', 'd', 'l', 'lg', 'lm',
           'lb', 'ls', 'lt', 'lp', 'lh', 'm', 'b', 'bs', 's', 'ss', 'ng',
           'j', 'c', 'k', 't', 'p', 'h')
# 'k' onset combined with the vowels falling in this 256-codepoint page;
# the last base is truncated mid-block and continues on the next page.
_BASES = ('ke', 'kyeo', 'kye', 'ko', 'kwa', 'kwae', 'koe', 'kyo', 'ku',
          'kweo')
data = tuple(base + final for base in _BASES for final in _FINALS)[:256]
| gpl-3.0 |
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Metadata consumed by Ansible tooling to classify this module's maturity.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_pim_rp_address
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages configuration of an PIM static RP address instance.
description:
- Manages configuration of an Protocol Independent Multicast (PIM) static
rendezvous point (RP) address instance.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) is currently not supported on all platforms.
options:
rp_address:
description:
- Configures a Protocol Independent Multicast (PIM) static
rendezvous point (RP) address. Valid values are
unicast addresses.
required: true
group_list:
description:
- Group range for static RP. Valid values are multicast addresses.
prefix_list:
description:
- Prefix list policy for static RP. Valid values are prefix-list
policy names.
route_map:
description:
- Route map policy for static RP. Valid values are route-map
policy names.
bidir:
description:
- Group range is treated in PIM bidirectional mode.
type: bool
state:
description:
- Specify desired state of the resource.
required: true
default: present
choices: ['present','absent','default']
'''
EXAMPLES = '''
- nxos_pim_rp_address:
rp_address: "10.1.1.20"
state: present
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "vrf test", "router-id 192.0.2.1"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
def get_existing(module, args, gl):
    """Parse the device config for 'ip pim rp-address' lines matching the
    requested RP address and return their settings as a dict.

    gl -- when True, only consider lines carrying an explicit
    (non-default) group-list; when False, only lines without one.
    """
    existing = {}
    config = str(get_config(module))
    address = module.params['rp_address']
    pim_address_re = r'ip pim rp-address (?P<value>.*)$'
    for line in re.findall(pim_address_re, config, re.M):
        values = line.split()
        if values[0] != address:
            continue
        if gl and 'group-list' not in line:
            continue
        elif not gl and 'group-list' in line:
            if '224.0.0.0/4' not in line:  # ignore default group-list
                continue
        # bidir sticks once seen on any matching line.
        existing['bidir'] = existing.get('bidir') or 'bidir' in line
        if len(values) > 2:
            # Line shape: '<address> <option-keyword> <value> [bidir]'.
            value = values[2]
            if values[1] == 'route-map':
                existing['route_map'] = value
            elif values[1] == 'prefix-list':
                existing['prefix_list'] = value
            elif values[1] == 'group-list':
                if value != '224.0.0.0/4':  # ignore default group-list
                    existing['group_list'] = value
    return existing
def state_present(module, existing, proposed, candidate):
    """Queue the command that creates/updates the static RP entry."""
    address = module.params['rp_address']
    command = 'ip pim rp-address {0}'.format(address)
    # Options that were requested but already match the device state are
    # absent from 'proposed'; re-append them here so the rewritten command
    # line keeps them.
    if module.params['group_list'] and not proposed.get('group_list'):
        command += ' group-list ' + module.params['group_list']
    if module.params['prefix_list']:
        if not proposed.get('prefix_list'):
            command += ' prefix-list ' + module.params['prefix_list']
    if module.params['route_map']:
        if not proposed.get('route_map'):
            command += ' route-map ' + module.params['route_map']
    # build_command appends the remaining (changed) options from 'proposed'.
    commands = build_command(proposed, command)
    if commands:
        candidate.add(commands, parents=[])
def build_command(param_dict, command):
    """Append CLI keyword/value pairs from *param_dict* to *command*.

    Underscored keys become hyphenated CLI keywords; a truthy 'bidir'
    appends the bare 'bidir' flag.  Returns the finished command wrapped
    in a single-element list, as expected by candidate.add().
    """
    parts = [command]
    for key in ('group_list', 'prefix_list', 'route_map'):
        value = param_dict.get(key)
        if value:
            parts.append('{0} {1}'.format(key.replace('_', '-'), value))
    if param_dict.get('bidir'):
        parts.append('bidir')
    return [' '.join(parts)]
def state_absent(module, existing, candidate):
    """Queue the command that removes the static RP entry."""
    address = module.params['rp_address']
    commands = []
    command = 'no ip pim rp-address {0}'.format(address)
    # Scope the removal with the existing options only when the requested
    # group-list matches what is configured; with no group-list requested,
    # fall back to the bare 'no' form.
    if module.params['group_list'] == existing.get('group_list'):
        commands = build_command(existing, command)
    elif not module.params['group_list']:
        commands = [command]
    if commands:
        candidate.add(commands, parents=[])
def get_proposed(pargs, existing):
    """Return the subset of *pargs* whose values differ from *existing*.

    The 'rp_address' key is excluded, and the strings 'true'/'false'
    (any case) are normalized to real booleans before comparison.
    """
    proposed = {}
    for key, value in pargs.items():
        if key == 'rp_address':
            continue
        text = str(value).lower()
        if text == 'true':
            value = True
        elif text == 'false':
            value = False
        if existing.get(key) != value:
            proposed[key] = value
    return proposed
def main():
    """Ansible entry point: compute and apply PIM static-RP changes."""
    argument_spec = dict(
        rp_address=dict(required=True, type='str'),
        group_list=dict(required=False, type='str'),
        prefix_list=dict(required=False, type='str'),
        route_map=dict(required=False, type='str'),
        bidir=dict(required=False, type='bool'),
        state=dict(choices=['present', 'absent'], default='present', required=False),
    )
    argument_spec.update(nxos_argument_spec)
    # group-list, prefix-list and route-map are alternative ways to scope
    # the RP, so at most one of them may be supplied.
    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive=[['group_list', 'route_map'],
                                               ['group_list', 'prefix_list'],
                                               ['route_map', 'prefix_list']],
                           supports_check_mode=True)
    warnings = list()
    result = {'changed': False, 'commands': [], 'warnings': warnings}
    state = module.params['state']
    args = [
        'rp_address',
        'group_list',
        'prefix_list',
        'route_map',
        'bidir'
    ]
    proposed_args = dict((k, v) for k, v in module.params.items()
                         if v is not None and k in args)
    # Existing group-list config is only considered relevant when the task
    # itself specifies a group-list (see get_existing's 'gl' flag).
    if module.params['group_list']:
        existing = get_existing(module, args, True)
        proposed = get_proposed(proposed_args, existing)
    else:
        existing = get_existing(module, args, False)
        proposed = get_proposed(proposed_args, existing)
    candidate = CustomNetworkConfig(indent=3)
    if state == 'present' and (proposed or not existing):
        state_present(module, existing, proposed, candidate)
    elif state == 'absent' and existing:
        state_absent(module, existing, candidate)
    if candidate:
        candidate = candidate.items_text()
        result['commands'] = candidate
        result['changed'] = True
        msgs = load_config(module, candidate, True)
        if msgs:
            for item in msgs:
                if item:
                    if isinstance(item, dict):
                        err_str = item['clierror']
                    else:
                        err_str = item
                    # Some platforms reject the scoped 'no' form; retry the
                    # removal with the bare command.
                    if 'No policy was configured' in err_str:
                        if state == 'absent':
                            addr = module.params['rp_address']
                            new_cmd = 'no ip pim rp-address {0}'.format(addr)
                            load_config(module, new_cmd)
    module.exit_json(**result)
| gpl-3.0 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""System for specifying garbage collection (GC) of path based data.
This framework allows for GC of data specified by path names, for example files
on disk. gc.Path objects each represent a single item stored at a path and may
be a base directory,
/tmp/exports/0/...
/tmp/exports/1/...
...
or a fully qualified file,
/tmp/train-1.ckpt
/tmp/train-2.ckpt
...
A gc filter function takes and returns a list of gc.Path items. Filter
functions are responsible for selecting Path items for preservation or deletion.
Note that functions should always return a sorted list.
For example,
base_dir = "/tmp"
# create the directories
for e in xrange(10):
os.mkdir("%s/%d" % (base_dir, e), 0o755)
# create a simple parser that pulls the export_version from the directory
def parser(path):
match = re.match("^" + base_dir + "/(\\d+)$", path.path)
if not match:
return None
return path._replace(export_version=int(match.group(1)))
path_list = gc.get_paths("/tmp", parser) # contains all ten Paths
every_fifth = gc.mod_export_version(5)
print every_fifth(path_list) # shows ["/tmp/0", "/tmp/5"]
largest_three = gc.largest_export_versions(3)
print largest_three(all_paths) # shows ["/tmp/7", "/tmp/8", "/tmp/9"]
both = gc.union(every_fifth, largest_three)
print both(all_paths) # shows ["/tmp/0", "/tmp/5",
# "/tmp/7", "/tmp/8", "/tmp/9"]
# delete everything not in 'both'
to_delete = gc.negation(both)
for p in to_delete(all_paths):
gfile.DeleteRecursively(p.path) # deletes: "/tmp/1", "/tmp/2",
# "/tmp/3", "/tmp/4", "/tmp/6",
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import heapq
import math
import os
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
# A single exportable item: its filesystem path plus the export version
# parsed from it (None when the parser could not extract one).
Path = collections.namedtuple('Path', 'path export_version')
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def largest_export_versions(n):
  """Creates a filter that keeps the largest n export versions.

  Args:
    n: number of versions to keep.

  Returns:
    A filter function that keeps the n largest paths.
  """
  def keep(paths):
    # Pair each versioned path with its index; entries without an
    # export_version are never kept.
    versioned = [(path.export_version, idx)
                 for idx, path in enumerate(paths)
                 if path.export_version is not None]
    winners = heapq.nlargest(n, versioned)
    return sorted(paths[idx] for _, idx in winners)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def one_of_every_n_export_versions(n):
  r"""Creates a filter that keeps one of every n export versions.

  Args:
    n: interval size.

  Returns:
    A filter function that keeps exactly one path from each interval
    [0, n], (n, 2n], (2n, 3n], etc... If more than one path exists in an
    interval the largest is kept.
  """
  def keep(paths):
    keeper_map = {} # map from interval to largest path seen in that interval
    for p in paths:
      if p.export_version is None:
        # Skip missing export_versions.
        continue
      # Find the interval (with a special case to map export_version = 0 to
      # interval 0.
      # NOTE(review): under Python 2 the '/' below is integer division for
      # int versions; under Python 3 it is true division and math.floor
      # returns a float key — intervals still group identically, but the
      # key types differ. Confirm before porting.
      interval = math.floor(
          (p.export_version - 1) / n) if p.export_version else 0
      existing = keeper_map.get(interval, None)
      if (not existing) or (existing.export_version < p.export_version):
        keeper_map[interval] = p
    return sorted(keeper_map.values())
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def mod_export_version(n):
  """Creates a filter that keeps every export that is a multiple of n.

  Args:
    n: step size.

  Returns:
    A filter function that keeps paths where export_version % n == 0.
  """
  def keep(paths):
    return sorted(p for p in paths if p.export_version % n == 0)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def union(lf, rf):
  """Creates a filter that keeps the union of two filters.

  Args:
    lf: first filter
    rf: second filter

  Returns:
    A filter function that keeps every path kept by either input filter.
  """
  def keep(paths):
    combined = set(lf(paths)) | set(rf(paths))
    return sorted(combined)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def negation(f):
  """Negate a filter.

  Args:
    f: filter function to invert

  Returns:
    A filter function that keeps the paths not kept by f.
  """
  def keep(paths):
    rejected = set(f(paths))
    return sorted(p for p in set(paths) if p not in rejected)
  return keep
@deprecated('2017-06-30', 'Please use SavedModel instead.')
def get_paths(base_dir, parser):
  """Gets a list of Paths in a given directory.

  Args:
    base_dir: directory.
    parser: a function which gets the raw Path and can augment it with
      information such as the export_version, or ignore the path by returning
      None. An example parser may extract the export version from a path
      such as "/tmp/exports/100" an another may extract from a full file
      name such as "/tmp/checkpoint-99.out".

  Returns:
    A list of Paths contained in the base directory with the parsing function
    applied.
    By default the following fields are populated,
      - Path.path
    The parsing function is responsible for populating,
      - Path.export_version
  """
  raw_paths = gfile.ListDirectory(base_dir)
  paths = []
  for r in raw_paths:
    # The parser may return None to exclude the entry entirely.
    p = parser(Path(os.path.join(base_dir, r), None))
    if p:
      paths.append(p)
  return sorted(paths)
| apache-2.0 |
'''
author : https://github.com/Harnek
Floyd–Warshall algorithm - All-pairs shortest path problem (for weighted graphs)
Complexity:
Performance = |V|^3
Space = |V|^2
'''
def allPairsShortestPath(graph):
    """Floyd-Warshall all-pairs shortest paths.

    Args:
        graph: dict mapping vertex -> {neighbor: edge weight}.

    Returns:
        (dist, pred): dist[u][v] is the length of the shortest u->v path
        (float('inf') if unreachable); pred[u][v] is v's predecessor on
        that path (None when v is unreachable from u or v == u).
    """
    dist = {}
    pred = {}
    for u in graph:
        dist[u] = {}
        pred[u] = {}
        for v in graph:
            dist[u][v] = float('inf')
            pred[u][v] = None
        dist[u][u] = 0
        # BUG FIX: the original re-assigned pred[u][v] here using the
        # leftover loop variable v (intended pred[u][u], already None);
        # the stray statement is removed.
        for v, weight in graph[u].items():
            dist[u][v] = weight
            pred[u][v] = u
    # Relax every pair through each possible intermediate vertex.
    for mid in graph:
        for u in graph:
            for v in graph:
                newLen = dist[u][mid] + dist[mid][v]
                if newLen < dist[u][v]:
                    dist[u][v] = newLen
                    pred[u][v] = pred[mid][v]
    return dist, pred
# Example graph: adjacency mapping vertex -> {neighbor: edge weight}.
graph = {0 : {1:6, 2:8},
         1 : {4:11},
         2 : {3: 9},
         3 : {},
         4 : {5:3},
         5 : {2: 7, 3:4}}
dist, pred = allPairsShortestPath(graph)
# Print each vertex with its shortest-path distances to every other vertex.
for v in dist:
    print(v, dist[v])
| mit |
#
# cl.h - Compression Library typedefs and prototypes
#
# 01/07/92 Cleanup by Brian Knittel
# 02/18/92 Original Version by Brian Knittel
#
#
# originalFormat parameter values
#
from warnings import warnpy3k
warnpy3k("the CL_old module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
MAX_NUMBER_OF_ORIGINAL_FORMATS = 32
# Audio
MONO = 0
STEREO_INTERLEAVED = 1
# Video
# YUV is defined to be the same thing as YCrCb (luma and two chroma components).
# 422 is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2
# horizontally, packed as U Y1 V Y2 (byte order).
# 422HC is appended to YUV (or YCrCb) if the chroma is sub-sampled by 2
# vertically in addition to horizontally, and is packed the same as
# 422 except that U & V are not valid on the second line.
#
RGB = 0
RGBX = 1
RGBA = 2
RGB332 = 3
GRAYSCALE = 4
Y = 4
YUV = 5
YCbCr = 5
YUV422 = 6 # 4:2:2 sampling
YCbCr422 = 6 # 4:2:2 sampling
YUV422HC = 7 # 4:1:1 sampling
YCbCr422HC = 7 # 4:1:1 sampling
YUV422DC = 7 # 4:1:1 sampling
YCbCr422DC = 7 # 4:1:1 sampling
BEST_FIT = -1
def BytesPerSample(s):
    """Return the number of bytes per sample for format *s*, or 0 if unknown."""
    sizes = {MONO: 2, YUV: 2, STEREO_INTERLEAVED: 4}
    return sizes.get(s, 0)
def BytesPerPixel(f):
    """Return the number of bytes per pixel for video format *f* (2 if unknown)."""
    if f in (RGB332, GRAYSCALE):
        return 1
    if f in (RGB, YUV):
        return 3
    if f in (RGBX, RGBA):
        return 4
    return 2
def AudioFormatName(f):
    """Return a printable name for audio format *f*."""
    names = {MONO: 'MONO', STEREO_INTERLEAVED: 'STEREO_INTERLEAVED'}
    return names.get(f, 'Not a valid format')
def VideoFormatName(f):
    """Return a printable name for video format *f*."""
    # Alias constants (Y, YCbCr, YUV422HC, ...) share values with the keys
    # below, so they resolve to the same names as the original if/elif chain.
    names = {
        RGB: 'RGB',
        RGBX: 'RGBX',
        RGBA: 'RGBA',
        RGB332: 'RGB332',
        GRAYSCALE: 'GRAYSCALE',
        YUV: 'YUV',
        YUV422: 'YUV422',
        YUV422DC: 'YUV422DC',
    }
    return names.get(f, 'Not a valid format')
MAX_NUMBER_OF_AUDIO_ALGORITHMS = 32
MAX_NUMBER_OF_VIDEO_ALGORITHMS = 32
#
# Algorithm types
#
AUDIO = 0
VIDEO = 1
def AlgorithmNumber(scheme):
    """Return the algorithm number encoded in the low 15 bits of *scheme*."""
    mask = (1 << 15) - 1  # 0x7fff
    return scheme & mask
def AlgorithmType(scheme):
    """Return the algorithm type bit of *scheme* (0 = AUDIO, 1 = VIDEO)."""
    return 1 if scheme & (1 << 15) else 0
def Algorithm(type, n):
    """Pack an algorithm *type* bit and number *n* into a scheme id."""
    return ((type & 1) << 15) | n
#
# "compressionScheme" argument values
#
UNKNOWN_SCHEME = -1
UNCOMPRESSED_AUDIO = Algorithm(AUDIO, 0)
G711_ULAW = Algorithm(AUDIO, 1)
ULAW = Algorithm(AUDIO, 1)
G711_ALAW = Algorithm(AUDIO, 2)
ALAW = Algorithm(AUDIO, 2)
AWARE_MPEG_AUDIO = Algorithm(AUDIO, 3)
AWARE_MULTIRATE = Algorithm(AUDIO, 4)
UNCOMPRESSED = Algorithm(VIDEO, 0)
UNCOMPRESSED_VIDEO = Algorithm(VIDEO, 0)
RLE = Algorithm(VIDEO, 1)
JPEG = Algorithm(VIDEO, 2)
MPEG_VIDEO = Algorithm(VIDEO, 3)
MVC1 = Algorithm(VIDEO, 4)
RTR = Algorithm(VIDEO, 5)
RTR1 = Algorithm(VIDEO, 5)
#
# Parameters
#
MAX_NUMBER_OF_PARAMS = 256
# Default Parameters
IMAGE_WIDTH = 0
IMAGE_HEIGHT = 1
ORIGINAL_FORMAT = 2
INTERNAL_FORMAT = 3
COMPONENTS = 4
BITS_PER_COMPONENT = 5
FRAME_RATE = 6
COMPRESSION_RATIO = 7
EXACT_COMPRESSION_RATIO = 8
FRAME_BUFFER_SIZE = 9
COMPRESSED_BUFFER_SIZE = 10
BLOCK_SIZE = 11
PREROLL = 12
FRAME_TYPE = 13
ALGORITHM_ID = 14
ALGORITHM_VERSION = 15
ORIENTATION = 16
NUMBER_OF_FRAMES = 17
SPEED = 18
LAST_FRAME_INDEX = 19
NUMBER_OF_PARAMS = 20
# JPEG Specific Parameters
QUALITY_FACTOR = NUMBER_OF_PARAMS + 0
# MPEG Specific Parameters
END_OF_SEQUENCE = NUMBER_OF_PARAMS + 0
# RTR Specific Parameters
QUALITY_LEVEL = NUMBER_OF_PARAMS + 0
ZOOM_X = NUMBER_OF_PARAMS + 1
ZOOM_Y = NUMBER_OF_PARAMS + 2
#
# Parameter value types
#
ENUM_VALUE = 0 # only certain constant values are valid
RANGE_VALUE = 1 # any value in a given range is valid
FLOATING_ENUM_VALUE = 2 # only certain constant floating point values are valid
FLOATING_RANGE_VALUE = 3 # any value in a given floating point range is valid
#
# Algorithm Functionality
#
DECOMPRESSOR = 1
COMPRESSOR = 2
CODEC = 3
#
# Buffer types
#
NONE = 0
FRAME = 1
DATA = 2
#
# Frame types
#
NONE = 0
KEYFRAME = 1
INTRA = 1
PREDICTED = 2
BIDIRECTIONAL = 3
#
# Orientations
#
TOP_DOWN = 0
BOTTOM_UP = 1
#
# SGI Proprietary Algorithm Header Start Code
#
HEADER_START_CODE = 0xc1C0DEC
#
# error codes
#
BAD_NO_BUFFERSPACE = -2 # no space for internal buffers
BAD_PVBUFFER = -3 # param/val buffer doesn't make sense
BAD_BUFFERLENGTH_NEG = -4 # negative buffer length
BAD_BUFFERLENGTH_ODD = -5 # odd length parameter/value buffer
BAD_PARAM = -6 # invalid parameter
BAD_COMPRESSION_SCHEME = -7 # compression scheme parameter invalid
BAD_COMPRESSOR_HANDLE = -8 # compression handle parameter invalid
BAD_COMPRESSOR_HANDLE_POINTER = -9 # compression handle pointer invalid
BAD_BUFFER_HANDLE = -10 # buffer handle invalid
BAD_BUFFER_QUERY_SIZE = -11 # buffer query size too large
JPEG_ERROR = -12 # error from libjpeg
BAD_FRAME_SIZE = -13 # frame size invalid
PARAM_OUT_OF_RANGE = -14 # parameter out of range
ADDED_ALGORITHM_ERROR = -15 # added algorithm had a unique error
BAD_ALGORITHM_TYPE = -16 # bad algorithm type
BAD_ALGORITHM_NAME = -17 # bad algorithm name
BAD_BUFFERING = -18 # bad buffering calls
BUFFER_NOT_CREATED = -19 # buffer not created
BAD_BUFFER_EXISTS = -20 # buffer already created
BAD_INTERNAL_FORMAT = -21 # invalid internal format
BAD_BUFFER_POINTER = -22 # invalid buffer pointer
FRAME_BUFFER_SIZE_ZERO = -23 # frame buffer has zero size
BAD_STREAM_HEADER = -24 # invalid stream header
BAD_LICENSE = -25 # netls license not valid
AWARE_ERROR = -26 # error from libawcmp
| apache-2.0 |
lamenezes/pingo-py | pingo/pcduino/tests/test_pcduino.py | 7 | 1463 | import unittest
import pingo
from pingo.test import level0
from pingo.test import level1
from pingo.detect import check_board
running_on_pcduino = check_board(pingo.pcduino.PcDuino)
class PcDuinoTest(unittest.TestCase):
    """Shared fixture for the pcDuino board tests below."""

    def setUp(self):
        self.board = pingo.pcduino.PcDuino()
        # Level0 Parameters
        self.digital_output_pin_number = 3
        self.digital_input_pin_number = 0
        self.total_pins = 20
        # Level1 Parameters
        self.analog_input_pin_number = 'A3'
        self.expected_analog_input = 4096
        self.expected_analog_ratio = 0.98

    def tearDown(self):
        # Release the board's pins/resources after each test.
        self.board.cleanup()
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoBasics(PcDuinoTest, level0.BoardBasics):
    """Level-0 sanity checks; only run on real PcDuino hardware."""

    def test_list_pins(self):
        """The board exposes the expected number of pins as DigitalPin objects."""
        pin = self.board.pins[self.digital_output_pin_number]
        # unittest assertion methods give informative failure messages, unlike
        # bare `assert`, and are not stripped when Python runs with -O.
        self.assertIsInstance(pin, pingo.DigitalPin)
        self.assertEqual(len(self.board.pins), self.total_pins)
@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoExceptions(PcDuinoTest, level0.BoardExceptions):
    # Inherits the level-0 exception-behaviour tests unchanged.
    pass

@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoAnalogRead(PcDuinoTest, level1.AnalogReadBasics):
    # Inherits the level-1 analog-read tests unchanged.
    pass

@unittest.skipIf(not running_on_pcduino, 'PcDuino not detected')
class PcDuinoAnalogExceptions(PcDuinoTest, level1.AnalogExceptions):
    # Inherits the level-1 analog exception tests unchanged.
    pass

if __name__ == '__main__':
    unittest.main()
| mit |
scaidermern/topTracks2playlist | lastfm/track.py | 1 | 12514 | #!/usr/bin/env python
__author__ = "Abhinav Sarkar <abhinav@abhinavsarkar.net>"
__version__ = "0.2"
__license__ = "GNU Lesser General Public License"
__package__ = "lastfm"
from lastfm.base import LastfmBase
from lastfm.mixin import mixin
from lastfm.decorators import cached_property, top_property
@mixin("crawlable", "sharable", "taggable",
"searchable", "cacheable", "property_adder")
class Track(LastfmBase):
"""A class representing a track."""
class Meta(object):
properties = ["id", "name", "mbid", "url", "duration",
"artist", "image", "stats", "played_on", "loved_on",
"subject"]
fillable_properties = ["streamable", "full_track",
"album", "position", "wiki"]
    def init(self, api, **kwargs):
        """Initialise the track.

        @param api: the Api instance this object uses for all requests.
        Any supplied stats/wiki data is re-wrapped so its ``subject``
        points at this Track instance.
        """
        if not isinstance(api, Api):
            raise InvalidParametersError("api reference must be supplied as an argument")
        self._api = api
        super(Track, self).init(**kwargs)
        # Rebuild the Stats/Wiki helpers (when supplied) with subject=self.
        # NOTE(review): the `x and Y(...) or None` idiom assumes Y(...) is
        # always truthy -- presumably Stats/Wiki instances are; confirm.
        self._stats = hasattr(self, "_stats") and Stats(
            subject = self,
            match = self._stats.match,
            playcount = self._stats.playcount,
            rank = self._stats.rank,
            listeners = self._stats.listeners,
        ) or None
        self._wiki = hasattr(self, "_wiki") and Wiki(
            subject = self,
            published = self._wiki.published,
            summary = self._wiki.summary,
            content = self._wiki.content
        ) or None
    @property
    def wiki(self):
        """Wiki of the track (lazily fetched; None when last.fm has none)."""
        # "na" is the sentinel _fill_info stores when the API returned no wiki.
        if self._wiki == "na":
            return None
        if self._wiki is None:
            self._fill_info()
        return self._wiki
    @cached_property
    def similar(self):
        """Tracks similar to this track (fetched once via track.getSimilar)."""
        # Either (artist, name) or mbid must be available to identify the track.
        params = Track._check_params(
            {'method': 'track.getSimilar'},
            self.artist.name,
            self.name,
            self.mbid
        )
        data = self._api._fetch_data(params).find('similartracks')
        # One Track per <track> element; each carries a similarity 'match' score.
        return [
            Track(
                self._api,
                subject = self,
                name = t.findtext('name'),
                artist = Artist(
                    self._api,
                    subject = self,
                    name = t.findtext('artist/name'),
                    mbid = t.findtext('artist/mbid'),
                    url = t.findtext('artist/url')
                ),
                mbid = t.findtext('mbid'),
                stats = Stats(
                    subject = t.findtext('name'),
                    match = float(t.findtext('match'))
                ),
                streamable = (t.findtext('streamable') == '1'),
                full_track = (t.find('streamable').attrib['fulltrack'] == '1'),
                image = dict([(i.get('size'), i.text) for i in t.findall('image')]),
            )
            for t in data.findall('track')
        ]

    @top_property("similar")
    def most_similar(self):
        """Track most similar to this track."""
        pass
    @cached_property
    def top_fans(self):
        """Top fans of the track (fetched once via track.getTopFans)."""
        params = Track._check_params(
            {'method': 'track.getTopFans'},
            self.artist.name,
            self.name,
            self.mbid
        )
        data = self._api._fetch_data(params).find('topfans')
        # One User per <user> element; 'weight' is last.fm's fan score.
        return [
            User(
                self._api,
                subject = self,
                name = u.findtext('name'),
                url = u.findtext('url'),
                image = dict([(i.get('size'), i.text) for i in u.findall('image')]),
                stats = Stats(
                    subject = u.findtext('name'),
                    weight = int(u.findtext('weight'))
                )
            )
            for u in data.findall('user')
        ]

    @top_property("top_fans")
    def top_fan(self):
        """Topmost fan of the track."""
        pass
    @cached_property
    def top_tags(self):
        """Top tags for the track (fetched once via track.getTopTags)."""
        params = Track._check_params(
            {'method': 'track.getTopTags'},
            self.artist.name,
            self.name,
            self.mbid
        )
        data = self._api._fetch_data(params).find('toptags')
        # One Tag per <tag> element with its usage count.
        return [
            Tag(
                self._api,
                subject = self,
                name = t.findtext('name'),
                url = t.findtext('url'),
                stats = Stats(
                    subject = t.findtext('name'),
                    count = int(t.findtext('count')),
                )
            )
            for t in data.findall('tag')
        ]

    @top_property("top_tags")
    def top_tag(self):
        """Topmost tag for the track."""
        pass
def love(self):
params = self._default_params({'method': 'track.love'})
self._api._post_data(params)
def ban(self):
params = self._default_params({'method': 'track.ban'})
self._api._post_data(params)
    @staticmethod
    def get_info(api,
                 artist = None,
                 track = None,
                 mbid = None):
        """Fetch a fully-populated Track via track.getInfo."""
        data = Track._fetch_data(api, artist, track, mbid)
        t = Track(
            api,
            name = data.findtext('name'),
            artist = Artist(
                api,
                name = data.findtext('artist/name'),
            ),
        )
        # Populate the remaining attributes (mbid, url, stats, wiki, ...).
        t._fill_info()
        return t
    @staticmethod
    def _get_all(seed_track):
        # Crawl spec for the "crawlable" mixin: a generator over the top
        # tracks of every artist reachable from the seed track's artist.
        def gen():
            for artist in Artist.get_all(seed_track.artist):
                for track in artist.top_tracks:
                    yield track
        return (seed_track, ['name', 'artist'], lambda api, hsh: gen())
def _default_params(self, extra_params = None):
if not (self.artist and self.name):
raise InvalidParametersError("artist and track have to be provided.")
params = {'artist': self.artist.name, 'track': self.name}
if extra_params is not None:
params.update(extra_params)
return params
    @staticmethod
    def _search_yield_func(api, track):
        """Build a Track from one <track> element of a track.search response."""
        return Track(
            api,
            name = track.findtext('name'),
            artist = Artist(
                api,
                name=track.findtext('artist')
            ),
            url = track.findtext('url'),
            stats = Stats(
                subject=track.findtext('name'),
                listeners=int(track.findtext('listeners'))
            ),
            streamable = (track.findtext('streamable') == '1'),
            full_track = (track.find('streamable').attrib['fulltrack'] == '1'),
            image = dict([(i.get('size'), i.text) for i in track.findall('image')]),
        )
@staticmethod
def _fetch_data(api,
artist = None,
track = None,
mbid = None):
params = Track._check_params({'method': 'track.getInfo'}, artist, track, mbid)
return api._fetch_data(params).find('track')
    def _fill_info(self):
        """Populate this track's lazy attributes from a track.getInfo response."""
        data = Track._fetch_data(self._api, self.artist.name, self.name)
        self._id = int(data.findtext('id'))
        self._mbid = data.findtext('mbid')
        self._url = data.findtext('url')
        self._duration = int(data.findtext('duration'))
        self._streamable = (data.findtext('streamable') == '1')
        self._full_track = (data.find('streamable').attrib['fulltrack'] == '1')
        self._image = dict([(i.get('size'), i.text) for i in data.findall('image')])
        self._stats = Stats(
            subject = self,
            listeners = int(data.findtext('listeners')),
            playcount = int(data.findtext('playcount')),
        )
        self._artist = Artist(
            self._api,
            name = data.findtext('artist/name'),
            mbid = data.findtext('artist/mbid'),
            url = data.findtext('artist/url')
        )
        # Album data is optional in the response.
        if data.find('album') is not None:
            self._album = Album(
                self._api,
                artist = self._artist,
                name = data.findtext('album/title'),
                mbid = data.findtext('album/mbid'),
                url = data.findtext('album/url'),
                image = dict([(i.get('size'), i.text) for i in data.findall('album/image')])
            )
            self._position = data.find('album').attrib['position'].strip() \
                and int(data.find('album').attrib['position'])
        # Wiki is optional too; "na" marks its confirmed absence (see `wiki`).
        if data.find('wiki') is not None:
            self._wiki = Wiki(
                self,
                published = datetime(*(time.strptime(
                    data.findtext('wiki/published').strip(),
                    '%a, %d %b %Y %H:%M:%S +0000'
                )[0:6])),
                summary = data.findtext('wiki/summary'),
                content = data.findtext('wiki/content')
            )
        else:
            self._wiki = 'na'
@staticmethod
def _check_params(params,
artist = None,
track = None,
mbid = None):
if not ((artist and track) or mbid):
raise InvalidParametersError("either (artist and track) or mbid has to be given as argument.")
if artist and track:
params.update({'artist': artist, 'track': track})
elif mbid:
params.update({'mbid': mbid})
return params
    @staticmethod
    def _hash_func(*args, **kwds):
        # Hash on (name, artist); both must be supplied as keyword arguments.
        try:
            return hash("%s%s" % (kwds['name'], hash(kwds['artist'])))
        except KeyError:
            raise InvalidParametersError("name and artist have to be provided for hashing")

    def __hash__(self):
        return self.__class__._hash_func(name = self.name, artist = self.artist)

    def __eq__(self, other):
        # Compare by the strongest identity available: mbid, then url,
        # then the (name, artist) pair, else fall back to the base class.
        if self.mbid and other.mbid:
            return self.mbid == other.mbid
        if self.url and other.url:
            return self.url == other.url
        if (self.name and self.artist) and (other.name and other.artist):
            return (self.name == other.name) and (self.artist == other.artist)
        return super(Track, self).__eq__(other)

    def __lt__(self, other):
        # Tracks order alphabetically by name.
        return self.name < other.name

    def __repr__(self):
        return "<lastfm.Track: '%s' by %s>" % (self.name, self.artist.name)
import time
from datetime import datetime
from lastfm.api import Api
from lastfm.artist import Artist
from lastfm.album import Album
from lastfm.error import InvalidParametersError
from lastfm.stats import Stats
from lastfm.tag import Tag
from lastfm.user import User
from lastfm.wiki import Wiki
| gpl-3.0 |
cedk/odoo | addons/account/wizard/account_state_open.py | 341 | 1785 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
class account_state_open(osv.osv_memory):
    _name = 'account.state.open'
    _description = 'Account State Open'

    def change_inv_state(self, cr, uid, ids, context=None):
        """Re-open the selected invoice unless it is already reconciled."""
        if context is None:
            context = {}
        invoice_pool = self.pool.get('account.invoice')
        active_ids = context.get('active_ids')
        if isinstance(active_ids, list):
            # Only the first selected invoice is acted upon.
            invoice = invoice_pool.browse(cr, uid, active_ids[0], context=context)
            if invoice.reconciled:
                raise osv.except_osv(_('Warning!'), _('Invoice is already reconciled.'))
            invoice.signal_workflow('open_test')
        return {'type': 'ir.actions.act_window_close'}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
StormTrooper/osmc | package/mediacenter-skin-next-osmc/files/usr/share/kodi/addons/script.module.unidecode/lib/unidecode/x02f.py | 252 | 4572 | data = (
'[?] ', # 0x00
'[?] ', # 0x01
'[?] ', # 0x02
'[?] ', # 0x03
'[?] ', # 0x04
'[?] ', # 0x05
'[?] ', # 0x06
'[?] ', # 0x07
'[?] ', # 0x08
'[?] ', # 0x09
'[?] ', # 0x0a
'[?] ', # 0x0b
'[?] ', # 0x0c
'[?] ', # 0x0d
'[?] ', # 0x0e
'[?] ', # 0x0f
'[?] ', # 0x10
'[?] ', # 0x11
'[?] ', # 0x12
'[?] ', # 0x13
'[?] ', # 0x14
'[?] ', # 0x15
'[?] ', # 0x16
'[?] ', # 0x17
'[?] ', # 0x18
'[?] ', # 0x19
'[?] ', # 0x1a
'[?] ', # 0x1b
'[?] ', # 0x1c
'[?] ', # 0x1d
'[?] ', # 0x1e
'[?] ', # 0x1f
'[?] ', # 0x20
'[?] ', # 0x21
'[?] ', # 0x22
'[?] ', # 0x23
'[?] ', # 0x24
'[?] ', # 0x25
'[?] ', # 0x26
'[?] ', # 0x27
'[?] ', # 0x28
'[?] ', # 0x29
'[?] ', # 0x2a
'[?] ', # 0x2b
'[?] ', # 0x2c
'[?] ', # 0x2d
'[?] ', # 0x2e
'[?] ', # 0x2f
'[?] ', # 0x30
'[?] ', # 0x31
'[?] ', # 0x32
'[?] ', # 0x33
'[?] ', # 0x34
'[?] ', # 0x35
'[?] ', # 0x36
'[?] ', # 0x37
'[?] ', # 0x38
'[?] ', # 0x39
'[?] ', # 0x3a
'[?] ', # 0x3b
'[?] ', # 0x3c
'[?] ', # 0x3d
'[?] ', # 0x3e
'[?] ', # 0x3f
'[?] ', # 0x40
'[?] ', # 0x41
'[?] ', # 0x42
'[?] ', # 0x43
'[?] ', # 0x44
'[?] ', # 0x45
'[?] ', # 0x46
'[?] ', # 0x47
'[?] ', # 0x48
'[?] ', # 0x49
'[?] ', # 0x4a
'[?] ', # 0x4b
'[?] ', # 0x4c
'[?] ', # 0x4d
'[?] ', # 0x4e
'[?] ', # 0x4f
'[?] ', # 0x50
'[?] ', # 0x51
'[?] ', # 0x52
'[?] ', # 0x53
'[?] ', # 0x54
'[?] ', # 0x55
'[?] ', # 0x56
'[?] ', # 0x57
'[?] ', # 0x58
'[?] ', # 0x59
'[?] ', # 0x5a
'[?] ', # 0x5b
'[?] ', # 0x5c
'[?] ', # 0x5d
'[?] ', # 0x5e
'[?] ', # 0x5f
'[?] ', # 0x60
'[?] ', # 0x61
'[?] ', # 0x62
'[?] ', # 0x63
'[?] ', # 0x64
'[?] ', # 0x65
'[?] ', # 0x66
'[?] ', # 0x67
'[?] ', # 0x68
'[?] ', # 0x69
'[?] ', # 0x6a
'[?] ', # 0x6b
'[?] ', # 0x6c
'[?] ', # 0x6d
'[?] ', # 0x6e
'[?] ', # 0x6f
'[?] ', # 0x70
'[?] ', # 0x71
'[?] ', # 0x72
'[?] ', # 0x73
'[?] ', # 0x74
'[?] ', # 0x75
'[?] ', # 0x76
'[?] ', # 0x77
'[?] ', # 0x78
'[?] ', # 0x79
'[?] ', # 0x7a
'[?] ', # 0x7b
'[?] ', # 0x7c
'[?] ', # 0x7d
'[?] ', # 0x7e
'[?] ', # 0x7f
'[?] ', # 0x80
'[?] ', # 0x81
'[?] ', # 0x82
'[?] ', # 0x83
'[?] ', # 0x84
'[?] ', # 0x85
'[?] ', # 0x86
'[?] ', # 0x87
'[?] ', # 0x88
'[?] ', # 0x89
'[?] ', # 0x8a
'[?] ', # 0x8b
'[?] ', # 0x8c
'[?] ', # 0x8d
'[?] ', # 0x8e
'[?] ', # 0x8f
'[?] ', # 0x90
'[?] ', # 0x91
'[?] ', # 0x92
'[?] ', # 0x93
'[?] ', # 0x94
'[?] ', # 0x95
'[?] ', # 0x96
'[?] ', # 0x97
'[?] ', # 0x98
'[?] ', # 0x99
'[?] ', # 0x9a
'[?] ', # 0x9b
'[?] ', # 0x9c
'[?] ', # 0x9d
'[?] ', # 0x9e
'[?] ', # 0x9f
'[?] ', # 0xa0
'[?] ', # 0xa1
'[?] ', # 0xa2
'[?] ', # 0xa3
'[?] ', # 0xa4
'[?] ', # 0xa5
'[?] ', # 0xa6
'[?] ', # 0xa7
'[?] ', # 0xa8
'[?] ', # 0xa9
'[?] ', # 0xaa
'[?] ', # 0xab
'[?] ', # 0xac
'[?] ', # 0xad
'[?] ', # 0xae
'[?] ', # 0xaf
'[?] ', # 0xb0
'[?] ', # 0xb1
'[?] ', # 0xb2
'[?] ', # 0xb3
'[?] ', # 0xb4
'[?] ', # 0xb5
'[?] ', # 0xb6
'[?] ', # 0xb7
'[?] ', # 0xb8
'[?] ', # 0xb9
'[?] ', # 0xba
'[?] ', # 0xbb
'[?] ', # 0xbc
'[?] ', # 0xbd
'[?] ', # 0xbe
'[?] ', # 0xbf
'[?] ', # 0xc0
'[?] ', # 0xc1
'[?] ', # 0xc2
'[?] ', # 0xc3
'[?] ', # 0xc4
'[?] ', # 0xc5
'[?] ', # 0xc6
'[?] ', # 0xc7
'[?] ', # 0xc8
'[?] ', # 0xc9
'[?] ', # 0xca
'[?] ', # 0xcb
'[?] ', # 0xcc
'[?] ', # 0xcd
'[?] ', # 0xce
'[?] ', # 0xcf
'[?] ', # 0xd0
'[?] ', # 0xd1
'[?] ', # 0xd2
'[?] ', # 0xd3
'[?] ', # 0xd4
'[?] ', # 0xd5
'[?]', # 0xd6
'[?]', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'[?]', # 0xe6
'[?]', # 0xe7
'[?]', # 0xe8
'[?]', # 0xe9
'[?]', # 0xea
'[?]', # 0xeb
'[?]', # 0xec
'[?]', # 0xed
'[?]', # 0xee
'[?]', # 0xef
'[?] ', # 0xf0
'[?] ', # 0xf1
'[?] ', # 0xf2
'[?] ', # 0xf3
'[?] ', # 0xf4
'[?] ', # 0xf5
'[?] ', # 0xf6
'[?] ', # 0xf7
'[?] ', # 0xf8
'[?] ', # 0xf9
'[?] ', # 0xfa
'[?] ', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
WangHanbin/shadowsocks | setup.py | 929 | 1321 | import codecs
from setuptools import setup
# Use the README as the long description shown on PyPI.
with codecs.open('README.rst', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name="shadowsocks",
    version="2.8.2",
    license='http://www.apache.org/licenses/LICENSE-2.0',
    description="A fast tunnel proxy that help you get through firewalls",
    author='clowwindy',
    author_email='clowwindy42@gmail.com',
    url='https://github.com/shadowsocks/shadowsocks',
    packages=['shadowsocks', 'shadowsocks.crypto'],
    package_data={
        'shadowsocks': ['README.rst', 'LICENSE']
    },
    install_requires=[],
    # Console entry points: client (sslocal) and server (ssserver).
    entry_points="""
    [console_scripts]
    sslocal = shadowsocks.local:main
    ssserver = shadowsocks.server:main
    """,
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: Proxy Servers',
    ],
    long_description=long_description,
)
| apache-2.0 |
rothnic/bokeh | bokeh/models/tests/test_plots.py | 35 | 2286 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import absolute_import
from mock import patch
import unittest
from bokeh.plotting import figure
from bokeh.models import GlyphRenderer
from bokeh.models.tools import PanTool
class TestPlotSelect(unittest.TestCase):
    """Tests for Plot.select(): argument handling and delegation to find()."""

    def setUp(self):
        # A minimal figure with one named glyph and the pan tool active.
        self._plot = figure(tools='pan')
        self._plot.circle([1,2,3], [3,2,1], name='foo')

    @patch('bokeh.models.plots.find')
    def test_string_arg(self, mock_find):
        # A plain string selects by name.
        self._plot.select('foo')
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], dict(name='foo'))

    @patch('bokeh.models.plots.find')
    def test_type_arg(self, mock_find):
        # A model class selects by type.
        self._plot.select(PanTool)
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], dict(type=PanTool))

    @patch('bokeh.models.plots.find')
    def test_kwargs(self, mock_find):
        # Keyword arguments are forwarded verbatim as the query.
        kw = dict(name='foo', type=GlyphRenderer)
        self._plot.select(**kw)
        self.assertTrue(mock_find.called)
        self.assertEqual(mock_find.call_args[0][1], kw)

    def test_too_many_args(self):
        with self.assertRaises(TypeError) as cm:
            self._plot.select('foo', 'bar')
        self.assertEqual(
            'select accepts at most ONE positional argument.',
            str(cm.exception)
        )

    def test_no_input(self):
        with self.assertRaises(TypeError) as cm:
            self._plot.select()
        self.assertEqual(
            'select requires EITHER a positional argument, OR keyword arguments.',
            str(cm.exception)
        )

    def test_arg_and_kwarg(self):
        with self.assertRaises(TypeError) as cm:
            self._plot.select('foo', type=PanTool)
        self.assertEqual(
            'select accepts EITHER a positional argument, OR keyword arguments (not both).',
            str(cm.exception)
        )

if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247971765/PyQt4/QtNetwork/QHttpRequestHeader.py | 1 | 1684 | # encoding: utf-8
# module PyQt4.QtNetwork
# from /usr/lib/python3/dist-packages/PyQt4/QtNetwork.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyQt4.QtCore as __PyQt4_QtCore
from .QHttpHeader import QHttpHeader
class QHttpRequestHeader(QHttpHeader):
"""
QHttpRequestHeader()
QHttpRequestHeader(str, str, int major=1, int minor=1)
QHttpRequestHeader(QHttpRequestHeader)
QHttpRequestHeader(str)
"""
def majorVersion(self): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.majorVersion() -> int """
return 0
def method(self): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.method() -> str """
return ""
def minorVersion(self): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.minorVersion() -> int """
return 0
def parseLine(self, p_str, p_int): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.parseLine(str, int) -> bool """
return False
def path(self): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.path() -> str """
return ""
def setRequest(self, p_str, p_str_1, int_major=1, int_minor=1): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.setRequest(str, str, int major=1, int minor=1) """
pass
def toString(self): # real signature unknown; restored from __doc__
""" QHttpRequestHeader.toString() -> str """
return ""
def __init__(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads
pass
| gpl-2.0 |
alexteodor/odoo | addons/claim_from_delivery/__openerp__.py | 172 | 1576 | ##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name' : 'Claim on Deliveries',
'version' : '1.0',
'author' : 'OpenERP SA',
'category' : 'Warehouse Management',
'depends' : ['base', 'crm_claim', 'stock'],
'demo' : [],
'description': """
Create a claim from a delivery order.
=====================================
Adds a Claim link to the delivery order.
""",
'data' : [
'claim_delivery_view.xml',
'claim_delivery_data.xml',],
'auto_install': False,
'installable': True,
'images': ['images/1_claim_link_delivery_order.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jamiefolsom/edx-platform | common/lib/capa/capa/tests/test_hint_functionality.py | 41 | 34139 | # -*- coding: utf-8 -*-
"""
Tests of extended hints
"""
import unittest
from ddt import ddt, data, unpack
# With the use of ddt, some of the data expected_string cases below are naturally long stretches
# of text text without whitespace. I think it's best to leave such lines intact
# in the test code. Therefore:
# pylint: disable=line-too-long
# For out many ddt data cases, prefer a compact form of { .. }
# pylint: disable=bad-continuation
from . import new_loncapa_problem, load_fixture
class HintTest(unittest.TestCase):
    """Base class for tests of extended hinting functionality."""

    def correctness(self, problem_id, choice):
        """Grades the problem and returns the 'correctness' string from cmap."""
        cmap = self.problem.grade_answers(answers={problem_id: choice})  # pylint: disable=no-member
        return cmap[problem_id]['correctness']

    def get_hint(self, problem_id, choice):
        """Grades the problem and returns its hint from cmap or the empty string."""
        cmap = self.problem.grade_answers(answers={problem_id: choice})  # pylint: disable=no-member
        entry = cmap.cmap.get(problem_id)
        return entry['msg'] if entry else ''
# It is a little surprising how much more complicated TextInput is than all the other cases.
@ddt
class TextInputHintsTest(HintTest):
"""
Test Text Input Hints Test
"""
xml = load_fixture('extended_hints_text_input.xml')
problem = new_loncapa_problem(xml)
def test_tracking_log(self):
"""Test that the tracking log comes out right."""
self.problem.capa_module.reset_mock()
self.get_hint(u'1_3_1', u'Blue')
self.problem.capa_module.runtime.track_function.assert_called_with(
'edx.problem.hint.feedback_displayed',
{'module_id': 'i4x://Foo/bar/mock/abc',
'problem_part_id': '1_2',
'trigger_type': 'single',
'hint_label': u'Correct',
'correctness': True,
'student_answer': [u'Blue'],
'question_type': 'stringresponse',
'hints': [{'text': 'The red light is scattered by water molecules leaving only blue light.'}]}
)
@data(
{'problem_id': u'1_2_1', u'choice': u'GermanyΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">I do not think so.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'franceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'FranceΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Viva la France!Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'Mexico',
'expected_string': ''},
{'problem_id': u'1_2_1', u'choice': u'USAΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'usaΩ',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Less well known, but yes, there is a Paris, Texas.Ω</div></div>'},
{'problem_id': u'1_2_1', u'choice': u'uSAxΩ',
'expected_string': u''},
{'problem_id': u'1_2_1', u'choice': u'NICKLANDΩ',
'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">The country name does not end in LANDΩ</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'Blue',
'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">The red light is scattered by water molecules leaving only blue light.</div></div>'},
{'problem_id': u'1_3_1', u'choice': u'blue',
'expected_string': u''},
{'problem_id': u'1_3_1', u'choice': u'b',
'expected_string': u''},
)
@unpack
def test_text_input_hints(self, problem_id, choice, expected_string):
hint = self.get_hint(problem_id, choice)
self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseInsensitive(HintTest):
    """Test Text Input Extended hints Case Insensitive"""
    xml = load_fixture('extended_hints_text_input.xml')
    problem = new_loncapa_problem(xml)

    @data(
        {'problem_id': u'1_5_1', 'choice': 'abc', 'expected_string': ''},  # wrong answer yielding no hint
        {'problem_id': u'1_5_1', 'choice': 'A', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
        # lower-case form of the same answer yields the identical hint
        {'problem_id': u'1_5_1', 'choice': 'a', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-label">Woo Hoo: </div><div class="hint-text">hint1</div></div>'},
        {'problem_id': u'1_5_1', 'choice': 'B', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_5_1', 'choice': 'b', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_5_1', 'choice': 'C', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
        {'problem_id': u'1_5_1', 'choice': 'c', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-text">hint4</div></div>'},
        # regexp cases
        {'problem_id': u'1_5_1', 'choice': 'FGGG', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
        {'problem_id': u'1_5_1', 'choice': 'fgG', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-text">hint6</div></div>'},
    )
    @unpack
    def test_text_input_hints(self, problem_id, choice, expected_string):
        """Upper- and lower-case variants of an answer must produce the same hint HTML."""
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
@ddt
class TextInputExtendedHintsCaseSensitive(HintTest):
    """Test Text Input Extended hints with case-sensitive matching: only the
    exact-case answer triggers a hint, other casings get empty feedback."""
    xml = load_fixture('extended_hints_text_input.xml')
    problem = new_loncapa_problem(xml)

    @data(
        {'problem_id': u'1_6_1', 'choice': 'abc', 'expected_string': ''},
        {'problem_id': u'1_6_1', 'choice': 'A', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
        # case-sensitive: the lower-case form gets no hint
        {'problem_id': u'1_6_1', 'choice': 'a', 'expected_string': u''},
        {'problem_id': u'1_6_1', 'choice': 'B', 'expected_string':
         u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_6_1', 'choice': 'b', 'expected_string': u''},
        {'problem_id': u'1_6_1', 'choice': 'C', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
        {'problem_id': u'1_6_1', 'choice': 'c', 'expected_string': u''},
        # regexp cases
        {'problem_id': u'1_6_1', 'choice': 'FGG', 'expected_string':
         u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
        {'problem_id': u'1_6_1', 'choice': 'fgG', 'expected_string': u''},
    )
    @unpack
    def test_text_input_hints(self, problem_id, choice, expected_string):
        """Exact-case answers produce feedback; other casings yield the empty string."""
        message_text = self.get_hint(problem_id, choice)
        self.assertEqual(message_text, expected_string)
@ddt
class TextInputExtendedHintsCompatible(HintTest):
    """
    Compatibility test with mixed old and new style additional_answer tags.
    """
    xml = load_fixture('extended_hints_text_input.xml')
    problem = new_loncapa_problem(xml)

    @data(
        {'problem_id': u'1_7_1', 'choice': 'A', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
        {'problem_id': u'1_7_1', 'choice': 'B', 'correct': 'correct', 'expected_string': ''},
        {'problem_id': u'1_7_1', 'choice': 'C', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_7_1', 'choice': 'D', 'correct': 'incorrect', 'expected_string': ''},
        # check going through conversion with difficult chars
        {'problem_id': u'1_7_1', 'choice': """<&"'>""", 'correct': 'correct', 'expected_string': ''},
    )
    @unpack
    def test_text_input_hints(self, problem_id, choice, correct, expected_string):
        """Both the displayed hint and the graded correctness must match expectations."""
        message_text = self.get_hint(problem_id, choice)
        self.assertEqual(message_text, expected_string)
        self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class TextInputExtendedHintsRegex(HintTest):
    """
    Extended hints where the answer is regex mode.
    """
    xml = load_fixture('extended_hints_text_input.xml')
    problem = new_loncapa_problem(xml)

    @data(
        {'problem_id': u'1_8_1', 'choice': 'ABwrong', 'correct': 'incorrect', 'expected_string': ''},
        # several strings matching the same regex all receive hint1
        {'problem_id': u'1_8_1', 'choice': 'ABC', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'ABBBBC', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'aBc', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint1</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'BBBB', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'bbb', 'correct': 'correct',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">hint2</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'C', 'correct': 'incorrect',
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'c', 'correct': 'incorrect',
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint4</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'D', 'correct': 'incorrect',
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
        {'problem_id': u'1_8_1', 'choice': 'd', 'correct': 'incorrect',
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">hint6</div></div>'},
    )
    @unpack
    def test_text_input_hints(self, problem_id, choice, correct, expected_string):
        """Regex-matched answers receive the hint attached to the matching pattern."""
        message_text = self.get_hint(problem_id, choice)
        self.assertEqual(message_text, expected_string)
        self.assertEqual(self.correctness(problem_id, choice), correct)
@ddt
class NumericInputHintsTest(HintTest):
    """
    This class consists of a suite of test cases to be run on the numeric input problem represented by the XML below.
    """
    xml = load_fixture('extended_hints_numeric_input.xml')
    problem = new_loncapa_problem(xml)  # this problem is properly constructed

    def test_tracking_log(self):
        """A correct answer emits a feedback_displayed event carrying the hint payload."""
        self.get_hint(u'1_2_1', u'1.141')
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
             'hint_label': u'Nice',
             'correctness': True,
             'student_answer': [u'1.141'],
             'question_type': 'numericalresponse',
             'hints': [{'text': 'The square root of two turns up in the strangest places.'}]}
        )

    @data(
        {'problem_id': u'1_2_1', 'choice': '1.141',
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
        {'problem_id': u'1_3_1', 'choice': '4',
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Pretty easy, uh?.</div></div>'},
        # should get hint, when correct via numeric-tolerance
        {'problem_id': u'1_2_1', 'choice': '1.15',
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Nice: </div><div class="hint-text">The square root of two turns up in the strangest places.</div></div>'},
        # when they answer wrong, nothing
        {'problem_id': u'1_2_1', 'choice': '2', 'expected_string': ''},
    )
    @unpack
    def test_numeric_input_hints(self, problem_id, choice, expected_string):
        """Numeric answers (including within tolerance) produce the expected hint HTML."""
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
@ddt
class CheckboxHintsTest(HintTest):
    """
    This class consists of a suite of test cases to be run on the checkbox problem represented by the XML below.
    """
    xml = load_fixture('extended_hints_checkbox.xml')
    problem = new_loncapa_problem(xml)  # this problem is properly constructed

    @data(
        # single-selection cases: per-choice selected/unselected hints are combined
        {'problem_id': u'1_2_1', 'choice': [u'choice_0'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_1'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">Mushroom is a fungus, not a fruit.</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_2'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_3'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_4'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">Remember that apple is also a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">Remember that grape is also a fruit.</div><div class="hint-text">I do not know what a Camero is but it is not a fruit.</div></div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_1'],  # compound
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Almost right: </div><div class="hint-text">You are right that apple is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_1', u'choice_2'],  # compound
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">You are right that grape is a fruit, but there is one you are missing. Also, mushroom is not a fruit.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': [u'choice_0', u'choice_2'],
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">You are right that apple is a fruit.</div><div class="hint-text">You are right that mushrooms are not fruit</div><div class="hint-text">You are right that grape is a fruit</div><div class="hint-text">What is a camero anyway?</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_0'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_1'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_2'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_3'],
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">You are right that mushrooms are not vegatbles</div><div class="hint-text">Brussel sprouts are vegetables.</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_1'],  # compound
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Very funny: </div><div class="hint-text">Making a banana split?</div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_1', u'choice_2'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">poor banana.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
        {'problem_id': u'1_3_1', 'choice': [u'choice_0', u'choice_2'],
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">No, sorry, a banana is a fruit.</div><div class="hint-text">Mushroom is a fungus, not a vegetable.</div><div class="hint-text">Brussel sprout is the only vegetable in this list.</div></div></div>'},
        # check for interaction between compoundhint and correct/incorrect
        {'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_1'],  # compound
         'expected_string': u'<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AB</div></div>'},
        {'problem_id': u'1_4_1', 'choice': [u'choice_0', u'choice_2'],  # compound
         'expected_string': u'<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AC</div></div>'},
        # check for labeling where multiple child hints have labels
        # These are some tricky cases
        {'problem_id': '1_5_1', 'choice': ['choice_0', 'choice_1'],
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">AA: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
        {'problem_id': '1_5_1', 'choice': ['choice_0'],
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
        {'problem_id': '1_5_1', 'choice': ['choice_1'],
         'expected_string': ''},
        {'problem_id': '1_5_1', 'choice': [],
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">BB: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
        {'problem_id': '1_6_1', 'choice': ['choice_0'],
         'expected_string': '<div class="feedback-hint-incorrect"><div class="feedback-hint-multi"><div class="hint-text">aa</div></div></div>'},
        {'problem_id': '1_6_1', 'choice': ['choice_0', 'choice_1'],
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-text">compoundo</div></div>'},
        # The user selects *nothing*, but can still get "unselected" feedback
        {'problem_id': '1_7_1', 'choice': [],
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="feedback-hint-multi"><div class="hint-text">bb</div></div></div>'},
        # 100% not match of sel/unsel feedback
        {'problem_id': '1_7_1', 'choice': ['choice_1'],
         'expected_string': ''},
        # Here we have the correct combination, and that makes feedback too
        {'problem_id': '1_7_1', 'choice': ['choice_0'],
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="feedback-hint-multi"><div class="hint-text">aa</div><div class="hint-text">bb</div></div></div>'},
    )
    @unpack
    def test_checkbox_hints(self, problem_id, choice, expected_string):
        """The full combined hint HTML for a checkbox selection matches exactly."""
        self.maxDiff = None  # pylint: disable=invalid-name
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
class CheckboxHintsTestTracking(HintTest):
    """
    Test the rather complicated tracking log output for checkbox cases.
    """
    xml = """
        <problem>
            <p>question</p>
            <choiceresponse>
            <checkboxgroup>
                <choice correct="true">Apple
                <choicehint selected="true">A true</choicehint>
                <choicehint selected="false">A false</choicehint>
                </choice>
                <choice correct="false">Banana
                </choice>
                <choice correct="true">Cronut
                <choicehint selected="true">C true</choicehint>
                </choice>
                <compoundhint value="A C">A C Compound</compoundhint>
            </checkboxgroup>
            </choiceresponse>
        </problem>
    """
    problem = new_loncapa_problem(xml)

    def test_tracking_log(self):
        """Test checkbox tracking log - by far the most complicated case"""
        # A -> 1 hint
        self.get_hint(u'1_2_1', [u'choice_0'])
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'hint_label': u'Incorrect',
             'module_id': 'i4x://Foo/bar/mock/abc',
             'problem_part_id': '1_1',
             'choice_all': ['choice_0', 'choice_1', 'choice_2'],
             'correctness': False,
             'trigger_type': 'single',
             'student_answer': [u'choice_0'],
             'hints': [{'text': 'A true', 'trigger': [{'choice': 'choice_0', 'selected': True}]}],
             'question_type': 'choiceresponse'}
        )

        # B C -> 2 hints
        self.problem.capa_module.runtime.track_function.reset_mock()
        self.get_hint(u'1_2_1', [u'choice_1', u'choice_2'])
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'hint_label': u'Incorrect',
             'module_id': 'i4x://Foo/bar/mock/abc',
             'problem_part_id': '1_1',
             'choice_all': ['choice_0', 'choice_1', 'choice_2'],
             'correctness': False,
             'trigger_type': 'single',
             'student_answer': [u'choice_1', u'choice_2'],
             'hints': [
                 {'text': 'A false', 'trigger': [{'choice': 'choice_0', 'selected': False}]},
                 {'text': 'C true', 'trigger': [{'choice': 'choice_2', 'selected': True}]}
             ],
             'question_type': 'choiceresponse'}
        )

        # A C -> 1 Compound hint
        self.problem.capa_module.runtime.track_function.reset_mock()
        self.get_hint(u'1_2_1', [u'choice_0', u'choice_2'])
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'hint_label': u'Correct',
             'module_id': 'i4x://Foo/bar/mock/abc',
             'problem_part_id': '1_1',
             'choice_all': ['choice_0', 'choice_1', 'choice_2'],
             'correctness': True,
             'trigger_type': 'compound',
             'student_answer': [u'choice_0', u'choice_2'],
             'hints': [
                 {'text': 'A C Compound',
                  'trigger': [{'choice': 'choice_0', 'selected': True}, {'choice': 'choice_2', 'selected': True}]}
             ],
             'question_type': 'choiceresponse'}
        )
@ddt
class MultpleChoiceHintsTest(HintTest):
    """
    This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
    """
    # NOTE(review): class name misspells "Multiple"; left as-is because the
    # name is the block's external interface.
    xml = load_fixture('extended_hints_multiple_choice.xml')
    problem = new_loncapa_problem(xml)

    def test_tracking_log(self):
        """Test that the tracking log comes out right."""
        self.problem.capa_module.reset_mock()
        self.get_hint(u'1_3_1', u'choice_2')
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
             'student_answer': [u'choice_2'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
             'hint_label': 'OOPS', 'hints': [{'text': 'Apple is a fruit.'}]}
        )

    @data(
        {'problem_id': u'1_2_1', 'choice': u'choice_0',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-text">Mushroom is a fungus, not a fruit.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': u'choice_1',
         'expected_string': ''},
        {'problem_id': u'1_3_1', 'choice': u'choice_1',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">Potato is a root vegetable.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': u'choice_2',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">OUTSTANDING: </div><div class="hint-text">Apple is indeed a fruit.</div></div>'},
        {'problem_id': u'1_3_1', 'choice': u'choice_2',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">OOPS: </div><div class="hint-text">Apple is a fruit.</div></div>'},
        # a choice id that does not exist yields no hint
        {'problem_id': u'1_3_1', 'choice': u'choice_9',
         'expected_string': ''},
    )
    @unpack
    def test_multiplechoice_hints(self, problem_id, choice, expected_string):
        """Each selected choice maps to its configured hint HTML (or '' when none)."""
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
@ddt
class MultpleChoiceHintsWithHtmlTest(HintTest):
    """
    This class consists of a suite of test cases to be run on the multiple choice problem represented by the XML below.
    """
    xml = load_fixture('extended_hints_multiple_choice_with_html.xml')
    problem = new_loncapa_problem(xml)

    def test_tracking_log(self):
        """Test that the tracking log comes out right."""
        self.problem.capa_module.reset_mock()
        self.get_hint(u'1_2_1', u'choice_0')
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_1', 'trigger_type': 'single',
             'student_answer': [u'choice_0'], 'correctness': False, 'question_type': 'multiplechoiceresponse',
             'hint_label': 'Incorrect', 'hints': [{'text': 'Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.'}]}
        )

    @data(
        # embedded markup in the hint text must survive into the rendered feedback
        {'problem_id': u'1_2_1', 'choice': u'choice_0',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Mushroom <img src="#" ale="#"/>is a fungus, not a fruit.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': u'choice_1',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">Potato is <img src="#" ale="#"/> not a fruit.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': u'choice_2',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text"><a href="#">Apple</a> is a fruit.</div></div>'}
    )
    @unpack
    def test_multiplechoice_hints(self, problem_id, choice, expected_string):
        """Hints containing HTML markup are passed through unescaped."""
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
@ddt
class DropdownHintsTest(HintTest):
    """
    This class consists of a suite of test cases to be run on the drop down problem represented by the XML below.
    """
    xml = load_fixture('extended_hints_dropdown.xml')
    problem = new_loncapa_problem(xml)

    def test_tracking_log(self):
        """Test that the tracking log comes out right."""
        self.problem.capa_module.reset_mock()
        self.get_hint(u'1_3_1', u'FACES')
        self.problem.capa_module.runtime.track_function.assert_called_with(
            'edx.problem.hint.feedback_displayed',
            {'module_id': 'i4x://Foo/bar/mock/abc', 'problem_part_id': '1_2', 'trigger_type': 'single',
             'student_answer': [u'FACES'], 'correctness': True, 'question_type': 'optionresponse',
             'hint_label': 'Correct', 'hints': [{'text': 'With lots of makeup, doncha know?'}]}
        )

    @data(
        {'problem_id': u'1_2_1', 'choice': 'Multiple Choice',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Good Job: </div><div class="hint-text">Yes, multiple choice is the right answer.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': 'Text Input',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, text input problems do not present options.</div></div>'},
        {'problem_id': u'1_2_1', 'choice': 'Numerical Input',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">No, numerical input problems do not present options.</div></div>'},
        {'problem_id': u'1_3_1', 'choice': 'FACES',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">With lots of makeup, doncha know?</div></div>'},
        {'problem_id': u'1_3_1', 'choice': 'dogs',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">NOPE: </div><div class="hint-text">Not dogs, not cats, not toads</div></div>'},
        # an option not present in the problem yields no feedback
        {'problem_id': u'1_3_1', 'choice': 'wrongo',
         'expected_string': ''},
        # Regression case where feedback includes answer substring
        {'problem_id': u'1_4_1', 'choice': 'AAA',
         'expected_string': '<div class="feedback-hint-incorrect"><div class="hint-label">Incorrect: </div><div class="hint-text">AAABBB1</div></div>'},
        {'problem_id': u'1_4_1', 'choice': 'BBB',
         'expected_string': '<div class="feedback-hint-correct"><div class="hint-label">Correct: </div><div class="hint-text">AAABBB2</div></div>'},
        {'problem_id': u'1_4_1', 'choice': 'not going to match',
         'expected_string': ''},
    )
    @unpack
    def test_dropdown_hints(self, problem_id, choice, expected_string):
        """Each dropdown option maps to its configured hint HTML (or '' when none)."""
        hint = self.get_hint(problem_id, choice)
        self.assertEqual(hint, expected_string)
class ErrorConditionsTest(HintTest):
    """
    Erroneous xml should raise exception.
    """
    def test_error_conditions_illegal_element(self):
        """Constructing a problem from a fixture with bad hint markup must raise."""
        xml_with_errors = load_fixture('extended_hints_with_errors.xml')
        with self.assertRaises(Exception):
            new_loncapa_problem(xml_with_errors)  # this problem is improperly constructed
| agpl-3.0 |
JeongJunSik/TizenRT | external/iotjs/deps/jerry/tools/gen-magic-strings.py | 27 | 10583 | #!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import argparse
import fileinput
import json
import os
import re
from settings import PROJECT_DIR
# Input (magic string definitions) and output (generated include) files,
# both located under jerry-core/lit/ in the repository.
MAGIC_STRINGS_INI = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.ini')
MAGIC_STRINGS_INC_H = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.inc.h')
def debug_dump(obj):
    """Return a JSON-formatted string representation of *obj* for debug output.

    Containers are converted recursively so that members JSON cannot encode
    directly are still printable: tuples become lists, set members and
    non-string dict keys are replaced by their repr().
    """
    def deepcopy(obj):
        # Only the container types that occur in the magic string data
        # structures need special handling; scalars pass through unchanged.
        if isinstance(obj, (list, tuple)):
            return [deepcopy(e) for e in obj]
        if isinstance(obj, set):
            # Sort the reprs: set iteration order is unspecified, and an
            # unsorted dump would make debug output unstable across runs,
            # defeating its use for diffing.
            return sorted(repr(e) for e in obj)
        if isinstance(obj, dict):
            return {repr(k): deepcopy(e) for k, e in obj.items()}
        return obj

    return json.dumps(deepcopy(obj), indent=4)
def read_magic_string_defs(debug=False):
    """Load the magic string definitions from lit-magic-strings.ini.

    Entries of the form

        [LIT_MAGIC_STRINGS]
        LIT_MAGIC_STRING_xxx = "vvv"

    are returned as [('LIT_MAGIC_STRING_xxx', 'vvv'), ...], ordered by
    string length first, then alphabetically.
    """
    parser = ConfigParser()
    parser.optionxform = str  # keep option names (magic string IDs) case sensitive
    parser.read(MAGIC_STRINGS_INI)

    definitions = []
    for str_ref, raw_value in parser.items('LIT_MAGIC_STRINGS'):
        # Values are JSON-quoted strings; an empty value stands for ''.
        definitions.append((str_ref, json.loads(raw_value) if raw_value != '' else ''))
    definitions.sort(key=lambda ref_value: (len(ref_value[1]), ref_value[1]))

    if debug:
        print('debug: magic string definitions: {dump}'
              .format(dump=debug_dump(definitions)))

    return definitions
def extract_magic_string_refs(debug=False):
    """Scan all .c/.h files under jerry-core for magic string references.

    Returns a dict mapping each referenced LIT_MAGIC_STRING_* identifier to
    a dict of preprocessor-guard tuples to (file, line) locations.
    """
    results = {}

    def process_line(fname, lnum, line, guard_stack):
        # Build `results` dictionary as
        #   results['LIT_MAGIC_STRING_xxx'][('!defined (CONFIG_DISABLE_yyy_BUILTIN)', ...)]
        #       = [('zzz.c', 123), ...]
        # meaning that the given literal is referenced under the given guards at
        # the listed (file, line number) locations.
        for str_ref in re.findall('LIT_MAGIC_STRING_[a-zA-Z0-9_]+', line):
            # Skip the framework macros; they are not magic string IDs.
            if str_ref in ['LIT_MAGIC_STRING_DEF',
                           'LIT_MAGIC_STRING_FIRST_STRING_WITH_SIZE',
                           'LIT_MAGIC_STRING_LENGTH_LIMIT',
                           'LIT_MAGIC_STRING__COUNT']:
                continue
            # Flatten the stack of nested guards into one sorted tuple so it
            # can serve as a dictionary key.
            guard_set = set()
            for guards in guard_stack:
                guard_set.update(guards)
            guard_tuple = tuple(sorted(guard_set))
            if str_ref not in results:
                results[str_ref] = {}
            str_guards = results[str_ref]
            if guard_tuple not in str_guards:
                str_guards[guard_tuple] = []
            file_list = str_guards[guard_tuple]
            file_list.append((fname, lnum))

    def process_guard(guard):
        # Transform `#ifndef MACRO` to `#if !defined (MACRO)` and
        # `#ifdef MACRO` to `#if defined (MACRO)` to enable or-ing/and-ing the
        # conditions later on.
        if guard.startswith('ndef '):
            guard = guard.replace('ndef ', '!defined (', 1) + ')'
        elif guard.startswith('def '):
            guard = guard.replace('def ', 'defined (', 1) + ')'
        return guard

    def process_file(fname):
        # Builds `guard_stack` list for each line of a file as
        #   [['!defined (CONFIG_DISABLE_yyy_BUILTIN)', ...], ...]
        # meaning that all the listed guards (conditionals) have to hold for the
        # line to be kept by the preprocessor.
        guard_stack = []
        for line in fileinput.input(fname):
            if_match = re.match('^# *if(.*)', line)
            elif_match = re.match('^# *elif(.*)', line)
            else_match = re.match('^# *else', line)
            endif_match = re.match('^# *endif', line)
            if if_match is not None:
                guard_stack.append([process_guard(if_match.group(1))])
            elif elif_match is not None:
                # An #elif implies the negation of the previous branch's guard.
                guards = guard_stack[-1]
                guards[-1] = '!(%s)' % guards[-1]
                guards.append(process_guard(elif_match.group(1)))
            elif else_match is not None:
                guards = guard_stack[-1]
                guards[-1] = '!(%s)' % guards[-1]
            elif endif_match is not None:
                guard_stack.pop()
            lnum = fileinput.filelineno()
            process_line(fname, lnum, line, guard_stack)
        if guard_stack:
            print('warning: {fname}: unbalanced preprocessor conditional '
                  'directives (analysis finished with no closing `#endif` '
                  'for {guard_stack})'
                  .format(fname=fname, guard_stack=guard_stack))

    for root, _, files in os.walk(os.path.join(PROJECT_DIR, 'jerry-core')):
        for fname in files:
            # The generated include itself must not count as a reference.
            if (fname.endswith('.c') or fname.endswith('.h')) \
               and fname != 'lit-magic-strings.inc.h':
                process_file(os.path.join(root, fname))

    if debug:
        print('debug: magic string references: {dump}'
              .format(dump=debug_dump(results)))

    return results
def calculate_magic_string_guards(defs, uses, debug=False):
    """Pair each referenced magic string with its minimal set of guards.

    *defs* is the (ref, value) list from read_magic_string_defs(); *uses*
    maps refs to {guard_tuple: locations} as produced by
    extract_magic_string_refs().  Returns (ref, value, guards) triples;
    definitions never referenced are dropped with a warning.
    """
    extended_defs = []

    for str_ref, str_value in defs:
        if str_ref not in uses:
            print('warning: unused magic string {str_ref}'
                  .format(str_ref=str_ref))
            continue

        # Keep only the weakest (most generic) guards: when the guards are
        # or-ed together, a guard whose condition set is a proper subset of
        # another's makes the stricter one redundant.  E.g.,
        #   guard1 = A and B and C and D and E and F
        #   guard2 = A and B and C
        # then guard1 or guard2 == guard2.
        candidates = [set(guard_tuple) for guard_tuple in uses[str_ref].keys()]
        for idx, weaker in enumerate(candidates):
            if weaker is None:
                continue
            for jdx, stronger in enumerate(candidates):
                if jdx == idx or stronger is None:
                    continue
                if weaker < stronger:
                    candidates[jdx] = None
        guards = {tuple(sorted(guard)) for guard in candidates if guard is not None}

        extended_defs.append((str_ref, str_value, guards))

    if debug:
        print('debug: magic string definitions (with guards): {dump}'
              .format(dump=debug_dump(extended_defs)))

    return extended_defs
def guards_to_str(guards):
    """Render a collection of guard tuples as one C preprocessor condition.

    Conditions inside a tuple are joined with `&&`; the tuples themselves
    are joined with a line-continued `||`.  Both levels are sorted so the
    output is deterministic.
    """
    clauses = [' && '.join(sorted(guard)) for guard in sorted(guards)]
    return ' \\\n|| '.join(clauses)
def generate_header(gen_file):
    """Write the license banner and "generated file" notice to *gen_file*.

    The %s placeholders are filled with the names of this generator script
    and of the ini file the definitions come from.
    """
    header = \
"""/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file is automatically generated by the %s script
 * from %s. Do not edit! */
""" % (os.path.basename(__file__), os.path.basename(MAGIC_STRINGS_INI))
    print(header, file=gen_file)
def generate_magic_string_defs(gen_file, defs):
    """Emit one LIT_MAGIC_STRING_DEF line per definition, wrapped in guards.

    *defs* holds the (ref, value, guards) triples produced by
    calculate_magic_string_guards().  Consecutive definitions sharing the
    same guard set are emitted under a single #if/#endif pair.
    """
    print(file=gen_file)  # empty line separator

    # `last_guards` tracks the guard set currently "open" in the output;
    # an empty tuple () inside the set means "unconditional" (no #if).
    last_guards = set([()])
    for str_ref, str_value, guards in defs:
        if last_guards != guards:
            if () not in last_guards:
                print('#endif', file=gen_file)  # close the previous #if block
            if () not in guards:
                print('#if {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
        print('LIT_MAGIC_STRING_DEF ({str_ref}, {str_value})'
              .format(str_ref=str_ref, str_value=json.dumps(str_value)), file=gen_file)
        last_guards = guards

    # Close a guard left open by the final definition.
    if () not in last_guards:
        print('#endif', file=gen_file)
def generate_first_magic_strings(gen_file, defs):
    """For every string size, emit the first magic string at least that long,
    guarded with an #if/#elif/#else chain when entries are conditional.

    NOTE(review): `max_size = len(defs[-1][1])` assumes `defs` is sorted by
    string-value length (the longest entry last) — that ordering is
    established upstream, not visible in this function; confirm if editing.
    """
    print(file=gen_file)  # empty line separator

    max_size = len(defs[-1][1])
    for size in range(max_size + 1):
        # () in the guard set means "unconditional"; start each size with
        # the unconditional state so the first guarded hit opens an #if.
        last_guards = set([()])
        for str_ref, str_value, guards in defs:
            if len(str_value) >= size:
                if () not in guards and () in last_guards:
                    # First conditional candidate: open the chain.
                    print('#if {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
                elif () not in guards and () not in last_guards:
                    # Identical guards would duplicate the previous branch.
                    if guards == last_guards:
                        continue
                    print('#elif {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
                elif () in guards and () not in last_guards:
                    # Unconditional fallback after conditional branches.
                    print('#else', file=gen_file)

                print('LIT_MAGIC_STRING_FIRST_STRING_WITH_SIZE ({size}, {str_ref})'
                      .format(size=size, str_ref=str_ref), file=gen_file)

                # An unconditional entry always matches, so the chain for
                # this size is complete.
                if () in guards:
                    break

                last_guards = guards

        if () not in last_guards:
            print('#endif', file=gen_file)
def main():
    """Regenerate the lit-magic-strings include file from the .ini source.

    Pipeline: read definitions, scan sources for references, compute the
    preprocessor guards each string needs, then emit the header.
    """
    parser = argparse.ArgumentParser(description='lit-magic-strings.inc.h generator')
    parser.add_argument('--debug', action='store_true', help='enable debug output')
    args = parser.parse_args()

    defs = read_magic_string_defs(debug=args.debug)
    uses = extract_magic_string_refs(debug=args.debug)

    extended_defs = calculate_magic_string_guards(defs, uses, debug=args.debug)

    with open(MAGIC_STRINGS_INC_H, 'w') as gen_file:
        generate_header(gen_file)
        generate_magic_string_defs(gen_file, extended_defs)
        generate_first_magic_strings(gen_file, extended_defs)


if __name__ == '__main__':
    main()
| apache-2.0 |
sharadagarwal/autorest | AutoRest/Generators/Python/Python.Tests/Expected/AcceptanceTests/BodyDictionary/autorestswaggerbatdictionaryservice/models/error.py | 104 | 1285 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
from msrest.exceptions import HttpOperationError
class Error(Model):
    """Error
    Generated msrest model for an error payload.  NOTE: this file is
    AutoRest-generated (see the file header) — hand edits are lost when
    the code is regenerated.

    :param status:
    :type status: int
    :param message:
    :type message: str
    """

    # msrest (de)serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'status': {'key': 'status', 'type': 'int'},
        'message': {'key': 'message', 'type': 'str'},
    }

    def __init__(self, status=None, message=None):
        self.status = status
        self.message = message
class ErrorException(HttpOperationError):
    """Server responded with exception of type: 'Error'.

    Raised so callers get the deserialized Error body attached to the
    HTTP operation failure.  (AutoRest-generated; edits are lost on
    regeneration — see file header.)

    :param deserialize: A deserializer
    :param response: Server response to be deserialized.
    """

    def __init__(self, deserialize, response, *args):
        super(ErrorException, self).__init__(deserialize, response, 'Error', *args)
| mit |
alexander-svendsen/ev3-python | examples/measurements/measure.py | 1 | 1527 | # -*- coding: utf-8 -*-
import os
import time
import psutil
import socket
FILE = socket.gethostname()
# store these as .csv files
class Measure():
    """Periodically samples CPU and memory usage of the current process.

    Rows are appended to a CSV file named after this host (``FILE + '.csv'``,
    where FILE = socket.gethostname()) in the working directory.
    Python 2 code (print statements, old psutil get_* API).
    """

    def __init__(self):
        self.pid = os.getpid()
        self.process = psutil.Process(self.pid)
        # When False, save_measurement() and make_note() become no-ops.
        self.write_to_file = True

    def start_measurement(self, interval):
        """Loop forever, sampling every `interval` seconds (never returns)."""
        # creates an empty file at the tmp location (truncates any old data)
        a = open(FILE + ".csv", "w+")
        a.close()
        while True:
            print "running"
            # NOTE: measure_cpu() itself blocks ~1s (psutil samples over a
            # 1-second window), so the real period is interval + 1.
            cpu = self.measure_cpu()
            print cpu
            mem = self.measure_memory()
            self.save_measurement(cpu, *mem)
            time.sleep(interval)

    def save_measurement(self, cpu, rss, vms, mem_percentage):
        """Append one timestamped row.  `rss` and `vms` are accepted but
        intentionally not written — only cpu and mem_percentage go to disk."""
        if not self.write_to_file:
            return
        timestamp = time.strftime("%H:%M:%S", time.gmtime())
        with open(FILE + '.csv', 'a+') as csv_file:
            print "saving to file"
            csv_file.write("{0},{1},{2}\n".format(timestamp, cpu, mem_percentage))

    def make_note(self, note):
        """Append a free-text marker row (empty measurement columns)."""
        if not self.write_to_file:
            return
        with open(FILE + '.csv', 'a+') as csv_file:
            csv_file.write(', , ,{0}\n'.format(note))

    def measure_cpu(self):
        # interval=1 makes psutil sample CPU over one second (blocking call).
        # NOTE(review): get_cpu_percent is the legacy psutil (<2.0) name;
        # newer psutil spells it cpu_percent — confirm the pinned version.
        return self.process.get_cpu_percent(interval=1)

    def measure_memory(self):
        # Returns (rss, vms, percent-of-total-memory).
        mem = self.process.get_memory_info()
        return mem[0], mem[1], self.process.get_memory_percent()
if __name__ == "__main__":
    # Standalone mode: sample this script's own process every 5 seconds.
    measure = Measure()
    measure.start_measurement(interval=5)
| mit |
MotorolaMobilityLLC/external-chromium_org | media/tools/bug_hunter/bug_hunter_test.py | 47 | 3299 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Integration tests for bug hunter."""
import csv
from optparse import Values
import os
import unittest
from bug_hunter import BugHunter
try:
  import gdata.data
  import gdata.projecthosting.client
except ImportError:
  # BUG FIX: the error path used `logging` and `sys` without importing
  # them (only csv/optparse/os/unittest are imported above), so a missing
  # gdata package raised NameError instead of printing this message.
  # They are imported locally since this branch is the only user.
  import logging
  import sys
  logging.error('gdata-client needs to be installed. Please install\n'
                'and try again (http://code.google.com/p/gdata-python-client/)')
  sys.exit(1)
class BugHunterTest(unittest.TestCase):
  """Unit tests for the Bug Hunter class.

  NOTE(review): the issue/comment tests query the live chromium issue
  tracker through BugHunter, so expected counts (e.g. 18 issues) and
  comment authors depend on remote data — these are integration tests,
  as the module docstring says.  Python 2 (`reader.next()`).
  """
  # Scratch CSV path used by the write test; removed before and after each test.
  _TEST_FILENAME = 'test.csv'

  def _CleanTestFile(self):
    if os.path.exists(self._TEST_FILENAME):
      os.remove(self._TEST_FILENAME)

  def setUp(self):
    self._CleanTestFile()

  def tearDown(self):
    self._CleanTestFile()

  def _GetIssue(self):
    # Minimal issue dict matching the shape WriteIssuesToFileInCSV expects.
    return [{'issue_id': '0', 'title': 'title', 'author': 'author',
             'status': 'status', 'state': 'state', 'content': 'content',
             'comments': [], 'labels': [], 'urls': []}]

  def _GetDefaultOption(self, set_10_days_ago, query='steps'):
    # Build an optparse.Values stand-in carrying the options BugHunter reads.
    ops = Values()
    ops.query = query
    if set_10_days_ago:
      ops.interval_value = 10
      ops.interval_unit = 'days'
    else:
      ops.interval_value = None
    ops.email_entries = ['comments']
    ops.project_name = 'chromium'
    ops.query_title = 'query title'
    ops.max_comments = None
    return ops

  def testGetIssueReturnedIssue(self):
    # Fixed date window so the expected issue count stays stable.
    bh = BugHunter(
        self._GetDefaultOption(False,
                               query=('audio opened-after:2010/10/10'
                                      ' opened-before:2010/10/20')))
    self.assertEquals(len(bh.GetIssues()), 18)

  def testGetIssueReturnedIssueWithStatus(self):
    ops = self._GetDefaultOption(False)
    ops.query = 'Feature:Media* Status:Unconfirmed'
    issues = BugHunter(ops).GetIssues()
    for issue in issues:
      self.assertEquals(issue['status'], 'Unconfirmed')

  def testGetIssueReturnNoIssue(self):
    ops = self._GetDefaultOption(True)
    # Deliberately unmatchable query -> empty result set.
    ops.query = 'thisshouldnotmatchpleaseignorethis*'
    self.assertFalse(BugHunter(ops).GetIssues())

  def testGetComments(self):
    comments = BugHunter(self._GetDefaultOption(False)).GetComments(100000, 2)
    self.assertEquals(len(comments), 2)
    expected_comments = [(None, 'rby...@chromium.org',
                          '2011-10-31T19:54:40.000Z'),
                         (None, 'backer@chromium.org',
                          '2011-10-14T13:59:37.000Z')]
    self.assertEquals(comments, expected_comments)

  def testWriteIssuesToFileInCSV(self):
    ops = self._GetDefaultOption(False)
    bh = BugHunter(ops)
    bh.WriteIssuesToFileInCSV(self._GetIssue(), self._TEST_FILENAME)
    with open(self._TEST_FILENAME, 'r') as f:
      reader = csv.reader(f)
      # First row is the header, second the serialized issue dict.
      self.assertEquals(reader.next(), ['status', 'content', 'state',
                                        'issue_id', 'urls', 'title', 'labels',
                                        'author', 'comments'])
      self.assertEquals(reader.next(), ['status', 'content', 'state', '0',
                                        '[]', 'title', '[]', 'author', '[]'])
      self.assertRaises(StopIteration, reader.next)
| bsd-3-clause |
evshiron/shadowsocks | shadowsocks/crypto/openssl.py | 1038 | 5414 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
from ctypes import c_char_p, c_int, c_long, byref,\
create_string_buffer, c_void_p
from shadowsocks import common
from shadowsocks.crypto import util
__all__ = ['ciphers']
libcrypto = None
loaded = False
buf_size = 2048
def load_openssl():
    """Locate libcrypto, declare the EVP entry points used by this module,
    and allocate the shared output buffer.

    Sets the module-level `loaded` flag so callers (OpenSSLCrypto.__init__)
    only pay this cost once.  Raises if no usable libcrypto is found.
    """
    global loaded, libcrypto, buf

    # 'eay32' covers the Windows DLL name (libeay32.dll).
    libcrypto = util.find_library(('crypto', 'eay32'),
                                  'EVP_get_cipherbyname',
                                  'libcrypto')
    if libcrypto is None:
        raise Exception('libcrypto(OpenSSL) not found')

    # Declare restype/argtypes so ctypes passes 64-bit pointers correctly
    # instead of truncating them to C ints.
    libcrypto.EVP_get_cipherbyname.restype = c_void_p
    libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p

    libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p,
                                            c_char_p, c_char_p, c_int)

    libcrypto.EVP_CipherUpdate.argtypes = (c_void_p, c_void_p, c_void_p,
                                           c_char_p, c_int)

    libcrypto.EVP_CIPHER_CTX_cleanup.argtypes = (c_void_p,)
    libcrypto.EVP_CIPHER_CTX_free.argtypes = (c_void_p,)
    # Older OpenSSL builds need explicit cipher-table registration; the
    # symbol may be absent, hence the hasattr guard.
    if hasattr(libcrypto, 'OpenSSL_add_all_ciphers'):
        libcrypto.OpenSSL_add_all_ciphers()

    # Shared output buffer reused (and grown) by OpenSSLCrypto.update().
    buf = create_string_buffer(buf_size)
    loaded = True
def load_cipher(cipher_name):
    """Fallback cipher lookup: resolve the EVP_* constructor symbol for
    `cipher_name` directly and call it, returning the EVP_CIPHER pointer
    or None when libcrypto does not export it."""
    symbol = 'EVP_' + cipher_name.replace('-', '_')
    if bytes != str:
        # Python 3: ctypes attribute names must be str, not bytes.
        symbol = str(symbol, 'utf-8')
    constructor = getattr(libcrypto, symbol, None)
    if constructor is None:
        return None
    constructor.restype = c_void_p
    return constructor()
class OpenSSLCrypto(object):
    """Stream cipher wrapper around an OpenSSL EVP cipher context.

    `op` selects direction per EVP_CipherInit_ex's `enc` flag (1 = encrypt,
    0 = decrypt; see run_method below).  Not thread-safe: update() uses the
    module-level shared output buffer.
    """

    def __init__(self, cipher_name, key, iv, op):
        self._ctx = None
        if not loaded:
            load_openssl()
        cipher_name = common.to_bytes(cipher_name)
        cipher = libcrypto.EVP_get_cipherbyname(cipher_name)
        if not cipher:
            # Some ciphers are only reachable via their EVP_* constructor.
            cipher = load_cipher(cipher_name)
        if not cipher:
            raise Exception('cipher %s not found in libcrypto' % cipher_name)
        key_ptr = c_char_p(key)
        iv_ptr = c_char_p(iv)
        self._ctx = libcrypto.EVP_CIPHER_CTX_new()
        if not self._ctx:
            raise Exception('can not create cipher context')
        r = libcrypto.EVP_CipherInit_ex(self._ctx, cipher, None,
                                        key_ptr, iv_ptr, c_int(op))
        if not r:
            self.clean()
            raise Exception('can not initialize cipher context')

    def update(self, data):
        """Encrypt/decrypt `data`, returning the produced bytes."""
        global buf_size, buf
        cipher_out_len = c_long(0)
        l = len(data)
        if buf_size < l:
            # Grow the shared buffer geometrically to amortize reallocation.
            buf_size = l * 2
            buf = create_string_buffer(buf_size)
        libcrypto.EVP_CipherUpdate(self._ctx, byref(buf),
                                   byref(cipher_out_len), c_char_p(data), l)
        # buf is copied to a str object when we access buf.raw
        return buf.raw[:cipher_out_len.value]

    def __del__(self):
        self.clean()

    def clean(self):
        """Release the cipher context.  Idempotent."""
        if self._ctx:
            libcrypto.EVP_CIPHER_CTX_cleanup(self._ctx)
            libcrypto.EVP_CIPHER_CTX_free(self._ctx)
            # BUG FIX: reset the handle so a later clean() (e.g. __del__
            # running after the __init__ failure path already cleaned up)
            # does not free the same context twice (double free).
            self._ctx = None
# Cipher registry: name -> (x, y, crypto class).  The two integers appear
# to be key length and IV length in bytes (e.g. 'aes-256-cfb': (32, 16)
# lines up with the 32-byte key / 16-byte IV run_method feeds in below),
# but confirm against the consumers of this table elsewhere in the project.
ciphers = {
    'aes-128-cfb': (16, 16, OpenSSLCrypto),
    'aes-192-cfb': (24, 16, OpenSSLCrypto),
    'aes-256-cfb': (32, 16, OpenSSLCrypto),
    'aes-128-ofb': (16, 16, OpenSSLCrypto),
    'aes-192-ofb': (24, 16, OpenSSLCrypto),
    'aes-256-ofb': (32, 16, OpenSSLCrypto),
    'aes-128-ctr': (16, 16, OpenSSLCrypto),
    'aes-192-ctr': (24, 16, OpenSSLCrypto),
    'aes-256-ctr': (32, 16, OpenSSLCrypto),
    'aes-128-cfb8': (16, 16, OpenSSLCrypto),
    'aes-192-cfb8': (24, 16, OpenSSLCrypto),
    'aes-256-cfb8': (32, 16, OpenSSLCrypto),
    'aes-128-cfb1': (16, 16, OpenSSLCrypto),
    'aes-192-cfb1': (24, 16, OpenSSLCrypto),
    'aes-256-cfb1': (32, 16, OpenSSLCrypto),
    'bf-cfb': (16, 8, OpenSSLCrypto),
    'camellia-128-cfb': (16, 16, OpenSSLCrypto),
    'camellia-192-cfb': (24, 16, OpenSSLCrypto),
    'camellia-256-cfb': (32, 16, OpenSSLCrypto),
    'cast5-cfb': (16, 8, OpenSSLCrypto),
    'des-cfb': (8, 8, OpenSSLCrypto),
    'idea-cfb': (16, 8, OpenSSLCrypto),
    'rc2-cfb': (16, 8, OpenSSLCrypto),
    'rc4': (16, 0, OpenSSLCrypto),
    'seed-cfb': (16, 16, OpenSSLCrypto),
}
def run_method(method):
    """Round-trip check for `method`: build an encryptor (op=1) and a
    decryptor (op=0, per EVP_CipherInit_ex's enc flag) with fixed key/IV
    and let util.run_cipher verify they invert each other."""
    cipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 1)
    decipher = OpenSSLCrypto(method, b'k' * 32, b'i' * 16, 0)

    util.run_cipher(cipher, decipher)


# Smoke tests, one per representative cipher mode.
def test_aes_128_cfb():
    run_method('aes-128-cfb')


def test_aes_256_cfb():
    run_method('aes-256-cfb')


def test_aes_128_cfb8():
    run_method('aes-128-cfb8')


def test_aes_256_ofb():
    run_method('aes-256-ofb')


def test_aes_256_ctr():
    run_method('aes-256-ctr')


def test_bf_cfb():
    run_method('bf-cfb')


def test_rc4():
    run_method('rc4')


if __name__ == '__main__':
    test_aes_128_cfb()
erincook/tenball | node_modules/node-gyp/gyp/PRESUBMIT.py | 1369 | 3662 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for GYP.
See http://dev.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into gcl.
"""
# Files excluded from the pylint run in CheckChangeOnCommit.
PYLINT_BLACKLIST = [
    # TODO: fix me.
    # From SCons, not done in google style.
    'test/lib/TestCmd.py',
    'test/lib/TestCommon.py',
    'test/lib/TestGyp.py',
]


# Pylint message codes suppressed for the same run; each entry keeps its
# original rationale comment.
PYLINT_DISABLED_WARNINGS = [
    # TODO: fix me.
    # Many tests include modules they don't use.
    'W0611',
    # Possible unbalanced tuple unpacking with sequence.
    'W0632',
    # Attempting to unpack a non-sequence.
    'W0633',
    # Include order doesn't properly include local files?
    'F0401',
    # Some use of built-in names.
    'W0622',
    # Some unused variables.
    'W0612',
    # Operator not preceded/followed by space.
    'C0323',
    'C0322',
    # Unnecessary semicolon.
    'W0301',
    # Unused argument.
    'W0613',
    # String has no effect (docstring in wrong place).
    'W0105',
    # map/filter on lambda could be replaced by comprehension.
    'W0110',
    # Use of eval.
    'W0123',
    # Comma not followed by space.
    'C0324',
    # Access to a protected member.
    'W0212',
    # Bad indent.
    'W0311',
    # Line too long.
    'C0301',
    # Undefined variable.
    'E0602',
    # Not exception type specified.
    'W0702',
    # No member of that name.
    'E1101',
    # Dangerous default {}.
    'W0102',
    # Cyclic import.
    'R0401',
    # Others, too many to sort.
    'W0201', 'W0232', 'E1103', 'W0621', 'W0108', 'W0223', 'W0231',
    'R0201', 'E0101', 'C0321',
    # ************* Module copy
    # W0104:427,12:_test.odict.__setitem__: Statement seems to have no effect
    'W0104',
]
def CheckChangeOnUpload(input_api, output_api):
  """Presubmit hook run on upload: standard pan-project checks only."""
  return list(
      input_api.canned_checks.PanProjectChecks(input_api, output_api))
def CheckChangeOnCommit(input_api, output_api):
  """Presubmit hook run on commit: license-header check, tree-status
  check, and a pylint pass over the change."""
  report = []

  # Accept any year number from 2009 to the current year.
  # NOTE: xrange is Python 2 only, matching the depot_tools runtime.
  current_year = int(input_api.time.strftime('%Y'))
  allowed_years = (str(s) for s in reversed(xrange(2009, current_year + 1)))
  years_re = '(' + '|'.join(allowed_years) + ')'

  # The (c) is deprecated, but tolerate it until it's removed from all files.
  # (`license` shadows the Python 2 site builtin of the same name; harmless
  # here since the builtin is never used.)
  license = (
      r'.*? Copyright (\(c\) )?%(year)s Google Inc\. All rights reserved\.\n'
      r'.*? Use of this source code is governed by a BSD-style license that '
      r'can be\n'
      r'.*? found in the LICENSE file\.\n'
  ) % {
      'year': years_re,
  }

  report.extend(input_api.canned_checks.PanProjectChecks(
      input_api, output_api, license_header=license))

  report.extend(input_api.canned_checks.CheckTreeIsOpen(
      input_api, output_api,
      'http://gyp-status.appspot.com/status',
      'http://gyp-status.appspot.com/current'))

  # Temporarily extend sys.path so pylint can resolve gyp's own modules;
  # restored in the finally block regardless of pylint failures.
  import os
  import sys
  old_sys_path = sys.path
  try:
    sys.path = ['pylib', 'test/lib'] + sys.path
    blacklist = PYLINT_BLACKLIST
    if sys.platform == 'win32':
      # RunPylint treats the blacklist as regexes, so backslashes in
      # normalized Windows paths must be escaped.
      blacklist = [os.path.normpath(x).replace('\\', '\\\\')
                   for x in PYLINT_BLACKLIST]
    report.extend(input_api.canned_checks.RunPylint(
        input_api,
        output_api,
        black_list=blacklist,
        disabled_warnings=PYLINT_DISABLED_WARNINGS))
  finally:
    sys.path = old_sys_path

  return report
# Try bots asked to run the default test suite for client.gyp changes.
TRYBOTS = [
    'linux_try',
    'mac_try',
    'win_try',
]


def GetPreferredTryMasters(_, change):
  """Map the 'client.gyp' try master to the default tests on each trybot.
  (First argument — the project — is unused.)"""
  return {
      'client.gyp': { t: set(['defaulttests']) for t in TRYBOTS },
  }
| apache-2.0 |
joequery/django | django/contrib/gis/utils/layermapping.py | 335 | 27300 | # LayerMapping -- A Django Model/OGR Layer Mapping Utility
"""
The LayerMapping class provides a way to map the contents of OGR
vector files (e.g. SHP files) to Geographic-enabled Django models.
For more information, please consult the GeoDjango documentation:
https://docs.djangoproject.com/en/dev/ref/contrib/gis/layermapping/
"""
import sys
from decimal import Decimal, InvalidOperation as DecimalInvalidOperation
from django.contrib.gis.db.models import GeometryField
from django.contrib.gis.gdal import (
CoordTransform, DataSource, GDALException, OGRGeometry, OGRGeomType,
SpatialReference,
)
from django.contrib.gis.gdal.field import (
OFTDate, OFTDateTime, OFTInteger, OFTReal, OFTString, OFTTime,
)
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import connections, models, router, transaction
from django.utils import six
from django.utils.encoding import force_text
# LayerMapping exceptions.
class LayerMapError(Exception):
    """Base exception for all LayerMapping errors."""
    pass


class InvalidString(LayerMapError):
    """Raised when a string value exceeds the model field's max_length."""
    pass


class InvalidDecimal(LayerMapError):
    """Raised when an OGR value cannot be represented by the target
    DecimalField (unparseable or too many digits)."""
    pass


class InvalidInteger(LayerMapError):
    """Raised when an OGR value cannot be converted to an integer."""
    pass


class MissingForeignKey(LayerMapError):
    """Raised when no related model instance matches a ForeignKey mapping."""
    pass
class LayerMapping(object):
    "A class that maps OGR Layers to GeoDjango Models."

    # Acceptable 'base' types for a multi-geometry type, keyed by OGR
    # geometry type number (1/2/3 are Point/LineString/Polygon in OGR;
    # the 25D variants resolve their own numbers via OGRGeomType).
    MULTI_TYPES = {1: OGRGeomType('MultiPoint'),
                   2: OGRGeomType('MultiLineString'),
                   3: OGRGeomType('MultiPolygon'),
                   OGRGeomType('Point25D').num: OGRGeomType('MultiPoint25D'),
                   OGRGeomType('LineString25D').num: OGRGeomType('MultiLineString25D'),
                   OGRGeomType('Polygon25D').num: OGRGeomType('MultiPolygon25D'),
                   }

    # Acceptable Django field types and corresponding acceptable OGR
    # counterparts.  A tuple value means any of those OGR types may feed
    # the Django field (verify_ogr_field performs the conversion).
    FIELD_TYPES = {
        models.AutoField: OFTInteger,
        models.IntegerField: (OFTInteger, OFTReal, OFTString),
        models.FloatField: (OFTInteger, OFTReal),
        models.DateField: OFTDate,
        models.DateTimeField: OFTDateTime,
        models.EmailField: OFTString,
        models.TimeField: OFTTime,
        models.DecimalField: (OFTInteger, OFTReal),
        models.CharField: OFTString,
        models.SlugField: OFTString,
        models.TextField: OFTString,
        models.URLField: OFTString,
        models.BigIntegerField: (OFTInteger, OFTReal, OFTString),
        models.SmallIntegerField: (OFTInteger, OFTReal, OFTString),
        models.PositiveSmallIntegerField: (OFTInteger, OFTReal, OFTString),
    }
    def __init__(self, model, data, mapping, layer=0,
                 source_srs=None, encoding='utf-8',
                 transaction_mode='commit_on_success',
                 transform=True, unique=None, using=None):
        """
        A LayerMapping object is initialized using the given Model (not an instance),
        a DataSource (or string path to an OGR-supported data file), and a mapping
        dictionary. See the module level docstring for more details and keyword
        argument usage.
        """
        # Getting the DataSource and the associated Layer.
        if isinstance(data, six.string_types):
            self.ds = DataSource(data, encoding=encoding)
        else:
            self.ds = data
        self.layer = self.ds[layer]

        # Database alias: explicit `using`, else whichever DB the router
        # would write this model to.
        self.using = using if using is not None else router.db_for_write(model)
        self.spatial_backend = connections[self.using].ops

        # Setting the mapping & model attributes.
        self.mapping = mapping
        self.model = model

        # Checking the layer -- initialization of the object will fail if
        # things don't check out before hand.
        self.check_layer()

        # Getting the geometry column associated with the model (an
        # exception will be raised if there is no geometry column).
        if connections[self.using].features.supports_transform:
            self.geo_field = self.geometry_field()
        else:
            # Backend can't transform geometries, so disable it regardless
            # of the caller's `transform` value.
            transform = False

        # Checking the source spatial reference system, and getting
        # the coordinate transformation object (unless the `transform`
        # keyword is set to False)
        if transform:
            self.source_srs = self.check_srs(source_srs)
            self.transform = self.coord_transform()
        else:
            self.transform = transform

        # Setting the encoding for OFTString fields, if specified.
        if encoding:
            # Making sure the encoding exists, if not a LookupError
            # exception will be thrown.
            from codecs import lookup
            lookup(encoding)
            self.encoding = encoding
        else:
            self.encoding = None

        if unique:
            self.check_unique(unique)
            transaction_mode = 'autocommit'  # Has to be set to autocommit.
            self.unique = unique
        else:
            self.unique = None

        # Setting the transaction decorator with the function in the
        # transaction modes dictionary.
        self.transaction_mode = transaction_mode
        if transaction_mode == 'autocommit':
            self.transaction_decorator = None
        elif transaction_mode == 'commit_on_success':
            self.transaction_decorator = transaction.atomic
        else:
            raise LayerMapError('Unrecognized transaction mode: %s' % transaction_mode)
# #### Checking routines used during initialization ####
def check_fid_range(self, fid_range):
"This checks the `fid_range` keyword."
if fid_range:
if isinstance(fid_range, (tuple, list)):
return slice(*fid_range)
elif isinstance(fid_range, slice):
return fid_range
else:
raise TypeError
else:
return None
    def check_layer(self):
        """
        This checks the Layer metadata, and ensures that it is compatible
        with the mapping information and model. Unlike previous revisions,
        there is no need to increment through each feature in the Layer.

        Side effects: populates `self.fields`, and `self.geom_field` /
        `self.coord_dim` when the mapping contains a geometry field.
        """
        # The geometry field of the model is set here.
        # TODO: Support more than one geometry field / model. However, this
        # depends on the GDAL Driver in use.
        self.geom_field = False
        self.fields = {}

        # Getting lists of the field names and the field types available in
        # the OGR Layer.
        ogr_fields = self.layer.fields
        ogr_field_types = self.layer.field_types

        # Function for determining if the OGR mapping field is in the Layer.
        def check_ogr_fld(ogr_map_fld):
            try:
                idx = ogr_fields.index(ogr_map_fld)
            except ValueError:
                raise LayerMapError('Given mapping OGR field "%s" not found in OGR Layer.' % ogr_map_fld)
            return idx

        # No need to increment through each feature in the model, simply check
        # the Layer metadata against what was given in the mapping dictionary.
        for field_name, ogr_name in self.mapping.items():
            # Ensuring that a corresponding field exists in the model
            # for the given field name in the mapping.
            try:
                model_field = self.model._meta.get_field(field_name)
            except FieldDoesNotExist:
                raise LayerMapError('Given mapping field "%s" not in given Model fields.' % field_name)

            # Getting the string name for the Django field class (e.g., 'PointField').
            fld_name = model_field.__class__.__name__

            if isinstance(model_field, GeometryField):
                if self.geom_field:
                    raise LayerMapError('LayerMapping does not support more than one GeometryField per model.')

                # Getting the coordinate dimension of the geometry field.
                coord_dim = model_field.dim

                try:
                    if coord_dim == 3:
                        gtype = OGRGeomType(ogr_name + '25D')
                    else:
                        gtype = OGRGeomType(ogr_name)
                except GDALException:
                    raise LayerMapError('Invalid mapping for GeometryField "%s".' % field_name)

                # Making sure that the OGR Layer's Geometry is compatible.
                ltype = self.layer.geom_type
                if not (ltype.name.startswith(gtype.name) or self.make_multi(ltype, model_field)):
                    raise LayerMapError('Invalid mapping geometry; model has %s%s, '
                                        'layer geometry type is %s.' %
                                        (fld_name, '(dim=3)' if coord_dim == 3 else '', ltype))

                # Setting the `geom_field` attribute w/the name of the model field
                # that is a Geometry. Also setting the coordinate dimension
                # attribute.
                self.geom_field = field_name
                self.coord_dim = coord_dim
                fields_val = model_field
            elif isinstance(model_field, models.ForeignKey):
                if isinstance(ogr_name, dict):
                    # Is every given related model mapping field in the Layer?
                    rel_model = model_field.remote_field.model
                    for rel_name, ogr_field in ogr_name.items():
                        # Return value unused; the call validates that the
                        # OGR field exists and raises otherwise.
                        idx = check_ogr_fld(ogr_field)
                        try:
                            rel_model._meta.get_field(rel_name)
                        except FieldDoesNotExist:
                            raise LayerMapError('ForeignKey mapping field "%s" not in %s fields.' %
                                                (rel_name, rel_model.__class__.__name__))
                    fields_val = rel_model
                else:
                    raise TypeError('ForeignKey mapping must be of dictionary type.')
            else:
                # Is the model field type supported by LayerMapping?
                if model_field.__class__ not in self.FIELD_TYPES:
                    raise LayerMapError('Django field type "%s" has no OGR mapping (yet).' % fld_name)

                # Is the OGR field in the Layer?
                idx = check_ogr_fld(ogr_name)
                ogr_field = ogr_field_types[idx]

                # Can the OGR field type be mapped to the Django field type?
                if not issubclass(ogr_field, self.FIELD_TYPES[model_field.__class__]):
                    raise LayerMapError('OGR field "%s" (of type %s) cannot be mapped to Django %s.' %
                                        (ogr_field, ogr_field.__name__, fld_name))
                fields_val = model_field

            self.fields[field_name] = fields_val
    def check_srs(self, source_srs):
        "Checks the compatibility of the given spatial reference object."
        # Accept a SpatialReference, a backend spatial_ref_sys model row,
        # an SRID integer, or a WKT/proj string; otherwise fall back to
        # whatever SRS the layer itself declares.
        if isinstance(source_srs, SpatialReference):
            sr = source_srs
        elif isinstance(source_srs, self.spatial_backend.spatial_ref_sys()):
            sr = source_srs.srs
        elif isinstance(source_srs, (int, six.string_types)):
            sr = SpatialReference(source_srs)
        else:
            # Otherwise just pulling the SpatialReference from the layer
            sr = self.layer.srs
        if not sr:
            raise LayerMapError('No source reference system defined.')
        else:
            return sr
def check_unique(self, unique):
"Checks the `unique` keyword parameter -- may be a sequence or string."
if isinstance(unique, (list, tuple)):
# List of fields to determine uniqueness with
for attr in unique:
if attr not in self.mapping:
raise ValueError
elif isinstance(unique, six.string_types):
# Only a single field passed in.
if unique not in self.mapping:
raise ValueError
else:
raise TypeError('Unique keyword argument must be set with a tuple, list, or string.')
# Keyword argument retrieval routines ####
    def feature_kwargs(self, feat):
        """
        Given an OGR Feature, this will return a dictionary of keyword arguments
        for constructing the mapped model.

        Each value is routed through the matching verify_* helper, which
        raises a LayerMapError subclass on bad data.
        """
        # The keyword arguments for model construction.
        kwargs = {}

        # Incrementing through each model field and OGR field in the
        # dictionary mapping.
        for field_name, ogr_name in self.mapping.items():
            model_field = self.fields[field_name]

            if isinstance(model_field, GeometryField):
                # Verify OGR geometry.
                try:
                    val = self.verify_geom(feat.geom, model_field)
                except GDALException:
                    raise LayerMapError('Could not retrieve geometry from feature.')
            elif isinstance(model_field, models.base.ModelBase):
                # The related _model_, not a field was passed in -- indicating
                # another mapping for the related Model.
                val = self.verify_fk(feat, model_field, ogr_name)
            else:
                # Otherwise, verify OGR Field type.
                val = self.verify_ogr_field(feat[ogr_name], model_field)

            # Setting the keyword arguments for the field name with the
            # value obtained above.
            kwargs[field_name] = val

        return kwargs
def unique_kwargs(self, kwargs):
"""
Given the feature keyword arguments (from `feature_kwargs`) this routine
will construct and return the uniqueness keyword arguments -- a subset
of the feature kwargs.
"""
if isinstance(self.unique, six.string_types):
return {self.unique: kwargs[self.unique]}
else:
return {fld: kwargs[fld] for fld in self.unique}
# #### Verification routines used in constructing model keyword arguments. ####
    def verify_ogr_field(self, ogr_field, model_field):
        """
        Verifies if the OGR Field contents are acceptable to the Django
        model field. If they are, the verified value is returned,
        otherwise the proper exception is raised.
        """
        if (isinstance(ogr_field, OFTString) and
                isinstance(model_field, (models.CharField, models.TextField))):
            if self.encoding:
                # The encoding for OGR data sources may be specified here
                # (e.g., 'cp437' for Census Bureau boundary files).
                val = force_text(ogr_field.value, self.encoding)
            else:
                val = ogr_field.value
                if model_field.max_length and len(val) > model_field.max_length:
                    raise InvalidString('%s model field maximum string length is %s, given %s characters.' %
                                        (model_field.name, model_field.max_length, len(val)))
        elif isinstance(ogr_field, OFTReal) and isinstance(model_field, models.DecimalField):
            try:
                # Creating an instance of the Decimal value to use.
                d = Decimal(str(ogr_field.value))
            except DecimalInvalidOperation:
                raise InvalidDecimal('Could not construct decimal from: %s' % ogr_field.value)

            # Getting the decimal value as a tuple.
            dtup = d.as_tuple()
            digits = dtup[1]
            d_idx = dtup[2]  # index where the decimal is

            # Maximum amount of precision, or digits to the left of the decimal.
            max_prec = model_field.max_digits - model_field.decimal_places

            # Getting the digits to the left of the decimal place for the
            # given decimal.
            if d_idx < 0:
                n_prec = len(digits[:d_idx])
            else:
                n_prec = len(digits) + d_idx

            # If we have more than the maximum digits allowed, then throw an
            # InvalidDecimal exception.
            if n_prec > max_prec:
                raise InvalidDecimal(
                    'A DecimalField with max_digits %d, decimal_places %d must '
                    'round to an absolute value less than 10^%d.' %
                    (model_field.max_digits, model_field.decimal_places, max_prec)
                )
            val = d
        elif isinstance(ogr_field, (OFTReal, OFTString)) and isinstance(model_field, models.IntegerField):
            # Attempt to convert any OFTReal and OFTString value to an OFTInteger.
            try:
                val = int(ogr_field.value)
            except ValueError:
                raise InvalidInteger('Could not construct integer from: %s' % ogr_field.value)
        else:
            # All other combinations pass the raw OGR value through.
            val = ogr_field.value
        return val
    def verify_fk(self, feat, rel_model, rel_mapping):
        """
        Given an OGR Feature, the related model and its dictionary mapping,
        this routine will retrieve the related model for the ForeignKey
        mapping.

        Raises MissingForeignKey when no related instance matches.
        """
        # TODO: It is expensive to retrieve a model for every record --
        # explore if an efficient mechanism exists for caching related
        # ForeignKey models.

        # Constructing and verifying the related model keyword arguments.
        fk_kwargs = {}
        for field_name, ogr_name in rel_mapping.items():
            fk_kwargs[field_name] = self.verify_ogr_field(feat[ogr_name], rel_model._meta.get_field(field_name))

        # Attempting to retrieve and return the related model.
        try:
            return rel_model.objects.using(self.using).get(**fk_kwargs)
        except ObjectDoesNotExist:
            raise MissingForeignKey(
                'No ForeignKey %s model found with keyword arguments: %s' %
                (rel_model.__name__, fk_kwargs)
            )
    def verify_geom(self, geom, model_field):
        """
        Verifies the geometry -- will construct and return a GeometryCollection
        if necessary (for example if the model field is MultiPolygonField while
        the mapped shapefile only contains Polygons).

        Returns the (possibly wrapped and transformed) geometry as WKT.
        """
        # Downgrade a 3D geom to a 2D one, if necessary.
        if self.coord_dim != geom.coord_dim:
            geom.coord_dim = self.coord_dim

        if self.make_multi(geom.geom_type, model_field):
            # Constructing a multi-geometry type to contain the single geometry
            multi_type = self.MULTI_TYPES[geom.geom_type.num]
            g = OGRGeometry(multi_type)
            g.add(geom)
        else:
            g = geom

        # Transforming the geometry with our Coordinate Transformation object,
        # but only if the class variable `transform` is set w/a CoordTransform
        # object.
        if self.transform:
            g.transform(self.transform)

        # Returning the WKT of the geometry.
        return g.wkt
# #### Other model methods ####
    def coord_transform(self):
        "Returns the coordinate transformation object."
        SpatialRefSys = self.spatial_backend.spatial_ref_sys()
        try:
            # Getting the target spatial reference system
            target_srs = SpatialRefSys.objects.using(self.using).get(srid=self.geo_field.srid).srs

            # Creating the CoordTransform object
            return CoordTransform(self.source_srs, target_srs)
        except Exception as msg:
            # Re-raise any failure as a LayerMapError while preserving the
            # original traceback (six.reraise).
            new_msg = 'Could not translate between the data source and model geometry: %s' % msg
            six.reraise(LayerMapError, LayerMapError(new_msg), sys.exc_info()[2])
def geometry_field(self):
"Returns the GeometryField instance associated with the geographic column."
# Use `get_field()` on the model's options so that we
# get the correct field instance if there's model inheritance.
opts = self.model._meta
return opts.get_field(self.geom_field)
def make_multi(self, geom_type, model_field):
"""
Given the OGRGeomType for a geometry and its associated GeometryField,
determine whether the geometry should be turned into a GeometryCollection.
"""
return (geom_type.num in self.MULTI_TYPES and
model_field.__class__.__name__ == 'Multi%s' % geom_type.django)
def save(self, verbose=False, fid_range=False, step=False,
         progress=False, silent=False, stream=sys.stdout, strict=False):
    """
    Saves the contents from the OGR DataSource Layer into the database
    according to the mapping dictionary given at initialization.

    Keyword Parameters:
     verbose:
       If set, information will be printed subsequent to each model save
       executed on the database.

     fid_range:
       May be set with a slice or tuple of (begin, end) feature ID's to map
       from the data source.  In other words, this keyword enables the user
       to selectively import a subset range of features in the geographic
       data source.

     step:
       If set with an integer, transactions will occur at every step
       interval. For example, if step=1000, a commit would occur after
       the 1,000th feature, the 2,000th feature etc.

     progress:
       When this keyword is set, status information will be printed giving
       the number of features processed and successfully saved.  By default,
       progress information will be printed every 1000 features processed,
       however, this default may be overridden by setting this keyword with an
       integer for the desired interval.

     stream:
       Status information will be written to this file handle.  Defaults to
       using `sys.stdout`, but any object with a `write` method is supported.

     silent:
       By default, non-fatal error notifications are printed to stdout, but
       this keyword may be set to disable these notifications.

     strict:
       Execution of the model mapping will cease upon the first error
       encountered.  The default behavior is to attempt to continue.
    """
    # Getting the default Feature ID range.
    default_range = self.check_fid_range(fid_range)

    # Setting the progress interval, if requested.
    # NOTE: `progress_interval` is only bound when `progress` is truthy; the
    # modulo check below is guarded by the same flag, so this is safe.
    if progress:
        if progress is True or not isinstance(progress, int):
            progress_interval = 1000
        else:
            progress_interval = progress

    def _save(feat_range=default_range, num_feat=0, num_saved=0):
        # Inner worker so the whole loop can optionally be wrapped by the
        # transaction decorator below; returns running totals so stepped
        # (incremental) saving can resume counts across slices.
        if feat_range:
            layer_iter = self.layer[feat_range]
        else:
            layer_iter = self.layer

        for feat in layer_iter:
            num_feat += 1
            # Getting the keyword arguments
            try:
                kwargs = self.feature_kwargs(feat)
            except LayerMapError as msg:
                # Something borked the validation
                if strict:
                    raise
                elif not silent:
                    stream.write('Ignoring Feature ID %s because: %s\n' % (feat.fid, msg))
            else:
                # Constructing the model using the keyword args
                is_update = False
                if self.unique:
                    # If we want unique models on a particular field, handle the
                    # geometry appropriately.
                    try:
                        # Getting the keyword arguments and retrieving
                        # the unique model.
                        u_kwargs = self.unique_kwargs(kwargs)
                        m = self.model.objects.using(self.using).get(**u_kwargs)
                        is_update = True

                        # Getting the geometry (in OGR form), creating
                        # one from the kwargs WKT, adding in additional
                        # geometries, and update the attribute with the
                        # just-updated geometry WKT.
                        geom = getattr(m, self.geom_field).ogr
                        new = OGRGeometry(kwargs[self.geom_field])
                        for g in new:
                            geom.add(g)
                        setattr(m, self.geom_field, geom.wkt)
                    except ObjectDoesNotExist:
                        # No unique model exists yet, create.
                        m = self.model(**kwargs)
                else:
                    m = self.model(**kwargs)

                try:
                    # Attempting to save.
                    m.save(using=self.using)
                    num_saved += 1
                    if verbose:
                        stream.write('%s: %s\n' % ('Updated' if is_update else 'Saved', m))
                except Exception as msg:
                    if strict:
                        # Bailing out if the `strict` keyword is set.
                        if not silent:
                            stream.write(
                                'Failed to save the feature (id: %s) into the '
                                'model with the keyword arguments:\n' % feat.fid
                            )
                            stream.write('%s\n' % kwargs)
                        raise
                    elif not silent:
                        stream.write('Failed to save %s:\n %s\nContinuing\n' % (kwargs, msg))

            # Printing progress information, if requested.
            if progress and num_feat % progress_interval == 0:
                stream.write('Processed %d features, saved %d ...\n' % (num_feat, num_saved))

        # Only used for status output purposes -- incremental saving uses the
        # values returned here.
        return num_saved, num_feat

    if self.transaction_decorator is not None:
        _save = self.transaction_decorator(_save)

    nfeat = self.layer.num_feat
    if step and isinstance(step, int) and step < nfeat:
        # Incremental saving is requested at the given interval (step)
        if default_range:
            raise LayerMapError('The `step` keyword may not be used in conjunction with the `fid_range` keyword.')
        beg, num_feat, num_saved = (0, 0, 0)
        indices = range(step, nfeat, step)
        n_i = len(indices)

        for i, end in enumerate(indices):
            # Constructing the slice to use for this step; the last slice is
            # special (e.g, [100:] instead of [90:100]).
            if i + 1 == n_i:
                step_slice = slice(beg, None)
            else:
                step_slice = slice(beg, end)

            try:
                num_feat, num_saved = _save(step_slice, num_feat, num_saved)
                beg = end
            except:  # Deliberately catch everything
                stream.write('%s\nFailed to save slice: %s\n' % ('=-' * 20, step_slice))
                raise
    else:
        # Otherwise, just calling the previously defined _save() function.
        _save()
| bsd-3-clause |
brandond/ansible | lib/ansible/plugins/callback/debug.py | 49 | 1773 | # (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: debug
type: stdout
short_description: formatted stdout/stderr display
description:
- Use this callback to sort through extensive debug output
version_added: "2.4"
extends_documentation_fragment:
- default_callback
requirements:
- set as stdout in configuration
'''
from ansible.plugins.callback.default import CallbackModule as CallbackModule_default
class CallbackModule(CallbackModule_default):  # pylint: disable=too-few-public-methods,no-init
    '''
    Override for the default callback module.

    Pulls std out/err (and related keys) out of a task result so they are
    rendered as readable, indented sections after the JSON dump instead of
    being buried inside it.
    '''
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'stdout'
    CALLBACK_NAME = 'debug'

    def _dump_results(self, result, indent=None, sort_keys=True, keep_invocation=False):
        '''Return the text to output for a result.'''

        # Enable JSON identation
        result['_ansible_verbose_always'] = True

        # Temporarily strip the noisy keys so the default dump stays compact.
        stashed = {
            key: result.pop(key)
            for key in ('stdout', 'stdout_lines', 'stderr', 'stderr_lines',
                        'msg', 'module_stdout', 'module_stderr')
            if key in result
        }

        text = CallbackModule_default._dump_results(self, result)

        # Re-render the interesting streams as their own labelled sections.
        for key in ('stdout', 'stderr', 'msg', 'module_stdout', 'module_stderr'):
            value = stashed.get(key)
            if value:
                text += '\n\n%s:\n\n%s\n' % (key.upper(), value)

        # Put everything back so later callbacks see the full result.
        result.update(stashed)
        return text
| gpl-3.0 |
wbrefvem/openshift-ansible | roles/openshift_health_checker/test/fluentd_config_test.py | 48 | 10009 | import pytest
from openshift_checks.logging.fluentd_config import FluentdConfig, OpenShiftCheckException
def canned_fluentd_pod(containers):
    """Build a running logging-fluentd pod fixture holding *containers*."""
    metadata = {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-1",
    }
    spec = {
        "host": "node1",
        "nodeName": "node1",
        "containers": containers,
    }
    status = {
        "phase": "Running",
        "containerStatuses": [{"ready": True}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
    return {"metadata": metadata, "spec": spec, "status": status}
# Fixture: a running fluentd pod whose single container sets USE_JOURNAL=true.
fluentd_pod = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-1",
    },
    "spec": {
        "host": "node1",
        "nodeName": "node1",
        "containers": [
            {
                "name": "container1",
                "env": [
                    {
                        "name": "USE_JOURNAL",
                        "value": "true",
                    }
                ],
            }
        ],
    },
    "status": {
        "phase": "Running",
        "containerStatuses": [{"ready": True}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
}
# Fixture: a fluentd pod stuck in an unknown phase with one unready container.
not_running_fluentd_pod = {
    "metadata": {
        "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
        "name": "logging-fluentd-2",
    },
    "status": {
        "phase": "Unknown",
        "containerStatuses": [{"ready": True}, {"ready": False}],
        "conditions": [{"status": "True", "type": "Ready"}],
    }
}
# On non-master nodes the check passes when journald is disabled and docker
# uses the matching "json-file" logging driver.
@pytest.mark.parametrize('name, use_journald, logging_driver, extra_words', [
    (
        'test success with use_journald=false, and docker config set to use "json-file"',
        False,
        "json-file",
        [],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master(name, use_journald, logging_driver, extra_words):
    # Stub that answers the check's `docker_info` call with the parametrized
    # logging driver; any other module invocation returns an empty result.
    def execute_module(module_name, args):
        if module_name == "docker_info":
            return {
                "info": {
                    "LoggingDriver": logging_driver,
                }
            }
        return {}

    task_vars = dict(
        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
        openshift_logging_fluentd_use_journal=use_journald,
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    # A None return means the logging configuration is consistent.
    error = check.check_logging_config()
    assert error is None
# On non-master nodes the check fails whenever the fluentd journald setting
# and docker's configured logging driver disagree (or the driver is unknown).
@pytest.mark.parametrize('name, use_journald, logging_driver, words', [
    (
        'test failure with use_journald=false, but docker config set to use "journald"',
        False,
        "journald",
        ['json log files', 'has been set to use "journald"'],
    ),
    (
        'test failure with use_journald=false, but docker config set to use an "unsupported" driver',
        False,
        "unsupported",
        ["json log files", 'has been set to use "unsupported"'],
    ),
    (
        'test failure with use_journald=true, but docker config set to use "json-file"',
        True,
        "json-file",
        ['logs from "journald"', 'has been set to use "json-file"'],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_non_master_failed(name, use_journald, logging_driver, words):
    # Stub docker_info with the parametrized driver; other modules are no-ops.
    def execute_module(module_name, args):
        if module_name == "docker_info":
            return {
                "info": {
                    "LoggingDriver": logging_driver,
                }
            }
        return {}

    task_vars = dict(
        group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
        openshift_logging_fluentd_use_journal=use_journald,
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    error = check.check_logging_config()

    # The returned error text must mention each expected phrase.
    assert error is not None
    for word in words:
        assert word in error
# On masters the journald setting is read from the running fluentd pod's
# USE_JOURNAL env var instead of task vars; these cases should all pass.
@pytest.mark.parametrize('name, pods, logging_driver, extra_words', [
    # use_journald returns false (not using journald), but check succeeds
    # since docker is set to use json-file
    (
        'test success with use_journald=false, and docker config set to use default driver "json-file"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "json-file",
        [],
    ),
    (
        'test success with USE_JOURNAL env var missing and docker config set to use default driver "json-file"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "RANDOM",
                        "value": "value",
                    }],
                },
            ]
        )],
        "json-file",
        [],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master(name, pods, logging_driver, extra_words):
    # Stub docker_info with the parametrized driver; other modules are no-ops.
    def execute_module(module_name, args):
        if module_name == "docker_info":
            return {
                "info": {
                    "LoggingDriver": logging_driver,
                }
            }
        return {}

    task_vars = dict(
        group_names=["oo_masters_to_config"],
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    # Inject the parametrized pod list in place of a live cluster lookup.
    check.get_pods_for_component = lambda _: pods
    error = check.check_logging_config()

    assert error is None
# Master-side failure cases: the pod's USE_JOURNAL env var (or its absence)
# conflicts with docker's configured logging driver.
@pytest.mark.parametrize('name, pods, logging_driver, words', [
    (
        'test failure with use_journald=false, but docker config set to use "journald"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "journald",
        ['json log files', 'has been set to use "journald"'],
    ),
    (
        'test failure with use_journald=true, but docker config set to use "json-file"',
        [fluentd_pod],
        "json-file",
        ['logs from "journald"', 'has been set to use "json-file"'],
    ),
    (
        'test failure with use_journald=false, but docker set to use an "unsupported" driver',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "USE_JOURNAL",
                        "value": "false",
                    }],
                },
            ]
        )],
        "unsupported",
        ["json log files", 'has been set to use "unsupported"'],
    ),
    (
        'test failure with USE_JOURNAL env var missing and docker config set to use "journald"',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [{
                        "name": "RANDOM",
                        "value": "value",
                    }],
                },
            ]
        )],
        "journald",
        ["configuration is set to", "json log files"],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_failed(name, pods, logging_driver, words):
    # Stub docker_info with the parametrized driver; other modules are no-ops.
    def execute_module(module_name, args):
        if module_name == "docker_info":
            return {
                "info": {
                    "LoggingDriver": logging_driver,
                }
            }
        return {}

    task_vars = dict(
        group_names=["oo_masters_to_config"],
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    check.execute_module = execute_module
    # Inject the parametrized pod list in place of a live cluster lookup.
    check.get_pods_for_component = lambda _: pods
    error = check.check_logging_config()

    # The returned error text must mention each expected phrase.
    assert error is not None
    for word in words:
        assert word in error
# Pods with no running containers, or containers without env vars, cannot be
# inspected at all; the check must raise OpenShiftCheckException.
@pytest.mark.parametrize('name, pods, response, logging_driver, extra_words', [
    (
        'test OpenShiftCheckException with no running containers',
        [canned_fluentd_pod([])],
        {
            "failed": True,
            "result": "unexpected",
        },
        "json-file",
        ['no running containers'],
    ),
    (
        'test OpenShiftCheckException one container and no env vars set',
        [canned_fluentd_pod(
            [
                {
                    "name": "container1",
                    "env": [],
                },
            ]
        )],
        {
            "failed": True,
            "result": "unexpected",
        },
        "json-file",
        ['no environment variables'],
    ),
], ids=lambda argvals: argvals[0])
def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods, response, logging_driver, extra_words):
    # Stub docker_info with the parametrized driver; other modules are no-ops.
    def execute_module(module_name, args):
        if module_name == "docker_info":
            return {
                "info": {
                    "LoggingDriver": logging_driver,
                }
            }
        return {}

    task_vars = dict(
        group_names=["oo_masters_to_config"],
        openshift=dict(
            common=dict(config_base=""),
        ),
    )

    check = FluentdConfig(execute_module, task_vars)
    # Inject the parametrized pod list in place of a live cluster lookup.
    check.get_pods_for_component = lambda _: pods
    with pytest.raises(OpenShiftCheckException) as error:
        check.check_logging_config()

    # The exception text must mention each expected phrase.
    assert error is not None
    for word in extra_words:
        assert word in str(error)
| apache-2.0 |
WorkflowConversion/CTDConverter | ctdconverter/convert.py | 1 | 6620 | import os
import sys
import traceback
from argparse import (
ArgumentParser,
RawDescriptionHelpFormatter
)
from . import (
__updated__,
__version__
)
from .common import utils
from .common.exceptions import (
ApplicationException,
ModelError
)
# Program identification and help strings assembled from package metadata.
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = f'%(prog)s {program_version} ({program_build_date})'
program_short_description = "CTDConverter - A project from the WorkflowConversion family " \
                            "(https://github.com/WorkflowConversion/CTDConverter)"
# NOTE(review): the second multi-file example says `converter.py` while every
# other example says `convert.py` — presumably the same script; confirm.
program_usage = '''
USAGE:
$ python convert.py [FORMAT] [ARGUMENTS ...]
FORMAT can be either one of the supported output formats: cwl, galaxy.
There is one converter for each supported FORMAT, each taking a different set of arguments. Please consult the detailed
documentation for each of the converters. Nevertheless, all converters have the following common parameters/options:
I - Parsing a single CTD file and convert it:
$ python convert.py [FORMAT] -i [INPUT_FILE] -o [OUTPUT_FILE]
II - Parsing several CTD files, output converted wrappers in a given folder:
$ python converter.py [FORMAT] -i [INPUT_FILES] -o [OUTPUT_DIRECTORY]
For more detailed help see README.md in the root folder as well as `galaxy/README.md` or `cwl/README.md`.
'''
# Fixed typo in user-facing license text: "WorklfowConversion" -> "WorkflowConversion".
program_license = '''{short_description}
Copyright 2017, WorkflowConversion
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
{usage}
'''.format(short_description=program_short_description, usage=program_usage)
def main(argv=None):
    """Entry point: dispatch to the cwl or galaxy converter.

    Returns 0 on success, 1 on usage/application/model errors, 2 on any
    other failure.
    """
    if argv is None:
        argv = sys.argv
    else:
        # NOTE(review): extending sys.argv with a caller-supplied argv mirrors
        # the original behavior (argparse later reads sys.argv); confirm this
        # is intended before changing it.
        sys.argv.extend(argv)

    # check that we have, at least, one argument provided
    # at this point we cannot parse the arguments, because each converter takes different arguments, meaning each
    # converter will register its own parameters after we've registered the basic ones... we have to do it old school
    if len(argv) < 2:
        utils.logger.error("Not enough arguments provided")
        print("\nUsage: $ CTDConverter [TARGET] [ARGUMENTS]\n\n"
              "Where:\n"
              "  target: one of 'cwl' or 'galaxy'\n\n"
              "Run again using the -h/--help option to print more detailed help.\n")
        return 1

    # TODO: at some point this should look like real software engineering and use a map containing converter instances
    # whose keys would be the name of the converter (e.g., cwl, galaxy), but for the time being, only two formats
    # are supported
    target = str.lower(argv[1])
    if target == 'cwl':
        from .cwl import converter
    elif target == 'galaxy':
        from .galaxy import converter
    # elif target == '-h' or target == '--help' or target == '--h' or target == 'help':
    #     print(program_license)
    #     return 0
    else:
        utils.logger.error("Unrecognized target engine. Supported targets are 'cwl' and 'galaxy'.")
        return 1
    utils.logger.info("Using %s converter" % target)

    try:
        # Setup argument parser
        parser = ArgumentParser(prog="CTDConverter", description=program_license,
                                formatter_class=RawDescriptionHelpFormatter, add_help=True)
        utils.add_common_parameters(parser, program_version_message, program_build_date)

        # add tool-specific arguments
        converter.add_specific_args(parser)

        # parse arguments and perform some basic, common validation
        args = parser.parse_args()
        validate_and_prepare_common_arguments(args)

        # parse the input CTD files into CTDModels
        parsed_ctds = utils.parse_input_ctds(args.xsd_location, args.input_files, args.output_destination,
                                             converter.get_preferred_file_extension())

        # let the converter do its own thing
        converter.convert_models(args, parsed_ctds)
        return 0

    except KeyboardInterrupt:
        print("Interrupted...")
        return 0

    except ApplicationException as e:
        traceback.print_exc()
        utils.logger.error("CTDConverter could not complete the requested operation.", 0)
        utils.logger.error("Reason: " + e.msg, 0)
        return 1

    except ModelError as e:
        traceback.print_exc()
        utils.logger.error("There seems to be a problem with one of your input CTDs.", 0)
        utils.logger.error("Reason: " + e.msg, 0)
        return 1

    except Exception as e:
        traceback.print_exc()
        utils.logger.error("CTDConverter could not complete the requested operation.", 0)
        # BUGFIX: generic exceptions have no `.msg` attribute, so the old
        # `e.msg` raised AttributeError inside this handler; use str(e).
        utils.logger.error("Reason: " + str(e), 0)
        return 2
def validate_and_prepare_common_arguments(args):
    """Normalize and sanity-check the argparse namespace shared by all converters.

    Flattens list arguments, checks that the output destination matches the
    number of inputs, validates input paths, and attaches the parameter
    hardcoder to ``args``. Raises ApplicationException on invalid input.
    """
    # flatten lists of lists to a list containing elements
    for attribute in ["input_files"]:
        utils.flatten_list_of_lists(args, attribute)

    n_inputs = len(args.input_files)

    # if input is a single file, we expect output to be a file (and not a dir that already exists)
    if n_inputs == 1 and os.path.isdir(args.output_destination):
        raise ApplicationException("If a single input file is provided, output (%s) is expected to be a file "
                                   "and not a folder.\n" % args.output_destination)

    # if input is a list of files, we expect output to be a folder
    if n_inputs > 1 and not os.path.isdir(args.output_destination):
        raise ApplicationException("If several input files are provided, output (%s) is expected to be an "
                                   "existing directory.\n" % args.output_destination)

    # check that the provided input files, if provided, contain a valid file path
    for attribute in ["xsd_location", "input_files", "hardcoded_parameters"]:
        utils.validate_argument_is_valid_path(args, attribute)

    # add the parameter hardcoder
    args.parameter_hardcoder = utils.parse_hardcoded_parameters(args.hardcoded_parameters)
# Allow running the converter directly as a script.
if __name__ == "__main__":
    sys.exit(main())
| gpl-3.0 |
saleemjaveds/https-github.com-openstack-nova | nova/tests/integrated/v3/test_remote_consoles.py | 29 | 2479 | # Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.integrated.v3 import test_servers
class ConsolesSampleJsonTests(test_servers.ServersSampleBase):
    """API-sample tests for the VNC/SPICE/RDP remote console actions."""
    extension_name = "os-remote-consoles"

    def setUp(self):
        super(ConsolesSampleJsonTests, self).setUp()
        # Enable every remote console protocol so each action can be
        # exercised against its sample templates.
        self.flags(vnc_enabled=True)
        self.flags(enabled=True, group='spice')
        self.flags(enabled=True, group='rdp')

    def _check_console_action(self, action, req_template, resp_template):
        # Boot a server, fire the console action and compare the response
        # against the sample, substituting a generic URL regex.
        uuid = self._post_server()
        response = self._do_post('servers/%s/action' % uuid,
                                 req_template,
                                 {'action': action})
        subs = self._get_regexes()
        subs["url"] = \
            "((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)"
        self._verify_response(resp_template, subs, response, 200)

    def test_get_vnc_console(self):
        self._check_console_action('os-getVNCConsole',
                                   'get-vnc-console-post-req',
                                   'get-vnc-console-post-resp')

    def test_get_spice_console(self):
        self._check_console_action('os-getSPICEConsole',
                                   'get-spice-console-post-req',
                                   'get-spice-console-post-resp')

    def test_get_rdp_console(self):
        self._check_console_action('os-getRDPConsole',
                                   'get-rdp-console-post-req',
                                   'get-rdp-console-post-resp')
| apache-2.0 |
hkawasaki/kawasaki-aio8-1 | common/djangoapps/third_party_auth/tests/specs/test_google.py | 78 | 1124 | """Integration tests for Google providers."""
from third_party_auth import provider
from third_party_auth.tests.specs import base
class GoogleOauth2IntegrationTest(base.Oauth2IntegrationTest):
    """Integration tests for provider.GoogleOauth2."""

    PROVIDER_CLASS = provider.GoogleOauth2
    # Canned OAuth2 client settings consumed by the base test harness.
    PROVIDER_SETTINGS = {
        'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY': 'google_oauth2_key',
        'SOCIAL_AUTH_GOOGLE_OAUTH2_SECRET': 'google_oauth2_secret',
    }
    # Canned token-endpoint response returned by the mocked provider.
    TOKEN_RESPONSE_DATA = {
        'access_token': 'access_token_value',
        'expires_in': 'expires_in_value',
        'id_token': 'id_token_value',
        'token_type': 'token_type_value',
    }
    # Canned userinfo response returned by the mocked provider.
    USER_RESPONSE_DATA = {
        'email': 'email_value@example.com',
        'family_name': 'family_name_value',
        'given_name': 'given_name_value',
        'id': 'id_value',
        'link': 'link_value',
        'locale': 'locale_value',
        'name': 'name_value',
        'picture': 'picture_value',
        'verified_email': 'verified_email_value',
    }

    def get_username(self):
        # The expected username is the local part of the mocked email address.
        return self.get_response_data().get('email').split('@')[0]
| agpl-3.0 |
vinthony/racpider | src/server/web.py | 1 | 44130 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
A simple, lightweight, WSGI-compatible web framework.
'''
__author__ = 'Michael Liao'
import types, os, re, cgi, sys, time, datetime, functools, mimetypes, threading, urllib, traceback
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# thread local object for storing request and response:
# each worker thread gets its own ctx.request / ctx.response / ctx.application
ctx = threading.local()
# Dict object:
class Dict(dict):
    '''
    A dict that also exposes its keys as attributes, so ``d.x`` is
    equivalent to ``d['x']`` for both reads and writes.

    Optionally pre-populated from parallel ``names``/``values`` sequences
    in addition to ordinary keyword arguments.
    '''

    def __init__(self, names=(), values=(), **kw):
        super(Dict, self).__init__(**kw)
        # zip truncates to the shorter of the two sequences.
        self.update(zip(names, values))

    def __getattr__(self, key):
        # Only called for attributes not found the normal way; map the
        # lookup onto item access and surface misses as AttributeError.
        if key in self:
            return self[key]
        raise AttributeError(r"'Dict' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # All attribute writes land in the mapping itself.
        self[key] = value
_TIMEDELTA_ZERO = datetime.timedelta(0)

# timezone as UTC+8:00, UTC-10:00 (also accepts minutes, e.g. '+7:30')
_RE_TZ = re.compile('^([\+\-])([0-9]{1,2})\:([0-9]{1,2})$')

class UTC(datetime.tzinfo):
    '''
    Fixed-offset tzinfo built from a string such as '+8:00' or '-05:30'.

    Raises ValueError when the string does not look like a signed
    HH:MM offset. DST is always zero.
    '''

    def __init__(self, utc):
        spec = str(utc.strip().upper())
        mt = _RE_TZ.match(spec)
        if not mt:
            raise ValueError('bad utc time zone')
        sign = -1 if mt.group(1) == '-' else 1
        hours = sign * int(mt.group(2))
        minutes = sign * int(mt.group(3))
        self._utcoffset = datetime.timedelta(hours=hours, minutes=minutes)
        self._tzname = 'UTC%s' % spec

    def utcoffset(self, dt):
        return self._utcoffset

    def dst(self, dt):
        # This zone never observes daylight saving time.
        return _TIMEDELTA_ZERO

    def tzname(self, dt):
        return self._tzname

    def __str__(self):
        return 'UTC tzinfo object (%s)' % self._tzname

    __repr__ = __str__
# all known response statues:
# all known response statues:
# (maps numeric HTTP status code -> canonical reason phrase)
_RESPONSE_STATUSES = {
    # Informational
    100: 'Continue',
    101: 'Switching Protocols',
    102: 'Processing',

    # Successful
    200: 'OK',
    201: 'Created',
    202: 'Accepted',
    203: 'Non-Authoritative Information',
    204: 'No Content',
    205: 'Reset Content',
    206: 'Partial Content',
    207: 'Multi Status',
    226: 'IM Used',

    # Redirection
    300: 'Multiple Choices',
    301: 'Moved Permanently',
    302: 'Found',
    303: 'See Other',
    304: 'Not Modified',
    305: 'Use Proxy',
    307: 'Temporary Redirect',

    # Client Error
    400: 'Bad Request',
    401: 'Unauthorized',
    402: 'Payment Required',
    403: 'Forbidden',
    404: 'Not Found',
    405: 'Method Not Allowed',
    406: 'Not Acceptable',
    407: 'Proxy Authentication Required',
    408: 'Request Timeout',
    409: 'Conflict',
    410: 'Gone',
    411: 'Length Required',
    412: 'Precondition Failed',
    413: 'Request Entity Too Large',
    414: 'Request URI Too Long',
    415: 'Unsupported Media Type',
    416: 'Requested Range Not Satisfiable',
    417: 'Expectation Failed',
    418: "I'm a teapot",
    422: 'Unprocessable Entity',
    423: 'Locked',
    424: 'Failed Dependency',
    426: 'Upgrade Required',

    # Server Error
    500: 'Internal Server Error',
    501: 'Not Implemented',
    502: 'Bad Gateway',
    503: 'Service Unavailable',
    504: 'Gateway Timeout',
    505: 'HTTP Version Not Supported',
    507: 'Insufficient Storage',
    510: 'Not Extended',
}

# Validates a status line such as '200 OK' or a bare '404'.
_RE_RESPONSE_STATUS = re.compile(r'^\d\d\d(\ [\w\ ]+)?$')

# Canonical spellings of the response headers this framework normalizes.
_RESPONSE_HEADERS = (
    'Accept-Ranges',
    'Age',
    'Allow',
    'Cache-Control',
    'Connection',
    'Content-Encoding',
    'Content-Language',
    'Content-Length',
    'Content-Location',
    'Content-MD5',
    'Content-Disposition',
    'Content-Range',
    'Content-Type',
    'Date',
    'ETag',
    'Expires',
    'Last-Modified',
    'Link',
    'Location',
    'P3P',
    'Pragma',
    'Proxy-Authenticate',
    'Refresh',
    'Retry-After',
    'Server',
    'Set-Cookie',
    'Strict-Transport-Security',
    'Trailer',
    'Transfer-Encoding',
    'Vary',
    'Via',
    'Warning',
    'WWW-Authenticate',
    'X-Frame-Options',
    'X-XSS-Protection',
    'X-Content-Type-Options',
    'X-Forwarded-Proto',
    'X-Powered-By',
    'X-UA-Compatible',
)

# Maps UPPERCASE header name -> canonical spelling for case-insensitive lookup.
_RESPONSE_HEADER_DICT = dict(zip(map(lambda x: x.upper(), _RESPONSE_HEADERS), _RESPONSE_HEADERS))

# Advertising header attached to every generated error response.
_HEADER_X_POWERED_BY = ('X-Powered-By', 'transwarp/1.0')
class HttpError(Exception):
    '''
    Exception carrying an HTTP status line ('<code> <reason>') plus an
    optional list of response headers.
    '''

    def __init__(self, code):
        '''
        Build the error from a numeric response code; the reason phrase is
        looked up in _RESPONSE_STATUSES.
        '''
        super(HttpError, self).__init__()
        self.status = '%d %s' % (code, _RESPONSE_STATUSES[code])

    def header(self, name, value):
        # Lazily create the header list so errors without custom headers
        # stay cheap; the X-Powered-By header is always first.
        if not hasattr(self, '_headers'):
            self._headers = [_HEADER_X_POWERED_BY]
        self._headers.append((name, value))

    @property
    def headers(self):
        # Empty list when header() was never called.
        return getattr(self, '_headers', [])

    def __str__(self):
        return self.status

    __repr__ = __str__
class RedirectError(HttpError):
    '''
    HttpError variant for 3xx responses; additionally remembers the
    redirect target in ``location``.
    '''

    def __init__(self, code, location):
        '''
        Build the redirect error from a 3xx response code and target URL.
        '''
        super(RedirectError, self).__init__(code)
        self.location = location

    def __str__(self):
        return '%s, %s' % (self.status, self.location)

    __repr__ = __str__
def badrequest():
    '''
    Build an HttpError carrying status '400 Bad Request'.
    '''
    return HttpError(400)

def unauthorized():
    '''
    Build an HttpError carrying status '401 Unauthorized'.
    '''
    return HttpError(401)

def forbidden():
    '''
    Build an HttpError carrying status '403 Forbidden'.
    '''
    return HttpError(403)

def notfound():
    '''
    Build an HttpError carrying status '404 Not Found'.
    '''
    return HttpError(404)

def conflict():
    '''
    Build an HttpError carrying status '409 Conflict'.
    '''
    return HttpError(409)

def internalerror():
    '''
    Build an HttpError carrying status '500 Internal Server Error'.
    '''
    return HttpError(500)
def redirect(location):
    '''
    Build a permanent redirect (301 Moved Permanently) pointing at *location*.
    '''
    return RedirectError(301, location)

def found(location):
    '''
    Build a temporary redirect (302 Found) pointing at *location*.
    '''
    return RedirectError(302, location)

def seeother(location):
    '''
    Build a 303 See Other redirect pointing at *location*; query strings in
    the target URL are preserved as-is.
    '''
    return RedirectError(303, location)
def _to_str(s):
'''
Convert to str.
>>> _to_str('s123') == 's123'
True
>>> _to_str(u'\u4e2d\u6587') == '\xe4\xb8\xad\xe6\x96\x87'
True
>>> _to_str(-123) == '-123'
True
'''
if isinstance(s, str):
return s
if isinstance(s, unicode):
return s.encode('utf-8')
return str(s)
def _to_unicode(s, encoding='utf-8'):
'''
Convert to unicode.
>>> _to_unicode('\xe4\xb8\xad\xe6\x96\x87') == u'\u4e2d\u6587'
True
'''
return s.decode('utf-8')
def _quote(s, encoding='utf-8'):
    '''
    Url quote as str.

    Unicode input is first encoded with *encoding*.
    NOTE(review): Python 2 only — relies on the `unicode` builtin and
    `urllib.quote`, which moved to `urllib.parse.quote` in Python 3.

    >>> _quote('http://example/test?a=1+')
    'http%3A//example/test%3Fa%3D1%2B'
    >>> _quote(u'hello world!')
    'hello%20world%21'
    '''
    if isinstance(s, unicode):
        s = s.encode(encoding)
    return urllib.quote(s)
def _unquote(s, encoding='utf-8'):
    '''
    Url unquote as unicode, decoding the unquoted bytes with *encoding*.
    NOTE(review): Python 2 only — `urllib.unquote` moved to
    `urllib.parse.unquote` in Python 3.

    >>> _unquote('http%3A//example/test%3Fa%3D1+')
    u'http://example/test?a=1+'
    '''
    return urllib.unquote(s).decode(encoding)
def get(path):
    '''
    Decorator marking a handler function as a GET route for *path*.
    The route/method are recorded as attributes on the function; the
    function itself is returned unwrapped.

    >>> @get('/test/:id')
    ... def test():
    ...     return 'ok'
    ...
    >>> test.__web_route__
    '/test/:id'
    >>> test.__web_method__
    'GET'
    >>> test()
    'ok'
    '''
    def _mark(func):
        func.__web_route__ = path
        func.__web_method__ = 'GET'
        return func
    return _mark
def post(path):
    '''
    Decorator marking a handler function as a POST route for *path*.
    Mirrors @get but records the POST method.

    >>> @post('/post/:id')
    ... def testpost():
    ...     return '200'
    ...
    >>> testpost.__web_route__
    '/post/:id'
    >>> testpost.__web_method__
    'POST'
    >>> testpost()
    '200'
    '''
    def _mark(func):
        func.__web_route__ = path
        func.__web_method__ = 'POST'
        return func
    return _mark
_re_route = re.compile(r'(\:[a-zA-Z_]\w*)')
def _build_regex(path):
r'''
Convert route path to regex.
>>> _build_regex('/path/to/:file')
'^\\/path\\/to\\/(?P<file>[^\\/]+)$'
>>> _build_regex('/:user/:comments/list')
'^\\/(?P<user>[^\\/]+)\\/(?P<comments>[^\\/]+)\\/list$'
>>> _build_regex(':id-:pid/:w')
'^(?P<id>[^\\/]+)\\-(?P<pid>[^\\/]+)\\/(?P<w>[^\\/]+)$'
'''
re_list = ['^']
var_list = []
is_var = False
for v in _re_route.split(path):#?
if is_var:
var_name = v[1:]
var_list.append(var_name)
re_list.append(r'(?P<%s>[^\/]+)' % var_name)
else:
s = ''
for ch in v:
if ch>='0' and ch<='9':
s = s + ch
elif ch>='A' and ch<='Z':
s = s + ch
elif ch>='a' and ch<='z':
s = s + ch
else:
s = s + '\\' + ch
re_list.append(s)
is_var = not is_var
re_list.append('$')
return ''.join(re_list)
class Route(object):
    '''
    Callable wrapper around a handler decorated with @get/@post.

    Static routes (no ':name' placeholders) are matched by exact path;
    dynamic routes compile the pattern from _build_regex().
    '''

    def __init__(self, func):
        self.path = func.__web_route__
        self.method = func.__web_method__
        self.is_static = _re_route.search(self.path) is None
        if not self.is_static:
            self.route = re.compile(_build_regex(self.path))
        self.func = func

    def match(self, url):
        # Returns the captured placeholder values, or None when *url*
        # does not match this (dynamic) route.
        m = self.route.match(url)
        return m.groups() if m else None

    def __call__(self, *args):
        return self.func(*args)

    def __str__(self):
        kind = 'static' if self.is_static else 'dynamic'
        return '[ %s ][ %s ] %s' % (kind, self.method, self.path)

    __repr__ = __str__
def _static_file_generator(fpath):
BLOCK_SIZE = 8192
with open(fpath, 'rb') as f:
block = f.read(BLOCK_SIZE)
while block:
yield block
block = f.read(BLOCK_SIZE)
class StaticFileRoute(object):
    '''Fallback GET route serving files under /static/ from the document root.'''

    def __init__(self):
        self.method = 'GET'
        self.is_static = False
        self.route = re.compile('^/static/(.+)$')

    def match(self, url):
        # Return the on-disk relative path (without the leading slash) for
        # /static/ URLs; None lets other routes try.
        if not url.startswith('/static/'):
            return None
        return (url[1:], )

    def __call__(self, *args):
        fpath = os.path.join(ctx.application.document_root, args[0])
        if not os.path.isfile(fpath):
            raise notfound()
        ext = os.path.splitext(fpath)[1]
        # Fall back to a generic binary type for unknown extensions.
        ctx.response.content_type = mimetypes.types_map.get(ext.lower(), 'application/octet-stream')
        return _static_file_generator(fpath)
def favicon_handler():
    # NOTE(review): 'static_file_handler' is not defined anywhere in this
    # module; presumably '/favicon.ico' was meant to go through the static
    # file machinery (StaticFileRoute). Calling this as-is raises NameError
    # -- confirm intent before relying on this handler.
    return static_file_handler('/favicon.ico')
class MultipartFile(object):
    '''
    Wrapper for an uploaded file obtained from the request input.

        f = ctx.request['file']
        f.filename # 'test.png'
        f.file     # file-like object
    '''
    def __init__(self, storage):
        # 'storage' is a cgi.FieldStorage item carrying an uploaded file;
        # keep the decoded filename and the raw file-like object.
        self.filename = _to_unicode(storage.filename)
        self.file = storage.file
class Request(object):
    '''
    Request object for obtaining all http request information.
    '''

    def __init__(self, environ):
        # Keep a reference to the raw WSGI environ; all accessors below
        # read from it lazily.
        self._environ = environ

    def _parse_input(self):
        # Parse form/query input from the WSGI input stream using
        # cgi.FieldStorage; uploads become MultipartFile wrappers.
        def _convert(item):
            if isinstance(item, list):
                return [_to_unicode(i.value) for i in item]
            if item.filename:
                return MultipartFile(item)
            return _to_unicode(item.value)
        fs = cgi.FieldStorage(fp=self._environ['wsgi.input'], environ=self._environ, keep_blank_values=True)
        inputs = dict()
        for key in fs:
            inputs[key] = _convert(fs[key])
        return inputs

    def _get_raw_input(self):
        '''
        Get raw input as dict containing values as unicode, list or MultipartFile.
        '''
        # Parse lazily and cache the result, so the input stream is
        # consumed at most once per request.
        if not hasattr(self, '_raw_input'):
            self._raw_input = self._parse_input()
        return self._raw_input

    def __getitem__(self, key):
        '''
        Get input parameter value. If the specified key has multiple value, the first one is returned.
        If the specified key is not exist, then raise KeyError.

        >>> from StringIO import StringIO
        >>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
        >>> r['a']
        u'1'
        >>> r['c']
        u'ABC'
        >>> r['empty']
        Traceback (most recent call last):
            ...
        KeyError: 'empty'
        >>> b = '----WebKitFormBoundaryQQ3J8kPsjFpTmqNz'
        >>> pl = ['--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Scofield', '--%s' % b, 'Content-Disposition: form-data; name=\\"name\\"\\n', 'Lincoln', '--%s' % b, 'Content-Disposition: form-data; name=\\"file\\"; filename=\\"test.txt\\"', 'Content-Type: text/plain\\n', 'just a test', '--%s' % b, 'Content-Disposition: form-data; name=\\"id\\"\\n', '4008009001', '--%s--' % b, '']
        >>> payload = '\\n'.join(pl)
        >>> r = Request({'REQUEST_METHOD':'POST', 'CONTENT_LENGTH':str(len(payload)), 'CONTENT_TYPE':'multipart/form-data; boundary=%s' % b, 'wsgi.input':StringIO(payload)})
        >>> r.get('name')
        u'Scofield'
        >>> r.gets('name')
        [u'Scofield', u'Lincoln']
        >>> f = r.get('file')
        >>> f.filename
        u'test.txt'
        >>> f.file.read()
        'just a test'
        '''
        r = self._get_raw_input()[key]
        # Multi-valued key: return only the first value.
        if isinstance(r, list):
            return r[0]
        return r

    def get(self, key, default=None):
        '''
        The same as request[key], but return default value if key is not found.

        >>> from StringIO import StringIO
        >>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
        >>> r.get('a')
        u'1'
        >>> r.get('empty')
        >>> r.get('empty', 'DEFAULT')
        'DEFAULT'
        '''
        r = self._get_raw_input().get(key, default)
        if isinstance(r, list):
            return r[0]
        return r

    def gets(self, key):
        '''
        Get multiple values for specified key.

        >>> from StringIO import StringIO
        >>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
        >>> r.gets('a')
        [u'1']
        >>> r.gets('c')
        [u'ABC', u'XYZ']
        >>> r.gets('empty')
        Traceback (most recent call last):
            ...
        KeyError: 'empty'
        '''
        r = self._get_raw_input()[key]
        if isinstance(r, list):
            # Return a copy so callers cannot mutate the cached input.
            return r[:]
        return [r]

    def input(self, **kw):
        '''
        Get input as dict from request, fill dict using provided default value if key not exist.

        i = ctx.request.input(role='guest')
        i.role ==> 'guest'

        >>> from StringIO import StringIO
        >>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('a=1&b=M%20M&c=ABC&c=XYZ&e=')})
        >>> i = r.input(x=2008)
        >>> i.a
        u'1'
        >>> i.b
        u'M M'
        >>> i.c
        u'ABC'
        >>> i.x
        2008
        >>> i.get('d', u'100')
        u'100'
        >>> i.x
        2008
        '''
        # Start from the defaults, then overlay actual request values;
        # multi-valued keys collapse to their first value.
        copy = Dict(**kw)
        raw = self._get_raw_input()
        for k, v in raw.iteritems():
            copy[k] = v[0] if isinstance(v, list) else v
        return copy

    def get_body(self):
        '''
        Get raw data from HTTP POST and return as str.

        >>> from StringIO import StringIO
        >>> r = Request({'REQUEST_METHOD':'POST', 'wsgi.input':StringIO('<xml><raw/>')})
        >>> r.get_body()
        '<xml><raw/>'
        '''
        # NOTE(review): reads the stream to EOF without honoring
        # CONTENT_LENGTH -- some WSGI servers may block here; confirm.
        fp = self._environ['wsgi.input']
        return fp.read()

    @property
    def remote_addr(self):
        '''
        Get remote addr. Return '0.0.0.0' if cannot get remote_addr.

        >>> r = Request({'REMOTE_ADDR': '192.168.0.100'})
        >>> r.remote_addr
        '192.168.0.100'
        '''
        return self._environ.get('REMOTE_ADDR', '0.0.0.0')

    @property
    def document_root(self):
        '''
        Get raw document_root as str. Return '' if no document_root.

        >>> r = Request({'DOCUMENT_ROOT': '/srv/path/to/doc'})
        >>> r.document_root
        '/srv/path/to/doc'
        '''
        return self._environ.get('DOCUMENT_ROOT', '')

    @property
    def query_string(self):
        '''
        Get raw query string as str. Return '' if no query string.

        >>> r = Request({'QUERY_STRING': 'a=1&c=2'})
        >>> r.query_string
        'a=1&c=2'
        >>> r = Request({})
        >>> r.query_string
        ''
        '''
        return self._environ.get('QUERY_STRING', '')

    @property
    def environ(self):
        '''
        Get raw environ as dict, both key, value are str.

        >>> r = Request({'REQUEST_METHOD': 'GET', 'wsgi.url_scheme':'http'})
        >>> r.environ.get('REQUEST_METHOD')
        'GET'
        >>> r.environ.get('wsgi.url_scheme')
        'http'
        >>> r.environ.get('SERVER_NAME')
        >>> r.environ.get('SERVER_NAME', 'unamed')
        'unamed'
        '''
        return self._environ

    @property
    def request_method(self):
        '''
        Get request method. The valid returned values are 'GET', 'POST', 'HEAD'.

        >>> r = Request({'REQUEST_METHOD': 'GET'})
        >>> r.request_method
        'GET'
        >>> r = Request({'REQUEST_METHOD': 'POST'})
        >>> r.request_method
        'POST'
        '''
        return self._environ['REQUEST_METHOD']

    @property
    def path_info(self):
        '''
        Get request path as str.

        >>> r = Request({'PATH_INFO': '/test/a%20b.html'})
        >>> r.path_info
        '/test/a b.html'
        '''
        # Percent-decode the path (e.g. '%20' -> ' ').
        return urllib.unquote(self._environ.get('PATH_INFO', ''))

    @property
    def host(self):
        '''
        Get request host as str. Default to '' if cannot get host..

        >>> r = Request({'HTTP_HOST': 'localhost:8080'})
        >>> r.host
        'localhost:8080'
        '''
        return self._environ.get('HTTP_HOST', '')

    def _get_headers(self):
        # Build (and cache) a dict of HTTP headers from the environ keys
        # prefixed with 'HTTP_'.
        if not hasattr(self, '_headers'):
            hdrs = {}
            for k, v in self._environ.iteritems():
                if k.startswith('HTTP_'):
                    # convert 'HTTP_ACCEPT_ENCODING' to 'ACCEPT-ENCODING'
                    hdrs[k[5:].replace('_', '-').upper()] = v.decode('utf-8')
            self._headers = hdrs
        return self._headers

    @property
    def headers(self):
        '''
        Get all HTTP headers with key as str and value as unicode. The header names are 'XXX-XXX' uppercase.

        >>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
        >>> H = r.headers
        >>> H['ACCEPT']
        u'text/html'
        >>> H['USER-AGENT']
        u'Mozilla/5.0'
        >>> L = H.items()
        >>> L.sort()
        >>> L
        [('ACCEPT', u'text/html'), ('USER-AGENT', u'Mozilla/5.0')]
        '''
        # Return a copy so callers cannot mutate the cached headers.
        return dict(**self._get_headers())

    def header(self, header, default=None):
        '''
        Get header from request as unicode, return None if not exist, or default if specified.
        The header name is case-insensitive such as 'USER-AGENT' or u'content-Type'.

        >>> r = Request({'HTTP_USER_AGENT': 'Mozilla/5.0', 'HTTP_ACCEPT': 'text/html'})
        >>> r.header('User-Agent')
        u'Mozilla/5.0'
        >>> r.header('USER-AGENT')
        u'Mozilla/5.0'
        >>> r.header('Accept')
        u'text/html'
        >>> r.header('Test')
        >>> r.header('Test', u'DEFAULT')
        u'DEFAULT'
        '''
        return self._get_headers().get(header.upper(), default)

    def _get_cookies(self):
        # Parse (and cache) the HTTP_COOKIE header into a name -> value dict.
        if not hasattr(self, '_cookies'):
            cookies = {}
            cookie_str = self._environ.get('HTTP_COOKIE')
            if cookie_str:
                for c in cookie_str.split(';'):
                    pos = c.find('=')
                    if pos>0:
                        cookies[c[:pos].strip()] = _unquote(c[pos+1:])
            self._cookies = cookies
        return self._cookies

    @property
    def cookies(self):
        '''
        Return all cookies as dict. The cookie name is str and values is unicode.

        >>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
        >>> r.cookies['A']
        u'123'
        >>> r.cookies['url']
        u'http://www.example.com/'
        '''
        return Dict(**self._get_cookies())

    def cookie(self, name, default=None):
        '''
        Return specified cookie value as unicode. Default to None if cookie not exists.

        >>> r = Request({'HTTP_COOKIE':'A=123; url=http%3A%2F%2Fwww.example.com%2F'})
        >>> r.cookie('A')
        u'123'
        >>> r.cookie('url')
        u'http://www.example.com/'
        >>> r.cookie('test')
        >>> r.cookie('test', u'DEFAULT')
        u'DEFAULT'
        '''
        return self._get_cookies().get(name, default)
# UTC timezone singleton used when formatting cookie 'Expires' timestamps.
UTC_0 = UTC('+00:00')
class Response(object):
    '''
    Response object holding status, headers and cookies for the current
    request. Header keys are stored uppercase internally.
    '''

    def __init__(self):
        self._status = '200 OK'
        self._headers = {'CONTENT-TYPE': 'text/html; charset=utf-8'}

    @property
    def headers(self):
        '''
        Return response headers as [(key1, value1), (key2, value2)...] including cookies.

        >>> r = Response()
        >>> r.headers
        [('Content-Type', 'text/html; charset=utf-8'), ('X-Powered-By', 'transwarp/1.0')]
        >>> r.set_cookie('s1', 'ok', 3600)
        >>> r.headers
        [('Content-Type', 'text/html; charset=utf-8'), ('Set-Cookie', 's1=ok; Max-Age=3600; Path=/; HttpOnly'), ('X-Powered-By', 'transwarp/1.0')]
        '''
        # Map internal uppercase keys back to their canonical display form.
        L = [(_RESPONSE_HEADER_DICT.get(k, k), v) for k, v in self._headers.iteritems()]
        if hasattr(self, '_cookies'):
            for v in self._cookies.itervalues():
                L.append(('Set-Cookie', v))
        L.append(_HEADER_X_POWERED_BY)
        return L

    def header(self, name):
        '''
        Get header by name, case-insensitive.

        >>> r = Response()
        >>> r.header('content-type')
        'text/html; charset=utf-8'
        >>> r.header('CONTENT-type')
        'text/html; charset=utf-8'
        >>> r.header('X-Powered-By')
        '''
        # Known headers are looked up uppercase; unknown names fall back
        # to the caller's exact spelling.
        key = name.upper()
        if not key in _RESPONSE_HEADER_DICT:
            key = name
        return self._headers.get(key)

    def unset_header(self, name):
        '''
        Unset header by name and value.

        >>> r = Response()
        >>> r.header('content-type')
        'text/html; charset=utf-8'
        >>> r.unset_header('CONTENT-type')
        >>> r.header('content-type')
        '''
        key = name.upper()
        if not key in _RESPONSE_HEADER_DICT:
            key = name
        if key in self._headers:
            del self._headers[key]

    def set_header(self, name, value):
        '''
        Set header by name and value.

        >>> r = Response()
        >>> r.header('content-type')
        'text/html; charset=utf-8'
        >>> r.set_header('CONTENT-type', 'image/png')
        >>> r.header('content-TYPE')
        'image/png'
        '''
        key = name.upper()
        if not key in _RESPONSE_HEADER_DICT:
            key = name
        self._headers[key] = _to_str(value)

    @property
    def content_type(self):
        '''
        Get content type from response. This is a shortcut for header('Content-Type').

        >>> r = Response()
        >>> r.content_type
        'text/html; charset=utf-8'
        >>> r.content_type = 'application/json'
        >>> r.content_type
        'application/json'
        '''
        return self.header('CONTENT-TYPE')

    @content_type.setter
    def content_type(self, value):
        '''
        Set content type for response. This is a shortcut for set_header('Content-Type', value).
        '''
        # A falsy value removes the header entirely.
        if value:
            self.set_header('CONTENT-TYPE', value)
        else:
            self.unset_header('CONTENT-TYPE')

    @property
    def content_length(self):
        '''
        Get content length. Return None if not set.

        >>> r = Response()
        >>> r.content_length
        >>> r.content_length = 100
        >>> r.content_length
        '100'
        '''
        return self.header('CONTENT-LENGTH')

    @content_length.setter
    def content_length(self, value):
        '''
        Set content length, the value can be int or str.

        >>> r = Response()
        >>> r.content_length = '1024'
        >>> r.content_length
        '1024'
        >>> r.content_length = 1024 * 8
        >>> r.content_length
        '8192'
        '''
        self.set_header('CONTENT-LENGTH', str(value))

    def delete_cookie(self, name):
        '''
        Delete a cookie immediately.

        Args:
            name: the cookie name.
        '''
        # Expire the cookie in the past so the browser discards it.
        self.set_cookie(name, '__deleted__', expires=0)

    def set_cookie(self, name, value, max_age=None, expires=None, path='/', domain=None, secure=False, http_only=True):
        '''
        Set a cookie.

        Args:
            name: the cookie name.
            value: the cookie value.
            max_age: optional, seconds of cookie's max age.
            expires: optional, unix timestamp, datetime or date object that indicate an absolute time of the
                expiration time of cookie. Note that if expires specified, the max_age will be ignored.
            path: the cookie path, default to '/'.
            domain: the cookie domain, default to None.
            secure: if the cookie secure, default to False.
            http_only: if the cookie is for http only, default to True for better safty
                (client-side script cannot access cookies with HttpOnly flag).

        >>> r = Response()
        >>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
        >>> r._cookies
        {'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
        >>> r.set_cookie('company', r'Example="Limited"', expires=1342274794.123, path='/sub/')
        >>> r._cookies
        {'company': 'company=Example%3D%22Limited%22; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/sub/; HttpOnly'}
        >>> dt = datetime.datetime(2012, 7, 14, 22, 6, 34, tzinfo=UTC('+8:00'))
        >>> r.set_cookie('company', 'Expires', expires=dt)
        >>> r._cookies
        {'company': 'company=Expires; Expires=Sat, 14-Jul-2012 14:06:34 GMT; Path=/; HttpOnly'}
        '''
        if not hasattr(self, '_cookies'):
            self._cookies = {}
        L = ['%s=%s' % (_quote(name), _quote(value))]
        if expires is not None:
            # 'expires' wins over 'max_age'; timestamps and datetimes are
            # both rendered in GMT.
            if isinstance(expires, (float, int, long)):
                L.append('Expires=%s' % datetime.datetime.fromtimestamp(expires, UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
            if isinstance(expires, (datetime.date, datetime.datetime)):
                L.append('Expires=%s' % expires.astimezone(UTC_0).strftime('%a, %d-%b-%Y %H:%M:%S GMT'))
        elif isinstance(max_age, (int, long)):
            L.append('Max-Age=%d' % max_age)
        L.append('Path=%s' % path)
        if domain:
            L.append('Domain=%s' % domain)
        if secure:
            L.append('Secure')
        if http_only:
            L.append('HttpOnly')
        self._cookies[name] = '; '.join(L)

    def unset_cookie(self, name):
        '''
        Unset a cookie.

        >>> r = Response()
        >>> r.set_cookie('company', 'Abc, Inc.', max_age=3600)
        >>> r._cookies
        {'company': 'company=Abc%2C%20Inc.; Max-Age=3600; Path=/; HttpOnly'}
        >>> r.unset_cookie('company')
        >>> r._cookies
        {}
        '''
        if hasattr(self, '_cookies'):
            if name in self._cookies:
                del self._cookies[name]

    @property
    def status_code(self):
        '''
        Get response status code as int.

        >>> r = Response()
        >>> r.status_code
        200
        >>> r.status = 404
        >>> r.status_code
        404
        >>> r.status = '500 Internal Error'
        >>> r.status_code
        500
        '''
        # Status is stored as e.g. '404 Not Found'; the code is the prefix.
        return int(self._status[:3])

    @property
    def status(self):
        '''
        Get response status. Default to '200 OK'.

        >>> r = Response()
        >>> r.status
        '200 OK'
        >>> r.status = 404
        >>> r.status
        '404 Not Found'
        >>> r.status = '500 Oh My God'
        >>> r.status
        '500 Oh My God'
        '''
        return self._status

    @status.setter
    def status(self, value):
        '''
        Set response status as int or str.

        >>> r = Response()
        >>> r.status = 404
        >>> r.status
        '404 Not Found'
        >>> r.status = '500 ERR'
        >>> r.status
        '500 ERR'
        >>> r.status = u'403 Denied'
        >>> r.status
        '403 Denied'
        >>> r.status = 99
        Traceback (most recent call last):
            ...
        ValueError: Bad response code: 99
        >>> r.status = 'ok'
        Traceback (most recent call last):
            ...
        ValueError: Bad response code: ok
        >>> r.status = [1, 2, 3]
        Traceback (most recent call last):
            ...
        TypeError: Bad type of response code.
        '''
        if isinstance(value, (int, long)):
            if value>=100 and value<=999:
                # Known codes get their standard reason phrase appended.
                st = _RESPONSE_STATUSES.get(value, '')
                if st:
                    self._status = '%d %s' % (value, st)
                else:
                    self._status = str(value)
            else:
                raise ValueError('Bad response code: %d' % value)
        elif isinstance(value, basestring):
            if isinstance(value, unicode):
                value = value.encode('utf-8')
            # Free-form strings must still look like 'NNN reason'.
            if _RE_RESPONSE_STATUS.match(value):
                self._status = value
            else:
                raise ValueError('Bad response code: %s' % value)
        else:
            raise TypeError('Bad type of response code.')
class Template(object):
    '''
    A deferred template render: the template name plus its model dict.
    '''
    def __init__(self, template_name, **kw):
        '''
        Init a template object with template name, model as dict, and additional kw that will append to model.

        >>> t = Template('hello.html', title='Hello', copyright='@2012')
        >>> t.model['title']
        'Hello'
        >>> t.model['copyright']
        '@2012'
        >>> t = Template('test.html', abc=u'ABC', xyz=u'XYZ')
        >>> t.model['abc']
        u'ABC'
        '''
        self.template_name = template_name
        # Copy the keyword arguments so later mutation of the model does
        # not surprise the caller.
        self.model = dict(kw)
class TemplateEngine(object):
    '''
    Base template engine.

    Subclasses override __call__ to render the template at 'path' with
    the given 'model' dict.
    '''
    def __call__(self, path, model):
        return '<!-- override this method to render template -->'
class Jinja2TemplateEngine(TemplateEngine):
    '''
    Render using jinja2 template engine.

    >>> templ_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'test')
    >>> engine = Jinja2TemplateEngine(templ_path)
    >>> engine.add_filter('datetime', lambda dt: dt.strftime('%Y-%m-%d %H:%M:%S'))
    >>> engine('jinja2-test.html', dict(name='Michael', posted_at=datetime.datetime(2014, 6, 1, 10, 11, 12)))
    '<p>Hello, Michael.</p><span>2014-06-01 10:11:12</span>'
    '''
    def __init__(self, templ_dir, **kw):
        from jinja2 import Environment, FileSystemLoader
        # Escape HTML by default unless the caller explicitly opts out.
        kw.setdefault('autoescape', True)
        self._env = Environment(loader=FileSystemLoader(templ_dir), **kw)

    def add_filter(self, name, fn_filter):
        # Register a custom jinja2 filter under the given name.
        self._env.filters[name] = fn_filter

    def __call__(self, path, model):
        # Render the named template with the model and return utf-8 bytes.
        return self._env.get_template(path).render(**model).encode('utf-8')
def _default_error_handler(e, start_response, is_debug):
    # Render HttpError instances with their own status and headers;
    # anything else becomes a 500, optionally with a debug dump.
    if isinstance(e, HttpError):
        #colorlog.info('HttpError: %s' % e.status)
        headers = e.headers[:]
        headers.append(('Content-Type', 'text/html'))
        start_response(e.status, headers)
        # NOTE(review): returns a plain str, which WSGI iterates
        # character-by-character -- presumably should be wrapped in a
        # list; confirm against callers.
        return ('<html><body><h1>%s</h1></body></html>' % e.status)
    #colorlog.exception('Exception:')
    start_response('500 Internal Server Error', [('Content-Type', 'text/html'), _HEADER_X_POWERED_BY])
    if is_debug:
        return _debug()
    return ('<html><body><h1>500 Internal Server Error</h1><h3>%s</h3></body></html>' % str(e))
def view(path):
    '''
    A view decorator that render a view by dict.

    >>> @view('test/view.html')
    ... def hello():
    ...     return dict(name='Bob')
    >>> t = hello()
    >>> isinstance(t, Template)
    True
    >>> t.template_name
    'test/view.html'
    >>> @view('test/view.html')
    ... def hello2():
    ...     return ['a list']
    >>> t = hello2()
    Traceback (most recent call last):
        ...
    ValueError: Expect return a dict when using @view() decorator.
    '''
    def _decorator(func):
        @functools.wraps(func)
        def _wrapper(*args, **kw):
            model = func(*args, **kw)
            # The wrapped handler must produce a model dict for the template.
            if not isinstance(model, dict):
                raise ValueError('Expect return a dict when using @view() decorator.')
            return Template(path, **model)
        return _wrapper
    return _decorator
_RE_INTERCEPTROR_STARTS_WITH = re.compile(r'^([^\*\?]+)\*?$')
_RE_INTERCEPTROR_ENDS_WITH = re.compile(r'^\*([^\*\?]+)$')
def _build_pattern_fn(pattern):
m = _RE_INTERCEPTROR_STARTS_WITH.match(pattern)
if m:
return lambda p: p.startswith(m.group(1))
m = _RE_INTERCEPTROR_ENDS_WITH.match(pattern)
if m:
return lambda p: p.endswith(m.group(1))
raise ValueError('Invalid pattern definition in interceptor.')
def interceptor(pattern='/'):
    '''
    An @interceptor decorator.

    @interceptor('/admin/')
    def check_admin(req, resp):
        pass
    '''
    def _mark(func):
        # Attach the compiled path predicate; the chain builder reads it later.
        func.__interceptor__ = _build_pattern_fn(pattern)
        return func
    return _mark
def _build_interceptor_fn(func, next):
    # Wrap 'next' so that 'func' runs around it when its pattern matches
    # the current request path; otherwise fall straight through to 'next'.
    @functools.wraps(func)
    def _wrapper():
        if not func.__interceptor__(ctx.request.path_info):
            return next()
        return func(next)
    return _wrapper
def _build_interceptor_chain(last_fn, *interceptors):
    '''
    Build interceptor chain.

    >>> def target():
    ...     print 'target'
    ...     return 123
    >>> @interceptor('/')
    ... def f1(next):
    ...     print 'before f1()'
    ...     return next()
    >>> @interceptor('/test/')
    ... def f2(next):
    ...     print 'before f2()'
    ...     try:
    ...         return next()
    ...     finally:
    ...         print 'after f2()'
    >>> @interceptor('/')
    ... def f3(next):
    ...     print 'before f3()'
    ...     try:
    ...         return next()
    ...     finally:
    ...         print 'after f3()'
    >>> chain = _build_interceptor_chain(target, f1, f2, f3)
    >>> ctx.request = Dict(path_info='/test/abc')
    >>> chain()
    before f1()
    before f2()
    before f3()
    target
    after f3()
    after f2()
    123
    >>> ctx.request = Dict(path_info='/api/')
    >>> chain()
    before f1()
    before f3()
    target
    after f3()
    123
    '''
    # Wrap from the innermost interceptor outward so the first interceptor
    # passed in ends up outermost in the resulting call chain.
    fn = last_fn
    for f in reversed(interceptors):
        fn = _build_interceptor_fn(f, fn)
    return fn
def _load_module(module_name):
'''
Load module from name as str.
>>> m = _load_module('xml')
>>> m.__name__
'xml'
>>> m = _load_module('xml.sax')
>>> m.__name__
'xml.sax'
>>> m = _load_module('xml.sax.handler')
>>> m.__name__
'xml.sax.handler'
'''
last_dot = module_name.rfind('.')
if last_dot==(-1):
return __import__(module_name, globals(), locals())
from_module = module_name[:last_dot]
import_module = module_name[last_dot+1:]
m = __import__(from_module, globals(), locals(), [import_module])
return getattr(m, import_module)
class WSGIApplication(object):
def __init__(self, document_root=None, **kw):
'''
Init a WSGIApplication.
Args:
document_root: document root path.
'''
self._running = False
self._document_root = document_root
self._interceptors = []
self._template_engine = None
self._get_static = {}
self._post_static = {}
self._get_dynamic = []
self._post_dynamic = []
def _check_not_running(self):
if self._running:
raise RuntimeError('Cannot modify WSGIApplication when running.')
@property
def template_engine(self):
return self._template_engine
@template_engine.setter
def template_engine(self, engine):
self._check_not_running()
self._template_engine = engine
def add_module(self, mod):
self._check_not_running()
m = mod if type(mod)==types.ModuleType else _load_module(mod)
# #colorlog.info('Add module: %s' % m.__name__)
for name in dir(m):
fn = getattr(m, name)
if callable(fn) and hasattr(fn, '__web_route__') and hasattr(fn, '__web_method__'):
self.add_url(fn)
def add_url(self, func):
self._check_not_running()
route = Route(func)
if route.is_static:
if route.method=='GET':
self._get_static[route.path] = route
if route.method=='POST':
self._post_static[route.path] = route
else:
if route.method=='GET':
self._get_dynamic.append(route)
if route.method=='POST':
self._post_dynamic.append(route)
#colorlog.info('[200]%s' % str(route))
def add_interceptor(self, func):
self._check_not_running()
self._interceptors.append(func)
#colorlog.info('Add interceptor: %s' % str(func))
def run(self, config):
from wsgiref.simple_server import make_server
if not config:
host = "127.0.0.1"
port = 5237
debug = True
else:
host = config["host"]
port = int(config["port"])
debug = False
print 'application (%s) will start at %s:%s...' % (self._document_root, host, port)
server = make_server(host, port, self.get_wsgi_application(debug=True))
server.serve_forever()
def get_wsgi_application(self, debug=False):
self._check_not_running()
if debug:
self._get_dynamic.append(StaticFileRoute())
self._running = True
_application = Dict(document_root=self._document_root)
def fn_route():
request_method = ctx.request.request_method
path_info = ctx.request.path_info
if request_method=='GET':
fn = self._get_static.get(path_info, None)
if fn:
return fn()
for fn in self._get_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfound()
if request_method=='POST':
fn = self._post_static.get(path_info, None)
if fn:
return fn()
for fn in self._post_dynamic:
args = fn.match(path_info)
if args:
return fn(*args)
raise notfound()
raise badrequest()
fn_exec = _build_interceptor_chain(fn_route, *self._interceptors)
def wsgi(env, start_response):
ctx.application = _application
ctx.request = Request(env)
response = ctx.response = Response()
try:
r = fn_exec()
if isinstance(r, Template):
r = self._template_engine(r.template_name, r.model)
if isinstance(r, unicode):
r = r.encode('utf-8')
if r is None:
r = []
start_response(response.status, response.headers)
return r
except RedirectError, e:
response.set_header('Location', e.location)
start_response(e.status, response.headers)
return []
except HttpError, e:
start_response(e.status, response.headers)
return ['<html><body><h1>', e.status, '</h1></body></html>']
except Exception, e:
#colorlog.info(e)
if not debug:
start_response('500 Internal Server Error', [])
return ['<html><body><h1>500 Internal Server Error</h1></body></html>']
exc_type, exc_value, exc_traceback = sys.exc_info()
fp = StringIO()
traceback.print_exception(exc_type, exc_value, exc_traceback, file=fp)
stacks = fp.getvalue()
fp.close()
start_response('500 Internal Server Error', [])
return [
r'''<html><body><h1>500 Internal Server Error</h1><div style="font-family:Monaco, Menlo, Consolas, 'Courier New', monospace;"><pre>''',
stacks.replace('<', '<').replace('>', '>'),
'</pre></div></body></html>']
finally:
del ctx.application
del ctx.request
del ctx.response
return wsgi
if __name__=='__main__':
    # Allow doctest fixtures in the working directory to be imported,
    # then run the doctests embedded in this module's docstrings.
    sys.path.append('.')
    import doctest
    doctest.testmod()
| mit |
raccoongang/edx-platform | common/lib/xmodule/xmodule/library_content_module.py | 3 | 27080 | # -*- coding: utf-8 -*-
"""
LibraryContent: The XBlock used to include blocks from a library in a course.
"""
import json
import random
from copy import copy
from gettext import ngettext
from lazy import lazy
from lxml import etree
from opaque_keys.edx.locator import LibraryLocator
from pkg_resources import resource_string
from webob import Response
from xblock.core import XBlock
from xblock.fields import Boolean, Integer, List, Scope, String
from xblock.fragment import Fragment
from capa.responsetypes import registry
from xmodule.studio_editable import StudioEditableDescriptor, StudioEditableModule
from xmodule.validation import StudioValidation, StudioValidationMessage
from xmodule.x_module import STUDENT_VIEW, XModule
from .mako_module import MakoModuleDescriptor
from .xml_module import XmlDescriptor
# Make '_' a no-op so we can scrape strings. Using lambda instead of
# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file
_ = lambda text: text

# Sentinel capa_type value meaning "do not filter by problem type".
ANY_CAPA_TYPE_VALUE = 'any'
def _get_human_name(problem_class):
"""
Get the human-friendly name for a problem type.
"""
return getattr(problem_class, 'human_name', problem_class.__name__)
def _get_capa_types():
    """
    Gets capa types tags and labels.

    Returns a list of {'value', 'display_name'} dicts: the 'Any Type'
    sentinel first, followed by all registered capa problem types sorted
    by display name.
    """
    capa_types = {tag: _get_human_name(registry.get_class_for_tag(tag)) for tag in registry.registered_tags()}
    sorted_options = sorted(
        ({'value': capa_type, 'display_name': caption} for capa_type, caption in capa_types.items()),
        key=lambda item: item.get('display_name'),
    )
    return [{'value': ANY_CAPA_TYPE_VALUE, 'display_name': _('Any Type')}] + sorted_options
class LibraryContentFields(object):
    """
    Fields for the LibraryContentModule.

    Separated out for now because they need to be added to the module and the
    descriptor.
    """
    # Please note the display_name of each field below is used in
    # common/test/acceptance/pages/studio/library.py:StudioLibraryContentXBlockEditModal
    # to locate input elements - keep synchronized
    display_name = String(
        display_name=_("Display Name"),
        help=_("The display name for this component."),
        default="Randomized Content Block",
        scope=Scope.settings,
    )
    # The library to draw children from, stored as a string locator.
    source_library_id = String(
        display_name=_("Library"),
        help=_("Select the library from which you want to draw content."),
        scope=Scope.settings,
        values_provider=lambda instance: instance.source_library_values(),
    )
    source_library_version = String(
        # This is a hidden field that stores the version of source_library when we last pulled content from it
        display_name=_("Library Version"),
        scope=Scope.settings,
    )
    mode = String(
        display_name=_("Mode"),
        help=_("Determines how content is drawn from the library"),
        default="random",
        values=[
            {"display_name": _("Choose n at random"), "value": "random"}
            # Future addition: Choose a new random set of n every time the student refreshes the block, for self tests
            # Future addition: manually selected blocks
        ],
        scope=Scope.settings,
    )
    # Number of child components shown to each student.
    max_count = Integer(
        display_name=_("Count"),
        help=_("Enter the number of components to display to each student."),
        default=1,
        scope=Scope.settings,
    )
    capa_type = String(
        display_name=_("Problem Type"),
        help=_('Choose a problem type to fetch from the library. If "Any Type" is selected no filtering is applied.'),
        default=ANY_CAPA_TYPE_VALUE,
        values=_get_capa_types(),
        scope=Scope.settings,
    )
    selected = List(
        # This is a list of (block_type, block_id) tuples used to record
        # which random/first set of matching blocks was selected per user
        default=[],
        scope=Scope.user_state,
    )
    has_children = True

    @property
    def source_library_key(self):
        """
        Convenience method to get the library ID as a LibraryLocator and not just a string
        """
        return LibraryLocator.from_string(self.source_library_id)
#pylint: disable=abstract-method
@XBlock.wants('library_tools') # Only needed in studio
class LibraryContentModule(LibraryContentFields, XModule, StudioEditableModule):
"""
An XBlock whose children are chosen dynamically from a content library.
Can be used to create randomized assessments among other things.
Note: technically, all matching blocks from the content library are added
as children of this block, but only a subset of those children are shown to
any particular student.
"""
@classmethod
def make_selection(cls, selected, children, max_count, mode):
"""
Dynamically selects block_ids indicating which of the possible children are displayed to the current user.
Arguments:
selected - list of (block_type, block_id) tuples assigned to this student
children - children of this block
max_count - number of components to display to each student
mode - how content is drawn from the library
Returns:
A dict containing the following keys:
'selected' (set) of (block_type, block_id) tuples assigned to this student
'invalid' (set) of dropped (block_type, block_id) tuples that are no longer valid
'overlimit' (set) of dropped (block_type, block_id) tuples that were previously selected
'added' (set) of newly added (block_type, block_id) tuples
"""
selected = set(tuple(k) for k in selected) # set of (block_type, block_id) tuples assigned to this student
# Determine which of our children we will show:
valid_block_keys = set([(c.block_type, c.block_id) for c in children])
# Remove any selected blocks that are no longer valid:
invalid_block_keys = (selected - valid_block_keys)
if invalid_block_keys:
selected -= invalid_block_keys
# If max_count has been decreased, we may have to drop some previously selected blocks:
overlimit_block_keys = set()
while len(selected) > max_count:
overlimit_block_keys.add(selected.pop())
# Do we have enough blocks now?
num_to_add = max_count - len(selected)
added_block_keys = None
if num_to_add > 0:
# We need to select [more] blocks to display to this user:
pool = valid_block_keys - selected
if mode == "random":
num_to_add = min(len(pool), num_to_add)
added_block_keys = set(random.sample(pool, num_to_add))
# We now have the correct n random children to show for this user.
else:
raise NotImplementedError("Unsupported mode.")
selected |= added_block_keys
return {
'selected': selected,
'invalid': invalid_block_keys,
'overlimit': overlimit_block_keys,
'added': added_block_keys,
}
def _publish_event(self, event_name, result, **kwargs):
"""
Helper method to publish an event for analytics purposes
"""
event_data = {
"location": unicode(self.location),
"result": result,
"previous_count": getattr(self, "_last_event_result_count", len(self.selected)),
"max_count": self.max_count,
}
event_data.update(kwargs)
self.runtime.publish(self, "edx.librarycontentblock.content.{}".format(event_name), event_data)
self._last_event_result_count = len(result) # pylint: disable=attribute-defined-outside-init
@classmethod
def publish_selected_children_events(cls, block_keys, format_block_keys, publish_event):
"""
Helper method for publishing events when children blocks are
selected/updated for a user. This helper is also used by
the ContentLibraryTransformer.
Arguments:
block_keys -
A dict describing which events to publish (add or
remove), see `make_selection` above for format details.
format_block_keys -
A function to convert block keys to the format expected
by publish_event. Must have the signature:
[(block_type, block_id)] -> T
Where T is a collection of block keys as accepted by
`publish_event`.
publish_event -
Function that handles the actual publishing. Must have
the signature:
<'removed'|'assigned'> -> result:T -> removed:T -> reason:basestring -> None
Where T is a collection of block_keys as returned by
`format_block_keys`.
"""
if block_keys['invalid']:
# reason "invalid" means deleted from library or a different library is now being used.
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['invalid']),
reason="invalid"
)
if block_keys['overlimit']:
publish_event(
"removed",
result=format_block_keys(block_keys['selected']),
removed=format_block_keys(block_keys['overlimit']),
reason="overlimit"
)
if block_keys['added']:
publish_event(
"assigned",
result=format_block_keys(block_keys['selected']),
added=format_block_keys(block_keys['added'])
)
    def selected_children(self):
        """
        Returns a set() of block_ids indicating which of the possible children
        have been selected to display to the current user.
        This reads and updates the "selected" field, which has user_state scope.
        Note: self.selected and the return value contain block_ids. To get
        actual BlockUsageLocators, it is necessary to use self.children,
        because the block_ids alone do not specify the block type.
        """
        if hasattr(self, "_selected_set"):
            # Already done: the per-request cache short-circuits re-selection.
            return self._selected_set  # pylint: disable=access-member-before-definition
        block_keys = self.make_selection(self.selected, self.children, self.max_count, "random")  # pylint: disable=no-member
        # Publish events for analytics purposes:
        lib_tools = self.runtime.service(self, 'library_tools')
        format_block_keys = lambda keys: lib_tools.create_block_analytics_summary(self.location.course_key, keys)
        self.publish_selected_children_events(
            block_keys,
            format_block_keys,
            self._publish_event,
        )
        # Save our selections to the user state, to ensure consistency:
        selected = block_keys['selected']
        self.selected = list(selected)  # TODO: this doesn't save from the LMS "Progress" page.
        # Cache the results
        self._selected_set = selected  # pylint: disable=attribute-defined-outside-init
        return selected

    def _get_selected_child_blocks(self):
        """
        Generator returning XBlock instances of the children selected for the
        current user.
        """
        for block_type, block_id in self.selected_children():
            yield self.runtime.get_block(self.location.course_key.make_usage_key(block_type, block_id))
    def student_view(self, context):
        """
        Render the LMS view: only the children selected for the current user,
        wrapped in the standard vertical template.
        """
        fragment = Fragment()
        contents = []
        # Copy the context so per-child rendering cannot mutate the caller's dict.
        child_context = {} if not context else copy(context)
        for child in self._get_selected_child_blocks():
            for displayable in child.displayable_items():
                rendered_child = displayable.render(STUDENT_VIEW, child_context)
                fragment.add_frag_resources(rendered_child)
                contents.append({
                    'id': displayable.location.to_deprecated_string(),
                    'content': rendered_child.content,
                })
        fragment.add_content(self.system.render_template('vert_module.html', {
            'items': contents,
            'xblock_context': context,
            'show_bookmark_button': False,
        }))
        return fragment

    def validate(self):
        """
        Validates the state of this Library Content Module Instance.
        Delegates to the descriptor's validate() (defined further down).
        """
        return self.descriptor.validate()

    def author_view(self, context):
        """
        Renders the Studio views.
        Normal studio view: If block is properly configured, displays library status summary
        Studio container view: displays a preview of all possible children.
        """
        fragment = Fragment()
        root_xblock = context.get('root_xblock')
        is_root = root_xblock and root_xblock.location == self.location
        if is_root:
            # User has clicked the "View" link. Show a preview of all possible children:
            if self.children:  # pylint: disable=no-member
                fragment.add_content(self.system.render_template("library-block-author-preview-header.html", {
                    'max_count': self.max_count,
                    'display_name': self.display_name or self.url_name,
                }))
                context['can_edit_visibility'] = False
                context['can_move'] = False
                self.render_children(context, fragment, can_reorder=False, can_add=False)
        # else: When shown on a unit page, don't show any sort of preview -
        # just the status of this block in the validation area.
        # The following JS is used to make the "Update now" button work on the unit page and the container view:
        fragment.add_javascript_url(self.runtime.local_resource_url(self, 'public/js/library_content_edit.js'))
        fragment.initialize_js('LibraryContentAuthorView')
        return fragment

    def get_child_descriptors(self):
        """
        Return only the subset of our children relevant to the current student.
        """
        return list(self._get_selected_child_blocks())
@XBlock.wants('user')
@XBlock.wants('library_tools')  # Only needed in studio
@XBlock.wants('studio_user_permissions')  # Only available in studio
class LibraryContentDescriptor(LibraryContentFields, MakoModuleDescriptor, XmlDescriptor, StudioEditableDescriptor):
    """
    Descriptor class for LibraryContentModule XBlock.
    """
    # Static assets bundled with this XBlock type.
    resources_dir = 'assets/library_content'
    module_class = LibraryContentModule
    mako_template = 'widgets/metadata-edit.html'
    js = {'coffee': [resource_string(__name__, 'js/src/vertical/edit.coffee')]}
    js_module_name = "VerticalDescriptor"
    show_in_read_only_mode = True

    @property
    def non_editable_metadata_fields(self):
        """
        Fields hidden from the Studio settings editor.
        """
        non_editable_fields = super(LibraryContentDescriptor, self).non_editable_metadata_fields
        # The only supported mode is currently 'random'.
        # Add the mode field to non_editable_metadata_fields so that it doesn't
        # render in the edit form.
        non_editable_fields.extend([LibraryContentFields.mode, LibraryContentFields.source_library_version])
        return non_editable_fields

    @lazy
    def tools(self):
        """
        Grab the library tools service or raise an error.
        """
        return self.runtime.service(self, 'library_tools')

    def get_user_id(self):
        """
        Get the ID of the current user, or None when no user service is
        available.
        """
        user_service = self.runtime.service(self, 'user')
        if user_service:
            # May be None when creating bok choy test fixtures
            user_id = user_service.get_current_user().opt_attrs.get('edx-platform.user_id', None)
        else:
            user_id = None
        return user_id
    @XBlock.handler
    def refresh_children(self, request=None, suffix=None):  # pylint: disable=unused-argument
        """
        Refresh children:
        This method is to be used when any of the libraries that this block
        references have been updated. It will re-fetch all matching blocks from
        the libraries, and copy them as children of this block. The children
        will be given new block_ids, but the definition ID used should be the
        exact same definition ID used in the library.
        This method will update this block's 'source_library_id' field to store
        the version number of the libraries used, so we easily determine if
        this block is up to date or not.
        """
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        user_id = self.get_user_id()
        if not self.tools:
            return Response("Library Tools unavailable in current runtime.", status=400)
        self.tools.update_children(self, user_id, user_perms)
        return Response()

    # Copy over any overridden settings the course author may have applied to the blocks.
    def _copy_overrides(self, store, user_id, source, dest):
        """
        Copy any overrides the user has made on blocks in this library.
        Recurses into children, pairing them up positionally, and persists
        each destination block via the modulestore.
        """
        for field in source.fields.itervalues():
            if field.scope == Scope.settings and field.is_set_on(source):
                setattr(dest, field.name, field.read_from(source))
        if source.has_children:
            source_children = [self.runtime.get_block(source_key) for source_key in source.children]
            dest_children = [self.runtime.get_block(dest_key) for dest_key in dest.children]
            # NOTE(review): zip pairs children by position — presumably source
            # and dest were generated from the same library version; confirm.
            for source_child, dest_child in zip(source_children, dest_children):
                self._copy_overrides(store, user_id, source_child, dest_child)
        store.update_item(dest, user_id)

    def studio_post_duplicate(self, store, source_block):
        """
        Used by the studio after basic duplication of a source block. We handle the children
        ourselves, because we have to properly reference the library upstream and set the overrides.
        Otherwise we'll end up losing data on the next refresh.
        """
        # The first task will be to refresh our copy of the library to generate the children.
        # We must do this at the currently set version of the library block. Otherwise we may not have
        # exactly the same children-- someone may be duplicating an out of date block, after all.
        user_id = self.get_user_id()
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        if not self.tools:
            raise RuntimeError("Library tools unavailable, duplication will not be sane!")
        self.tools.update_children(self, user_id, user_perms, version=self.source_library_version)
        self._copy_overrides(store, user_id, source_block, self)
        # Children have been handled.
        return True
    def _validate_library_version(self, validation, lib_tools, version, library_key):
        """
        Validates library version. Adds a warning to ``validation`` if the
        referenced library has newer content, or an error if the library can
        no longer be found. Returns True only when up to date.
        """
        latest_version = lib_tools.get_library_version(library_key)
        if latest_version is not None:
            if version is None or version != unicode(latest_version):
                validation.set_summary(
                    StudioValidationMessage(
                        StudioValidationMessage.WARNING,
                        _(u'This component is out of date. The library has new content.'),
                        # TODO: change this to action_runtime_event='...' once the unit page supports that feature.
                        # See https://openedx.atlassian.net/browse/TNL-993
                        action_class='library-update-btn',
                        # Translators: {refresh_icon} placeholder is substituted to "↻" (without double quotes)
                        action_label=_(u"{refresh_icon} Update now.").format(refresh_icon=u"↻")
                    )
                )
                return False
        else:
            # No version at all: the library itself is gone or unreadable.
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(u'Library is invalid, corrupt, or has been deleted.'),
                    action_class='edit-button',
                    action_label=_(u"Edit Library List.")
                )
            )
            return False
        return True

    def _set_validation_error_if_empty(self, validation, summary):
        """ Helper method to only set validation summary if it's empty """
        if validation.empty:
            validation.set_summary(summary)
    def validate(self):
        """
        Validates the state of this Library Content Module Instance. This
        is the override of the general XBlock method, and it will also ask
        its superclass to validate.
        """
        validation = super(LibraryContentDescriptor, self).validate()
        if not isinstance(validation, StudioValidation):
            validation = StudioValidation.copy(validation)
        library_tools = self.runtime.service(self, "library_tools")
        if not (library_tools and library_tools.can_use_library_content(self)):
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.ERROR,
                    _(
                        u"This course does not support content libraries. "
                        u"Contact your system administrator for more information."
                    )
                )
            )
            return validation
        if not self.source_library_id:
            validation.set_summary(
                StudioValidationMessage(
                    StudioValidationMessage.NOT_CONFIGURED,
                    _(u"A library has not yet been selected."),
                    action_class='edit-button',
                    action_label=_(u"Select a Library.")
                )
            )
            return validation
        lib_tools = self.runtime.service(self, 'library_tools')
        self._validate_library_version(validation, lib_tools, self.source_library_version, self.source_library_key)
        # Note: we assume refresh_children() has been called
        # since the last time fields like source_library_id or capa_types were changed.
        matching_children_count = len(self.children)  # pylint: disable=no-member
        if matching_children_count == 0:
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    _(u'There are no matching problem types in the specified libraries.'),
                    action_class='edit-button',
                    action_label=_(u"Select another problem type.")
                )
            )
        if matching_children_count < self.max_count:
            # Fewer matching problems than the block is configured to show.
            self._set_validation_error_if_empty(
                validation,
                StudioValidationMessage(
                    StudioValidationMessage.WARNING,
                    (
                        ngettext(
                            u'The specified library is configured to fetch {count} problem, ',
                            u'The specified library is configured to fetch {count} problems, ',
                            self.max_count
                        ) +
                        ngettext(
                            u'but there is only {actual} matching problem.',
                            u'but there are only {actual} matching problems.',
                            matching_children_count
                        )
                    ).format(count=self.max_count, actual=matching_children_count),
                    action_class='edit-button',
                    action_label=_(u"Edit the library configuration.")
                )
            )
        return validation
    def source_library_values(self):
        """
        Return a list of possible values for self.source_library_id,
        as {"display_name": ..., "value": ...} dicts for the Studio editor.
        """
        lib_tools = self.runtime.service(self, 'library_tools')
        user_perms = self.runtime.service(self, 'studio_user_permissions')
        all_libraries = [
            (key, name) for key, name in lib_tools.list_available_libraries()
            if user_perms.can_read(key) or self.source_library_id == unicode(key)
        ]
        all_libraries.sort(key=lambda entry: entry[1])  # Sort by name
        # Keep an entry for the currently-set library even if it is gone,
        # so the editor does not silently drop the value.
        if self.source_library_id and self.source_library_key not in [entry[0] for entry in all_libraries]:
            all_libraries.append((self.source_library_id, _(u"Invalid Library")))
        all_libraries = [(u"", _("No Library Selected"))] + all_libraries
        values = [{"display_name": name, "value": unicode(key)} for key, name in all_libraries]
        return values

    def editor_saved(self, user, old_metadata, old_content):
        """
        If source_library_id or capa_type has been edited, refresh_children automatically.
        """
        old_source_library_id = old_metadata.get('source_library_id', [])
        if (old_source_library_id != self.source_library_id or
                old_metadata.get('capa_type', ANY_CAPA_TYPE_VALUE) != self.capa_type):
            try:
                self.refresh_children()
            except ValueError:
                pass  # The validation area will display an error message, no need to do anything now.

    def has_dynamic_children(self):
        """
        Inform the runtime that our children vary per-user.
        See get_child_descriptors() above
        """
        return True

    def get_content_titles(self):
        """
        Returns list of friendly titles for our selected children only; without
        this, all possible children's titles would be seen in the sequence bar in
        the LMS.
        This overwrites the get_content_titles method included in x_module by default.
        """
        titles = []
        for child in self._xmodule.get_child_descriptors():
            titles.extend(child.get_content_titles())
        return titles
@classmethod
def definition_from_xml(cls, xml_object, system):
children = [
system.process_xml(etree.tostring(child)).scope_ids.usage_id
for child in xml_object.getchildren()
]
definition = {
attr_name: json.loads(attr_value)
for attr_name, attr_value in xml_object.attrib
}
return definition, children
    def definition_to_xml(self, resource_fs):
        """ Exports Library Content Module to XML """
        xml_object = etree.Element('library_content')
        for child in self.get_children():
            self.runtime.add_block_as_child_node(child, xml_object)
        # Set node attributes based on our fields.
        for field_name, field in self.fields.iteritems():
            # Structural/system fields are represented as child nodes or not at all.
            if field_name in ('children', 'parent', 'content'):
                continue
            if field.is_set_on(self):
                xml_object.set(field_name, unicode(field.read_from(self)))
        return xml_object
| agpl-3.0 |
bstroebl/QGIS | python/plugins/sextante/taudem/dinftranslimaccum2.py | 2 | 5247 | # -*- coding: utf-8 -*-
"""
***************************************************************************
dinftranslimaccum2.py
---------------------
Date : October 2012
Copyright : (C) 2012 by Alexander Bruy
Email : alexander dot bruy at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Alexander Bruy'
__date__ = 'October 2012'
__copyright__ = '(C) 2012, Alexander Bruy'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from PyQt4.QtGui import *
from sextante.core.GeoAlgorithm import GeoAlgorithm
from sextante.core.SextanteLog import SextanteLog
from sextante.core.SextanteUtils import SextanteUtils
from sextante.core.SextanteConfig import SextanteConfig
from sextante.core.GeoAlgorithmExecutionException import GeoAlgorithmExecutionException
from sextante.parameters.ParameterRaster import ParameterRaster
from sextante.parameters.ParameterVector import ParameterVector
from sextante.parameters.ParameterBoolean import ParameterBoolean
from sextante.outputs.OutputRaster import OutputRaster
from sextante.taudem.TauDEMUtils import TauDEMUtils
class DinfTransLimAccum2(GeoAlgorithm):
    """
    SEXTANTE wrapper for the TauDEM "D-Infinity Transport Limited
    Accumulation - 2" tool (the variant taking an input concentration grid).
    Builds an mpiexec command line and delegates execution to TauDEMUtils.
    """
    # Keys used to register and look up parameters/outputs.
    DINF_FLOW_DIR_GRID = "DINF_FLOW_DIR_GRID"
    SUPPLY_GRID = "SUPPLY_GRID"
    CAPACITY_GRID = "CAPACITY_GRID"
    IN_CONCENTR_GRID = "IN_CONCENTR_GRID"
    OUTLETS_SHAPE = "OUTLETS_SHAPE"
    EDGE_CONTAM = "EDGE_CONTAM"
    TRANSP_LIM_ACCUM_GRID = "TRANSP_LIM_ACCUM_GRID"
    DEPOSITION_GRID = "DEPOSITION_GRID"
    OUT_CONCENTR_GRID = "OUT_CONCENTR_GRID"

    def getIcon(self):
        """Return the TauDEM toolbox icon."""
        return QIcon(os.path.dirname(__file__) + "/../images/taudem.png")

    def defineCharacteristics(self):
        """Declare the algorithm's name, group, parameters and outputs."""
        self.name = "D-Infinity Transport Limited Accumulation - 2"
        self.cmdName = "dinftranslimaccum"
        self.group = "Specialized Grid Analysis tools"
        self.addParameter(ParameterRaster(self.DINF_FLOW_DIR_GRID, "D-Infinity Flow Direction Grid", False))
        self.addParameter(ParameterRaster(self.SUPPLY_GRID, "Supply Grid", False))
        self.addParameter(ParameterRaster(self.CAPACITY_GRID, "Transport Capacity Grid", False))
        self.addParameter(ParameterRaster(self.IN_CONCENTR_GRID, "Input Concentration Grid", False))
        self.addParameter(ParameterVector(self.OUTLETS_SHAPE, "Outlets Shapefile", ParameterVector.VECTOR_TYPE_POINT, True))
        self.addParameter(ParameterBoolean(self.EDGE_CONTAM, "Check for edge contamination", True))
        self.addOutput(OutputRaster(self.TRANSP_LIM_ACCUM_GRID, "Transport Limited Accumulation Grid"))
        self.addOutput(OutputRaster(self.DEPOSITION_GRID, "Deposition Grid"))
        self.addOutput(OutputRaster(self.OUT_CONCENTR_GRID, "Output Concentration Grid"))

    def processAlgorithm(self, progress):
        """
        Assemble the mpiexec/TauDEM command line from the parameter values
        and run it, logging the full command for debugging.

        Raises GeoAlgorithmExecutionException when the configured number of
        MPI processes is missing or not positive.
        """
        commands = []
        commands.append(os.path.join(TauDEMUtils.mpiexecPath(), "mpiexec"))
        processNum = SextanteConfig.getSetting(TauDEMUtils.MPI_PROCESSES)
        # Guard explicitly against an unset setting (None) as well as
        # non-positive values; comparing None with "<=" raises on Python 3.
        if processNum is None or processNum <= 0:
            raise GeoAlgorithmExecutionException("Wrong number of MPI processes used.\nPlease set correct number before running TauDEM algorithms.")
        commands.append("-n")
        commands.append(str(processNum))
        commands.append(os.path.join(TauDEMUtils.taudemPath(), self.cmdName))
        commands.append("-ang")
        commands.append(self.getParameterValue(self.DINF_FLOW_DIR_GRID))
        commands.append("-tsup")
        commands.append(self.getParameterValue(self.SUPPLY_GRID))
        commands.append("-tc")
        commands.append(self.getParameterValue(self.CAPACITY_GRID))
        commands.append("-cs")
        commands.append(self.getParameterValue(self.IN_CONCENTR_GRID))
        # The outlets shapefile is optional.
        param = self.getParameterValue(self.OUTLETS_SHAPE)
        if param is not None:
            commands.append("-o")
            commands.append(param)
        # "-nc" disables the edge-contamination check.
        if str(self.getParameterValue(self.EDGE_CONTAM)).lower() == "false":
            commands.append("-nc")
        commands.append("-tla")
        commands.append(self.getOutputValue(self.TRANSP_LIM_ACCUM_GRID))
        commands.append("-tdep")
        commands.append(self.getOutputValue(self.DEPOSITION_GRID))
        commands.append("-ctpt")
        commands.append(self.getOutputValue(self.OUT_CONCENTR_GRID))
        # Log the complete command line (idiomatic extend instead of a loop).
        loglines = ["TauDEM execution command"]
        loglines.extend(commands)
        SextanteLog.addToLog(SextanteLog.LOG_INFO, loglines)
        TauDEMUtils.executeTauDEM(commands, progress)

    def helpFile(self):
        """Return the path to the bundled HTML help page for this tool."""
        return os.path.join(os.path.dirname(__file__), "help", self.cmdName + ".html")
| gpl-2.0 |
Elchi3/kuma | kuma/core/management/commands/translate_locales_name.py | 2 | 1244 | from django.conf import settings
from django.core.management.base import BaseCommand
# A script to generate a template which will be used to localize the supported locale name
# For more information see https://bugzil.la/859499#c11
class Command(BaseCommand):
    """
    Generate a Jinja template listing the English names of all enabled
    locales wrapped in gettext calls, so the names become localizable.
    Background: https://bugzil.la/859499#c11
    """
    help = "Generate a template to get the locales name localized"

    def handle(self, *args, **options):
        template_string = (
            "This template is automatically generated by "
            "*manage.py translate_locales_name* in order "
            "to make the languages name localizable. "
            "Do not edit it manually\n"
            "Background: https://bugzil.la/859499#c11\n"
        )
        LANGUAGES = sorted(
            [
                lang_info.english
                for lang_code, lang_info in settings.LOCALES.items()
                if lang_code in settings.ENABLED_LOCALES
            ]
        )
        for lang in LANGUAGES:
            template_string += "{{ _('%s') }}\n" % lang
        jinja_path = settings.TEMPLATES[0]["DIRS"][0]
        template_path = jinja_path + "/includes/translate_locales.html"
        # BUG FIX: the original opened the file in text mode ("w") and then
        # wrote encoded bytes, which raises TypeError on Python 3. Open in
        # binary mode so the explicit UTF-8 encoding round-trips, and use a
        # context manager so the file is always closed.
        with open(template_path, "wb") as outfile:
            outfile.write(template_string.encode("utf8"))
| mpl-2.0 |
MarekIgnaszak/econ-project-templates | .mywaflib/waflib/extras/run_do_script.py | 5 | 7290 | #!/usr/bin/env python
# encoding: utf-8
# Hans-Martin von Gaudecker, 2012-16
"""
Run a Stata do-script in the directory specified by **ctx.bldnode**. The
first and only argument will be the name of the do-script (no extension),
which can be accessed inside the do-script by the local macro `1'. Useful
for keeping a log file.
The tool uses the log file that is automatically kept by Stata only
for error-catching purposes; it will be destroyed if the task finishes
without error. In case of an error in **some_script.do**, you can inspect
it as **some_script.log** in the **ctx.bldnode** directory.
Note that Stata will not return an error code if it exits abnormally --
catching errors relies on parsing the log file mentioned before. Should
the parser behave incorrectly please send an email to hmgaudecker [at] gmail.
**WARNING**
The tool will not work if multiple do-scripts of the same name---but in
different directories---are run at the same time! Avoid this situation.
Strings supplied to the **prepend** and **append** keywords will be added
to the command line.
Usage::
ctx(
features='run_do_script',
source='some_script.do',
target=['some_table.tex', 'some_figure.eps'],
deps='some_data.csv',
append='',
prepend=''
)
"""
import os
import re
import sys
from waflib import Task, TaskGen, Logs
# Platform-specific Stata configuration: candidate executable names, the
# batch-mode flags, and the encoding Stata uses for its log files.
if sys.platform == 'darwin':
    STATA_NAMES = [
        'Stata64MP', 'StataMP',
        'Stata64SE', 'StataSE',
        'Stata64', 'Stata'
    ]
    # On macOS also search inside the application bundles.
    STATA_PATHS = ['/Applications/Stata/%s.app/Contents/MacOS/%s' % (sv, sv)
                   for sv in STATA_NAMES]
    STATA_COMMANDS = STATA_NAMES + STATA_PATHS
    STATAFLAGS = '-e -q do'
    STATAENCODING = 'MacRoman'
elif sys.platform.startswith('linux'):
    STATA_COMMANDS = ['stata-mp', 'stata-se', 'stata']
    STATAFLAGS = '-b -q do'
    # Not sure whether this is correct...
    STATAENCODING = 'Latin-1'
elif sys.platform.lower().startswith('win'):
    STATA_COMMANDS = [
        'StataMP-64', 'StataMP-ia',
        'StataMP', 'StataSE-64',
        'StataSE-ia', 'StataSE',
        'Stata-64', 'Stata-ia',
        'Stata', 'WMPSTATA',
        'WSESTATA', 'WSTATA'
    ]
    STATAFLAGS = '/e do'
    STATAENCODING = 'Latin-1'
else:
    # Unsupported platform: fail loudly at import time.
    raise Exception("Unknown sys.platform: %s " % sys.platform)
def configure(ctx):
    """
    Waf configure step: locate a Stata executable (stored in ctx.env.STATACMD)
    and record the platform-specific batch flags and log-file encoding.
    Aborts configuration with an explanatory message if none is found.
    """
    ctx.find_program(
        STATA_COMMANDS,
        var='STATACMD',
        errmsg="""\n
No Stata executable found!\n\n
If Stata is needed:\n
    1) Check the settings of your system PATH.
    2) Note we are looking for Stata executables named
        %s,
       both in the Applications folder and on the PATH.
       If yours has a different name, please report to hmgaudecker [at] gmail\n
Else:\n
    Do not load the 'run_do_script' tool in the main wscript.\n\n"""
        % STATA_COMMANDS
    )
    ctx.env.STATAFLAGS = STATAFLAGS
    ctx.env.STATAENCODING = STATAENCODING
class run_do_script_base(Task.Task):
    """Run a Stata do-script from the bldnode directory."""
    run_str = '${PREPEND} "${STATACMD}" ${STATAFLAGS} "${SRC[0].abspath()}" "${DOFILETRUNK}" ${APPEND}'
    shell = True

    def exec_command(self, cmd, **kw):
        """
        Execute ``cmd``, defaulting the working directory to the build
        context's cwd (falling back to the variant dir the first time) and
        optionally passing output straight through instead of buffering it.
        """
        bld = self.generator.bld
        try:
            if not kw.get('cwd', None):
                kw['cwd'] = bld.cwd
        except AttributeError:
            # bld has no cwd yet; initialize it from the variant dir.
            bld.cwd = kw['cwd'] = bld.variant_dir
        if not self.buffer_output:
            # None means "inherit the parent's stdout/stderr".
            kw["stdout"] = kw["stderr"] = None
        return bld.exec_command(cmd, **kw)

    def keyword(self):
        """
        Override the 'Compiling' default.
        """
        return 'Running'

    def __str__(self):
        """
        More useful output.
        """
        return "{prepend} [Stata] {stataflags} {fn} {dofiletrunk} {append}".format(
            prepend=self.env.PREPEND,
            stataflags=self.env.STATAFLAGS,
            fn=self.inputs[0].path_from(self.inputs[0].ctx.launch_node()),
            dofiletrunk=self.env.DOFILETRUNK,
            append=self.env.APPEND
        )
class run_do_script(run_do_script_base):
    """Use the log file automatically kept by Stata for error-catching.
    Erase it if the task finished without error. If not, it will show
    up as do_script.log in the bldnode directory.
    """

    def run(self):
        """Run the do-script, then inspect Stata's log for an error code."""
        run_do_script_base.run(self)
        ret, log_tail = self.check_erase_log_file()
        if ret:
            Logs.error(
                """Running Stata on %s failed with code %r.\n
Check the log file %s, last 10 lines\n\n%s\n\n\n"""
                % (
                    self.inputs[0].relpath(),
                    ret,
                    self.env.LOGFILEPATH,
                    log_tail
                )
            )
        return ret

    def check_erase_log_file(self):
        """Parse Stata's default log file and erase it if everything okay.

        Returns (error_code, last_10_log_lines) when an "r(###)" marker is
        found, else (None, None) after deleting the log file.

        Parser is based on Brendan Halpin's shell script found here:
        http://teaching.sociology.ul.ie/bhalpin/wordpress/?p=122
        """
        # Python 3's open() accepts an explicit encoding (Stata's log encoding
        # is platform-dependent); Python 2's file constructor does not.
        if sys.version_info.major >= 3:
            kwargs = {'file': self.env.LOGFILEPATH, 'mode':
                      'r', 'encoding': self.env.STATAENCODING}
        else:
            kwargs = {'name': self.env.LOGFILEPATH, 'mode': 'r'}
        with open(**kwargs) as log:
            log_tail = log.readlines()[-10:]
        for line in log_tail:
            # Use a raw string for the regex: "\(" in a plain string is an
            # invalid escape sequence (DeprecationWarning on Python 3.6+).
            # The pointless "else: pass" of the original loop was dropped.
            error_found = re.match(r"r\(([0-9]+)\)", line)
            if error_found:
                return error_found.group(1), ''.join(log_tail)
        # Only end up here if the parser did not identify an error.
        os.remove(self.env.LOGFILEPATH)
        return None, None
@TaskGen.feature('run_do_script')
@TaskGen.before_method('process_source')
def apply_run_do_script(tg):
    """Task generator customising the options etc. to call Stata in batch
    mode for running a do-script.
    """
    # Convert sources and targets to nodes
    src_node = tg.path.find_resource(tg.source)
    if src_node is None:
        tg.bld.fatal(
            "Could not find source file: {}".format(os.path.join(tg.path.relpath(), tg.source))
        )
    tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
    tsk = tg.create_task('run_do_script', src=src_node, tgt=tgt_nodes)
    # Stata receives the do-file name without extension as its argument.
    tsk.env.DOFILETRUNK = os.path.splitext(src_node.name)[0]
    tsk.env.LOGFILEPATH = os.path.join(
        tg.bld.bldnode.abspath(), '%s.log' % (tsk.env.DOFILETRUNK)
    )
    # Optional extra command-line fragments and output-buffering toggle.
    tsk.env.APPEND = getattr(tg, 'append', '')
    tsk.env.PREPEND = getattr(tg, 'prepend', '')
    tsk.buffer_output = getattr(tg, 'buffer_output', True)
    # dependencies (if the attribute 'deps' changes, trigger a recompilation)
    for x in tg.to_list(getattr(tg, 'deps', [])):
        node = tg.path.find_resource(x)
        if not node:
            tg.bld.fatal(
                'Could not find dependency %r for running %r'
                % (x, src_node.relpath())
            )
        tsk.dep_nodes.append(node)
    Logs.debug(
        'deps: found dependencies %r for running %r'
        % (tsk.dep_nodes, src_node.relpath())
    )
    # Bypass the execution of process_source by setting the source to an empty
    # list
    tg.source = []
| bsd-3-clause |
BrainTech/pisak | pisak/scanning.py | 1 | 37237 | """
Classes for defining scanning in JSON layouts.
"""
import time
from gi.repository import Clutter, GObject
import pisak
from pisak import logger, exceptions, properties, configurator, dirs
from pisak.sound_effects import Synthesizer
_LOG = logger.get_logger(__name__)
class Scannable:
    """
    Interface of object scannable by switcher groups. Switcher groups expect
    widgets implement this interface. All methods must be overridden by
    concrete widgets; the base versions raise NotImplementedError.
    """
    def activate(self):
        """
        Performs widgets action.
        """
        raise NotImplementedError()

    def enable_hilite(self):
        """
        Enables hilite style for this widget.
        """
        raise NotImplementedError()

    def disable_hilite(self):
        """
        Disables hilite style for this widget.
        """
        raise NotImplementedError()

    def enable_scanned(self):
        """
        Enables scanned style for this widget.
        """
        raise NotImplementedError()

    def enable_lag_hilite(self):
        """
        Enables lag_hilite style for this widget.
        """
        raise NotImplementedError()

    def disable_lag_hilite(self):
        """
        Disables lag_hilite style for this widget.
        """
        raise NotImplementedError()

    def disable_scanned(self):
        """
        Disables scanned style for this widget.
        """
        raise NotImplementedError()

    def is_disabled(self):
        """
        Checks whether element is disabled from activation.
        """
        raise NotImplementedError()
class StylableScannable(Scannable):
    """
    Partial implementation of Scannable interface for stylable widgets.
    Hilighted and scanned widgets are marked with CSS pseudoclasses.
    ``activate`` and ``is_disabled`` remain abstract.
    """
    def enable_hilite(self):
        """
        Enables hilite style for this widget.
        """
        self.style_pseudo_class_add("hover")

    def disable_hilite(self):
        """
        Disables hilite style for this widget.
        """
        self.style_pseudo_class_remove("hover")

    def enable_scanned(self):
        """
        Enables scanned style for this widget.
        """
        self.style_pseudo_class_add("scanning")

    def disable_scanned(self):
        """
        Disables scanned style for this widget.
        """
        self.style_pseudo_class_remove("scanning")

    def enable_lag_hilite(self):
        """
        Enables lag_hilite style for this widget.
        """
        self.style_pseudo_class_add("lag_hilite")

    def disable_lag_hilite(self):
        """
        Disables lag_hilite style for this widget.
        """
        self.style_pseudo_class_remove("lag_hilite")

    def activate(self):
        """
        Performs widgets action.
        """
        raise NotImplementedError()

    def is_disabled(self):
        """
        Checks whether element is disabled from activation.
        """
        raise NotImplementedError()
class Strategy(Clutter.Actor):
    """
    Abstract base class for scanning strategies. A strategy drives one
    :class:`Group`'s scanning cycle and performs element selection.
    """
    def __init__(self):
        super().__init__()
        self._group = None

    @property
    def group(self):
        """
        Reference to a group which owns the strategy.
        """
        return self._group

    @group.setter
    def group(self, value):
        self._group = value

    def select(self, element=None):
        """
        Selects currently highlighted element.

        :param element: optional, element to be directly selected.
        """
        select_lag_disabled = False
        element = element or self.get_current_element()
        if element is None:
            _LOG.debug("There is no current element that could be frozen.")
            return
        if self.select_sound_enabled:
            self.play_selection_sound()
        if isinstance(element, Group):
            # Selecting a nested group: stop this cycle and descend into it,
            # optionally after a visual lag.
            if not self.group.paused:
                self.group.stop_cycle()
                if self.select_lag > 0:
                    self._on_lag("select", element, self.select_lag)
                else:
                    self._do_select(element)
        elif hasattr(element, "enable_hilite"):
            # Selecting a scannable widget.
            pisak.app.window.pending_group = self.unwind_to
            if hasattr(element, "scanning_pauser") and element.scanning_pauser:
                # Toggling a pauser while already paused skips the lag once.
                if self.group.paused:
                    select_lag_disabled = True
                self.group.paused = not self.group.paused
            if not self.group.paused:
                self.group.stop_cycle()
            if self.select_lag > 0 and not select_lag_disabled:
                self._on_lag("select", element, self.select_lag)
            else:
                self._do_select(element)

    def unwind(self):
        """
        Stops the group cycle. Starts scanning a group set
        as an 'unwind' or a parent group if no 'unwind' has been set.
        """
        self.group.stop_cycle()
        if self.unwind_to is not None:
            self.unwind_to.start_cycle()
        else:
            self.group.parent_group.start_cycle()

    def get_current_element(self):
        """
        Abstract method to extract currently highlighted element from an
        internal strategy state.

        :return: currently highlighed element.
        """
        raise NotImplementedError("Incomplete strategy implementation")

    def _do_select(self, element):
        # Perform the actual selection: descend into a group or activate a
        # scannable widget, then relaunch the appropriate scanning cycle.
        if isinstance(element, Group):
            if not self.group.killed:
                element.parent_group = self.group
                element.start_cycle()
        elif hasattr(element, "enable_hilite"):
            if not self.group.killed:
                element.activate()
            if hasattr(element, "disable_lag_hilite"):
                element.disable_lag_hilite()
            if not self.group.killed and not self.group.paused:
                # launch next group
                if pisak.app.window.pending_group:
                    pisak.app.window.pending_group.start_cycle()
                else:
                    self.group.start_cycle()
        else:
            raise Exception("Unsupported selection")
class ScanningException(exceptions.PisakException):
    """
    Scanning specific exception. Raised for errors in the scanning machinery.
    """
    pass
class _GroupObserver:
    """
    Helper class for Group. This class observes all group descendants. When
    a scannable widget or a nested subgroup appears or disappears anywhere in
    the subtree, it schedules an update of the group's scanning sequence.
    """
    def __init__(self, group):
        self.group = group
        self._init_connections()

    def _observe(self, actor):
        """
        Adds add/remove handlers to ``actor`` and, recursively, to all of its
        current children.
        """
        # The handler ids returned by connect() were previously stored in
        # unused locals; they are not needed, so drop them.
        actor.connect("actor-added", self._add_actor)
        actor.connect("actor-removed", self._remove_actor)
        for child in actor.get_children():
            self._observe(child)

    def _init_connections(self):
        # observe group children
        self._observe(self.group)

    def _add_actor(self, _parent, descendant):
        """React to an actor being added anywhere below the group."""
        if isinstance(descendant, Group):
            # rescan: a new group was added
            self.group.schedule_update()
        elif hasattr(descendant, "enable_hilite"):
            # rescan: a new scannable was added
            self.group.schedule_update()
        else:
            # plain container: watch it and everything already inside it
            for child in descendant.get_children():
                self._add_actor(descendant, child)
            self._observe(descendant)

    def _remove_actor(self, _parent, descendant):
        """React to an actor being removed anywhere below the group."""
        if isinstance(descendant, Group):
            # rescan: a group was removed
            self.group.schedule_update()
        elif hasattr(descendant, "enable_hilite"):
            # rescan: a scannable was removed
            self.group.schedule_update()
        else:
            for child in descendant.get_children():
                self._remove_actor(descendant, child)
class Group(Clutter.Actor, properties.PropertyAdapter,
            configurator.Configurable):
    """
    Container for grouping widgets for scanning purposes.
    """
    __gtype_name__ = "PisakScanningGroup"
    # GObject properties exposed to JSON layout definitions.
    __gproperties__ = {
        "strategy": (
            Strategy.__gtype__,
            "", "",
            GObject.PARAM_READWRITE),
        "scanning-hilite": (
            GObject.TYPE_BOOLEAN,
            "", "", False,
            GObject.PARAM_READWRITE),
        "sound": (
            GObject.TYPE_STRING,
            "", "", "scan",
            GObject.PARAM_READWRITE
        )
    }

    def __init__(self):
        # Cached-subgroups bookkeeping; invalidated via schedule_update().
        self.fresh_subgroups = False
        self._subgroups = []
        # Currently hilited / scanned / lag-hilited elements.
        self._hilited = []
        self._scanned = []
        self._lag_hilited = []
        self._strategy = None
        # Default scanning sound; may be overridden via the "sound" property.
        self._sound = dirs.get_sound_path('scan.wav')
        # Scanning state flags.
        self.paused = False
        self.killed = False
        self.suppress_collapse_select_on_init = False
        self.parent_group = None
        self.signal_source = None
        self._scanning_hilite = False
        self.user_action_handler = None
        self.input_handler_token = None
        super().__init__()
        # Watches the subtree and schedules rescans on structural changes.
        self.observer = _GroupObserver(self)
        self.set_layout_manager(Clutter.BinLayout())
        self.apply_props()
@property
def strategy(self):
"""
Scanning strategy that will manage the entire scanning cycle.
"""
return self._strategy
@strategy.setter
def strategy(self, value):
if self.strategy is not None:
self.strategy.group = None
self._strategy = value
if self._strategy is not None:
self._strategy.group = self
@property
def scanning_hilite(self):
"""
Whether the 'scanning' hilite style should be enabled, boolean.
"""
return self._scanning_hilite
@scanning_hilite.setter
def scanning_hilite(self, value):
self._scanning_hilite = value
if not value:
self.disable_scan_hilite()
@property
def sound(self):
"""
Sound specific for the group, played when the group is being scanned.
"""
return self._sound
@sound.setter
def sound(self, name):
if isinstance(name, str):
self._sound = dirs.get_sound_path(name + '.wav') or self._sound
def schedule_update(self):
"""
Schedule updating a list of the group current subgroups.
"""
self.fresh_subgroups = False
def get_subgroups(self):
"""
Get a list of the subgroups belonging currently to the group.
:return: list of all the subgroups.
"""
if not self.fresh_subgroups:
self.fresh_subgroups = True
self._subgroups = list(self._gen_subgroups())
return self._subgroups
def is_flat(self):
"""
Test if group is flat, that is whether it contains
any nested subgroups.
:return: True if group has no subgroups, False otherwise.
"""
for obj in self.get_children():
if isinstance(obj, Group):
return False
return True
def is_empty(self):
"""
Tests if group is empty.
:return: True if group has subgroups, False otherwise.
"""
return len(self.get_subgroups()) == 0
def is_singular(self):
"""
Test if group has exactly 1 element.
:return: True if group has exactly 1 subgroup, False otherwise.
"""
return len(self.get_subgroups()) == 1
def go_standby(self):
"""
Turn off the scanning of the group and make it wait for being restarted
by a user action.
"""
if self.scanning_hilite:
self.disable_scan_hilite()
self.strategy.stop()
self.user_action_handler = self.restart_cycle
def restart_cycle(self):
"""
Restart group cycle that had already been startd before but went
standby in a meantime.
"""
if self.input_handler_token is not None:
self.signal_source.disconnect(self.input_handler_token)
self.start_cycle()
def start_cycle(self):
"""
Starts group cycle. The cycle can be stopped with `stop_cycle` method.
The cycle will also be stopped if the strategy's `has_next` method returns
False.
"""
_LOG.debug("Starting group {}".format(self.get_id()))
self.observer = _GroupObserver(self)
if not self.get_property("mapped"):
self.connect('notify::mapped', lambda *_: self.start_cycle())
message = \
"Started cycle in unmapped group: {}".format(self.get_id())
_LOG.warning(message)
# TODO: do something wise here
return
_LOG.debug("Starting group {}".format(self.get_id()))
collapsed = get_top_level_group([self])
if collapsed is not self:
if not (collapsed.strategy.unwind_to or collapsed.parent_group):
collapsed.strategy.unwind_to = self.strategy.unwind_to or self.parent_group
collapsed.start_cycle()
return
if self.is_singular() and self._on_singular():
return
signal, handler, self.signal_source = \
pisak.app.window.input_group.get_scanning_desc(self)
# different registation type needed for pisak-switch to work
input_source = pisak.config.get("input")
if input_source != "pisak-switch":
self.input_handler_token = self.signal_source.connect(
signal, lambda *args: handler(self, *args))
else:
self.input_handler_token = self.signal_source.connect(
signal, lambda *args: Clutter.threads_add_idle(100, handler, self, *args))
self.killed = False
if self.scanning_hilite:
self.enable_scan_hilite()
self.user_action_handler = self.strategy.select
self.set_key_focus()
self.strategy.start()
def stop_cycle(self):
"""
Stop currently running group cycle.
"""
if self.signal_source and self.input_handler_token:
self.signal_source.disconnect(self.input_handler_token)
if self.scanning_hilite:
self.disable_scan_hilite()
self.strategy.stop()
def set_key_focus(self):
"""
Set key focus to the stage owning the group.
"""
stage = self.get_stage()
if stage is not None:
stage.set_key_focus(self)
def key_release(self, _source, event):
"""
Key release handler. Triggers an action.
:param _source: signal source.
:param event: event specification, contains a released key code.
:return: True.
"""
if event.unicode_value == ' ':
self.user_action_handler()
return True
def button_release(self, source, event=None):
"""
Button release handler. Triggers an action.
:param source: signal source.
:param event: optional, event specification.
:return: False.
"""
self.user_action_handler()
return False
def enable_hilite(self):
"""
Recursively enable hilite.
"""
def operation(s):
s.enable_hilite()
self._hilited.append(s)
self._recursive_apply(
lambda s: hasattr(s, "enable_hilite"),
operation)
def disable_hilite(self):
"""
Disable hilite of all the previously hilited elements.
"""
for s in self._hilited:
s.disable_hilite()
self._hilited = []
def enable_lag_hilite(self):
"""
Recursively enable lag hilite.
"""
def operation(s):
s.enable_lag_hilite()
self._lag_hilited.append(s)
self._recursive_apply(
lambda s: hasattr(s, "enable_lag_hilite"),
operation)
def disable_lag_hilite(self):
"""
Disable lag hilite of all the previously lag-hilited elements.
"""
for s in self._lag_hilited:
s.disable_lag_hilite()
self._lag_hilited = []
def enable_scan_hilite(self):
"""
Recursively enable scan hilite.
"""
def operation(s):
s.enable_scanned()
self._scanned.append(s)
self._recursive_apply(
lambda s: hasattr(s, "enable_scanned"),
operation)
def disable_scan_hilite(self):
"""
Disable hilite of all the previously scan-hilited elements.
"""
for s in self._scanned:
s.disable_scanned()
self._scanned = []
def _on_singular(self):
"""
Do something when the group is singular. If its only child is a
scanning group then start its cycle otherwise select the child element unless
this behaviour has been suppressed.
:return: boolean.
"""
sub_element = self.get_subgroups()[0]
if isinstance(sub_element, Group):
msg = 'Group {} is singular. Starting its only subgroup.'
_LOG.debug(msg.format(self.get_id()))
sub_element.start_cycle()
ret = True
else:
if not self.suppress_collapse_select_on_init:
self.strategy.select(sub_element)
ret = True
else:
self.suppress_collapse_select_on_init = False
ret = False
return ret
def _recursive_apply(self, test, operation):
subgroups = self.get_subgroups()
for s in subgroups:
if test(s):
operation(s)
elif isinstance(s, Group):
s._recursive_apply(test, operation)
def _gen_subgroups(self):
"""
Generator of all subgroups of the group.
"""
to_scan = self.get_children()
while len(to_scan) > 0:
current = to_scan.pop(0)
if isinstance(current, Group):
if current.is_empty():
pass
elif current.is_singular():
yield current.get_subgroups()[0]
else:
yield current
elif hasattr(current, "enable_hilite"):
if not current.is_disabled():
yield current
else:
pass
else:
to_scan.extend(current.get_children())
class BaseStrategy(Strategy, properties.PropertyAdapter,
                   configurator.Configurable):
    """
    Base class for implementations of any specific strategy.
    """
    __gproperties__ = {
        "interval": (
            GObject.TYPE_UINT,
            "", "",
            0, GObject.G_MAXUINT, 1000,
            GObject.PARAM_READWRITE),
        "max-cycle-count": (
            GObject.TYPE_INT,
            "", "",
            -1, GObject.G_MAXINT, 2,
            GObject.PARAM_READWRITE),
        "unwind-to": (
            Group.__gtype__,
            "", "",
            GObject.PARAM_READWRITE),
        "start-up-lag": (
            GObject.TYPE_UINT,
            "", "",
            0, GObject.G_MAXUINT, 0,
            GObject.PARAM_READWRITE),
        "lag-hilite-mode": (
            GObject.TYPE_STRING,
            "", "", "",
            GObject.PARAM_READWRITE),
        "select-lag": (
            GObject.TYPE_UINT,
            "", "",
            0, GObject.G_MAXUINT, 0,
            GObject.PARAM_READWRITE)
    }
    def __init__(self):
        # group owning the strategy; managed through the `group` property of
        # concrete subclasses
        self._group = None
        self._allocation_slot = None
        # current scanning sequence and the position of the cursor in it
        self._subgroups = []
        self.index = None
        super().__init__()
        self._select_lag = 1000
        self._start_up_lag = 0
        self._interval = 1000
        self._lag_hilite_mode = "still"
        self.blinking_freq = 100
        self._max_cycle_count = 2
        self._buttons = []
        self._unwind_to = None
        # identity token used to invalidate stale Clutter timeouts; a new
        # object() is created per cycle in `_do_start`
        self.timeout_token = None
        self.player = pisak.app.sound_effects_player
        sounds_enabled = pisak.config.as_bool("sound_effects_enabled")
        self.button_sound_support_enabled = sounds_enabled and \
            pisak.config.as_bool("sound_support_enabled")
        self.scan_sound_enabled = sounds_enabled and \
            pisak.config.as_bool('scan_sound_enabled')
        self.select_sound_enabled = sounds_enabled and \
            pisak.config.as_bool('select_sound_enabled')
        self.apply_props()
    @property
    def start_up_lag(self):
        """
        Starting delay, in milliseconds.
        """
        return self._start_up_lag
    @start_up_lag.setter
    def start_up_lag(self, value):
        self._start_up_lag = int(value)
    @property
    def lag_hilite_mode(self):
        """
        Type of starting lag hilite. Available are 'blink'
        and 'still'.
        """
        return self._lag_hilite_mode
    @lag_hilite_mode.setter
    def lag_hilite_mode(self, value):
        self._lag_hilite_mode = value
    @property
    def select_lag(self):
        """
        Duration of lag on selection, in milliseconds.
        """
        return self._select_lag
    @select_lag.setter
    def select_lag(self, value):
        self._select_lag = int(value)
    @property
    def interval(self):
        """
        Scanning interval, in milliseconds.
        """
        return self._interval
    @interval.setter
    def interval(self, value):
        self._interval = int(value)
    @property
    def max_cycle_count(self):
        """
        Number of repeats; -1 means cycle forever.
        """
        return self._max_cycle_count
    @max_cycle_count.setter
    def max_cycle_count(self, value):
        self._max_cycle_count = int(value)
    @property
    def unwind_to(self):
        """
        Identifier of group which will be started after current group finishes
        """
        return self._unwind_to
    @unwind_to.setter
    def unwind_to(self, value):
        self._unwind_to = value
    def start(self):
        """
        Method invoked by a group which wants its scanning cycle
        to be started.
        """
        self.compute_sequence()
        if len(self._subgroups) == 0:
            # stop immediately: schedule a single timeout that will unwind
            # or put the group to sleep in `cycle_timeout`
            self.index = None
            Clutter.threads_add_timeout(0, self.interval, self.cycle_timeout,
                                        self.timeout_token)
        else:
            if self.start_up_lag > 0:
                self._on_lag("start_up", self.group, self.start_up_lag)
            else:
                self._do_start()
    @staticmethod
    def play_scanning_sound():
        """
        Play a 'tic toc'-like sound indicating the scanning cycle progress.
        """
        if pisak.app:
            pisak.app.play_sound_effect('scanning')
    @staticmethod
    def play_selection_sound():
        """
        Play a sound indicating that some selection has been made.
        """
        if pisak.app:
            pisak.app.play_sound_effect('selection')
    @staticmethod
    def blink(blinking_element, timeout_start, overall_duration, freq):
        """
        Make the given element blinking.

        :param blinking_element: any :class:`Scannable` instance.
        :param timeout_start: current timestamp, helps calculating
        when the animation should be over.
        :param overall_duration: total duration of the blinking animation.
        :param freq: frequency of blinking.
        """
        hilitten = False
        def switch_hilite():
            # toggles the hilite each `freq` ms until the animation window
            # (minus two final periods) has elapsed; returning False removes
            # the Clutter timeout
            nonlocal hilitten
            when_to_exit = timeout_start + (overall_duration
                                            - 2*freq)/1000
            if time.time() > when_to_exit:
                if hasattr(blinking_element, "disable_lag_hilite"):
                    blinking_element.disable_lag_hilite()
                return False
            else:
                if hilitten:
                    if hasattr(blinking_element, "disable_lag_hilite"):
                        blinking_element.disable_lag_hilite()
                    hilitten = False
                else:
                    if hasattr(blinking_element, "enable_lag_hilite"):
                        blinking_element.enable_lag_hilite()
                    hilitten = True
                return True
        Clutter.threads_add_timeout(0, freq, switch_hilite)
    def stop(self):
        """
        Stop the currently running scanning cycle.
        """
        # clearing the token makes any pending `cycle_timeout` a no-op
        self.timeout_token = None
        self._stop_cycle()
    def cycle_timeout(self, token):
        """
        Callback run on scanning cycle timeout. It can either move to
        the next element in the current scanning sequence
        or stop the cycle if it has been requested or pause the cycle or
        start an 'unwind' group's cycle if there is nothing to do
        with this one.

        :param token: signal handler token, helps avoiding mess when
        multiple handlers are registered.

        :return: True or False, depending on whether the
        cycle should be continued.
        """
        if self.timeout_token != token:
            # timeout event not from current cycle
            return False
        elif self._is_killed():
            self.group.stop_cycle()
            return False
        elif self._has_next():
            if not self.group.paused:
                self._expose_next()
            return True
        elif not self._has_unwind_to():
            self._go_to_sleep()
            return False
        else:
            self.unwind()
            return False
    def get_current_element(self):
        """
        Get current element from the scanning sequence.

        :return: Scannable element.
        """
        if self.index is not None and self.index < len(self._subgroups):
            return self._subgroups[self.index]
        else:
            msg = "There is no current element being a subgroup of group {}."
            _LOG.warning(msg.format(self.group.get_id()))
    def _on_lag(self, lag_type, element_to_hilite, lag_duration):
        """
        Stops ('lags') the scanning proccess for the given amount of time
        and performs all the previously ordered actions, i.e. highlights
        the current element. In the end schedules an adequate closure.

        :param lag_type: type of lag to be performed. Currently there are
        only two of them: 'start_up' that can happen before the scanning
        process starts and 'select', after selection of an element.
        :param element_to_hilite: element that has scanning focus during the
        lag and that should be highlighted.
        :param lag_duration: duration of the lag in miliseconds.
        """
        if self.lag_hilite_mode == "blink":
            timeout_start = time.time()
            self.blink(element_to_hilite, timeout_start, lag_duration,
                       self.blinking_freq)
        elif self.lag_hilite_mode == "still":
            if hasattr(element_to_hilite, "enable_lag_hilite"):
                element_to_hilite.enable_lag_hilite()
        # NOTE(review): an unrecognized `lag_type` would leave `closure`
        # unbound and raise NameError below; only "start_up" and "select"
        # appear to be passed — confirm.
        if lag_type == "start_up":
            closure = self._do_start
            param = None
        elif lag_type == "select":
            closure = self._do_select
            param = element_to_hilite
        Clutter.threads_add_timeout(0, lag_duration, closure, param)
    def _do_start(self, *_source):
        # reset the cursor and launch the repeating timeout with a fresh
        # identity token so that any previous cycle's timeouts die off
        self.index = None
        self._cycle_count = 0
        self._expose_next(enforced=True)
        self.timeout_token = object()
        if hasattr(self.group, "disable_lag_hilite"):
            self.group.disable_lag_hilite()
        Clutter.threads_add_timeout(0, self.interval, self.cycle_timeout,
                                    self.timeout_token)
    def _stop_cycle(self):
        # drop the hilite of the element under the cursor, if any, and
        # invalidate the cursor
        if self.index is not None:
            if self.index < len(self._subgroups):
                selection = self._subgroups[self.index]
                if hasattr(selection, "disable_hilite"):
                    selection.disable_hilite()
                elif isinstance(selection, Group):
                    selection.disable_hilite()
            self.index = None
    def _expose_next(self, enforced=False):
        # disable old hilite and increase index
        if self.index is not None and self.index < len(self._subgroups):
            selection = self._subgroups[self.index]
            if hasattr(selection, "disable_hilite"):
                selection.disable_hilite()
            elif isinstance(selection, Group):
                selection.disable_hilite()
            self.index = (self.index + 1) % len(self._subgroups)
        else:
            if not enforced:
                self.index = 0
        # check freshness
        if not self.group.fresh_subgroups:
            self.compute_sequence()
        # return to start when index is invalid
        # NOTE(review): `>` looks like it should be `>=` — index equal to
        # len(self._subgroups) is also out of range; the guard below merely
        # skips that iteration instead of wrapping — confirm.
        if self.index is not None and self.index > len(self._subgroups):
            self.index = 0
        if self.index is not None and self.index < len(self._subgroups):
            selection = self._subgroups[self.index]
            if self.button_sound_support_enabled:
                # pick the most specific audible cue available for the
                # element: recorded sound, speech synthesis or the generic
                # scanning sound
                strateg_conf = pisak.config['PisakRowStrategy']
                scan_time = strateg_conf.as_int('interval') / 1000
                if isinstance(selection, pisak.widgets.Button):
                    label = selection.get_label()
                    if label in selection.sounds:
                        self.player.play(selection.sounds[label])
                    elif selection.sound:
                        self.player.play(selection.sound)
                    elif label in [' ', '']:
                        icon_name = selection.current_icon_name
                        if icon_name in selection.sounds:
                            self.player.play(selection.sounds[icon_name])
                    else:
                        if pisak.config.as_bool('speech_synthesis'):
                            synthesizer = Synthesizer(label)
                            synthesizer.read(scan_time)
                elif isinstance(selection, Group):
                    self.player.play(selection.sound)
                elif isinstance(selection, pisak.widgets.PhotoTile):
                    if pisak.config.as_bool('speech_synthesis'):
                        synthesizer = Synthesizer(selection.label_text)
                        synthesizer.read(scan_time)
                    else:
                        self.play_scanning_sound()
                else:
                    self.play_scanning_sound()
            else:
                if self.scan_sound_enabled:
                    self.play_scanning_sound()
            if hasattr(selection, "enable_hilite"):
                selection.enable_hilite()
            elif isinstance(selection, Group):
                selection.enable_hilite()
            # a full pass over the sequence completes one cycle
            if self.index == len(self._subgroups) - 1:
                self._cycle_count += 1
    def _has_next(self):
        if len(self._subgroups) == 0:
            return False
        else:
            return (self.max_cycle_count == -1) or \
                (self._cycle_count < self.max_cycle_count)
    def _has_unwind_to(self):
        """
        Test whether scanning has anywhere to unwind to.

        :return: True if scanning has anywhere to unwind to, False otherwise.
        """
        return self.unwind_to is not None or self.group.parent_group is not None
    def _is_killed(self):
        """
        Test whether scanning of the group has been killed by some
        external agent.
        """
        return self.group.killed
    def _go_to_sleep(self):
        # hand control back to the group, which waits for a user action
        self.group.go_standby()
class RowStrategy(BaseStrategy):
    """
    Implementation of a row-based strategy suitable for groups
    of widgets arranged in rows.
    """
    __gtype_name__ = "PisakRowStrategy"
    def __init__(self):
        self._allocation_slot = None
        super().__init__()
    @property
    def group(self):
        """
        Group owning the strategy.
        """
        return self._group
    @group.setter
    def group(self, value):
        if self.group is not None:
            # `value` may be None here — `Group.strategy` assigns
            # `strategy.group = None` when detaching — so never call its
            # methods unconditionally.
            new_id = value.get_id() if value is not None else None
            message = "Group strategy reuse, old {}, new {}"
            _LOG.warning(message.format(self.group.get_id(), new_id))
            # at this point `self.group` is still the old group and `value`
            # is the new one
            _LOG.debug("new {}, old {}".format(value, self.group))
            self.group.disconnect(self._allocation_slot)
        self._group = value
        if self.group is not None:
            self._allocation_slot = \
                self.group.connect("allocation-changed", self.update_rows)
    def update_rows(self, *_args):
        """
        Updates any pending hilites and creates a new scanning sequence.

        :param _args: optional, arguments passed
        when the function is registered as some signal handler.
        """
        _LOG.debug("Row layout allocation changed")
        if self.index is not None:
            if self.index < len(self._subgroups):
                selection = self._subgroups[self.index]
                if hasattr(selection, "disable_hilite"):
                    selection.disable_hilite()
            self.compute_sequence()
            self.index = None
    def compute_sequence(self):
        """
        Creates a new scanning sequence, ordered row by row: elements are
        sorted by their on-screen (y, x) position.
        """
        subgroups = self.group.get_subgroups()
        key_function = lambda a: list(reversed(a.get_transformed_position()))
        subgroups.sort(key=key_function)
        self._subgroups = subgroups
class ArbitraryOrderStrategy(BaseStrategy):
    """
    Strategy with arbitrary order of scanning.
    """
    __gtype_name__ = "PisakArbitraryOrderStrategy"
    __gproperties__ = {
        "subgroup-order":
            (GObject.TYPE_STRING, "", "", "", GObject.PARAM_READWRITE),
    }
    def __init__(self):
        self._subgroup_order = []
        super().__init__()
    @property
    def subgroup_order(self):
        """
        List of elements to scan, arbitrarily ordered.
        """
        return self._subgroup_order
    @subgroup_order.setter
    def subgroup_order(self, value):
        # the property is set from a comma-separated string of element ids,
        # e.g. "button_a, button_b"
        self._subgroup_order = [v.strip() for v in value.split(",")]
    def compute_sequence(self):
        """
        Creates a new scanning sequence: the subgroups whose ids appear in
        `subgroup_order`, in exactly that order. Ids with no matching
        subgroup are silently skipped.
        """
        by_id = {s.get_id(): s for s in self.group.get_subgroups()}
        self._subgroups = [by_id[name] for name in self.subgroup_order
                           if name in by_id]
def get_top_level_group(top_level):
    """
    Get a non-empty top-level scanning group from the given object tree.

    :param top_level: list of the top-level objects from the object tree.

    :return: top-level scanning group or None.
    """
    def get_collapsed(top_level_group):
        """
        Collapse the given top-level scanning group.
        If there is only one non-empty scanning group being
        a subgroup of the given top-level group then such a non-empty
        group is returned. Otherwise the given top-level group is returned.
        For the definition of a group being empty
        :see: `Group.is_empty`.

        :param top_level_group: scanning group that should get collapsed.

        :return: scanning group.
        """
        def is_empty_branch(node_list):
            """
            Check if the given branch is empty from the
            scanning point of view, that is whether there are any elements
            that could get scanned.

            :param node_list: list of top-level objects from some object branch.

            :return: True or False.
            """
            # level-by-level descent: a branch is non-empty as soon as it
            # contains a flat, non-empty group anywhere below
            nested_nodes = []
            for node in node_list:
                if (isinstance(node, Group) and node.is_flat() and not
                        node.is_empty()):
                    return False
                else:
                    nested_nodes.extend(node.get_children())
            return is_empty_branch(nested_nodes) if nested_nodes else True
        branches = []
        def find_branches(origin_level):
            """
            Find all the scanning branches in the object tree, originating
            from the given top level.

            :param origin_level: list of top-level objects.

            :return: None.
            """
            # collects the closest Group descendants into `branches`
            # (closure of get_collapsed); does not descend into Groups
            for element in origin_level:
                if isinstance(element, Group):
                    branches.append(element)
                else:
                    find_branches(element.get_children())
        non_empty = None
        non_empty_count = 0
        find_branches(top_level_group.get_children())
        for branch in branches:
            branch_list = [branch]
            if not is_empty_branch(branch_list):
                non_empty = get_top_level_group(branch_list)
                non_empty_count += 1
        # collapse only when exactly one branch is actually scannable;
        # with zero or several such branches, keep the given group
        return non_empty if non_empty_count == 1 else top_level_group
    # find the first Group on the way down and collapse it
    next_level = []
    for obj in top_level:
        if isinstance(obj, Group):
            return get_collapsed(obj)
        else:
            next_level.extend(obj.get_children())
    if next_level:
        return get_top_level_group(next_level)
    # implicit None when the tree contains no Group at all
| gpl-3.0 |
Venturi/oldcms | env/lib/python2.7/site-packages/django/utils/dates.py | 590 | 2296 | "Commonly-used date structures"
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
# Translated date-name lookup tables. All values are lazy translation
# proxies, resolved against the active locale at render time.

# Weekday number (0 = Monday ... 6 = Sunday, matching
# datetime.date.weekday()) -> full day name.
WEEKDAYS = {
    0: _('Monday'), 1: _('Tuesday'), 2: _('Wednesday'), 3: _('Thursday'), 4: _('Friday'),
    5: _('Saturday'), 6: _('Sunday')
}
# Weekday number -> three-letter abbreviation.
WEEKDAYS_ABBR = {
    0: _('Mon'), 1: _('Tue'), 2: _('Wed'), 3: _('Thu'), 4: _('Fri'),
    5: _('Sat'), 6: _('Sun')
}
# Lower-case English day name -> weekday number (reverse lookup; keys are
# untranslated).
WEEKDAYS_REV = {
    'monday': 0, 'tuesday': 1, 'wednesday': 2, 'thursday': 3, 'friday': 4,
    'saturday': 5, 'sunday': 6
}
# Month number (1-12) -> full month name.
MONTHS = {
    1: _('January'), 2: _('February'), 3: _('March'), 4: _('April'), 5: _('May'), 6: _('June'),
    7: _('July'), 8: _('August'), 9: _('September'), 10: _('October'), 11: _('November'),
    12: _('December')
}
# Month number -> lower-case three-letter abbreviation.
MONTHS_3 = {
    1: _('jan'), 2: _('feb'), 3: _('mar'), 4: _('apr'), 5: _('may'), 6: _('jun'),
    7: _('jul'), 8: _('aug'), 9: _('sep'), 10: _('oct'), 11: _('nov'), 12: _('dec')
}
# Lower-case three-letter abbreviation -> month number (reverse lookup).
MONTHS_3_REV = {
    'jan': 1, 'feb': 2, 'mar': 3, 'apr': 4, 'may': 5, 'jun': 6, 'jul': 7, 'aug': 8,
    'sep': 9, 'oct': 10, 'nov': 11, 'dec': 12
}
MONTHS_AP = {  # month names in Associated Press style
    1: pgettext_lazy('abbrev. month', 'Jan.'),
    2: pgettext_lazy('abbrev. month', 'Feb.'),
    3: pgettext_lazy('abbrev. month', 'March'),
    4: pgettext_lazy('abbrev. month', 'April'),
    5: pgettext_lazy('abbrev. month', 'May'),
    6: pgettext_lazy('abbrev. month', 'June'),
    7: pgettext_lazy('abbrev. month', 'July'),
    8: pgettext_lazy('abbrev. month', 'Aug.'),
    9: pgettext_lazy('abbrev. month', 'Sept.'),
    10: pgettext_lazy('abbrev. month', 'Oct.'),
    11: pgettext_lazy('abbrev. month', 'Nov.'),
    12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = {  # required for long date representation by some locales
    1: pgettext_lazy('alt. month', 'January'),
    2: pgettext_lazy('alt. month', 'February'),
    3: pgettext_lazy('alt. month', 'March'),
    4: pgettext_lazy('alt. month', 'April'),
    5: pgettext_lazy('alt. month', 'May'),
    6: pgettext_lazy('alt. month', 'June'),
    7: pgettext_lazy('alt. month', 'July'),
    8: pgettext_lazy('alt. month', 'August'),
    9: pgettext_lazy('alt. month', 'September'),
    10: pgettext_lazy('alt. month', 'October'),
    11: pgettext_lazy('alt. month', 'November'),
    12: pgettext_lazy('alt. month', 'December')
}
| apache-2.0 |
uni-peter-zheng/autotest | autotest-client-tests/fsx/fsx.py | 5 | 1732 | # This requires aio headers to build.
# Should work automagically out of deps now.
# NOTE - this should also have the ability to mount a filesystem,
# run the tests, unmount it, then fsck the filesystem
import os
from autotest.client import test, utils
class fsx(test.test):
    """
    Builds and runs the fsx-linux file system exerciser from the
    ext3-tools tarball, linked against the libaio dependency.
    """
    version = 3
    def initialize(self):
        # building fsx requires a working gcc on the host
        self.job.require_gcc()
    # http://www.zip.com.au/~akpm/linux/patches/stuff/ext3-tools.tar.gz
    def setup(self, tarball='ext3-tools.tar.gz'):
        """Fetch ext3-tools, patch fsx-linux and compile it with libaio."""
        self.tarball = utils.unmap_url(self.bindir, tarball, self.tmpdir)
        utils.extract_tarball_to_dir(self.tarball, self.srcdir)
        self.job.setup_dep(['libaio'])
        libaio_root = self.autodir + '/deps/libaio'
        # pass the libaio locations to the build through make variables
        self.make_flags = ' '.join([
            'LDFLAGS="' + '-L' + libaio_root + '/lib' + '"',
            'CFLAGS="' + '-I' + libaio_root + '/include' + '"',
        ])
        os.chdir(self.srcdir)
        patches = ['0001-Minor-fixes-to-PAGE_SIZE-handling.patch',
                   '0002-Enable-cross-compiling-for-fsx.patch']
        for patch_file in patches:
            utils.system('patch -p1 < %s/%s' % (self.bindir, patch_file))
        utils.system(self.make_flags + ' make fsx-linux')
    def run_once(self, dir=None, repeat=100000):
        """Run fsx-linux for `repeat` operations inside `dir`."""
        if not dir:
            dir = self.tmpdir
        os.chdir(dir)
        # make the bundled libaio visible to the dynamic linker
        libs = self.autodir + '/deps/libaio/lib/'
        ld_path = utils.prepend_path(libs,
                                     utils.environ('LD_LIBRARY_PATH'))
        cmd = self.srcdir + '/fsx-linux ' + ('-N %s' % repeat) + ' poo'
        utils.system('LD_LIBRARY_PATH=' + ld_path + ' ' + cmd)
| gpl-2.0 |
rryan/django-cms | cms/tests/nonroot.py | 24 | 3211 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from django.template import Template
from cms.api import create_page
from cms.models import Page
from cms.test_utils.testcases import CMSTestCase
from cms.templatetags.cms_admin import preview_link
from cms.utils.i18n import force_language
from menus.base import NavigationNode
class NonRootCase(CMSTestCase):
    """Menu and breadcrumb tests for a CMS mounted under a non-root URL."""
    urls = 'cms.test_utils.project.nonroot_urls'
    def setUp(self):
        superuser = self._create_user("test", True, True)
        with self.login_user_context(superuser):
            self.create_some_pages()
    def create_some_pages(self):
        """
        Creates the following structure:

        + P1
        | + P2
        |   + P3
        + P4
        """
        common = dict(published=True, in_navigation=True)
        self.page1 = create_page("page1", "nav_playground.html", "en",
                                 **common)
        self.page2 = create_page("page2", "nav_playground.html", "en",
                                 parent=self.page1, **common)
        self.page3 = create_page("page3", "nav_playground.html", "en",
                                 parent=self.page2, **common)
        self.page4 = create_page("page4", "nav_playground.html", "en",
                                 **common)
        self.all_pages = [self.page1, self.page2, self.page3, self.page4]
        self.top_level_pages = [self.page1, self.page4]
        self.level1_pages = [self.page2]
        self.level2_pages = [self.page3]
    def test_get_page_root(self):
        self.assertEqual(self.get_pages_root(), '/en/content/')
    def test_basic_cms_menu(self):
        response = self.client.get(self.get_pages_root())
        self.assertEqual(response.status_code, 200)
        self.assertEqual(self.get_pages_root(), "/en/content/")
    def test_show_menu(self):
        context = self.get_context()
        Template("{% load menu_tags %}{% show_menu %}").render(context)
        first = context['children'][0]
        self.assertEqual(first.get_absolute_url(), self.get_pages_root())
        self.assertEqual(first.get_absolute_url(), "/en/content/")
    def test_show_breadcrumb(self):
        page2 = Page.objects.get(pk=self.page2.pk)
        context = self.get_context(path=self.page2.get_absolute_url())
        Template("{% load menu_tags %}{% show_breadcrumb %}").render(context)
        ancestors = context['ancestors']
        self.assertEqual(ancestors[0].get_absolute_url(), self.get_pages_root())
        self.assertEqual(ancestors[0].get_absolute_url(), "/en/content/")
        self.assertEqual(isinstance(ancestors[0], NavigationNode), True)
        self.assertEqual(ancestors[1].get_absolute_url(),
                         page2.get_absolute_url())
    def test_form_multilingual_admin(self):
        """
        Tests for correct form URL mangling in preview_link templatetag
        """
        language = 'en'
        with force_language("en"):
            pages_root = self.get_pages_root()
            link = preview_link(self.page2, language=language)
            self.assertEqual(link, '%s%s/' % (pages_root, self.page2.get_slug()))
            self.assertEqual(link, '/en/content/page2/')
| bsd-3-clause |
jamesjinnz/domoticz | hardware/telldus-core/tests/cpplint.py | 42 | 134162 | #!/usr/bin/python
#
# Copyright (c) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Here are some issues that I've had people identify in my code during reviews,
# that I think are possible to flag automatically in a lint tool. If these were
# caught by lint, it would save time both for myself and that of my reviewers.
# Most likely, some of these are beyond the scope of the current lint framework,
# but I think it is valuable to retain these wish-list items even if they cannot
# be immediately implemented.
#
# Suggestions
# -----------
# - Check for no 'explicit' for multi-arg ctor
# - Check for boolean assign RHS in parens
# - Check for ctor initializer-list colon position and spacing
# - Check that if there's a ctor, there should be a dtor
# - Check accessors that return non-pointer member variables are
# declared const
# - Check accessors that return non-const pointer member vars are
# *not* declared const
# - Check for using public includes for testing
# - Check for spaces between brackets in one-line inline method
# - Check for no assert()
# - Check for spaces surrounding operators
# - Check for 0 in pointer context (should be NULL)
# - Check for 0 in char context (should be '\0')
# - Check for camel-case method name conventions for methods
# that are not simple inline getters and setters
# - Check that base classes have virtual destructors
# put " // namespace" after } that closes a namespace, with
# namespace's name after 'namespace' if it is named.
# - Do not indent namespace contents
# - Avoid inlining non-trivial constructors in header files
# include base/basictypes.h if DISALLOW_EVIL_CONSTRUCTORS is used
# - Check for old-school (void) cast for call-sites of functions
# ignored return value
# - Check gUnit usage of anonymous namespace
# - Check for class declaration order (typedefs, consts, enums,
# ctor(s?), dtor, friend declarations, methods, member vars)
#
"""Does google-lint on c++ files.
The goal of this script is to identify places in the code that *may*
be in non-compliance with google style. It does not attempt to fix
up these problems -- the point is to educate. It does also not
attempt to find all problems, or to ensure that everything it does
find is legitimately a problem.
In particular, we can get very confused by /* and // inside strings!
We do a small hack, which is to ignore //'s with "'s after them on the
same line, but it is far from perfect (in either direction).
"""
import codecs
import getopt
import math # for log
import os
import re
import sre_compile
import string
import sys
import unicodedata
_USAGE = """
Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...]
[--counting=total|toplevel|detailed]
<file> [file] ...
The style guidelines this tries to follow are those in
http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
Every problem is given a confidence score from 1-5, with 5 meaning we are
certain of the problem, and 1 meaning it could be a legitimate construct.
This will miss some errors, and is not a substitute for a code review.
To suppress false-positive errors of a certain category, add a
'NOLINT(category)' comment to the line. NOLINT or NOLINT(*)
suppresses errors of all categories on that line.
The files passed in will be linted; at least one file must be provided.
Linted extensions are .cc, .cpp, and .h. Other file types will be ignored.
Flags:
output=vs7
By default, the output is formatted to ease emacs parsing. Visual Studio
compatible output (vs7) may also be used. Other formats are unsupported.
verbose=#
Specify a number 0-5 to restrict errors to certain verbosity levels.
filter=-x,+y,...
Specify a comma-separated list of category-filters to apply: only
error messages whose category names pass the filters will be printed.
(Category names are printed with the message and look like
"[whitespace/indent]".) Filters are evaluated left to right.
"-FOO" and "FOO" means "do not print categories that start with FOO".
"+FOO" means "do print categories that start with FOO".
Examples: --filter=-whitespace,+whitespace/braces
--filter=whitespace,runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
To see a list of all the categories used in cpplint, pass no arg:
--filter=
counting=total|toplevel|detailed
The total number of errors found is always printed. If
'toplevel' is provided, then the count of errors in each of
the top-level categories like 'build' and 'whitespace' will
also be printed. If 'detailed' is provided, then a count
is provided for each category like 'build/class'.
"""
# We categorize each error message we print. Here are the categories.
# We want an explicit list so we can list them all in cpplint --filter=.
# If you add a new error message with a new category, add it to the list
# here! cpplint_unittest.py should tell you if you forget to do this.
# \ used for clearer layout -- pylint: disable-msg=C6013
_ERROR_CATEGORIES = [
'build/class',
'build/deprecated',
'build/endif_comment',
'build/explicit_make_pair',
'build/forward_decl',
'build/header_guard',
'build/include',
'build/include_alpha',
'build/include_order',
'build/include_what_you_use',
'build/namespaces',
'build/printf_format',
'build/storage_class',
'legal/copyright',
'readability/braces',
'readability/casting',
'readability/check',
'readability/constructors',
'readability/fn_size',
'readability/function',
'readability/multiline_comment',
'readability/multiline_string',
'readability/nolint',
'readability/streams',
'readability/todo',
'readability/utf8',
'runtime/arrays',
'runtime/casting',
'runtime/explicit',
'runtime/int',
'runtime/init',
'runtime/invalid_increment',
'runtime/member_string_references',
'runtime/memset',
'runtime/operator',
'runtime/printf',
'runtime/printf_format',
'runtime/references',
'runtime/rtti',
'runtime/sizeof',
'runtime/string',
'runtime/threadsafe_fn',
'runtime/virtual',
'whitespace/blank_line',
'whitespace/braces',
'whitespace/comma',
'whitespace/comments',
'whitespace/end_of_line',
'whitespace/ending_newline',
'whitespace/indent',
'whitespace/labels',
'whitespace/line_length',
'whitespace/newline',
'whitespace/operators',
'whitespace/parens',
'whitespace/semicolon',
'whitespace/tab',
'whitespace/todo',
'whitespace/use_tab_for_indentation'
]
# The default state of the category filter. This is overrided by the --filter=
# flag. By default all errors are on, so only add here categories that should be
# off by default (i.e., categories that must be enabled by the --filter= flags).
# All entries here should start with a '-' or '+', as in the --filter= flag.
_DEFAULT_FILTERS = [
'-build/include_alpha',
'-whitespace/use_tab_for_indentation'
]
# We used to check for high-bit characters, but after much discussion we
# decided those were OK, as long as they were in UTF-8 and didn't represent
# hard-coded international strings, which belong in a separate i18n file.
# Headers that we consider STL headers.
_STL_HEADERS = frozenset([
'algobase.h', 'algorithm', 'alloc.h', 'bitset', 'deque', 'exception',
'function.h', 'functional', 'hash_map', 'hash_map.h', 'hash_set',
'hash_set.h', 'iterator', 'list', 'list.h', 'map', 'memory', 'new',
'pair.h', 'pthread_alloc', 'queue', 'set', 'set.h', 'sstream', 'stack',
'stl_alloc.h', 'stl_relops.h', 'type_traits.h',
'utility', 'vector', 'vector.h',
])
# Non-STL C++ system headers.
_CPP_HEADERS = frozenset([
'algo.h', 'builtinbuf.h', 'bvector.h', 'cassert', 'cctype',
'cerrno', 'cfloat', 'ciso646', 'climits', 'clocale', 'cmath',
'complex', 'complex.h', 'csetjmp', 'csignal', 'cstdarg', 'cstddef',
'cstdio', 'cstdlib', 'cstring', 'ctime', 'cwchar', 'cwctype',
'defalloc.h', 'deque.h', 'editbuf.h', 'exception', 'fstream',
'fstream.h', 'hashtable.h', 'heap.h', 'indstream.h', 'iomanip',
'iomanip.h', 'ios', 'iosfwd', 'iostream', 'iostream.h', 'istream',
'istream.h', 'iterator.h', 'limits', 'map.h', 'multimap.h', 'multiset.h',
'numeric', 'ostream', 'ostream.h', 'parsestream.h', 'pfstream.h',
'PlotFile.h', 'procbuf.h', 'pthread_alloc.h', 'rope', 'rope.h',
'ropeimpl.h', 'SFile.h', 'slist', 'slist.h', 'stack.h', 'stdexcept',
'stdiostream.h', 'streambuf.h', 'stream.h', 'strfile.h', 'string',
'strstream', 'strstream.h', 'tempbuf.h', 'tree.h', 'typeinfo', 'valarray',
])
# Assertion macros. These are defined in base/logging.h and
# testing/base/gunit.h. Note that the _M versions need to come first
# for substring matching to work.
_CHECK_MACROS = [
'DCHECK', 'CHECK',
'EXPECT_TRUE_M', 'EXPECT_TRUE',
'ASSERT_TRUE_M', 'ASSERT_TRUE',
'EXPECT_FALSE_M', 'EXPECT_FALSE',
'ASSERT_FALSE_M', 'ASSERT_FALSE',
]
# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE
_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS])
for op, replacement in [('==', 'EQ'), ('!=', 'NE'),
('>=', 'GE'), ('>', 'GT'),
('<=', 'LE'), ('<', 'LT')]:
_CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement
_CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement
_CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement
_CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement
for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'),
('>=', 'LT'), ('>', 'LE'),
('<=', 'GT'), ('<', 'GE')]:
_CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement
_CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement
_CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement
# These constants define types of headers for use with
# _IncludeState.CheckNextIncludeOrder().
_C_SYS_HEADER = 1
_CPP_SYS_HEADER = 2
_LIKELY_MY_HEADER = 3
_POSSIBLE_MY_HEADER = 4
_OTHER_HEADER = 5
_regexp_compile_cache = {}
# Finds occurrences of NOLINT or NOLINT(...).
_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?')
# {str, set(int)}: a map from error categories to sets of linenumbers
# on which those errors are expected and should be suppressed.
_error_suppressions = {}
def ParseNolintSuppressions(filename, raw_line, linenum, error):
  """Records any NOLINT suppression found on the current line.

  Scans raw_line for a NOLINT comment and updates the module-wide
  _error_suppressions map.  An unknown suppression category is reported
  through the supplied error handler.

  Args:
    filename: str, the name of the input file.
    raw_line: str, the line of input text, with comments.
    linenum: int, the number of the current line.
    error: function, an error handler.
  """
  # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*).
  suppression_match = _RE_SUPPRESSION.search(raw_line)
  if not suppression_match:
    return
  category = suppression_match.group(1)
  if category is None or category == '(*)':
    # Bare NOLINT or NOLINT(*): silence every category on this line.
    _error_suppressions.setdefault(None, set()).add(linenum)
    return
  if category.startswith('(') and category.endswith(')'):
    category = category[1:-1]
    if category in _ERROR_CATEGORIES:
      _error_suppressions.setdefault(category, set()).add(linenum)
    else:
      error(filename, linenum, 'readability/nolint', 5,
            'Unknown NOLINT error category: %s' % category)
def ResetNolintSuppressions():
  "Resets the set of NOLINT suppressions to empty."
  # Clears the module-wide map so NOLINT lines seen in one file do not
  # suppress errors in the next file processed by the same run.
  _error_suppressions.clear()
def IsErrorSuppressedByNolint(category, linenum):
  """Returns True if the given error category is suppressed on this line.

  Consults the global _error_suppressions map populated by
  ParseNolintSuppressions/ResetNolintSuppressions.

  Args:
    category: str, the category of the error.
    linenum: int, the current line number.
  Returns:
    bool, True iff the error should be suppressed due to a NOLINT comment.
  """
  # A line is suppressed either for this specific category, or via the
  # wildcard entry keyed by None (bare NOLINT / NOLINT(*)).
  category_lines = _error_suppressions.get(category, set())
  wildcard_lines = _error_suppressions.get(None, set())
  return linenum in category_lines or linenum in wildcard_lines
def Match(pattern, s):
  """Matches the string with the pattern, caching the compiled regexp.

  Args:
    pattern: regexp source string.
    s: the string to match against.
  Returns:
    A match object anchored at the start of s, or None.
  """
  # The regexp compilation caching is inlined in both Match and Search for
  # performance reasons; factoring it out into a separate function turns out
  # to be noticeably expensive.
  if pattern not in _regexp_compile_cache:
    # re.compile is the supported public API; the previously used
    # sre_compile module is a CPython implementation detail that is
    # deprecated in modern interpreters.  Behavior is identical.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].match(s)
def Search(pattern, s):
  """Searches the string for the pattern, caching the compiled regexp.

  Args:
    pattern: regexp source string.
    s: the string to search.
  Returns:
    A match object for the first occurrence, or None.
  """
  if pattern not in _regexp_compile_cache:
    # Use the public re.compile instead of the deprecated internal
    # sre_compile module; see Match() for details.
    _regexp_compile_cache[pattern] = re.compile(pattern)
  return _regexp_compile_cache[pattern].search(s)
class _IncludeState(dict):
  """Tracks line numbers for includes, and the order in which includes appear.

  As a dict, an _IncludeState object serves as a mapping between include
  filename and line number on which that file was included.

  Call CheckNextIncludeOrder() once for each header in the file, passing
  in the type constants defined above.  Calls in an illegal order return
  a non-empty error message describing the violation.
  """
  # self._section will move monotonically through this set. If it ever
  # needs to move backwards, CheckNextIncludeOrder will raise an error.
  _INITIAL_SECTION = 0
  _MY_H_SECTION = 1
  _C_SECTION = 2
  _CPP_SECTION = 3
  _OTHER_H_SECTION = 4

  # Human-readable names for the _XXX_HEADER type constants, used when
  # building error messages.
  _TYPE_NAMES = {
      _C_SYS_HEADER: 'C system header',
      _CPP_SYS_HEADER: 'C++ system header',
      _LIKELY_MY_HEADER: 'header this file implements',
      _POSSIBLE_MY_HEADER: 'header this file may implement',
      _OTHER_HEADER: 'other header',
      }
  # Human-readable names for the section constants above.
  _SECTION_NAMES = {
      _INITIAL_SECTION: "... nothing. (This can't be an error.)",
      _MY_H_SECTION: 'a header this file implements',
      _C_SECTION: 'C system header',
      _CPP_SECTION: 'C++ system header',
      _OTHER_H_SECTION: 'other header',
      }

  def __init__(self):
    dict.__init__(self)
    # The name of the current section.
    self._section = self._INITIAL_SECTION
    # The path of last found header.
    self._last_header = ''

  def CanonicalizeAlphabeticalOrder(self, header_path):
    """Returns a path canonicalized for alphabetical comparison.

    - replaces "-" with "_" so they both cmp the same.
    - removes '-inl' since we don't require them to be after the main header.
    - lowercase everything, just in case.

    Args:
      header_path: Path to be canonicalized.
    Returns:
      Canonicalized path.
    """
    return header_path.replace('-inl.h', '.h').replace('-', '_').lower()

  def IsInAlphabeticalOrder(self, header_path):
    """Check if a header is in alphabetical order with the previous header.

    Also advances the remembered last header, so calling this is stateful:
    each header is compared against the one most recently accepted.

    Args:
      header_path: Header to be checked.
    Returns:
      Returns true if the header is in alphabetical order.
    """
    canonical_header = self.CanonicalizeAlphabeticalOrder(header_path)
    if self._last_header > canonical_header:
      return False
    self._last_header = canonical_header
    return True

  def CheckNextIncludeOrder(self, header_type):
    """Returns a non-empty error message if the next header is out of order.

    This function also updates the internal state to be ready to check
    the next include.

    Args:
      header_type: One of the _XXX_HEADER constants defined above.
    Returns:
      The empty string if the header is in the right order, or an
      error message describing what's wrong.
    """
    # Build the message up front; it is only returned when the section
    # would have to move backwards.
    error_message = ('Found %s after %s' %
                     (self._TYPE_NAMES[header_type],
                      self._SECTION_NAMES[self._section]))

    last_section = self._section

    if header_type == _C_SYS_HEADER:
      if self._section <= self._C_SECTION:
        self._section = self._C_SECTION
      else:
        # Moving backwards (e.g. a C header after C++ headers) is an error.
        self._last_header = ''
        return error_message
    elif header_type == _CPP_SYS_HEADER:
      if self._section <= self._CPP_SECTION:
        self._section = self._CPP_SECTION
      else:
        self._last_header = ''
        return error_message
    elif header_type == _LIKELY_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        self._section = self._OTHER_H_SECTION
    elif header_type == _POSSIBLE_MY_HEADER:
      if self._section <= self._MY_H_SECTION:
        self._section = self._MY_H_SECTION
      else:
        # This will always be the fallback because we're not sure
        # enough that the header is associated with this file.
        self._section = self._OTHER_H_SECTION
    else:
      assert header_type == _OTHER_HEADER
      self._section = self._OTHER_H_SECTION

    # Alphabetical-order tracking restarts whenever the section changes.
    if last_section != self._section:
      self._last_header = ''

    return ''
class _CppLintState(object):
"""Maintains module-wide state.."""
def __init__(self):
self.verbose_level = 1 # global setting.
self.error_count = 0 # global count of reported errors
# filters to apply when emitting error messages
self.filters = _DEFAULT_FILTERS[:]
self.counting = 'total' # In what way are we counting errors?
self.errors_by_category = {} # string to int dict storing error counts
# output format:
# "emacs" - format that emacs can parse (default)
# "vs7" - format that Microsoft Visual Studio 7 can parse
self.output_format = 'emacs'
def SetOutputFormat(self, output_format):
"""Sets the output format for errors."""
self.output_format = output_format
def SetVerboseLevel(self, level):
"""Sets the module's verbosity, and returns the previous setting."""
last_verbose_level = self.verbose_level
self.verbose_level = level
return last_verbose_level
def SetCountingStyle(self, counting_style):
"""Sets the module's counting options."""
self.counting = counting_style
def SetFilters(self, filters):
"""Sets the error-message filters.
These filters are applied when deciding whether to emit a given
error message.
Args:
filters: A string of comma-separated filters (eg "+whitespace/indent").
Each filter should start with + or -; else we die.
Raises:
ValueError: The comma-separated filters did not all start with '+' or '-'.
E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter"
"""
# Default filters always have less priority than the flag ones.
self.filters = _DEFAULT_FILTERS[:]
for filt in filters.split(','):
clean_filt = filt.strip()
if clean_filt:
self.filters.append(clean_filt)
for filt in self.filters:
if not (filt.startswith('+') or filt.startswith('-')):
raise ValueError('Every filter in --filters must start with + or -'
' (%s does not)' % filt)
def ResetErrorCounts(self):
"""Sets the module's error statistic back to zero."""
self.error_count = 0
self.errors_by_category = {}
def IncrementErrorCount(self, category):
"""Bumps the module's error statistic."""
self.error_count += 1
if self.counting in ('toplevel', 'detailed'):
if self.counting != 'detailed':
category = category.split('/')[0]
if category not in self.errors_by_category:
self.errors_by_category[category] = 0
self.errors_by_category[category] += 1
def PrintErrorCounts(self):
"""Print a summary of errors by category, and the total."""
for category, count in self.errors_by_category.iteritems():
sys.stderr.write('Category \'%s\' errors found: %d\n' %
(category, count))
sys.stderr.write('Total errors found: %d\n' % self.error_count)
_cpplint_state = _CppLintState()
def _OutputFormat():
  """Gets the module's output format."""
  # Thin module-level accessor over the shared _cpplint_state singleton.
  return _cpplint_state.output_format
def _SetOutputFormat(output_format):
  """Sets the module's output format."""
  # Delegates to the shared _cpplint_state singleton.
  _cpplint_state.SetOutputFormat(output_format)
def _VerboseLevel():
  """Returns the module's verbosity setting."""
  # Thin module-level accessor over the shared _cpplint_state singleton.
  return _cpplint_state.verbose_level
def _SetVerboseLevel(level):
  """Sets the module's verbosity, and returns the previous setting."""
  # Delegates to the shared _cpplint_state singleton.
  return _cpplint_state.SetVerboseLevel(level)
def _SetCountingStyle(level):
  """Sets the module's counting options."""
  # Delegates to the shared _cpplint_state singleton.
  _cpplint_state.SetCountingStyle(level)
def _Filters():
  """Returns the module's list of output filters, as a list."""
  # Thin module-level accessor over the shared _cpplint_state singleton.
  return _cpplint_state.filters
def _SetFilters(filters):
  """Sets the module's error-message filters.

  These filters are applied when deciding whether to emit a given
  error message.

  Args:
    filters: A string of comma-separated filters (eg "whitespace/indent").
             Each filter should start with + or -; else we die.
  """
  # Delegates to the singleton; _CppLintState.SetFilters raises ValueError
  # on a filter that does not start with '+' or '-'.
  _cpplint_state.SetFilters(filters)
class _FunctionState(object):
"""Tracks current function name and the number of lines in its body."""
_NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc.
_TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER.
def __init__(self):
self.in_a_function = False
self.lines_in_function = 0
self.current_function = ''
def Begin(self, function_name):
"""Start analyzing function body.
Args:
function_name: The name of the function being tracked.
"""
self.in_a_function = True
self.lines_in_function = 0
self.current_function = function_name
def Count(self):
"""Count line in current function body."""
if self.in_a_function:
self.lines_in_function += 1
def Check(self, error, filename, linenum):
"""Report if too many lines in function body.
Args:
error: The function to call with any errors found.
filename: The name of the current file.
linenum: The number of the line to check.
"""
if Match(r'T(EST|est)', self.current_function):
base_trigger = self._TEST_TRIGGER
else:
base_trigger = self._NORMAL_TRIGGER
trigger = base_trigger * 2**_VerboseLevel()
if self.lines_in_function > trigger:
error_level = int(math.log(self.lines_in_function / base_trigger, 2))
# 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ...
if error_level > 5:
error_level = 5
error(filename, linenum, 'readability/fn_size', error_level,
'Small and focused functions are preferred:'
' %s has %d non-comment lines'
' (error triggered by exceeding %d lines).' % (
self.current_function, self.lines_in_function, trigger))
def End(self):
"""Stop analyzing function body."""
self.in_a_function = False
class _IncludeError(Exception):
  """Indicates a problem with the include order in a file."""
  # NOTE(review): nothing in this chunk raises _IncludeError;
  # _IncludeState.CheckNextIncludeOrder returns message strings instead.
  # Presumably kept for compatibility with callers that catch it — confirm.
  pass
class FileInfo:
  """Provides utility functions for filenames.

  FileInfo provides easy access to the components of a file's path
  relative to the project root.
  """

  def __init__(self, filename):
    self._filename = filename

  def FullName(self):
    """Make Windows paths like Unix."""
    return os.path.abspath(self._filename).replace('\\', '/')

  def RepositoryName(self):
    """FullName after removing the local path to the repository.

    If we have a real absolute path name here we can try to do something smart:
    detecting the root of the checkout and truncating /path/to/checkout from
    the name so that we get header guards that don't include things like
    "C:\\Documents and Settings\\..." or "/home/username/..." in them and thus
    people on different computers who have checked the source out to different
    locations won't see bogus errors.
    """
    fullname = self.FullName()

    if not os.path.exists(fullname):
      # Don't know what to do; header guard warnings may be wrong...
      return fullname

    project_dir = os.path.dirname(fullname)

    if os.path.exists(os.path.join(project_dir, ".svn")):
      # SVN < 1.7 keeps a .svn directory in every directory of the checkout,
      # so climb the tree until the parent no longer has one to find the
      # top of the checkout.
      root_dir = project_dir
      one_up_dir = os.path.dirname(root_dir)
      while os.path.exists(os.path.join(one_up_dir, ".svn")):
        root_dir = os.path.dirname(root_dir)
        one_up_dir = os.path.dirname(one_up_dir)

      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

    # Not SVN <= 1.6?  Try to find a git, hg, or svn top level directory by
    # searching up from the current path.
    def _is_vcs_root(candidate):
      # True when the directory holds a VCS metadata entry.
      return (os.path.exists(os.path.join(candidate, ".git")) or
              os.path.exists(os.path.join(candidate, ".hg")) or
              os.path.exists(os.path.join(candidate, ".svn")))

    root_dir = os.path.dirname(fullname)
    # Stop at the filesystem root, where dirname() is a fixed point.
    while root_dir != os.path.dirname(root_dir) and not _is_vcs_root(root_dir):
      root_dir = os.path.dirname(root_dir)

    if _is_vcs_root(root_dir):
      prefix = os.path.commonprefix([root_dir, project_dir])
      return fullname[len(prefix) + 1:]

    # Don't know what to do; header guard warnings may be wrong...
    return fullname

  def Split(self):
    """Splits the file into the directory, basename, and extension.

    For 'chrome/browser/browser.cc', Split() would
    return ('chrome/browser', 'browser', '.cc')

    Returns:
      A tuple of (directory, basename, extension).
    """
    googlename = self.RepositoryName()
    project, rest = os.path.split(googlename)
    return (project,) + os.path.splitext(rest)

  def BaseName(self):
    """File base name - text after the final slash, before the final period."""
    return self.Split()[1]

  def Extension(self):
    """File extension - text following the final period."""
    return self.Split()[2]

  def NoExtension(self):
    """File has no source file extension."""
    return '/'.join(self.Split()[0:2])

  def IsSource(self):
    """File has a source file extension."""
    return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx')
def _ShouldPrintError(category, confidence, linenum):
  """If confidence >= verbose, category passes filter and is not suppressed."""
  # There are three ways we might decide not to print an error message:
  # a "NOLINT(category)" comment appears in the source,
  # the verbosity level isn't high enough, or the filters filter it out.
  if IsErrorSuppressedByNolint(category, linenum):
    return False
  if confidence < _cpplint_state.verbose_level:
    return False

  # Filters are applied left to right; the last matching one wins.
  emit = True
  for one_filter in _Filters():
    if one_filter.startswith('-'):
      if category.startswith(one_filter[1:]):
        emit = False
    elif one_filter.startswith('+'):
      if category.startswith(one_filter[1:]):
        emit = True
    else:
      assert False  # should have been checked for in SetFilter.
  return emit
def Error(filename, linenum, category, confidence, message):
  """Logs the fact we've found a lint error.

  We log where the error was found, and also our confidence in the error,
  that is, how certain we are this is a legitimate style regression, and
  not a misidentification or a use that's sometimes justified.

  False positives can be suppressed by the use of NOLINT comments on the
  offending line; these are parsed into _error_suppressions.

  Args:
    filename: The name of the file containing the error.
    linenum: The number of the line containing the error.
    category: A string used to describe the "category" this bug
      falls under: "whitespace", say, or "runtime".  Categories
      may have a hierarchy separated by slashes: "whitespace/indent".
    confidence: A number from 1-5 representing a confidence score for
      the error, with 5 meaning that we are certain of the problem,
      and 1 meaning that it could be a legitimate construct.
    message: The error message.
  """
  if not _ShouldPrintError(category, confidence, linenum):
    return
  _cpplint_state.IncrementErrorCount(category)
  # Choose the line layout expected by the configured consumer.
  if _cpplint_state.output_format == 'vs7':
    template = '%s(%s): %s [%s] [%d]\n'
  else:
    template = '%s:%s: %s [%s] [%d]\n'
  sys.stderr.write(template % (filename, linenum, message, category,
                               confidence))
# Matches standard C++ escape esequences per 2.13.2.3 of the C++ standard.
_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile(
r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)')
# Matches strings. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"')
# Matches characters. Escape codes should already be removed by ESCAPES.
_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'")
# Matches multi-line C++ comments.
# This RE is a little bit more complicated than one might expect, because we
# have to take care of how the surrounding whitespace is removed so that we
# can handle comments inside statements better.
# The current rule is: We only clear spaces from both sides when we're at the
# end of the line. Otherwise, we try to remove spaces from the right side,
# if this doesn't work we try on left side but only if there's a non-character
# on the right.
_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile(
r"""(\s*/\*.*\*/\s*$|
/\*.*\*/\s+|
\s+/\*.*\*/(?=\W)|
/\*.*\*/)""", re.VERBOSE)
def IsCppString(line):
  """Does line terminate so, that the next symbol is in string constant.

  This function does not consider single-line nor multi-line comments.

  Args:
    line: is a partial line of code starting from the 0..n.
  Returns:
    True, if next character appended to 'line' is inside a
    string constant.
  """
  # Neutralize escaped backslashes first so that \\" is not mistaken
  # for the escaped quote \".
  sanitized = line.replace(r'\\', 'XX')
  # Quotes that actually open/close strings: all double quotes, minus the
  # escaped ones, minus the char literal '"'.
  unescaped_quotes = (sanitized.count('"') -
                      sanitized.count(r'\"') -
                      sanitized.count("'\"'"))
  # An odd count means a string constant is still open.
  return unescaped_quotes % 2 == 1
def FindNextMultiLineCommentStart(lines, lineix):
  """Find the beginning marker for a multiline comment.

  Returns the index of the first line at or after lineix whose '/*' is not
  closed on the same line, or len(lines) if there is none.
  """
  for ix in range(lineix, len(lines)):
    stripped = lines[ix].strip()
    # A '/*' only opens a multi-line comment when no '*/' follows it on
    # the same line.
    if stripped.startswith('/*') and stripped.find('*/', 2) < 0:
      return ix
  return len(lines)
def FindNextMultiLineCommentEnd(lines, lineix):
  """We are inside a comment, find the end marker.

  Returns the index of the first line at or after lineix ending with '*/',
  or len(lines) if the comment is never closed.
  """
  for ix in range(lineix, len(lines)):
    if lines[ix].strip().endswith('*/'):
      return ix
  return len(lines)
def RemoveMultiLineCommentsFromRange(lines, begin, end):
  """Clears a range of lines for multi-line comments."""
  # Having // dummy comments makes the lines non-empty, so we will not get
  # unnecessary blank line warnings later in the code.
  lines[begin:end] = ['// dummy'] * (end - begin)
def RemoveMultiLineComments(filename, lines, error):
  """Removes multiline (c-style) comments from lines, in place."""
  lineix = 0
  while lineix < len(lines):
    comment_begin = FindNextMultiLineCommentStart(lines, lineix)
    if comment_begin >= len(lines):
      # No further multi-line comments in the file.
      return
    comment_end = FindNextMultiLineCommentEnd(lines, comment_begin)
    if comment_end >= len(lines):
      error(filename, comment_begin + 1, 'readability/multiline_comment', 5,
            'Could not find end of multi-line comment')
      return
    RemoveMultiLineCommentsFromRange(lines, comment_begin, comment_end + 1)
    lineix = comment_end + 1
def CleanseComments(line):
  """Removes //-comments and single-line C-style /* */ comments.

  Args:
    line: A line of C++ source.

  Returns:
    The line with single-line comments removed.
  """
  slash_pos = line.find('//')
  if slash_pos != -1:
    # Only treat '//' as a comment when it is not inside a string literal.
    if not IsCppString(line[:slash_pos]):
      line = line[:slash_pos].rstrip()
  # get rid of /* ... */
  return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
class CleansedLines(object):
  """Holds 3 copies of all lines with different preprocessing applied to them.

  1) elided member contains lines without strings and comments,
  2) lines member contains lines without comments, and
  3) raw member contains all the lines without processing.
  All these three members are of <type 'list'>, and of the same length.
  """

  def __init__(self, lines):
    self.raw_lines = lines
    self.num_lines = len(lines)
    self.lines = []
    self.elided = []
    for raw_line in lines:
      self.lines.append(CleanseComments(raw_line))
      self.elided.append(CleanseComments(self._CollapseStrings(raw_line)))

  def NumLines(self):
    """Returns the number of lines represented."""
    return self.num_lines

  @staticmethod
  def _CollapseStrings(elided):
    """Collapses strings and chars on a line to simple "" or '' blocks.

    We nix strings first so we're not fooled by text like '"http://"'

    Args:
      elided: The line being processed.

    Returns:
      The line with collapsed strings.
    """
    if _RE_PATTERN_INCLUDE.match(elided):
      # Lines matching the include pattern are left untouched (presumably
      # because the quoted text is a header name, not a string literal).
      return elided
    # Remove escaped characters first to make quote/single quote collapsing
    # basic.  Things that look like escaped characters shouldn't occur
    # outside of strings and chars.
    elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided)
    elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided)
    return _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided)
def CloseExpression(clean_lines, linenum, pos):
  """If input points to ( or { or [, finds the position that closes it.

  If lines[linenum][pos] points to a '(' or '{' or '[', finds the
  linenum/pos that correspond to the closing of the expression.

  Args:
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    pos: A position on the line.

  Returns:
    A tuple (line, linenum, pos) pointer *past* the closing brace, or
    (line, len(lines), -1) if we never find a close.  Note we ignore
    strings and comments when matching; and the line we return is the
    'cleansed' line at linenum.
  """
  line = clean_lines.elided[linenum]
  startchar = line[pos]
  if startchar not in '({[':
    return (line, clean_lines.NumLines(), -1)
  endchar = {'(': ')', '[': ']', '{': '}'}[startchar]

  # Net count of unclosed startchars after this line.
  num_open = line.count(startchar) - line.count(endchar)
  while num_open > 0:
    linenum += 1
    if linenum >= clean_lines.NumLines():
      # Ran off the end of the file without finding the close; honor the
      # documented "never find a close" contract instead of raising
      # IndexError as the previous bounds check allowed.
      return (line, clean_lines.NumLines(), -1)
    line = clean_lines.elided[linenum]
    num_open += line.count(startchar) - line.count(endchar)

  # OK, now find the endchar that actually got us back to even.
  endpos = len(line)
  while num_open >= 0:
    # Bug fix: search for the matching endchar; the original hard-coded
    # ')' here, which broke closing of '[' and '{' expressions.
    endpos = line.rfind(endchar, 0, endpos)
    num_open -= 1  # chopped off another closer
  return (line, linenum, endpos + 1)
def CheckForCopyright(filename, lines, error):
  """Logs an error if no Copyright message appears at the top of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings; lines[0] is a dummy entry, so the scan
      starts at index 1.
    error: The function to call with any errors found.
  """
  # We'll say it should occur by line 10. Don't forget there's a
  # dummy line at the front.  range() instead of Python-2-only xrange():
  # at most 10 elements, and it works on both Python 2 and 3.
  for line in range(1, min(len(lines), 11)):
    if re.search(r'Copyright', lines[line], re.I): break
  else:                       # means no copyright line was found
    error(filename, 0, 'legal/copyright', 5,
          'No copyright message found. '
          'You should have a line: "Copyright [year] <Copyright Owner>"')
def GetHeaderGuardCPPVariable(filename):
  """Returns the CPP variable that should be used as a header guard.

  Args:
    filename: The name of a C++ header file.

  Returns:
    The CPP variable that should be used as a header guard in the
    named file.
  """
  # Emacs's flymake invokes cpplint on a temporary foo_flymake.h copy; map
  # that back to the real filename so the expected guard matches.
  filename = re.sub(r'_flymake\.h$', '.h', filename)

  repository_name = FileInfo(filename).RepositoryName()
  return re.sub(r'[-./\s]', '_', repository_name).upper() + '_'
def CheckForHeaderGuard(filename, lines, error):
  """Checks that the file contains a header guard.

  Logs an error if no #ifndef header guard is present.  For other
  headers, checks that the full pathname is used.

  Args:
    filename: The name of the C++ header file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  cppvar = GetHeaderGuardCPPVariable(filename)

  ifndef = None           # guard symbol from the first #ifndef line
  ifndef_linenum = 0
  define = None           # symbol from the first #define line
  endif = None            # full text of the *last* #endif line
  endif_linenum = 0
  for linenum, line in enumerate(lines):
    linesplit = line.split()
    if len(linesplit) >= 2:
      # find the first occurrence of #ifndef and #define, save arg
      if not ifndef and linesplit[0] == '#ifndef':
        # set ifndef to the header guard presented on the #ifndef line.
        ifndef = linesplit[1]
        ifndef_linenum = linenum
      if not define and linesplit[0] == '#define':
        define = linesplit[1]
    # find the last occurrence of #endif, save entire line
    if line.startswith('#endif'):
      endif = line
      endif_linenum = linenum

  if not ifndef:
    error(filename, 0, 'build/header_guard', 5,
          'No #ifndef header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  if not define:
    error(filename, 0, 'build/header_guard', 5,
          'No #define header guard found, suggested CPP variable is: %s' %
          cppvar)
    return

  # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__
  # for backward compatibility.
  if ifndef != cppvar:
    error_level = 0
    if ifndef != cppvar + '_':
      error_level = 5

    # Parse suppressions on the #ifndef line *before* reporting, so a
    # NOLINT comment on that line can silence this error.
    ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum,
                            error)
    error(filename, ifndef_linenum, 'build/header_guard', error_level,
          '#ifndef header guard has wrong style, please use: %s' % cppvar)

  if define != ifndef:
    error(filename, 0, 'build/header_guard', 5,
          '#ifndef and #define don\'t match, suggested CPP variable is: %s' %
          cppvar)
    return

  if endif != ('#endif // %s' % cppvar):
    error_level = 0
    if endif != ('#endif // %s' % (cppvar + '_')):
      error_level = 5

    # As above: give a NOLINT comment on the #endif line a chance to
    # suppress before emitting.
    ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum,
                            error)
    error(filename, endif_linenum, 'build/header_guard', error_level,
          '#endif line should be "#endif // %s"' % cppvar)
def CheckForUnicodeReplacementCharacters(filename, lines, error):
  """Logs an error for each line containing Unicode replacement characters.

  These indicate that either the file contained invalid UTF-8 (likely)
  or Unicode replacement characters (which it shouldn't). Note that
  it's possible for this to throw off line numbering if the invalid
  UTF-8 occurred adjacent to a newline.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  for line_number, text in enumerate(lines):
    if u'\ufffd' not in text:
      continue
    error(filename, line_number, 'readability/utf8', 5,
          'Line contains invalid UTF-8 (or Unicode replacement character).')
def CheckForNewlineAtEOF(filename, lines, error):
  """Logs an error if there is no newline char at the end of the file.

  Args:
    filename: The name of the current file.
    lines: An array of strings, each representing a line of the file.
    error: The function to call with any errors found.
  """
  # The lines array was built by appending two newlines to the original
  # file contents and then splitting on '\n', so a file that ends with a
  # newline produces an empty second-to-last element here. Too few
  # elements, or a non-empty second-to-last element, means the trailing
  # newline is missing.
  if len(lines) < 3 or lines[-2]:
    error(filename, len(lines) - 2, 'whitespace/ending_newline', 5,
          'Could not find a newline character at the end of the file.')
def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error):
  """Logs an error if we see /* ... */ or "..." that extend past one line.

  /* ... */ comments are legit inside macros, for one line.
  Otherwise, we prefer // comments, so it's ok to warn about the
  other. Likewise, it's ok for strings to extend across multiple
  lines, as long as a line continuation character (backslash)
  terminates each line. Although not currently prohibited by the C++
  style guide, it's ugly and unnecessary. We don't do well with either
  in this lint program, so we warn about both.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Escaped backslashes are harmless, but the second (escaped) slash
  # could erroneously trigger the \" detection below, so drop them first.
  text = clean_lines.elided[linenum].replace('\\\\', '')
  if text.count('/*') > text.count('*/'):
    error(filename, linenum, 'readability/multiline_comment', 5,
          'Complex multi-line /*...*/-style comment found. '
          'Lint may give bogus warnings. '
          'Consider replacing these with //-style comments, '
          'with #if 0...#endif, '
          'or with more clearly structured multi-line comments.')
  unescaped_quotes = text.count('"') - text.count('\\"')
  if unescaped_quotes % 2:
    error(filename, linenum, 'readability/multiline_string', 5,
          'Multi-line string ("...") found. This lint script doesn\'t '
          'do well with such strings, and may give bogus warnings. They\'re '
          'ugly and unnecessary, and you should use concatenation instead".')
# (non-threadsafe name, threadsafe alternative) pairs, checked below.
threading_list = (
    ('asctime(', 'asctime_r('),
    ('ctime(', 'ctime_r('),
    ('getgrgid(', 'getgrgid_r('),
    ('getgrnam(', 'getgrnam_r('),
    ('getlogin(', 'getlogin_r('),
    ('getpwnam(', 'getpwnam_r('),
    ('getpwuid(', 'getpwuid_r('),
    ('gmtime(', 'gmtime_r('),
    ('localtime(', 'localtime_r('),
    ('rand(', 'rand_r('),
    ('readdir(', 'readdir_r('),
    ('strtok(', 'strtok_r('),
    ('ttyname(', 'ttyname_r('),
    )


def CheckPosixThreading(filename, clean_lines, linenum, error):
  """Checks for calls to thread-unsafe functions.

  Much code has been originally written without consideration of
  multi-threading. Also, engineers are relying on their old experience;
  they have learned posix before threading extensions were added. These
  tests guide the engineers to use thread-safe functions (when using
  posix directly).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  line = clean_lines.elided[linenum]
  for unsafe_func, safe_func in threading_list:
    pos = line.find(unsafe_func)
    if pos < 0:
      continue
    # Only flag a plausible call site: the preceding character must not
    # be part of a longer identifier or a member/pointer access
    # (e.g. my_ctime(, obj.ctime(, ptr->ctime( are all ignored).
    # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
    if pos == 0 or (not line[pos - 1].isalnum() and
                    line[pos - 1] not in ('_', '.', '>')):
      error(filename, linenum, 'runtime/threadsafe_fn', 2,
            'Consider using ' + safe_func +
            '...) instead of ' + unsafe_func +
            '...) for improved thread safety.')
# Matches invalid increment: *count++, which moves pointer instead of
# incrementing a value.
_RE_PATTERN_INVALID_INCREMENT = re.compile(
    r'^\s*\*\w+(\+\+|--);')


def CheckInvalidIncrement(filename, clean_lines, linenum, error):
  """Checks for invalid increment *count++.

  For example following function:
  void increment_counter(int* count) {
  *count++;
  }
  is invalid, because it effectively does count++, moving pointer, and should
  be replaced with ++*count, (*count)++ or *count += 1.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # The anchored pattern only fires when the whole statement is a bare
  # dereference-then-increment, so no extra context checks are needed.
  if _RE_PATTERN_INVALID_INCREMENT.match(clean_lines.elided[linenum]):
    error(filename, linenum, 'runtime/invalid_increment', 5,
          'Changing pointer instead of value (or unused value of operator*).')
class _ClassInfo(object):
"""Stores information about a class."""
def __init__(self, name, clean_lines, linenum):
self.name = name
self.linenum = linenum
self.seen_open_brace = False
self.is_derived = False
self.virtual_method_linenumber = None
self.has_virtual_destructor = False
self.brace_depth = 0
# Try to find the end of the class. This will be confused by things like:
# class A {
# } *x = { ...
#
# But it's still good enough for CheckSectionSpacing.
self.last_line = 0
depth = 0
for i in range(linenum, clean_lines.NumLines()):
line = clean_lines.lines[i]
depth += line.count('{') - line.count('}')
if not depth:
self.last_line = i
break
class _ClassState(object):
"""Holds the current state of the parse relating to class declarations.
It maintains a stack of _ClassInfos representing the parser's guess
as to the current nesting of class declarations. The innermost class
is at the top (back) of the stack. Typically, the stack will either
be empty or have exactly one entry.
"""
def __init__(self):
self.classinfo_stack = []
def CheckFinished(self, filename, error):
"""Checks that all classes have been completely parsed.
Call this when all lines in a file have been processed.
Args:
filename: The name of the current file.
error: The function to call with any errors found.
"""
if self.classinfo_stack:
# Note: This test can result in false positives if #ifdef constructs
# get in the way of brace matching. See the testBuildClass test in
# cpplint_unittest.py for an example of this.
error(filename, self.classinfo_stack[0].linenum, 'build/class', 5,
'Failed to find complete declaration of class %s' %
self.classinfo_stack[0].name)
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
                                  class_state, error):
  """Logs an error if we see certain non-ANSI constructs ignored by gcc-2.

  Complain about several constructs which gcc-2 accepts, but which are
  not standard C++. Warning about these in lint is one way to ease the
  transition to new compilers.
  - put storage class first (e.g. "static const" instead of "const static").
  - "%lld" instead of %qd" in printf-type functions.
  - "%1$d" is non-standard in printf-type functions.
  - "\%" is an undefined character escape sequence.
  - text after #endif is not allowed.
  - invalid inner-style forward declaration.
  - >? and <? operators, and their >?= and <?= cousins.
  - classes with virtual methods need virtual destructors (compiler warning
    available, but not turned on yet.)

  Additionally, check for constructor/destructor style violations and reference
  members, as it is very convenient to do so while checking for
  gcc-2 compliance.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    class_state: A _ClassState instance which maintains information about
        the current stack of nested class declarations being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
        filename, line number, error level, and message
  """
  # Remove comments from the line, but leave in strings for now.
  line = clean_lines.lines[linenum]
  if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
    error(filename, linenum, 'runtime/printf_format', 3,
          '%q in format strings is deprecated. Use %ll instead.')
  if Search(r'printf\s*\(.*".*%\d+\$', line):
    error(filename, linenum, 'runtime/printf_format', 2,
          '%N$ formats are unconventional. Try rewriting to avoid them.')
  # Remove escaped backslashes before looking for undefined escapes.
  line = line.replace('\\\\', '')
  if Search(r'("|\').*\\(%|\[|\(|{)', line):
    error(filename, linenum, 'build/printf_format', 3,
          '%, [, (, and { are undefined character escapes. Unescape them.')
  # For the rest, work with both comments and strings removed.
  line = clean_lines.elided[linenum]
  if Search(r'\b(const|volatile|void|char|short|int|long'
            r'|float|double|signed|unsigned'
            r'|schar|u?int8|u?int16|u?int32|u?int64)'
            r'\s+(auto|register|static|extern|typedef)\b',
            line):
    error(filename, linenum, 'build/storage_class', 5,
          'Storage class (static, extern, typedef, etc) should be first.')
  if Match(r'\s*#\s*endif\s*[^/\s]+', line):
    error(filename, linenum, 'build/endif_comment', 5,
          'Uncommented text after #endif is non-standard. Use a comment.')
  if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
    error(filename, linenum, 'build/forward_decl', 5,
          'Inner-style forward declarations are invalid. Remove this line.')
  if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
            line):
    error(filename, linenum, 'build/deprecated', 3,
          '>? and <? (max and min) operators are non-standard and deprecated.')
  if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
    # TODO(unknown): Could it be expanded safely to arbitrary references,
    # without triggering too many false positives? The first
    # attempt triggered 5 warnings for mostly benign code in the regtest, hence
    # the restriction.
    # Here's the original regexp, for the reference:
    # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
    # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
    error(filename, linenum, 'runtime/member_string_references', 2,
          'const string& members are dangerous. It is much better to use '
          'alternatives, such as pointers or simple constants.')
  # Track class entry and exit, and attempt to find cases within the
  # class declaration that don't meet the C++ style
  # guidelines. Tracking is very dependent on the code matching Google
  # style guidelines, but it seems to perform well enough in testing
  # to be a worthwhile addition to the checks.
  classinfo_stack = class_state.classinfo_stack
  # Look for a class declaration. The regexp accounts for decorated classes
  # such as in:
  # class LOCKABLE API Object {
  # };
  class_decl_match = Match(
      r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
      '(class|struct)\s+([A-Z_]+\s+)*(\w+(::\w+)*)', line)
  if class_decl_match:
    # Group 4 is the (possibly qualified) class name.
    classinfo_stack.append(_ClassInfo(
        class_decl_match.group(4), clean_lines, linenum))
  # Everything else in this function uses the top of the stack if it's
  # not empty.
  if not classinfo_stack:
    return
  classinfo = classinfo_stack[-1]
  # If the opening brace hasn't been seen look for it and also
  # parent class declarations.
  if not classinfo.seen_open_brace:
    # If the line has a ';' in it, assume it's a forward declaration or
    # a single-line class declaration, which we won't process.
    if line.find(';') != -1:
      classinfo_stack.pop()
      return
    classinfo.seen_open_brace = (line.find('{') != -1)
    # Look for a bare ':'
    if Search('(^|[^:]):($|[^:])', line):
      classinfo.is_derived = True
    if not classinfo.seen_open_brace:
      return  # Everything else in this function is for after open brace
  # The class may have been declared with namespace or classname qualifiers.
  # The constructor and destructor will not have those qualifiers.
  base_classname = classinfo.name.split('::')[-1]
  # Look for single-argument constructors that aren't marked explicit.
  # Technically a valid construct, but against style.
  # re.escape protects class names containing regex metacharacters.
  args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
               % re.escape(base_classname),
               line)
  if (args and
      args.group(1) != 'void' and
      not Match(r'(const\s+)?%s\s*(?:<\w+>\s*)?&' % re.escape(base_classname),
                args.group(1).strip())):
    error(filename, linenum, 'runtime/explicit', 5,
          'Single-argument constructors should be marked explicit.')
  # Look for methods declared virtual.
  if Search(r'\bvirtual\b', line):
    classinfo.virtual_method_linenumber = linenum
    # Only look for a destructor declaration on the same line. It would
    # be extremely unlikely for the destructor declaration to occupy
    # more than one line.
    if Search(r'~%s\s*\(' % base_classname, line):
      classinfo.has_virtual_destructor = True
  # Look for class end.
  brace_depth = classinfo.brace_depth
  brace_depth = brace_depth + line.count('{') - line.count('}')
  if brace_depth <= 0:
    classinfo = classinfo_stack.pop()
    # Try to detect missing virtual destructor declarations.
    # For now, only warn if a non-derived class with virtual methods lacks
    # a virtual destructor. This is to make it less likely that people will
    # declare derived virtual destructors without declaring the base
    # destructor virtual.
    if ((classinfo.virtual_method_linenumber is not None) and
        (not classinfo.has_virtual_destructor) and
        (not classinfo.is_derived)):  # Only warn for base classes
      error(filename, classinfo.linenum, 'runtime/virtual', 4,
            'The class %s probably needs a virtual destructor due to '
            'having virtual method(s), one declared at line %d.'
            % (classinfo.name, classinfo.virtual_method_linenumber))
  else:
    # Class body continues; remember the running depth for the next line.
    classinfo.brace_depth = brace_depth
def CheckSpacingForFunctionCall(filename, line, linenum, error):
  """Checks for the correctness of various spacing around function calls.

  Args:
    filename: The name of the current file.
    line: The text of the line to check.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Since function calls often occur inside if/for/while/switch
  # expressions - which have their own, more liberal conventions - we
  # first see if we should be looking inside such an expression for a
  # function call, to which we can apply more strict standards.
  fncall = line    # if there's no control flow construct, look at whole line
  for pattern in (r'\bif\s*\((.*)\)\s*{',
                  r'\bfor\s*\((.*)\)\s*{',
                  r'\bwhile\s*\((.*)\)\s*[{;]',
                  r'\bswitch\s*\((.*)\)\s*{'):
    match = Search(pattern, line)
    if match:
      fncall = match.group(1)    # look inside the parens for function calls
      break
  # Except in if/for/while/switch, there should never be space
  # immediately inside parens (eg "f( 3, 4 )"). We make an exception
  # for nested parens ( (a+b) + c ). Likewise, there should never be
  # a space before a ( when it's a function argument. I assume it's a
  # function argument when the char before the whitespace is legal in
  # a function name (alnum + _) and we're not starting a macro. Also ignore
  # pointers and references to arrays and functions coz they're too tricky:
  # we use a very simple way to recognize these:
  # " (something)(maybe-something)" or
  # " (something)(maybe-something," or
  # " (something)[something]"
  # Note that we assume the contents of [] to be short enough that
  # they'll never need to wrap.
  if (  # Ignore control structures.
      not Search(r'\b(if|for|while|switch|return|delete)\b', fncall) and
      # Ignore pointers/references to functions.
      not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
      # Ignore pointers/references to arrays.
      not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
    if Search(r'\w\s*\(\s(?!\s*\\$)', fncall):      # a ( used for a fn call
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space after ( in function call')
    elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
      error(filename, linenum, 'whitespace/parens', 2,
            'Extra space after (')
    if (Search(r'\w\s+\(', fncall) and
        not Search(r'#\s*define|typedef', fncall)):
      error(filename, linenum, 'whitespace/parens', 4,
            'Extra space before ( in function call')
    # If the ) is followed only by a newline or a { + newline, assume it's
    # part of a control statement (if/while/etc), and don't complain
    if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
      # If the closing parenthesis is preceded by only whitespaces,
      # try to give a more descriptive error message.
      if Search(r'^\s+\)', fncall):
        error(filename, linenum, 'whitespace/parens', 2,
              'Closing ) should be moved to the previous line')
      else:
        error(filename, linenum, 'whitespace/parens', 2,
              'Extra space before )')
def IsBlankLine(line):
  """Returns true if the given line is blank.

  We consider a line to be blank if the line is empty or consists of
  only white spaces.

  Args:
    line: A line of a string.

  Returns:
    True, if the given line is blank.
  """
  # An empty (or otherwise falsy) line is blank; anything else is blank
  # exactly when it contains nothing but whitespace.
  if not line:
    return True
  return line.isspace()
def CheckForFunctionLengths(filename, clean_lines, linenum,
                            function_state, error):
  """Reports for long function bodies.

  For an overview why this is done, see:
  http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions

  Uses a simplistic algorithm assuming other style guidelines
  (especially spacing) are followed.
  Only checks unindented functions, so class members are unchecked.
  Trivial bodies are unchecked, so constructors with huge initializer lists
  may be missed.
  Blank/comment lines are not counted so as to avoid encouraging the removal
  of vertical space and comments just to get through a lint check.
  NOLINT *on the last line of a function* disables this check.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    function_state: Current function name and lines in body so far.
    error: The function to call with any errors found.
  """
  lines = clean_lines.lines
  line = lines[linenum]
  joined_line = ''
  starting_func = False
  regexp = r'(\w(\w|::|\*|\&|\s)*)\('  # decls * & space::name( ...
  match_result = Match(regexp, line)
  if match_result:
    # If the name is all caps and underscores, figure it's a macro and
    # ignore it, unless it's TEST or TEST_F.
    function_name = match_result.group(1).split()[-1]
    if function_name == 'TEST' or function_name == 'TEST_F' or (
        not Match(r'[A-Z_]+$', function_name)):
      starting_func = True
  if starting_func:
    body_found = False
    # Scan forward for the start of the function body. range() rather
    # than the Python-2-only xrange() keeps this working under Python 3;
    # the scan is bounded by the number of lines in the file.
    for start_linenum in range(linenum, clean_lines.NumLines()):
      start_line = lines[start_linenum]
      joined_line += ' ' + start_line.lstrip()
      if Search(r'(;|})', start_line):  # Declarations and trivial functions
        body_found = True
        break  # ... ignore
      elif Search(r'{', start_line):
        body_found = True
        function = Search(r'((\w|:)*)\(', line).group(1)
        if Match(r'TEST', function):  # Handle TEST... macros
          parameter_regexp = Search(r'(\(.*\))', joined_line)
          if parameter_regexp:  # Ignore bad syntax
            function += parameter_regexp.group(1)
          else:
            function += '()'
        function_state.Begin(function)
        break
    if not body_found:
      # No body for the function (or evidence of a non-function) was found.
      error(filename, linenum, 'readability/fn_size', 5,
            'Lint failed to find start of function body.')
  elif Match(r'^\}\s*$', line):  # function end
    function_state.Check(error, filename, linenum)
    function_state.End()
  elif not Match(r'^\s*$', line):
    function_state.Count()  # Count non-blank/non-comment lines.
# A well-formed TODO comment: optional whitespace, optional (username),
# optional colon, then the comment text.
_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?')


def CheckComment(comment, filename, linenum, error):
  """Checks for common mistakes in TODO comments.

  Args:
    comment: The text of the comment from the line in question.
    filename: The name of the current file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  todo_match = _RE_PATTERN_TODO.match(comment)
  if not todo_match:
    return
  leading_whitespace, username, middle_whitespace = todo_match.groups()
  # One whitespace is correct; zero whitespace is handled elsewhere.
  if len(leading_whitespace) > 1:
    error(filename, linenum, 'whitespace/todo', 2,
          'Too many spaces before TODO')
  if not username:
    error(filename, linenum, 'readability/todo', 2,
          'Missing username in TODO; it should look like '
          '"// TODO(my_username): Stuff."')
  # Comparisons made explicit for correctness -- pylint: disable-msg=C6403
  if middle_whitespace != ' ' and middle_whitespace != '':
    error(filename, linenum, 'whitespace/todo', 2,
          'TODO(my_username) should be followed by a space')
def CheckSpacing(filename, clean_lines, linenum, error):
  """Checks for the correctness of various spacing issues in the code.

  Things we check for: spaces around operators, spaces after
  if/for/while/switch, no spaces around parens in function calls, two
  spaces between code and comment, don't start a block with a blank
  line, don't end a function with a blank line, don't add a blank line
  after public/protected/private, don't have too many blank lines in a row.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Blank-line checks need the raw text: comments and strings are still
  # present here, unlike in the elided lines used further below.
  raw = clean_lines.raw_lines
  line = raw[linenum]
  # Before nixing comments, check if the line is blank for no good
  # reason. This includes the first line after a block is opened, and
  # blank lines at the end of a function (ie, right before a line like '}'
  if IsBlankLine(line):
    elided = clean_lines.elided
    prev_line = elided[linenum - 1]
    prevbrace = prev_line.rfind('{')
    # TODO(unknown): Don't complain if line before blank line, and line after,
    # both start with alnums and are indented the same amount.
    # This ignores whitespace at the start of a namespace block
    # because those are not usually indented.
    if (prevbrace != -1 and prev_line[prevbrace:].find('}') == -1
        and prev_line[:prevbrace].find('namespace') == -1):
      # OK, we have a blank line at the start of a code block. Before we
      # complain, we check if it is an exception to the rule: The previous
      # non-empty line has the parameters of a function header that are indented
      # 4 spaces (because they did not fit in a 80 column line when placed on
      # the same line as the function name). We also check for the case where
      # the previous line is indented 6 spaces, which may happen when the
      # initializers of a constructor do not fit into a 80 column line.
      exception = False
      if Match(r' {6}\w', prev_line):  # Initializer list?
        # We are looking for the opening column of initializer list, which
        # should be indented 4 spaces to cause 6 space indentation afterwards.
        search_position = linenum-2
        while (search_position >= 0
               and Match(r' {6}\w', elided[search_position])):
          search_position -= 1
        exception = (search_position >= 0
                     and elided[search_position][:5] == ' :')
      else:
        # Search for the function arguments or an initializer list. We use a
        # simple heuristic here: If the line is indented 4 spaces; and we have a
        # closing paren, without the opening paren, followed by an opening brace
        # or colon (for initializer lists) we assume that it is the last line of
        # a function header. If we have a colon indented 4 spaces, it is an
        # initializer list.
        exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
                           prev_line)
                     or Match(r' {4}:', prev_line))
      if not exception:
        error(filename, linenum, 'whitespace/blank_line', 2,
              'Blank line at the start of a code block. Is this needed?')
    # This doesn't ignore whitespace at the end of a namespace block
    # because that is too hard without pairing open/close braces;
    # however, a special exception is made for namespace closing
    # brackets which have a comment containing "namespace".
    #
    # Also, ignore blank lines at the end of a block in a long if-else
    # chain, like this:
    # if (condition1) {
    # // Something followed by a blank line
    #
    # } else if (condition2) {
    # // Something else
    # }
    if linenum + 1 < clean_lines.NumLines():
      next_line = raw[linenum + 1]
      if (next_line
          and Match(r'\s*}', next_line)
          and next_line.find('namespace') == -1
          and next_line.find('} else ') == -1):
        error(filename, linenum, 'whitespace/blank_line', 3,
              'Blank line at the end of a code block. Is this needed?')
    matched = Match(r'\s*(public|protected|private):', prev_line)
    if matched:
      error(filename, linenum, 'whitespace/blank_line', 3,
            'Do not leave a blank line after "%s:"' % matched.group(1))
  # Next, we complain if there's a comment too near the text
  commentpos = line.find('//')
  if commentpos != -1:
    # Check if the // may be in quotes. If so, ignore it
    # Comparisons made explicit for clarity -- pylint: disable-msg=C6403
    if (line.count('"', 0, commentpos) -
        line.count('\\"', 0, commentpos)) % 2 == 0:   # not in quotes
      # Allow one space for new scopes, two spaces otherwise:
      if (not Match(r'^\s*{ //', line) and
          ((commentpos >= 1 and
            line[commentpos-1] not in string.whitespace) or
           (commentpos >= 2 and
            line[commentpos-2] not in string.whitespace))):
        error(filename, linenum, 'whitespace/comments', 2,
              'At least two spaces is best between code and comments')
      # There should always be a space between the // and the comment
      commentend = commentpos + 2
      if commentend < len(line) and not line[commentend] == ' ':
        # but some lines are exceptions -- e.g. if they're big
        # comment delimiters like:
        # //----------------------------------------------------------
        # or are an empty C++ style Doxygen comment, like:
        # ///
        # or they begin with multiple slashes followed by a space:
        # //////// Header comment
        match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
                 Search(r'^/$', line[commentend:]) or
                 Search(r'^/+ ', line[commentend:]))
        if not match:
          error(filename, linenum, 'whitespace/comments', 4,
                'Should have a space between // and comment')
      CheckComment(line[commentpos:], filename, linenum, error)
  line = clean_lines.elided[linenum]  # get rid of comments and strings
  # Don't try to do spacing checks for operator methods
  line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
  # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
  # Otherwise not. Note we only check for non-spaces on *both* sides;
  # sometimes people put non-spaces on one side when aligning ='s among
  # many lines (not that this is behavior that I approve of...)
  if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
    error(filename, linenum, 'whitespace/operators', 4,
          'Missing spaces around =')
  # It's ok not to have spaces around binary operators like + - * /, but if
  # there's too little whitespace, we get concerned. It's hard to tell,
  # though, so we punt on this one for now. TODO.
  # You should always have whitespace around binary operators.
  # Alas, we can't test < or > because they're legitimately used sans spaces
  # (a->b, vector<int> a). The only time we can tell is a < with no >, and
  # only if it's not template params list spilling into the next line.
  match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
  if not match:
    # Note that while it seems that the '<[^<]*' term in the following
    # regexp could be simplified to '<.*', which would indeed match
    # the same class of strings, the [^<] means that searching for the
    # regexp takes linear rather than quadratic time.
    if not Search(r'<[^<]*,\s*$', line):  # template params spill
      match = Search(r'[^<>=!\s](<)[^<>=!\s]([^>]|->)*$', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  # We allow no-spaces around << and >> when used like this: 10<<20, but
  # not otherwise (particularly, not when used as streams)
  match = Search(r'[^0-9\s](<<|>>)[^0-9\s]', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 3,
          'Missing spaces around %s' % match.group(1))
  # There shouldn't be space around unary operators
  match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
  if match:
    error(filename, linenum, 'whitespace/operators', 4,
          'Extra space for operator %s' % match.group(1))
  # A pet peeve of mine: no spaces after an if, while, switch, or for
  match = Search(r' (if\(|for\(|while\(|switch\()', line)
  if match:
    error(filename, linenum, 'whitespace/parens', 5,
          'Missing space before ( in %s' % match.group(1))
  # For if/for/while/switch, the left and right parens should be
  # consistent about how many spaces are inside the parens, and
  # there should either be zero or one spaces inside the parens.
  # We don't want: "if ( foo)" or "if ( foo )".
  # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
  match = Search(r'\b(if|for|while|switch)\s*'
                 r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
                 line)
  if match:
    if len(match.group(2)) != len(match.group(4)):
      if not (match.group(3) == ';' and
              len(match.group(2)) == 1 + len(match.group(4)) or
              not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
        error(filename, linenum, 'whitespace/parens', 5,
              'Mismatching spaces inside () in %s' % match.group(1))
    if not len(match.group(2)) in [0, 1]:
      error(filename, linenum, 'whitespace/parens', 5,
            'Should have zero or one spaces inside ( and ) in %s' %
            match.group(1))
  # You should always have a space after a comma (either as fn arg or operator)
  if Search(r',[^\s]', line):
    error(filename, linenum, 'whitespace/comma', 3,
          'Missing space after ,')
  # You should always have a space after a semicolon
  # except for few corner cases
  # TODO(unknown): clarify if 'if (1) { return 1;}' is requires one more
  # space after ;
  if Search(r';[^\s};\\)/]', line):
    error(filename, linenum, 'whitespace/semicolon', 3,
          'Missing space after ;')
  # Next we will look for issues with function calls.
  CheckSpacingForFunctionCall(filename, line, linenum, error)
  # Except after an opening paren, or after another opening brace (in case of
  # an initializer list, for instance), you should have spaces before your
  # braces. And since you should never have braces at the beginning of a line,
  # this is an easy test.
  if Search(r'[^ \t({]{', line):  # The check for tab was added by Telldus
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing whitespace before {')
  # Make sure '} else {' has spaces.
  if Search(r'}else', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Missing space before else')
  # You shouldn't have spaces before your brackets, except maybe after
  # 'delete []' or 'new char * []'.
  if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
    error(filename, linenum, 'whitespace/braces', 5,
          'Extra space before [')
  # You shouldn't have a space before a semicolon at the end of the line.
  # There's a special case for "for" since the style guide allows space before
  # the semicolon there.
  if Search(r':\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Semicolon defining empty statement. Use { } instead.')
  elif Search(r'^\s*;\s*$', line):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Line contains only semicolon. If this should be an empty statement, '
          'use { } instead.')
  elif (Search(r'\s+;\s*$', line) and
        not Search(r'\bfor\b', line)):
    error(filename, linenum, 'whitespace/semicolon', 5,
          'Extra space before last semicolon. If this should be an empty '
          'statement, use { } instead.')
def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
  """Checks for additional blank line issues related to sections.

  Currently the only thing checked here is blank line before protected/private.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    class_info: A _ClassInfo objects.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Small classes are exempt: 25 lines or fewer fits in one terminal screen.
  # The class declaration line itself is exempt too.  A class whose end was
  # never found has last_line == 0 and is skipped by the same size test.
  if (class_info.last_line - class_info.linenum <= 24 or
      linenum <= class_info.linenum):
    return

  matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum])
  if not matched:
    return

  # Warn only when the line preceding the access specifier is neither blank
  # nor a class/struct line.  The latter covers two legitimate cases: the
  # specifier sits at the very start of the class, or it follows a forward
  # declaration of an inner class that had to be public for implementation
  # reasons.
  prev_line = clean_lines.lines[linenum - 1]
  if IsBlankLine(prev_line) or Search(r'\b(class|struct)\b', prev_line):
    return

  # Locate the end of the class head so multi-line base-specifier lists are
  # tolerated, e.g.:
  #   class Derived
  #       : public Base {
  end_class_head = class_info.linenum
  for candidate in range(class_info.linenum, linenum):
    if Search(r'\{\s*$', clean_lines.lines[candidate]):
      end_class_head = candidate
      break
  if end_class_head < linenum - 1:
    error(filename, linenum, 'whitespace/blank_line', 3,
          '"%s:" should be preceded by a blank line' % matched.group(1))
def GetPreviousNonBlankLine(clean_lines, linenum):
  """Return the most recent non-blank line and its line number.

  Args:
    clean_lines: A CleansedLines instance containing the file contents.
    linenum: The number of the line to check.

  Returns:
    A tuple with two elements.  The first element is the contents of the last
    non-blank line before the current line, or the empty string if this is the
    first non-blank line.  The second is the line number of that line, or -1
    if this is the first non-blank line.
  """
  # Walk backwards from the line above us until something non-blank turns up.
  for candidate in range(linenum - 1, -1, -1):
    contents = clean_lines.elided[candidate]
    if not IsBlankLine(contents):
      return (contents, candidate)
  return ('', -1)
def CheckBraces(filename, clean_lines, linenum, error):
  """Looks for misplaced braces (e.g. at the end of line).

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """

  line = clean_lines.elided[linenum]  # get rid of comments and strings

  if Match(r'\s*{\s*$', line):
    # We allow an open brace to start a line in the case where someone
    # is using braces in a block to explicitly create a new scope,
    # which is commonly used to control the lifetime of
    # stack-allocated variables.  We don't detect this perfectly: we
    # just don't complain if the last non-whitespace character on the
    # previous non-blank line is ';', ':', '{', or '}'.
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if not Search(r'[;:}{]\s*$', prevline):
      error(filename, linenum, 'whitespace/braces', 4,
            '{ should almost always be at the end of the previous line')

  # An else clause should be on the same line as the preceding closing brace.
  if Match(r'\s*else\s*', line):
    prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0]
    if Match(r'\s*}\s*$', prevline):
      error(filename, linenum, 'whitespace/newline', 4,
            'An else should appear on the same line as the preceding }')

  # If braces come on one side of an else, they should be on both.
  # However, we have to worry about "else if" that spans multiple lines!
  if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line):
    if Search(r'}\s*else if([^{]*)$', line):  # could be multi-line if
      # find the ( after the if
      pos = line.find('else if')
      pos = line.find('(', pos)
      if pos > 0:
        (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos)
        if endline[endpos:].find('{') == -1:  # must be brace after if
          error(filename, linenum, 'readability/braces', 5,
                'If an else has a brace on one side, it should have it on both')
    else:  # common case: else not followed by a multi-line if
      error(filename, linenum, 'readability/braces', 5,
            'If an else has a brace on one side, it should have it on both')

  # Likewise, an else should never have the else clause on the same line
  if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'Else clause should never be on same line as else (use 2 lines)')

  # In the same way, a do/while should never be on one line
  if Match(r'\s*do [^\s{]', line):
    error(filename, linenum, 'whitespace/newline', 4,
          'do/while clauses should not be on a single line')

  # Braces shouldn't be followed by a ; unless they're defining a struct
  # or initializing an array.
  # We can't tell in general, but we can for some common cases.
  prevlinenum = linenum
  # NOTE(review): this loop glues preceding lines onto 'line' while the text
  # still looks like an indented '{...};' fragment and the preceding line
  # carries no ';' of its own -- presumably so the brace-balance test below
  # can see a construct that was split across lines.  TODO confirm against
  # upstream cpplint history.
  while True:
    (prevline, prevlinenum) = GetPreviousNonBlankLine(clean_lines, prevlinenum)
    if Match(r'\s+{.*}\s*;', line) and not prevline.count(';'):
      line = prevline + line
    else:
      break
  if (Search(r'{.*}\s*;', line) and
      line.count('{') == line.count('}') and
      not Search(r'struct|class|enum|\s*=\s*{', line)):
    error(filename, linenum, 'readability/braces', 4,
          "You don't need a ; after a }")
def ReplaceableCheck(operator, macro, line):
  """Determine whether a basic CHECK can be replaced with a more specific one.

  For example suggest using CHECK_EQ instead of CHECK(a == b) and
  similarly for CHECK_GE, CHECK_GT, CHECK_LE, CHECK_LT, CHECK_NE.

  Args:
    operator: The C++ operator used in the CHECK.
    macro: The CHECK or EXPECT macro being called.
    line: The current source line.

  Returns:
    True if the CHECK can be replaced with a more specific one.
  """
  # Matches decimal and hex integers, strings, and chars (in that order).
  match_constant = r'([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')'

  # At least one side of the operator must look like a literal, since e.g.
  # CHECK(x == iterator) would not compile as CHECK_EQ.  That misses some
  # replaceable CHECKs, but is less annoying than extraneous warnings.
  constant_on_left = match_constant + r'\s*' + operator + r'[^<>].*'
  constant_on_right = r'.*[^<>]' + operator + r'\s*' + match_constant
  match_this = (r'\s*' + macro + r'\((\s*' +
                constant_on_left + '|' +
                constant_on_right + r'\s*\))')

  # Don't complain about CHECK(x == NULL) or similar: CHECK_EQ(x, NULL)
  # won't compile without a cast.  Compound boolean expressions involving
  # && or ||, such as CHECK(a == b || c == d), have no single-macro
  # replacement either.
  return Match(match_this, line) and not Search(r'NULL|&&|\|\|', line)
def CheckCheck(filename, clean_lines, linenum, error):
  """Checks the use of CHECK and EXPECT macros.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Bail out quickly when the raw line mentions no CHECK/EXPECT macro at
  # all -- no point running the regexes below.
  raw_line = clean_lines.raw_lines[linenum]
  current_macro = ''
  for candidate in _CHECK_MACROS:
    if raw_line.find(candidate) >= 0:
      current_macro = candidate
      break
  if not current_macro:
    return

  elided_line = clean_lines.elided[linenum]  # comments and strings removed
  # Encourage replacing a plain CHECK(a <op> b) with the dedicated
  # comparison macro (CHECK_EQ, CHECK_NE, ...); stop at the first match.
  for operator in ('==', '!=', '>=', '>', '<=', '<'):
    if ReplaceableCheck(operator, current_macro, elided_line):
      error(filename, linenum, 'readability/check', 2,
            'Consider using %s instead of %s(a %s b)' % (
                _CHECK_REPLACEMENT[current_macro][operator],
                current_macro, operator))
      break
def GetLineWidth(line):
  """Determines the width of the line in column positions.

  Args:
    line: A string, which may be a Unicode string.

  Returns:
    The width of the line in column positions, accounting for Unicode
    combining characters and wide characters.
  """
  # The bare name 'unicode' only exists on Python 2; referencing it on
  # Python 3 raised NameError here.  Probe for it once so the function works
  # on both: on Python 3 every str is Unicode and takes the width path,
  # while Python 2 byte strings still fall through to len() as before.
  try:
    text_type = unicode
  except NameError:
    text_type = str
  if isinstance(line, text_type):
    width = 0
    for uc in unicodedata.normalize('NFC', line):
      # East-Asian wide/fullwidth characters occupy two columns; combining
      # characters occupy none; everything else occupies one.
      if unicodedata.east_asian_width(uc) in ('W', 'F'):
        width += 2
      elif not unicodedata.combining(uc):
        width += 1
    return width
  else:
    # Byte strings: assume one column per byte.
    return len(line)
def CheckStyle(filename, clean_lines, linenum, file_extension, class_state,
               error):
  """Checks rules from the 'C++ style rules' section of cppguide.html.

  Most of these rules are hard to test (naming, comment style), but we
  do what we can. In particular we check for 2-space indents, line lengths,
  tab usage, spaces inside code, etc.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    class_state: A _ClassState-like object; only its classinfo_stack is read
      here, to run section-spacing checks on the innermost class.
    error: The function to call with any errors found.
  """
  # Don't use "elided" lines here, otherwise we can't check commented lines.
  raw_lines = clean_lines.raw_lines
  line = raw_lines[linenum]

  if line.find('\t') != -1:
    error(filename, linenum, 'whitespace/tab', 1,
          'Tab found; better to use spaces')
  # NOTE(review): this Telldus-added check enforces tabs-for-indentation,
  # which contradicts the stock whitespace/tab check above -- a file with
  # mixed indentation can trigger both.  Confirm which convention is wanted.
  if line and line[0] == ' ':
    error(filename, linenum, 'whitespace/use_tab_for_indentation', 1,
          'Space found; use tabs for indentation')

  # One or three blank spaces at the beginning of the line is weird; it's
  # hard to reconcile that with 2-space indents.
  # NOTE: here are the conditions rob pike used for his tests.  Mine aren't
  # as sophisticated, but it may be worth becoming so:  RLENGTH==initial_spaces
  # if(RLENGTH > 20) complain = 0;
  # if(match($0, " +(error|private|public|protected):")) complain = 0;
  # if(match(prev, "&& *$")) complain = 0;
  # if(match(prev, "\\|\\| *$")) complain = 0;
  # if(match(prev, "[\",=><] *$")) complain = 0;
  # if(match($0, " <<")) complain = 0;
  # if(match(prev, " +for \\(")) complain = 0;
  # if(prevodd && match(prevprev, " +for \\(")) complain = 0;
  initial_spaces = 0
  cleansed_line = clean_lines.elided[linenum]
  while initial_spaces < len(line) and line[initial_spaces] == ' ':
    initial_spaces += 1
  if line and line[-1].isspace():
    error(filename, linenum, 'whitespace/end_of_line', 4,
          'Line ends in whitespace. Consider deleting these extra spaces.')
  # There are certain situations we allow one space, notably for labels
  elif ((initial_spaces == 1 or initial_spaces == 3) and
        not Match(r'\s*\w+\s*:\s*$', cleansed_line)):
    error(filename, linenum, 'whitespace/indent', 3,
          'Weird number of spaces at line-start. '
          'Are you using a 2-space indent?')
  # Labels should always be indented at least one space.
  elif not initial_spaces and line[:2] != '//' and Search(r'[^:]:\s*$',
                                                          line):
    error(filename, linenum, 'whitespace/labels', 4,
          'Labels should always be indented at least one space. '
          'If this is a member-initializer list in a constructor or '
          'the base class list in a class definition, the colon should '
          'be on the following line.')

  # Check if the line is a header guard.
  is_header_guard = False
  if file_extension == 'h':
    cppvar = GetHeaderGuardCPPVariable(filename)
    if (line.startswith('#ifndef %s' % cppvar) or
        line.startswith('#define %s' % cppvar) or
        line.startswith('#endif // %s' % cppvar)):
      is_header_guard = True
  # #include lines and header guards can be long, since there's no clean way to
  # split them.
  #
  # URLs can be long too.  It's possible to split these, but it makes them
  # harder to cut&paste.
  #
  # The "$Id:...$" comment may also get very long without it being the
  # developers fault.
  if (not line.startswith('#include') and not is_header_guard and
      not Match(r'^\s*//.*http(s?)://\S*$', line) and
      not Match(r'^// \$Id:.*#[0-9]+ \$$', line)):
    line_width = GetLineWidth(line)
    if line_width > 100:
      error(filename, linenum, 'whitespace/line_length', 4,
            'Lines should very rarely be longer than 100 characters')
    elif line_width > 80:
      error(filename, linenum, 'whitespace/line_length', 2,
            'Lines should be <= 80 characters long')

  if (cleansed_line.count(';') > 1 and
      # for loops are allowed two ;'s (and may run over two lines).
      cleansed_line.find('for') == -1 and
      (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or
       GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and
      # It's ok to have many commands in a switch case that fits in 1 line
      not ((cleansed_line.find('case ') != -1 or
            cleansed_line.find('default:') != -1) and
           cleansed_line.find('break;') != -1)):
    error(filename, linenum, 'whitespace/newline', 4,
          'More than one command on the same line')

  # Some more style checks
  CheckBraces(filename, clean_lines, linenum, error)
  CheckSpacing(filename, clean_lines, linenum, error)
  CheckCheck(filename, clean_lines, linenum, error)
  if class_state and class_state.classinfo_stack:
    CheckSectionSpacing(filename, clean_lines,
                        class_state.classinfo_stack[-1], linenum, error)
# Matches an old-style include: a quoted .h file with no directory component.
# The style wants '#include "dir/foo.h"' instead.
_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"')
# Matches any #include line; group 1 is the opening delimiter (< or ") and
# group 2 is the included path.
_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$')
# Matches the first component of a filename delimited by -s and _s. That is:
#   _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo'
#   _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo'
_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+')
def _DropCommonSuffixes(filename):
"""Drops common suffixes like _test.cc or -inl.h from filename.
For example:
>>> _DropCommonSuffixes('foo/foo-inl.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/bar/foo.cc')
'foo/bar/foo'
>>> _DropCommonSuffixes('foo/foo_internal.h')
'foo/foo'
>>> _DropCommonSuffixes('foo/foo_unusualinternal.h')
'foo/foo_unusualinternal'
Args:
filename: The input filename.
Returns:
The filename with the common suffix removed.
"""
for suffix in ('test.cc', 'regtest.cc', 'unittest.cc',
'inl.h', 'impl.h', 'internal.h'):
if (filename.endswith(suffix) and len(filename) > len(suffix) and
filename[-len(suffix) - 1] in ('-', '_')):
return filename[:-len(suffix) - 1]
return os.path.splitext(filename)[0]
def _IsTestFilename(filename):
"""Determines if the given filename has a suffix that identifies it as a test.
Args:
filename: The input filename.
Returns:
True if 'filename' looks like a test, False otherwise.
"""
if (filename.endswith('_test.cc') or
filename.endswith('_unittest.cc') or
filename.endswith('_regtest.cc')):
return True
else:
return False
def _ClassifyInclude(fileinfo, include, is_system):
  """Figures out what kind of header 'include' is.

  Args:
    fileinfo: The current file cpplint is running over. A FileInfo instance.
    include: The path to a #included file.
    is_system: True if the #include used <> rather than "".

  Returns:
    One of the _XXX_HEADER constants.

  For example:
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True)
    _C_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True)
    _CPP_SYS_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False)
    _LIKELY_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'),
    ...                  'bar/foo_other_ext.h', False)
    _POSSIBLE_MY_HEADER
    >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False)
    _OTHER_HEADER
  """
  # System headers split into C++ (the STL list plus the remaining standard
  # C++ headers) and plain C.
  if is_system:
    if include in _STL_HEADERS or include in _CPP_HEADERS:
      return _CPP_SYS_HEADER
    return _C_SYS_HEADER

  # A quoted include that shares the target's basename (once common suffixes
  # are dropped) and lives in the same directory -- or in the sibling
  # '../public' directory -- is most likely the header this file implements.
  target_dir, target_base = (
      os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName())))
  include_dir, include_base = os.path.split(_DropCommonSuffixes(include))
  if target_base == include_base and (
      include_dir == target_dir or
      include_dir == os.path.normpath(target_dir + '/../public')):
    return _LIKELY_MY_HEADER

  # Sharing only the first basename component is a weaker hint: such a
  # header is allowed to come first, but its absence is never reported.
  target_first_component = _RE_FIRST_COMPONENT.match(target_base)
  include_first_component = _RE_FIRST_COMPONENT.match(include_base)
  if (target_first_component and include_first_component and
      target_first_component.group(0) ==
      include_first_component.group(0)):
    return _POSSIBLE_MY_HEADER

  return _OTHER_HEADER
def CheckIncludeLine(filename, clean_lines, linenum, include_state, error):
  """Check rules that are applicable to #include lines.

  Strings on #include lines are NOT removed from elided line, to make
  certain tasks easier. However, to prevent false positives, checks
  applicable to #include lines in CheckLanguage must be put here.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  fileinfo = FileInfo(filename)

  line = clean_lines.lines[linenum]

  # "include" should use the new style "foo/bar.h" instead of just "bar.h"
  if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line):
    error(filename, linenum, 'build/include', 4,
          'Include the directory when naming .h files')

  # we shouldn't include a file more than once. actually, there are a
  # handful of instances where doing so is okay, but in general it's
  # not.
  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    include = match.group(2)
    is_system = (match.group(1) == '<')
    if include in include_state:
      error(filename, linenum, 'build/include', 4,
            '"%s" already included at %s:%s' %
            (include, filename, include_state[include]))
    else:
      # First sighting of this header: remember the line so later duplicates
      # can be reported, then run the ordering checks.
      include_state[include] = linenum

      # We want to ensure that headers appear in the right order:
      # 1) for foo.cc, foo.h  (preferred location)
      # 2) c system files
      # 3) cpp system files
      # 4) for foo.cc, foo.h  (deprecated location)
      # 5) other google headers
      #
      # We classify each include statement as one of those 5 types
      # using a number of techniques. The include_state object keeps
      # track of the highest type seen, and complains if we see a
      # lower type after that.
      error_message = include_state.CheckNextIncludeOrder(
          _ClassifyInclude(fileinfo, include, is_system))
      if error_message:
        error(filename, linenum, 'build/include_order', 4,
              '%s. Should be: %s.h, c system, c++ system, other.' %
              (error_message, fileinfo.BaseName()))
      if not include_state.IsInAlphabeticalOrder(include):
        error(filename, linenum, 'build/include_alpha', 4,
              'Include "%s" not in alphabetical order' % include)

  # Look for any of the stream classes that are part of standard C++.
  match = _RE_PATTERN_INCLUDE.match(line)
  if match:
    include = match.group(2)
    if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
      # Many unit tests use cout, so we exempt them.
      if not _IsTestFilename(filename):
        error(filename, linenum, 'readability/streams', 3,
              'Streams are highly discouraged.')
def _GetTextInside(text, start_pattern):
"""Retrieves all the text between matching open and close parentheses.
Given a string of lines and a regular expression string, retrieve all the text
following the expression and between opening punctuation symbols like
(, [, or {, and the matching close-punctuation symbol. This properly nested
occurrences of the punctuations, so for the text like
printf(a(), b(c()));
a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
start_pattern must match string having an open punctuation symbol at the end.
Args:
text: The lines to extract text. Its comments and strings must be elided.
It can be single line and can span multiple lines.
start_pattern: The regexp string indicating where to start extracting
the text.
Returns:
The extracted text.
None if either the opening string or ending punctuation could not be found.
"""
# TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
# rewritten to use _GetTextInside (and use inferior regexp matching today).
# Give opening punctuations to get the matching close-punctuations.
matching_punctuation = {'(': ')', '{': '}', '[': ']'}
closing_punctuation = set(matching_punctuation.itervalues())
# Find the position to start extracting text.
match = re.search(start_pattern, text, re.M)
if not match: # start_pattern not found in text.
return None
start_position = match.end(0)
assert start_position > 0, (
'start_pattern must ends with an opening punctuation.')
assert text[start_position - 1] in matching_punctuation, (
'start_pattern must ends with an opening punctuation.')
# Stack of closing punctuations we expect to have in text after position.
punctuation_stack = [matching_punctuation[text[start_position - 1]]]
position = start_position
while punctuation_stack and position < len(text):
if text[position] == punctuation_stack[-1]:
punctuation_stack.pop()
elif text[position] in closing_punctuation:
# A closing punctuation without matching opening punctuations.
return None
elif text[position] in matching_punctuation:
punctuation_stack.append(matching_punctuation[text[position]])
position += 1
if punctuation_stack:
# Opening punctuations left without matching close-punctuations.
return None
# punctuations match.
return text[start_position:position - 1]
def CheckLanguage(filename, clean_lines, linenum, file_extension, include_state,
                  error):
  """Checks rules from the 'C++ language rules' section of cppguide.html.

  Some of these rules are hard to test (function overloading, using
  uint32 inappropriately), but we do the best we can.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    file_extension: The extension (without the dot) of the filename.
    include_state: An _IncludeState instance in which the headers are inserted.
    error: The function to call with any errors found.
  """
  # If the line is empty or consists of entirely a comment, no need to
  # check it.
  line = clean_lines.elided[linenum]
  if not line:
    return

  match = _RE_PATTERN_INCLUDE.search(line)
  if match:
    CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
    return

  # Create an extended_line, which is the concatenation of the current and
  # next lines, for more effective checking of code that may span more than one
  # line.
  if linenum + 1 < clean_lines.NumLines():
    extended_line = line + clean_lines.elided[linenum + 1]
  else:
    extended_line = line

  # Make Windows paths like Unix.
  fullname = os.path.abspath(filename).replace('\\', '/')

  # TODO(unknown): figure out if they're using default arguments in fn proto.

  # Check for non-const references in functions.  This is tricky because &
  # is also used to take the address of something.  We allow <> for templates,
  # (ignoring whatever is between the braces) and : for classes.
  # These are complicated re's.  They try to capture the following:
  # paren (for fn-prototype start), typename, &, varname.  For the const
  # version, we're willing for const to be before typename or after
  # Don't check the implementation on same line.
  fnline = line.split('{', 1)[0]
  if (len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) >
      len(re.findall(r'\([^()]*\bconst\s+(?:typename\s+)?(?:struct\s+)?'
                     r'(?:[\w:]|<[^()]*>)+(\s?&|&\s?)\w+', fnline)) +
      len(re.findall(r'\([^()]*\b(?:[\w:]|<[^()]*>)+\s+const(\s?&|&\s?)[\w]+',
                     fnline))):

    # We allow non-const references in a few standard places, like functions
    # called "swap()" or iostream operators like "<<" or ">>".
    if not Search(
        r'(swap|Swap|operator[<>][<>])\s*\(\s*(?:[\w:]|<.*>)+\s*&',
        fnline):
      error(filename, linenum, 'runtime/references', 2,
            'Is this a non-const reference? '
            'If so, make const or use a pointer.')

  # Check to see if they're using an conversion function cast.
  # I just try to capture the most common basic types, though there are more.
  # Parameterless conversion functions, such as bool(), are allowed as they are
  # probably a member operator declaration or default constructor.
  match = Search(
      r'(\bnew\s+)?\b'  # Grab 'new' operator, if it's there
      r'(int|float|double|bool|char|int32|uint32|int64|uint64)\([^)]', line)
  if match:
    # gMock methods are defined using some variant of MOCK_METHODx(name, type)
    # where type may be float(), int(string), etc.  Without context they are
    # virtually indistinguishable from int(x) casts.  Likewise, gMock's
    # MockCallback takes a template parameter of the form return_type(arg_type),
    # which looks much like the cast we're trying to detect.
    if (match.group(1) is None and  # If new operator, then this isn't a cast
        not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or
             Match(r'^\s*MockCallback<.*>', line))):
      error(filename, linenum, 'readability/casting', 4,
            'Using deprecated casting style. '
            'Use static_cast<%s>(...) instead' %
            match.group(2))

  CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                  'static_cast',
                  r'\((int|float|double|bool|char|u?int(16|32|64))\)', error)
  # This doesn't catch all cases.  Consider (const char * const)"hello".
  #
  # (char *) "foo" should always be a const_cast (reinterpret_cast won't
  # compile).
  if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                     'const_cast', r'\((char\s?\*+\s?)\)\s*"', error):
    pass
  else:
    # Check pointer casts for other than string constants
    CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum],
                    'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error)

  # In addition, we look for people taking the address of a cast.  This
  # is dangerous -- casts can assign to temporaries, so the pointer doesn't
  # point where you think.
  if Search(
      r'(&\([^)]+\)[\w(])|(&(static|dynamic|reinterpret)_cast\b)', line):
    error(filename, linenum, 'runtime/casting', 4,
          ('Are you taking an address of a cast? '
           'This is dangerous: could be a temp var. '
           'Take the address before doing the cast, rather than after'))

  # Check for people declaring static/global STL strings at the top level.
  # This is dangerous because the C++ language does not guarantee that
  # globals with constructors are initialized before the first access.
  match = Match(
      r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)',
      line)
  # Make sure it's not a function.
  # Function template specialization looks like: "string foo<Type>(...".
  # Class template definitions look like: "string Foo<Type>::Method(...".
  if match and not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)',
                         match.group(3)):
    error(filename, linenum, 'runtime/string', 4,
          'For a static/global string constant, use a C style string instead: '
          '"%schar %s[]".' %
          (match.group(1), match.group(2)))

  # Check that we're not using RTTI outside of testing code.
  if Search(r'\bdynamic_cast<', line) and not _IsTestFilename(filename):
    error(filename, linenum, 'runtime/rtti', 5,
          'Do not use dynamic_cast<>. If you need to cast within a class '
          "hierarchy, use static_cast<> to upcast. Google doesn't support "
          'RTTI.')

  if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line):
    error(filename, linenum, 'runtime/init', 4,
          'You seem to be initializing a member variable with itself.')

  if file_extension == 'h':
    # TODO(unknown): check that 1-arg constructors are explicit.
    #                How to tell it's a constructor?
    #                (handled in CheckForNonStandardConstructs for now)
    # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS
    #                (level 1 error)
    pass

  # Check if people are using the verboten C basic types.  The only exception
  # we regularly allow is "unsigned short port" for port.
  if Search(r'\bshort port\b', line):
    if not Search(r'\bunsigned short port\b', line):
      error(filename, linenum, 'runtime/int', 4,
            'Use "unsigned short" for ports, not "short"')
  else:
    match = Search(r'\b(short|long(?! +double)|long long)\b', line)
    if match:
      error(filename, linenum, 'runtime/int', 4,
            'Use int16/int64/etc, rather than the C type %s' % match.group(1))

  # When snprintf is used, the second argument shouldn't be a literal.
  match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line)
  if match and match.group(2) != '0':
    # If 2nd arg is zero, snprintf is used to calculate size.
    error(filename, linenum, 'runtime/printf', 3,
          'If you can, use sizeof(%s) instead of %s as the 2nd arg '
          'to snprintf.' % (match.group(1), match.group(2)))

  # Check if some verboten C functions are being used.
  if Search(r'\bsprintf\b', line):
    error(filename, linenum, 'runtime/printf', 5,
          'Never use sprintf. Use snprintf instead.')
  match = Search(r'\b(strcpy|strcat)\b', line)
  if match:
    error(filename, linenum, 'runtime/printf', 4,
          'Almost always, snprintf is better than %s' % match.group(1))

  if Search(r'\bsscanf\b', line):
    error(filename, linenum, 'runtime/printf', 1,
          'sscanf can be ok, but is slow and can overflow buffers.')

  # Check if some verboten operator overloading is going on
  # TODO(unknown): catch out-of-line unary operator&:
  #   class X {};
  #   int operator&(const X& x) { return 42; }  // unary operator&
  # The trick is it's hard to tell apart from binary operator&:
  #   class Y { int operator&(const Y& x) { return 23; } };  // binary operator&
  if Search(r'\boperator\s*&\s*\(\s*\)', line):
    error(filename, linenum, 'runtime/operator', 4,
          'Unary operator& is dangerous. Do not use it.')

  # Check for suspicious usage of "if" like
  # } if (a == b) {
  if Search(r'\}\s*if\s*\(', line):
    error(filename, linenum, 'readability/braces', 4,
          'Did you mean "else if"? If not, start a new line for "if".')

  # Check for potential format string bugs like printf(foo).
  # We constrain the pattern not to pick things like DocidForPrintf(foo).
  # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str())
  # TODO(sugawarayu): Catch the following case. Need to change the calling
  # convention of the whole function to process multiple line to handle it.
  #   printf(
  #       boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line);
  printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(')
  if printf_args:
    match = Match(r'([\w.\->()]+)$', printf_args)
    if match:
      function_name = re.search(r'\b((?:string)?printf)\s*\(',
                                line, re.I).group(1)
      error(filename, linenum, 'runtime/printf', 4,
            'Potential format string bug. Do %s("%%s", %s) instead.'
            % (function_name, match.group(1)))

  # Check for potential memset bugs like memset(buf, sizeof(buf), 0).
  match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line)
  if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)):
    error(filename, linenum, 'runtime/memset', 4,
          'Did you mean "memset(%s, 0, %s)"?'
          % (match.group(1), match.group(2)))

  if Search(r'\busing namespace\b', line):
    error(filename, linenum, 'build/namespaces', 5,
          'Do not use namespace using-directives. '
          'Use using-declarations instead.')

  # Detect variable-length arrays.
  match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line)
  if (match and match.group(2) != 'return' and match.group(2) != 'delete' and
      match.group(3).find(']') == -1):
    # Split the size using space and arithmetic operators as delimiters.
    # If any of the resulting tokens are not compile time constants then
    # report the error.
    tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3))
    is_const = True
    skip_next = False
    for tok in tokens:
      if skip_next:
        skip_next = False
        continue

      if Search(r'sizeof\(.+\)', tok): continue
      if Search(r'arraysize\(\w+\)', tok): continue

      tok = tok.lstrip('(')
      tok = tok.rstrip(')')
      if not tok: continue
      if Match(r'\d+', tok): continue
      if Match(r'0[xX][0-9a-fA-F]+', tok): continue
      if Match(r'k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue
      if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue
      # A catch all for tricky sizeof cases, including 'sizeof expression',
      # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)'
      # requires skipping the next token because we split on ' ' and '*'.
      if tok.startswith('sizeof'):
        skip_next = True
        continue
      is_const = False
      break

    if not is_const:
      error(filename, linenum, 'runtime/arrays', 1,
            'Do not use variable-length arrays. Use an appropriately named '
            "('k' followed by CamelCase) compile-time constant for the size.")

  # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or
  # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing
  # in the class declaration.
  match = Match(
      (r'\s*'
       r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))'
       r'\(.*\);$'),
      line)
  if match and linenum + 1 < clean_lines.NumLines():
    next_line = clean_lines.elided[linenum + 1]
    # We allow some, but not all, declarations of variables to be present
    # in the statement that defines the class.  The [\w\*,\s]* fragment of
    # the regular expression below allows users to declare instances of
    # the class or pointers to instances, but not less common types such
    # as function pointers or arrays.  It's a tradeoff between allowing
    # reasonable code and avoiding trying to parse more C++ using regexps.
    if not Search(r'^\s*}[\w\*,\s]*;', next_line):
      error(filename, linenum, 'readability/constructors', 3,
            match.group(1) + ' should be the last thing in the class')

  # Check for use of unnamed namespaces in header files.  Registration
  # macros are typically OK, so we allow use of "namespace {" on lines
  # that end with backslashes.
  if (file_extension == 'h'
      and Search(r'\bnamespace\s*{', line)
      and line[-1] != '\\'):
    error(filename, linenum, 'build/namespaces', 4,
          'Do not use unnamed namespaces in header files. See '
          'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
          ' for more information.')
def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern,
                    error):
  """Checks for a C-style cast by looking for the pattern.

  This also handles sizeof(type) warnings, due to similarity of content.

  Args:
    filename: The name of the current file.
    linenum: The number of the line to check.
    line: The line of code to check.
    raw_line: The raw line of code to check, with comments.
    cast_type: The string for the C++ cast to recommend. This is either
      reinterpret_cast, static_cast, or const_cast, depending.
    pattern: The regular expression used to find C-style casts.
    error: The function to call with any errors found.

  Returns:
    True if an error was emitted.
    False otherwise.
  """
  match = Search(pattern, line)
  if not match:
    return False

  # e.g., sizeof(int) -- flag sizeof applied to a type name rather than a
  # variable.  Detected by looking for 'sizeof' immediately before the
  # parenthesized type that 'pattern' matched.
  sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1])
  if sizeof_match:
    error(filename, linenum, 'runtime/sizeof', 1,
          'Using sizeof(type). Use sizeof(varname) instead if possible')
    return True

  # Everything after the matched "(type)" decides whether this was really a
  # cast or an unnamed parameter in a declaration.
  remainder = line[match.end(0):]

  # The close paren is for function pointers as arguments to a function.
  # eg, void foo(void (*bar)(int));
  # The semicolon check is a more basic function check; also possibly a
  # function pointer typedef.
  # eg, void foo(int); or void foo(int) const;
  # The equals check is for function pointer assignment.
  # eg, void *(*foo)(int) = ...
  # The > is for MockCallback<...> ...
  #
  # Right now, this will only catch cases where there's a single argument, and
  # it's unnamed. It should probably be expanded to check for multiple
  # arguments with some unnamed.
  function_match = Match(r'\s*(\)|=|(const)?\s*(;|\{|throw\(\)|>))', remainder)
  if function_match:
    # Warn unless this looks like a MockCallback instantiation or the raw
    # line carries a /* name */ comment documenting the parameter.
    if (not function_match.group(3) or
        function_match.group(3) == ';' or
        ('MockCallback<' not in raw_line and
         '/*' not in raw_line)):
      error(filename, linenum, 'readability/function', 3,
            'All parameters should be named in a function')
    return True

  # At this point, all that should be left is actual casts.
  error(filename, linenum, 'readability/casting', 4,
        'Using C-style cast. Use %s<%s>(...) instead' %
        (cast_type, match.group(1)))

  return True
# Mapping from STL header to the templated entities that header declares.
# Consumed by CheckForIncludeWhatYouUse to suggest missing #includes.
_HEADERS_CONTAINING_TEMPLATES = (
    ('<deque>', ('deque',)),
    ('<functional>', ('unary_function', 'binary_function',
                      'plus', 'minus', 'multiplies', 'divides', 'modulus',
                      'negate',
                      'equal_to', 'not_equal_to', 'greater', 'less',
                      'greater_equal', 'less_equal',
                      'logical_and', 'logical_or', 'logical_not',
                      'unary_negate', 'not1', 'binary_negate', 'not2',
                      'bind1st', 'bind2nd',
                      'pointer_to_unary_function',
                      'pointer_to_binary_function',
                      'ptr_fun',
                      'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
                      'mem_fun_ref_t',
                      'const_mem_fun_t', 'const_mem_fun1_t',
                      'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
                      'mem_fun_ref',
                      )),
    ('<limits>', ('numeric_limits',)),
    ('<list>', ('list',)),
    ('<map>', ('map', 'multimap',)),
    ('<memory>', ('allocator',)),
    ('<queue>', ('queue', 'priority_queue',)),
    ('<set>', ('set', 'multiset',)),
    ('<stack>', ('stack',)),
    ('<string>', ('char_traits', 'basic_string',)),
    ('<utility>', ('pair',)),
    ('<vector>', ('vector',)),

    # gcc extensions.
    # Note: std::hash is their hash, ::hash is our hash
    ('<hash_map>', ('hash_map', 'hash_multimap',)),
    ('<hash_set>', ('hash_set', 'hash_multiset',)),
    ('<slist>', ('slist',)),
    )
# std::string is special -- it is a non-templatized STL type, so it gets its
# own pattern instead of an entry in _HEADERS_CONTAINING_TEMPLATES.
_RE_PATTERN_STRING = re.compile(r'\bstring\b')

# (compiled regexp, entity name, header) triples for <algorithm> functions.
_re_pattern_algorithm_header = []
for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
                  'transform'):
  # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
  # type::max().
  _re_pattern_algorithm_header.append(
      (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
       _template,
       '<algorithm>'))

# Same triple structure, generated from _HEADERS_CONTAINING_TEMPLATES: a use
# like "vector<" maps to ('vector<>', '<vector>').
_re_pattern_templates = []
for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
  for _template in _templates:
    _re_pattern_templates.append(
        (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
         _template + '<>',
         _header))
def FilesBelongToSameModule(filename_cc, filename_h):
  """Check if these two filenames belong to the same module.

  The concept of a 'module' here is as follows: foo.h, foo-inl.h, foo.cc,
  foo_test.cc and foo_unittest.cc belong to the same 'module' if they are
  in the same directory.  some/path/public/xyzzy and
  some/path/internal/xyzzy are also considered to belong to the same
  module here.

  If the filename_cc contains a longer path than the filename_h, for
  example, '/absolute/path/to/base/sysinfo.cc', and this file would
  include 'base/sysinfo.h', this function also produces the prefix needed
  to open the header.  This is used by the caller of this function to more
  robustly open the header file.  We don't have access to the real include
  paths in this context, so we need this guesswork here.

  Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
  according to this implementation.  Because of this, this function gives
  some false positives.  This should be sufficiently rare in practice.

  Args:
    filename_cc: is the path for the .cc file
    filename_h: is the path for the header path

  Returns:
    Tuple with a bool and a string:
      bool: True if filename_cc and filename_h belong to the same module.
      string: the additional prefix needed to open the header file.
  """
  def _collapse_visibility_dirs(path):
    # /public/ and /internal/ layout variants compare as the same module.
    return path.replace('/public/', '/').replace('/internal/', '/')

  if not filename_cc.endswith('.cc'):
    return (False, '')
  stem_cc = filename_cc[:-len('.cc')]
  # Strip a test suffix, longest first (mirrors the original elif order).
  for test_suffix in ('_unittest', '_test'):
    if stem_cc.endswith(test_suffix):
      stem_cc = stem_cc[:-len(test_suffix)]
      break
  stem_cc = _collapse_visibility_dirs(stem_cc)

  if not filename_h.endswith('.h'):
    return (False, '')
  stem_h = filename_h[:-len('.h')]
  if stem_h.endswith('-inl'):
    stem_h = stem_h[:-len('-inl')]
  stem_h = _collapse_visibility_dirs(stem_h)

  if not stem_cc.endswith(stem_h):
    return (False, '')
  # The leftover prefix lets the caller open the header relative to the .cc.
  return (True, stem_cc[:-len(stem_h)])
def UpdateIncludeState(filename, include_state, io=codecs):
  """Fill up the include_state with new includes found from the file.

  Args:
    filename: the name of the header to read.
    include_state: an _IncludeState instance in which the headers are inserted.
    io: The io factory to use to read the file. Provided for testability.

  Returns:
    True if a header was successfully added. False otherwise.
  """
  try:
    headerfile = io.open(filename, 'r', 'utf8', 'replace')
  except IOError:
    return False
  for linenum, raw_line in enumerate(headerfile, 1):
    match = _RE_PATTERN_INCLUDE.search(CleanseComments(raw_line))
    if not match:
      continue
    include = match.group(2)
    # The value formatting is cute, but not really used right now.
    # What matters here is that the key is in include_state.
    include_state.setdefault(include, '%s:%d' % (filename, linenum))
  return True
def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
                              io=codecs):
  """Reports for missing stl includes.

  This function will output warnings to make sure you are including the headers
  necessary for the stl containers and functions that you use. We only give one
  reason to include a header. For example, if you use both equal_to<> and
  less<> in a .h file, only one (the latter in the file) of these will be
  reported as a reason to include the <functional>.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    include_state: An _IncludeState instance.
    error: The function to call with any errors found.
    io: The IO factory to use to read the header file. Provided for unittest
        injection.
  """
  # A map of header name to (linenumber, template entity),
  # e.g. { '<functional>': (1219, 'less<>') }.
  required = {}

  # NOTE: xrange / dict.keys() iteration below mark this file as targeting
  # Python 2.
  for linenum in xrange(clean_lines.NumLines()):
    line = clean_lines.elided[linenum]
    if not line or line[0] == '#':
      continue

    # String is special -- it is a non-templatized type in STL.
    matched = _RE_PATTERN_STRING.search(line)
    if matched:
      # Don't warn about strings in non-STL namespaces:
      # (We check only the first match per line; good enough.)
      prefix = line[:matched.start()]
      if prefix.endswith('std::') or not prefix.endswith('::'):
        required['<string>'] = (linenum, 'string')

    for pattern, template, header in _re_pattern_algorithm_header:
      if pattern.search(line):
        required[header] = (linenum, template)

    # The following function is just a speed up, no semantics are changed.
    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
      continue

    for pattern, template, header in _re_pattern_templates:
      if pattern.search(line):
        required[header] = (linenum, template)

  # The policy is that if you #include something in foo.h you don't need to
  # include it again in foo.cc. Here, we will look at possible includes.
  # Let's copy the include_state so it is only messed up within this function.
  include_state = include_state.copy()

  # Did we find the header for this file (if any) and successfully load it?
  header_found = False

  # Use the absolute path so that matching works properly.
  abs_filename = FileInfo(filename).FullName()

  # For Emacs's flymake.
  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
  # by flymake and that file name might end with '_flymake.cc'. In that case,
  # restore original file name here so that the corresponding header file can be
  # found.
  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
  # instead of 'foo_flymake.h'
  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)

  # include_state is modified during iteration, so we iterate over a copy of
  # the keys.  (Python 2: keys() returns a list snapshot.)
  header_keys = include_state.keys()
  for header in header_keys:
    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
    fullpath = common_path + header
    if same_module and UpdateIncludeState(fullpath, include_state, io):
      header_found = True

  # If we can't find the header file for a .cc, assume it's because we don't
  # know where to look. In that case we'll give up as we're not sure they
  # didn't include it in the .h file.
  # TODO(unknown): Do a better job of finding .h files so we are confident that
  # not having the .h file means there isn't one.
  if filename.endswith('.cc') and not header_found:
    return

  # All the lines have been processed, report the errors found.
  for required_header_unstripped in required:
    template = required[required_header_unstripped][1]
    if required_header_unstripped.strip('<>"') not in include_state:
      error(filename, required[required_header_unstripped][0],
            'build/include_what_you_use', 4,
            'Add #include ' + required_header_unstripped + ' for ' + template)
# make_pair with explicit template arguments, e.g. "make_pair<int, int>(".
_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')


def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
  """Check that make_pair's template arguments are deduced.

  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
  specified explicitly, and such use isn't intended in any case.

  Args:
    filename: The name of the current file.
    clean_lines: A CleansedLines instance containing the file.
    linenum: The number of the line to check.
    error: The function to call with any errors found.
  """
  # Use the raw (uncleansed) line so that the warning fires even when the
  # offending call is partly inside a string or comment on this line.
  current_line = clean_lines.raw_lines[linenum]
  if _RE_PATTERN_EXPLICIT_MAKEPAIR.search(current_line):
    error(filename, linenum, 'build/explicit_make_pair',
          4,  # 4 = high confidence
          'Omit template arguments from make_pair OR use pair directly OR'
          ' if appropriate, construct a pair directly')
def ProcessLine(filename, file_extension,
                clean_lines, line, include_state, function_state,
                class_state, error, extra_check_functions=None):
  """Processes a single line in the file.

  Runs every per-line lint check over line number `line`.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    clean_lines: An array of strings, each representing a line of the file,
                 with comments stripped.
    line: Number of line being processed.
    include_state: An _IncludeState instance in which the headers are inserted.
    function_state: A _FunctionState instance which counts function lines, etc.
    class_state: A _ClassState instance which maintains information about
                 the current stack of nested class declarations being parsed.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           Defaults to None (no extra checks); the None
                           sentinel avoids sharing a mutable `[]` default
                           across calls.
  """
  raw_lines = clean_lines.raw_lines
  # NOLINT suppressions must be parsed from the raw line, comments intact.
  ParseNolintSuppressions(filename, raw_lines[line], line, error)
  CheckForFunctionLengths(filename, clean_lines, line, function_state, error)
  CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error)
  CheckStyle(filename, clean_lines, line, file_extension, class_state, error)
  CheckLanguage(filename, clean_lines, line, file_extension, include_state,
                error)
  CheckForNonStandardConstructs(filename, clean_lines, line,
                                class_state, error)
  CheckPosixThreading(filename, clean_lines, line, error)
  CheckInvalidIncrement(filename, clean_lines, line, error)
  CheckMakePairUsesDeduction(filename, clean_lines, line, error)
  for check_fn in (extra_check_functions or []):
    check_fn(filename, clean_lines, line, error)
def ProcessFileData(filename, file_extension, lines, error,
                    extra_check_functions=None):
  """Performs lint checks and reports any errors to the given error function.

  Args:
    filename: Filename of the file that is being processed.
    file_extension: The extension (dot not included) of the file.
    lines: An array of strings, each representing a line of the file, with the
           last element being empty if the file is terminated with a newline.
    error: A callable to which errors are reported, which takes 4 arguments:
           filename, line number, error level, and message
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error.
                           Defaults to None (no extra checks); the None
                           sentinel avoids a shared mutable `[]` default.
  """
  if extra_check_functions is None:
    extra_check_functions = []

  # Sentinel lines so that line numbers and list indices both start at 1 and
  # end-of-file is handled uniformly.
  lines = (['// marker so line numbers and indices both start at 1'] + lines +
           ['// marker so line numbers end in a known way'])

  include_state = _IncludeState()
  function_state = _FunctionState()
  class_state = _ClassState()

  ResetNolintSuppressions()

  CheckForCopyright(filename, lines, error)

  if file_extension == 'h':
    CheckForHeaderGuard(filename, lines, error)

  RemoveMultiLineComments(filename, lines, error)
  clean_lines = CleansedLines(lines)
  # range (not the Python-2-only xrange) keeps this loop working on both
  # Python 2 and Python 3.
  for line in range(clean_lines.NumLines()):
    ProcessLine(filename, file_extension, clean_lines, line,
                include_state, function_state, class_state, error,
                extra_check_functions)
  class_state.CheckFinished(filename, error)

  CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error)

  # We check here rather than inside ProcessLine so that we see raw
  # lines rather than "cleaned" lines.
  CheckForUnicodeReplacementCharacters(filename, lines, error)

  CheckForNewlineAtEOF(filename, lines, error)
def ProcessFile(filename, vlevel, extra_check_functions=[]):
  """Does google-lint on a single file.

  Args:
    filename: The name of the file to parse.
    vlevel: The level of errors to report. Every error of confidence
            >= verbose_level will be reported. 0 is a good default.
    extra_check_functions: An array of additional check functions that will be
                           run on each source line. Each function takes 4
                           arguments: filename, clean_lines, line, error
  """

  _SetVerboseLevel(vlevel)

  try:
    # Support the UNIX convention of using "-" for stdin. Note that
    # we are not opening the file with universal newline support
    # (which codecs doesn't support anyway), so the resulting lines do
    # contain trailing '\r' characters if we are reading a file that
    # has CRLF endings.
    # If after the split a trailing '\r' is present, it is removed
    # below. If it is not expected to be present (i.e. os.linesep !=
    # '\r\n' as in Windows), a warning is issued below if this file
    # is processed.
    if filename == '-':
      lines = codecs.StreamReaderWriter(sys.stdin,
                                        codecs.getreader('utf8'),
                                        codecs.getwriter('utf8'),
                                        'replace').read().split('\n')
    else:
      lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n')

    carriage_return_found = False
    # Remove trailing '\r'.
    for linenum in range(len(lines)):
      if lines[linenum].endswith('\r'):
        lines[linenum] = lines[linenum].rstrip('\r')
        carriage_return_found = True

  except IOError:
    sys.stderr.write(
        "Skipping input '%s': Can't open for reading\n" % filename)
    return

  # Note, if no dot is found, this will give the entire filename as the ext.
  file_extension = filename[filename.rfind('.') + 1:]

  # When reading from stdin, the extension is unknown, so no cpplint tests
  # should rely on the extension.
  if (filename != '-' and file_extension != 'cc' and file_extension != 'h'
      and file_extension != 'cpp'):
    # NOTE(review): .cpp is accepted by the condition above, but the message
    # below only mentions .cc and .h -- confirm whether the message should
    # be updated.
    sys.stderr.write('Ignoring %s; not a .cc or .h file\n' % filename)
  else:
    ProcessFileData(filename, file_extension, lines, Error,
                    extra_check_functions)
    if carriage_return_found and os.linesep != '\r\n':
      # Use 0 for linenum since outputting only one error for potentially
      # several lines.
      Error(filename, 0, 'whitespace/newline', 1,
            'One or more unexpected \\r (^M) found;'
            'better to use only a \\n')

  sys.stderr.write('Done processing %s\n' % filename)
def PrintUsage(message):
  """Prints a brief usage string and exits, optionally with an error message.

  Args:
    message: The optional error message.
  """
  sys.stderr.write(_USAGE)
  # Either path terminates the process: sys.exit with a string prints the
  # string to stderr and exits with status 1; sys.exit(1) exits silently.
  if message:
    sys.exit('\nFATAL ERROR: ' + message)
  sys.exit(1)
def PrintCategories():
  """Prints a list of all the error-categories used by error messages.

  These are the categories used to filter messages via --filter.
  """
  for category in _ERROR_CATEGORIES:
    sys.stderr.write(' %s\n' % category)
  sys.exit(0)
def ParseArguments(args):
  """Parses the command line arguments.

  This may set the output format and verbosity level as side-effects.

  Args:
    args: The command line arguments:

  Returns:
    The list of filenames to lint.
  """
  try:
    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
                                                 'counting=',
                                                 'filter='])
  except getopt.GetoptError:
    # PrintUsage never returns (it calls sys.exit), so 'opts' is always
    # bound when the code below runs.
    PrintUsage('Invalid arguments.')

  # Start from current module-level settings so unspecified options keep
  # their existing values.
  verbosity = _VerboseLevel()
  output_format = _OutputFormat()
  filters = ''
  counting_style = ''

  for (opt, val) in opts:
    if opt == '--help':
      PrintUsage(None)
    elif opt == '--output':
      if not val in ('emacs', 'vs7'):
        PrintUsage('The only allowed output formats are emacs and vs7.')
      output_format = val
    elif opt == '--verbose':
      verbosity = int(val)
    elif opt == '--filter':
      filters = val
      if not filters:
        # '--filter=' with an empty value lists the legal categories and
        # exits.
        PrintCategories()
    elif opt == '--counting':
      if val not in ('total', 'toplevel', 'detailed'):
        PrintUsage('Valid counting options are total, toplevel, and detailed')
      counting_style = val

  if not filenames:
    PrintUsage('No files were specified.')

  # Publish the parsed settings as module-level state.
  _SetOutputFormat(output_format)
  _SetVerboseLevel(verbosity)
  _SetFilters(filters)
  _SetCountingStyle(counting_style)

  return filenames
def main():
  """Command-line entry point: lint each requested file, then exit."""
  targets = ParseArguments(sys.argv[1:])

  # Re-wrap stderr so that non-ASCII characters are replaced instead of
  # raising encoding errors when printed.
  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
                                         codecs.getreader('utf8'),
                                         codecs.getwriter('utf8'),
                                         'replace')

  _cpplint_state.ResetErrorCounts()
  for target in targets:
    ProcessFile(target, _cpplint_state.verbose_level)
  _cpplint_state.PrintErrorCounts()

  # Exit status is nonzero iff at least one error was reported.
  sys.exit(_cpplint_state.error_count > 0)


if __name__ == '__main__':
  main()
| gpl-3.0 |
rebstar6/servo | components/script/dom/bindings/codegen/parser/tests/test_enum.py | 142 | 3122 | import WebIDL
def WebIDLTest(parser, harness):
    # Part 1: an enum used both as a method return type and as an attribute
    # type on an interface.
    parser.parse("""
        enum TestEnum {
          "",
          "foo",
          "bar"
        };

        interface TestEnumInterface {
          TestEnum doFoo(boolean arg);
          readonly attribute TestEnum foo;
        };
    """)
    results = parser.finish()

    harness.ok(True, "TestEnumInterfaces interface parsed without error.")
    harness.check(len(results), 2, "Should be one production")
    harness.ok(isinstance(results[0], WebIDL.IDLEnum),
               "Should be an IDLEnum")
    harness.ok(isinstance(results[1], WebIDL.IDLInterface),
               "Should be an IDLInterface")

    # The enum production itself: qualified name, name, and value list.
    enum = results[0]
    harness.check(enum.identifier.QName(), "::TestEnum", "Enum has the right QName")
    harness.check(enum.identifier.name, "TestEnum", "Enum has the right name")
    harness.check(enum.values(), ["", "foo", "bar"], "Enum has the right values")

    # The interface that consumes the enum.
    iface = results[1]

    harness.check(iface.identifier.QName(), "::TestEnumInterface", "Interface has the right QName")
    harness.check(iface.identifier.name, "TestEnumInterface", "Interface has the right name")
    harness.check(iface.parent, None, "Interface has no parent")

    members = iface.members
    harness.check(len(members), 2, "Should be one production")
    harness.ok(isinstance(members[0], WebIDL.IDLMethod),
               "Should be an IDLMethod")
    method = members[0]
    harness.check(method.identifier.QName(), "::TestEnumInterface::doFoo",
                  "Method has correct QName")
    harness.check(method.identifier.name, "doFoo", "Method has correct name")

    signatures = method.signatures()
    harness.check(len(signatures), 1, "Expect one signature")

    # A signature is a (return type, arguments) pair.
    (returnType, arguments) = signatures[0]
    harness.check(str(returnType), "TestEnum (Wrapper)", "Method type is the correct name")
    harness.check(len(arguments), 1, "Method has the right number of arguments")
    arg = arguments[0]
    harness.ok(isinstance(arg, WebIDL.IDLArgument), "Should be an IDLArgument")
    harness.check(str(arg.type), "Boolean", "Argument has the right type")

    attr = members[1]
    harness.check(attr.identifier.QName(), "::TestEnumInterface::foo",
                  "Attr has correct QName")
    harness.check(attr.identifier.name, "foo", "Attr has correct name")
    harness.check(str(attr.type), "TestEnum (Wrapper)", "Attr type is the correct name")

    # Part 2: a default value that is not one of the enum's values must be
    # rejected by the parser.
    # Now reset our parser
    parser = parser.reset()
    threw = False
    try:
        parser.parse("""
            enum Enum {
              "a",
              "b",
              "c"
            };
            interface TestInterface {
              void foo(optional Enum e = "d");
            };
        """)
        results = parser.finish()
    except:
        threw = True

    harness.ok(threw, "Should not allow a bogus default value for an enum")

    # Part 3: a trailing comma in the enum value list is permitted.
    # Now reset our parser
    parser = parser.reset()
    parser.parse("""
        enum Enum {
          "a",
          "b",
          "c",
        };
    """)
    results = parser.finish()
    harness.check(len(results), 1, "Should allow trailing comma in enum")
| mpl-2.0 |
kaiyuanl/gem5 | src/arch/x86/isa/insts/x87/control/save_and_restore_x87_control_word.py | 68 | 2493 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop FLDCW_M {
ld t1, seg, sib, disp, dataSize=2
wrval fcw, t1
};
def macroop FLDCW_P {
ld t1, seg, sib, disp, dataSize=2
wrval fcw, t1
};
# FSTCW
def macroop FNSTCW_M {
rdval t1, fcw
st t1, seg, sib, disp, dataSize=2
};
def macroop FNSTCW_P {
rdip t7
rdval t1, fcw
st t1, seg, sib, disp, dataSize=2
};
'''
| bsd-3-clause |
youfoh/webkit-efl | Tools/Scripts/webkitpy/style/checkers/xml.py | 187 | 2044 | # Copyright (C) 2010 Apple Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Checks WebKit style for XML files."""
from __future__ import absolute_import
from xml.parsers import expat
class XMLChecker(object):
    """Processes XML lines for checking style."""

    def __init__(self, file_path, handle_style_error):
        """Initializes the checker.

        Args:
            file_path: Path of the XML file being checked (kept for
                checker-interface compatibility; not used directly here).
            handle_style_error: Callable taking (line_number, category,
                confidence, message), used to report style errors.
        """
        self._handle_style_error = handle_style_error
        # XML syntax errors are only detected once the whole document has
        # been fed to the parser, so per-line filtering makes no sense.
        self._handle_style_error.turn_off_line_filtering()

    def check(self, lines):
        """Feed the lines to an expat parser; report the first syntax error."""
        parser = expat.ParserCreate()
        try:
            for line in lines:
                parser.Parse(line)
                parser.Parse('\n')
            # Signal end-of-document so errors such as unclosed elements
            # are detected.
            parser.Parse('', True)
        # "except X as e" (instead of the Python-2-only "except X, e")
        # works on Python 2.6+ and Python 3.
        except expat.ExpatError as error:
            self._handle_style_error(error.lineno, 'xml/syntax', 5, expat.ErrorString(error.code))
| lgpl-2.1 |
av8ramit/tensorflow | tensorflow/python/debug/lib/source_remote_test.py | 13 | 7265 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Unit tests for source_remote."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import traceback
from tensorflow.core.debug import debug_service_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import grpc_debug_test_server
from tensorflow.python.debug.lib import source_remote
from tensorflow.python.debug.lib import source_utils
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
# Import resource_variable_ops for the variables-to-tensor implicit conversion.
from tensorflow.python.ops import resource_variable_ops # pylint: disable=unused-import
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.util import tf_inspect
def line_number_above():
  """Return the source line number of the line just above the call site."""
  # stack()[1] is the caller's frame; index 2 of a frame record is its line
  # number, so subtracting 1 yields the line immediately above the call.
  return tf_inspect.stack()[1][2] - 1
class SendTracebacksTest(test_util.TensorFlowTestCase):
@classmethod
def setUpClass(cls):
test_util.TensorFlowTestCase.setUpClass()
(cls._server_port, cls._debug_server_url, cls._server_dump_dir,
cls._server_thread,
cls._server) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address = "localhost:%d" % cls._server_port
(cls._server_port_2, cls._debug_server_url_2, cls._server_dump_dir_2,
cls._server_thread_2,
cls._server_2) = grpc_debug_test_server.start_server_on_separate_thread()
cls._server_address_2 = "localhost:%d" % cls._server_port_2
cls._curr_file_path = os.path.normpath(os.path.abspath(__file__))
@classmethod
def tearDownClass(cls):
# Stop the test server and join the thread.
cls._server.stop_server().wait()
cls._server_thread.join()
cls._server_2.stop_server().wait()
cls._server_thread_2.join()
test_util.TensorFlowTestCase.tearDownClass()
def tearDown(self):
ops.reset_default_graph()
self._server.clear_data()
self._server_2.clear_data()
super(SendTracebacksTest, self).tearDown()
def _findFirstTraceInsideTensorFlowPyLibrary(self, op):
"""Find the first trace of an op that belongs to the TF Python library."""
for trace in op.traceback:
if source_utils.guess_is_tensorflow_py_library(trace[0]):
return trace
def testSendGraphTracebacksToSingleDebugServer(self):
this_func_name = "testSendGraphTracebacksToSingleDebugServer"
with session.Session() as sess:
a = variables.Variable(21.0, name="a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="b")
b_lineno = line_number_above()
math_ops.add(a, b, name="x")
x_lineno = line_number_above()
send_stack = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
self._server_address, "dummy_run_key", send_stack, sess.graph)
tb = self._server.query_op_traceback("a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = self._server.query_op_traceback("x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
self._server.query_origin_stack()[-1])
self.assertEqual(
" a = variables.Variable(21.0, name=\"a\")",
self._server.query_source_file_line(__file__, a_lineno))
# Files in the TensorFlow code base shouldn not have been sent.
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(a.op)
with self.assertRaises(ValueError):
self._server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
self._server.query_call_types())
self.assertEqual(["dummy_run_key"], self._server.query_call_keys())
self.assertEqual(
[sess.graph.version], self._server.query_graph_versions())
def testSendGraphTracebacksToTwoDebugServers(self):
this_func_name = "testSendGraphTracebacksToTwoDebugServers"
with session.Session() as sess:
a = variables.Variable(21.0, name="two/a")
a_lineno = line_number_above()
b = variables.Variable(2.0, name="two/b")
b_lineno = line_number_above()
x = math_ops.add(a, b, name="two/x")
x_lineno = line_number_above()
send_traceback = traceback.extract_stack()
send_lineno = line_number_above()
source_remote.send_graph_tracebacks(
[self._server_address, self._server_address_2],
"dummy_run_key", send_traceback, sess.graph)
servers = [self._server, self._server_2]
for server in servers:
tb = server.query_op_traceback("two/a")
self.assertIn((self._curr_file_path, a_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/b")
self.assertIn((self._curr_file_path, b_lineno, this_func_name), tb)
tb = server.query_op_traceback("two/x")
self.assertIn((self._curr_file_path, x_lineno, this_func_name), tb)
self.assertIn(
(self._curr_file_path, send_lineno, this_func_name),
server.query_origin_stack()[-1])
self.assertEqual(
" x = math_ops.add(a, b, name=\"two/x\")",
server.query_source_file_line(__file__, x_lineno))
tf_trace_file_path = self._findFirstTraceInsideTensorFlowPyLibrary(x.op)
with self.assertRaises(ValueError):
server.query_source_file_line(tf_trace_file_path, 0)
self.assertEqual([debug_service_pb2.CallTraceback.GRAPH_EXECUTION],
server.query_call_types())
self.assertEqual(["dummy_run_key"], server.query_call_keys())
self.assertEqual([sess.graph.version], server.query_graph_versions())
  def testSendEagerTracebacksToSingleDebugServer(self):
    """Eager-mode tracebacks are recorded with the EAGER_EXECUTION call type."""
    this_func_name = "testSendEagerTracebacksToSingleDebugServer"
    # line_number_above() must stay directly below the statement it tags.
    send_traceback = traceback.extract_stack()
    send_lineno = line_number_above()
    source_remote.send_eager_tracebacks(self._server_address, send_traceback)
    self.assertEqual([debug_service_pb2.CallTraceback.EAGER_EXECUTION],
                     self._server.query_call_types())
    self.assertIn((self._curr_file_path, send_lineno, this_func_name),
                  self._server.query_origin_stack()[-1])
# Standard test-runner entry point.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
airelil/pywinauto | pywinauto/linux/keyboard.py | 1 | 17458 | # -*- coding: utf-8 -*-
# GUI Application automation and testing library
# Copyright (C) 2006-2018 Mark Mc Mahon and Contributors
# https://github.com/pywinauto/pywinauto/graphs/contributors
# http://pywinauto.readthedocs.io/en/latest/credits.html
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of pywinauto nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Linux/Unix branch of the keyboard module
It allows to send keystrokes to the active display using python-xlib library.
"""
from __future__ import print_function
from Xlib.display import Display
from Xlib import X
from Xlib.ext.xtest import fake_input
import Xlib.XK
import time
import six
# Connection to the X display, shared by keycode lookup and fake input.
_display = Display()

# Set to a non-zero value to print modifier bookkeeping in parse_keys().
DEBUG = 0

# Map of single characters to their X11 keysym names, consulted by
# KeyAction.run() when Xlib.XK.string_to_keysym() cannot resolve the
# character directly. Keysym names are case-sensitive.
spec_keysyms = {
    ' ': "space",
    '\t': "Tab",
    '\n': "Return", # for some reason this needs to be cr, not lf
    '\r': "Return",
    # Bug fix: the original key was written as '\e', which is NOT a Python
    # escape sequence -- it silently meant the two characters backslash+'e',
    # so the ESC character could never match. '\x1b' is the actual ESC char.
    '\x1b': "Escape",
    '!': "exclam",
    '#': "numbersign",
    '%': "percent",
    '$': "dollar",
    '&': "ampersand",
    '"': "quotedbl",
    '\'': "apostrophe",
    '(': "parenleft",
    ')': "parenright",
    '*': "asterisk",
    '=': "equal",
    '+': "plus",
    ',': "comma",
    '-': "minus",
    '.': "period",
    '/': "slash",
    ':': "colon",
    ';': "semicolon",
    '<': "less",
    '>': "greater",
    '?': "question",
    '@': "at",
    '[': "bracketleft",
    ']': "bracketright",
    '\\': "backslash",
    '^': "asciicircum",
    '_': "underscore",
    '`': "grave",
    '{': "braceleft",
    '|': "bar",
    '}': "braceright",
    '~': "asciitilde"
}
def _to_keycode(key):
    """Return the X11 keycode for the given keysym name.

    `key` is a case-sensitive X keysym name (e.g. 'space', 'Shift_L').
    Unknown names resolve to keysym 0; the resulting keycode is then
    presumably 0 (NoSymbol) -- TODO confirm against python-xlib behavior.
    """
    return _display.keysym_to_keycode(Xlib.XK.string_to_keysym(key))
# Windows SendInput-style constants kept for API parity with the win32
# keyboard module; they are not used by the X11 implementation below.
INPUT_KEYBOARD = 1
KEYEVENTF_EXTENDEDKEY = 1
KEYEVENTF_KEYUP = 2
KEYEVENTF_UNICODE = 4
KEYEVENTF_SCANCODE = 8
# Keycodes of the left-hand modifier keys, used as MODIFIERS values.
VK_SHIFT = _to_keycode('Shift_L')
VK_CONTROL = _to_keycode('Control_L')
# NOTE(review): 'Menu' is the X context-menu key, not Alt, while
# CODES['VK_MENU'] below maps to Alt_L -- confirm this is intended.
VK_MENU = _to_keycode('Menu')
# Named codes recognized inside braces, e.g. {ENTER} or {DOWN 5}.
# Values are X11 keycodes (via _to_keycode) or raw Windows VK numbers kept
# for compatibility where no X keysym equivalent exists.
CODES = {
    'BACK': _to_keycode('BackSpace'),
    'BACKSPACE': _to_keycode('BackSpace'),
    'BKSP': _to_keycode('BackSpace'),
    'BREAK': _to_keycode('Break'),
    'BS': _to_keycode('BackSpace'),
    'CAP': _to_keycode('Caps_Lock'),
    'CAPSLOCK': _to_keycode('Caps_Lock'),
    'DEL': _to_keycode('Delete'),
    'DELETE': _to_keycode('Delete'),
    'DOWN': _to_keycode('Down'),
    'END': _to_keycode('End'),
    'ENTER': _to_keycode('Return'),
    'ESC': _to_keycode('Escape'),
    'F1': _to_keycode('F1'),
    'F2': _to_keycode('F2'),
    'F3': _to_keycode('F3'),
    'F4': _to_keycode('F4'),
    'F5': _to_keycode('F5'),
    'F6': _to_keycode('F6'),
    'F7': _to_keycode('F7'),
    'F8': _to_keycode('F8'),
    'F9': _to_keycode('F9'),
    'F10': _to_keycode('F10'),
    'F11': _to_keycode('F11'),
    'F12': _to_keycode('F12'),
    'F13': _to_keycode('F13'),
    'F14': _to_keycode('F14'),
    'F15': _to_keycode('F15'),
    'F16': _to_keycode('F16'),
    'F17': _to_keycode('F17'),
    'F18': _to_keycode('F18'),
    'F19': _to_keycode('F19'),
    'F20': _to_keycode('F20'),
    'F21': _to_keycode('F21'),
    'F22': _to_keycode('F22'),
    'F23': _to_keycode('F23'),
    'F24': _to_keycode('F24'),
    'HELP': _to_keycode('Help'),
    'HOME': _to_keycode('Home'),
    'INS': _to_keycode('Insert'),
    'INSERT': _to_keycode('Insert'),
    'LEFT': _to_keycode('Left'),
    'LWIN': _to_keycode('Super_L'),
    'NUMLOCK': _to_keycode('Num_Lock'),
    'PGDN': _to_keycode('Page_Down'),
    'PGUP': _to_keycode('Page_Up'),
    'PRTSC': _to_keycode('Print'),
    'RIGHT': _to_keycode('Right'),
    'RMENU': _to_keycode('Alt_R'),
    'RWIN': _to_keycode('Super_R'),
    'SCROLLLOCK': _to_keycode('Scroll_Lock'),
    'SPACE': _to_keycode('space'),
    'TAB': _to_keycode('Tab'),
    'UP': _to_keycode('Up'),
    'VK_ACCEPT': 30,
    'VK_ADD': 107,
    'VK_APPS': 93,
    'VK_ATTN': 246,
    'VK_BACK': _to_keycode('BackSpace'),
    'VK_CANCEL': _to_keycode('Break'),
    'VK_CAPITAL': _to_keycode('Caps_Lock'),
    'VK_CLEAR': 12,
    'VK_CONTROL': _to_keycode('Control_L'),
    'VK_CONVERT': 28,
    'VK_CRSEL': 247,
    'VK_DECIMAL': 110,
    'VK_DELETE': _to_keycode('Delete'),
    'VK_DIVIDE': 111,
    'VK_DOWN': _to_keycode('Down'),
    'VK_END': _to_keycode('End'),
    'VK_EREOF': 249,
    'VK_ESCAPE': _to_keycode('Escape'),
    'VK_EXECUTE': 43,
    'VK_EXSEL': 248,
    'VK_F1': _to_keycode('F1'),
    'VK_F2': _to_keycode('F2'),
    'VK_F3': _to_keycode('F3'),
    'VK_F4': _to_keycode('F4'),
    'VK_F5': _to_keycode('F5'),
    'VK_F6': _to_keycode('F6'),
    'VK_F7': _to_keycode('F7'),
    'VK_F8': _to_keycode('F8'),
    'VK_F9': _to_keycode('F9'),
    'VK_F10': _to_keycode('F10'),
    'VK_F11': _to_keycode('F11'),
    'VK_F12': _to_keycode('F12'),
    'VK_F13': _to_keycode('F13'),
    'VK_F14': _to_keycode('F14'),
    'VK_F15': _to_keycode('F15'),
    'VK_F16': _to_keycode('F16'),
    'VK_F17': _to_keycode('F17'),
    'VK_F18': _to_keycode('F18'),
    'VK_F19': _to_keycode('F19'),
    'VK_F20': _to_keycode('F20'),
    'VK_F21': _to_keycode('F21'),
    'VK_F22': _to_keycode('F22'),
    'VK_F23': _to_keycode('F23'),
    'VK_F24': _to_keycode('F24'),
    'VK_FINAL': 24,
    'VK_HANGEUL': 21,
    'VK_HANGUL': 21,
    'VK_HANJA': 25,
    'VK_HELP': _to_keycode('Help'),
    'VK_HOME': _to_keycode('Home'),
    'VK_INSERT': _to_keycode('Insert'),
    'VK_JUNJA': 23,
    'VK_KANA': 21,
    'VK_KANJI': 25,
    'VK_LBUTTON': 1,
    'VK_LCONTROL': _to_keycode('Control_L'),
    'VK_LEFT': _to_keycode('Left'),
    'VK_LMENU': _to_keycode('Alt_L'),
    'VK_LSHIFT': _to_keycode('Shift_L'),
    'VK_LWIN': _to_keycode('Super_L'),
    'VK_MBUTTON': 4,
    'VK_MENU': _to_keycode('Alt_L'),
    'VK_MODECHANGE': 31,
    'VK_MULTIPLY': 106,
    'VK_NEXT': _to_keycode('Page_Down'),
    'VK_NONAME': 252,
    'VK_NONCONVERT': 29,
    'VK_NUMLOCK': _to_keycode('Num_Lock'),
    'VK_NUMPAD0': _to_keycode('KP_0'),
    'VK_NUMPAD1': _to_keycode('KP_1'),
    'VK_NUMPAD2': _to_keycode('KP_2'),
    'VK_NUMPAD3': _to_keycode('KP_3'),
    'VK_NUMPAD4': _to_keycode('KP_4'),
    'VK_NUMPAD5': _to_keycode('KP_5'),
    'VK_NUMPAD6': _to_keycode('KP_6'),
    'VK_NUMPAD7': _to_keycode('KP_7'),
    'VK_NUMPAD8': _to_keycode('KP_8'),
    'VK_NUMPAD9': _to_keycode('KP_9'),
    'VK_OEM_CLEAR': 254,
    'VK_PA1': 253,
    'VK_PAUSE': 19,
    'VK_PLAY': 250,
    'VK_PRINT': _to_keycode('Print'),
    'VK_PRIOR': _to_keycode('Page_Up'),
    'VK_PROCESSKEY': 229,
    'VK_RBUTTON': 2,
    'VK_RCONTROL': _to_keycode('Control_R'),
    'VK_RETURN': _to_keycode('Return'),
    'VK_RIGHT': _to_keycode('Right'),
    'VK_RMENU': _to_keycode('Alt_R'),
    'VK_RSHIFT': _to_keycode('Shift_R'),
    'VK_RWIN': _to_keycode('Super_R'),
    'VK_SCROLL': _to_keycode('Scroll_Lock'),
    'VK_SELECT': 41,
    'VK_SEPARATOR': 108,
    'VK_SHIFT': _to_keycode('Shift_L'),
    'VK_SNAPSHOT': _to_keycode('Print'),
    # Bug fix: X keysym names are case-sensitive; 'Space' is not a valid
    # keysym (string_to_keysym returns 0), so {VK_SPACE} mapped to keycode
    # NoSymbol. Use 'space', matching the 'SPACE' entry above.
    'VK_SPACE': _to_keycode('space'),
    'VK_SUBTRACT': 109,
    'VK_TAB': _to_keycode('Tab'),
    'VK_UP': _to_keycode('Up'),
    'ZOOM': 251, #no item in xlib
}
# modifier keys
# Maps the SendKeys prefix character to the left-hand modifier keycode.
# parse_keys() matches on the keys of this dict; the keycode values are
# not pressed directly there (KeyAction._key_modifiers uses CODES instead).
MODIFIERS = {
    '+': VK_SHIFT,
    '^': VK_CONTROL,
    '%': VK_MENU,
}
class KeySequenceError(Exception):
    """Raised when a SendKeys-style key sequence string is malformed."""

    def __str__(self):
        message_parts = self.args
        return " ".join(message_parts)
class KeyAction(object):
    """
    Class that represents a single 'keyboard' action

    It represents either a PAUSE action (not really keyboard) or a keyboard
    action (press or release or both) of a particular key.
    """

    def __init__(self, key, down = True, up = True):
        """Init a single key action params"""
        self.key = key    # character or keycode; resolved to a keycode in run()
        self.down = down  # whether to send a KeyPress event
        self.up = up      # whether to send a KeyRelease event
        # modifier flags; set later by parse_keys() on the produced actions
        self.ctrl = False
        self.alt = False
        self.shift = False
        # True when the character itself needs Shift (uppercase / shifted symbol)
        self.is_shifted = False

    @staticmethod
    def _key_modifiers(ctrl, shift, alt, action = X.KeyPress):
        """Press or release (depending on `action`) the requested modifiers"""
        if ctrl:
            fake_input(_display, action, CODES['VK_CONTROL'])
        if shift:
            fake_input(_display, action, CODES['VK_SHIFT'])
        if alt:
            fake_input(_display, action, CODES['VK_MENU'])

    def run(self):
        """Do a single keyboard action using xlib"""
        if isinstance(self.key, six.string_types):
            key = self.key
            # character -> keysym, falling back to the special-name table
            # for characters string_to_keysym() cannot resolve directly
            self.key = Xlib.XK.string_to_keysym(self.key)
            if self.key == 0:
                self.key = Xlib.XK.string_to_keysym(spec_keysyms[key])
            self.key = _display.keysym_to_keycode(self.key)
            if self.key == 0:
                # Bug fix: report the original character; self.key has
                # already been overwritten with the (zero) keycode here.
                raise RuntimeError('Key {} not found!'.format(key))
            self.is_shifted = key.isupper() or key in '~!@#$%^&*()_+{}|:"<>?'
        elif not isinstance(self.key, six.integer_types):
            raise TypeError('self.key = {} is not a string or integer'.format(self.key))
        # hold modifiers, send the key press/release, then release modifiers
        self._key_modifiers(self.ctrl, (self.shift or self.is_shifted),
                            self.alt, action = X.KeyPress)
        if self.down:
            fake_input(_display, X.KeyPress, self.key)
            _display.sync()
        if self.up:
            fake_input(_display, X.KeyRelease, self.key)
            _display.sync()
        self._key_modifiers(self.ctrl, (self.shift or self.is_shifted),
                            self.alt, action = X.KeyRelease)
        _display.sync()

    def _get_down_up_string(self):
        """Return a string that will show whether the string is up or down

        return 'down' if the key is a press only
        return 'up' if the key is up only
        return '' if the key is up & down (as default)
        """
        if self.down and self.up:
            return ""
        if self.down:
            return "down"
        if self.up:
            return "up"
        return "" # TODO: raise RuntimeError('Nor "down" or "up" action specified!')

    def key_description(self):
        """Return a description of the key"""
        return "{}".format(self.key)

    def __str__(self):
        """Return key with modifiers as a string"""
        parts = []
        parts.append(self.key_description())
        up_down = self._get_down_up_string()
        if up_down:
            parts.append(up_down)
        return "<{}>".format(" ".join(parts))
    __repr__ = __str__
class PauseAction(KeyAction):
    """Represents a pause action"""

    def __init__(self, how_long):
        # duration of the pause, in seconds
        self.how_long = how_long

    def run(self):
        """Pause for the length of time specified"""
        time.sleep(self.how_long)

    def __str__(self):
        return "<PAUSE {}>".format(self.how_long)
    __repr__ = __str__
def handle_code(code, vk_packet=True):
    """Translate one brace-delimited token into a list of key actions.

    `code` is the text found between '{' and '}', e.g. 'ENTER', 'DOWN 5',
    'PAUSE 1.3' or an escaped modifier such as '%'. `vk_packet` is accepted
    for API compatibility with the Windows implementation.
    """
    actions = []
    if code in CODES:
        # a known named code, e.g. {DOWN}, {ENTER}
        actions.append(KeyAction(CODES[code]))
    elif len(code) == 1:
        # an escaped single character, e.g. {%}, {^}, {+}
        actions.append(KeyAction(code))
    elif ' ' in code:
        # a repetition or a pause: {DOWN 5}, {PAUSE 1.3}
        to_repeat, count = code.rsplit(None, 1)
        if to_repeat == "PAUSE":
            try:
                pause_time = float(count)
            except ValueError:
                raise KeySequenceError('invalid pause time {}'.format(count))
            actions.append(PauseAction(pause_time))
        else:
            try:
                count = int(count)
            except ValueError:
                raise KeySequenceError(
                    'invalid repetition count {}'.format(count))
            if to_repeat in CODES:
                # a named code repeated `count` times
                actions.extend([KeyAction(CODES[to_repeat])] * count)
            else:
                # otherwise parse the text and repeat the resulting actions
                parsed = parse_keys(to_repeat)
                if isinstance(parsed, list):
                    actions.extend(parsed * count)
                else:
                    actions.extend([parsed] * count)
    else:
        raise RuntimeError("Unknown code: {}".format(code))
    return actions
def parse_keys(string,
               with_spaces = False,
               with_tabs = False,
               with_newlines = False,
               modifiers = None,
               vk_packet=True):
    """Return a list of KeyAction/PauseAction objects parsed from `string`.

    `string` uses the SendKeys mini-language: '+', '^', '%' are Shift/Ctrl/Alt
    prefixes, '(...)' applies pending modifiers to a group, '{...}' holds a
    named code (see handle_code), and '~' means ENTER. Whitespace characters
    are skipped unless the corresponding with_* flag is set. `modifiers` is
    the list of pending modifier characters (shared with recursive calls).
    `vk_packet` is accepted for API compatibility with the Windows version.
    """
    keys = []
    if not modifiers:
        modifiers = []
    index = 0
    while index < len(string):
        c = string[index]
        index += 1
        # check if one of CTRL, SHIFT, ALT has been pressed
        if c in MODIFIERS.keys():
            # remember that we are currently modified
            modifiers.append(c)
            # hold down the modifier key
            #keys.append(KeyAction(modifier, up = False))
            if DEBUG:
                print("MODS+", modifiers)
            continue
        # Apply modifiers over a bunch of characters (not just one!)
        elif c == "(":
            # find the end of the bracketed text
            end_pos = string.find(")", index)
            if end_pos == -1:
                raise KeySequenceError('`)` not found')
            keys.extend(
                parse_keys(string[index:end_pos], modifiers = modifiers))
            index = end_pos + 1
        # Escape or named key
        elif c == "{":
            # We start searching from index + 1 to account for the case {}}
            end_pos = string.find("}", index+1)
            if end_pos == -1:
                raise KeySequenceError('`}` not found')
            code = string[index:end_pos]
            index = end_pos + 1
            keys.extend(handle_code(code))
        # unmatched ")"
        elif c == ')':
            raise KeySequenceError('`)` should be preceeded by `(`')
        # unmatched "}"
        elif c == '}':
            raise KeySequenceError('`}` should be preceeded by `{`')
        # so it is a normal character
        else:
            # don't output white space unless flags to output have been set
            if (c == ' ' and not with_spaces or
                    c == '\t' and not with_tabs or
                    c == '\n' and not with_newlines):
                continue
            # output newline; '~' is the SendKeys shorthand for ENTER
            if c in ('~', '\n'):
                keys.append(KeyAction(CODES["ENTER"]))
            else:
                # Bug fix: the original had identical `elif modifiers:` and
                # `else:` branches here; merged into one branch. Modifier
                # flags are applied to the appended action just below.
                keys.append(KeyAction(c))
            # as we have handled the text - release the modifiers
            while modifiers:
                if DEBUG:
                    print("MODS-", modifiers)
                mod = modifiers.pop()
                if mod == '+':
                    keys[-1].shift = True
                elif mod == '^':
                    keys[-1].ctrl = True
                elif mod == '%':
                    keys[-1].alt = True
    # just in case there were any modifiers left pressed - release them
    while modifiers:
        keys.append(KeyAction(modifiers.pop(), down = False))
    return keys
def send_keys(keys,
              pause=0.05,
              with_spaces=False,
              with_tabs=False,
              with_newlines=False,
              turn_off_numlock=True,
              vk_packet=True):
    """Parse a SendKeys-style string and type it on the active display.

    `pause` is the delay in seconds inserted after each action.
    `turn_off_numlock` and `vk_packet` are accepted for API compatibility
    with the Windows implementation.
    """
    actions = parse_keys(keys, with_spaces, with_tabs, with_newlines)
    for action in actions:
        action.run()
        time.sleep(pause)
| bsd-3-clause |
akretion/account-financial-reporting | __unported__/account_financial_report/report/__init__.py | 38 | 1484 | # -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
# Copyright (C) OpenERP Venezuela (<http://openerp.com.ve>).
# All Rights Reserved
# Credits######################################################
# Coded by: Humberto Arocha humberto@openerp.com.ve
# Angelica Barrios angelicaisabelb@gmail.com
# Jordi Esteve <jesteve@zikzakmedia.com>
# Planified by: Humberto Arocha
# Finance by: LUBCAN COL S.A.S http://www.lubcancol.com
# Audited by: Humberto Arocha humberto@openerp.com.ve
#############################################################################
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
from . import parser
| agpl-3.0 |
PeterWangIntel/chromium-crosswalk | third_party/typ/typ/fakes/tests/test_result_server_fake_test.py | 81 | 1298 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typ.fakes import test_result_server_fake
from typ import Host
class TestResultServerFakeTest(unittest.TestCase):

    def test_basic_upload(self):
        """Upload one POST to the fake server and verify it was recorded."""
        host = Host()
        server = None
        posts = []
        try:
            server = test_result_server_fake.start()
            upload_url = 'http://%s:%d/testfile/upload' % server.server_address
            if server:
                host.fetch(upload_url, 'foo=bar')
        finally:
            # always stop the server so the port is released; stop() returns
            # the list of requests it received
            if server:
                posts = server.stop()
        expected = [('post', '/testfile/upload', 'foo=bar'.encode('utf8'))]
        self.assertEqual(posts, expected)
        self.assertNotEqual(server.log.getvalue(), '')
| bsd-3-clause |
javierTerry/odoo | addons/account_check_writing/report/__init__.py | 446 | 1066 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import check_print
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jdelight/django | django/views/decorators/vary.py | 586 | 1200 | from functools import wraps
from django.utils.cache import patch_vary_headers
from django.utils.decorators import available_attrs
def vary_on_headers(*headers):
    """
    A view decorator that adds the specified headers to the Vary header of the
    response. Usage:

        @vary_on_headers('Cookie', 'Accept-language')
        def index(request):
            ...

    Note that the header names are not case-sensitive.
    """
    def _decorator(view_func):
        @wraps(view_func, assigned=available_attrs(view_func))
        def _wrapped_view(*args, **kwargs):
            response = view_func(*args, **kwargs)
            patch_vary_headers(response, headers)
            return response
        return _wrapped_view
    return _decorator
def vary_on_cookie(func):
    """
    A view decorator that adds "Cookie" to the Vary header of a response. This
    indicates that a page's contents depends on cookies. Usage:

        @vary_on_cookie
        def index(request):
            ...
    """
    @wraps(func, assigned=available_attrs(func))
    def _wrapped_view(*args, **kwargs):
        response = func(*args, **kwargs)
        patch_vary_headers(response, ('Cookie',))
        return response
    return _wrapped_view
| bsd-3-clause |
apdjustino/DRCOG_Urbansim | src/opus_core/models/choice_model.py | 1 | 83412 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
import gc
import re
from opus_core.datasets.dataset_factory import DatasetFactory
from opus_core.datasets.dataset import Dataset, DatasetSubset
from opus_core.datasets.interaction_dataset import InteractionDataset
from opus_core.specified_coefficients import SpecifiedCoefficients, SpecifiedCoefficientsFor1Submodel
from opus_core.coefficients import create_coefficient_from_specification, Coefficients
from opus_core.equation_specification import EquationSpecification
from opus_core.resources import Resources
from opus_core.storage_factory import StorageFactory
from opus_core.upc_factory import UPCFactory
from opus_core.sampler_factory import SamplerFactory
from opus_core.model_component_creator import ModelComponentCreator
from opus_core.misc import DebugPrinter, unique
from opus_core.sampling_toolbox import sample_noreplace
from opus_core.models.chunk_model import ChunkModel
from opus_core.chunk_specification import ChunkSpecification
from opus_core.class_factory import ClassFactory
from opus_core.model import get_specification_for_estimation, prepare_specification_and_coefficients
from opus_core.variables.variable_name import VariableName
from opus_core.logger import logger
from numpy import where, zeros, array, arange, ones, take, ndarray, resize, concatenate, alltrue
from numpy import int32, compress, float64, newaxis, row_stack, asarray, delete
from numpy import isinf, isnan, inf, nan, nan_to_num
from numpy.random import permutation
import numpy as np
from opus_core.variables.attribute_type import AttributeType
class ChoiceModel(ChunkModel):
"""
Implements a discrete choice model, where agents choose from given choices.
Method 'run' runs a simulation for a given agent_set.
Method 'estimate' runs an estimation process. The specific algorithm is implemented in the given upc_sequence.
"""
model_name = "Choice Model"
model_short_name ="ChoiceM"
def __init__(self, choice_set,
model_name=None,
short_name=None,
utilities="opus_core.upc.linear_utilities",
probabilities="opus_core.upc.mnl_probabilities",
choices="opus_core.upc.random_choices",
sampler=None, sampler_size=None,
submodel_string=None,
choice_attribute_name="choice_id",
interaction_pkg="opus_core",
run_config=None,
estimate_config=None,
debuglevel=0,
dataset_pool=None,
**kwargs
):
"""
Arguments:
choice_set - Dataset or array/list of choices.
utilities - name of utilities module
probabilities - name of probabilities module
choices - name of module for computing agent choices
sampler - name of sampling module to be used for sampling alternatives. If it is None, no sampling is performed
and all alternatives are considered for choice.
submodel_string - character string specifying what agent attribute determines submodels.
choice_attribute_name - name of the attribute that identifies the choices. This argument is
only relevant if choice_set is not an instance of Dataset.
Otherwise the choices are identified by the unique identifier of choice_set.
interaction_pkg - only relevant if there is an implementation of an interaction dataset
that corresponds to interaction between agents and choices.
run_config - collection of additional arguments that control a simulation run. It is of class Resources.
estimate_config - collection of additional arguments that control an estimation run. It is of class Resources.
debuglevel - debuglevel for the constructor. The level is overwritten by the argument in the run and estimate method.
An instance of upc_sequence class with components utilities, probabilities and choices is created. Also an instance
of Sampler class for given sampler procedure is created.
"""
if model_name is not None:
self.model_name = model_name
if short_name is not None:
self.model_short_name = short_name
self.debug = DebugPrinter(debuglevel)
self.compute_choice_attribute = False
self.choice_attribute_name = choice_attribute_name
if (self.choice_attribute_name is not None):
self.choice_attribute_name = VariableName(self.choice_attribute_name)
if self.choice_attribute_name.get_dataset_name() is not None:
self.compute_choice_attribute = True
if not isinstance(choice_set, Dataset):
storage = StorageFactory().get_storage('dict_storage')
storage_table_name = 'choice_set'
table_data = {self.choice_attribute_name.get_alias():array(choice_set)}
storage.write_table(
table_name=storage_table_name,
table_data=table_data
)
choice_set = Dataset(
in_storage = storage,
in_table_name = storage_table_name,
id_name = self.choice_attribute_name.get_alias(),
dataset_name = 'choice',
)
self.choice_set = choice_set
self.upc_sequence = UPCFactory().get_model(utilities=utilities,
probabilities=probabilities,
choices=choices,
debuglevel=debuglevel)
self.sampler_class = SamplerFactory().get_sampler(sampler)
if (sampler <> None) and (self.sampler_class == None):
logger.log_warning("Error in loading sampler class. No sampling will be performed.")
self.sampler_size=sampler_size
self.submodel_string = submodel_string # which attribute determines submodels
self.availability = kwargs.get('availability', None)
self.run_config = run_config
if self.run_config == None:
self.run_config = Resources()
self.estimate_config = estimate_config
if self.estimate_config == None:
self.estimate_config = Resources()
self.result_choices = None
self.dataset_pool = self.create_dataset_pool(dataset_pool)
self.dataset_pool.replace_dataset(self.choice_set.get_dataset_name(), self.choice_set)
self.coefficient_names = {}
self.model_interaction = ModelInteraction(self, interaction_pkg, self.choice_set)
ChunkModel.__init__(self)
self.get_status_for_gui().initialize_pieces(3, pieces_description = array(['initialization', 'computing variables', 'submodel: 1']))
def run(self, specification, coefficients, agent_set,
agents_index=None,
chunk_specification=None,
data_objects=None,
run_config=None,
debuglevel=0):
""" Run a simulation and return a numpy array of length agents_index, giving agent choices (ids of locations).
'specification' is of type EquationSpecification,
'coefficients' is of type Coefficients,
'agent_set' is of type Dataset,
'agent_index' are indices of individuals in the agent_set for which
the model runs. If it is None, the whole agent_set is considered.
'chunk_specification' - specifies how to determine the number of chunks to use
when computing.
'data_objects' is a dictionary where each key is the name of an data object
('zone', ...) and its value is an object of class Dataset.
'run_config' is of type Resources, it gives additional arguments for the run.
'debuglevel' overwrites the constructor 'debuglevel'.
"""
self.debug.flag = debuglevel
if run_config == None:
run_config = Resources()
## plug in default switches:
## whether to include_chosen_choice for sampler,
## whether it is called in estimate method
run_config.merge_with_defaults({"include_chosen_choice":False, "estimate":False})
self.run_config = run_config.merge_with_defaults(self.run_config)
self.dataset_pool.replace_dataset(agent_set.get_dataset_name(), agent_set)
if agents_index==None:
agents_index=arange(agent_set.size())
## compute_variables is not meaningful given it's going to be overwritten by run()
## (HS) but we need it in cases when doesn't exist and is added as primary attribute,
## e.g. cars_category in auto-ownership model.
if self.compute_choice_attribute:
agent_set.compute_variables([self.choice_attribute_name], dataset_pool=self.dataset_pool)
## add a primary attribute for choice_id_name or convert it to primary attribute
choice_id_name = self.choice_set.get_id_name()[0]
if (choice_id_name not in agent_set.get_known_attribute_names()):
agent_set.add_primary_attribute(name=choice_id_name,
data=resize(array([-1]), agent_set.size()))
else:
if choice_id_name in agent_set.get_nonloaded_attribute_names():
agent_set.get_attribute(choice_id_name)
if agent_set._get_attribute_type(choice_id_name) == AttributeType.COMPUTED:
agent_set.attribute_boxes[choice_id_name].set_type(AttributeType.PRIMARY)
agent_set._add_to_primary_attribute_names(choice_id_name)
if self.run_config.get("demand_string", None):
self.choice_set.add_primary_attribute(name=self.run_config.get("demand_string"),
data=zeros(self.choice_set.size(), dtype="float32"))
self.compute_demand_flag = True
else:
self.compute_demand_flag = False
## calculate cumulative supply that is compatible with demand calculation
if self.run_config.get("supply_string", None):
current_choice = agent_set.get_attribute(choice_id_name)
agent_set.modify_attribute(choice_id_name, zeros(agents_index.size)-1, index=agents_index)
supply_varaible_name = VariableName(self.run_config.get("supply_string"))
supply_alias = supply_varaible_name.get_alias()
if supply_alias in self.choice_set.get_primary_attribute_names():
self.choice_set.delete_one_attribute(supply_alias)
supply = self.choice_set.compute_variables(supply_varaible_name, dataset_pool=self.dataset_pool)
self.choice_set.add_primary_attribute(name=supply_alias, data=supply)
agent_set.modify_attribute(choice_id_name, current_choice)
if data_objects is not None:
self.dataset_pool.add_datasets_if_not_included(data_objects)
self.model_interaction.set_agent_set(agent_set)
self.result_choices = ChunkModel.run(self, chunk_specification, agent_set, agents_index, int32,
specification=specification, coefficients=coefficients)
return self.result_choices
    def run_chunk(self, agents_index, agent_set, specification, coefficients):
        """Run the model for one chunk of agents and return their choice ids.

        Returns an int32 array of length agents_index.size with the chosen
        choice-set ids, or -1 for agents that could not be assigned. Also
        writes the choices back into agent_set's choice-id attribute.
        """
        if agents_index.size <= 0:
            return array([], dtype='int32')
        self.set_choice_set_size()
        nchoices = self.get_choice_set_size()
        ## initial specified_coefficients with dummy values as we may need to get submodel information from it
        ## recreated specified_coefficients when the sampling is done and choice_ids is known
        #self.model_interaction.create_specified_coefficients(coefficients, specification, choice_ids=arange(nchoices)+1)
        submodels = specification.get_distinct_submodels()
        # partition the chunk's agents by submodel (fills self.observations_mapping)
        self.map_agents_to_submodels(submodels, self.submodel_string, agent_set, agents_index,
                                      dataset_pool=self.dataset_pool, resources = Resources({"debug": self.debug}))
        if self.observations_mapping['mapped_index'].size == 0:
            logger.log_status("No agents mapped to submodels.")
            return array(agents_index.size*[-1], dtype="int32")
        self.create_interaction_datasets(agent_set, agents_index, self.run_config, submodels=submodels)
        ## move Choice set size log after compute_variables so it is not buried in compute_variable msg
        #logger.log_status("Choice set size: %i" % self.get_choice_set_size())
        index = self.model_interaction.get_choice_index()
        if index.size <=0:
            logger.log_warning("No choices available.")
            return array(agents_index.size*[-1], dtype="int32")
        self.debug.print_debug("Create specified coefficients ...",4)
        self.model_interaction.create_specified_coefficients(coefficients, specification, self.choice_set.get_id_attribute()[index])
        # make the (agent x choice) index available to simulate_chunk via run_config
        self.run_config.merge({"index":index})
        self.model_interaction.compute_availabilities(submodels)
        self.get_status_for_gui().update_pieces_using_submodels(submodels=submodels, leave_pieces=2)
        # simulate
        choice_indices = self.simulate_chunk()
        choice_set_ids = self.choice_set.get_id_attribute()
        # scatter per-submodel results back over the full chunk; -1 = no choice
        allchoice_ind = (zeros(agents_index.size) - 1).astype(int32)
        allchoice_ind[self.observations_mapping['mapped_index']] = choice_indices
        choices = where(allchoice_ind < 0, -1, choice_set_ids[allchoice_ind])
        #modify choices
        agent_set.set_values_of_one_attribute(self.choice_set.get_id_name()[0], choices, agents_index)
        del self.run_config["index"]
        return choices
    def simulate_chunk(self):
        """Compute utilities, probabilities and choices for one chunk of agents.

        Expects self.run_config["index"] (2d array of sampled choice indices,
        one row per observation) to be set by the caller.  Returns an int32
        array with the chosen choice index for each observation of the chunk.
        """
        self.debug.print_debug("Compute variables ...",4)
        self.increment_current_status_piece()
        self.model_interaction.compute_variables()
        logger.log_status("Choice set size: %i" % self.get_choice_set_size())
        coef = {}
        index = self.run_config["index"]
        self.debug.print_debug("Simulate ...",4)
        # one row per observation, one column per alternative
        utilities = np.empty((self.observations_mapping["index"].size,
                             self.get_choice_set_size())
                             )
        availability = utilities.copy()
        # optional price equilibration inputs (used by equilibration choices)
        price_coef_name = self.run_config.get('price_coef_name', None)
        price_var_name = self.run_config.get('price_variable_name',
                                             price_coef_name)
        if price_var_name is not None:
            price_coef_val = np.empty((self.observations_mapping["index"].size, 1))
            price = self.choice_set[price_var_name]
        for submodel in self.model_interaction.get_submodels():
            self.model_interaction.prepare_data_for_simulation(submodel)
            coef[submodel] = self.model_interaction.get_submodel_coefficients(submodel)
            self.coefficient_names[submodel] = self.model_interaction.get_variable_names_for_simulation(submodel)
            self.debug.print_debug("   submodel: %s   nobs: %s" % (submodel, self.observations_mapping[submodel].size), 5)
            self.increment_current_status_piece()
            ## TODO: there may be submodels without specification/data,
            ## TODO: which need to be excluded from utilities & index,
            ## TODO: and therefore probabilities & choices step
            if self.model_interaction.is_there_data(submodel): # observations for this submodel available
                self.run_config.merge({"specified_coefficients": coef[submodel]})
                coef_vals = coef[submodel].get_coefficient_values()
                coef_names = coef[submodel].get_coefficient_names()
                data = self.get_all_data(submodel)
                # sanitize NaN/Inf values produced by variable computation,
                # warning about the offending variables instead of raising
                nan_index = where(isnan(data))[2]
                inf_index = where(isinf(data))[2]
                vnames = asarray(coef[submodel].get_variable_names())
                if nan_index.size > 0:
                    nan_var_index = unique(nan_index)
                    data = nan_to_num(data)
                    logger.log_warning("NaN(Not A Number) is returned from variable %s; it is replaced with %s." % (vnames[nan_var_index], nan_to_num(nan)))
                    #raise ValueError, "NaN(Not a Number) is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[nan_var_index]
                if inf_index.size > 0:
                    inf_var_index = unique(inf_index)
                    data = nan_to_num(data)
                    logger.log_warning("Inf is returned from variable %s; it is replaced with %s." % (vnames[inf_var_index], nan_to_num(inf)))
                    #raise ValueError, "Inf is returned from variable %s; check the model specification table and/or attribute values used in the computation for the variable." % vnames[inf_var_index]
                utilities[self.observations_mapping[submodel], :] = self.upc_sequence.compute_utilities(data, coef_vals, resources=self.run_config)
                if self.availability is not None:
                    availability[self.observations_mapping[submodel], :] = self.model_interaction.get_availability(submodel)
                if price_coef_name is not None:
                    ##assume the price coef doesn't vary by equation (alternative)
                    price_coef_val[self.observations_mapping[submodel], 0] = coef_vals[coef_names==price_coef_name][0]
        if price_coef_name is not None:
            self.run_config.merge({'price': price,
                                   'price_beta': price_coef_val,
                                   'utilities': utilities})
        if self.availability is not None:
            zero_avail_sum = where(availability[self.observations_mapping['mapped_index'], :].sum(axis=1)==0)[0]
            if zero_avail_sum.size > 0: # agents with no availability of choices - remove from mapped_index
                # NOTE(review): delete() is passed the *values* of mapped_index at
                # zero_avail_sum as positions; it looks like the positions
                # zero_avail_sum themselves were intended - confirm.
                self.observations_mapping['mapped_index'] = delete(self.observations_mapping['mapped_index'], self.observations_mapping['mapped_index'][zero_avail_sum])
            self.run_config["availability"] = availability[self.observations_mapping['mapped_index'],:]*(index[self.observations_mapping['mapped_index'],:]>=0)
        self.upc_sequence.utilities = utilities[self.observations_mapping['mapped_index'],:]
        self.run_config["index"] = index[self.observations_mapping['mapped_index'],:]
        self.upc_sequence.compute_probabilities(resources=self.run_config)
        choices = self.upc_sequence.compute_choices(resources=self.run_config)
        #attach converged price coming out from equilibration_choices
        if price_coef_name is not None and self.run_config.has_key('price_converged'):
            price_converged = price_var_name + "_converged"
            if price_converged not in self.choice_set.get_known_attribute_names():
                self.choice_set.add_attribute(data=self.choice_set[price_var_name],
                                              name=price_converged,
                                              metadata=1)
            self.choice_set.modify_attribute(name=price_converged,
                                             data=self.run_config.get('price_converged'),
                                             index=index[0, :])
        if self.run_config.get("export_simulation_data", False):
            self.export_probabilities(self.upc_sequence.probabilities,
                                      self.run_config.get("simulation_data_file_name", './choice_model_data.txt'))
        if self.compute_demand_flag:
            self.compute_demand(self.upc_sequence.probabilities)
        choices = choices.astype(int32)
        #comment out code that hasn't been run (which will not work anyway)
        #res_positive_idx = where(choices>=0)[0]
        #if index is not None:
        #    if index.shape[1] <> coef[submodel].nequations():
        #        choices[res_positive_idx] = array(map(lambda x:
        #               index[x,coef[submodel].get_equations_index()[res[x]]], res_positive_idx)).astype(int32)
        #else:
        #    choices[res_positive_idx] = coef[submodel].get_equations_index()[res[res_positive_idx]].astype(int32)
        return choices
def compute_demand(self, probabilities):
"""sums probabilities for each alternative and adds it to the demand attribute of the choice set.
"""
demand = probabilities.sum(axis=0)
demand_attr = self.run_config.get("demand_string")
self.choice_set.modify_attribute(name=demand_attr,
data = self.choice_set.get_attribute(demand_attr) + demand)
    def estimate(self, specification, agent_set, agents_index=None, procedure=None, data_objects=None,
                 estimate_config=None, debuglevel=0):
        """ Run an estimation process and return a tuple where the first element is an object of class Coefficients
        containing the estimated coefficients, and the second element is a dictionary with an entry for each submodel
        giving the return values of the specified estimation procedure.
        'specification' is of type EquationSpecification,
        'agent_set' is of type Dataset,
        'agent_index' are indices of individuals in the agent_set for which
        the model is estimated. If it is None, the whole agent_set is considered.
        'procedure' is a string giving the name of the estimation procedure. If it is None,
        there should be an entry "estimation" in 'estimate_config' that determines the procedure. The class
        must have a method 'run' that takes as arguments 'data', 'upc_sequence' and 'resources'. It returns a dictionary
        with entries 'estimators', 'standard_errors' and 't_values' (all 1D numpy arrays).
        'data_objects' is a dictionary where each key is the name of an data object
        ('zone', ...) and its value is an object of class Dataset.
        'estimate_config' is of type Resources, it gives additional arguments for the estimation procedure.
        'debuglevel' overwrites the class 'debuglevel'.
        Returns None when there are no agents to estimate on.
        """
        self.debug.flag = debuglevel
        if estimate_config == None:
            estimate_config = Resources()
        ## plug in default switches:
        ## whether to include_chosen_choice for sampler,
        ## whether it is called in estimate method
        estimate_config.merge_with_defaults({"include_chosen_choice":True, "estimate":True})
        self.estimate_config = estimate_config.merge_with_defaults(self.estimate_config)
        if data_objects is not None:
            self.dataset_pool.add_datasets_if_not_included(data_objects)
        # resolve the estimation procedure: explicit argument wins over the
        # "estimation" entry of estimate_config
        self.procedure=procedure
        if self.procedure == None:
            self.procedure = self.estimate_config.get("estimation", None)
        self.procedure = ModelComponentCreator().get_model_component(self.procedure)
        if self.procedure == None:
            raise StandardError, "No estimation procedure given, or error when loading the corresponding module."
        # make sure the agent set is loaded
        if agent_set.size()<=0:
            agent_set.get_id_attribute()
        if agents_index==None:
            agents_index=arange(agent_set.size())
        if not isinstance(agents_index,ndarray):
            agents_index=array(agents_index)
        if self.compute_choice_attribute:
            agent_set.compute_variables([self.choice_attribute_name], dataset_pool=self.dataset_pool)
        self.model_interaction.set_agent_set(agent_set)
        self.set_choice_set_size(estimation=True)
        nchoices = self.get_choice_set_size()
        # optionally estimate on a random sub-sample of the agents
        estimation_size_agents = self.estimate_config.get("estimation_size_agents", None) # should be a proportion of the agent_set
        if estimation_size_agents == None:
            estimation_size_agents = 1.0
        else:
            estimation_size_agents = max(min(estimation_size_agents,1.0),0.0) # between 0 and 1
        if estimation_size_agents < 1.0:
            self.debug.print_debug("Sampling agents for estimation ...",3)
            agents_index_for_estimation = sample_noreplace(agents_index,
                                                           int(agents_index.size*estimation_size_agents))
        else:
            agents_index_for_estimation = agents_index
        self.debug.print_debug("Number of agents for estimation: " + str(agents_index_for_estimation.size),2)
        if agents_index_for_estimation.size <= 0:
            self.debug.print_debug("Nothing to be done.",4)
            return None
        submodels = specification.get_distinct_submodels()
        self.get_status_for_gui().update_pieces_using_submodels(submodels=submodels, leave_pieces=2)
        # partition the agents into submodels and build the interaction set
        self.map_agents_to_submodels(submodels, self.submodel_string, agent_set,
                                     agents_index_for_estimation,
                                     dataset_pool = self.dataset_pool,
                                     resources = Resources({"debug": self.debug}),
                                     submodel_size_max=self.estimate_config.get('submodel_size_max', None))
        self.create_interaction_datasets(agent_set, agents_index_for_estimation, self.estimate_config, submodels=submodels)
        ## move Choice set size log after compute_variables so it is not buried in compute_variable msg
        #logger.log_status("Choice set size: %i" % self.get_choice_set_size())
        #self.model_interaction.set_chosen_choice(agents_index_for_estimation)
        self.model_interaction.set_chosen_choice_if_necessary(agents_index=agents_index_for_estimation)
        index = self.model_interaction.get_choice_index()
        self.coefficients = create_coefficient_from_specification(specification)
        self.model_interaction.create_specified_coefficients(self.coefficients, specification, self.choice_set.get_id_attribute()[index])
        self.model_interaction.compute_availabilities(submodels)
        #run estimation
        result = self.estimate_step()
        return (self.coefficients, result)
    def create_interaction_datasets(self, agent_set, agents_index, config, **kwargs):
        """Create interaction dataset agent_x_choice.
        config can be used to pass extra parameters to the sampler.

        Without a sampler class (or when the requested sample size equals the
        full choice-set size) the agents are interacted with the complete
        choice set; otherwise alternatives are sampled chunk-wise.
        """
        if self.sampler_class is None:
            # no sampling - interact each agent with the full choice set
            self.model_interaction.create_interaction_datasets(agents_index, arange(self.choice_set_size))
        else:
            nchoices = self.get_number_of_elemental_alternatives()
            if nchoices == self.choice_set.size():
                #sampler class specified, but the sample size equals the size of choice set
                self.model_interaction.create_interaction_datasets(agents_index, arange(self.choice_set_size))
            else:
                sampling_weights = self.get_sampling_weights(config, agent_set=agent_set, agents_index=agents_index)
                choice_index = None
                chunk_specification = config.get("chunk_specification_for_sampling", ChunkSpecification({"nchunks":1}))
                nchunks = chunk_specification.nchunks(agents_index)
                chunksize = chunk_specification.chunk_size(agents_index)
                interaction_dataset = self.sample_alternatives_by_chunk(agent_set, agents_index,
                                                                        choice_index, nchoices,
                                                                        weights=sampling_weights,
                                                                        config=config,
                                                                        nchunks=nchunks, chunksize=chunksize)
                if not config.get("accept_unavailability_of_choices", False) and interaction_dataset.get_reduced_m() == 0:
                    raise StandardError, "There are no choices available for the given sampling weights."
                # the sampler may have produced fewer choices than requested
                self.update_choice_set_size(interaction_dataset.get_reduced_m())
                self.model_interaction.interaction_dataset = interaction_dataset
    def estimate_step(self):
        """Estimate coefficients for each submodel and collect the results.

        Returns a dictionary with one entry per submodel holding the return
        value of the estimation procedure; the estimated values are also
        copied into self.coefficients.
        """
        self.debug.print_debug("Compute variables ...",4)
        self.increment_current_status_piece()
        self.model_interaction.compute_variables()
        logger.log_status("Choice set size: %i" % self.get_choice_set_size())
        coef = {}
        result = {}
        #index = self.estimate_config["index"]
        self.debug.print_debug("Estimate ...",4)
        for submodel in self.model_interaction.get_submodels():
            logger.log_status("submodel: %s" % submodel)
            self.increment_current_status_piece()
            self.model_interaction.prepare_data_for_estimation(submodel)
            coef[submodel] = self.model_interaction.get_submodel_coefficients(submodel)
            self.coefficient_names[submodel] = self.model_interaction.get_coefficient_names(submodel)
            if self.model_interaction.is_there_data(submodel): # observations for this submodel available
                # remove not used choices
                is_submodel_chosen_choice = self.model_interaction.set_chosen_choice_for_submodel_and_update_data(submodel)
                self.estimate_config['index'] = self.model_interaction.get_choice_index_for_submodel(submodel)
                self.estimate_config["chosen_choice"] = is_submodel_chosen_choice
                # pass submodel-specific settings to the estimation procedure
                self.estimate_config.merge({"coefficient_names":self.coefficient_names[submodel]})
                self.estimate_config.merge({"specified_coefficients": coef[submodel]})
                self.estimate_config.merge({"variable_names": self.model_interaction.get_variable_names(submodel)})
                self.estimate_config.merge({"fixed_values": self.model_interaction.get_coefficient_fixed_values(submodel)})
                self.estimate_config.merge({"submodel": submodel})
                self.estimate_config.merge({"_model_":self})
                self.estimate_config['availability'] = self.model_interaction.get_availability(submodel)
                result[submodel] = self.estimate_submodel(self.get_all_data(submodel), submodel)
                # copy the procedure's outputs into the coefficient object
                if "estimators" in result[submodel].keys():
                    coef[submodel].set_beta_alt(result[submodel]["estimators"])
                if "standard_errors" in result[submodel].keys():
                    coef[submodel].set_beta_se_alt(result[submodel]["standard_errors"])
                if "other_measures" in result[submodel].keys():
                    for measure in result[submodel]["other_measures"].keys():
                        coef[submodel].set_measure_from_alt(measure,
                                                            result[submodel]["other_measures"][measure])
                if "other_info" in result[submodel].keys():
                    for info in result[submodel]["other_info"]:
                        coef[submodel].set_other_info(info, result[submodel]["other_info"][info])
                coef[submodel].fill_beta_from_beta_alt()
                if self.estimate_config.get("export_estimation_data", False):
                    self.export_estimation_data(submodel, is_submodel_chosen_choice, self.get_all_data(submodel),
                                                coef[submodel].get_coefficient_names_from_alt(),
                                                self.estimate_config.get("estimation_data_file_name", './estimation_data.txt'),
                                                self.estimate_config.get("use_biogeme_data_format",False))
        self.coefficients.fill_coefficients(coef)
        self.model_interaction.set_coefficients(self.coefficients)
        # clear per-submodel entries so they do not leak into the next run
        self.estimate_config["coefficient_names"]=None
        self.estimate_config["variable_names"]=None
        return result
def estimate_submodel(self, data, submodel=0):
if self.model_interaction.is_there_data(submodel):
return self.procedure.run(data, upc_sequence=self.upc_sequence, resources=self.estimate_config)
return {}
def get_sampling_weights(self, config, agent_set=None, agents_index=None, **kwargs):
"""Return weights_string in the config
which is the value for key
'weights_for_estimation_string' or 'weights_for_simulation_string'.
Can be overwritten in child class and return an 1d or 2d array
If it is the equal sign (i.e. '='), all choices have equal weights.
"""
weights_string = None
if config is not None:
if config.get('estimate', False): #if we are in estimate()
weights_string = config.get("weights_for_estimation_string", None)
else: # otherwise
weights_string = config.get("weights_for_simulation_string", None)
if weights_string == '=':
return ones(self.choice_set.size(), dtype="int32")
return weights_string
    def sample_alternatives_by_chunk(self, agent_set, agents_index,
                                     choice_index, sample_size,
                                     weights=None,
                                     with_replacement=False,
                                     include_chosen_choice=None,
                                     config=None,
                                     nchunks=1, chunksize=None):
        """Return interaction data with sampled alternatives (agents_index * sample_size)
        Do it in 'nchunks' iterations, and stack the results from each chunk
        """
        if not chunksize:
            chunksize=agents_index.size
        # collects the sampled choice indices of all chunks; -1 marks unfilled entries
        index2 = -1 + zeros((agents_index.size, sample_size), dtype="int32")
        attributes = {}
        for ichunk in range(nchunks):
            index_for_ichunk = self.get_index_for_chunk(agents_index.size, ichunk, chunksize)
            agents_index_in_ichunk = agents_index[index_for_ichunk]
            interaction_dataset = self.sampler_class.run(agent_set, self.choice_set,
                                                         index1=agents_index_in_ichunk,
                                                         index2=choice_index,
                                                         sample_size=sample_size,
                                                         weight=weights,
                                                         with_replacement=with_replacement,
                                                         include_chosen_choice=include_chosen_choice,
                                                         resources=config,
                                                         dataset_pool=self.dataset_pool
                                                         )
            if nchunks>1:
                # accumulate this chunk's sampled indices and attribute values
                index2[index_for_ichunk,:] = interaction_dataset.index2
                for name in interaction_dataset.get_known_attribute_names():
                    attr_val = interaction_dataset.get_attribute(name)
                    if not attributes.has_key(name):
                        attributes[name] = zeros(index2.shape, dtype=attr_val.dtype)
                    attributes[name][index_for_ichunk,:] = attr_val
        if nchunks>1:
            # rebuild one interaction dataset covering all agents from the stacked pieces
            interaction_dataset = self.sampler_class.create_interaction_dataset(interaction_dataset.dataset1,
                                                                               interaction_dataset.dataset2,
                                                                               index1=agents_index,
                                                                               index2=index2)
            for name in attributes.keys():
                interaction_dataset.add_attribute(attributes[name], name)
        return interaction_dataset
def get_agents_for_chunk(self, agents_index, ichunk, chunksize):
"""Return ichunk with size chunksize in agents_index,
return agents_index if chunksize >= agents_index.size
"""
max_index = agents_index.size
return agents_index[self.get_index_for_chunk(max_index, ichunk, chunksize)]
def get_index_for_chunk(self, max_index, ichunk, chunksize):
"""
"""
return arange(ichunk*chunksize, min((ichunk+1)*chunksize, max_index))
def get_export_simulation_file_names(self, file_name):
import os
file_name_root, file_name_ext = os.path.splitext(file_name)
out_file_probs = "%s_probabilities%s" % (file_name_root, file_name_ext)
out_file_choices = "%s_choices%s" % (file_name_root, file_name_ext)
return (out_file_probs, out_file_choices)
    def get_probabilities_and_choices(self, probabilities):
        """Return a tuple of probabilities (2d array, first column are the agent ids, remaining columns
        are probabilities for each choice) and choices (2d array of [possibly sampled] choice ids,
        where the first column are the agent ids).
        Rows of both arrays are sorted by agent id.
        """
        from numpy import argsort
        agent_ids = self.model_interaction.interaction_dataset.get_id_attribute_of_dataset(1)
        probs = concatenate((agent_ids[...,newaxis], probabilities), axis=1)
        # NOTE(review): get_choice_index() returns choice *indices* from the
        # interaction set - confirm they are ids as the docstring claims.
        choice_ids = concatenate((agent_ids[...,newaxis],
                                  self.model_interaction.get_choice_index()), axis=1)
        # sort results
        order_idx = argsort(agent_ids)
        probs = probs[order_idx,:]
        choice_ids = choice_ids[order_idx,:]
        return (probs, choice_ids)
    def export_probabilities(self, probabilities, file_name):
        """Export the current probabilities into a file.

        Two tab-delimited files are written (see
        get_export_simulation_file_names): one with probabilities, one with
        choice indices, both prefixed with the agent id column.  The first
        chunk overwrites existing files; subsequent chunks append.
        """
        from opus_core.misc import write_table_to_text_file
        if self.index_of_current_chunk == 0:
            mode = 'w'
        else:
            mode = 'a'
        export_file_probs, export_file_choices = self.get_export_simulation_file_names(file_name)
        probs, choice_ids = self.get_probabilities_and_choices(probabilities)
        logger.start_block('Exporting probabilities (%s x %s) into %s' % (probs.shape[0], probs.shape[1], export_file_probs))
        write_table_to_text_file(export_file_probs, probs, mode=mode, delimiter='\t')
        logger.end_block()
        logger.start_block('Exporting choices into %s' % export_file_choices)
        write_table_to_text_file(export_file_choices, choice_ids, mode=mode, delimiter='\t')
        logger.end_block()
def export_estimation_data(self, submodel, is_chosen_choice, data, coef_names, file_name, use_biogeme_data_format=False):
from numpy import reshape, repeat
import os
delimiter = '\t'
if use_biogeme_data_format:
nobs, alts, nvars = data.shape
avs = ones(shape=(nobs,alts,1)) # if the choice is available, set to all ones
data = concatenate((data, avs),axis=2)
coef_names = coef_names.tolist() + ['av']
nvars += 1
try:
from numpy import argsort
stratum_id = self.sampler_class._stratum_id[...,newaxis]
data = concatenate((data, stratum_id, self.sampler_class._sampling_probability[...,newaxis], ),axis=2)
iid = argsort(stratum_id, axis=1)
for i in range(data.shape[0]): # re-arrange data based on stratum id
data[i,...] = data[i,...][iid[i,:,0]]
is_chosen_choice[i,:] = is_chosen_choice[i,:][iid[i,:,0]]
coef_names = coef_names + ['stratum', 'sampling_prob']
nvars += 2
logger.log_status("added variables specific to stratified sampler")
except:
pass
chosen_choice = where(is_chosen_choice)[1] + 1
index_of_non_constants = []
for i in range(nvars):
if not (coef_names[i] == "constant"):
index_of_non_constants.append(i)
index_of_non_constants = array(index_of_non_constants)
nvars_without_const = index_of_non_constants.size
#additional 2 columns for ID and choice
data_for_biogeme = zeros((nobs, alts*nvars_without_const + 1), dtype=float64)
biogeme_var_names = []
for ivar in range(nvars_without_const):
for ialt in range(alts):
biogeme_var_names.append(coef_names[index_of_non_constants[ivar]] + "_" + str(ialt+1))
data_for_biogeme[:,1+ivar*alts+ialt] = data[:,ialt, index_of_non_constants[ivar]]
data_for_biogeme[:, 0] = chosen_choice
ids = reshape(arange(nobs)+1, (nobs,1))
data_for_biogeme = concatenate((ids, data_for_biogeme),axis=1)
data = data_for_biogeme
header = ['ID', 'choice'] + biogeme_var_names
nrows, ncols = data.shape
else:
nobs, alts, nvars = data.shape
ids = reshape(repeat(arange(nobs, dtype='int32')+1, alts), (nobs,alts,1))
data = concatenate((ids, is_chosen_choice[...,newaxis].astype("int16"), data),axis=2)
nvars += 2
nrows = nobs * alts
header = ['ID', 'choice'] + coef_names.tolist()
data = reshape(data,(-1,nvars))
file_name_root, file_name_ext = os.path.splitext(file_name)
out_file = "%s_submodel_%s.txt" % (file_name_root, submodel)
fh = open(out_file,'w')
fh.write(delimiter.join(header) + '\n') #file header
for row in range(nrows):
line = [str(x) for x in data[row,]]
fh.write(delimiter.join(line) + '\n')
fh.flush()
fh.close
print 'Data written into %s' % out_file
def get_agents_order(self, agents):
return permutation(agents.size())
def set_choice_set_size(self, estimation=False):
"""If "sample_size_locations" is specified in resources, it is considered as the choice set size. Otherwise
the value of resources entry "sample_proportion_locations" is considered as determining the size of
the choice set.
"""
if self.sampler_class is not None:
if estimation:
resources = self.estimate_config
else:
resources = self.run_config
pchoices = resources.get("sample_proportion_locations", None)
nchoices = resources.get("sample_size_locations", None)
if nchoices == None:
if pchoices == None:
logger.log_warning("Neither 'sample_proportion_locations' nor 'sample_size_locations' " +
"given. Choice set will not be sampled.")
nchoices = self.choice_set.size()
else:
nchoices = int(pchoices*self.choice_set.size())
else:
nchoices = self.choice_set.size()
self.choice_set_size = min(nchoices, self.choice_set.size())
def update_choice_set_size(self, nchoices):
"""
update self.choice_set_size if necessary
when the sampler class is stratified_sampler that won't be able to know
the accurate number of choice beforehand, this method update the choice
set size after the sampler_class is run.
"""
self.choice_set_size = nchoices
#def set_choice_set_size(self):
#self.choice_set_size = self.choice_set.size()
def get_choice_set_size(self):
return self.choice_set_size
def get_number_of_elemental_alternatives(self):
return self.get_choice_set_size()
def plot_histogram(self, main="", file=None):
"""Plots histogram of choices and probabilities for the last chunk."""
self.upc_sequence.plot_histogram(main=main)
self.upc_sequence.show_plots(file)
    def plot_histogram_of_choices(self, index=None, main="", bins=None):
        """Plots a histogram of choices for the whole dataset.

        'index' optionally restricts the plot to a subset of the results.
        Raises StandardError when the model has not been run yet.
        """
        from opus_core.plot_functions import plot_histogram
        if self.result_choices is None:
            raise StandardError, "Model does not have any results. Try to run it first."
        if index is None:
            values = self.result_choices
        else:
            values = self.result_choices[index]
        plot_histogram(values, main, xlabel="choices", bins=bins)
def prepare_for_run(self, agent_set=None, agent_filter=None, filter_threshold=0, **kwargs):
spec, coef = prepare_specification_and_coefficients(**kwargs)
if (agent_set is not None) and (agent_filter is not None):
filter_values = agent_set.compute_variables([agent_filter], dataset_pool=self.dataset_pool)
index = where(filter_values > filter_threshold)[0]
else:
index = None
return (spec, coef, index)
def prepare_for_estimate(self, agent_set=None, agent_filter=None, filter_threshold=0, **kwargs):
spec = get_specification_for_estimation(**kwargs)
if 'agents_for_estimation_storage' in kwargs and 'agents_for_estimation_table' in kwargs:
estimation_set = Dataset(in_storage = kwargs['agents_for_estimation_storage'],
in_table_name=kwargs['agents_for_estimation_table'],
id_name=agent_set.get_id_name(),
dataset_name=agent_set.get_dataset_name())
if agent_filter:
values = estimation_set.compute_variables(agent_filter)
index = where(values > 0)[0]
estimation_set.subset_by_index(index, flush_attributes_if_not_loaded=False)
if 'join_datasets' in kwargs and kwargs['join_dataset']:
agent_set.join_by_rows(estimation_set, require_all_attributes=False,
change_ids_if_not_unique=True)
index = arange(agent_set.size()-estimation_set.size(),agent_set.size())
elif (agent_set is not None) and (agent_filter is not None):
filter_values = agent_set.compute_variables([agent_filter], dataset_pool=self.dataset_pool)
index = where(filter_values > filter_threshold)[0]
else:
index = None
return (spec, index)
def get_data(self, coefficient, submodel=-2):
return ChunkModel.get_data(self, coefficient, submodel, is3d=True)
def get_dataset_pool(self):
return self.dataset_pool
def get_data_as_dataset(self, submodel=-2):
"""Like get_all_data, but the returning value is an InteractionDataset containing attributes that
correspond to the data columns. Their names are coefficient names."""
all_data = self.get_all_data(submodel)
if all_data is None:
return None
names = self.get_coefficient_names(submodel)
if names is None:
return None
dataset_data = {}
for i in range(names.size):
dataset_data[names[i]] = all_data[:, :, i].reshape((all_data.shape[0], all_data.shape[1]))
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(table_name = 'dataset',
table_data = dataset_data)
ds = Dataset(in_storage=storage, in_table_name='dataset', id_name=[])
return ds
def get_all_data(self, submodel):
return self.model_interaction.get_data(submodel)
def get_specified_coefficients(self):
return self.model_interaction.get_specified_coefficients()
def _get_status_total_pieces(self):
return ChunkModel._get_status_total_pieces(self) * self.get_status_for_gui().get_total_number_of_pieces()
def _get_status_current_piece(self):
return ChunkModel._get_status_current_piece(self)*self.get_status_for_gui().get_total_number_of_pieces() + self.get_status_for_gui().get_current_piece()
def _get_status_piece_description(self):
return "%s %s" % (ChunkModel._get_status_piece_description(self), self.get_status_for_gui().get_current_piece_description())
def get_attribute_for_submodel(self, name, config):
values = self.model_interaction.interaction_dataset.compute_variables(name, dataset_pool=self.dataset_pool)
return values[self.observations_mapping[config['submodel']],:]
class ModelInteraction:
""" This class handles all the work that involves the interaction dataset of the Choice model."""
def __init__(self, model, package=None, choice_set=None):
self.interaction_package = package
self.choice_set = choice_set
if (choice_set is not None):
if isinstance(choice_set, str): # can be a dataset name
self.choice_set = model.dataset_pool.get_dataset(choice_set)
self.interaction_module = None
self.interaction_class_name = None
self.interaction_dataset = None
self.model = model
self.interaction_resources = None
self.data = {}
self.specified_coefficients = None
self.chosen_choice = None
self.chosen_choice_per_submodel = {}
self.submodel_coefficients = {}
self.data_include_rows = {}
def set_agent_set(self, agent_set):
self.agent_set = agent_set
factory = DatasetFactory()
self.interaction_module, self.interaction_class_name = \
factory.compose_interaction_dataset_name(self.agent_set.get_dataset_name(),
self.choice_set.get_dataset_name())
    def create_interaction_datasets(self, agents_index, choice_set_index):
        """Build the agent x choice interaction dataset for the given indices.

        Tries to instantiate the specialized interaction class composed by
        set_agent_set; falls back to the generic InteractionDataset when that
        class cannot be imported.
        """
        # free memory of existing interaction set
        if isinstance(self.interaction_dataset, InteractionDataset):
            self.interaction_dataset.unload_all_attributes()
            gc.collect()
        self.interaction_resources = Resources({"debug":self.model.debug})
        try:
            self.interaction_dataset = ClassFactory().get_class(
                self.interaction_package+"."+self.interaction_module,
                class_name=self.interaction_class_name,
                arguments={"dataset1":self.agent_set, "dataset2":self.choice_set,
                           "index1":agents_index, "index2":choice_set_index})
        except ImportError:
            # no specialized interaction class available - use the generic one
            self.interaction_dataset = InteractionDataset(dataset1=self.agent_set, dataset2=self.choice_set,
                                                          index1=agents_index, index2=choice_set_index, debug=self.model.debug)
def get_choice_index(self):
if self.interaction_dataset is None:
return array([])
return self.interaction_dataset.get_2d_index()
def get_choice_index_for_submodel(self, submodel):
index = self.get_choice_index()
if index is not None:
index = take(index, self.model.observations_mapping[submodel], axis=0)
if submodel in self.data_include_rows.keys():
index = compress(self.data_include_rows[submodel], index, axis=0)
return index
def get_choice_ids_for_submodel(self, submodel):
index = self.get_choice_index_for_submodel(submodel)
if index is not None:
return self.interaction_dataset.get_dataset(2).get_id_attribute()[index]
def get_agent_ids_for_submodel(self, submodel):
return self.interaction_dataset.get_id_attribute_of_dataset(1)[self.model.observations_mapping[submodel]]
def compute_variables(self, variables=None):
if variables is not None:
var_list_for_this_choice_set = variables
else:
var_list_for_this_choice_set = \
self.specified_coefficients.get_variables_without_constants_and_reserved_names()
if var_list_for_this_choice_set is not None and len(var_list_for_this_choice_set) > 0:
self.interaction_dataset.compute_variables(var_list_for_this_choice_set,
dataset_pool=self.model.dataset_pool,
resources = self.interaction_resources)
def prepare_data_for_simulation(self, submodel):
# free up memory from previous chunks
if submodel in self.data.keys():
del self.data[submodel]
gc.collect()
self.submodel_coefficients[submodel] = SpecifiedCoefficientsFor1Submodel(self.specified_coefficients, submodel)
self.data[submodel] = self.interaction_dataset.create_logit_data(self.submodel_coefficients[submodel],
index = self.model.observations_mapping[submodel])
def prepare_data_for_estimation(self, submodel):
self.submodel_coefficients[submodel] = SpecifiedCoefficientsFor1Submodel(self.specified_coefficients, submodel)
self.data[submodel] = self.interaction_dataset.create_logit_data_from_beta_alt(
self.submodel_coefficients[submodel],
index=self.model.observations_mapping[submodel])
    def convert_data_from_estimation_to_simulation_format(self, submodel):
        """Collapse the submodel's estimation data (one column per
        alternative-specific coefficient) into simulation format (one column
        per coefficient), summing columns that map to the same coefficient.

        Returns the data unchanged when the column counts already agree.
        """
        from numpy import repeat, newaxis, sort, reshape
        from opus_core.ndimage import sum as ndimage_sum
        coef = self.submodel_coefficients[submodel]
        data = self.data[submodel]
        nvar = coef.get_coefficient_values().shape[1]
        if data.shape[2] == nvar: return data # no difference
        # label each data column with its target coefficient (+1 so that 0 is
        # unused), shifting the labels per alternative so sums stay separate
        labels = repeat(coef.get_coefmap_alt()[newaxis,...]+1, data.shape[1], axis=0)
        for i in range(1, labels.shape[0]):
            labels[i,:] = labels[i,:] + i*nvar
        index = repeat(unique(coef.get_coefmap_alt())[newaxis,...]+1, data.shape[1], axis=0)
        for i in range(1, index.shape[0]):
            index[i,:] = index[i,:] + i*nvar
        data_sim = zeros((data.shape[0], data.shape[1], nvar), dtype=data.dtype)
        # per observation: sum the columns sharing a label into one column
        for i in range(data_sim.shape[0]):
            data_sim[i,:,:] = reshape(array(ndimage_sum(data[i,:,:], labels, index=index)), index.shape)
        return data_sim
def get_submodel_coefficients(self, submodel):
return self.submodel_coefficients[submodel]
def remove_rows_from_data(self, where_not_remove, submodel):
self.data[submodel] = compress(where_not_remove, self.data[submodel], axis=0)
def get_data(self, submodel):
return self.data[submodel]
    def create_specified_coefficients(self, coefficients, specification, choice_ids=None):
        """Create and store the SpecifiedCoefficients object for this model.

        'choice_ids' is a 1d or 2d array of choice ids per agent.  When 2d
        with identical columns for every agent, a single row is used as the
        equation ids; otherwise the whole 2d array is passed through.
        NOTE(review): the choice_ids=None default would crash on
        len(choice_ids.shape) below - callers apparently always pass an
        array; confirm before relying on the default.
        """
        equation_ids=None
        if len(choice_ids.shape) > 1:
            # do all agents see the same choice ids in every column?
            same_ids = True
            for i in range(choice_ids.shape[1]):
                if not alltrue(choice_ids[:,i]==choice_ids[0,i]):
                    same_ids=False
                    break
            if same_ids:
                equation_ids = choice_ids[0,:]
            else:
                equation_ids = choice_ids
        nchoices = self.model.get_choice_set_size()
        self.specified_coefficients = SpecifiedCoefficients().create(coefficients, specification, neqs=nchoices,
                                                                     equation_ids=equation_ids)
def get_specified_coefficients(self):
return self.specified_coefficients
def set_coefficients(self, coefficients):
self.specified_coefficients.coefficients = coefficients
def get_submodels(self):
return self.specified_coefficients.get_submodels()
def set_chosen_choice_if_necessary(self, agents_index=None, chosen_choice=None):
if 'chosen_choice' not in self.interaction_dataset.get_known_attribute_names():
self.set_chosen_choice(agents_index=agents_index, chosen_choice=chosen_choice)
else:
self.chosen_choice = self.get_chosen_choice_index()
    def set_chosen_choice(self, agents_index=None, chosen_choice=None):
        """Store the boolean 'chosen_choice' attribute on the interaction set.

        'chosen_choice' may be a 1d array of chosen choice indices or a 2d
        boolean matrix; when omitted, it is derived from the choice attribute
        of the agents given by 'agents_index'.  Raises ValueError when
        neither argument is supplied.
        """
        if chosen_choice is None:
            if agents_index is None:
                raise ValueError, "Either agents_index or chosen_choice must be specified"
            else:
                # look up each agent's recorded choice id in the choice set
                chosen_choice = self.choice_set.get_id_index(id=
                    self.agent_set.get_attribute_by_index(self.choice_set.get_id_name()[0],
                                                          agents_index)
                    )
        if chosen_choice.ndim==1:
            # mark the column matching each agent's chosen index
            data = (self.get_choice_index() - chosen_choice[:, newaxis]) == 0
        elif chosen_choice.ndim==2:
            data = chosen_choice
        # NOTE(review): for ndim > 2 'data' would be unbound here and the
        # assert below would raise NameError - confirm inputs are 1d/2d only.
        assert data.shape == self.get_choice_index().shape
        self.interaction_dataset.add_attribute(data=data,
                                               name="chosen_choice")
        self.chosen_choice = self.get_chosen_choice_index()
def get_chosen_choice_index(self):
if 'chosen_choice' in self.interaction_dataset.get_known_attribute_names():
chosen_choice_index = self.get_choice_index()[self.interaction_dataset.get_attribute('chosen_choice')]
return chosen_choice_index
def get_chosen_choice(self):
if "chosen_choice" in self.interaction_dataset.get_known_attribute_names():
chosen_choice = self.interaction_dataset.get_attribute("chosen_choice")
return chosen_choice
def set_chosen_choice_for_submodel_and_update_data(self, submodel):
    """Slice the global chosen-choice indicator matrix down to *submodel*,
    drop observations whose chosen alternative is not covered by the
    submodel's equations, cache the result and return it.

    Side effects: may remove rows from the estimation data (via
    remove_rows_from_data), and updates self.data_include_rows and
    self.chosen_choice_per_submodel for this submodel.
    """
    chosen_choice = self.get_chosen_choice()
    # keep only the observations (rows) that belong to this submodel
    is_submodel_chosen_choice = take(chosen_choice, indices=self.model.observations_mapping[submodel],
                                     axis=0)
    # remove choices not being chosen by any agents
    is_submodel_chosen_choice = take(is_submodel_chosen_choice,
                                     indices=self.submodel_coefficients[submodel].get_equations_index(),
                                     axis=1)
    # a row summing to zero means the agent's chosen alternative lies outside
    # this submodel's equation set
    sumchoice = is_submodel_chosen_choice.sum(axis=1, dtype=int32)
    where_not_remove = where(sumchoice == 0, False, True)
    if False in where_not_remove:
        # drop such rows from both the indicator matrix and the estimation data
        is_submodel_chosen_choice = compress(where_not_remove, is_submodel_chosen_choice, axis=0)
        self.remove_rows_from_data(where_not_remove, submodel)
    self.data_include_rows[submodel] = where_not_remove
    self.chosen_choice_per_submodel[submodel] = is_submodel_chosen_choice
    return is_submodel_chosen_choice
def get_chosen_choice_for_submodel(self, submodel):
    """Return the cached chosen-choice indicator matrix for *submodel*."""
    cached = self.chosen_choice_per_submodel
    return cached[submodel]
def get_coefficient_names(self, submodel):
    """Return coefficient names (per-alternative layout) for *submodel*."""
    coefs = self.submodel_coefficients[submodel]
    return coefs.get_coefficient_names_from_alt()
def get_variable_names(self, submodel):
    """Return variable names (per-alternative layout) for *submodel*."""
    coefs = self.submodel_coefficients[submodel]
    return coefs.get_variable_names_from_alt()
def get_variable_names_for_simulation(self, submodel):
    """Return the simulation variable names of *submodel* as a numpy array."""
    names = self.submodel_coefficients[submodel].get_variable_names()
    return array(names)
def get_coefficient_fixed_values(self, submodel):
    """Delegate to the specification for fixed coefficient values of *submodel*."""
    specification = self.specified_coefficients.specification
    return specification.get_coefficient_fixed_values_for_submodel(submodel)
def is_there_data(self, submodel):
    """Return True iff the estimation data matrix for *submodel* is non-empty."""
    data = self.data[submodel]
    return (data.shape[0] > 0) and (data.size > 0)
def compute_availabilities(self, submodels=None):
    """Compute the model's availability expression(s) on the interaction dataset.

    If the expression contains the token 'SUBMODEL' and specific submodels are
    given, one 'availability_<submodel>' attribute is computed per submodel;
    otherwise a single 'availability' attribute is computed.

    submodels -- optional list of submodel ids (None/[] means "no specific
                 submodels"); a single negative id is treated the same way.

    Does nothing when the model has no (or a None) 'availability' attribute.
    """
    # fix: the original read self.model.availability *before* its
    # hasattr() guard, so models without the attribute raised AttributeError
    # and the guard was dead code.  getattr with a default restores the
    # intended "silently do nothing" behaviour.
    availability_string = getattr(self.model, "availability", None)
    if availability_string is None:
        return
    # backward-compatible replacement for the mutable default argument
    if submodels is None:
        submodels = []
    if len(submodels) == 0 or (len(submodels) == 1 and submodels[0] < 0) \
            or re.search('SUBMODEL', availability_string) is None:
        # one global availability attribute
        self.interaction_dataset.compute_variables(availability_string)
        short_name = VariableName(availability_string).get_alias()
        if short_name != 'availability':
            self.interaction_dataset.compute_variables('availability=%s' % short_name)
    else:
        # one availability attribute per submodel, substituting its id
        for submodel in submodels:
            availability_string_subm = re.sub('SUBMODEL', str(submodel), availability_string)
            self.interaction_dataset.compute_variables(availability_string_subm)
            short_name = VariableName(availability_string_subm).get_alias()
            if short_name != 'availability_%s' % submodel:
                self.interaction_dataset.compute_variables('availability_%s=%s' % (submodel, short_name))
def get_availability(self, submodel):
    """Return availability rows for *submodel*, filtered by the rows actually
    included in its estimation data; None when no availability attribute exists."""
    dataset = self.interaction_dataset
    known = dataset.get_known_attribute_names()
    rows = None
    per_submodel_name = 'availability_%s' % submodel
    if per_submodel_name in known:
        rows = dataset[per_submodel_name][self.model.observations_mapping[submodel], :]
    # NOTE(review): a generic 'availability' attribute takes precedence over the
    # submodel-specific one here (it overwrites it) -- confirm this is intended.
    if 'availability' in known:
        rows = dataset['availability'][self.model.observations_mapping[submodel], :]
    if rows is not None and submodel in self.data_include_rows.keys():
        return compress(self.data_include_rows[submodel], rows, axis=0)
    return rows
import os
import tempfile
from shutil import rmtree
from opus_core.tests import opus_unittest
from numpy import ma, alltrue
from opus_core.ndimage import sum as ndimage_sum
from opus_core.tests.stochastic_test_case import StochasticTestCase
from opus_core.simulation_state import SimulationState
from opus_core.misc import load_table_from_text_file, unique
class TestChoiceModel(StochasticTestCase):
    """Stochastic tests for ChoiceModel: estimation and simulation of small
    multinomial-logit mode/location choice setups built on dict storage.

    Each test constructs a synthetic household table, runs the model (often
    repeatedly via run_stochastic_test) and checks the resulting choice
    distribution and/or estimated coefficients against the known data.
    """
    def tearDown(self):
        # clean up the simulation cache created by the model runs
        SimulationState().remove_base_cache_directory()

    def test_do_nothing_if_no_agents(self):
        """Running the model with an empty agents_index must return an empty result."""
        storage = StorageFactory().get_storage('dict_storage')
        storage.write_table(
            table_name = 'households',
            table_data = {
                "household_id": arange(10000)+1,
                "autos": array(10000*[-1])
                }
            )
        #create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        # create coefficients and specification
        coefficients = Coefficients(names=("costcoef", ), values=(-0.001,))
        specification = EquationSpecification(variables=("autos", ), coefficients=("costcoef", ))
        # run the model
        cm = ChoiceModel(choice_set=[0,1,2,3], choices = "opus_core.random_choices_from_index")
        result = cm.run(specification, coefficients, agent_set=households, agents_index=array([], dtype='int32'), debuglevel=1)
        # check
        self.assertEqual(result.size , 0)

    def test_agents_do_not_choose_certain_mode_if_low_income(self):
        """4 modes;
        10,000 households - 5000 with no low income, 5000 with low income
        Attractiveness for mode 4 if low income is -100, otherwise 0.001.
        Result: No household with low income should choose mode 4. The number of households that chose
        remaining modes should be equally distributed.
        """
        #create households
        household_data = {"household_id": arange(10000)+1, "is_low_income": array(5000*[0]+5000*[1])}
        modes=array([1,2,3,4])
        # create coefficients and specification (different coefficient names for each equation)
        coefficients = Coefficients(names=("li1", "li2","li3","li4"), values=(0.001, 0.001, 0.001, -100))
        specification = EquationSpecification(variables=("household.is_low_income", "household.is_low_income",
                                                         "household.is_low_income", "household.is_low_income"),
                                              coefficients=("li1", "li2","li3","li4"),
                                              equations=(1,2,3,4))
        # using ASCs
        # coefficients = Coefficients(names=("li1", "asc2", "li2", "asc3", "li3", "asc4", "li4"),
        #                             values=(0, 0.1, 0, 0.1, 0, 0.1, -100))
        # specification = EquationSpecification(variables=("household.is_low_income", "constant", "household.is_low_income",
        #                                                  "constant", "household.is_low_income", "constant", "household.is_low_income"),
        #                                       coefficients=("li1", "asc2", "li2", "asc3", "li3", "asc4", "li4"),
        #                                       equations=(1,2,2,3,3,4,4))
        tmp = ones(5000, dtype="int32")
        # run the model
        def run_model():
            # fresh storage/model per stochastic repetition
            storage = StorageFactory().get_storage('dict_storage')
            storage.write_table(
                table_name = 'households',
                table_data = household_data)
            households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
            cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
            result = cm.run(specification, coefficients, agent_set=households,
                            chunk_specification={'nchunks':1},
                            debuglevel=1)
            # per-mode counts for non-low-income and low-income households
            nli = ndimage_sum(tmp, labels=result[0:5000], index=modes)
            li = ndimage_sum(tmp, labels=result[5000:10000], index=modes)
            return concatenate((nli, li))
        # first 4 - distribution of modes should be the same for households with no low income
        # second 4 - distribution of modes 1-3 should be the same for households with low income and 0 for mode 4.
        expected_results = array(4*[1250]+3*[1667]+[0])
        self.run_stochastic_test(__file__, run_model, expected_results, 10)

    def test_estimate_and_simulate_4_mode_model(self):
        """4 modes;
        10,000 households - 5000 with no low income, 5000 with low income
        The modes 1-4 are equally distributed among households that don't have low income.
        Households with low income decided equally for choices 2 and 3 (2490 hhs per choice) and only few of them decided for choices
        1 and 4 (10 households per choice).
        Coefficients for "is_low_income" are estimated separately for each choice where the first
        choice is the reference alternative (coef. li2, li3, li4).
        Result: Coefficient li4 should be close to 0, since the same number of households with low income decided for
        alternative 1 and 4. Coefficient li2 and li3 should be positive and equal.
        A simulation with the estimated coefficients is run and the resulting distribution should correspond to the original
        data.
        """
        storage = StorageFactory().get_storage('dict_storage')
        household_data = {
            'household_id': arange(10000)+1,
            'is_low_income': array(5000*[0]+5000*[1]),
            'choice_id':array(1250*[1] + 1250*[2] + 1250*[3] + 1250*[4] + 10*[4] + 2490*[3] + 10*[1] + 2490*[2])
            }
        storage.write_table(
            table_name = 'households',
            table_data = household_data)
        # create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        modes=array([1,2,3,4])
        specification = EquationSpecification(variables=("household.is_low_income",
                                                         "household.is_low_income",
                                                         "household.is_low_income", "constant"),
                                              coefficients=( "li2", "li3","li4","c"),
                                              equations=(2,3,4,1))
        cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
        coef, dummy = cm.estimate(specification, agent_set = households,
                                  procedure="opus_core.bhhh_mnl_estimation", debuglevel=4)
        li2=coef.get_values_of_one_coefficient("li2")
        li3=coef.get_values_of_one_coefficient("li3")
        li4=coef.get_values_of_one_coefficient("li4")
        self.assertEqual(ma.allclose(li2, li3 , rtol=0.00001), True)
        self.assertEqual(li2 > 1, True)
        self.assertEqual(ma.allclose(li4, 0 , rtol=0.00001), True)
        tmp = ones(5000, dtype="int32")
        def run_model():
            storage = StorageFactory().get_storage('dict_storage')
            storage.write_table(
                table_name = 'households',
                table_data = household_data)
            households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
            cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
            # run a simulation with the estimated coefficients
            result = cm.run(specification, coef, agent_set=households,
                            chunk_specification={'nchunks':1},
                            debuglevel=1, run_config=Resources({"demand_string": "choice.demand"}))
            nli = ndimage_sum(tmp, labels=result[0:5000], index=modes)
            li = ndimage_sum(tmp, labels=result[5000:10000], index=modes)
            # stash aggregated demand on the test instance for the check below
            self.demand = cm.choice_set.get_attribute("demand")
            return concatenate((nli,li))
        # distribution of modes should correspond to the original data
        expected_results = array(4*[1250] + [10, 2490, 2490, 10])
        self.run_stochastic_test(__file__, run_model, expected_results, 10)
        #check aggregated demand
        self.assertEqual(ma.allclose(self.demand, array([1260, 3740, 3740, 1260]) , rtol=0.1), True)
        # estimate with a fixed value of one coefficient and check that the value of this coefficint is the assigned one
        specification = EquationSpecification(variables=("household.is_low_income",
                                                         "household.is_low_income",
                                                         "household.is_low_income", "constant", "__dummy"),
                                              coefficients=( "li2", "li3","li4","c", "a"),
                                              equations=(2,3,4,1,1),
                                              fixed_values=(0,0,0,0,2))
        coef, dummy = cm.estimate(specification, agent_set = households,
                                  procedure="opus_core.bhhh_mnl_estimation", debuglevel=4)
        a=coef.get_values_of_one_coefficient("a")
        self.assertEqual(ma.allclose(a, 2 , rtol=0.00001), True)

    def test_estimate_and_simulate_4_mode_model_with_reference_equation(self):
        """Like test_estimate_and_simulate_4_mode_model, but the reference equation (the first one) does not have any terms.
        Furthermore, the equations have arbitrary ids.
        """
        storage = StorageFactory().get_storage('dict_storage')
        household_data = {
            'household_id': arange(10000)+1,
            'is_low_income': array(5000*[0]+5000*[1]),
            'choice_id':array(1250*[3] + 1250*[5] + 1250*[10] + 1250*[25] + 10*[25] + 2490*[10] + 10*[3] + 2490*[5])
            }
        storage.write_table(
            table_name = 'households',
            table_data = household_data)
        # create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        modes=array([3,5,10,25])
        specification = EquationSpecification(variables=("household.is_low_income", "constant",
                                                         "household.is_low_income", "constant",
                                                         "household.is_low_income", "constant"),
                                              coefficients=( "li2", "c2", "li4", "c4", "li3", "c3"),
                                              equations=(5,5,25,25,10,10))
        cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
        coef, dummy = cm.estimate(specification, agent_set = households,
                                  procedure="opus_core.bhhh_mnl_estimation", debuglevel=4)
        li2=coef.get_values_of_one_coefficient("li2")
        li3=coef.get_values_of_one_coefficient("li3")
        li4=coef.get_values_of_one_coefficient("li4")
        self.assertEqual(ma.allclose(li2, li3 , rtol=0.00001), True)
        self.assertEqual(li2 > 1, True)
        self.assertEqual(ma.allclose(li4, 0 , rtol=0.00001), True)
        tmp = ones(5000, dtype="int32")
        def run_model():
            storage = StorageFactory().get_storage('dict_storage')
            storage.write_table(
                table_name = 'households',
                table_data = household_data)
            households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
            cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
            # run a simulation with the estimated coefficients
            result = cm.run(specification, coef, agent_set=households,
                            chunk_specification={'nchunks':1},
                            debuglevel=1, run_config=Resources({"demand_string": "choice.demand"}))
            nli = ndimage_sum(tmp, labels=result[0:5000], index=modes)
            li = ndimage_sum(tmp, labels=result[5000:10000], index=modes)
            self.demand = cm.choice_set.get_attribute("demand")
            return concatenate((nli, li))
        # distribution of modes should correspond to the original data
        expected_results = array(4*[1250] + [10, 2490, 2490, 10])
        self.run_stochastic_test(__file__, run_model, expected_results, 10)
        #check aggregated demand
        self.assertEqual(ma.allclose(self.demand, array([1260, 3740, 3740, 1260]) , rtol=0.1), True)

    def test_estimate_and_simulate_2_mode_model_with_reference_equation(self):
        """2 modes;
        10,000 households - 5000 with no low income, 5000 with low income
        The modes 1-2 are equally distributed among households that don't have low income.
        Most of the households with low income decided for choice 1 (4700) and much less households decided for choice 2 (300).
        Coefficient for "is_low_income" (li) is estimated for choice 1 (the second choice is the reference alternative
        and does not have any entries in the specification).
        Result: Coefficient li should be positive.
        A simulation with the estimated coefficient is run and the resulting distribution should correspond to the original
        data.
        """
        storage = StorageFactory().get_storage('dict_storage')
        household_data = {
            'household_id': arange(10000)+1,
            'is_low_income': array(5000*[0]+5000*[1]),
            'choice_id':array(2500*[1] + 2500*[2] + 4700*[1] + 300*[2])
            }
        storage.write_table(
            table_name = 'households',
            table_data = household_data)
        # create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        modes=array([1,2])
        specification = EquationSpecification(variables=("household.is_low_income", "constant"),
                                              coefficients=( "li", "c"),
                                              equations=(1,1))
        cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
        coef, dummy = cm.estimate(specification, agent_set = households,
                                  procedure="opus_core.bhhh_mnl_estimation", debuglevel=4)
        li=coef.get_values_of_one_coefficient("li")[0]
        self.assertEqual(li > 1, True)
        tmp = ones(5000, dtype="int32")
        def run_model():
            storage = StorageFactory().get_storage('dict_storage')
            storage.write_table(
                table_name = 'households',
                table_data = household_data)
            households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
            cm = ChoiceModel(choice_set=modes, choices = "opus_core.random_choices")
            # run a simulation with the estimated coefficients
            result = cm.run(specification, coef, agent_set=households,
                            chunk_specification={'nchunks':1},
                            debuglevel=1, run_config=Resources({"demand_string": "choice.demand"}))
            nli = ndimage_sum(tmp, labels=result[0:5000], index=modes)
            li = ndimage_sum(tmp, labels=result[5000:10000], index=modes)
            self.demand = cm.choice_set.get_attribute("demand")
            return concatenate((nli,li))
        # distribution of modes should correspond to the original data
        expected_results = array(2*[2500] + [4700, 300])
        self.run_stochastic_test(__file__, run_model, expected_results, 10)
        #check aggregated demand
        self.assertEqual(ma.allclose(self.demand, array([7200, 2800]) , rtol=0.1), True)

    def test_run_model_and_write_simulation_data(self):
        """Run a sampled location choice model and check that the exported
        probabilities/choices files have one row per agent."""
        temp_dir = tempfile.mkdtemp(prefix='opus_choice_model_test')
        storage = StorageFactory().get_storage('dict_storage')
        household_data = {
            'household_id': arange(100)+1,
            'is_low_income': array(50*[0]+50*[1])
            }
        location_data = {
            'location_id': arange(500) +1,
            'cost': array(100*[20] + 100*[30] + 100*[50] + 200*[100])
            }
        storage.write_table(
            table_name = 'households',
            table_data = household_data)
        storage.write_table(
            table_name = 'locations',
            table_data = location_data)
        specification = EquationSpecification(variables=("household.is_low_income*location.cost",),
                                              coefficients=("lic",))
        coef = Coefficients(names=("lic",), values=(0.01,))
        # create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        locations = Dataset(in_storage=storage, in_table_name='locations', id_name="location_id", dataset_name="location")
        cm = ChoiceModel(choice_set=locations, choices = "opus_core.random_choices",
                         sampler='opus_core.samplers.weighted_sampler')
        cm.run(specification, coef, agent_set=households,
               chunk_specification={'nchunks':2},
               debuglevel=1,
               run_config=Resources({"sample_size_locations": 10,
                                     "export_simulation_data": True,
                                     "simulation_data_file_name": os.path.join(temp_dir, 'sim_data.txt') })
               )
        # 100 agents x (id column + 10 sampled locations) = 100 x 11
        probs = load_table_from_text_file(os.path.join(temp_dir, 'sim_data_probabilities.txt'))[0]
        self.assert_(all(probs.shape == array([100, 11])))
        self.assertEqual(unique(probs[:,0]).size == 100, True)
        choices = load_table_from_text_file(os.path.join(temp_dir, 'sim_data_choices.txt'))[0]
        self.assert_(all(choices.shape == array([100, 11])))
        self.assertEqual(unique(choices[:,0]).size == 100, True)
        rmtree(temp_dir)

    def MASKEDtest_equilibration_choices(self):
        """
        unittest masked because it doesn't work with the default numpy/scipy version on Ubuntu
        """
        storage = StorageFactory().get_storage('dict_storage')
        household_data = {
            'household_id': arange(2000)+1,
            #'submodel': array(1000*[1]+1000*[2])
            'submodel': array(2000*[1]),
            }
        location_data = {
            'location_id': arange(1000) +1,
            'capacity': ones(1000)*2,
            'price': array(500*[10.0] + 500*[0.0])
            }
        storage.write_table(
            table_name = 'households',
            table_data = household_data)
        storage.write_table(
            table_name = 'locations',
            table_data = location_data)
        specification = EquationSpecification(variables=("location.price",),
                                              coefficients=("price",))
        coef = Coefficients(names=("price",), values=(-2.1,))
        # create households
        households = Dataset(in_storage=storage, in_table_name='households', id_name="household_id", dataset_name="household")
        locations = Dataset(in_storage=storage, in_table_name='locations', id_name="location_id", dataset_name="location")
        cm = ChoiceModel(choice_set=locations, choices = "opus_core.upc.equilibration_choices",
                         sampler=None)
        resources = Resources({"price_coef_name": 'price',
                               "capacity": locations['capacity'],
                               "bfgs_kwargs": "{'pgtol': 1e-12}",
                               "lottery_max_iterations": 10
                               })
        choices = cm.run(specification, coef, agent_set=households,
                         chunk_specification={'nchunks':1},
                         debuglevel=1,
                         run_config=resources
                         )
        agents = locations.compute_variables("location.number_of_agents(household)",
                                             resources={'household':households})
        # NOTE(review): 'np' is not imported in this module's visible test
        # imports -- presumably provided elsewhere when the test is unmasked;
        # verify before re-enabling.
        rmse = np.sqrt(np.mean((locations['capacity'] - agents)**2))
        self.assert_(np.allclose(rmse, 0, atol=1e-3))
        price_converged = resources.get('price_converged')
        self.assert_('price_converged' in locations.get_known_attribute_names())
        self.assert_(np.allclose(locations['price_converged'], price_converged))
        self.assert_(np.allclose(np.std(price_converged), 0, atol=1e-3))
# Allow the test suite to be run directly as a script.
if __name__=="__main__":
    opus_unittest.main()
| agpl-3.0 |
MjAbuz/watchdog | vendor/xappy/marshall.py | 4 | 1277 | #!/usr/bin/env python
#
# Copyright (C) 2007 Lemur Consulting Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
r"""marshall.py: Marshal values into strings
"""
__docformat__ = "restructuredtext en"
import math
import xapian
from replaylog import log as _log
def float_to_string(value):
    """Serialise a floating point *value* into a string whose lexicographic
    order matches the numeric order of the input.

    Delegates to xapian.sortable_serialise through the replay logger.
    """
    serialiser = xapian.sortable_serialise
    return _log(serialiser, value)
def date_to_string(date):
    """Serialise *date* as a fixed-width YYYYMMDD string.

    The zero-padded layout makes lexicographic order equal chronological order.
    """
    return "{0.year:04d}{0.month:02d}{0.day:02d}".format(date)
| agpl-3.0 |
jeremymcintyre/jeremymcintyre.github.io | node_modules/bootstrap/node_modules/npm-shrinkwrap/node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/ordered_dict.py | 2354 | 10366 | # Unmodified from http://code.activestate.com/recipes/576693/
# other than to add MIT license header (as specified on page, but not in code).
# Linked from Python documentation here:
# http://docs.python.org/2/library/collections.html#collections.OrderedDict
#
# This should be deleted once Py2.7 is available on all bots, see
# http://crbug.com/241769.
#
# Copyright (c) 2009 Raymond Hettinger.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from thread import get_ident as _get_ident
except ImportError:
from dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
    'Dictionary that remembers insertion order'
    # An inherited dict maps keys to values.
    # The inherited dict provides __getitem__, __len__, __contains__, and get.
    # The remaining methods are order-aware.
    # Big-O running times for all methods are the same as for regular dictionaries.

    # The internal self.__map dictionary maps keys to links in a doubly linked list.
    # The circular doubly linked list starts and ends with a sentinel element.
    # The sentinel element never gets deleted (this simplifies the algorithm).
    # Each link is stored as a list of length three:  [PREV, NEXT, KEY].

    def __init__(self, *args, **kwds):
        '''Initialize an ordered dictionary.  Signature is the same as for
        regular dictionaries, but keyword arguments are not recommended
        because their insertion order is arbitrary.

        '''
        if len(args) > 1:
            raise TypeError('expected at most 1 arguments, got %d' % len(args))
        try:
            # re-__init__ on an existing instance must keep the current order
            self.__root
        except AttributeError:
            self.__root = root = []                     # sentinel node
            root[:] = [root, root, None]
            self.__map = {}
        self.__update(*args, **kwds)

    def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
        'od.__setitem__(i, y) <==> od[i]=y'
        # Setting a new item creates a new link which goes at the end of the linked
        # list, and the inherited dictionary is updated with the new key/value pair.
        if key not in self:
            root = self.__root
            last = root[0]
            last[1] = root[0] = self.__map[key] = [last, root, key]
        dict_setitem(self, key, value)

    def __delitem__(self, key, dict_delitem=dict.__delitem__):
        'od.__delitem__(y) <==> del od[y]'
        # Deleting an existing item uses self.__map to find the link which is
        # then removed by updating the links in the predecessor and successor nodes.
        dict_delitem(self, key)
        link_prev, link_next, key = self.__map.pop(key)
        link_prev[1] = link_next
        link_next[0] = link_prev

    def __iter__(self):
        'od.__iter__() <==> iter(od)'
        # Traverse the linked list in insertion order.
        root = self.__root
        curr = root[1]
        while curr is not root:
            yield curr[2]
            curr = curr[1]

    def __reversed__(self):
        'od.__reversed__() <==> reversed(od)'
        # Traverse the linked list in reverse order.
        root = self.__root
        curr = root[0]
        while curr is not root:
            yield curr[2]
            curr = curr[0]

    def clear(self):
        'od.clear() -> None.  Remove all items from od.'
        try:
            # break the reference cycles in the link nodes so they can be
            # collected without relying on the cyclic garbage collector
            for node in self.__map.itervalues():
                del node[:]
            root = self.__root
            root[:] = [root, root, None]
            self.__map.clear()
        except AttributeError:
            pass
        dict.clear(self)

    def popitem(self, last=True):
        '''od.popitem() -> (k, v), return and remove a (key, value) pair.
        Pairs are returned in LIFO order if last is true or FIFO order if false.

        '''
        if not self:
            raise KeyError('dictionary is empty')
        root = self.__root
        if last:
            # unlink the last node
            link = root[0]
            link_prev = link[0]
            link_prev[1] = root
            root[0] = link_prev
        else:
            # unlink the first node
            link = root[1]
            link_next = link[1]
            root[1] = link_next
            link_next[0] = root
        key = link[2]
        del self.__map[key]
        value = dict.pop(self, key)
        return key, value

    # -- the following methods do not depend on the internal structure --

    def keys(self):
        'od.keys() -> list of keys in od'
        return list(self)

    def values(self):
        'od.values() -> list of values in od'
        return [self[key] for key in self]

    def items(self):
        'od.items() -> list of (key, value) pairs in od'
        return [(key, self[key]) for key in self]

    def iterkeys(self):
        'od.iterkeys() -> an iterator over the keys in od'
        return iter(self)

    def itervalues(self):
        'od.itervalues -> an iterator over the values in od'
        for k in self:
            yield self[k]

    def iteritems(self):
        'od.iteritems -> an iterator over the (key, value) items in od'
        for k in self:
            yield (k, self[k])

    # Suppress 'OrderedDict.update: Method has no argument':
    # pylint: disable=E0211
    def update(*args, **kwds):
        '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.

        If E is a dict instance, does:           for k in E: od[k] = E[k]
        If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
        Or if E is an iterable of items, does:   for k, v in E: od[k] = v
        In either case, this is followed by:     for k, v in F.items(): od[k] = v

        '''
        # self is taken from *args so that a key named 'self' can be updated
        if len(args) > 2:
            raise TypeError('update() takes at most 2 positional '
                            'arguments (%d given)' % (len(args),))
        elif not args:
            raise TypeError('update() takes at least 1 argument (0 given)')
        self = args[0]
        # Make progressively weaker assumptions about "other"
        other = ()
        if len(args) == 2:
            other = args[1]
        if isinstance(other, dict):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, 'keys'):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        for key, value in kwds.items():
            self[key] = value

    __update = update # let subclasses override update without breaking __init__

    __marker = object()

    def pop(self, key, default=__marker):
        '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
        If key is not found, d is returned if given, otherwise KeyError is raised.

        '''
        if key in self:
            result = self[key]
            del self[key]
            return result
        if default is self.__marker:
            raise KeyError(key)
        return default

    def setdefault(self, key, default=None):
        'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
        if key in self:
            return self[key]
        self[key] = default
        return default

    def __repr__(self, _repr_running={}):
        'od.__repr__() <==> repr(od)'
        # _repr_running is a deliberate shared default: it tracks (id, thread)
        # pairs to detect recursive references and print them as '...'
        call_key = id(self), _get_ident()
        if call_key in _repr_running:
            return '...'
        _repr_running[call_key] = 1
        try:
            if not self:
                return '%s()' % (self.__class__.__name__,)
            return '%s(%r)' % (self.__class__.__name__, self.items())
        finally:
            del _repr_running[call_key]

    def __reduce__(self):
        'Return state information for pickling'
        items = [[k, self[k]] for k in self]
        inst_dict = vars(self).copy()
        # drop the internal bookkeeping attributes; order is rebuilt from items
        for k in vars(OrderedDict()):
            inst_dict.pop(k, None)
        if inst_dict:
            return (self.__class__, (items,), inst_dict)
        return self.__class__, (items,)

    def copy(self):
        'od.copy() -> a shallow copy of od'
        return self.__class__(self)

    @classmethod
    def fromkeys(cls, iterable, value=None):
        '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
        and values equal to v (which defaults to None).

        '''
        d = cls()
        for key in iterable:
            d[key] = value
        return d

    def __eq__(self, other):
        '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
        while comparison to a regular mapping is order-insensitive.

        '''
        if isinstance(other, OrderedDict):
            return len(self)==len(other) and self.items() == other.items()
        return dict.__eq__(self, other)

    def __ne__(self, other):
        return not self == other

    # -- the following methods are only used in Python 2.7 --

    def viewkeys(self):
        "od.viewkeys() -> a set-like object providing a view on od's keys"
        return KeysView(self)

    def viewvalues(self):
        "od.viewvalues() -> an object providing a view on od's values"
        return ValuesView(self)

    def viewitems(self):
        "od.viewitems() -> a set-like object providing a view on od's items"
        return ItemsView(self)
| mit |
CuonDeveloper/cuon | cuon_client/cuon/Staff/staff.py | 3 | 17134 | # -*- coding: utf-8 -*-
##Copyright (C) [2003] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
from types import *
import pygtk
pygtk.require('2.0')
import gtk
#import gtk.glade
import gobject
import locale
from locale import gettext as _
from cuon.Databases.SingleData import SingleData
import SingleStaff
import SingleStaffFee
import SingleStaffMisc
import SingleStaffVacation
import SingleStaffDisease
import logging
from cuon.Windows.chooseWindows import chooseWindows
import cuon.DMS.documentTools
import cuon.DMS.dms
class staffwindow(chooseWindows):
def __init__(self, allTables):
chooseWindows.__init__(self)
self.loadGlade('staff.xml','StaffMainwindow' )
self.win1 = self.getWidget('StaffMainwindow')
self.oDocumentTools = cuon.DMS.documentTools.documentTools()
self.ModulNumber = self.MN['Staff']
self.allTables = allTables
self.singleStaff = SingleStaff.SingleStaff(allTables)
self.singleStaffFee = SingleStaffFee.SingleStaffFee(allTables)
self.singleStaffMisc = SingleStaffMisc.SingleStaffMisc(allTables)
self.singleStaffVacation = SingleStaffVacation.SingleStaffVacation(allTables)
self.singleStaffDisease = SingleStaffDisease.SingleStaffDisease(allTables)
# self.singleStaff.loadTable()
self.entriesStaffs = 'staff.xml'
self.entriesStaffsFee = 'staff_fee.xml'
self.entriesStaffsMisc = 'staff_misc.xml'
self.entriesStaffsVacation = 'staff_vacation.xml'
self.entriesStaffsDisease = 'staff_disease.xml'
#singleStaff
self.loadEntries(self.entriesStaffs)
self.singleStaff.setEntries(self.getDataEntries( self.entriesStaffs) )
self.singleStaff.setGladeXml(self.xml)
self.singleStaff.setTreeFields( ['lastname','firstname',"id"] )
self.singleStaff.setStore( gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING,gobject.TYPE_UINT) )
self.singleStaff.setTreeOrder('lastname')
self.singleStaff.setTree(self.getWidget('tree1') )
self.singleStaff.setListHeader(['lastname','firstname',_("ID") ])
print 'Widgets - win = ', `self.win1`
print 'Widgets - tree1 = ', `self.getWidget('tree1')`
#singleStaffFee
self.loadEntries(self.entriesStaffsFee)
self.singleStaffFee.setEntries(self.getDataEntries( self.entriesStaffsFee) )
self.singleStaffFee.setGladeXml(self.xml)
self.singleStaffFee.sWhere ='where staff_id = ' + `self.singleStaff.ID`
self.singleStaffFee.setTree(self.getWidget('tree1') )
#singleStaffMisc
self.loadEntries(self.entriesStaffsMisc)
self.singleStaffMisc.setEntries(self.getDataEntries( self.entriesStaffsMisc) )
self.singleStaffMisc.setGladeXml(self.xml)
self.singleStaffMisc.sWhere ='where staff_id = ' + `self.singleStaff.ID`
self.singleStaffMisc.setTree(self.getWidget('tree1') )
#singleStaffVacation
self.loadEntries(self.entriesStaffsVacation)
self.singleStaffVacation.setEntries(self.getDataEntries( self.entriesStaffsVacation) )
self.singleStaffVacation.setGladeXml(self.xml)
self.singleStaffVacation.setTreeFields( ['name', 'designation'] )
self.singleStaffVacation.setStore( gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING,gobject.TYPE_UINT) )
self.singleStaffVacation.setTreeOrder('name')
self.singleStaffVacation.sWhere ='where staff_id = ' + `self.singleStaff.ID`
self.singleStaffVacation.setTree(self.getWidget('tree1') )
#singleStaffDisease
self.loadEntries(self.entriesStaffsDisease)
self.singleStaffDisease.setEntries(self.getDataEntries( self.entriesStaffsDisease) )
self.singleStaffDisease.setGladeXml(self.xml)
self.singleStaffDisease.setTreeFields( ['name', 'designation'] )
self.singleStaffDisease.setStore( gtk.ListStore( gobject.TYPE_STRING, gobject.TYPE_STRING,gobject.TYPE_UINT) )
self.singleStaffDisease.setTreeOrder('name')
self.singleStaffDisease.sWhere ='where staff_id = ' + `self.singleStaff.ID`
self.singleStaffDisease.setTree(self.getWidget('tree1') )
# Menu-items
self.initMenuItems()
# Close Menus for Tab
self.addEnabledMenuItems('tabs','staff1')
self.addEnabledMenuItems('tabs','fee1')
self.addEnabledMenuItems('tabs','misc1')
self.addEnabledMenuItems('tabs','vacation1')
self.addEnabledMenuItems('tabs','disease1')
# seperate Menus
self.addEnabledMenuItems('staff','staff1')
self.addEnabledMenuItems('fee','fee1')
self.addEnabledMenuItems('misc','misc1')
self.addEnabledMenuItems('vacation','vacation1')
self.addEnabledMenuItems('disease','disease1')
# enabledMenues for Staff
self.addEnabledMenuItems('editStaff','new1', self.dicUserKeys['staff_new'])
self.addEnabledMenuItems('editStaff','clear1', self.dicUserKeys['staff_delete'])
self.addEnabledMenuItems('editStaff','print1', self.dicUserKeys['staff_print'])
self.addEnabledMenuItems('editStaff','edit1',self.dicUserKeys['staff_edit'])
#enabledMenues for Stafffee
self.addEnabledMenuItems('editFee','fee_new1', self.dicUserKeys['staff_fee_new'])
self.addEnabledMenuItems('editFee','fee_edit1', self.dicUserKeys['staff_fee_edit'])
#enabledMenues for StaffMisc
self.addEnabledMenuItems('editMisc','misc_edit1', self.dicUserKeys['staff_misc_edit'])
#enabledMenues for StaffVacation
self.addEnabledMenuItems('editVacation','vacation_new1', self.dicUserKeys['staff_vacation_new'])
self.addEnabledMenuItems('editVacation','vacation_delete1')
self.addEnabledMenuItems('editVacation','vacation_edit1', self.dicUserKeys['staff_vacation_edit'])
#enabledMenues for StaffDisease
self.addEnabledMenuItems('editDisease','disease_new1', self.dicUserKeys['staff_disease_new'])
self.addEnabledMenuItems('editDisease','disease_delete1')
self.addEnabledMenuItems('editD','disease_edit1', self.dicUserKeys['staff_disease_edit'])
# enabledMenues for Save
self.addEnabledMenuItems('editSave','save1', self.dicUserKeys['staff_save'])
self.addEnabledMenuItems('editSave','fee_save1', self.dicUserKeys['staff_save'])
self.addEnabledMenuItems('editSave','misc_save1', self.dicUserKeys['staff_save'])
self.addEnabledMenuItems('editSave','vacation_save1', self.dicUserKeys['staff_save'])
self.addEnabledMenuItems('editSave','disease_save1', self.dicUserKeys['staff_save'])
# tabs from notebook
self.tabStaff = 0
self.tabFee = 1
self.tabMisc = 2
self.tabVacation = 3
self.tabDisease = 4
# start
self.tabChanged()
## # init Comboboxes
## tax_vat = self.rpc.callRP('src.Misc.py_getListOfTaxVat', self.dicUser)
## cb = self.getWidget('cbVat')
##
## for i in range(len(tax_vat)) :
## li = gtk.ListItem(tax_vat[i])
## cb.list.append_items([li])
## li.show()
self.win1.add_accel_group(self.accel_group)
#Menu File
def on_quit1_activate(self, event):
print "exit staffs v2"
self.closeWindow()
#Menu Staff
def on_save1_activate(self, event):
print "save staffs v2"
self.singleStaff.save()
self.setEntriesEditable(self.entriesStaffs, False)
self.tabChanged()
def on_new1_activate(self, event):
print "new staffs v2"
self.singleStaff.newRecord()
self.setEntriesEditable(self.entriesStaffs, True)
def on_edit1_activate(self, event):
self.setEntriesEditable(self.entriesStaffs, True)
def on_clear1_activate(self, event):
print "delete staffs v2"
self.singleStaff.deleteRecord()
# Fee
def on_fee_save1_activate(self, event):
print "save staffs Fee v2"
self.singleStaffFee.staffID = self.singleStaff.ID
self.singleStaffFee.save()
self.setEntriesEditable(self.entriesStaffsFee, False)
self.tabChanged()
def on_fee_new1_activate(self, event):
print "new Fee staffs v2"
self.singleStaffFee.newRecord()
self.setEntriesEditable(self.entriesStaffsFee, True)
def on_fee_edit1_activate(self, event):
self.setEntriesEditable(self.entriesStaffsFee, True)
def on_fee_clear1_activate(self, event):
print "delete fee staffs v2"
self.singleStaffFee.deleteRecord()
# Misc
def on_misc_save1_activate(self, event):
print "save staffs Fee v2"
self.singleStaffMisc.staffID = self.singleStaff.ID
self.singleStaffMisc.save()
self.setEntriesEditable(self.entriesStaffsMisc, False)
self.tabChanged()
def on_misc_new1_activate(self, event):
print "new misc staffs v2"
self.singleStaffMisc.newRecord()
self.setEntriesEditable(self.entriesStaffsMisc, True)
def on_misc_edit1_activate(self, event):
self.setEntriesEditable(self.entriesStaffsMisc, True)
def on_misc_delete1_activate(self, event):
print "delete miscstaffs v2"
self.singleStaffMisc.deleteRecord()
# Vacation
def on_vacation_save1_activate(self, event):
print "save vacation Fee v2"
self.singleStaffVacation.staffID = self.singleStaff.ID
self.singleStaffVacation.save()
self.setEntriesEditable(self.entriesStaffsVacation, False)
self.tabChanged()
def on_vacation_new1_activate(self, event):
print "new vacation staffs v2"
self.singleStaffVacation.newRecord()
self.setEntriesEditable(self.entriesStaffsVacation, True)
def on_vacation_edit1_activate(self, event):
self.setEntriesEditable(self.entriesStaffsVacation, True)
def on_vacation_delete1_activate(self, event):
print "delete vacation staffs v2"
self.singleStaffVacation.deleteRecord()
# Disease
def on_disease_save1_activate(self, event):
print "save staff disease v2"
self.singleStaffDisease.staffID = self.singleStaff.ID
self.singleStaffDisease.save()
self.setEntriesEditable(self.entriesStaffsDisease, False)
self.tabChanged()
def on_disease_new1_activate(self, event):
print "new disease staffs v2"
self.singleStaffDisease.newRecord()
self.setEntriesEditable(self.entriesStaffsDisease, True)
def on_disease_edit1_activate(self, event):
self.setEntriesEditable(self.entriesStaffsDisease, True)
def on_disease_delete1_activate(self, event):
print "delete disease staffs v2"
self.singleStaffDisease.deleteRecord()
def on_chooseStaff_activate(self, event):
# choose Staff from other Modul
print '############### Staff choose ID ###################'
self.setChooseValue(self.singleStaff.ID)
self.closeWindow()
# search button
def on_bSearch_clicked(self, event):
self.searchStaff()
def on_eFindNumber_editing_done(self, event):
print 'Find Number'
self.searchStaff()
def on_eFindNumber_key_press_event(self, entry,event):
if self.checkKey(event,'NONE','Return'):
self.searchStaff()
def on_eFindDesignation_editing_done(self, event):
print 'Find Designation'
self.searchStaff()
def on_eFindDesignation_key_press_event(self, entry,event):
if self.checkKey(event,'NONE','Return'):
self.searchStaff()
def on_tree1_row_activated(self, event, data1, data2):
print 'DoubleClick tree1'
if self.tabOption == self.tabStaff:
self.activateClick('chooseStaff', event)
def searchStaff(self):
self.out( 'Searching ....', self.ERROR)
sNumber = self.getWidget('eFindNumber').get_text()
sDesignation = self.getWidget('eFindDesignation').get_text()
self.out('Name and City = ' + sNumber + ', ' + sDesignation, self.ERROR)
#self.singleStaff.sWhere = 'where number ~* \'.*' + sNumber + '.*\' and designation ~* \'.*' + sDesignation + '.*\''
liSearch = ['number',sNumber, 'designation', sDesignation]
self.singleStaff.sWhere = self.getWhere(liSearch)
self.out(self.singleStaff.sWhere, self.ERROR)
self.refreshTree()
# #choose Manufactor button
# def on_bChooseManufactor_clicked(self, event):
# adr = cuon.Addresses.addresses.addresswindow(self.allTables)
# adr.setChooseEntry(_('chooseAddress'), self.getWidget( 'eManufactorNumber'))
#
# # signals from entry eManufactorNumber
#
# def on_eManufactorNumber_changed(self, event):
# print 'eManufactor changed'
# eAdrField = self.getWidget('eManufactorField1')
# liAdr = self.singleAddress.getAddress(self.getWidget( 'eManufactorNumber').get_text())
# eAdrField.set_text(liAdr[0] + ', ' + liAdr[4])
def on_bShowStaffDMS_clicked(self, event):
print 'dms clicked'
if self.singleStaff.ID > 0:
print 'ModulNumber', self.ModulNumber
Dms = cuon.DMS.dms.dmswindow(self.allTables, self.ModulNumber, {'1':self.singleStaff.ID})
def refreshTree(self):
self.singleStaff.disconnectTree()
self.singleStaffFee.disconnectTree()
self.singleStaffMisc.disconnectTree()
self.singleStaffVacation.disconnectTree()
self.singleStaffDisease.disconnectTree()
if self.tabOption == self.tabStaff:
self.singleStaff.connectTree()
self.singleStaff.refreshTree()
elif self.tabOption == self.tabFee:
self.singleStaffFee.sWhere ='where staff_id = ' + `int(self.singleStaff.ID)`
self.singleStaffFee.getFirstListRecord()
self.singleStaffFee.fillEntries(self.singleStaffFee.ID)
elif self.tabOption == self.tabMisc:
self.singleStaffMisc.sWhere ='where staff_id = ' + `int(self.singleStaff.ID)`
self.singleStaffMisc.getFirstListRecord()
self.singleStaffMisc.fillEntries(self.singleStaffFee.ID)
elif self.tabOption == self.tabVacation:
self.singleStaffVacation.sWhere ='where staff_id = ' + `int(self.singleStaff.ID)`
self.singleStaffVacation.connectTree()
self.singleStaffVacation.refreshTree()
elif self.tabOption == self.tabDisease:
self.singleStaffDisease.sWhere ='where staff_id = ' + `int(self.singleStaff.ID)`
self.singleStaffDisease.connectTree()
self.singleStaffDisease.refreshTree()
def tabChanged(self):
print 'tab changed to :' + str(self.tabOption)
self.setTreeVisible(True)
if self.tabOption == self.tabStaff:
#Staff
self.disableMenuItem('tabs')
self.enableMenuItem('staff')
print 'Seite 0'
self.editAction = 'editStaff'
elif self.tabOption == self.tabFee:
#Fee
self.disableMenuItem('tabs')
self.enableMenuItem('fee')
self.editAction = 'editFee'
print 'Seite 1'
elif self.tabOption == self.tabMisc:
#Misc
self.disableMenuItem('tabs')
self.enableMenuItem('misc')
self.editAction = 'editMisc'
elif self.tabOption == self.tabVacation:
#Misc
self.disableMenuItem('tabs')
self.enableMenuItem('vacation')
self.editAction = 'editVacation'
elif self.tabOption == self.tabDisease:
#Misc
self.disableMenuItem('tabs')
self.enableMenuItem('disease')
self.editAction = 'editDisease'
# refresh the Tree
self.refreshTree()
self.enableMenuItem(self.editAction)
self.editEntries = False
| gpl-3.0 |
sonya/eea | py/ca/counterfact.py | 1 | 2732 | #!/usr/bin/python3
#
# Copyright 2012 Sonya Huang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import common.more_exchange_rates as exrate
import wiod.common
from ca import config
from common import matrixutils, sqlhelper
from common.dbconnect import db
from common.matrixutils import NamedMatrix
from common.ioutils import IOMatrixGenerator, EnvMatrixGenerator
from common.counterfact import CounterfactGenerator
# --- Build the matrix generators that feed the CounterfactGenerator. ---

# IO matrix source: industry-by-industry transaction table, initially
# pointed at the first study year; re-pointed per year in the loop below.
iogen = IOMatrixGenerator(
    transaction_table="%s.ixi_%d" % (config.SCHEMA, config.STUDY_YEARS[0]),
    final_demand_sectors=config.fd_sectors)
iogen.set_pce_col(config.pce_sector)
iogen.set_export_col(config.export_sector)

# Environmental accounts source.
envgen = EnvMatrixGenerator(
    envtable="%s.emissions_quantity" % config.SCHEMA,
    ind_col_name="industry",
    series_col_name="1")  # we just have emissions, no series, need hack

cfgen = CounterfactGenerator(iogen, envgen)
cfgen.set_series_code(["1"], "emissions")

for year in config.STUDY_YEARS:
    print(year)

    # Re-point the IO generator at this year's transaction table.
    iogen = cfgen.get_iogen()
    iogen.set_table("%s.ixi_%d" % (config.SCHEMA, year))

    # Prefer the WIOD exchange rate; fall back to the generic rate source.
    exchange_rate = wiod.common.get_exchange_rate("CAN", year)
    if exchange_rate is None:
        exchange_rate = exrate.get_rate("ca", year)
    iogen.set_exchange_rate(exchange_rate)

    # Restrict environmental data to this year, excluding blacklisted
    # industries.
    envgen = cfgen.get_envgen()
    envgen.set_universal_conditions([
        "year = %d" % year,
        "industry not in %s" % sqlhelper.set_repr(config.env_blacklist),
        ])

    # Selector matrices that map the raw IO / environmental sector codes
    # onto the harmonized sector classification.
    io_harmonizer = matrixutils.generate_selector_matrix(
        "%s.sector_map" % config.SCHEMA,
        iogen.get_sectors(), "io_code", "harmonized",
        ["io_code is not null"])
    env_harmonizer = matrixutils.generate_selector_matrix(
        "%s.sector_map" % config.SCHEMA,
        envgen.get_sectors(), "env_code", "harmonized",
        ["env_code is not null"])

    series = ["1"]
    cfgen.prepare(year, series, io_harmonizer, env_harmonizer)

# Human-readable titles for each industry code, for report output.
sector_titles = {}
stmt = db.prepare("select distinct code, description" +
                  " from %s.ind_codes order by code" % config.SCHEMA)
for row in stmt():
    sector_titles[row[0]] = row[1]
cfgen.set_sector_titles(sector_titles)

cfgen.describe()
cfgen.describe(True)
# NOTE(review): 1997 looks like a hard-coded base year for the
# counterfactual run -- confirm against config.STUDY_YEARS.
cfgen.counterfact(1997, "ca")
| apache-2.0 |
SunDwarf/curious | curious/dataclasses/channel.py | 1 | 45941 | # This file is part of curious.
#
# curious is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# curious is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with curious. If not, see <http://www.gnu.org/licenses/>.
"""
Wrappers for Channel objects.
.. currentmodule:: curious.dataclasses.channel
"""
import time
from math import floor
import collections
import enum
import multio
import pathlib
import typing as _typing
from async_generator import asynccontextmanager
from os import PathLike
from types import MappingProxyType
from typing import AsyncIterator
from curious.dataclasses import guild as dt_guild, invite as dt_invite, member as dt_member, \
message as dt_message, permissions as dt_permissions, role as dt_role, user as dt_user, \
webhook as dt_webhook
from curious.dataclasses.bases import Dataclass, IDObject
from curious.dataclasses.embed import Embed
from curious.exc import CuriousError, ErrorCode, Forbidden, HTTPException, PermissionsError
from curious.util import AsyncIteratorWrapper, base64ify, deprecated, safe_generator
class ChannelType(enum.IntEnum):
    """
    Returns a mapping from Discord channel type.
    """

    #: Represents a text channel.
    TEXT = 0

    #: Represents a private channel.
    PRIVATE = 1

    #: Represents a voice channel.
    VOICE = 2

    #: Represents a group channel.
    GROUP = 3

    #: Represents a category channel.
    CATEGORY = 4

    def has_messages(self) -> bool:
        """
        :return: If this channel type has messages.
        """
        # Only voice channels and categories cannot carry messages.
        return self is not ChannelType.VOICE and self is not ChannelType.CATEGORY
# NOTE(review): ``collections.AsyncIterator`` was deprecated since 3.3's abc
# split and removed in Python 3.10 -- this should be
# ``collections.abc.AsyncIterator`` on modern interpreters.
class HistoryIterator(collections.AsyncIterator):
    """
    An iterator that allows you to automatically fetch messages and async iterate over them.

    .. code-block:: python3

        it = HistoryIterator(some_channel, bot, max_messages=100)

        # usage 1
        async for message in it:
            ...

        # usage 2
        await it.fill_messages()
        for message in it.messages:
            ...

    Note that usage 2 will only fill chunks of 100 messages at a time.
    """

    def __init__(self, channel: 'Channel',
                 max_messages: int = -1, *,
                 before: int = None, after: int = None):
        """
        :param channel: The :class:`.Channel` to iterate over.
        :param max_messages: The maximum number of messages to return. <= 0 means infinite.
        :param before: The message ID to fetch before.
        :param after: The message ID to fetch after.

        .. versionchanged:: 0.7.0

            Removed the ``client`` parameter.
        """
        self.channel = channel

        #: The current storage of messages.
        self.messages = collections.deque()

        #: The current count of messages iterated over.
        #: This is used to know when to automatically fill new messages.
        self.current_count = 0

        #: The maximum amount of messages to use.
        #: If this is <= 0, an infinite amount of messages are returned.
        self.max_messages = max_messages

        #: The message ID of before to fetch.
        self.before = before
        # Accept snowflake-wrapping objects as well as raw ints.
        if isinstance(self.before, IDObject):
            self.before = self.before.id

        #: The message ID of after to fetch.
        self.after = after
        if isinstance(self.after, IDObject):
            self.after = self.after.id

        #: The last message ID that we fetched.
        #: Pagination anchors on ``before`` when given, otherwise on ``after``
        #: (and may start as None when neither was passed).
        if self.before:
            self.last_message_id = self.before
        else:
            self.last_message_id = self.after

    async def fill_messages(self) -> None:
        """
        Called to fill the next <n> messages.

        This is called automatically by :meth:`.__anext__`, but can be used to fill the messages
        anyway.
        """
        # With an unbounded iterator, just request a full page (100 messages);
        # otherwise only request what is still missing.
        if self.max_messages < 0:
            to_get = 100
        else:
            to_get = self.max_messages - self.current_count

        if to_get <= 0:
            return

        if self.before:
            messages = await self.channel._bot.http.get_message_history(self.channel.id,
                                                                        before=self.last_message_id,
                                                                        limit=to_get)
        else:
            # NOTE(review): the ``after`` branch does not pass ``limit=to_get``,
            # unlike the ``before`` branch -- looks asymmetric, confirm intent.
            messages = await self.channel._bot.http.get_message_history(self.channel.id,
                                                                        after=self.last_message_id)
            # The API returns newest-first when paginating forwards; reverse so
            # iteration proceeds in chronological order.
            messages = reversed(messages)

        for message in messages:
            self.messages.append(self.channel._bot.state.make_message(message))

    async def __anext__(self) -> 'dt_message.Message':
        # Stop once the configured maximum has been yielded.
        self.current_count += 1
        if self.current_count == self.max_messages:
            raise StopAsyncIteration

        # Refill the local buffer from the API when it runs dry.
        if len(self.messages) <= 0:
            await self.fill_messages()

        try:
            message = self.messages.popleft()
        except IndexError:
            # No messages to fill, so self._fill_messages didn't return any
            # This signals the end of iteration.
            raise StopAsyncIteration

        # Remember where we are so the next fill paginates from here.
        self.last_message_id = message.id

        return message

    def __iter__(self) -> None:
        raise RuntimeError("This is not an iterator - you want to use `async for` instead.")

    def __await__(self) -> None:
        raise RuntimeError("This is not a coroutine - you want to use `async for` instead.")

    async def next(self) -> 'dt_message.Message':
        """
        Gets the next item in history.
        """
        return await self.__anext__()

    async def all(self) -> '_typing.List[dt_message.Message]':
        """
        Gets a flattened list of items from the history.
        """
        items = []
        async for item in self:
            items.append(item)

        return items
class ChannelMessageWrapper(object):
    """
    Represents a channel's message container.

    Thin facade over the HTTP client for message operations (history, send,
    upload, delete/purge, fetch) on one :class:`.Channel`.  Guild permission
    checks are performed client-side before each request.
    """

    #: Keep instances lightweight -- one wrapper may exist per channel.
    __slots__ = "channel",

    def __init__(self, channel: 'Channel'):
        #: The :class:`.Channel` this container is used for.
        self.channel = channel

    def __iter__(self) -> None:
        # Synchronous iteration is deliberately unsupported.
        raise RuntimeError("Use `async for`")

    def __aiter__(self) -> HistoryIterator:
        # Delegates to the unbounded history iterator.
        return self.history.__aiter__()

    @property
    def history(self) -> HistoryIterator:
        """
        :return: A :class:`.HistoryIterator` that can be used to iterate over the channel history.
        """
        # limit=-1 means iterate without an upper bound.
        return self.get_history(before=self.channel._last_message_id, limit=-1)

    def get_history(self, before: int = None,
                    after: int = None,
                    limit: int = 100) -> HistoryIterator:
        """
        Gets history for this channel.

        This is *not* an async function - it returns a :class:`HistoryIterator` which can be async
        iterated over to get message history.

        .. code-block:: python3

            async for message in channel.get_history(limit=1000):
                print(message.content, "by", message.author.user.name)

        :param limit: The maximum number of messages to get.
        :param before: The snowflake ID to get messages before.
        :param after: The snowflake ID to get messages after.
        """
        # Client-side permission pre-check; only applies inside a guild.
        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).read_message_history:
                raise PermissionsError("read_message_history")

        return HistoryIterator(self.channel, before=before, after=after, max_messages=limit)

    async def send(self, content: str = None, *,
                   tts: bool = False, embed: 'Embed' = None) -> 'dt_message.Message':
        """
        Sends a message to this channel.

        This requires SEND_MESSAGES permission in the channel.
        If the content is not a string, it will be automatically stringified.

        .. code:: python

            await channel.send("Hello, world!")

        :param content: The content of the message to send.
        :param tts: Should this message be text to speech?
        :param embed: An embed object to send with this message.
        :return: A new :class:`.Message` object.
        """
        if not self.channel.type.has_messages():
            raise CuriousError("Cannot send messages to a voice channel")

        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).send_messages:
                raise PermissionsError("send_messages")

        # Stringify non-string content (e.g. ints, objects) before sending.
        if not isinstance(content, str) and content is not None:
            content = str(content)

        # check for empty messages
        if not content:
            # No content: an embed is mandatory, and sending it needs
            # embed_links in a guild.
            if not embed:
                raise CuriousError("Cannot send an empty message")

            if self.channel.guild and not \
                    self.channel.effective_permissions(self.channel.guild.me).embed_links:
                raise PermissionsError("embed_links")
        else:
            # Discord's hard content-length limit.
            if content and len(content) > 2000:
                raise CuriousError("Content must be less than 2000 characters")

        if embed is not None:
            embed = embed.to_dict()

        data = await self.channel._bot.http.send_message(self.channel.id, content,
                                                         tts=tts, embed=embed)
        obb = self.channel._bot.state.make_message(data, cache=True)

        return obb

    async def upload(self, fp: '_typing.Union[bytes, str, PathLike, _typing.IO]',
                     *,
                     filename: str = None,
                     message_content: '_typing.Optional[str]' = None,
                     message_embed: '_typing.Optional[Embed]' = None) -> 'dt_message.Message':
        """
        Uploads a message to this channel.

        This requires SEND_MESSAGES and ATTACH_FILES permission in the channel.

        .. code-block:: python3

            with open("/tmp/emilia_best_girl.jpg", 'rb') as f:
                await channel.messages.upload(f, "my_waifu.jpg")

        :param fp: Variable.

            - If passed a string or a :class:`os.PathLike`, will open and read the file and
              upload it.
            - If passed bytes, will use the bytes as the file content.
            - If passed a file-like, will read and use the content to upload.

        :param filename: The filename for the file uploaded. If a path-like or str is passed, \
            will use the filename from that if this is not specified.
        :param message_content: Optional: Any extra content to be sent with the message.
        :param message_embed: Optional: An :class:`.Embed` to be sent with the message. The embed \
            can refer to the image as "attachment://filename"

        :return: The new :class:`.Message` created.
        """
        if not self.channel.type.has_messages():
            raise CuriousError("Cannot send messages to a voice channel")

        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).send_messages:
                raise PermissionsError("send_messages")

            if not self.channel.effective_permissions(self.channel.guild.me).attach_files:
                raise PermissionsError("attach_files")

        # Normalise the various accepted input types into raw bytes,
        # inferring a filename from path-likes when none was given.
        if isinstance(fp, bytes):
            file_content = fp
        elif isinstance(fp, pathlib.Path):
            if filename is None:
                filename = fp.parts[-1]

            file_content = fp.read_bytes()
        elif isinstance(fp, (str, PathLike)):
            path = pathlib.Path(fp)
            if filename is None:
                filename = path.parts[-1]

            file_content = path.read_bytes()
        elif isinstance(fp, _typing.IO) or hasattr(fp, "read"):
            file_content = fp.read()
            # Text-mode file objects yield str; encode for the wire.
            if isinstance(file_content, str):
                file_content = file_content.encode("utf-8")
        else:
            raise ValueError("Got unknown type for upload")

        if filename is None:
            filename = "unknown.bin"

        embed = message_embed.to_dict() if message_embed else None

        data = await self.channel._bot.http.send_file(self.channel.id, file_content,
                                                      filename=filename, content=message_content, embed=embed)
        # Upload responses are not cached, unlike plain sends.
        obb = self.channel._bot.state.make_message(data, cache=False)

        return obb

    async def bulk_delete(self, messages: '_typing.List[dt_message.Message]') -> int:
        """
        Deletes messages from a channel.
        This is the low-level delete function - for the high-level function, see
        :meth:`.Channel.messages.purge()`.

        Example for deleting all the last 100 messages:

        .. code:: python

            history = channel.messages.get_history(limit=100)
            messages = []

            async for message in history:
                messages.append(message)

            await channel.messages.bulk_delete(messages)

        :param messages: A list of :class:`.Message` objects to delete.
        :return: The number of messages deleted.
        """
        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).manage_messages:
                raise PermissionsError("manage_messages")

        # Discord rejects bulk deletes of messages older than 14 days;
        # compute the oldest acceptable snowflake (1420070400000 is the
        # Discord epoch in ms, snowflake timestamp sits in the top 42 bits).
        minimum_allowed = floor((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
        ids = []
        for message in messages:
            if message.id < minimum_allowed:
                msg = f"Cannot delete message id {message.id} older than {minimum_allowed}"
                raise CuriousError(msg)
            ids.append(message.id)

        await self.channel._bot.http.delete_multiple_messages(self.channel.id, ids)

        return len(ids)

    async def purge(self, limit: int = 100, *,
                    author: 'dt_member.Member' = None,
                    content: str = None,
                    predicate: '_typing.Callable[[dt_message.Message], bool]' = None,
                    fallback_from_bulk: bool = False):
        """
        Purges messages from a channel.
        This will attempt to use ``bulk-delete`` if possible, but otherwise will use the normal
        delete endpoint (which can get ratelimited severely!) if ``fallback_from_bulk`` is True.

        Example for deleting all messages owned by the bot:

        .. code-block:: python3

            me = channel.guild.me
            await channel.messages.purge(limit=100, author=me)

        Custom check functions can also be applied which specify any extra checks. They take one
        argument (the Message object) and return a boolean (True or False) determining if the
        message should be deleted.

        For example, to delete all messages with the letter ``i`` in them:

        .. code-block:: python3

            await channel.messages.purge(limit=100,
                                         predicate=lambda message: 'i' in message.content)

        :param limit: The maximum amount of messages to delete. -1 for unbounded size.
        :param author: Only delete messages made by this author.
        :param content: Only delete messages that exactly match this content.
        :param predicate: A callable that determines if a message should be deleted.
        :param fallback_from_bulk: If this is True, messages will be regular deleted if they \
            cannot be bulk deleted.
        :return: The number of messages deleted.
        """
        # Skip the manage_messages pre-check when the caller explicitly allows
        # falling back to one-by-one deletes (which only need own-message perms).
        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).manage_messages \
                    and not fallback_from_bulk:
                raise PermissionsError("manage_messages")

        # Collect the filters; a message is deleted only if all of them pass.
        checks = []
        if author:
            checks.append(lambda m: m.author == author)

        if content:
            checks.append(lambda m: m.content == content)

        if predicate:
            checks.append(predicate)

        to_delete = []
        history = self.get_history(limit=limit)

        async for message in history:
            if all(check(message) for check in checks):
                to_delete.append(message)

        can_bulk_delete = True

        # Split into chunks of 100.
        # (100 is the bulk-delete endpoint's per-request maximum.)
        message_chunks = [to_delete[i:i + 100] for i in range(0, len(to_delete), 100)]
        # Oldest snowflake bulk-delete will accept (14-day limit); see
        # bulk_delete for the epoch arithmetic.
        minimum_allowed = floor((time.time() - 14 * 24 * 60 * 60) * 1000.0 - 1420070400000) << 22
        for chunk in message_chunks:
            message_ids = []
            for message in chunk:
                if message.id < minimum_allowed:
                    msg = f"Cannot delete message id {message.id} older than {minimum_allowed}"
                    raise CuriousError(msg)
                message_ids.append(message.id)
            # First, try and bulk delete all the messages.
            if can_bulk_delete:
                try:
                    await self.channel._bot.http.delete_multiple_messages(self.channel.id,
                                                                          message_ids)
                except Forbidden:
                    # We might not have MANAGE_MESSAGES.
                    # Check if we should fallback on normal delete.
                    can_bulk_delete = False
                    if not fallback_from_bulk:
                        # Don't bother, actually.
                        raise

            # This is an `if not` instead of an `else` because `can_bulk_delete` might've changed.
            if not can_bulk_delete:
                # Instead, just delete() the message.
                for message in chunk:
                    await message.delete()

        return len(to_delete)

    async def get(self, message_id: int) -> 'dt_message.Message':
        """
        Gets a single message from this channel.

        .. versionchanged:: 0.7.0

            Errors raised are now consistent across bots and userbots.

        :param message_id: The message ID to retrieve.
        :return: A new :class:`.Message` object.
        :raises CuriousError: If the message could not be found.
        """
        if self.channel.guild:
            if not self.channel.effective_permissions(self.channel.guild.me).read_message_history:
                raise PermissionsError("read_message_history")

        # Serve from the state cache when possible, avoiding an HTTP round-trip.
        cached_message = self.channel._bot.state.find_message(message_id)
        if cached_message is not None:
            return cached_message

        try:
            data = await self.channel._bot.http.get_message(self.channel.id, message_id)
        except HTTPException as e:
            # transform into a CuriousError if it wasn't found
            if e.error_code == ErrorCode.UNKNOWN_MESSAGE:
                raise CuriousError("No message found for this ID") from e

            raise

        msg = self.channel._bot.state.make_message(data)

        return msg
class Channel(Dataclass):
"""
Represents a channel object.
"""
def __init__(self, client, **kwargs) -> None:
super().__init__(kwargs.get("id"), client)
#: The name of this channel.
self.name = kwargs.get("name", None) # type: str
#: The topic of this channel.
self.topic = kwargs.get("topic", None) # type: str
#: The ID of the guild this is associated with.
self.guild_id = int(kwargs.get("guild_id", 0)) or None # type: int
parent_id = kwargs.get("parent_id")
if parent_id is not None:
parent_id = int(parent_id)
#: The parent ID of this channel.
self.parent_id = parent_id # type: int
#: The :class:`.ChannelType` of channel this channel is.
self.type = ChannelType(kwargs.get("type", 0)) # type: ChannelType
#: The :class:`.ChannelMessageWrapper` for this channel.
self._messages = None # type: ChannelMessageWrapper
#: If this channel is NSFW.
self.nsfw = kwargs.get("nsfw", False) # type: bool
#: If private, the mapping of :class:`.User` that are in this channel.
self._recipients = {} # type: _typing.Dict[int, dt_user.User]
if self.private:
for recipient in kwargs.get("recipients", []):
u = self._bot.state.make_user(recipient)
self._recipients[u.id] = u
if self.type == ChannelType.GROUP:
# append the current user
self._recipients[self._bot.user.id] = self._bot.user
#: The position of this channel.
self.position = kwargs.get("position", 0) # type: int
#: The last message ID of this channel.
#: Used for history.
self._last_message_id = None # type: int
_last_message_id = kwargs.get("last_message_id", 0)
if _last_message_id:
self._last_message_id = int(_last_message_id)
else:
self._last_message_id = None
# group channel stuff
#: The owner ID of the channel.
#: This is None for non-group channels.
self.owner_id = int(kwargs.get("owner_id", 0)) or None # type: int
#: The icon hash of the channel.
self.icon_hash = kwargs.get("icon", None) # type: str
#: The internal overwrites for this channel.
self._overwrites = {} # type: _typing.Dict[int, dt_permissions.Overwrite]
def __repr__(self) -> str:
return f"<Channel id={self.id} name={self.name} type={self.type.name} " \
f"guild_id={self.guild_id}>"
__str__ = __repr__
def _update_overwrites(self, overwrites: _typing.List[dict]):
"""
Updates the overwrites for this channel.
:param overwrites: A list of overwrite dicts.
"""
if not self.guild_id:
raise CuriousError("A channel without a guild cannot have overwrites")
self._overwrites = {}
for overwrite in overwrites:
id_ = int(overwrite["id"])
type_ = overwrite["type"]
if type_ == "member":
obb = self.guild._members.get(id_)
else:
obb = self.guild._roles.get(id_)
self._overwrites[id_] = dt_permissions.Overwrite(allow=overwrite["allow"],
deny=overwrite["deny"],
obb=obb, channel_id=self.id)
self._overwrites[id_]._immutable = True
@property
def guild(self) -> '_typing.Union[dt_guild.Guild, None]':
"""
:return: The :class:`.Guild` associated with this Channel.
"""
try:
return self._bot.guilds[self.guild_id]
except KeyError:
return None
@property
def private(self) -> bool:
"""
:return: If this channel is a private channel (i.e has no guild.)
"""
return self.guild_id is None
    @property
    def recipients(self) -> '_typing.Mapping[int, dt_user.User]':
        """
        :return: A read-only mapping of int user ID -> :class:`.User` for the
            recipients of this private chat.
        """
        # Proxy prevents callers from mutating the internal cache.
        return MappingProxyType(self._recipients)
@property
def user(self) -> '_typing.Union[dt_user.User, None]':
"""
:return: If this channel is a private channel, the :class:`.User` of the other user.
"""
if self.type != ChannelType.PRIVATE:
return None
try:
return next(iter(self.recipients.values()))
except StopIteration:
return None
@property
def owner(self) -> '_typing.Union[dt_user.User, None]':
"""
:return: If this channel is a group channel, the owner of the channel.
"""
if not self.owner_id:
return None
try:
return self._bot.state._users[self.owner_id]
except KeyError:
return None
@property
def parent(self) -> '_typing.Union[Channel, None]':
"""
:return: If this channel has a parent, the parent category of this channel.
"""
try:
return self.guild.channels[self.parent_id]
except (KeyError, AttributeError):
return None
@property
def children(self) -> '_typing.List[Channel]':
"""
:return: A list of :class:`.Channel` children this channel has, if any.
"""
if not self.guild:
return []
channels = [channel
for channel in self.guild.channels.values()
if channel.parent_id == self.id]
return channels
def get_by_name(self, name: str) -> '_typing.Union[Channel, None]':
"""
Gets a channel by name in this channel's children.
:param name: The name of the channel to get.
:return: A :class:`.Channel` if the channel was find
"""
return next(filter(lambda channel: channel.name == name, self.children), None)
    @property
    def messages(self) -> 'ChannelMessageWrapper':
        """
        :return: The :class:`.ChannelMessageWrapper` for this channel, if applicable.

        :raises CuriousError: If this channel type cannot have messages.
        """
        if not self.type.has_messages():
            raise CuriousError("This channel does not have messages")

        if self._messages is None:
            # Created lazily on first access and cached on the instance.
            self._messages = ChannelMessageWrapper(self)

        return self._messages
    @property
    @deprecated(since="0.7.0", see_instead="Channel.messages", removal="0.9.0")
    def history(self) -> HistoryIterator:
        """
        :return: A :class:`.HistoryIterator` that can be used to iterate over the channel history.

        .. deprecated:: 0.7.0
            Use :attr:`.Channel.messages` instead.
        """
        return self.messages.history
    @property
    def pins(self) -> '_typing.AsyncIterator[dt_message.Message]':
        """
        :return: A :class:`.AsyncIteratorWrapper` that can be used to iterate over the pins.
        """
        # Lazy: the HTTP request only happens once iteration starts.
        return AsyncIteratorWrapper(self.get_pins)
@property
def icon_url(self) -> _typing.Union[str, None]:
"""
:return: The icon URL for this channel if it is a group DM.
"""
return "https://cdn.discordapp.com/channel-icons/{}/{}.webp" \
.format(self.id, self.icon_hash)
@property
def voice_members(self) -> '_typing.List[dt_member.Member]':
"""
:return: A list of members that are in this voice channel.
"""
if self.type != ChannelType.VOICE:
raise CuriousError("No members for channels that aren't voice channels")
return [state.member for state in self.guild._voice_states.values()
if state.channel_id == self.id]
    @property
    def overwrites(self) -> '_typing.Mapping[int, dt_permissions.Overwrite]':
        """
        :return: A read-only mapping of target_id -> :class:`.Overwrite` for this channel.
        """
        # Proxy prevents callers from mutating the internal cache.
        return MappingProxyType(self._overwrites)
def effective_permissions(self, member: 'dt_member.Member') -> \
'dt_permissions.Permissions':
"""
Gets the effective permissions for the given member.
"""
if not self.guild:
return dt_permissions.Permissions(515136)
permissions = dt_permissions.Permissions(self.guild.default_role.permissions.bitfield)
for role in member.roles:
permissions.bitfield |= role.permissions.bitfield
if permissions.administrator:
return dt_permissions.Permissions.all()
overwrites_everyone = self._overwrites.get(self.guild.default_role.id)
if overwrites_everyone:
permissions.bitfield &= ~(overwrites_everyone.deny.bitfield)
permissions.bitfield |= overwrites_everyone.allow.bitfield
allow = deny = 0
for role in member.roles:
overwrite = self._overwrites.get(role.id)
if overwrite:
allow |= overwrite.allow.bitfield
deny |= overwrite.deny.bitfield
permissions.bitfield &= ~deny
permissions.bitfield |= allow
overwrite_member = self._overwrites.get(member.id)
if overwrite_member:
permissions.bitfield &= ~(overwrite.deny.bitfield)
permissions.bitfield |= overwrite.allow.bitfield
return permissions
    def permissions(self, obb: '_typing.Union[dt_member.Member, dt_role.Role]') -> \
            'dt_permissions.Overwrite':
        """
        Gets the permission overwrites for the specified object.

        If you want to check whether a member has specific permissions, use
        :method:effective_permissions instead.

        :param obb: The :class:`.Member` or :class:`.Role` to look up.
        :return: An immutable :class:`.Overwrite` for that target.
        """
        if not self.guild:
            # Private channels have a fixed allow set and no denies.
            allow = dt_permissions.Permissions(515136)
            overwrite = dt_permissions.Overwrite(allow=allow, deny=0, obb=obb, channel_id=self.id)
            overwrite._immutable = True
            return overwrite

        overwrite = self._overwrites.get(obb.id)
        if not overwrite:
            # No target-specific overwrite: fall back to the channel's
            # @everyone overwrite, or synthesise one from the default
            # role's permissions if the channel has none.
            everyone_overwrite = self._overwrites.get(self.guild.default_role.id)
            if everyone_overwrite is None:
                everyone_perms = self.guild.default_role.permissions
                everyone_overwrite = dt_permissions.Overwrite(allow=everyone_perms,
                                                              deny=dt_permissions.Permissions(0),
                                                              obb=obb)
                everyone_overwrite.channel_id = self.id
                overwrite = everyone_overwrite
            else:
                # Re-wrap so the returned overwrite targets `obb`, not
                # the default role.
                overwrite = dt_permissions.Overwrite(everyone_overwrite.allow,
                                                     everyone_overwrite.deny,
                                                     obb)
                overwrite.channel_id = self.id

            overwrite._immutable = True

        return overwrite
    @property
    def me_permissions(self) -> 'dt_permissions.Overwrite':
        """
        :return: The overwrite permissions for the current member.
        """
        if not self.guild:
            # this works in this branch, but it shouldn't
            # NOTE(review): permissions() ignores `obb` entirely on its
            # private-channel path, which is why None is tolerated here.
            return self.permissions(None)

        return self.permissions(self.guild.me)
def _copy(self):
obb = object.__new__(self.__class__)
obb.name = self.name
obb.type = self.type
obb.guild_id = self.guild_id
obb.nsfw = self.nsfw
obb._recipients = self._recipients
obb.icon_hash = self.icon_hash
obb.owner_id = self.owner_id
obb.topic = self.topic
obb.position = self.position
obb._bot = self._bot
obb.parent_id = self.parent_id
return obb
    @deprecated(since="0.7.0", see_instead="Channel.messages.get_history", removal="0.9.0")
    def get_history(self, before: int = None,
                    after: int = None,
                    limit: int = 100) -> HistoryIterator:
        """
        Gets history for this channel.

        This is *not* an async function - it returns a :class:`HistoryIterator` which can be async
        iterated over to get message history.

        .. code-block:: python3

            async for message in channel.get_history(limit=1000):
                print(message.content, "by", message.author.user.name)

        :param limit: The maximum number of messages to get.
        :param before: The snowflake ID to get messages before.
        :param after: The snowflake ID to get messages after.
        """
        return self.messages.get_history(before=before, after=after, limit=limit)
async def get_pins(self) -> '_typing.List[dt_message.Message]':
"""
Gets the pins for a channel.
:return: A list of :class:`.Message` objects.
"""
msg_data = await self._bot.http.get_pins(self.id)
messages = []
for message in msg_data:
messages.append(self._bot.state.make_message(message))
return messages
    @property
    def webhooks(self) -> '_typing.AsyncIterator[dt_webhook.Webhook]':
        """
        :return: A :class:`.AsyncIteratorWrapper` for the :class:`.Webhook` objects in this \
            channel.
        """
        # Lazy: the HTTP request only happens once iteration starts.
        return AsyncIteratorWrapper(self.get_webhooks)
async def get_webhooks(self) -> '_typing.List[dt_webhook.Webhook]':
"""
Gets the webhooks for this channel.
:return: A list of :class:`.Webhook` objects for the channel.
"""
webhooks = await self._bot.http.get_webhooks_for_channel(self.id)
obbs = []
for webhook in webhooks:
obbs.append(self._bot.state.make_webhook(webhook))
return obbs
    @deprecated(since="0.7.0", see_instead="Channel.messages.get", removal="0.9.0")
    async def get_message(self, message_id: int) -> 'dt_message.Message':
        """
        Gets a single message from this channel.

        :param message_id: The message ID to retrieve.
        :return: A new :class:`.Message` object.
        """
        return await self.messages.get(message_id)
async def create_webhook(self, *, name: str = None,
avatar: bytes = None) -> 'dt_webhook.Webhook':
"""
Create a webhook in this channel.
:param name: The name of the new webhook.
:param avatar: The bytes content of the new webhook.
:return: A :class:`.Webhook` that represents the webhook created.
"""
if not self.effective_permissions(self.guild.me).manage_webhooks:
raise PermissionsError("manage_webhooks")
if avatar is not None:
avatar = base64ify(avatar)
data = await self._bot.http.create_webhook(self.id, name=name, avatar=avatar)
webook = self._bot.state.make_webhook(data)
return webook
async def edit_webhook(self, webhook: 'dt_webhook.Webhook', *,
name: str = None, avatar: bytes = None) -> 'dt_webhook.Webhook':
"""
Edits a webhook.
:param webhook: The :class:`.Webhook` to edit.
:param name: The new name for the webhook.
:param avatar: The new bytes for the avatar.
:return: The modified :class:`.Webhook`. object.
"""
if avatar is not None:
avatar = base64ify(avatar)
if webhook.token is not None:
# Edit it unconditionally.
await self._bot.http.edit_webhook_with_token(webhook.id, webhook.token,
name=name, avatar=avatar)
if not self.effective_permissions(self.guild.me).manage_webhooks:
raise PermissionsError("manage_webhooks")
data = await self._bot.http.edit_webhook(webhook.id,
name=name, avatar=avatar)
webhook.default_name = data.get("name")
webhook._default_avatar = data.get("avatar")
webhook.user.username = data.get("name")
webhook.user.avatar_hash = data.get("avatar")
return webhook
    async def delete_webhook(self, webhook: 'dt_webhook.Webhook') -> 'dt_webhook.Webhook':
        """
        Deletes a webhook.

        You must have MANAGE_WEBHOOKS to delete this webhook.

        :param webhook: The :class:`.Webhook` to delete.
        :return: The deleted :class:`.Webhook`.
        """
        if webhook.token is not None:
            # Delete it unconditionally.
            # A webhook token authorises deletion without any permissions.
            await self._bot.http.delete_webhook_with_token(webhook.id, webhook.token)
            return webhook

        if not self.effective_permissions(self.guild.me).manage_webhooks:
            raise PermissionsError("manage_webhooks")

        await self._bot.http.delete_webhook(webhook.id)
        return webhook
async def create_invite(self, **kwargs) -> 'dt_invite.Invite':
"""
Creates an invite in this channel.
:param max_age: The maximum age of the invite.
:param max_uses: The maximum uses of the invite.
:param temporary: Is this invite temporary?
:param unique: Is this invite unique?
"""
if not self.guild:
raise PermissionsError("create_instant_invite")
if not self.effective_permissions(self.guild.me).create_instant_invite:
raise PermissionsError("create_instant_invite")
inv = await self._bot.http.create_invite(self.id, **kwargs)
invite = dt_invite.Invite(self._bot, **inv)
return invite
    @deprecated(since="0.7.0", see_instead="Channel.messages.delete_messages",
                removal="0.9.0")
    async def delete_messages(self, messages: '_typing.List[dt_message.Message]') -> int:
        """
        Deletes messages from a channel.

        This is the low-level delete function - for the high-level function, see
        :meth:`.Channel.purge()`.

        Example for deleting all the last 100 messages:

        .. code:: python

            history = channel.get_history(limit=100)
            messages = []

            async for message in history:
                messages.append(message)

            await channel.delete_messages(messages)

        :param messages: A list of :class:`.Message` objects to delete.
        :return: The number of messages deleted.
        """
        return await self.messages.bulk_delete(messages)
    @deprecated(since="0.7.0", see_instead="Channel.messages.purge", removal="0.9.0")
    async def purge(self, limit: int = 100, *,
                    author: 'dt_member.Member' = None,
                    content: str = None,
                    predicate: '_typing.Callable[[dt_message.Message], bool]' = None,
                    fallback_from_bulk: bool = False):
        """
        Purges messages from a channel.

        This will attempt to use ``bulk-delete`` if possible, but otherwise will use the normal
        delete endpoint (which can get ratelimited severely!) if ``fallback_from_bulk`` is True.

        Example for deleting all messages owned by the bot:

        .. code-block:: python3

            me = channel.guild.me
            await channel.purge(limit=100, author=me)

        Custom check functions can also be applied which specify any extra checks. They take one
        argument (the Message object) and return a boolean (True or False) determining if the
        message should be deleted.

        For example, to delete all messages with the letter ``i`` in them:

        .. code-block:: python3

            await channel.purge(limit=100, predicate=lambda message: 'i' in message.content)

        :param limit: The maximum amount of messages to delete. -1 for unbounded size.
        :param author: Only delete messages made by this author.
        :param content: Only delete messages that exactly match this content.
        :param predicate: A callable that determines if a message should be deleted.
        :param fallback_from_bulk: If this is True, messages will be regular deleted if they \
            cannot be bulk deleted.
        :return: The number of messages deleted.
        """
        return await self.messages.purge(limit=limit, author=author, content=content,
                                         predicate=predicate, fallback_from_bulk=fallback_from_bulk)
async def send_typing(self) -> None:
"""
Starts typing in the channel for 5 seconds.
"""
if not self.type.has_messages():
raise CuriousError("Cannot send messages to this channel")
if self.guild:
if not self.effective_permissions(self.guild.me).send_messages:
raise PermissionsError("send_message")
await self._bot.http.send_typing(self.id)
    @property
    @asynccontextmanager
    @safe_generator
    async def typing(self) -> _typing.AsyncContextManager[None]:
        """
        :return: A context manager that sends typing repeatedly.

        Usage:

        .. code-block:: python3

            async with channel.typing:
                res = await do_long_action()
                await channel.messages.send("Long action:", res)
        """
        running = multio.Event()

        async def runner():
            # Send one typing event immediately, then re-send every time the
            # 5-second wait times out.
            # NOTE(review): `running` is never set anywhere in this property,
            # so the non-timeout `return` path looks unreachable; the runner
            # exits only via the task-group cancellation below - confirm.
            await self.send_typing()
            while True:
                try:
                    async with multio.timeout_after(5):
                        await running.wait()
                except multio.asynclib.TaskTimeout:
                    await self.send_typing()
                else:
                    return

        async with multio.asynclib.task_manager() as tg:
            await multio.asynclib.spawn(tg, runner)
            try:
                # The caller's body runs here while the runner keeps typing.
                yield
            finally:
                await multio.asynclib.cancel_task_group(tg)
    @deprecated(since="0.7.0", see_instead="Channel.messages.send", removal="0.10.0")
    async def send(self, content: str = None, *,
                   tts: bool = False, embed: Embed = None) -> 'dt_message.Message':
        """
        Sends a message to this channel.

        This requires SEND_MESSAGES permission in the channel.
        If the content is not a string, it will be automatically stringified.

        .. code:: python

            await channel.send("Hello, world!")

        :param content: The content of the message to send.
        :param tts: Should this message be text to speech?
        :param embed: An embed object to send with this message.
        :return: A new :class:`.Message` object.
        """
        return await self.messages.send(content, tts=tts, embed=embed)
    @deprecated(since="0.7.0", see_instead="Channel.messages.upload", removal="0.10.0")
    async def send_file(self, file_content: bytes, filename: str,
                        *, message_content: _typing.Optional[str] = None) -> 'dt_message.Message':
        """
        Uploads a message to this channel.

        This requires SEND_MESSAGES and ATTACH_FILES permission in the channel.

        .. code:: python

            with open("/tmp/emilia_best_girl.jpg", 'rb') as f:
                await channel.send_file(f.read(), "my_waifu.jpg")

        :param file_content: The bytes-like file content to upload.
            This **cannot** be a file-like object.
        :param filename: The filename of the file.
        :param message_content: Optional: Any extra content to be sent with the message.
        :return: The new :class:`.Message` created.
        """
        return await self.messages.upload(file_content, filename, message_content=message_content)
    @deprecated(since="0.7.0", see_instead="Channel.messages.upload", removal="0.10.0")
    async def upload_file(self, filename: str, *,
                          message_content: str = None) -> 'dt_message.Message':
        """
        A higher level interface to ``send_file``.

        This allows you to specify one of the following to upload:

        - A filename (str)
        - A file-like object
        - A path-like object

        This will open the file, read it in binary, and upload it to the channel.

        :param filename: The file to send, in the formats specified above.
        :param message_content: Any extra content to be sent with the message.
        :return: The new :class:`.Message` created.
        """
        # NOTE(review): the path is passed as both `fp` and `filename`, so the
        # uploaded attachment's name is the full path string - confirm intended.
        return await self.messages.upload(fp=filename, filename=filename,
                                          message_content=message_content)
    async def change_overwrite(self, overwrite: 'dt_permissions.Overwrite'):
        """
        Changes an overwrite for this channel.

        This overwrite must be an instance of :class:`.Overwrite`.

        :param overwrite: The specific overwrite to use.
            If this is None, the overwrite will be deleted.
        :raises PermissionsError: If this is not a guild channel, or the bot
            lacks MANAGE_ROLES.
        """
        if not self.guild:
            raise PermissionsError("manage_roles")

        if not self.effective_permissions(self.guild.me).manage_roles:
            raise PermissionsError("manage_roles")

        # NOTE(review): `overwrite.target` is dereferenced before the
        # `overwrite is None` check below, so passing None (the documented
        # delete behaviour) raises AttributeError here - the delete branch
        # is unreachable as written.
        target = overwrite.target

        if isinstance(target, dt_member.Member):
            type_ = "member"
        else:
            type_ = "role"

        if overwrite is None:
            # Delete the overwrite instead.
            coro = self._bot.http.remove_overwrite(channel_id=self.id, target_id=target.id)

            async def _listener(before, after):
                if after.id != self.id:
                    return False

                # probably right /shrug
                return True
        else:
            coro = self._bot.http.edit_overwrite(self.id, target.id, type_,
                                                 allow=overwrite.allow.bitfield,
                                                 deny=overwrite.deny.bitfield)

            async def _listener(before, after):
                return after.id == self.id

        # Perform the HTTP call and wait for the corresponding gateway
        # channel_update event before returning.
        async with self._bot.events.wait_for_manager("channel_update", _listener):
            await coro

        return self
async def edit(self, **kwargs) -> 'Channel':
"""
Edits this channel.
"""
if self.guild is None:
raise CuriousError("Can only edit guild channels")
if not self.effective_permissions(self.guild.me).manage_channels:
raise PermissionsError("manage_channels")
if "parent" in kwargs:
kwargs["parent_id"] = kwargs["parent"].id
await self._bot.http.edit_channel(self.id, **kwargs)
return self
async def delete(self) -> 'Channel':
"""
Deletes this channel.
"""
if not self.effective_permissions(self.guild.me).manage_channels:
raise PermissionsError("manage_channels")
await self._bot.http.delete_channel(self.id)
return self
| mit |
jalexvig/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/reshape.py | 13 | 13697 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reshape bijectors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import bijector
from tensorflow.python.util import deprecation
__all__ = [
"Reshape",
]
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _static_ndims_from_shape(shape):
  # Number of event dims implied by a vector shape Tensor, determined at
  # graph-build time (None when the dimension is not statically known).
  return shape.shape.with_rank_at_least(1)[0].value
@deprecation.deprecated(
    "2018-10-01",
    "The TensorFlow Distributions library has moved to "
    "TensorFlow Probability "
    "(https://github.com/tensorflow/probability). You "
    "should update all references to use `tfp.distributions` "
    "instead of `tf.contrib.distributions`.",
    warn_once=True)
def _ndims_from_shape(shape):
  # Dynamic (graph-time) counterpart of _static_ndims_from_shape: returns
  # the number of event dims as a scalar Tensor.
  return array_ops.shape(shape)[0]
class Reshape(bijector.Bijector):
  """Reshapes the `event_shape` of a `Tensor`.

  The semantics generally follow that of `tf.reshape()`, with
  a few differences:

  * The user must provide both the input and output shape, so that
    the transformation can be inverted. If an input shape is not
    specified, the default assumes a vector-shaped input, i.e.,
    event_shape_in = (-1,).

  * The `Reshape` bijector automatically broadcasts over the leftmost
    dimensions of its input (`sample_shape` and `batch_shape`); only
    the rightmost `event_ndims_in` dimensions are reshaped. The
    number of dimensions to reshape is inferred from the provided
    `event_shape_in` (`event_ndims_in = len(event_shape_in)`).

  Example usage:

  ```python
  tfd = tf.contrib.distributions

  r = tfd.bijectors.Reshape(event_shape_out=[1, -1])

  r.forward([3., 4.])    # shape [2]
  # ==> [[3., 4.]]       # shape [1, 2]

  r.forward([[1., 2.], [3., 4.]])  # shape [2, 2]
  # ==> [[[1., 2.]],
  #      [[3., 4.]]]   # shape [2, 1, 2]

  r.inverse([[3., 4.]])  # shape [1,2]
  # ==> [3., 4.]         # shape [2]

  r.forward_log_det_jacobian(any_value)
  # ==> 0.

  r.inverse_log_det_jacobian(any_value)
  # ==> 0.
  ```
  """

  @deprecation.deprecated(
      "2018-10-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.contrib.distributions`.",
      warn_once=True)
  def __init__(self, event_shape_out, event_shape_in=(-1,),
               validate_args=False, name=None):
    """Creates a `Reshape` bijector.

    Args:
      event_shape_out: An `int`-like vector-shaped `Tensor`
        representing the event shape of the transformed output.
      event_shape_in: An optional `int`-like vector-shape `Tensor`
        representing the event shape of the input. This is required in
        order to define inverse operations; the default of (-1,)
        assumes a vector-shaped input.
      validate_args: Python `bool` indicating whether arguments should
        be checked for correctness.
      name: Python `str`, name given to ops managed by this object.

    Raises:
      TypeError: if either `event_shape_in` or `event_shape_out` has
        non-integer `dtype`.
      ValueError: if either of `event_shape_in` or `event_shape_out`
       has non-vector shape (`rank > 1`), or if their sizes do not
       match.
    """
    with ops.name_scope(name, "reshape",
                        values=[event_shape_out, event_shape_in]):
      event_shape_out = ops.convert_to_tensor(event_shape_out,
                                              name="event_shape_out",
                                              preferred_dtype=dtypes.int32)
      event_shape_in = ops.convert_to_tensor(event_shape_in,
                                             name="event_shape_in",
                                             preferred_dtype=dtypes.int32)

      # Collect any dynamic validation ops; they are re-run as control
      # dependencies by every public method below.
      assertions = []
      assertions.extend(self._maybe_check_valid_shape(
          event_shape_out, validate_args))
      assertions.extend(self._maybe_check_valid_shape(
          event_shape_in, validate_args))

      self._assertions = assertions
      self._event_shape_in = event_shape_in
      self._event_shape_out = event_shape_out

      super(Reshape, self).__init__(
          forward_min_event_ndims=0,
          is_constant_jacobian=True,
          validate_args=validate_args,
          name=name or "reshape")

  def _maybe_check_valid_shape(self, shape, validate_args):
    """Check that a shape Tensor is int-type and otherwise sane."""
    if not shape.dtype.is_integer:
      raise TypeError("{} dtype ({}) should be `int`-like.".format(
          shape, shape.dtype.name))

    assertions = []

    # Statically reject rank > 1 where possible; otherwise defer to a
    # runtime assert when validate_args is set.
    ndims = array_ops.rank(shape)
    ndims_ = tensor_util.constant_value(ndims)
    if ndims_ is not None and ndims_ > 1:
      raise ValueError("`{}` rank ({}) should be <= 1.".format(
          shape, ndims_))
    elif validate_args:
      assertions.append(check_ops.assert_less_equal(
          ndims, 1, message="`{}` rank should be <= 1.".format(shape)))

    # Each element must be a positive integer or a single -1 wildcard;
    # checked statically when the shape is fully known, else at runtime.
    shape_ = tensor_util.constant_value_as_shape(shape)
    if shape_.is_fully_defined():
      es = np.int32(shape_.as_list())
      if sum(es == -1) > 1:
        raise ValueError(
            "`{}` must have at most one `-1` (given {})"
            .format(shape, es))
      if np.any(es < -1):
        raise ValueError(
            "`{}` elements must be either positive integers or `-1`"
            "(given {})."
            .format(shape, es))
    elif validate_args:
      assertions.extend([
          check_ops.assert_less_equal(
              math_ops.reduce_sum(
                  math_ops.cast(math_ops.equal(shape, -1), dtypes.int32)),
              1,
              message="`{}` elements must have at most one `-1`."
              .format(shape)),
          check_ops.assert_greater_equal(
              shape, -1,
              message="`{}` elements must be either positive integers or `-1`."
              .format(shape)),
      ])
    return assertions

  def _reshape_helper(self, x, event_shape_in, event_shape_out):
    """Reshape only the event_shape of an input `Tensor`."""

    event_ndims_in_ = _static_ndims_from_shape(event_shape_in)
    event_ndims_in = _ndims_from_shape(event_shape_in)
    x_ndims_, x_ndims = x.shape.ndims, array_ops.rank(x)

    assertions = []

    # Ensure x.event_shape is compatible with event_shape_in.
    if (event_ndims_in_ is not None
        and x_ndims_ is not None
        and x.shape.with_rank_at_least(event_ndims_in_)[
            x_ndims_-event_ndims_in_:].is_fully_defined()):
      x_event_shape_, x_event_shape = [  # pylint: disable=unbalanced-tuple-unpacking
          np.int32(x.shape[x_ndims_-event_ndims_in_:])]*2
    else:
      x_event_shape_, x_event_shape = (
          None, array_ops.shape(x)[x_ndims-event_ndims_in:])

    event_shape_in_ = tensor_util.constant_value(event_shape_in)

    if x_event_shape_ is not None and event_shape_in_ is not None:
      # Compare the shape dimensions that are fully specified in the
      # input (i.e., for which event_shape_in is not -1). If x_event_shape
      # matches along all of these dimensions, it is compatible with
      # the desired input shape and any further mismatches (i.e.,
      # incompatibility with the desired *output* shape) will be
      # caught inside of array_ops.reshape() below.
      x_event_shape_specified_ = x_event_shape_[event_shape_in_ >= 0]
      event_shape_in_specified_ = event_shape_in_[event_shape_in_ >= 0]
      if not np.equal(x_event_shape_specified_,
                      event_shape_in_specified_).all():
        raise ValueError(
            "Input `event_shape` does not match `event_shape_in` ({} vs {}).".
            format(x_event_shape_, event_shape_in_))
    elif self.validate_args:
      # Similarly to the static case, we compare the shape dimensions
      # that are fully specified in the input. We extract these
      # dimensions using boolean_mask(), which requires that the mask
      # have known ndims. We can assume that shape Tensors always have
      # ndims==1 (this assumption is verified inside of
      # _maybe_check_valid_shape), so the reshape operation is just a
      # no-op that formally encodes this fact to make boolean_mask()
      # happy.
      event_shape_mask = array_ops.reshape(event_shape_in >= 0, [-1])
      x_event_shape_specified = array_ops.boolean_mask(x_event_shape,
                                                       event_shape_mask)
      event_shape_in_specified = array_ops.boolean_mask(event_shape_in,
                                                        event_shape_mask)
      assertions.append(check_ops.assert_equal(
          x_event_shape_specified, event_shape_in_specified,
          message="Input `event_shape` does not match `event_shape_in`."))

    if assertions:
      x = control_flow_ops.with_dependencies(assertions, x)

    # get the parts of shape(x) that will not change
    sample_and_batch_shape = array_ops.shape(x)

    ndims = (x.shape.ndims if x.shape.ndims is not None
             else array_ops.rank(x))
    sample_and_batch_shape = sample_and_batch_shape[
        :(ndims - math_ops.abs(event_ndims_in))]

    if (event_ndims_in_ is not None
        and x_ndims_ is not None
        and event_ndims_in_ == x_ndims_):
      # Hack to allow forward/inverse_event_shape to do shape
      # inference by calling this helper method with a dummy Tensor of
      # shape event_shape_in. In this special case,
      # sample_and_batch_shape will be empty so we can preserve static
      # shape information by avoiding the concat operation below
      # (which would be a no-op).
      new_shape = event_shape_out
    else:
      new_shape = array_ops.concat(
          [sample_and_batch_shape, event_shape_out], axis=0)

    return array_ops.reshape(x, new_shape)

  def _forward(self, x):
    with ops.control_dependencies(self._assertions):
      return self._reshape_helper(x,
                                  self._event_shape_in,
                                  self._event_shape_out)

  def _inverse(self, y):
    # Inverse is the same reshape with in/out shapes swapped.
    with ops.control_dependencies(self._assertions):
      return self._reshape_helper(y,
                                  self._event_shape_out,
                                  self._event_shape_in)

  def _inverse_log_det_jacobian(self, y):
    # Reshaping is volume-preserving, so the log-det-Jacobian is 0.
    with ops.control_dependencies(self._assertions):
      return constant_op.constant(0., dtype=y.dtype)

  def _forward_log_det_jacobian(self, x):
    # Reshaping is volume-preserving, so the log-det-Jacobian is 0.
    with ops.control_dependencies(self._assertions):
      return constant_op.constant(0., dtype=x.dtype)

  def _forward_event_shape(self, input_shape):
    # NOTE: this method and the other *_event_shape* methods
    # compute shape by explicit transformation of a dummy
    # variable. This approach is not generally recommended because it
    # bloats the graph and could in general trigger side effects.
    #
    # In this particular case of the Reshape bijector, the
    # forward and inverse transforms have no side effects, and we
    # believe the reduction in code complexity from delegating the
    # heavy lifting to tf.reshape() is worth the added graph ops.
    # However, you should think hard before implementing this approach
    # in other Bijectors; it is strongly preferred to compute
    # shapes explicitly whenever it's feasible to do so.
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
      dummy_reshaped = self.forward(dummy)
      return dummy_reshaped.shape

  def _inverse_event_shape(self, output_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
      dummy_reshaped = self.inverse(dummy)
      return dummy_reshaped.shape

  def _forward_event_shape_tensor(self, input_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=input_shape)
      dummy_reshaped = self.forward(dummy)
      return array_ops.shape(dummy_reshaped)

  def _inverse_event_shape_tensor(self, output_shape):
    with ops.control_dependencies(self._assertions):
      dummy = array_ops.zeros(dtype=dtypes.float32, shape=output_shape)
      dummy_reshaped = self.inverse(dummy)
      return array_ops.shape(dummy_reshaped)
| apache-2.0 |
theoryno3/pylearn2 | pylearn2/scripts/browse_conv_weights.py | 44 | 7605 | #! /usr/bin/env python
"""
Interactive viewer for the convolutional weights in a pickled model.
Unlike ./show_weights, this shows one unit's weights at a time. This
allows it to display weights from higher levels (which can have 100s
of input channels), not just the first.
"""
import os
import sys
import warnings
import argparse
import numpy
from pylearn2.models.mlp import MLP, ConvElemwise, CompositeLayer
from pylearn2.models.maxout import MaxoutConvC01B
from pylearn2.utils import safe_zip, serial
from pylearn2.space import Conv2DSpace
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
def _parse_args():
    """Parses command-line arguments, exiting if --input is not a .pkl path."""
    parser = argparse.ArgumentParser(
        description=("Interactive browser of convolutional weights. "
                     "Up/down keys switch layers. "
                     "Left/right keys switch units."))

    parser.add_argument('-i',
                        '--input',
                        required=True,
                        help=".pkl file of model")

    args = parser.parse_args()

    extension = os.path.splitext(args.input)[1]
    if extension != '.pkl':
        print("Expected --input to end in .pkl, got %s." % args.input)
        sys.exit(1)

    return args
def _get_conv_layers(layer, result=None):
    '''
    Returns a list of the convolutional layers in a model.

    Returns
    -------
    rval: list
        Lists the convolutional layers (ConvElemwise, MaxoutConvC01B).
    '''
    if result is None:
        result = []

    if isinstance(layer, (MLP, CompositeLayer)):
        # Containers: recurse into each child layer.
        for child in layer.layers:
            _get_conv_layers(child, result)
    elif isinstance(layer, (MaxoutConvC01B, ConvElemwise)):
        result.append(layer)

    return result
def _get_conv_weights_bc01(layer):
    '''
    Returns a conv. layer's weights in BC01 format.

    Parameters
    ----------
    layer: MaxoutConvC01B or ConvElemwise

    Returns
    -------
    rval: numpy.ndarray
        The kernel weights in BC01 axis order. (B: output channels, C: input
        channels)
    '''
    assert isinstance(layer, (MaxoutConvC01B, ConvElemwise))

    weights = layer.get_params()[0].get_value()

    if isinstance(layer, MaxoutConvC01B):
        # Maxout stores kernels as C01B; convert to BC01 via Conv2DSpace's
        # numpy reformatting.
        c01b = Conv2DSpace(shape=weights.shape[1:3],
                           num_channels=weights.shape[0],
                           axes=('c', 0, 1, 'b'))
        bc01 = Conv2DSpace(shape=c01b.shape,
                           num_channels=c01b.num_channels,
                           axes=('b', 'c', 0, 1))
        weights = c01b.np_format_as(weights, bc01)
    elif isinstance(layer, ConvElemwise):
        # Already BC01, but the spatial axes are flipped relative to the
        # Maxout layout. NOTE(review): presumably this un-flips stored
        # convolution kernels for display - confirm against the viewer.
        weights = weights[:, :, ::-1, ::-1]  # reverse 0, 1 axes

    return weights
def _num_conv_units(conv_layer):
    '''
    Returns a conv layer's number of output channels.

    Parameters
    ----------
    conv_layer: MaxoutConvC01B or ConvElemwise

    Returns
    -------
    rval: int
    '''
    assert isinstance(conv_layer, (MaxoutConvC01B, ConvElemwise))
    weights = conv_layer.get_params()[0].get_value()
    if isinstance(conv_layer, MaxoutConvC01B):
        # C01B layout: trailing axis is the output-channel ('b') axis.
        return weights.shape[-1]
    elif isinstance(conv_layer, ConvElemwise):
        # BC01-style layout: leading axis is the output-channel axis.
        return weights.shape[0]
def main():
    "Entry point of script."
    args = _parse_args()
    model = serial.load(args.input)
    if not isinstance(model, MLP):
        # Bug fix: 'model.type' raised AttributeError on arbitrary objects;
        # use the builtin type() to report what was actually loaded.
        print("Expected the .pkl file to contain an MLP, got a %s." %
              str(type(model)))
        sys.exit(1)

    def get_figure_and_axes(conv_layers, window_width=800):
        """
        Builds the figure and a grid of axes large enough for the layer
        with the most input channels (one subplot per channel).

        Returns
        -------
        rval: tuple
            (figure, all_axes), where all_axes is a 2-D array of Axes.
        """
        kernel_display_width = 20
        margin = 5
        grid_square_width = kernel_display_width + margin
        num_columns = window_width // grid_square_width
        max_num_channels = numpy.max([layer.get_input_space().num_channels
                                      for layer in conv_layers])
        # Round the row count up so every channel gets a subplot.
        num_rows = max_num_channels // num_columns
        if num_rows * num_columns < max_num_channels:
            num_rows += 1
        assert num_rows * num_columns >= max_num_channels

        # From here on window_width is a figure size in inches, not pixels.
        window_width = 15
        # '* 1.8' comes from the fact that rows take up about 1.8 times as much
        # space as columns, due to the title text.
        window_height = window_width * ((num_rows * 1.8) / num_columns)
        figure, all_axes = pyplot.subplots(num_rows,
                                           num_columns,
                                           squeeze=False,
                                           figsize=(window_width,
                                                    window_height))
        for unit_index, axes in enumerate(all_axes.flat):
            subplot_title = axes.set_title('%d' % unit_index)
            subplot_title.set_size(8)
            subplot_title.set_color((.3, .3, .3))
        # Hides tickmarks
        for axes_row in all_axes:
            for axes in axes_row:
                axes.get_xaxis().set_visible(False)
                axes.get_yaxis().set_visible(False)
        return figure, all_axes

    conv_layers = _get_conv_layers(model)
    figure, all_axes = get_figure_and_axes(conv_layers)
    title_text = figure.suptitle("title")
    pyplot.tight_layout(h_pad=.1, w_pad=.5)  # in inches

    # 0-dim / 1-dim numpy arrays so the key-press callback below can
    # mutate the selection in place.
    layer_index = numpy.array(0)
    unit_indices = numpy.zeros(len(model.layers), dtype=int)

    def redraw():
        '''
        Draws the currently selected convolutional kernel.
        '''
        axes_list = all_axes.flatten()
        layer = conv_layers[layer_index]
        unit_index = unit_indices[layer_index, ...]
        weights = _get_conv_weights_bc01(layer)[unit_index, ...]

        # One subplot per input channel of the selected unit.
        active_axes = axes_list[:weights.shape[0]]
        for axes, weights in safe_zip(active_axes, weights):
            axes.set_visible(True)
            axes.imshow(weights, cmap='gray', interpolation='nearest')

        assert len(frozenset(active_axes)) == len(active_axes)
        unused_axes = axes_list[len(active_axes):]
        assert len(frozenset(unused_axes)) == len(unused_axes)
        assert len(axes_list) == len(active_axes) + len(unused_axes)
        for axes in unused_axes:
            axes.set_visible(False)

        title_text.set_text("Layer %s, unit %d" %
                            (layer.layer_name,
                             unit_indices[layer_index]))
        figure.canvas.draw()

    def on_key_press(event):
        "Callback for key press events"

        def increment(index, size, step):
            """
            Increments an index in-place.

            Parameters
            ----------
            index: numpy.ndarray
              scalar (0-dim array) of dtype=int. Non-negative.

            size: int
              One more than the maximum permissible index.

            step: int
              -1, 0, or 1.
            """
            assert index >= 0
            assert step in (0, -1, 1)
            index[...] = (index + size + step) % size

        if event.key in ('up', 'down'):
            increment(layer_index,
                      len(conv_layers),
                      1 if event.key == 'up' else -1)
            redraw()
        elif event.key in ('right', 'left'):
            # Length-1 slice (a view) so increment() can mutate it in place.
            unit_index = unit_indices[layer_index:layer_index + 1]
            increment(unit_index,
                      _num_conv_units(conv_layers[layer_index]),
                      1 if event.key == 'right' else -1)
            redraw()
        elif event.key == 'q':
            sys.exit(0)

    figure.canvas.mpl_connect('key_press_event', on_key_press)
    redraw()
    pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
UstadMobile/exelearning-extjs5-mirror | nevow/test/test_i18n.py | 14 | 8863 | from twisted.trial import unittest
from cStringIO import StringIO
from nevow import inevow, flat, context, tags, loaders, rend
from nevow import i18n
def mockTranslator(s, domain=None):
    """
    Fake translation function used by these tests.

    Wraps the message in a MOCK(...) marker so assertions can see exactly
    which keyword arguments the translator was invoked with.
    """
    if domain is None:
        rendered_args = ''
    else:
        rendered_args = 'domain=%r' % (domain,)
    return 'MOCK(%s)[%s]' % (rendered_args, s)
class Misc(unittest.TestCase):
    """Basic sanity checks for nevow.i18n message objects."""
    def test_simple(self):
        # Merely constructing a lazy message must not raise.
        s = i18n._('foo')
    def test_simple_flat(self):
        # Flattening with the default translator yields the original text.
        s = i18n._('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, 'foo')
    def test_translator(self):
        # A custom translator function is applied during flattening.
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, 'MOCK()[foo]')
class Config(unittest.TestCase):
    """Tests for remembering an I18NConfig on a context."""
    def test_remember(self):
        # Remembering a config on a fresh context must not raise.
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='foo')
        ctx.remember(cfg)
class Domain(unittest.TestCase):
    """
    Tests for how the translation domain is selected.

    Precedence demonstrated below: call-time domain beats the
    Translator's class-init domain, which beats a remembered
    I18NConfig on the context.
    """
    def test_classInit(self):
        # Domain given at Translator construction time is used.
        _ = i18n.Translator(translator=mockTranslator,
                            domain='bar')
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='bar')[foo]")
    def test_runTime(self):
        # Domain given per-call is used.
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo', domain='baz')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
    def test_context(self):
        # Domain remembered on the flattening context is used.
        _ = i18n.Translator(translator=mockTranslator)
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='thud')
        ctx.remember(cfg)
        s = _('foo')
        r = flat.ten.flatten(s, ctx)
        self.assertEquals(r, "MOCK(domain='thud')[foo]")
    def test_runTime_beats_all(self):
        # Call-time domain wins over both class init and context config.
        _ = i18n.Translator(translator=mockTranslator,
                            domain='not-used1')
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='not-used2')
        ctx.remember(cfg)
        s = _('foo', domain='baz')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
    def test_classInit_beats_context(self):
        # Class-init domain wins over the context config.
        _ = i18n.Translator(translator=mockTranslator,
                            domain='baz')
        ctx = context.WebContext()
        cfg = i18n.I18NConfig(domain='not-used')
        ctx.remember(cfg)
        s = _('foo')
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK(domain='baz')[foo]")
class Format(unittest.TestCase):
    """Tests for %-interpolation on lazily translated messages."""
    def test_simple(self):
        # '%' on a message interpolates after translation.
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo %s') % 'bar'
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK()[foo bar]")
    def test_multiple(self):
        # Repeated '%' applications chain, filling placeholders in turn.
        _ = i18n.Translator(translator=mockTranslator)
        s = _('foo %s')
        s = s % 'bar %s'
        s = s % 'baz'
        r = flat.ten.flatten(s, None)
        self.assertEquals(r, "MOCK()[foo bar baz]")
class FakeRequest(object):
    """
    Minimal stand-in for a nevow request, exposing just enough of the
    header API for the language-negotiation tests below.
    """
    __implements__ = inevow.IRequest,

    def __init__(self, headers):
        # Mapping of header name -> value.
        self.headers = headers

    def getHeader(self, key):
        """Return the header's value, or None if it was never set."""
        if key in self.headers:
            return self.headers[key]
        return None
class Languages(unittest.TestCase):
    """
    Tests for Accept-Language parsing via the ILanguages adapter.

    NOTE(review): expected orderings below (including the invalid-quality
    cases) are pinned to the adapter's implementation — confirm against
    nevow's ILanguages when changing them.
    """
    def test_noLanguages(self):
        # No header -> empty preference list.
        request = FakeRequest(headers={})
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, [])
    def test_oneLanguage(self):
        request = FakeRequest(headers={
            'accept-language': 'fo',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo'])
    def test_multipleLanguages(self):
        # Comma-separated languages keep their order when no q-values given.
        request = FakeRequest(headers={
            'accept-language': 'fo,ba,th',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo', 'ba', 'th'])
    def test_quality_simple(self):
        # A q-value parameter is stripped from the returned tag.
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['fo'])
    def test_quality_sort(self):
        # Languages are sorted by descending quality.
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;q=0.2,xy;q=0.9',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['xy', 'fo', 'ba'])
    def test_quality_invalid_notQ(self):
        # A non-'q' parameter: that entry sorts ahead of the valid one.
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;z=0.2',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['ba', 'fo'])
    def test_quality_invalid_notFloat(self):
        # An unparsable q-value: same fallback ordering as above.
        request = FakeRequest(headers={
            'accept-language': 'fo;q=0.4,ba;q=junk',
            })
        ctx = context.RequestContext(tag=request)
        r = inevow.ILanguages(ctx)
        self.assertEquals(r, ['ba', 'fo'])
class Render(unittest.TestCase):
    """Tests for the i18n render directive in a stan template."""
    def makePage(self, content):
        """
        Render `content` inside an i18n-rendered invisible tag and return
        the flattened page output as a string.
        """
        _ = i18n.Translator(translator=mockTranslator)
        page = rend.Page(
            docFactory=loaders.stan(tags.invisible(render=tags.directive('i18n'))[content]))
        page.render_i18n = i18n.render(_)
        doc = page.docFactory.load()
        ctx = context.WovenContext(context.PageContext(tag=page),
                                   tags.invisible[doc])
        page.rememberStuff(ctx)
        io = StringIO()
        writer = io.write
        def finisher(result):
            return io.getvalue()
        d = page.flattenFactory(doc, ctx, writer, finisher)
        # NOTE(review): unittest.deferredResult is a long-removed Twisted
        # trial API; this only runs on very old Twisted releases.
        r = unittest.deferredResult(d, 1)
        return r
    def test_empty(self):
        r = self.makePage([''])
        self.assertEquals(r, 'MOCK()[]')
    def test_simple(self):
        r = self.makePage(['foo'])
        self.assertEquals(r, 'MOCK()[foo]')
    def test_stan(self):
        # Tags inside the i18n content are flattened into the message.
        r = self.makePage([tags.p['You should really avoid tags in i18n input.']])
        self.assertEquals(r, 'MOCK()[<p>You should really avoid tags in i18n input.</p>]')
class InterpolateTests:
    """
    Mix-in collection of %-interpolation cases.

    Subclasses (via InterpolateMixin) supply self.check(fmt, args, *wants),
    where *wants lists the acceptable results (dict cases accept either
    key ordering).
    """
    def test_mod_string(self):
        self.check('foo %s', 'bar',
                   'foo bar')
    def test_mod_unicode(self):
        self.check('foo %s', u'bar',
                   'foo bar')
    # Tuples are a special case, 'foo %s' % ('bar', 'baz') does not
    # work. Also, 'foo %s %s' only works with tuples.
    def test_mod_tuple_two(self):
        self.check('foo %s %s', ('bar', 'baz'),
                   "foo bar baz")
    def test_mod_tuple_complex(self):
        self.check('foo %s %s %s', ([1, 2], (3, 4), {5: 6}),
                   "foo [1, 2] (3, 4) {5: 6}")
    def test_mod_list_stringify(self):
        self.check('foo %s', ['bar', 'baz'],
                   "foo ['bar', 'baz']")
    def test_mod_list_reprify(self):
        self.check('foo %r', ['bar', 'baz'],
                   "foo ['bar', 'baz']")
    def test_mod_dict_stringify(self):
        # Two acceptable answers: dict ordering was arbitrary pre-3.7.
        self.check('foo %s', {'bar': 1, 'baz': 2},
                   "foo {'bar': 1, 'baz': 2}",
                   "foo {'baz': 2, 'bar': 1}")
    def test_mod_dict_reprify(self):
        self.check('foo %r', {'bar': 1, 'baz': 2},
                   "foo {'bar': 1, 'baz': 2}",
                   "foo {'baz': 2, 'bar': 1}")
    def test_mod_dict_two(self):
        self.check('foo %(bar)s %(baz)s', {'bar': 1, 'baz': 2},
                   "foo 1 2")
class InterpolateMixin:
    """
    Base for the interpolation test classes: provides the translator and
    the check() helper; subclasses define mangle() (repr/str/flatten).
    Written for Python 2 (note the old raise syntax below).
    """
    def setUp(self):
        self._ = i18n.Translator(translator=mockTranslator)
    def mangle(self, s):
        raise NotImplementedError, 'override mangle somewhere'
    def check(self, fmt, args, *wants):
        # Build the lazy message, apply %, mangle it, and accept any of
        # the permitted renderings.
        got = self.mangle(self._(fmt) % args)
        self.failUnlessIn(got, wants)
class Repr(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """repr() of an uninterpolated message shows the PlaceHolder form."""
    def mangle(self, s):
        return repr(s)
    def check(self, fmt, args, *wants):
        # Overrides the mixin: *wants is ignored; the expected value is
        # always the PlaceHolder repr of the deferred interpolation.
        InterpolateMixin.check(self, fmt, args,
                               "PlaceHolder(translator=%r, original=%r) %% %r" % \
                               (mockTranslator, fmt, args))
class Str(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """str() of an uninterpolated message also shows the PlaceHolder form."""
    def mangle(self, s):
        return str(s)
    def check(self, fmt, args, *wants):
        # Same expectation as Repr: *wants is ignored.
        InterpolateMixin.check(self, fmt, args,
                               "PlaceHolder(translator=%r, original=%r) %% %r" % \
                               (mockTranslator, fmt, args))
class Interpolation(InterpolateMixin, unittest.TestCase, InterpolateTests):
    """Flattening actually performs the interpolation (inside MOCK())."""
    def mangle(self, s):
        r = flat.ten.flatten(s, None)
        return r
    def check(self, fmt, args, *wants):
        # Each acceptable result is wrapped by the mock translator marker.
        InterpolateMixin.check(self, fmt, args,
                               *['MOCK()[%s]' % x for x in wants])
| gpl-2.0 |
DigitalGlobe/gbdxtools | tests/unit/test_catalog.py | 1 | 8401 | '''
Authors: Donnie Marino, Kostas Stamatiou
Contact: dmarino@digitalglobe.com
Unit tests for the gbdxtools.Catalog class
'''
from gbdxtools import Interface
from gbdxtools.catalog import Catalog
from auth_mock import get_mock_gbdx_session
import vcr
import unittest
class TestCatalog(unittest.TestCase):
    """
    Unit tests for gbdxtools.catalog.Catalog.

    All HTTP traffic is replayed from prerecorded vcr cassettes
    (tests/unit/cassettes/*.yaml), so every expected count, coordinate
    and S3 path below is pinned to those recordings.
    """
    @classmethod
    def setUpClass(cls):
        # Mock session avoids real authentication against the GBDX API.
        mock_gbdx_session = get_mock_gbdx_session(token="dummytoken")
        cls.gbdx = Interface(gbdx_connection=mock_gbdx_session)
    def test_init(self):
        c = Catalog()
        self.assertTrue(isinstance(c, Catalog))
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_address_coords.yaml', filter_headers=['authorization'])
    def test_catalog_get_address_coords(self):
        c = Catalog()
        lat, lng = c.get_address_coords('Boulder, CO')
        self.assertTrue(lat == 40.0149856)
        self.assertTrue(lng == -105.2705456)
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_record.yaml', filter_headers=['authorization'])
    def test_catalog_get_record(self):
        c = Catalog()
        catid = '1040010019B4A600'
        record = c.get(catid)
        self.assertEqual(record['identifier'], '1040010019B4A600')
        assert 'DigitalGlobeAcquisition' in record['type']
        self.assertTrue('inEdges' not in list(record.keys()))
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_record_with_relationships.yaml', filter_headers=['authorization'])
    def test_catalog_get_record_with_relationships(self):
        """
        includeRelationships doesn't do anything anymore. This is now a test of backward compatibility.
        """
        c = Catalog()
        catid = '1040010019B4A600'
        record = c.get(catid, includeRelationships=True)
        self.assertEqual(record['identifier'], '1040010019B4A600')
        assert 'DigitalGlobeAcquisition' in record['type']
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_point.yaml', filter_headers=['authorization'])
    def test_catalog_search_point(self):
        c = Catalog()
        lat = 40.0149856
        lng = -105.2705456
        results = c.search_point(lat, lng)
        self.assertEqual(len(results),499)
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_address.yaml', filter_headers=['authorization'])
    def test_catalog_search_address(self):
        c = Catalog()
        results = c.search_address('Boulder, CO')
        self.assertEqual(len(results), 499)
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_only.yaml',filter_headers=['authorization'])
    def test_catalog_search_wkt_only(self):
        c = Catalog()
        results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
        assert len(results) == 508
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_and_startDate.yaml',filter_headers=['authorization'])
    def test_catalog_search_wkt_and_startDate(self):
        c = Catalog()
        results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))",
                           startDate='2012-01-01T00:00:00.000Z')
        assert len(results) == 416
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_wkt_and_endDate.yaml',filter_headers=['authorization'])
    def test_catalog_search_wkt_and_endDate(self):
        c = Catalog()
        results = c.search(searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))",
                           endDate='2012-01-01T00:00:00.000Z')
        assert len(results) == 92
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_startDate_and_endDate_only_more_than_one_week_apart.yaml',filter_headers=['authorization'])
    def test_catalog_search_startDate_and_endDate_only_more_than_one_week_apart(self):
        c = Catalog()
        results = c.search(startDate='2004-01-01T00:00:00.000Z',
                           endDate='2012-01-01T00:00:00.000Z')
        assert len(results) == 1000
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_startDate_and_endDate_only_less_than_one_week_apart.yaml',filter_headers=['authorization'])
    def test_catalog_search_startDate_and_endDate_only_less_than_one_week_apart(self):
        c = Catalog()
        results = c.search(startDate='2008-01-01T00:00:00.000Z',
                           endDate='2008-01-03T00:00:00.000Z')
        assert len(results) == 643
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_filters1.yaml',filter_headers=['authorization'])
    def test_catalog_search_filters1(self):
        # Every returned record must satisfy all three filters.
        c = Catalog()
        filters = [
            "(sensorPlatformName = 'WORLDVIEW01' OR sensorPlatformName ='QUICKBIRD02')",
            "cloudCover < 10",
            "offNadirAngle < 10"
        ]
        results = c.search(startDate='2008-01-01T00:00:00.000Z',
                           endDate='2012-01-03T00:00:00.000Z',
                           filters=filters,
                           searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
        for result in results:
            assert result['properties']['sensorPlatformName'] in ['WORLDVIEW01','QUICKBIRD02']
            assert float(result['properties']['cloudCover']) < 10
            assert float(result['properties']['offNadirAngle']) < 10
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_filters2.yaml',filter_headers=['authorization'])
    def test_catalog_search_filters2(self):
        c = Catalog()
        filters = [
            "sensorPlatformName = 'WORLDVIEW03'"
        ]
        results = c.search(filters=filters,
                           searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
        for result in results:
            assert result['properties']['sensorPlatformName'] in ['WORLDVIEW03']
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_types1.yaml',filter_headers=['authorization'])
    def test_catalog_search_types1(self):
        c = Catalog()
        types = [ "LandsatAcquisition" ]
        results = c.search(types=types,
                           searchAreaWkt="POLYGON ((30.1 9.9, 30.1 10.1, 29.9 10.1, 29.9 9.9, 30.1 9.9))")
        for result in results:
            assert 'LandsatAcquisition' in result['type']
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_search_huge_aoi.yaml',filter_headers=['authorization'])
    def test_catalog_search_huge_aoi(self):
        """
        Search an AOI the size of utah, broken into multiple smaller searches
        """
        c = Catalog()
        results = c.search(searchAreaWkt = "POLYGON((-113.88427734375 40.36642741921034,-110.28076171875 40.36642741921034,-110.28076171875 37.565262680889965,-113.88427734375 37.565262680889965,-113.88427734375 40.36642741921034))")
        assert len(results) == 1000 # we will max out the paging limit of the vector service
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_data_location_DG.yaml',filter_headers=['authorization'])
    def test_catalog_get_data_location_DG(self):
        c = Catalog()
        s3path = c.get_data_location(catalog_id='1030010045539700')
        assert s3path == 's3://receiving-dgcs-tdgplatform-com/055158926010_01_003'
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_data_location_Landsat.yaml',filter_headers=['authorization'])
    def test_catalog_get_data_location_Landsat(self):
        c = Catalog()
        s3path = c.get_data_location(catalog_id='LC81740532014364LGN00')
        assert s3path == 's3://landsat-pds/L8/174/053/LC81740532014364LGN00'
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_data_location_nonexistent_catid.yaml',filter_headers=['authorization'])
    def test_catalog_get_data_location_nonexistent_catid(self):
        # Unknown catalog ids yield None rather than raising.
        c = Catalog()
        s3path = c.get_data_location(catalog_id='nonexistent_asdfasdfasdfdfasffds')
        assert s3path == None
    @vcr.use_cassette('tests/unit/cassettes/test_catalog_get_data_location_catid_with_no_data.yaml',filter_headers=['authorization'])
    def test_catalog_get_data_location_catid_with_no_data(self):
        # A valid catalog id with no ordered data also yields None.
        c = Catalog()
        s3path = c.get_data_location(catalog_id='1010010011AD6E00')
        assert s3path == None
| mit |
grbd/GBD.Build.BlackJack | blackjack/cmake/cmd/project.py | 1 | 1199 | from blackjack.cmake.ScriptBase import ScriptBase
from blackjack.cmake.cmdpart.Version import Version
class project(ScriptBase):
    """
    CMake Command - Project Definition

    Renders a ``project(<name> <version> LANGUAGES <langs>)`` call.
    """
    def __init__(self, name: str, version: Version = None, langs: str = None):
        super().__init__()
        self._Name = None
        self.Name = name
        """Name of the project"""
        self.Version = version
        """Version associated with the project"""
        self.Langs = langs
        """Languages associated with the project"""
        # Set Defaults
        if self.Version is None: self.Version = Version(0, 0)
        # Bug fix: CMake's LANGUAGES list is whitespace separated; the old
        # default "C, CXX" rendered as the invalid language name "C,".
        if self.Langs is None: self.Langs = "C CXX"
        return

    @property
    def Name(self):
        """Name of the project (spaces are replaced with underscores)."""
        return self._Name

    @Name.setter
    def Name(self, value):
        # CMake project names may not contain spaces.
        self._Name = value.replace(" ", "_")

    def render_body(self):
        """Render the project() command as a list of script lines."""
        ret = []
        tmpline = "project(" + self.Name + " "
        if self.Version:
            tmpline += self.Version.render_string() + " "
        if self.Langs:
            tmpline += "LANGUAGES " + self.Langs
        tmpline += ")"
        ret.append(tmpline)
        return ret
| apache-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.