repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
dandycheung/androguard | androguard/core/bytecodes/jvm.py | 38 | 128778 | # This file is part of Androguard.
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from struct import pack, unpack, calcsize
from collections import namedtuple
import re, zipfile, StringIO, os
from androguard.core import bytecode
from androguard.core.bytecode import SV, SVs
######################################################## JAR FORMAT ########################################################
class JAR :
    """Minimal JAR container: load an archive (from a path or raw bytes) and
    expose its .class entries.

    NOTE: Python 2 code (StringIO module, print statement).
    """
    def __init__(self, filename, raw=False) :
        # When raw=True, `filename` is actually the archive's raw bytes.
        self.filename = filename
        if raw == True :
            self.__raw = filename
        else :
            fd = open( filename, "rb" )
            self.__raw = fd.read()
            fd.close()
        self.zip = zipfile.ZipFile( StringIO.StringIO( self.__raw ) )
    def get_classes(self) :
        """Return a list of (entry_name, raw_bytes) for every .class entry.

        NOTE(review): matches ".class" anywhere in the name, not only as a
        suffix -- confirm that is intended.
        """
        l = []
        for i in self.zip.namelist() :
            if ".class" in i :
                l.append( (i, self.zip.read(i)) )
        return l
    def show(self) :
        # Debug helper: dump all archive entry names.
        print self.zip.namelist()
######################################################## CLASS FORMAT ########################################################
# Special functions to manage more easily special arguments of bytecode
def special_F0(x) :
    """Identity decode: return the operand bytes as a plain list."""
    return list(x)
def special_F0R(x) :
    """Identity encode: wrap a single decoded value in a list."""
    return [x]
def special_F1(x) :
    """Decode two big-endian bytes into an unsigned 16-bit value."""
    hi, lo = x[0], x[1]
    return (hi << 8) | lo
def special_F1R(x) :
    """Encode an unsigned 16-bit value back into two big-endian bytes."""
    return [ (x >> 8) & 0xFF, x & 0xFF ]
def special_F2(x) :
    """Decode two big-endian bytes as a signed 16-bit branch offset."""
    value = (x[0] << 8) | x[1]
    # Fold into two's-complement range when the sign bit is set.
    return (value & 0x7FFF) - 0x8000 if value > 0x7FFF else value
def special_F2R(x) :
    """Encode a (possibly negative) 16-bit offset into two big-endian bytes."""
    word = x & 0xFFFF
    return [ word >> 8, word & 0xFF ]
def special_F3(x) :
    """Decode four big-endian bytes as a signed 32-bit branch offset."""
    value = 0
    for octet in (x[0], x[1], x[2], x[3]):
        value = (value << 8) | octet
    if value > 0x7fffffff :
        # Fold into two's-complement range when the sign bit is set.
        value = (value & 0x7fffffff) - 0x80000000
    return value
def special_F3R(x) :
    """Encode a (possibly negative) 32-bit offset into four big-endian bytes."""
    word = x & 0xFFFFFFFF
    return [ (word >> shift) & 0xFF for shift in (24, 16, 8, 0) ]
def special_F4(x) :
    """Decode a 16-bit index followed by a one-byte operand (multianewarray)."""
    index = (x[0] << 8) | x[1]
    return [ index, x[2] ]
def special_F4R(x) :
    # Encoder counterpart of special_F4 -- not implemented; returns None.
    # NOTE(review): re-encoding multianewarray operands is therefore unsupported.
    pass
def specialSwitch(x) :
    """Identity transform used for switch-style operands (no conversion)."""
    return x
# Primitive JVM field descriptor codes -> human readable type names (JVM spec 4.3.2).
FD = { "B" : "byte",
       "C" : "char",
       "D" : "double",
       "F" : "float",
       "I" : "int",
       "J" : "long",
       "S" : "short",
       "Z" : "boolean",
       "V" : "void",
     }
def formatFD(v) :
    """Translate a JVM descriptor string into a list of readable type names.

    Object types ("Lpkg/Name;") become their base class name; array types
    become [element_name, ["[]", ...]] with one "[]" per dimension.

    FIX: object arrays ("[Ljava/lang/String;") previously raised KeyError,
    because the array branch always indexed FD with the element tag, even
    when that tag was "L".
    """
    l = []
    i = 0
    while i < len(v) :
        if v[i] == "L" :
            # Object type: consume up to ';' and keep only the base name.
            base_object = ""
            i = i + 1
            while v[i] != ";" :
                base_object += v[i]
                i = i + 1
            l.append( os.path.basename( base_object ) )
        elif v[i] == "[" :
            # Array type: one "[]" per dimension, then the element type.
            z = []
            while v[i] == "[" :
                z.append( "[]" )
                i = i + 1
            if v[i] == "L" :
                base_object = ""
                i = i + 1
                while v[i] != ";" :
                    base_object += v[i]
                    i = i + 1
                l.append( [ os.path.basename( base_object ), z ] )
            else :
                l.append( [ FD[ v[i] ], z ] )
        else :
            # Primitive type code.
            l.append( FD[ v[i] ] )
        i = i + 1
    return l
def TableSwitch(idx, raw_format) :
    """Build the field names and struct format needed to decode a tableswitch
    instruction whose opcode sits at byte offset `idx`.

    Returns (decode_fn, encode_fn, field_names, struct_format, resolver) --
    the same 5-tuple shape as the fixed-size opcode entries.
    """
    r_buff = []
    r_format = ">"
    # Skip the opcode byte, then 0-3 padding bytes so `default` is 4-aligned.
    idx = idx + 1
    n = 0
    if idx % 4 :
        n = 4 - (idx % 4)
    for i in range(0, n) :
        r_buff.append( "bytepad%d" % i )
        r_format += "B"
    r_buff.extend( [ "default", "low", "high" ] )
    # NOTE(review): '>L' is unsigned, but tableswitch default/low/high/offsets
    # are signed 32-bit per the JVM spec -- confirm downstream handles this.
    r_format += "LLL"
    # Byte offset of `low` inside raw_format: opcode(1) + padding(n) + default(4).
    # NOTE(review): assumes raw_format starts at the opcode byte -- confirm caller.
    idx = 1 + n + 4
    low = unpack('>L', raw_format[ idx : idx + 4 ])[0]
    idx = idx + 4
    high = unpack('>L', raw_format[ idx : idx + 4 ])[0]
    # One jump offset per value in the [low, high] range.
    for i in range(0, high - low + 1) :
        r_buff.append( "offset%d" % i )
        r_format += "L"
    return specialSwitch, specialSwitch, r_buff, r_format, None
def LookupSwitch(idx, raw_format) :
    """Build the field names and struct format needed to decode a lookupswitch
    instruction whose opcode sits at byte offset `idx`.

    Returns (decode_fn, encode_fn, field_names, struct_format, resolver) --
    the same 5-tuple shape as the fixed-size opcode entries.
    """
    r_buff = []
    r_format = ">"
    # Skip the opcode byte, then 0-3 padding bytes so `default` is 4-aligned.
    idx = idx + 1
    n = 0
    if idx % 4 :
        n = 4 - (idx % 4)
    for i in range(0, n) :
        r_buff.append( "bytepad%d" % i )
        r_format += "B"
    r_buff.extend( [ "default", "npairs" ] )
    # NOTE(review): '>L' is unsigned, but lookupswitch match/offset pairs are
    # signed 32-bit per the JVM spec -- confirm downstream handles this.
    r_format += "LL"
    # Byte offset of `npairs`: opcode(1) + padding(n) + default(4).
    idx = 1 + n + 4
    # One (match, offset) pair per npairs.
    for i in range(0, unpack('>L', raw_format[ idx : idx + 4 ])[0]) :
        r_buff.extend( [ "match%d" % i, "offset%d" % i ] )
        r_format += "LL"
    return specialSwitch, specialSwitch, r_buff, r_format, None
# The list of java bytecodes, with their value, name, and special functions !
# Opcode table. Entry shapes:
#   value : [ name ]                                     -- operand-less opcode
#   value : [ name, "field:structcode ...", decode_fn, encode_fn,
#             resolver_name_or_None, optional_index_resolver ]
#   value : [ name, builder_fn ]                         -- variable-length (switches)
# NOTE(review): some encode slots use special_F0 where special_F0R would be
# expected (e.g. aload/astore), and 0xc1/0x13/0x14 use the signed F2 pair for
# what the spec defines as unsigned pool indexes -- confirm before relying on
# re-assembly of those opcodes.
JAVA_OPCODES = {
    0x32 : [ "aaload" ],
    0x53 : [ "aastore" ],
    0x1 : [ "aconst_null" ],
    0x19 : [ "aload", "index:B", special_F0, special_F0, None ],
    0x2a : [ "aload_0" ],
    0x2b : [ "aload_1" ],
    0x2c : [ "aload_2" ],
    0x2d : [ "aload_3" ],
    0xbd : [ "anewarray", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_class" ],
    0xb0 : [ "areturn" ],
    0xbe : [ "arraylength" ],
    0x3a : [ "astore", "index:B", special_F0, special_F0, None ],
    0x4b : [ "astore_0" ],
    0x4c : [ "astore_1" ],
    0x4d : [ "astore_2" ],
    0x4e : [ "astore_3" ],
    0xbf : [ "athrow" ],
    0x33 : [ "baload" ],
    0x54 : [ "bastore" ],
    0x10 : [ "bipush", "byte:B", special_F0, special_F0R, None ],
    0x34 : [ "caload" ],
    0x55 : [ "castore" ],
    0xc0 : [ "checkcast", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, None ],
    0x90 : [ "d2f" ],
    0x8e : [ "d2i" ],
    0x8f : [ "d2l" ],
    0x63 : [ "dadd" ],
    0x31 : [ "daload" ],
    0x52 : [ "dastore" ],
    0x98 : [ "dcmpg" ],
    0x97 : [ "dcmpl" ],
    0xe : [ "dconst_0" ],
    0xf : [ "dconst_1" ],
    0x6f : [ "ddiv" ],
    0x18 : [ "dload", "index:B", special_F0, special_F0, None ],
    0x26 : [ "dload_0" ],
    0x27 : [ "dload_1" ],
    0x28 : [ "dload_2" ],
    0x29 : [ "dload_3" ],
    0x6b : [ "dmul" ],
    0x77 : [ "dneg" ],
    0x73 : [ "drem" ],
    0xaf : [ "dreturn" ],
    0x39 : [ "dstore", "index:B", special_F0, special_F0, None ],
    0x47 : [ "dstore_0" ],
    0x48 : [ "dstore_1" ],
    0x49 : [ "dstore_2" ],
    0x4a : [ "dstore_3" ],
    0x67 : [ "dsub" ],
    0x59 : [ "dup" ],
    0x5a : [ "dup_x1" ],
    0x5b : [ "dup_x2" ],
    0x5c : [ "dup2" ],
    0x5d : [ "dup2_x1" ],
    0x5e : [ "dup2_x2" ],
    0x8d : [ "f2d" ],
    0x8b : [ "f2i" ],
    0x8c : [ "f2l" ],
    0x62 : [ "fadd" ],
    0x30 : [ "faload" ],
    0x51 : [ "fastore" ],
    0x96 : [ "fcmpg" ],
    0x95 : [ "fcmpl" ],
    0xb : [ "fconst_0" ],
    0xc : [ "fconst_1" ],
    0xd : [ "fconst_2" ],
    0x6e : [ "fdiv" ],
    0x17 : [ "fload", "index:B", special_F0, special_F0, None ],
    0x22 : [ "fload_0" ],
    0x23 : [ "fload_1" ],
    0x24 : [ "fload_2" ],
    0x25 : [ "fload_3" ],
    0x6a : [ "fmul" ],
    0x76 : [ "fneg" ],
    0x72 : [ "frem" ],
    0xae : [ "freturn" ],
    0x38 : [ "fstore", "index:B", special_F0, special_F0, None ],
    0x43 : [ "fstore_0" ],
    0x44 : [ "fstore_1" ],
    0x45 : [ "fstore_2" ],
    0x46 : [ "fstore_3" ],
    0x66 : [ "fsub" ],
    0xb4 : [ "getfield", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field" ],
    0xb2 : [ "getstatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
    0xa7 : [ "goto", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xc8 : [ "goto_w", "branchbyte1:B branchbyte2:B branchbyte3:B branchbyte4:B", special_F3, special_F3R, None ],
    0x91 : [ "i2b" ],
    0x92 : [ "i2c" ],
    0x87 : [ "i2d" ],
    0x86 : [ "i2f" ],
    0x85 : [ "i2l" ],
    0x93 : [ "i2s" ],
    0x60 : [ "iadd" ],
    0x2e : [ "iaload" ],
    0x7e : [ "iand" ],
    0x4f : [ "iastore" ],
    0x2 : [ "iconst_m1" ],
    0x3 : [ "iconst_0" ],
    0x4 : [ "iconst_1" ],
    0x5 : [ "iconst_2" ],
    0x6 : [ "iconst_3" ],
    0x7 : [ "iconst_4" ],
    0x8 : [ "iconst_5" ],
    0x6c : [ "idiv" ],
    0xa5 : [ "if_acmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa6 : [ "if_acmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9f : [ "if_icmpeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa0 : [ "if_icmpne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa1 : [ "if_icmplt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa2 : [ "if_icmpge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa3 : [ "if_icmpgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xa4 : [ "if_icmple", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x99 : [ "ifeq", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9a : [ "ifne", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9b : [ "iflt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9c : [ "ifge", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9d : [ "ifgt", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x9e : [ "ifle", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xc7 : [ "ifnonnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xc6 : [ "ifnull", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0x84 : [ "iinc", "index:B const:B", special_F0, special_F0, None ],
    0x15 : [ "iload", "index:B", special_F0, special_F0, None ],
    0x1a : [ "iload_0" ],
    0x1b : [ "iload_1" ],
    0x1c : [ "iload_2" ],
    0x1d : [ "iload_3" ],
    0x68 : [ "imul" ],
    0x74 : [ "ineg" ],
    0xc1 : [ "instanceof", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
    0xb9 : [ "invokeinterface", "indexbyte1:B indexbyte2:B count:B null:B", special_F1, special_F1R, "get_interface", "get_interface_index" ],
    0xb7 : [ "invokespecial", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
    0xb8 : [ "invokestatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
    0xb6 : [ "invokevirtual", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_method", "get_method_index" ],
    0x80 : [ "ior" ],
    0x70 : [ "irem" ],
    0xac : [ "ireturn" ],
    0x78 : [ "ishl" ],
    0x7a : [ "ishr" ],
    0x36 : [ "istore", "index:B", special_F0, special_F0, None ],
    0x3b : [ "istore_0" ],
    0x3c : [ "istore_1" ],
    0x3d : [ "istore_2" ],
    0x3e : [ "istore_3" ],
    0x64 : [ "isub" ],
    0x7c : [ "iushr" ],
    0x82 : [ "ixor" ],
    0xa8 : [ "jsr", "branchbyte1:B branchbyte2:B", special_F2, special_F2R, None ],
    0xc9 : [ "jsr_w", "branchbyte1:B branchbyte2:B branchbyte3:B branchbyte4:B", special_F3, special_F3R, None ],
    0x8a : [ "l2d" ],
    0x89 : [ "l2f" ],
    0x88 : [ "l2i" ],
    0x61 : [ "ladd" ],
    0x2f : [ "laload" ],
    0x7f : [ "land" ],
    0x50 : [ "lastore" ],
    0x94 : [ "lcmp" ],
    0x9 : [ "lconst_0" ],
    0xa : [ "lconst_1" ],
    0x12 : [ "ldc", "index:B", special_F0, special_F0R, "get_value" ],
    0x13 : [ "ldc_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
    0x14 : [ "ldc2_w", "indexbyte1:B indexbyte2:B", special_F2, special_F2R, None ],
    0x6d : [ "ldiv" ],
    0x16 : [ "lload", "index:B", special_F0, special_F0, None ],
    0x1e : [ "lload_0" ],
    0x1f : [ "lload_1" ],
    0x20 : [ "lload_2" ],
    0x21 : [ "lload_3" ],
    0x69 : [ "lmul" ],
    0x75 : [ "lneg" ],
    0xab : [ "lookupswitch", LookupSwitch ],
    0x81 : [ "lor" ],
    0x71 : [ "lrem" ],
    0xad : [ "lreturn" ],
    0x79 : [ "lshl" ],
    0x7b : [ "lshr" ],
    0x37 : [ "lstore", "index:B", special_F0, special_F0, None ],
    0x3f : [ "lstore_0" ],
    0x40 : [ "lstore_1" ],
    0x41 : [ "lstore_2" ],
    0x42 : [ "lstore_3" ],
    0x65 : [ "lsub" ],
    0x7d : [ "lushr" ],
    0x83 : [ "lxor" ],
    0xc2 : [ "monitorenter" ],
    0xc3 : [ "monitorexit" ],
    0xc5 : [ "multianewarray", "indexbyte1:B indexbyte2:B dimensions:B", special_F4, special_F4R, None ],
    0xbb : [ "new", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_class", "get_class_index2" ],
    0xbc : [ "newarray", "atype:B", special_F0, special_F0, "get_array_type" ],
    0x0 : [ "nop" ],
    0x57 : [ "pop" ],
    0x58 : [ "pop2" ],
    0xb5 : [ "putfield", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
    0xb3 : [ "putstatic", "indexbyte1:B indexbyte2:B", special_F1, special_F1R, "get_field", "get_field_index" ],
    0xa9 : [ "ret", "index:B", special_F0, special_F0, None ],
    0xb1 : [ "return" ],
    0x35 : [ "saload" ],
    0x56 : [ "sastore" ],
    0x11 : [ "sipush", "byte1:B byte2:B", special_F1, special_F1R, None ],
    0x5f : [ "swap" ],
    0xaa : [ "tableswitch", TableSwitch ],
    0xc4 : [ "wide" ], # FIXME
}
# Invert the value and the name of the bytecode
INVERT_JAVA_OPCODES = dict([( JAVA_OPCODES[k][0], k ) for k in JAVA_OPCODES])
# List of java bytecodes which can modify the control flow
BRANCH_JVM_OPCODES = [ "goto", "goto_w", "if_acmpeq", "if_icmpeq", "if_icmpne", "if_icmplt", "if_icmpge", "if_icmpgt", "if_icmple", "ifeq", "ifne", "iflt", "ifge", "ifgt", "ifle", "ifnonnull", "ifnull", "jsr", "jsr_w" ]
# Regex-style patterns ('.' is a wildcard) covering the same set plus switches.
BRANCH2_JVM_OPCODES = [ "goto", "goto.", "jsr", "jsr.", "if.", "return", ".return", "tableswitch", "lookupswitch" ]
# Patterns for arithmetic opcodes -> operator symbol; the leading '.' is a
# deliberate regex wildcard matching the type prefix (iadd, ladd, ...).
MATH_JVM_OPCODES = { ".and" : '&',
                     ".add" : '+',
                     ".sub" : '-',
                     ".mul" : '*',
                     ".div" : '/',
                     ".shl" : '<<',
                     ".shr" : '>>',
                     ".xor" : '^',
                     ".or" : '|',
                   }
# Precompiled (pattern, operator) pairs built from MATH_JVM_OPCODES.
MATH_JVM_RE = []
for i in MATH_JVM_OPCODES :
    MATH_JVM_RE.append( (re.compile( i ), MATH_JVM_OPCODES[i]) )
# Pattern groups used to classify instructions elsewhere in androguard.
INVOKE_JVM_OPCODES = [ "invoke." ]
FIELD_READ_JVM_OPCODES = [ "get." ]
FIELD_WRITE_JVM_OPCODES = [ "put." ]
BREAK_JVM_OPCODES = [ "invoke.", "put.", ".store", "iinc", "pop", ".return", "if." ]
INTEGER_INSTRUCTIONS = [ "bipush", "sipush" ]
def EXTRACT_INFORMATION_SIMPLE(op_value) :
    """Extract information (special functions) about a bytecode.

    Parses the opcode's operand description ("name:structcode ...") into
    field names and a struct format, and returns the 5-tuple
    (decode_fn, encode_fn, field_names, struct_format, resolver).
    """
    entry = JAVA_OPCODES[ op_value ]
    r_format = ">"
    r_buff = []
    for token in entry[1].split(" ") :
        fields = token.split(":")
        r_buff.append( fields[0].replace(" ", "") )
        r_format += fields[1]
    return ( entry[2], entry[3], r_buff, r_format, entry[4] )
def EXTRACT_INFORMATION_VARIABLE(idx, op_value, raw_format) :
    """Delegate to the opcode's variable-length builder (tableswitch / lookupswitch)
    and return its (decode_fn, encode_fn, field_names, struct_format, resolver)."""
    builder = JAVA_OPCODES[ op_value ][1]
    return builder( idx, raw_format )
def determineNext(i, end, m) :
    """Return the list of possible successor offsets of instruction `i`.

    `end` is the instruction's offset in the method; -1 marks a method exit.
    An empty list means fall-through only.
    """
    name = i.get_name()
    if "return" in name :
        return [ -1 ]
    if "goto" in name or "jsr" in name :
        return [ i.get_operands() + end ]
    if "if" in name :
        # Conditional: fall-through target, then branch target.
        return [ end + i.get_length(), i.get_operands() + end ]
    if "tableswitch" in name :
        ops = i.get_operands()
        targets = [ ops.default + end ]
        for idx in range(0, (ops.high - ops.low) + 1) :
            targets.append( getattr(ops, "offset%d" % idx) + end )
        return targets
    if "lookupswitch" in name :
        ops = i.get_operands()
        targets = [ ops.default + end ]
        for idx in range(0, ops.npairs) :
            targets.append( getattr(ops, "offset%d" % idx) + end )
        return targets
    return []
def determineException(vm, m) :
    """Exception successors are not modelled for JVM bytecode: always empty."""
    return list()
def classToJclass(x) :
    """Wrap an internal class name into its JVM descriptor form ("Lname;")."""
    descriptor = "L%s;" % x
    return descriptor
# (struct format, namedtuple) pairs describing the fixed-size class-file
# structures (JVM spec 4.5-4.7); consumed via SVs(...).
METHOD_INFO = [ '>HHHH', namedtuple("MethodInfo", "access_flags name_index descriptor_index attributes_count") ]
ATTRIBUTE_INFO = [ '>HL', namedtuple("AttributeInfo", "attribute_name_index attribute_length") ]
FIELD_INFO = [ '>HHHH', namedtuple("FieldInfo", "access_flags name_index descriptor_index attributes_count") ]
LINE_NUMBER_TABLE = [ '>HH', namedtuple("LineNumberTable", "start_pc line_number") ]
EXCEPTION_TABLE = [ '>HHHH', namedtuple("ExceptionTable", "start_pc end_pc handler_pc catch_type") ]
LOCAL_VARIABLE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTable", "start_pc length name_index descriptor_index index") ]
LOCAL_VARIABLE_TYPE_TABLE = [ '>HHHHH', namedtuple("LocalVariableTypeTable", "start_pc length name_index signature_index index") ]
# Fixed-size prefix of the Code attribute (before the bytecode itself).
CODE_LOW_STRUCT = [ '>HHL', namedtuple( "LOW", "max_stack max_locals code_length" ) ]
# newarray `atype` operand codes -> type names (JVM spec, newarray instruction).
ARRAY_TYPE = {
    4 : "T_BOOLEAN",
    5 : "T_CHAR",
    6 : "T_FLOAT",
    7 : "T_DOUBLE",
    8 : "T_BYTE",
    9 : "T_SHORT",
    10 : "T_INT",
    11 : "T_LONG",
}
# FIX: the inverse map previously used ARRAY_TYPE[k][0] -- the first CHARACTER
# ('T') of every name, a pattern copied from the ACC_*_FLAGS tables whose values
# are lists. That collapsed the whole map to a single bogus {'T': <last key>}
# entry. Invert on the full type name instead.
INVERT_ARRAY_TYPE = dict([( ARRAY_TYPE[k], k ) for k in ARRAY_TYPE])
# Class access flags: bitmask -> [name, description] (JVM spec 4.1).
ACC_CLASS_FLAGS = {
    0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
    0x0010 : [ "ACC_FINAL", "Declared final; no subclasses allowed." ],
    0x0020 : [ "ACC_SUPER", "Treat superclass methods specially when invoked by the invokespecial instruction." ],
    0x0200 : [ "ACC_INTERFACE", "Is an interface, not a class." ],
    0x0400 : [ "ACC_ABSTRACT", "Declared abstract; may not be instantiated." ],
}
# Inverse map: flag name -> bitmask (values are lists, so [0] is the name).
INVERT_ACC_CLASS_FLAGS = dict([( ACC_CLASS_FLAGS[k][0], k ) for k in ACC_CLASS_FLAGS])
# Field access flags: bitmask -> [name, description] (JVM spec 4.5).
ACC_FIELD_FLAGS = {
    0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
    0x0002 : [ "ACC_PRIVATE", "Declared private; usable only within the defining class." ],
    0x0004 : [ "ACC_PROTECTED", "Declared protected; may be accessed within subclasses." ],
    0x0008 : [ "ACC_STATIC", "Declared static." ],
    0x0010 : [ "ACC_FINAL", "Declared final; no further assignment after initialization." ],
    0x0040 : [ "ACC_VOLATILE", "Declared volatile; cannot be cached." ],
    0x0080 : [ "ACC_TRANSIENT", "Declared transient; not written or read by a persistent object manager." ],
}
# Inverse map: flag name -> bitmask.
INVERT_ACC_FIELD_FLAGS = dict([( ACC_FIELD_FLAGS[k][0], k ) for k in ACC_FIELD_FLAGS])
# Method access flags: bitmask -> [name, description] (JVM spec 4.6).
ACC_METHOD_FLAGS = {
    0x0001 : [ "ACC_PUBLIC", "Declared public; may be accessed from outside its package." ],
    0x0002 : [ "ACC_PRIVATE", "Declared private; accessible only within the defining class." ],
    0x0004 : [ "ACC_PROTECTED", "Declared protected; may be accessed within subclasses." ],
    0x0008 : [ "ACC_STATIC", "Declared static." ],
    0x0010 : [ "ACC_FINAL", "Declared final; may not be overridden." ],
    0x0020 : [ "ACC_SYNCHRONIZED", "Declared synchronized; invocation is wrapped in a monitor lock." ],
    0x0100 : [ "ACC_NATIVE", "Declared native; implemented in a language other than Java." ],
    0x0400 : [ "ACC_ABSTRACT", "Declared abstract; no implementation is provided." ],
    0x0800 : [ "ACC_STRICT", "Declared strictfp; floating-point mode is FP-strict" ]
}
# Inverse map: flag name -> bitmask.
INVERT_ACC_METHOD_FLAGS = dict([( ACC_METHOD_FLAGS[k][0], k ) for k in ACC_METHOD_FLAGS])
class CpInfo(object) :
    """Generic class to manage constant info object.

    Reads one constant-pool entry: a 1-byte tag selecting the layout in
    CONSTANT_INFO, the fixed-size payload, and (for Utf8) the variable data.
    """
    def __init__(self, buff) :
        self.__tag = SV( '>B', buff.read_b(1) )
        self.__bytes = None      # variable-length payload (Utf8 only)
        self.__extra = 0         # length of that payload in bytes
        tag_value = self.__tag.get_value()
        format = CONSTANT_INFO[ tag_value ][1]
        self.__name = CONSTANT_INFO[ tag_value ][0]
        self.format = SVs( format, CONSTANT_INFO[ tag_value ][2], buff.read( calcsize( format ) ) )
        # Utf8 value ? (tag 1 carries `length` bytes of string data)
        if tag_value == 1 :
            self.__extra = self.format.get_value().length
            self.__bytes = SVs( ">%ss" % self.format.get_value().length, namedtuple( CONSTANT_INFO[ tag_value ][0] + "_next", "bytes" ), buff.read( self.format.get_value().length ) )
    def get_format(self) :
        """Return the SVs wrapper over the fixed-size part of the entry."""
        return self.format
    def get_name(self) :
        """Return the constant kind name (e.g. "CONSTANT_Utf8")."""
        return self.__name
    def get_bytes(self) :
        # Only meaningful for Utf8 entries; raises AttributeError otherwise.
        return self.__bytes.get_value().bytes
    def set_bytes(self, name) :
        """Replace a Utf8 entry's string data and update its length field."""
        self.format.set_value( { "length" : len(name) } )
        self.__extra = self.format.get_value().length
        self.__bytes = SVs( ">%ss" % self.format.get_value().length, namedtuple( CONSTANT_INFO[ self.__tag.get_value() ][0] + "_next", "bytes" ), name )
    def get_length(self) :
        """Total serialized size of this entry (fixed part + Utf8 payload)."""
        return self.__extra + calcsize( CONSTANT_INFO[ self.__tag.get_value() ][1] )
    def get_raw(self) :
        """Re-serialize the entry to raw bytes."""
        if self.__bytes != None :
            return self.format.get_value_buff() + self.__bytes.get_value_buff()
        return self.format.get_value_buff()
    def show(self) :
        # Debug helper (Python 2 print statement).
        if self.__bytes != None :
            print self.format.get_value(), self.__bytes.get_value()
        else :
            print self.format.get_value()
class MethodRef(CpInfo) :
    """CONSTANT_Methodref entry: (class_index, name_and_type_index)."""
    def __init__(self, class_manager, buff) :
        super(MethodRef, self).__init__( buff )
    def get_class_index(self) :
        """Constant-pool index of the owning class."""
        value = self.format.get_value()
        return value.class_index
    def get_name_and_type_index(self) :
        """Constant-pool index of the NameAndType entry."""
        value = self.format.get_value()
        return value.name_and_type_index
class InterfaceMethodRef(CpInfo) :
    """CONSTANT_InterfaceMethodref entry: (class_index, name_and_type_index)."""
    def __init__(self, class_manager, buff) :
        super(InterfaceMethodRef, self).__init__( buff )
    def get_class_index(self) :
        return self.format.get_value().class_index
    def get_name_and_type_index(self) :
        return self.format.get_value().name_and_type_index
class FieldRef(CpInfo) :
    """CONSTANT_Fieldref entry: (class_index, name_and_type_index)."""
    def __init__(self, class_manager, buff) :
        super(FieldRef, self).__init__( buff )
    def get_class_index(self) :
        return self.format.get_value().class_index
    def get_name_and_type_index(self) :
        return self.format.get_value().name_and_type_index
class Class(CpInfo) :
    """CONSTANT_Class entry: name_index points at the Utf8 class name."""
    def __init__(self, class_manager, buff) :
        super(Class, self).__init__( buff )
    def get_name_index(self) :
        return self.format.get_value().name_index
class Utf8(CpInfo) :
    """CONSTANT_Utf8 entry; the string payload is handled by CpInfo itself."""
    def __init__(self, class_manager, buff) :
        super(Utf8, self).__init__( buff )
class String(CpInfo) :
    """CONSTANT_String entry."""
    def __init__(self, class_manager, buff) :
        super(String, self).__init__( buff )
class Integer(CpInfo) :
    """CONSTANT_Integer entry."""
    def __init__(self, class_manager, buff) :
        super(Integer, self).__init__( buff )
class Float(CpInfo) :
    """CONSTANT_Float entry."""
    def __init__(self, class_manager, buff) :
        super(Float, self).__init__( buff )
class Long(CpInfo) :
    """CONSTANT_Long entry (high_bytes/low_bytes)."""
    def __init__(self, class_manager, buff) :
        super(Long, self).__init__( buff )
class Double(CpInfo) :
    """CONSTANT_Double entry (high_bytes/low_bytes)."""
    def __init__(self, class_manager, buff) :
        super(Double, self).__init__( buff )
class NameAndType(CpInfo) :
    """CONSTANT_NameAndType entry: (name_index, descriptor_index)."""
    def __init__(self, class_manager, buff) :
        super(NameAndType, self).__init__( buff )
    def get_get_name_index(self) :
        # FIX: previously returned self.format.get_value().get_name_index, a
        # field that does not exist on CONSTANT_NameAndType_info (see
        # CONSTANT_INFO) and raised AttributeError. Kept as a working alias
        # of get_name_index() for backward compatibility.
        return self.format.get_value().name_index
    def get_name_index(self) :
        return self.format.get_value().name_index
    def get_descriptor_index(self) :
        return self.format.get_value().descriptor_index
class EmptyConstant :
    """Null-object constant used for unusable constant-pool slots
    (e.g. the phantom slot following a Long or Double entry)."""
    def __init__(self) :
        pass
    def get_name(self) :
        """An empty constant has no kind name."""
        return ""
    def get_raw(self) :
        """Serializes to nothing."""
        return ""
    def get_length(self) :
        """Occupies zero bytes."""
        return 0
    def show(self) :
        """Nothing to display."""
        pass
# Constant-pool tag -> [kind name, struct format, namedtuple, wrapper class]
# (JVM spec 4.4).
CONSTANT_INFO = {
    7 : [ "CONSTANT_Class", '>BH', namedtuple( "CONSTANT_Class_info", "tag name_index" ), Class ],
    9 : [ "CONSTANT_Fieldref", '>BHH', namedtuple( "CONSTANT_Fieldref_info", "tag class_index name_and_type_index" ), FieldRef ],
    10 : [ "CONSTANT_Methodref", '>BHH', namedtuple( "CONSTANT_Methodref_info", "tag class_index name_and_type_index" ), MethodRef ],
    11 : [ "CONSTANT_InterfaceMethodref", '>BHH', namedtuple( "CONSTANT_InterfaceMethodref_info", "tag class_index name_and_type_index" ), InterfaceMethodRef ],
    8 : [ "CONSTANT_String", '>BH', namedtuple( "CONSTANT_String_info", "tag string_index" ), String ],
    3 : [ "CONSTANT_Integer", '>BL', namedtuple( "CONSTANT_Integer_info", "tag bytes" ), Integer ],
    4 : [ "CONSTANT_Float", '>BL', namedtuple( "CONSTANT_Float_info", "tag bytes" ), Float ],
    5 : [ "CONSTANT_Long", '>BLL', namedtuple( "CONSTANT_Long_info", "tag high_bytes low_bytes" ), Long ],
    # NOTE(review): the Double entry reuses the typename "CONSTANT_Long_info";
    # the field layout is identical, but the repr is misleading -- confirm
    # nothing keys on the typename before renaming.
    6 : [ "CONSTANT_Double", '>BLL', namedtuple( "CONSTANT_Long_info", "tag high_bytes low_bytes" ), Double ],
    12 : [ "CONSTANT_NameAndType", '>BHH', namedtuple( "CONSTANT_NameAndType_info", "tag name_index descriptor_index" ), NameAndType ],
    1 : [ "CONSTANT_Utf8", '>BH', namedtuple( "CONSTANT_Utf8_info", "tag length" ), Utf8 ]
}
# Inverse map: kind name -> tag value.
INVERT_CONSTANT_INFO = dict([( CONSTANT_INFO[k][0], k ) for k in CONSTANT_INFO])
# StackMapTable verification type tags (JVM spec 4.7.4). Note that
# ITEM_Double = 3 and ITEM_Long = 4 is the spec's ordering, not a typo.
ITEM_Top = 0
ITEM_Integer = 1
ITEM_Float = 2
ITEM_Long = 4
ITEM_Double = 3
ITEM_Null = 5
ITEM_UninitializedThis = 6
ITEM_Object = 7
ITEM_Uninitialized = 8
# Verification type tag -> [name, struct format, namedtuple, optional
# (field, resolver) pairs] for decoding verification_type_info unions.
VERIFICATION_TYPE_INFO = {
    ITEM_Top : [ "Top_variable_info", '>B', namedtuple( "Top_variable_info", "tag" ) ],
    ITEM_Integer : [ "Integer_variable_info", '>B', namedtuple( "Integer_variable_info", "tag" ) ],
    ITEM_Float : [ "Float_variable_info", '>B', namedtuple( "Float_variable_info", "tag" ) ],
    ITEM_Long : [ "Long_variable_info", '>B', namedtuple( "Long_variable_info", "tag" ) ],
    ITEM_Double : [ "Double_variable_info", '>B', namedtuple( "Double_variable_info", "tag" ) ],
    ITEM_Null : [ "Null_variable_info", '>B', namedtuple( "Null_variable_info", "tag" ) ],
    ITEM_UninitializedThis : [ "UninitializedThis_variable_info", '>B', namedtuple( "UninitializedThis_variable_info", "tag" ) ],
    ITEM_Object : [ "Object_variable_info", '>BH', namedtuple( "Object_variable_info", "tag cpool_index" ), [ ("cpool_index", "get_class") ] ],
    ITEM_Uninitialized : [ "Uninitialized_variable_info", '>BH', namedtuple( "Uninitialized_variable_info", "tag offset" ) ],
}
class FieldInfo :
    """An object which represents a Field (field_info structure)."""
    def __init__(self, class_manager, buff) :
        # Fixed-size header, then `attributes_count` attribute structures.
        self.__raw_buff = buff.read( calcsize( FIELD_INFO[0] ) )
        self.format = SVs( FIELD_INFO[0], FIELD_INFO[1], self.__raw_buff )
        self.__CM = class_manager
        self.__attributes = []
        for i in range(0, self.format.get_value().attributes_count) :
            ai = AttributeInfo( self.__CM, buff )
            self.__attributes.append( ai )
    def get_raw(self) :
        """Re-serialize the header and all attributes."""
        return self.__raw_buff + ''.join(x.get_raw() for x in self.__attributes)
    def get_length(self) :
        """Total byte length of this field_info structure."""
        val = 0
        for i in self.__attributes :
            # NOTE(review): relies on AttributeInfo exposing a `.length`
            # attribute (not visible in this chunk) -- confirm.
            val += i.length
        return val + calcsize( FIELD_INFO[0] )
    def get_access(self) :
        """Return the access flags as a space-separated string of names."""
        try :
            return ACC_FIELD_FLAGS[ self.format.get_value().access_flags ][0]
        except KeyError :
            # Combined flags: collect every set bit's name.
            ok = True
            access = ""
            for i in ACC_FIELD_FLAGS :
                if (i & self.format.get_value().access_flags) == i :
                    access += ACC_FIELD_FLAGS[ i ][0] + " "
                    ok = False
            if ok == False :
                return access[:-1]
            # NOTE(review): falls back to "ACC_PRIVATE" when no flag bit
            # matched (e.g. access_flags == 0) -- confirm this default.
            return "ACC_PRIVATE"
    def set_access(self, value) :
        self.format.set_value( { "access_flags" : value } )
    def get_class_name(self) :
        """Name of the class this field belongs to."""
        return self.__CM.get_this_class_name()
    def get_name(self) :
        return self.__CM.get_string( self.format.get_value().name_index )
    def set_name(self, name) :
        return self.__CM.set_string( self.format.get_value().name_index, name )
    def get_descriptor(self) :
        return self.__CM.get_string( self.format.get_value().descriptor_index )
    def set_descriptor(self, name) :
        return self.__CM.set_string( self.format.get_value().descriptor_index, name )
    def get_attributes(self) :
        return self.__attributes
    def get_name_index(self) :
        return self.format.get_value().name_index
    def get_descriptor_index(self) :
        return self.format.get_value().descriptor_index
    def show(self) :
        # Debug helper (Python 2 print statement).
        print self.format.get_value(), self.get_name(), self.get_descriptor()
        for i in self.__attributes :
            i.show()
class MethodInfo :
"""An object which represents a Method"""
def __init__(self, class_manager, buff) :
self.format = SVs( METHOD_INFO[0], METHOD_INFO[1], buff.read( calcsize( METHOD_INFO[0] ) ) )
self.__CM = class_manager
self.__code = None
self.__attributes = []
for i in range(0, self.format.get_value().attributes_count) :
ai = AttributeInfo( self.__CM, buff )
self.__attributes.append( ai )
if ai.get_name() == "Code" :
self.__code = ai
def get_raw(self) :
return self.format.get_value_buff() + ''.join(x.get_raw() for x in self.__attributes)
def get_length(self) :
val = 0
for i in self.__attributes :
val += i.length
return val + calcsize( METHOD_INFO[0] )
def get_attributes(self) :
return self.__attributes
def get_access(self) :
return ACC_METHOD_FLAGS[ self.format.get_value().access_flags ][0]
def set_access(self, value) :
self.format.set_value( { "access_flags" : value } )
def get_name(self) :
return self.__CM.get_string( self.format.get_value().name_index )
def set_name(self, name) :
return self.__CM.set_string( self.format.get_value().name_index, name )
def get_descriptor(self) :
return self.__CM.get_string( self.format.get_value().descriptor_index )
def set_descriptor(self, name) :
return self.__CM.set_string( self.format.get_value().name_descriptor, name )
def get_name_index(self) :
return self.format.get_value().name_index
def get_descriptor_index(self) :
return self.format.get_value().descriptor_index
def get_local_variables(self) :
return self.get_code().get_local_variables()
def get_code(self) :
if self.__code == None :
return None
return self.__code.get_item()
def set_name_index(self, name_index) :
self.format.set_value( { "name_index" : name_index } )
def set_descriptor_index(self, descriptor_index) :
self.format.set_value( { "descriptor_index" : descriptor_index } )
def get_class_name(self) :
return self.__CM.get_this_class_name()
def set_cm(self, cm) :
self.__CM = cm
for i in self.__attributes :
i.set_cm( cm )
def with_descriptor(self, descriptor) :
return descriptor == self.__CM.get_string( self.format.get_value().descriptor_index )
def _patch_bytecodes(self) :
return self.get_code()._patch_bytecodes()
def show(self) :
print "*" * 80
print self.format.get_value(), self.get_class_name(), self.get_name(), self.get_descriptor()
for i in self.__attributes :
i.show()
print "*" * 80
def pretty_show(self, vm_a) :
print "*" * 80
print self.format.get_value(), self.get_class_name(), self.get_name(), self.get_descriptor()
for i in self.__attributes :
i.pretty_show(vm_a.hmethods[ self ])
print "*" * 80
class CreateString :
    """Create a specific String constant by given the name index"""
    def __init__(self, class_manager, bytes) :
        # Register the string data and remember its constant-pool index.
        self.__string_index = class_manager.add_string( bytes )
    def get_raw(self) :
        """Serialize as a CONSTANT_String entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_String" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__string_index )
class CreateInteger :
    """Create a specific Integer constant by given the value"""
    def __init__(self, byte) :
        self.__byte = byte
    def get_raw(self) :
        """Serialize as a CONSTANT_Integer entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_Integer" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__byte )
class CreateClass :
    """Create a specific Class constant by given the name index"""
    def __init__(self, class_manager, name_index) :
        self.__CM = class_manager
        self.__name_index = name_index
    def get_raw(self) :
        """Serialize as a CONSTANT_Class entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_Class" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__name_index )
class CreateNameAndType :
    """Create a specific NameAndType constant by given the name and the descriptor index"""
    def __init__(self, class_manager, name_index, descriptor_index) :
        self.__CM = class_manager
        self.__name_index = name_index
        self.__descriptor_index = descriptor_index
    def get_raw(self) :
        """Serialize as a CONSTANT_NameAndType entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_NameAndType" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__name_index, self.__descriptor_index )
class CreateFieldRef :
    """Create a specific FieldRef constant by given the class and the NameAndType index"""
    def __init__(self, class_manager, class_index, name_and_type_index) :
        self.__CM = class_manager
        self.__class_index = class_index
        self.__name_and_type_index = name_and_type_index
    def get_raw(self) :
        """Serialize as a CONSTANT_Fieldref entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_Fieldref" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__class_index, self.__name_and_type_index )
class CreateMethodRef :
    """Create a specific MethodRef constant by given the class and the NameAndType index"""
    def __init__(self, class_manager, class_index, name_and_type_index) :
        self.__CM = class_manager
        self.__class_index = class_index
        self.__name_and_type_index = name_and_type_index
    def get_raw(self) :
        """Serialize as a CONSTANT_Methodref entry."""
        tag = INVERT_CONSTANT_INFO[ "CONSTANT_Methodref" ]
        return pack( CONSTANT_INFO[ tag ][1], tag, self.__class_index, self.__name_and_type_index )
class CreateCodeAttributeInfo :
    """Create a specific CodeAttributeInfo by given bytecodes (into an human readable format)"""
    def __init__(self, class_manager, codes) :
        self.__CM = class_manager
        #ATTRIBUTE_INFO = [ '>HL', namedtuple("AttributeInfo", "attribute_name_index attribute_length") ]
        self.__attribute_name_index = self.__CM.get_string_index( "Code" )
        self.__attribute_length = 0
        ########
        # CODE_LOW_STRUCT = [ '>HHL', namedtuple( "LOW", "max_stack max_locals code_length" ) ]
        # NOTE(review): max_stack/max_locals are hard-coded to 1/2; confirm
        # these bounds hold for every bytecode sequence callers assemble.
        self.__max_stack = 1
        self.__max_locals = 2
        self.__code_length = 0
        ########
        # CODE
        # Assemble: each entry of `codes` is [opcode_name, operand...].
        raw_buff = ""
        for i in codes :
            op_name = i[0]
            op_value = INVERT_JAVA_OPCODES[ op_name ]
            raw_buff += pack( '>B', op_value )
            # Opcodes with operands: encode them with the opcode's v_function.
            if len( JAVA_OPCODES[ op_value ] ) > 1 :
                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
                raw_buff += pack(r_format, *v_function( *i[1:] ) )
        self.__code = JavaCode( self.__CM, raw_buff )
        self.__code_length = len( raw_buff )
        ########
        # EXCEPTION
        # u2 exception_table_length;
        self.__exception_table_length = 0
        # { u2 start_pc;
        # u2 end_pc;
        # u2 handler_pc;
        # u2 catch_type;
        # } exception_table[exception_table_length];
        self.__exception_table = []
        ########
        # ATTRIBUTES
        # u2 attributes_count;
        self.__attributes_count = 0
        # attribute_info attributes[attributes_count];
        self.__attributes = []
        ########
        # FIXME : remove calcsize
        # attribute_length covers everything after the 6-byte attribute header:
        # NOTE(review): it nevertheless adds calcsize(ATTRIBUTE_INFO[0]) (the
        # header itself) -- confirm against JVM spec 4.7.3 before changing.
        self.__attribute_length = calcsize( ATTRIBUTE_INFO[0] ) + \
                                  calcsize( CODE_LOW_STRUCT[0] ) + \
                                  self.__code_length + \
                                  calcsize('>H') + \
                                  calcsize('>H')
    def get_raw(self) :
        """Serialize the full Code attribute (header, code, tables)."""
        return pack( ATTRIBUTE_INFO[0], self.__attribute_name_index, self.__attribute_length ) + \
               pack( CODE_LOW_STRUCT[0], self.__max_stack, self.__max_locals, self.__code_length ) + \
               self.__code.get_raw() + \
               pack( '>H', self.__exception_table_length ) + \
               ''.join( i.get_raw() for i in self.__exception_table ) + \
               pack( '>H', self.__attributes_count ) + \
               ''.join( i.get_raw() for i in self.__attributes )
# FIELD_INFO = [ '>HHHH', namedtuple("FieldInfo", "access_flags name_index descriptor_index attributes_count") ]
class CreateFieldInfo :
    """Build a field_info structure for a new field, given its name and a
    prototype (access-flags string, type descriptor)."""
    def __init__(self, class_manager, name, proto) :
        self.__CM = class_manager

        access_flags_value = proto[0]
        type_value = proto[1]

        self.__access_flags = INVERT_ACC_FIELD_FLAGS[ access_flags_value ]

        # The field name must not already exist in the constant pool: a hit
        # means the field is already declared, which is treated as fatal.
        self.__name_index = self.__CM.get_string_index( name )
        if self.__name_index == -1 :
            self.__name_index = self.__CM.add_string( name )
        else :
            bytecode.Exit("field %s is already present ...." % name)

        self.__descriptor_index = self.__CM.add_string( type_value )
        self.__attributes = []

    def get_raw(self) :
        # Fixed-size header followed by each nested attribute's raw bytes.
        header = pack( FIELD_INFO[0], self.__access_flags, self.__name_index, self.__descriptor_index, len(self.__attributes) )
        return header + ''.join( attr.get_raw() for attr in self.__attributes )
# METHOD_INFO = [ '>HHHH', namedtuple("MethodInfo", "access_flags name_index descriptor_index attributes_count") ]
class CreateMethodInfo :
    """Build a method_info structure for a new method, given its name, a
    prototype (access flags, return descriptor, argument descriptors) and its
    code in human-readable form."""
    def __init__(self, class_manager, name, proto, codes) :
        self.__CM = class_manager

        access_flags_value = proto[0]
        return_value = proto[1]
        arguments_value = proto[2]

        self.__access_flags = INVERT_ACC_METHOD_FLAGS[ access_flags_value ]

        # Reuse the name string if it is already in the pool, otherwise add it.
        self.__name_index = self.__CM.get_string_index( name )
        if self.__name_index == -1 :
            self.__name_index = self.__CM.add_string( name )

        # Method descriptor: "(" + args + ")" + return type.
        self.__descriptor_index = self.__CM.add_string( "(" + arguments_value + ")" + return_value )

        # The only attribute of a created method is its Code attribute.
        self.__attributes = [ CreateCodeAttributeInfo( self.__CM, codes ) ]

    def get_raw(self) :
        header = pack( METHOD_INFO[0], self.__access_flags, self.__name_index, self.__descriptor_index, len(self.__attributes) )
        return header + ''.join( attr.get_raw() for attr in self.__attributes )
class JBC :
    """JBC manages each bytecode with the value, name, raw buffer and special functions

    A JBC wraps a single JVM instruction: its mnemonic, its raw bytes
    (opcode byte + operand bytes) and, for instructions that take operands,
    a "special" 5-tuple describing how to decode/encode them.
    """
    # special --> ( r_function, v_function, r_buff, r_format, f_function )
    #   r_function : turns the unpacked operand namedtuple into a value
    #   v_function : inverse of r_function, used when re-packing operands
    #   r_buff     : field names for the operand namedtuple
    #   r_format   : struct format string of the operand bytes
    #   f_function : optional ClassManager method name used to resolve the
    #                decoded value (e.g. a constant-pool index)
    def __init__(self, class_manager, op_name, raw_buff, special=None) :
        self.__CM = class_manager
        self.__op_name = op_name
        self.__raw_buff = raw_buff
        self.__special = special
        self.__special_value = None

        self._load()

    def _load(self) :
        # Decode the operand bytes (everything after the 1-byte opcode),
        # optionally resolving the decoded value via a ClassManager method.
        if self.__special != None :
            ntuple = namedtuple( self.__op_name, self.__special[2] )
            x = ntuple._make( unpack( self.__special[3], self.__raw_buff[1:] ) )

            if self.__special[4] == None :
                self.__special_value = self.__special[0]( x )
            else :
                self.__special_value = getattr(self.__CM, self.__special[4])( self.__special[0]( x ) )

    def reload(self, raw_buff) :
        """Reload the bytecode with a new raw buffer"""
        self.__raw_buff = raw_buff
        self._load()

    def set_cm(self, cm) :
        # Attach a new class manager (used after the constant pool is rebuilt).
        self.__CM = cm

    def get_length(self) :
        """Return the length of the bytecode"""
        return len( self.__raw_buff )

    def get_raw(self) :
        """Return the current raw buffer of the bytecode"""
        return self.__raw_buff

    def get_name(self) :
        """Return the name of the bytecode"""
        return self.__op_name

    def get_operands(self) :
        """Return the operands of the bytecode

        A single-element list is unwrapped to its only element; any other
        decoded value (list or scalar) is returned unchanged.
        """
        if isinstance( self.__special_value, list ):
            if len(self.__special_value) == 1 :
                return self.__special_value[0]
        return self.__special_value

    def get_formatted_operands(self) :
        # No formatted-operand support for JVM bytecodes (always empty).
        return []

    def adjust_r(self, pos, pos_modif, len_modif) :
        """Adjust the bytecode (if necessary (in this case the bytecode is a branch bytecode)) when a bytecode has been removed

        pos       - offset of this (branch) instruction
        pos_modif - offset of the removed instruction
        len_modif - length in bytes of the removed instruction

        The relative branch offset is grown/shrunk only when the branch
        target lies on the far side of the removed instruction, then the
        raw buffer is re-packed from the new offset.
        """
        # print self.__op_name, pos, pos_modif, len_modif, self.__special_value, type(pos), type(pos_modif), type(len_modif), type(self.__special_value)
        if pos > pos_modif :
            if (self.__special_value + pos) < (pos_modif) :
                # print "MODIF +", self.__special_value, len_modif,
                self.__special_value += len_modif
                # print self.__special_value
                self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
        elif pos < pos_modif :
            if (self.__special_value + pos) > (pos_modif) :
                # print "MODIF -", self.__special_value, len_modif,
                self.__special_value -= len_modif
                # print self.__special_value
                self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )

    def adjust_i(self, pos, pos_modif, len_modif) :
        """Adjust the bytecode (if necessary (in this case the bytecode is a branch bytecode)) when a bytecode has been inserted

        Mirror image of adjust_r: the same target test, with the offset
        corrections applied in the opposite direction.
        """
        #print self.__op_name, pos, pos_modif, len_modif, self.__special_value, type(pos), type(pos_modif), type(len_modif), type(self.__special_value)
        if pos > pos_modif :
            if (self.__special_value + pos) < (pos_modif) :
                # print "MODIF +", self.__special_value, len_modif,
                self.__special_value -= len_modif
                # print self.__special_value
                self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )
        elif pos < pos_modif :
            if (self.__special_value + pos) > (pos_modif) :
                # print "MODIF -", self.__special_value, len_modif,
                self.__special_value += len_modif
                # print self.__special_value
                self.__raw_buff = pack( '>B', INVERT_JAVA_OPCODES[ self.__op_name ] ) + pack(self.__special[3], *self.__special[1]( self.__special_value ) )

    def show_buff(self, pos) :
        """Return a printable form of the instruction; for branch opcodes the
        absolute target (relative offset + pos) is appended."""
        buff = ""
        if self.__special_value == None :
            buff += self.__op_name
        else :
            if self.__op_name in BRANCH_JVM_OPCODES :
                buff += "%s %s %s" % (self.__op_name, self.__special_value, self.__special_value + pos)
            else :
                buff += "%s %s" % (self.__op_name, self.__special_value)

        return buff

    def show(self, pos) :
        """Show the bytecode at a specific position

        pos - the position into the bytecodes (integer)
        """
        # Trailing comma: py2 print without a newline.
        print self.show_buff( pos ),
class JavaCode :
    """JavaCode manages a list of bytecode to a specific method, by decoding a raw buffer and transform each bytecode into a JBC object

    Besides the decoded instructions it maintains two parallel structures:
      - self.__maps     : byte offset of each instruction in the code array
      - self.__branches : instruction indexes of branch opcodes, which need
                          offset fix-ups whenever code is inserted or removed
    """
    def __init__(self, class_manager, buff) :
        self.__CM = class_manager

        self.__raw_buff = buff
        self.__bytecodes = []
        self.__maps = []
        self.__branches = []

        i = 0
        while i < len(self.__raw_buff) :
            # 1-byte opcode, then a variable number of operand bytes.
            op_value = unpack( '>B', self.__raw_buff[i])[0]
            if op_value in JAVA_OPCODES :
                if len( JAVA_OPCODES[ op_value ] ) >= 2 :
                    # it's a fixed length opcode
                    if isinstance(JAVA_OPCODES[ op_value ][1], str) == True :
                        r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )
                    # it's a variable length opcode
                    else :
                        r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_VARIABLE( i, op_value, self.__raw_buff[ i : ] )

                    len_format = calcsize(r_format)
                    # Slice covers the opcode byte plus its operands.
                    raw_buff = self.__raw_buff[ i : i + 1 + len_format ]

                    jbc = JBC( class_manager, JAVA_OPCODES[ op_value ][0], raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
                    self.__bytecodes.append( jbc )

                    # Skip the operand bytes; the opcode byte itself is
                    # accounted for by the i += 1 below.
                    i += len_format
                else :
                    self.__bytecodes.append( JBC( class_manager, JAVA_OPCODES[ op_value ][0], self.__raw_buff[ i ] ) )
            else :
                bytecode.Exit( "op_value 0x%x is unknown" % op_value )

            i += 1

        # Create branch bytecodes list
        idx = 0
        nb = 0
        for i in self.__bytecodes :
            self.__maps.append( idx )

            if i.get_name() in BRANCH_JVM_OPCODES :
                self.__branches.append( nb )

            idx += i.get_length()
            nb += 1

    def _patch_bytecodes(self) :
        """Re-resolve constant-pool dependent instructions against the current
        class manager, rebuilding their raw operand bytes in place.

        Returns the operand lists of all invoke* instructions encountered.
        """
        methods = []
        for i in self.__bytecodes :
            if "invoke" in i.get_name() :
                # invoke*: recreate class / name-and-type / method-ref
                # entries, then re-pack the new constant-pool index.
                operands = i.get_operands()
                methods.append( operands )
                op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
                raw_buff = pack( '>B', op_value )

                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

                new_class_index = self.__CM.create_class( operands[0] )
                new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )

                self.__CM.create_method_ref( new_class_index, new_name_and_type_index )

                value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[0:] )
                if value == -1 :
                    bytecode.Exit( "Unable to found method " + str(operands) )

                raw_buff += pack(r_format, *v_function( value ) )

                i.reload( raw_buff )

            elif "anewarray" in i.get_name() :
                # anewarray: only the element class entry must be recreated.
                operands = i.get_operands()
                op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
                raw_buff = pack( '>B', op_value )

                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

                new_class_index = self.__CM.create_class( operands )

                raw_buff += pack(r_format, *v_function( new_class_index ) )

                i.reload( raw_buff )

            elif "getstatic" == i.get_name() :
                # getstatic: recreate class / name-and-type / field-ref
                # entries, then re-pack the new field-ref index.
                operands = i.get_operands()
                op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
                raw_buff = pack( '>B', op_value )

                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

                new_class_index = self.__CM.create_class( operands[0] )
                new_name_and_type_index = self.__CM.create_name_and_type( operands[1], operands[2] )

                self.__CM.create_field_ref( new_class_index, new_name_and_type_index )

                value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *operands[1:] )
                if value == -1 :
                    bytecode.Exit( "Unable to found method " + str(operands) )

                raw_buff += pack(r_format, *v_function( value ) )

                i.reload( raw_buff )

            elif "ldc" == i.get_name() :
                # ldc: only integer and string constants are supported.
                operands = i.get_operands()
                op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
                raw_buff = pack( '>B', op_value )

                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

                if operands[0] != "CONSTANT_Integer" and operands[0] != "CONSTANT_String" :
                    bytecode.Exit( "...." )

                if operands[0] == "CONSTANT_Integer" :
                    new_int_index = self.__CM.create_integer( operands[1] )
                    raw_buff += pack(r_format, *v_function( new_int_index ) )

                elif operands[0] == "CONSTANT_String" :
                    new_string_index = self.__CM.create_string( operands[1] )
                    raw_buff += pack(r_format, *v_function( new_string_index ) )

                i.reload( raw_buff )

            elif "new" == i.get_name() :
                # new: recreate the instantiated class entry.
                operands = i.get_operands()
                op_value = INVERT_JAVA_OPCODES[ i.get_name() ]
                raw_buff = pack( '>B', op_value )

                r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

                new_class_index = self.__CM.create_class( operands )

                raw_buff += pack(r_format, *v_function( new_class_index ) )

                i.reload( raw_buff )

        return methods

    def get(self) :
        """
        Return all bytecodes

        @rtype : L{list}
        """
        return self.__bytecodes

    def get_raw(self) :
        # Reassemble the code array from each instruction's raw bytes.
        return ''.join(x.get_raw() for x in self.__bytecodes)

    def show(self) :
        """
        Display the code like a disassembler
        """
        nb = 0
        for i in self.__bytecodes :
            print nb, self.__maps[nb],
            i.show( self.__maps[nb] )
            print

            nb += 1

    def pretty_show(self, m_a) :
        """
        Display the code like a disassembler but with instructions' links
        """
        bytecode.PrettyShow( m_a.basic_blocks.gets() )
        bytecode.PrettyShowEx( m_a.exceptions.gets() )

    def get_relative_idx(self, idx) :
        """
        Return the relative idx by given an offset in the code

        @param idx : an offset in the code

        @rtype : the relative index in the code, it's the position in the list of a bytecode
        """
        n = 0
        x = 0
        for i in self.__bytecodes :
            #print n, idx
            if n == idx :
                return x
            n += i.get_length()
            x += 1
        # -1 signals that no instruction starts at this offset.
        return -1

    def get_at(self, idx) :
        """
        Return a specific bytecode at an index

        @param : the index of a bytecode

        @rtype : L{JBC}
        """
        return self.__bytecodes[ idx ]

    def remove_at(self, idx) :
        """
        Remove bytecode at a specific index

        @param idx : the index to remove the bytecode

        @rtype : the length of the removed bytecode
        """
        val = self.__bytecodes[idx]
        val_m = self.__maps[idx]

        # Remove the index if it's in our branch list
        if idx in self.__branches :
            self.__branches.remove( idx )

        # Adjust each branch
        for i in self.__branches :
            self.__bytecodes[i].adjust_r( self.__maps[i], val_m, val.get_length() )

        # Remove it !
        self.__maps.pop(idx)
        self.__bytecodes.pop(idx)

        # Adjust branch and map list
        self._adjust_maps( val_m, val.get_length() * -1 )
        self._adjust_branches( idx, -1 )

        return val.get_length()

    def _adjust_maps(self, val, size) :
        # Shift every byte offset strictly after `val` by `size`.
        nb = 0
        for i in self.__maps :
            if i > val :
                self.__maps[ nb ] = i + size
            nb = nb + 1

    def _adjust_maps_i(self, val, size) :
        # Insertion variant: after an insert two entries share offset `val`;
        # only the second duplicate (x == 2) and everything after it shift.
        nb = 0
        x = 0
        for i in self.__maps :
            if i == val :
                x+=1

            if x == 2 :
                self.__maps[ nb ] = i + size

            if i > val :
                self.__maps[ nb ] = i + size
            nb = nb + 1

    def _adjust_branches(self, val, size) :
        # Shift every branch instruction index strictly after `val` by `size`.
        nb = 0
        for i in self.__branches :
            if i > val :
                self.__branches[ nb ] = i + size
            nb += 1

    def insert_at(self, idx, byte_code) :
        """
        Insert bytecode at a specific index

        @param idx : the index to insert the bytecode
        @param bytecode : a list which represent the bytecode

        @rtype : the length of the inserted bytecode
        """
        # Get the op_value and add it to the raw_buff
        op_name = byte_code[0]
        op_value = INVERT_JAVA_OPCODES[ op_name ]
        raw_buff = pack( '>B', op_value )

        new_jbc = None

        # If it's an op_value with args, we must handle that !
        if len( JAVA_OPCODES[ op_value ] ) > 1 :
            # Find information about the op_value
            r_function, v_function, r_buff, r_format, f_function = EXTRACT_INFORMATION_SIMPLE( op_value )

            # Special values for this op_value (advanced bytecode)
            if len( JAVA_OPCODES[ op_value ] ) == 6 :

                value = getattr( self.__CM, JAVA_OPCODES[ op_value ][5] )( *byte_code[1:] )
                if value == -1 :
                    bytecode.Exit( "Unable to found " + str(byte_code[1:]) )

                raw_buff += pack(r_format, *v_function( value ) )
            else :
                raw_buff += pack(r_format, *v_function( *byte_code[1:] ) )

            new_jbc = JBC(self.__CM, op_name, raw_buff, ( r_function, v_function, r_buff, r_format, f_function ) )
        else :
            new_jbc = JBC(self.__CM, op_name, raw_buff)

        # Adjust each branch with the new insertion
        val_m = self.__maps[ idx ]
        for i in self.__branches :
            self.__bytecodes[i].adjust_i( self.__maps[i], val_m, new_jbc.get_length() )

        # Insert the new bytecode at the correct index
        # Adjust maps + branches
        self.__bytecodes.insert( idx, new_jbc )
        self.__maps.insert( idx, val_m )
        self._adjust_maps_i( val_m, new_jbc.get_length() )
        self._adjust_branches( idx, 1 )

        # Add it to the branches if it's a correct op_value
        if new_jbc.get_name() in BRANCH_JVM_OPCODES :
            self.__branches.append( idx )

        # FIXME
        # modify the exception table
        # modify tableswitch and lookupswitch instructions

        # return the length of the raw_buff
        return len(raw_buff)

    def remplace_at(self, idx, bytecode) :
        """
        Remplace bytecode at a specific index by another bytecode (remplace = remove + insert)

        @param idx : the index to insert the bytecode
        @param bytecode : a list which represent the bytecode

        @rtype : the length of the inserted bytecode
        """
        # NOTE(review): the removed length is computed but discarded; only
        # the inserted length is returned -- confirm callers expect this.
        self.remove_at(idx) * (-1)
        size = self.insert_at(idx, bytecode)

        return size

    def set_cm(self, cm) :
        # Propagate a new class manager to every instruction.
        self.__CM = cm
        for i in self.__bytecodes :
            i.set_cm( cm )
class BasicAttribute(object) :
    """Base class for attribute_info structures: holds a (possibly empty)
    list of nested attributes and a reference to the class manager."""

    def __init__(self) :
        self.__attributes = []

    def get_attributes(self) :
        """Return the list of nested attributes."""
        return self.__attributes

    def set_cm(self, cm) :
        """Attach a (new) class manager to this attribute."""
        self.__CM = cm
class CodeAttribute(BasicAttribute) :
    """Code attribute of a method: stack/locals limits, the bytecode itself
    (wrapped in a JavaCode), the exception table and nested attributes."""
    def __init__(self, class_manager, buff) :
        self.__CM = class_manager

        super(CodeAttribute, self).__init__()
        # u2 attribute_name_index;
        # u4 attribute_length;

        # u2 max_stack;
        # u2 max_locals;
        # u4 code_length;
        # u1 code[code_length];
        self.low_struct = SVs( CODE_LOW_STRUCT[0], CODE_LOW_STRUCT[1], buff.read( calcsize(CODE_LOW_STRUCT[0]) ) )

        self.__code = JavaCode( class_manager, buff.read( self.low_struct.get_value().code_length ) )

        # u2 exception_table_length;
        self.exception_table_length = SV( '>H', buff.read(2) )

        # { u2 start_pc;
        # u2 end_pc;
        # u2 handler_pc;
        # u2 catch_type;
        # } exception_table[exception_table_length];
        self.__exception_table = []
        for i in range(0, self.exception_table_length.get_value()) :
            et = SVs( EXCEPTION_TABLE[0], EXCEPTION_TABLE[1], buff.read( calcsize(EXCEPTION_TABLE[0]) ) )
            self.__exception_table.append( et )

        # u2 attributes_count;
        self.attributes_count = SV( '>H', buff.read(2) )

        # attribute_info attributes[attributes_count];
        self.__attributes = []
        for i in range(0, self.attributes_count.get_value()) :
            ai = AttributeInfo( self.__CM, buff )
            self.__attributes.append( ai )

    def get_attributes(self) :
        """Return the nested attributes (overrides BasicAttribute's list)."""
        return self.__attributes

    def get_exceptions(self) :
        """Return the raw exception_table entries."""
        return self.__exception_table

    def get_raw(self) :
        # Serialize all parts back in their declaration order.
        return self.low_struct.get_value_buff() + \
               self.__code.get_raw() + \
               self.exception_table_length.get_value_buff() + \
               ''.join(x.get_value_buff() for x in self.__exception_table) + \
               self.attributes_count.get_value_buff() + \
               ''.join(x.get_raw() for x in self.__attributes)

    def get_length(self) :
        """Return code_length (bytes of bytecode, not the attribute size)."""
        return self.low_struct.get_value().code_length

    def get_max_stack(self) :
        return self.low_struct.get_value().max_stack

    def get_max_locals(self) :
        return self.low_struct.get_value().max_locals

    def get_local_variables(self) :
        # NOTE(review): local variables are looked up in the StackMapTable
        # attribute (not LocalVariableTable) -- confirm this is intentional.
        for i in self.__attributes :
            if i.get_name() == "StackMapTable" :
                return i.get_item().get_local_variables()
        return []

    def get_bc(self) :
        """Return the underlying JavaCode object."""
        return self.__code

    # FIXME : show* --> add exceptions
    def show_info(self) :
        print "!" * 70
        print self.low_struct.get_value()
        bytecode._Print( "ATTRIBUTES_COUNT", self.attributes_count.get_value() )
        for i in self.__attributes :
            i.show()
        print "!" * 70

    def _begin_show(self) :
        print "!" * 70
        print self.low_struct.get_value()

    def _end_show(self) :
        bytecode._Print( "ATTRIBUTES_COUNT", self.attributes_count.get_value() )
        for i in self.__attributes :
            i.show()
        print "!" * 70

    def show(self) :
        self._begin_show()
        self.__code.show()
        self._end_show()

    def pretty_show(self, m_a) :
        self._begin_show()
        self.__code.pretty_show(m_a)
        self._end_show()

    def _patch_bytecodes(self) :
        # Delegate constant-pool re-resolution to the JavaCode.
        return self.__code._patch_bytecodes()

    def remplace_at(self, idx, bytecode) :
        # Replace one instruction; code_length grows by the inserted size
        # (the removed size was already subtracted inside remplace_at's
        # remove/insert sequence is NOT reflected here -- size is the
        # inserted length only, see JavaCode.remplace_at).
        size = self.__code.remplace_at(idx, bytecode)
        # Adjust the length of our bytecode
        self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length + size } )

    def remove_at(self, idx) :
        size = self.__code.remove_at(idx)
        # Adjust the length of our bytecode
        self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length - size } )

    def removes_at(self, l_idx) :
        # Remove several instruction indexes; after each removal, indexes
        # greater than the removed one shift down by one, so the remaining
        # entries of l_idx are decremented to compensate.
        i = 0
        while i < len(l_idx) :
            self.remove_at( l_idx[i] )

            j = i + 1
            while j < len(l_idx) :
                if l_idx[j] > l_idx[i] :
                    l_idx[j] -= 1
                j += 1

            i += 1

    def inserts_at(self, idx, l_bc) :
        # Insert a sequence of instructions starting at idx, returning the
        # total inserted byte length.
        # self.low_struct.set_value( { "max_stack" : self.low_struct.get_value().max_stack + 2 } )
        # print self.low_struct.get_value()

        total_size = 0
        for i in l_bc :
            size = self.insert_at( idx, i )
            idx += 1
            total_size += size

        return total_size

    def insert_at(self, idx, bytecode) :
        size = self.__code.insert_at(idx, bytecode)
        # Adjust the length of our bytecode
        self.low_struct.set_value( { "code_length" : self.low_struct.get_value().code_length + size } )

        return size

    def get_relative_idx(self, idx) :
        """Map a byte offset to an instruction index (see JavaCode)."""
        return self.__code.get_relative_idx(idx)

    def get_at(self, idx) :
        return self.__code.get_at(idx)

    def gets_at(self, l_idx) :
        return [ self.__code.get_at(i) for i in l_idx ]

    def set_cm(self, cm) :
        # Propagate the new class manager to nested attributes and the code.
        self.__CM = cm
        for i in self.__attributes :
            i.set_cm( cm )
        self.__code.set_cm( cm )

    def _fix_attributes(self, new_cm) :
        for i in self.__attributes :
            i._fix_attributes( new_cm )
class SourceFileAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SourceFileAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 sourcefile_index;
self.sourcefile_index = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.sourcefile_index.get_value_buff()
def show(self) :
print self.sourcefile_index
class LineNumberTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LineNumberTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 line_number_table_length;
# { u2 start_pc;
# u2 line_number;
# } line_number_table[line_number_table_length];
self.line_number_table_length = SV( '>H', buff.read( 2 ) )
self.__line_number_table = []
for i in range(0, self.line_number_table_length.get_value()) :
lnt = SVs( LINE_NUMBER_TABLE[0], LINE_NUMBER_TABLE[1], buff.read( 4 ) )
self.__line_number_table.append( lnt )
def get_raw(self) :
return self.line_number_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.__line_number_table)
def get_line_number_table(self) :
return self.__line_number_table
def show(self) :
bytecode._Print("LINE_NUMBER_TABLE_LENGTH", self.line_number_table_length.get_value())
for x in self.__line_number_table :
print "\t", x.get_value()
def _fix_attributes(self, new_cm) :
pass
class LocalVariableTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LocalVariableTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 local_variable_table_length;
# { u2 start_pc;
# u2 length;
# u2 name_index;
# u2 descriptor_index;
# u2 index;
# } local_variable_table[local_variable_table_length];
self.local_variable_table_length = SV( '>H', buff.read(2) )
self.local_variable_table = []
for i in range(0, self.local_variable_table_length.get_value()) :
lvt = SVs( LOCAL_VARIABLE_TABLE[0], LOCAL_VARIABLE_TABLE[1], buff.read( calcsize(LOCAL_VARIABLE_TABLE[0]) ) )
self.local_variable_table.append( lvt )
def get_raw(self) :
return self.local_variable_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.local_variable_table)
def show(self) :
print "LocalVariableTable", self.local_variable_table_length.get_value()
for x in self.local_variable_table :
print x.get_value()
class LocalVariableTypeTableAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(LocalVariableTypeTableAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 local_variable_type_table_length;
# { u2 start_pc;
# u2 length;
# u2 name_index;
# u2 signature_index;
# u2 index;
# } local_variable_type_table[local_variable_type_table_length];
self.local_variable_type_table_length = SV( '>H', buff.read(2) )
self.local_variable_type_table = []
for i in range(0, self.local_variable_type_table_length.get_value()) :
lvtt = SVs( LOCAL_VARIABLE_TYPE_TABLE[0], LOCAL_VARIABLE_TYPE_TABLE[1], buff.read( calcsize(LOCAL_VARIABLE_TYPE_TABLE[0]) ) )
self.local_variable_type_table.append( lvtt )
def get_raw(self) :
return self.local_variable_type_table_length.get_value_buff() + \
''.join(x.get_value_buff() for x in self.local_variable_type_table)
def show(self) :
print "LocalVariableTypeTable", self.local_variable_type_table_length.get_value()
for x in self.local_variable_type_table :
print x.get_value()
class SourceDebugExtensionAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SourceDebugExtensionAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u1 debug_extension[attribute_length];
self.debug_extension = buff.read( self.attribute_length )
def get_raw(self) :
return self.debug_extension
def show(self) :
print "SourceDebugExtension", self.debug_extension.get_value()
class DeprecatedAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(DeprecatedAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
def get_raw(self) :
return ''
def show(self) :
print "Deprecated"
class SyntheticAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SyntheticAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
def get_raw(self) :
return ''
def show(self) :
print "Synthetic"
class SignatureAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(SignatureAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 signature_index;
self.signature_index = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.signature_index.get_value_buff()
def show(self) :
print "Signature", self.signature_index.get_value()
class RuntimeVisibleAnnotationsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(RuntimeVisibleAnnotationsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 num_annotations;
# annotation annotations[num_annotations];
self.num_annotations = SV( '>H', buff.read(2) )
self.annotations = []
for i in range(0, self.num_annotations.get_value()) :
self.annotations.append( Annotation(cm, buff) )
def get_raw(self) :
return self.num_annotations.get_value_buff() + \
''.join(x.get_raw() for x in self.annotations)
def show(self) :
print "RuntimeVisibleAnnotations", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class RuntimeInvisibleAnnotationsAttribute(RuntimeVisibleAnnotationsAttribute) :
def show(self) :
print "RuntimeInvisibleAnnotations", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class RuntimeVisibleParameterAnnotationsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(RuntimeVisibleParameterAnnotationsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u1 num_parameters;
#{
# u2 num_annotations;
# annotation annotations[num_annotations];
#} parameter_annotations[num_parameters];
self.num_parameters = SV( '>H', buff.read(2) )
self.parameter_annotations = []
for i in range(0, self.num_parameters.get_value()) :
self.parameter_annotations.append( ParameterAnnotation( cm, buff ) )
def get_raw(self) :
return self.num_parameters.get_value_buff() + \
''.join(x.get_raw() for x in self.parameter_annotations)
def show(self) :
print "RuntimeVisibleParameterAnnotations", self.num_parameters.get_value()
for i in self.parameter_annotations :
i.show()
class RuntimeInvisibleParameterAnnotationsAttribute(RuntimeVisibleParameterAnnotationsAttribute) :
def show(self) :
print "RuntimeVisibleParameterAnnotations", self.num_annotations.get_value()
for i in self.parameter_annotations :
i.show()
class ParameterAnnotation :
def __init__(self, cm, buff) :
# u2 num_annotations;
# annotation annotations[num_annotations];
self.num_annotations = SV( '>H', buff.read(2) )
self.annotations = []
for i in range(0, self.num_annotations.get_value()) :
self.annotations = Annotation( cm, buff )
def get_raw(self) :
return self.num_annotations.get_value_buff() + \
''.join(x.get_raw() for x in self.annotations)
def show(self) :
print "ParameterAnnotation", self.num_annotations.get_value()
for i in self.annotations :
i.show()
class AnnotationDefaultAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(AnnotationDefaultAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# element_value default_value;
self.default_value = ElementValue( cm, buff )
def get_raw(self) :
return self.default_value.get_raw()
def show(self) :
print "AnnotationDefault"
self.default_value.show()
class Annotation :
def __init__(self, cm, buff) :
# u2 type_index;
# u2 num_element_value_pairs;
# { u2 element_name_index;
# element_value value;
# } element_value_pairs[num_element_value_pairs]
self.type_index = SV( '>H', buff.read(2) )
self.num_element_value_pairs = SV( '>H', buff.read(2) )
self.element_value_pairs = []
for i in range(0, self.num_element_value_pairs.get_value()) :
self.element_value_pairs.append( ElementValuePair(cm, buff) )
def get_raw(self) :
return self.type_index.get_value_buff() + self.num_element_value_pairs.get_value_buff() + \
''.join(x.get_raw() for x in self.element_value_pairs)
def show(self) :
print "Annotation", self.type_index.get_value(), self.num_element_value_pairs.get_value()
for i in self.element_value_pairs :
i.show()
class ElementValuePair :
def __init__(self, cm, buff) :
# u2 element_name_index;
# element_value value;
self.element_name_index = SV( '>H', buff.read(2) )
self.value = ElementValue(cm, buff)
def get_raw(self) :
return self.element_name_index.get_value_buff() + \
self.value.get_raw()
def show(self) :
print "ElementValuePair", self.element_name_index.get_value()
self.value.show()
# Payload of an element_value with tag 'e' (enum constant): two u2
# constant-pool indexes, the enum type name and the constant name.
ENUM_CONST_VALUE = [ '>HH', namedtuple("EnumConstValue", "type_name_index const_name_index") ]
class ElementValue :
def __init__(self, cm, buff) :
# u1 tag;
# union {
# u2 const_value_index;
# {
# u2 type_name_index;
# u2 const_name_index;
# } enum_const_value;
# u2 class_info_index;
# annotation annotation_value;
# {
# u2 num_values;
# element_value values[num_values];
# } array_value;
# } value;
self.tag = SV( '>B', buff.read(1) )
tag = chr( self.tag.get_value() )
if tag == 'B' or tag == 'C' or tag == 'D' or tag == 'F' or tag == 'I' or tag == 'J' or tag == 'S' or tag == 'Z' or tag == 's' :
self.value = SV( '>H', buff.read(2) )
elif tag == 'e' :
self.value = SVs( ENUM_CONST_VALUE[0], ENUM_CONST_VALUE[1], buff.read( calcsize(ENUM_CONST_VALUE[0]) ) )
elif tag == 'c' :
self.value = SV( '>H', buff.read(2) )
elif tag == '@' :
self.value = Annotation( cm, buff )
elif tag == '[' :
self.value = ArrayValue( cm, buff )
else :
bytecode.Exit( "tag %c not in VERIFICATION_TYPE_INFO" % self.tag.get_value() )
def get_raw(self) :
if isinstance(self.value, SV) or isinstance(self.value, SVs) :
return self.tag.get_value_buff() + self.value.get_value_buff()
return self.tag.get_value_buff() + self.value.get_raw()
def show(self) :
print "ElementValue", self.tag.get_value()
if isinstance(self.value, SV) or isinstance(self.value, SVs) :
print self.value.get_value()
else :
self.value.show()
class ArrayValue :
def __init__(self, cm, buff) :
# u2 num_values;
# element_value values[num_values];
self.num_values = SV( '>H', buff.read(2) )
self.values = []
for i in range(0, self.num_values.get_value()) :
self.values.append( ElementValue(cm, buff) )
def get_raw(self) :
return self.num_values.get_value_buff() + \
''.join(x.get_raw() for x in self.values)
def show(self) :
print "ArrayValue", self.num_values.get_value()
for i in self.values :
i.show()
class ExceptionsAttribute(BasicAttribute) :
def __init__(self, cm, buff) :
super(ExceptionsAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 number_of_exceptions;
# u2 exception_index_table[number_of_exceptions];
self.number_of_exceptions = SV( '>H', buff.read(2) )
self.__exception_index_table = []
for i in range(0, self.number_of_exceptions.get_value()) :
self.__exception_index_table.append( SV( '>H', buff.read(2) ) )
def get_raw(self) :
return self.number_of_exceptions.get_value_buff() + ''.join(x.get_value_buff() for x in self.__exception_index_table)
def get_exception_index_table(self) :
return self.__exception_index_table
def show(self) :
print "Exceptions", self.number_of_exceptions.get_value()
for i in self.__exception_index_table :
print "\t", i
class VerificationTypeInfo :
    """verification_type_info entry of a StackMapTable frame: a 1-byte tag
    followed by a tag-specific payload."""
    def __init__(self, class_manager, buff) :
        self.__CM = class_manager

        # Read the tag via read_b to select the full struct format for this
        # entry, then read the whole entry (tag included) in one go.
        # NOTE(review): this assumes read_b peeks without consuming -- confirm
        # against the buffer implementation.
        tag = SV( '>B', buff.read_b(1) ).get_value()

        if tag not in VERIFICATION_TYPE_INFO :
            bytecode.Exit( "tag not in VERIFICATION_TYPE_INFO" )

        format = VERIFICATION_TYPE_INFO[ tag ][1]
        self.format = SVs( format, VERIFICATION_TYPE_INFO[ tag ][2], buff.read( calcsize( format ) ) )

    def get_raw(self) :
        return self.format.get_value_buff()

    def show(self) :
        # Entries with extra descriptor info (entry length > 3) resolve each
        # extra field through the named ClassManager method before printing.
        general_format = self.format.get_value()
        if len( VERIFICATION_TYPE_INFO[ general_format.tag ] ) > 3 :
            print general_format,
            for (i,j) in VERIFICATION_TYPE_INFO[ general_format.tag ][3] :
                print getattr(self.__CM, j)( getattr(general_format, i) )
        else :
            print general_format

    def _fix_attributes(self, new_cm) :
        # Remap constant-pool indexes to the new class manager's pool.
        general_format = self.format.get_value()

        if len( VERIFICATION_TYPE_INFO[ general_format.tag ] ) > 3 :
            for (i,j) in VERIFICATION_TYPE_INFO[ general_format.tag ][3] :
                # Fix the first object which is the current class
                if getattr(self.__CM, j)( getattr(general_format, i) )[0] == self.__CM.get_this_class_name() :
                    self.format.set_value( { "cpool_index" : new_cm.get_this_class() } )
                # Fix other objects
                else :
                    new_class_index = new_cm.create_class( getattr(self.__CM, j)( getattr(general_format, i) )[0] )
                    self.format.set_value( { "cpool_index" : new_class_index } )

    def set_cm(self, cm) :
        self.__CM = cm
class FullFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = FULL_FRAME; /* 255 */
# u2 offset_delta;
# u2 number_of_locals;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
self.number_of_locals = SV( '>H', buff.read(2) )
# verification_type_info locals[number_of_locals];
self.__locals = []
for i in range(0, self.number_of_locals.get_value()) :
self.__locals.append( VerificationTypeInfo( self.__CM, buff ) )
# u2 number_of_stack_items;
self.number_of_stack_items = SV( '>H', buff.read(2) )
# verification_type_info stack[number_of_stack_items];
self.__stack = []
for i in range(0, self.number_of_stack_items.get_value()) :
self.__stack.append( VerificationTypeInfo( self.__CM, buff ) )
def get_locals(self) :
return self.__locals
def get_raw(self) :
return self.frame_type.get_value_buff() + \
self.offset_delta.get_value_buff() + \
self.number_of_locals.get_value_buff() + \
''.join(x.get_raw() for x in self.__locals) + \
self.number_of_stack_items.get_value_buff() + \
''.join(x.get_raw() for x in self.__stack)
def show(self) :
print "#" * 60
bytecode._Print("\tFULL_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
bytecode._Print("\tNUMBER_OF_LOCALS", self.number_of_locals.get_value())
for i in self.__locals :
i.show()
bytecode._Print("\tNUMBER_OF_STACK_ITEMS", self.number_of_stack_items.get_value())
for i in self.__stack :
i.show()
print "#" * 60
def _fix_attributes(self, new_cm) :
for i in self.__locals :
i._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
for i in self.__locals :
i.set_cm( cm )
class ChopFrame :
def __init__(self, buff) :
# u1 frame_type=CHOP; /* 248-250 */
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff()
def show(self) :
print "#" * 60
bytecode._Print("\tCHOP_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
print "#" * 60
def _fix_attributes(self, cm) :
pass
def set_cm(self, cm) :
pass
class SameFrame :
def __init__(self, buff) :
# u1 frame_type = SAME;/* 0-63 */
self.frame_type = SV( '>B', buff.read(1) )
def get_raw(self) :
return self.frame_type.get_value_buff()
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_FRAME", self.frame_type.get_value())
print "#" * 60
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
pass
class SameLocals1StackItemFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = SAME_LOCALS_1_STACK_ITEM;/* 64-127 */
# verification_type_info stack[1];
self.frame_type = SV( '>B', buff.read(1) )
self.stack = VerificationTypeInfo( self.__CM, buff )
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_LOCALS_1_STACK_ITEM_FRAME", self.frame_type.get_value())
self.stack.show()
print "#" * 60
def get_raw(self) :
return self.frame_type.get_value_buff() + self.stack.get_raw()
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
self.__CM = cm
class SameLocals1StackItemFrameExtended :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = SAME_LOCALS_1_STACK_ITEM_EXTENDED; /* 247 */
# u2 offset_delta;
# verification_type_info stack[1];
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
self.stack = VerificationTypeInfo( self.__CM, buff )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff() + self.stack.get_value_buff()
def _fix_attributes(self, new_cm) :
pass
def set_cm(self, cm) :
self.__CM = cm
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_LOCALS_1_STACK_ITEM_FRAME_EXTENDED", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
self.stack.show()
print "#" * 60
class SameFrameExtended :
def __init__(self, buff) :
# u1 frame_type = SAME_FRAME_EXTENDED;/* 251*/
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
def get_raw(self) :
return self.frame_type.get_value_buff() + self.offset_delta.get_value_buff()
def _fix_attributes(self, cm) :
pass
def set_cm(self, cm) :
pass
def show(self) :
print "#" * 60
bytecode._Print("\tSAME_FRAME_EXTENDED", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
print "#" * 60
class AppendFrame :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
# u1 frame_type = APPEND; /* 252-254 */
# u2 offset_delta;
self.frame_type = SV( '>B', buff.read(1) )
self.offset_delta = SV( '>H', buff.read(2) )
# verification_type_info locals[frame_type -251];
self.__locals = []
k = self.frame_type.get_value() - 251
for i in range(0, k) :
self.__locals.append( VerificationTypeInfo( self.__CM, buff ) )
def get_locals(self) :
return self.__locals
def show(self) :
print "#" * 60
bytecode._Print("\tAPPEND_FRAME", self.frame_type.get_value())
bytecode._Print("\tOFFSET_DELTA", self.offset_delta.get_value())
for i in self.__locals :
i.show()
print "#" * 60
def get_raw(self) :
return self.frame_type.get_value_buff() + \
self.offset_delta.get_value_buff() + \
''.join(x.get_raw() for x in self.__locals)
def _fix_attributes(self, new_cm) :
for i in self.__locals :
i._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
for i in self.__locals :
i.set_cm( cm )
class StackMapTableAttribute(BasicAttribute) :
    """StackMapTable attribute: a table of stack_map_frame entries, each
    dispatched on its frame_type tag (JVM spec 4.7.4)."""
    def __init__(self, class_manager, buff) :
        self.__CM = class_manager

        super(StackMapTableAttribute, self).__init__()
        # u2 attribute_name_index;
        # u4 attribute_length

        # u2 number_of_entries;
        self.number_of_entries = SV( '>H', buff.read(2) )

        # stack_map_frame entries[number_of_entries];
        self.__entries = []
        for i in range(0, self.number_of_entries.get_value()) :
            # Peek at the tag (read_b does not consume) to pick the frame class.
            frame_type = SV( '>B', buff.read_b(1) ).get_value()

            if frame_type >= 0 and frame_type <= 63 :
                self.__entries.append( SameFrame( buff ) )
            elif frame_type >= 64 and frame_type <= 127 :
                self.__entries.append( SameLocals1StackItemFrame( self.__CM, buff ) )
            elif frame_type == 247 :
                self.__entries.append( SameLocals1StackItemFrameExtended( self.__CM, buff ) )
            elif frame_type >= 248 and frame_type <= 250 :
                self.__entries.append( ChopFrame( buff ) )
            elif frame_type == 251 :
                self.__entries.append( SameFrameExtended( buff ) )
            elif frame_type >= 252 and frame_type <= 254 :
                self.__entries.append( AppendFrame( self.__CM, buff ) )
            elif frame_type == 255 :
                self.__entries.append( FullFrame( self.__CM, buff ) )
            else :
                bytecode.Exit( "Frame type %d is unknown" % frame_type )

    def get_entries(self) :
        return self.__entries

    def get_local_variables(self) :
        """Return the locals of the first FullFrame entry, or [].

        FIX: previously called i.get_local_variables(), a method FullFrame
        did not define (it provides get_locals()), so this always raised
        AttributeError when a FullFrame was present.
        """
        for i in self.__entries :
            if isinstance(i, FullFrame) :
                return i.get_locals()
        return []

    def get_raw(self) :
        return self.number_of_entries.get_value_buff() + \
               ''.join(x.get_raw() for x in self.__entries )

    def show(self) :
        bytecode._Print("NUMBER_OF_ENTRIES", self.number_of_entries.get_value())
        for i in self.__entries :
            i.show()

    def _fix_attributes(self, new_cm) :
        for i in self.__entries :
            i._fix_attributes( new_cm )

    def set_cm(self, cm) :
        self.__CM = cm
        for i in self.__entries :
            i.set_cm( cm )
class InnerClassesDesc :
def __init__(self, class_manager, buff) :
INNER_CLASSES_FORMAT = [ ">HHHH", "inner_class_info_index outer_class_info_index inner_name_index inner_class_access_flags" ]
self.__CM = class_manager
self.__raw_buff = buff.read( calcsize( INNER_CLASSES_FORMAT[0] ) )
self.format = SVs( INNER_CLASSES_FORMAT[0], namedtuple( "InnerClassesFormat", INNER_CLASSES_FORMAT[1] ), self.__raw_buff )
def show(self) :
print self.format
def get_raw(self) :
return self.format.get_value_buff()
def set_cm(self, cm) :
self.__CM = cm
class InnerClassesAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(InnerClassesAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length
# u2 number_of_classes;
self.number_of_classes = SV( '>H', buff.read(2) )
# { u2 inner_class_info_index;
# u2 outer_class_info_index;
# u2 inner_name_index;
# u2 inner_class_access_flags;
# } classes[number_of_classes];
self.__classes = []
for i in range(0, self.number_of_classes.get_value()) :
self.__classes.append( InnerClassesDesc( self.__CM, buff ) )
def get_classes(self) :
return self.__classes
def show(self) :
print self.number_of_classes
for i in self.__classes :
i.show()
def set_cm(self, cm) :
self.__CM = cm
for i in self.__classes :
i.set_cm( cm )
def get_raw(self) :
return self.number_of_classes.get_value_buff() + \
''.join(x.get_raw() for x in self.__classes)
class ConstantValueAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
self.__CM = class_manager
super(ConstantValueAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 constantvalue_index;
self.constantvalue_index = SV( '>H', buff.read(2) )
def show(self) :
print self.constantvalue_index
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.constantvalue_index.get_value_buff()
class EnclosingMethodAttribute(BasicAttribute) :
def __init__(self, class_manager, buff) :
ENCLOSING_METHOD_FORMAT = [ '>HH', "class_index method_index" ]
self.__CM = class_manager
super(EnclosingMethodAttribute, self).__init__()
# u2 attribute_name_index;
# u4 attribute_length;
# u2 class_index
# u2 method_index;
self.__raw_buff = buff.read( calcsize( ENCLOSING_METHOD_FORMAT[0] ) )
self.format = SVs( ENCLOSING_METHOD_FORMAT[0], namedtuple( "EnclosingMethodFormat", ENCLOSING_METHOD_FORMAT[1] ), self.__raw_buff )
def show(self) :
print self.format
def set_cm(self, cm) :
self.__CM = cm
def get_raw(self) :
return self.format.get_value_buff()
# Maps an attribute's name (as resolved from the constant pool) to the class
# that parses its payload.  AttributeInfo uses this table to dispatch after
# reading the generic attribute_name_index / attribute_length header.
ATTRIBUTE_INFO_DESCR = {
    "Code" : CodeAttribute,
    "Deprecated" : DeprecatedAttribute,
    "SourceFile" : SourceFileAttribute,
    "Exceptions" : ExceptionsAttribute,
    "LineNumberTable" : LineNumberTableAttribute,
    "LocalVariableTable" : LocalVariableTableAttribute,
    "LocalVariableTypeTable" : LocalVariableTypeTableAttribute,
    "StackMapTable" : StackMapTableAttribute,
    "InnerClasses" : InnerClassesAttribute,
    "ConstantValue" : ConstantValueAttribute,
    "EnclosingMethod" : EnclosingMethodAttribute,
    "Signature" : SignatureAttribute,
    "Synthetic" : SyntheticAttribute,
    "SourceDebugExtension" : SourceDebugExtensionAttribute,
    "RuntimeVisibleAnnotations" : RuntimeVisibleAnnotationsAttribute,
    "RuntimeInvisibleAnnotations" : RuntimeInvisibleAnnotationsAttribute,
    "RuntimeVisibleParameterAnnotations" : RuntimeVisibleParameterAnnotationsAttribute,
    "RuntimeInvisibleParameterAnnotations" : RuntimeInvisibleParameterAnnotationsAttribute,
    "AnnotationDefault" : AnnotationDefaultAttribute,
}
class AttributeInfo :
"""AttributeInfo manages each attribute info (Code, SourceFile ....)"""
def __init__(self, class_manager, buff) :
self.__CM = class_manager
self.__raw_buff = buff.read( calcsize( ATTRIBUTE_INFO[0] ) )
self.format = SVs( ATTRIBUTE_INFO[0], ATTRIBUTE_INFO[1], self.__raw_buff )
self.__name = self.__CM.get_string( self.format.get_value().attribute_name_index )
try :
self._info = ATTRIBUTE_INFO_DESCR[ self.__name ](self.__CM, buff)
except KeyError, ke :
bytecode.Exit( "AttributeInfo %s doesn't exit" % self.__name )
def get_item(self) :
"""Return the specific attribute info"""
return self._info
def get_name(self) :
"""Return the name of the attribute"""
return self.__name
def get_raw(self) :
v1 = self.format.get_value().attribute_length
v2 = len(self._info.get_raw())
if v1 != v2 :
self.set_attribute_length( v2 )
return self.format.get_value_buff() + self._info.get_raw()
def get_attribute_name_index(self) :
return self.format.get_value().attribute_name_index
def set_attribute_name_index(self, value) :
self.format.set_value( { "attribute_name_index" : value } )
def set_attribute_length(self, value) :
self.format.set_value( { "attribute_length" : value } )
def get_attributes(self) :
return self.format
def _fix_attributes(self, new_cm) :
self._info._fix_attributes( new_cm )
def set_cm(self, cm) :
self.__CM = cm
self._info.set_cm( cm )
def show(self) :
print self.format, self.__name
if self._info != None :
self._info.show()
def pretty_show(self, m_a) :
print self.format, self.__name
if self._info != None :
if isinstance(self._info, CodeAttribute) :
self._info.pretty_show(m_a)
else :
self._info.show()
class ClassManager :
    """ClassManager can be used by all classes to get more information.

    Wraps the constant pool (1-indexed in the class file format) and offers
    lookup helpers (get_*_index return -1 when not found) and creation
    helpers (create_* reuse an existing entry or append a new one).
    """
    def __init__(self, constant_pool, constant_pool_count) :
        self.constant_pool = constant_pool
        self.constant_pool_count = constant_pool_count

        self.__this_class = None

    def get_value(self, idx) :
        """Return [ constant name, decoded value ] for the pool entry idx[0]."""
        name = self.get_item(idx[0]).get_name()
        if name == "CONSTANT_Integer" :
            return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]
        elif name == "CONSTANT_String" :
            return [ name, self.get_string( self.get_item(idx[0]).get_format().get_value().string_index ) ]
        elif name == "CONSTANT_Class" :
            return [ name, self.get_class( idx[0] ) ]
        elif name == "CONSTANT_Fieldref" :
            return [ name, self.get_field( idx[0] ) ]
        elif name == "CONSTANT_Float" :
            return [ name, self.get_item(idx[0]).get_format().get_value().bytes ]

        bytecode.Exit( "get_value not yet implemented for %s" % name )

    def get_item(self, idx) :
        # The constant pool is 1-indexed in the class file format.
        return self.constant_pool[ idx - 1]

    def get_interface(self, idx) :
        """Return [ class name, method name, descriptor ] for an
        InterfaceMethodref entry, or [] if idx is not one."""
        if self.get_item(idx).get_name() != "CONSTANT_InterfaceMethodref" :
            return []

        class_idx = self.get_item(idx).get_class_index()
        name_and_type_idx = self.get_item(idx).get_name_and_type_index()

        return [ self.get_string( self.get_item(class_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
               ]

    def get_interface_index(self, class_name, name, descriptor) :
        # FIX: previously `raise("ooo")` -- raising a plain string is a
        # TypeError on Python >= 2.6.  Raise a proper, explicit exception.
        raise NotImplementedError( "get_interface_index is not implemented" )

    def get_method(self, idx) :
        """Return [ class name, method name, descriptor ] for a Methodref
        entry, or [] if idx is not one."""
        if self.get_item(idx).get_name() != "CONSTANT_Methodref" :
            return []

        class_idx = self.get_item(idx).get_class_index()
        name_and_type_idx = self.get_item(idx).get_name_and_type_index()

        return [ self.get_string( self.get_item(class_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
               ]

    def get_method_index(self, class_name, name, descriptor) :
        """Return the pool index of the matching Methodref, or -1."""
        idx = 1
        for i in self.constant_pool :
            res = self.get_method( idx )
            if res != [] :
                m_class_name, m_name, m_descriptor = res
                if m_class_name == class_name and m_name == name and m_descriptor == descriptor :
                    return idx
            idx += 1
        return -1

    def get_field(self, idx) :
        """Return [ class name, field name, descriptor ] for a Fieldref
        entry, or [] if idx is not one."""
        if self.get_item(idx).get_name() != "CONSTANT_Fieldref" :
            return []

        class_idx = self.get_item(idx).get_class_index()
        name_and_type_idx = self.get_item(idx).get_name_and_type_index()

        return [ self.get_string( self.get_item(class_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_name_index() ),
                 self.get_string( self.get_item(name_and_type_idx).get_descriptor_index() )
               ]

    def get_field_index(self, name, descriptor) :
        """Return the pool index of the matching Fieldref, or -1."""
        idx = 1
        for i in self.constant_pool :
            res = self.get_field( idx )
            if res != [] :
                _, m_name, m_descriptor = res
                if m_name == name and m_descriptor == descriptor :
                    return idx
            idx += 1
        # FIX: previously fell off the end and returned None, while every
        # sibling get_*_index helper returns -1 on failure.
        return -1

    def get_class(self, idx) :
        if self.get_item(idx).get_name() != "CONSTANT_Class" :
            return []

        return [ self.get_string( self.get_item(idx).get_name_index() ) ]

    def get_array_type(self, idx) :
        return ARRAY_TYPE[ idx[0] ]

    def get_string_index(self, name) :
        """Return the pool index of the CONSTANT_Utf8 `name`, or -1."""
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Utf8" :
                if i.get_bytes() == name :
                    return idx
            idx += 1
        return -1

    def get_integer_index(self, value) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Integer" :
                if i.get_format().get_value().bytes == value :
                    return idx
            idx += 1
        return -1

    def get_cstring_index(self, value) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_String" :
                if self.get_string( i.get_format().get_value().string_index ) == value :
                    return idx
            idx += 1
        return -1

    def get_name_and_type_index(self, name_method_index, descriptor_method_index) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_NameAndType" :
                value = i.get_format().get_value()
                if value.name_index == name_method_index and value.descriptor_index == descriptor_method_index :
                    return idx
            idx += 1
        return -1

    def get_class_by_index(self, name_index) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Class" :
                value = i.get_format().get_value()
                if value.name_index == name_index :
                    return idx
            idx += 1
        return -1

    def get_method_ref_index(self, new_class_index, new_name_and_type_index) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Methodref" :
                value = i.get_format().get_value()
                if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
                    return idx
            idx += 1
        return -1

    def get_field_ref_index(self, new_class_index, new_name_and_type_index) :
        idx = 1
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Fieldref" :
                value = i.get_format().get_value()
                if value.class_index == new_class_index and value.name_and_type_index == new_name_and_type_index :
                    return idx
            idx += 1
        return -1

    def get_class_index(self, method_name) :
        """Return the class_index of the first Methodref named method_name, or -1."""
        idx = 1
        for i in self.constant_pool :
            res = self.get_method( idx )
            if res != [] :
                _, name, _ = res
                if name == method_name :
                    return i.get_class_index()
            idx += 1
        return -1

    def get_class_index2(self, class_name) :
        """Return the pool index of the CONSTANT_Class named class_name, or -1."""
        idx = 1
        for i in self.constant_pool :
            res = self.get_class( idx )
            if res != [] :
                name = res[0]
                if name == class_name :
                    return idx
            idx += 1
        return -1

    def get_used_fields(self) :
        l = []
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Fieldref" :
                l.append( i )
        return l

    def get_used_methods(self) :
        l = []
        for i in self.constant_pool :
            if i.get_name() == "CONSTANT_Methodref" :
                l.append( i )
        return l

    def get_string(self, idx) :
        if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
            return self.constant_pool[idx - 1].get_bytes()
        return None

    def set_string(self, idx, name) :
        if self.constant_pool[idx - 1].get_name() == "CONSTANT_Utf8" :
            self.constant_pool[idx - 1].set_bytes( name )
        else :
            bytecode.Exit( "invalid index %d to set string %s" % (idx, name) )

    def add_string(self, name) :
        """Append a CONSTANT_Utf8 for name (reusing an existing one) and
        return its pool index."""
        name_index = self.get_string_index(name)
        if name_index != -1 :
            return name_index

        tag_value = INVERT_CONSTANT_INFO[ "CONSTANT_Utf8" ]
        buff = pack( CONSTANT_INFO[ tag_value ][1], tag_value, len(name) ) + pack( ">%ss" % len(name), name )
        ci = CONSTANT_INFO[ tag_value ][-1]( self, bytecode.BuffHandle( buff ) )

        self.constant_pool.append( ci )
        self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )

        return self.constant_pool_count.get_value() - 1

    def set_this_class(self, this_class) :
        self.__this_class = this_class

    def get_this_class(self) :
        return self.__this_class.get_value()

    def get_this_class_name(self) :
        return self.get_class( self.__this_class.get_value() )[0]

    def add_constant_pool(self, elem) :
        self.constant_pool.append( elem )
        self.constant_pool_count.set_value( self.constant_pool_count.get_value() + 1 )

    def get_constant_pool_count(self) :
        return self.constant_pool_count.get_value()

    def create_class(self, name) :
        class_name_index = self.add_string( name )
        return self._create_class( class_name_index )

    def _create_class(self, class_name_index) :
        class_index = self.get_class_by_index( class_name_index )
        if class_index == -1 :
            new_class = CreateClass( self, class_name_index )
            self.add_constant_pool( Class( self, bytecode.BuffHandle( new_class.get_raw() ) ) )
            class_index = self.get_constant_pool_count() - 1
        return class_index

    def create_name_and_type(self, name, desc) :
        name_index = self.add_string( name )
        descriptor_index = self.add_string( desc )

        return self._create_name_and_type( name_index, descriptor_index )

    def create_name_and_type_by_index(self, name_method_index, descriptor_method_index) :
        return self._create_name_and_type( name_method_index, descriptor_method_index )

    def _create_name_and_type(self, name_method_index, descriptor_method_index) :
        name_and_type_index = self.get_name_and_type_index( name_method_index, descriptor_method_index )
        if name_and_type_index == -1 :
            new_nat = CreateNameAndType( self, name_method_index, descriptor_method_index )
            self.add_constant_pool( NameAndType( self, bytecode.BuffHandle( new_nat.get_raw() ) ) )
            name_and_type_index = self.get_constant_pool_count() - 1
        return name_and_type_index

    def create_method_ref(self, new_class_index, new_name_and_type_index) :
        new_mr_index = self.get_method_ref_index( new_class_index, new_name_and_type_index )
        if new_mr_index == -1 :
            new_mr = CreateMethodRef( self, new_class_index, new_name_and_type_index )
            self.add_constant_pool( MethodRef( self, bytecode.BuffHandle( new_mr.get_raw() ) ) )
            new_mr_index = self.get_constant_pool_count() - 1
        return new_mr_index

    def create_field_ref(self, new_class_index, new_name_and_type_index) :
        new_fr_index = self.get_field_ref_index( new_class_index, new_name_and_type_index )
        if new_fr_index == -1 :
            new_fr = CreateFieldRef( self, new_class_index, new_name_and_type_index )
            self.add_constant_pool( FieldRef( self, bytecode.BuffHandle( new_fr.get_raw() ) ) )
            new_fr_index = self.get_constant_pool_count() - 1
        return new_fr_index

    def create_integer(self, value) :
        new_int_index = self.get_integer_index( value )
        if new_int_index == -1 :
            new_int = CreateInteger( value )
            self.add_constant_pool( Integer( self, bytecode.BuffHandle( new_int.get_raw() ) ) )
            new_int_index = self.get_constant_pool_count() - 1
        return new_int_index

    def create_string(self, value) :
        new_string_index = self.get_cstring_index( value )
        if new_string_index == -1 :
            new_string = CreateString( self, value )
            self.add_constant_pool( String( self, bytecode.BuffHandle( new_string.get_raw() ) ) )
            new_string_index = self.get_constant_pool_count() - 1
        return new_string_index
class JVMFormat(bytecode._Bytecode) :
"""
An object which is the main class to handle properly a class file.
Exported fields : magic, minor_version, major_version, constant_pool_count, access_flags, this_class, super_class, interfaces_count, fields_count, methods_count, attributes_count
"""
def __init__(self, buff) :
    """
    @param buff : the buffer which represents the open file
    """
    super(JVMFormat, self).__init__( buff )
    # Parse the whole .class structure immediately.
    self._load_class()
def _load_class(self) :
    """Parse the .class file structure in order: header, constant pool,
    access flags, interfaces, fields, methods, attributes."""
    # u4 magic;
    # u2 minor_version;
    # u2 major_version;
    self.magic = SV( '>L', self.read( 4 ) )
    self.minor_version = SV( '>H', self.read( 2 ) )
    self.major_version = SV( '>H', self.read( 2 ) )

    # u2 constant_pool_count;
    self.constant_pool_count = SV( '>H', self.read( 2 ) )

    #  cp_info constant_pool[constant_pool_count-1];
    self.constant_pool = []
    # The ClassManager shares (aliases) the pool list and count.
    self.__CM = ClassManager( self.constant_pool, self.constant_pool_count )

    i = 1
    while(i < self.constant_pool_count.get_value()) :
        # Peek at the tag byte to dispatch to the right constant parser.
        tag = SV( '>B', self.read_b( 1 ) )

        if tag.get_value() not in CONSTANT_INFO :
            bytecode.Exit( "tag %d not in CONSTANT_INFO" % tag.get_value() )
        ci = CONSTANT_INFO[ tag.get_value() ][-1]( self.__CM, self )
        self.constant_pool.append( ci )

        i = i + 1

        # CONSTANT_Long or CONSTANT_Double
        #  If a CONSTANT_Long_info or CONSTANT_Double_info structure is the item
        #  in the constant_pool table at index n, then the next usable item in the pool is
        #  located at index n + 2. The constant_pool index n + 1 must be valid but is
        #  considered unusable.
        if tag.get_value() == 5 or tag.get_value() == 6 :
            self.constant_pool.append( EmptyConstant() )
            i = i + 1

    # u2 access_flags;
    # u2 this_class;
    # u2 super_class;
    self.access_flags         = SV( '>H', self.read( 2 ) )
    self.this_class           = SV( '>H', self.read( 2 ) )
    self.super_class          = SV( '>H', self.read( 2 ) )

    self.__CM.set_this_class( self.this_class )

    # u2 interfaces_count;
    self.interfaces_count     = SV( '>H', self.read( 2 ) )

    # u2 interfaces[interfaces_count];
    self.interfaces = []
    for i in range(0, self.interfaces_count.get_value()) :
        tag = SV( '>H', self.read( 2 ) )
        self.interfaces.append( tag )

    # u2 fields_count;
    self.fields_count = SV( '>H', self.read( 2 ) )

    # field_info fields[fields_count];
    self.fields = []
    for i in range(0, self.fields_count.get_value()) :
        fi = FieldInfo( self.__CM, self )
        self.fields.append( fi )

    # u2 methods_count;
    self.methods_count = SV( '>H', self.read( 2 ) )

    # method_info methods[methods_count];
    self.methods = []
    for i in range(0, self.methods_count.get_value()) :
        mi = MethodInfo( self.__CM, self )
        self.methods.append( mi )

    # u2 attributes_count;
    self.attributes_count = SV( '>H', self.read( 2 ) )

    # attribute_info attributes[attributes_count];
    self.__attributes = []
    for i in range(0, self.attributes_count.get_value()) :
        ai = AttributeInfo( self.__CM, self )
        self.__attributes.append( ai )
def get_class(self, class_name) :
    """
    Verify the name of the class

    @param class_name : the name of the class (dotted or slashed form)
    @rtype : True if the class name is valid, otherwise it's False
    """
    this_name = self.__CM.get_this_class_name()
    if this_name == class_name :
        return True
    # Accept the dotted form by normalizing it to the internal slashed form.
    return this_name == class_name.replace(".", "/")
def get_classes_names(self) :
    """Return the names of classes (a single-element list for a .class file)."""
    return [ self.__CM.get_this_class_name() ]
def get_name(self) :
    """Return the internal name of this class."""
    return self.__CM.get_this_class_name()
def get_classes(self) :
    """Return this object as a single-element list (one class per file)."""
    return [ self ]
def get_field(self, name) :
    """
    Return into a list all fields which corresponds to the regexp

    @param name : the name of the field (a regexp)
    """
    prog = re.compile( name )
    return [ field for field in self.fields if prog.match( field.get_name() ) ]
def get_method_descriptor(self, class_name, method_name, descriptor) :
    """
    Return the specific method

    @param class_name : the class name of the method
    @param method_name : the name of the method
    @param descriptor : the descriptor of the method

    @rtype: L{MethodInfo}
    """
    # FIXME : handle multiple class name ?
    if class_name != None and class_name != self.__CM.get_this_class_name() :
        return None

    for method in self.methods :
        if method.get_name() == method_name and method.get_descriptor() == descriptor :
            return method

    return None
def get_field_descriptor(self, class_name, field_name, descriptor) :
    """
    Return the specific field

    @param class_name : the class name of the field
    @param field_name : the name of the field
    @param descriptor : the descriptor of the field

    @rtype: L{FieldInfo}
    """
    # FIXME : handle multiple class name ?
    if class_name != None and class_name != self.__CM.get_this_class_name() :
        return None

    for field in self.fields :
        if field.get_name() == field_name and field.get_descriptor() == descriptor :
            return field

    return None
def get_method(self, name) :
    """Return into a list all methods which corresponds to the regexp

    @param name : the name of the method (a regexp)
    """
    prog = re.compile( name )
    return [ method for method in self.methods if prog.match( method.get_name() ) ]
def get_all_fields(self) :
    """Return every FieldInfo of this class."""
    return self.fields
def get_fields(self) :
    """Return all objects fields"""
    return self.fields
def get_methods(self) :
    """Return all objects methods"""
    return self.methods
def get_constant_pool(self) :
    """Return the constant pool list"""
    return self.constant_pool
def get_strings(self) :
    """Return all strings into the class"""
    return [ entry.get_bytes() for entry in self.constant_pool
             if entry.get_name() == "CONSTANT_Utf8" ]
def get_class_manager(self) :
    """
    Return directly the class manager

    @rtype : L{ClassManager}
    """
    return self.__CM
def set_used_field(self, old, new) :
    """
    Change the description of a field

    @param old : a list of string which contained the original class name, the original field name and the original descriptor
    @param new : a list of string which contained the new class name, the new field name and the new descriptor
    """
    for fr in self.__CM.get_used_fields() :
        cls_idx = fr.format.get_value().class_index
        nat_idx = fr.format.get_value().name_and_type_index

        cur_class = self.__CM.get_string( self.__CM.get_item(cls_idx).get_name_index() )
        cur_field = self.__CM.get_string( self.__CM.get_item(nat_idx).get_name_index() )
        cur_desc = self.__CM.get_string( self.__CM.get_item(nat_idx).get_descriptor_index() )

        if cur_class == old[0] and cur_field == old[1] and cur_desc == old[2] :
            # Rewrite the referenced Utf8 entries in place.
            self.__CM.set_string( self.__CM.get_item(cls_idx).get_name_index(), new[0] )
            self.__CM.set_string( self.__CM.get_item(nat_idx).get_name_index(), new[1] )
            self.__CM.set_string( self.__CM.get_item(nat_idx).get_descriptor_index(), new[2] )
def set_used_method(self, old, new) :
    """
    Change the description of a method

    @param old : a list of string which contained the original class name, the original method name and the original descriptor
    @param new : a list of string which contained the new class name, the new method name and the new descriptor
    """
    for mr in self.__CM.get_used_methods() :
        cls_idx = mr.format.get_value().class_index
        nat_idx = mr.format.get_value().name_and_type_index

        cur_class = self.__CM.get_string( self.__CM.get_item(cls_idx).get_name_index() )
        cur_method = self.__CM.get_string( self.__CM.get_item(nat_idx).get_name_index() )
        cur_desc = self.__CM.get_string( self.__CM.get_item(nat_idx).get_descriptor_index() )

        if cur_class == old[0] and cur_method == old[1] and cur_desc == old[2] :
            # Rewrite the referenced Utf8 entries in place.
            self.__CM.set_string( self.__CM.get_item(cls_idx).get_name_index(), new[0] )
            self.__CM.set_string( self.__CM.get_item(nat_idx).get_name_index(), new[1] )
            self.__CM.set_string( self.__CM.get_item(nat_idx).get_descriptor_index(), new[2] )
def show(self) :
"""
Show the .class format into a human readable format
"""
bytecode._Print( "MAGIC", self.magic.get_value() )
bytecode._Print( "MINOR VERSION", self.minor_version.get_value() )
bytecode._Print( "MAJOR VERSION", self.major_version.get_value() )
bytecode._Print( "CONSTANT POOL COUNT", self.constant_pool_count.get_value() )
nb = 0
for i in self.constant_pool :
print nb,
i.show()
nb += 1
bytecode._Print( "ACCESS FLAGS", self.access_flags.get_value() )
bytecode._Print( "THIS CLASS", self.this_class.get_value() )
bytecode._Print( "SUPER CLASS", self.super_class.get_value() )
bytecode._Print( "INTERFACE COUNT", self.interfaces_count.get_value() )
nb = 0
for i in self.interfaces :
print nb,
print i
bytecode._Print( "FIELDS COUNT", self.fields_count.get_value() )
nb = 0
for i in self.fields :
print nb,
i.show()
nb += 1
bytecode._Print( "METHODS COUNT", self.methods_count.get_value() )
nb = 0
for i in self.methods :
print nb,
i.show()
nb += 1
bytecode._Print( "ATTRIBUTES COUNT", self.attributes_count.get_value() )
nb = 0
for i in self.__attributes :
print nb,
i.show()
nb += 1
def pretty_show(self, vm_a) :
"""
Show the .class format into a human readable format
"""
bytecode._Print( "MAGIC", self.magic.get_value() )
bytecode._Print( "MINOR VERSION", self.minor_version.get_value() )
bytecode._Print( "MAJOR VERSION", self.major_version.get_value() )
bytecode._Print( "CONSTANT POOL COUNT", self.constant_pool_count.get_value() )
nb = 0
for i in self.constant_pool :
print nb,
i.show()
nb += 1
bytecode._Print( "ACCESS FLAGS", self.access_flags.get_value() )
bytecode._Print( "THIS CLASS", self.this_class.get_value() )
bytecode._Print( "SUPER CLASS", self.super_class.get_value() )
bytecode._Print( "INTERFACE COUNT", self.interfaces_count.get_value() )
nb = 0
for i in self.interfaces :
print nb,
i.show()
bytecode._Print( "FIELDS COUNT", self.fields_count.get_value() )
nb = 0
for i in self.fields :
print nb,
i.show()
nb += 1
bytecode._Print( "METHODS COUNT", self.methods_count.get_value() )
nb = 0
for i in self.methods :
print nb,
i.pretty_show(vm_a)
nb += 1
bytecode._Print( "ATTRIBUTES COUNT", self.attributes_count.get_value() )
nb = 0
for i in self.__attributes :
print nb,
i.show()
def insert_string(self, value) :
    """Insert a string into the constant pool list (Constant_Utf8)

    @param value : the new string
    """
    self.__CM.add_string( value )
def insert_field(self, class_name, name, descriptor) :
    """
    Insert a field into the class

    @param class_name : the class of the field
    @param name : the name of the field
    @param descriptor : a list with the access_flag and the descriptor ( [ "ACC_PUBLIC", "I" ] )
    """
    # Build the raw field_info, then re-parse it as a real FieldInfo.
    crafted = CreateFieldInfo( self.__CM, name, descriptor )
    field_info = FieldInfo( self.__CM, bytecode.BuffHandle( crafted.get_raw() ) )

    self.fields.append( field_info )
    self.fields_count.set_value( self.fields_count.get_value() + 1 )

    # Add a FieldRef and a NameAndType
    nat_index = self.__CM.create_name_and_type_by_index( field_info.get_name_index(), field_info.get_descriptor_index() )
    self.__CM.create_field_ref( self.__CM.get_this_class(), nat_index )
    def insert_craft_method(self, name, proto, codes) :
        """
        Insert a craft method into the class

        @param name : the name of the new method
        @param proto : a list which describe the method ( [ ACCESS_FLAGS, RETURN_TYPE, ARGUMENTS ], ie : [ "ACC_PUBLIC", "[B", "[B" ] )
        @param codes : a list which represents the code into a human readable format ( [ "aconst_null" ], [ "areturn" ] ] )
        """
        # Create new method
        new_method = CreateMethodInfo(self.__CM, name, proto, codes)

        # Insert the method by casting it directly into a MethodInfo with the raw buffer;
        # round-tripping through the raw bytes makes it behave like a parsed method.
        self._insert_basic_method( MethodInfo( self.__CM, bytecode.BuffHandle( new_method.get_raw() ) ) )
def insert_direct_method(self, name, ref_method) :
"""
Insert a direct method (MethodInfo object) into the class
@param name : the name of the new method
@param ref_method : the MethodInfo Object
"""
if ref_method == None :
return
# Change the name_index
name_index = self.__CM.get_string_index( name )
if name_index != -1 :
bytecode.Exit( "method %s already exits" % name )
name_index = self.__CM.add_string( name )
ref_method.set_name_index( name_index )
# Change the descriptor_index
descriptor_index = self.__CM.get_string_index( ref_method.get_descriptor() )
if descriptor_index == -1 :
descriptor_index = self.__CM.add_string( ref_method.get_descriptor() )
ref_method.set_descriptor_index( descriptor_index )
# Change attributes name index
self._fix_attributes_external( ref_method )
# Change internal index
self._fix_attributes_internal( ref_method )
# Insert the method
self._insert_basic_method( ref_method )
    def _fix_attributes_external(self, ref_method) :
        # Register each attribute's name into our constant pool and recurse
        # into nested attributes (e.g. a Code attribute's own attributes).
        for i in ref_method.get_attributes() :
            attribute_name_index = self.__CM.add_string( i.get_name() )

            i.set_attribute_name_index( attribute_name_index )

            self._fix_attributes_external( i.get_item() )
    def _fix_attributes_internal(self, ref_method) :
        # Re-map the constant pool indexes used *inside* each attribute
        # against our own class manager.
        for i in ref_method.get_attributes() :
            attribute_name_index = self.__CM.add_string( i.get_name() )

            i._fix_attributes( self.__CM )

            i.set_attribute_name_index( attribute_name_index )
    def _insert_basic_method(self, ref_method) :
        # Add a MethodRef and a NameAndType
        name_and_type_index = self.__CM.create_name_and_type_by_index( ref_method.get_name_index(), ref_method.get_descriptor_index() )
        self.__CM.create_method_ref( self.__CM.get_this_class(), name_and_type_index )

        # Change the class manager
        ref_method.set_cm( self.__CM )

        # Insert libraries/constants dependances
        methods = ref_method._patch_bytecodes()

        # FIXME : insert needed fields + methods
        # NOTE(review): "^java*" matches any name starting with "jav" followed
        # by zero or more "a"; "^java.*" (or str.startswith("java")) was
        # probably intended -- confirm before changing.
        prog = re.compile( "^java*" )
        for i in methods :
            if prog.match( i[0] ) == None :
                bytecode.Exit( "ooooops" )
        #ref_method.show()

        # Insert the method
        self.methods.append( ref_method )
        self.methods_count.set_value( self.methods_count.get_value() + 1 )
    def _get_raw(self) :
        """Serialize the class back to the .class binary layout.

           The section order below follows the ClassFile structure of the
           JVM specification; do not reorder.
        """
        # u4 magic;
        # u2 minor_version;
        # u2 major_version;
        buff = self.magic.get_value_buff()
        buff += self.minor_version.get_value_buff()
        buff += self.major_version.get_value_buff()

        # u2 constant_pool_count;
        buff += self.constant_pool_count.get_value_buff()

        # cp_info constant_pool[constant_pool_count-1];
        for i in self.constant_pool :
            buff += i.get_raw()

        # u2 access_flags;
        # u2 this_class;
        # u2 super_class;
        buff += self.access_flags.get_value_buff()
        buff += self.this_class.get_value_buff()
        buff += self.super_class.get_value_buff()

        # u2 interfaces_count;
        buff += self.interfaces_count.get_value_buff()

        # u2 interfaces[interfaces_count];
        for i in self.interfaces :
            buff += i.get_value_buff()

        # u2 fields_count;
        buff += self.fields_count.get_value_buff()

        # field_info fields[fields_count];
        for i in self.fields :
            buff += i.get_raw()

        # u2 methods_count;
        buff += self.methods_count.get_value_buff()

        # method_info methods[methods_count];
        for i in self.methods :
            buff += i.get_raw()

        # u2 attributes_count;
        buff += self.attributes_count.get_value_buff()

        # attribute_info attributes[attributes_count];
        for i in self.__attributes :
            buff += i.get_raw()

        return buff
    def save(self) :
        """
        Return the class (with the modifications) into raw format

        @rtype: string
        """
        # Public entry point; all serialization logic lives in _get_raw().
        return self._get_raw()
    def set_vmanalysis(self, vmanalysis) :
        # No-op here; presumably kept for interface compatibility with other
        # bytecode backends -- confirm against callers.
        pass

    def get_generator(self) :
        # Imported lazily so the module loads without the generator backend.
        import jvm_generate
        return jvm_generate.JVMGenerate

    def get_INTEGER_INSTRUCTIONS(self) :
        # Module-level table of integer-producing instructions.
        return INTEGER_INSTRUCTIONS

    def get_type(self) :
        # Identifier of the bytecode format handled by this object.
        return "JVM"
| apache-2.0 |
patdoyle1/FastMath | appengine-try-python-flask/lib/werkzeug/exceptions.py | 316 | 17799 | # -*- coding: utf-8 -*-
"""
werkzeug.exceptions
~~~~~~~~~~~~~~~~~~~
This module implements a number of Python exceptions you can raise from
within your views to trigger a standard non-200 response.
Usage Example
-------------
::
from werkzeug.wrappers import BaseRequest
from werkzeug.wsgi import responder
from werkzeug.exceptions import HTTPException, NotFound
def view(request):
raise NotFound()
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except HTTPException as e:
return e
As you can see from this example those exceptions are callable WSGI
applications. Because of Python 2.4 compatibility those do not extend
from the response objects but only from the python exception class.
As a matter of fact they are not Werkzeug response objects. However you
can get a response object by calling ``get_response()`` on a HTTP
exception.
Keep in mind that you have to pass an environment to ``get_response()``
because some errors fetch additional information from the WSGI
environment.
If you want to hook in a different exception page to say, a 404 status
code, you can add a second except for a specific subclass of an error::
@responder
def application(environ, start_response):
request = BaseRequest(environ)
try:
return view(request)
except NotFound, e:
return not_found(request)
except HTTPException, e:
return e
:copyright: (c) 2013 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import sys
# Because of bootstrapping reasons we need to manually patch ourselves
# onto our parent module.
import werkzeug
werkzeug.exceptions = sys.modules[__name__]
from werkzeug._internal import _get_environ
from werkzeug._compat import iteritems, integer_types, text_type, \
implements_to_string
from werkzeug.wrappers import Response
@implements_to_string
class HTTPException(Exception):
    """
    Baseclass for all HTTP exceptions.  This exception can be called as WSGI
    application to render a default error page or you can catch the subclasses
    of it independently and render nicer error messages.
    """

    #: HTTP status code; every concrete subclass overrides this.
    code = None
    #: Default description used when none is passed to the constructor.
    description = None

    def __init__(self, description=None, response=None):
        Exception.__init__(self)
        if description is not None:
            self.description = description
        self.response = response

    @classmethod
    def wrap(cls, exception, name=None):
        """This method returns a new subclass of the exception provided that
        also is a subclass of `BadRequest`.
        """
        class newcls(cls, exception):
            def __init__(self, arg=None, *args, **kwargs):
                cls.__init__(self, *args, **kwargs)
                exception.__init__(self, arg)
        # Attribute the generated class to the *caller's* module so repr()
        # and pickling point somewhere sensible.  NOTE: _getframe(1) makes
        # this sensitive to the call depth.
        newcls.__module__ = sys._getframe(1).f_globals.get('__name__')
        newcls.__name__ = name or cls.__name__ + exception.__name__
        return newcls

    @property
    def name(self):
        """The status name."""
        return HTTP_STATUS_CODES.get(self.code, 'Unknown Error')

    def get_description(self, environ=None):
        """Get the description."""
        return u'<p>%s</p>' % escape(self.description)

    def get_body(self, environ=None):
        """Get the HTML body."""
        return text_type((
            u'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n'
            u'<title>%(code)s %(name)s</title>\n'
            u'<h1>%(name)s</h1>\n'
            u'%(description)s\n'
        ) % {
            'code': self.code,
            'name': escape(self.name),
            'description': self.get_description(environ)
        })

    def get_headers(self, environ=None):
        """Get a list of headers."""
        return [('Content-Type', 'text/html')]

    def get_response(self, environ=None):
        """Get a response object.  If one was passed to the exception
        it's returned directly.

        :param environ: the optional environ for the request.  This
                        can be used to modify the response depending
                        on how the request looked like.
        :return: a :class:`Response` object or a subclass thereof.
        """
        # An explicitly supplied response always wins over the rendered one.
        if self.response is not None:
            return self.response
        if environ is not None:
            environ = _get_environ(environ)
        headers = self.get_headers(environ)
        return Response(self.get_body(environ), self.code, headers)

    def __call__(self, environ, start_response):
        """Call the exception as WSGI application.

        :param environ: the WSGI environment.
        :param start_response: the response callable provided by the WSGI
                               server.
        """
        response = self.get_response(environ)
        return response(environ, start_response)

    def __str__(self):
        return '%d: %s' % (self.code, self.name)

    def __repr__(self):
        return '<%s \'%s\'>' % (self.__class__.__name__, self)
# -- 4xx: client error exceptions (400, 401, 403, 404) -----------------------

class BadRequest(HTTPException):
    """*400* `Bad Request`
    Raise if the browser sends something to the application the application
    or server cannot handle.
    """
    code = 400
    description = (
        'The browser (or proxy) sent a request that this server could '
        'not understand.'
    )


class ClientDisconnected(BadRequest):
    """Internal exception that is raised if Werkzeug detects a disconnected
    client.  Since the client is already gone at that point attempting to
    send the error message to the client might not work and might ultimately
    result in another exception in the server.  Mainly this is here so that
    it is silenced by default as far as Werkzeug is concerned.
    Since disconnections cannot be reliably detected and are unspecified
    by WSGI to a large extend this might or might not be raised if a client
    is gone.
    .. versionadded:: 0.8
    """


class SecurityError(BadRequest):
    """Raised if something triggers a security error.  This is otherwise
    exactly like a bad request error.
    .. versionadded:: 0.9
    """


class Unauthorized(HTTPException):
    """*401* `Unauthorized`
    Raise if the user is not authorized.  Also used if you want to use HTTP
    basic auth.
    """
    code = 401
    description = (
        'The server could not verify that you are authorized to access '
        'the URL requested. You either supplied the wrong credentials (e.g. '
        'a bad password), or your browser doesn\'t understand how to supply '
        'the credentials required.'
    )


class Forbidden(HTTPException):
    """*403* `Forbidden`
    Raise if the user doesn't have the permission for the requested resource
    but was authenticated.
    """
    code = 403
    description = (
        'You don\'t have the permission to access the requested resource. '
        'It is either read-protected or not readable by the server.'
    )


class NotFound(HTTPException):
    """*404* `Not Found`
    Raise if a resource does not exist and never existed.
    """
    code = 404
    description = (
        'The requested URL was not found on the server. '
        'If you entered the URL manually please check your spelling and '
        'try again.'
    )
class MethodNotAllowed(HTTPException):
    """*405* `Method Not Allowed`
    Raise if the server used a method the resource does not handle. For
    example `POST` if the resource is view only. Especially useful for REST.
    The first argument for this exception should be a list of allowed methods.
    Strictly speaking the response would be invalid if you don't provide valid
    methods in the header which you can do with that list.
    """
    code = 405
    description = 'The method is not allowed for the requested URL.'

    def __init__(self, valid_methods=None, description=None):
        """Accept an optional list of valid HTTP methods; starting with
        werkzeug 0.3 the list will be mandatory."""
        HTTPException.__init__(self, description)
        self.valid_methods = valid_methods

    def get_headers(self, environ):
        # Start from the base headers and advertise the allowed methods, if
        # any were supplied, via the Allow header.
        result = HTTPException.get_headers(self, environ)
        if self.valid_methods:
            allow = ', '.join(self.valid_methods)
            result.append(('Allow', allow))
        return result
# -- 4xx: content negotiation, timing and entity-size errors -----------------

class NotAcceptable(HTTPException):
    """*406* `Not Acceptable`
    Raise if the server can't return any content conforming to the
    `Accept` headers of the client.
    """
    code = 406
    description = (
        'The resource identified by the request is only capable of '
        'generating response entities which have content characteristics '
        'not acceptable according to the accept headers sent in the '
        'request.'
    )


class RequestTimeout(HTTPException):
    """*408* `Request Timeout`
    Raise to signalize a timeout.
    """
    code = 408
    description = (
        'The server closed the network connection because the browser '
        'didn\'t finish the request within the specified time.'
    )


class Conflict(HTTPException):
    """*409* `Conflict`
    Raise to signal that a request cannot be completed because it conflicts
    with the current state on the server.
    .. versionadded:: 0.7
    """
    code = 409
    description = (
        'A conflict happened while processing the request. The resource '
        'might have been modified while the request was being processed.'
    )


class Gone(HTTPException):
    """*410* `Gone`
    Raise if a resource existed previously and went away without new location.
    """
    code = 410
    description = (
        'The requested URL is no longer available on this server and '
        'there is no forwarding address.</p><p>If you followed a link '
        'from a foreign page, please contact the author of this page.'
    )


class LengthRequired(HTTPException):
    """*411* `Length Required`
    Raise if the browser submitted data but no ``Content-Length`` header which
    is required for the kind of processing the server does.
    """
    code = 411
    description = (
        'A request with this method requires a valid <code>Content-'
        'Length</code> header.'
    )


class PreconditionFailed(HTTPException):
    """*412* `Precondition Failed`
    Status code used in combination with ``If-Match``, ``If-None-Match``, or
    ``If-Unmodified-Since``.
    """
    code = 412
    description = (
        'The precondition on the request for the URL failed positive '
        'evaluation.'
    )


class RequestEntityTooLarge(HTTPException):
    """*413* `Request Entity Too Large`
    The status code one should return if the data submitted exceeded a given
    limit.
    """
    code = 413
    description = (
        'The data value transmitted exceeds the capacity limit.'
    )


class RequestURITooLarge(HTTPException):
    """*414* `Request URI Too Large`
    Like *413* but for too long URLs.
    """
    code = 414
    description = (
        'The length of the requested URL exceeds the capacity limit '
        'for this server. The request cannot be processed.'
    )


class UnsupportedMediaType(HTTPException):
    """*415* `Unsupported Media Type`
    The status code returned if the server is unable to handle the media type
    the client transmitted.
    """
    code = 415
    description = (
        'The server does not support the media type transmitted in '
        'the request.'
    )
# -- 4xx: range/expectation errors and newer RFC 6585 status codes -----------

class RequestedRangeNotSatisfiable(HTTPException):
    """*416* `Requested Range Not Satisfiable`
    The client asked for a part of the file that lies beyond the end
    of the file.
    .. versionadded:: 0.7
    """
    code = 416
    description = (
        'The server cannot provide the requested range.'
    )


class ExpectationFailed(HTTPException):
    """*417* `Expectation Failed`
    The server cannot meet the requirements of the Expect request-header.
    .. versionadded:: 0.7
    """
    code = 417
    description = (
        'The server could not meet the requirements of the Expect header'
    )


class ImATeapot(HTTPException):
    """*418* `I'm a teapot`
    The server should return this if it is a teapot and someone attempted
    to brew coffee with it.
    .. versionadded:: 0.7
    """
    code = 418
    description = (
        'This server is a teapot, not a coffee machine'
    )


class UnprocessableEntity(HTTPException):
    """*422* `Unprocessable Entity`
    Used if the request is well formed, but the instructions are otherwise
    incorrect.
    """
    code = 422
    description = (
        'The request was well-formed but was unable to be followed '
        'due to semantic errors.'
    )


class PreconditionRequired(HTTPException):
    """*428* `Precondition Required`
    The server requires this request to be conditional, typically to prevent
    the lost update problem, which is a race condition between two or more
    clients attempting to update a resource through PUT or DELETE. By requiring
    each client to include a conditional header ("If-Match" or "If-Unmodified-
    Since") with the proper value retained from a recent GET request, the
    server ensures that each client has at least seen the previous revision of
    the resource.
    """
    code = 428
    description = (
        'This request is required to be conditional; try using "If-Match" '
        'or "If-Unmodified-Since".'
    )


class TooManyRequests(HTTPException):
    """*429* `Too Many Requests`
    The server is limiting the rate at which this user receives responses, and
    this request exceeds that rate. (The server may use any convenient method
    to identify users and their request rates). The server may include a
    "Retry-After" header to indicate how long the user should wait before
    retrying.
    """
    code = 429
    description = (
        'This user has exceeded an allotted request count. Try again later.'
    )


class RequestHeaderFieldsTooLarge(HTTPException):
    """*431* `Request Header Fields Too Large`
    The server refuses to process the request because the header fields are too
    large. One or more individual fields may be too large, or the set of all
    headers is too large.
    """
    code = 431
    description = (
        'One or more header fields exceeds the maximum size.'
    )
# -- 5xx: server error exceptions --------------------------------------------

class InternalServerError(HTTPException):
    """*500* `Internal Server Error`
    Raise if an internal server error occurred. This is a good fallback if an
    unknown error occurred in the dispatcher.
    """
    code = 500
    description = (
        'The server encountered an internal error and was unable to '
        'complete your request. Either the server is overloaded or there '
        'is an error in the application.'
    )


# NOTE(review): this class name shadows the built-in ``NotImplemented``
# constant in this module's namespace; presumably kept for backwards
# compatibility of the public API -- confirm before renaming.
class NotImplemented(HTTPException):
    """*501* `Not Implemented`
    Raise if the application does not support the action requested by the
    browser.
    """
    code = 501
    description = (
        'The server does not support the action requested by the '
        'browser.'
    )


class BadGateway(HTTPException):
    """*502* `Bad Gateway`
    If you do proxying in your application you should return this status code
    if you received an invalid response from the upstream server it accessed
    in attempting to fulfill the request.
    """
    code = 502
    description = (
        'The proxy server received an invalid response from an upstream '
        'server.'
    )


class ServiceUnavailable(HTTPException):
    """*503* `Service Unavailable`
    Status code you should return if a service is temporarily unavailable.
    """
    code = 503
    description = (
        'The server is temporarily unable to service your request due to '
        'maintenance downtime or capacity problems. Please try again '
        'later.'
    )
#: Mapping of status code -> exception class, filled in by _find_exceptions().
default_exceptions = {}
__all__ = ['HTTPException']


def _find_exceptions():
    # Scan the module namespace for objects that carry a non-None ``code``
    # attribute and register them by status code; also export their names.
    for name, obj in iteritems(globals()):
        try:
            if getattr(obj, 'code', None) is not None:
                default_exceptions[obj.code] = obj
                __all__.append(obj.__name__)
        except TypeError: # pragma: no cover
            continue
# Run once at import time, then remove the helper from the namespace.
_find_exceptions()
del _find_exceptions
class Aborter(object):
    """
    When passed a dict of code -> exception items it can be used as
    callable that raises exceptions.  If the first argument to the
    callable is an integer it will be looked up in the mapping, if it's
    a WSGI application it will be raised in a proxy exception.

    The rest of the arguments are forwarded to the exception constructor.
    """

    def __init__(self, mapping=None, extra=None):
        # Start from the default code -> exception table (or the supplied
        # one), then layer any caller-provided overrides on top.
        base = default_exceptions if mapping is None else mapping
        self.mapping = dict(base)
        if extra is not None:
            self.mapping.update(extra)

    def __call__(self, code, *args, **kwargs):
        # A lone non-integer argument is treated as a response object and
        # wrapped in a proxy HTTPException.
        if not args and not kwargs and not isinstance(code, integer_types):
            raise HTTPException(response=code)
        if code not in self.mapping:
            raise LookupError('no exception for %r' % code)
        raise self.mapping[code](*args, **kwargs)
#: Shortcut callable: ``abort(404)`` raises the exception registered for
#: that status code.
abort = Aborter()


#: an exception that is used internally to signal both a key error and a
#: bad request.  Used by a lot of the datastructures.
BadRequestKeyError = BadRequest.wrap(KeyError)


# imported here because of circular dependencies of werkzeug.utils
from werkzeug.utils import escape
from werkzeug.http import HTTP_STATUS_CODES
| gpl-2.0 |
Metaswitch/horizon | openstack_dashboard/static_settings.py | 25 | 5308 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This file contains configuration for the locations of all the static file
libraries, such as JavaScript and CSS libraries. Packagers for individual
distributions can edit or replace this file, in order to change the paths
to match their distribution's standards.
"""
import xstatic.main
import xstatic.pkg.angular
import xstatic.pkg.angular_bootstrap
import xstatic.pkg.angular_lrdragndrop
import xstatic.pkg.angular_smart_table
import xstatic.pkg.bootstrap_datepicker
import xstatic.pkg.bootstrap_scss
import xstatic.pkg.d3
import xstatic.pkg.font_awesome
import xstatic.pkg.hogan
import xstatic.pkg.jasmine
import xstatic.pkg.jquery
import xstatic.pkg.jquery_migrate
import xstatic.pkg.jquery_quicksearch
import xstatic.pkg.jquery_tablesorter
import xstatic.pkg.jquery_ui
import xstatic.pkg.jsencrypt
import xstatic.pkg.magic_search
import xstatic.pkg.qunit
import xstatic.pkg.rickshaw
import xstatic.pkg.spin
import xstatic.pkg.termjs
def get_staticfiles_dirs(webroot='/'):
    """Return the list of (URL prefix, directory) pairs for static libraries.

    Each entry maps a URL prefix under the static root to the on-disk base
    directory of the corresponding XStatic package.

    :param webroot: root URL passed to XStatic when resolving package paths.
    """
    def base_dir(pkg):
        # Resolve the on-disk base directory of an XStatic package.
        return xstatic.main.XStatic(pkg, root_url=webroot).base_dir

    # (URL prefix, XStatic package), in the order the entries are served.
    libraries = [
        ('horizon/lib/angular', xstatic.pkg.angular),
        ('horizon/lib/angular', xstatic.pkg.angular_bootstrap),
        ('horizon/lib/angular', xstatic.pkg.angular_lrdragndrop),
        ('horizon/lib/angular', xstatic.pkg.angular_smart_table),
        ('horizon/lib/bootstrap_datepicker', xstatic.pkg.bootstrap_datepicker),
        ('bootstrap', xstatic.pkg.bootstrap_scss),
        ('horizon/lib', xstatic.pkg.d3),
        ('horizon/lib', xstatic.pkg.hogan),
        ('horizon/lib/font-awesome', xstatic.pkg.font_awesome),
        ('horizon/lib/jasmine', xstatic.pkg.jasmine),
        ('horizon/lib/jquery', xstatic.pkg.jquery),
        ('horizon/lib/jquery', xstatic.pkg.jquery_migrate),
        ('horizon/lib/jquery', xstatic.pkg.jquery_quicksearch),
        ('horizon/lib/jquery', xstatic.pkg.jquery_tablesorter),
        ('horizon/lib/jsencrypt', xstatic.pkg.jsencrypt),
        ('horizon/lib/magic_search', xstatic.pkg.magic_search),
        ('horizon/lib/qunit', xstatic.pkg.qunit),
        ('horizon/lib', xstatic.pkg.rickshaw),
        ('horizon/lib', xstatic.pkg.spin),
        ('horizon/lib', xstatic.pkg.termjs),
    ]
    STATICFILES_DIRS = [(prefix, base_dir(pkg)) for prefix, pkg in libraries]

    jquery_ui_dir = base_dir(xstatic.pkg.jquery_ui)
    if xstatic.main.XStatic(xstatic.pkg.jquery_ui,
                            root_url=webroot).version.startswith('1.10.'):
        # The 1.10.x versions already contain the 'ui' directory.
        STATICFILES_DIRS.append(('horizon/lib/jquery-ui', jquery_ui_dir))
    else:
        # Newer versions dropped the directory, add it to keep the path the
        # same.
        STATICFILES_DIRS.append(('horizon/lib/jquery-ui/ui', jquery_ui_dir))

    return STATICFILES_DIRS
| apache-2.0 |
romain-dartigues/ansible | test/units/module_utils/test_text.py | 111 | 2172 | # -*- coding: utf-8 -*-
# (c) 2016 Toshio Kuratomi <tkuratomi@ansible.com>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import itertools
import pytest
from ansible.module_utils.six import PY3
# Internal API while this is still being developed. Eventually move to
# module_utils.common.text
from ansible.module_utils._text import to_text, to_bytes, to_native
# Format: byte representation, text representation, encoding of byte representation
# Format: byte representation, text representation, encoding of byte representation
VALID_STRINGS = (
    (b'abcde', u'abcde', 'ascii'),
    (b'caf\xc3\xa9', u'caf\xe9', 'utf-8'),
    (b'caf\xe9', u'caf\xe9', 'latin-1'),
    # u'くらとみ'
    (b'\xe3\x81\x8f\xe3\x82\x89\xe3\x81\xa8\xe3\x81\xbf', u'\u304f\u3089\u3068\u307f', 'utf-8'),
    (b'\x82\xad\x82\xe7\x82\xc6\x82\xdd', u'\u304f\u3089\u3068\u307f', 'shift-jis'),
)


# Each case runs twice: once with the bytes form as input and once with the
# text form; both must decode to the same text value.
@pytest.mark.parametrize('in_string, encoding, expected',
                         itertools.chain(((d[0], d[2], d[1]) for d in VALID_STRINGS),
                                         ((d[1], d[2], d[1]) for d in VALID_STRINGS)))
def test_to_text(in_string, encoding, expected):
    """test happy path of decoding to text"""
    assert to_text(in_string, encoding) == expected


# Symmetric to test_to_text: both input forms must encode to the byte value.
@pytest.mark.parametrize('in_string, encoding, expected',
                         itertools.chain(((d[0], d[2], d[0]) for d in VALID_STRINGS),
                                         ((d[1], d[2], d[0]) for d in VALID_STRINGS)))
def test_to_bytes(in_string, encoding, expected):
    """test happy path of encoding to bytes"""
    assert to_bytes(in_string, encoding) == expected


# Native strings are text on Python 3 and bytes on Python 2, hence the
# PY3-dependent expected value.
@pytest.mark.parametrize('in_string, encoding, expected',
                         itertools.chain(((d[0], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS),
                                         ((d[1], d[2], d[1] if PY3 else d[0]) for d in VALID_STRINGS)))
def test_to_native(in_string, encoding, expected):
    """test happy path of encoding to native strings"""
    assert to_native(in_string, encoding) == expected
| gpl-3.0 |
benoit-pierre/mcomix | mcomix/thumbnail_view.py | 1 | 5206 | """ gtk.IconView subclass for dynamically generated thumbnails. """
import Queue
import gtk
import gobject
from mcomix.preferences import prefs
from mcomix.worker_thread import WorkerThread
class ThumbnailViewBase(object):
    """ This class provides shared functionality for gtk.TreeView and
    gtk.IconView. Instantiating this class directly is *impossible*,
    as it depends on methods provided by the view classes. """
    def __init__(self, uid_column, pixbuf_column, status_column):
        """ Constructs a new ThumbnailView.
        @param uid_column: index of unique identifer column.
        @param pixbuf_column: index of pixbuf column.
        @param status_column: index of status boolean column
            (True if pixbuf is not temporary filler)
        """
        #: Keep track of already generated thumbnails.
        self._uid_column = uid_column
        self._pixbuf_column = pixbuf_column
        self._status_column = status_column

        #: Ignore updates when this flag is True.
        self._updates_stopped = True

        #: Worker thread
        self._thread = WorkerThread(self._pixbuf_worker,
                                    name='thumbview',
                                    unique_orders=True,
                                    max_threads=prefs["max threads"])

    def generate_thumbnail(self, uid):
        """ This function must return the thumbnail for C{uid}. """
        raise NotImplementedError()

    def get_visible_range(self):
        """ See L{gtk.IconView.get_visible_range}. """
        raise NotImplementedError()

    def stop_update(self):
        """ Stops generation of pixbufs. """
        self._updates_stopped = True
        self._thread.stop()

    def draw_thumbnails_on_screen(self, *args):
        """ Prepares valid thumbnails for currently displayed icons.
        This method is supposed to be called from the expose-event
        callback function. """
        visible = self.get_visible_range()
        if not visible:
            # No valid paths available
            return

        pixbufs_needed = []
        start = visible[0][0]
        end = visible[1][0]
        # Read ahead/back and start caching a few more icons. Currently invisible
        # icons are always cached only after the visible icons have been completed.
        additional = (end - start) // 2
        # Python 2: range() returns lists, so "+" concatenates the two ranges.
        required = range(start, end + additional + 1) + \
                   range(max(0, start - additional), start)
        model = self.get_model()
        # Filter invalid paths.
        required = [path for path in required if 0 <= path < len(model)]

        with self._thread:
            # Flush current pixmap generation orders.
            self._thread.clear_orders()

            for path in required:
                iter = model.get_iter(path)
                uid, generated = model.get(iter,
                                           self._uid_column,
                                           self._status_column)
                # Do not queue again if thumbnail was already created.
                if not generated:
                    pixbufs_needed.append((uid, iter))

            if len(pixbufs_needed) > 0:
                self._updates_stopped = False
                self._thread.extend_orders(pixbufs_needed)

    def _pixbuf_worker(self, order):
        """ Run by a worker thread to generate the thumbnail for a path."""
        uid, iter = order
        pixbuf = self.generate_thumbnail(uid)
        if pixbuf is not None:
            # Hand the result back to the GTK main loop; widgets must only
            # be touched from the main thread.
            gobject.idle_add(self._pixbuf_finished, iter, pixbuf)

    def _pixbuf_finished(self, iter, pixbuf):
        """ Executed on the main loop when a pixbuf was created, to actually
        insert it into the view store.
        @param iter: TreeIter of the row to update.
        @param pixbuf: the generated thumbnail pixbuf. """

        if self._updates_stopped:
            return 0

        model = self.get_model()
        model.set(iter, self._status_column, True, self._pixbuf_column, pixbuf)

        # Remove this idle handler.
        return 0
class ThumbnailIconView(gtk.IconView, ThumbnailViewBase):
    """ IconView that fills in its pixbufs lazily via ThumbnailViewBase. """
    def __init__(self, model, uid_column, pixbuf_column, status_column):
        # Persistent iters are required: queued orders keep TreeIters around
        # until the worker thread finishes with them.
        assert gtk.TREE_MODEL_ITERS_PERSIST == (model.get_flags() & gtk.TREE_MODEL_ITERS_PERSIST)
        super(ThumbnailIconView, self).__init__(model)
        ThumbnailViewBase.__init__(self, uid_column, pixbuf_column, status_column)
        self.set_pixbuf_column(pixbuf_column)

        # Connect events
        self.connect('expose-event', self.draw_thumbnails_on_screen)

    def get_visible_range(self):
        return gtk.IconView.get_visible_range(self)
class ThumbnailTreeView(gtk.TreeView, ThumbnailViewBase):
    """ TreeView counterpart of ThumbnailIconView. """
    def __init__(self, model, uid_column, pixbuf_column, status_column):
        # Persistent iters are required: queued orders keep TreeIters around
        # until the worker thread finishes with them.
        assert gtk.TREE_MODEL_ITERS_PERSIST == (model.get_flags() & gtk.TREE_MODEL_ITERS_PERSIST)
        super(ThumbnailTreeView, self).__init__(model)
        ThumbnailViewBase.__init__(self, uid_column, pixbuf_column, status_column)

        # Connect events
        self.connect('expose-event', self.draw_thumbnails_on_screen)

    def get_visible_range(self):
        return gtk.TreeView.get_visible_range(self)
# vim: expandtab:sw=4:ts=4
| gpl-2.0 |
gmarkall/numba | numba/core/postproc.py | 5 | 8525 | from numba.core import utils, ir, analysis, transforms, ir_utils
class YieldPoint(object):
    """A single ``yield`` site inside a generator function's IR."""

    def __init__(self, block, inst):
        assert isinstance(block, ir.Block)
        assert isinstance(inst, ir.Yield)
        # Basic block containing the yield instruction.
        self.block = block
        # The ir.Yield instruction itself.
        self.inst = inst
        # Live/weakly-live variable sets across this yield; None until
        # filled in by the post-processor's generator analysis.
        self.live_vars = None
        self.weak_live_vars = None
class GeneratorInfo(object):
    """Collected information about a generator function's yield points."""

    def __init__(self):
        # Maps yield index to its YieldPoint: { index: YieldPoint }
        self.yield_points = {}
        # Ordered list of variable names kept as generator state.
        self.state_vars = []

    def get_yield_points(self):
        """
        Return an iterable of YieldPoint instances.
        """
        return self.yield_points.values()
class VariableLifetime(object):
    """
    For lazily building information of variable lifetime
    """
    def __init__(self, blocks):
        self._blocks = blocks

    @utils.cached_property
    def cfg(self):
        # Control-flow graph of the blocks; computed once on first access.
        return analysis.compute_cfg_from_blocks(self._blocks)

    @utils.cached_property
    def usedefs(self):
        # Per-block variable use/def sets.
        return analysis.compute_use_defs(self._blocks)

    @utils.cached_property
    def livemap(self):
        # Live variables per block, derived from the CFG and use/def sets.
        return analysis.compute_live_map(self.cfg, self._blocks,
                                         self.usedefs.usemap,
                                         self.usedefs.defmap)

    @utils.cached_property
    def deadmaps(self):
        # Maps describing where variables become dead in each block.
        return analysis.compute_dead_maps(self.cfg, self._blocks, self.livemap,
                                          self.usedefs.defmap)
# Registry used by other packages that define new IR nodes to hook their own
# del-insertion logic into the post-processor.
# format: {type:function}
ir_extension_insert_dels = {}
class PostProcessor(object):
    """
    A post-processor for Numba IR.
    """

    def __init__(self, func_ir):
        # The FunctionIR instance being post-processed (mutated in place).
        self.func_ir = func_ir

    def run(self, emit_dels=False):
        """
        Run the following passes over Numba IR:
        - canonicalize the CFG
        - emit explicit `del` instructions for variables
        - compute lifetime of variables
        - compute generator info (if function is a generator function)
        """
        self.func_ir.blocks = transforms.canonicalize_cfg(self.func_ir.blocks)
        vlt = VariableLifetime(self.func_ir.blocks)
        self.func_ir.variable_lifetime = vlt
        bev = analysis.compute_live_variables(vlt.cfg, self.func_ir.blocks,
                                              vlt.usedefs.defmap,
                                              vlt.deadmaps.combined)
        # Record, per block, the variables live on entry.
        for offset, ir_block in self.func_ir.blocks.items():
            self.func_ir.block_entry_vars[ir_block] = bev[offset]
        if self.func_ir.is_generator:
            self.func_ir.generator_info = GeneratorInfo()
            self._compute_generator_info()
        else:
            self.func_ir.generator_info = None
        # Emit del nodes, do this last as the generator info parsing generates
        # and then strips dels as part of its analysis.
        if emit_dels:
            self._insert_var_dels()

    def _populate_generator_info(self):
        """
        Fill `index` for the Yield instruction and create YieldPoints.
        """
        dct = self.func_ir.generator_info.yield_points
        assert not dct, 'rerunning _populate_generator_info'
        for block in self.func_ir.blocks.values():
            for inst in block.body:
                if isinstance(inst, ir.Assign):
                    yieldinst = inst.value
                    if isinstance(yieldinst, ir.Yield):
                        # Yield indices are 1-based.
                        index = len(dct) + 1
                        yieldinst.index = index
                        yp = YieldPoint(block, yieldinst)
                        dct[yieldinst.index] = yp

    def _compute_generator_info(self):
        """
        Compute the generator's state variables as the union of live variables
        at all yield points.
        """
        # generate del info, it's used in analysis here, strip it out at the end
        self._insert_var_dels()
        self._populate_generator_info()
        gi = self.func_ir.generator_info
        for yp in gi.get_yield_points():
            # Start from the variables live at block entry and replay the
            # statements preceding the yield to refine the live set.
            live_vars = set(self.func_ir.get_block_entry_vars(yp.block))
            weak_live_vars = set()
            stmts = iter(yp.block.body)
            for stmt in stmts:
                if isinstance(stmt, ir.Assign):
                    if stmt.value is yp.inst:
                        break
                    live_vars.add(stmt.target.name)
                elif isinstance(stmt, ir.Del):
                    live_vars.remove(stmt.value)
            else:
                # for/else: the loop ran out without hitting the yield.
                assert 0, "couldn't find yield point"
            # Try to optimize out any live vars that are deleted immediately
            # after the yield point.
            for stmt in stmts:
                if isinstance(stmt, ir.Del):
                    name = stmt.value
                    if name in live_vars:
                        live_vars.remove(name)
                        weak_live_vars.add(name)
                else:
                    break
            yp.live_vars = live_vars
            yp.weak_live_vars = weak_live_vars
        # The generator state is the union over all yield points.
        st = set()
        for yp in gi.get_yield_points():
            st |= yp.live_vars
            st |= yp.weak_live_vars
        gi.state_vars = sorted(st)
        self.remove_dels()

    def _insert_var_dels(self):
        """
        Insert del statements for each variable (mutates the blocks in
        place; does not return anything).

        The algorithm avoids relying on explicit knowledge on loops and
        distinguish between variables that are defined locally vs variables that
        come from incoming blocks.
        We start with simple usage (variable reference) and definition (variable
        creation) maps on each block. Propagate the liveness info to predecessor
        blocks until it stabilize, at which point we know which variables must
        exist before entering each block. Then, we compute the end of variable
        lives and insert del statements accordingly. Variables are deleted after
        the last use. Variable referenced by terminators (e.g. conditional
        branch and return) are deleted by the successors or the caller.
        """
        vlt = self.func_ir.variable_lifetime
        self._patch_var_dels(vlt.deadmaps.internal, vlt.deadmaps.escaping)

    def _patch_var_dels(self, internal_dead_map, escaping_dead_map):
        """
        Insert delete in each block
        """
        for offset, ir_block in self.func_ir.blocks.items():
            # for each internal var, insert delete after the last use
            internal_dead_set = internal_dead_map[offset].copy()
            delete_pts = []
            # for each statement in reverse order
            for stmt in reversed(ir_block.body[:-1]):
                # internal vars that are used here
                live_set = set(v.name for v in stmt.list_vars())
                dead_set = live_set & internal_dead_set
                # Let extension node types handle their own dels first.
                for T, def_func in ir_extension_insert_dels.items():
                    if isinstance(stmt, T):
                        done_dels = def_func(stmt, dead_set)
                        dead_set -= done_dels
                        internal_dead_set -= done_dels
                # used here but not afterwards
                delete_pts.append((stmt, dead_set))
                internal_dead_set -= dead_set

            # rewrite body and insert dels
            body = []
            lastloc = ir_block.loc
            for stmt, delete_set in reversed(delete_pts):
                lastloc = stmt.loc
                # Ignore dels (assuming no user inserted deletes)
                if not isinstance(stmt, ir.Del):
                    body.append(stmt)
                # note: the reverse sort is not necessary for correctness
                # it is just to minimize changes to test for now
                for var_name in sorted(delete_set, reverse=True):
                    body.append(ir.Del(var_name, loc=lastloc))
            body.append(ir_block.body[-1])  # terminator
            ir_block.body = body
            # vars to delete at the start
            escape_dead_set = escaping_dead_map[offset]
            for var_name in sorted(escape_dead_set):
                ir_block.prepend(ir.Del(var_name, loc=ir_block.body[0].loc))

    def remove_dels(self):
        """
        Strips the IR of Del nodes
        """
        ir_utils.remove_dels(self.func_ir.blocks)
| bsd-2-clause |
jocke-l/blues | blues/application/providers/supervisor.py | 2 | 2610 | import os
from fabric.context_managers import settings
from fabric.state import env
from .base import BaseProvider
from ..project import *
from ... import debian
from ... import supervisor
from ...app import blueprint
class SupervisorProvider(BaseProvider):
    """Application provider that runs workers under system-wide Supervisor."""

    def install(self):
        """
        Install system wide Supervisor and upstart service.
        """
        supervisor.setup()

    def get_config_path(self):
        """
        Get or create Supervisor project programs home dir.

        :return: Remote config path
        """
        # Join config path and make sure that it ends with a slash
        destination = os.path.join(project_home(), 'supervisor.d', '')
        debian.mkdir(destination)
        return destination

    def configure_web(self):
        """
        TODO: Render and upload web program to projects Supervisor home dir.

        :return: Updated programs
        """
        raise NotImplementedError('Supervisor provider not yet implements web workers.')

    def configure_worker(self):
        """
        Render and upload worker program(s) to projects Supervisor home dir.

        :return: Updated programs
        """
        destination = self.get_config_path()

        context = super(SupervisorProvider, self).get_context()
        context.update({
            # Default worker count to the remote host's CPU count.
            'workers': blueprint.get('worker.workers', debian.nproc()),
        })

        # Override context defaults with blueprint settings
        context.update(blueprint.get('worker'))

        # Filter program extensions by host
        programs = ['celery.conf']
        extensions = blueprint.get('worker.celery.extensions')
        if isinstance(extensions, list):
            # Filter of bad values
            extensions = [extension for extension in extensions if extension]
            for extension in extensions:
                programs.append('{}.conf'.format(extension))
        elif isinstance(extensions, dict):
            # Mapping form {extension: host}; '*' enables it on every host.
            for extension, extension_host in extensions.items():
                if extension_host in ('*', env.host_string):
                    programs.append('{}.conf'.format(extension))

        # Upload programs
        for program in programs:
            template = os.path.join('supervisor', 'default', program)
            default_templates = supervisor.blueprint.get_default_template_root()
            with settings(template_dirs=[default_templates]):
                uploads = blueprint.upload(template, destination, context=context)
            self.updates.extend(uploads)

        return self.updates

    def reload(self):
        # Reload Supervisor so new/changed programs take effect.
        supervisor.reload()
| mit |
usmcamgrimm/kernel_lge_g3 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py | 11088 | 3246 | # Core.py - Python extension for perf script, core functions
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
from collections import defaultdict
def autodict():
    """Return a dict that transparently creates nested dicts on first access."""
    return defaultdict(lambda: autodict())
# Per-event/per-field formatting metadata, populated via the define_*
# helpers below and consumed by flag_str()/symbol_str().
flag_fields = autodict()
symbolic_fields = autodict()
def define_flag_field(event_name, field_name, delim):
    """Register the delimiter used when printing a flag-type field."""
    flag_fields[event_name][field_name]['delim'] = delim
def define_flag_value(event_name, field_name, value, field_str):
    """Register the printable name for one bit value of a flag field."""
    flag_fields[event_name][field_name]['values'][value] = field_str
def define_symbolic_field(event_name, field_name):
    """Placeholder kept for symmetry with define_flag_field."""
    # nothing to do, really
    pass
def define_symbolic_value(event_name, field_name, value, field_str):
    """Register the printable name for one value of a symbolic field."""
    symbolic_fields[event_name][field_name]['values'][value] = field_str
def flag_str(event_name, field_name, value):
    """Return the printable form of a flag field value.

    Names of the set bits are joined with the field's registered delimiter;
    a value of 0 maps to the name registered for 0, if any.  Unknown
    events/fields yield the empty string.
    """
    string = ""
    # Hoist the repeated nested lookups.
    field = flag_fields[event_name][field_name]
    if field:
        print_delim = 0
        # sorted() instead of list.sort() so this also works on Python 3,
        # where dict.keys() returns an unsortable view.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string += field['values'][idx]
                break
            if idx and (value & idx) == idx:
                if print_delim and field['delim']:
                    string += " " + field['delim'] + " "
                string += field['values'][idx]
                print_delim = 1
                # Clear handled bits so each is printed once.
                value &= ~idx
    return string
def symbol_str(event_name, field_name, value):
    """Return the printable name registered for a symbolic field value.

    Returns the empty string when the value (or the event/field itself)
    is unknown.
    """
    string = ""
    # Hoist the repeated nested lookups.
    field = symbolic_fields[event_name][field_name]
    if field:
        # sorted() instead of list.sort() so this also works on Python 3,
        # where dict.keys() returns an unsortable view.
        for idx in sorted(field['values'].keys()):
            if not value and not idx:
                string = field['values'][idx]
                break
            if (value == idx):
                string = field['values'][idx]
                break
    return string
# Bit values of the common trace flags field -> printable name.
trace_flags = {
    0x00: "NONE",
    0x01: "IRQS_OFF",
    0x02: "IRQS_NOSUPPORT",
    0x04: "NEED_RESCHED",
    0x08: "HARDIRQ",
    0x10: "SOFTIRQ",
}


def trace_flag_str(value):
    """Return the printable form of the common trace flags field.

    Set bits are decoded in ascending bit order and joined with " | ";
    a value of 0 decodes to "NONE".
    """
    string = ""
    print_delim = 0
    # Iterate in sorted order so the output is deterministic (plain-dict
    # iteration order was unspecified on older Pythons) and consistent
    # with flag_str().
    for idx in sorted(trace_flags.keys()):
        if not value and not idx:
            string += "NONE"
            break
        if idx and (value & idx) == idx:
            if print_delim:
                string += " | "
            string += trace_flags[idx]
            print_delim = 1
            # Clear handled bits so each is printed once.
            value &= ~idx
    return string
def taskState(state):
    """Map a kernel task state number to its one-letter code.

    Unrecognized states map to "Unknown".
    """
    return {
        0: "R",
        1: "S",
        2: "D",
        64: "DEAD",
    }.get(state, "Unknown")
class EventHeaders:
    """Common header fields carried by every perf trace event."""

    def __init__(self, common_cpu, common_secs, common_nsecs,
                 common_pid, common_comm):
        # Store under the shorter names used throughout the scripts.
        (self.cpu, self.secs, self.nsecs,
         self.pid, self.comm) = (common_cpu, common_secs, common_nsecs,
                                 common_pid, common_comm)

    def ts(self):
        """Full event timestamp in nanoseconds."""
        return self.nsecs + self.secs * (10 ** 9)

    def ts_format(self):
        """Timestamp rendered as '<seconds>.<microseconds>'."""
        return "%d.%d" % (self.secs, int(self.nsecs / 1000))
| gpl-2.0 |
rob356/SickRage | lib/pyasn1/type/constraint.py | 382 | 7279 | #
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
def __eq__(self, other):
return self is other and True or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
    """Value must be part of defined values constraint"""
    def _testValue(self, value, idx):
        # XXX index vals for performance?
        if value in self._values:
            return
        raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
    """Value must satisfy all of defined set of constraints"""
    def _testValue(self, value, idx):
        # Delegate to each nested constraint; the first violation raises.
        for constraint in self._values:
            constraint(value, idx)
class ValueRangeConstraint(AbstractConstraint):
    """Value must be within start and stop values (inclusive)"""
    def _testValue(self, value, idx):
        out_of_range = value < self.start or value > self.stop
        if out_of_range:
            raise error.ValueConstraintError(value)

    def _setValues(self, values):
        # Exactly two values are expected: (start, stop).
        if len(values) != 2:
            raise error.PyAsn1Error(
                '%s: bad constraint values' % (self.__class__.__name__,)
            )
        self.start, self.stop = values
        if self.start > self.stop:
            raise error.PyAsn1Error(
                '%s: screwed constraint values (start > stop): %s > %s' % (
                    self.__class__.__name__, self.start, self.stop
                )
            )
        AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
    """len(value) must be within start and stop values (inclusive)"""
    def _testValue(self, value, idx):
        size = len(value)
        if size < self.start or size > self.stop:
            raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
    """Every character of the value must belong to the permitted alphabet."""
    def _setValues(self, values):
        # Flatten the given strings/sequences into one tuple of characters.
        alphabet = ()
        for v in values:
            alphabet += tuple(v)
        self._values = alphabet

    def _testValue(self, value, idx):
        for ch in value:
            if ch not in self._values:
                raise error.ValueConstraintError(value)
# This is a bit kludgy: a single constraint object supports two modes of operation
class InnerTypeConstraint(AbstractConstraint):
    """Value must satisfy type and presence constraints"""
    def _testValue(self, value, idx):
        # Single-type mode: one constraint applies to any component.
        if self.__singleTypeConstraint:
            self.__singleTypeConstraint(value)
        # Multi-type mode: the constraint is selected by component index.
        elif self.__multipleTypeConstraint:
            if idx not in self.__multipleTypeConstraint:
                raise error.ValueConstraintError(value)
            constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT':   # XXX presence is not checked!
                raise error.ValueConstraintError(value)
            constraint(value)

    def _setValues(self, values):
        self.__multipleTypeConstraint = {}
        self.__singleTypeConstraint = None
        for v in values:
            if isinstance(v, tuple):
                # Tuple form (idx, constraint, status) selects multi-type mode.
                self.__multipleTypeConstraint[v[0]] = v[1], v[2]
            else:
                self.__singleTypeConstraint = v
        AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
    """Value must not fit the single constraint"""
    def _testValue(self, value, idx):
        # The value is acceptable exactly when the wrapped constraint
        # rejects it.
        try:
            self._values[0](value, idx)
        except error.ValueConstraintError:
            return
        raise error.ValueConstraintError(value)

    def _setValues(self, values):
        if len(values) != 1:
            raise error.PyAsn1Error('Single constraint expected')
        AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
    """Common base for constraint combinators (intersection, union).

    Stores a sequence of constraint objects and exposes sequence-style
    access to them; concrete subclasses define how the members combine.
    """
    def __getitem__(self, idx): return self._values[idx]

    def __add__(self, value): return self.__class__(self, value)
    def __radd__(self, value): return self.__class__(self, value)

    def __len__(self): return len(self._values)

    # Constraints inclusion in sets
    def _setValues(self, values):
        self._values = values
        for v in values:
            self._valueMap[v] = 1
            self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
    """Value must satisfy all constraints"""
    def _testValue(self, value, idx):
        # Every member constraint must accept the value.
        for constraint in self._values:
            constraint(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
    """Value must satisfy at least one constraint"""
    def _testValue(self, value, idx):
        # The first member constraint that accepts the value wins.
        for constraint in self._values:
            try:
                constraint(value, idx)
            except error.ValueConstraintError:
                continue
            return
        raise error.ValueConstraintError(
            'all of %s failed for \"%s\"' % (self._values, value)
        )
# XXX
# add tests for type check
| gpl-3.0 |
tchernomax/ansible | lib/ansible/modules/cloud/vmware/vmware_host_firewall_manager.py | 10 | 8047 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module support-status metadata consumed by ansible-doc tooling,
# not by the module at runtime.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: vmware_host_firewall_manager
short_description: Manage firewall configurations about an ESXi host
description:
- This module can be used to manage firewall configurations about an ESXi host when ESXi hostname or Cluster name is given.
version_added: '2.5'
author:
- Abhijeet Kasurde (@Akasurde)
notes:
- Tested on vSphere 6.5
requirements:
- python >= 2.6
- PyVmomi
options:
cluster_name:
description:
- Name of the cluster.
- Firewall settings are applied to every ESXi host system in given cluster.
- If C(esxi_hostname) is not given, this parameter is required.
esxi_hostname:
description:
- ESXi hostname.
- Firewall settings are applied to this ESXi host system.
- If C(cluster_name) is not given, this parameter is required.
rules:
description:
- A list of Rule set which needs to be managed.
- Each member of list is rule set name and state to be set the rule.
- Both rule name and rule state are required parameters.
- Please see examples for more information.
default: []
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = r'''
- name: Enable vvold rule set for all ESXi Host in given Cluster
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
cluster_name: cluster_name
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Enable vvold rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
delegate_to: localhost
- name: Manage multiple rule set for an ESXi Host
vmware_host_firewall_manager:
hostname: '{{ vcenter_hostname }}'
username: '{{ vcenter_username }}'
password: '{{ vcenter_password }}'
esxi_hostname: '{{ esxi_hostname }}'
rules:
- name: vvold
enabled: True
- name: CIMHttpServer
enabled: False
delegate_to: localhost
'''
RETURN = r'''
rule_set_state:
description:
- dict with hostname as key and dict with firewall rule set facts as value
returned: success
type: dict
sample: {
"rule_set_state": {
"localhost.localdomain": {
"CIMHttpServer": {
"current_state": true,
"desired_state": true,
"previous_state": true
},
"vvold": {
"current_state": true,
"desired_state": true,
"previous_state": true
}
}
}
}
'''
try:
from pyVmomi import vim
except ImportError:
pass
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vmware import vmware_argument_spec, PyVmomi
from ansible.module_utils._text import to_native
class VmwareFirewallManager(PyVmomi):
    """Gather and reconcile ESXi firewall rule set state via vCenter."""

    def __init__(self, module):
        """Resolve target hosts and collect their current firewall facts.

        :param module: AnsibleModule instance carrying the play parameters.
        """
        super(VmwareFirewallManager, self).__init__(module)
        cluster_name = self.params.get('cluster_name', None)
        esxi_host_name = self.params.get('esxi_hostname', None)
        self.options = self.params.get('options', dict())
        # Either the single named host, or every host in the named cluster.
        self.hosts = self.get_all_host_objs(cluster_name=cluster_name, esxi_host_name=esxi_host_name)
        self.firewall_facts = dict()
        self.rule_options = self.module.params.get("rules")
        self.gather_rule_set()

    def gather_rule_set(self):
        """Populate self.firewall_facts as {hostname: {rule_key: {'enabled': bool}}}."""
        for host in self.hosts:
            self.firewall_facts[host.name] = {}
            firewall_system = host.configManager.firewallSystem
            if firewall_system:
                for rule_set_obj in firewall_system.firewallInfo.ruleset:
                    temp_rule_dict = dict()
                    temp_rule_dict['enabled'] = rule_set_obj.enabled
                    self.firewall_facts[host.name][rule_set_obj.key] = temp_rule_dict

    def ensure(self):
        """Apply the requested rule set states and exit the module.

        Calls module.exit_json() with per-host/per-rule state, or
        module.fail_json() on invalid parameters or API errors.
        """
        fw_change_list = []
        results = dict(changed=False, rule_set_state=dict())
        for host in self.hosts:
            firewall_system = host.configManager.firewallSystem
            if firewall_system is None:
                # Host exposes no firewall (nothing to manage).
                continue
            results['rule_set_state'][host.name] = dict()
            for rule_option in self.rule_options:
                rule_name = rule_option.get('name', None)
                if rule_name is None:
                    self.module.fail_json(msg="Please specify rule.name for rule set"
                                              " as it is required parameter.")
                if rule_name not in self.firewall_facts[host.name]:
                    self.module.fail_json(msg="rule named '%s' wasn't found." % rule_name)
                rule_enabled = rule_option.get('enabled', None)
                if rule_enabled is None:
                    self.module.fail_json(msg="Please specify rules.enabled for rule set"
                                              " %s as it is required parameter." % rule_name)
                current_rule_state = self.firewall_facts[host.name][rule_name]['enabled']
                if current_rule_state != rule_enabled:
                    try:
                        if rule_enabled:
                            firewall_system.EnableRuleset(id=rule_name)
                        else:
                            firewall_system.DisableRuleset(id=rule_name)
                        fw_change_list.append(True)
                    except vim.fault.NotFound as not_found:
                        self.module.fail_json(msg="Failed to enable rule set %s as"
                                                  " rule set id is unknown : %s" % (rule_name,
                                                                                    to_native(not_found.msg)))
                    except vim.fault.HostConfigFault as host_config_fault:
                        # NOTE: message fixed from "Failed to enabled ..." (grammar bug).
                        self.module.fail_json(msg="Failed to enable rule set %s as an internal"
                                                  " error happened while reconfiguring"
                                                  " rule set : %s" % (rule_name,
                                                                      to_native(host_config_fault.msg)))
                results['rule_set_state'][host.name][rule_name] = dict(current_state=rule_enabled,
                                                                       previous_state=current_rule_state,
                                                                       desired_state=rule_enabled,
                                                                       )
        if any(fw_change_list):
            results['changed'] = True
        self.module.exit_json(**results)
def main():
    """Module entry point: build the argument spec and apply rule state."""
    spec = vmware_argument_spec()
    spec.update(
        dict(
            cluster_name=dict(type='str', required=False),
            esxi_hostname=dict(type='str', required=False),
            rules=dict(type='list', default=list(), required=False),
        )
    )
    module = AnsibleModule(
        argument_spec=spec,
        required_one_of=[
            ['cluster_name', 'esxi_hostname'],
        ]
    )
    VmwareFirewallManager(module).ensure()


if __name__ == "__main__":
    main()
| gpl-3.0 |
Deepakkothandan/ansible | test/units/playbook/test_play_context.py | 65 | 7605 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2017 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import pytest
from ansible import constants as C
from ansible.cli import CLI
from ansible.compat.tests import unittest
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.six.moves import shlex_quote
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
@pytest.fixture
def parser():
    """Build a CLI option parser carrying every option group PlayContext reads."""
    parser = CLI.base_parser(runas_opts=True, meta_opts=True,
                             runtask_opts=True, vault_opts=True,
                             async_opts=True, connect_opts=True,
                             subset_opts=True, check_opts=True,
                             inventory_opts=True,)
    return parser
def test_play_context(mocker, parser):
    """Exercise PlayContext defaults, play-level and task-level overrides."""
    (options, args) = parser.parse_args(['-vv', '--check'])
    play_context = PlayContext(options=options)

    # Defaults derived purely from CLI options and constants.
    assert play_context._attributes['connection'] == C.DEFAULT_TRANSPORT
    assert play_context.remote_addr is None
    assert play_context.remote_user is None
    assert play_context.password == ''
    assert play_context.port is None
    assert play_context.private_key_file == C.DEFAULT_PRIVATE_KEY_FILE
    assert play_context.timeout == C.DEFAULT_TIMEOUT
    assert play_context.shell is None
    assert play_context.verbosity == 2
    assert play_context.check_mode is True
    assert play_context.no_log is None

    # Play-level settings take precedence over the defaults.
    mock_play = mocker.MagicMock()
    mock_play.connection = 'mock'
    mock_play.remote_user = 'mock'
    mock_play.port = 1234
    mock_play.become = True
    mock_play.become_method = 'mock'
    mock_play.become_user = 'mockroot'
    mock_play.no_log = True

    play_context = PlayContext(play=mock_play, options=options)
    assert play_context.connection == 'mock'
    assert play_context.remote_user == 'mock'
    assert play_context.password == ''
    assert play_context.port == 1234
    assert play_context.become is True
    assert play_context.become_method == "mock"
    assert play_context.become_user == "mockroot"

    # Task-level settings and inventory variables override the play.
    mock_task = mocker.MagicMock()
    mock_task.connection = 'mocktask'
    mock_task.remote_user = 'mocktask'
    mock_task.no_log = mock_play.no_log
    mock_task.become = True
    mock_task.become_method = 'mocktask'
    mock_task.become_user = 'mocktaskroot'
    mock_task.become_pass = 'mocktaskpass'
    mock_task._local_action = False
    mock_task.delegate_to = None

    all_vars = dict(
        ansible_connection='mock_inventory',
        ansible_ssh_port=4321,
    )

    mock_templar = mocker.MagicMock()

    play_context = PlayContext(play=mock_play, options=options)
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    assert play_context.connection == 'mock_inventory'
    assert play_context.remote_user == 'mocktask'
    assert play_context.port == 4321
    assert play_context.no_log is True
    assert play_context.become is True
    assert play_context.become_method == "mocktask"
    assert play_context.become_user == "mocktaskroot"
    assert play_context.become_pass == "mocktaskpass"

    # A task-level no_log=False must win over the play's no_log=True.
    mock_task.no_log = False
    play_context = play_context.set_task_and_variable_override(task=mock_task, variables=all_vars, templar=mock_templar)
    assert play_context.no_log is False
def test_play_context_make_become_cmd(parser):
    """Verify the wrapped command line built for each become method."""
    (options, args) = parser.parse_args([])
    play_context = PlayContext(options=options)

    default_cmd = "/bin/foo"
    default_exe = "/bin/bash"
    # Expected executables/flags mirror the defaults in ansible.constants.
    sudo_exe = C.DEFAULT_SUDO_EXE or 'sudo'
    sudo_flags = C.DEFAULT_SUDO_FLAGS
    su_exe = C.DEFAULT_SU_EXE or 'su'
    su_flags = C.DEFAULT_SU_FLAGS or ''
    pbrun_exe = 'pbrun'
    pbrun_flags = ''
    pfexec_exe = 'pfexec'
    pfexec_flags = ''
    doas_exe = 'doas'
    doas_flags = ' -n -u foo '
    ksu_exe = 'ksu'
    ksu_flags = ''
    dzdo_exe = 'dzdo'

    # Without become the command must pass through untouched.
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
    assert cmd == default_cmd

    play_context.become = True
    play_context.become_user = 'foo'

    play_context.become_method = 'sudo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert (cmd == """%s %s -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags, play_context.become_user,
                                                            default_exe, play_context.success_key, default_cmd))

    # With a become password, sudo drops -n and gains -p "<prompt>".
    play_context.become_pass = 'testpass'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable=default_exe)
    assert (cmd == """%s %s -p "%s" -u %s %s -c 'echo %s; %s'""" % (sudo_exe, sudo_flags.replace('-n', ''),
                                                                    play_context.prompt, play_context.become_user, default_exe,
                                                                    play_context.success_key, default_cmd))

    play_context.become_pass = None
    play_context.become_method = 'su'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert (cmd == """%s %s -c '%s -c '"'"'echo %s; %s'"'"''""" % (su_exe, play_context.become_user, default_exe,
                                                                   play_context.success_key, default_cmd))

    play_context.become_method = 'pbrun'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert cmd == """%s %s -u %s 'echo %s; %s'""" % (pbrun_exe, pbrun_flags, play_context.become_user, play_context.success_key, default_cmd)

    play_context.become_method = 'pfexec'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert cmd == '''%s %s "'echo %s; %s'"''' % (pfexec_exe, pfexec_flags, play_context.success_key, default_cmd)

    play_context.become_method = 'doas'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert (cmd == """%s %s echo %s && %s %s env ANSIBLE=true %s""" % (doas_exe, doas_flags, play_context.
                                                                       success_key, doas_exe, doas_flags, default_cmd))

    play_context.become_method = 'ksu'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert (cmd == """%s %s %s -e %s -c 'echo %s; %s'""" % (ksu_exe, play_context.become_user, ksu_flags,
                                                            default_exe, play_context.success_key, default_cmd))

    # Unknown become methods must raise.
    play_context.become_method = 'bad'
    with pytest.raises(AnsibleError):
        play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")

    play_context.become_method = 'dzdo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert cmd == """%s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, play_context.become_user, default_exe, play_context.success_key, default_cmd)

    # dzdo with a become password gains a quoted -p <prompt>.
    play_context.become_pass = 'testpass'
    play_context.become_method = 'dzdo'
    cmd = play_context.make_become_cmd(cmd=default_cmd, executable="/bin/bash")
    assert (cmd == """%s -p %s -u %s %s -c 'echo %s; %s'""" % (dzdo_exe, shlex_quote(play_context.prompt),
                                                               play_context.become_user, default_exe,
                                                               play_context.success_key, default_cmd))
| gpl-3.0 |
groschovskiy/personfinder | app/pytz/zoneinfo/Asia/Tashkent.py | 9 | 1530 | '''tzinfo timezone information for Asia/Tashkent.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Tashkent(DstTzInfo):
    '''Asia/Tashkent timezone definition. See datetime.tzinfo for details'''

    zone = 'Asia/Tashkent'

    # UTC instants at which the offset/abbreviation changes; generated
    # from the Olson tz database — do not edit by hand.
    _utc_transition_times = [
        d(1,1,1,0,0,0),
        d(1924,5,1,19,22,48),
        d(1930,6,20,19,0,0),
        d(1981,3,31,18,0,0),
        d(1981,9,30,17,0,0),
        d(1982,3,31,18,0,0),
        d(1982,9,30,17,0,0),
        d(1983,3,31,18,0,0),
        d(1983,9,30,17,0,0),
        d(1984,3,31,18,0,0),
        d(1984,9,29,20,0,0),
        d(1985,3,30,20,0,0),
        d(1985,9,28,20,0,0),
        d(1986,3,29,20,0,0),
        d(1986,9,27,20,0,0),
        d(1987,3,28,20,0,0),
        d(1987,9,26,20,0,0),
        d(1988,3,26,20,0,0),
        d(1988,9,24,20,0,0),
        d(1989,3,25,20,0,0),
        d(1989,9,23,20,0,0),
        d(1990,3,24,20,0,0),
        d(1990,9,29,20,0,0),
        d(1991,3,30,20,0,0),
        d(1991,8,31,18,0,0),
        d(1991,9,28,21,0,0),
        d(1991,12,31,19,0,0),
    ]

    # (utcoffset seconds, dst offset seconds, tzname) for each interval
    # delimited by the transition times above.
    _transition_info = [
        i(16620,0,'LMT'),
        i(18000,0,'TAST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(25200,3600,'TASST'),
        i(21600,0,'TAST'),
        i(21600,0,'TASST'),
        i(21600,0,'UZST'),
        i(18000,0,'UZT'),
        i(18000,0,'UZT'),
    ]

# Module-level singleton instance, as pytz expects.
Tashkent = Tashkent()
| apache-2.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/jinja2/defaults.py | 130 | 1323 | # -*- coding: utf-8 -*-
"""
jinja2.defaults
~~~~~~~~~~~~~~~
Jinja default filters and tags.
:copyright: (c) 2017 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
from jinja2._compat import range_type
from jinja2.utils import generate_lorem_ipsum, Cycler, Joiner
# defaults for the parser / lexer
BLOCK_START_STRING = '{%'
BLOCK_END_STRING = '%}'
VARIABLE_START_STRING = '{{'
VARIABLE_END_STRING = '}}'
COMMENT_START_STRING = '{#'
COMMENT_END_STRING = '#}'
LINE_STATEMENT_PREFIX = None
LINE_COMMENT_PREFIX = None
TRIM_BLOCKS = False
LSTRIP_BLOCKS = False
NEWLINE_SEQUENCE = '\n'
KEEP_TRAILING_NEWLINE = False

# default filters, tests and namespace
# NOTE(review): these imports sit mid-module, presumably to avoid a
# circular import with jinja2.filters/jinja2.tests — confirm before moving.
from jinja2.filters import FILTERS as DEFAULT_FILTERS
from jinja2.tests import TESTS as DEFAULT_TESTS
DEFAULT_NAMESPACE = {
    'range': range_type,
    'dict': dict,
    'lipsum': generate_lorem_ipsum,
    'cycler': Cycler,
    'joiner': Joiner
}

# default policies
DEFAULT_POLICIES = {
    'compiler.ascii_str': True,
    'urlize.rel': 'noopener',
    'urlize.target': None,
    'truncate.leeway': 5,
    'json.dumps_function': None,
    'json.dumps_kwargs': {'sort_keys': True},
}

# export all constants
__all__ = tuple(x for x in locals().keys() if x.isupper())
| mit |
eharney/nova | nova/pci/pci_whitelist.py | 19 | 3769 | # Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from nova import exception
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.pci import pci_utils
from oslo.config import cfg
pci_opts = [cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help='White list of PCI devices available to VMs. '
'For example: pci_passthrough_whitelist = '
'[{"vendor_id": "8086", "product_id": "0443"}]'
)
]
CONF = cfg.CONF
CONF.register_opts(pci_opts)
LOG = logging.getLogger(__name__)
_PCI_VENDOR_PATTERN = "^(hex{4})$".replace("hex", "[\da-fA-F]")
_WHITELIST_SCHEMA = {
"type": "array",
"items":
{
"type": "object",
"additionalProperties": False,
"properties": {
"product_id": {
"type": "string",
"pattern": _PCI_VENDOR_PATTERN
},
"vendor_id": {
"type": "string",
"pattern": _PCI_VENDOR_PATTERN
},
},
"required": ["product_id", "vendor_id"]
}
}
class PciHostDevicesWhiteList(object):
    """White list class to decide assignable pci devices.

    Not every device on a compute node may be handed to a guest; the cloud
    administrator lists the assignable ones by vendor_id/product_id.  With
    no whitelist configured, nothing is assignable.
    """

    def _parse_white_list_from_config(self, whitelists):
        """Parse and validate the pci whitelist from the nova config.

        :param whitelists: iterable of JSON strings, each a list of dicts
        :returns: flat list of device-spec dicts
        :raises exception.PciConfigInvalidWhitelist: on bad JSON or schema
        """
        parsed = []
        try:
            for raw_spec in whitelists:
                entries = jsonutils.loads(raw_spec)
                jsonschema.validate(entries, _WHITELIST_SCHEMA)
                parsed += entries
        except Exception as exc:
            # Any parse/validation failure is surfaced as a config error.
            raise exception.PciConfigInvalidWhitelist(reason=str(exc))
        return parsed

    def __init__(self, whitelist_spec=None):
        """White list constructor.

        Example: '[{"product_id":"1520", "vendor_id":"8086"}]' allows devices
        with vendor_id 8086 and product_id 1520 to be assigned to guests.

        :param whitelist_spec: a list of JSON strings, each describing a
                               list of device-property requirement dicts
        """
        super(PciHostDevicesWhiteList, self).__init__()
        # spec stays None (nothing assignable) when no whitelist is given.
        self.spec = (self._parse_white_list_from_config(whitelist_spec)
                     if whitelist_spec else None)

    def device_assignable(self, dev):
        """Check if a device can be assigned to a guest.

        :param dev: a dictionary describing the device properties
        """
        return self.spec is not None and pci_utils.pci_device_prop_match(
            dev, self.spec)
def get_pci_devices_filter():
    """Build a device filter from the ``pci_passthrough_whitelist`` option."""
    configured = CONF.pci_passthrough_whitelist
    return PciHostDevicesWhiteList(configured)
| apache-2.0 |
terbolous/SickRage | lib/rtorrent/lib/xmlrpc/http.py | 180 | 1195 | # Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.compat import xmlrpclib
# Plain HTTP transport needs no customisation: reuse xmlrpclib's ServerProxy.
HTTPServerProxy = xmlrpclib.ServerProxy
| gpl-3.0 |
yodalee/servo | tests/wpt/harness/wptrunner/wptmanifest/tests/test_tokenizer.py | 195 | 11355 | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
import os
import unittest
sys.path.insert(0, os.path.abspath(".."))
from cStringIO import StringIO
from .. import parser
from ..parser import token_types
class TokenizerTest(unittest.TestCase):
    """Unit tests for the wptmanifest tokenizer (``parser.Tokenizer``)."""

    def setUp(self):
        # Fresh tokenizer per test so no state leaks between cases.
        self.tokenizer = parser.Tokenizer()

    def tokenize(self, input_str):
        """Tokenize *input_str*, collecting tokens up to and including EOF."""
        rv = []
        for item in self.tokenizer.tokenize(StringIO(input_str)):
            rv.append(item)
            if item[0] == token_types.eof:
                break
        return rv

    def compare(self, input_text, expected):
        """Assert tokenizing *input_text* yields *expected* plus an EOF token."""
        expected = expected + [(token_types.eof, None)]
        actual = self.tokenize(input_text)
        # NOTE(review): assertEquals is the deprecated alias of assertEqual.
        self.assertEquals(actual, expected)

    # --- section headings: [Heading text] ---
    def test_heading_0(self):
        self.compare("""[Heading text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading text"),
                      (token_types.paren, "]")])

    def test_heading_1(self):
        self.compare("""[Heading [text\]]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading [text]"),
                      (token_types.paren, "]")])

    def test_heading_2(self):
        self.compare("""[Heading #text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading #text"),
                      (token_types.paren, "]")])

    def test_heading_3(self):
        self.compare("""[Heading [\]text]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_4(self):
        # Unterminated heading is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("[Heading")

    def test_heading_5(self):
        self.compare("""[Heading [\]text] #comment""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading []text"),
                      (token_types.paren, "]")])

    def test_heading_6(self):
        self.compare(r"""[Heading \ttext]""",
                     [(token_types.paren, "["),
                      (token_types.string, "Heading \ttext"),
                      (token_types.paren, "]")])

    # --- key/value pairs ---
    def test_key_0(self):
        self.compare("""key:value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_1(self):
        self.compare("""key : value""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_2(self):
        self.compare("""key : val ue""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "val ue")])

    def test_key_3(self):
        self.compare("""key: value#comment""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_4(self):
        # Keys may not contain spaces.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""ke y: value""")

    def test_key_5(self):
        # A key without a separator is a parse error.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key""")

    def test_key_6(self):
        self.compare("""key: "value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_7(self):
        self.compare("""key: 'value'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "value")])

    def test_key_8(self):
        # '#' inside a quoted value is not a comment.
        self.compare("""key: "#value\"""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_9(self):
        self.compare("""key: '#value\'""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, "#value")])

    def test_key_10(self):
        # Unterminated double-quoted value.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: "value""")

    def test_key_11(self):
        # Unterminated single-quoted value.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_12(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value""")

    def test_key_13(self):
        # Trailing junk after a quoted value.
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: 'value' abc""")

    def test_key_14(self):
        self.compare(r"""key: \\nb""",
                     [(token_types.string, "key"),
                      (token_types.separator, ":"),
                      (token_types.string, r"\nb")])

    # --- list values ---
    def test_list_0(self):
        self.compare(
            """
key: []""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.list_end, "]")])

    def test_list_1(self):
        self.compare(
            """
key: [a, "b"]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    def test_list_2(self):
        self.compare(
            """
key: [a,
      b]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    def test_list_3(self):
        self.compare(
            """
key: [a, #b]
 c]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "c"),
             (token_types.list_end, "]")])

    def test_list_4(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: [a #b]
 c]""")

    def test_list_5(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize("""key: [a \\
 c]""")

    def test_list_6(self):
        self.compare(
            """key: [a , b]""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.list_start, "["),
             (token_types.string, "a"),
             (token_types.string, "b"),
             (token_types.list_end, "]")])

    # --- conditional expressions (if ...) ---
    def test_expr_0(self):
        self.compare(
            """
key:
  if cond == 1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_1(self):
        self.compare(
            """
key:
  if cond == 1: value1
  value2""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value1"),
             (token_types.string, "value2")])

    def test_expr_2(self):
        self.compare(
            """
key:
  if cond=="1": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.string, "1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_3(self):
        self.compare(
            """
key:
  if cond==1.1: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_4(self):
        self.compare(
            """
key:
  if cond==1.1 and cond2 == "a": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.ident, "and"),
             (token_types.ident, "cond2"),
             (token_types.ident, "=="),
             (token_types.string, "a"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_5(self):
        self.compare(
            """
key:
  if (cond==1.1 ): value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.paren, "("),
             (token_types.ident, "cond"),
             (token_types.ident, "=="),
             (token_types.number, "1.1"),
             (token_types.paren, ")"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_6(self):
        self.compare(
            """
key:
  if "\\ttest": value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.string, "\ttest"),
             (token_types.separator, ":"),
             (token_types.string, "value")])

    def test_expr_7(self):
        # Identifiers may not begin with a digit.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
  if 1A: value""")

    def test_expr_8(self):
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
  if 1a: value""")

    def test_expr_9(self):
        # Numbers may contain at most one decimal point.
        with self.assertRaises(parser.ParseError):
            self.tokenize(
                """
key:
  if 1.1.1: value""")

    def test_expr_10(self):
        self.compare(
            """
key:
  if 1.: value""",
            [(token_types.string, "key"),
             (token_types.separator, ":"),
             (token_types.group_start, None),
             (token_types.ident, "if"),
             (token_types.number, "1."),
             (token_types.separator, ":"),
             (token_types.string, "value")])
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| mpl-2.0 |
dgwakeman/mne-python | examples/inverse/plot_lcmv_beamformer.py | 18 | 2801 | """
======================================
Compute LCMV beamformer on evoked data
======================================
Compute LCMV beamformer solutions on evoked dataset for three different choices
of source orientation and stores the solutions in stc files for visualisation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import numpy as np
import mne
from mne.datasets import sample
from mne.io import Raw
from mne.beamformer import lcmv
print(__doc__)
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_raw-eve.fif'
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-eeg-oct-6-fwd.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-shrunk-cov.fif'
label_name = 'Aud-lh'
fname_label = data_path + '/MEG/sample/labels/%s.label' % label_name
###############################################################################
# Get epochs
event_id, tmin, tmax = 1, -0.2, 0.5
# Setup for reading the raw data
raw = Raw(raw_fname)
raw.info['bads'] = ['MEG 2443', 'EEG 053'] # 2 bads channels
events = mne.read_events(event_fname)
# Set up pick list: EEG + MEG - bad channels (modify to your needs)
left_temporal_channels = mne.read_selection('Left-temporal')
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads', selection=left_temporal_channels)
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, mag=4e-12, eog=150e-6))
evoked = epochs.average()
forward = mne.read_forward_solution(fname_fwd, surf_ori=True)
# Read regularized noise covariance and compute regularized data covariance
noise_cov = mne.read_cov(fname_cov)
data_cov = mne.compute_covariance(epochs, tmin=0.04, tmax=0.15,
method='shrunk')
plt.close('all')
pick_oris = [None, 'normal', 'max-power']
names = ['free', 'normal', 'max-power']
descriptions = ['Free orientation', 'Normal orientation', 'Max-power '
'orientation']
colors = ['b', 'k', 'r']
for pick_ori, name, desc, color in zip(pick_oris, names, descriptions, colors):
stc = lcmv(evoked, forward, noise_cov, data_cov, reg=0.01,
pick_ori=pick_ori)
# View activation time-series
label = mne.read_label(fname_label)
stc_label = stc.in_label(label)
plt.plot(1e3 * stc_label.times, np.mean(stc_label.data, axis=0), color,
hold=True, label=desc)
plt.xlabel('Time (ms)')
plt.ylabel('LCMV value')
plt.ylim(-0.8, 2.2)
plt.title('LCMV in %s' % label_name)
plt.legend()
plt.show()
| bsd-3-clause |
sri85/selenium | py/selenium/webdriver/support/event_firing_webdriver.py | 71 | 13011 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from .abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
    """Recursively wrap raw WebElements so events keep firing on them."""
    if isinstance(result, list):
        return [_wrap_elements(entry, ef_driver) for entry in result]
    if isinstance(result, WebElement):
        return EventFiringWebElement(result, ef_driver)
    return result
class EventFiringWebDriver(object):
    """
    A wrapper around an arbitrary WebDriver instance which supports firing events
    """
    def __init__(self, driver, event_listener):
        """
        Creates a new instance of the EventFiringWebDriver

        :Args:
         - driver : A WebDriver instance
         - event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially

        Example:

        .. code-block:: python

            from selenium.webdriver import Firefox
            from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener

            class MyListener(AbstractEventListener):
                def before_navigate_to(self, url, driver):
                    print("Before navigate to %s" % url)
                def after_navigate_to(self, url, driver):
                    print("After navigate to %s" % url)

            driver = Firefox()
            ef_driver = EventFiringWebDriver(driver, MyListener())
            ef_driver.get("http://www.google.co.in/")
        """
        if not isinstance(driver, WebDriver):
            raise WebDriverException("A WebDriver instance must be supplied")
        if not isinstance(event_listener, AbstractEventListener):
            raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
        self._driver = driver
        # Route the wrapped driver's value serialisation through this wrapper
        # so EventFiringWebElements are unwrapped first (see _wrap_value).
        self._driver._wrap_value = self._wrap_value
        self._listener = event_listener
    @property
    def wrapped_driver(self):
        """Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
        return self._driver
    def get(self, url):
        self._dispatch("navigate_to", (url, self._driver), "get", (url, ))
    def back(self):
        self._dispatch("navigate_back", (self._driver,), "back", ())
    def forward(self):
        self._dispatch("navigate_forward", (self._driver,), "forward", ())
    def execute_script(self, script, *args):
        self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)
    def execute_async_script(self, script, *args):
        unwrapped_args = (script,) + self._unwrap_element_args(args)
        return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)
    def close(self):
        self._dispatch("close", (self._driver,), "close", ())
    def quit(self):
        self._dispatch("quit", (self._driver,), "quit", ())
    def find_element(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
    def find_elements(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
    # The locator conveniences below all funnel through find_element(s),
    # so listener events fire for each of them.
    def find_element_by_id(self, id_):
        return self.find_element(by=By.ID, value=id_)
    def find_elements_by_id(self, id_):
        return self.find_elements(by=By.ID, value=id_)
    def find_element_by_xpath(self, xpath):
        return self.find_element(by=By.XPATH, value=xpath)
    def find_elements_by_xpath(self, xpath):
        return self.find_elements(by=By.XPATH, value=xpath)
    def find_element_by_link_text(self, link_text):
        return self.find_element(by=By.LINK_TEXT, value=link_text)
    def find_elements_by_link_text(self, text):
        return self.find_elements(by=By.LINK_TEXT, value=text)
    def find_element_by_partial_link_text(self, link_text):
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_elements_by_partial_link_text(self, link_text):
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
    def find_element_by_name(self, name):
        return self.find_element(by=By.NAME, value=name)
    def find_elements_by_name(self, name):
        return self.find_elements(by=By.NAME, value=name)
    def find_element_by_tag_name(self, name):
        return self.find_element(by=By.TAG_NAME, value=name)
    def find_elements_by_tag_name(self, name):
        return self.find_elements(by=By.TAG_NAME, value=name)
    def find_element_by_class_name(self, name):
        return self.find_element(by=By.CLASS_NAME, value=name)
    def find_elements_by_class_name(self, name):
        return self.find_elements(by=By.CLASS_NAME, value=name)
    def find_element_by_css_selector(self, css_selector):
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
    def find_elements_by_css_selector(self, css_selector):
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
    def _dispatch(self, l_call, l_args, d_call, d_args):
        """Fire before_<l_call>/after_<l_call> listener hooks around the
        wrapped driver's *d_call* method; exceptions are reported to the
        listener and re-raised."""
        getattr(self._listener, "before_%s" % l_call)(*l_args)
        try:
            result = getattr(self._driver, d_call)(*d_args)
        except Exception as e:
            self._listener.on_exception(e, self._driver)
            raise e
        getattr(self._listener, "after_%s" % l_call)(*l_args)
        return _wrap_elements(result, self)
    def _unwrap_element_args(self, args):
        """Recursively replace EventFiringWebElement args with their raw WebElements."""
        if isinstance(args, EventFiringWebElement):
            return args.wrapped_element
        elif isinstance(args, tuple):
            return tuple([self._unwrap_element_args(item) for item in args])
        elif isinstance(args, list):
            return [self._unwrap_element_args(item) for item in args]
        else:
            return args
    def _wrap_value(self, value):
        # Installed onto the wrapped driver in __init__: unwrap our element
        # wrapper before delegating to WebDriver's own serialisation.
        if isinstance(value, EventFiringWebElement):
            return WebDriver._wrap_value(self._driver, value.wrapped_element)
        return WebDriver._wrap_value(self._driver, value)
    def __setattr__(self, item, value):
        # Private names and names the driver lacks stay on the wrapper;
        # everything else is forwarded to the wrapped driver.
        if item.startswith("_") or not hasattr(self._driver, item):
            object.__setattr__(self, item, value)
        else:
            try:
                object.__setattr__(self._driver, item, value)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise e
    def __getattr__(self, name):
        # Fallback for anything not defined on the wrapper: proxy to the
        # wrapped driver, wrapping callables so returned elements fire events.
        def _wrap(*args):
            try:
                result = attrib(*args)
                return _wrap_elements(result, self)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise e
        if hasattr(self._driver, name):
            try:
                attrib = getattr(self._driver, name)
                if not callable(attrib):
                    return attrib
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise e
            return _wrap
        raise AttributeError(name)
class EventFiringWebElement(object):
""""
A wrapper around WebElement instance which supports firing events
"""
def __init__(self, webelement, ef_driver):
"""
Creates a new instance of the EventFiringWebElement
"""
self._webelement = webelement
self._ef_driver = ef_driver
self._driver = ef_driver.wrapped_driver
self._listener = ef_driver._listener
@property
def wrapped_element(self):
"""Returns the WebElement wrapped by this EventFiringWebElement instance"""
return self._webelement
def click(self):
self._dispatch("click", (self._webelement, self._driver), "click", ())
def clear(self):
self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())
def send_keys(self, *value):
self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)
def find_element(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))
def find_elements(self, by=By.ID, value=None):
return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))
def find_element_by_id(self, id_):
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def _dispatch(self, l_call, l_args, d_call, d_args):
getattr(self._listener, "before_%s" % l_call)(*l_args)
try:
result = getattr(self._webelement, d_call)(*d_args)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
getattr(self._listener, "after_%s" % l_call)(*l_args)
return _wrap_elements(result, self._ef_driver)
def __setattr__(self, item, value):
if item.startswith("_") or not hasattr(self._webelement, item):
object.__setattr__(self, item, value)
else:
try:
object.__setattr__(self._webelement, item, value)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
def __getattr__(self, name):
def _wrap(*args):
try:
result = attrib(*args)
return _wrap_elements(result, self._ef_driver)
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
if hasattr(self._webelement, name):
try:
attrib = getattr(self._webelement, name)
if not callable(attrib):
return attrib
except Exception as e:
self._listener.on_exception(e, self._driver)
raise e
return _wrap
raise AttributeError(name)
| apache-2.0 |
pferreir/indico | indico/util/passwords.py | 3 | 7141 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import functools
import hashlib
import re
import bcrypt
import requests
from indico.util.i18n import _
class BCryptPassword:
    """Wrapper around a bcrypt password hash that compares like a password.

    ``instance == plaintext`` checks the plaintext against the stored hash;
    an empty password or an empty/missing hash never compares equal.
    """

    def __init__(self, pwhash):
        if pwhash is not None and not isinstance(pwhash, str):
            raise TypeError(f'pwhash must be str or None, not {type(pwhash)}')
        self.hash = pwhash

    def __eq__(self, value):
        # For security reasons we never consider an empty password/hash valid
        if not value or not self.hash:
            return False
        if not isinstance(value, str):
            raise TypeError(f'password must be str, not {type(value)}')
        return bcrypt.checkpw(value.encode(), self.hash.encode())

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):  # pragma: no cover
        return hash(self.hash)

    def __repr__(self):
        return f'<BCryptPassword({self.hash})>'

    @staticmethod
    def hash(value):
        # Shadowed by the instance attribute of the same name after __init__.
        return bcrypt.hashpw(value.encode(), bcrypt.gensalt()).decode()
class SHA256Token:
    """An API token stored as a plain (unsalted) SHA-256 hex digest.

    Tokens are long random strings, so a fast hash is acceptable here;
    comparison is a straight digest equality check.
    """

    def __init__(self, pwhash):
        if pwhash is not None and not isinstance(pwhash, str):
            raise TypeError(f'pwhash must be str or None, not {type(pwhash)}')
        self.hash = pwhash

    def __eq__(self, value):
        # For security reasons we never consider an empty password/hash valid
        if not value or not self.hash:
            return False
        if not isinstance(value, str):
            raise TypeError(f'password must be str, not {type(value)}')
        return self.hash == value

    def __ne__(self, other):
        return not (self == other)

    def __hash__(self):  # pragma: no cover
        return hash(self.hash)

    def __repr__(self):
        return f'<SHA256Token({self.hash})>'

    def __str__(self):
        raise RuntimeError('Hashed tokens have no string representation')

    @staticmethod
    def hash(value):
        # Shadowed by the instance attribute of the same name after __init__.
        return hashlib.sha256(value.encode()).hexdigest()
class PasswordProperty:
    """Data descriptor storing a hashed password on the owning object.

    Reading the attribute yields a backend object that supports ``==``
    comparison against a plaintext password; assigning a plaintext value
    hashes it and stores only the hash.

    :param attr: Name of the attribute on the owning object where the
                 password hash is kept.
    :param backend: Backend class that hashes and verifies passwords.
    """

    def __init__(self, attr, backend=BCryptPassword):
        self.attr = attr
        self.backend = backend

    def __get__(self, instance, owner):
        if instance is None:
            # Accessed on the class itself: return the descriptor.
            return self
        return self.backend(getattr(instance, self.attr, None))

    def __set__(self, instance, value):
        if not value:
            raise ValueError('Password may not be empty')
        setattr(instance, self.attr, self.backend.hash(value))

    def __delete__(self, instance):
        setattr(instance, self.attr, None)
class TokenProperty(PasswordProperty):
    """`PasswordProperty` variant tailored to API tokens.

    Tokens are checked far more often than passwords, so a fast SHA-256
    hash is used instead of bcrypt -- safe here because tokens are fully
    random and much longer than a typical password.
    """

    def __init__(self, attr):
        super().__init__(attr, backend=SHA256Token)

    def __set__(self, instance, value):
        # Reject anything short enough to plausibly be a hand-typed secret.
        if len(value) < 30:
            raise ValueError('Token is too short')
        super().__set__(instance, value)
@functools.lru_cache
def _get_pwned_hashes(prefix, timeout=1):
    """Fetch the pwned-password SHA-1 suffixes for a 5-char hash *prefix*.

    Returns a set of uppercase hex suffixes, or ``None`` when the HTTP
    request fails or times out.
    NOTE(review): due to ``lru_cache``, a transient network failure caches
    ``None`` for this prefix for the process lifetime -- confirm intended.
    """
    try:
        resp = requests.get(f'https://api.pwnedpasswords.com/range/{prefix}', timeout=timeout)
        resp.raise_for_status()
    except requests.RequestException:
        return None
    # Each response line is "<suffix>:<count>"; entries ending in ":0" are
    # dropped (presumably zero-count API padding -- verify against HIBP docs).
    return {x.split(':', 1)[0] for x in resp.text.splitlines() if not x.endswith(':0')}
def check_password_pwned(password, fast=False):
    """Check if a password appears in the pwned-passwords list.

    :param password: The plaintext password.
    :param fast: Finish quickly even if that may mean being unable to check
                 the password; use during interactive requests.
    :return: ``True``/``False`` for pwned/not pwned, or ``None`` when the
             check could not be performed.
    """
    sha1_hex = hashlib.sha1(password.encode()).hexdigest().upper()
    prefix, suffix = sha1_hex[:5], sha1_hex[5:]
    # Only the 5-char prefix is sent to the API (k-anonymity scheme).
    hashes = _get_pwned_hashes(prefix, 1 if fast else 3)
    return None if hashes is None else suffix in hashes
def validate_secure_password(context, password, *, username='', fast=False):
    """Check if a password is considered secure.

    A password is considered secure if it:

    - is at least 8 characters long
    - does not contain the username unless the username is <5 chars and the password is >16 chars long
    - does not contain the strings 'indico' (or common variations)
    - is not in the pwned password list

    :param context: A string indicating the context where the password is used
    :param password: The plaintext password
    :param username: The corresponding username (may be empty if not applicable)
    :param fast: Whether the check should finish quickly, even if that may
                 indicate not being able to check the password against the list
                 of pwned passwords. This should be used during interactive requests
                 where slowdowns are generally frowned upon (e.g. during login).
    :return: A string indicating why the password is bad, or `None` if it's secure.
    """
    from indico.core import signals
    from indico.util.signals import values_from_signal
    # See https://pages.nist.gov/800-63-3/sp800-63b.html#-511-memorized-secrets for some useful
    # guidelines for passwords. Ideally we would also perform a dictionary check, but unless we
    # rely on someone installing OS packages with dictionaries we don't have one available, and
    # there's a good chance that single dictionary words are already included in the pwned password
    # list.
    # Plugins/signal subscribers get the first say; the first returned error wins.
    if errors := values_from_signal(signals.check_password_secure.send(context, username=username, password=password),
                                    as_list=True):
        return errors[0]
    if len(password) < 8:
        return _('Passwords must be at least 8 characters long.')
    # Leetspeak-tolerant match for "indico" (i/1/|, o/0 substitutions).
    if re.search(r'[i1|]nd[1i|]c[o0]', password.lower()):
        return _('Passwords may not contain the word "indico" or variations.')
    if len(username) >= 5 and len(password) <= 16 and username.lower() in password.lower():
        return _('Passwords may not contain your username.')
    if check_password_pwned(password):
        return _('This password has been seen in previous data breaches.')
| mit |
Jnrolfe/pychess | lib/pychess/widgets/newGameDialog.py | 20 | 27779 | import os.path
import gettext
import locale
from operator import attrgetter
from itertools import groupby
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import Gtk
from cairo import ImageSurface
from gi.repository import GtkSource
from gi.repository import GdkPixbuf
from pychess.compat import StringIO
from pychess.Utils.IconLoader import load_icon
from pychess.Utils.GameModel import GameModel
from pychess.Utils.TimeModel import TimeModel
from pychess.Utils.const import *
from pychess.Utils.repr import localReprSign
from pychess.Utils.lutils.LBoard import LBoard
from pychess.System import uistuff
from pychess.System.Log import log
from pychess.System import conf
from pychess.System.glock import glock_connect_after
from pychess.System.prefix import getDataPrefix, isInstalled, addDataPrefix
from pychess.Players.engineNest import discoverer
from pychess.Players.Human import Human
from pychess.widgets import BoardPreview
from pychess.widgets import ionest
from pychess.widgets import ImageMenu
from pychess.Savers import fen, pgn
from pychess.Savers.ChessFile import LoadingError
from pychess.Variants import variants
from pychess.Variants.normal import NormalChess
#===============================================================================
# We init most dialog icons global to make them accessibly to the
# Background.Taskers so they have a similar look.
#===============================================================================
big_time = GdkPixbuf.Pixbuf.new_from_file(addDataPrefix("glade/stock_alarm.svg"))
big_people = load_icon(48, "stock_people", "system-users")
iwheels = load_icon(24, "gtk-execute")
ipeople = load_icon(24, "stock_people", "system-users")
inotebook = load_icon(24, "stock_notebook", "computer")
speople = load_icon(16, "stock_people", "system-users")
snotebook = load_icon(16, "stock_notebook", "computer")
# Weather icons illustrate the engine skill level; each icon covers two
# adjacent skill values (2*i-1 and 2*i below).
weather_icons = ("clear", "clear-night", "few-clouds", "few-clouds-night", "fog", "overcast", "severe-alert", "showers-scattered", "showers", "storm")
skillToIcon = {}
# Used by TaskerManager. Put here to help synchronization
skillToIconLarge = {}
for i, icon in enumerate(weather_icons, start=1):
    skillToIcon[2*i-1] = load_icon(16, "weather-%s" % icon)
    skillToIcon[2*i] = load_icon(16, "weather-%s" % icon)
    skillToIconLarge[2*i-1] = load_icon(48, "weather-%s" % icon)
    skillToIconLarge[2*i] = load_icon(48, "weather-%s" % icon)
# Per-variant combobox item lists; filled by createPlayerUIGlobals() below.
playerItems = []
smallPlayerItems = []
analyzerItems = []
def createPlayerUIGlobals (discoverer):
    # Rebuild the global combobox item lists (playerItems, smallPlayerItems,
    # analyzerItems) from the engines currently known to the discoverer.
    # Connected below to "all_engines_discovered" so the lists refresh once
    # engine discovery finishes.
    global playerItems
    global smallPlayerItems
    global analyzerItems
    playerItems = []
    smallPlayerItems = []
    analyzerItems = []
    # One sub-list per variant, each starting with the human entry at index 0.
    # NOTE(review): this assumes iteration order of variants.values() lines up
    # with the variant indices returned by getEngineVariants() -- confirm.
    for variantClass in variants.values():
        playerItems += [ [(ipeople, _("Human Being"), "")] ]
        smallPlayerItems += [ [(speople, _("Human Being"), "")] ]
    for engine in discoverer.getEngines():
        name = engine["name"]
        c = discoverer.getCountry(engine)
        path = addDataPrefix("flags/%s.png" % c)
        if c and os.path.isfile(path):
            flag_icon = GdkPixbuf.Pixbuf.new_from_file(path)
        else:
            # Fall back to a generic flag when no country icon is available.
            path = addDataPrefix("flags/unknown.png")
            flag_icon = GdkPixbuf.Pixbuf.new_from_file(path)
        # Register the engine under every variant it supports.
        for variant in discoverer.getEngineVariants(engine):
            playerItems[variant] += [(flag_icon, name)]
            smallPlayerItems[variant] += [(snotebook, name)]
        if discoverer.is_analyzer(engine):
            analyzerItems.append((flag_icon, name))
discoverer.connect("all_engines_discovered", createPlayerUIGlobals)
#===============================================================================
# GameInitializationMode is the super class of new game dialogs. Dialogs include
# the standard new game dialog, the load file dialog and the enter notation
# dialog.
#===============================================================================
class _GameInitializationMode:
    """Super class of the new game dialogs (standard new game, load file and
    enter notation).  All modes share one glade dialog; _ensureReady() makes
    sure both the shared init and the subclass init run exactly once."""
    @classmethod
    def _ensureReady (cls):
        # Run the shared init once for all modes, then the subclass' own
        # _init once per subclass.
        if not hasattr(_GameInitializationMode, "superhasRunInit"):
            _GameInitializationMode._init()
            _GameInitializationMode.superhasRunInit = True
        if not hasattr(cls, "hasRunInit"):
            cls._init()
            cls.hasRunInit = True
    @classmethod
    def _init (cls):
        """Build the shared dialog: player comboboxes, skill sliders, the
        time-control radios and the variant radios."""
        cls.widgets = uistuff.GladeWidgets ("newInOut.glade")
        uistuff.createCombo(cls.widgets["whitePlayerCombobox"])
        uistuff.createCombo(cls.widgets["blackPlayerCombobox"])
        cls.widgets["playersIcon"].set_from_pixbuf(big_people)
        cls.widgets["timeIcon"].set_from_pixbuf(big_time)
        # Skill sliders only make sense for engines, so hide them whenever
        # the human entry (index 0) is selected.
        def on_playerCombobox_changed (widget, skillHbox):
            skillHbox.props.visible = widget.get_active() > 0
        cls.widgets["whitePlayerCombobox"].connect(
            "changed", on_playerCombobox_changed, cls.widgets["skillHbox1"])
        cls.widgets["blackPlayerCombobox"].connect(
            "changed", on_playerCombobox_changed, cls.widgets["skillHbox2"])
        cls.widgets["whitePlayerCombobox"].set_active(0)
        cls.widgets["blackPlayerCombobox"].set_active(1)
        def on_skill_changed (scale, image):
            image.set_from_pixbuf(skillToIcon[int(scale.get_value())])
        cls.widgets["skillSlider1"].connect("value-changed", on_skill_changed,
                                            cls.widgets["skillIcon1"])
        cls.widgets["skillSlider2"].connect("value-changed", on_skill_changed,
                                            cls.widgets["skillIcon2"])
        cls.widgets["skillSlider1"].set_value(3)
        cls.widgets["skillSlider2"].set_value(3)
        cls.__initTimeRadio(_("Blitz"), "ngblitz", cls.widgets["blitzRadio"],
                            cls.widgets["configImageBlitz"], 5, 0)
        cls.__initTimeRadio(_("Rapid"), "ngrapid", cls.widgets["rapidRadio"],
                            cls.widgets["configImageRapid"], 15, 5)
        cls.__initTimeRadio(_("Normal"), "ngnormal", cls.widgets["normalRadio"],
                            cls.widgets["configImageNormal"], 40, 15)
        cls.__initVariantRadio("ngvariant1", cls.widgets["playVariant1Radio"],
                               cls.widgets["configImageVariant1"],
                               FISCHERRANDOMCHESS)
        cls.__initVariantRadio("ngvariant2", cls.widgets["playVariant2Radio"],
                               cls.widgets["configImageVariant2"], LOSERSCHESS)
        def updateCombos(*args):
            # Refresh the player comboboxes with the engines supporting the
            # currently selected variant, and keep the variant tooltips
            # up to date.
            if cls.widgets["playNormalRadio"].get_active():
                variant = NORMALCHESS
            elif cls.widgets["playVariant1Radio"].get_active():
                variant = conf.get("ngvariant1", FISCHERRANDOMCHESS)
            else:
                variant = conf.get("ngvariant2", LOSERSCHESS)
            variant1 = conf.get("ngvariant1", FISCHERRANDOMCHESS)
            cls.widgets["playVariant1Radio"].set_tooltip_text(variants[variant1].__desc__)
            variant2 = conf.get("ngvariant2", LOSERSCHESS)
            cls.widgets["playVariant2Radio"].set_tooltip_text(variants[variant2].__desc__)
            # Human entries carry a third element; keep only (icon, name).
            data = [(item[0], item[1]) for item in playerItems[variant]]
            uistuff.updateCombo(cls.widgets["blackPlayerCombobox"], data)
            uistuff.updateCombo(cls.widgets["whitePlayerCombobox"], data)
        glock_connect_after(discoverer, "all_engines_discovered", updateCombos)
        updateCombos(discoverer)
        conf.notify_add("ngvariant1", updateCombos)
        conf.notify_add("ngvariant2", updateCombos)
        cls.widgets["playNormalRadio"].connect("toggled", updateCombos)
        cls.widgets["playNormalRadio"].set_tooltip_text(variants[NORMALCHESS].__desc__)
        cls.widgets["playVariant1Radio"].connect("toggled", updateCombos)
        variant1 = conf.get("ngvariant1", FISCHERRANDOMCHESS)
        cls.widgets["playVariant1Radio"].set_tooltip_text(variants[variant1].__desc__)
        cls.widgets["playVariant2Radio"].connect("toggled", updateCombos)
        variant2 = conf.get("ngvariant2", LOSERSCHESS)
        cls.widgets["playVariant2Radio"].set_tooltip_text(variants[variant2].__desc__)
        # The "variant" has to come before players, because the engine positions
        # in the user comboboxes can be different in different variants
        for key in ("whitePlayerCombobox", "blackPlayerCombobox",
                    "skillSlider1", "skillSlider2",
                    "notimeRadio", "blitzRadio", "rapidRadio", "normalRadio",
                    "playNormalRadio", "playVariant1Radio", "playVariant2Radio"):
            uistuff.keep(cls.widgets[key], key)
        # We don't want the dialog to deallocate when closed. Rather we hide
        # it on respond
        cls.widgets["newgamedialog"].connect("delete_event", lambda *a: True)
    @classmethod
    def __initTimeRadio (cls, name, confid, radiobutton, configImage, defmin, defgain):
        """Attach a minutes/gain spin-button popup to one time-control radio
        button and keep its label in sync with the chosen values.
        (Parameter renamed id -> confid: avoids shadowing the builtin and
        matches __initVariantRadio.)"""
        minSpin = Gtk.SpinButton()
        minSpin.set_adjustment(Gtk.Adjustment(1,1,240,1))
        gainSpin = Gtk.SpinButton()
        gainSpin.set_adjustment(Gtk.Adjustment(0,-60,60,1))
        cls.widgets["%s min" % confid] = minSpin
        cls.widgets["%s gain" % confid] = gainSpin
        uistuff.keep(minSpin, "%s min" % confid, first_value=defmin)
        uistuff.keep(gainSpin, "%s gain" % confid, first_value=defgain)
        table = Gtk.Table(2, 2)
        table.props.row_spacing = 3
        table.props.column_spacing = 12
        label = Gtk.Label(label=_("Minutes:"))
        label.props.xalign = 0
        table.attach(label, 0, 1, 0, 1)
        table.attach(minSpin, 1, 2, 0, 1)
        label = Gtk.Label(label=_("Gain:"))
        label.props.xalign = 0
        table.attach(label, 0, 1, 1, 2)
        table.attach(gainSpin, 1, 2, 1, 2)
        alignment = Gtk.Alignment.new(1,1,1,1)
        alignment.set_padding(6,6,12,12)
        alignment.add(table)
        ImageMenu.switchWithImage(configImage, alignment)
        def updateString (spin):
            # Reword the radio label depending on whether the per-move gain
            # is positive, negative or zero.
            minutes = minSpin.get_value_as_int()
            gain = gainSpin.get_value_as_int()
            if gain > 0:
                radiobutton.set_label(_("%(name)s %(minutes)d min + %(gain)d sec/move") % {
                    'name': name, 'minutes': minutes, 'gain': gain})
            elif gain < 0:
                radiobutton.set_label(_("%(name)s %(minutes)d min %(gain)d sec/move") % {
                    'name': name, 'minutes': minutes, 'gain': gain})
            else:
                radiobutton.set_label(_("%(name)s %(minutes)d min") % {
                    'name': name, 'minutes': minutes})
        minSpin.connect("value-changed", updateString)
        gainSpin.connect("value-changed", updateString)
        updateString(None)
    @classmethod
    def __initVariantRadio (cls, confid, radiobutton, configImage, default):
        """Attach a grouped variant-selection tree popup to one variant radio
        button, persisting the chosen variant under the config key *confid*."""
        model = Gtk.TreeStore(str)
        treeview = Gtk.TreeView(model)
        treeview.set_headers_visible(False)
        treeview.append_column(Gtk.TreeViewColumn(None, Gtk.CellRendererText(), text=0))
        alignment = Gtk.Alignment.new(1,1,1,1)
        alignment.set_padding(6,6,12,12)
        alignment.add(treeview)
        ImageMenu.switchWithImage(configImage, alignment)
        groupNames = {VARIANTS_BLINDFOLD: _("Blindfold"),
                      VARIANTS_ODDS: _("Odds"),
                      VARIANTS_SHUFFLE: _("Shuffle"),
                      VARIANTS_OTHER: _("Other (standard rules)"),
                      VARIANTS_OTHER_NONSTANDARD: _("Other (non standard rules)"),
                      }
        specialVariants = [v for v in variants.values() if v != NormalChess and
                           v.board.variant not in UNSUPPORTED]
        groups = groupby(specialVariants, attrgetter("variant_group"))
        pathToVariant = {}
        variantToPath = {}
        for i, (group_id, group) in enumerate(groups):
            iter = model.append(None, (groupNames[group_id],))
            for variant in group:
                subiter = model.append(iter, (variant.name,))
                path = model.get_path(subiter)
                pathToVariant[path.to_string()] = variant.board.variant
                variantToPath[variant.board.variant] = path.to_string()
            treeview.expand_row(Gtk.TreePath(i), True)
        selection = treeview.get_selection()
        selection.set_mode(Gtk.SelectionMode.BROWSE)
        def selfunc (selection, store, path, path_selected, data):
            # Only leaf rows (actual variants) may be selected, not groups.
            return len(path) > 1
        selection.set_select_function(selfunc, None)
        selection.select_path(variantToPath[conf.get(confid, default)])
        def callback (selection):
            model, iter = selection.get_selected()
            if iter:
                radiobutton.set_label("%s" % model.get(iter, 0) + _(" chess"))
                path = model.get_path(iter)
                variant = pathToVariant[path.to_string()]
                conf.set(confid, variant)
        selection.connect("changed", callback)
        callback(selection)
    @classmethod
    def _generalRun (cls, callback, validate):
        """Show the dialog and, on OK, collect variant, time control and
        players from the widgets.  validate(gamemodel) may veto closing;
        otherwise callback(gamemodel, player0tup, player1tup) is invoked."""
        def onResponse(dialog, res):
            if res != Gtk.ResponseType.OK:
                cls.widgets["newgamedialog"].hide()
                cls.widgets["newgamedialog"].disconnect(handlerId)
                return
            # Find variant
            if cls.widgets["playNormalRadio"].get_active():
                variant_index = NORMALCHESS
            elif cls.widgets["playVariant1Radio"].get_active():
                variant_index = conf.get("ngvariant1", FISCHERRANDOMCHESS)
            else:
                variant_index = conf.get("ngvariant2", LOSERSCHESS)
            variant = variants[variant_index]
            # Find time
            if cls.widgets["notimeRadio"].get_active():
                secs = 0
                incr = 0
            elif cls.widgets["blitzRadio"].get_active():
                secs = cls.widgets["ngblitz min"].get_value_as_int()*60
                incr = cls.widgets["ngblitz gain"].get_value_as_int()
            elif cls.widgets["rapidRadio"].get_active():
                secs = cls.widgets["ngrapid min"].get_value_as_int()*60
                incr = cls.widgets["ngrapid gain"].get_value_as_int()
            elif cls.widgets["normalRadio"].get_active():
                secs = cls.widgets["ngnormal min"].get_value_as_int()*60
                incr = cls.widgets["ngnormal gain"].get_value_as_int()
            # Find players.  Combobox positions are per-variant, so map them
            # back to the global engine index via playerItems[0].
            player0 = cls.widgets["whitePlayerCombobox"].get_active()
            player0 = playerItems[0].index(playerItems[variant_index][player0])
            diffi0 = int(cls.widgets["skillSlider1"].get_value())
            player1 = cls.widgets["blackPlayerCombobox"].get_active()
            player1 = playerItems[0].index(playerItems[variant_index][player1])
            diffi1 = int(cls.widgets["skillSlider2"].get_value())
            # Prepare players for ionest
            playertups = []
            for i, playerno, diffi, color in ((0, player0, diffi0, WHITE),
                                              (1, player1, diffi1, BLACK)):
                if playerno > 0:
                    engine = discoverer.getEngineN (playerno-1)
                    name = discoverer.getName(engine)
                    playertups.append((ARTIFICIAL, discoverer.initPlayerEngine,
                                       [engine, color, diffi, variant, secs, incr], name))
                else:
                    if not playertups or playertups[0][0] != LOCAL:
                        name = conf.get("firstName", _("You"))
                    else: name = conf.get("secondName", _("Guest"))
                    playertups.append((LOCAL, Human, (color, name), name))
            # Set forcePonderOff initPlayerEngine param True in engine-engine games
            if playertups[0][0] == ARTIFICIAL and playertups[1][0] == ARTIFICIAL:
                playertups[0][2].append(True)
                playertups[1][2].append(True)
            if secs > 0:
                timemodel = TimeModel (secs, incr)
            else:
                timemodel = TimeModel()
            gamemodel = GameModel (timemodel, variant)
            if not validate(gamemodel):
                return
            else:
                cls.widgets["newgamedialog"].hide()
                cls.widgets["newgamedialog"].disconnect(handlerId)
                callback(gamemodel, playertups[0], playertups[1])
        handlerId = cls.widgets["newgamedialog"].connect("response", onResponse)
        cls.widgets["newgamedialog"].show()
    @classmethod
    def _hideOthers (cls):
        # Hide the mode-specific side panels before showing a new mode.
        # ("enterGameNotationSidePanel" used to be listed twice here.)
        for extension in ("loadsidepanel", "enterGameNotationSidePanel"):
            cls.widgets[extension].hide()
################################################################################
# NewGameMode #
################################################################################
class NewGameMode (_GameInitializationMode):
    @classmethod
    def _init (cls):
        """Intentionally empty: the shared _GameInitializationMode._init
        already prepares everything this mode needs, and overriding keeps
        it from running a second time."""
        pass
    @classmethod
    def run (cls):
        """Show the plain "New Game" dialog."""
        cls._ensureReady()
        dialog = cls.widgets["newgamedialog"]
        if dialog.props.visible:
            # Already open; just raise it.
            dialog.present()
            return
        def _acceptAll (gamemodel):
            # Nothing extra to verify for a fresh game.
            return True
        cls._hideOthers()
        dialog.set_title(_("New Game"))
        cls._generalRun(ionest.generalStart, _acceptAll)
################################################################################
# LoadFileExtension #
################################################################################
class LoadFileExtension (_GameInitializationMode):
    @classmethod
    def _init (cls):
        """Create the file-chooser button and the board preview panel."""
        opendialog, savedialog, enddir, savecombo, savers = ionest.getOpenAndSaveDialogs()
        cls.filechooserbutton = Gtk.FileChooserButton(opendialog)
        cls.loadSidePanel = BoardPreview.BoardPreview(
            cls.widgets, cls.filechooserbutton, opendialog, enddir)
    @classmethod
    def run (cls, uri=None):
        """Show the "Open Game" dialog, optionally preloading *uri*."""
        cls._ensureReady()
        dialog = cls.widgets["newgamedialog"]
        if dialog.props.visible:
            dialog.present()
            return
        if uri:
            # Only accept files whose extension maps to a known loader.
            if uri[uri.rfind(".")+1:] not in ionest.enddir:
                log.info("Ignoring strange file: %s" % uri)
                return
            cls.loadSidePanel.set_filename(uri)
            cls.filechooserbutton.emit("file-activated")
        else:
            res = ionest.opendialog.run()
            ionest.opendialog.hide()
            if res != Gtk.ResponseType.ACCEPT:
                return
        cls._hideOthers()
        dialog.set_title(_("Open Game"))
        cls.widgets["loadsidepanel"].show()
        def _acceptAll (gamemodel):
            return True
        def _start (gamemodel, p0, p1):
            if cls.loadSidePanel.is_empty():
                ionest.generalStart(gamemodel, p0, p1)
            else:
                chosen = cls.loadSidePanel.get_filename()
                loader = ionest.enddir[chosen[chosen.rfind(".")+1:]]
                position = cls.loadSidePanel.get_position()
                gameno = cls.loadSidePanel.get_gameno()
                ionest.generalStart(gamemodel, p0, p1,
                                    (chosen, loader, gameno, position))
        cls._generalRun(_start, _acceptAll)
################################################################################
# EnterNotationExtension #
################################################################################
class EnterNotationExtension (_GameInitializationMode):
    # Dialog mode that lets the user type or paste a PGN game or a FEN
    # position into a GtkSource view.
    @classmethod
    def _init (cls):
        def callback (widget, allocation):
            cls.widgets["enterGameNotationFrame"].set_size_request(
                223, allocation.height-4)
        cls.widgets["enterGameNotationSidePanel"].connect_after("size-allocate", callback)
        # Flag button toggling between the local language and English piece
        # letters (only two layers when a local flag icon exists).
        flags = []
        if isInstalled():
            path = gettext.find("pychess")
        else:
            path = gettext.find("pychess", localedir=addDataPrefix("lang"))
        if path:
            loc = locale.getdefaultlocale()[0][-2:].lower()
            flags.append(addDataPrefix("flags/%s.png" % loc))
        flags.append(addDataPrefix("flags/us.png"))
        cls.ib = ImageButton(flags)
        cls.widgets["imageButtonDock"].add(cls.ib)
        cls.ib.show()
        cls.sourcebuffer = GtkSource.Buffer()
        sourceview = GtkSource.View.new_with_buffer(cls.sourcebuffer)
        sourceview.set_tooltip_text(
            _("Type or paste PGN game or FEN positions here"))
        cls.widgets["scrolledwindow6"].add(sourceview)
        sourceview.show()
        # Pgn format does not allow tabulator
        sourceview.set_insert_spaces_instead_of_tabs(True)
        sourceview.set_wrap_mode(Gtk.WrapMode.WORD)
        man = GtkSource.LanguageManager()
        # Init new version
        if hasattr(man.props, 'search_path'):
            path = os.path.join(getDataPrefix(),"gtksourceview-1.0/language-specs")
            man.props.search_path = man.props.search_path + [path]
            if 'pgn' in man.get_language_ids():
                lang = man.get_language('pgn')
                cls.sourcebuffer.set_language(lang)
            else:
                log.warning("Unable to load pgn syntax-highlighting.")
            cls.sourcebuffer.set_highlight_syntax(True)
        # Init old version
        else:
            os.environ["XDG_DATA_DIRS"] = getDataPrefix()+":/usr/share/"
            # NOTE(review): LanguageManager is not imported in this module --
            # this legacy branch would raise NameError if ever taken; confirm.
            man = LanguageManager()
            for lang in man.get_available_languages():
                if lang.get_name() == "PGN":
                    cls.sourcebuffer.set_language(lang)
                    break
            else:
                log.warning("Unable to load pgn syntax-highlighting.")
            cls.sourcebuffer.set_highlight(True)
    @classmethod
    def run (cls):
        cls._ensureReady()
        if cls.widgets["newgamedialog"].props.visible:
            cls.widgets["newgamedialog"].present()
            return
        cls._hideOthers()
        cls.widgets["newgamedialog"].set_title(_("Enter Game"))
        cls.widgets["enterGameNotationSidePanel"].show()
        def _get_text():
            # Return the buffer text plus the loader (fen or pgn) guessed
            # from its shape.
            text = cls.sourcebuffer.get_text(
                cls.sourcebuffer.get_start_iter(), cls.sourcebuffer.get_end_iter(), False)
            # Test if the ImageButton has two layers and is set on the local language
            if len(cls.ib.surfaces) == 2 and cls.ib.current == 0:
                # 2 step used to avoid backtranslating
                # (local and english piece letters can overlap)
                for i, sign in enumerate(localReprSign[1:]):
                    if sign.strip():
                        text = text.replace(sign, FAN_PIECES[0][i+1])
                for i, sign in enumerate(FAN_PIECES[0][1:7]):
                    text = text.replace(sign, reprSign[i+1])
                text = str(text)
            # First we try if it's just a FEN string
            parts_no = len(text.split())
            if text.strip() == "":
                text = FEN_START
                loadType = fen
            elif parts_no > 0 and text.split()[0].count("/") == 7:
                # 8 ranks separated by 7 slashes: looks like a FEN board field.
                loadType = fen
            else:
                loadType = pgn
            return text, loadType
        def _validate(gamemodel):
            # Try to parse the entered notation into the model; pop a warning
            # dialog and keep the window open when parsing fails.
            try:
                text, loadType = _get_text()
                chessfile = loadType.load(StringIO(text))
                chessfile.loadToModel(0, -1, model=gamemodel)
                gamemodel.status = WAITING_TO_START
                return True
            except LoadingError as e:
                d = Gtk.MessageDialog (type=Gtk.MessageType.WARNING, buttons=Gtk.ButtonsType.OK,
                                       message_format=e.args[0])
                d.format_secondary_text (e.args[1])
                d.connect("response", lambda d,a: d.hide())
                d.show()
                return False
        def _callback (gamemodel, p0, p1):
            text, loadType = _get_text()
            ionest.generalStart(gamemodel, p0, p1, (StringIO(text), loadType, 0, -1))
        cls._generalRun(_callback, _validate)
class ImageButton(Gtk.Button):
    """A button that cycles through a list of images, one step per click.

    The attributes ``surfaces`` and ``current`` are read by
    EnterNotationExtension and therefore keep their names."""
    def __init__ (self, imagePaths):
        GObject.GObject.__init__(self)
        self.surfaces = [Gtk.Image().new_from_file(p) for p in imagePaths]
        self.current = 0
        self.image = self.surfaces[0]
        self._mount()
        self.connect("clicked", self.on_clicked)
    def _mount (self):
        # Show the active image widget and pack it into the button.
        self.image.show()
        self.add(self.image)
    def on_clicked(self, button):
        # Swap the displayed widget for the next one in the cycle.
        self.remove(self.image)
        self.current = (self.current + 1) % len(self.surfaces)
        self.image = self.surfaces[self.current]
        self._mount()
class xxxImageButton(Gtk.DrawingArea):
    # Apparently a disabled DrawingArea-based variant of ImageButton (note
    # the "xxx" prefix); not instantiated anywhere in this module.
    def __init__ (self, imagePaths):
        GObject.GObject.__init__(self)
        self.set_events(Gdk.EventMask.EXPOSURE_MASK | Gdk.EventMask.BUTTON_PRESS_MASK)
        self.connect("draw", self.draw)
        self.connect("button_press_event", self.buttonPress)
        self.surfaces = [ImageSurface.create_from_png(path) for path in imagePaths]
        self.current = 0
        # All images are assumed to share the first surface's dimensions.
        width, height = self.surfaces[0].get_width(), self.surfaces[0].get_height()
        self.size = (0, 0, width, height)
        self.set_size_request(width, height)
    def draw (self, self_, context):
        # Paint the currently selected surface.
        context.set_source_surface(self.surfaces[self.current], 0, 0)
        context.fill()
    def buttonPress (self, self_, event):
        # Advance to the next image on left click and force a redraw.
        if event.button == 1 and event.type == Gdk.EventType.BUTTON_PRESS:
            self.current = (self.current + 1) % len(self.surfaces)
            self.window.invalidate_rect(self.size, True)
            self.window.process_updates(True)
def createRematch (gamemodel):
    """ If gamemodel contains only LOCAL or ARTIFICIAL players, this starts a
        new game, based on the info in gamemodel, with the same time settings
        and the colors swapped """
    # Reuse the clock settings of the finished game, if any.
    if gamemodel.timed:
        secs = gamemodel.timemodel.intervals[0][WHITE]
        gain = gamemodel.timemodel.gain
    else:
        secs = 0
        gain = 0
    newgamemodel = GameModel(TimeModel(secs, gain), variant=gamemodel.variant)
    wp = gamemodel.players[WHITE]
    bp = gamemodel.players[BLACK]
    # Colors are swapped: the old black player becomes player0 (white).
    if wp.__type__ == LOCAL:
        player1tup = (wp.__type__, wp.__class__, (BLACK, repr(wp)), repr(wp))
        if bp.__type__ == LOCAL:
            # Bugfix: the new white player's name is the old black player's
            # (was repr(wp), inconsistent with the tuple's trailing repr(bp)).
            player0tup = (bp.__type__, bp.__class__, (WHITE, repr(bp)), repr(bp))
        else:
            engine = discoverer.getEngineByMd5(bp.md5)
            player0tup = (ARTIFICIAL, discoverer.initPlayerEngine,
                          (engine, WHITE, bp.strength, gamemodel.variant,
                           secs, gain), repr(bp))
    else:
        player0tup = (bp.__type__, bp.__class__, (WHITE, repr(bp)), repr(bp))
        engine = discoverer.getEngineByMd5(wp.md5)
        player1tup = (ARTIFICIAL, discoverer.initPlayerEngine,
                      (engine, BLACK, wp.strength, gamemodel.variant,
                       secs, gain), repr(wp))
    ionest.generalStart(newgamemodel, player0tup, player1tup)
| gpl-3.0 |
pixelated-project/pixelated-user-agent | service/test/unit/bitmask_libraries/test_provider.py | 2 | 10421 | #
# Copyright (c) 2014 ThoughtWorks, Inc.
#
# Pixelated is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pixelated is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Pixelated. If not, see <http://www.gnu.org/licenses/>.
import json
from mock import patch, MagicMock, ANY
from httmock import all_requests, HTTMock, urlmatch
from requests import HTTPError
from pixelated.bitmask_libraries.provider import LeapProvider
from pixelated.bitmask_libraries.certs import LeapCertificate
from pixelated.config import leap_config
from test_abstract_leap import AbstractLeapTest
import requests
@all_requests
def not_found_mock(url, request):
    """Catch-all HTTP mock answering 404 to any request."""
    return dict(status_code=404, content='foobar')
@urlmatch(netloc=r'(.*\.)?some-provider\.test$', path='/provider.json')
def provider_json_mock(url, request):
    """provider.json answer advertising the CA cert's real fingerprint."""
    fingerprint = "SHA256: 06e2300bdbc118c290eda0dc977c24080718f4eeca68c8b0ad431872a2baa22d"
    return provider_json_response(fingerprint)
@urlmatch(netloc=r'(.*\.)?some-provider\.test$', path='/provider.json')
def provider_json_invalid_fingerprint_mock(url, request):
    """provider.json answer advertising a fingerprint that will NOT match."""
    bogus = "SHA256: 0123456789012345678901234567890123456789012345678901234567890123"
    return provider_json_response(bogus)
def provider_json_response(fingerprint):
    """Build a fake 200 HTTP response carrying a provider.json body that
    advertises *fingerprint* as the CA certificate fingerprint."""
    provider = {}
    provider["api_uri"] = "https://api.some-provider.test:4430"
    provider["api_version"] = "1"
    provider["ca_cert_fingerprint"] = fingerprint
    provider["ca_cert_uri"] = "https://some-provider.test/ca.crt"
    provider["domain"] = "some-provider.test"
    provider["services"] = ["mx"]
    return {"status_code": 200, "content": json.dumps(provider)}
@urlmatch(netloc=r'api\.some-provider\.test:4430$', path='/1/config/soledad-service.json')
def soledad_json_mock(url, request):
    """Mock for the soledad service configuration endpoint."""
    body = json.dumps({"some key": "some value"})
    return {"status_code": 200, "content": body}
@urlmatch(netloc=r'api\.some-provider\.test:4430$', path='/1/config/smtp-service.json')
def smtp_json_mock(url, request):
    """Mock for the smtp service configuration endpoint."""
    mx_host = {
        "hostname": "mx.some-provider.test",
        "ip_address": "0.0.0.0",
        "port": 465,
    }
    config = {
        "hosts": {"leap-mx": mx_host},
        "locations": {},
        "serial": 1,
        "version": 1,
    }
    return {"status_code": 200, "content": json.dumps(config)}
@urlmatch(netloc=r'(.*\.)?some-provider\.test$', path='/ca.crt')
def ca_cert_mock(url, request):
    """Serve the module-level test CA certificate."""
    return dict(status_code=200, content=ca_crt)
# Self-signed root CA certificate served by ca_cert_mock; its SHA256
# fingerprint is the one advertised by provider_json_mock.
ca_crt = """
-----BEGIN CERTIFICATE-----
MIIFbzCCA1egAwIBAgIBATANBgkqhkiG9w0BAQ0FADBKMREwDwYDVQQKDAhXYXpv
a2F6aTEaMBgGA1UECwwRaHR0cHM6Ly9kZmkubG9jYWwxGTAXBgNVBAMMEFdhem9r
YXppIFJvb3QgQ0EwHhcNMTQwMzI1MDAwMDAwWhcNMjQwMzI1MDAwMDAwWjBKMREw
DwYDVQQKDAhXYXpva2F6aTEaMBgGA1UECwwRaHR0cHM6Ly9kZmkubG9jYWwxGTAX
BgNVBAMMEFdhem9rYXppIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw
ggIKAoICAQDSPyaslC6SNVsKpGoXllInPXbjiq7rJaV08Xg+64FJU/257BZZEJ/j
r33r0xlt2kj85PcbPySLKy0omXAQt9bs273hwAQXExdY41FxMD3wP/dmLqd55KYa
LDV4GUw0QPZ0QUyWVrRHkrdCDyjpRG+6GbowmtygJKLflYmUFC1PYQ3492esr0jC
+Q6L6+/D2+hBiH3NPI22Yk0kQmuPfnu2pvo+EYQ3It81qZE0Jo8u/BqOMgN2f9DS
GvSNfZcKAP18A41/VRrYFa/WUcdDxt/uP5nO1dm2vfLorje3wcMGtGRcDKG/+GAm
S0nYKKQeWYc6z5SDvPM1VlNdn1gOejhAoggT3Hr5Dq8kxW/lQZbOz+HLbz15qGjz
gL4KHKuDE6hOuqxpHdMTY4WZBBQ8/6ICBxaXH9587/nNDdZiom+XukVD4mrSMJS7
PRr14Hw57433AJDJcZRwZNRRAGgDPNsCoR2caKB6/Uwkp+dWVndj5Ad8MEjyM1yV
+fYU6PSQWNig7qqN5VhNY+zUCcez5gL6volMuW00iOkXISW4lBrcZmEAQTTcWT1D
U7EkLlwITQce63LcuvK7ZWsEm5XCqD+yUz9oQfugmIhxAlTdqt3De9FA0WT9WxGt
zLeswCNKjnMpRgTerq6elwB03EBJVc7k1QRn4+s6C30sXR12dYnEMwIDAQABo2Aw
XjAdBgNVHQ4EFgQU8ItSdI5pSqMDjgRjgYI3Nj0SwxQwDgYDVR0PAQH/BAQDAgIE
MAwGA1UdEwQFMAMBAf8wHwYDVR0jBBgwFoAU8ItSdI5pSqMDjgRjgYI3Nj0SwxQw
DQYJKoZIhvcNAQENBQADggIBALdSPUrIqyIlSMr4R7pWd6Ep0BZH5RztVUcoXtei
x2MFi/rsw7aL9qZqACYIE8Gkkh6Z6GQph0fIqhAlNFvJXKkguL3ri5xh0XmPfbv/
OLIvaUAixATivdm8ro/IqYQWdL3P6mDZOv4O6POdBEJ9JLc9RXUt1LiQ5Xb9QiLs
l/yOthhp5dJHqC8s6CDEUHRe3s9Q/4cwNB4td47I+mkLsNtVNXqi4lOzuQamqiFt
cFIqOLTFtBJ7G3k9iaDuN6RPS6LMRbqabwg4gafQTmJ+roHpnsaiHkfomI4MZOVi
TLQKOAJ3/pRGm5cGzkzQ+z4sUiCSQxtIWs7EnQCCE8agqpef6zArAvKEO+139+f2
u1BhWOm/aHT5a3INnJEbuFr8V9MlbZSxSzU3UH7hby+9PxWKYesc6KUAu6Icooci
gEQqrVhVKmfaYMLL7UZHhw56yv/6B10SSmeAMiJhtTExjjrTRLSCaKCPa2ISAUDB
aPR3t8ZoUESWRAFQGj5NvWOomTaXfyE8Or2WfNemvdlWsKvlLeVsjts+iaTgQRU9
VXcrUhrHhaXhYXeWrWkDDcl8VUlDWXzoUGV9SczOGwr6hONJWMn1HNxNV7ywFWf0
QXH1g3LBW7qNgRaGhbIX4a1WoNQDmbbKaLgKWs74atZ8o4A2aUEjomclgZWPsc5l
VeJ6
-----END CERTIFICATE-----
"""
# Certificate paths used as stand-ins in the tests below.
PROVIDER_API_CERT = '/tmp/ca.crt'
PROVIDER_WEB_CERT = '/tmp/bootstrap-ca.crt'
class LeapProviderTest(AbstractLeapTest):
    # Tests for LeapProvider bootstrapping: fetching provider.json, the
    # service configs and the CA certificate over mocked HTTP.
    def setUp(self):
        leap_config.leap_home = '/tmp/foobar'
        LeapCertificate.set_cert_and_fingerprint(PROVIDER_WEB_CERT, None)
    def tearDown(self):
        # Restore the module-level leap_config defaults mutated in setUp.
        reload(leap_config)
    def test_provider_fetches_provider_json(self):
        with HTTMock(provider_json_mock, soledad_json_mock):
            provider = LeapProvider('some-provider.test')
            self.assertEqual("1", provider.api_version)
            self.assertEqual("some-provider.test", provider.domain)
            self.assertEqual("https://api.some-provider.test:4430", provider.api_uri)
            self.assertEqual("https://some-provider.test/ca.crt", provider.ca_cert_uri)
            self.assertEqual("SHA256: 06e2300bdbc118c290eda0dc977c24080718f4eeca68c8b0ad431872a2baa22d",
                             provider.ca_cert_fingerprint)
            self.assertEqual(["mx"], provider.services)
    def test_provider_json_throws_exception_on_status_code(self):
        with HTTMock(not_found_mock):
            self.assertRaises(HTTPError, LeapProvider, 'some-provider.test')
    def test_fetch_soledad_json(self):
        with HTTMock(provider_json_mock, soledad_json_mock, not_found_mock):
            provider = LeapProvider('some-provider.test')
            soledad = provider.fetch_soledad_json()
            self.assertEqual("some value", soledad.get('some key'))
    def test_fetch_smtp_json(self):
        with HTTMock(provider_json_mock, soledad_json_mock, smtp_json_mock, not_found_mock):
            provider = LeapProvider('some-provider.test')
            smtp = provider.fetch_smtp_json()
            self.assertEqual('mx.some-provider.test', smtp.get('hosts').get('leap-mx').get('hostname'))
    def test_throw_exception_for_fetch_smtp_status_code(self):
        with HTTMock(provider_json_mock, soledad_json_mock, not_found_mock):
            provider = LeapProvider('some-provider.test')
            self.assertRaises(HTTPError, provider.fetch_smtp_json)
    def test_fetch_valid_certificate(self):
        with HTTMock(provider_json_mock, soledad_json_mock, ca_cert_mock, not_found_mock):
            provider = LeapProvider('some-provider.test')
            provider.fetch_valid_certificate()
    def test_throw_exception_for_invalid_certificate(self):
        # The provider advertises a fingerprint the served CA cert won't match.
        expected_exception_message = 'Certificate fingerprints don\'t match! Expected [0123456789012345678901234567890123456789012345678901234567890123] but got [06e2300bdbc118c290eda0dc977c24080718f4eeca68c8b0ad431872a2baa22d]'
        with HTTMock(provider_json_invalid_fingerprint_mock, soledad_json_mock, ca_cert_mock, not_found_mock):
            provider = LeapProvider('some-provider.test')
            with self.assertRaises(Exception) as cm:
                provider.fetch_valid_certificate()
            self.assertEqual(expected_exception_message, cm.exception.message)
    def test_that_bootstrap_cert_is_used_to_fetch_certificate(self):
        # Wrap the real requests API so the verify= argument can be inspected.
        session = MagicMock(wraps=requests.session())
        session_func = MagicMock(return_value=session)
        get_func = MagicMock(wraps=requests.get)
        LeapCertificate.LEAP_CERT = PROVIDER_WEB_CERT
        with patch('pixelated.bitmask_libraries.provider.requests.session', new=session_func):
            with patch('pixelated.bitmask_libraries.provider.requests.get', new=get_func):
                with HTTMock(provider_json_mock, ca_cert_mock, soledad_json_mock, not_found_mock):
                    provider = LeapProvider('some-provider.test')
                    provider.fetch_valid_certificate()
        session.get.assert_any_call('https://some-provider.test/ca.crt', verify=PROVIDER_WEB_CERT, timeout=15)
        session.get.assert_any_call('https://some-provider.test/provider.json', verify=PROVIDER_WEB_CERT, timeout=15)
    def test_that_provider_cert_is_used_to_fetch_soledad_json(self):
        get_func = MagicMock(wraps=requests.get)
        with patch('pixelated.bitmask_libraries.provider.requests.get', new=get_func):
            with HTTMock(provider_json_mock, soledad_json_mock, not_found_mock):
                provider = LeapProvider('some-provider.test')
                provider.fetch_soledad_json()
        get_func.assert_called_with('https://api.some-provider.test:4430/1/config/soledad-service.json', verify='/tmp/foobar/providers/some-provider.test/keys/client/api.pem', timeout=15)
    def test_that_leap_fingerprint_is_validated(self):
        # With no web cert but a fingerprint, TLS verification is disabled
        # and a fingerprint-checking adapter is mounted instead.
        session = MagicMock(wraps=requests.session())
        session_func = MagicMock(return_value=session)
        LeapCertificate.set_cert_and_fingerprint(None, 'some fingerprint')
        with patch('pixelated.bitmask_libraries.provider.requests.session', new=session_func):
            with HTTMock(provider_json_mock, ca_cert_mock, soledad_json_mock, not_found_mock):
                provider = LeapProvider('some-provider.test')
                provider.fetch_valid_certificate()
        session.get.assert_any_call('https://some-provider.test/ca.crt', verify=False, timeout=15)
        session.mount.assert_called_with('https://', ANY)
    def test_provider_api_cert(self):
        with HTTMock(provider_json_mock):
            provider = LeapProvider('some-provider.test')
            certs = provider.provider_api_cert
            self.assertEqual('/tmp/foobar/providers/some-provider.test/keys/client/api.pem', certs)
| agpl-3.0 |
neteler/QGIS | scripts/random_vector.py | 31 | 3877 | #!/usr/bin/env python
# Generates random shapefile which may be used for benchmarks
import os
import sys
import random
import string
import math
from osgeo import ogr
from optparse import OptionParser
def error(msg):
    """Print *msg* and abort the script with exit status 1.

    Used for all fatal conditions in this generator script.
    """
    # Parenthesized print behaves identically on Python 2 (single-argument
    # print statement) and is also valid Python 3, unlike `print msg`.
    print(msg)
    sys.exit(1)
# Command-line interface: one positional output path plus generation options.
parser = OptionParser("usage: %prog [options] output")
parser.add_option("-t", "--type", dest="type", type="choice", choices=("point", "line", "polygon"), default="point", help="Geometry type")
parser.add_option("-f", "--features", dest="features", type="int", default=1000, help="Number of features")
parser.add_option("-c", "--coordinates", dest="coordinates", type="int", default=10, help="Number of coordinates per feature (lines and polygons)")
parser.add_option("-a", "--attributes", dest="attributes", type="int", default=10, help="Number of attributes")
parser.add_option("-e", "--extent", dest="extent", type="string", default="-180,-90,180,90", help="Extent")

(options, args) = parser.parse_args()

if len(args) != 1:
    error( "Output file path missing" )

# Extent string is "minx,miny,maxx,maxy".
(minx, miny, maxx, maxy) = map ( float, options.extent.split(",") )

driverName = "ESRI Shapefile"
drv = ogr.GetDriverByName( driverName )
if drv is None:
    error ( "%s driver not available.\n" % driverName )

# delete if exists
try:
    if os.path.exists( args[0] ):
        drv.DeleteDataSource( args[0] )
except:
    pass

ds = drv.CreateDataSource( args[0] )
if ds is None:
    error( "Creation of output file failed.\n" )

# Map the CLI geometry-type name onto the OGR wkb geometry constant.
types = { "point": ogr.wkbPoint, "line": ogr.wkbLineString, "polygon": ogr.wkbPolygon }
lyr = ds.CreateLayer( "out", None, types[options.type] )
if lyr is None:
    error ( "Layer creation failed.\n" )

# Each attribute field gets a randomly chosen type; string fields share one width.
attrTypes = ( ogr.OFTString, ogr.OFTInteger, ogr.OFTReal )
stringWidth = 100
for a in range(0,options.attributes):
    attrName = "attr%s" % a
    field_defn = ogr.FieldDefn( attrName, random.choice( attrTypes ) )
    if field_defn.type == ogr.OFTString:
        field_defn.SetWidth( stringWidth )
    if lyr.CreateField ( field_defn ) != 0:
        error ( "Creating Name field failed.\n" )

feat_defn = lyr.GetLayerDefn()
for f in range(options.features):
    feat = ogr.Feature( feat_defn )
    # NOTE(review): `buffer` shadows the Python 2 built-in and is loop-invariant
    # (extent does not change); left untouched in this annotated copy.
    buffer = (maxx-minx)/100
    if options.type == "point":
        geo = ogr.Geometry( ogr.wkbPoint )
        x = random.uniform( minx, maxx )
        y = random.uniform( miny, maxy )
        geo.SetPoint_2D(0, x, y)
    elif options.type == "line":
        # Vertices are placed on a radius-jittered circle around a random centre.
        geo = ogr.Geometry(ogr.wkbLineString)
        xc = random.uniform( minx+buffer, maxx-buffer )
        yc = random.uniform( miny+buffer, maxy-buffer )
        for c in range(options.coordinates):
            a = c * 2 * math.pi / options.coordinates
            r = random.uniform( buffer/10, 9*buffer/10 )
            x = xc + r * math.sin(a)
            y = yc + r * math.cos(a)
            geo.SetPoint_2D(c, x, y)
    elif options.type == "polygon":
        ring = ogr.Geometry(ogr.wkbLinearRing)
        xc = random.uniform( minx+buffer, maxx-buffer )
        yc = random.uniform( miny+buffer, maxy-buffer )
        for c in range(options.coordinates):
            a = c * 2 * math.pi / options.coordinates
            r = random.uniform( buffer/10, 9*buffer/10 )
            x = xc + r * math.sin(a)
            y = yc + r * math.cos(a)
            ring.SetPoint_2D(c, x, y)
        # NOTE(review): the ring is not explicitly closed (first vertex is not
        # repeated at the end); OGR appears to accept this for shapefile output
        # — confirm against the OGR geometry documentation.
        geo = ogr.Geometry(ogr.wkbPolygon)
        geo.AddGeometry ( ring )
    feat.SetGeometry(geo)
    # Fill every attribute with a random value matching the field's type.
    for i in range(feat_defn.GetFieldCount()):
        field_defn = feat_defn.GetFieldDefn(i)
        val = None
        limit = 10000000
        if field_defn.GetType() == ogr.OFTString:
            nChars = random.randint(0,stringWidth)
            val = ''.join(random.choice(string.ascii_letters+ string.digits) for x in range(nChars) )
        elif field_defn.GetType() == ogr.OFTInteger:
            val = random.randint( -limit, limit )
        elif field_defn.GetType() == ogr.OFTReal:
            val = random.uniform ( -limit, limit )
        feat.SetField( field_defn.name, val )
    if lyr.CreateFeature(feat) != 0:
        error ( "Failed to create feature in shapefile.\n" )
| gpl-2.0 |
centricular/cerbero | test/test_cerbero_build_build.py | 4 | 3389 | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import unittest
import os
from test.test_common import DummyConfig
from cerbero.build import build
class MakefilesBase(build.MakefilesBase):
    # Minimal concrete subclass used only to exercise the
    # @build.modify_environment decorator in the tests below.
    srcdir = ''
    build_dir = ''

    def __init__(self, config):
        self.config = config
        build.MakefilesBase.__init__(self)

    @build.modify_environment
    def get_env_var(self, var):
        # Returns the value of `var` as observed *inside* the modified
        # environment, or None when the variable is not set at all.
        if var not in os.environ:
            return None
        return os.environ[var]

    @build.modify_environment
    def get_env_var_nested(self, var):
        # Applies the decorator twice (here and in get_env_var) to check that
        # nested environment modifications compose without double-applying.
        return self.get_env_var(var)
class ModifyEnvTest(unittest.TestCase):
    # Exercises the append_env / new_env / use_system_libs behaviour of
    # build.modify_environment via the MakefilesBase test double above.

    def setUp(self):
        self.var = 'TEST_VAR'
        self.val1 = 'test'
        self.val2 = 'test2'
        self.mk = MakefilesBase(DummyConfig())

    def testAppendEnv(self):
        # append_env values are appended (space-separated) to an existing value.
        os.environ[self.var] = self.val1
        self.mk.append_env = {self.var: self.val2}
        val = self.mk.get_env_var(self.var)
        self.assertEqual(val, "%s %s" % (self.val1, self.val2))

    def testAppendNonExistentEnv(self):
        # Appending to an unset variable yields " <value>" (leading separator kept).
        if self.var in os.environ:
            del os.environ[self.var]
        self.mk.append_env = {self.var: self.val2}
        val = self.mk.get_env_var(self.var)
        self.assertEqual(val, ' %s' % self.val2)

    def testNewEnv(self):
        # new_env replaces any existing value outright.
        os.environ[self.var] = self.val1
        self.mk.new_env = {self.var: self.val2}
        val = self.mk.get_env_var(self.var)
        self.assertEqual(val, self.val2)

    def testAppendAndNewEnv(self):
        # When both are set, new_env wins over append_env.
        os.environ[self.var] = ''
        self.mk.append_env = {self.var: self.val1}
        self.mk.new_env = {self.var: self.val2}
        val = self.mk.get_env_var(self.var)
        self.assertEqual(val, self.val2)

    def testSystemLibs(self):
        # use_system_libs + allow_system_libs rewrites the pkg-config search paths.
        os.environ['PKG_CONFIG_PATH'] = '/path/1'
        os.environ['PKG_CONFIG_LIBDIR'] = '/path/2'
        self.mk.config.allow_system_libs = True
        self.mk.use_system_libs = True
        val = self.mk.get_env_var('PKG_CONFIG_PATH')
        self.assertEqual(val,'/path/2:/usr/lib/pkgconfig:'
            '/usr/share/pkgconfig:/usr/lib/i386-linux-gnu/pkgconfig')
        val = self.mk.get_env_var('PKG_CONFIG_LIBDIR')
        self.assertEqual(val,'/path/2')

    def testNestedModif(self):
        # Nested @modify_environment calls must not apply append_env twice.
        os.environ[self.var] = self.val1
        self.mk.append_env = {self.var: self.val2}
        val = self.mk.get_env_var(self.var)
        self.assertEqual(val, "%s %s" % (self.val1, self.val2))
        val = self.mk.get_env_var_nested(self.var)
        self.assertEqual(val, "%s %s" % (self.val1, self.val2))
| lgpl-2.1 |
qiankunshe/sky_engine | sky/engine/build/scripts/name_macros.py | 14 | 8268 | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os.path
import re
from in_generator import Maker
import in_generator
import license
import name_utilities
HEADER_TEMPLATE = """%(license)s
#ifndef %(namespace)s%(suffix)sHeaders_h
#define %(namespace)s%(suffix)sHeaders_h
%(base_header_for_suffix)s
%(includes)s
#endif // %(namespace)s%(suffix)sHeaders_h
"""
INTERFACES_HEADER_TEMPLATE = """%(license)s
#ifndef %(namespace)s%(suffix)sInterfaces_h
#define %(namespace)s%(suffix)sInterfaces_h
%(base_header_for_suffix)s
%(declare_conditional_macros)s
#define %(macro_style_name)s_INTERFACES_FOR_EACH(macro) \\
\\
%(unconditional_macros)s
\\
%(conditional_macros)s
#endif // %(namespace)s%(suffix)sInterfaces_h
"""
class Writer(in_generator.Writer):
    """Generates the aggregate <Namespace><Suffix>Headers.h and
    <Namespace><Suffix>Interfaces.h files from an .in interface list."""

    def __init__(self, in_file_path):
        super(Writer, self).__init__(in_file_path)
        self.namespace = self.in_file.parameters['namespace'].strip('"')
        self.suffix = self.in_file.parameters['suffix'].strip('"')
        # Entries bucketed by their 'Conditional' value; unconditional entries
        # are kept in a separate ordered list.
        self._entries_by_conditional = {}
        self._unconditional_entries = []
        self._validate_entries()
        self._sort_entries_by_conditional()
        # Maps output file name -> generator method (consumed by in_generator).
        self._outputs = {(self.namespace + self.suffix + "Headers.h"): self.generate_headers_header,
                         (self.namespace + self.suffix + "Interfaces.h"): self.generate_interfaces_header,
                         }

    def _validate_entries(self):
        """Abort generation if two entries share the same script name."""
        # If there is more than one entry with the same script name, only the first one will ever
        # be hit in practice, and so we'll silently ignore any properties requested for the second
        # (like RuntimeEnabled - see crbug.com/332588).
        entries_by_script_name = dict()
        for entry in self.in_file.name_dictionaries:
            script_name = name_utilities.script_name(entry)
            if script_name in entries_by_script_name:
                self._fatal('Multiple entries with script_name=%(script_name)s: %(name1)s %(name2)s' % {
                    'script_name': script_name,
                    'name1': entry['name'],
                    'name2': entries_by_script_name[script_name]['name']})
            entries_by_script_name[script_name] = entry

    def _fatal(self, message):
        """Print an error and abort generation with a non-zero exit status."""
        # Parenthesized single-argument print is identical under Python 2's
        # print statement and is also valid Python 3 (the old `print '...'`
        # form is a Python 2-only syntax error under Python 3).
        print('FATAL ERROR: ' + message)
        exit(1)

    def _sort_entries_by_conditional(self):
        """Split entries into unconditional ones and per-Conditional buckets,
        de-duplicating by C++ class name (first occurrence wins; an
        unconditional entry suppresses any conditional duplicate)."""
        unconditional_names = set()
        for entry in self.in_file.name_dictionaries:
            conditional = entry['Conditional']
            if not conditional:
                cpp_name = name_utilities.cpp_name(entry)
                if cpp_name in unconditional_names:
                    continue
                unconditional_names.add(cpp_name)
                self._unconditional_entries.append(entry)
                continue
        for entry in self.in_file.name_dictionaries:
            cpp_name = name_utilities.cpp_name(entry)
            if cpp_name in unconditional_names:
                continue
            conditional = entry['Conditional']
            if not conditional in self._entries_by_conditional:
                self._entries_by_conditional[conditional] = []
            self._entries_by_conditional[conditional].append(entry)

    def _headers_header_include_path(self, entry):
        """Return the #include path for one entry, honoring ImplementedAs."""
        if entry['ImplementedAs']:
            # Keep the directory of 'name' but use the ImplementedAs basename.
            path = os.path.dirname(entry['name'])
            if len(path):
                path += '/'
            path += entry['ImplementedAs']
        else:
            path = entry['name']
        return path + '.h'

    def _headers_header_includes(self, entries):
        """Return one (possibly condition-wrapped) #include line per C++ class."""
        includes = dict()
        for entry in entries:
            cpp_name = name_utilities.cpp_name(entry)
            # Avoid duplicate includes.
            if cpp_name in includes:
                continue
            include = '#include "%(path)s"\n' % {
                'path': self._headers_header_include_path(entry),
            }
            includes[cpp_name] = self.wrap_with_condition(include, entry['Conditional'])
        # NOTE(review): plain dict ordering is arbitrary before Python 3.7, so
        # the emitted include order may vary between runs — confirm acceptable.
        return includes.values()

    def generate_headers_header(self):
        """Render the <Namespace><Suffix>Headers.h output file."""
        base_header_for_suffix = ''
        if self.suffix:
            base_header_for_suffix = '\n#include "core/%(namespace)sHeaders.h"\n' % {'namespace': self.namespace}
        return HEADER_TEMPLATE % {
            'license': license.license_for_generated_cpp(),
            'namespace': self.namespace,
            'suffix': self.suffix,
            'base_header_for_suffix': base_header_for_suffix,
            'includes': '\n'.join(self._headers_header_includes(self.in_file.name_dictionaries)),
        }

    def _declare_one_conditional_macro(self, conditional, entries):
        """Emit the FOR_EACH_<COND> macro: real entries inside the #if, an
        empty fallback definition in the #else branch."""
        macro_name = '%(macro_style_name)s_INTERFACES_FOR_EACH_%(conditional)s' % {
            'macro_style_name': name_utilities.to_macro_style(self.namespace + self.suffix),
            'conditional': conditional,
        }
        return self.wrap_with_condition("""#define %(macro_name)s(macro) \\
%(declarations)s
#else
#define %(macro_name)s(macro)""" % {
            'macro_name': macro_name,
            'declarations': '\n'.join(sorted(set([
                '    macro(%(cpp_name)s) \\' % {'cpp_name': name_utilities.cpp_name(entry)}
                for entry in entries]))),
        }, conditional)

    def _declare_conditional_macros(self):
        """Emit one FOR_EACH_<COND> macro block per conditional bucket."""
        return '\n'.join([
            self._declare_one_conditional_macro(conditional, entries)
            for conditional, entries in self._entries_by_conditional.items()])

    def _unconditional_macro(self, entry):
        """One X-macro invocation line for an unconditional entry."""
        return '    macro(%(cpp_name)s) \\' % {'cpp_name': name_utilities.cpp_name(entry)}

    def _conditional_macros(self, conditional):
        """One line invoking the per-conditional FOR_EACH macro."""
        return '    %(macro_style_name)s_INTERFACES_FOR_EACH_%(conditional)s(macro) \\' % {
            'macro_style_name': name_utilities.to_macro_style(self.namespace + self.suffix),
            'conditional': conditional,
        }

    def generate_interfaces_header(self):
        """Render the <Namespace><Suffix>Interfaces.h output file."""
        base_header_for_suffix = ''
        if self.suffix:
            base_header_for_suffix = '\n#include "core/%(namespace)sInterfaces.h"\n' % {'namespace': self.namespace}
        return INTERFACES_HEADER_TEMPLATE % {
            'license': license.license_for_generated_cpp(),
            'namespace': self.namespace,
            'suffix': self.suffix,
            'base_header_for_suffix': base_header_for_suffix,
            'macro_style_name': name_utilities.to_macro_style(self.namespace + self.suffix),
            'declare_conditional_macros': self._declare_conditional_macros(),
            'unconditional_macros': '\n'.join(sorted(set(map(self._unconditional_macro, self._unconditional_entries)))),
            'conditional_macros': '\n'.join(map(self._conditional_macros, self._entries_by_conditional.keys())),
        }
| bsd-3-clause |
vmax-feihu/hue | desktop/core/ext-py/pysqlite/doc/includes/sqlite3/text_factory.py | 49 | 1370 | from pysqlite2 import dbapi2 as sqlite3
# Demonstrates pysqlite's text_factory hook. Python 2 example: relies on the
# `unicode` built-in and u'' literals, and `str` here means a byte string.
con = sqlite3.connect(":memory:")
cur = con.cursor()

# Create the table
con.execute("create table person(lastname, firstname)")

AUSTRIA = u"\xd6sterreich"

# by default, rows are returned as Unicode
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert row[0] == AUSTRIA

# but we can make pysqlite always return bytestrings ...
con.text_factory = str
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert type(row[0]) == str
# the bytestrings will be encoded in UTF-8, unless you stored garbage in the
# database ...
assert row[0] == AUSTRIA.encode("utf-8")

# we can also implement a custom text_factory ...
# here we implement one that will ignore Unicode characters that cannot be
# decoded from UTF-8
con.text_factory = lambda x: unicode(x, "utf-8", "ignore")
cur.execute("select ?", ("this is latin1 and would normally create errors" + u"\xe4\xf6\xfc".encode("latin1"),))
row = cur.fetchone()
assert type(row[0]) == unicode

# pysqlite offers a builtin optimized text_factory that will return bytestring
# objects, if the data is in ASCII only, and otherwise return unicode objects
con.text_factory = sqlite3.OptimizedUnicode
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert type(row[0]) == unicode
cur.execute("select ?", ("Germany",))
row = cur.fetchone()
assert type(row[0]) == str
| apache-2.0 |
willingc/oh-mainline | vendor/packages/scrapy/scrapy/tests/test_squeue.py | 24 | 4074 | from scrapy.tests import test_utils_queue as t
from scrapy.squeue import MarshalFifoDiskQueue, MarshalLifoDiskQueue, PickleFifoDiskQueue, PickleLifoDiskQueue
from scrapy.item import Item, Field
from scrapy.http import Request
from scrapy.contrib.loader import ItemLoader
class TestItem(Item):
    # Single-field item used as a serialization payload in the queue tests.
    name = Field()
def test_processor(x):
return x + x
class TestLoader(ItemLoader):
    # Loader with a module-level processor so it stays picklable; the tests
    # verify both attributes survive serialization through the disk queues.
    default_item_class = TestItem
    name_out = staticmethod(test_processor)
class MarshalFifoDiskQueueTest(t.FifoDiskQueueTest):
    # Runs the generic FIFO disk-queue suite against the marshal-backed queue.
    chunksize = 100000

    def queue(self):
        # self.qdir is provided by the base test case's setUp().
        return MarshalFifoDiskQueue(self.qdir, chunksize=self.chunksize)

    def test_serialize(self):
        # marshal round-trips basic built-in types in FIFO order.
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), 'a')
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), {'a': 'dict'})

    def test_nonserializable_object(self):
        # marshal cannot serialize functions; the queue must raise ValueError.
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
# Repeat the marshal FIFO suite at tiny chunk sizes to exercise the
# chunk-file rollover logic of the on-disk queue.
class ChunkSize1MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 1

class ChunkSize2MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 2

class ChunkSize3MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 3

class ChunkSize4MarshalFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    chunksize = 4
class PickleFifoDiskQueueTest(MarshalFifoDiskQueueTest):
    # Same FIFO suite, pickle-backed; pickle additionally handles Scrapy
    # objects (items, loaders, requests), which the extra tests below cover.
    chunksize = 100000

    def queue(self):
        return PickleFifoDiskQueue(self.qdir, chunksize=self.chunksize)

    def test_serialize_item(self):
        # A Scrapy Item must survive a push/pop round trip with type intact.
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)

    def test_serialize_loader(self):
        # An ItemLoader keeps its item class and output processors.
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')

    def test_serialize_request_recursive(self):
        # A self-referencing Request (r.meta['request'] is r) must round-trip
        # with the cycle preserved, not duplicated.
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
# Repeat the pickle FIFO suite at tiny chunk sizes to exercise rollover.
class ChunkSize1PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 1

class ChunkSize2PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 2

class ChunkSize3PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 3

class ChunkSize4PickleFifoDiskQueueTest(PickleFifoDiskQueueTest):
    chunksize = 4
class MarshalLifoDiskQueueTest(t.LifoDiskQueueTest):
    # LIFO variant of the marshal queue suite; self.path comes from the base.

    def queue(self):
        return MarshalLifoDiskQueue(self.path)

    def test_serialize(self):
        # LIFO: values pop in reverse push order.
        q = self.queue()
        q.push('a')
        q.push(123)
        q.push({'a': 'dict'})
        self.assertEqual(q.pop(), {'a': 'dict'})
        self.assertEqual(q.pop(), 123)
        self.assertEqual(q.pop(), 'a')

    def test_nonserializable_object(self):
        # marshal cannot serialize functions; the queue must raise ValueError.
        q = self.queue()
        self.assertRaises(ValueError, q.push, lambda x: x)
class PickleLifoDiskQueueTest(MarshalLifoDiskQueueTest):
    # LIFO suite, pickle-backed, plus Scrapy-object round-trip checks
    # mirroring PickleFifoDiskQueueTest.

    def queue(self):
        return PickleLifoDiskQueue(self.path)

    def test_serialize_item(self):
        q = self.queue()
        i = TestItem(name='foo')
        q.push(i)
        i2 = q.pop()
        assert isinstance(i2, TestItem)
        self.assertEqual(i, i2)

    def test_serialize_loader(self):
        q = self.queue()
        l = TestLoader()
        q.push(l)
        l2 = q.pop()
        assert isinstance(l2, TestLoader)
        assert l2.default_item_class is TestItem
        self.assertEqual(l2.name_out('x'), 'xx')

    def test_serialize_request_recursive(self):
        # Self-referencing Request must keep its reference cycle intact.
        q = self.queue()
        r = Request('http://www.example.com')
        r.meta['request'] = r
        q.push(r)
        r2 = q.pop()
        assert isinstance(r2, Request)
        self.assertEqual(r.url, r2.url)
        assert r2.meta['request'] is r2
| agpl-3.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-0.96/django/db/__init__.py | 32 | 2258 | from django.conf import settings
from django.core import signals
from django.dispatch import dispatcher
__all__ = ('backend', 'connection', 'DatabaseError')
# Fall back to the no-op 'dummy' backend so import-time code keeps working
# when no database engine is configured.
if not settings.DATABASE_ENGINE:
    settings.DATABASE_ENGINE = 'dummy'

try:
    # The trailing [''] makes __import__ return the leaf 'base' module
    # instead of the top-level 'django' package. (Python 2 module.)
    backend = __import__('django.db.backends.%s.base' % settings.DATABASE_ENGINE, {}, {}, [''])
except ImportError, e:
    # The database backend wasn't found. Display a helpful error message
    # listing all possible database backends.
    from django.core.exceptions import ImproperlyConfigured
    import os
    backend_dir = os.path.join(__path__[0], 'backends')
    available_backends = [f for f in os.listdir(backend_dir) if not f.startswith('_') and not f.startswith('.') and not f.endswith('.py') and not f.endswith('.pyc')]
    available_backends.sort()
    if settings.DATABASE_ENGINE not in available_backends:
        raise ImproperlyConfigured, "%r isn't an available database backend. Available options are: %s" % \
            (settings.DATABASE_ENGINE, ", ".join(map(repr, available_backends)))
    else:
        raise # If there's some other error, this must be an error in Django itself.
# Lazy accessors for the backend's optional companion modules.
get_introspection_module = lambda: __import__('django.db.backends.%s.introspection' % settings.DATABASE_ENGINE, {}, {}, [''])
get_creation_module = lambda: __import__('django.db.backends.%s.creation' % settings.DATABASE_ENGINE, {}, {}, [''])
runshell = lambda: __import__('django.db.backends.%s.client' % settings.DATABASE_ENGINE, {}, {}, ['']).runshell()

# Single shared connection for the whole process (pre-multi-db Django).
connection = backend.DatabaseWrapper(**settings.DATABASE_OPTIONS)
DatabaseError = backend.DatabaseError

# Register an event that closes the database connection
# when a Django request is finished.
dispatcher.connect(connection.close, signal=signals.request_finished)

# Register an event that resets connection.queries
# when a Django request is started.
def reset_queries():
    # Drop the per-request query log so it does not grow without bound.
    connection.queries = []
dispatcher.connect(reset_queries, signal=signals.request_started)

# Register an event that rolls back the connection
# when a Django request has an exception.
def _rollback_on_exception():
    # Imported lazily to avoid a circular import at module load time.
    from django.db import transaction
    transaction.rollback_unless_managed()
dispatcher.connect(_rollback_on_exception, signal=signals.got_request_exception)
| bsd-3-clause |
CivicTechTO/open-cabinet | venv/lib/python2.7/site-packages/django/test/signals.py | 240 | 5928 | import os
import threading
import time
import warnings
from django.core.signals import setting_changed
from django.db import connections, router
from django.db.utils import ConnectionRouter
from django.dispatch import Signal, receiver
from django.utils import timezone
from django.utils.functional import empty
# Fired by the instrumented template renderer so tests can inspect contexts.
template_rendered = Signal(providing_args=["template", "context"])

# Most setting_changed receivers are supposed to be added below,
# except for cases where the receiver is related to a contrib app.

# Settings that may not work well when using 'override_settings' (#19031)
COMPLEX_OVERRIDE_SETTINGS = {'DATABASES'}
@receiver(setting_changed)
def clear_cache_handlers(**kwargs):
    """Drop all memoized cache backends when CACHES is overridden."""
    if kwargs['setting'] != 'CACHES':
        return
    from django.core.cache import caches
    caches._caches = threading.local()
@receiver(setting_changed)
def update_installed_apps(**kwargs):
    """Clear every cache that is derived from INSTALLED_APPS."""
    if kwargs['setting'] != 'INSTALLED_APPS':
        return
    # Rebuild any AppDirectoriesFinder instance.
    from django.contrib.staticfiles.finders import get_finder
    get_finder.cache_clear()
    # Rebuild management commands cache.
    from django.core.management import get_commands
    get_commands.cache_clear()
    # Rebuild get_app_template_dirs cache.
    from django.template.utils import get_app_template_dirs
    get_app_template_dirs.cache_clear()
    # Rebuild translations cache.
    from django.utils.translation import trans_real
    trans_real._translations = {}
@receiver(setting_changed)
def update_connections_time_zone(**kwargs):
    # Keep the process environment and every DB connection in sync with
    # TIME_ZONE / USE_TZ overrides.
    if kwargs['setting'] == 'TIME_ZONE':
        # Reset process time zone
        if hasattr(time, 'tzset'):
            if kwargs['value']:
                os.environ['TZ'] = kwargs['value']
            else:
                os.environ.pop('TZ', None)
            time.tzset()
        # Reset local time zone cache
        timezone.get_default_timezone.cache_clear()
    # Reset the database connections' time zone
    if kwargs['setting'] in {'TIME_ZONE', 'USE_TZ'}:
        for conn in connections.all():
            # Invalidate the cached properties so they are recomputed lazily.
            try:
                del conn.timezone
            except AttributeError:
                pass
            try:
                del conn.timezone_name
            except AttributeError:
                pass
            # Some backends need an explicit SQL statement to switch zones.
            tz_sql = conn.ops.set_time_zone_sql()
            if tz_sql:
                with conn.cursor() as cursor:
                    cursor.execute(tz_sql, [conn.timezone_name])
@receiver(setting_changed)
def clear_routers_cache(**kwargs):
    """Rebuild the database router list when DATABASE_ROUTERS is overridden."""
    if kwargs['setting'] != 'DATABASE_ROUTERS':
        return
    router.routers = ConnectionRouter().routers
@receiver(setting_changed)
def reset_template_engines(**kwargs):
    """Throw away all cached template engines when a template-related setting changes."""
    template_settings = {
        'TEMPLATES',
        'TEMPLATE_DIRS',
        'ALLOWED_INCLUDE_ROOTS',
        'TEMPLATE_CONTEXT_PROCESSORS',
        'TEMPLATE_DEBUG',
        'TEMPLATE_LOADERS',
        'TEMPLATE_STRING_IF_INVALID',
        'DEBUG',
        'FILE_CHARSET',
        'INSTALLED_APPS',
    }
    if kwargs['setting'] not in template_settings:
        return
    from django.template import engines
    try:
        del engines.templates
    except AttributeError:
        pass
    engines._templates = None
    engines._engines = {}
    from django.template.engine import Engine
    Engine.get_default.cache_clear()
@receiver(setting_changed)
def clear_serializers_cache(**kwargs):
    """Forget loaded serializer modules when SERIALIZATION_MODULES is overridden."""
    if kwargs['setting'] != 'SERIALIZATION_MODULES':
        return
    from django.core import serializers
    serializers._serializers = {}
@receiver(setting_changed)
def language_changed(**kwargs):
    """Reset gettext translation state when an i18n setting changes."""
    setting = kwargs['setting']
    if setting not in {'LANGUAGES', 'LANGUAGE_CODE', 'LOCALE_PATHS'}:
        return
    from django.utils.translation import trans_real
    trans_real._default = None
    trans_real._active = threading.local()
    # Loaded catalogs depend only on LANGUAGES and LOCALE_PATHS, not on
    # LANGUAGE_CODE, so they are flushed for those two settings only.
    if setting in {'LANGUAGES', 'LOCALE_PATHS'}:
        trans_real._translations = {}
        trans_real.check_for_language.cache_clear()
@receiver(setting_changed)
def file_storage_changed(**kwargs):
    """Re-resolve the default file storage when a media/file-upload setting changes."""
    if kwargs['setting'] not in {
        'DEFAULT_FILE_STORAGE',
        'FILE_UPLOAD_DIRECTORY_PERMISSIONS',
        'FILE_UPLOAD_PERMISSIONS',
        'MEDIA_ROOT',
        'MEDIA_URL',
    }:
        return
    from django.core.files.storage import default_storage
    default_storage._wrapped = empty
@receiver(setting_changed)
def complex_setting_changed(**kwargs):
    """Warn when override_settings touches a setting known to misbehave (#19031)."""
    if not kwargs['enter'] or kwargs['setting'] not in COMPLEX_OVERRIDE_SETTINGS:
        return
    # Considering the current implementation of the signals framework,
    # stacklevel=5 shows the line containing the override_settings call.
    warnings.warn("Overriding setting %s can lead to unexpected behavior."
                  % kwargs['setting'], stacklevel=5)
@receiver(setting_changed)
def root_urlconf_changed(**kwargs):
    """Flush URL resolver caches when ROOT_URLCONF is overridden."""
    if kwargs['setting'] != 'ROOT_URLCONF':
        return
    from django.core.urlresolvers import clear_url_caches, set_urlconf
    clear_url_caches()
    set_urlconf(None)
@receiver(setting_changed)
def static_storage_changed(**kwargs):
    """Re-resolve the staticfiles storage when a static-files setting changes."""
    if kwargs['setting'] not in {'STATICFILES_STORAGE', 'STATIC_ROOT', 'STATIC_URL'}:
        return
    from django.contrib.staticfiles.storage import staticfiles_storage
    staticfiles_storage._wrapped = empty
@receiver(setting_changed)
def static_finders_changed(**kwargs):
    """Clear the staticfiles finder cache when a finder-related setting changes."""
    if kwargs['setting'] not in {'STATICFILES_DIRS', 'STATIC_ROOT'}:
        return
    from django.contrib.staticfiles.finders import get_finder
    get_finder.cache_clear()
@receiver(setting_changed)
def auth_password_validators_changed(**kwargs):
    """Re-read the default validators when AUTH_PASSWORD_VALIDATORS is overridden."""
    if kwargs['setting'] != 'AUTH_PASSWORD_VALIDATORS':
        return
    from django.contrib.auth.password_validation import get_default_password_validators
    get_default_password_validators.cache_clear()
| mit |
ovilab/lammps | tools/moltemplate/src/nbody_alternate_symmetry/nbody_Impropers_Icenter_swapJKL.py | 26 | 1867 | from nbody_graph_search import Ugraph
# To find 4-body "improper" interactions, we would use this subgraph:
# 3
# * 1st bond connects atoms 0 and 1
# | => 2nd bond connects atoms 0 and 2
# _.*._ 3rd bond connects atoms 0 and 3
# *' 0 `*
# 1 2
#
# Edge list for the search subgraph: bonds 0-1, 0-2 and 0-3, i.e. atom 0 is
# the central "hub" atom of the improper interaction.
bond_pattern = Ugraph([(0,1), (0,2), (0,3)])
# (Note: Ugraph atom-index counters begin at 0, not 1)
def canonical_order(match):
    """Return *match* rewritten into its canonical form.

    GraphMatcher reports the same improper interaction in many redundant
    atom orderings (e.g. 1,2,3,4 vs 1,4,2,3).  The interaction considered
    here has the first atom as the central "hub" and is invariant under
    permutations of the other three atoms, so we canonicalize by keeping
    the hub first and sorting the three outer atoms (and the bonds that
    attach them to the hub) by atom id.  Callers compare the canonical
    form against matches found so far to drop duplicates.

    match -- pair (atom_ids, bond_ids): atom_ids[0] is the hub and
             bond_ids[i] connects the hub to atom_ids[i + 1].
    """
    atom_ids = match[0]
    bond_ids = match[1]
    hub = atom_ids[0]
    # Pair each outer atom with its hub bond, then sort by atom id only.
    # Python's sort is stable, so ties keep their original bond order,
    # matching the historical behaviour.
    pairs = sorted(zip(atom_ids[1:4], bond_ids[0:3]), key=lambda pair: pair[0])
    ordered_atoms = (hub, pairs[0][0], pairs[1][0], pairs[2][0])
    ordered_bonds = (pairs[0][1], pairs[1][1], pairs[2][1])
    return (ordered_atoms, ordered_bonds)
| gpl-2.0 |
lcy-seso/Paddle | python/paddle/fluid/tests/unittests/test_margin_rank_loss_op.py | 5 | 1813 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from op_test import OpTest
class TestMarginRankLossOp(OpTest):
    """Checks the forward output and gradients of the margin_rank_loss op
    against a NumPy reference implementation."""

    def setUp(self):
        self.op_type = "margin_rank_loss"
        batch_size = 5
        margin = 0.5
        # Labels are drawn from {-1, +1}.
        label = 2 * np.random.randint(
            0, 2, size=(batch_size, 1)).astype("float32") - 1
        left = np.random.random((batch_size, 1)).astype("float32")
        right = np.random.random((batch_size, 1)).astype("float32")
        # Reference: loss = max(0, -label * (x1 - x2) + margin).
        raw = -label * (left - right) + margin
        loss = np.where(raw > 0, raw, 0)
        # 'Activated' is 1 where the hinge is active, 0 elsewhere.
        act = np.where(raw > 0, 1., 0.)
        self.attrs = {'margin': margin}
        self.inputs = {'Label': label, 'X1': left, 'X2': right}
        self.outputs = {'Activated': act, 'Out': loss}

    def test_check_output(self):
        self.check_output()

    def test_check_grad(self):
        self.check_grad(["X1", "X2"], "Out")

    def test_check_grad_ignore_x1(self):
        self.check_grad(["X2"], "Out", no_grad_set=set('X1'))

    def test_check_grad_ignore_x2(self):
        self.check_grad(["X1"], "Out", no_grad_set=set('X2'))
# Allow running this file directly as well as under a test runner.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
albertomurillo/ansible | lib/ansible/modules/cloud/opennebula/one_image.py | 52 | 11657 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
(c) 2018, Milan Ilic <milani@nordeus.com>
This file is part of Ansible
Ansible is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Ansible is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Ansible. If not, see <http://www.gnu.org/licenses/>.
"""
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: one_image
short_description: Manages OpenNebula images
description:
- Manages OpenNebula images
version_added: "2.6"
requirements:
- python-oca
options:
api_url:
description:
- URL of the OpenNebula RPC server.
- It is recommended to use HTTPS so that the username/password are not
- transferred over the network unencrypted.
- If not set then the value of the C(ONE_URL) environment variable is used.
api_username:
description:
- Name of the user to login into the OpenNebula RPC server. If not set
- then the value of the C(ONE_USERNAME) environment variable is used.
api_password:
description:
- Password of the user to login into OpenNebula RPC server. If not set
- then the value of the C(ONE_PASSWORD) environment variable is used.
id:
description:
- A C(id) of the image you would like to manage.
name:
description:
- A C(name) of the image you would like to manage.
state:
description:
- C(present) - state that is used to manage the image
- C(absent) - delete the image
- C(cloned) - clone the image
- C(renamed) - rename the image to the C(new_name)
choices: ["present", "absent", "cloned", "renamed"]
default: present
enabled:
description:
- Whether the image should be enabled or disabled.
type: bool
new_name:
description:
- A name that will be assigned to the existing or new image.
- In the case of cloning, by default C(new_name) will take the name of the origin image with the prefix 'Copy of'.
author:
- "Milan Ilic (@ilicmilan)"
'''
EXAMPLES = '''
# Fetch the IMAGE by id
- one_image:
id: 45
register: result
# Print the IMAGE properties
- debug:
msg: result
# Rename existing IMAGE
- one_image:
id: 34
state: renamed
new_name: bar-image
# Disable the IMAGE by id
- one_image:
id: 37
enabled: no
# Enable the IMAGE by name
- one_image:
name: bar-image
enabled: yes
# Clone the IMAGE by name
- one_image:
name: bar-image
state: cloned
new_name: bar-image-clone
register: result
# Delete the IMAGE by id
- one_image:
id: '{{ result.id }}'
state: absent
'''
RETURN = '''
id:
description: image id
type: int
returned: success
sample: 153
name:
description: image name
type: str
returned: success
sample: app1
group_id:
description: image's group id
type: int
returned: success
sample: 1
group_name:
description: image's group name
type: str
returned: success
sample: one-users
owner_id:
description: image's owner id
type: int
returned: success
sample: 143
owner_name:
description: image's owner name
type: str
returned: success
sample: ansible-test
state:
description: state of image instance
type: str
returned: success
sample: READY
used:
description: is image in use
type: bool
returned: success
sample: true
running_vms:
description: count of running vms that use this image
type: int
returned: success
sample: 7
'''
try:
import oca
HAS_OCA = True
except ImportError:
HAS_OCA = False
from ansible.module_utils.basic import AnsibleModule
import os
def get_image(module, client, predicate):
    """Return the first image visible to the user that satisfies *predicate*.

    Returns None when no image matches.
    """
    pool = oca.ImagePool(client)
    # Filter -2 asks OpenNebula for every image the user may Use.
    pool.info(filter=-2)
    return next((img for img in pool if predicate(img)), None)
def get_image_by_name(module, client, image_name):
    # Convenience wrapper: look an image up by its exact name.
    return get_image(module, client, lambda image: (image.name == image_name))
def get_image_by_id(module, client, image_id):
    # Convenience wrapper: look an image up by its numeric id.
    return get_image(module, client, lambda image: (image.id == image_id))
def get_image_instance(module, client, requested_id, requested_name):
    """Fetch an image by id when one was given, otherwise by name."""
    return (get_image_by_id(module, client, requested_id) if requested_id
            else get_image_by_name(module, client, requested_name))
# Index in this list == the numeric state reported by OpenNebula.
IMAGE_STATES = ['INIT', 'READY', 'USED', 'DISABLED', 'LOCKED', 'ERROR', 'CLONE', 'DELETE', 'USED_PERS', 'LOCKED_USED', 'LOCKED_USED_PERS']


def get_image_info(image):
    """Refresh *image* from the server and map it to a plain result dict."""
    image.info()
    return {
        'id': image.id,
        'name': image.name,
        'state': IMAGE_STATES[image.state],
        'running_vms': image.running_vms,
        # An image counts as "used" whenever at least one VM runs on it.
        'used': bool(image.running_vms),
        'user_name': image.uname,
        'user_id': image.uid,
        'group_name': image.gname,
        'group_id': image.gid,
    }
def wait_for_state(module, image, wait_timeout, state_predicate):
    """Poll *image* once per second until its state satisfies *state_predicate*.

    Fails the Ansible module when *wait_timeout* seconds elapse first.
    """
    import time
    deadline = time.time() + wait_timeout
    while time.time() < deadline:
        image.info()
        if state_predicate(image.state):
            return image
        time.sleep(1)
    module.fail_json(msg="Wait timeout has expired!")
def wait_for_ready(module, image, wait_timeout=60):
    # Block until the image reaches the READY state (or the timeout fires).
    return wait_for_state(module, image, wait_timeout,
                          lambda state: state == IMAGE_STATES.index('READY'))
def wait_for_delete(module, image, wait_timeout=60):
    # Block until the image reaches the DELETE state (or the timeout fires).
    return wait_for_state(module, image, wait_timeout,
                          lambda state: state == IMAGE_STATES.index('DELETE'))
def enable_image(module, client, image, enable):
    """Enable or disable *image*; return its info dict plus a 'changed' flag."""
    image.info()
    state = image.state
    # Only READY, DISABLED and ERROR images may be switched at all.
    switchable = (IMAGE_STATES.index('READY'),
                  IMAGE_STATES.index('DISABLED'),
                  IMAGE_STATES.index('ERROR'))
    if state not in switchable:
        verb = "enable" if enable else "disable"
        module.fail_json(msg="Cannot " + verb + " " + IMAGE_STATES[state] + " image!")
    # After the guard the image is READY, DISABLED or ERROR, so a change is
    # needed exactly when the current state differs from the requested target.
    target = IMAGE_STATES.index('READY') if enable else IMAGE_STATES.index('DISABLED')
    changed = state != target
    if changed and not module.check_mode:
        client.call('image.enable', image.id, enable)
    result = get_image_info(image)
    result['changed'] = changed
    return result
def clone_image(module, client, image, new_name):
    """Clone *image* under *new_name* (default: 'Copy of <name>'); idempotent."""
    if new_name is None:
        new_name = "Copy of " + image.name
    existing = get_image_by_name(module, client, new_name)
    if existing:
        # A clone with that name already exists -- nothing to do.
        result = get_image_info(existing)
        result['changed'] = False
        return result
    if image.state == IMAGE_STATES.index('DISABLED'):
        module.fail_json(msg="Cannot clone DISABLED image")
    if not module.check_mode:
        new_id = client.call('image.clone', image.id, new_name)
        image = get_image_by_id(module, client, new_id)
        wait_for_ready(module, image)
    result = get_image_info(image)
    result['changed'] = True
    return result
def rename_image(module, client, image, new_name):
    """Rename *image* to *new_name*, failing on a missing or conflicting name."""
    if new_name is None:
        module.fail_json(msg="'new_name' option has to be specified when the state is 'renamed'")
    if new_name == image.name:
        # Already carries the requested name -- report no change.
        result = get_image_info(image)
        result['changed'] = False
        return result
    clashing = get_image_by_name(module, client, new_name)
    if clashing:
        module.fail_json(msg="Name '" + new_name + "' is already taken by IMAGE with id=" + str(clashing.id))
    if not module.check_mode:
        client.call('image.rename', image.id, new_name)
    result = get_image_info(image)
    result['changed'] = True
    return result
def delete_image(module, client, image):
    """Delete *image* if it exists and is unused; return a 'changed' dict."""
    if not image:
        # Nothing to delete.
        return {'changed': False}
    if image.running_vms > 0:
        # Refuse to pull an image out from under running VMs.
        module.fail_json(msg="Cannot delete image. There are " + str(image.running_vms) + " VMs using it.")
    if not module.check_mode:
        client.call('image.delete', image.id)
        wait_for_delete(module, image)
    return {'changed': True}
def get_connection_info(module):
    """Resolve the OpenNebula endpoint credentials.

    Module parameters win; environment variables are the fallback. Fails the
    module when any of url/username/password is still missing.
    """
    url = module.params.get('api_url') or os.environ.get('ONE_URL')
    username = module.params.get('api_username') or os.environ.get('ONE_USERNAME')
    password = module.params.get('api_password') or os.environ.get('ONE_PASSWORD')
    if not (url and username and password):
        module.fail_json(msg="One or more connection parameters (api_url, api_username, api_password) were not specified")
    from collections import namedtuple
    auth_params = namedtuple('auth', ('url', 'username', 'password'))
    return auth_params(url=url, username=username, password=password)
def main():
    """Entry point: parse module arguments and dispatch the requested action."""
    # Argument specification handed to AnsibleModule.
    fields = {
        "api_url": {"required": False, "type": "str"},
        "api_username": {"required": False, "type": "str"},
        "api_password": {"required": False, "type": "str", "no_log": True},
        "id": {"required": False, "type": "int"},
        "name": {"required": False, "type": "str"},
        "state": {
            "default": "present",
            "choices": ['present', 'absent', 'cloned', 'renamed'],
            "type": "str"
        },
        "enabled": {"required": False, "type": "bool"},
        "new_name": {"required": False, "type": "str"},
    }
    module = AnsibleModule(argument_spec=fields,
                           mutually_exclusive=[['id', 'name']],
                           supports_check_mode=True)
    if not HAS_OCA:
        module.fail_json(msg='This module requires python-oca to work!')
    auth = get_connection_info(module)
    params = module.params
    id = params.get('id')
    name = params.get('name')
    state = params.get('state')
    enabled = params.get('enabled')
    new_name = params.get('new_name')
    # OpenNebula expects 'user:password' as a single session string.
    client = oca.Client(auth.username + ':' + auth.password, auth.url)
    result = {}
    # Renaming by name would be ambiguous once the rename happened, so id is required.
    if not id and state == 'renamed':
        module.fail_json(msg="Option 'id' is required when the state is 'renamed'")
    image = get_image_instance(module, client, id, name)
    # A missing image is only acceptable when we are asked to delete it.
    if not image and state != 'absent':
        if id:
            module.fail_json(msg="There is no image with id=" + str(id))
        else:
            module.fail_json(msg="There is no image with name=" + name)
    if state == 'absent':
        result = delete_image(module, client, image)
    else:
        result = get_image_info(image)
        changed = False
        result['changed'] = False
        # Enable/disable first, then clone or rename; the final 'changed'
        # flag aggregates the last operation's outcome.
        if enabled is not None:
            result = enable_image(module, client, image, enabled)
        if state == "cloned":
            result = clone_image(module, client, image, new_name)
        elif state == "renamed":
            result = rename_image(module, client, image, new_name)
        changed = changed or result['changed']
        result['changed'] = changed
    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
jeremiahyan/odoo | addons/auth_signup/models/res_users.py | 1 | 11459 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import logging
from ast import literal_eval
from collections import defaultdict
from dateutil.relativedelta import relativedelta
from odoo import api, fields, models, _
from odoo.exceptions import UserError
from odoo.osv import expression
from odoo.tools.misc import ustr
from odoo.addons.base.models.ir_mail_server import MailDeliveryException
from odoo.addons.auth_signup.models.res_partner import SignupError, now
_logger = logging.getLogger(__name__)
class ResUsers(models.Model):
    """auth_signup extensions of res.users.

    Adds a computed 'state' field (never connected vs. confirmed), token
    based signup of invited partners, and the password-reset e-mail flows.
    """
    _inherit = 'res.users'
    state = fields.Selection(compute='_compute_state', search='_search_state', string='Status',
                             selection=[('new', 'Never Connected'), ('active', 'Confirmed')])
    def _search_state(self, operator, value):
        # Translate a search on the computed 'state' into a domain on
        # log_ids: a user with at least one log entry has connected once.
        negative = operator in expression.NEGATIVE_TERM_OPERATORS
        # In case we have no value
        if not value:
            return expression.TRUE_DOMAIN if negative else expression.FALSE_DOMAIN
        if operator in ['in', 'not in']:
            if len(value) > 1:
                return expression.FALSE_DOMAIN if negative else expression.TRUE_DOMAIN
            # NOTE(review): 'comp' is only bound when value[0] is 'new' or
            # 'active'; any other value would raise NameError below --
            # presumably the selection field restricts values to these two.
            if value[0] == 'new':
                comp = '!=' if negative else '='
            if value[0] == 'active':
                comp = '=' if negative else '!='
            return [('log_ids', comp, False)]
        if operator in ['=', '!=']:
            # In case we search against anything else than new, we have to invert the operator
            if value != 'new':
                operator = expression.TERM_OPERATORS_NEGATION[operator]
            return [('log_ids', operator, False)]
        return expression.TRUE_DOMAIN
    def _compute_state(self):
        # A user that has logged in at least once is 'active', otherwise 'new'.
        for user in self:
            user.state = 'active' if user.login_date else 'new'
    @api.model
    def signup(self, values, token=None):
        """ signup a user, to either:
            - create a new user (no token), or
            - create a user for a partner (with token, but no user for partner), or
            - change the password of a user (with token, and existing user).
            :param values: a dictionary with field values that are written on user
            :param token: signup token (optional)
            :return: (dbname, login, password) for the signed up user
        """
        if token:
            # signup with a token: find the corresponding partner id
            partner = self.env['res.partner']._signup_retrieve_partner(token, check_validity=True, raise_exception=True)
            # invalidate signup token
            partner.write({'signup_token': False, 'signup_type': False, 'signup_expiration': False})
            partner_user = partner.user_ids and partner.user_ids[0] or False
            # avoid overwriting existing (presumably correct) values with geolocation data
            if partner.country_id or partner.zip or partner.city:
                values.pop('city', None)
                values.pop('country_id', None)
            if partner.lang:
                values.pop('lang', None)
            if partner_user:
                # user exists, modify it according to values
                values.pop('login', None)
                values.pop('name', None)
                partner_user.write(values)
                if not partner_user.login_date:
                    partner_user._notify_inviter()
                return (self.env.cr.dbname, partner_user.login, values.get('password'))
            else:
                # user does not exist: sign up invited user
                values.update({
                    'name': partner.name,
                    'partner_id': partner.id,
                    'email': values.get('email') or values.get('login'),
                })
                if partner.company_id:
                    values['company_id'] = partner.company_id.id
                    values['company_ids'] = [(6, 0, [partner.company_id.id])]
                partner_user = self._signup_create_user(values)
                partner_user._notify_inviter()
        else:
            # no token, sign up an external user
            values['email'] = values.get('email') or values.get('login')
            self._signup_create_user(values)
        return (self.env.cr.dbname, values.get('login'), values.get('password'))
    @api.model
    def _get_signup_invitation_scope(self):
        # 'b2b' (invite only, default) or 'b2c' (open signup).
        return self.env['ir.config_parameter'].sudo().get_param('auth_signup.invitation_scope', 'b2b')
    @api.model
    def _signup_create_user(self, values):
        """ signup a new user using the template user """
        # check that uninvited users may sign up
        if 'partner_id' not in values:
            if self._get_signup_invitation_scope() != 'b2c':
                raise SignupError(_('Signup is not allowed for uninvited users'))
        return self._create_user_from_template(values)
    def _notify_inviter(self):
        # Push a bus notification to whoever created (invited) each user.
        for user in self:
            invite_partner = user.create_uid.partner_id
            if invite_partner:
                # notify invite user that new user is connected
                title = _("%s connected", user.name)
                message = _("This is their first connection. Wish them luck.")
                self.env['bus.bus'].sendone(
                    (self._cr.dbname, 'res.partner', invite_partner.id),
                    {'type': 'user_connection', 'title': title,
                     'message': message, 'partner_id': user.partner_id.id}
                )
    def _create_user_from_template(self, values):
        # The portal template user defines the default groups/rights for
        # signed-up users; new users are copies of it.
        template_user_id = literal_eval(self.env['ir.config_parameter'].sudo().get_param('base.template_portal_user_id', 'False'))
        template_user = self.browse(template_user_id)
        if not template_user.exists():
            raise ValueError(_('Signup: invalid template user'))
        if not values.get('login'):
            raise ValueError(_('Signup: no login given for new user'))
        if not values.get('partner_id') and not values.get('name'):
            raise ValueError(_('Signup: no name or partner given for new user'))
        # create a copy of the template user (attached to a specific partner_id if given)
        values['active'] = True
        try:
            with self.env.cr.savepoint():
                return template_user.with_context(no_reset_password=True).copy(values)
        except Exception as e:
            # copy may failed if asked login is not available.
            raise SignupError(ustr(e))
    def reset_password(self, login):
        """ retrieve the user corresponding to login (login or email),
            and reset their password
        """
        users = self.search([('login', '=', login)])
        if not users:
            # Fall back to matching on the e-mail address.
            users = self.search([('email', '=', login)])
        if len(users) != 1:
            raise Exception(_('Reset password: invalid username or email'))
        return users.action_reset_password()
    def action_reset_password(self):
        """ create signup token for each user, and send their signup url by email """
        if self.env.context.get('install_mode', False):
            return
        if self.filtered(lambda user: not user.active):
            raise UserError(_("You cannot perform this action on an archived user."))
        # prepare reset password signup
        create_mode = bool(self.env.context.get('create_user'))
        # no time limit for initial invitation, only for reset password
        expiration = False if create_mode else now(days=+1)
        self.mapped('partner_id').signup_prepare(signup_type="reset", expiration=expiration)
        # send email to users with their signup url
        template = False
        if create_mode:
            try:
                template = self.env.ref('auth_signup.set_password_email', raise_if_not_found=False)
            except ValueError:
                pass
        if not template:
            template = self.env.ref('auth_signup.reset_password_email')
        assert template._name == 'mail.template'
        # Force direct e-mail delivery and strip any recipient partners so the
        # mail only goes to the user's own address.
        email_values = {
            'email_to': '${object.email|safe}',
            'email_cc': False,
            'auto_delete': True,
            'recipient_ids': [],
            'partner_ids': [],
            'scheduled_date': False,
        }
        for user in self:
            if not user.email:
                raise UserError(_("Cannot send email: user %s has no email address.", user.name))
            # TDE FIXME: make this template technical (qweb)
            with self.env.cr.savepoint():
                force_send = not(self.env.context.get('import_file', False))
                template.send_mail(user.id, force_send=force_send, raise_exception=True, email_values=email_values)
            _logger.info("Password reset email sent for user <%s> to <%s>", user.login, user.email)
    def send_unregistered_user_reminder(self, after_days=5):
        # Remind inviters about users created exactly *after_days* days ago
        # who never connected (no log entries).
        datetime_min = fields.Datetime.today() - relativedelta(days=after_days)
        datetime_max = datetime_min + relativedelta(hours=23, minutes=59, seconds=59)
        res_users_with_details = self.env['res.users'].search_read([
            ('share', '=', False),
            ('create_uid.email', '!=', False),
            ('create_date', '>=', datetime_min),
            ('create_date', '<=', datetime_max),
            ('log_ids', '=', False)], ['create_uid', 'name', 'login'])
        # group by invited by
        invited_users = defaultdict(list)
        for user in res_users_with_details:
            invited_users[user.get('create_uid')[0]].append("%s (%s)" % (user.get('name'), user.get('login')))
        # For sending mail to all the invitors about their invited users
        for user in invited_users:
            template = self.env.ref('auth_signup.mail_template_data_unregistered_users').with_context(dbname=self._cr.dbname, invited_users=invited_users[user])
            template.send_mail(user, notif_layout='mail.mail_notification_light', force_send=False)
    @api.model
    def web_create_users(self, emails):
        # Reuse still-inactive accounts matching the given e-mails instead of
        # creating duplicates; only genuinely new addresses get new users.
        inactive_users = self.search([('state', '=', 'new'), '|', ('login', 'in', emails), ('email', 'in', emails)])
        new_emails = set(emails) - set(inactive_users.mapped('email'))
        res = super(ResUsers, self).web_create_users(list(new_emails))
        if inactive_users:
            inactive_users.with_context(create_user=True).action_reset_password()
        return res
    @api.model_create_multi
    def create(self, vals_list):
        # overridden to automatically invite user to sign up
        users = super(ResUsers, self).create(vals_list)
        if not self.env.context.get('no_reset_password'):
            users_with_email = users.filtered('email')
            if users_with_email:
                try:
                    users_with_email.with_context(create_user=True).action_reset_password()
                except MailDeliveryException:
                    # Roll the signup preparation back if the mail failed.
                    users_with_email.partner_id.with_context(create_user=True).signup_cancel()
        return users
    @api.returns('self', lambda value: value.id)
    def copy(self, default=None):
        self.ensure_one()
        sup = super(ResUsers, self)
        if not default or not default.get('email'):
            # avoid sending email to the user we are duplicating
            sup = super(ResUsers, self.with_context(no_reset_password=True))
        return sup.copy(default=default)
| gpl-3.0 |
Learningtribes/edx-platform | lms/djangoapps/support/tests/test_programs.py | 27 | 2651 | # pylint: disable=missing-docstring
from django.core.urlresolvers import reverse
from django.test import TestCase
import mock
from edx_oauth2_provider.tests.factories import AccessTokenFactory, ClientFactory
from openedx.core.djangoapps.programs.tests.mixins import ProgramsApiConfigMixin
from student.tests.factories import UserFactory
class IssueProgramCertificatesViewTests(TestCase, ProgramsApiConfigMixin):
    """Tests for the support endpoint that enqueues program-certificate awards."""
    password = 'password'
    def setUp(self):
        super(IssueProgramCertificatesViewTests, self).setUp()
        self.create_programs_config()
        self.path = reverse('support:programs-certify')
        self.user = UserFactory(password=self.password, is_staff=True)
        self.data = {'username': self.user.username}
        self.headers = {}
        self.client.login(username=self.user.username, password=self.password)
    def _verify_response(self, status_code):
        """Verify that the endpoint returns the provided status code and enqueues the task if appropriate."""
        # The celery task is mocked so no worker is needed; it must be
        # enqueued exactly when the request succeeds (HTTP 200).
        with mock.patch('lms.djangoapps.support.views.programs.award_program_certificates.delay') as mock_task:
            response = self.client.post(self.path, self.data, **self.headers)
        self.assertEqual(response.status_code, status_code)
        self.assertEqual(status_code == 200, mock_task.called)
    def test_authentication_required(self):
        """Verify that the endpoint requires authentication."""
        self.client.logout()
        self._verify_response(403)
    def test_session_auth(self):
        """Verify that the endpoint supports session auth."""
        self._verify_response(200)
    def test_oauth(self):
        """Verify that the endpoint supports OAuth 2.0."""
        access_token = AccessTokenFactory(user=self.user, client=ClientFactory()).token  # pylint: disable=no-member
        self.headers['HTTP_AUTHORIZATION'] = 'Bearer ' + access_token
        # Log out of the session so only the bearer token authenticates us.
        self.client.logout()
        self._verify_response(200)
    def test_staff_permissions_required(self):
        """Verify that staff permissions are required to access the endpoint."""
        self.user.is_staff = False
        self.user.save()  # pylint: disable=no-member
        self._verify_response(403)
    def test_certification_disabled(self):
        """Verify that the endpoint returns a 400 when program certification is disabled."""
        self.create_programs_config(enable_certification=False)
        self._verify_response(400)
    def test_username_required(self):
        """Verify that the endpoint returns a 400 when a username isn't provided."""
        self.data.pop('username')
        self._verify_response(400)
| agpl-3.0 |
ericfc/django | django/contrib/contenttypes/models.py | 273 | 7798 | from __future__ import unicode_literals
import warnings
from django.apps import apps
from django.db import models
from django.db.utils import IntegrityError, OperationalError, ProgrammingError
from django.utils.deprecation import RemovedInDjango110Warning
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class ContentTypeManager(models.Manager):
    """Manager for ContentType with an in-process cache keyed per database."""
    use_in_migrations = True
    # Cache to avoid re-looking up ContentType objects all over the place.
    # This cache is shared by all the get_for_* methods.
    _cache = {}
    def get_by_natural_key(self, app_label, model):
        # Serve from the cache when possible; fall back to a DB query and
        # populate the cache on miss.
        try:
            ct = self.__class__._cache[self.db][(app_label, model)]
        except KeyError:
            ct = self.get(app_label=app_label, model=model)
            self._add_to_cache(self.db, ct)
        return ct
    def _get_opts(self, model, for_concrete_model):
        # Resolve proxy/deferred models to the Meta options we cache on.
        if for_concrete_model:
            model = model._meta.concrete_model
        elif model._deferred:
            model = model._meta.proxy_for_model
        return model._meta
    def _get_from_cache(self, opts):
        # Raises KeyError on a cache miss; callers handle it.
        key = (opts.app_label, opts.model_name)
        return self.__class__._cache[self.db][key]
    def create(self, **kwargs):
        # The 'name' field was removed; silently drop it with a deprecation
        # warning so legacy callers keep working.
        if 'name' in kwargs:
            del kwargs['name']
            warnings.warn(
                "ContentType.name field doesn't exist any longer. Please remove it from your code.",
                RemovedInDjango110Warning, stacklevel=2)
        return super(ContentTypeManager, self).create(**kwargs)
    def get_for_model(self, model, for_concrete_model=True):
        """
        Returns the ContentType object for a given model, creating the
        ContentType if necessary. Lookups are cached so that subsequent lookups
        for the same model don't hit the database.
        """
        opts = self._get_opts(model, for_concrete_model)
        try:
            return self._get_from_cache(opts)
        except KeyError:
            pass
        # The ContentType entry was not found in the cache, therefore we
        # proceed to load or create it.
        try:
            try:
                # We start with get() and not get_or_create() in order to use
                # the db_for_read (see #20401).
                ct = self.get(app_label=opts.app_label, model=opts.model_name)
            except self.model.DoesNotExist:
                # Not found in the database; we proceed to create it. This time we
                # use get_or_create to take care of any race conditions.
                ct, created = self.get_or_create(
                    app_label=opts.app_label,
                    model=opts.model_name,
                )
        except (OperationalError, ProgrammingError, IntegrityError):
            # It's possible to migrate a single app before contenttypes,
            # as it's not a required initial dependency (it's contrib!)
            # Have a nice error for this.
            raise RuntimeError(
                "Error creating new content types. Please make sure contenttypes "
                "is migrated before trying to migrate apps individually."
            )
        self._add_to_cache(self.db, ct)
        return ct
    def get_for_models(self, *models, **kwargs):
        """
        Given *models, returns a dictionary mapping {model: content_type}.
        """
        for_concrete_models = kwargs.pop('for_concrete_models', True)
        # Final results
        results = {}
        # models that aren't already in the cache
        needed_app_labels = set()
        needed_models = set()
        needed_opts = set()
        for model in models:
            opts = self._get_opts(model, for_concrete_models)
            try:
                ct = self._get_from_cache(opts)
            except KeyError:
                needed_app_labels.add(opts.app_label)
                needed_models.add(opts.model_name)
                needed_opts.add(opts)
            else:
                results[model] = ct
        if needed_opts:
            # One bulk query for all cache misses.
            cts = self.filter(
                app_label__in=needed_app_labels,
                model__in=needed_models
            )
            for ct in cts:
                model = ct.model_class()
                if model._meta in needed_opts:
                    results[model] = ct
                    needed_opts.remove(model._meta)
                    self._add_to_cache(self.db, ct)
        for opts in needed_opts:
            # These weren't in the cache, or the DB, create them.
            ct = self.create(
                app_label=opts.app_label,
                model=opts.model_name,
            )
            self._add_to_cache(self.db, ct)
            results[ct.model_class()] = ct
        return results
    def get_for_id(self, id):
        """
        Lookup a ContentType by ID. Uses the same shared cache as get_for_model
        (though ContentTypes are obviously not created on-the-fly by get_by_id).
        """
        try:
            ct = self.__class__._cache[self.db][id]
        except KeyError:
            # This could raise a DoesNotExist; that's correct behavior and will
            # make sure that only correct ctypes get stored in the cache dict.
            ct = self.get(pk=id)
            self._add_to_cache(self.db, ct)
        return ct
    def clear_cache(self):
        """
        Clear out the content-type cache. This needs to happen during database
        flushes to prevent caching of "stale" content type IDs (see
        django.contrib.contenttypes.management.update_contenttypes for where
        this gets called).
        """
        self.__class__._cache.clear()
    def _add_to_cache(self, using, ct):
        """Insert a ContentType into the cache."""
        # Note it's possible for ContentType objects to be stale; model_class() will return None.
        # Hence, there is no reliance on model._meta.app_label here, just using the model fields instead.
        # Each entry is cached under both its (app_label, model) key and its id.
        key = (ct.app_label, ct.model)
        self.__class__._cache.setdefault(using, {})[key] = ct
        self.__class__._cache.setdefault(using, {})[ct.id] = ct
@python_2_unicode_compatible
class ContentType(models.Model):
    """Maps an (app_label, model) pair to the corresponding model class."""
    app_label = models.CharField(max_length=100)
    model = models.CharField(_('python model class name'), max_length=100)
    objects = ContentTypeManager()
    class Meta:
        verbose_name = _('content type')
        verbose_name_plural = _('content types')
        db_table = 'django_content_type'
        unique_together = (('app_label', 'model'),)
    def __str__(self):
        return self.name
    @property
    def name(self):
        # Human-readable name: the model's verbose_name, or the raw model
        # string when the model class no longer exists (stale content type).
        model = self.model_class()
        if not model:
            return self.model
        return force_text(model._meta.verbose_name)
    def model_class(self):
        "Returns the Python model class for this type of content."
        try:
            return apps.get_model(self.app_label, self.model)
        except LookupError:
            # The app/model was removed; callers must handle None.
            return None
    def get_object_for_this_type(self, **kwargs):
        """
        Returns an object of this type for the keyword arguments given.
        Basically, this is a proxy around this object_type's get_object() model
        method. The ObjectNotExist exception, if thrown, will not be caught,
        so code that calls this method should catch it.
        """
        return self.model_class()._base_manager.using(self._state.db).get(**kwargs)
    def get_all_objects_for_this_type(self, **kwargs):
        """
        Returns all objects of this type for the keyword arguments given.
        """
        return self.model_class()._base_manager.using(self._state.db).filter(**kwargs)
    def natural_key(self):
        # Used by serialization to reference content types portably.
        return (self.app_label, self.model)
| bsd-3-clause |
jonathonwalz/ansible | lib/ansible/module_utils/gcp.py | 46 | 34965 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny <franck.cuny@gmail.com>, 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import json
import os
import time
import traceback
from distutils.version import LooseVersion
# libcloud
try:
import libcloud
HAS_LIBCLOUD_BASE = True
except ImportError:
HAS_LIBCLOUD_BASE = False
# google-auth
try:
import google.auth
from google.oauth2 import service_account
HAS_GOOGLE_AUTH = True
except ImportError:
HAS_GOOGLE_AUTH = False
# google-python-api
try:
import google_auth_httplib2
from httplib2 import Http
from googleapiclient.http import set_user_agent
from googleapiclient.errors import HttpError
from apiclient.discovery import build
HAS_GOOGLE_API_LIB = True
except ImportError:
HAS_GOOGLE_API_LIB = False
import ansible.module_utils.six.moves.urllib.parse as urlparse
GCP_DEFAULT_SCOPES = ['https://www.googleapis.com/auth/cloud-platform']
def _get_gcp_ansible_credentials(module):
"""Helper to fetch creds from AnsibleModule object."""
service_account_email = module.params.get('service_account_email', None)
# Note: pem_file is discouraged and will be deprecated
credentials_file = module.params.get('pem_file', None) or module.params.get(
'credentials_file', None)
project_id = module.params.get('project_id', None)
return (service_account_email, credentials_file, project_id)
def _get_gcp_environ_var(var_name, default_value):
"""Wrapper around os.environ.get call."""
return os.environ.get(
var_name, default_value)
def _get_gcp_environment_credentials(service_account_email, credentials_file, project_id):
    """Fill in any missing credential pieces from environment variables.

    Values supplied by the caller always win; for each missing value the
    documented environment variables are consulted in order of preference.
    """
    service_account_email = service_account_email or _get_gcp_environ_var('GCE_EMAIL', None)
    credentials_file = (credentials_file or
                        _get_gcp_environ_var('GCE_CREDENTIALS_FILE_PATH', None) or
                        _get_gcp_environ_var('GOOGLE_APPLICATION_CREDENTIALS', None) or
                        _get_gcp_environ_var('GCE_PEM_FILE_PATH', None))
    project_id = (project_id or
                  _get_gcp_environ_var('GCE_PROJECT', None) or
                  _get_gcp_environ_var('GOOGLE_CLOUD_PROJECT', None))
    return (service_account_email, credentials_file, project_id)
def _get_gcp_libcloud_credentials(module, service_account_email=None, credentials_file=None, project_id=None):
    """
    Helper to look for libcloud secrets.py file.
    Note: This has an 'additive' effect right now, filling in
    vars not specified elsewhere, in order to keep legacy functionality.
    This method of specifying credentials will be deprecated, otherwise
    we'd look to make it more restrictive with an all-vars-or-nothing approach.
    :param module: initialized Ansible module object (used for deprecation warnings)
    :type module: `class AnsibleModule`
    :param service_account: GCP service account email used to make requests
    :type service_account: ``str`` or None
    :param credentials_file: Path on disk to credentials file
    :type credentials_file: ``str`` or None
    :param project_id: GCP project ID.
    :type project_id: ``str`` or None
    :return: tuple of (service_account, credentials_file, project_id)
    :rtype: ``tuple`` of ``str``
    """
    # Only consult secrets.py if at least one of the two key values is missing.
    if service_account_email is None or credentials_file is None:
        try:
            # NOTE(review): on Python 3 the stdlib 'secrets' module imports
            # successfully, so this deprecation warning fires even when no
            # libcloud secrets.py is on the path — confirm intended.
            import secrets
            module.deprecate(msg=("secrets file found at '%s'. This method of specifying "
                                  "credentials is deprecated. Please use env vars or "
                                  "Ansible YAML files instead" % (secrets.__file__)), version=2.5)
        except ImportError:
            secrets = None
        # GCE_PARAMS is the legacy (email, key_path) tuple used by libcloud.
        if hasattr(secrets, 'GCE_PARAMS'):
            if not service_account_email:
                service_account_email = secrets.GCE_PARAMS[0]
            if not credentials_file:
                credentials_file = secrets.GCE_PARAMS[1]
        # getattr on None simply yields the {} default when secrets is absent.
        keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
        if not project_id:
            project_id = keyword_params.get('project', None)
    return (service_account_email, credentials_file, project_id)
def _get_gcp_credentials(module, require_valid_json=True, check_libcloud=False):
    """
    Obtain GCP credentials by trying various methods.
    There are 3 ways to specify GCP credentials:
    1. Specify via Ansible module parameters (recommended).
    2. Specify via environment variables.  Two sets of env vars are available:
       a) GOOGLE_CLOUD_PROJECT, GOOGLE_CREDENTIALS_APPLICATION (preferred)
       b) GCE_PROJECT, GCE_CREDENTIAL_FILE_PATH, GCE_EMAIL (legacy, not recommended; req'd if
          using p12 key)
    3. Specify via libcloud secrets.py file (deprecated).
    There are 3 helper functions to assist in the above.
    Regardless of method, the user also has the option of specifying a JSON
    file or a p12 file as the credentials file.  JSON is strongly recommended and
    p12 will be removed in the future.
    Additionally, flags may be set to require valid json and check the libcloud
    version.
    AnsibleModule.fail_json is called only if the project_id cannot be found.
    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`
    :param require_valid_json: If true, require credentials to be valid JSON.  Default is True.
    :type require_valid_json: ``bool``
    :params check_libcloud: If true, check the libcloud version available to see if
                            JSON creds are supported.
    :type check_libcloud: ``bool``
    :return:  {'service_account_email': service_account_email,
               'credentials_file': credentials_file,
                'project_id': project_id}
    :rtype: ``dict``
    """
    # Source 1: explicit Ansible module parameters.
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_ansible_credentials(module)
    # If any of the values are not given as parameters, check the appropriate
    # environment variables.
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_environment_credentials(service_account_email,
                                                    credentials_file, project_id)
    # If we still don't have one or more of our credentials, attempt to
    # get the remaining values from the libcloud secrets file.
    (service_account_email,
     credentials_file,
     project_id) = _get_gcp_libcloud_credentials(module, service_account_email,
                                                 credentials_file, project_id)
    # Only a missing project_id is fatal; email/file may legitimately be empty
    # (e.g. Application Default Credentials).
    if credentials_file is None or project_id is None or service_account_email is None:
        if check_libcloud is True:
            if project_id is None:
                # TODO(supertom): this message is legacy and integration tests
                # depend on it.
                module.fail_json(msg='Missing GCE connection parameters in libcloud '
                                     'secrets file.')
        else:
            if project_id is None:
                module.fail_json(msg=('GCP connection error: unable to determine project (%s) or '
                                      'credentials file (%s)' % (project_id, credentials_file)))
        # Set these fields to empty strings if they are None
        # consumers of this will make the distinction between an empty string
        # and None.
        if credentials_file is None:
            credentials_file = ''
        if service_account_email is None:
            service_account_email = ''
    # ensure the credentials file is found and is in the proper format.
    if credentials_file:
        _validate_credentials_file(module, credentials_file,
                                   require_valid_json=require_valid_json,
                                   check_libcloud=check_libcloud)
    return {'service_account_email': service_account_email,
            'credentials_file': credentials_file,
            'project_id': project_id}
def _validate_credentials_file(module, credentials_file, require_valid_json=True, check_libcloud=False):
    """
    Check for valid credentials file.
    Optionally check for JSON format and if libcloud supports JSON.
    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`
    :param credentials_file: path to file on disk
    :type credentials_file: ``str``.  Complete path to file on disk.
    :param require_valid_json: If true, require credentials to be valid JSON.  Default is True.
    :type require_valid_json: ``bool``
    :params check_libcloud: If true, check the libcloud version available to see if
                            JSON creds are supported.
    :type check_libcloud: ``bool``
    :returns: True
    :rtype: ``bool``
    """
    try:
        # Try to read credentials as JSON
        with open(credentials_file) as credentials:
            json.loads(credentials.read())
            # If the credentials are proper JSON and we do not have the minimum
            # required libcloud version, bail out and return a descriptive
            # error
            if check_libcloud and LooseVersion(libcloud.__version__) < '0.17.0':
                module.fail_json(msg='Using JSON credentials but libcloud minimum version not met. '
                                     'Upgrade to libcloud>=0.17.0.')
            return True
    except IOError as e:
        # File missing/unreadable is always fatal.
        module.fail_json(msg='GCP Credentials File %s not found.' %
                         credentials_file, changed=False)
        # fail_json normally exits; this return is presumably a defensive
        # fallback for callers/tests that stub it out — TODO confirm.
        return False
    except ValueError as e:
        # File exists but is not JSON: fatal only when JSON is required,
        # otherwise assume a legacy p12 key and warn about deprecation.
        if require_valid_json:
            module.fail_json(
                msg='GCP Credentials File %s invalid.  Must be valid JSON.' % credentials_file, changed=False)
        else:
            module.deprecate(msg=("Non-JSON credentials file provided. This format is deprecated. "
                                  " Please generate a new JSON key from the Google Cloud console"),
                             version=2.5)
            return True
def gcp_connect(module, provider, get_driver, user_agent_product, user_agent_version):
    """Return a Google libcloud driver connection.

    Calls module.fail_json on missing libcloud, credential problems, or any
    driver construction error; only returns on success.
    """
    if not HAS_LIBCLOUD_BASE:
        module.fail_json(msg='libcloud must be installed to use this module')
    # require_valid_json=False: the libcloud path still accepts legacy p12 keys.
    creds = _get_gcp_credentials(module,
                                 require_valid_json=False,
                                 check_libcloud=True)
    try:
        gcp = get_driver(provider)(creds['service_account_email'], creds['credentials_file'],
                                   datacenter=module.params.get('zone', None),
                                   project=creds['project_id'])
        # Tag requests with the calling module's identity.
        gcp.connection.user_agent_append("%s/%s" % (
            user_agent_product, user_agent_version))
    except (RuntimeError, ValueError) as e:
        module.fail_json(msg=str(e), changed=False)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
    return gcp
def get_google_cloud_credentials(module, scopes=None):
    """
    Get credentials object for use with Google Cloud client.
    Attempts to obtain credentials by calling _get_gcp_credentials. If those are
    not present will attempt to connect via Application Default Credentials.
    To connect via libcloud, don't use this function, use gcp_connect instead.  For
    Google Python API Client, see get_google_api_auth for how to connect.
    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
    Google Cloud example:
      creds, params = get_google_cloud_credentials(module, scopes, user_agent_product, user_agent_version)
      pubsub_client = pubsub.Client(project=params['project_id'], credentials=creds)
      pubsub_client.user_agent = 'ansible-pubsub-0.1'
      ...
    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`
    :param scopes: list of scopes
    :type module: ``list`` of URIs
    :returns: A tuple containing (google authorized) credentials object and
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    # FIX: the default was the mutable literal `scopes=[]`, which is shared
    # across all calls; normalize None to a fresh list instead.
    if scopes is None:
        scopes = []
    if not HAS_GOOGLE_AUTH:
        module.fail_json(msg='Please install google-auth.')
    conn_params = _get_gcp_credentials(module,
                                       require_valid_json=True,
                                       check_libcloud=False)
    try:
        if conn_params['credentials_file']:
            # Explicit service-account key file takes precedence.
            credentials = service_account.Credentials.from_service_account_file(
                conn_params['credentials_file'])
            if scopes:
                credentials = credentials.with_scopes(scopes)
        else:
            # Fall back to Application Default Credentials.
            (credentials, project_id) = google.auth.default(
                scopes=scopes)
            if project_id is not None:
                conn_params['project_id'] = project_id
        return (credentials, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # fail_json normally exits; defensive fallback return.
        return (None, None)
def get_google_api_auth(module, scopes=None, user_agent_product='ansible-python-api', user_agent_version='NA'):
    """
    Authentication for use with google-python-api-client.
    Function calls get_google_cloud_credentials, which attempts to assemble the credentials
    from various locations.  Next it attempts to authenticate with Google.
    This function returns an httplib2 (compatible) object that can be provided to the Google Python API client.
    For libcloud, don't use this function, use gcp_connect instead.  For Google Cloud, See
    get_google_cloud_credentials for how to connect.
    For more information on Google's client library options for Python, see:
    U(https://cloud.google.com/apis/docs/client-libraries-explained#google_api_client_libraries)
    Google API example:
      http_auth, conn_params = get_google_api_auth(module, scopes, user_agent_product, user_agent_version)
      service = build('myservice', 'v1', http=http_auth)
      ...
    :param module: initialized Ansible module object
    :type module: `class AnsibleModule`
    :param scopes: list of scopes
    :type scopes: ``list`` of URIs
    :param user_agent_product: User agent product.  eg: 'ansible-python-api'
    :type user_agent_product: ``str``
    :param user_agent_version: Version string to append to product.  eg: 'NA' or '0.1'
    :type user_agent_version: ``str``
    :returns: A tuple containing (google authorized) httplib2 request object and a
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    if not HAS_GOOGLE_API_LIB:
        module.fail_json(msg="Please install google-api-python-client library")
    # FIX: default was the mutable literal `scopes=[]` (shared across calls).
    # None and [] are both falsy, so the existing default-scopes fallback
    # below preserves the original behavior exactly.
    if not scopes:
        scopes = GCP_DEFAULT_SCOPES
    try:
        (credentials, conn_params) = get_google_cloud_credentials(module, scopes)
        # Brand outgoing requests with the calling module's identity.
        http = set_user_agent(Http(), '%s-%s' %
                              (user_agent_product, user_agent_version))
        http_auth = google_auth_httplib2.AuthorizedHttp(credentials, http=http)
        return (http_auth, conn_params)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
        # fail_json normally exits; defensive fallback return.
        return (None, None)
def get_google_api_client(module, service, user_agent_product, user_agent_version,
                          scopes=None, api_version='v1'):
    """
    Get the discovery-based python client. Use when a cloud client is not available.
    client = get_google_api_client(module, 'compute', user_agent_product=USER_AGENT_PRODUCT,
                                   user_agent_version=USER_AGENT_VERSION)
    :returns: A tuple containing the authorized client to the specified service and a
              params dict {'service_account_email': '...', 'credentials_file': '...', 'project_id': ...}
    :rtype: ``tuple``
    """
    effective_scopes = scopes or GCP_DEFAULT_SCOPES
    http_auth, conn_params = get_google_api_auth(
        module, scopes=effective_scopes,
        user_agent_product=user_agent_product,
        user_agent_version=user_agent_version)
    return (build(service, api_version, http=http_auth), conn_params)
def check_min_pkg_version(pkg_name, minimum_version):
    """Return True if the installed version of *pkg_name* is >= *minimum_version*.

    :param pkg_name: distribution name as known to pkg_resources
    :type pkg_name: ``str``
    :param minimum_version: minimum acceptable version string, e.g. '0.17.0'
    :type minimum_version: ``str``
    :returns: True when installed and new enough; False when missing or on
              any lookup failure (deliberate best-effort).
    :rtype: ``bool``
    """
    from pkg_resources import get_distribution
    try:
        installed_version = get_distribution(pkg_name).version
        # Compare version-to-version (not version-to-string) so the intent
        # is explicit; e.g. '0.10' correctly compares >= '0.9'.
        return LooseVersion(installed_version) >= LooseVersion(minimum_version)
    except Exception:
        # Missing distribution or unparsable metadata simply means the
        # requirement is not satisfied.
        return False
def unexpected_error_msg(error):
    """Build a diagnostic string from *error* plus the current traceback."""
    detail = traceback.format_exc()
    return 'Unexpected response: (%s). Detail: %s' % (str(error), detail)
def get_valid_location(module, driver, location, location_type='zone'):
    """Resolve *location* via the libcloud driver; fail the module if unknown.

    :param location_type: 'zone' (default) or anything else for region lookup
    :returns: the driver's zone/region object for *location*
    """
    lookup = driver.ex_get_zone if location_type == 'zone' else driver.ex_get_region
    found = lookup(location)
    if found is None:
        link = 'https://cloud.google.com/compute/docs/regions-zones/regions-zones#available'
        module.fail_json(msg=('%s %s is invalid. Please see the list of '
                              'available %s at %s' % (
                                  location_type, location, location_type, link)),
                         changed=False)
    return found
def check_params(params, field_list):
    """
    Validate *params* against a list of field descriptors.
    Use this in function definitions if they require specific fields
    to be present.
    :param params: structure that contains the fields
    :type params: ``dict``
    :param field_list: list of dict representing the fields
                       [{'name': str, 'required': True/False', 'type': cls}]
    :type field_list: ``list`` of ``dict``
    :return True or raises ValueError
    :rtype: ``bool`` or `class:ValueError`
    """
    for field in field_list:
        name = field['name']
        if name not in params:
            # An absent field is only an error when it is marked required.
            if field.get('required') is True:
                raise ValueError(("%s is required and must be of type: %s" %
                                  (field['name'], str(field['type']))))
            continue
        value = params[name]
        if not isinstance(value, field['type']):
            raise ValueError(("%s must be of type: %s. %s (%s) provided." % (
                field['name'], str(field['type']), params[field['name']],
                type(params[field['name']]))))
        # Enumerated values, if declared.
        if 'values' in field and value not in field['values']:
            raise ValueError(("%s must be one of: %s" % (
                field['name'], ','.join(field['values']))))
        # Numeric bounds apply only to integer values.
        if isinstance(value, int):
            if 'min' in field and value < field['min']:
                raise ValueError(("%s must be greater than or equal to: %s" % (
                    field['name'], field['min'])))
            if 'max' in field and value > field['max']:
                raise ValueError("%s must be less than or equal to: %s" % (
                    field['name'], field['max']))
    return True
class GCPUtils(object):
    """
    Helper utilities for GCP.

    Stateless collection of static helpers: parameter-name conversion,
    discovery-client request execution (including long-running operation
    polling) and GCP URL parsing.
    """
    @staticmethod
    def underscore_to_camel(txt):
        """Convert a snake_case string to camelCase (first word kept lowercase)."""
        return txt.split('_')[0] + ''.join(x.capitalize() or '_' for x in txt.split('_')[1:])
    @staticmethod
    def remove_non_gcp_params(params):
        """
        Remove params if found.

        Strips Ansible-level keys (currently only 'state') that must not be
        sent in a GCP API request body.  Mutates and returns *params*.
        """
        params_to_remove = ['state']
        for p in params_to_remove:
            if p in params:
                del params[p]
        return params
    @staticmethod
    def params_to_gcp_dict(params, resource_name=None):
        """
        Recursively convert ansible params to GCP Params.
        Keys are converted from snake to camelCase
        ex: default_service to defaultService
        Handles lists, dicts and strings
        special provision for the resource name
        """
        # Non-dict leaves (strings, numbers, ...) pass through unchanged.
        if not isinstance(params, dict):
            return params
        gcp_dict = {}
        params = GCPUtils.remove_non_gcp_params(params)
        for k, v in params.items():
            gcp_key = GCPUtils.underscore_to_camel(k)
            if isinstance(v, dict):
                retval = GCPUtils.params_to_gcp_dict(v)
                gcp_dict[gcp_key] = retval
            elif isinstance(v, list):
                gcp_dict[gcp_key] = [GCPUtils.params_to_gcp_dict(x) for x in v]
            else:
                # The field matching resource_name is mapped to the GCP
                # canonical key 'name'.
                if resource_name and k == resource_name:
                    gcp_dict['name'] = v
                else:
                    gcp_dict[gcp_key] = v
        return gcp_dict
    @staticmethod
    def execute_api_client_req(req, client=None, raw=True,
                               operation_timeout=180, poll_interval=5,
                               raise_404=True):
        """
        General python api client interaction function.
        For use with google-api-python-client, or clients created
        with get_google_api_client function
        Not for use with Google Cloud client libraries
        For long-running operations, we make an immediate query and then
        sleep poll_interval before re-querying. After the request is done
        we rebuild the request with a get method and return the result.

        With raw=True (the default) the response is returned untouched;
        otherwise compute operations are polled to completion and 'items'
        is unwrapped from list responses.
        """
        try:
            resp = req.execute()
            if not resp:
                return None
            if raw:
                return resp
            # Compute long-running operation: poll until DONE and fetch result.
            if resp['kind'] == 'compute#operation':
                resp = GCPUtils.execute_api_client_operation_req(req, resp,
                                                                 client,
                                                                 operation_timeout,
                                                                 poll_interval)
            if 'items' in resp:
                return resp['items']
            return resp
        except HttpError as h:
            # Note: 404s can be generated (incorrectly) for dependent
            # resources not existing. We let the caller determine if
            # they want 404s raised for their invocation.
            if h.resp.status == 404 and not raise_404:
                return None
            else:
                raise
        except Exception:
            raise
    @staticmethod
    def execute_api_client_operation_req(orig_req, op_resp, client,
                                         operation_timeout=180, poll_interval=5):
        """
        Poll an operation for a result.

        Polls client.globalOperations until the operation reaches 'DONE' or
        *operation_timeout* seconds elapse (then GCPOperationTimeoutError is
        raised).  On completion, re-fetches and returns the affected entity
        (or True for deletes).
        """
        parsed_url = GCPUtils.parse_gcp_url(orig_req.uri)
        project_id = parsed_url['project']
        resource_name = GCPUtils.get_gcp_resource_from_methodId(
            orig_req.methodId)
        resource = GCPUtils.build_resource_from_name(client, resource_name)
        start_time = time.time()
        complete = False
        # NOTE(review): 'attempts' is incremented but never read — confirm
        # whether it was meant to bound the loop.
        attempts = 1
        while not complete:
            if start_time + operation_timeout >= time.time():
                op_req = client.globalOperations().get(
                    project=project_id, operation=op_resp['name'])
                op_resp = op_req.execute()
                if op_resp['status'] != 'DONE':
                    time.sleep(poll_interval)
                    attempts += 1
                else:
                    complete = True
                    if op_resp['operationType'] == 'delete':
                        # don't wait for the delete
                        return True
                    elif op_resp['operationType'] in ['insert', 'update', 'patch']:
                        # TODO(supertom): Isolate 'build-new-request' stuff.
                        resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
                            resource_name)
                        if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
                            # For inserts the entity name comes from the
                            # operation's targetLink rather than the original URL.
                            parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
                                'entity_name']
                        args = {'project': project_id,
                                resource_name_singular: parsed_url['entity_name']}
                        new_req = resource.get(**args)
                        resp = new_req.execute()
                        return resp
                    else:
                        # assuming multiple entities, do a list call.
                        new_req = resource.list(project=project_id)
                        resp = new_req.execute()
                        return resp
            else:
                # operation didn't complete on time.
                raise GCPOperationTimeoutError("Operation timed out: %s" % (
                    op_resp['targetLink']))
    @staticmethod
    def build_resource_from_name(client, resource_name):
        """Return client.<resource_name>() or raise NotImplementedError if absent."""
        try:
            method = getattr(client, resource_name)
            return method()
        except AttributeError:
            raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
                                                                        client))
    @staticmethod
    def get_gcp_resource_from_methodId(methodId):
        """Extract the resource component from a 'service.resource.verb' methodId."""
        try:
            parts = methodId.split('.')
            if len(parts) != 3:
                return None
            else:
                return parts[1]
        except AttributeError:
            # methodId was None or not a string.
            return None
    @staticmethod
    def get_entity_name_from_resource_name(resource_name):
        """Singularize a collection name, e.g. 'urlMaps' -> 'urlMap', 'proxies' -> 'proxy'."""
        if not resource_name:
            return None
        try:
            # Chop off global or region prefixes
            # NOTE(review): the 'regional' branch strips only 'region',
            # leaving an 'al...' remainder — confirm intended.
            if resource_name.startswith('global'):
                resource_name = resource_name.replace('global', '')
            elif resource_name.startswith('regional'):
                resource_name = resource_name.replace('region', '')
            # ensure we have a lower case first letter
            resource_name = resource_name[0].lower() + resource_name[1:]
            if resource_name[-3:] == 'ies':
                return resource_name.replace(
                    resource_name[-3:], 'y')
            if resource_name[-1] == 's':
                return resource_name[:-1]
            return resource_name
        except AttributeError:
            return None
    @staticmethod
    def parse_gcp_url(url):
        """
        Parse GCP urls and return dict of parts.
        Supported URL structures:
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME
        :param url: GCP-generated URL, such as a selflink or resource location.
        :type url: ``str``
        :return: dictionary of parts. Includes stanard components of urlparse, plus
                 GCP-specific 'service', 'api_version', 'project' and
                 'resource_name' keys. Optionally, 'zone', 'region', 'entity_name'
                 and 'method_name', if applicable.
        :rtype: ``dict``
        """
        p = urlparse.urlparse(url)
        if not p:
            return None
        else:
            # we add extra items such as
            # zone, region and resource_name
            url_parts = {}
            url_parts['scheme'] = p.scheme
            url_parts['host'] = p.netloc
            url_parts['path'] = p.path
            # Normalize away a leading '/' so positional indexing works below.
            if p.path.find('/') == 0:
                url_parts['path'] = p.path[1:]
            url_parts['params'] = p.params
            url_parts['fragment'] = p.fragment
            url_parts['query'] = p.query
            url_parts['project'] = None
            url_parts['service'] = None
            url_parts['api_version'] = None
            path_parts = url_parts['path'].split('/')
            url_parts['service'] = path_parts[0]
            url_parts['api_version'] = path_parts[1]
            if path_parts[2] == 'projects':
                url_parts['project'] = path_parts[3]
            else:
                # invalid URL
                raise GCPInvalidURLError('unable to parse: %s' % url)
            # Global resources: segments after the 'global' marker are
            # resource / entity / method, depending on how many remain.
            if 'global' in path_parts:
                url_parts['global'] = True
                idx = path_parts.index('global')
                if len(path_parts) - idx == 4:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]
                    url_parts['method_name'] = path_parts[idx + 3]
                if len(path_parts) - idx == 3:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]
                if len(path_parts) - idx == 2:
                    url_parts['resource_name'] = path_parts[idx + 1]
                if len(path_parts) - idx < 2:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)
            # Regional/zonal resources: the location name follows the marker,
            # so offsets are one larger than in the global case.
            elif 'regions' in path_parts or 'zones' in path_parts:
                idx = -1
                if 'regions' in path_parts:
                    idx = path_parts.index('regions')
                    url_parts['region'] = path_parts[idx + 1]
                else:
                    idx = path_parts.index('zones')
                    url_parts['zone'] = path_parts[idx + 1]
                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]
                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]
                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)
            else:
                # no location in URL.
                idx = path_parts.index('projects')
                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]
                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]
                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)
            return url_parts
    @staticmethod
    def build_googleapi_url(project, api_version='v1', service='compute'):
        """Return the base googleapis URL for *project* under *service*/*api_version*."""
        return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)
    @staticmethod
    def filter_gcp_fields(params, excluded_fields=None):
        """Recursively drop server-populated fields (id, selfLink, ...) from *params*."""
        new_params = {}
        if not excluded_fields:
            excluded_fields = ['creationTimestamp', 'id', 'kind',
                               'selfLink', 'fingerprint', 'description']
        if isinstance(params, list):
            new_params = [GCPUtils.filter_gcp_fields(
                x, excluded_fields) for x in params]
        elif isinstance(params, dict):
            for k in params.keys():
                if k not in excluded_fields:
                    new_params[k] = GCPUtils.filter_gcp_fields(
                        params[k], excluded_fields)
        else:
            # Scalar leaf: returned unchanged.
            new_params = params
        return new_params
    @staticmethod
    def are_params_equal(p1, p2):
        """
        Check if two params dicts are equal.
        TODO(supertom): need a way to filter out URLs, or they need to be built

        Comparison is performed after stripping server-populated fields via
        filter_gcp_fields.
        """
        filtered_p1 = GCPUtils.filter_gcp_fields(p1)
        filtered_p2 = GCPUtils.filter_gcp_fields(p2)
        if filtered_p1 != filtered_p2:
            return False
        return True
class GCPError(Exception):
    """Base class for all GCP-related errors raised by this module."""
    pass
class GCPOperationTimeoutError(GCPError):
    """Raised when polling a long-running operation exceeds its timeout."""
    pass
class GCPInvalidURLError(GCPError):
    """Raised when a GCP URL does not match any of the expected structures."""
    pass
| gpl-3.0 |
nRFMesh/mbed-os | features/FEATURE_BLE/targets/TARGET_NORDIC/TARGET_MCU_NRF51822/sdk/script/replace_headers.py | 77 | 1181 | # Copyright (c) 2015-2016 ARM Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
# Read the canonical copyright header that should prefix every source file.
with open("copyright_header.txt", "r") as fd:
    header = fd.read()
path = "../source/nordic_sdk"
for root, dirs, files in os.walk(path):
    for fn in [os.path.join(root, x) for x in files]:
        # Open read/write so we can rewrite the file in place.
        with open(fn, "r+") as fd:
            print("+" * 35)
            print(fn)
            s = fd.read()
            # First /* ... */ block is assumed to be the license header.
            start = s.find("/*")
            end = s.find("*/")
            copyright_str = s[start:end + 2]
            if "copyright (c)" not in copyright_str.lower():
                # No copyright notice at all: prepend the canonical header.
                s = header + "\n\n" + s
            elif copyright_str != header:
                # FIX: original used 'is not' (object identity), which is
                # always true for distinct string objects and so rewrote
                # every file; compare by value instead.
                s = s.replace(copyright_str, header)
            fd.seek(0)
            fd.write(s)
            fd.truncate()
| apache-2.0 |
willcode/gnuradio | grc/gui/MainWindow.py | 5 | 16020 | """
Copyright 2008, 2009, 2011 Free Software Foundation, Inc.
This file is part of GNU Radio
SPDX-License-Identifier: GPL-2.0-or-later
"""
import os
import logging
from gi.repository import Gtk, Gdk, GObject
from . import Bars, Actions, Utils
from .BlockTreeWindow import BlockTreeWindow
from .Console import Console
from .VariableEditor import VariableEditor
from .Constants import \
NEW_FLOGRAPH_TITLE, DEFAULT_CONSOLE_WINDOW_WIDTH
from .Dialogs import TextDisplay, MessageDialogWrapper
from .Notebook import Notebook, Page
from ..core import Messages
log = logging.getLogger(__name__)
############################################################
# Main window
############################################################
class MainWindow(Gtk.ApplicationWindow):
"""The topmost window with menus, the tool bar, and other major windows."""
# Constants the action handler can use to indicate which panel visibility to change.
BLOCKS = 0
CONSOLE = 1
VARIABLES = 2
    def __init__(self, app, platform):
        """
        MainWindow constructor.

        Setup the menu, toolbar, flow graph editor notebook, block selection window...

        Args:
            app: the Gtk.Application this window belongs to
            platform: GRC platform object (provides config and block library)
        """
        Gtk.ApplicationWindow.__init__(self, title="GNU Radio Companion", application=app)
        log.debug("__init__()")
        self._platform = platform
        self.app = app
        self.config = platform.config
        # Add all "win" actions to the local
        for x in Actions.get_actions():
            if x.startswith("win."):
                self.add_action(Actions.actions[x])
        # Setup window
        vbox = Gtk.VBox()
        self.add(vbox)
        icon_theme = Gtk.IconTheme.get_default()
        icon = icon_theme.lookup_icon("gnuradio-grc", 48, 0)
        if not icon:
            # Set default window icon
            self.set_icon_from_file(os.path.dirname(os.path.abspath(__file__)) + "/icon.png")
        else:
            # Use gnuradio icon
            self.set_icon(icon.load_icon())
        # Create the menu bar and toolbar
        generate_modes = platform.get_generate_options()
        # This needs to be replaced
        # Have an option for either the application menu or this menu
        self.menu = Bars.Menu()
        self.menu_bar = Gtk.MenuBar.new_from_model(self.menu)
        vbox.pack_start(self.menu_bar, False, False, 0)
        self.tool_bar = Bars.Toolbar()
        self.tool_bar.set_hexpand(True)
        # Show the toolbar
        self.tool_bar.show()
        vbox.pack_start(self.tool_bar, False, False, 0)
        # Main parent container for the different panels
        self.main = Gtk.HPaned() #(orientation=Gtk.Orientation.HORIZONTAL)
        vbox.pack_start(self.main, True, True, 0)
        # Create the notebook
        self.notebook = Notebook()
        self.page_to_be_closed = None
        self.current_page = None  # type: Page
        # Create the console window
        self.console = Console()
        # Create the block tree and variable panels
        self.btwin = BlockTreeWindow(platform)
        self.btwin.connect('create_new_block', self._add_block_to_current_flow_graph)
        self.vars = VariableEditor()
        self.vars.connect('create_new_block', self._add_block_to_current_flow_graph)
        self.vars.connect('remove_block', self._remove_block_from_current_flow_graph)
        # Figure out which place to put the variable editor
        self.left = Gtk.VPaned() #orientation=Gtk.Orientation.VERTICAL)
        self.right = Gtk.VPaned() #orientation=Gtk.Orientation.VERTICAL)
        self.left_subpanel = Gtk.HPaned() #orientation=Gtk.Orientation.HORIZONTAL)
        self.variable_panel_sidebar = self.config.variable_editor_sidebar()
        if self.variable_panel_sidebar:
            # Sidebar mode: the variable editor shares the right pane with
            # the block tree.
            self.left.pack1(self.notebook)
            self.left.pack2(self.console, False)
            self.right.pack1(self.btwin)
            self.right.pack2(self.vars, False)
        else:
            # Put the variable editor in a panel with the console
            self.left.pack1(self.notebook)
            self.left_subpanel.pack1(self.console, shrink=False)
            self.left_subpanel.pack2(self.vars, resize=False, shrink=True)
            self.left.pack2(self.left_subpanel, False)
            # Create the right panel
            self.right.pack1(self.btwin)
        self.main.pack1(self.left)
        self.main.pack2(self.right, False)
        # Load preferences and show the main window
        self.resize(*self.config.main_window_size())
        self.main.set_position(self.config.blocks_window_position())
        self.left.set_position(self.config.console_window_position())
        if self.variable_panel_sidebar:
            self.right.set_position(self.config.variable_editor_position(sidebar=True))
        else:
            self.left_subpanel.set_position(self.config.variable_editor_position())
        self.show_all()
        log.debug("Main window ready")
############################################################
# Event Handlers
############################################################
    def _add_block_to_current_flow_graph(self, widget, key):
        """Signal handler: insert a new block of type *key* into the active flow graph."""
        self.current_flow_graph.add_new_block(key)
    def _remove_block_from_current_flow_graph(self, widget, key):
        """Signal handler: remove the block named *key* from the active flow graph."""
        block = self.current_flow_graph.get_block(key)
        self.current_flow_graph.remove_element(block)
    def _quit(self, window, event):
        """
        Handle the delete event from the main window.

        Generated by pressing X to close, alt+f4, or right click+close.
        This method in turn calls the state handler to quit.

        Returns:
            true
        """
        # Returning True stops GTK from destroying the window directly; the
        # quit action decides whether the application actually exits.
        Actions.APPLICATION_QUIT()
        return True
def update_panel_visibility(self, panel, visibility=True):
"""
Handles changing visibility of panels.
"""
# Set the visibility for the requested panel, then update the containers if they need
# to be hidden as well.
if panel == self.BLOCKS:
if visibility:
self.btwin.show()
else:
self.btwin.hide()
elif panel == self.CONSOLE:
if visibility:
self.console.show()
else:
self.console.hide()
elif panel == self.VARIABLES:
if visibility:
self.vars.show()
else:
self.vars.hide()
else:
return
if self.variable_panel_sidebar:
# If both the variable editor and block panels are hidden, hide the right container
if not (self.btwin.get_property('visible')) and not (self.vars.get_property('visible')):
self.right.hide()
else:
self.right.show()
else:
if not (self.btwin.get_property('visible')):
self.right.hide()
else:
self.right.show()
if not (self.vars.get_property('visible')) and not (self.console.get_property('visible')):
self.left_subpanel.hide()
else:
self.left_subpanel.show()
############################################################
# Console Window
############################################################
    @property
    def current_page(self):
        """The currently selected notebook page, delegated to the Notebook widget."""
        return self.notebook.current_page
    @current_page.setter
    def current_page(self, page):
        # Delegates selection to the Notebook widget.
        self.notebook.current_page = page
    def add_console_line(self, line):
        """
        Place line at the end of the text buffer, then scroll its window all the way down.

        Args:
            line: the new text
        """
        # Delegates to the Console widget (see Console.add_line).
        self.console.add_line(line)
############################################################
# Pages: create and close
############################################################
    def new_page(self, file_path='', show=False):
        """
        Create a new notebook page.
        Set the tab to be selected.

        Args:
            file_path: optional file to load into the flow graph
            show: true if the page should be shown after loading
        """
        #if the file is already open, show the open page and return
        if file_path and file_path in self._get_files(): #already open
            page = self.notebook.get_nth_page(self._get_files().index(file_path))
            self._set_page(page)
            return
        try: #try to load from file
            if file_path: Messages.send_start_load(file_path)
            flow_graph = self._platform.make_flow_graph()
            flow_graph.grc_file_path = file_path
            #print flow_graph
            page = Page(
                self,
                flow_graph=flow_graph,
                file_path=file_path,
            )
            # Surface any parse/validation error recorded during the load.
            if getattr(Messages, 'flowgraph_error') is not None:
                Messages.send(
                    ">>> Check: {}\n>>> FlowGraph Error: {}\n".format(
                        str(Messages.flowgraph_error_file),
                        str(Messages.flowgraph_error)
                    )
                )
            if file_path: Messages.send_end_load()
        except Exception as e: #return on failure
            Messages.send_fail_load(e)
            if isinstance(e, KeyError) and str(e) == "'options'":
                # This error is unrecoverable, so crash gracefully
                exit(-1)
            return
        #add this page to the notebook
        self.notebook.append_page(page, page.tab)
        self.notebook.set_tab_reorderable(page, True)
        #only show if blank or manual
        if not file_path or show: self._set_page(page)
def close_pages(self):
    """
    Close all the pages in this notebook.

    Returns:
        true if all closed
    """
    open_files = [file for file in self._get_files() if file] #filter blank files
    open_file = self.current_page.file_path
    #close each page; saved == False sorts before True, so unsaved pages
    #(which may prompt the user) are handled first
    for page in sorted(self.get_pages(), key=lambda p: p.saved):
        self.page_to_be_closed = page
        closed = self.close_page(False)
        if not closed:
            break
    # if any page refused to close, abort without persisting state
    if self.notebook.get_n_pages(): return False
    #save window/panel state before closing
    self.config.set_open_files(open_files)
    self.config.file_open(open_file)
    self.config.main_window_size(self.get_size())
    self.config.console_window_position(self.left.get_position())
    self.config.blocks_window_position(self.main.get_position())
    if self.variable_panel_sidebar:
        self.config.variable_editor_position(self.right.get_position(), sidebar=True)
    else:
        self.config.variable_editor_position(self.left_subpanel.get_position())
    self.config.save()
    return True
def close_page(self, ensure=True):
    """
    Close the current page.
    If the notebook becomes empty, and ensure is true,
    call new page upon exit to ensure that at least one page exists.

    Args:
        ensure: boolean

    Returns:
        True if the page was closed; False if the user cancelled or the
        page could not be saved.
    """
    if not self.page_to_be_closed: self.page_to_be_closed = self.current_page
    #show the page if it has an executing flow graph or is unsaved
    if self.page_to_be_closed.process or not self.page_to_be_closed.saved:
        self._set_page(self.page_to_be_closed)
    #unsaved? ask the user
    if not self.page_to_be_closed.saved:
        response = self._save_changes() # return value is either OK, CLOSE, or CANCEL
        if response == Gtk.ResponseType.OK:
            Actions.FLOW_GRAPH_SAVE() #try to save
            if not self.page_to_be_closed.saved: #still unsaved?
                self.page_to_be_closed = None #set the page to be closed back to None
                return False
        elif response == Gtk.ResponseType.CANCEL:
            self.page_to_be_closed = None
            return False
    #stop the flow graph if executing
    if self.page_to_be_closed.process:
        Actions.FLOW_GRAPH_KILL()
    #remove the page
    self.notebook.remove_page(self.notebook.page_num(self.page_to_be_closed))
    if ensure and self.notebook.get_n_pages() == 0:
        self.new_page() #no pages, make a new one
    self.page_to_be_closed = None #set the page to be closed back to None
    return True
############################################################
# Misc
############################################################
def update(self):
    """
    Set the title of the main window.
    Set the titles on the page tabs.
    Show/hide the console window.
    """
    page = self.current_page
    basename = os.path.basename(page.file_path)
    dirname = os.path.dirname(page.file_path)
    # Window title: "*" prefix marks unsaved changes; fall back to the
    # default flowgraph title / platform name when there is no file path.
    Gtk.Window.set_title(self, ''.join((
        '*' if not page.saved else '', basename if basename else NEW_FLOGRAPH_TITLE,
        '(read only)' if page.get_read_only() else '', ' - ',
        dirname if dirname else self._platform.config.name,
    )))
    # set tab titles: red foreground marks an unsaved page
    for page in self.get_pages():
        file_name = os.path.splitext(os.path.basename(page.file_path))[0]
        page.set_markup('<span foreground="{foreground}">{title}{ro}</span>'.format(
            foreground='black' if page.saved else 'red', ro=' (ro)' if page.get_read_only() else '',
            title=Utils.encode(file_name or NEW_FLOGRAPH_TITLE),
        ))
        fpath = page.file_path
        if not fpath:
            fpath = '(unsaved)'
        page.set_tooltip(fpath)
    # show/hide notebook tabs (only useful with more than one page)
    self.notebook.set_show_tabs(len(self.get_pages()) > 1)
    # Need to update the variable window when changing
    self.vars.update_gui(self.current_flow_graph.blocks)
def update_pages(self):
    """
    Forces a reload of all the pages in this notebook.
    """
    for page in self.get_pages():
        success = page.flow_graph.reload()
        # NOTE(review): this marks the page unsaved when reload() returns a
        # truthy value, but the original inline comment claimed the opposite
        # ("only set saved if errors occurred during import"). Confirm
        # reload()'s return convention before relying on either reading.
        if success:
            page.saved = False
@property
def current_flow_graph(self):
    # The flow graph belonging to the currently selected page.
    return self.current_page.flow_graph
def get_focus_flag(self):
    """Report the focus flag of the current page's drawing area.

    Returns:
        the focus flag
    """
    drawing_area = self.current_page.drawing_area
    return drawing_area.get_focus_flag()
############################################################
# Helpers
############################################################
def _set_page(self, page):
"""
Set the current page.
Args:
page: the page widget
"""
self.current_page = page
self.notebook.set_current_page(self.notebook.page_num(self.current_page))
def _save_changes(self):
    """
    Save changes to flow graph?

    Returns:
        the response_id (see buttons variable below)
    """
    # Buttons are supplied as alternating (label, response_id) pairs.
    # NOTE(review): Gtk.ResponseType.OK is also passed separately to
    # MessageDialogWrapper -- presumably the default response; confirm
    # against that class's signature.
    buttons = (
        'Close without saving', Gtk.ResponseType.CLOSE,
        Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,
        Gtk.STOCK_SAVE, Gtk.ResponseType.OK
    )
    return MessageDialogWrapper(
        self, Gtk.MessageType.QUESTION, Gtk.ButtonsType.NONE, 'Unsaved Changes!',
        'Would you like to save changes before closing?', Gtk.ResponseType.OK, buttons
    ).run_and_destroy()
def _get_files(self):
"""
Get the file names for all the pages, in order.
Returns:
list of file paths
"""
return [page.file_path for page in self.get_pages()]
def get_pages(self):
    """Return every page widget currently held by the notebook, in order.

    Returns:
        list of pages
    """
    notebook = self.notebook
    pages = []
    for index in range(notebook.get_n_pages()):
        pages.append(notebook.get_nth_page(index))
    return pages
| gpl-3.0 |
googleapis/python-grafeas | grafeas/grafeas_v1/types/deployment.py | 1 | 2785 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
# Module descriptor for proto-plus: ``manifest`` lists the message classes
# this generated module exports under the grafeas.v1 proto package.
__protobuf__ = proto.module(
    package="grafeas.v1", manifest={"DeploymentNote", "DeploymentOccurrence",},
)
class DeploymentNote(proto.Message):
    r"""An artifact that can be deployed in some runtime.

    Attributes:
        resource_uri (Sequence[str]):
            Required. Resource URI for the artifact being
            deployed.
    """

    # Wire-format field number 1; repeated string. Generated code -- do not
    # renumber, as field numbers define the serialized format.
    resource_uri = proto.RepeatedField(proto.STRING, number=1,)
class DeploymentOccurrence(proto.Message):
    r"""The period during which some deployable was active in a
    runtime.

    Attributes:
        user_email (str):
            Identity of the user that triggered this
            deployment.
        deploy_time (google.protobuf.timestamp_pb2.Timestamp):
            Required. Beginning of the lifetime of this
            deployment.
        undeploy_time (google.protobuf.timestamp_pb2.Timestamp):
            End of the lifetime of this deployment.
        config (str):
            Configuration used to create this deployment.
        address (str):
            Address of the runtime element hosting this
            deployment.
        resource_uri (Sequence[str]):
            Output only. Resource URI for the artifact
            being deployed taken from the deployable field
            with the same name.
        platform (grafeas.grafeas_v1.types.DeploymentOccurrence.Platform):
            Platform hosting this deployment.
    """

    class Platform(proto.Enum):
        r"""Types of platforms."""
        PLATFORM_UNSPECIFIED = 0
        GKE = 1
        FLEX = 2
        CUSTOM = 3

    # Wire-format field numbers 1-7. Generated code -- do not renumber,
    # as field numbers define the serialized format.
    user_email = proto.Field(proto.STRING, number=1,)
    deploy_time = proto.Field(proto.MESSAGE, number=2, message=timestamp_pb2.Timestamp,)
    undeploy_time = proto.Field(
        proto.MESSAGE, number=3, message=timestamp_pb2.Timestamp,
    )
    config = proto.Field(proto.STRING, number=4,)
    address = proto.Field(proto.STRING, number=5,)
    resource_uri = proto.RepeatedField(proto.STRING, number=6,)
    platform = proto.Field(proto.ENUM, number=7, enum=Platform,)
__all__ = tuple(sorted(__protobuf__.manifest))
| apache-2.0 |
matthappens/taskqueue | taskqueue/venv_tq/lib/python2.7/site-packages/PIL/SunImagePlugin.py | 18 | 1941 | #
# The Python Imaging Library.
# $Id$
#
# Sun image file handling
#
# History:
# 1995-09-10 fl Created
# 1996-05-28 fl Fixed 32-bit alignment
# 1998-12-29 fl Import ImagePalette module
# 2001-12-18 fl Fixed palette loading (from Jean-Claude Rimbault)
#
# Copyright (c) 1997-2001 by Secret Labs AB
# Copyright (c) 1995-1996 by Fredrik Lundh
#
# See the README file for information on usage and redistribution.
#
__version__ = "0.3"
from PIL import Image, ImageFile, ImagePalette, _binary
# Big-endian 16- and 32-bit integer readers; Sun raster headers are
# big-endian. NOTE: i16 is currently unused in this module.
i16 = _binary.i16be
i32 = _binary.i32be
def _accept(prefix):
    # A Sun raster file begins with the big-endian magic number 0x59a66a95.
    return i32(prefix) == 0x59a66a95
##
# Image plugin for Sun raster files.
class SunImageFile(ImageFile.ImageFile):
    """Image plugin for Sun raster (.ras) files."""

    format = "SUN"
    format_description = "Sun Raster File"

    def _open(self):
        # HEAD: the fixed 32-byte Sun raster header.
        s = self.fp.read(32)
        if i32(s) != 0x59a66a95:
            raise SyntaxError("not an SUN raster file")
        offset = 32
        # Header words (big-endian): width, height at bytes 4-11;
        # bit depth at bytes 12-15.
        self.size = i32(s[4:8]), i32(s[8:12])
        depth = i32(s[12:16])
        if depth == 1:
            self.mode, rawmode = "1", "1;I"
        elif depth == 8:
            self.mode = rawmode = "L"
        elif depth == 24:
            # Pixel data is stored blue-green-red.
            self.mode, rawmode = "RGB", "BGR"
        else:
            raise SyntaxError("unsupported mode")
        compression = i32(s[20:24])
        if i32(s[24:28]) != 0:
            # A colormap of the given byte length follows the header;
            # the pixel data starts after it.
            length = i32(s[28:32])
            offset = offset + length
            self.palette = ImagePalette.raw("RGB;L", self.fp.read(length))
            if self.mode == "L":
                self.mode = rawmode = "P"
        # Row length in bytes, rounded up to a multiple of 4.
        stride = (((self.size[0] * depth + 7) // 8) + 3) & (~3)
        if compression == 1:
            self.tile = [("raw", (0,0)+self.size, offset, (rawmode, stride))]
        elif compression == 2:
            self.tile = [("sun_rle", (0,0)+self.size, offset, rawmode)]
        # NOTE(review): any other compression value silently leaves
        # self.tile unset (no decode tiles), so such files load empty
        # rather than raising -- confirm whether that is intended.
#
# registry
# Register the open hook (magic-number sniffing via _accept) and the .ras
# extension with PIL's global format registry.
Image.register_open("SUN", SunImageFile, _accept)
Image.register_extension("SUN", ".ras")
| mit |
lzw120/django | build/lib/django/db/backends/__init__.py | 7 | 36683 | from django.db.utils import DatabaseError
try:
import thread
except ImportError:
import dummy_thread as thread
from contextlib import contextmanager
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils.importlib import import_module
from django.utils.timezone import is_aware
class BaseDatabaseWrapper(object):
    """
    Represents a database connection.

    Holds the raw DB-API connection, a per-connection query log, and the
    state stack used by Django's manual transaction management (the
    enter/leave_transaction_management pair, the dirty flag, and savepoint
    bookkeeping).
    """
    # Backend-specific BaseDatabaseOperations instance; set by subclasses.
    ops = None
    vendor = 'unknown'

    def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS,
        allow_thread_sharing=False):
        # `settings_dict` should be a dictionary containing keys such as
        # NAME, USER, etc. It's called `settings_dict` instead of `settings`
        # to disambiguate it from Django settings modules.
        self.connection = None
        self.queries = []
        self.settings_dict = settings_dict
        self.alias = alias
        self.use_debug_cursor = None

        # Transaction related attributes
        self.transaction_state = []
        self.savepoint_state = 0
        self._dirty = None
        # Remember the creating thread so cross-thread use can be rejected
        # later (see validate_thread_sharing).
        self._thread_ident = thread.get_ident()
        self.allow_thread_sharing = allow_thread_sharing

    def __eq__(self, other):
        # Two wrappers compare equal when they refer to the same DB alias.
        return self.alias == other.alias

    def __ne__(self, other):
        return not self == other

    def _commit(self):
        # Commit on the raw DB-API connection, if one has been opened.
        if self.connection is not None:
            return self.connection.commit()

    def _rollback(self):
        if self.connection is not None:
            return self.connection.rollback()

    def _enter_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when entering manual
        transaction handling.
        """
        pass

    def _leave_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when leaving manual
        transaction handling. Will usually be implemented only when
        _enter_transaction_management() is also required.
        """
        pass

    def _savepoint(self, sid):
        # No-op on backends without savepoint support.
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_create_sql(sid))

    def _savepoint_rollback(self, sid):
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_rollback_sql(sid))

    def _savepoint_commit(self, sid):
        if not self.features.uses_savepoints:
            return
        self.cursor().execute(self.ops.savepoint_commit_sql(sid))

    def enter_transaction_management(self, managed=True):
        """
        Enters transaction management for a running thread. It must be balanced with
        the appropriate leave_transaction_management call, since the actual state is
        managed as a stack.

        The state and dirty flag are carried over from the surrounding block or
        from the settings, if there is no surrounding block (dirty is always false
        when no current block is running).
        """
        if self.transaction_state:
            self.transaction_state.append(self.transaction_state[-1])
        else:
            self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
        if self._dirty is None:
            self._dirty = False
        self._enter_transaction_management(managed)

    def leave_transaction_management(self):
        """
        Leaves transaction management for a running thread. A dirty flag is carried
        over to the surrounding block, as a commit will commit all changes, even
        those from outside. (Commits are on connection level.)
        """
        self._leave_transaction_management(self.is_managed())
        if self.transaction_state:
            del self.transaction_state[-1]
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")
        if self._dirty:
            # Pending changes at block exit indicate a missing COMMIT/ROLLBACK;
            # roll back before raising so no partial work is left open.
            self.rollback()
            raise TransactionManagementError("Transaction managed block ended with "
                "pending COMMIT/ROLLBACK")
        self._dirty = False

    def validate_thread_sharing(self):
        """
        Validates that the connection isn't accessed by another thread than the
        one which originally created it, unless the connection was explicitly
        authorized to be shared between threads (via the `allow_thread_sharing`
        property). Raises an exception if the validation fails.
        """
        if (not self.allow_thread_sharing
            and self._thread_ident != thread.get_ident()):
            raise DatabaseError("DatabaseWrapper objects created in a "
                "thread can only be used in that same thread. The object "
                "with alias '%s' was created in thread id %s and this is "
                "thread id %s."
                % (self.alias, self._thread_ident, thread.get_ident()))

    def is_dirty(self):
        """
        Returns True if the current transaction requires a commit for changes to
        happen.
        """
        return self._dirty

    def set_dirty(self):
        """
        Sets a dirty flag for the current thread and code streak. This can be used
        to decide in a managed block of code to decide whether there are open
        changes waiting for commit.
        """
        if self._dirty is not None:
            self._dirty = True
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")

    def set_clean(self):
        """
        Resets a dirty flag for the current thread and code streak. This can be used
        to decide in a managed block of code to decide whether a commit or rollback
        should happen.
        """
        if self._dirty is not None:
            self._dirty = False
        else:
            raise TransactionManagementError("This code isn't under transaction management")
        self.clean_savepoints()

    def clean_savepoints(self):
        # Reset the counter used to generate unique savepoint ids.
        self.savepoint_state = 0

    def is_managed(self):
        """
        Checks whether the transaction manager is in manual or in auto state.
        """
        if self.transaction_state:
            return self.transaction_state[-1]
        return settings.TRANSACTIONS_MANAGED

    def managed(self, flag=True):
        """
        Puts the transaction manager into a manual state: managed transactions have
        to be committed explicitly by the user. If you switch off transaction
        management and there is a pending commit/rollback, the data will be
        commited.
        """
        top = self.transaction_state
        if top:
            top[-1] = flag
            # Leaving managed mode with pending changes commits them.
            if not flag and self.is_dirty():
                self._commit()
                self.set_clean()
        else:
            raise TransactionManagementError("This code isn't under transaction "
                "management")

    def commit_unless_managed(self):
        """
        Commits changes if the system is not in managed transaction mode.
        """
        self.validate_thread_sharing()
        if not self.is_managed():
            self._commit()
            self.clean_savepoints()
        else:
            self.set_dirty()

    def rollback_unless_managed(self):
        """
        Rolls back changes if the system is not in managed transaction mode.
        """
        self.validate_thread_sharing()
        if not self.is_managed():
            self._rollback()
        else:
            self.set_dirty()

    def commit(self):
        """
        Does the commit itself and resets the dirty flag.
        """
        self.validate_thread_sharing()
        self._commit()
        self.set_clean()

    def rollback(self):
        """
        This function does the rollback itself and resets the dirty flag.
        """
        self.validate_thread_sharing()
        self._rollback()
        self.set_clean()

    def savepoint(self):
        """
        Creates a savepoint (if supported and required by the backend) inside the
        current transaction. Returns an identifier for the savepoint that will be
        used for the subsequent rollback or commit.
        """
        thread_ident = thread.get_ident()
        self.savepoint_state += 1
        # Savepoint ids embed the thread id and a per-connection counter so
        # they are unique within the transaction.
        tid = str(thread_ident).replace('-', '')
        sid = "s%s_x%d" % (tid, self.savepoint_state)
        self._savepoint(sid)
        return sid

    def savepoint_rollback(self, sid):
        """
        Rolls back the most recent savepoint (if one exists). Does nothing if
        savepoints are not supported.
        """
        self.validate_thread_sharing()
        if self.savepoint_state:
            self._savepoint_rollback(sid)

    def savepoint_commit(self, sid):
        """
        Commits the most recent savepoint (if one exists). Does nothing if
        savepoints are not supported.
        """
        self.validate_thread_sharing()
        if self.savepoint_state:
            self._savepoint_commit(sid)

    @contextmanager
    def constraint_checks_disabled(self):
        # Context manager that disables FK constraint checking for the
        # duration of the block, restoring it only if it was disabled here.
        disabled = self.disable_constraint_checking()
        try:
            yield
        finally:
            if disabled:
                self.enable_constraint_checking()

    def disable_constraint_checking(self):
        """
        Backends can implement as needed to temporarily disable foreign key constraint
        checking.
        """
        pass

    def enable_constraint_checking(self):
        """
        Backends can implement as needed to re-enable foreign key constraint checking.
        """
        pass

    def check_constraints(self, table_names=None):
        """
        Backends can override this method if they can apply constraint checking (e.g. via "SET CONSTRAINTS
        ALL IMMEDIATE"). Should raise an IntegrityError if any invalid foreign key references are encountered.
        """
        pass

    def close(self):
        # Close the raw connection (if open) and forget it.
        self.validate_thread_sharing()
        if self.connection is not None:
            self.connection.close()
            self.connection = None

    def cursor(self):
        # Return a wrapped cursor; the debug wrapper records executed queries
        # when DEBUG is on or use_debug_cursor is explicitly set.
        self.validate_thread_sharing()
        if (self.use_debug_cursor or
            (self.use_debug_cursor is None and settings.DEBUG)):
            cursor = self.make_debug_cursor(self._cursor())
        else:
            cursor = util.CursorWrapper(self._cursor(), self)
        return cursor

    def make_debug_cursor(self, cursor):
        return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
    """
    Describes which optional database features a backend supports.

    Most flags are static class attributes; the few that can only be
    determined against a live database are filled in by confirm().
    """
    allows_group_by_pk = False
    # True if django.db.backend.utils.typecast_timestamp is used on values
    # returned from dates() calls.
    needs_datetime_string_cast = True
    empty_fetchmany_value = []
    update_can_self_select = True

    # Does the backend distinguish between '' and None?
    interprets_empty_strings_as_nulls = False

    # Does the backend allow inserting duplicate rows when a unique_together
    # constraint exists, but one of the unique_together columns is NULL?
    ignores_nulls_in_unique_constraints = True

    can_use_chunked_reads = True
    can_return_id_from_insert = False
    has_bulk_insert = False
    uses_autocommit = False
    uses_savepoints = False
    can_combine_inserts_with_and_without_auto_increment_pk = False

    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
    related_fields_match_type = False
    allow_sliced_subqueries = True
    has_select_for_update = False
    has_select_for_update_nowait = False

    supports_select_related = True

    # Does the default test database allow multiple connections?
    # Usually an indication that the test database is in-memory
    test_db_allows_multiple_connections = True

    # Can an object be saved without an explicit primary key?
    supports_unspecified_pk = False

    # Can a fixture contain forward references? i.e., are
    # FK constraints checked at the end of transaction, or
    # at the end of each save operation?
    supports_forward_references = True

    # Does a dirty transaction need to be rolled back
    # before the cursor can be used again?
    requires_rollback_on_dirty_transaction = False

    # Does the backend allow very long model names without error?
    supports_long_model_names = True

    # Is there a REAL datatype in addition to floats/doubles?
    has_real_datatype = False
    supports_subqueries_in_group_by = True
    supports_bitwise_or = True

    # Do time/datetime fields have microsecond precision?
    supports_microsecond_precision = True

    # Does the __regex lookup support backreferencing and grouping?
    supports_regex_backreferencing = True

    # Can date/datetime lookups be performed using a string?
    supports_date_lookup_using_string = True

    # Can datetimes with timezones be used?
    supports_timezones = True

    # When performing a GROUP BY, is an ORDER BY NULL required
    # to remove any ordering?
    requires_explicit_null_ordering_when_grouping = False

    # Is there a 1000 item limit on query parameters?
    supports_1000_query_parameters = True

    # Can an object have a primary key of 0? MySQL says No.
    allows_primary_key_0 = True

    # Do we need to NULL a ForeignKey out, or can the constraint check be
    # deferred
    can_defer_constraint_checks = False

    # date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
    supports_mixed_date_datetime_comparisons = True

    # Does the backend support tablespaces? Default to False because it isn't
    # in the SQL standard.
    supports_tablespaces = False

    # Features that need to be confirmed at runtime
    # Cache whether the confirmation has been performed.
    _confirmed = False
    supports_transactions = None
    supports_stddev = None
    can_introspect_foreign_keys = None

    # Support for the DISTINCT ON clause
    can_distinct_on_fields = False

    def __init__(self, connection):
        self.connection = connection

    def confirm(self):
        "Perform manual checks of any database features that might vary between installs"
        self._confirmed = True
        self.supports_transactions = self._supports_transactions()
        self.supports_stddev = self._supports_stddev()
        self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()

    def _supports_transactions(self):
        "Confirm support for transactions"
        # Create a table, insert inside an uncommitted transaction, roll
        # back, and check whether the row survived: 0 rows == transactional.
        cursor = self.connection.cursor()
        cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
        self.connection._commit()
        cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
        self.connection._rollback()
        cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
        count, = cursor.fetchone()
        cursor.execute('DROP TABLE ROLLBACK_TEST')
        self.connection._commit()
        return count == 0

    def _supports_stddev(self):
        """
        Confirm support for STDDEV and related stats functions.

        Bug fix: the previous implementation assigned
        ``self.supports_stddev = False`` inside the except clause and then
        implicitly returned None, so confirm() immediately overwrote the
        attribute with None regardless of actual support. Return an explicit
        boolean instead and let confirm() store it.
        """
        class StdDevPop(object):
            sql_function = 'STDDEV_POP'
        try:
            self.connection.ops.check_aggregate_support(StdDevPop())
            return True
        except NotImplementedError:
            return False

    def _can_introspect_foreign_keys(self):
        "Confirm support for introspected foreign keys"
        # Every database can do this reliably, except MySQL,
        # which can't do it for MyISAM tables
        return True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self, connection):
self.connection = connection
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def for_update_sql(self, nowait=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
else:
return 'FOR UPDATE'
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
else:
return 'DISTINCT'
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
return smart_unicode(x).replace("\\", "\\\\").replace("%", "\%").replace("_", "\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
"""
return value
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
if is_aware(value):
raise ValueError("Django does not support timezone-aware times.")
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a field value using a year lookup
`value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year lookup
`value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self, cursor=None):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use database's ORDER BY here to avoid subtle differences
in sorting order between databases.
"""
if cursor is None:
cursor = self.connection.cursor()
return sorted(self.get_table_list(cursor))
def get_table_list(self, cursor):
"""
Returns an unsorted list of names of all tables that exist in the
database.
"""
raise NotImplementedError
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
tables = list(tables)
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
"""
raise NotImplementedError
def get_primary_key_column(self, cursor, table_name):
"""
Backends can override this to return the column name of the primary key for the given table.
"""
raise NotImplementedError
def get_indexes(self, cursor, table_name):
"""
Returns a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
Only single-column indexes are introspected.
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
This class encapsualtes all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| bsd-3-clause |
waynenilsen/statsmodels | setup.py | 16 | 15962 | """
Much of the build system code was adapted from work done by the pandas
developers [1], which was in turn based on work done in pyzmq [2] and lxml [3].
[1] http://pandas.pydata.org
[2] http://zeromq.github.io/pyzmq/
[3] http://lxml.de/
"""
import os
from os.path import relpath, join as pjoin
import sys
import subprocess
import re
from distutils.version import StrictVersion
# temporarily redirect config directory to prevent matplotlib importing
# testing that for writeable directory which results in sandbox error in
# certain easy_install versions
os.environ["MPLCONFIGDIR"] = "."
no_frills = (len(sys.argv) >= 2 and ('--help' in sys.argv[1:] or
sys.argv[1] in ('--help-commands',
'egg_info', '--version',
'clean')))
# try bootstrapping setuptools if it doesn't exist
try:
import pkg_resources
try:
pkg_resources.require("setuptools>=0.6c5")
except pkg_resources.VersionConflict:
from ez_setup import use_setuptools
use_setuptools(version="0.6c5")
from setuptools import setup, Command, find_packages
_have_setuptools = True
except ImportError:
# no setuptools installed
from distutils.core import setup, Command
_have_setuptools = False
if _have_setuptools:
setuptools_kwargs = {"zip_safe": False,
"test_suite": "nose.collector"}
else:
setuptools_kwargs = {}
if sys.version_info[0] >= 3:
sys.exit("Need setuptools to install statsmodels for Python 3.x")
curdir = os.path.abspath(os.path.dirname(__file__))
README = open(pjoin(curdir, "README.rst")).read()
DISTNAME = 'statsmodels'
DESCRIPTION = 'Statistical computations and models for use with SciPy'
LONG_DESCRIPTION = README
MAINTAINER = 'Skipper Seabold, Josef Perktold'
MAINTAINER_EMAIL ='pystatsmodels@googlegroups.com'
URL = 'http://statsmodels.sourceforge.net/'
LICENSE = 'BSD License'
DOWNLOAD_URL = ''
# These imports need to be here; setuptools needs to be imported first.
from distutils.extension import Extension
from distutils.command.build import build
from distutils.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
def build_extensions(self):
numpy_incl = pkg_resources.resource_filename('numpy', 'core/include')
for ext in self.extensions:
if (hasattr(ext, 'include_dirs') and
not numpy_incl in ext.include_dirs):
ext.include_dirs.append(numpy_incl)
_build_ext.build_extensions(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
p = subprocess.call([sys.executable,
os.path.join(cwd, 'tools', 'cythonize.py'),
'statsmodels'],
cwd=cwd)
if p != 0:
raise RuntimeError("Running cythonize failed!")
def strip_rc(version):
return re.sub(r"rc\d+$", "", version)
def check_dependency_versions(min_versions):
"""
Don't let pip/setuptools do this all by itself. It's rude.
For all dependencies, try to import them and check if the versions of
installed dependencies match the minimum version requirements. If
installed but version too low, raise an error. If not installed at all,
return the correct ``setup_requires`` and ``install_requires`` arguments to
be added to the setuptools kwargs. This prevents upgrading installed
dependencies like numpy (that should be an explicit choice by the user and
never happen automatically), but make things work when installing into an
empty virtualenv for example.
"""
setup_requires = []
install_requires = []
try:
from numpy.version import short_version as npversion
except ImportError:
setup_requires.append('numpy')
install_requires.append('numpy')
else:
if not (StrictVersion(strip_rc(npversion)) >= min_versions['numpy']):
raise ImportError("Numpy version is %s. Requires >= %s" %
(npversion, min_versions['numpy']))
try:
import scipy
except ImportError:
install_requires.append('scipy')
else:
try:
from scipy.version import short_version as spversion
except ImportError:
from scipy.version import version as spversion # scipy 0.7.0
if not (StrictVersion(strip_rc(spversion)) >= min_versions['scipy']):
raise ImportError("Scipy version is %s. Requires >= %s" %
(spversion, min_versions['scipy']))
try:
from pandas.version import short_version as pversion
except ImportError:
install_requires.append('pandas')
else:
if not (StrictVersion(strip_rc(pversion)) >= min_versions['pandas']):
ImportError("Pandas version is %s. Requires >= %s" %
(pversion, min_versions['pandas']))
try:
from patsy import __version__ as patsy_version
except ImportError:
install_requires.append('patsy')
else:
# patsy dev looks like 0.1.0+dev
pversion = re.match("\d*\.\d*\.\d*", patsy_version).group()
if not (StrictVersion(pversion) >= min_versions['patsy']):
raise ImportError("Patsy version is %s. Requires >= %s" %
(pversion, min_versions["patsy"]))
return setup_requires, install_requires
MAJ = 0
MIN = 7
REV = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJ,MIN,REV)
classifiers = [ 'Development Status :: 4 - Beta',
'Environment :: Console',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Topic :: Scientific/Engineering']
# Return the git revision as a string
def git_version():
def _minimal_ext_cmd(cmd):
# construct minimal environment
env = {}
for k in ['SYSTEMROOT', 'PATH']:
v = os.environ.get(k)
if v is not None:
env[k] = v
# LANGUAGE is used on win32
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
out = subprocess.Popen(" ".join(cmd), stdout = subprocess.PIPE, env=env,
shell=True).communicate()[0]
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
except OSError:
GIT_REVISION = "Unknown"
return GIT_REVISION
def write_version_py(filename=pjoin(curdir, 'statsmodels/version.py')):
cnt = "\n".join(["",
"# THIS FILE IS GENERATED FROM SETUP.PY",
"short_version = '%(version)s'",
"version = '%(version)s'",
"full_version = '%(full_version)s'",
"git_revision = '%(git_revision)s'",
"release = %(isrelease)s", "",
"if not release:",
" version = full_version"])
# Adding the git rev number needs to be done inside write_version_py(),
# otherwise the import of numpy.version messes up the build under Python 3.
FULLVERSION = VERSION
dowrite = True
if os.path.exists('.git'):
GIT_REVISION = git_version()
elif os.path.exists(filename):
# must be a source distribution, use existing version file
try:
from statsmodels.version import git_revision as GIT_REVISION
except ImportError:
dowrite = False
GIT_REVISION = "Unknown"
else:
GIT_REVISION = "Unknown"
if not ISRELEASED:
FULLVERSION += '.dev0+' + GIT_REVISION[:7]
if dowrite:
try:
a = open(filename, 'w')
a.write(cnt % {'version': VERSION,
'full_version' : FULLVERSION,
'git_revision' : GIT_REVISION,
'isrelease': str(ISRELEASED)})
finally:
a.close()
class CleanCommand(Command):
"""Custom distutils command to clean the .so and .pyc files."""
user_options = [("all", "a", "")]
def initialize_options(self):
self.all = True
self._clean_me = []
self._clean_trees = []
self._clean_exclude = ["bspline_ext.c",
"bspline_impl.c"]
for root, dirs, files in list(os.walk('statsmodels')):
for f in files:
if f in self._clean_exclude:
continue
if os.path.splitext(f)[-1] in ('.pyc', '.so', '.o',
'.pyo',
'.pyd', '.c', '.orig'):
self._clean_me.append(pjoin(root, f))
for d in dirs:
if d == '__pycache__':
self._clean_trees.append(pjoin(root, d))
for d in ('build',):
if os.path.exists(d):
self._clean_trees.append(d)
def finalize_options(self):
pass
def run(self):
for clean_me in self._clean_me:
try:
os.unlink(clean_me)
except Exception:
pass
for clean_tree in self._clean_trees:
try:
import shutil
shutil.rmtree(clean_tree)
except Exception:
pass
class CheckingBuildExt(build_ext):
"""Subclass build_ext to get clearer report if Cython is necessary."""
def check_cython_extensions(self, extensions):
for ext in extensions:
for src in ext.sources:
if not os.path.exists(src):
raise Exception("""Cython-generated file '%s' not found.
Cython is required to compile statsmodels from a development branch.
Please install Cython or download a source release of statsmodels.
""" % src)
def build_extensions(self):
self.check_cython_extensions(self.extensions)
build_ext.build_extensions(self)
class DummyBuildSrc(Command):
""" numpy's build_src command interferes with Cython's build_ext.
"""
user_options = []
def initialize_options(self):
self.py_modules_dict = {}
def finalize_options(self):
pass
def run(self):
pass
cmdclass = {'clean': CleanCommand,
'build': build}
cmdclass["build_src"] = DummyBuildSrc
cmdclass["build_ext"] = CheckingBuildExt
# some linux distros require it
#NOTE: we are not currently using this but add it to Extension, if needed.
# libraries = ['m'] if 'win32' not in sys.platform else []
from numpy.distutils.misc_util import get_info
npymath_info = get_info("npymath")
ext_data = dict(
kalman_loglike = {"name" : "statsmodels/tsa/kalmanf/kalman_loglike.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"],
"sources" : []},
_statespace = {"name" : "statsmodels/tsa/statespace/_statespace.c",
"depends" : ["statsmodels/src/capsule.h"],
"include_dirs": ["statsmodels/src"] + npymath_info['include_dirs'],
"libraries": npymath_info['libraries'],
"library_dirs": npymath_info['library_dirs'],
"sources" : []},
linbin = {"name" : "statsmodels/nonparametric/linbin.c",
"depends" : [],
"sources" : []},
_smoothers_lowess = {"name" : "statsmodels/nonparametric/_smoothers_lowess.c",
"depends" : [],
"sources" : []}
)
extensions = []
for name, data in ext_data.items():
data['sources'] = data.get('sources', []) + [data['name']]
destdir = ".".join(os.path.dirname(data["name"]).split("/"))
data.pop('name')
obj = Extension('%s.%s' % (destdir, name), **data)
extensions.append(obj)
def get_data_files():
sep = os.path.sep
# install the datasets
data_files = {}
root = pjoin(curdir, "statsmodels", "datasets")
for i in os.listdir(root):
if i is "tests":
continue
path = pjoin(root, i)
if os.path.isdir(path):
data_files.update({relpath(path, start=curdir).replace(sep, ".") : ["*.csv",
"*.dta"]})
# add all the tests and results files
for r, ds, fs in os.walk(pjoin(curdir, "statsmodels")):
r_ = relpath(r, start=curdir)
if r_.endswith('results'):
data_files.update({r_.replace(sep, ".") : ["*.csv",
"*.txt"]})
return data_files
if __name__ == "__main__":
if os.path.exists('MANIFEST'):
os.unlink('MANIFEST')
min_versions = {
'numpy' : '1.4.0',
'scipy' : '0.7.0',
'pandas' : '0.7.1',
'patsy' : '0.1.0',
}
if sys.version_info[0] == 3 and sys.version_info[1] >= 3:
# 3.3 needs numpy 1.7+
min_versions.update({"numpy" : "1.7.0b2"})
(setup_requires,
install_requires) = check_dependency_versions(min_versions)
if _have_setuptools:
setuptools_kwargs['setup_requires'] = setup_requires
setuptools_kwargs['install_requires'] = install_requires
write_version_py()
# this adds *.csv and *.dta files in datasets folders
# and *.csv and *.txt files in test/results folders
package_data = get_data_files()
packages = find_packages()
packages.append("statsmodels.tsa.vector_ar.data")
package_data["statsmodels.datasets.tests"].append("*.zip")
package_data["statsmodels.iolib.tests.results"].append("*.dta")
package_data["statsmodels.stats.tests.results"].append("*.json")
package_data["statsmodels.tsa.vector_ar.tests.results"].append("*.npz")
# data files that don't follow the tests/results pattern. should fix.
package_data.update({"statsmodels.stats.tests" : ["*.txt"]})
package_data.update({"statsmodels.stats.libqsturng" :
["*.r", "*.txt", "*.dat"]})
package_data.update({"statsmodels.stats.libqsturng.tests" :
["*.csv", "*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
package_data.update({"statsmodels.tsa.vector_ar.data" : ["*.dat"]})
# temporary, until moved:
package_data.update({"statsmodels.sandbox.regression.tests" :
["*.dta", "*.csv"]})
#TODO: deal with this. Not sure if it ever worked for bdists
#('docs/build/htmlhelp/statsmodelsdoc.chm',
# 'statsmodels/statsmodelsdoc.chm')
cwd = os.path.abspath(os.path.dirname(__file__))
if not os.path.exists(os.path.join(cwd, 'PKG-INFO')) and not no_frills:
# Generate Cython sources, unless building from source release
generate_cython()
setup(name = DISTNAME,
version = VERSION,
maintainer = MAINTAINER,
ext_modules = extensions,
maintainer_email = MAINTAINER_EMAIL,
description = DESCRIPTION,
license = LICENSE,
url = URL,
download_url = DOWNLOAD_URL,
long_description = LONG_DESCRIPTION,
classifiers = classifiers,
platforms = 'any',
cmdclass = cmdclass,
packages = packages,
package_data = package_data,
include_package_data=False, # True will install all files in repo
**setuptools_kwargs)
| bsd-3-clause |
MonamAgarwal/final | GTG/core/requester.py | 2 | 8353 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Getting Things GNOME! - a personal organizer for the GNOME desktop
# Copyright (c) 2008-2013 - Lionel Dricot & Bertrand Rousseau
#
# This program is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
"""
A nice general purpose interface for the datastore and tagstore
"""
from gi.repository import GObject
from GTG.core.tag import Tag
from GTG.tools.logger import Log
class Requester(GObject.GObject):
""" A view on a GTG datastore.
L{Requester} is a stateless object that simply provides a nice API for
user interfaces to use for datastore operations.
Multiple L{Requester}s can exist on the same datastore, so they should
never have state of their own.
"""
def __init__(self, datastore, global_conf):
"""Construct a L{Requester}."""
GObject.GObject.__init__(self)
self.ds = datastore
self.__config = global_conf
self.__basetree = self.ds.get_tasks_tree()
############ Tasks Tree ######################
# By default, we return the task tree of the main window
def get_tasks_tree(self, name='active', refresh=True):
return self.__basetree.get_viewtree(name=name, refresh=refresh)
def get_main_view(self):
return self.__basetree.get_main_view()
def is_displayed(self, task):
return self.__basetree.get_viewtree(name='active').is_displayed(task)
def get_basetree(self):
return self.__basetree
# this method also update the viewcount of tags
def apply_global_filter(self, tree, filtername):
tree.apply_filter(filtername)
for t in self.get_all_tags():
ta = self.get_tag(t)
ta.apply_filter(filtername)
def unapply_global_filter(self, tree, filtername):
tree.unapply_filter(filtername)
for t in self.get_all_tags():
ta = self.get_tag(t)
ta.unapply_filter(filtername)
######### Filters bank #######################
# List, by name, all available filters
def list_filters(self):
return self.__basetree.list_filters()
# Add a filter to the filter bank
# Return True if the filter was added
# Return False if the filter_name was already in the bank
def add_filter(self, filter_name, filter_func):
return self.__basetree.add_filter(filter_name, filter_func)
# Remove a filter from the bank.
# Only custom filters that were added here can be removed
# Return False if the filter was not removed
def remove_filter(self, filter_name):
return self.__basetree.remove_filter(filter_name)
############## Tasks ##########################
###############################################
def has_task(self, tid):
"""Does the task 'tid' exist?"""
return self.ds.has_task(tid)
def get_task(self, tid):
"""Get the task with the given C{tid}.
If no such task exists, create it and force the tid to be C{tid}.
@param tid: The task id.
@return: A task.
"""
task = self.ds.get_task(tid)
return task
# FIXME unused parameter newtask (maybe for compatibility?)
def new_task(self, tags=None, newtask=True):
"""Create a new task.
Note: this modifies the datastore.
@param pid: The project where the new task will be created.
@param tags: The tags for the new task. If not provided, then the
task will have no tags. Tags must be an iterator type containing
the tags tids
@param newtask: C{True} if this is creating a new task that never
existed, C{False} if importing an existing task from a backend.
@return: A task from the data store
"""
task = self.ds.new_task()
if tags:
for t in tags:
assert(not isinstance(t, Tag))
task.tag_added(t)
return task
def delete_task(self, tid, recursive=True):
"""Delete the task 'tid' and, by default, delete recursively
all the childrens.
Note: this modifies the datastore.
@param tid: The id of the task to be deleted.
"""
# send the signal before actually deleting the task !
Log.debug("deleting task %s" % tid)
return self.__basetree.del_node(tid, recursive=recursive)
def get_task_id(self, task_title):
""" Heuristic which convert task_title to a task_id
Return a first task which has similar title """
task_title = task_title.lower()
tasks = self.get_tasks_tree('active', False).get_all_nodes()
tasktree = self.get_main_view()
for task_id in tasks:
task = tasktree.get_node(task_id)
if task_title == task.get_title().lower():
return task_id
return None
############### Tags ##########################
###############################################
def get_tag_tree(self):
return self.ds.get_tagstore().get_viewtree(name='activetags')
def new_tag(self, tagname):
"""Create a new tag called 'tagname'.
Note: this modifies the datastore.
@param tagname: The name of the new tag.
@return: The newly-created tag.
"""
return self.ds.new_tag(tagname)
def new_search_tag(self, name, query):
"""
Create a new search tag from search query
Note: this modifies the datastore.
@param name: name of the new search tag
@param query: Query will be parsed using search parser
@return: new tag
"""
return self.ds.new_search_tag(name, query)
def remove_tag(self, name):
""" calls datastore to remove a given tag """
self.ds.remove_tag(name)
def rename_tag(self, oldname, newname):
self.ds.rename_tag(oldname, newname)
def get_tag(self, tagname):
return self.ds.get_tag(tagname)
def get_used_tags(self):
"""Return tags currently used by a task.
@return: A list of tag names used by a task.
"""
tagstore = self.ds.get_tagstore()
view = tagstore.get_viewtree(name='tag_completion', refresh=False)
tags = view.get_all_nodes()
tags.sort(key=str.lower)
return tags
def get_all_tags(self):
"""
Gets all tags from all tasks
"""
return self.ds.get_tagstore().get_main_view().get_all_nodes()
############## Backends #######################
###############################################
def get_all_backends(self, disabled=False):
return self.ds.get_all_backends(disabled)
def register_backend(self, dic):
return self.ds.register_backend(dic)
def flush_all_tasks(self, backend_id):
return self.ds.flush_all_tasks(backend_id)
def get_backend(self, backend_id):
return self.ds.get_backend(backend_id)
def set_backend_enabled(self, backend_id, state):
return self.ds.set_backend_enabled(backend_id, state)
def remove_backend(self, backend_id):
return self.ds.remove_backend(backend_id)
def backend_change_attached_tags(self, backend_id, tags):
return self.ds.backend_change_attached_tags(backend_id, tags)
def save_datastore(self):
return self.ds.save()
############## Config ############################
##################################################
def get_global_config(self):
return self.__config
def get_config(self, name):
return self.__config.get_subconfig(name)
def save_config(self):
self.__config.save()
| gpl-3.0 |
patricklaw/pants | src/python/pants/base/exception_sink.py | 4 | 17975 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import datetime
import faulthandler
import logging
import os
import signal
import sys
import threading
import traceback
from contextlib import contextmanager
from typing import Callable, Dict, Iterator
import psutil
import setproctitle
from pants.util.dirutil import safe_mkdir, safe_open
from pants.util.osutil import Pid
logger = logging.getLogger(__name__)
class SignalHandler:
"""A specification for how to handle a fixed set of nonfatal signals.
This is subclassed and registered with ExceptionSink.reset_signal_handler() whenever the signal
handling behavior is modified for different pants processes, for example in the remote client when
pantsd is enabled. The default behavior is to exit "gracefully" by leaving a detailed log of which
signal was received, then exiting with failure.
Note that the terminal will convert a ctrl-c from the user into a SIGINT.
"""
@property
def signal_handler_mapping(self) -> Dict[signal.Signals, Callable]:
"""A dict mapping (signal number) -> (a method handling the signal)."""
# Could use an enum here, but we never end up doing any matching on the specific signal value,
# instead just iterating over the registered signals to set handlers, so a dict is probably
# better.
return {
signal.SIGINT: self._handle_sigint_if_enabled,
signal.SIGQUIT: self.handle_sigquit,
signal.SIGTERM: self.handle_sigterm,
}
def __init__(self, *, pantsd_instance: bool):
self._ignore_sigint_lock = threading.Lock()
self._ignoring_sigint = False
self._pantsd_instance = pantsd_instance
def _handle_sigint_if_enabled(self, signum: int, _frame):
with self._ignore_sigint_lock:
if not self._ignoring_sigint:
self.handle_sigint(signum, _frame)
def _toggle_ignoring_sigint(self, toggle: bool) -> None:
if not self._pantsd_instance:
with self._ignore_sigint_lock:
self._ignoring_sigint = toggle
def _send_signal_to_children(self, received_signal: int, signame: str) -> None:
"""Send a signal to any children of this process in order.
Pants may have spawned multiple subprocesses via Python or Rust. Upon receiving a signal,
this method is invoked to propagate the signal to all children, regardless of how they were
spawned.
"""
self_process = psutil.Process()
children = self_process.children()
logger.debug(f"Sending signal {signame} ({received_signal}) to child processes: {children}")
for child_process in children:
child_process.send_signal(received_signal)
def handle_sigint(self, signum: int, _frame):
self._send_signal_to_children(signum, "SIGINT")
raise KeyboardInterrupt("User interrupted execution with control-c!")
# TODO(#7406): figure out how to let sys.exit work in a signal handler instead of having to raise
# this exception!
class SignalHandledNonLocalExit(Exception):
"""Raised in handlers for non-fatal signals to overcome Python limitations.
When waiting on a subprocess and in a signal handler, sys.exit appears to be ignored, and
causes the signal handler to return. We want to (eventually) exit after these signals, not
ignore them, so we raise this exception instead and check it in our sys.excepthook override.
"""
def __init__(self, signum, signame):
self.signum = signum
self.signame = signame
self.traceback_lines = traceback.format_stack()
super(SignalHandler.SignalHandledNonLocalExit, self).__init__()
if "I/O operation on closed file" in self.traceback_lines:
logger.debug(
"SignalHandledNonLocalExit: unexpected appearance of "
"'I/O operation on closed file' in traceback"
)
def handle_sigquit(self, signum, _frame):
self._send_signal_to_children(signum, "SIGQUIT")
raise self.SignalHandledNonLocalExit(signum, "SIGQUIT")
def handle_sigterm(self, signum, _frame):
self._send_signal_to_children(signum, "SIGTERM")
raise self.SignalHandledNonLocalExit(signum, "SIGTERM")
class ExceptionSink:
    """A mutable singleton object representing where exceptions should be logged to.

    The ExceptionSink should be installed in any process that is running Pants @rules via the
    engine. Notably, this does _not_ include the pantsd client, which does its own signal handling
    directly in order to forward information to the pantsd server.
    """

    # NB: see the bottom of this file where we call reset_log_location() and other mutators in order
    # to properly setup global state.
    _log_dir = None

    # Where to log stacktraces to in a SIGUSR2 handler.
    _interactive_output_stream = None

    # An instance of `SignalHandler` which is invoked to handle a static set of specific nonfatal
    # signals (these signal handlers are allowed to make pants exit, but unlike SIGSEGV they don't
    # need to exit immediately).
    _signal_handler: SignalHandler = SignalHandler(pantsd_instance=False)

    # These persistent open file descriptors are kept so the signal handler can do almost no work
    # (and lets faulthandler figure out signal safety).
    _pid_specific_error_fileobj = None
    _shared_error_fileobj = None

    def __new__(cls, *args, **kwargs):
        # This class is a namespace of classmethods operating on class-level state;
        # constructing an instance is always a programming error.
        raise TypeError(
            "Instances of {} are not allowed to be constructed! Call install() instead.".format(
                cls.__name__
            )
        )

    class ExceptionSinkError(Exception):
        pass

    @classmethod
    def install(cls, log_location: str, pantsd_instance: bool) -> None:
        """Setup global state for this process, such as signal handlers and sys.excepthook."""
        # Set the log location for writing logs before bootstrap options are parsed.
        cls.reset_log_location(log_location)

        # NB: Mutate process-global state!
        sys.excepthook = ExceptionSink.log_exception

        # Setup a default signal handler.
        cls.reset_signal_handler(SignalHandler(pantsd_instance=pantsd_instance))

    # All reset_* methods are ~idempotent!
    @classmethod
    def reset_log_location(cls, new_log_location: str) -> None:
        """Re-acquire file handles to error logs based in the new location.

        Class state:
        - Overwrites `cls._log_dir`, `cls._pid_specific_error_fileobj`, and
          `cls._shared_error_fileobj`.
        OS state:
        - May create a new directory.
        - Overwrites signal handlers for many fatal and non-fatal signals (but not SIGUSR2).

        :raises: :class:`ExceptionSink.ExceptionSinkError` if the directory does not exist or is not
                 writable.
        """
        # We could no-op here if the log locations are the same, but there's no reason not to have the
        # additional safety of re-acquiring file descriptors each time (and erroring out early if the
        # location is no longer writable).
        try:
            safe_mkdir(new_log_location)
        except Exception as e:
            raise cls.ExceptionSinkError(
                "The provided log location path at '{}' is not writable or could not be created: {}.".format(
                    new_log_location, str(e)
                ),
                e,
            )

        pid = os.getpid()
        pid_specific_log_path = cls.exceptions_log_path(for_pid=pid, in_dir=new_log_location)
        shared_log_path = cls.exceptions_log_path(in_dir=new_log_location)
        # The two paths must differ, or the pid-specific "w" open below would truncate
        # the shared append-mode log.
        assert pid_specific_log_path != shared_log_path
        try:
            pid_specific_error_stream = safe_open(pid_specific_log_path, mode="w")
            shared_error_stream = safe_open(shared_log_path, mode="a")
        except Exception as e:
            raise cls.ExceptionSinkError(
                "Error opening fatal error log streams for log location '{}': {}".format(
                    new_log_location, str(e)
                )
            )

        # NB: mutate process-global state!
        if faulthandler.is_enabled():
            logger.debug("re-enabling faulthandler")
            # Call Py_CLEAR() on the previous error stream:
            # https://github.com/vstinner/faulthandler/blob/master/faulthandler.c
            faulthandler.disable()
        # Send a stacktrace to this file if interrupted by a fatal error.
        faulthandler.enable(file=pid_specific_error_stream, all_threads=True)

        # NB: mutate the class variables!
        cls._log_dir = new_log_location
        cls._pid_specific_error_fileobj = pid_specific_error_stream
        cls._shared_error_fileobj = shared_error_stream

    @classmethod
    def exceptions_log_path(cls, for_pid=None, in_dir=None):
        """Get the path to either the shared or pid-specific fatal errors log file."""
        if for_pid is None:
            intermediate_filename_component = ""
        else:
            assert isinstance(for_pid, Pid)
            intermediate_filename_component = ".{}".format(for_pid)
        in_dir = in_dir or cls._log_dir
        return os.path.join(
            in_dir, ".pids", "exceptions{}.log".format(intermediate_filename_component)
        )

    @classmethod
    def _log_exception(cls, msg):
        """Try to log an error message to this process's error log and the shared error log.

        NB: Doesn't raise (logs an error instead).
        """
        pid = os.getpid()
        fatal_error_log_entry = cls._format_exception_message(msg, pid)

        # We care more about this log than the shared log, so write to it first.
        try:
            cls._try_write_with_flush(cls._pid_specific_error_fileobj, fatal_error_log_entry)
        except Exception as e:
            logger.error(
                "Error logging the message '{}' to the pid-specific file handle for {} at pid {}:\n{}".format(
                    msg, cls._log_dir, pid, e
                )
            )

        # Write to the shared log.
        try:
            # TODO: we should probably guard this against concurrent modification by other pants
            # subprocesses somehow.
            cls._try_write_with_flush(cls._shared_error_fileobj, fatal_error_log_entry)
        except Exception as e:
            logger.error(
                "Error logging the message '{}' to the shared file handle for {} at pid {}:\n{}".format(
                    msg, cls._log_dir, pid, e
                )
            )

    @classmethod
    def _try_write_with_flush(cls, fileobj, payload):
        """This method is here so that it can be patched to simulate write errors.

        This is because mock can't patch primitive objects like file objects.
        """
        fileobj.write(payload)
        fileobj.flush()

    @classmethod
    def reset_signal_handler(cls, signal_handler: SignalHandler) -> SignalHandler:
        """Given a SignalHandler, uses the `signal` std library functionality to set the pants
        process's signal handlers to those specified in the object.

        Note that since this calls `signal.signal()`, it will crash if not the main thread. Returns
        the previously-registered signal handler.
        """
        for signum, handler in signal_handler.signal_handler_mapping.items():
            signal.signal(signum, handler)
            # Retry any system calls interrupted by any of the signals we just installed handlers for
            # (instead of having them raise EINTR). siginterrupt(3) says this is the default behavior on
            # Linux and OSX.
            signal.siginterrupt(signum, False)
        previous_signal_handler = cls._signal_handler
        cls._signal_handler = signal_handler
        return previous_signal_handler

    @classmethod
    @contextmanager
    def trapped_signals(cls, new_signal_handler: SignalHandler) -> Iterator[None]:
        """A contextmanager which temporarily overrides signal handling.

        NB: This method calls signal.signal(), which will crash if not called from the main thread!
        """
        previous_signal_handler = cls.reset_signal_handler(new_signal_handler)
        try:
            yield
        finally:
            # Always restore the prior handler, even if the body raised.
            cls.reset_signal_handler(previous_signal_handler)

    @classmethod
    @contextmanager
    def ignoring_sigint(cls) -> Iterator[None]:
        """This method provides a context that temporarily disables responding to the SIGINT signal
        sent by a Ctrl-C in the terminal.

        We currently only use this to implement disabling catching SIGINT while an
        InteractiveProcess is running (where we want that process to catch it), and only when pantsd
        is not enabled (if pantsd is enabled, the client will actually catch SIGINT and forward it
        to the server, so we don't want the server process to ignore it.
        """
        try:
            cls._signal_handler._toggle_ignoring_sigint(True)
            yield
        finally:
            cls._signal_handler._toggle_ignoring_sigint(False)

    @classmethod
    def _iso_timestamp_for_now(cls):
        # ISO-8601 timestamp used in log entries.
        return datetime.datetime.now().isoformat()

    # NB: This includes a trailing newline, but no leading newline.
    _EXCEPTION_LOG_FORMAT = """\
timestamp: {timestamp}
process title: {process_title}
sys.argv: {args}
pid: {pid}
{message}
"""

    @classmethod
    def _format_exception_message(cls, msg, pid):
        """Render one fatal-error log entry with process metadata."""
        return cls._EXCEPTION_LOG_FORMAT.format(
            timestamp=cls._iso_timestamp_for_now(),
            process_title=setproctitle.getproctitle(),
            args=sys.argv,
            pid=pid,
            message=msg,
        )

    _traceback_omitted_default_text = "(backtrace omitted)"

    @classmethod
    def _format_traceback(cls, traceback_lines, should_print_backtrace):
        """Join the traceback lines, or substitute a placeholder when suppressed."""
        if should_print_backtrace:
            traceback_string = "\n{}".format("".join(traceback_lines))
        else:
            traceback_string = " {}".format(cls._traceback_omitted_default_text)
        return traceback_string

    _UNHANDLED_EXCEPTION_LOG_FORMAT = """\
Exception caught: ({exception_type}){backtrace}
Exception message: {exception_message}{maybe_newline}
"""

    @classmethod
    def _format_unhandled_exception_log(cls, exc, tb, add_newline, should_print_backtrace):
        """Render an unhandled exception (type, traceback, message) as a log entry."""
        exc_type = type(exc)
        exception_full_name = "{}.{}".format(exc_type.__module__, exc_type.__name__)
        exception_message = str(exc) if exc else "(no message)"
        maybe_newline = "\n" if add_newline else ""
        return cls._UNHANDLED_EXCEPTION_LOG_FORMAT.format(
            exception_type=exception_full_name,
            backtrace=cls._format_traceback(
                traceback_lines=traceback.format_tb(tb),
                should_print_backtrace=should_print_backtrace,
            ),
            exception_message=exception_message,
            maybe_newline=maybe_newline,
        )

    @classmethod
    def log_exception(cls, exc_class=None, exc=None, tb=None, add_newline=False):
        """Logs an unhandled exception to a variety of locations.

        Installed as sys.excepthook in install(); any argument left as None is
        recovered from sys.exc_info().
        """
        exc_class = exc_class or sys.exc_info()[0]
        exc = exc or sys.exc_info()[1]
        tb = tb or sys.exc_info()[2]

        # This exception was raised by a signal handler with the intent to exit the program.
        if exc_class == SignalHandler.SignalHandledNonLocalExit:
            return cls._handle_signal_gracefully(exc.signum, exc.signame, exc.traceback_lines)

        extra_err_msg = None
        try:
            # Always output the unhandled exception details into a log file, including the
            # traceback.
            exception_log_entry = cls._format_unhandled_exception_log(
                exc, tb, add_newline, should_print_backtrace=True
            )
            cls._log_exception(exception_log_entry)
        except Exception as e:
            extra_err_msg = "Additional error logging unhandled exception {}: {}".format(exc, e)
            logger.error(extra_err_msg)

        # The rust logger implementation will have its own stacktrace, but at import time, we want
        # to be able to see any stacktrace to know where the error is being raised, so we reproduce
        # it here.
        exception_log_entry = cls._format_unhandled_exception_log(
            exc, tb, add_newline, should_print_backtrace=True
        )
        logger.exception(exception_log_entry)

    @classmethod
    def _handle_signal_gracefully(cls, signum, signame, traceback_lines):
        """Signal handler for non-fatal signals which raises or logs an error."""

        def gen_formatted(formatted_traceback: str) -> str:
            return f"Signal {signum} ({signame}) was raised. Exiting with failure.{formatted_traceback}"

        # Extract the stack, and format an entry to be written to the exception log.
        formatted_traceback = cls._format_traceback(
            traceback_lines=traceback_lines, should_print_backtrace=True
        )
        signal_error_log_entry = gen_formatted(formatted_traceback)

        # TODO: determine the appropriate signal-safe behavior here (to avoid writing to our file
        # descriptors reentrantly, which raises an IOError).
        # This method catches any exceptions raised within it.
        cls._log_exception(signal_error_log_entry)

        # Create a potentially-abbreviated traceback for the terminal or other interactive stream.
        formatted_traceback_for_terminal = cls._format_traceback(
            traceback_lines=traceback_lines,
            should_print_backtrace=True,
        )
        terminal_log_entry = gen_formatted(formatted_traceback_for_terminal)

        # Print the output via standard logging.
        logger.error(terminal_log_entry)
| apache-2.0 |
miloharper/neural-network-animation | matplotlib/tests/test_patches.py | 9 | 8388 | """
Tests specific to the patches module.
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from numpy.testing import assert_almost_equal
from matplotlib.patches import Polygon
from matplotlib.patches import Rectangle
from matplotlib.testing.decorators import image_comparison
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import matplotlib.collections as mcollections
from matplotlib import path as mpath
from matplotlib import transforms as mtrans
def test_Polygon_close():
    """Regression test for GitHub issue #1018.

    A Polygon's ``closed`` attribute must survive a later ``set_xy`` call:
    the path was previously not re-closed when ``set_xy`` replaced the
    vertices.
    """
    # An open ring of vertices and the same ring explicitly closed.
    open_xy = [[0, 0], [0, 1], [1, 1]]
    closed_xy = open_xy + [[0, 0]]

    # (constructor vertices, closed flag, expected get_xy(), set_xy argument)
    cases = [
        (open_xy, True, closed_xy, open_xy),      # open path, close it
        (closed_xy, False, open_xy, closed_xy),   # closed path, open it
        (open_xy, False, open_xy, open_xy),       # open path, keep it open
        (closed_xy, True, closed_xy, closed_xy),  # closed path, keep it closed
    ]
    for verts, closed, expected, reset_verts in cases:
        poly = Polygon(verts, closed=closed)
        assert_array_equal(poly.get_xy(), expected)
        # Re-setting the vertices must preserve the open/closed behaviour.
        poly.set_xy(reset_verts)
        assert_array_equal(poly.get_xy(), expected)
def test_rotate_rect():
    """Rectangle(angle=...) must agree with an explicit 2-D rotation matrix."""
    loc = np.asarray([1.0, 2.0])
    width, height = 2, 3
    angle = 30.0

    # One rectangle rotated by matplotlib, one axis-aligned to rotate by hand.
    rotated = Rectangle(loc, width, height, angle=angle)
    unrotated = Rectangle(loc, width, height)

    # Build the rotation matrix from the angle (converted to radians).
    theta = np.pi * angle / 180.0
    rotation = np.array([[np.cos(theta), -np.sin(theta)],
                         [np.sin(theta), np.cos(theta)]])

    # Translate to the origin, rotate each vertex, and translate back.
    expected = np.inner(rotation, unrotated.get_verts() - loc).T + loc

    # The two constructions should coincide.
    assert_almost_equal(rotated.get_verts(), expected)
@image_comparison(baseline_images=['clip_to_bbox'])
def test_clip_to_bbox():
    """Image test: clip a compound path (star + circle) to a bbox and draw it."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_xlim([-18, 20])
    ax.set_ylim([-150, 100])

    # Build a star and a circle, scaled and offset into data coordinates.
    path = mpath.Path.unit_regular_star(8).deepcopy()
    path.vertices *= [10, 100]
    path.vertices -= [5, 25]

    path2 = mpath.Path.unit_circle().deepcopy()
    path2.vertices *= [10, 100]
    path2.vertices += [10, -25]

    combined = mpath.Path.make_compound_path(path, path2)

    patch = mpatches.PathPatch(
        combined, alpha=0.5, facecolor='coral', edgecolor='none')
    ax.add_patch(patch)

    # NOTE(review): the bbox y-limits are given high-to-low; the baseline image
    # encodes the resulting clip, so confirm intent before "fixing" the order.
    bbox = mtrans.Bbox([[-12, -77.5], [50, -110]])
    result_path = combined.clip_to_bbox(bbox)
    result_patch = mpatches.PathPatch(
        result_path, alpha=0.5, facecolor='green', lw=4, edgecolor='black')
    ax.add_patch(result_patch)
@image_comparison(baseline_images=['patch_alpha_coloring'], remove_text=True)
def test_patch_alpha_coloring():
    """
    Test checks that the patch and collection are rendered with the specified
    alpha values in their facecolor and edgecolor.
    """
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    # second copy shifted by (1, 1) so both shapes are visible
    cut_star2 = mpath.Path(verts + 1, codes)

    ax = plt.axes()
    # One Patch and one PathCollection with RGBA colors carrying alpha.
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle='dashdot',
                               facecolor=(1, 0, 0, 0.5),
                               edgecolor=(0, 0, 1, 0.75))
    ax.add_patch(patch)

    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles='dashdot',
                                      facecolor=(1, 0, 0, 0.5),
                                      edgecolor=(0, 0, 1, 0.75))
    ax.add_collection(col)

    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_alpha_override'], remove_text=True)
def test_patch_alpha_override():
    #: Test checks that specifying an alpha attribute for a patch or
    #: collection will override any alpha component of the facecolor
    #: or edgecolor.
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    # second copy shifted by (1, 1) so both shapes are visible
    cut_star2 = mpath.Path(verts + 1, codes)

    ax = plt.axes()
    # alpha=0.25 should win over the 0.5/0.75 alphas embedded in the colors.
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle='dashdot',
                               alpha=0.25,
                               facecolor=(1, 0, 0, 0.5),
                               edgecolor=(0, 0, 1, 0.75))
    ax.add_patch(patch)

    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles='dashdot',
                                      alpha=0.25,
                                      facecolor=(1, 0, 0, 0.5),
                                      edgecolor=(0, 0, 1, 0.75))
    ax.add_collection(col)

    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
@image_comparison(baseline_images=['patch_custom_linestyle'],
                  remove_text=True)
def test_patch_custom_linestyle():
    #: A test to check that patches and collections accept custom dash
    #: patterns as linestyle and that they display correctly.
    star = mpath.Path.unit_regular_star(6)
    circle = mpath.Path.unit_circle()
    # concatenate the star with an internal cutout of the circle
    verts = np.concatenate([circle.vertices, star.vertices[::-1]])
    codes = np.concatenate([circle.codes, star.codes])
    cut_star1 = mpath.Path(verts, codes)
    # second copy shifted by (1, 1) so both shapes are visible
    cut_star2 = mpath.Path(verts + 1, codes)

    ax = plt.axes()
    # linestyle given as an (offset, on-off-sequence) dash specification.
    patch = mpatches.PathPatch(cut_star1,
                               linewidth=5, linestyle=(0.0, (5.0, 7.0, 10.0, 7.0)),
                               facecolor=(1, 0, 0),
                               edgecolor=(0, 0, 1))
    ax.add_patch(patch)

    col = mcollections.PathCollection([cut_star2],
                                      linewidth=5, linestyles=[(0.0, (5.0, 7.0, 10.0, 7.0))],
                                      facecolor=(1, 0, 0),
                                      edgecolor=(0, 0, 1))
    ax.add_collection(col)

    ax.set_xlim([-1, 2])
    ax.set_ylim([-1, 2])
def test_wedge_movement():
    """Each Wedge setter must update the matching attribute in place."""
    # attribute name -> (initial value, new value, setter method name)
    param_dict = {'center': ((0, 0), (1, 1), 'set_center'),
                  'r': (5, 8, 'set_radius'),
                  'width': (2, 3, 'set_width'),
                  'theta1': (0, 30, 'set_theta1'),
                  'theta2': (45, 50, 'set_theta2')}

    init_args = {name: spec[0] for name, spec in six.iteritems(param_dict)}
    wedge = mpatches.Wedge(**init_args)

    for attr, (old_value, new_value, setter) in six.iteritems(param_dict):
        assert_equal(getattr(wedge, attr), old_value)
        getattr(wedge, setter)(new_value)
        assert_equal(getattr(wedge, attr), new_value)
@image_comparison(baseline_images=['wedge_range'],
                  remove_text=True)
def test_wedge_range():
    """Image test: draw wedges over edge-case angular ranges in a 3x3 grid."""
    ax = plt.axes()

    t1 = 2.313869244286224

    # Angle pairs exercising rounding, full circles, wrap-around and
    # reversed ranges.
    args = [[52.31386924, 232.31386924],
            [52.313869244286224, 232.31386924428622],
            [t1, t1 + 180.0],
            [0, 360],
            [90, 90 + 360],
            [-180, 180],
            [0, 380],
            [45, 46],
            [46, 45]]

    for i, (theta1, theta2) in enumerate(args):
        # Lay the wedges out on a 3-column grid.
        x = i % 3
        y = i // 3

        wedge = mpatches.Wedge((x * 3, y * 3), 1, theta1, theta2,
                               facecolor='none', edgecolor='k', lw=3)

        ax.add_artist(wedge)

    ax.set_xlim([-2, 8])
    ax.set_ylim([-2, 9])
if __name__ == '__main__':
    # Allow running this test module directly through nose.
    import nose
    nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
FCP-INDI/C-PAC | CPAC/utils/interfaces/utils.py | 1 | 4184 | # -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
# This functionality is adapted from poldracklab/niworkflows:
# https://github.com/poldracklab/niworkflows/blob/master/niworkflows/interfaces/utils.py
# https://fmriprep.readthedocs.io/
# https://poldracklab.stanford.edu/
# We are temporarily maintaining our own copy for more granular control.
"""
Utilities
"""
import os
import re
import json
import shutil
import numpy as np
import nibabel as nb
import nilearn.image as nli
# from textwrap import indent
from collections import OrderedDict
import scipy.ndimage as nd
from nipype import logging
from nipype.utils.filemanip import fname_presuffix
from nipype.interfaces.io import add_traits
from nipype.interfaces.base import (
traits, isdefined, File, InputMultiPath,
TraitedSpec, BaseInterfaceInputSpec, SimpleInterface,
DynamicTraitedSpec
)
from CPAC.info import __version__
import copy
LOG = logging.getLogger('nipype.interface')
class CopyXFormInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    # Reference image whose header (qform/sform) matrices are copied onto the inputs.
    hdr_file = File(exists=True, mandatory=True, desc='the file we get the header from')
class CopyXForm(SimpleInterface):
    """
    Copy the x-form matrices from `hdr_file` to `out_file`.

    Input/output traits are created dynamically from the ``fields`` argument
    (defaulting to a single ``in_file``); each listed input file is copied and
    its header matrices replaced with those of ``hdr_file``.
    """

    input_spec = CopyXFormInputSpec
    output_spec = DynamicTraitedSpec

    def __init__(self, fields=None, **inputs):
        # Fields to process; a bare string is normalized to a one-element list.
        self._fields = fields or ['in_file']
        if isinstance(self._fields, str):
            self._fields = [self._fields]

        super(CopyXForm, self).__init__(**inputs)

        # Declare an input trait per field and seed any values passed in.
        add_traits(self.inputs, self._fields)
        for f in set(self._fields).intersection(list(inputs.keys())):
            setattr(self.inputs, f, inputs[f])

    def _outputs(self):
        base = super(CopyXForm, self)._outputs()
        if self._fields:
            fields = copy.copy(self._fields)
            # The conventional 'in_file' input is exposed as 'out_file'.
            if 'in_file' in fields:
                idx = fields.index('in_file')
                fields.pop(idx)
                fields.insert(idx, 'out_file')

            base = add_traits(base, fields)
        return base

    def _run_interface(self, runtime):
        for f in self._fields:
            in_files = getattr(self.inputs, f)
            self._results[f] = []
            # Normalize a single path to a list so both cases share one loop.
            if not isinstance(in_files, list):
                in_files = [in_files]
            for in_file in in_files:
                out_name = fname_presuffix(
                    in_file, suffix='_xform', newpath=runtime.cwd)
                # Copy and replace header
                shutil.copy(in_file, out_name)
                _copyxform(self.inputs.hdr_file, out_name,
                           message='CopyXForm (niworkflows v%s)' % __version__)
                self._results[f].append(out_name)

            # Flatten out one-element lists
            if len(self._results[f]) == 1:
                self._results[f] = self._results[f][0]

        # Mirror the 'in_file' result under the conventional 'out_file' key.
        default = self._results.pop('in_file', None)
        if default:
            self._results['out_file'] = default
        return runtime
def _copyxform(ref_image, out_image, message=None):
    """Overwrite the qform/sform matrices of ``out_image`` with those of ``ref_image``.

    :param ref_image: path of the image providing the reference header
    :param out_image: path of the image rewritten in place
    :param message: provenance note stored in the header's ``descrip`` field
    """
    # Read in reference and output
    # Use mmap=False because we will be overwriting the output image
    resampled = nb.load(out_image, mmap=False)
    orig = nb.load(ref_image)

    if not np.allclose(orig.affine, resampled.affine):
        # Fix: the two concatenated literals previously rendered "datasetare" --
        # a space was missing between them.
        LOG.debug(
            'Affines of input and reference images do not match, '
            'FMRIPREP will set the reference image headers. '
            'Please, check that the x-form matrices of the input dataset '
            'are correct and manually verify the alignment of results.')

    # Copy xform infos
    qform, qform_code = orig.header.get_qform(coded=True)
    sform, sform_code = orig.header.get_sform(coded=True)
    header = resampled.header.copy()
    header.set_qform(qform, int(qform_code))
    header.set_sform(sform, int(sform_code))
    header['descrip'] = 'xform matrices modified by %s.' % (message or '(unknown)')

    # NOTE(review): get_data() is deprecated in modern nibabel in favor of
    # get_fdata(), but get_fdata() always returns float64 -- confirm dtype
    # expectations before migrating.
    newimg = resampled.__class__(resampled.get_data(), orig.affine, header)
    newimg.to_filename(out_image)
| bsd-3-clause |
40223226/2015cdbg8_6-22 | static/Brython3.1.1-20150328-091302/Lib/jqueryui/__init__.py | 603 | 3671 | """Wrapper around the jQuery UI library
Exposes a single object, jq, to manipulate the widgets designed in the library
This object supports :
- subscription : js[elt_id] returns an object matching the element with the
specified id
- a method get(**kw). The only keyword currently supported is "selector". The
method returns a list of instances of the class Element, each instance wraps
the elements matching the CSS selector passed
jq(selector="button") : returns instances of Element for all button tags
The value can be a list or tuple of CSS selector strings :
js(selector=("input[type=submit]","a")) : instances of Element for all
"input" tags with attribute "type" set to "submit" + "a" tags (anchors)
Instances of Element have the same interface as the selections made by the
jQuery function $, with the additional methods provided by jQuery UI. For
instance, to turn an element into a dialog :
jq[elt_id].dialog()
When jQuery UI methods expect a Javascript object, they can be passed as
key/value pairs :
jq['tags'].autocomplete(source=availableTags)
"""
from browser import html, document, window
import javascript
# Directory holding this module; used to resolve the bundled CSS/JS assets.
_path = __file__[:__file__.rfind('/')]+'/'

document <= html.LINK(rel="stylesheet",
    href=_path+'css/smoothness/jquery-ui.css')

# The scripts must be loaded in blocking mode, by using the function
# load(script_url[, names]) in module javascript
# If we just add them to the document with script tags, eg :
#
# document <= html.SCRIPT(script_url)
# _jqui = window.jQuery.noConflict(True)
#
# the name "jQuery" is not in the Javascript namespace until the script is
# fully loaded in the page, so "window.jQuery" raises an exception

# Load jQuery and put name 'jQuery' in the global Javascript namespace
javascript.load(_path+'jquery-1.11.2.js', ['jQuery'])
javascript.load(_path+'jquery-ui.js')

# Keep a private reference to jQuery and release the global $ / jQuery names.
_jqui = window.jQuery.noConflict(True)

# DOM event names that Element.__getattr__ recognises as bind() shortcuts,
# e.g. elt.click(f) == elt.bind('click', f).
_events = ['abort',
    'beforeinput',
    'blur',
    'click',
    'compositionstart',
    'compositionupdate',
    'compositionend',
    'dblclick',
    'error',
    'focus',
    'focusin',
    'focusout',
    'input',
    'keydown',
    'keyup',
    'load',
    'mousedown',
    'mouseenter',
    'mouseleave',
    'mousemove',
    'mouseout',
    'mouseover',
    'mouseup',
    'resize',
    'scroll',
    'select',
    'unload']
class JQFunction:
    """Adapter making a jQuery method callable with Python keyword arguments.

    jQuery UI methods expect a single options object; any keyword arguments
    are collected into one dict and appended as the last positional argument.
    """

    def __init__(self, func):
        self.func = func

    def __call__(self, *args, **kw):
        call_args = args + (kw,) if kw else args
        return self.func(*call_args)
class Element:
    """Wrapper around the objects returned by jQuery selections."""

    def __init__(self, item):
        self.item = item

    def bind(self, event, callback):
        """Binds an event on the element to function callback."""
        getattr(self.item, event)(callback)

    def __getattr__(self, attr):
        # Only reached when normal attribute lookup fails, i.e. for
        # attributes delegated to the wrapped jQuery object.
        wrapped = getattr(self.item, attr)
        if attr in _events:
            # elt.click(f) is handled like elt.bind('click', f)
            return lambda callback: self.bind(attr, callback)
        # Wrap callables so keyword arguments become a jQuery options object.
        return JQFunction(wrapped) if callable(wrapped) else wrapped
class jq:
    """Entry point mirroring the jQuery function.

    jq.get(selector=...) returns Element wrappers for the CSS selector(s);
    jq[elt_id] returns the Element for a single DOM id.
    """

    @staticmethod
    def get(**selectors):
        items = []
        for k, v in selectors.items():
            if k == 'selector':
                # Fix: isinstance() requires a *tuple* of types; the original
                # passed a list, which raises TypeError at runtime.
                if isinstance(v, (list, tuple)):
                    values = v
                else:
                    values = [v]
                for value in values:
                    items.append(Element(_jqui(value)))
            elif k == 'element':
                items = Element(_jqui(v))
        return items

    @staticmethod
    def __getitem__(element_id):
        # Subscription by element id: jq['tags'] selects '#tags'.
        return jq.get(selector='#'+element_id)[0]
| gpl-3.0 |
sobomax/virtualbox_64bit_edd | src/VBox/Devices/EFI/Firmware/BaseTools/Source/Python/UPT/Parser/InfPcdSectionParser.py | 11 | 7868 | ## @file
# This file contained the parser for [Pcds] sections in INF file
#
# Copyright (c) 2011, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfPcdSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Library import GlobalData
from Library.String import SplitPcdEntry
from Parser.InfParserMisc import InfParserSectionRoot
class InfPcdSectionParser(InfParserSectionRoot):
    ## Section PCD related parser
    #
    # For 5 types of PCD list below, all use this function.
    # 'FixedPcd', 'FeaturePcd', 'PatchPcd', 'Pcd', 'PcdEx'
    #
    # This is a INF independent parser, the validation in this parser only
    # cover
    # INF spec scope, will not cross DEC/DSC to check pcd value
    #
    def InfPcdParser(self, SectionString, InfSectionObject, FileName):
        KeysList = []
        PcdList = []
        CommentsList = []
        ValueList = []
        #
        # Current section archs
        #
        LineIndex = -1
        for Item in self.LastSectionHeaderContent:
            if (Item[0], Item[1], Item[3]) not in KeysList:
                KeysList.append((Item[0], Item[1], Item[3]))
                LineIndex = Item[3]

            # Fixed/Feature/plain Pcd sections are not allowed in as-built INFs.
            if (Item[0].upper() == DT.TAB_INF_FIXED_PCD.upper() or \
                Item[0].upper() == DT.TAB_INF_FEATURE_PCD.upper() or \
                Item[0].upper() == DT.TAB_INF_PCD.upper()) and GlobalData.gIS_BINARY_INF:
                Logger.Error('InfParser', FORMAT_INVALID, ST.ERR_ASBUILD_PCD_SECTION_TYPE%("\"" + Item[0] + "\""),
                             File=FileName, Line=LineIndex)
        #
        # For Common INF file
        #
        if not GlobalData.gIS_BINARY_INF:
            #
            # Macro defined in this section
            #
            SectionMacros = {}
            for Line in SectionString:
                PcdLineContent = Line[0]
                PcdLineNo = Line[1]
                # Blank lines terminate any pending comment block.
                if PcdLineContent.strip() == '':
                    CommentsList = []
                    continue

                if PcdLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
                    CommentsList.append(Line)
                    continue
                else:
                    #
                    # Encounter a PCD entry
                    #
                    if PcdLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
                        # Split off a tail comment, keep only the entry text.
                        CommentsList.append((
                            PcdLineContent[PcdLineContent.find(DT.TAB_COMMENT_SPLIT):],
                            PcdLineNo))
                        PcdLineContent = PcdLineContent[:PcdLineContent.find(DT.TAB_COMMENT_SPLIT)]

                    if PcdLineContent != '':
                        #
                        # Find Macro
                        #
                        Name, Value = MacroParser((PcdLineContent, PcdLineNo),
                                                  FileName,
                                                  DT.MODEL_EFI_PCD,
                                                  self.FileLocalMacros)
                        # Fix: compare to None with 'is not' (PEP 8), not '!='.
                        if Name is not None:
                            SectionMacros[Name] = Value
                            ValueList = []
                            CommentsList = []
                            continue

                        PcdEntryReturn = SplitPcdEntry(PcdLineContent)

                        if not PcdEntryReturn[1]:
                            TokenList = ['']
                        else:
                            TokenList = PcdEntryReturn[0]

                        ValueList[0:len(TokenList)] = TokenList

                        #
                        # Replace with Local section Macro and [Defines] section Macro.
                        #
                        ValueList = [InfExpandMacro(Value, (FileName, PcdLineContent, PcdLineNo),
                                                    self.FileLocalMacros, SectionMacros, True)
                                     for Value in ValueList]

                    if len(ValueList) >= 1:
                        PcdList.append((ValueList, CommentsList, (PcdLineContent, PcdLineNo, FileName)))
                        ValueList = []
                        CommentsList = []
                    continue
        #
        # For Binary INF file
        #
        else:
            for Line in SectionString:
                LineContent = Line[0].strip()
                LineNo = Line[1]

                if LineContent == '':
                    CommentsList = []
                    continue

                if LineContent.startswith(DT.TAB_COMMENT_SPLIT):
                    CommentsList.append(LineContent)
                    continue
                #
                # Have comments at tail.
                #
                CommentIndex = LineContent.find(DT.TAB_COMMENT_SPLIT)
                if CommentIndex > -1:
                    CommentsList.append(LineContent[CommentIndex+1:])
                    LineContent = LineContent[:CommentIndex]

                TokenList = GetSplitValueList(LineContent, DT.TAB_VALUE_SPLIT)
                #
                # PatchablePcd
                # TokenSpace.CName | Value | Offset
                #
                if KeysList[0][0].upper() == DT.TAB_INF_PATCH_PCD.upper():
                    if len(TokenList) != 3:
                        Logger.Error('InfParser',
                                     FORMAT_INVALID,
                                     ST.ERR_ASBUILD_PATCHPCD_FORMAT_INVALID,
                                     File=FileName,
                                     Line=LineNo,
                                     ExtraData=LineContent)
                #
                elif KeysList[0][0].upper() == DT.TAB_INF_PCD_EX.upper():
                    if len(TokenList) != 2:
                        Logger.Error('InfParser',
                                     FORMAT_INVALID,
                                     ST.ERR_ASBUILD_PCDEX_FORMAT_INVALID,
                                     File=FileName,
                                     Line=LineNo,
                                     ExtraData=LineContent)
                ValueList[0:len(TokenList)] = TokenList
                if len(ValueList) >= 1:
                    PcdList.append((ValueList, CommentsList, (LineContent, LineNo, FileName)))
                    ValueList = []
                    CommentsList = []
                continue

        if not InfSectionObject.SetPcds(PcdList, KeysList = KeysList,
                                        PackageInfo = self.InfPackageSection.GetPackages()):
            Logger.Error('InfParser',
                         FORMAT_INVALID,
                         ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR%("[PCD]"),
                         File=FileName,
                         Line=LineIndex)
| gpl-2.0 |
strint/tensorflow | tensorflow/contrib/util/__init__.py | 118 | 1513 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for dealing with Tensors.
See @{$python/contrib.util} guide.
@@constant_value
@@make_tensor_proto
@@make_ndarray
@@ops_used_by_graph_def
@@stripped_op_list_for_graph
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.framework.meta_graph import ops_used_by_graph_def
from tensorflow.python.framework.meta_graph import stripped_op_list_for_graph
from tensorflow.python.framework.tensor_util import constant_value
from tensorflow.python.framework.tensor_util import make_tensor_proto
from tensorflow.python.framework.tensor_util import MakeNdarray as make_ndarray
# pylint: disable=unused_import
from tensorflow.python.util.all_util import remove_undocumented
remove_undocumented(__name__)
| apache-2.0 |
radupaul/profitpy | profit/workbench/sessionreplay.py | 18 | 4671 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2007 Troy Melhase
# Distributed under the terms of the GNU General Public License v2
# Author: Troy Melhase <troy@gci.net>
##
# This module defines the SessionReplay dialog class.
#
# SessionReplay dialogs offer the user widgets to control the replay
# of a session. It includes a delivery interval spinner and
# associated slider, and also a button to restart the session replay.
##
from PyQt4.QtCore import QTimer
from PyQt4.QtGui import QDialog, QMessageBox
from profit.lib import logging
from profit.lib import Signals
from profit.workbench.widgets.ui_sessionreplay import Ui_SessionReplayWidget
class SessionReplay(QDialog, Ui_SessionReplayWidget):
    """ Dialog for controlling the replay of a session.
    After the dialog instance is constructed, clients should call
    'setImportParameters' to associate the dialog with a session.
    Clients should use 'exec_()' to display the dialog, not 'show'.
    """
    def __init__(self, interval=50, parent=None):
        """ Initializer.
        @param interval=50 milliseconds between message delivery
        @param parent=None ancestor of this dialog
        """
        QDialog.__init__(self, parent)
        self.setupUi(self)
        self.interval = interval
        # Replay state; populated later by setImportParameters and
        # importSession.
        self.session = None
        self.filename = None
        self.types = None
        self.loader = None
        self.importer = None
        # Drives periodic message delivery; started in exec_().
        self.timer = QTimer()
    def exec_(self):
        """ Dialog main loop.
        Wires the interval widgets to the delivery timer, starts the timer,
        then enters the Qt modal loop.
        @return QDialog.DialogCode result
        """
        connect = self.connect
        setInterval = self.timer.setInterval
        # Both the slider and the spin box update the timer interval directly.
        connect(self.timerSlider, Signals.intValueChanged, setInterval)
        connect(self.timerSpin, Signals.intValueChanged, setInterval)
        connect(self.timer, Signals.timeout, self.on_timer_timeout)
        self.timer.start(self.interval)
        return QDialog.exec_(self)
    def on_restartButton_clicked(self):
        """ Signal handler for restart button clicked signals.
        Re-creates the loader from the saved importer so delivery starts
        over from the first message.
        """
        if self.importer:
            self.timer.setInterval(self.timerSpin.value())
            self.loader = self.importer()
    def on_timer_timeout(self):
        """ Signal handler for the delivery timer timeout signal.
        If the instance has a session but no loader, it will attempt
        to import the session object and initiate the replay.
        If a loader is present (possibly added by importSession),
        the next message is requested from the loader.
        @return None
        """
        if self.session and not self.loader:
            try:
                self.importSession(self.session, self.filename, self.types)
            except (Exception, ), ex:
                QMessageBox.critical(
                    self, 'Import Exception',
                    'Exception "%s" during import. '
                    'Import not completed.' % ex)
                self.close()
        if self.loader:
            try:
                msgid = self.loader.next()
            except (StopIteration, ):
                # Loader exhausted; keep the timer running but clamp the
                # interval to at least 50 ms.
                self.timer.setInterval(max(self.timer.interval(), 50))
            else:
                self.importProgress.setValue(msgid)
                if msgid == self.last:
                    logging.debug(
                        'Imported %s messages from file "%s".',
                        self.count, self.filename)
    def importSession(self, session, filename, types):
        """ Initiates session import.
        @param session Session instance
        @param filename name of file with serialized messages
        @param types sequence of message types to import
        @return None
        """
        importer = session.importMessages(str(filename), types)
        loader = importer()
        try:
            # First value yielded by the loader is the total message count.
            self.count = loader.next()
            self.last = self.count - 1
            if not self.count:
                raise StopIteration()
        except (StopIteration, ):
            self.loader = self.count = self.last = None
            logging.debug('Warning messages not imported from "%s"', filename)
        else:
            self.importProgress.setMaximum(self.last)
            self.importer = importer
            self.loader = loader
    def setImportParameters(self, session, filename, types):
        """ Sets the parameters for the import (replay) of session messages.
        @param session Session instance
        @param filename name of file with serialized messages
        @param types sequence of message types to import
        @return None
        """
        self.session = session
        self.filename = filename
        self.types = types
| gpl-2.0 |
eayunstack/fuel-web | nailgun/nailgun/test/unit/test_graph.py | 3 | 2955 | # -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from nailgun.orchestrator import deployment_graph
from nailgun.orchestrator import graph_visualization
from nailgun.test import base
class TestDeploymentGraphViualization(base.BaseUnitTest):
    # NOTE(review): the class name misspells "Visualization"; left unchanged
    # so external references to the test class keep working.
    def get_dotgraph_with_tasks(self, tasks):
        # Helper: build a deployment graph from raw task dicts and render it
        # to a DOT-language string for the assertions below.
        graph = deployment_graph.DeploymentGraph()
        graph.add_tasks(tasks)
        visualization = graph_visualization.GraphVisualization(graph)
        dotgraph = visualization.get_dotgraph()
        return dotgraph.to_string()
    def test_stage_type(self):
        # Stage nodes render as filled red rectangles.
        tasks = [
            {'id': 'pre_deployment', 'type': 'stage'},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        six.assertRegex(self, dotgraph, 'pre_deployment .*color=red.*;')
        six.assertRegex(self, dotgraph, 'pre_deployment .*shape=rect.*;')
        six.assertRegex(self, dotgraph, 'pre_deployment .*style=filled.*;')
    def test_group_type(self):
        # Group nodes render as filled, rounded, light-blue boxes.
        tasks = [
            {'id': 'controller', 'type': 'group'},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        six.assertRegex(self, dotgraph, 'controller .*color=lightskyblue.*;')
        six.assertRegex(self, dotgraph, 'controller .*shape=box.*;')
        six.assertRegex(self, dotgraph,
                        'controller .*style="filled, rounded".*;')
    def test_skipped_type(self):
        # Tasks of type 'skipped' are greyed out.
        tasks = [
            {'id': 'hiera', 'type': 'skipped'},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        self.assertIn('hiera [color=gray95];', dotgraph)
    def test_add_simple_connection(self):
        # A "requires" relation becomes a directed DOT edge.
        tasks = [
            {'id': 'task-A'},
            {'id': 'task-B', 'requires': ['task-A']},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        self.assertIn('"task-A" -> "task-B"', dotgraph)
    def test_node_default_attrs(self):
        # Plain tasks get the default yellow-green filled style.
        tasks = [
            {'id': 'task-A'},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        six.assertRegex(self, dotgraph, '"task-A" .*color=yellowgreen.*;')
        six.assertRegex(self, dotgraph, '"task-A" .*style=filled.*;')
    def test_skipped_metaparam(self):
        # The 'skipped' metaparameter greys the node out exactly like
        # type='skipped' does.
        tasks = [
            {'id': 'task_a', 'skipped': True},
        ]
        dotgraph = self.get_dotgraph_with_tasks(tasks)
        self.assertIn('task_a [color=gray95];', dotgraph)
| apache-2.0 |
GrabzIt/grabzit | python/GrabzIt/GrabzItException.py | 1 | 3698 | #!/usr/bin/python
class GrabzItException(Exception):
    """Exception raised by the GrabzIt API client.

    Carries the numeric status code returned by the service; the code is
    exposed publicly as the ``Code`` attribute and matches one of the
    constants below.
    """
    # Success / general status.
    SUCCESS = 0
    # Parameter validation errors (1xx).
    PARAMETER_NO_URL = 100
    PARAMETER_INVALID_URL = 101
    PARAMETER_NON_EXISTANT_URL = 102
    PARAMETER_MISSING_APPLICATION_KEY = 103
    PARAMETER_UNRECOGNISED_APPLICATION_KEY = 104
    PARAMETER_MISSING_SIGNATURE = 105
    PARAMETER_INVALID_SIGNATURE = 106
    PARAMETER_INVALID_FORMAT = 107
    PARAMETER_INVALID_COUNTRY_CODE = 108
    PARAMETER_DUPLICATE_IDENTIFIER = 109
    PARAMETER_MATCHING_RECORD_NOT_FOUND = 110
    PARAMETER_INVALID_CALLBACK_URL = 111
    PARAMETER_NON_EXISTANT_CALLBACK_URL = 112
    PARAMETER_IMAGE_WIDTH_TOO_LARGE = 113
    PARAMETER_IMAGE_HEIGHT_TOO_LARGE = 114
    PARAMETER_BROWSER_WIDTH_TOO_LARGE = 115
    PARAMETER_BROWSER_HEIGHT_TOO_LARGE = 116
    PARAMETER_DELAY_TOO_LARGE = 117
    PARAMETER_INVALID_BACKGROUND = 118
    PARAMETER_INVALID_INCLUDE_LINKS = 119
    PARAMETER_INVALID_INCLUDE_OUTLINE = 120
    PARAMETER_INVALID_PAGE_SIZE = 121
    PARAMETER_INVALID_PAGE_ORIENTATION = 122
    PARAMETER_VERTICAL_MARGIN_TOO_LARGE = 123
    PARAMETER_HORIZONTAL_MARGIN_TOO_LARGE = 124
    PARAMETER_INVALID_COVER_URL = 125
    PARAMETER_NON_EXISTANT_COVER_URL = 126
    PARAMETER_MISSING_COOKIE_NAME = 127
    PARAMETER_MISSING_COOKIE_DOMAIN = 128
    PARAMETER_INVALID_COOKIE_NAME = 129
    PARAMETER_INVALID_COOKIE_DOMAIN = 130
    PARAMETER_INVALID_COOKIE_DELETE = 131
    PARAMETER_INVALID_COOKIE_HTTP = 132
    PARAMETER_INVALID_COOKIE_EXPIRY = 133
    PARAMETER_INVALID_CACHE_VALUE = 134
    PARAMETER_INVALID_DOWNLOAD_VALUE = 135
    PARAMETER_INVALID_SUPPRESS_VALUE = 136
    PARAMETER_MISSING_WATERMARK_IDENTIFIER = 137
    PARAMETER_INVALID_WATERMARK_IDENTIFIER = 138
    PARAMETER_INVALID_WATERMARK_XPOS = 139
    PARAMETER_INVALID_WATERMARK_YPOS = 140
    PARAMETER_MISSING_WATERMARK_FORMAT = 141
    PARAMETER_WATERMARK_TOO_LARGE = 142
    PARAMETER_MISSING_PARAMETERS = 143
    PARAMETER_QUALITY_TOO_LARGE = 144
    PARAMETER_QUALITY_TOO_SMALL = 145
    PARAMETER_REPEAT_TOO_SMALL = 149
    PARAMETER_INVALID_REVERSE = 150
    PARAMETER_FPS_TOO_LARGE = 151
    PARAMETER_FPS_TOO_SMALL = 152
    PARAMETER_SPEED_TOO_FAST = 153
    PARAMETER_SPEED_TOO_SLOW = 154
    PARAMETER_INVALID_ANIMATION_COMBINATION = 155
    PARAMETER_START_TOO_SMALL = 156
    PARAMETER_DURATION_TOO_SMALL = 157
    PARAMETER_NO_HTML = 163
    PARAMETER_INVALID_TARGET_VALUE = 165
    PARAMETER_INVALID_HIDE_VALUE = 166
    PARAMETER_INVALID_INCLUDE_IMAGES = 167
    PARAMETER_INVALID_EXPORT_URL = 168
    PARAMETER_INVALID_WAIT_FOR_VALUE = 169
    PARAMETER_INVALID_TRANSPARENT_VALUE = 170
    PARAMETER_INVALID_ENCRYPTION_KEY = 171
    PARAMETER_INVALID_NO_ADS = 172
    PARAMETER_INVALID_PROXY = 173
    PARAMETER_INVALID_NO_NOTIFY = 174
    PARAMETER_INVALID_HD = 176
    PARAMETER_INVALID_MEDIA_TYPE = 177
    PARAMETER_INVALID_PASSWORD = 178
    PARAMETER_INVALID_MERGE = 179
    # Network-level errors (2xx).
    NETWORK_SERVER_OFFLINE = 200
    NETWORK_GENERAL_ERROR = 201
    NETWORK_DDOS_ATTACK = 202
    # Rendering errors (3xx).
    RENDERING_ERROR = 300
    RENDERING_MISSING_SCREENSHOT = 301
    # Miscellaneous service errors (4xx-5xx).
    GENERIC_ERROR = 400
    UPGRADE_REQUIRED = 500
    # Local file-handling errors (6xx).
    FILE_SAVE_ERROR = 600
    FILE_NON_EXISTANT_PATH = 601
    def __init__(self, message, code):
        # 'code' is one of the status constants above; it is stored on the
        # instance (as 'Code', on the following line) so callers can branch
        # on it.
        Exception.__init__(self, message)
self.Code = code | mit |
ruibarreira/linuxtrail | usr/lib/python2.7/dist-packages/docutils/languages/nl.py | 200 | 1865 | # $Id: nl.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: Martijn Pieters <mjpieters@users.sourceforge.net>
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/docs/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Dutch-language mappings for language-dependent features of Docutils.
"""
__docformat__ = 'reStructuredText'
# Translation tables consumed by Docutils when rendering Dutch documents.
labels = {
    # fixed: language-dependent
    'author': 'Auteur',
    'authors': 'Auteurs',
    'organization': 'Organisatie',
    'address': 'Adres',
    'contact': 'Contact',
    'version': 'Versie',
    'revision': 'Revisie',
    'status': 'Status',
    'date': 'Datum',
    'copyright': 'Copyright',
    'dedication': 'Toewijding',
    'abstract': 'Samenvatting',
    'attention': 'Attentie!',
    'caution': 'Let op!',
    'danger': '!GEVAAR!',
    'error': 'Fout',
    'hint': 'Hint',
    'important': 'Belangrijk',
    'note': 'Opmerking',
    'tip': 'Tip',
    'warning': 'Waarschuwing',
    'contents': 'Inhoud'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
    # language-dependent: fixed
    'auteur': 'author',
    'auteurs': 'authors',
    'organisatie': 'organization',
    'adres': 'address',
    'contact': 'contact',
    'versie': 'version',
    'revisie': 'revision',
    'status': 'status',
    'datum': 'date',
    'copyright': 'copyright',
    'toewijding': 'dedication',
    'samenvatting': 'abstract'}
"""Dutch (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
| gpl-3.0 |
mrdon/bottle | test/testall.py | 46 | 1119 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Enable coverage tracking when the 'coverage' package is available; its
# absence is deliberately not an error.
try:
    import coverage
    coverage.process_startup()
except ImportError:
    pass
import unittest
import sys, os, glob
# Collect every test_*.py next to this script and make both the test
# directory and the project root importable.
test_root = os.path.dirname(os.path.abspath(__file__))
test_files = glob.glob(os.path.join(test_root, 'test_*.py'))
os.chdir(test_root)
sys.path.insert(0, os.path.dirname(test_root))
sys.path.insert(0, test_root)
# Strip the '.py' suffix to obtain importable module names.
test_names = [os.path.basename(name)[:-3] for name in test_files]
if 'help' in sys.argv or '-h' in sys.argv:
    sys.stdout.write('''Command line arguments:
    fast: Skip server adapter tests.
    verbose: Print tests even if they pass.
    ''')
    sys.exit(0)
if 'fast' in sys.argv:
    sys.stderr.write("Warning: The 'fast' keyword skipps server tests.\n")
    test_names.remove('test_server')
suite = unittest.defaultTestLoader.loadTestsFromNames(test_names)
def run():
    """Enable bottle debug mode, run the collected suite, and exit.

    Exits with status 1 when any test errors or fails, 0 otherwise.
    """
    import bottle
    bottle.debug(True)
    verbosity = 2 if 'verbose' in sys.argv else 0
    outcome = unittest.TextTestRunner(verbosity=verbosity).run(suite)
    sys.exit(1 if (outcome.errors or outcome.failures) else 0)
# Script entry point: run the aggregated bottle test suite.
if __name__ == '__main__':
    run()
| mit |
koreyou/word_embedding_loader | word_embedding_loader/word_embedding.py | 1 | 18340 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, \
unicode_literals
import six
__all__ = ["WordEmbedding", "classify_format"]
import warnings
import numpy as np
from word_embedding_loader import loader, saver, util
# Mimic namespaces: each class simply pairs the loader and saver
# implementations for one on-disk word-embedding format.
class _glove:
    loader = loader.glove
    saver = saver.glove
class _word2vec_bin:
    loader = loader.word2vec_bin
    saver = saver.word2vec_bin
class _word2vec_text:
    loader = loader.word2vec_text
    saver = saver.word2vec_text
class _numpy:
    loader = loader.numpy
    saver = saver.numpy
def _select_module(format, binary):
if format == 'glove':
mod = _glove
if binary:
warnings.warn(
b"Argument binary=True for glove loader is ignored.",
UserWarning)
elif format == 'word2vec':
if binary:
mod = _word2vec_bin
else:
mod = _word2vec_text
elif format == 'numpy':
mod = _numpy
if binary:
warnings.warn(
b"Argument binary=True for numpy loader is ignored.",
UserWarning)
else:
raise NameError(('Unknown format "%s"' % format).encode('utf-8'))
return mod
def classify_format(path):
    """
    Determine the format of word embedding file by their content. This
    operation only looks at the first two lines and does not check the
    sanity of input file.
    Args:
        path (str):
    Returns:
        class
    """
    # Probe each known format in a fixed order; the first validator that
    # accepts the file wins.
    probes = (
        (loader.glove.check_valid, _glove),
        (loader.word2vec_text.check_valid, _word2vec_text),
        (loader.numpy.check_valid, _numpy),
        (loader.word2vec_bin.check_valid, _word2vec_bin),
    )
    for is_valid, fmt in probes:
        if is_valid(path):
            return fmt
    raise OSError(b"Invalid format")
class WordEmbedding(object):
    """
    Main API for loading and saving of pretrained word embedding files.
    .. note:: You do not need to call initializer directly in normal usage.
        Instead you should call
        :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
    Args:
        vectors (numpy.ndarray): Word embedding representation vectors
        vocab (dict): Mapping from words (bytes) to vector
            indices (int).
        freqs (dict): Mapping from words (bytes) to word frequency counts
            (int).
    Attributes:
        vectors (numpy.ndarray): Word embedding vectors in shape of
            ``(vocabulary size, feature dimension)``.
        vocab (dict): Mapping from words (bytes) to vector indices (int)
        freqs (dict or None): Mapping from words (bytes) to frequency counts
            (int).
    """
    def __init__(self, vectors, vocab, freqs=None):
        if not isinstance(vectors, np.ndarray):
            raise TypeError(
                ("Expected numpy.ndarray for vectors, %s found."% type(vectors)
                 ).encode('utf-8'))
        if not isinstance(vocab, dict):
            # Bug fix: this message previously reported type(vectors)
            # instead of the offending type(vocab).
            raise TypeError(
                ("Expected dict for vocab, %s found." % type(vocab)
                 ).encode('utf-8'))
        if len(vectors) != len(vocab):
            # Mismatch is suspicious but not fatal; warn and continue.
            warnings.warn(
                ("vectors and vocab size unmatch (%d != %d)" %
                 (len(vectors), len(vocab))).encode('utf-8'))
        self.vectors = vectors
        self.vocab = vocab
        self.freqs = freqs
        # Records the loader/saver namespace used by ``load`` so that
        # ``save(use_load_condition=True)`` can reuse the same format.
        self._load_cond = None
    @classmethod
    def load(cls, path, vocab=None, dtype=np.float32, max_vocab=None,
             format=None, binary=False):
        """
        Load pretrained word embedding from a file.
        Args:
            path (str): Path of file to load.
            vocab (str or set or None): If ``str``, it is assumed to be path to
                vocabulary file created by word2vec with
                ``-save-vocab <file>`` option.
                If ``set`` it will only load words that are in vocab.
            dtype (numpy.dtype): Element data type to use for the array.
            max_vocab (int): Number of vocabulary to read.
            format (str or None): Format of the file. ``'word2vec'`` for file
                that was implemented in
                `word2vec <https://code.google.com/archive/p/word2vec/>`_,
                by Mikolov et al.. ``'glove'`` for file that was implemented in
                `GloVe <https://nlp.stanford.edu/projects/glove/>`_, Global
                Vectors for Word Representation, by Jeffrey Pennington,
                Richard Socher, Christopher D. Manning from Stanford NLP group.
                ``'numpy'`` for efficient format, defined and implemented by
                this project.
                If ``None`` is given, the format is guessed from the content.
            binary (bool): Load file as binary file as in word embedding file
                created by
                `word2vec <https://code.google.com/archive/p/word2vec/>`_ with
                ``-binary 1`` option. If ``format`` is ``'glove'`` or ``None``,
                this argument is simply ignored
        Returns:
            :class:`~word_embedding_loader.word_embedding.WordEmbedding`
        """
        freqs = None
        vocab_dict = None
        if isinstance(vocab, six.string_types):
            with open(vocab, mode='rb') as f:
                freqs = loader.vocab.load_vocab(f)
            # Create vocab from freqs, most frequent words first.
            # [:None] gives all the list member
            vocab_dict = {k: i for i, (k, v) in enumerate(
                sorted(six.iteritems(freqs),
                       key=lambda k_v: k_v[1], reverse=True)[:max_vocab])}
        elif isinstance(vocab, list):
            vocab = set(vocab)
        elif isinstance(vocab, set) or vocab is None:
            pass
        else:
            raise TypeError(
                'Expected set, str or None for vocab but %s is given.' %
                type(vocab)
            )
        if format is None:
            mod = classify_format(path)
        else:
            mod = _select_module(format, binary)
        with open(path, mode='rb') as f:
            if vocab_dict is not None:
                arr = mod.loader.load_with_vocab(f, vocab_dict, dtype=dtype)
            else:
                arr, vocab_dict = mod.loader.load(
                    f, max_vocab=max_vocab, dtype=dtype, vocab=vocab)
        obj = cls(arr, vocab_dict, freqs)
        obj._load_cond = mod
        return obj
    def save(self, path, format, binary=False, use_load_condition=False):
        """
        Save object as word embedding file. For most arguments, you should refer
        to :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
        Args:
            use_load_condition (bool): If `True`, options from
                :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`
                is used.
        Raises:
            ValueError: ``use_load_condition == True`` but the object is not
                initialized via
                :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`.
        """
        if use_load_condition:
            if self._load_cond is None:
                raise ValueError(
                    b"use_load_condition was specified but the object is not "
                    b"loaded from a file")
            # Use load condition
            mod = self._load_cond
        else:
            mod = _select_module(format, binary)
        if self.freqs is None:
            # No frequency information: write words in vector-index order.
            itr = list(
                sorted(six.iteritems(self.vocab), key=lambda k_v: k_v[1]))
        else:
            # Write the most frequent words first.
            itr = list(
                sorted(six.iteritems(self.vocab),
                       key=lambda k_v: self.freqs[k_v[0]], reverse=True)
            )
        with open(path, mode='wb') as f:
            mod.saver.save(f, self.vectors, itr)
    def __len__(self):
        # Vocabulary size equals the number of embedding rows.
        return len(self.vectors)
    @property
    def size(self):
        """
        Feature dimension of the loaded vector.
        Returns:
            int
        """
        return self.vectors.shape[1]
    def resize(self, size):
        """
        Reduce number of vocabulary in place.
        Args:
            size (int): new size
        Returns:
            ~WordEmbedding: Returns reference to self
        """
        if size < len(self):
            n = len(self) - size
            if self.freqs is not None:
                # Drop the n least frequent words.
                del_keys = [
                    k for k, v in sorted(
                        six.iteritems(self.freqs), key=lambda k_v: k_v[1])[:n]]
            else:
                # Without frequencies, drop the words with the largest
                # indices.
                del_keys = [
                    k for k, v in six.iteritems(self.vocab) if v >= size]
            assert len(del_keys) == n
            for k in del_keys:
                self.vocab, self.vectors, _ = \
                    util.remove_vocab(self.vocab, self.vectors, k)
                if self.freqs is not None:
                    del self.freqs[k]
        return self
    def apply_freqs(self, freqs=None):
        """
        Attach frequency counts to this embedding.
        Args:
            freqs (dict or None): Mapping from words (bytes) to counts (int).
                If ``None``, the existing ``self.freqs`` is reapplied.
        Raises:
            ValueError: ``freqs`` is ``None`` while ``self.freqs`` is also
                ``None``, or ``freqs`` and ``vocab`` differ in size.
        """
        if freqs is None:
            if self.freqs is None:
                raise ValueError("You must supply freqs when self.freqs is None")
            return self.apply_freqs(self.freqs)
        if len(freqs) != len(self.vocab):
            raise ValueError("len(freqs) != len(self.vocab)")
        self.freqs = freqs
        new_vocab = {}
        # sort then reverse for stability
        freqs_list = sorted(six.iteritems(freqs), key=lambda k_v: k_v[1])
        for i, (k, _) in enumerate(freqs_list[::-1]):
            new_vocab[k] = i
        # NOTE(review): new_vocab is computed but never assigned to
        # self.vocab, and self.vectors is not reordered to match. Assigning
        # it here without reordering vectors would desynchronize them, so
        # the existing behavior (only freqs updated) is preserved.
def load_word_embedding(
        path, vocab=None, dtype=np.float32, max_vocab=None,
        format=None, binary=False, unk=b'<unk>', unk_index=0, eos=b'</s>',
        eos_index=1, unk_creation_method='least_common',
        unk_creation_n_least=10, random_init_std='auto', random_init_seed=0):
    """
    Load pretrained word embedding from a file. This is short hand method for
    using :func:`~word_embedding_loader.word_embedding.WordEmbedding.load` and
    getting :attr:`~word_embedding_loader.word_embedding.WordEmbedding.vectors`
    and :attr:`~word_embedding_loader.word_embedding.WordEmbedding.vocab`.
    This is especially useful if you have no plans to save the word embedding.
    By specifying ``unk`` or ``eos``, you are guaranteed to have a embedding
    for out-of-vocabulary words and for the end-of-sentence token, respectively.
    The function first tries to detect if those tokens already exists by
    looking for words they tend to be associated with (e.g. "</s>").
    On success, it replaces those found vocabularies with the words you have
    provided to the function. On failure, it initializes and append new
    embeddings to the word embedding.
    Args:
        path (str): Path of file to load.
        vocab (str or set or None): Path to vocab files or set of vocab to use.
            Refer
            :func:`~word_embedding_loader.word_embedding.WordEmbedding.load`
            for details.
        dtype (numpy.dtype): Element data type to use for the array.
        max_vocab (int): Number of vocabulary to read.
        format (str or None): Format of the file. Refer
            :func:`~word_embedding_loader.word_embedding.WordEmbedding.load` for
            details.
        binary (bool): Load file as binary file. Refer
            :func:`~word_embedding_loader.word_embedding.WordEmbedding.load` for
            details.
        unk (bytes or None): The vocabulary for out-of-vocabulary words.
            If ``None`` it will not do any post-processing to guarantee that
            it exists.
        unk_index (int): Index to which unk is inserted.
        eos (bytes or None): The vocabulary for the end-of-sentence token.
            If ``None`` it will not do any post-processing to guarantee that
            it exists.
        eos_index (int): Index to which eos is inserted.
        unk_creation_method (str): The method to use if it needs to create
            an embedding for out-of-vocabulary words. ``'least_common'`` will
            take n least common word embeddings and take their average (it will
            use frequencies from vocab file if provided or assume that words
            are ordered in the order of frequencies). ``'random'`` will create
            word embedding normal distribution drawn with the
            ``random_init_std`` argument. This argument is ignored if ``unk``
            token is already available in loaded word embedding.
        unk_creation_n_least (str): number of least common word embeddings to
            use when ``unk_creation_method`` is ``'least_common'``.
        random_init_std (float or 'auto'): standard deviation of newly created
            ``unk`` and ``eos`` tokens (only when they are to be randomly
            initialized). Specify ``'auto'`` to use standard deviation of
            the loaded word embeddings.
        random_init_seed (int or None): Random seed that is used to create
            unk or eos word embedding. Default is ``0`` so you get the same
            vector every time you use the function (this is useful if you want
            to use trained model). You should specify `None` to do experiments
            with different seed, but you should explicitly save the word
            embedding to use it with the trained model.
    Returns:
        vectors (numpy.ndarray): Word embedding vectors in shape of
            ``(vocabulary size, feature dimension)``.
        vocab (dict): Mapping from words (bytes) to vector indices (int)
    """
    # Validate index arguments before doing any file I/O.
    if max_vocab is not None:
        if eos_index >= max_vocab:
            raise ValueError('eos_index must be within max_vocab')
        if unk_index >= max_vocab:
            raise ValueError('unk_index must be within max_vocab')
    if unk is not None and eos is not None and eos_index == unk_index:
        raise ValueError('eos_index and unk_index must be different')
    # Token that pre-existing end-of-sentence entries are detected by.
    EOS_CAND = b'</s>'
    # Decide what vocab/max_vocab to hand to WordEmbedding.load; some
    # configurations must load everything so unk/eos handling can run on
    # the complete embedding first.
    if unk is None and eos is None:
        _vocab = vocab
        _max_vocab = max_vocab
    elif unk is not None and unk_creation_method == 'least_common':
        # Just load everything (the least-common average needs all rows)
        _vocab = None
        _max_vocab = None
    else:
        if isinstance(vocab, six.string_types):
            # it is difficult to add words to vocab so apply frequency afterward
            _vocab = None
            _max_vocab = None
        elif isinstance(vocab, (list, set)):
            _vocab = set(vocab)
            _max_vocab = max_vocab
            # Make sure candidate unk/eos spellings survive the vocab filter.
            if unk is not None:
                for w in util.UNK_ID_CANDIDATES:
                    _vocab.add(w)
            if eos is not None:
                _vocab.add(EOS_CAND)
        else:
            _max_vocab = None
            _vocab = None
    we = WordEmbedding.load(path, vocab=_vocab, dtype=dtype,
                            max_vocab=_max_vocab, format=format, binary=binary)
    if unk is None and eos is None:
        # No post-processing requested; return exactly what was loaded.
        return we.vectors, we.vocab
    if eos_index >= len(we):
        raise ValueError('eos_index must be smaller than the word embedding size')
    if unk_index >= len(we):
        raise ValueError('unk_index must be smaller than the word embedding size')
    if isinstance(vocab, six.string_types):
        # vocab path was ignored above, so load the frequency file now.
        with open(vocab, mode='rb') as f:
            freqs_loaded = loader.vocab.load_vocab(f)
        # balance vocab and freqs (words missing from the file count as 0)
        freqs = {k: freqs_loaded.get(k, 0) for k in six.iterkeys(we.vocab)}
        max_freq = max(freqs.values())
        we.apply_freqs(freqs)
    if we.freqs is not None:
        max_freq = max(we.freqs.values())
    if unk is not None:
        if eos is not None and unk_index > eos_index:
            # Inserting eos will move index of unk so compensate for that
            unk_index -= 1
        _unk = util.infer_unk_id(we.vocab)
        if _unk is None:
            # Create unk
            if unk_creation_method == 'least_common':
                v = util.create_unk_least_common(we, unk_creation_n_least)
            elif unk_creation_method == 'random':
                if random_init_std == 'auto':
                    random_init_std = np.std(we.vectors)
                v = util.create_random_vector(
                    we.vectors.shape[1], random_init_std,
                    dtype=we.vectors.dtype, seed=random_init_seed)
            else:
                raise ValueError(
                    'Unknown unk_creation_method %s' % unk_creation_method)
            we.vocab, we.vectors = util.insert_vocab(
                we.vocab, we.vectors, unk, unk_index, v)
        else:
            # Reuse the detected unk entry under the caller-provided name.
            we.vocab, we.vectors, _ = util.move_vocab(
                we.vocab, we.vectors, _unk, unk_index)
            del we.vocab[_unk]
            we.vocab[unk] = unk_index
        if we.freqs is not None:
            # Rank unk above every real word so resize() never drops it.
            we.freqs[unk] = max_freq + 1
    if eos is not None:
        if EOS_CAND not in we.vocab:
            # Create eos
            if random_init_std == 'auto':
                random_init_std = np.std(we.vectors)
            # Offset the seed so eos differs from a randomly created unk.
            random_init_seed_eos = (None if random_init_seed is None else
                                    random_init_seed + 1)
            v = util.create_random_vector(
                we.vectors.shape[1], random_init_std, dtype=we.vectors.dtype,
                seed=random_init_seed_eos
            )
            we.vocab, we.vectors = util.insert_vocab(
                we.vocab, we.vectors, eos, eos_index, v)
        else:
            we.vocab, we.vectors, _ = util.move_vocab(
                we.vocab, we.vectors, EOS_CAND, eos_index)
            del we.vocab[EOS_CAND]
            we.vocab[eos] = eos_index
        if we.freqs is not None:
            we.freqs[eos] = max_freq + 1
    if isinstance(vocab, six.string_types):
        # Prune words with freq == 0 by shrinking max_vocab accordingly.
        _max_vocab = len([1 for k in six.iterkeys(we.vocab) if k in freqs_loaded])
        if eos is not None and eos not in freqs_loaded:
            _max_vocab += 1
        if unk is not None and unk not in freqs_loaded:
            _max_vocab += 1
        max_vocab = _max_vocab if max_vocab is None else min(max_vocab, _max_vocab)
    if max_vocab is not None:
        # index of eos/unk is always smaller than max_vocab so it is safe to
        # call resize
        we.resize(max_vocab)
    return we.vectors, we.vocab
| mit |
thnee/ansible | test/lib/ansible_test/_internal/powershell_import_analysis.py | 15 | 2975 | """Analyze powershell import statements."""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
from .util import (
display,
)
from .data import (
data_context,
)
def get_powershell_module_utils_imports(powershell_targets):
    """Return a dictionary of module_utils names mapped to sets of powershell file paths.
    :type powershell_targets: list[TestTarget]
    :rtype: dict[str, set[str]]
    """
    module_utils = enumerate_module_utils()
    imports_by_target_path = {}
    for target in powershell_targets:
        imports_by_target_path[target.path] = extract_powershell_module_utils_imports(target.path, module_utils)
    # Invert the per-file mapping: seed every known module_util with an empty
    # set so unused utilities are still detected and warned about below.
    # (Idiom fix: was dict([(k, set()) for ...]); a dict comprehension says
    # the same thing directly.)
    imports = {module_util: set() for module_util in module_utils}
    for target_path, target_imports in imports_by_target_path.items():
        for module_util in target_imports:
            imports[module_util].add(target_path)
    for module_util in sorted(imports):
        if not imports[module_util]:
            display.warning('No imports found which use the "%s" module_util.' % module_util)
    return imports
def get_powershell_module_utils_name(path):  # type: (str) -> str
    """Return a namespace and name from the given module_utils path."""
    base_path = data_context().content.module_utils_powershell_path
    # Collection content gets a fully qualified ansible_collections prefix;
    # core content uses the bare relative name.
    if data_context().content.collection:
        prefix = 'ansible_collections.' + data_context().content.collection.prefix + '.plugins.module_utils.'
    else:
        prefix = ''
    # Strip the file extension and turn path separators into dots.
    name = prefix + os.path.splitext(os.path.relpath(path, base_path))[0].replace(os.sep, '.')
    return name
def enumerate_module_utils():
    """Return a list of available module_utils imports.
    :rtype: set[str]
    """
    content = data_context().content
    return {get_powershell_module_utils_name(file_path)
            for file_path in content.walk_files(content.module_utils_powershell_path)
            if os.path.splitext(file_path)[1] == '.psm1'}
def extract_powershell_module_utils_imports(path, module_utils):
    """Return a list of module_utils imports found in the specified source file.
    :type path: str
    :type module_utils: set[str]
    :rtype: set[str]
    """
    imports = set()
    with open(path, 'r') as module_fd:
        code = module_fd.read()
    # Legacy marker comment implies the Legacy module util.
    if '# POWERSHELL_COMMON' in code:
        imports.add('Ansible.ModuleUtils.Legacy')
    # Hoist the compiled pattern out of the loop. It matches either:
    #   #Requires -Module(s) Ansible....   or
    #   #AnsibleRequires -PowerShell ansible_collections....
    import_pattern = re.compile(
        r'(?i)^#\s*(?:requires\s+-module(?:s?)|ansiblerequires\s+-powershell)\s*((?:Ansible|ansible_collections)\..+)')
    for line_number, line in enumerate(code.splitlines(), 1):
        match = import_pattern.search(line)
        if not match:
            continue
        import_name = match.group(1)
        if import_name in module_utils:
            imports.add(import_name)
        else:
            display.warning('%s:%d Invalid module_utils import: %s' % (path, line_number, import_name))
    return imports
| gpl-3.0 |
SDSG-Invenio/invenio | invenio/legacy/websubmit/functions/Move_to_Done.py | 13 | 2369 | # This file is part of Invenio.
# Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
__revision__ = "$Id$"
## Description: function Move_to_Done
## This function move the current working directory to the
## /done directory and compress it
## Author: T.Baron
## PARAMETERS: -
import os
import re
import time
import subprocess
import shutil
from invenio.config import \
CFG_PATH_GZIP, \
CFG_PATH_TAR, \
CFG_WEBSUBMIT_STORAGEDIR
from invenio.legacy.websubmit.config import InvenioWebSubmitFunctionError
def Move_to_Done(parameters, curdir, form, user_info=None):
    """
    This function moves the existing submission directory to the
    /opt/invenio/var/data/submit/storage/done directory.
    Then it tars and gzips the directory.
    """
    # NOTE(review): 'rn' is a module-level global set elsewhere by the
    # WebSubmit framework -- confirm it is always defined before this runs.
    global rn
    # Extract <dir>/<doctype> from the two path components that precede the
    # submission directory name in curdir.
    # NOTE(review): re.search may return None for unexpected curdir values,
    # which would raise AttributeError on the next line; presumably curdir
    # always matches -- confirm with callers.
    data = re.search(".*/([^/]*)/([^/]*)/[^/]*$", curdir)
    dir = data.group(1)
    doctype = data.group(2)
    donedir = os.path.join(CFG_WEBSUBMIT_STORAGEDIR, "done", dir, doctype)
    if not os.path.exists(donedir):
        try:
            os.makedirs(donedir)
        except:
            # NOTE(review): bare except hides the original failure reason;
            # narrowing to OSError would be safer.
            raise InvenioWebSubmitFunctionError("Cannot create done directory %s" % donedir)
    # Moves the files to the done directory and creates an archive
    rn = rn.replace("/", "-").replace(" ","")
    namedir = "%s_%s" % (rn, time.strftime("%Y%m%d%H%M%S"))
    finaldir = os.path.join(donedir, namedir)
    os.rename(curdir, finaldir)
    # Archive only when both tar and gzip are configured; on success the
    # uncompressed copy is removed.
    if CFG_PATH_TAR != "" and CFG_PATH_GZIP != "":
        if subprocess.Popen([CFG_PATH_TAR, '-czf', '%s.tar.gz' % namedir, namedir], cwd=donedir).wait() == 0:
            shutil.rmtree(finaldir)
    return ""
| gpl-2.0 |
kevinpetersavage/BOUT-dev | tools/pylib/boututils/ask.py | 11 | 1265 | from builtins import input
import sys
def query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer.

    "question" is a string that is presented to the user.
    "default" is the presumed answer if the user just hits <Enter>.
    It must be "yes" (the default), "no" or None (meaning
    an answer is required of the user).

    Returns 1 for "yes" and -1 for "no". Note both values are truthy,
    so compare against them rather than using the result as a bool.

    Raises ValueError if "default" is not "yes", "no" or None.
    """
    # Accepted spellings. The user's input is lower-cased before lookup and
    # the validated ``default`` is always lower case, so lower-case keys
    # suffice. (A previous version built a True/False table here and then
    # immediately overwrote it with this one -- dead code, now removed.)
    valid = {"yes": 1, "y": 1, "ye": 1,
             "no": -1, "n": -1}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)
    # Loop until the user gives a recognizable answer (or hits <Enter>
    # when a default is available).
    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
| gpl-3.0 |
armoredsoftware/protocol | measurer/gdb-7.9/gdb/python/python-config.py | 140 | 2595 | # Program to fetch python compilation parameters.
# Copied from python-config of the 2.7 release.
import sys
import os
import getopt
from distutils import sysconfig
# Option names accepted on the command line; each is presented to
# getopt (and shown in the usage message) as '--<name>'.
valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']
def exit_with_usage(code=1):
    """Write the usage line to stderr and terminate with *code*."""
    all_opts = '|'.join('--' + opt for opt in valid_opts)
    sys.stderr.write("Usage: %s [%s]\n" % (sys.argv[0], all_opts))
    sys.exit(code)
# Parse the command line; unknown options or an empty option list print
# the usage message and exit with a non-zero status.
try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()
if not opts:
    exit_with_usage()
# Shortcuts for the sysconfig accessors used repeatedly below.
pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var
# sys.abiflags only exists on Python >= 3.2; default to '' elsewhere.
abiflags = getattr (sys, "abiflags", "")
opt_flags = [flag for (flag, val) in opts]
if '--help' in opt_flags:
    exit_with_usage(code=0)
def to_unix_path(path):
    """Normalise *path* for consumption by cygwin tools.

    On Windows (os.name == 'nt') every backslash is converted to a
    forward slash, because cygwin bash treats backslashes as special
    characters.  On Unix systems the path is returned unchanged.
    """
    if os.name != 'nt':
        return path
    return path.replace('\\', '/')
# Emit the requested information, one print per selected option.
for opt in opt_flags:
    if opt == '--prefix':
        print (to_unix_path(sysconfig.PREFIX))
    elif opt == '--exec-prefix':
        print (to_unix_path(sysconfig.EXEC_PREFIX))
    elif opt in ('--includes', '--cflags'):
        # Both the generic and the platform-specific include dirs.
        flags = ['-I' + sysconfig.get_python_inc(),
                 '-I' + sysconfig.get_python_inc(plat_specific=True)]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print (to_unix_path(' '.join(flags)))
    elif opt in ('--libs', '--ldflags'):
        libs = []
        if getvar('LIBS') is not None:
            libs.extend(getvar('LIBS').split())
        if getvar('SYSLIBS') is not None:
            libs.extend(getvar('SYSLIBS').split())
        libs.append('-lpython'+pyver + abiflags)
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                # Static build: the config dir carries libpythonX.Y.a.
                if getvar('LIBPL') is not None:
                    libs.insert(0, '-L' + getvar('LIBPL'))
                elif os.name == 'nt':
                    libs.insert(0, '-L' + sysconfig.PREFIX + '/libs')
            if getvar('LINKFORSHARED') is not None:
                libs.extend(getvar('LINKFORSHARED').split())
        print (to_unix_path(' '.join(libs)))
| bsd-3-clause |
arienchen/pytibrv | tests/api/test-ftmem.py | 1 | 8776 |
import threading
from datetime import datetime
import time
from pytibrv.api import *
from pytibrv.status import *
from pytibrv.tport import *
from pytibrv.events import *
from pytibrv.disp import *
from pytibrv.queue import *
from pytibrv.ft import *
import unittest
def callback(member: tibrvftMember, groupName: bytes, action: tibrvftAction, closure):
    """Fault-tolerance callback: record the latest FT action on the App.

    The closure is the object registered with tibrvftMember_Create()
    (an App instance in these tests); the delivered action is stored on
    it so the App's worker thread can react.

    Fix: the original decoded groupName into a local that was never
    used; that dead assignment has been removed.
    """
    obj = tibrvClosure(closure)
    obj.action = action
    print(datetime.now(), obj.name, ' -> ', action)
class App(threading.Thread):
    """Worker thread that joins the TIBRV fault-tolerance group 'TEST'.

    self.action mirrors the latest FT action delivered to callback();
    run() only does its (simulated) work while the member is activated.
    Handles are stored as integers; 0 means not created / destroyed.
    """
    def __init__(self, name: str):
        threading.Thread.__init__(self)
        self.action = 0   # last FT action seen (0 until first callback)
        self.tx = 0       # transport handle
        self.que = 0      # queue handle
        self.disp = 0     # dispatcher handle
        self.ft = 0       # FT member handle
        self.name = name
    def start(self, weight: int = 50):
        """Create transport, queue, FT member and dispatcher, then run.

        *weight* decides activation priority within the FT group.
        """
        status, self.tx = tibrvTransport_Create(None, None, None)
        assert TIBRV_OK == status, tibrvStatus_GetText(status)
        status, self.que = tibrvQueue_Create()
        assert TIBRV_OK == status, tibrvStatus_GetText(status)
        status, self.ft = tibrvftMember_Create(self.que, callback, self.tx, 'TEST', \
                                               weight, 1, 1.0, 1.5, 2.0, self)
        assert TIBRV_OK == status, tibrvStatus_GetText(status)
        status, self.disp = tibrvDispatcher_Create(self.que)
        assert TIBRV_OK == status, tibrvStatus_GetText(status)
        threading.Thread.start(self)
    def stop(self):
        """Withdraw from the FT group; run()'s loop then terminates."""
        if self.ft != 0:
            print(datetime.now(), self.name, 'IS STOPING ...')
            status = tibrvftMember_Destroy(self.ft)
            assert TIBRV_OK == status, tibrvStatus_GetText(status)
            self.ft = 0
    def close(self):
        """Tear down dispatcher, queue and transport (idempotent)."""
        if self.tx != 0:
            self.stop()
        if self.disp != 0:
            status = tibrvDispatcher_Destroy(self.disp)
            assert TIBRV_OK == status, tibrvStatus_GetText(status)
            self.disp = 0
        if self.que != 0:
            status = tibrvQueue_Destroy(self.que)
            assert TIBRV_OK == status, tibrvStatus_GetText(status)
            self.que = 0
        if self.tx != 0:
            status = tibrvTransport_Destroy(self.tx)
            assert TIBRV_OK == status, tibrvStatus_GetText(status)
            self.tx = 0
    def run(self):
        """Loop until stopped, sleeping while deactivated."""
        print(datetime.now(), self.name, 'START RUNNING ')
        # when thread stopped, self.ft is 0
        while self.ft != 0:
            if self.action != TIBRVFT_ACTIVATE:
                print(datetime.now(), self.name, 'IS DEACTIVATED')
                time.sleep(0.5)
                continue
            print(datetime.now(), self.name, 'IS ACTIVATED')
            time.sleep(1.0)
            print(datetime.now(), self.name, 'RUN SOMETHING')
        print(datetime.now(), self.name, 'EXIT NOW')
class MemberTest(unittest.TestCase):
    """Integration tests for tibrvftMember fault-tolerance behaviour.

    NOTE(review): these tests are timing-dependent (fixed sleeps and a
    live TIBRV daemon are assumed) — they are integration, not unit,
    tests.
    """
    @classmethod
    def setUpClass(cls):
        # Open the TIBRV machinery once for the whole test class.
        status = tibrv_Open()
        assert TIBRV_OK == status, tibrvStatus_GetText(status)
    @classmethod
    def tearDownClass(cls):
        tibrv_Close()
    def test_create(self):
        """A lone member must activate, then destroy cleanly."""
        print('')
        ap1 = App('AP1')
        ap1.start()
        # Poll up to ~5s for the FT callback to activate ap1.
        timeout = time.time() + 5
        while time.time() <= timeout:
            if ap1.action == TIBRVFT_ACTIVATE:
                break
            time.sleep(1.0)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        # let ap1 to run 5 sec
        time.sleep(5)
        ap1.stop()
        # when you destroy ftMember
        # there is no chance to get callback
        # so, self.action would be still TIBRVFT_ACTIVATE
        self.assertEqual(0, ap1.ft)
        ap1.close()
    def test_failove(self):
        """When the active member dies, the standby must take over."""
        # action matrix
        # AP1 AP2 DESCRIPTION
        # ---------------------------------------------------
        # 1. 0 AP1 CREATED
        # 2. 0 -> ACT AP1 FT CALLBACK
        # 3. ACT AP1 ACTIVATED
        # 4. 0 AP2 CREATED
        # 5. ACT AP1 CLOSED, NO CALLBACK
        # 6. 0 -> ACT AP2 FT CALLBACK
        # 7. ACT AP2 ACTIVATED
        #
        print('\nTEST FAILOVER')
        ap1 = App('AP1')
        ap1.start()
        # let ap1 became ACTIVATE
        timeout = time.time() + 5
        while time.time() <= timeout:
            if ap1.action == TIBRVFT_ACTIVATE:
                break
            time.sleep(1.0)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        # start another instance
        ap2 = App('AP2')
        ap2.start()
        # let time going
        # ap1 should be ACTIVATE
        # ap2 should be still 0 (initial value)
        time.sleep(5.0)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        self.assertEqual(0, ap2.action)
        # simulate ap1 is dead
        ap1.close()
        self.assertEqual(0, ap1.ft)
        time.sleep(0.1)
        time.sleep(2)
        self.assertEqual(TIBRVFT_ACTIVATE, ap2.action)
        # let time going
        time.sleep(5)
        ap2.close()
        self.assertEqual(0, ap2.ft)
    def test_primary(self):
        """A returning higher-weight member must reclaim activation."""
        # action matrix: AP1 is PRIMARY
        # AP1 AP2 DESCRIPTION
        # ---------------------------------------------------
        # 1. 0 AP1 CREATED, PRIMARY
        # 2. 0 -> ACT AP1 FT CALLBACK
        # 3. ACT AP1 ACTIVATED
        # 4. 0 AP2 CREATED
        # 5. ACT AP1 CLOSED, NO CALLBACK
        # 6. 0 -> ACT AP2 FT CALLBACK
        # 7. ACT AP2 ACTIVATED
        # 8. 0 AP1 CREATED, PRIMARY
        # 9. ACT -> DEACT AP2 FT CALLBACK
        # 10. DEACT AP2 DEACTIVATED
        # 11. 0 -> ACT AP1 FT CALLBACK
        # 12. ACT AP1 ACTIVATED
        #
        print('\nTEST PRIMARY')
        ap1 = App('AP1')
        ap1.start(100) # More Weight
        # let ap1 became ACTIVATE
        timeout = time.time() + 5
        while time.time() <= timeout:
            if ap1.action == TIBRVFT_ACTIVATE:
                break
            time.sleep(1.0)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        # start another instance
        ap2 = App('AP2')
        ap2.start(50) #
        # let time going
        # ap1 should be ACTIVATE
        # ap2 should be still 0 (initial value)
        time.sleep(5.0)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        self.assertEqual(0, ap2.action)
        # simulate ap1 is dead
        ap1.close()
        self.assertEqual(0, ap1.ft)
        time.sleep(0.1)
        time.sleep(2)
        self.assertEqual(TIBRVFT_ACTIVATE, ap2.action)
        # ap1 is back again
        ap1 = App('AP1')
        ap1.start(100) # More Weight
        # let time going
        time.sleep(5)
        self.assertEqual(TIBRVFT_ACTIVATE, ap1.action)
        self.assertEqual(TIBRVFT_DEACTIVATE, ap2.action)
        ap1.close()
        ap2.close()
    def test_getset(self):
        """Round-trip the tibrvftMember getters and setters."""
        status, tx = tibrvTransport_Create(None, None, None)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        que = TIBRV_DEFAULT_QUEUE
        status, ft = tibrvftMember_Create(que, callback, tx, 'MemberTest',
                                          50, 1, 1.0, 1.5, 2.0, None)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, q = tibrvftMember_GetQueue(ft)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(que, q)
        status, t = tibrvftMember_GetTransport(ft)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(tx, t)
        status, sz = tibrvftMember_GetGroupName(ft)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual('MemberTest', sz)
        status = tibrvftMember_SetWeight(ft, 100)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, w = tibrvftMember_GetWeight(ft)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual(100, w)
        status = tibrvftMember_Destroy(ft)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvTransport_Destroy(tx)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
# Run the fault-tolerance member tests when executed as a script.
if __name__ == "__main__" :
    unittest.main(verbosity=2)
| bsd-3-clause |
zhenzhai/edx-platform | cms/djangoapps/contentstore/features/textbooks.py | 44 | 4541 | # pylint: disable=missing-docstring
from lettuce import world, step
from django.conf import settings
from common import upload_file
from nose.tools import assert_equal
# Root of the shared fixture data used by the upload steps.
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
@step(u'I go to the textbooks page')
def go_to_uploads(_step):
    """Open the Textbooks page from the course Content menu."""
    world.wait_for_js_to_load()
    world.click_course_content()
    menu_css = 'li.nav-course-courseware-textbooks a'
    world.css_click(menu_css)
@step(u'I should see a message telling me to create a new textbook')
def assert_create_new_textbook_msg(_step):
    """Verify the empty-state prompt shown when no textbooks exist."""
    css = ".wrapper-content .no-textbook-content"
    assert world.is_css_present(css)
    no_tb = world.css_find(css)
    assert "You haven't added any textbooks" in no_tb.text
@step(u'I upload the textbook "([^"]*)"$')
def upload_textbook(_step, file_name):
    """Upload a fixture file from the test uploads/ directory."""
    upload_file(file_name, sub_path="uploads/")
@step(u'I click (on )?the New Textbook button')
def click_new_textbook(_step, on):
    """Press the "New Textbook" button in the action bar."""
    button_css = ".nav-actions .new-button"
    button = world.css_find(button_css)
    button.click()
@step(u'I name my textbook "([^"]*)"')
def name_textbook(_step, name):
    """Fill in the textbook name field."""
    input_css = ".textbook input[name=textbook-name]"
    world.css_fill(input_css, name)
    # Firefox needs an explicit change event for the fill to register.
    if world.is_firefox():
        world.trigger_event(input_css)
@step(u'I name the (first|second|third) chapter "([^"]*)"')
def name_chapter(_step, ordinal, name):
    """Fill in the name field of the given (1-based) chapter."""
    index = ["first", "second", "third"].index(ordinal)
    input_css = ".textbook .chapter{i} input.chapter-name".format(i=index + 1)
    world.css_fill(input_css, name)
    if world.is_firefox():
        world.trigger_event(input_css)
@step(u'I type in "([^"]*)" for the (first|second|third) chapter asset')
def asset_chapter(_step, name, ordinal):
    """Fill in the asset-path field of the given chapter."""
    index = ["first", "second", "third"].index(ordinal)
    input_css = ".textbook .chapter{i} input.chapter-asset-path".format(i=index + 1)
    world.css_fill(input_css, name)
    # Firefox needs an explicit change event for the fill to register.
    if world.is_firefox():
        world.trigger_event(input_css)
@step(u'I click the Upload Asset link for the (first|second|third) chapter')
def click_upload_asset(_step, ordinal):
    """Open the asset-upload dialog for the given chapter."""
    index = ["first", "second", "third"].index(ordinal)
    button_css = ".textbook .chapter{i} .action-upload".format(i=index + 1)
    world.css_click(button_css)
@step(u'I click Add a Chapter')
def click_add_chapter(_step):
    """Append an empty chapter row to the textbook form."""
    button_css = ".textbook .action-add-chapter"
    world.css_click(button_css)
@step(u'I save the textbook')
def save_textbook(_step):
    """Submit the edit-textbook form."""
    submit_css = "form.edit-textbook button[type=submit]"
    world.css_click(submit_css)
@step(u'I should see a textbook named "([^"]*)" with a chapter path containing "([^"]*)"')
def check_textbook(_step, textbook_name, chapter_name):
    """Verify the first textbook's title and first chapter path."""
    title = world.css_text(".textbook h3.textbook-title", index=0)
    chapter = world.css_text(".textbook .wrap-textbook p", index=0)
    assert_equal(title, textbook_name)
    assert_equal(chapter, chapter_name)
@step(u'I should see a textbook named "([^"]*)" with (\d+) chapters')
def check_textbook_chapters(_step, textbook_name, num_chapters_str):
    """Verify the textbook title and its advertised chapter count."""
    num_chapters = int(num_chapters_str)
    title = world.css_text(".textbook .view-textbook h3.textbook-title", index=0)
    toggle_text = world.css_text(".textbook .view-textbook .chapter-toggle", index=0)
    assert_equal(title, textbook_name)
    assert_equal(
        toggle_text,
        "{num} PDF Chapters".format(num=num_chapters),
        "Expected {num} chapters, found {real}".format(num=num_chapters, real=toggle_text)
    )
@step(u'I click the textbook chapters')
def click_chapters(_step):
    """Expand the chapter list of the first textbook."""
    world.css_click(".textbook a.chapter-toggle")
@step(u'the (first|second|third) chapter should be named "([^"]*)"')
def check_chapter_name(_step, ordinal, name):
    """Verify the display name of the given chapter."""
    index = ["first", "second", "third"].index(ordinal)
    chapter = world.css_find(".textbook .view-textbook ol.chapters li")[index]
    element = chapter.find_by_css(".chapter-name")
    assert element.text == name, "Expected chapter named {expected}, found chapter named {actual}".format(
        expected=name, actual=element.text)
@step(u'the (first|second|third) chapter should have an asset called "([^"]*)"')
def check_chapter_asset(_step, ordinal, name):
    """Verify the asset path of the given chapter."""
    index = ["first", "second", "third"].index(ordinal)
    chapter = world.css_find(".textbook .view-textbook ol.chapters li")[index]
    element = chapter.find_by_css(".chapter-asset-path")
    assert element.text == name, "Expected chapter with asset {expected}, found chapter with asset {actual}".format(
        expected=name, actual=element.text)
| agpl-3.0 |
raycarnes/account-invoicing | account_invoice_line_description/res_config.py | 27 | 1502 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Agile Business Group sagl
# (<http://www.agilebg.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, fields
class AccountConfigSettings(orm.TransientModel):
    """Accounting settings wizard: adds the switch that lets invoice
    lines show only the product description."""
    _inherit = 'account.config.settings'
    _columns = {
        # Checkbox backed by the implied security group declared in this
        # module; enabling it grants that group to the relevant users.
        'group_use_product_description_per_inv_line': fields.boolean(
            """Allow using only the product description on the
            invoice lines""",
            implied_group="account_invoice_line_description."
            "group_use_product_description_per_inv_line",
            help="""Allows you to use only product description on the
            invoice lines."""
        ),
    }
| agpl-3.0 |
bcl/anaconda | pyanaconda/ui/tui/spokes/askvnc.py | 8 | 6296 | # Ask vnc text spoke
#
# Copyright (C) 2012 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Jesse Keating <jkeating@redhat.com>
#
from pyanaconda.ui.tui.spokes import NormalTUISpoke
from pyanaconda.ui.tui.simpleline import TextWidget, ColumnWidget
from pyanaconda.ui.tui.tuiobject import YesNoDialog
from pyanaconda.constants import USEVNC, USETEXT, IPMI_ABORTED
from pyanaconda.constants_text import INPUT_PROCESSED
from pyanaconda.i18n import N_, _
from pyanaconda.ui.communication import hubQ
from pyanaconda.ui.tui import exception_msg_handler
from pyanaconda.iutil import execWithRedirect
from pyanaconda.flags import can_touch_runtime_system
from pyanaconda import iutil
import getpass
import sys
def exception_msg_handler_and_exit(event, data):
    """Display an exception and exit so that we don't end up in a loop.

    Registered for HUB_CODE_EXCEPTION before the TUI installs its own
    handlers; exiting immediately avoids repeated exception events.
    """
    exception_msg_handler(event, data)
    sys.exit(1)
class AskVNCSpoke(NormalTUISpoke):
    """Standalone TUI spoke asking whether to use VNC or text mode."""
    title = N_("VNC")
    # This spoke is kinda standalone, not meant to be used with a hub
    # We pass in some fake data just to make our parents happy
    def __init__(self, app, data, storage=None, payload=None,
                 instclass=None, message=None):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
        # The TUI hasn't been initialized with the message handlers yet. Add an
        # exception message handler so that the TUI exits if anything goes wrong
        # at this stage.
        self._app.register_event_handler(hubQ.HUB_CODE_EXCEPTION, exception_msg_handler_and_exit)
        if message:
            self._message = message
        else:
            self._message = _("X was unable to start on your "
                              "machine. Would you like to "
                              "start VNC to connect to "
                              "this computer from another "
                              "computer and perform a "
                              "graphical installation or continue "
                              "with a text mode installation?")
        self._choices = (_(USEVNC), _(USETEXT))
        self._usevnc = False
    @property
    def indirect(self):
        # Never reachable from a hub; always entered explicitly.
        return True
    def refresh(self, args=None):
        """Render the question and the numbered choice list."""
        NormalTUISpoke.refresh(self, args)
        self._window += [TextWidget(self._message), ""]
        for idx, choice in enumerate(self._choices):
            number = TextWidget("%2d)" % (idx + 1))
            c = ColumnWidget([(3, [number]), (None, [TextWidget(choice)])], 1)
            self._window += [c, ""]
        return True
    def input(self, args, key):
        """Override input so that we can launch the VNC password spoke"""
        try:
            keyid = int(key) - 1
            if 0 <= keyid < len(self._choices):
                choice = self._choices[keyid]
                if choice == _(USETEXT):
                    self._usevnc = False
                else:
                    self._usevnc = True
                    # Collect the VNC password before continuing.
                    newspoke = VNCPassSpoke(self.app, self.data, self.storage,
                                            self.payload, self.instclass)
                    self.app.switch_screen_modal(newspoke)
                self.apply()
                self.close()
            return INPUT_PROCESSED
        except ValueError:
            pass
        if key.lower() == _('q'):
            d = YesNoDialog(self.app, _(self.app.quit_message))
            self.app.switch_screen_modal(d)
            if d.answer:
                iutil.ipmi_report(IPMI_ABORTED)
                if can_touch_runtime_system("Quit and Reboot"):
                    execWithRedirect("systemctl", ["--no-wall", "reboot"])
                else:
                    exit(1)
        else:
            return key
    def apply(self):
        # Record the user's decision in the kickstart data.
        self.data.vnc.enabled = self._usevnc
class VNCPassSpoke(NormalTUISpoke):
    """TUI spoke collecting (and confirming) the VNC password."""
    title = N_("VNC Password")
    def __init__(self, app, data, storage, payload, instclass, message=None):
        NormalTUISpoke.__init__(self, app, data, storage, payload, instclass)
        self._password = ""
        if message:
            self._message = message
        else:
            self._message = _("Please provide VNC password (must be six to eight characters long).\n"
                              "You will have to type it twice. Leave blank for no password")
    @property
    def indirect(self):
        return True
    @property
    def completed(self):
        return True # We're always complete
    def refresh(self, args=None):
        NormalTUISpoke.refresh(self, args)
        self._window += [TextWidget(self._message), ""]
        return True
    def prompt(self, args=None):
        """Override prompt as password typing is special."""
        p1 = getpass.getpass(_("Password: "))
        p2 = getpass.getpass(_("Password (confirm): "))
        # Returning None re-prompts on mismatch or invalid length.
        if p1 != p2:
            print(_("Passwords do not match!"))
            return None
        elif 0 < len(p1) < 6:
            print(_("The password must be at least "
                    "six characters long."))
            return None
        elif len(p1) > 8:
            print(_("The password cannot be more than "
                    "eight characters long."))
            return None
        else:
            self._password = p1
            self.apply()
            self.close()
    def apply(self):
        # Store the validated password in the kickstart data.
        self.data.vnc.password = self._password
| gpl-2.0 |
tacgomes/morph | morphlib/morphloader.py | 1 | 30620 | # Copyright (C) 2013-2015 Codethink Limited
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
#
# =*= License: GPL-2 =*=
import collections
import warnings
import yaml
import morphlib
class MorphologyObsoleteFieldWarning(UserWarning):
    """Warn that a stratum spec still carries an obsolete field."""

    def __init__(self, morphology, spec, field):
        self.kind = morphology['kind']
        self.morphology_name = morphology.get('name', '<unknown>')
        self.stratum_name = spec.get('alias', spec['morph'])
        self.field = field

    def __str__(self):
        return ('%(kind)s morphology %(morphology_name)s refers to '
                'stratum %(stratum_name)s with the %(field)s field. '
                'Defaulting to null.') % self.__dict__
class MorphologySyntaxError(morphlib.Error):
    """Base class for errors about malformed morphology text."""
    pass
class MorphologyNotYamlError(MorphologySyntaxError):
    """Raised when morphology text is not valid YAML."""
    def __init__(self, morphology, errmsg):
        self.msg = 'Syntax error in morphology %s:\n%s' % (morphology, errmsg)
class NotADictionaryError(MorphologySyntaxError):
    """Raised when the parsed YAML is not a top-level mapping."""
    def __init__(self, morph_filename):
        self.msg = 'Not a dictionary: morphology %s' % morph_filename
class MorphologyValidationError(morphlib.Error):
    """Base class for semantic (post-parse) validation errors."""
    pass
class UnknownKindError(MorphologyValidationError):
    """Raised when the 'kind' field is not a recognised kind."""
    def __init__(self, kind, morph_filename):
        self.msg = (
            'Unknown kind %s in morphology %s' % (kind, morph_filename))
class MissingFieldError(MorphologyValidationError):
    """Raised when a required field is absent from a morphology."""
    def __init__(self, field, morphology_name):
        self.field = field
        self.morphology_name = morphology_name
        self.msg = (
            'Missing field %s from morphology %s' % (field, morphology_name))
class InvalidFieldError(MorphologyValidationError):
    """Raised when a field is not allowed for the morphology kind."""
    def __init__(self, field, morphology_name):
        self.field = field
        self.morphology_name = morphology_name
        self.msg = (
            'Field %s not allowed in morphology %s' % (field, morphology_name))
class InvalidTypeError(MorphologyValidationError):
    """Raised when a field's value has the wrong type."""
    def __init__(self, field, expected, actual, morphology_name):
        self.field = field
        self.expected = expected
        self.actual = actual
        self.morphology_name = morphology_name
        self.msg = (
            'Field %s expected type %s, got %s in morphology %s' %
            (field, expected, actual, morphology_name))
class ObsoleteFieldsError(MorphologyValidationError):
    """Raised when a morphology still uses obsolete fields."""
    def __init__(self, fields, morph_filename):
        self.msg = (
            'Morphology %s uses obsolete fields: %s' %
            (morph_filename, ' '.join(fields)))
class UnknownArchitectureError(MorphologyValidationError):
    """Raised when a system names an unsupported architecture."""
    def __init__(self, arch, morph_filename):
        self.msg = ('Unknown architecture %s in morphology %s'
                    % (arch, morph_filename))
class UnknownBuildSystemError(MorphologyValidationError):
    """Raised when a chunk references an undefined build system."""
    def __init__(self, build_system, morph_filename):
        self.msg = ('Undefined build system %s in morphology %s'
                    % (build_system, morph_filename))
class NoStratumBuildDependenciesError(MorphologyValidationError):
    """Raised when a stratum lists no build dependencies at all."""
    def __init__(self, stratum_name, morph_filename):
        self.msg = (
            'Stratum %s has no build dependencies in %s' %
            (stratum_name, morph_filename))
class EmptyStratumError(MorphologyValidationError):
    """Raised when a stratum contains no chunks."""
    def __init__(self, stratum_name, morph_filename):
        self.msg = (
            'Stratum %s has no chunks in %s' %
            (stratum_name, morph_filename))
class DuplicateChunkError(MorphologyValidationError):
    """Raised when a chunk name appears twice in one stratum."""
    def __init__(self, stratum_name, chunk_name):
        self.stratum_name = stratum_name
        self.chunk_name = chunk_name
        MorphologyValidationError.__init__(
            self, 'Duplicate chunk %(chunk_name)s '\
            'in stratum %(stratum_name)s' % locals())
class EmptyRefError(MorphologyValidationError):
    """Raised when a chunk spec's ref is empty or null."""
    def __init__(self, ref_location, morph_filename):
        self.ref_location = ref_location
        self.morph_filename = morph_filename
        MorphologyValidationError.__init__(
            self, 'Empty ref found for %(ref_location)s '\
            'in %(morph_filename)s' % locals())
class ChunkSpecRefNotStringError(MorphologyValidationError):
    """Raised when a chunk spec's ref is not a string."""
    def __init__(self, ref_value, chunk_name, stratum_name):
        self.ref_value = ref_value
        self.chunk_name = chunk_name
        self.stratum_name = stratum_name
        MorphologyValidationError.__init__(
            self, 'Ref %(ref_value)s for %(chunk_name)s '\
            'in stratum %(stratum_name)s is not a string' % locals())
class ChunkSpecConflictingFieldsError(MorphologyValidationError):
    """Raised when a chunk spec sets mutually exclusive fields."""
    def __init__(self, fields, chunk_name, stratum_name):
        self.chunk_name = chunk_name
        self.stratum_name = stratum_name
        self.fields = fields
        MorphologyValidationError.__init__(
            self, 'Conflicting fields "%s" for %s in stratum %s.' % (
                ', and '.join(fields), chunk_name, stratum_name))
class ChunkSpecNoBuildInstructionsError(MorphologyValidationError):
    """Raised when a chunk has neither a build-system nor a morph file."""
    def __init__(self, chunk_name, stratum_name):
        self.chunk_name = chunk_name
        self.stratum_name = stratum_name
        self.msg = (
            'Chunk %(chunk_name)s in stratum %(stratum_name)s has no '
            'build-system defined, and no chunk .morph file referenced '
            'either. Please specify how to build the chunk, either by setting '
            '"build-system: " in the stratum, or adding a chunk .morph file '
            'and setting "morph: " in the stratum.' % locals())
class SystemStrataNotListError(MorphologyValidationError):
    """Raised when a system's 'strata' field is not a list."""
    def __init__(self, system_name, strata_type):
        self.system_name = system_name
        self.strata_type = strata_type
        typename = strata_type.__name__
        MorphologyValidationError.__init__(
            self, 'System %(system_name)s has the wrong type for its strata: '\
            '%(typename)s, expected list' % locals())
class DuplicateStratumError(MorphologyValidationError):
    """Raised when a stratum name appears twice in one system."""
    def __init__(self, system_name, stratum_name):
        self.system_name = system_name
        self.stratum_name = stratum_name
        MorphologyValidationError.__init__(
            self, 'Duplicate stratum %(stratum_name)s '\
            'in system %(system_name)s' % locals())
class SystemStratumSpecsNotMappingError(MorphologyValidationError):
    """Raised when the stratum specs of a system are not mappings."""
    def __init__(self, system_name, strata):
        self.system_name = system_name
        self.strata = strata
        MorphologyValidationError.__init__(
            self, 'System %(system_name)s has stratum specs '\
            'that are not mappings.' % locals())
class EmptySystemError(MorphologyValidationError):
    """Raised when a system has no strata."""
    def __init__(self, system_name):
        MorphologyValidationError.__init__(
            self, 'System %(system_name)s has no strata.' % locals())
class DependsOnSelfError(MorphologyValidationError):
    """Raised when a stratum build-depends on itself."""
    def __init__(self, name, filename):
        msg = ("Stratum %(name)s build-depends on itself (%(filename)s)"
               % locals())
        MorphologyValidationError.__init__(self, msg)
class MultipleValidationErrors(MorphologyValidationError):
    """Aggregate several validation errors for one morphology.

    Bug fix: the original assigned the literal string
    'Multiple errors when validating %(name)s:' without ever applying
    the %% operator, so the raw placeholder leaked into the message.
    The name is now interpolated.
    """
    def __init__(self, name, errors):
        self.name = name
        self.errors = errors
        self.msg = 'Multiple errors when validating %s:' % name
        # Append each underlying error on its own line.
        for error in errors:
            self.msg += ('\n' + str(error))
class DuplicateDeploymentNameError(MorphologyValidationError):
    """Raised when a cluster reuses a deployment name."""
    def __init__(self, cluster_filename, duplicates):
        self.duplicates = duplicates
        self.cluster_filename = cluster_filename
        morphlib.Error.__init__(self,
            'Cluster %s contains the following duplicate deployment names:%s'
            % (cluster_filename, '\n ' + '\n '.join(duplicates)))
class MorphologyDumper(yaml.SafeDumper):
    """yaml.SafeDumper that serialises morphologies in canonical form.

    Mapping keys are written in the fixed order below (unknown keys
    follow, sorted alphabetically), and multi-line strings are written
    in YAML literal ('|') style.  Python 2 only: uses unicode/iterkeys.
    """
    # Canonical key order for all morphology mappings.
    keyorder = (
        'name',
        'kind',
        'description',
        'arch',
        'strata',
        'configuration-extensions',
        'morph',
        'repo',
        'ref',
        'unpetrify-ref',
        'build-depends',
        'build-mode',
        'artifacts',
        'max-jobs',
        'products',
        'chunks',
        'build-system',
        'pre-configure-commands',
        'configure-commands',
        'post-configure-commands',
        'pre-build-commands',
        'build-commands',
        'pre-test-commands',
        'test-commands',
        'post-test-commands',
        'post-build-commands',
        'pre-install-commands',
        'install-commands',
        'post-install-commands',
        'artifact',
        'include',
        'systems',
        'deploy-defaults',
        'deploy',
        'type',
        'location',
    )
    @classmethod
    def _iter_in_global_order(cls, mapping):
        # Known keys first, in canonical order; the rest sorted after.
        for key in cls.keyorder:
            if key in mapping:
                yield key, mapping[key]
        for key in sorted(mapping.iterkeys()):
            if key not in cls.keyorder:
                yield key, mapping[key]
    @classmethod
    def _represent_dict(cls, dumper, mapping):
        return dumper.represent_mapping('tag:yaml.org,2002:map',
                                        cls._iter_in_global_order(mapping))
    @classmethod
    def _represent_str(cls, dumper, orig_data):
        # Multi-line strings that decode as ascii or utf-8 get literal
        # '|' style; everything else falls back to the default.
        fallback_representer = yaml.representer.SafeRepresenter.represent_str
        try:
            data = unicode(orig_data, 'ascii')
            if data.count('\n') == 0:
                return fallback_representer(dumper, orig_data)
        except UnicodeDecodeError:
            try:
                data = unicode(orig_data, 'utf-8')
                if data.count('\n') == 0:
                    return fallback_representer(dumper, orig_data)
            except UnicodeDecodeError:
                return fallback_representer(dumper, orig_data)
        return dumper.represent_scalar(u'tag:yaml.org,2002:str',
                                       data, style='|')
    @classmethod
    def _represent_unicode(cls, dumper, data):
        if data.count('\n') == 0:
            return yaml.representer.SafeRepresenter.represent_unicode(dumper,
                                                                      data)
        return dumper.represent_scalar(u'tag:yaml.org,2002:str',
                                       data, style='|')
    def __init__(self, *args, **kwargs):
        yaml.SafeDumper.__init__(self, *args, **kwargs)
        # Register this dumper's custom representers.
        self.add_representer(dict, self._represent_dict)
        self.add_representer(str, self._represent_str)
        self.add_representer(unicode, self._represent_unicode)
class MorphologyLoader(object):
'''Load morphologies from disk, or save them back to disk.'''
_required_fields = {
'chunk': [
'name',
],
'stratum': [
'name',
],
'system': [
'name',
'arch',
'strata',
],
'cluster': [
'name',
'systems',
],
}
_obsolete_fields = {
'system': [
'system-kind',
'disk-size',
],
}
_static_defaults = {
'chunk': {
'description': '',
'pre-configure-commands': None,
'configure-commands': None,
'post-configure-commands': None,
'pre-build-commands': None,
'build-commands': None,
'post-build-commands': None,
'pre-test-commands': None,
'test-commands': None,
'post-test-commands': None,
'pre-install-commands': None,
'install-commands': None,
'post-install-commands': None,
'pre-strip-commands': None,
'strip-commands': None,
'post-strip-commands': None,
'devices': [],
'products': [],
'max-jobs': None,
'build-system': 'manual',
'build-mode': 'staging',
'prefix': '/usr',
'system-integration': [],
},
'stratum': {
'chunks': [],
'description': '',
'build-depends': [],
'products': [],
},
'system': {
'description': '',
'arch': None,
'configuration-extensions': [],
},
'cluster': {
'description': '',
},
}
def __init__(self,
predefined_build_systems={}):
self._predefined_build_systems = predefined_build_systems.copy()
if 'manual' not in self._predefined_build_systems:
self._predefined_build_systems['manual'] = \
morphlib.buildsystem.ManualBuildSystem()
def parse_morphology_text(self, text, morph_filename):
'''Parse a textual morphology.
The text may be a string, or an open file handle.
Return the new Morphology object, or raise an error indicating
the problem. This method does minimal validation: a syntactically
correct morphology is fine, even if none of the fields are
valid. It also does not set any default values for any of the
fields. See validate and set_defaults.
'''
try:
obj = yaml.safe_load(text)
except yaml.error.YAMLError as e:
raise MorphologyNotYamlError(morph_filename, e)
if not isinstance(obj, dict):
raise NotADictionaryError(morph_filename)
m = morphlib.morphology.Morphology(obj)
m.filename = morph_filename
return m
def load_from_string(self, string,
filename='string'): # pragma: no cover
'''Load a morphology from a string.
Return the Morphology object.
'''
if string is None:
return None
m = self.parse_morphology_text(string, filename)
self.validate(m)
self.set_commands(m)
self.set_defaults(m)
return m
def load_from_file(self, filename):
'''Load a morphology from a named file.
Return the Morphology object.
'''
with open(filename) as f:
text = f.read()
return self.load_from_string(text, filename=filename)
def save_to_string(self, morphology):
'''Return normalised textual form of morphology.'''
return yaml.dump(morphology.data, Dumper=MorphologyDumper,
default_flow_style=False)
def save_to_file(self, filename, morphology):
'''Save a morphology object to a named file.'''
text = self.save_to_string(morphology)
with morphlib.savefile.SaveFile(filename, 'w') as f:
f.write(text)
def validate(self, morph):
'''Validate a morphology.'''
# Validate that the kind field is there.
self._require_field('kind', morph)
# The rest of the validation is dependent on the kind.
kind = morph['kind']
if kind not in ('system', 'stratum', 'chunk', 'cluster'):
raise UnknownKindError(morph['kind'], morph.filename)
required = ['kind'] + self._required_fields[kind]
obsolete = self._obsolete_fields.get(kind, [])
allowed = self._static_defaults[kind].keys()
self._require_fields(required, morph)
self._deny_obsolete_fields(obsolete, morph)
self._deny_unknown_fields(required + allowed, morph)
getattr(self, '_validate_%s' % kind)(morph)
def _validate_cluster(self, morph):
# Deployment names must be unique within a cluster
deployments = collections.Counter()
for system in morph['systems']:
deployments.update(system['deploy'].iterkeys())
if 'subsystems' in system:
deployments.update(self._get_subsystem_names(system))
duplicates = set(deployment for deployment, count
in deployments.iteritems() if count > 1)
if duplicates:
raise DuplicateDeploymentNameError(morph.filename, duplicates)
def _get_subsystem_names(self, system): # pragma: no cover
for subsystem in system.get('subsystems', []):
for name in subsystem['deploy'].iterkeys():
yield name
for name in self._get_subsystem_names(subsystem):
yield name
def _validate_system(self, morph):
# A system must contain at least one stratum
strata = morph['strata']
if (not isinstance(strata, collections.Iterable)
or isinstance(strata, collections.Mapping)):
raise SystemStrataNotListError(morph['name'],
type(strata))
if not strata:
raise EmptySystemError(morph['name'])
if not all(isinstance(o, collections.Mapping) for o in strata):
raise SystemStratumSpecsNotMappingError(morph['name'], strata)
# All stratum names should be unique within a system.
names = set()
for spec in strata:
name = spec.get('alias', spec['morph'])
if name in names:
raise DuplicateStratumError(morph['name'], name)
names.add(name)
# Validate stratum spec fields
self._validate_stratum_specs_fields(morph, 'strata')
# We allow the ARMv7 little-endian architecture to be specified
# as armv7 and armv7l. Normalise.
if morph['arch'] == 'armv7':
morph['arch'] = 'armv7l'
# Architecture name must be known.
if morph['arch'] not in morphlib.valid_archs:
raise UnknownArchitectureError(morph['arch'], morph.filename)
def _validate_stratum(self, morph):
# Require at least one chunk.
if len(morph.get('chunks', [])) == 0:
raise EmptyStratumError(morph['name'], morph.filename)
# Require build-dependencies for the stratum itself, unless
# it has chunks built in bootstrap mode.
if 'build-depends' in morph:
if not isinstance(morph['build-depends'], list):
raise InvalidTypeError(
'build-depends', list, type(morph['build-depends']),
morph['name'])
for dep in morph['build-depends']:
if dep['morph'] == morph.filename:
raise DependsOnSelfError(morph['name'], morph.filename)
else:
for spec in morph['chunks']:
if spec.get('build-mode') in ['bootstrap', 'test']:
break
else:
raise NoStratumBuildDependenciesError(
morph['name'], morph.filename)
# Validate build-dependencies if specified
self._validate_stratum_specs_fields(morph, 'build-depends')
# All chunk names must be unique within a stratum.
names = set()
for spec in morph['chunks']:
name = spec.get('alias', spec['name'])
if name in names:
raise DuplicateChunkError(morph['name'], name)
names.add(name)
# Check each reference to a chunk.
for spec in morph['chunks']:
chunk_name = spec.get('alias', spec['name'])
# All chunk refs must be strings.
if 'ref' in spec:
ref = spec['ref']
if ref == None:
raise EmptyRefError(
spec.get('alias', spec['name']), morph.filename)
elif not isinstance(ref, basestring):
raise ChunkSpecRefNotStringError(
ref, spec.get('alias', spec['name']), morph.filename)
# The build-depends field must be a list.
if 'build-depends' in spec:
if not isinstance(spec['build-depends'], list):
raise InvalidTypeError(
'%s.build-depends' % chunk_name, list,
type(spec['build-depends']), morph['name'])
# Either 'morph' or 'build-system' must be specified.
if 'morph' in spec and 'build-system' in spec:
raise ChunkSpecConflictingFieldsError(
['morph', 'build-system'], chunk_name, morph.filename)
if 'morph' not in spec and 'build-system' not in spec:
raise ChunkSpecNoBuildInstructionsError(
chunk_name, morph.filename)
@classmethod
def _validate_chunk(cls, morphology):
errors = []
if 'products' in morphology:
cls._validate_products(morphology['name'],
morphology['products'], errors)
if len(errors) == 1:
raise errors[0]
elif errors:
raise MultipleValidationErrors(morphology['name'], errors)
@classmethod
def _validate_products(cls, morphology_name, products, errors):
'''Validate the products field is of the correct type.'''
if (not isinstance(products, collections.Iterable)
or isinstance(products, collections.Mapping)):
raise InvalidTypeError('products', list,
type(products), morphology_name)
for spec_index, spec in enumerate(products):
if not isinstance(spec, collections.Mapping):
e = InvalidTypeError('products[%d]' % spec_index,
dict, type(spec), morphology_name)
errors.append(e)
continue
cls._validate_products_spec_fields_exist(morphology_name,
spec_index, spec, errors)
if 'include' in spec:
cls._validate_products_specs_include(
morphology_name, spec_index, spec['include'], errors)
product_spec_required_fields = ('artifact', 'include')
@classmethod
def _validate_products_spec_fields_exist(
cls, morphology_name, spec_index, spec, errors):
given_fields = sorted(spec.iterkeys())
missing = (field for field in cls.product_spec_required_fields
if field not in given_fields)
for field in missing:
e = MissingFieldError('products[%d].%s' % (spec_index, field),
morphology_name)
errors.append(e)
unexpected = (field for field in given_fields
if field not in cls.product_spec_required_fields)
for field in unexpected:
e = InvalidFieldError('products[%d].%s' % (spec_index, field),
morphology_name)
errors.append(e)
@classmethod
def _validate_products_specs_include(cls, morphology_name, spec_index,
include_patterns, errors):
'''Validate that products' include field is a list of strings.'''
# Allow include to be most iterables, but not a mapping
# or a string, since iter of a mapping is just the keys,
# and the iter of a string is a 1 character length string,
# which would also validate as an iterable of strings.
if (not isinstance(include_patterns, collections.Iterable)
or isinstance(include_patterns, collections.Mapping)
or isinstance(include_patterns, basestring)):
e = InvalidTypeError('products[%d].include' % spec_index, list,
type(include_patterns), morphology_name)
errors.append(e)
else:
for pattern_index, pattern in enumerate(include_patterns):
pattern_path = ('products[%d].include[%d]' %
(spec_index, pattern_index))
if not isinstance(pattern, basestring):
e = InvalidTypeError(pattern_path, str,
type(pattern), morphology_name)
errors.append(e)
@classmethod
def _warn_obsolete_field(cls, morphology, spec, field):
warnings.warn(MorphologyObsoleteFieldWarning(morphology, spec, field),
stacklevel=2)
@classmethod
def _validate_stratum_specs_fields(cls, morphology, specs_field):
for spec in morphology.get(specs_field, None) or []:
for obsolete_field in ('repo', 'ref'):
if obsolete_field in spec:
cls._warn_obsolete_field(morphology, spec, obsolete_field)
def _require_field(self, field, morphology):
if field not in morphology:
raise MissingFieldError(field, morphology.filename)
def _require_fields(self, fields, morphology):
for field in fields:
self._require_field(field, morphology)
def _deny_obsolete_fields(self, fields, morphology):
obsolete_ones = [x for x in morphology if x in fields]
if obsolete_ones:
raise ObsoleteFieldsError(obsolete_ones, morphology.filename)
def _deny_unknown_fields(self, allowed, morphology):
for field in morphology:
if field not in allowed:
raise InvalidFieldError(field, morphology.filename)
def set_defaults(self, morphology):
'''Set all missing fields in the morpholoy to their defaults.
The morphology is assumed to be valid.
'''
kind = morphology['kind']
defaults = self._static_defaults[kind]
for key in defaults:
if key not in morphology:
morphology[key] = defaults[key]
getattr(self, '_set_%s_defaults' % kind)(morphology)
def unset_defaults(self, morphology):
'''If a field is equal to its default, delete it.
The morphology is assumed to be valid.
'''
kind = morphology['kind']
defaults = self._static_defaults[kind]
for key in defaults:
if key in morphology and morphology[key] == defaults[key]:
del morphology[key]
getattr(self, '_unset_%s_defaults' % kind)(morphology)
@classmethod
def _set_stratum_specs_defaults(cls, morphology, specs_field):
for spec in morphology.get(specs_field, None) or []:
for obsolete_field in ('repo', 'ref'):
if obsolete_field in spec:
del spec[obsolete_field]
@classmethod
def _unset_stratum_specs_defaults(cls, morphology, specs_field):
for spec in morphology.get(specs_field, []):
for obsolete_field in ('repo', 'ref'):
if obsolete_field in spec:
del spec[obsolete_field]
def _set_cluster_defaults(self, morph):
for system in morph.get('systems', []):
if 'deploy-defaults' not in system:
system['deploy-defaults'] = {}
if 'deploy' not in system:
system['deploy'] = {}
def _unset_cluster_defaults(self, morph):
for system in morph.get('systems', []):
if 'deploy-defaults' in system and system['deploy-defaults'] == {}:
del system['deploy-defaults']
if 'deploy' in system and system['deploy'] == {}:
del system['deploy']
def _set_system_defaults(self, morph):
self._set_stratum_specs_defaults(morph, 'strata')
def _unset_system_defaults(self, morph):
self._unset_stratum_specs_defaults(morph, 'strata')
def _set_stratum_defaults(self, morph):
for spec in morph['chunks']:
if 'repo' not in spec:
spec['repo'] = spec['name']
if 'build-mode' not in spec:
spec['build-mode'] = \
self._static_defaults['chunk']['build-mode']
if 'prefix' not in spec:
spec['prefix'] = \
self._static_defaults['chunk']['prefix']
self._set_stratum_specs_defaults(morph, 'build-depends')
def _unset_stratum_defaults(self, morph):
for spec in morph['chunks']:
if 'repo' in spec and spec['repo'] == spec['name']:
del spec['repo']
if 'build-mode' in spec and spec['build-mode'] == \
self._static_defaults['chunk']['build-mode']:
del spec['build-mode']
if 'prefix' in spec and spec['prefix'] == \
self._static_defaults['chunk']['prefix']:
del spec['prefix']
self._unset_stratum_specs_defaults(morph, 'strata')
def _set_chunk_defaults(self, morph):
if morph['max-jobs'] is not None:
morph['max-jobs'] = int(morph['max-jobs'])
    def _unset_chunk_defaults(self, morph): # pragma: no cover
        '''Delete '*-commands' fields that equal the build system default.'''
        # This is only used by the deprecated branch-and-merge plugin, and
        # probably doesn't work correctly for definitions V7 and newer.
        default_bs = self._static_defaults['chunk']['build-system']
        bs_name = morph.get('build-system', default_bs)
        bs = self.lookup_build_system(bs_name)
        for key in self._static_defaults['chunk']:
            if key not in morph: continue
            if 'commands' not in key: continue
            # Build system attributes use '_' where morphology keys use '-'.
            attr = key.replace('-', '_')
            default_value = getattr(bs, attr)
            if morph[key] == default_value:
                del morph[key]
    def lookup_build_system(self, name):
        '''Return the predefined build system called *name*.

        Raises KeyError if the name is unknown.
        '''
        return self._predefined_build_systems[name]
def set_commands(self, morph):
if morph['kind'] == 'chunk':
default = self._static_defaults['chunk']['build-system']
bs_name = morph.get('build-system', default)
try:
bs = self.lookup_build_system(bs_name)
except KeyError:
raise UnknownBuildSystemError(bs_name, morph['name'])
for key in self._static_defaults['chunk']:
if 'commands' not in key: continue
if key not in morph:
attr = '_'.join(key.split('-'))
morph[key] = getattr(bs, attr)
| gpl-2.0 |
mrquim/mrquimrepo | plugin.program.indigo/support.py | 4 | 3459 | import base64
import os
import re
import shutil
import urllib2
import xbmc
import xbmcaddon
from libs import kodi
addon_id = kodi.addon_id
# NOTE(review): the decoded value below is immediately overwritten by the
# plain-text assignment on the next line, so the base64 form is dead code.
BlocksUrl = base64.b64decode('aHR0cDovL2luZGlnby50dmFkZG9ucy5jby9ibG9ja2VyL2Jsb2NrZXIudHh0')
BlocksUrl = 'http://indigo.tvaddons.co/blocker/blocker.txt'
def service_checks():
    '''Run the auto-maintenance checks when the service starts.'''
    # BUG FIX: 'datetime' was used below but never imported anywhere in
    # this file, so this function raised NameError; import it locally to
    # match the existing local-import style ('import maintool').
    import datetime
    import maintool
    maintool.source_change()
    # weekday() is 0 (Monday) .. 6 (Sunday).
    date = datetime.datetime.today().weekday()
    # NOTE(review): Kodi settings are usually returned as strings; comparing
    # against the int weekday only matches if get_setting returns an int --
    # confirm against kodi.get_setting's return type.
    if (kodi.get_setting("clearday") == date) or kodi.get_setting("acstartup") == "true":
        maintool.auto_clean(True)
    elif (kodi.get_setting("clearday") == 0) and kodi.get_setting("acstartup") != "true":
        kodi.log('Auto Main Turned off')
def scriptblock_checks():
    '''Fetch the remote blocklist and remove matching addon directories.'''
    if kodi.get_setting('scriptblock') == 'true':
        kodi.log('SCRIPT BLOCKER ON')
        try:
            req = urllib2.Request(BlocksUrl)
            req.add_header('User-Agent', 'Mozilla/5.0 (Linux; U; Android 4.2.2; en-us; AFTB Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30')
            response = urllib2.urlopen(req)
        except:
            kodi.log('Could not perform blocked script check. invalid URL')
            return
        link = response.read()
        response.close()
        # Flatten the payload before matching block="..." entries.
        link = link.replace('\n', '').replace('\r', '').replace('\a', '')
        match = re.compile('block="(.+?)"').findall(link)
        for blocked in match:
            kodi.log('Checking for Malicious scripts')
            # Resolve Kodi's addons directory relative to this addon's path.
            addonPath = xbmcaddon.Addon(id=addon_id).getAddonInfo('path')
            addonPath = xbmc.translatePath(addonPath)
            xbmcPath = os.path.join(addonPath, "..", "..")
            xbmcPath = os.path.abspath(xbmcPath);
            addonpath = xbmcPath + '/addons/'
            try:
                # Delete any addon directory whose path contains a blocked
                # name (substring match, not an exact addon id match).
                for root, dirs, files in os.walk(addonpath, topdown=False):
                    if root != addonpath:
                        if blocked in root:
                            shutil.rmtree(root)
            except:
                kodi.log('Could not find blocked script')
def clear_cache():
    '''Delete cached files and folders from Kodi's cache directory.

    The 'archive_cache' folder is preserved.  Individual deletion
    failures are ignored; a log entry is written only when the cache
    directory cannot be processed at all.
    '''
    kodi.log('STARTUP CLEAR CACHE ACTIVATED')
    xbmc_cache_path = os.path.join(xbmc.translatePath('special://home'), 'cache')
    # Idiom fix: was 'os.path.exists(...) == True'.
    if not os.path.exists(xbmc_cache_path):
        return
    try:
        for root, dirs, files in os.walk(xbmc_cache_path):
            if len(files) > 0:
                for f in files:
                    try:
                        os.unlink(os.path.join(root, f))
                    except:
                        pass
                for d in dirs:
                    # Keep Kodi's archive cache intact.
                    if 'archive_cache' not in d:
                        try:
                            shutil.rmtree(os.path.join(root, d))
                        except:
                            pass
    except:
        # BUG FIX: this message used to be logged unconditionally after
        # every (successful) clean; now it is only logged on failure.
        kodi.log('Startup Service could not clear cache')
def purge_packages():
    '''Remove every downloaded addon package from Kodi's packages folder.'''
    kodi.log('STARTUP PURGE PACKAGES ACTIVATED')
    packages_path = xbmc.translatePath(os.path.join('special://home/addons/packages', ''))
    try:
        # Walk bottom-up and delete each file individually.
        for root, dirs, files in os.walk(packages_path, topdown=False):
            for name in files:
                os.remove(os.path.join(root, name))
    except:
        kodi.log('Startup Service could not purge packages')
| gpl-2.0 |
gem/oq-engine | openquake/hmtk/seismicity/completeness/base.py | 1 | 2703 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2021 GEM Foundation, G. Weatherill, M. Pagani,
# D. Monelli.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM’s OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM’s OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (hazard@globalquakemodel.org).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
"""
Module :mod:`openquake.hmtk.seismicity.completeness.base` defines an abstract base class
:class:`BaseCatalogueCompleteness` for catalogue completeness algorithms.
"""
import abc
from openquake.hmtk.registry import CatalogueFunctionRegistry
class BaseCatalogueCompleteness(object):
    '''
    Abstract base class for implementation of the completeness algorithms
    '''
    # NOTE(review): @abc.abstractmethod only takes effect when the class
    # uses abc.ABCMeta as its metaclass; as written, instantiating a
    # subclass that does not override completeness() is not prevented --
    # confirm whether enforcement is intended.

    @abc.abstractmethod
    def completeness(self, catalogue, config):
        '''
        :param catalogue:
            Earthquake catalogue as instance of
            :class: openquake.hmtk.seismicity.catalogue.Catalogue
        :param dict config:
            Configuration parameters of the algorithm
        '''
        return
COMPLETENESS_METHODS = CatalogueFunctionRegistry()
| agpl-3.0 |
esemin83/python_training | generator/group.py | 1 | 1104 | from model.group import Group
import random
import string
import os.path
import jsonpickle
import getopt
import sys
# Command-line options: -n <count> repeats each combination,
# -f <path> sets the output file (relative to the project root).
try:
    opts, args = getopt.getopt(sys.argv[1:], "n:f:", ["number of groups", "file"])
except getopt.GetoptError as err:
    # BUG FIX: the getopt module has no usage() function; the old handler
    # raised AttributeError instead of reporting the bad option.
    print(err)
    print("Usage: %s [-n number_of_groups] [-f output_file]" % sys.argv[0])
    sys.exit(2)
n = 1
f = "data/groups.json"
for o, a in opts:
    if o == "-n":
        n = int(a)
    elif o == "-f":
        f = a
def random_string(prefix, maxlen):
    '''Return *prefix* followed by up to maxlen-1 random characters.

    The random tail is drawn from letters, digits, punctuation and
    spaces; spaces are over-weighted so they appear more often.
    '''
    alphabet = string.ascii_letters + string.digits + string.punctuation + " " * 7
    tail_length = random.randrange(maxlen)
    return prefix + "".join(random.choice(alphabet) for _ in range(tail_length))
# Test fixture: one fully-empty group plus every combination of
# empty/random name, header and footer, each combination repeated n times.
testdata = [Group(name="", header="", footer="")] + \
    [
        Group(name=name, header=header, footer=footer)
        for name in ["", random_string("name", 15)]
        for header in ["", random_string("header", 15)]
        for footer in ["", random_string("footer", 15)]
        for m in range(n)  # m is unused; it only multiplies the data set
    ]
# Write the generated groups as pretty-printed JSON, one level above
# this script's directory.
file = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", f)
with open(file, "w") as out:
    jsonpickle.set_encoder_options("json", indent=2)
    out.write(jsonpickle.encode(testdata))
40223149/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/tempfile.py | 728 | 22357 | """Temporary files.
This module provides generic, low- and high-level interfaces for
creating temporary files and directories. The interfaces listed
as "safe" just below can be used without fear of race conditions.
Those listed as "unsafe" cannot, and are provided for backward
compatibility only.
This module also provides some data items to the user:
TMP_MAX - maximum number of names that will be tried before
giving up.
tempdir - If this is set to a string before the first use of
any routine from this module, it will be considered as
another candidate location to store temporary files.
"""
__all__ = [
"NamedTemporaryFile", "TemporaryFile", # high level safe interfaces
"SpooledTemporaryFile", "TemporaryDirectory",
"mkstemp", "mkdtemp", # low level safe interfaces
"mktemp", # deprecated unsafe interface
"TMP_MAX", "gettempprefix", # constants
"tempdir", "gettempdir"
]
# Imports.
import warnings as _warnings
import sys as _sys
import io as _io
import os as _os
import errno as _errno
from random import Random as _Random
try:
    import fcntl as _fcntl
except ImportError:
    # Platforms without fcntl (e.g. Windows) cannot set FD_CLOEXEC this
    # way; fall back to a no-op.
    def _set_cloexec(fd):
        pass
else:
    def _set_cloexec(fd):
        # Mark *fd* close-on-exec so child processes do not inherit it.
        try:
            flags = _fcntl.fcntl(fd, _fcntl.F_GETFD, 0)
        except OSError:
            pass
        else:
            # flags read successfully, modify
            flags |= _fcntl.FD_CLOEXEC
            _fcntl.fcntl(fd, _fcntl.F_SETFD, flags)
try:
    import _thread
except ImportError:
    # No real threads available; the dummy module provides the same API.
    import _dummy_thread as _thread
_allocate_lock = _thread.allocate_lock

# os.open() flags: create the file exclusively, opened read/write.
_text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL
if hasattr(_os, 'O_NOINHERIT'):
    # Windows: keep the descriptor out of child processes.
    _text_openflags |= _os.O_NOINHERIT
if hasattr(_os, 'O_NOFOLLOW'):
    # POSIX: refuse to follow a symlink at the temp file's name.
    _text_openflags |= _os.O_NOFOLLOW

_bin_openflags = _text_openflags
if hasattr(_os, 'O_BINARY'):
    # Windows: suppress newline translation for binary files.
    _bin_openflags |= _os.O_BINARY

# Maximum number of candidate names tried before mk*temp gives up.
if hasattr(_os, 'TMP_MAX'):
    TMP_MAX = _os.TMP_MAX
else:
    TMP_MAX = 10000

# Although it does not have an underscore for historical reasons, this
# variable is an internal implementation detail (see issue 10354).
template = "tmp"

# Internal routines.

# Guards one-time lazy initialisation (default tempdir, name sequence).
_once_lock = _allocate_lock()
if hasattr(_os, "lstat"):
_stat = _os.lstat
elif hasattr(_os, "stat"):
_stat = _os.stat
else:
# Fallback. All we need is something that raises OSError if the
# file doesn't exist.
def _stat(fn):
f = open(fn)
f.close()
def _exists(fn):
try:
_stat(fn)
except OSError:
return False
else:
return True
class _RandomNameSequence:
"""An instance of _RandomNameSequence generates an endless
sequence of unpredictable strings which can safely be incorporated
into file names. Each string is six characters long. Multiple
threads can safely use the same instance at the same time.
_RandomNameSequence is an iterator."""
characters = "abcdefghijklmnopqrstuvwxyz0123456789_"
@property
def rng(self):
cur_pid = _os.getpid()
if cur_pid != getattr(self, '_rng_pid', None):
self._rng = _Random()
self._rng_pid = cur_pid
return self._rng
def __iter__(self):
return self
def __next__(self):
c = self.characters
choose = self.rng.choice
letters = [choose(c) for dummy in "123456"]
return ''.join(letters)
def _candidate_tempdir_list():
"""Generate a list of candidate temporary directories which
_get_default_tempdir will try."""
dirlist = []
# First, try the environment.
for envname in 'TMPDIR', 'TEMP', 'TMP':
dirname = _os.getenv(envname)
if dirname: dirlist.append(dirname)
# Failing that, try OS-specific locations.
if _os.name == 'nt':
dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ])
else:
dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ])
# As a last resort, the current directory.
try:
dirlist.append(_os.getcwd())
except (AttributeError, OSError):
dirlist.append(_os.curdir)
return dirlist
def _get_default_tempdir():
    """Calculate the default directory to use for temporary files.
    This routine should be called exactly once.

    We determine whether or not a candidate temp dir is usable by
    trying to create and write to a file in that directory.  If this
    is successful, the test file is deleted.  To prevent denial of
    service, the name of the test file must be randomized."""

    namer = _RandomNameSequence()
    dirlist = _candidate_tempdir_list()

    for dir in dirlist:
        if dir != _os.curdir:
            dir = _os.path.normcase(_os.path.abspath(dir))
        # Try only a few names per directory.
        for seq in range(100):
            name = next(namer)
            filename = _os.path.join(dir, name)
            try:
                # An O_EXCL create plus a tiny write proves this user can
                # actually write to the directory.
                fd = _os.open(filename, _bin_openflags, 0o600)
                try:
                    try:
                        with _io.open(fd, 'wb', closefd=False) as fp:
                            fp.write(b'blat')
                    finally:
                        _os.close(fd)
                finally:
                    _os.unlink(filename)
                return dir
            except FileExistsError:
                # Name collision: try the next random name.
                pass
            except OSError:
                break   # no point trying more names in this directory
    raise FileNotFoundError(_errno.ENOENT,
                            "No usable temporary directory found in %s" %
                            dirlist)
# Lazily-created shared name generator; protected by _once_lock.
_name_sequence = None

def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""

    global _name_sequence
    if _name_sequence is None:
        # Double-checked locking: build the shared sequence exactly once
        # even when several threads race to initialise it.
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
def _mkstemp_inner(dir, pre, suf, flags):
    """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.

    Returns an (fd, absolute path) pair.  The O_EXCL open guarantees
    the caller created the file, and mode 0o600 keeps it private to
    the current user.
    """

    names = _get_candidate_names()

    for seq in range(TMP_MAX):
        name = next(names)
        file = _os.path.join(dir, pre + name + suf)
        try:
            fd = _os.open(file, flags, 0o600)
            # Keep the descriptor out of child processes where possible.
            _set_cloexec(fd)
            return (fd, _os.path.abspath(file))
        except FileExistsError:
            continue    # try again
        except PermissionError:
            # This exception is thrown when a directory with the chosen name
            # already exists on windows.
            if _os.name == 'nt':
                continue
            else:
                raise

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary file name found")
# User visible interfaces.

def gettempprefix():
    """Accessor for tempdir.template."""
    # Returns the module-level 'template' prefix ("tmp" by default).
    return template

# Cached default temp directory; computed lazily by gettempdir().
tempdir = None
def gettempdir():
    """Accessor for tempfile.tempdir."""
    global tempdir
    if tempdir is None:
        # Double-checked locking: probe for the default directory exactly
        # once, even under concurrent first calls.
        _once_lock.acquire()
        try:
            if tempdir is None:
                tempdir = _get_default_tempdir()
        finally:
            _once_lock.release()
    return tempdir
def mkstemp(suffix="", prefix=template, dir=None, text=False):
    """Create a unique temporary file and return (fd, absolute path).

    'suffix' -- appended to the file name (default: none).
    'prefix' -- prepended to the file name (default: the 'tmp' template).
    'dir'    -- directory to create the file in (default: gettempdir()).
    'text'   -- open in text mode instead of the default binary mode.

    The file is readable and writable only by the creating user and is
    not executable; its descriptor is not inherited by child processes.
    The caller is responsible for deleting the file when done with it.
    """
    if dir is None:
        dir = gettempdir()

    flags = _text_openflags if text else _bin_openflags
    return _mkstemp_inner(dir, prefix, suffix, flags)
def mkdtemp(suffix="", prefix=template, dir=None):
    """Create a uniquely named temporary directory and return its path.

    Arguments are as for mkstemp(), except that the 'text' argument is
    not accepted.  The directory is created with mode 0o700, so it is
    readable, writable and searchable only by the creating user.  The
    caller is responsible for deleting it when done.
    """
    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(names) + suffix)
        try:
            _os.mkdir(candidate, 0o700)
        except FileExistsError:
            continue    # name taken; try another
        return candidate

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary directory name found")
def mktemp(suffix="", prefix=template, dir=None):
    """Return a candidate temporary file name without creating the file.

    Arguments are as for mkstemp(), except that the 'text' argument is
    not accepted.

    This function is unsafe: by the time the caller creates the file,
    another process may already have claimed the name.  Prefer
    mkstemp() or NamedTemporaryFile().
    """
    if dir is None:
        dir = gettempdir()

    names = _get_candidate_names()
    for seq in range(TMP_MAX):
        candidate = _os.path.join(dir, prefix + next(names) + suffix)
        if not _exists(candidate):
            return candidate

    raise FileExistsError(_errno.EEXIST,
                          "No usable temporary filename found")
class _TemporaryFileWrapper:
    """Temporary file wrapper

    This class provides a wrapper around files opened for
    temporary use. In particular, it seeks to automatically
    remove the file when it is no longer needed.
    """

    def __init__(self, file, name, delete=True):
        self.file = file              # underlying file object
        self.name = name              # path of the temporary file
        self.close_called = False     # guards against a double unlink
        self.delete = delete          # unlink the file on close?

    def __getattr__(self, name):
        # Attribute lookups are delegated to the underlying file
        # and cached for non-numeric results
        # (i.e. methods are cached, closed and friends are not)
        file = self.__dict__['file']
        a = getattr(file, name)
        if not isinstance(a, int):
            setattr(self, name, a)
        return a

    # The underlying __enter__ method returns the wrong object
    # (self.file) so override it to return the wrapper
    def __enter__(self):
        self.file.__enter__()
        return self

    # iter() doesn't use __getattr__ to find the __iter__ method
    def __iter__(self):
        return iter(self.file)

    # NT provides delete-on-close as a primitive, so we don't need
    # the wrapper to do anything special. We still use it so that
    # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile.
    if _os.name != 'nt':
        # Cache the unlinker so we don't get spurious errors at
        # shutdown when the module-level "os" is None'd out. Note
        # that this must be referenced as self.unlink, because the
        # name TemporaryFileWrapper may also get None'd out before
        # __del__ is called.
        unlink = _os.unlink

        def close(self):
            if not self.close_called:
                self.close_called = True
                self.file.close()
                if self.delete:
                    self.unlink(self.name)

        def __del__(self):
            self.close()

        # Need to trap __exit__ as well to ensure the file gets
        # deleted when used in a with statement
        def __exit__(self, exc, value, tb):
            result = self.file.__exit__(exc, value, tb)
            self.close()
            return result
    else:
        # On NT the OS deletes the file via O_TEMPORARY, so no explicit
        # unlink is needed here.
        def __exit__(self, exc, value, tb):
            self.file.__exit__(exc, value, tb)
def NamedTemporaryFile(mode='w+b', buffering=-1, encoding=None,
                       newline=None, suffix="", prefix=template,
                       dir=None, delete=True):
    """Create and return a named temporary file.

    'prefix', 'suffix', 'dir' -- as for mkstemp.
    'mode'      -- the mode argument to io.open (default "w+b").
    'buffering' -- the buffer size argument to io.open (default -1).
    'encoding'  -- the encoding argument to io.open (default None).
    'newline'   -- the newline argument to io.open (default None).
    'delete'    -- whether the file is deleted on close (default True).

    Returns an object with a file-like interface whose 'name'
    attribute is the path of the file.  Unless 'delete' is false the
    file is removed automatically when it is closed.
    """
    if dir is None:
        dir = gettempdir()

    flags = _bin_openflags
    # Windows can delete-on-close natively; O_TEMPORARY asks for that.
    if _os.name == 'nt' and delete:
        flags |= _os.O_TEMPORARY

    (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
    fileobj = _io.open(fd, mode, buffering=buffering,
                       newline=newline, encoding=encoding)
    return _TemporaryFileWrapper(fileobj, name, delete)
if _os.name != 'posix' or _os.sys.platform == 'cygwin':
    # On non-POSIX and Cygwin systems, assume that we cannot unlink a file
    # while it is open.
    TemporaryFile = NamedTemporaryFile
else:
    def TemporaryFile(mode='w+b', buffering=-1, encoding=None,
                      newline=None, suffix="", prefix=template,
                      dir=None):
        """Create and return a temporary file.
        Arguments:
        'prefix', 'suffix', 'dir' -- as for mkstemp.
        'mode' -- the mode argument to io.open (default "w+b").
        'buffering' -- the buffer size argument to io.open (default -1).
        'encoding' -- the encoding argument to io.open (default None)
        'newline' -- the newline argument to io.open (default None)
        The file is created as mkstemp() would do it.

        Returns an object with a file-like interface.  The file has no
        name, and will cease to exist when it is closed.
        """
        if dir is None:
            dir = gettempdir()

        flags = _bin_openflags

        (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags)
        try:
            # Unlink immediately: the file stays alive through the open
            # descriptor but has no name in the filesystem.
            _os.unlink(name)
            return _io.open(fd, mode, buffering=buffering,
                            newline=newline, encoding=encoding)
        except:
            # Don't leak the descriptor if unlink() or open() failed.
            _os.close(fd)
            raise
class SpooledTemporaryFile:
    """Temporary file wrapper, specialized to switch from BytesIO
    or StringIO to a real file when it exceeds a certain size or
    when a fileno is needed.
    """
    _rolled = False   # True once data has moved to a real file

    def __init__(self, max_size=0, mode='w+b', buffering=-1,
                 encoding=None, newline=None,
                 suffix="", prefix=template, dir=None):
        if 'b' in mode:
            self._file = _io.BytesIO()
        else:
            # Setting newline="\n" avoids newline translation;
            # this is important because otherwise on Windows we'd
            # hget double newline translation upon rollover().
            self._file = _io.StringIO(newline="\n")
        self._max_size = max_size
        self._rolled = False
        # Saved so rollover() can create the real file with the same
        # options the caller originally asked for.
        self._TemporaryFileArgs = {'mode': mode, 'buffering': buffering,
                                   'suffix': suffix, 'prefix': prefix,
                                   'encoding': encoding, 'newline': newline,
                                   'dir': dir}

    def _check(self, file):
        # Roll over to a real file once the spool exceeds max_size.
        if self._rolled: return
        max_size = self._max_size
        if max_size and file.tell() > max_size:
            self.rollover()

    def rollover(self):
        # Copy the in-memory contents to a real temporary file,
        # preserving the current position.
        if self._rolled: return
        file = self._file
        newfile = self._file = TemporaryFile(**self._TemporaryFileArgs)
        del self._TemporaryFileArgs

        newfile.write(file.getvalue())
        newfile.seek(file.tell(), 0)

        self._rolled = True

    # The method caching trick from NamedTemporaryFile
    # won't work here, because _file may change from a
    # BytesIO/StringIO instance to a real file. So we list
    # all the methods directly.

    # Context management protocol
    def __enter__(self):
        if self._file.closed:
            raise ValueError("Cannot enter context with closed file")
        return self

    def __exit__(self, exc, value, tb):
        self._file.close()

    # file protocol
    def __iter__(self):
        return self._file.__iter__()

    def close(self):
        self._file.close()

    @property
    def closed(self):
        return self._file.closed

    @property
    def encoding(self):
        try:
            return self._file.encoding
        except AttributeError:
            # BytesIO/StringIO have no 'encoding'; fall back to the
            # saved open() arguments (text mode only).
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['encoding']

    def fileno(self):
        # A real OS-level descriptor is required, so force rollover.
        self.rollover()
        return self._file.fileno()

    def flush(self):
        self._file.flush()

    def isatty(self):
        return self._file.isatty()

    @property
    def mode(self):
        try:
            return self._file.mode
        except AttributeError:
            return self._TemporaryFileArgs['mode']

    @property
    def name(self):
        try:
            return self._file.name
        except AttributeError:
            # The in-memory spool has no name.
            return None

    @property
    def newlines(self):
        try:
            return self._file.newlines
        except AttributeError:
            if 'b' in self._TemporaryFileArgs['mode']:
                raise
            return self._TemporaryFileArgs['newline']

    def read(self, *args):
        return self._file.read(*args)

    def readline(self, *args):
        return self._file.readline(*args)

    def readlines(self, *args):
        return self._file.readlines(*args)

    def seek(self, *args):
        # NOTE(review): unlike regular file objects, this returns None
        # rather than the new position -- confirm callers don't rely on
        # the return value.
        self._file.seek(*args)

    @property
    def softspace(self):
        return self._file.softspace

    def tell(self):
        return self._file.tell()

    def truncate(self, size=None):
        if size is None:
            self._file.truncate()
        else:
            # Truncating beyond max_size forces a real file first.
            if size > self._max_size:
                self.rollover()
            self._file.truncate(size)

    def write(self, s):
        file = self._file
        rv = file.write(s)
        self._check(file)
        return rv

    def writelines(self, iterable):
        file = self._file
        rv = file.writelines(iterable)
        self._check(file)
        return rv
class TemporaryDirectory(object):
    """Create and return a temporary directory.  This has the same
    behavior as mkdtemp but can be used as a context manager.  For
    example:
        with TemporaryDirectory() as tmpdir:
            ...
    Upon exiting the context, the directory and everything contained
    in it are removed.
    """
    def __init__(self, suffix="", prefix=template, dir=None):
        self._closed = False
        self.name = None # Handle mkdtemp raising an exception
        self.name = mkdtemp(suffix, prefix, dir)
    def __repr__(self):
        return "<{} {!r}>".format(self.__class__.__name__, self.name)
    def __enter__(self):
        return self.name
    def cleanup(self, _warn=False):
        # Remove the directory tree; safe to call more than once.
        if self.name and not self._closed:
            try:
                self._rmtree(self.name)
            except (TypeError, AttributeError) as ex:
                # Issue #10188: Emit a warning on stderr
                # if the directory could not be cleaned
                # up due to missing globals
                if "None" not in str(ex):
                    raise
                print("ERROR: {!r} while cleaning up {!r}".format(ex, self,),
                      file=_sys.stderr)
                return
            self._closed = True
            if _warn:
                self._warn("Implicitly cleaning up {!r}".format(self),
                           ResourceWarning)
    def __exit__(self, exc, value, tb):
        self.cleanup()
    def __del__(self):
        # Issue a ResourceWarning if implicit cleanup needed
        self.cleanup(_warn=True)
    # XXX (ncoghlan): The following code attempts to make
    # this class tolerant of the module nulling out process
    # that happens during CPython interpreter shutdown
    # Alas, it doesn't actually manage it. See issue #10188
    # Bound copies of the os/warnings helpers, so cleanup run from
    # __del__ during shutdown does not depend on module globals.
    _listdir = staticmethod(_os.listdir)
    _path_join = staticmethod(_os.path.join)
    _isdir = staticmethod(_os.path.isdir)
    _islink = staticmethod(_os.path.islink)
    _remove = staticmethod(_os.remove)
    _rmdir = staticmethod(_os.rmdir)
    _os_error = OSError
    _warn = _warnings.warn
    def _rmtree(self, path):
        # Essentially a stripped down version of shutil.rmtree.  We can't
        # use globals because they may be None'ed out at shutdown.
        for name in self._listdir(path):
            fullname = self._path_join(path, name)
            try:
                # Treat symlinks as files so we never recurse out of `path`.
                isdir = self._isdir(fullname) and not self._islink(fullname)
            except self._os_error:
                isdir = False
            if isdir:
                self._rmtree(fullname)
            else:
                try:
                    self._remove(fullname)
                except self._os_error:
                    pass
        try:
            self._rmdir(path)
        except self._os_error:
            pass
| gpl-3.0 |
thumt/THUMT | thumt/utils/scope.py | 1 | 1704 | # coding=utf-8
# Copyright 2017-2020 The THUMT Authors
# Modified from TensorFlow (tf.name_scope)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import contextlib
# Module-level state for the name-scope machinery.
# _NAME_STACK holds the currently active scope prefix ("" at top level);
# _NAMES_IN_USE counts how many times each fully-qualified name was taken.
_NAME_STACK = ""
_NAMES_IN_USE = {}
# Top-level names must start with a letter, digit or dot; nested scope
# names are laxer (the pattern also admits the empty string and '_', '-').
_VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$")
_VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$")
def unique_name(name, mark_as_used=True):
    """Qualify ``name`` with the current scope and de-duplicate it.

    If the resulting full name was already requested before, a ``_<i>``
    suffix is appended until an unused name is found.  When
    ``mark_as_used`` is True the returned name is recorded so later
    calls will not hand it out again.
    """
    global _NAME_STACK
    full_name = name if not _NAME_STACK else _NAME_STACK + "/" + name
    count = _NAMES_IN_USE.get(full_name, 0)
    if mark_as_used:
        _NAMES_IN_USE[full_name] = count + 1
    if count > 0:
        # Collision: probe suffixed candidates until one is free.
        candidate = full_name
        while candidate in _NAMES_IN_USE:
            candidate = "%s_%d" % (full_name, count)
            count += 1
        full_name = candidate
        if mark_as_used:
            _NAMES_IN_USE[full_name] = 1
    return full_name
@contextlib.contextmanager
def scope(name):
    """Push ``name`` onto the global scope stack for the duration of the
    ``with`` block and yield the resulting prefix (ending in "/", or ""
    for a null scope).  The previous stack is restored on exit.
    """
    global _NAME_STACK
    if name:
        # Nested names only need to be valid scope components; top-level
        # names are held to the stricter op-name rule.
        checker = (_VALID_SCOPE_NAME_REGEX if _NAME_STACK
                   else _VALID_OP_NAME_REGEX)
        if not checker.match(name):
            raise ValueError("'%s' is not a valid scope name" % name)
    try:
        old_stack = _NAME_STACK
        if not name:
            new_stack = None
        elif name.endswith("/"):
            # A trailing "/" reopens an existing scope verbatim,
            # bypassing uniquification.
            new_stack = name[:-1]
        else:
            new_stack = unique_name(name)
        _NAME_STACK = new_stack
        yield "" if new_stack is None else new_stack + "/"
    finally:
        _NAME_STACK = old_stack
def get_scope():
    """Return the current scope prefix (the global ``_NAME_STACK``)."""
    return _NAME_STACK
| bsd-3-clause |
waytai/networkx | networkx/generators/intersection.py | 55 | 3919 | # -*- coding: utf-8 -*-
"""
Generators for random intersection graphs.
"""
# Copyright (C) 2011 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
import random
import networkx as nx
from networkx.algorithms import bipartite
__author__ = "\n".join(['Aric Hagberg (hagberg@lanl.gov)'])
__all__ = ['uniform_random_intersection_graph',
'k_random_intersection_graph',
'general_random_intersection_graph',
]
def uniform_random_intersection_graph(n, m, p, seed=None):
    """Return a uniform random intersection graph.
    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    p : float
        Probability of connecting nodes between bipartite sets
    seed : int, optional
        Seed for random number generator (default=None).
    See Also
    --------
    gnp_random_graph
    References
    ----------
    .. [1] K.B. Singer-Cohen, Random Intersection Graphs, 1995,
       PhD thesis, Johns Hopkins University
    .. [2] Fill, J. A., Scheinerman, E. R., and Singer-Cohen, K. B.,
       Random intersection graphs when m = !(n):
       An equivalence theorem relating the evolution of the g(n, m, p)
       and g(n, p) models. Random Struct. Algorithms 16, 2 (2000), 156-176.
    """
    # Draw a random bipartite node/attribute graph, then project it onto
    # the node side [0, n).
    node_attr_graph = bipartite.random_graph(n, m, p, seed=seed)
    return nx.projected_graph(node_attr_graph, range(n))
def k_random_intersection_graph(n, m, k, seed=None):
    """Return a intersection graph with randomly chosen attribute sets for
    each node that are of equal size (k).
    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    k : float
        Size of attribute set to assign to each node.
    seed : int, optional
        Seed for random number generator (default=None).
    See Also
    --------
    gnp_random_graph, uniform_random_intersection_graph
    References
    ----------
    .. [1] Godehardt, E., and Jaworski, J.
       Two models of random intersection graphs and their applications.
       Electronic Notes in Discrete Mathematics 10 (2001), 129--132.
    """
    # Fix: the docstring documented a `seed` parameter that the signature
    # did not accept.  It is now honored; the default (None) leaves the
    # global RNG state untouched, so existing callers are unaffected.
    if seed is not None:
        random.seed(seed)
    G = nx.empty_graph(n + m)
    mset = range(n, n + m)
    for v in range(n):
        # Connect each node to exactly k distinct attribute nodes.
        targets = random.sample(mset, k)
        G.add_edges_from(zip([v] * len(targets), targets))
    return nx.projected_graph(G, range(n))
def general_random_intersection_graph(n, m, p, seed=None):
    """Return a random intersection graph with independent probabilities
    for connections between node and attribute sets.
    Parameters
    ----------
    n : int
        The number of nodes in the first bipartite set (nodes)
    m : int
        The number of nodes in the second bipartite set (attributes)
    p : list of floats of length m
        Probabilities for connecting nodes to each attribute
    seed : int, optional
        Seed for random number generator (default=None).
    See Also
    --------
    gnp_random_graph, uniform_random_intersection_graph
    References
    ----------
    .. [1] Nikoletseas, S. E., Raptopoulos, C., and Spirakis, P. G.
       The existence and efficient construction of large independent sets
       in general random intersection graphs. In ICALP (2004), J. Diaz,
       J. Karhumaki, A. Lepisto, and D. Sannella, Eds., vol. 3142
       of Lecture Notes in Computer Science, Springer, pp. 1029-1040.
    """
    if len(p) != m:
        raise ValueError("Probability list p must have m elements.")
    # Fix: the docstring documented a `seed` parameter that the signature
    # did not accept.  It is now honored; the default (None) leaves the
    # global RNG state untouched, so existing callers are unaffected.
    if seed is not None:
        random.seed(seed)
    G = nx.empty_graph(n + m)
    mset = range(n, n + m)
    for u in range(n):
        # Attribute j is attached to node u independently with prob p[j].
        for v, q in zip(mset, p):
            if random.random() < q:
                G.add_edge(u, v)
    return nx.projected_graph(G, range(n))
| bsd-3-clause |
spbguru/repo1 | nupic/regions/PyRegion.py | 8 | 11394 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013-2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
## @file
This file defines the base class for NuPIC 2 Python regions.
"""
import numpy
RealNumpyDType = numpy.float32
from abc import ABCMeta, abstractmethod
from nupic.support import getCallerInfo
def not_implemented(f):
    """A decorator that raises NotImplementedError exception when called

    Keeps the docstring of the original function.
    """
    def decorated(*args, **kw):
        # Identify the caller so the error message points at the call site.
        gci = getCallerInfo()
        caller = gci[0] + '()'
        if gci[2]:
            caller = gci[2] + '.' + caller
        message = 'The unimplemented method '
        message += '%s() was called by %s' % (f.__name__, caller)
        raise NotImplementedError(message)
    # Bug fix: this line previously used '==' (a no-op comparison), so the
    # wrapper never actually inherited the wrapped function's docstring.
    # (f.__name__ above replaces the Python-2-only f.func_name alias; it is
    # valid on both Python 2 and 3.)
    decorated.__doc__ = f.__doc__
    return decorated
#---------------------------------------------------------------------------------
class PyRegion(object):
    """
    PyRegion provides services to its sub-classes (the actual regions):
    - Define and document the interface of a Python region
    - Enforce implementation of required methods
    - Default implementation for some methods
    PyRegion is an abstract base class (http://docs.python.org/library/abc.html).
    If a subclass doesn't implement all its abstract methods it can't be
    instantiated. Note, that the signature of implemented abstract method in the
    subclass doesn't need to match the signature of the abstract method in the
    base class. This is very important for __init__() in this case.
    The abstract methods (decorated with @abstract method) are:
    * __init__
    * initialize
    * compute
    In addition, PyRegion decorates some other methods with the
    @not_implemented decorator. A sub-class may opt not to implement these
    methods, but if such a methods is called then a NotImplementedError will be
    raised. This is useful for methods like setParameterArray if a particular
    subclass has no array parameters.
    The not implemented methods (decorated with @not_implemented) are:
    * getSpec (class method)
    * setParameter
    * setParameterArray
    * getOutputElementCount
    The getSpec is a class method, which is actually required but since it's
    not an instance method the @abstractmethod decorator doesn't apply.
    Finally, PyRegion provides reasonable default implementation to some methods.
    Sub-classes may opt to override these methods or use the default
    implementation (often recommended).
    The implemented methods are:
    * getParameter
    * getParameterArray
    * getParameterArrayCount
    * executeMethod
    """
    __metaclass__ = ABCMeta
    #---------------------------------------------------------------------------------
    @classmethod
    @not_implemented
    def getSpec(cls):
        """Returns the region spec for this region. The Region Spec is a dictionary
        with the following keys:
        description -- a string
        singleNodeOnly -- a boolean (True if this Region supports only a single node)
        inputs -- a dictionary in which the keys are the names of the inputs and
        the values are dictionaries with these keys:
             description - string
             regionLevel -- True if this is a "region-level" input.
             dataType - a string describing the data type, usually 'Real32'
             count - the number of items in the input. 0 means unspecified.
             required -- boolean - whether the input is must be connected
             isDefaultInput -- must be True for exactly one input
             requireSplitterMap -- [just set this to False.]
        outputs -- a dictionary with similar structure to inputs. The keys
        are:
             categoriesOut
             description
             dataType
             count
             regionLevel
             isDefaultOutput
        parameters -- a dictionary of dictionaries with the following keys:
             description
             dataType
             count
             constraints
             accessMode (one of "ReadWrite", "Read", "Create")
        This class method is called by NuPIC before creating a Region.
        """
    @abstractmethod
    def __init__(self, *args, **kwars):
        """Initialize the node with creation parameters from the node spec
        Should be implemented by subclasses (unless there are no creation params)
        """
    @abstractmethod
    def initialize(self, inputs, outputs):
        """Initialize the node after the network is fully linked
        It is called once by NuPIC before the first call to compute(). It is
        a good place to perform one time initialization that depend on the inputs
        and/or outputs. The region may also remember its inputs and outputs here
        because they will not change.
        inputs: dict of numpy arrays (one per input)
        outputs: dict of numpy arrays (one per output)
        """
    @abstractmethod
    def compute(self, inputs, outputs):
        """Perform the main computation
        This method is called in each iteration for each phase the node supports.
        inputs: dict of numpy arrays (one per input)
        outputs: dict of numpy arrays (one per output)
        """
    @not_implemented
    def getOutputElementCount(self, name):
        """Return the number of elements in the output of a single node
        If the region has multiple nodes (all must have the same output
        size) then just the number of output elements of a single node
        should be returned.
        name: the name of the output
        """
    def getParameter(self, name, index):
        """Default implementation that return an attribute with the requested name
        This method provides a default implementation of getParameter() that simply
        returns an attribute with the parameter name. If the Region conceptually
        contains multiple nodes with separate state the 'index' argument is used
        to request a parameter of a specific node inside the region. In case of
        a region-level parameter the index should be -1
        The implementation prevents accessing parameters names that start with '_'.
        It may be better to enforce this convention at the node spec level.
        name: name of requested parameter
        index: index of node inside the region (if relevant)
        """
        if name.startswith('_'):
            raise Exception('Parameter name must not start with an underscore')
        value = getattr(self, name)
        return value
    def getParameterArrayCount(self, name, index):
        """Default implementation that return the length of the attribute
        This default implementation goes hand in hand with getParameterArray().
        If you override one of them in your subclass, you should probably override
        both of them.
        The implementation prevents accessing parameters names that start with '_'.
        It may be better to enforce this convention at the node spec level.
        name: name of requested parameter
        index: index of node inside the region (if relevant)
        """
        if name.startswith('_'):
            raise Exception('Parameter name must not start with an underscore')
        v = getattr(self, name)
        # Bug fix: this previously returned len(self.parameters[name]), but
        # PyRegion defines no `parameters` attribute, so the call always
        # raised AttributeError.  Per the docstring (and getParameterArray),
        # the length of the attribute itself is what callers expect.
        return len(v)
    def getParameterArray(self, name, index, array):
        """Default implementation that return an attribute with the requested name
        This method provides a default implementation of getParameterArray() that
        returns an attribute with the parameter name. If the Region conceptually
        contains multiple nodes with separate state the 'index' argument is used
        to request a parameter of a specific node inside the region. The attribute
        value is written into the output array. No type or sanity checks are
        performed for performance reasons. If something goes awry it will result
        in a low-level exception. If you are unhappy about it you can implement
        your own getParameterArray() method in the subclass.
        The implementation prevents accessing parameters names that start with '_'.
        It may be better to enforce this convention at the node spec level.
        name: name of requested parameter
        index: index of node inside the region (if relevant)
        array: output numpy array that the value is written to
        """
        if name.startswith('_'):
            raise Exception('Parameter name must not start with an underscore')
        v = getattr(self, name)
        # Not performing sanity checks for performance reasons.
        #assert array.dtype == v.dtype
        #assert len(array) == len(v)
        array[:] = v
    @not_implemented
    def setParameter(self, name, index, value):
        """Set the value of a parameter
        If the Region conceptually contains multiple nodes with separate state
        the 'index' argument is used set a parameter of a specific node inside
        the region.
        name: name of requested parameter
        index: index of node inside the region (if relevant)
        value: the value to assign to the requested parameter
        """
    @not_implemented
    def setParameterArray(self, name, index, array):
        """Set the value of an array parameter
        If the Region conceptually contains multiple nodes with separate state
        the 'index' argument is used set a parameter of a specific node inside
        the region.
        name: name of requested parameter
        index: index of node inside the region (if relevant)
        array: the value to assign to the requested parameter (a numpy array)
        """
    def serializeExtraData(self, filePath):
        """This method is called during network serialization with an external
        filename that can be used to bypass pickle for saving large binary states.
        filePath: full filepath and name
        """
        pass
    def deSerializeExtraData(self, filePath):
        """This method is called during network deserialization with an external
        filename that can be used to bypass pickle for loading large binary states.
        filePath: full filepath and name
        """
        pass
    def executeMethod(self, methodName, args):
        """Executes a method named 'methodName' with the specified arguments.
        This method is called when the user executes a command as defined in
        the node spec. It provides a perfectly reasonble implementation
        of the command mechanism. As a sub-class developer you just need to
        implement a method for each command in the node spec. Note that due to
        the command mechanism only unnamed argument are supported.
        methodName: the name of the method that correspond to a command in the spec
        args: list of arguments that will be passed to the method
        """
        if not hasattr(self, methodName):
            raise Exception('Missing command method: ' + methodName)
        m = getattr(self, methodName)
        if not hasattr(m, '__call__'):
            raise Exception('Command: ' + methodName + ' must be callable')
        return m(*args)
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/numpy-master/numpy/core/records.py | 1 | 29502 | """
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (and int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
from __future__ import division, absolute_import, print_function
import os
import sys
from numpy.compat import isfileobj, bytes, long
from . import numeric as sb
from . import numerictypes as nt
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
# Local alias so the rest of this module can say `ndarray` directly.
ndarray = sb.ndarray
# Translation table from single-character byte-order codes (including the
# legacy numarray-era aliases) to the canonical characters accepted by
# dtype.newbyteorder().
_byteorderconv = {'b': '>',
                  'l': '<',
                  'n': '=',
                  'B': '>',
                  'L': '<',
                  'N': '=',
                  'S': 's',
                  's': 's',
                  '>': '>',
                  '<': '<',
                  '=': '=',
                  '|': '|',
                  'I': '|',
                  'i': '|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict  # name -> scalar type mapping used when parsing formats
def find_duplicate(list):
    """Find duplication in a list, return a list of duplicated elements

    Each duplicated element is reported once, in order of its first
    appearance in the input (matching the original implementation).
    """
    # Two O(n) passes replace the original O(n**2) scan that tested
    # ``list[i] in list[i+1:]`` for every index.  Elements must be
    # hashable (they are field-name strings in this module).
    counts = {}
    for item in list:
        counts[item] = counts.get(item, 0) + 1
    dup = []
    for item in list:
        if counts[item] > 1:
            dup.append(item)
            counts[item] = 1  # report each duplicated element only once
    return dup
class format_parser:
    """
    Class to convert formats, names, titles description to a dtype.
    After constructing the format_parser object, the dtype attribute is
    the converted data-type:
    ``dtype = format_parser(formats, names, titles).dtype``
    Attributes
    ----------
    dtype : dtype
        The converted data-type.
    Parameters
    ----------
    formats : str or list of str
        The format description, either specified as a string with
        comma-separated format descriptions in the form ``'f8, i4, a5'``, or
        a list of format description strings in the form
        ``['f8', 'i4', 'a5']``.
    names : str or list/tuple of str
        The field names, either specified as a comma-separated string in the
        form ``'col1, col2, col3'``, or as a list or tuple of strings in the
        form ``['col1', 'col2', 'col3']``.
        An empty list can be used, in that case default field names
        ('f0', 'f1', ...) are used.
    titles : sequence
        Sequence of title strings. An empty list can be used to leave titles
        out.
    aligned : bool, optional
        If True, align the fields by padding as the C-compiler would.
        Default is False.
    byteorder : str, optional
        If specified, all the fields will be changed to the
        provided byte-order. Otherwise, the default byte-order is
        used. For all available string specifiers, see `dtype.newbyteorder`.
    See Also
    --------
    dtype, typename, sctype2char
    Examples
    --------
    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
    ...                  ['T1', 'T2', 'T3']).dtype
    dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
           (('T3', 'col3'), '|S5')])
    `names` and/or `titles` can be empty lists. If `titles` is an empty list,
    titles will simply not appear. If `names` is empty, default field names
    will be used.
    >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
    ...                  []).dtype
    dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
    >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
    dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
    """
    def __init__(self, formats, names, titles, aligned=False, byteorder=None):
        # Build the structured dtype in three steps, then expose it.
        self._parseFormats(formats, aligned)
        self._setfieldnames(names, titles)
        self._createdescr(byteorder)
        self.dtype = self._descr
    def _parseFormats(self, formats, aligned=0):
        """ Parse the field formats """
        if formats is None:
            raise ValueError("Need formats argument")
        if isinstance(formats, list):
            if len(formats) < 2:
                # A single-element list would parse as a plain scalar dtype;
                # the extra empty entry keeps the result structured.
                formats.append('')
            formats = ','.join(formats)
        dtype = sb.dtype(formats, aligned)
        fields = dtype.fields
        if fields is None:
            # Scalar dtype slipped through: wrap it as one field.
            dtype = sb.dtype([('f1', dtype)], aligned)
            fields = dtype.fields
        keys = dtype.names
        self._f_formats = [fields[key][0] for key in keys]
        self._offsets = [fields[key][1] for key in keys]
        self._nfields = len(keys)
    def _setfieldnames(self, names, titles):
        """convert input field names into a list and assign to the _names
        attribute """
        if (names):
            if (type(names) in [list, tuple]):
                pass
            elif isinstance(names, str):
                names = names.split(',')
            else:
                raise NameError("illegal input names %s" % repr(names))
            self._names = [n.strip() for n in names[:self._nfields]]
        else:
            self._names = []
        # if the names are not specified, they will be assigned as
        # "f0, f1, f2,..."
        # if not enough names are specified, they will be assigned as "f[n],
        # f[n+1],..." etc. where n is the number of specified names..."
        self._names += ['f%d' % i for i in range(len(self._names),
                                                 self._nfields)]
        # check for redundant names
        _dup = find_duplicate(self._names)
        if _dup:
            raise ValueError("Duplicate field names: %s" % _dup)
        if (titles):
            self._titles = [n.strip() for n in titles[:self._nfields]]
        else:
            self._titles = []
            titles = []
        # Pad out missing titles with None placeholders so _titles always
        # has exactly _nfields entries.
        if (self._nfields > len(titles)):
            self._titles += [None] * (self._nfields - len(titles))
    def _createdescr(self, byteorder):
        # Assemble the final structured dtype; optionally force a uniform
        # byte order across all fields.
        descr = sb.dtype({'names': self._names,
                          'formats': self._f_formats,
                          'offsets': self._offsets,
                          'titles': self._titles})
        if (byteorder is not None):
            byteorder = _byteorderconv[byteorder[0]]
            descr = descr.newbyteorder(byteorder)
        self._descr = descr
class record(nt.void):
    """A data-type scalar that allows field access as attribute lookup.
    """
    # manually set name and module so that this class's type shows up
    # as numpy.record when printed
    __name__ = 'record'
    __module__ = 'numpy'
    def __repr__(self):
        return self.__str__()
    def __str__(self):
        return str(self.item())
    def __getattribute__(self, attr):
        # Real attributes win; anything not found on nt.void is then
        # tried as a field name of this scalar's dtype.
        if attr in ['setfield', 'getfield', 'dtype']:
            return nt.void.__getattribute__(self, attr)
        try:
            return nt.void.__getattribute__(self, attr)
        except AttributeError:
            pass
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            obj = self.getfield(*res[:2])
            # if it has fields return a record,
            # otherwise return the object
            try:
                dt = obj.dtype
            except AttributeError:
                # happens if field is Object type
                return obj
            if dt.fields:
                return obj.view((self.__class__, obj.dtype.fields))
            return obj
        else:
            raise AttributeError("'record' object has no "
                                 "attribute '%s'" % attr)
    def __setattr__(self, attr, val):
        # Attribute assignment maps onto setfield() for field names; the
        # three structural attributes are read-only by construction.
        if attr in ['setfield', 'getfield', 'dtype']:
            raise AttributeError("Cannot set '%s' attribute" % attr)
        fielddict = nt.void.__getattribute__(self, 'dtype').fields
        res = fielddict.get(attr, None)
        if res:
            return self.setfield(val, *res[:2])
        else:
            if getattr(self, attr, None):
                return nt.void.__setattr__(self, attr, val)
            else:
                raise AttributeError("'record' object has no "
                                     "attribute '%s'" % attr)
    def __getitem__(self, indx):
        obj = nt.void.__getitem__(self, indx)
        # copy behavior of record.__getattribute__,
        if isinstance(obj, nt.void) and obj.dtype.fields:
            return obj.view((self.__class__, obj.dtype.fields))
        else:
            # return a single element
            return obj
    def pprint(self):
        """Pretty-print all fields."""
        # pretty-print all fields
        names = self.dtype.names
        # Right-align names to the width of the longest one.
        maxlen = max(len(name) for name in names)
        rows = []
        fmt = '%% %ds: %%s' % maxlen
        for name in names:
            rows.append(fmt % (name, getattr(self, name)))
        return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""Construct an ndarray that allows field access using attributes.
Arrays may have a data-types containing fields, analogous
to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major (C-style) or column-major (Fortran-style) order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
>>> x
array([(1.0, 2), (3.0, 4)],
dtype=[('x', '<f8'), ('y', '<i4')])
>>> x['x']
array([ 1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([ 1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
    def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False, order='C'):
        # An explicit dtype wins; otherwise build one from the
        # formats/names/titles description via format_parser.
        if dtype is not None:
            descr = sb.dtype(dtype)
        else:
            descr = format_parser(formats, names, titles, aligned, byteorder)._descr
        if buf is None:
            # Fresh allocation, with `record` as the scalar type.
            self = ndarray.__new__(subtype, shape, (record, descr), order=order)
        else:
            # Wrap an existing buffer without copying its data.
            self = ndarray.__new__(subtype, shape, (record, descr),
                                   buffer=buf, offset=offset,
                                   strides=strides, order=order)
        return self
    def __array_finalize__(self, obj):
        # Called by numpy whenever a view/copy of this array is created.
        if self.dtype.type is not record and self.dtype.fields:
            # if self.dtype is not np.record, invoke __setattr__ which will
            # convert it to a record if it is a void dtype.
            self.dtype = self.dtype
    def __getattribute__(self, attr):
        # See if ndarray has this attr, and return it if so. (note that this
        # means a field with the same name as an ndarray attr cannot be
        # accessed by attribute).
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:  # attr must be a fieldname
            pass
        # look for a field with this name
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("recarray has no attribute %s" % attr)
        obj = self.getfield(*res)
        # At this point obj will always be a recarray, since (see
        # PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
        # non-structured, convert it to an ndarray. Then if obj is structured
        # with void type convert it to the same dtype.type (eg to preserve
        # numpy.record type if present), since nested structured fields do not
        # inherit type. Don't do this for non-void structures though.
        if obj.dtype.fields:
            if issubclass(obj.dtype.type, nt.void):
                return obj.view(dtype=(self.dtype.type, obj.dtype))
            return obj
        else:
            return obj.view(ndarray)
    # Save the dictionary.
    # If the attr is a field name and not in the saved dictionary
    # Undo any "setting" of the attribute and do a setfield
    # Thus, you can't create attributes on-the-fly that are field names.
    def __setattr__(self, attr, val):
        # Automatically convert (void) structured types to records
        # (but not non-void structures, subarrays, or non-structured voids)
        if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
            val = sb.dtype((record, val))
        newattr = attr not in self.__dict__
        try:
            ret = object.__setattr__(self, attr, val)
        except:
            # Plain attribute assignment failed: if `attr` is not a field
            # either, re-raise the original exception.
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                exctype, value = sys.exc_info()[:2]
                raise exctype(value)
        else:
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one or this setattr worked on an
                # internal attribute.
                try:
                    object.__delattr__(self, attr)
                except:
                    return ret
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError):
            raise AttributeError("record array has no attribute %s" % attr)
        return self.setfield(val, *res)
    def __getitem__(self, indx):
        """Index like an ndarray while preserving the record machinery:
        structured results come back as recarray (with numpy.record
        elements for void dtypes) and plain results as bare ndarray."""
        obj = super(recarray, self).__getitem__(indx)
        # copy behavior of getattr, except that here
        # we might also be returning a single element
        if isinstance(obj, ndarray):
            if obj.dtype.fields:
                obj = obj.view(type(self))
                if issubclass(obj.dtype.type, nt.void):
                    return obj.view(dtype=(self.dtype.type, obj.dtype))
                return obj
            else:
                return obj.view(type=ndarray)
        else:
            # return a single element
            return obj
    def __repr__(self):
        """Build a repr that round-trips through rec.array where possible,
        falling back to an array(...).view(numpy.recarray) expression for
        unusual dtype combinations."""
        # get data/shape string. logic taken from numeric.array_repr
        if self.size > 0 or self.shape == (0,):
            lst = sb.array2string(self, separator=', ')
        else:
            # show zero-length shape unless it is (0,)
            lst = "[], shape=%s" % (repr(self.shape),)
        if (self.dtype.type is record
                or (not issubclass(self.dtype.type, nt.void))):
            # If this is a full record array (has numpy.record dtype),
            # or if it has a scalar (non-void) dtype with no records,
            # represent it using the rec.array function. Since rec.array
            # converts dtype to a numpy.record for us, convert back
            # to non-record before printing
            plain_dtype = self.dtype
            if plain_dtype.type is record:
                plain_dtype = sb.dtype((nt.void, plain_dtype))
            # Align continuation lines under the opening call.
            lf = '\n' + ' ' * len("rec.array(")
            return ('rec.array(%s, %sdtype=%s)' %
                    (lst, lf, plain_dtype))
        else:
            # otherwise represent it using np.array plus a view
            # This should only happen if the user is playing
            # strange games with dtypes.
            lf = '\n' + ' ' * len("array(")
            return ('array(%s, %sdtype=%s).view(numpy.recarray)' %
                    (lst, lf, str(self.dtype)))
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.fields:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print(r[1])
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
"""
arrayList = [sb.asarray(x) for x in arrayList]
if shape is None or shape == 0:
shape = arrayList[0].shape
if isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = []
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError("item in the array list must be an ndarray.")
formats.append(obj.dtype.str)
formats = ','.join(formats)
if dtype is not None:
descr = sb.dtype(dtype)
_names = descr.names
else:
parsed = format_parser(formats, names, titles, aligned, byteorder)
_names = parsed._names
descr = parsed._descr
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = len(descr[k].shape)
testshape = obj.shape[:len(obj.shape) - nn]
if testshape != shape:
raise ValueError("array-shape mismatch in array %d" % k)
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
                titles=None, aligned=False, byteorder=None):
    """ create a recarray from a list of records in text form

    The data in the same field can be heterogeneous, they will be promoted
    to the highest data type.  This method is intended for creating
    smaller record arrays.  If used to create large array without formats
    defined

    r=fromrecords([(2,3.,'abc')]*100000)

    it can be slow.

    If formats is None, then this will auto-detect formats. Use list of
    tuples rather than list of lists for faster processing.

    >>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
    ... names='col1,col2,col3')
    >>> print(r[0])
    (456, 'dbe', 1.2)
    >>> r.col1
    array([456,   2])
    >>> r.col2
    array(['dbe', 'de'],
          dtype='|S3')
    >>> import pickle
    >>> print(pickle.loads(pickle.dumps(r)))
    [(456, 'dbe', 1.2) (2, 'de', 1.3)]
    """
    nfields = len(recList[0])
    if formats is None and dtype is None:  # slower
        # No layout given: go through object arrays, transposing the
        # records into per-field arrays, then let fromarrays detect.
        obj = sb.array(recList, dtype=object)
        arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)]
        return fromarrays(arrlist, formats=formats, shape=shape, names=names,
                          titles=titles, aligned=aligned, byteorder=byteorder)

    if dtype is not None:
        descr = sb.dtype((record, dtype))
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    try:
        retval = sb.array(recList, dtype=descr)
    except TypeError:  # list of lists instead of list of tuples
        # Fall back to element-wise assignment; only 1-d is supported
        # on this path.
        if (shape is None or shape == 0):
            shape = len(recList)
        if isinstance(shape, (int, long)):
            shape = (shape,)
        if len(shape) > 1:
            raise ValueError("Can only deal with 1-d array.")
        _array = recarray(shape, descr)
        for k in range(_array.size):
            _array[k] = tuple(recList[k])
        return _array
    else:
        if shape is not None and retval.shape != shape:
            retval.shape = shape

    res = retval.view(recarray)

    return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise ValueError("Must have dtype= or formats=")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
shape = (len(datastring) - offset) / itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
def get_remaining_size(fd):
    """Return the number of bytes between the current position of the
    file-like object *fd* and its end.

    Uses the OS file descriptor when available; otherwise falls back to
    looking the size up by the object's ``name`` attribute.
    """
    try:
        fileno = fd.fileno()
    except AttributeError:
        # No OS-level descriptor: derive the size from the file name.
        return os.path.getsize(fd.name) - fd.tell()
    else:
        return os.fstat(fileno).st_size - fd.tell()
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
             names=None, titles=None, aligned=False, byteorder=None):
    """Create an array from binary file data

    If file is a string then that file is opened, else it is assumed
    to be a file object.

    >>> from tempfile import TemporaryFile
    >>> a = np.empty(10,dtype='f8,i4,a5')
    >>> a[5] = (0.5,10,'abcde')
    >>>
    >>> fd=TemporaryFile()
    >>> a = a.newbyteorder('<')
    >>> a.tofile(fd)
    >>>
    >>> fd.seek(0)
    >>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
    ... byteorder='<')
    >>> print(r[5])
    (0.5, 10, 'abcde')
    >>> r.shape
    (10,)
    """
    if (shape is None or shape == 0):
        # -1 marks a dimension to be inferred from the file size below.
        shape = (-1,)
    elif isinstance(shape, (int, long)):
        shape = (shape,)

    name = 0
    if isinstance(fd, str):
        # A path was given: open it ourselves and remember to close it.
        name = 1
        fd = open(fd, 'rb')
    if (offset > 0):
        # Seek relative to the current position.
        fd.seek(offset, 1)
    size = get_remaining_size(fd)

    if dtype is not None:
        descr = sb.dtype(dtype)
    else:
        descr = format_parser(formats, names, titles, aligned, byteorder)._descr

    itemsize = descr.itemsize

    shapeprod = sb.array(shape).prod()
    shapesize = shapeprod * itemsize
    if shapesize < 0:
        # One dimension is -1: infer it from the remaining file size.
        # Floor division keeps the result an int; plain "/" is identical
        # on Python 2 ints but yields a float dimension under Python 3 /
        # true-division semantics.
        shape = list(shape)
        shape[shape.index(-1)] = size // -shapesize
        shape = tuple(shape)
        shapeprod = sb.array(shape).prod()

    nbytes = shapeprod * itemsize

    if nbytes > size:
        raise ValueError(
                "Not enough bytes left in file for specified shape and type")

    # create the array
    _array = recarray(shape, descr)
    nbytesread = fd.readinto(_array.data)
    if nbytesread != nbytes:
        raise IOError("Didn't read as many bytes as expected")
    if name:
        fd.close()

    return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
          names=None, titles=None, aligned=False, byteorder=None, copy=True):
    """Construct a record array from a wide-variety of objects.

    Dispatches on the type of *obj*: None (fresh or buffer-backed
    array), byte string, list/tuple (of records or of per-field
    arrays), existing recarray, open file object, plain ndarray, or any
    object exposing __array_interface__.
    """
    if ((isinstance(obj, (type(None), str)) or isfileobj(obj)) and
           (formats is None) and (dtype is None)):
        raise ValueError("Must define formats (or dtype) if object is "
                         "None, string, or an open file")

    kwds = {}
    if dtype is not None:
        dtype = sb.dtype(dtype)
    elif formats is not None:
        dtype = format_parser(formats, names, titles,
                              aligned, byteorder)._descr
    else:
        # No layout supplied: defer parsing to whichever from* helper
        # ends up being called below.
        kwds = {'formats': formats,
                'names': names,
                'titles': titles,
                'aligned': aligned,
                'byteorder': byteorder
                }

    if obj is None:
        if shape is None:
            raise ValueError("Must define a shape if obj is None")
        return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)

    elif isinstance(obj, bytes):
        return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)

    elif isinstance(obj, (list, tuple)):
        # A list of lists/tuples is a list of records; otherwise it is
        # treated as a flat list of per-field arrays.
        if isinstance(obj[0], (tuple, list)):
            return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
        else:
            return fromarrays(obj, dtype=dtype, shape=shape, **kwds)

    elif isinstance(obj, recarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new

    elif isfileobj(obj):
        return fromfile(obj, dtype=dtype, shape=shape, offset=offset)

    elif isinstance(obj, ndarray):
        if dtype is not None and (obj.dtype != dtype):
            new = obj.view(dtype)
        else:
            new = obj
        if copy:
            new = new.copy()
        return new.view(recarray)

    else:
        # Last resort: anything exposing the array interface protocol.
        interface = getattr(obj, "__array_interface__", None)
        if interface is None or not isinstance(interface, dict):
            raise ValueError("Unknown input type")
        obj = sb.array(obj)
        if dtype is not None and (obj.dtype != dtype):
            obj = obj.view(dtype)
        return obj.view(recarray)
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/social/backends/lastfm.py | 70 | 1888 | import hashlib
from social.utils import handle_http_errors
from social.backends.base import BaseAuth
class LastFmAuth(BaseAuth):
    """
    Last.Fm authentication backend. Requires two settings:
        SOCIAL_AUTH_LASTFM_KEY
        SOCIAL_AUTH_LASTFM_SECRET

    Don't forget to set the Last.fm callback to something sensible like
    http://your.site/lastfm/complete
    """
    name = 'lastfm'
    AUTH_URL = 'http://www.last.fm/api/auth/?api_key={api_key}'
    # Persist the session key returned by auth.getSession.
    EXTRA_DATA = [
        ('key', 'session_key')
    ]

    def auth_url(self):
        # Entry point of the flow: the user is sent here and Last.fm
        # calls back with a request token.
        return self.AUTH_URL.format(api_key=self.setting('KEY'))

    @handle_http_errors
    def auth_complete(self, *args, **kwargs):
        """Completes login process, must return user instance"""
        key, secret = self.get_key_and_secret()
        token = self.data['token']

        # Last.fm request signature: md5 of the concatenated
        # "name" + "value" parameter pairs followed by the shared secret.
        # 'method' and 'auth.getSession' intentionally run together --
        # they are one name/value pair, not a typo.
        signature = hashlib.md5(''.join(
            ('api_key', key, 'methodauth.getSession', 'token', token, secret)
        ).encode()).hexdigest()

        response = self.get_json('http://ws.audioscrobbler.com/2.0/', data={
            'method': 'auth.getSession',
            'api_key': key,
            'token': token,
            'api_sig': signature,
            'format': 'json'
        }, method='POST')

        kwargs.update({'response': response['session'], 'backend': self})
        return self.strategy.authenticate(*args, **kwargs)

    def get_user_id(self, details, response):
        """Return a unique ID for the current user, by default from server
        response."""
        return response.get('name')

    def get_user_details(self, response):
        # Last.fm does not expose an email address; derive best-effort
        # name parts from the account name.
        fullname, first_name, last_name = self.get_user_names(response['name'])
        return {
            'username': response['name'],
            'email': '',
            'fullname': fullname,
            'first_name': first_name,
            'last_name': last_name
        }
| agpl-3.0 |
comforx/mongrel2 | examples/python/tests/mongrel2_org.py | 98 | 1671 | from mongrel2.config import *
# Main server instance: listens on port 6767 and serves the hosts
# registered below.
main = Server(
    uuid="2f62bd5-9e59-49cd-993c-3b6013c28f05",
    access_log="/logs/access.log",
    error_log="/logs/error.log",
    chroot="./",
    pid_file="/run/mongrel2.pid",
    default_host="mongrel2.org",
    name="main",
    port=6767
)

# Static directory serving the test suite.
test_directory = Dir(base='tests/',
                     index_file='index.html',
                     default_ctype='text/plain')

# Reverse proxy to the web application running locally on port 80.
web_app_proxy = Proxy(addr='127.0.0.1', port=80)

# Static assets for the chat demo.
chat_demo_dir = Dir(base='examples/chat/static/',
                    index_file='index.html',
                    default_ctype='text/plain')

# 0MQ handlers: send_spec pushes requests to the backend process and
# recv_spec receives its responses; the idents identify each handler.
chat_demo = Handler(send_spec='tcp://127.0.0.1:9999',
                    send_ident='54c6755b-9628-40a4-9a2d-cc82a816345e',
                    recv_spec='tcp://127.0.0.1:9998', recv_ident='')

handler_test = Handler(send_spec='tcp://127.0.0.1:9997',
                       send_ident='34f9ceee-cd52-4b7f-b197-88bf2f0ec378',
                       recv_spec='tcp://127.0.0.1:9996', recv_ident='')

# the r'' string syntax means to not interpret any \ chars, for regexes
mongrel2 = Host(name="mongrel2.org", routes={
    r'@chat': chat_demo,
    r'/handlertest': handler_test,
    r'/chat/': web_app_proxy,
    r'/': web_app_proxy,
    r'/tests/': test_directory,
    r'/testsmulti/(.*.json)': test_directory,
    r'/chatdemo/': chat_demo_dir,
    r'/static/': chat_demo_dir,
    r'/mp3stream': Handler(
        send_spec='tcp://127.0.0.1:9995',
        send_ident='53f9f1d1-1116-4751-b6ff-4fbe3e43d142',
        recv_spec='tcp://127.0.0.1:9994', recv_ident='')
})

main.hosts.add(mongrel2)

settings = {"zeromq.threads": 1}

# Write the configuration to the mongrel2 config database.
commit([main], settings=settings)
| bsd-3-clause |
m-soori/pjsip-blf | tests/cdash/builder.py | 107 | 17919 | #
# builder.py - PJSIP test scenarios builder
#
# Copyright (C) 2008-2009 Teluu Inc. (http://www.teluu.com)
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
import ccdash
import os
import platform
import re
import subprocess
import sys
import time
class Operation:
    """\
    The Operation class describes the individual ccdash operation to be
    performed.

    """
    # Types:
    UPDATE = "update"               # Update operation
    CONFIGURE = "configure"         # Configure operation
    BUILD = "build"                 # Build operation
    TEST = "test"                   # Unit test operation

    def __init__(self, type, cmdline, name="", wdir=""):
        """Create an operation.

        type    -- one of UPDATE/CONFIGURE/BUILD/TEST.
        cmdline -- command line to run (ignored for UPDATE).
        name    -- display name; mandatory for TEST operations.
        wdir    -- working directory, relative to the base directory.
        """
        self.type = type
        self.cmdline = cmdline
        self.name = name
        self.wdir = wdir
        # Test operations are reported individually, so they must be
        # named.  The original code raised a bare string here, which is
        # a TypeError on Python >= 2.6; raise a proper exception type.
        if self.type == self.TEST and not self.name:
            raise ValueError("name required for tests")

    def encode(self, base_dir):
        """Encode this operation as a ccdash argument list."""
        s = [self.type]
        if self.type == self.TEST:
            s.append(self.name)
        if self.type != self.UPDATE:
            s.append(self.cmdline)
        # Working directory argument: relative wdir values are anchored
        # at base_dir.
        s.append("-w")
        if self.wdir:
            s.append(base_dir + "/" + self.wdir)
        else:
            s.append(base_dir)
        return s
#
# Update operation
#
# A single source-update step; ccdash supplies the actual command.
update_ops = [Operation(Operation.UPDATE, "")]

#
# The standard library tests (e.g. pjlib-test, pjsip-test, etc.)
#
# "$SUFFIX" is replaced with a platform-specific executable suffix by
# each builder's build_tests() before the command runs.
std_test_ops= [
    Operation(Operation.TEST, "./pjlib-test$SUFFIX", name="pjlib test",
              wdir="pjlib/bin"),
    Operation(Operation.TEST, "./pjlib-util-test$SUFFIX",
              name="pjlib-util test", wdir="pjlib-util/bin"),
    Operation(Operation.TEST, "./pjnath-test$SUFFIX", name="pjnath test",
              wdir="pjnath/bin"),
    Operation(Operation.TEST, "./pjmedia-test$SUFFIX", name="pjmedia test",
              wdir="pjmedia/bin"),
    Operation(Operation.TEST, "./pjsip-test$SUFFIX", name="pjsip test",
              wdir="pjsip/bin")
]
#
# These are pjsua Python based unit test operations
#
def build_pjsua_test_ops(pjsua_exe=""):
    """Enumerate the Python pjsua tests by running "runall.py --list" in
    ../pjsua and turn each listed entry into a TEST Operation.

    pjsua_exe -- optional pjsua executable name passed through to run.py.
    Side effects: temporarily chdirs to ../pjsua and creates/removes a
    scratch file named "list" there.
    """
    ops = []
    if pjsua_exe:
        exe = " -e ../../pjsip-apps/bin/" + pjsua_exe
    else:
        exe = ""
    cwd = os.getcwd()
    os.chdir("../pjsua")
    os.system("python runall.py --list > list")
    f = open("list", "r")
    for e in f:
        e = e.rstrip("\r\n ")
        # Each line is expected to hold "<module> <parameter>".
        # NOTE(review): split(None,2) can produce three tokens and then
        # fail to unpack; split(None,1) looks like the intent -- confirm
        # that listed lines always contain exactly two fields.
        (mod,param) = e.split(None,2)
        # Test name: module name minus the "mod_" prefix and ".py",
        # joined with the parameter's base name.
        name = mod[4:mod.find(".py")] + "_" + \
               param[param.find("/")+1:param.find(".py")]
        ops.append(Operation(Operation.TEST, "python run.py" + exe + " " + \
                   e, name=name, wdir="tests/pjsua"))
    f.close()
    os.remove("list")
    os.chdir(cwd)
    return ops
#
# Get gcc version
#
def gcc_version(gcc):
    """Return a "gcc-<version>" label parsed from the output of
    "<gcc> -v".

    gcc -- name or path of the (possibly cross-) compiler executable.
    """
    proc = subprocess.Popen(gcc + " -v", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT, shell=True)
    version = ""
    line = proc.stdout.readline()
    while line:
        # The banner contains e.g. "gcc version 4.4.3 (...)"; the third
        # whitespace-separated token is the version number.
        if line.find("gcc version") >= 0:
            version = line.split(None, 3)[2]
            break
        line = proc.stdout.readline()
    proc.wait()
    return "gcc-" + version
#
# Get Visual Studio version
#
def vs_get_version():
    """Detect the installed Visual Studio version by parsing the banner
    printed by the "cl" compiler driver.

    Returns a short label such as "vs2005", "vs-16" for unrecognized
    major versions, or "vs-unknown" when no version line is found.
    """
    # Known compiler major versions mapped to their product names.
    known = {"12": "vs6", "13": "vs2003", "14": "vs2005", "15": "vs2008"}
    proc = subprocess.Popen("cl", stdout=subprocess.PIPE,
                            stderr=subprocess.STDOUT)
    line = proc.stdout.readline()
    while line != "":
        pos = line.find("Version")
        if pos > 0:
            proc.wait()
            # The version number follows "Version "; its first two
            # digits identify the compiler major release.
            ver = line[pos+8:].split(None, 1)[0]
            major = ver[0:2]
            return known.get(major, "vs-" + major)
        line = proc.stdout.readline()
    proc.wait()
    return "vs-unknown"
#
# Test config
#
class BaseConfig:
    """Common configuration shared by all test builders.

    Attributes:
      base_dir -- root directory of the PJSIP source tree
      url      -- CDash submission URL
      site     -- site name reported to CDash
      group    -- build group name (e.g. "Experimental")
      options  -- extra ccdash command-line arguments (list)
    """
    def __init__(self, base_dir, url, site, group, options=None):
        self.base_dir = base_dir
        self.url = url
        self.site = site
        self.group = group
        # Normalize None to an empty list: the builders pass
        # self.options straight to list.extend(), which raises
        # TypeError on None.
        self.options = options if options is not None else []
#
# Base class for test configurator
#
class TestBuilder:
    """Common driver for the platform-specific test builders.

    Subclasses override build_tests() to fill self.ccdash_args with one
    ccdash argument list per operation; execute() then runs them in
    order, honoring the exclude/not_exclude patterns and stopping early
    when a mandatory step (update/configure/build) fails.
    """
    def __init__(self, config, build_config_name="",
                 user_mak="", config_site="", exclude=[], not_exclude=[]):
        self.config = config                    # BaseConfig instance
        self.build_config_name = build_config_name  # Optional build suffix
        self.user_mak = user_mak                # To be put in user.mak
        self.config_site = config_site          # To be put in config_s..
        self.saved_user_mak = ""                # To restore user.mak
        self.saved_config_site = ""             # To restore config_s..
        self.exclude = exclude                  # List of exclude pattern
        self.not_exclude = not_exclude          # List of include pattern
        self.ccdash_args = []                   # ccdash cmd line

    def stamp(self):
        # Timestamp used as the CDash build tag.
        return time.strftime("%Y%m%d-%H%M", time.localtime())

    def pre_action(self):
        """Save then overwrite user.mak and config_site.h with the
        contents configured for this run."""
        # Override user.mak
        name = self.config.base_dir + "/user.mak"
        if os.access(name, os.F_OK):
            f = open(name, "r")
            self.saved_user_mak = f.read()
            f.close()
        if True:
            f = open(name, "w")
            f.write(self.user_mak)
            f.close()
        # Override config_site.h
        name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
        if os.access(name, os.F_OK):
            f = open(name, "r")
            self.saved_config_site= f.read()
            f.close()
        if True:
            f = open(name, "wt")
            f.write(self.config_site)
            f.close()

    def post_action(self):
        """Restore the files modified by pre_action()."""
        # Restore user.mak
        name = self.config.base_dir + "/user.mak"
        f = open(name, "wt")
        f.write(self.saved_user_mak)
        f.close()
        # Restore config_site.h
        name = self.config.base_dir + "/pjlib/include/pj/config_site.h"
        f = open(name, "wt")
        f.write(self.saved_config_site)
        f.close()

    def build_tests(self):
        # This should be overridden by subclasses
        pass

    def execute(self):
        """Run every queued ccdash operation."""
        if len(self.ccdash_args)==0:
            self.build_tests()
        self.pre_action()
        # Failures in these operation types abort the whole run.
        mandatory_op = ["update", "configure", "build"]
        counter = 0
        for a in self.ccdash_args:
            # Check if this test is in exclusion list
            fullcmd = " ".join(a)
            excluded = False
            included = False
            for pat in self.exclude:
                if pat and re.search(pat, fullcmd) != None:
                    excluded = True
                    break
            if excluded:
                # not_exclude patterns override an exclusion.
                for pat in self.not_exclude:
                    if pat and re.search(pat, fullcmd) != None:
                        included = True
                        break
            if excluded and not included:
                if len(fullcmd)>60:
                    fullcmd = fullcmd[0:60] + ".."
                print "Skipping '%s'" % (fullcmd)
                continue

            b = ["ccdash.py"]
            b.extend(a)
            a = b
            #print a
            try:
                rc = ccdash.main(a)
            except Exception, e:
                errmsg = str(e)
                print "**** Error: ccdash got exception %s ****" % errmsg
                rc = -1
            except:
                # Catch-all for non-Exception raises (old-style classes
                # on Python 2); report and mark the step failed.
                print "**** Error: ccdash got unknown exception ****"
                rc = -1

            if rc!=0 and a[1] in mandatory_op:
                print "Stopping because of error.."
                break
            counter = counter + 1
        self.post_action()
#
# GNU test configurator
#
class GNUTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for GNU targets.
    """
    def __init__(self, config, build_config_name="", user_mak="", \
                 config_site="", cross_compile="", exclude=[], not_exclude=[]):
        """\
        Parameters:
        config              - BaseConfig instance
        build_config_name   - Optional name to be added as suffix to the build
                              name. Sample: "min-size", "O4", "TLS", etc.
        user_mak            - Contents to be put on user.mak
        config_site         - Contents to be put on config_site.h
        cross_compile       - Optional cross-compile prefix. Must include the
                              trailing dash, e.g. "arm-unknown-linux-"
        exclude             - List of regular expression patterns for tests
                              that will be excluded from the run
        not_exclude         - List of regular expression patterns for tests
                              that will be run regardless of whether they
                              match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             user_mak=user_mak, config_site=config_site,
                             exclude=exclude, not_exclude=not_exclude)
        self.cross_compile = cross_compile
        if self.cross_compile and self.cross_compile[-1] != '-':
            # Normalize the prefix to end with a dash.  The original
            # code called .append() on the string, which would raise
            # AttributeError; concatenate instead.
            self.cross_compile = self.cross_compile + "-"

    def build_tests(self):
        """Queue configure/build and the standard + pjsua test
        operations for a GNU-style (configure && make) build."""
        if self.cross_compile:
            suffix = "-" + self.cross_compile[0:-1]
            build_name = self.cross_compile + \
                         gcc_version(self.cross_compile + "gcc")
        else:
            # Native build: derive the platform triple from config.guess.
            proc = subprocess.Popen("sh "+self.config.base_dir+"/config.guess",
                                    shell=True, stdout=subprocess.PIPE)
            plat = proc.stdout.readline().rstrip(" \r\n")
            build_name = plat + "-"+gcc_version(self.cross_compile + "gcc")
            suffix = "-" + plat

        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name
        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "sh ./configure"))
        if sys.platform=="win32":
            # Don't build python module on Mingw
            cmds.append(Operation(Operation.BUILD,
                        "sh -c 'make distclean && make dep && make'"))
        else:
            cmds.append(Operation(Operation.BUILD,
                        "sh -c 'make distclean && make dep && make" + \
                        " && cd pjsip-apps/src/python && " + \
                        "python setup.py clean build'"))
        cmds.extend(std_test_ops)
        cmds.extend(build_pjsua_test_ops())

        self.ccdash_args = []
        for c in cmds:
            # Substitute the platform suffix into each command line and
            # append the common CDash submission arguments.
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            args.extend(self.config.options)
            self.ccdash_args.append(args)
#
# MSVC test configurator
#
class MSVCTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for Visual Studio builds.
    You need to set the MSVC environment variables (typically by calling
    vcvars32.bat) prior to running this class.
    """
    def __init__(self, config, target="Release|Win32", build_config_name="",
                 config_site="", exclude=[], not_exclude=[]):
        """\
        Parameters:
        config              - BaseConfig instance
        target              - Visual Studio build configuration to build.
                              Sample: "Debug|Win32", "Release|Win32".
        build_config_name   - Optional name to be added as suffix to the build
                              name. Sample: "Debug", "Release", "IPv6", etc.
        config_site         - Contents to be put on config_site.h
        exclude             - List of regular expression patterns for tests
                              that will be excluded from the run
        not_exclude         - List of regular expression patterns for tests
                              that will be run regardless of whether they
                              match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             config_site=config_site, exclude=exclude,
                             not_exclude=not_exclude)
        self.target = target.lower()

    def build_tests(self):
        """Queue vcbuild-based build and test operations."""
        # NOTE(review): this local deliberately-or-not shadows the `sys`
        # module inside this method; it holds the "win32" half of the
        # target string.
        (vsbuild,sys) = self.target.split("|",2)

        build_name = sys + "-" + vs_get_version() + "-" + vsbuild

        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name

        vccmd = "vcbuild.exe /nologo /nohtmllog /nocolor /rebuild " + \
                "pjproject-vs8.sln " + " \"" + self.target + "\""

        # Debug executables carry a trailing "d" in their names.
        suffix = "-i386-win32-vc8-" + vsbuild
        pjsua = "pjsua_vc8"
        if vsbuild=="debug":
            pjsua = pjsua + "d"

        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
        cmds.append(Operation(Operation.BUILD, vccmd))
        cmds.extend(std_test_ops)
        cmds.extend(build_pjsua_test_ops(pjsua))

        self.ccdash_args = []
        for c in cmds:
            # Substitute the platform suffix and append the common CDash
            # submission arguments.
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            args.extend(self.config.options)
            self.ccdash_args.append(args)
#
# Symbian test configurator
#
class SymbianTestBuilder(TestBuilder):
    """\
    This class creates list of tests suitable for Symbian builds. You need to
    set the command line build settings prior to running this class (typically
    that involves setting the EPOCROOT variable and current device).
    """
    def __init__(self, config, target="gcce urel", build_config_name="",
                 config_site="", exclude=[], not_exclude=[]):
        """\
        Parameters:
        config              - BaseConfig instance
        target              - Symbian target to build. Default is "gcce urel".
        build_config_name   - Optional name to be added as suffix to the build
                              name. Sample: "APS", "VAS", etc.
        config_site         - Contents to be put on config_site.h
        exclude             - List of regular expression patterns for tests
                              that will be excluded from the run
        not_exclude         - List of regular expression patterns for tests
                              that will be run regardless of whether they
                              match the excluded pattern.
        """
        TestBuilder.__init__(self, config, build_config_name=build_config_name,
                             config_site=config_site, exclude=exclude,
                             not_exclude=not_exclude)
        self.target = target.lower()

    def build_tests(self):
        """Queue a bldmake/abld build for the configured Symbian SDK,
        after validating the EPOCROOT/default-device setup."""
        # Check that EPOCROOT is set
        if not "EPOCROOT" in os.environ:
            print "Error: EPOCROOT environment variable is not set"
            sys.exit(1)
        epocroot = os.environ["EPOCROOT"]
        # EPOCROOT must have trailing backslash
        if epocroot[-1] != "\\":
            epocroot = epocroot + "\\"
            os.environ["EPOCROOT"] = epocroot
        sdk1 = epocroot.split("\\")[-2]

        # Check that correct device is set
        proc = subprocess.Popen("devices", stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        sdk2 = ""
        # NOTE(review): if "devices" output never contains "- default",
        # readline() returns "" at EOF and this loop spins forever --
        # confirm whether an EOF check was intended.
        while True:
            line = proc.stdout.readline()
            if line.find("- default") > 0:
                sdk2 = line.split(":",1)[0]
                break
        proc.wait()

        if sdk1 != sdk2:
            print "Error: default SDK in device doesn't match EPOCROOT"
            print "Default device SDK =", sdk2
            print "EPOCROOT SDK =", sdk1
            sys.exit(1)

        build_name = sdk2.replace("_", "-") + "-" + \
                     self.target.replace(" ", "-")

        if self.build_config_name:
            build_name = build_name + "-" + self.build_config_name

        cmdline = "cmd /C \"cd build.symbian && bldmake bldfiles && abld build %s\"" % (self.target)

        cmds = []
        cmds.extend(update_ops)
        cmds.append(Operation(Operation.CONFIGURE, "CMD /C echo Nothing to do"))
        cmds.extend([Operation(Operation.BUILD, cmdline)])

        self.ccdash_args = []
        # Symbian test binaries carry no platform suffix.
        suffix = ""
        for c in cmds:
            c.cmdline = c.cmdline.replace("$SUFFIX", suffix)
            args = c.encode(self.config.base_dir)
            args.extend(["-U", self.config.url,
                         "-S", self.config.site,
                         "-T", self.stamp(),
                         "-B", build_name,
                         "-G", self.config.group])
            args.extend(self.config.options)
            self.ccdash_args.append(args)
amidabuddha/saga-ics-crc-3.0.16 | tools/perf/scripts/python/sctop.py | 11180 | 1924 | # system call top
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Periodically displays system-wide system call totals, broken down by
# syscall. If a [comm] arg is specified, only syscalls called by
# [comm] are displayed. If an [interval] arg is specified, the display
# will be refreshed every [interval] seconds. The default interval is
# 3 seconds.
import os, sys, thread, time
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
usage = "perf script -s sctop.py [comm] [interval]\n";

# Optional args: [comm] restricts counting to one process name,
# [interval] sets the refresh period in seconds (default 3).
for_comm = None
default_interval = 3
interval = default_interval

if len(sys.argv) > 3:
	sys.exit(usage)

if len(sys.argv) > 2:
	for_comm = sys.argv[1]
	interval = int(sys.argv[2])
elif len(sys.argv) > 1:
	try:
		interval = int(sys.argv[1])
	except ValueError:
		# Sole argument is not a number: treat it as the comm filter.
		for_comm = sys.argv[1]
		interval = default_interval

# Per-syscall hit counters, keyed by syscall id.
syscalls = autodict()
def trace_begin():
	# Spawn the display loop on its own thread; the trace callbacks
	# below only update the counters.
	thread.start_new_thread(print_syscall_totals, (interval,))
	pass
def raw_syscalls__sys_enter(event_name, context, common_cpu,
	common_secs, common_nsecs, common_pid, common_comm,
	id, args):
	# perf callback: count one syscall entry, optionally filtered by
	# process name.
	if for_comm is not None:
		if common_comm != for_comm:
			return
	try:
		syscalls[id] += 1
	except TypeError:
		# autodict hands back a fresh sub-dict for unknown keys, which
		# cannot be incremented; start the counter at 1 instead.
		syscalls[id] = 1
def print_syscall_totals(interval):
	# Display loop (runs forever on its own thread, Python 2 syntax):
	# clear the terminal and dump the counts sorted by frequency every
	# `interval` seconds, then reset the counters.
	while 1:
		clear_term()
		if for_comm is not None:
			print "\nsyscall events for %s:\n\n" % (for_comm),
		else:
			print "\nsyscall events:\n\n",
		print "%-40s  %10s\n" % ("event", "count"),
		print "%-40s  %10s\n" % ("----------------------------------------", \
						 "----------"),
		# Sort by count (descending), then by syscall id.
		for id, val in sorted(syscalls.iteritems(), key = lambda(k, v): (v, k), \
					      reverse = True):
			try:
				print "%-40s  %10d\n" % (syscall_name(id), val),
			except TypeError:
				pass
		syscalls.clear()
		time.sleep(interval)
| gpl-2.0 |
Beauhurst/django | tests/custom_pk/models.py | 106 | 1058 | """
Using a custom primary key
By default, Django adds an ``"id"`` field to each model. But you can override
this behavior by explicitly adding ``primary_key=True`` to a field.
"""
from django.db import models
from .fields import MyAutoField
class Employee(models.Model):
    """Employee keyed by an explicit integer primary key.

    ``primary_key=True`` suppresses the implicit ``id`` AutoField, and
    ``db_column`` maps the field onto the underlying ``code`` column.
    """
    employee_code = models.IntegerField(primary_key=True, db_column='code')
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)

    class Meta:
        ordering = ('last_name', 'first_name')

    def __str__(self):
        return "%s %s" % (self.first_name, self.last_name)
class Business(models.Model):
    """Business keyed by its name (a CharField primary key)."""
    name = models.CharField(max_length=20, primary_key=True)
    employees = models.ManyToManyField(Employee)

    class Meta:
        # Irregular plural; the default would be "businesss".
        verbose_name_plural = 'businesses'

    def __str__(self):
        return self.name
class Bar(models.Model):
    """Model whose primary key is the custom MyAutoField type."""
    id = MyAutoField(primary_key=True, db_index=True)

    def __str__(self):
        return repr(self.pk)
class Foo(models.Model):
    """Model referencing Bar through its custom-typed primary key."""
    bar = models.ForeignKey(Bar, models.CASCADE)
| bsd-3-clause |
Sunsoo/ecogwiki | tests/test_schema.py | 2 | 27652 | # -*- coding: utf-8 -*-
import schema
import caching
import unittest2 as unittest
from tests import AppEngineTestCase
from models import SchemaDataIndex, PageOperationMixin, WikiPage
class LabelTest(AppEngineTestCase):
    """Exercises singular/plural label generation for schema types and
    properties, including irregular nouns and reversed-link labels."""
    def setUp(self):
        super(LabelTest, self).setUp()

    def test_get_plural_label(self):
        self.assertEqual(u'Creative Works', schema.get_schema('CreativeWork')['plural_label'])
        self.assertEqual(u'Medical Entities', schema.get_schema('MedicalEntity')['plural_label'])
        self.assertEqual(u'Local Businesses', schema.get_schema('LocalBusiness')['plural_label'])
        self.assertEqual(u'Attorneys', schema.get_schema('Attorney')['plural_label'])

    def test_get_custom_plural_label_for_irregular_noun(self):
        self.assertEqual(u'People', schema.get_schema('Person')['plural_label'])

    def test_get_property_label(self):
        self.assertEqual(u'Author', schema.get_property('author')['label'])
        self.assertEqual(u'Authored %s', schema.get_property('author')['reversed_label'])

    def test_get_property_label_for_custom_reversed_form(self):
        self.assertEqual(u'Date Published', schema.get_property('datePublished')['label'])
        self.assertEqual(u'Published %s', schema.get_property('datePublished')['reversed_label'])

    def test_incoming_links(self):
        # Labels used for incoming links (third arg True).
        self.assertEqual(u'Related People', schema.humane_property('Person', 'relatedTo', True))
        self.assertEqual(u'Children (People)', schema.humane_property('Person', 'parent', True))
class CustomTypeAndPropertyTest(AppEngineTestCase):
    """Registers a fully-specified custom type ("Politician") plus a
    custom property and verifies inheritance, labels, and property
    propagation through the schema machinery."""
    def setUp(self):
        super(CustomTypeAndPropertyTest, self).setUp()
        # Register an extra schema fragment; removed again in tearDown.
        schema.SCHEMA_TO_LOAD.append({
            "datatypes": {
            },
            "properties": {
                "politicalParty": {
                    "comment": "Political party.",
                    "comment_plain": "Political party.",
                    "domains": [
                        "Thing"
                    ],
                    "id": "politicalParty",
                    "label": "Political Party",
                    "reversed_label": "%s",
                    "ranges": [
                        "Text"
                    ]
                }
            },
            "types": {
                "Politician": {
                    "ancestors": [
                        "Thing",
                        "Person"
                    ],
                    "comment": "",
                    "comment_plain": "",
                    "id": "Politician",
                    "label": "Politician",
                    "specific_properties": [
                        "politicalParty"
                    ],
                    "subtypes": [],
                    "supertypes": [
                        "Person"
                    ],
                    "url": "http://www.ecogwiki.com/sp.schema/types/Politician"
                }
            }
        })
        self.person = schema.get_schema('Person')
        self.politician = schema.get_schema('Politician')

    def tearDown(self):
        # Drop the schema fragment appended in setUp.
        schema.SCHEMA_TO_LOAD = schema.SCHEMA_TO_LOAD[:-1]
        super(CustomTypeAndPropertyTest, self).tearDown()

    def test_inheritance_relationship(self):
        self.assertTrue('Politician' in self.person['subtypes'])
        self.assertTrue('Person' in self.politician['supertypes'])
        self.assertTrue('Person' in self.politician['ancestors'])

    def test_humane_labels(self):
        self.assertEqual(u'Politician', schema.get_schema('Politician')['label'])
        self.assertEqual(u'Politicians', schema.humane_property('Politician', 'politicalParty', True))
        self.assertEqual(u'Political Party', schema.humane_property('Politician', 'politicalParty'))

    def test_property_inheritance(self):
        # The subtype carries every inherited property plus its own.
        person = set(schema.get_schema('Person')['properties'])
        politician = set(schema.get_schema('Politician')['properties'])
        self.assertEqual(set(), person.difference(politician))
        self.assertEqual({u'politicalParty'}, politician.difference(person))
class SimpleCustomTypeAndPropertyTest(AppEngineTestCase):
    """Registering minimal custom definitions: omitted fields (url, id,
    ancestors, domains, ranges, ...) should be auto-populated."""
    def setUp(self):
        super(SimpleCustomTypeAndPropertyTest, self).setUp()
        # Deliberately sparse definitions -- only "comment" and the bare
        # minimum are provided; the rest must be filled in by the loader.
        schema.SCHEMA_TO_LOAD.append({
            "datatypes": {
                "ISBN2": {
                    "comment": "ISBN 2",
                },
            },
            "properties": {
                "politicalParty": {
                    "comment": "A political party.",
                }
            },
            "types": {
                "Politician": {
                    "supertypes": ["Person"],
                    "specific_properties": ["politicalParty"],
                    "comment": "A political party.",
                }
            }
        })
        self.dtype = schema.get_datatype('ISBN2')
        self.item = schema.get_schema('Politician')
        self.prop = schema.get_property('politicalParty')
    def tearDown(self):
        # Remove only the entry appended in setUp().
        schema.SCHEMA_TO_LOAD = schema.SCHEMA_TO_LOAD[:-1]
        super(SimpleCustomTypeAndPropertyTest, self).tearDown()
    def test_populate_omitted_item_fields(self):
        self.assertEqual('/sp.schema/types/Politician', self.item['url'])
        self.assertEqual(["Thing", "Person"], self.item['ancestors'])
        self.assertEqual('Politician', self.item['id'])
        self.assertEqual('A political party.', self.item['comment_plain'])
        self.assertEqual([], self.item['subtypes'])
    def test_populate_omitted_datatype_fields(self):
        # Datatypes descend from DataType rather than Thing.
        self.assertEqual('/sp.schema/datatypes/ISBN2', self.dtype['url'])
        self.assertEqual(["Thing", "Person"], self.item['ancestors'])
        self.assertEqual([], self.dtype['properties'])
        self.assertEqual([], self.dtype['specific_properties'])
        self.assertEqual(['DataType'], self.dtype['ancestors'])
        self.assertEqual(['DataType'], self.dtype['supertypes'])
        self.assertEqual([], self.dtype['subtypes'])
        self.assertEqual('ISBN2', self.dtype['id'])
        self.assertEqual('ISBN 2', self.dtype['comment_plain'])
    def test_populate_omitted_property_fields(self):
        # Default domain is Thing and default range is Text.
        self.assertEqual(["Thing"], self.prop['domains'])
        self.assertEqual(["Text"], self.prop['ranges'])
        self.assertEqual('A political party.', self.item['comment_plain'])
class EnumerationTest(AppEngineTestCase):
    """Properties with an "enum" list should reject values outside it."""
    def setUp(self):
        super(EnumerationTest, self).setUp()
        # Custom "Student" type with an enumerated "academicSeason" property.
        schema.SCHEMA_TO_LOAD.append({
            "types": {
                "Student": {
                    "ancestors": ["Thing", "Person"],
                    "id": "Student",
                    "label": "Student",
                    "specific_properties": ["academicSeason"],
                    "subtypes": [],
                    "supertypes": ["Person"],
                    "url": "http://www.ecogwiki.com/sp.schema/types/Student",
                }
            },
            "properties": {
                "academicSeason": {
                    "label": "Academic Season",
                    "domains": ["Student"],
                    "ranges": ["Text"],
                    "enum": ["1-1", "1-2", "2-1", "2-2"]
                }
            },
        })
    def tearDown(self):
        # Remove only the entry appended in setUp().
        schema.SCHEMA_TO_LOAD = schema.SCHEMA_TO_LOAD[:-1]
        super(EnumerationTest, self).tearDown()
    def test_enum(self):
        # A value in the enum converts to a TextProperty...
        data = schema.SchemaConverter.convert(u'Student', {u'academicSeason': u'1-1'})['academicSeason']
        self.assertEqual(schema.TextProperty, type(data))
        self.assertEqual(u'1-1', data.render())
        # ...while a value outside the enum becomes an InvalidProperty.
        data = schema.SchemaConverter.convert(u'Student', {u'academicSeason': u'1-3'})['academicSeason']
        self.assertEqual(schema.InvalidProperty, type(data))
class CustomCardinalityTest(AppEngineTestCase):
    """Cardinality constraints: property-level defaults and per-type
    overrides, expressed as [min, max] (0 meaning unbounded)."""
    def setUp(self):
        super(CustomCardinalityTest, self).setUp()
        # "url" requires exactly one value globally, but Person relaxes it
        # to at most one.
        schema.SCHEMA_TO_LOAD.append({
            "properties": {
                "url": {
                    "cardinality": [1, 1]
                }
            },
            "types": {
                "Person": {
                    "cardinalities": {
                        "url": [0, 1]
                    }
                }
            }
        })
    def tearDown(self):
        # Remove only the entry appended in setUp().
        schema.SCHEMA_TO_LOAD = schema.SCHEMA_TO_LOAD[:-1]
        super(CustomCardinalityTest, self).tearDown()
    def test_props_gt_cardinality(self):
        # Extra values beyond the max are silently dropped.
        data = schema.SchemaConverter.convert(u'Person', {'url': ['http://x.com', 'http://y.com']})
        self.assertEqual('http://x.com', data['url'].value)
    def test_props_lt_cardinality(self):
        # Missing a required value raises.
        self.assertRaises(ValueError, schema.SchemaConverter.convert, u'Thing', {})
    def test_default_cardinality(self):
        self.assertEqual([0, 0], schema.get_cardinality('Person', 'children'))
    def test_prop_cardinality(self):
        self.assertEqual([1, 1], schema.get_cardinality('Thing', 'url'))
    def test_cardinality_in_item_should_override_prop_candinality(self):
        self.assertEqual([0, 1], schema.get_cardinality('Person', 'url'))
class SchemaPathTest(unittest.TestCase):
    """Humane labels and itemtype paths for built-in schema types."""

    def test_humane_itemtype(self):
        # CamelCase itemtype ids are split into space-separated words.
        self.assertEqual('Book', schema.humane_item('Book'))
        self.assertEqual('Creative Work', schema.humane_item('CreativeWork'))

    def test_humane_property(self):
        # Reversed (incoming) form vs. the plain form of the same property.
        reversed_label = schema.humane_property('Book', 'datePublished', True)
        self.assertEqual('Published Books', reversed_label)
        plain_label = schema.humane_property('Book', 'datePublished')
        self.assertEqual('Date Published', plain_label)

    def test_itemtype_path(self):
        # The path lists ancestors from Thing down to the type itself.
        self.assertEqual('Thing/', schema.get_itemtype_path('Thing'))
        self.assertEqual('Thing/CreativeWork/Article/',
                         schema.get_itemtype_path('Article'))
class SectionTest(unittest.TestCase):
    """Splitting a page body into named sections via ``name::---`` markers."""

    def test_default_section(self):
        parsed = PageOperationMixin.parse_sections(u'Hello')
        self.assertEqual({'articleBody'}, set(parsed.keys()))
        self.assertEqual(u'Hello', parsed['articleBody'])

    def test_specifying_default_section(self):
        # The name of the implicit leading section can be overridden.
        parsed = PageOperationMixin.parse_sections(u'Hello', u'longText')
        self.assertEqual({'longText'}, set(parsed.keys()))
        self.assertEqual(u'Hello', parsed['longText'])

    def test_additional_sections(self):
        body = u'Hello\n\nsection1::---\n\nHello\n\nthere\n\nsection2::---\n\nGood\n\nbye\n'
        parsed = PageOperationMixin.parse_sections(body)
        self.assertEqual({'articleBody', 'section1', 'section2'}, set(parsed.keys()))
        self.assertEqual(u'Hello', parsed['articleBody'])
        self.assertEqual(u'Hello\n\nthere', parsed['section1'])
        self.assertEqual(u'Good\n\nbye', parsed['section2'])
class EmbeddedSchemaDataTest(unittest.TestCase):
    """Parsing [[prop::value]] / {{prop::value}} data embedded in a body."""

    def test_no_data(self):
        # Without embedded markup, only the implicit fields are produced.
        parsed = PageOperationMixin.parse_data(u'Hello', u'Hello')
        self.assertEqual(['articleBody', 'name', 'schema'], parsed.keys())
        self.assertEqual(u'Hello', parsed['name'].pvalue)
        self.assertEqual(u'Thing/CreativeWork/Article/', parsed['schema'].pvalue)
        self.assertEqual(u'Hello', parsed['articleBody'].pvalue)

    def test_author_and_isbn(self):
        body = u'[[author::AK]]\n{{isbn::1234567890}}'
        parsed = PageOperationMixin.parse_data(u'Hello', body, u'Book')
        self.assertEqual(u'AK', parsed['author'].pvalue)
        self.assertEqual(u'1234567890', parsed['isbn'].pvalue)

    def test_multiple_authors(self):
        # Repeated properties accumulate into a list.
        parsed = PageOperationMixin.parse_data(u'Hello', u'[[author::AK]] and [[author::TK]]', u'Book')
        self.assertEqual([u'AK', u'TK'], [v.pvalue for v in parsed['author']])
class YamlSchemaDataTest(AppEngineTestCase):
    """Parsing ``#!yaml/schema`` blocks embedded in a page body, including
    interaction with inline [[prop::value]] / {{prop::value}} markup."""
    def setUp(self):
        super(YamlSchemaDataTest, self).setUp()
        self.login('ak@gmail.com', 'ak')
    def test_yaml(self):
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: AK\n    isbn: "1234567890"\n\nHello', u'Hello')
        self.assertEqual({u'Book/author': [u'AK']}, page.outlinks)
        data_items = dict((k, v.pvalue) for k, v in page.data.items())
        # datePageModified varies per run; exclude it from the comparison.
        del data_items['datePageModified']
        self.assertEqual(
            {'name': u'Hello', 'isbn': u'1234567890', 'schema': u'Thing/CreativeWork/Book/', 'author': u'AK', 'longDescription': u'Hello'},
            data_items
        )
    def test_re_match(self):
        # The YAML block may be tab-indented and followed by normal body text.
        body = u'''\t#!yaml/schema\n    url: "http://anotherfam.kr/"\n\n\n[[\uc81c\uc791\ub450\ub808]]\ub97c ...\n'''
        data = PageOperationMixin.parse_schema_yaml(body)
        self.assertEqual(data['url'], 'http://anotherfam.kr/')
    def test_list_value(self):
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: [AK, TK]\n\nHello', u'Hello')
        self.assertEqual({u'Book/author': [u'AK', u'TK']}, page.outlinks)
        self.assertEqual([u'AK', u'TK'], [v.pvalue for v in page.data['author']])
    def test_mix_with_embedded_data(self):
        # YAML values and inline markup values are merged.
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: [AK, TK]\n\n{{isbn::1234567890}}\n\n[[author::JK]]', u'Hello')
        self.assertEqual({u'Book/author': [u'AK', u'JK', u'TK']}, page.outlinks)
        self.assertEqual([u'AK', u'TK', u'JK'], [v.pvalue for v in page.data['author']])
        self.assertEqual(u'1234567890', page.data['isbn'].pvalue)
        self.assertEqual(u'Hello', page.data['name'].pvalue)
    def test_no_duplications(self):
        # The same value supplied via YAML and inline markup appears once.
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: [AK, TK]\n\n{{isbn::1234567890}}\n\n[[author::TK]]')
        self.assertEqual({u'Book/author': [u'AK', u'TK']}, page.outlinks)
        self.assertEqual([u'AK', u'TK'], [v.pvalue for v in page.data['author']])
    def test_yaml_block_should_not_be_rendered(self):
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: AK\n    isbn: "1234567890"\n\nHello')
        self.assertEqual(-1, page.rendered_body.find(u'#!yaml/schema'))
    def test_tab_and_space_mixed(self):
        body = u'\t#!yaml/schema\n    alternateName: hi\n\turl: http://x.com\n    name: "Hello"\n'
        data = PageOperationMixin.parse_schema_yaml(body)
        self.assertEqual(data['name'], u'Hello')
        self.assertEqual(data['alternateName'], u'hi')
        self.assertEqual(data['url'], u'http://x.com')
    def test_yaml_indent_catching_only_space(self):
        body = u'''\n\t#!yaml/schema\n    url: "http://x.com"\n\nHello\n'''
        matched = PageOperationMixin.re_yaml_schema.search(body).group(0)
        self.assertTrue(matched.startswith('\t'))
    def test_rawdata(self):
        page = self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: [AK, TK]\n', u'Hello')
        raw = page.rawdata
        self.assertEqual(u'Hello', raw['name'])
        self.assertEqual([u'AK', u'TK'], raw['author'])
class SchemaIndexTest(AppEngineTestCase):
    """SchemaDataIndex maintenance: entries are created, updated and rebuilt
    as page data changes."""
    def setUp(self):
        super(SchemaIndexTest, self).setUp()
        self.login('ak@gmail.com', 'ak')
    def test_create(self):
        self.update_page(u'.schema Book\n[[author::AK]]\n{{isbn::1234567890}}\n[[datePublished::2013]]', u'Hello')
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'author', u'AK'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'isbn', u'1234567890'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'datePublished', u'2013'))
    def test_update(self):
        # Re-saving the page should drop stale index entries and add new ones.
        self.update_page(u'.schema Book\n[[author::AK]]\n{{isbn::1234567890}}\n[[datePublished::2013]]', u'Hello')
        self.update_page(u'.schema Book\n[[author::AK]]\n{{isbn::1234567899}}\n[[dateModified::2013]]', u'Hello')
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'author', u'AK'))
        self.assertFalse(SchemaDataIndex.has_match(u'Hello', u'isbn', u'1234567890'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'isbn', u'1234567899'))
        self.assertFalse(SchemaDataIndex.has_match(u'Hello', u'datePublished', u'2013'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'dateModified', u'2013'))
    def test_rebuild(self):
        # Rebuilding from scratch should reproduce the same index entries.
        page = self.update_page(u'.schema Book\n[[author::AK]]\n{{isbn::1234567890}}\n[[datePublished::2013]]', u'Hello')
        SchemaDataIndex.rebuild_index(page.title, page.data)
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'author', u'AK'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'isbn', u'1234567890'))
        self.assertTrue(SchemaDataIndex.has_match(u'Hello', u'datePublished', u'2013'))
    def test_should_not_index_for_longtext(self):
        # Long-text sections are not worth indexing for exact-match lookup.
        self.update_page(u'longDescription::---\n\nHello there', u'Hello')
        self.assertFalse(SchemaDataIndex.has_match(u'Hello', u'longDescription', u'Hello there'))
class TypeConversionTest(unittest.TestCase):
    """SchemaConverter: raw string values are converted into typed property
    objects (dates, booleans, numbers, URLs, ISBNs, ...); unconvertible
    values become InvalidProperty."""
    def test_unknown_itemtype(self):
        self.assertRaises(ValueError, schema.SchemaConverter.convert, u'UnknownSchema', {})
    def test_invalid_property(self):
        data = schema.SchemaConverter.convert(u'Book', {u'unknownProp': u'Hello'})['unknownProp']
        self.assertEqual(schema.InvalidProperty, type(data))
    def test_year_only_date(self):
        # Year-only dates (optionally BCE) leave month/day as None.
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979 BCE'})
        self.assertEqual(1979, data['birthDate'].year)
        self.assertTrue(data['birthDate'].bce)
        self.assertIsNone(data['birthDate'].month)
        self.assertIsNone(data['birthDate'].day)
        self.assertTrue(data['birthDate'].is_year_only())
        self.assertEqual(u'<time datetime="1979 BCE"><a class="wikipage" href="/1979_BCE">1979</a><span> BCE</span></time>', data['birthDate'].render())
    def test_date(self):
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'300-05-15 BCE'})
        self.assertEqual(u'300-05-15 BCE', data['birthDate'].pvalue)
        self.assertEqual(300, data['birthDate'].year)
        self.assertTrue(data['birthDate'].bce)
        self.assertEqual(5, data['birthDate'].month)
        self.assertEqual(15, data['birthDate'].day)
        self.assertFalse(data['birthDate'].is_year_only())
        self.assertEqual(u'<time datetime="300-05-15 BCE"><a class="wikipage" href="/300_BCE">300</a><span>-</span><a class="wikipage" href="/May_15">05-15</a><span> BCE</span></time>', data['birthDate'].render())
    def test_partial_date(self):
        # '??' placeholders are accepted for unknown month/day parts.
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979'})['birthDate']
        self.assertEqual(1979, data.year)
        self.assertTrue(data.is_year_only())
        self.assertEqual(u'<time datetime="1979"><a class="wikipage" href="/1979">1979</a></time>', data.render())
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979-03-??'})['birthDate']
        self.assertEqual(1979, data.year)
        self.assertEqual(3, data.month)
        self.assertEqual(1, data.day)
        self.assertFalse(data.is_year_only())
        self.assertEqual(u'<time datetime="1979-03-??"><a class="wikipage" href="/1979">1979</a><span>-</span><a class="wikipage" href="/March">03-??</a></time>', data.render())
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979-??-??'})['birthDate']
        self.assertEqual(1979, data.year)
        self.assertEqual(u'<time datetime="1979-??-??"><a class="wikipage" href="/1979">1979</a><span>-</span><span>??-??</span></time>', data.render())
    def test_invalid_date(self):
        # Unparsable or out-of-range dates render as error spans.
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'Ten years ago'})['birthDate']
        self.assertEqual(schema.InvalidProperty, type(data))
        self.assertEqual(u'Ten years ago', data.pvalue)
        self.assertEqual(u'<span class="error">Ten years ago</span>', data.render())
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979-13-05'})['birthDate']
        self.assertEqual(schema.InvalidProperty, type(data))
        self.assertEqual(u'1979-13-05', data.pvalue)
        self.assertEqual(u'<span class="error">1979-13-05</span>', data.render())
        data = schema.SchemaConverter.convert(u'Person', {u'birthDate': u'1979-05-40'})['birthDate']
        self.assertEqual(schema.InvalidProperty, type(data))
        self.assertEqual(u'1979-05-40', data.pvalue)
        self.assertEqual(u'<span class="error">1979-05-40</span>', data.render())
    def test_boolean(self):
        # Several truthy/falsy spellings are accepted, case-insensitively.
        for l in [u'1', u'true', u'TRUE', u'yes', u'YES']:
            data = schema.SchemaConverter.convert(u'Article', {u'isFamilyFriendly': l})
            self.assertTrue(data['isFamilyFriendly'].value)
        for l in [u'0', u'false', u'FALSE', u'no', u'NO']:
            data = schema.SchemaConverter.convert(u'Article', {u'isFamilyFriendly': l})
            self.assertFalse(data['isFamilyFriendly'].value)
    def test_text(self):
        data = schema.SchemaConverter.convert(u'Person', {u'jobTitle': u'Visualization engineer'})
        self.assertEqual(u'Visualization engineer', data['jobTitle'].value)
    def test_name_text(self):
        # The "name" property renders as a wiki link.
        data = schema.SchemaConverter.convert(u'Person', {u'name': u'AK'})
        self.assertEqual(True, data['name'].is_wikilink())
        self.assertEqual(u'<a class="wikipage" href="/AK">AK</a>', data['name'].render())
    def test_integer(self):
        data = schema.SchemaConverter.convert(u'SoftwareApplication', {u'fileSize': u'12345'})
        self.assertEqual(12345, data['fileSize'].value)
    def test_invalid_integer(self):
        data = schema.SchemaConverter.convert(u'SoftwareApplication', {u'fileSize': u'1234.5'})['fileSize']
        self.assertEqual(schema.InvalidProperty, type(data))
        data = schema.SchemaConverter.convert(u'SoftwareApplication', {u'fileSize': u'Very small'})['fileSize']
        self.assertEqual(schema.InvalidProperty, type(data))
    def test_number(self):
        # Number properties keep int vs. float distinction.
        data = schema.SchemaConverter.convert(u'JobPosting', {u'baseSalary': u'1234.5'})
        self.assertEqual(1234.5, data['baseSalary'].value)
        self.assertEqual(float, type(data['baseSalary'].value))
        data = schema.SchemaConverter.convert(u'JobPosting', {u'baseSalary': u'12345'})
        self.assertEqual(int, type(data['baseSalary'].value))
    def test_url(self):
        data = schema.SchemaConverter.convert(u'Code', {u'codeRepository': u'http://x.com/path/y.jsp?q=2&q2=2'})
        self.assertEqual('http://x.com/path/y.jsp?q=2&q2=2', data['codeRepository'].value)
    def test_invalid_url(self):
        # A URL embedded in prose is not a URL value.
        data = schema.SchemaConverter.convert(u'Code', {u'codeRepository': u'See http://github.org'})['codeRepository']
        self.assertEqual(schema.InvalidProperty, type(data))
    def test_embeddable_url(self):
        # Image-like URLs get the embeddable subtype.
        data = schema.SchemaConverter.convert(u'Thing', {u'image': u'http://x.com/a.png'})
        self.assertEqual(u'http://x.com/a.png', data['image'].value)
        self.assertEqual(schema.EmbeddableURLProperty, type(data['image']))
    def test_thing(self):
        data = schema.SchemaConverter.convert(u'Code', {u'programmingLanguage': u'JavaScript'})
        self.assertEqual('JavaScript', data['programmingLanguage'].value)
    def test_isbn(self):
        # Default ISBN rendering links to Amazon.
        data = schema.SchemaConverter.convert(u'Book', {u'isbn': u'1234512345'})
        self.assertEqual('1234512345', data['isbn'].value)
        self.assertEqual(u'<a href="http://www.amazon.com/gp/product/1234512345" class="isbn" itemprop="isbn">1234512345</a>',
                         data['isbn'].render())
    def test_isbn_kr(self):
        # Korean ISBNs (89 / 9788 prefixes) link to Aladin instead.
        data = schema.SchemaConverter.convert(u'Book', {u'isbn': u'8912345123'})
        self.assertEqual('8912345123', data['isbn'].value)
        self.assertEqual(u'<a href="http://www.aladin.co.kr/shop/wproduct.aspx?ISBN=9788912345123" class="isbn" itemprop="isbn">8912345123</a>',
                         data['isbn'].render())
        data = schema.SchemaConverter.convert(u'Book', {u'isbn': u'9788912345123'})
        self.assertEqual('9788912345123', data['isbn'].value)
        self.assertEqual(u'<a href="http://www.aladin.co.kr/shop/wproduct.aspx?ISBN=9788912345123" class="isbn" itemprop="isbn">9788912345123</a>',
                         data['isbn'].render())
    def test_list_value(self):
        data = schema.SchemaConverter.convert(u'Book', {u'author': [u'AK', u'CK']})
        self.assertEqual(list, type(data['author']))
        self.assertEqual(2, len(data['author']))
        self.assertEqual(u'AK', data['author'][0].value)
        self.assertEqual(u'CK', data['author'][1].value)
class ConversionPriorityTest(unittest.TestCase):
    """When a property allows several ranges, URL conversion is attempted
    before falling back to plain text."""

    def test_try_url_first_then_text(self):
        converted = schema.SchemaConverter.convert(
            u'SoftwareApplication', {u'featureList': u'http://x.com'})
        self.assertEqual(schema.URLProperty, type(converted['featureList']))
        converted = schema.SchemaConverter.convert(
            u'SoftwareApplication', {u'featureList': u'See http://x.com'})
        self.assertEqual(schema.TextProperty, type(converted['featureList']))
class SchemaChangeTest(AppEngineTestCase):
    """Pages written before a schema change should remain readable and
    updatable after the schema changes."""
    def setUp(self):
        super(SchemaChangeTest, self).setUp()
        self.login('ak@gmail.com', 'ak')
    def tearDown(self):
        # Pop the schema entry each test appends inside its body.
        schema.SCHEMA_TO_LOAD = schema.SCHEMA_TO_LOAD[:-1]
        super(SchemaChangeTest, self).tearDown()
    def test_change_schema_after_writing_and_try_to_read(self):
        self.update_page(u'Hello there?', u'Hello')
        # Drop caches so the page is re-processed under the new schema.
        caching.flush_all()
        schema.SCHEMA_TO_LOAD.append({
            "properties": {
                "author": {
                    "cardinality": [1, 1]
                }
            }
        })
        page = WikiPage.get_by_title(u'Hello')
        # Rendering must not raise even though the page predates the schema.
        page.rendered_body
    def test_change_schema_after_writing_and_try_to_update(self):
        self.update_page(u'Hello there?', u'Hello')
        # Drop caches so the page is re-processed under the new schema.
        caching.flush_all()
        schema.SCHEMA_TO_LOAD.append({
            "properties": {
                "author": {
                    "cardinality": [1, 1]
                }
            }
        })
        self.update_page(u'.schema Book\n\n    #!yaml/schema\n    author: "Alan Kang"\n\nHello there?\n', u'Hello')
class MiscTest(AppEngineTestCase):
    """Assorted schema-module behaviors: legacy spellings, datatypes,
    itemtype listing, property completeness/ordering, self-contained form."""
    def setUp(self):
        super(MiscTest, self).setUp()
    def test_should_not_allow_legacy_spells(self):
        # Deprecated schema.org spellings must not resolve.
        self.assertRaises(KeyError, schema.get_property, 'contactPoints')
        self.assertTrue('awards' not in schema.get_schema('Person')['properties'])
    def test_get_datatype(self):
        self.assertEqual('Boolean', schema.get_datatype('Boolean')['label'])
    def test_get_custom_datatype(self):
        isbn = schema.get_datatype('ISBN')
        self.assertEqual(['DataType'], isbn['ancestors'])
    def test_get_itemtypes(self):
        # Itemtypes come back as an alphabetically sorted list of
        # (id, label) pairs.
        itemtypes = schema.get_itemtypes()
        self.assertEqual(list, type(itemtypes))
        self.assertEqual(('APIReference', 'API Reference'), itemtypes[0])
        self.assertEqual(('Zoo', 'Zoo'), itemtypes[-1])
    def test_properties_should_contain_all_specific_properties(self):
        for t, _ in schema.get_itemtypes():
            item = schema.get_schema(t)
            self.assertEqual(set(), set(item['specific_properties']).difference(item['properties']))
    def test_properties_order_should_follow_that_of_source(self):
        article = schema.get_schema('Article')
        self.assertEqual('about', article['properties'][0])
        self.assertEqual('wordCount', article['properties'][-1])
    def test_self_contained_schema(self):
        # With the second argument True, properties are expanded inline
        # instead of being mere id strings.
        s = schema.get_schema('Person', True)
        url = s['properties']['url']
        self.assertEqual(dict, type(url))
        self.assertEqual([0, 0], url['cardinality'])
        self.assertEqual(['URL'], url['type']['ranges'])
| gpl-3.0 |
denverfoundation/storybase | apps/storybase/api/authorization.py | 1 | 6464 | from django.db.models import Q
from tastypie.authorization import Authorization
from tastypie.exceptions import Unauthorized
class UserAuthorization(object):
    """Mixin providing a check that the request carries a usable user."""
    def user_valid(self, bundle):
        """
        Is the user associated with the request valid?

        A valid user must exist, be authenticated and be active.

        Returns ``True`` when valid; raises ``Unauthorized`` otherwise.
        """
        # BUG FIX: elsewhere in this module ``is_authenticated`` is invoked
        # as a method (``is_authenticated()``), so in the Django version this
        # code targets it is a bound method. The previous
        # ``not bundle.request.user.is_authenticated`` tested the truthiness
        # of the method object itself -- always True -- so unauthenticated
        # users were never rejected here. Call it, matching the rest of the
        # file.
        if (not hasattr(bundle.request, 'user') or
                not bundle.request.user.is_authenticated() or
                not bundle.request.user.is_active):
            raise Unauthorized("You are not allowed to access that resource.")
        else:
            return True
class LoggedInAuthorization(Authorization, UserAuthorization):
    """Custom authorization that checks Django authentication and delegates
    object-level decisions to each object's ``has_perms`` method."""
    def filter_by_perms(self, object_list, bundle, perms):
        """
        Filter a list of objects to only the items for which a request's user
        has a particular set of permissions.
        """
        # TODO: Use custom queryset methods to do this at the database
        # level instead of iterating through the list and checking the
        # permissions on each item
        filtered = []
        if self.user_valid(bundle):
            for obj in object_list:
                if (hasattr(obj, 'has_perms') and
                        obj.has_perms(bundle.request.user, perms)):
                    filtered.append(obj)
        return filtered
    def obj_has_perms(self, obj, user, perms):
        """
        Use an object's ``has_perms`` method to check whether the request's
        user has particular permissions for the object.

        Returns either ``True`` if the user has the requested permissions for
        the object in question or throw ``Unauthorized`` if they do not.

        Unlike ``has_perms``, this method allows explicitely specify the
        object and user instead of taking them from the bundle.
        This is useful in cases where you want to check permissions against
        some other object, often in nested resources.
        """
        if (hasattr(obj, 'has_perms') and
                obj.has_perms(user, perms)):
            return True
        raise Unauthorized("You are not allowed to access that resource.")
    def has_perms(self, object_list, bundle, perms):
        """
        Use an object's ``has_perms`` method to check whether the request's
        user has particular permissions for the object.

        Returns either ``True`` if the user has the requested permissions for
        the object in question or throw ``Unauthorized`` if they do not.
        """
        if self.user_valid(bundle):
            return self.obj_has_perms(bundle.obj, bundle.request.user, perms)
        raise Unauthorized("You are not allowed to access that resource.")
    def create_detail(self, object_list, bundle):
        """
        Returns either ``True`` if the user is allowed to create the object in
        question or throw ``Unauthorized`` if they are not.

        Returns ``True`` if the user is authenticated and active.
        """
        return self.user_valid(bundle)
    def update_list(self, object_list, bundle):
        """
        Returns a list of all the objects a user is allowed to update.
        Should return an empty list if none are allowed.

        Delegates to each object in the list's ``has_perms`` method to check
        permissions for the request's user.
        """
        return self.filter_by_perms(object_list, bundle, ['change'])
    def update_detail(self, object_list, bundle):
        """
        Returns either ``True`` if the user is allowed to update the object in
        question or throw ``Unauthorized`` if they are not.

        Delegates to the object's ``has_perms`` method to check permissions
        for the request's user.
        """
        return self.has_perms(object_list, bundle, ['change'])
    def delete_list(self, object_list, bundle):
        """
        Returns a list of all the objects a user is allowed to delete.
        Should return an empty list if none are allowed.

        Delegates to each object in the list's ``has_perms`` method to check
        permissions for the request's user.
        """
        return self.filter_by_perms(object_list, bundle, ['delete'])
    def delete_detail(self, object_list, bundle):
        """
        Returns either ``True`` if the user is allowed to delete the object in
        question or throw ``Unauthorized`` if they are not.

        Delegates to the object's ``has_perms`` method to check permissions
        for the request's user.
        """
        # BUG FIX: previously checked the 'change' permission here, which was
        # inconsistent with delete_list() above -- deleting must require the
        # 'delete' permission.
        return self.has_perms(object_list, bundle, ['delete'])
class PublishedOwnerAuthorization(LoggedInAuthorization):
    """Authorization for models with a publication status and owner user"""
    # This can be overridden is subclasses if the owner field is something
    # other than ``owner``, e.g. ``author``
    owner_field = 'owner'

    # TODO: Move these filtering operations to the model/manager/queryset
    def read_list(self, object_list, bundle):
        """
        Return published objects, plus the requesting user's own unpublished
        objects; superusers see everything.
        """
        # All users can see published items
        q = Q(status='published')
        if (hasattr(bundle.request, 'user') and
                bundle.request.user.is_authenticated()):
            if bundle.request.user.is_superuser:
                # If the user is a superuser, don't restrict the list at all
                return object_list
            else:
                # If the user is logged in, show their unpublished stories as
                # well
                q_args = {
                    self.owner_field: bundle.request.user
                }
                q = q | Q(**q_args)
        return object_list.filter(q)

    def update_list(self, object_list, bundle):
        """
        Return the objects the requesting user may update: everything for
        superusers, the user's own objects otherwise, nothing when
        unauthenticated.
        """
        if not (hasattr(bundle.request, 'user') and
                bundle.request.user.is_authenticated()):
            # Unauthenticated users shouldn't be able to update anything
            return []
        # BUG FIX: was ``bundle.request.is_superuser`` -- HttpRequest has no
        # ``is_superuser`` attribute, so this raised AttributeError for every
        # authenticated request. The flag lives on the user object.
        if bundle.request.user.is_superuser:
            # If the user is a superuser, don't restrict the list at all
            return object_list
        else:
            # For authenticated users, restrict the list to objects that
            # the user owns
            filter_args = {
                self.owner_field: bundle.request.user
            }
            return object_list.filter(**filter_args)

    def delete_list(self, object_list, bundle):
        """Deletable objects are exactly the updatable ones."""
        return self.update_list(object_list, bundle)
| mit |
nimbis/django-cms | cms/cache/placeholder.py | 6 | 6421 | # -*- coding: utf-8 -*-
"""
This module manages placeholder caching. We use a cache-versioning strategy
in which each (placeholder x lang x site_id) manages its own version. The
actual cache includes additional keys appropriate for the placeholders
get_vary_cache_on().
Invalidation of a placeholder's cache simply increments the version number for
the (placeholder x lang) pair, which renders any cache entries for that
placeholder under that version inaccessible. Those cache entries will simply
expire and will be purged according to the policy of the cache backend in-use.
The cache entries themselves may include additional sub-keys, according to the
list of VARY header-names as returned by placeholder.get_vary_cache_on() and
the current HTTPRequest object.
The vary-on header-names are also stored with the version. This enables us to
check for cache hits without re-computing placeholder.get_vary_cache_on().
"""
import hashlib
import time
from django.utils.timezone import now
from cms.utils import get_cms_setting
from cms.utils.helpers import get_header_name, get_timezone_name
def _get_placeholder_cache_version_key(placeholder, lang, site_id):
    """
    Returns the version key for the given «placeholder», «lang» and «site_id».

    Invalidating this (via clear_placeholder_cache by replacing the stored
    value with a new value) will effectively make all "sub-caches" relating to
    this (placeholder x lang) inaccessible. Sub-caches include caches per TZ
    and per VARY header.
    """
    cache_prefix = get_cms_setting('CACHE_PREFIX')
    version_key = '{prefix}|placeholder_cache_version|id:{id}|lang:{lang}|site:{site}'.format(
        prefix=cache_prefix,
        id=placeholder.pk,
        lang=str(lang),
        site=site_id,
    )
    # Many cache backends (memcached) reject keys longer than 250 bytes;
    # fall back to a digest of the full key in that case.
    if len(version_key) > 250:
        digest = hashlib.md5(version_key.encode('utf-8')).hexdigest()
        version_key = '{prefix}|{hash}'.format(prefix=cache_prefix, hash=digest)
    return version_key
def _get_placeholder_cache_version(placeholder, lang, site_id):
    """
    Gets the (placeholder x lang)'s current version and vary-on header-names
    list, if present, otherwise resets to («timestamp», []).
    """
    from django.core.cache import cache
    version_key = _get_placeholder_cache_version_key(placeholder, lang, site_id)
    stored = cache.get(version_key)
    if not stored:
        # Nothing stored yet (or it expired): start a fresh version based on
        # the current time and persist it with an empty vary-on list.
        fresh_version = int(time.time() * 1000000)
        _set_placeholder_cache_version(placeholder, lang, site_id, fresh_version, [])
        return fresh_version, []
    return stored
def _set_placeholder_cache_version(placeholder, lang, site_id, version, vary_on_list=None, duration=None):
    """
    Sets the (placeholder x lang)'s version and vary-on header-names list.
    """
    from django.core.cache import cache
    # Normalise inputs: a missing/invalid version becomes a fresh timestamp,
    # and a missing vary-on list becomes empty.
    if not version or version < 1:
        version = int(time.time() * 1000000)
    vary_on_list = [] if vary_on_list is None else vary_on_list
    version_key = _get_placeholder_cache_version_key(placeholder, lang, site_id)
    cache.set(version_key, (version, vary_on_list), duration)
def _get_placeholder_cache_key(placeholder, lang, site_id, request, soft=False):
    """
    Returns the fully-addressed cache key for the given placeholder and
    the request.

    The kwarg «soft» should be set to True if getting the cache key to then
    read from the cache. If instead the key retrieval is to support a cache
    write, let «soft» be False.
    """
    prefix = get_cms_setting('CACHE_PREFIX')
    # The version embedded in the key is what makes invalidation work:
    # bumping the version orphans every previously-written key.
    version, vary_on_list = _get_placeholder_cache_version(placeholder, lang, site_id)
    main_key = '{prefix}|render_placeholder|id:{id}|lang:{lang}|site:{site}|tz:{tz}|v:{version}'.format(
        prefix=prefix,
        id=placeholder.pk,
        lang=lang,
        site=site_id,
        tz=get_timezone_name(),
        version=version,
    )
    if not soft:
        # We are about to write to the cache, so we want to get the latest
        # vary_cache_on headers and the correct cache expiration, ignoring any
        # we already have. If the placeholder has already been rendered, this
        # will be very efficient (zero-additional queries) due to the caching
        # of all its plugins during the rendering process anyway.
        vary_on_list = placeholder.get_vary_cache_on(request)
        duration = placeholder.get_cache_expiration(request, now())
        # Update the main placeholder cache version
        _set_placeholder_cache_version(
            placeholder, lang, site_id, version, vary_on_list, duration)
    # Append one sub-key per VARY header, taken from the current request
    # ('_' when the header is absent) so responses vary per header value.
    sub_key_list = []
    for key in vary_on_list:
        value = request.META.get(get_header_name(key)) or '_'
        sub_key_list.append(key + ':' + value)
    cache_key = main_key
    if sub_key_list:
        cache_key += '|' + '|'.join(sub_key_list)
    # Keep within the 250-byte key limit of backends such as memcached.
    if len(cache_key) > 250:
        cache_key = '{prefix}|{hash}'.format(
            prefix=prefix,
            hash=hashlib.md5(cache_key.encode('utf-8')).hexdigest(),
        )
    return cache_key
def set_placeholder_cache(placeholder, lang, site_id, content, request):
    """
    Sets the (correct) placeholder cache with the rendered placeholder.
    """
    from django.core.cache import cache
    cache_key = _get_placeholder_cache_key(placeholder, lang, site_id, request)
    # Never cache longer than the site-wide content duration allows.
    site_limit = get_cms_setting('CACHE_DURATIONS')['content']
    duration = min(site_limit, placeholder.get_cache_expiration(request, now()))
    cache.set(cache_key, content, duration)
    # "touch" the cache-version, so that it stays as fresh as this content.
    version, vary_on_list = _get_placeholder_cache_version(placeholder, lang, site_id)
    _set_placeholder_cache_version(
        placeholder, lang, site_id, version, vary_on_list, duration=duration)
def get_placeholder_cache(placeholder, lang, site_id, request):
    """
    Returns the placeholder from cache respecting the placeholder's
    VARY headers.

    Returns None on a cache miss.
    """
    from django.core.cache import cache
    lookup_key = _get_placeholder_cache_key(placeholder, lang, site_id, request, soft=True)
    return cache.get(lookup_key)
def clear_placeholder_cache(placeholder, lang, site_id):
    """
    Invalidates all existing cache entries for (placeholder x lang x site_id).
    We don't need to re-store the vary_on_list, because the cache is now
    effectively empty.
    """
    # A brand-new timestamp version orphans every previously-written key.
    fresh_version = int(time.time() * 1000000)
    _set_placeholder_cache_version(placeholder, lang, site_id, fresh_version, [])
| bsd-3-clause |
arafsheikh/coala | coalib/tests/results/result_actions/ShowPatchActionTest.py | 16 | 3307 | import unittest
from coalib.misc.ContextManagers import retrieve_stdout
from coalib.results.Diff import Diff
from coalib.results.Result import Result
from coalib.results.result_actions.ShowPatchAction import ShowPatchAction
from coalib.settings.Section import Section, Setting
class ShowPatchActionTest(unittest.TestCase):
    """Tests for ShowPatchAction: applicability checks and diff printout."""

    def setUp(self):
        # Two fake files with one diff each: "a" gains a line and loses
        # one, "b" gets a new first line.
        self.uut = ShowPatchAction()
        self.file_dict = {"a": ["a\n", "b\n", "c\n"], "b": ["old_first\n"]}
        self.diff_dict = {"a": Diff(self.file_dict['a']),
                          "b": Diff(self.file_dict['b'])}
        self.diff_dict["a"].add_lines(1, ["test\n"])
        self.diff_dict["a"].delete_line(3)
        self.diff_dict["b"].add_lines(0, ["first\n"])
        self.test_result = Result("origin", "message", diffs=self.diff_dict)
        self.section = Section("name")
        # Disable colored output so the expected strings are plain text.
        self.section.append(Setting("colored", "false"))

    def test_is_applicable(self):
        # Applicable only to a Result carrying diffs that are not already
        # covered by previously applied diffs.
        self.assertFalse(self.uut.is_applicable(1, None, None))
        self.assertFalse(self.uut.is_applicable(Result("o", "m"), None, None))
        self.assertTrue(self.uut.is_applicable(self.test_result, {}, {}))
        self.assertFalse(self.uut.is_applicable(self.test_result, {},
                                                self.diff_dict))

    def test_apply(self):
        # Applying with no previous patches prints both diffs and returns
        # the (unchanged, empty) dict of applied diffs.
        with retrieve_stdout() as stdout:
            self.assertEqual(self.uut.apply_from_section(self.test_result,
                                                         self.file_dict,
                                                         {},
                                                         self.section),
                             {})
            self.assertEqual(stdout.getvalue(),
                             "|----| | a\n"
                             "| |++++| a\n"
                             "| 1| 1| a\n"
                             "| | 2|+test\n"
                             "| 2| 3| b\n"
                             "| 3| |-c\n"
                             "|----| | b\n"
                             "| |++++| b\n"
                             "| | 1|+first\n"
                             "| 1| 2| old_first\n")

    def test_apply_with_previous_patches(self):
        # An already-applied change to "a" must show up in the printed
        # context ("b_changed") and be returned unchanged.
        with retrieve_stdout() as stdout:
            previous_diffs = {"a": Diff(self.file_dict['a'])}
            previous_diffs["a"].change_line(2, "b\n", "b_changed\n")
            self.assertEqual(self.uut.apply_from_section(self.test_result,
                                                         self.file_dict,
                                                         previous_diffs,
                                                         self.section),
                             previous_diffs)
            self.assertEqual(stdout.getvalue(),
                             "|----| | a\n"
                             "| |++++| a\n"
                             "| 1| 1| a\n"
                             "| | 2|+test\n"
                             "| 2| 3| b_changed\n"
                             "| 3| |-c\n"
                             "|----| | b\n"
                             "| |++++| b\n"
                             "| | 1|+first\n"
                             "| 1| 2| old_first\n")
| agpl-3.0 |
jasonkying/pip | pip/_vendor/requests/packages/chardet/charsetprober.py | 3127 | 1902 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
import re
class CharSetProber:
    """Base class for all charset probers; concrete probers override
    feed/get_charset_name/get_confidence."""

    def __init__(self):
        pass

    def reset(self):
        """Put the prober back into its initial, detecting state."""
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # The base prober recognises no charset.
        return None

    def feed(self, aBuf):
        # Subclasses consume bytes here; the base prober ignores input.
        pass

    def get_state(self):
        return self._mState

    def get_confidence(self):
        # The base prober never claims any confidence.
        return 0.0

    def filter_high_bit_only(self, aBuf):
        """Collapse every run of ASCII bytes into a single space,
        keeping only high-bit bytes for analysis."""
        return re.sub(b'([\x00-\x7F])+', b' ', aBuf)

    def filter_without_english_letters(self, aBuf):
        """Collapse every run of Latin letters into a single space."""
        return re.sub(b'([A-Za-z])+', b' ', aBuf)

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
| mit |
repotvsupertuga/repo | plugin.video.zen/resources/lib/indexers/movies.py | 1 | 54963 | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,json,urllib,urlparse,base64,datetime
import unicodedata
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None
from resources.lib.modules import trakt
from resources.lib.modules import control
from resources.lib.modules import client
from resources.lib.modules import cache
from resources.lib.modules import metacache
from resources.lib.modules import playcount
from resources.lib.modules import workers
from resources.lib.modules import views
from resources.lib.modules import favourites
class movies:
def __init__(self):
    """
    Initialise endpoint URL templates and user settings.

    All URL attributes are templates: '%s' placeholders are filled in
    later, and 'date[N]' tokens are substituted with concrete dates at
    request time (see tmdb_list).
    """
    self.list = []
    # Base API hosts; get() routes URLs to a parser by matching these.
    self.tmdb_link = 'http://api.themoviedb.org'
    self.trakt_link = 'http://api-v2launch.trakt.tv'
    self.imdb_link = 'http://www.imdb.com'
    # TMDb API key: user-provided setting, with a bundled fallback key.
    self.tmdb_key = control.setting('tmdb_apikey')
    if self.tmdb_key == '' or self.tmdb_key == None: self.tmdb_key = base64.b64decode('ZjdmNTE3NzU4NzdlMGJiNjcwMzUyMDk1MmIzYzc4NDA=')
    # Reference clock, shifted 5 hours back from UTC (US-centric release dates).
    self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
    self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
    self.trakt_user = re.sub('[^a-z0-9]', '-', control.setting('trakt.user').strip().lower())
    self.imdb_user = control.setting('imdb.user').replace('ur', '')
    self.tmdb_lang = 'en'
    # Convenience date anchors: today, 30 days ago, 365 days ago.
    self.today_date = (self.datetime).strftime('%Y-%m-%d')
    self.month_date = (self.datetime - datetime.timedelta(days = 30)).strftime('%Y-%m-%d')
    self.year_date = (self.datetime - datetime.timedelta(days = 365)).strftime('%Y-%m-%d')
    self.tmdb_info_link = 'http://api.themoviedb.org/3/movie/%s?api_key=%s&language=%s&append_to_response=credits,releases,external_ids' % ('%s', self.tmdb_key, self.tmdb_lang)
    self.imdb_by_query = 'http://www.omdbapi.com/?t=%s&y=%s'
    self.imdbinfo = 'http://www.omdbapi.com/?i=%s&plot=short&r=json'
    # Up to ten user-configured TMDb custom list ids.
    self.tmdbmovielist1_link = control.setting('tmdb.movielist_id1')
    self.tmdbmovielist2_link = control.setting('tmdb.movielist_id2')
    self.tmdbmovielist3_link = control.setting('tmdb.movielist_id3')
    self.tmdbmovielist4_link = control.setting('tmdb.movielist_id4')
    self.tmdbmovielist5_link = control.setting('tmdb.movielist_id5')
    self.tmdbmovielist6_link = control.setting('tmdb.movielist_id6')
    self.tmdbmovielist7_link = control.setting('tmdb.movielist_id7')
    self.tmdbmovielist8_link = control.setting('tmdb.movielist_id8')
    self.tmdbmovielist9_link = control.setting('tmdb.movielist_id9')
    self.tmdbmovielist10_link = control.setting('tmdb.movielist_id10')
    # Artwork base URLs (full size for fanart, w500 for posters).
    self.tmdb_image = 'http://image.tmdb.org/t/p/original'
    self.tmdb_poster = 'http://image.tmdb.org/t/p/w500'
    self.persons_link = 'http://api.themoviedb.org/3/search/person?&api_key=%s&query=%s&include_adult=false&page=1' % (self.tmdb_key, '%s')
    self.personlist_link = 'http://api.themoviedb.org/3/person/popular?&api_key=%s&page=%s' % (self.tmdb_key, '%s')
    self.genres_link = 'http://api.themoviedb.org/3/genre/movie/list?api_key=%s&language=%s' % (self.tmdb_key, self.tmdb_lang)
    self.certifications_link = 'http://api.themoviedb.org/3/certification/movie/list?&api_key=%s' % self.tmdb_key
    self.popular2_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&languages=en&num_votes=1000,&production_status=released&groups=top_1000&sort=moviemeter,asc&count=40&start=1'
    self.search_link = 'http://api.themoviedb.org/3/search/movie?&api_key=%s&query=%s'
    self.popular_link = 'http://api.themoviedb.org/3/movie/popular?api_key=%s&page=1'
    self.views_link = 'http://api.themoviedb.org/3/movie/top_rated?&language=it&api_key=%s&page=1'
    self.featured_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&primary_release_date.gte=date[365]&primary_release_date.lte=date[60]&page=1'
    self.person_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&with_people=%s&primary_release_date.lte=date[0]&sort_by=primary_release_date.desc&page=1'
    self.genre_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&with_genres=%s&primary_release_date.gte=date[365]&primary_release_date.lte=date[0]&page=1'
    self.certification_link = 'http://api.themoviedb.org/3/discover/movie?&api_key=%s&certification=%s&certification_country=US&primary_release_date.lte=date[0]&page=1'
    self.year_link = 'http://api.themoviedb.org/3/discover/movie?&api_key=%s&year=%s&primary_release_date.lte=date[0]&page=1'
    self.theaters_link = 'http://api.themoviedb.org/3/movie/now_playing?api_key=%s&page=1'
    self.boxoffice_link = 'http://www.imdb.com/search/title?&title_type=feature,tv_movie&sort=boxoffice_gross_us,desc&count=40&start=1'
    self.oscars_link = 'http://www.imdb.com/search/title?title_type=feature,tv_movie&groups=oscar_best_picture_winners&sort=year,desc&count=40&start=1'
    self.trending_link = 'http://api-v2launch.trakt.tv/movies/trending?&limit=40&page=1'
    self.premiere_link = 'http://api.themoviedb.org/3/discover/movie?api_key=%s&first_air_date.gte=%s&first_air_date.lte=%s&page=1' % ('%s', self.year_date, self.today_date)
    # Curated TMDb lists (Marvel, Oscars, Disney).
    self.tmdbmarvel_link = 'http://api.themoviedb.org/3/list/11332?api_key=%s' % (self.tmdb_key)
    self.tmdboscars_link = 'http://api.themoviedb.org/3/list/11334?api_key=%s' % (self.tmdb_key)
    self.tmdbdisney_link = 'http://api.themoviedb.org/3/list/11338?api_key=%s' % (self.tmdb_key)
    # Fully resolved URLs for the ten user custom lists above.
    self.mycustomlist1_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist1_link, self.tmdb_key)
    self.mycustomlist2_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist2_link, self.tmdb_key)
    self.mycustomlist3_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist3_link, self.tmdb_key)
    self.mycustomlist4_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist4_link, self.tmdb_key)
    self.mycustomlist5_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist5_link, self.tmdb_key)
    self.mycustomlist6_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist6_link, self.tmdb_key)
    self.mycustomlist7_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist7_link, self.tmdb_key)
    self.mycustomlist8_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist8_link, self.tmdb_key)
    self.mycustomlist9_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist9_link, self.tmdb_key)
    self.mycustomlist10_link = 'http://api.themoviedb.org/3/list/%s?api_key=%s' % (self.tmdbmovielist10_link, self.tmdb_key)
    # NOTE(review): the trakt/imdb user-list URLs below are disabled in this
    # revision, yet userlists()/get() still reference some of these attribute
    # names; those lookups raise AttributeError and are swallowed by the
    # surrounding broad except blocks. Confirm whether this is intended.
    # self.traktlists_link = 'http://api-v2launch.trakt.tv/users/%s/lists' % self.trakt_user
    # self.traktlikedlists_link = 'http://api-v2launch.trakt.tv/users/likes/lists?limit=1000000'
    # self.traktlist_link = 'http://api-v2launch.trakt.tv/users/%s/lists/%s/items'
    # self.traktcollection_link = 'http://api-v2launch.trakt.tv/users/%s/collection/movies' % self.trakt_user
    # self.traktwatchlist_link = 'http://api-v2launch.trakt.tv/users/%s/watchlist/movies' % self.trakt_user
    # self.traktfeatured_link = 'http://api-v2launch.trakt.tv/recommendations/movies?limit=40'
    # self.trakthistory_link = 'http://api-v2launch.trakt.tv/users/%s/history/movies?limit=40&page=1' % self.trakt_user
    # self.imdblists_link = 'http://www.imdb.com/user/ur%s/lists?tab=all&sort=modified:desc&filter=titles' % self.imdb_user
    # self.imdblist_link = 'http://www.imdb.com/list/%s/?view=detail&sort=title:asc&title_type=feature,short,tv_movie,tv_special,video,documentary,game&start=1'
    # self.imdbwatchlist_link = 'http://www.imdb.com/user/ur%s/watchlist' % self.imdb_user
def get(self, url, idx=True):
    """
    Fetch and display a movie list for the given URL (or named link).

    url may be an attribute base name (e.g. 'popular'), resolved via
    getattr to '<name>_link'. The URL's host decides which parser runs
    (TMDb / Trakt / IMDb); idx=False skips metadata enrichment and the
    directory rendering, returning the raw list only.
    """
    try:
        # Resolve a symbolic name like 'popular' to self.popular_link.
        try: url = getattr(self, url + '_link')
        except: pass
        try: u = urlparse.urlparse(url).netloc.lower()
        except: pass
        if u in self.tmdb_link and ('/user/' in url or '/list/' in url):
            # TMDb custom lists are not cached.
            self.list = self.tmdb_custom_list(url)
            self.worker()
        elif u in self.tmdb_link and not ('/user/' in url or '/list/' in url):
            self.list = cache.get(self.tmdb_list, 24, url)
            print ("LISTS TMDB", self.list)
            self.worker()
        elif u in self.trakt_link and '/users/' in url:
            # Personal trakt lists: use the long (720h) cache unless the
            # account shows newer activity than the cached entry.
            try:
                if url == self.trakthistory_link: raise Exception()
                if not '/%s/' % self.trakt_user in url: raise Exception()
                if trakt.getActivity() > cache.timeout(self.trakt_list, url): raise Exception()
                self.list = cache.get(self.trakt_list, 720, url)
            except:
                self.list = cache.get(self.trakt_list, 0, url)
            if '/%s/' % self.trakt_user in url:
                # Alphabetical sort, ignoring leading articles.
                self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['title'].lower()))
            if idx == True: self.worker()
        elif u in self.trakt_link:
            self.list = cache.get(self.trakt_list, 24, url)
            if idx == True: self.worker()
        elif u in self.imdb_link and ('/user/' in url or '/list/' in url):
            self.list = cache.get(self.imdb_list, 0, url)
            if idx == True: self.worker()
        elif u in self.imdb_link:
            self.list = cache.get(self.imdb_list, 24, url)
            if idx == True: self.worker()
        if idx == True: self.movieDirectory(self.list)
        return self.list
    except:
        pass
def widget(self):
    """
    Render the movie widget list selected in the addon settings.

    Bug fix: the original used a stand-alone ``if setting == '1'``
    followed by a second if/elif/else chain, so setting '1' rendered
    BOTH the premiere list and (via the chain's else branch) the
    featured list. A single chain dispatches exactly one listing.
    """
    setting = control.setting('movie.widget')
    if setting == '1':
        self.get(self.premiere_link)
    elif setting == '2':
        self.get(self.trending_link)
    elif setting == '3':
        self.get(self.popular_link)
    elif setting == '4':
        self.get(self.theaters_link)
    elif setting == '5':
        self.get(self.views_link)
    else:
        # Any other value falls back to the featured list.
        self.get(self.featured_link)
def search(self, query=None):
    """
    Search TMDb for movies matching a query.

    The query comes from (in priority order): the window property saved
    by a previous search when invoked from a list item, the on-screen
    keyboard when query is None, or the query argument itself. The
    chosen query is persisted in the window property for reuse.
    """
    try:
        if not control.infoLabel('ListItem.Title') == '':
            # Re-invoked from inside a listing: reuse the stored query.
            self.query = control.window.getProperty('%s.movie.search' % control.addonInfo('id'))
        elif query == None:
            t = control.lang(30201).encode('utf-8')
            k = control.keyboard('', t) ; k.doModal()
            self.query = k.getText() if k.isConfirmed() else None
        else:
            self.query = query
        if (self.query == None or self.query == ''): return
        control.window.setProperty('%s.movie.search' % control.addonInfo('id'), self.query)
        url = self.search_link % ('%s', urllib.quote_plus(self.query))
        # timeout 0: search results are never served stale.
        self.list = cache.get(self.tmdb_list, 0, url)
        self.worker()
        self.movieDirectory(self.list)
        return self.list
    except:
        return
def person(self, query=None):
    """
    Search TMDb for people; prompts with the on-screen keyboard when
    no query is supplied.
    """
    try:
        if query == None:
            prompt = control.lang(30201).encode('utf-8')
            kb = control.keyboard('', prompt)
            kb.doModal()
            self.query = kb.getText() if kb.isConfirmed() else None
        else:
            self.query = query
        if self.query == None or self.query == '':
            return
        url = self.persons_link % urllib.quote_plus(self.query)
        self.list = cache.get(self.tmdb_person_list, 0, url)
        for entry in self.list:
            entry.update({'action': 'movies'})
        self.addDirectory(self.list)
        return self.list
    except:
        return
def genres(self):
    """Build and display the TMDb movie-genre directory."""
    try:
        # Unsupported interface languages fall back to the API default.
        url = re.sub('language=(fi|hr|no)', '', self.genres_link)
        self.list = cache.get(self.tmdb_genre_list, 24, url)
        for entry in self.list:
            entry.update({'image': 'genres.png', 'action': 'movies'})
        self.addDirectory(self.list)
        return self.list
    except:
        return
def certifications(self):
    """Build and display the US certification (MPAA) directory."""
    try:
        self.list = cache.get(
            self.tmdb_certification_list, 24, self.certifications_link)
        for entry in self.list:
            entry.update({'image': 'certificates.png', 'action': 'movies'})
        self.addDirectory(self.list)
        return self.list
    except:
        return
def years(self):
    """List the 50 most recent release years, newest first."""
    current = int(self.datetime.strftime('%Y'))
    for y in range(current, current - 50, -1):
        self.list.append({
            'name': str(y),
            'url': self.year_link % ('%s', str(y)),
            'image': 'years.png',
            'action': 'movies',
        })
    self.addDirectory(self.list)
    return self.list
def persons(self):
    """Show popular people, merged from the first four TMDb pages."""
    collected = []
    for page in range(1, 5):
        try:
            # tmdb_person_list appends into self.list; reset per page.
            self.list = []
            collected += cache.get(
                self.tmdb_person_list, 24, self.personlist_link % str(page))
        except:
            pass
    self.list = collected
    for person in self.list:
        person.update({'action': 'movies'})
    self.addDirectory(self.list)
    return self.list
def userlists(self):
    """
    Merge the user's Trakt lists, IMDb lists and Trakt liked lists
    into one directory. Each source is best-effort: any failure is
    swallowed and the remaining sources are still collected.

    NOTE(review): self.traktlists_link / self.traktlikedlists_link /
    self.imdblists_link are commented out in __init__ in this revision,
    so these attribute lookups raise AttributeError and each section is
    silently skipped — confirm whether that is intended.
    """
    try:
        userlists = []
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        activity = trakt.getActivity()
    except:
        pass
    try:
        # User's own Trakt lists: long cache unless account activity is newer.
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        try:
            if activity > cache.timeout(self.trakt_user_list, self.traktlists_link): raise Exception()
            userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link)
        except:
            userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link)
    except:
        pass
    try:
        # IMDb lists (no caching).
        self.list = []
        if self.imdb_user == '': raise Exception()
        userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
    except:
        pass
    try:
        # Trakt liked lists, same cache policy as own lists.
        self.list = []
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        try:
            if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link): raise Exception()
            userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link)
        except:
            userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link)
    except:
        pass
    self.list = userlists
    for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
    self.addDirectory(self.list, queue=True)
    return self.list
def tmdb_list(self, url):
    """
    Fetch a paged TMDb listing and normalise it into self.list.

    'date[N]' tokens in the URL are replaced with (now - N days). Two
    result pages are merged per call; a 'next' URL for the following
    page is stored on every item ('' when no further page exists).
    Items missing required fields (title/year/poster) are skipped via
    the per-item broad except.
    """
    next = url
    # Substitute date[N] placeholders with concrete dates.
    for i in re.findall('date\[(\d+)\]', url):
        url = url.replace('date[%s]' % i, (self.datetime - datetime.timedelta(days = int(i))).strftime('%Y-%m-%d'))
    try:
        result = client.request(url % self.tmdb_key)
        result = json.loads(result)
        items = result['results']
    except:
        return
    try:
        # Eagerly merge the following page into this result set.
        page = int(result['page'])
        total = int(result['total_pages'])
        if page >= total: raise Exception()
        url2 = '%s&page=%s' % (url.split('&page=', 1)[0], str(page+1))
        result = client.request(url2 % self.tmdb_key)
        result = json.loads(result)
        items += result['results']
    except:
        pass
    try:
        # Build the 'next' link pointing past the merged pages.
        page = int(result['page'])
        total = int(result['total_pages'])
        if page >= total: raise Exception()
        if not 'page=' in url: raise Exception()
        next = '%s&page=%s' % (next.split('&page=', 1)[0], str(page+1))
        next = next.encode('utf-8')
    except:
        next = ''
    for item in items:
        try:
            title = item['title']
            title = item['title']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            # Missing release date raises here -> item skipped.
            year = item['release_date']
            year = re.compile('(\d{4})').findall(year)[-1]
            year = year.encode('utf-8')
            tmdb = item['id']
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            # Items without a poster are skipped entirely.
            poster = item['poster_path']
            if poster == '' or poster == None: raise Exception()
            else: poster = '%s%s' % (self.tmdb_poster, poster)
            poster = poster.encode('utf-8')
            fanart = item['backdrop_path']
            if fanart == '' or fanart == None: fanart = '0'
            if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
            fanart = fanart.encode('utf-8')
            premiered = item['release_date']
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'
            premiered = premiered.encode('utf-8')
            rating = str(item['vote_average'])
            if rating == '' or rating == None: rating = '0'
            rating = rating.encode('utf-8')
            votes = str(item['vote_count'])
            try: votes = str(format(int(votes),',d'))
            except: pass
            if votes == '' or votes == None: votes = '0'
            votes = votes.encode('utf-8')
            plot = item['overview']
            if plot == '' or plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            # Tagline approximated as the plot's first sentence.
            tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': '0', 'duration': '0', 'rating': rating, 'votes': votes, 'mpaa': '0', 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'code': '0', 'imdb': '0', 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
        except:
            pass
    return self.list
def tmdb_custom_list(self, url):
    """
    Fetch a TMDb custom list (the /list/<id> endpoint) and normalise
    it into self.list. Unlike tmdb_list there is no pagination, so
    'next' is always empty. Items missing required fields are skipped
    via the per-item broad except.
    """
    try:
        result = client.request(url)
        result = json.loads(result)
        items = result['items']
    except:
        return
    next = ''
    for item in items:
        try:
            title = item['title']
            title = item['title']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            # Missing release date raises here -> item skipped.
            year = item['release_date']
            year = re.compile('(\d{4})').findall(year)[-1]
            year = year.encode('utf-8')
            tmdb = item['id']
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            # Items without a poster are skipped entirely.
            poster = item['poster_path']
            if poster == '' or poster == None: raise Exception()
            else: poster = '%s%s' % (self.tmdb_poster, poster)
            poster = poster.encode('utf-8')
            fanart = item['backdrop_path']
            if fanart == '' or fanart == None: fanart = '0'
            if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
            fanart = fanart.encode('utf-8')
            premiered = item['release_date']
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'
            premiered = premiered.encode('utf-8')
            rating = str(item['vote_average'])
            if rating == '' or rating == None: rating = '0'
            rating = rating.encode('utf-8')
            votes = str(item['vote_count'])
            try: votes = str(format(int(votes),',d'))
            except: pass
            if votes == '' or votes == None: votes = '0'
            votes = votes.encode('utf-8')
            plot = item['overview']
            if plot == '' or plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            # Tagline approximated as the plot's first sentence.
            tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': '0', 'duration': '0', 'rating': rating, 'votes': votes, 'mpaa': '0', 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'code': '0', 'imdb': '0', 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
        except:
            pass
    return self.list
def favourites(self):
    """Render the user's saved movie favourites as a directory."""
    try:
        stored = favourites.getFavourites('movies')
        self.list = [entry[1] for entry in stored]
        for meta in self.list:
            if not 'name' in meta:
                meta['name'] = '%s (%s)' % (meta['title'], meta['year'])
            for field in ('title', 'name'):
                try: meta[field] = meta[field].encode('utf-8')
                except: pass
            # Backfill optional metadata with the '0' placeholder.
            for field in ('duration', 'imdb', 'tmdb', 'tvdb', 'tvrage',
                          'poster', 'banner', 'fanart'):
                if not field in meta:
                    meta[field] = '0'
        self.worker()
        self.movieDirectory(self.list)
    except:
        return
def tmdb_person_list(self, url):
    """Parse a TMDb people response into name/url/image entries."""
    try:
        response = json.loads(client.request(url))
        items = response['results']
    except:
        return
    for item in items:
        try:
            name = item['name'].encode('utf-8')
            person_url = (self.person_link % ('%s', item['id'])).encode('utf-8')
            image = ('%s%s' % (self.tmdb_image, item['profile_path'])).encode('utf-8')
            self.list.append({'name': name, 'url': person_url, 'image': image})
        except:
            pass
    return self.list
def tmdb_genre_list(self, url):
    """Parse the TMDb genre list into name/url entries."""
    try:
        response = json.loads(client.request(url))
        items = response['genres']
    except:
        return
    for item in items:
        try:
            name = item['name'].encode('utf-8')
            genre_url = (self.genre_link % ('%s', item['id'])).encode('utf-8')
            self.list.append({'name': name, 'url': genre_url})
        except:
            pass
    return self.list
def tmdb_certification_list(self, url):
    """Parse the TMDb US certification list into name/url entries."""
    try:
        response = json.loads(client.request(url))
        items = response['certifications']['US']
    except:
        return
    for item in items:
        try:
            name = item['certification'].encode('utf-8')
            cert_url = (self.certification_link % ('%s', item['certification'])).encode('utf-8')
            self.list.append({'name': name, 'url': cert_url})
        except:
            pass
    return self.list
def trakt_list(self, url):
    """
    Fetch a Trakt listing (with full metadata and images) and
    normalise it into self.list.

    The request adds extended=full,images; a 'next' URL bumps the page
    query parameter (capped at page 4). Items are skipped when they
    lack an IMDb id or their year lies in the future, via the per-item
    broad except.
    """
    try:
        # Rewrite the query string to request extended data.
        q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
        q.update({'extended': 'full,images'})
        q = (urllib.urlencode(q)).replace('%2C', ',')
        u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
        result = trakt.getTrakt(u)
        result = json.loads(result)
        items = []
        # Some endpoints wrap each movie in a {'movie': ...} envelope.
        for i in result:
            try: items.append(i['movie'])
            except: pass
        if len(items) == 0:
            items = result
    except:
        return
    try:
        # Build the next-page URL; stop pagination at page 4.
        q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
        p = str(int(q['page']) + 1)
        if p == '5': raise Exception()
        q.update({'page': p})
        q = (urllib.urlencode(q)).replace('%2C', ',')
        next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
        next = next.encode('utf-8')
    except:
        next = ''
    for item in items:
        try:
            title = item['title']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            year = item['year']
            year = re.sub('[^0-9]', '', str(year))
            year = year.encode('utf-8')
            # Skip unreleased (future-dated) titles.
            if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
            tmdb = item['ids']['tmdb']
            if tmdb == None or tmdb == '': tmdb = '0'
            tmdb = re.sub('[^0-9]', '', str(tmdb))
            tmdb = tmdb.encode('utf-8')
            # An IMDb id is mandatory; items without one are skipped.
            imdb = item['ids']['imdb']
            if imdb == None or imdb == '': raise Exception()
            imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
            imdb = imdb.encode('utf-8')
            poster = '0'
            try: poster = item['images']['poster']['medium']
            except: pass
            if poster == None or not '/posters/' in poster: poster = '0'
            poster = poster.rsplit('?', 1)[0]
            poster = poster.encode('utf-8')
            banner = poster
            try: banner = item['images']['banner']['full']
            except: pass
            if banner == None or not '/banners/' in banner: banner = '0'
            banner = banner.rsplit('?', 1)[0]
            banner = banner.encode('utf-8')
            fanart = '0'
            try: fanart = item['images']['fanart']['full']
            except: pass
            if fanart == None or not '/fanarts/' in fanart: fanart = '0'
            fanart = fanart.rsplit('?', 1)[0]
            fanart = fanart.encode('utf-8')
            premiered = item['released']
            try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
            except: premiered = '0'
            premiered = premiered.encode('utf-8')
            genre = item['genres']
            genre = [i.title() for i in genre]
            if genre == []: genre = '0'
            genre = ' / '.join(genre)
            genre = genre.encode('utf-8')
            try: duration = str(item['runtime'])
            except: duration = '0'
            if duration == None: duration = '0'
            duration = duration.encode('utf-8')
            try: rating = str(item['rating'])
            except: rating = '0'
            if rating == None or rating == '0.0': rating = '0'
            rating = rating.encode('utf-8')
            try: votes = str(item['votes'])
            except: votes = '0'
            try: votes = str(format(int(votes),',d'))
            except: pass
            if votes == None: votes = '0'
            votes = votes.encode('utf-8')
            mpaa = item['certification']
            if mpaa == None: mpaa = '0'
            mpaa = mpaa.encode('utf-8')
            plot = item['overview']
            if plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            # Fall back to the plot's first sentence when no tagline exists.
            try: tagline = item['tagline']
            except: tagline = None
            if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            elif tagline == None: tagline = '0'
            tagline = client.replaceHTMLCodes(tagline)
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': premiered, 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': '0', 'writer': '0', 'cast': '0', 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'poster': poster, 'banner': banner, 'fanart': fanart, 'next': next})
        except:
            pass
    return self.list
def trakt_user_list(self, url):
    """
    Fetch a user's Trakt lists and return name/url entries sorted
    alphabetically (ignoring leading articles).

    Robustness fix: the original swallowed request/parse failures with
    ``except: pass`` and then iterated the never-assigned ``items``
    variable, raising NameError. A failure now returns the (empty)
    accumulated list instead.
    """
    try:
        result = trakt.getTrakt(url)
        items = json.loads(result)
    except:
        return self.list
    for item in items:
        try:
            # Liked-lists responses wrap each list in {'list': ...}.
            try: item = item['list']
            except: pass
            name = item['name']
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            url = self.traktlist_link % (item['user']['username'].strip(), item['ids']['slug'])
            url = url.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass
    self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
    return self.list
def imdb_list(self, url):
    """
    Scrape an IMDb list/search results page into self.list.

    Handles both the table ('tr') and detail-view ('div.list_item')
    markups. The watchlist URL is first resolved to its underlying
    list id (cached for 8640h). A 'next' URL is derived from the page's
    pagination block. Per-item failures skip just that item.
    """
    try:
        if url == self.imdbwatchlist_link:
            # Resolve the watchlist to its ls... list id, then reuse
            # the regular list URL template.
            def imdb_watchlist_id(url):
                return re.findall('/export[?]list_id=(ls\d*)', client.request(url))[0]
            url = cache.get(imdb_watchlist_id, 8640, url)
            url = self.imdblist_link % url
        result = client.request(url)
        result = result.replace('\n','')
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'tr', attrs = {'class': '.+?'})
        items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
    except:
        return
    try:
        # Derive the next-page link; a 'laquo' arrow means we are on
        # the last page (the link would point backwards).
        next = client.parseDOM(result, 'span', attrs = {'class': 'pagination'})
        next += client.parseDOM(result, 'div', attrs = {'class': 'pagination'})
        name = client.parseDOM(next[-1], 'a')[-1]
        if 'laquo' in name: raise Exception()
        next = client.parseDOM(next, 'a', ret='href')[-1]
        next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next).query)
        next = client.replaceHTMLCodes(next)
        next = next.encode('utf-8')
    except:
        next = ''
    for item in items:
        try:
            try: title = client.parseDOM(item, 'a')[1]
            except: pass
            try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
            except: pass
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            year = client.parseDOM(item, 'span', attrs = {'class': 'year_type'})[0]
            year = re.compile('(\d{4})').findall(year)[-1]
            year = year.encode('utf-8')
            # Skip unreleased (future-dated) titles.
            if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
            imdb = client.parseDOM(item, 'a', ret='href')[0]
            imdb = 'tt' + re.sub('[^0-9]', '', imdb.rsplit('tt', 1)[-1])
            imdb = imdb.encode('utf-8')
            poster = '0'
            try: poster = client.parseDOM(item, 'img', ret='src')[0]
            except: pass
            try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
            except: pass
            if not ('_SX' in poster or '_SY' in poster): poster = '0'
            # Normalise the size suffix to a 500px-wide poster.
            poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
            poster = client.replaceHTMLCodes(poster)
            poster = poster.encode('utf-8')
            genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})
            genre = client.parseDOM(genre, 'a')
            genre = ' / '.join(genre)
            if genre == '': genre = '0'
            genre = client.replaceHTMLCodes(genre)
            genre = genre.encode('utf-8')
            try: duration = re.compile('(\d+?) mins').findall(item)[-1]
            except: duration = '0'
            duration = client.replaceHTMLCodes(duration)
            duration = duration.encode('utf-8')
            try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
            except: rating = '0'
            try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
            except: rating = '0'
            if rating == '' or rating == '-': rating = '0'
            rating = client.replaceHTMLCodes(rating)
            rating = rating.encode('utf-8')
            try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': 'rating rating-list'})[0]
            except: votes = '0'
            try: votes = re.compile('[(](.+?) votes[)]').findall(votes)[0]
            except: votes = '0'
            if votes == '': votes = '0'
            votes = client.replaceHTMLCodes(votes)
            votes = votes.encode('utf-8')
            try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
            except: mpaa = '0'
            try: mpaa = client.parseDOM(mpaa, 'span', ret='title')[0]
            except: mpaa = '0'
            if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
            mpaa = mpaa.replace('_', '-')
            mpaa = client.replaceHTMLCodes(mpaa)
            mpaa = mpaa.encode('utf-8')
            # Credits appear in different wrappers per markup variant.
            director = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
            director += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
            try: director = [i for i in director if 'Director:' in i or 'Dir:' in i][0]
            except: director = '0'
            director = director.split('With:', 1)[0].strip()
            director = client.parseDOM(director, 'a')
            director = ' / '.join(director)
            if director == '': director = '0'
            director = client.replaceHTMLCodes(director)
            director = director.encode('utf-8')
            cast = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
            cast += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
            try: cast = [i for i in cast if 'With:' in i or 'Stars:' in i][0]
            except: cast = '0'
            cast = cast.split('With:', 1)[-1].strip()
            cast = client.replaceHTMLCodes(cast)
            cast = cast.encode('utf-8')
            cast = client.parseDOM(cast, 'a')
            if cast == []: cast = '0'
            plot = '0'
            try: plot = client.parseDOM(item, 'span', attrs = {'class': 'outline'})[0]
            except: pass
            try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
            except: pass
            plot = plot.rsplit('<span>', 1)[0].strip()
            if plot == '': plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            # Tagline approximated as the plot's first sentence.
            tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'tagline': tagline, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'poster': poster, 'banner': '0', 'fanart': '0', 'next': next})
        except:
            pass
    return self.list
def imdb_user_list(self, url):
    """Scrape the user's IMDb lists overview page into self.list.

    Each entry gets a 'name', a 'url' (an imdblist_link built from the
    list id) and a 'context' key. Returns self.list sorted by name,
    ignoring a leading English article ("the "/"a ").
    """
    # fix: 'items' was undefined (NameError in the loop below) whenever
    # the request or the initial parse failed.
    items = []
    try:
        result = client.request(url)
        result = result.decode('iso-8859-1').encode('utf-8')
        items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
    except:
        pass

    for item in items:
        try:
            name = client.parseDOM(item, 'a')[0]
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')

            # The href looks like '/list/<id>/'; keep only the id.
            url = client.parseDOM(item, 'a', ret='href')[0]
            url = url.split('/list/', 1)[-1].replace('/', '')
            url = self.imdblist_link % url
            url = client.replaceHTMLCodes(url)
            url = url.encode('utf-8')

            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass

    # Sort alphabetically, ignoring a leading article.
    self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
    return self.list
def worker(self):
    """Populate metadata for every entry in self.list.

    Entries are first resolved against the local metacache; anything
    still missing is fetched concurrently by super_info() in batches of
    100 threads. New metadata accumulated in self.meta is written back
    to the cache at the end.
    """
    self.meta = []
    total = len(self.list)

    for i in range(0, total):
        self.list[i].update({'metacache': False})

    self.list = metacache.fetch(self.list, self.tmdb_lang)

    # Fetch in batches of at most 100 concurrent threads.
    for r in range(0, total, 100):
        threads = []
        for i in range(r, r + 100):
            # fix: was "i <= total", which started a thread for the
            # out-of-range index 'total' on every full batch boundary.
            if i < total:
                threads.append(workers.Thread(self.super_info, i))
        [i.start() for i in threads]
        [i.join() for i in threads]

    self.list = [i for i in self.list]

    if len(self.meta) > 0:
        metacache.insert(self.meta)
def super_info(self, i):
    """Enrich self.list[i] in place with full metadata from TMDb/IMDb.

    Skips entries already served from the metacache. '0' is the sentinel
    for "unknown" throughout; each field is only written back when it is
    non-empty. Every successful lookup is appended to self.meta so that
    worker() can persist it to the cache. All failures are swallowed.
    """
    try:
        # Already populated from the local cache -- nothing to do.
        if self.list[i]['metacache'] == True: raise Exception()
        print ("SUPERINFO INITIALIZED")

        try: tmdb = self.list[i]['tmdb']
        except: tmdb = '0'

        # Without a TMDb id there is nothing to look up.
        if not tmdb == '0': url = self.tmdb_info_link % tmdb
        else: raise Exception()

        item = client.request(url, timeout='10')
        item = json.loads(item)

        title = item['title']
        if not title == '0': self.list[i].update({'title': title})

        year = item['release_date']
        try: year = re.compile('(\d{4})').findall(year)[0]
        except: year = '0'
        if year == '' or year == None: year = '0'
        year = year.encode('utf-8')
        if not year == '0': self.list[i].update({'year': year})

        tmdb = item['id']
        if tmdb == '' or tmdb == None: tmdb = '0'
        tmdb = re.sub('[^0-9]', '', str(tmdb))
        tmdb = tmdb.encode('utf-8')
        if not tmdb == '0': self.list[i].update({'tmdb': tmdb})

        imdb = item['imdb_id']
        if imdb == '' or imdb == None: imdb = '0'
        imdb = imdb.encode('utf-8')
        if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})

        poster = item['poster_path']
        if poster == '' or poster == None: poster = '0'
        if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
        poster = poster.encode('utf-8')
        if not poster == '0': self.list[i].update({'poster': poster})

        fanart = item['backdrop_path']
        if fanart == '' or fanart == None: fanart = '0'
        if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
        fanart = fanart.encode('utf-8')
        # Only fill fanart in when the listing did not already have one.
        if not fanart == '0' and self.list[i]['fanart'] == '0': self.list[i].update({'fanart': fanart})

        premiered = item['release_date']
        try: premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
        except: premiered = '0'
        if premiered == '' or premiered == None: premiered = '0'
        premiered = premiered.encode('utf-8')
        if not premiered == '0': self.list[i].update({'premiered': premiered})

        studio = item['production_companies']
        try: studio = [x['name'] for x in studio][0]
        except: studio = '0'
        if studio == '' or studio == None: studio = '0'
        studio = studio.encode('utf-8')
        if not studio == '0': self.list[i].update({'studio': studio})

        genre = item['genres']
        try: genre = [x['name'] for x in genre]
        except: genre = '0'
        if genre == '' or genre == None or genre == []: genre = '0'
        genre = ' / '.join(genre)
        genre = genre.encode('utf-8')
        if not genre == '0': self.list[i].update({'genre': genre})

        try: duration = str(item['runtime'])
        except: duration = '0'
        if duration == '' or duration == None: duration = '0'
        duration = duration.encode('utf-8')
        if not duration == '0': self.list[i].update({'duration': duration})

        rating = str(item['vote_average'])
        if rating == '' or rating == None: rating = '0'
        rating = rating.encode('utf-8')
        if not rating == '0': self.list[i].update({'rating': rating})

        votes = str(item['vote_count'])
        try: votes = str(format(int(votes),',d'))
        except: pass
        if votes == '' or votes == None: votes = '0'
        votes = votes.encode('utf-8')
        if not votes == '0': self.list[i].update({'votes': votes})

        mpaa = item['releases']['countries']
        try: mpaa = [x for x in mpaa if not x['certification'] == '']
        except: mpaa = '0'
        # Prefer the US certification, fall back to any other country.
        try: mpaa = ([x for x in mpaa if x['iso_3166_1'].encode('utf-8') == 'US'] + [x for x in mpaa if not x['iso_3166_1'].encode('utf-8') == 'US'])[0]['certification']
        except: mpaa = '0'
        mpaa = mpaa.encode('utf-8')
        if not mpaa == '0': self.list[i].update({'mpaa': mpaa})

        director = item['credits']['crew']
        try: director = [x['name'] for x in director if x['job'].encode('utf-8') == 'Director']
        except: director = '0'
        if director == '' or director == None or director == []: director = '0'
        director = ' / '.join(director)
        director = director.encode('utf-8')
        if not director == '0': self.list[i].update({'director': director})

        writer = item['credits']['crew']
        try: writer = [x['name'] for x in writer if x['job'].encode('utf-8') in ['Writer', 'Screenplay']]
        except: writer = '0'
        # De-duplicate while preserving order.
        try: writer = [x for n,x in enumerate(writer) if x not in writer[:n]]
        except: writer = '0'
        if writer == '' or writer == None or writer == []: writer = '0'
        writer = ' / '.join(writer)
        writer = writer.encode('utf-8')
        if not writer == '0': self.list[i].update({'writer': writer})

        cast = item['credits']['cast']
        try: cast = [(x['name'].encode('utf-8'), x['character'].encode('utf-8')) for x in cast]
        except: cast = []
        if len(cast) > 0: self.list[i].update({'cast': cast})

        plot = item['overview']
        if plot == '' or plot == None: plot = '0'
        plot = plot.encode('utf-8')
        if not plot == '0': self.list[i].update({'plot': plot})

        tagline = item['tagline']
        # Fall back to the plot's first sentence when TMDb has no tagline.
        if (tagline == '' or tagline == None) and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
        elif tagline == '' or tagline == None: tagline = '0'
        try: tagline = tagline.encode('utf-8')
        except: pass
        if not tagline == '0': self.list[i].update({'tagline': tagline})

        ############# IMDB INFOS #################
        try:
            # fix: was "if not imdb == None or imdb == '0'", which due to
            # operator precedence was always True.
            if not (imdb == None or imdb == '0'):
                url = self.imdbinfo % imdb
                item = client.request(url, timeout='10')
                item = json.loads(item)

                plot2 = item['Plot']
                # fix: the original condition was inverted and assigned
                # "plot = plot" (a no-op), so the IMDb plot was never used.
                if not (plot2 == '' or plot2 == None): plot = plot2
                plot = plot.encode('utf-8')
                if not plot == '0': self.list[i].update({'plot': plot})

                rating2 = str(item['imdbRating'])
                # fix: same inverted no-op as above.
                if not (rating2 == '' or rating2 == None): rating = rating2
                rating = rating.encode('utf-8')
                if not rating == '0': self.list[i].update({'rating': rating})

                votes2 = str(item['imdbVotes'])
                try: votes2 = str(votes2)
                except: pass
                # fix: inverted no-op; also write back the merged value
                # instead of the possibly-empty votes2.
                if not (votes2 == '' or votes2 == None): votes = votes2
                votes = votes.encode('utf-8')
                if not votes == '0': self.list[i].update({'votes': votes})
        except:
            pass

        # Persist for metacache (fix: removed duplicate 'tmdb' key that
        # shadowed the first occurrence in the original dict literal).
        self.meta.append({'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.tmdb_lang, 'item': {'title': title, 'year': year, 'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline}})
    except:
        pass
def movieDirectory(self, items):
    """Render *items* as playable movie entries in the Kodi directory.

    Builds one list item per movie (artwork, info labels, context menu),
    appends a "next page" entry when items[0]['next'] is set, and then
    finalises the directory listing.
    """
    # Nothing to show: leave the plugin idle and stop this invocation.
    if items == None or len(items) == 0: control.idle() ; sys.exit()

    sysaddon = sys.argv[0]
    syshandle = int(sys.argv[1])

    addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
    addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')

    # Only referenced by the commented-out trakt context-menu code below.
    traktCredentials = trakt.getTraktCredentialsInfo()

    # Kodi builds without ListItem.getArt need the old Info dialog action.
    try: isOld = False ; control.item().getArt('type')
    except: isOld = True

    isEstuary = True if 'estuary' in control.skin else False

    isPlayable = 'true' if not 'plugin' in control.infoLabel('Container.PluginName') else 'false'

    indicators = playcount.getMovieIndicators()

    playbackMenu = control.lang(32063).encode('utf-8') if control.setting('hosts.mode') == '2' else control.lang(32064).encode('utf-8')

    # watchedMenu = control.lang(32068).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32066).encode('utf-8')
    # unwatchedMenu = control.lang(32069).encode('utf-8') if trakt.getTraktIndicatorsInfo() == True else control.lang(32067).encode('utf-8')
    watchedMenu = control.lang(32066).encode('utf-8')
    unwatchedMenu = control.lang(32067).encode('utf-8')

    queueMenu = control.lang(32065).encode('utf-8')
    traktManagerMenu = control.lang(32070).encode('utf-8')
    nextMenu = control.lang(32053).encode('utf-8')

    for i in items:
        try:
            if not 'originaltitle' in i: i['originaltitle'] = '%s' %(i['title'])
            label = '%s' % (i['title'])

            imdb, title, year = i['imdb'], i['originaltitle'], i['year']

            sysname = urllib.quote_plus('%s (%s)' % (title, year))
            systitle = urllib.quote_plus(title)

            poster, banner, fanart = i['poster'], i['banner'], i['fanart']
            # Banner falls back through fanart -> poster -> addon art.
            if banner == '0' and not fanart == '0': banner = fanart
            elif banner == '0' and not poster == '0': banner = poster
            if poster == '0': poster = addonPoster
            if banner == '0': banner = addonBanner

            # Strip '0' sentinel values before handing the dict to Kodi.
            meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
            meta.update({'mediatype': 'movie'})
            meta.update({'trailer': '%s?action=trailer&name=%s' % (sysaddon, sysname)})
            if i['duration'] == '0': meta.update({'duration': '120'})
            # Kodi expects the duration in seconds, scraped value is minutes.
            try: meta.update({'duration': str(int(meta['duration']) * 60)})
            except: pass
            try: meta.update({'genre': cleangenre.lang(meta['genre'], self.lang)})
            except: pass
            if isEstuary == True:
                try: del meta['cast']
                except: pass

            sysmeta = urllib.quote_plus(json.dumps(meta))

            url = '%s?action=play&title=%s&year=%s&imdb=%s&meta=%s&t=%s' % (sysaddon, systitle, year, imdb, sysmeta, self.systime)
            sysurl = urllib.quote_plus(url)

            path = '%s?action=play&title=%s&year=%s&imdb=%s' % (sysaddon, systitle, year, imdb)

            cm = []
            # cm.append((queueMenu, 'RunPlugin(%s?action=queueItem)' % sysaddon))
            cm.append(('Trailer', 'RunPlugin(%s?action=trailer&name=%s)' % (sysaddon, sysname)))
            cm.append((playbackMenu, 'RunPlugin(%s?action=alterSources&url=%s&meta=%s)' % (sysaddon, sysurl, sysmeta)))
            # NOTE(review): 'action' is not defined in this method; it is
            # presumably a module-level global set by the router -- verify.
            if not action == 'movieFavourites':cm.append(('Add to Watchlist', 'RunPlugin(%s?action=addFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))
            if action == 'movieFavourites': cm.append(('Remove From Watchlist', 'RunPlugin(%s?action=deleteFavourite&meta=%s&content=movies)' % (sysaddon, sysmeta)))

            try:
                # Toggle watched/unwatched context entry by current overlay.
                overlay = int(playcount.getMovieOverlay(indicators, imdb))
                if overlay == 7:
                    cm.append((unwatchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=6)' % (sysaddon, imdb)))
                    meta.update({'playcount': 1, 'overlay': 7})
                else:
                    cm.append((watchedMenu, 'RunPlugin(%s?action=moviePlaycount&imdb=%s&query=7)' % (sysaddon, imdb)))
                    meta.update({'playcount': 0, 'overlay': 6})
            except:
                pass

            # if traktCredentials == True:
            #     cm.append((traktManagerMenu, 'RunPlugin(%s?action=traktManager&name=%s&imdb=%s&content=movie)' % (sysaddon, sysname, imdb)))

            if isOld == True:
                cm.append((control.lang2(19033).encode('utf-8'), 'Action(Info)'))

            item = control.item(label=label)
            item.setArt({'icon': poster, 'thumb': poster, 'poster': poster, 'banner': banner})
            if settingFanart == 'true' and not fanart == '0':
                item.setProperty('Fanart_Image', fanart)
            elif not addonFanart == None:
                item.setProperty('Fanart_Image', addonFanart)
            item.addContextMenuItems(cm)
            item.setProperty('IsPlayable', isPlayable)
            item.setInfo(type='Video', infoLabels = meta)

            control.addItem(handle=syshandle, url=url, listitem=item, isFolder=False)
        except:
            pass

    try:
        # "Next page" entry driven by the scraped pagination URL.
        url = items[0]['next']
        if url == '': raise Exception()

        icon = control.addonNext()
        url = '%s?action=moviePage&url=%s' % (sysaddon, urllib.quote_plus(url))

        item = control.item(label=nextMenu)
        item.setArt({'icon': icon, 'thumb': icon, 'poster': icon, 'banner': icon})
        if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)

        control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
    except:
        pass

    control.content(syshandle, 'movies')
    # control.do_block_check(False)
    control.directory(syshandle, cacheToDisc=True)
    views.setView('movies', {'skin.confluence': 500})
def addDirectory(self, items, queue=False):
    """Render a static menu: one folder item per dict in *items*.

    Each entry needs 'name', 'image' and 'action'; 'url' is optional.
    Does nothing when *items* is empty or None.
    """
    if items == None or len(items) == 0:
        return

    sysaddon = sys.argv[0]
    syshandle = int(sys.argv[1])

    # Read even though unused here, to preserve the settings access.
    isPlayable = False if control.setting('autoplay') == 'false' and control.setting('hosts.mode') == '1' else True

    addonFanart = control.addonFanart()
    addonThumb = control.addonThumb()
    artPath = control.artPath()

    for entry in items:
        try:
            # Menu labels may be language-string ids or literal names.
            try:
                label = control.lang(entry['name']).encode('utf-8')
            except:
                label = entry['name']

            image = entry['image']
            if image.startswith('http://'):
                thumb = image
            elif not artPath == None:
                thumb = os.path.join(artPath, image)
            else:
                thumb = addonThumb

            url = '%s?action=%s' % (sysaddon, entry['action'])
            try:
                url += '&url=%s' % urllib.quote_plus(entry['url'])
            except:
                pass

            item = control.item(label=label, iconImage=thumb, thumbnailImage=thumb)
            item.addContextMenuItems([], replaceItems=False)
            if not addonFanart == None:
                item.setProperty('Fanart_Image', addonFanart)

            control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
        except:
            pass

    control.directory(syshandle, cacheToDisc=True)
| gpl-2.0 |
githubmlai/numpy | numpy/lib/tests/test_io.py | 14 | 72608 | from __future__ import division, absolute_import, print_function
import sys
import gzip
import os
import threading
from tempfile import mkstemp, NamedTemporaryFile
import time
import warnings
import gc
from io import BytesIO
from datetime import datetime
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import (ConverterError, ConverterLockError,
ConversionWarning)
from numpy.compat import asbytes, bytes, unicode
from nose import SkipTest
from numpy.ma.testutils import (
TestCase, assert_equal, assert_array_equal, assert_allclose,
assert_raises, assert_raises_regex, run_module_suite
)
from numpy.testing import assert_warns, assert_, build_err_msg
from numpy.testing.utils import tempdir
class TextIO(BytesIO):
    """Helper IO class.

    A BytesIO whose write methods accept plain strings, encoding them to
    bytes on the way in; reads still return bytes. This makes it easier
    to emulate files opened in binary mode without explicitly converting
    the test data to bytes first.
    """

    def __init__(self, s=""):
        super(TextIO, self).__init__(asbytes(s))

    def write(self, s):
        super(TextIO, self).write(asbytes(s))

    def writelines(self, lines):
        super(TextIO, self).writelines([asbytes(line) for line in lines])
# Major/minor version of the running interpreter.
MAJVER, MINVER = sys.version_info[:2]
# True when the platform's native integer/pointer size is 64 bits.
IS_64BIT = sys.maxsize > 2**32
def strptime(s, fmt=None):
    """Parse *s* with *fmt* into a ``datetime`` (year/month/day only).

    This function is available in the datetime module only from
    Python >= 2.5; on Python 3 the byte string is decoded as latin1
    before parsing.
    """
    if sys.version_info[0] < 3:
        parsed = time.strptime(s, fmt)
    else:
        parsed = time.strptime(s.decode('latin1'), fmt)
    return datetime(*parsed[:3])
class RoundtripTest(object):
    """Mixin driving save -> load round-trip checks.

    Subclasses override :meth:`roundtrip` to select the save function
    and to assert that ``self.arr`` matches ``self.arr_reloaded``.
    """

    def roundtrip(self, save_func, *args, **kwargs):
        """
        save_func : callable
            Function used to save arrays to file.
        file_on_disk : bool
            If true, store the file on disk, instead of in a
            string buffer.
        save_kwds : dict
            Parameters passed to `save_func`.
        load_kwds : dict
            Parameters passed to `numpy.load`.
        args : tuple of arrays
            Arrays stored to file.
        """
        save_kwds = kwargs.get('save_kwds', {})
        load_kwds = kwargs.get('load_kwds', {})
        file_on_disk = kwargs.get('file_on_disk', False)
        if file_on_disk:
            target_file = NamedTemporaryFile(delete=False)
            load_file = target_file.name
        else:
            target_file = BytesIO()
            load_file = target_file

        try:
            arr = args

            save_func(target_file, *arr, **save_kwds)
            target_file.flush()
            target_file.seek(0)

            # Windows cannot reopen a file that is still held open, so
            # release it before np.load reopens it by name.
            if sys.platform == 'win32' and not isinstance(target_file, BytesIO):
                target_file.close()

            arr_reloaded = np.load(load_file, **load_kwds)

            self.arr = arr
            self.arr_reloaded = arr_reloaded
        finally:
            if not isinstance(target_file, BytesIO):
                target_file.close()
                # holds an open file descriptor so it can't be deleted on win
                # NOTE(review): arr_reloaded is unbound here if save_func or
                # np.load raised above -- that would mask the real error.
                if not isinstance(arr_reloaded, np.lib.npyio.NpzFile):
                    os.remove(target_file.name)

    def check_roundtrips(self, a):
        # Exercise in-memory and on-disk targets, C and Fortran order.
        self.roundtrip(a)
        self.roundtrip(a, file_on_disk=True)
        self.roundtrip(np.asfortranarray(a))
        self.roundtrip(np.asfortranarray(a), file_on_disk=True)
        if a.shape[0] > 1:
            # neither C nor Fortran contiguous for 2D arrays or more
            self.roundtrip(np.asfortranarray(a)[1:])
            self.roundtrip(np.asfortranarray(a)[1:], file_on_disk=True)

    def test_array(self):
        # Empty, float, int and both complex precisions.
        a = np.array([], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], float)
        self.check_roundtrips(a)

        a = np.array([[1, 2], [3, 4]], int)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.csingle)
        self.check_roundtrips(a)

        a = np.array([[1 + 5j, 2 + 6j], [3 + 7j, 4 + 8j]], dtype=np.cdouble)
        self.check_roundtrips(a)

    def test_array_object(self):
        if sys.version_info[:2] >= (2, 7):
            a = np.array([], object)
            self.check_roundtrips(a)

            a = np.array([[1, 2], [3, 4]], object)
            self.check_roundtrips(a)
        # Fails with UnpicklingError: could not find MARK on Python 2.6

    def test_1D(self):
        a = np.array([1, 2, 3, 4], int)
        self.roundtrip(a)

    @np.testing.dec.knownfailureif(sys.platform == 'win32', "Fail on Win32")
    def test_mmap(self):
        # Memory-mapped loading of both contiguity orders.
        a = np.array([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

        a = np.asfortranarray([[1, 2.5], [4, 7.3]])
        self.roundtrip(a, file_on_disk=True, load_kwds={'mmap_mode': 'r'})

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        self.check_roundtrips(a)

    def test_format_2_0(self):
        # A dtype with a very long header forces the 2.0 file format,
        # which emits a UserWarning.
        dt = [(("%d" % i) * 100, float) for i in range(500)]
        a = np.ones(1000, dtype=dt)
        with warnings.catch_warnings(record=True):
            warnings.filterwarnings('always', '', UserWarning)
            self.check_roundtrips(a)
class TestSaveLoad(RoundtripTest, TestCase):
    """Round-trip tests for np.save / np.load with a single array."""

    def roundtrip(self, *args, **kwargs):
        # Delegate the save/load cycle, then compare the (single) saved
        # array against what came back, including dtype and contiguity.
        RoundtripTest.roundtrip(self, np.save, *args, **kwargs)
        saved, reloaded = self.arr[0], self.arr_reloaded
        assert_equal(saved, reloaded)
        assert_equal(saved.dtype, reloaded.dtype)
        assert_equal(saved.flags.fnc, reloaded.flags.fnc)
class TestSavezLoad(RoundtripTest, TestCase):
    """Round-trip tests for np.savez / np.load (multi-array archives)."""

    def roundtrip(self, *args, **kwargs):
        # Save all arrays, then compare each against its 'arr_N' member.
        RoundtripTest.roundtrip(self, np.savez, *args, **kwargs)
        try:
            for n, arr in enumerate(self.arr):
                reloaded = self.arr_reloaded['arr_%d' % n]
                assert_equal(arr, reloaded)
                assert_equal(arr.dtype, reloaded.dtype)
                assert_equal(arr.flags.fnc, reloaded.flags.fnc)
        finally:
            # delete tempfile, must be done here on windows
            if self.arr_reloaded.fid:
                self.arr_reloaded.fid.close()
                os.remove(self.arr_reloaded.fid.name)

    @np.testing.dec.skipif(not IS_64BIT, "Works only with 64bit systems")
    @np.testing.dec.slow
    def test_big_arrays(self):
        # An array with more than 2**31 elements round-trips via .npz.
        L = (1 << 31) + 100000
        a = np.empty(L, dtype=np.uint8)
        with tempdir(prefix="numpy_test_big_arrays_") as tmpdir:
            tmp = os.path.join(tmpdir, "file.npz")
            np.savez(tmp, a=a)
            del a
            npfile = np.load(tmp)
            a = npfile['a']
            npfile.close()

    def test_multiple_arrays(self):
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        self.roundtrip(a, b)

    def test_named_arrays(self):
        # Keyword arguments to savez become the archive member names.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(a, l['file_a'])
        assert_equal(b, l['file_b'])

    def test_BagObj(self):
        # Members are also reachable as attributes of the .f bag object.
        a = np.array([[1, 2], [3, 4]], float)
        b = np.array([[1 + 2j, 2 + 7j], [3 - 6j, 4 + 12j]], complex)
        c = BytesIO()
        np.savez(c, file_a=a, file_b=b)
        c.seek(0)
        l = np.load(c)
        assert_equal(sorted(dir(l.f)), ['file_a','file_b'])
        assert_equal(a, l.f.file_a)
        assert_equal(b, l.f.file_b)

    def test_savez_filename_clashes(self):
        # Test that issue #852 is fixed
        # and savez functions in multithreaded environment
        def writer(error_list):
            # Each thread saves to its own temp file; any OSError raised
            # by savez (e.g. clashing internal temporaries) is collected.
            fd, tmp = mkstemp(suffix='.npz')
            os.close(fd)
            try:
                arr = np.random.randn(500, 500)
                try:
                    np.savez(tmp, arr=arr)
                except OSError as err:
                    error_list.append(err)
            finally:
                os.remove(tmp)

        errors = []
        threads = [threading.Thread(target=writer, args=(errors,))
                   for j in range(3)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        if errors:
            raise AssertionError(errors)

    def test_not_closing_opened_fid(self):
        # Test that issue #2178 is fixed:
        # verify could seek on 'loaded' file
        fd, tmp = mkstemp(suffix='.npz')
        os.close(fd)
        try:
            fp = open(tmp, 'wb')
            np.savez(fp, data='LOVELY LOAD')
            fp.close()

            fp = open(tmp, 'rb', 10000)
            fp.seek(0)
            assert_(not fp.closed)
            _ = np.load(fp)['data']
            assert_(not fp.closed)
            # must not get closed by .load(opened fp)
            fp.seek(0)
            assert_(not fp.closed)
        finally:
            fp.close()
            os.remove(tmp)

    def test_closing_fid(self):
        # Test that issue #1517 (too many opened files) remains closed
        # It might be a "weak" test since failed to get triggered on
        # e.g. Debian sid of 2012 Jul 05 but was reported to
        # trigger the failure on Ubuntu 10.04:
        # http://projects.scipy.org/numpy/ticket/1517#comment:2
        fd, tmp = mkstemp(suffix='.npz')
        os.close(fd)

        try:
            fp = open(tmp, 'wb')
            np.savez(fp, data='LOVELY LOAD')
            fp.close()
            # We need to check if the garbage collector can properly close
            # numpy npz file returned by np.load when their reference count
            # goes to zero. Python 3 running in debug mode raises a
            # ResourceWarning when file closing is left to the garbage
            # collector, so we catch the warnings. Because ResourceWarning
            # is unknown in Python < 3.x, we take the easy way out and
            # catch all warnings.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                for i in range(1, 1025):
                    try:
                        np.load(tmp)["data"]
                    except Exception as e:
                        msg = "Failed to load data from a file: %s" % e
                        raise AssertionError(msg)
        finally:
            os.remove(tmp)

    def test_closing_zipfile_after_load(self):
        # Check that zipfile owns file and can close it.
        # This needs to pass a file name to load for the
        # test.
        with tempdir(prefix="numpy_test_closing_zipfile_after_load_") as tmpdir:
            fd, tmp = mkstemp(suffix='.npz', dir=tmpdir)
            os.close(fd)
            np.savez(tmp, lab='place holder')
            data = np.load(tmp)
            fp = data.zip.fp
            data.close()
            assert_(fp.closed)
class TestSaveTxt(TestCase):
    """Tests for np.savetxt: formats, delimiters, headers/footers and
    custom file-like targets."""

    def test_array(self):
        a = np.array([[1, 2], [3, 4]], float)
        fmt = "%.18e"
        c = BytesIO()
        np.savetxt(c, a, fmt=fmt)
        c.seek(0)
        assert_equal(c.readlines(),
                     [asbytes((fmt + ' ' + fmt + '\n') % (1, 2)),
                      asbytes((fmt + ' ' + fmt + '\n') % (3, 4))])

        a = np.array([[1, 2], [3, 4]], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])

    def test_1D(self):
        # A 1-D array is written one element per line.
        a = np.array([1, 2, 3, 4], int)
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'1\n', b'2\n', b'3\n', b'4\n'])

    def test_record(self):
        a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        c = BytesIO()
        np.savetxt(c, a, fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])

    def test_delimiter(self):
        a = np.array([[1., 2.], [3., 4.]])
        c = BytesIO()
        np.savetxt(c, a, delimiter=',', fmt='%d')
        c.seek(0)
        assert_equal(c.readlines(), [b'1,2\n', b'3,4\n'])

    def test_format(self):
        a = np.array([(1, 2), (3, 4)])
        c = BytesIO()
        # Sequence of formats
        np.savetxt(c, a, fmt=['%02d', '%3.1f'])
        c.seek(0)
        assert_equal(c.readlines(), [b'01 2.0\n', b'03 4.0\n'])

        # A single multiformat string
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Specify delimiter, should be overiden
        c = BytesIO()
        np.savetxt(c, a, fmt='%02d : %3.1f', delimiter=',')
        c.seek(0)
        lines = c.readlines()
        assert_equal(lines, [b'01 : 2.0\n', b'03 : 4.0\n'])

        # Bad fmt, should raise a ValueError
        c = BytesIO()
        assert_raises(ValueError, np.savetxt, c, a, fmt=99)

    def test_header_footer(self):
        """
        Test the functionality of the header and footer keyword argument.
        """
        c = BytesIO()
        a = np.array([(1, 2), (3, 4)], dtype=np.int)
        test_header_footer = 'Test header / footer'
        # Test the header keyword argument
        np.savetxt(c, a, fmt='%1d', header=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('# ' + test_header_footer + '\n1 2\n3 4\n'))
        # Test the footer keyword argument
        c = BytesIO()
        np.savetxt(c, a, fmt='%1d', footer=test_header_footer)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n# ' + test_header_footer + '\n'))
        # Test the commentstr keyword argument used on the header
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   header=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes(commentstr + test_header_footer + '\n' + '1 2\n3 4\n'))
        # Test the commentstr keyword argument used on the footer
        c = BytesIO()
        commentstr = '% '
        np.savetxt(c, a, fmt='%1d',
                   footer=test_header_footer, comments=commentstr)
        c.seek(0)
        assert_equal(c.read(),
                     asbytes('1 2\n3 4\n' + commentstr + test_header_footer + '\n'))

    def test_file_roundtrip(self):
        # savetxt -> loadtxt through a real file on disk.
        f, name = mkstemp()
        os.close(f)
        try:
            a = np.array([(1, 2), (3, 4)])
            np.savetxt(name, a)
            b = np.loadtxt(name)
            assert_array_equal(a, b)
        finally:
            os.unlink(name)

    def test_complex_arrays(self):
        ncols = 2
        nrows = 2
        a = np.zeros((ncols, nrows), dtype=np.complex128)
        re = np.pi
        im = np.e
        a[:] = re + 1.0j * im

        # One format only
        c = BytesIO()
        np.savetxt(c, a, fmt=' %+.3e')
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n',
             b' ( +3.142e+00+ +2.718e+00j) ( +3.142e+00+ +2.718e+00j)\n'])

        # One format for each real and imaginary part
        c = BytesIO()
        np.savetxt(c, a, fmt=' %+.3e' * 2 * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n',
             b' +3.142e+00 +2.718e+00 +3.142e+00 +2.718e+00\n'])

        # One format for each complex number
        c = BytesIO()
        np.savetxt(c, a, fmt=['(%.3e%+.3ej)'] * ncols)
        c.seek(0)
        lines = c.readlines()
        assert_equal(
            lines,
            [b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n',
             b'(3.142e+00+2.718e+00j) (3.142e+00+2.718e+00j)\n'])

    def test_custom_writer(self):
        # Any object exposing a write() method works as the target.
        class CustomWriter(list):
            def write(self, text):
                self.extend(text.split(b'\n'))

        w = CustomWriter()
        a = np.array([(1, 2), (3, 4)])
        np.savetxt(w, a)
        b = np.loadtxt(w)
        assert_array_equal(a, b)
class TestLoadTxt(TestCase):
def test_record(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=[('x', np.int32), ('y', np.int32)])
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_array_equal(x, a)
d = TextIO()
d.write('M 64.0 75.0\nF 25.0 60.0')
d.seek(0)
mydescriptor = {'names': ('gender', 'age', 'weight'),
'formats': ('S1', 'i4', 'f4')}
b = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=mydescriptor)
y = np.loadtxt(d, dtype=mydescriptor)
assert_array_equal(y, b)
def test_array(self):
c = TextIO()
c.write('1 2\n3 4')
c.seek(0)
x = np.loadtxt(c, dtype=np.int)
a = np.array([[1, 2], [3, 4]], int)
assert_array_equal(x, a)
c.seek(0)
x = np.loadtxt(c, dtype=float)
a = np.array([[1, 2], [3, 4]], float)
assert_array_equal(x, a)
def test_1D(self):
c = TextIO()
c.write('1\n2\n3\n4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int)
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
c = TextIO()
c.write('1,2,3,4\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',')
a = np.array([1, 2, 3, 4], int)
assert_array_equal(x, a)
def test_missing(self):
c = TextIO()
c.write('1,2,3,,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
a = np.array([1, 2, 3, -999, 5], int)
assert_array_equal(x, a)
def test_converters_with_usecols(self):
c = TextIO()
c.write('1,2,3,,5\n6,7,8,9,10\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
a = np.array([[2, -999], [7, 9]], int)
assert_array_equal(x, a)
def test_comments_unicode(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=unicode('#'))
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_byte(self):
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=b'#')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_comments_multiple(self):
c = TextIO()
c.write('# comment\n1,2,3\n@ comment2\n4,5,6 // comment3')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments=['#', '@', '//'])
a = np.array([[1, 2, 3], [4, 5, 6]], int)
assert_array_equal(x, a)
def test_comments_multi_chars(self):
c = TextIO()
c.write('/* comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
comments='/*')
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
# Check that '/*' is not transformed to ['/', '*']
c = TextIO()
c.write('*/ comment\n1,2,3,5\n')
c.seek(0)
assert_raises(ValueError, np.loadtxt, c, dtype=int, delimiter=',',
comments='/*')
def test_skiprows(self):
c = TextIO()
c.write('comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
c = TextIO()
c.write('# comment\n1,2,3,5\n')
c.seek(0)
x = np.loadtxt(c, dtype=int, delimiter=',',
skiprows=1)
a = np.array([1, 2, 3, 5], int)
assert_array_equal(x, a)
def test_usecols(self):
a = np.array([[1, 2], [3, 4]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1,))
assert_array_equal(x, a[:, 1])
a = np.array([[1, 2, 3], [3, 4, 5]], float)
c = BytesIO()
np.savetxt(c, a)
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=(1, 2))
assert_array_equal(x, a[:, 1:])
# Testing with arrays instead of tuples.
c.seek(0)
x = np.loadtxt(c, dtype=float, usecols=np.array([1, 2]))
assert_array_equal(x, a[:, 1:])
# Checking with dtypes defined converters.
data = '''JOE 70.1 25.3
BOB 60.5 27.9
'''
c = TextIO(data)
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
arr = np.loadtxt(c, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(arr['stid'], [b"JOE", b"BOB"])
assert_equal(arr['temp'], [25.3, 27.9])
def test_fancy_dtype(self):
c = TextIO()
c.write('1,2,3.0\n4,5,6.0\n')
c.seek(0)
dt = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
x = np.loadtxt(c, dtype=dt, delimiter=',')
a = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dt)
assert_array_equal(x, a)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
def test_3d_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6 7 8 9 10 11 12")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 2, 3))])
x = np.loadtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0,
[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])],
dtype=dt)
assert_array_equal(x, a)
def test_empty_file(self):
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="loadtxt: Empty input file:")
c = TextIO()
x = np.loadtxt(c)
assert_equal(x.shape, (0,))
x = np.loadtxt(c, dtype=np.int64)
assert_equal(x.shape, (0,))
assert_(x.dtype == np.int64)
def test_unused_converter(self):
c = TextIO()
c.writelines(['1 21\n', '3 42\n'])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_array_equal(data, [21, 42])
c.seek(0)
data = np.loadtxt(c, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_array_equal(data, [33, 66])
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.loadtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
def test_uint64_type(self):
tgt = (9223372043271415339, 9223372043271415853)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.uint64)
assert_equal(res, tgt)
def test_int64_type(self):
tgt = (-9223372036854775807, 9223372036854775807)
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.int64)
assert_equal(res, tgt)
def test_from_float_hex(self):
# IEEE doubles and floats only, otherwise the float32
# conversion may fail.
tgt = np.logspace(-10, 10, 5).astype(np.float32)
tgt = np.hstack((tgt, -tgt)).astype(np.float)
inp = '\n'.join(map(float.hex, tgt))
c = TextIO()
c.write(inp)
for dt in [np.float, np.float32]:
c.seek(0)
res = np.loadtxt(c, dtype=dt)
assert_equal(res, tgt, err_msg="%s" % dt)
def test_from_complex(self):
tgt = (complex(1, 1), complex(1, -1))
c = TextIO()
c.write("%s %s" % tgt)
c.seek(0)
res = np.loadtxt(c, dtype=np.complex)
assert_equal(res, tgt)
def test_universal_newline(self):
f, name = mkstemp()
os.write(f, b'1 21\r3 42\r')
os.close(f)
try:
data = np.loadtxt(name)
assert_array_equal(data, [[1, 21], [3, 42]])
finally:
os.unlink(name)
    def test_empty_field_after_tab(self):
        "A trailing tab leaves an empty (or blank) last field on each row."
        c = TextIO()
        c.write('1 \t2 \t3\tstart \n4\t5\t6\t \n7\t8\t9.5\t')
        c.seek(0)
        dt = {'names': ('x', 'y', 'z', 'comment'),
              'formats': ('<i4', '<i4', '<f4', '|S8')}
        x = np.loadtxt(c, dtype=dt, delimiter='\t')
        # Row 1 has text, row 2 a single space, row 3 nothing after the tab.
        a = np.array([b'start ', b'  ', b''])
        assert_array_equal(x['comment'], a)
def test_structure_unpack(self):
txt = TextIO("M 21 72\nF 35 58")
dt = {'names': ('a', 'b', 'c'), 'formats': ('|S1', '<i4', '<f4')}
a, b, c = np.loadtxt(txt, dtype=dt, unpack=True)
assert_(a.dtype.str == '|S1')
assert_(b.dtype.str == '<i4')
assert_(c.dtype.str == '<f4')
assert_array_equal(a, np.array([b'M', b'F']))
assert_array_equal(b, np.array([21, 35]))
assert_array_equal(c, np.array([72., 58.]))
    def test_ndmin_keyword(self):
        "Check validation and shape guarantees of loadtxt's ndmin keyword."
        c = TextIO()
        c.write('1,2,3\n4,5,6')
        c.seek(0)
        # ndmin must be 0, 1 or 2 and an integer.
        assert_raises(ValueError, np.loadtxt, c, ndmin=3)
        c.seek(0)
        assert_raises(ValueError, np.loadtxt, c, ndmin=1.5)
        c.seek(0)
        x = np.loadtxt(c, dtype=int, delimiter=',', ndmin=1)
        a = np.array([[1, 2, 3], [4, 5, 6]])
        assert_array_equal(x, a)
        # Single row: ndmin=2 keeps it 2-D, ndmin<=1 squeezes to 1-D.
        d = TextIO()
        d.write('0,1,2')
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (1, 3))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        d.seek(0)
        x = np.loadtxt(d, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))
        # Single column: ndmin=2 gives a column vector.
        e = TextIO()
        e.write('0\n1\n2')
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=2)
        assert_(x.shape == (3, 1))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=1)
        assert_(x.shape == (3,))
        e.seek(0)
        x = np.loadtxt(e, dtype=int, delimiter=',', ndmin=0)
        assert_(x.shape == (3,))
        # Test ndmin kw with empty file.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message="loadtxt: Empty input file:")
            f = TextIO()
            assert_(np.loadtxt(f, ndmin=2).shape == (0, 1,))
            assert_(np.loadtxt(f, ndmin=1).shape == (0,))
def test_generator_source(self):
def count():
for i in range(10):
yield "%d" % i
res = np.loadtxt(count())
assert_array_equal(res, np.arange(10))
def test_bad_line(self):
c = TextIO()
c.write('1 2 3\n4 5 6\n2 3')
c.seek(0)
# Check for exception and that exception contains line number
assert_raises_regex(ValueError, "3", np.loadtxt, c)
def test_none_as_string(self):
# gh-5155, None should work as string when format demands it
c = TextIO()
c.write('100,foo,200\n300,None,400')
c.seek(0)
dt = np.dtype([('x', int), ('a', 'S10'), ('y', int)])
data = np.loadtxt(c, delimiter=',', dtype=dt, comments=None)
class Testfromregex(TestCase):
    # np.fromregex expects files opened in binary mode.

    def test_record(self):
        "Two capture groups: float column plus 3-character string column."
        buf = TextIO()
        buf.write('1.312 foo\n1.534 bar\n4.444 qux')
        buf.seek(0)
        spec = [('num', np.float64), ('val', 'S3')]
        parsed = np.fromregex(buf, r"([0-9.]+)\s+(...)", spec)
        expected = np.array([(1.312, 'foo'), (1.534, 'bar'), (4.444, 'qux')],
                            dtype=spec)
        assert_array_equal(parsed, expected)

    def test_record_2(self):
        "Same layout with an integer first column."
        buf = TextIO()
        buf.write('1312 foo\n1534 bar\n4444 qux')
        buf.seek(0)
        spec = [('num', np.int32), ('val', 'S3')]
        parsed = np.fromregex(buf, r"(\d+)\s+(...)", spec)
        expected = np.array([(1312, 'foo'), (1534, 'bar'), (4444, 'qux')],
                            dtype=spec)
        assert_array_equal(parsed, expected)

    def test_record_3(self):
        "A single capture group yields one-field records."
        buf = TextIO()
        buf.write('1312 foo\n1534 bar\n4444 qux')
        buf.seek(0)
        spec = [('num', np.float64)]
        parsed = np.fromregex(buf, r"(\d+)\s+...", spec)
        expected = np.array([(1312,), (1534,), (4444,)], dtype=spec)
        assert_array_equal(parsed, expected)
#####--------------------------------------------------------------------------
class TestFromTxt(TestCase):
#
    def test_record(self):
        "Test w/ explicit dtype"
        data = TextIO('1 2\n3 4')
        # data.seek(0)
        # Explicit dtype given as a list of (name, type) tuples.
        test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
        control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
        assert_equal(test, control)
        #
        data = TextIO('M 64.0 75.0\nF 25.0 60.0')
        # data.seek(0)
        # Explicit dtype given as a names/formats dictionary.
        descriptor = {'names': ('gender', 'age', 'weight'),
                      'formats': ('S1', 'i4', 'f4')}
        control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
                           dtype=descriptor)
        test = np.ndfromtxt(data, dtype=descriptor)
        assert_equal(test, control)
def test_array(self):
"Test outputing a standard ndarray"
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
control = np.array([[1, 2], [3, 4]], dtype=float)
test = np.loadtxt(data, dtype=float)
assert_array_equal(test, control)
def test_1D(self):
"Test squeezing to 1D"
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
test = np.ndfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
"Test the stripping of comments"
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
    def test_skiprows(self):
        "Test row skipping"
        control = np.array([1, 2, 3, 5], int)
        kwargs = dict(dtype=int, delimiter=',')
        # genfromtxt spells the option skip_header...
        data = TextIO('comment\n1,2,3,5\n')
        test = np.ndfromtxt(data, skip_header=1, **kwargs)
        assert_equal(test, control)
        # ...while loadtxt spells it skiprows.
        data = TextIO('# comment\n1,2,3,5\n')
        test = np.loadtxt(data, skiprows=1, **kwargs)
        assert_equal(test, control)
    def test_skip_footer(self):
        "skip_header and skip_footer trim both ends of the input."
        data = ["# %i" % i for i in range(1, 6)]
        data.append("A, B, C")
        data.extend(["%i,%3.1f,%03s" % (i, i, i) for i in range(51)])
        # Corrupt the last line; it falls inside the skipped footer anyway.
        data[-1] = "99,99"
        kwargs = dict(delimiter=",", names=True, skip_header=5, skip_footer=10)
        test = np.genfromtxt(TextIO("\n".join(data)), **kwargs)
        ctrl = np.array([("%f" % i, "%f" % i, "%f" % i) for i in range(41)],
                        dtype=[(_, float) for _ in "ABC"])
        assert_equal(test, ctrl)
    def test_skip_footer_with_invalid(self):
        "Interaction of skip_footer with invalid (short) rows."
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore")
            basestr = '1 1\n2 2\n3 3\n4 4\n5  \n6  \n7  \n'
            # Footer too small to get rid of all invalid values
            assert_raises(ValueError, np.genfromtxt,
                          TextIO(basestr), skip_footer=1)
            # With invalid_raise=False the bad rows are dropped instead.
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            # A footer large enough swallows every invalid row.
            a = np.genfromtxt(TextIO(basestr), skip_footer=3)
            assert_equal(a, np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]]))
            # Invalid rows interleaved with valid ones.
            basestr = '1 1\n2  \n3 3\n4 4\n5  \n6 6\n7 7\n'
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=1, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.], [6., 6.]]))
            a = np.genfromtxt(
                TextIO(basestr), skip_footer=3, invalid_raise=False)
            assert_equal(a, np.array([[1., 1.], [3., 3.], [4., 4.]]))
def test_header(self):
"Test retrieving a header"
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
test = np.ndfromtxt(data, dtype=None, names=True)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
'weight': np.array([75.0, 60.0])}
assert_equal(test['gender'], control['gender'])
assert_equal(test['age'], control['age'])
assert_equal(test['weight'], control['weight'])
def test_auto_dtype(self):
"Test the automatic definition of the output dtype"
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
test = np.ndfromtxt(data, dtype=None)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
np.array([75.0, 60.0]),
np.array([3 + 4j, 5 + 6j]),
np.array([True, False]), ]
assert_equal(test.dtype.names, ['f0', 'f1', 'f2', 'f3', 'f4'])
for (i, ctrl) in enumerate(control):
assert_equal(test['f%i' % i], ctrl)
def test_auto_dtype_uniform(self):
"Tests whether the output dtype can be uniformized"
data = TextIO('1 2 3 4\n5 6 7 8\n')
test = np.ndfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
def test_fancy_dtype(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
    def test_names_overwrite(self):
        "Test overwriting the names of the dtype"
        descriptor = {'names': ('g', 'a', 'w'),
                      'formats': ('S1', 'i4', 'f4')}
        data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
        names = ('gender', 'age', 'weight')
        # The names keyword takes precedence over the dtype's own names.
        test = np.ndfromtxt(data, dtype=descriptor, names=names)
        descriptor['names'] = names
        control = np.array([('M', 64.0, 75.0),
                            ('F', 25.0, 60.0)], dtype=descriptor)
        assert_equal(test, control)
    def test_commented_header(self):
        "Check that names can be retrieved even if the line is commented out."
        data = TextIO("""
#gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        # The # is part of the first name and should be deleted automatically.
        test = np.genfromtxt(data, names=True, dtype=None)
        ctrl = np.array([('M', 21, 72.1), ('F', 35, 58.33), ('M', 33, 21.99)],
                        dtype=[('gender', '|S1'), ('age', int), ('weight', float)])
        assert_equal(test, ctrl)
        # Ditto, but we should get rid of the first element
        data = TextIO(b"""
# gender age weight
M 21 72.100000
F 35 58.330000
M 33 21.99
""")
        test = np.genfromtxt(data, names=True, dtype=None)
        assert_equal(test, ctrl)
def test_autonames_and_usecols(self):
"Tests names and usecols"
data = TextIO('A B C D\n aaaa 121 45 9.1')
test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
control = np.array(('aaaa', 45, 9.1),
dtype=[('A', '|S4'), ('C', int), ('D', float)])
assert_equal(test, control)
    def test_converters_with_usecols(self):
        "Test the combination user-defined converters and usecol"
        data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
        # The converter fills the empty field of column 3 with -999.
        test = np.ndfromtxt(data, dtype=int, delimiter=',',
                            converters={3: lambda s: int(s or - 999)},
                            usecols=(1, 3,))
        control = np.array([[2, -999], [7, 9]], int)
        assert_equal(test, control)
    def test_converters_with_usecols_and_names(self):
        "Tests names and usecols"
        data = TextIO('A B C D\n aaaa 121 45 9.1')
        # The converter is keyed by field name rather than column index.
        test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
                            dtype=None, converters={'C': lambda s: 2 * int(s)})
        control = np.array(('aaaa', 90, 9.1),
                           dtype=[('A', '|S4'), ('C', int), ('D', float)])
        assert_equal(test, control)
    def test_converters_cornercases(self):
        "Test the conversion to datetime."
        # `strptime` is the module-level helper that builds a datetime.
        converter = {
            'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.ndfromtxt(data, delimiter=',', dtype=None,
                            names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', np.object_), ('stid', float)])
        assert_equal(test, control)
    def test_converters_cornercases2(self):
        "Test the conversion to datetime64."
        # Same as above but wrapping the result in np.datetime64.
        converter = {
            'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
        data = TextIO('2009-02-03 12:00:00Z, 72214.0')
        test = np.ndfromtxt(data, delimiter=',', dtype=None,
                            names=['date', 'stid'], converters=converter)
        control = np.array((datetime(2009, 2, 3), 72214.),
                           dtype=[('date', 'datetime64[us]'), ('stid', float)])
        assert_equal(test, control)
def test_unused_converter(self):
"Test whether unused converters are forgotten"
data = TextIO("1 21\n 3 42\n")
test = np.ndfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
test = np.ndfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
    def test_invalid_converter(self):
        "A converter that fails on some rows must raise ConverterError."
        strip_rand = lambda x: float((b'r' in x.lower() and x.split()[-1]) or
                                     (b'r' not in x.lower() and x.strip() or 0.0))
        strip_per = lambda x: float((b'%' in x.lower() and x.split()[0]) or
                                    (b'%' not in x.lower() and x.strip() or 0.0))
        s = TextIO("D01N01,10/1/2003 ,1 %,R 75,400,600\r\n"
                   "L24U05,12/5/2003, 2 %,1,300, 150.5\r\n"
                   "D02N03,10/10/2004,R 1,,7,145.55")
        kwargs = dict(
            converters={2: strip_per, 3: strip_rand}, delimiter=",",
            dtype=None)
        assert_raises(ConverterError, np.genfromtxt, s, **kwargs)
def test_tricky_converter_bug1666(self):
"Test some corner case"
s = TextIO('q1,2\nq3,4')
cnv = lambda s: float(s[1:])
test = np.genfromtxt(s, delimiter=',', converters={0: cnv})
control = np.array([[1., 2.], [3., 4.]])
assert_equal(test, control)
    def test_dtype_with_converters(self):
        "A converter returning bytes widens that field to a string dtype."
        dstr = "2009; 23; 46"
        test = np.ndfromtxt(TextIO(dstr,),
                            delimiter=";", dtype=float, converters={0: bytes})
        control = np.array([('2009', 23., 46)],
                           dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
        assert_equal(test, control)
        # A float converter keeps the uniform float dtype.
        test = np.ndfromtxt(TextIO(dstr,),
                            delimiter=";", dtype=float, converters={0: float})
        control = np.array([2009., 23., 46],)
        assert_equal(test, control)
    def test_dtype_with_converters_and_usecols(self):
        "Converters keyed by original column index must work with usecols."
        dstr = "1,5,-1,1:1\n2,8,-1,1:n\n3,3,-2,m:n\n"
        # The last column is a categorical label mapped to a small int.
        dmap = {'1:1':0, '1:n':1, 'm:1':2, 'm:n':3}
        dtyp = [('e1','i4'),('e2','i4'),('e3','i2'),('n', 'i1')]
        conv = {0: int, 1: int, 2: int, 3: lambda r: dmap[r.decode()]}
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             names=None, converters=conv)
        control = np.rec.array([[1,5,-1,0], [2,8,-1,1], [3,3,-2,3]], dtype=dtyp)
        assert_equal(test, control)
        # Same again, dropping column 2 via usecols.
        dtyp = [('e1','i4'),('e2','i4'),('n', 'i1')]
        test = np.recfromcsv(TextIO(dstr,), dtype=dtyp, delimiter=',',
                             usecols=(0,1,3), names=None, converters=conv)
        control = np.rec.array([[1,5,0], [2,8,1], [3,3,3]], dtype=dtyp)
        assert_equal(test, control)
def test_dtype_with_object(self):
"Test using an explicit dtype with an object"
from datetime import date
import time
data = """ 1; 2001-01-01
2; 2002-01-31 """
ndtype = [('idx', int), ('code', np.object)]
func = lambda s: strptime(s.strip(), "%Y-%m-%d")
converters = {1: func}
test = np.genfromtxt(TextIO(data), delimiter=";", dtype=ndtype,
converters=converters)
control = np.array(
[(1, datetime(2001, 1, 1)), (2, datetime(2002, 1, 31))],
dtype=ndtype)
assert_equal(test, control)
#
ndtype = [('nest', [('idx', int), ('code', np.object)])]
try:
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
except NotImplementedError:
pass
else:
errmsg = "Nested dtype involving objects should be supported."
raise AssertionError(errmsg)
    def test_userconverters_with_explicit_dtype(self):
        "Test user_converters w/ explicit (standard) dtype"
        data = TextIO('skip,skip,2001-01-01,1.0,skip')
        # The bytes converter overrides the float dtype for column 2.
        test = np.genfromtxt(data, delimiter=",", names=None, dtype=float,
                             usecols=(2, 3), converters={2: bytes})
        control = np.array([('2001-01-01', 1.)],
                           dtype=[('', '|S10'), ('', float)])
        assert_equal(test, control)
def test_spacedelimiter(self):
"Test space delimiter"
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
test = np.ndfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
def test_integer_delimiter(self):
"Test using an integer for delimiter"
data = " 1 2 3\n 4 5 67\n890123 4"
test = np.genfromtxt(TextIO(data), delimiter=3)
control = np.array([[1, 2, 3], [4, 5, 67], [890, 123, 4]])
assert_equal(test, control)
def test_missing(self):
data = TextIO('1,2,3,,5\n')
test = np.ndfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
    def test_missing_with_tabs(self):
        "Test w/ a delimiter tab"
        txt = "1\t2\t3\n\t2\t\n1\t\t3"
        test = np.genfromtxt(TextIO(txt), delimiter="\t",
                             usemask=True,)
        # Empty tab-delimited cells become masked NaNs.
        ctrl_d = np.array([(1, 2, 3), (np.nan, 2, np.nan), (1, np.nan, 3)],)
        ctrl_m = np.array([(0, 0, 0), (1, 0, 1), (0, 1, 0)], dtype=bool)
        assert_equal(test.data, ctrl_d)
        assert_equal(test.mask, ctrl_m)
    def test_usecols(self):
        "Test the selection of columns"
        # Select 1 column
        control = np.array([[1, 2], [3, 4]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1,))
        assert_equal(test, control[:, 1])
        # Select two of three columns.
        control = np.array([[1, 2, 3], [3, 4, 5]], float)
        data = TextIO()
        np.savetxt(data, control)
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
        assert_equal(test, control[:, 1:])
        # Testing with arrays instead of tuples.
        data.seek(0)
        test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
        assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
"Test giving usecols with a comma-separated string"
data = "1 2 3\n4 5 6"
test = np.genfromtxt(TextIO(data),
names="a, b, c", usecols="a, c")
ctrl = np.array([(1, 3), (4, 6)], dtype=[(_, float) for _ in "ac"])
assert_equal(test, ctrl)
def test_usecols_with_structured_dtype(self):
"Test usecols with an explicit structured dtype"
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
test = np.ndfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
def test_usecols_with_integer(self):
"Test usecols with an integer"
test = np.genfromtxt(TextIO(b"1 2 3\n4 5 6"), usecols=0)
assert_equal(test, np.array([1., 4.]))
    def test_usecols_with_named_columns(self):
        "Test usecols with named columns"
        ctrl = np.array([(1, 3), (4, 6)], dtype=[('a', float), ('c', float)])
        data = "1 2 3\n4 5 6"
        kwargs = dict(names="a, b, c")
        # Selection by (possibly negative) index...
        test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
        assert_equal(test, ctrl)
        # ...and by field name must give the same result.
        test = np.genfromtxt(TextIO(data),
                             usecols=('a', 'c'), **kwargs)
        assert_equal(test, ctrl)
def test_empty_file(self):
"Test that an empty file raises the proper warning."
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="genfromtxt: Empty input file:")
data = TextIO()
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
def test_fancy_dtype_alt(self):
"Check that a nested dtype isn't MIA"
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
def test_shaped_dtype(self):
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
x = np.ndfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
    def test_withmissing(self):
        "mafromtxt masks cells matching missing_values."
        data = TextIO('A,B\n0,1\n2,N/A')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.mafromtxt(data, dtype=None, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', np.int), ('B', np.int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        # Same data without dtype=None falls back to float fields.
        data.seek(0)
        test = np.mafromtxt(data, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', np.float), ('B', np.float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
    def test_user_missing_values(self):
        "missing_values may be a string, an index dict, or a name dict."
        data = "A, B, C\n0, 0., 0j\n1, N/A, 1j\n-9, 2.2, N/A\n3, -99, 3j"
        basekwargs = dict(dtype=None, delimiter=",", names=True,)
        mdtype = [('A', int), ('B', float), ('C', complex)]
        # Single global missing-value string.
        test = np.mafromtxt(TextIO(data), missing_values="N/A",
                            **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (0, 0, 1), (0, 0, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        # Per-column missing values keyed by column index.
        basekwargs['dtype'] = mdtype
        test = np.mafromtxt(TextIO(data),
                            missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
        # Mixed keys: column index and field names.
        test = np.mafromtxt(TextIO(data),
                            missing_values={0: -9, 'B': -99, 'C': -999j},
                            **basekwargs)
        control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
                            (-9, 2.2, -999j), (3, -99, 3j)],
                           mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
                           dtype=mdtype)
        assert_equal(test, control)
def test_user_filling_values(self):
"Test with missing and filling values"
ctrl = np.array([(0, 3), (4, -999)], dtype=[('a', int), ('b', int)])
data = "N/A, 2, 3\n4, ,???"
kwargs = dict(delimiter=",",
dtype=int,
names="a,b,c",
missing_values={0: "N/A", 'b': " ", 2: "???"},
filling_values={0: 0, 'b': 0, 2: -999})
test = np.genfromtxt(TextIO(data), **kwargs)
ctrl = np.array([(0, 2, 3), (4, 0, -999)],
dtype=[(_, int) for _ in "abc"])
assert_equal(test, ctrl)
#
test = np.genfromtxt(TextIO(data), usecols=(0, -1), **kwargs)
ctrl = np.array([(0, 3), (4, -999)], dtype=[(_, int) for _ in "ac"])
assert_equal(test, ctrl)
data2 = "1,2,*,4\n5,*,7,8\n"
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=0)
ctrl = np.array([[1, 2, 0, 4], [5, 0, 7, 8]])
assert_equal(test, ctrl)
test = np.genfromtxt(TextIO(data2), delimiter=',', dtype=int,
missing_values="*", filling_values=-1)
ctrl = np.array([[1, 2, -1, 4], [5, -1, 7, 8]])
assert_equal(test, ctrl)
    def test_withmissing_float(self):
        "A float missing value ('-999.0') must match the cell '-999.00'."
        data = TextIO('A,B\n0,1.5\n2,-999.00')
        test = np.mafromtxt(data, dtype=None, delimiter=',',
                            missing_values='-999.0', names=True,)
        control = ma.array([(0, 1.5), (2, -1.)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', np.int), ('B', np.float)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
def test_with_masked_column_uniform(self):
"Test masked column"
data = TextIO('1 2 3\n4 5 6\n')
test = np.genfromtxt(data, dtype=None,
missing_values='2,5', usemask=True)
control = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[0, 1, 0], [0, 1, 0]])
assert_equal(test, control)
    def test_with_masked_column_various(self):
        "Test masked column"
        data = TextIO('True 2 3\nFalse 5 6\n')
        test = np.genfromtxt(data, dtype=None,
                             missing_values='2,5', usemask=True)
        # Mixed column types survive the masking (bool, bool, int).
        control = ma.array([(1, 2, 3), (0, 5, 6)],
                           mask=[(0, 1, 0), (0, 1, 0)],
                           dtype=[('f0', bool), ('f1', bool), ('f2', int)])
        assert_equal(test, control)
    def test_invalid_raise(self):
        "Test invalid raise"
        data = ["1, 1, 1, 1, 1"] * 50
        # Corrupt every tenth row (only four fields).
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))
        #
        kwargs = dict(delimiter=",", dtype=None, names=True)
        # XXX: is there a better way to get the return value of the callable in
        # assert_warns ?
        ret = {}
        def f(_ret={}):
            _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        # The 5 corrupted rows were skipped, leaving 45 valid ones.
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
        # By default (invalid_raise=True) the same input must raise.
        mdata.seek(0)
        assert_raises(ValueError, np.ndfromtxt, mdata,
                      delimiter=",", names=True)
    def test_invalid_raise_with_usecols(self):
        "Test invalid_raise with usecols"
        data = ["1, 1, 1, 1, 1"] * 50
        for i in range(5):
            data[10 * i] = "2, 2, 2, 2 2"
        data.insert(0, "a, b, c, d, e")
        mdata = TextIO("\n".join(data))
        kwargs = dict(delimiter=",", dtype=None, names=True,
                      invalid_raise=False)
        # XXX: is there a better way to get the return value of the callable in
        # assert_warns ?
        ret = {}
        def f(_ret={}):
            _ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
        assert_warns(ConversionWarning, f, _ret=ret)
        mtest = ret['mtest']
        assert_equal(len(mtest), 45)
        assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
        # Selecting only intact columns keeps all 50 rows.
        mdata.seek(0)
        mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
        assert_equal(len(mtest), 50)
        control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
        control[[10 * _ for _ in range(5)]] = (2, 2)
        assert_equal(mtest, control)
def test_inconsistent_dtype(self):
"Test inconsistent dtype"
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
converters = {4: lambda x: "(%s)" % x}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
def test_default_field_format(self):
"Test default format"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
assert_equal(mtest, ctrl)
def test_single_dtype_wo_names(self):
"Test single dtype w/o names"
data = "0, 1, 2.3\n4, 5, 6.7"
mtest = np.ndfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
    def test_single_dtype_w_explicit_names(self):
        "Test single dtype w explicit names"
        data = "0, 1, 2.3\n4, 5, 6.7"
        # Names given explicitly turn the flat dtype into a structured one.
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=float, names="a, b, c")
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)
    def test_single_dtype_w_implicit_names(self):
        "Test single dtype w implicit names"
        data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
        # names=True pulls the field names from the first line.
        mtest = np.ndfromtxt(TextIO(data),
                             delimiter=",", dtype=float, names=True)
        ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
                        dtype=[(_, float) for _ in "abc"])
        assert_equal(mtest, ctrl)
    def test_easy_structured_dtype(self):
        "Test easy structured dtype"
        data = "0, 1, 2.3\n4, 5, 6.7"
        # A tuple of scalar types builds a structured dtype with defaultfmt names.
        mtest = np.ndfromtxt(TextIO(data), delimiter=",",
                             dtype=(int, float, float), defaultfmt="f_%02i")
        ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
                        dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
        assert_equal(mtest, ctrl)
    def test_autostrip(self):
        "Test autostrip"
        data = "01/01/2003  , 1.3,   abcde"
        kwargs = dict(delimiter=",", dtype=None)
        # Without autostrip, surrounding whitespace is kept in string fields.
        mtest = np.ndfromtxt(TextIO(data), **kwargs)
        ctrl = np.array([('01/01/2003  ', 1.3, '  abcde')],
                        dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
        assert_equal(mtest, ctrl)
        # With autostrip=True, whitespace is stripped and dtypes shrink.
        mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
        ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
                        dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
        assert_equal(mtest, ctrl)
    def test_replace_space(self):
        "Test the 'replace_space' option"
        txt = "A.A, B (B), C:C\n1, 2, 3.14"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=None,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", float)]
        ctrl = np.array((1, 2, 3.14), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
    def test_replace_space_known_dtype(self):
        "Test the 'replace_space' (and related) options when dtype != None"
        txt = "A.A, B (B), C:C\n1, 2, 3"
        # Test default: replace ' ' by '_' and delete non-alphanum chars
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int)
        ctrl_dtype = [("AA", int), ("B_B", int), ("CC", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no replace, no delete
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             replace_space='', deletechars='')
        ctrl_dtype = [("A.A", int), ("B (B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
        # Test: no delete (spaces are replaced by _)
        test = np.genfromtxt(TextIO(txt),
                             delimiter=",", names=True, dtype=int,
                             deletechars='')
        ctrl_dtype = [("A.A", int), ("B_(B)", int), ("C:C", int)]
        ctrl = np.array((1, 2, 3), dtype=ctrl_dtype)
        assert_equal(test, ctrl)
def test_incomplete_names(self):
"Test w/ incomplete names"
data = "A,,C\n0,1,2\n3,4,5"
kwargs = dict(delimiter=",", names=True)
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
test = np.ndfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
"Make sure that names are properly completed"
data = "1 2 3\n 4 5 6"
test = np.genfromtxt(TextIO(data),
dtype=(int, float, int), names="a")
ctrl = np.array([(1, 2, 3), (4, 5, 6)],
dtype=[('a', int), ('f0', float), ('f1', int)])
assert_equal(test, ctrl)
    def test_names_with_usecols_bug1636(self):
        "Make sure we pick up the right names w/ usecols"
        data = "A,B,C,D,E\n0,1,2,3,4\n0,1,2,3,4\n0,1,2,3,4"
        ctrl_names = ("A", "C", "E")
        # Selection by index...
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=(0, 2, 4), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        # ...by name with a per-column dtype...
        test = np.genfromtxt(TextIO(data),
                             dtype=(int, int, int), delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
        # ...and by name with a single scalar dtype.
        test = np.genfromtxt(TextIO(data),
                             dtype=int, delimiter=",",
                             usecols=("A", "C", "E"), names=True)
        assert_equal(test.dtype.names, ctrl_names)
    def test_fixed_width_names(self):
        "Test fix-width w/ names"
        data = "    A    B   C\n    0    1 2.3\n   45   67   9."
        # Widths given per column as a tuple...
        kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
        # ...or as a single uniform width.
        kwargs = dict(delimiter=5, names=True, dtype=None)
        ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
                        dtype=[('A', int), ('B', int), ('C', float)])
        test = np.ndfromtxt(TextIO(data), **kwargs)
        assert_equal(test, ctrl)
def test_filling_values(self):
"Test missing values"
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
test = np.ndfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
    def test_comments_is_none(self):
        # Github issue 329 (None was previously being converted to 'None').
        test = np.genfromtxt(TextIO("test1,testNonetherestofthedata"),
                             dtype=None, comments=None, delimiter=',')
        assert_equal(test[1], b'testNonetherestofthedata')
        # Leading whitespace must also survive with comments disabled.
        test = np.genfromtxt(TextIO("test1, testNonetherestofthedata"),
                             dtype=None, comments=None, delimiter=',')
        assert_equal(test[1], b' testNonetherestofthedata')
    def test_recfromtxt(self):
        "recfromtxt returns a recarray (or masked array with usemask)."
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(delimiter=",", missing_values="N/A", names=True)
        test = np.recfromtxt(data, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', np.int), ('B', np.int)])
        self.assertTrue(isinstance(test, np.recarray))
        assert_equal(test, control)
        # With usemask=True, missing cells are masked; fields are still
        # reachable as attributes (test.A).
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromtxt(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', np.int), ('B', np.int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])
    def test_recfromcsv(self):
        "recfromcsv honours case_sensitive, usemask and explicit dtype."
        data = TextIO('A,B\n0,1\n2,3')
        kwargs = dict(missing_values="N/A", names=True, case_sensitive=True)
        test = np.recfromcsv(data, dtype=None, **kwargs)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('A', np.int), ('B', np.int)])
        self.assertTrue(isinstance(test, np.recarray))
        assert_equal(test, control)
        # usemask=True returns a masked record array.
        data = TextIO('A,B\n0,1\n2,N/A')
        test = np.recfromcsv(data, dtype=None, usemask=True, **kwargs)
        control = ma.array([(0, 1), (2, -1)],
                           mask=[(False, False), (False, True)],
                           dtype=[('A', np.int), ('B', np.int)])
        assert_equal(test, control)
        assert_equal(test.mask, control.mask)
        assert_equal(test.A, [0, 2])
        # Default case handling lowercases the header names.
        data = TextIO('A,B\n0,1\n2,3')
        test = np.recfromcsv(data, missing_values='N/A',)
        control = np.array([(0, 1), (2, 3)],
                           dtype=[('a', np.int), ('b', np.int)])
        self.assertTrue(isinstance(test, np.recarray))
        assert_equal(test, control)
        # An explicit dtype overrides the inferred one.
        data = TextIO('A,B\n0,1\n2,3')
        dtype = [('a', np.int), ('b', np.float)]
        test = np.recfromcsv(data, missing_values='N/A', dtype=dtype)
        control = np.array([(0, 1), (2, 3)],
                           dtype=dtype)
        self.assertTrue(isinstance(test, np.recarray))
        assert_equal(test, control)
def test_max_rows(self):
    # Test the `max_rows` keyword argument.
    data = '1 2\n3 4\n5 6\n7 8\n9 10\n'
    txt = TextIO(data)
    # Reading stops after max_rows rows; a second call on the same file
    # object resumes where the first one left off.
    a1 = np.genfromtxt(txt, max_rows=3)
    a2 = np.genfromtxt(txt)
    assert_equal(a1, [[1, 2], [3, 4], [5, 6]])
    assert_equal(a2, [[7, 8], [9, 10]])
    # max_rows must be at least 1.
    assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=0)
    # An input with several invalid rows.
    data = '1 1\n2 2\n0 \n3 3\n4 4\n5 \n6 \n7 \n'
    test = np.genfromtxt(TextIO(data), max_rows=2)
    control = np.array([[1., 1.], [2., 2.]])
    assert_equal(test, control)
    # Test keywords conflict: skip_footer cannot be combined with max_rows.
    assert_raises(ValueError, np.genfromtxt, TextIO(data), skip_footer=1,
                  max_rows=4)
    # Test with invalid value: row 3 has only one column, so reading 4
    # rows must raise.
    assert_raises(ValueError, np.genfromtxt, TextIO(data), max_rows=4)
    # Test with invalid_raise=False: bad rows are skipped instead.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore")
        test = np.genfromtxt(TextIO(data), max_rows=4, invalid_raise=False)
        control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
        assert_equal(test, control)
        test = np.genfromtxt(TextIO(data), max_rows=5, invalid_raise=False)
        control = np.array([[1., 1.], [2., 2.], [3., 3.], [4., 4.]])
        assert_equal(test, control)
    # Structured array with field names.
    data = 'a b\n#c d\n1 1\n2 2\n#0 \n3 3\n4 4\n5 5\n'
    # Test with header, names and comments
    txt = TextIO(data)
    test = np.genfromtxt(txt, skip_header=1, max_rows=3, names=True)
    control = np.array([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)],
                       dtype=[('c', '<f8'), ('d', '<f8')])
    assert_equal(test, control)
    # To continue reading the same "file", don't use skip_header or
    # names, and use the previously determined dtype.
    test = np.genfromtxt(txt, max_rows=None, dtype=test.dtype)
    control = np.array([(4.0, 4.0), (5.0, 5.0)],
                       dtype=[('c', '<f8'), ('d', '<f8')])
    assert_equal(test, control)
def test_gft_using_filename(self):
    # genfromtxt must accept a filename argument, not only file objects.
    expected = np.arange(6).reshape((2, 3))
    if sys.version_info[0] >= 3:
        # python 3k is known to fail for '\r'
        separators = ('\n', '\r\n')
    else:
        separators = ('\n', '\r\n', '\r')
    for newline in separators:
        text = newline.join(('0 1 2', '3 4 5'))
        # We can't use NamedTemporaryFile on windows, because we cannot
        # reopen the file, so fall back to mkstemp + manual cleanup.
        fd, fname = mkstemp()
        try:
            os.write(fd, asbytes(text))
            assert_array_equal(np.genfromtxt(fname), expected)
        finally:
            os.close(fd)
            os.unlink(fname)
def test_gft_using_generator(self):
    # genfromtxt should be able to consume a generator of byte strings.
    # (gft doesn't work with unicode.)
    def digits():
        for value in range(10):
            yield asbytes("%d" % value)
    result = np.genfromtxt(digits())
    assert_array_equal(result, np.arange(10))
def test_auto_dtype_largeint(self):
    """
    Regression test for numpy/numpy#5635 whereby large integers could
    cause OverflowErrors.
    """
    "Test the automatic definition of the output dtype"
    # 2**66 = 73786976294838206464 => should convert to float
    # 2**34 = 17179869184 => should convert to int64
    # 2**10 = 1024 => should convert to int (int32 on 32-bit systems,
    #                 int64 on 64-bit systems)
    data = TextIO('73786976294838206464 17179869184 1024')
    test = np.ndfromtxt(data, dtype=None)
    # Default field names f0/f1/f2 are assigned when none are given.
    assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
    assert test.dtype['f0'] == np.float
    assert test.dtype['f1'] == np.int64
    assert test.dtype['f2'] == np.integer
    assert_allclose(test['f0'], 73786976294838206464.)
    assert_equal(test['f1'], 17179869184)
    assert_equal(test['f2'], 1024)
def test_gzip_load():
    # np.save/np.load must round-trip through a gzip-compressed stream.
    arr = np.random.random((5, 5))
    buf = BytesIO()
    writer = gzip.GzipFile(fileobj=buf, mode="w")
    np.save(writer, arr)
    writer.close()
    buf.seek(0)
    reader = gzip.GzipFile(fileobj=buf, mode="r")
    assert_array_equal(np.load(reader), arr)
def test_gzip_loadtxt():
    # Thanks to another windows brokeness, we can't use
    # NamedTemporaryFile: a file created from this function cannot be
    # reopened by another open call. So we first build the gzipped bytes
    # of the reference data in memory, write them to a securely opened
    # file, and let loadtxt read that file back by name.
    buf = BytesIO()
    gz = gzip.GzipFile(fileobj=buf, mode='w')
    gz.write(b'1 2 3\n')
    gz.close()
    buf.seek(0)
    fd, fname = mkstemp(suffix='.gz')
    try:
        os.write(fd, buf.read())
        buf.close()
        assert_array_equal(np.loadtxt(fname), [1, 2, 3])
    finally:
        os.close(fd)
        os.unlink(fname)
def test_gzip_loadtxt_from_string():
    # loadtxt should read directly from an open GzipFile object.
    buf = BytesIO()
    writer = gzip.GzipFile(fileobj=buf, mode="w")
    writer.write(b'1 2 3\n')
    writer.close()
    buf.seek(0)
    reader = gzip.GzipFile(fileobj=buf, mode="r")
    assert_array_equal(np.loadtxt(reader), [1, 2, 3])
def test_npzfile_dict():
    # The NpzFile returned by np.load should behave like a dict of arrays.
    buf = BytesIO()
    np.savez(buf, x=np.zeros((3, 3)), y=np.zeros((3, 3)))
    buf.seek(0)
    archive = np.load(buf)
    # Membership tests, both directly and through keys().
    assert_('x' in archive)
    assert_('y' in archive)
    assert_('x' in archive.keys())
    assert_('y' in archive.keys())
    # items() must yield every (name, array) pair exactly once.
    for key, arr in archive.items():
        assert_(key in ['x', 'y'])
        assert_equal(arr.shape, (3, 3))
    assert_(len(archive.items()) == 2)
    # Plain iteration yields the keys.
    for key in archive:
        assert_(key in ['x', 'y'])
    assert_('x' in archive.keys())
def test_load_refcount():
    # Check that objects returned by np.load are directly freed based on
    # their refcount, rather than needing the gc to collect them.
    f = BytesIO()
    np.savez(f, [1, 2, 3])
    f.seek(0)
    # Compare the number of gc-tracked objects before and after np.load;
    # if the loaded data were kept alive by a reference cycle the count
    # would grow.
    gc.collect()
    n_before = len(gc.get_objects())
    np.load(f)
    n_after = len(gc.get_objects())
    assert_equal(n_before, n_after)
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run_module_suite()
| bsd-3-clause |
Kongsea/tensorflow | tensorflow/contrib/distributions/python/ops/bijectors/invert.py | 30 | 3809 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Invert bijector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.distributions import bijector as bijector_lib
__all__ = [
"Invert",
]
class Invert(bijector_lib.Bijector):
  """Bijector that swaps the `forward` and `inverse` roles of another Bijector.

  Example Use: [ExpGammaDistribution (see Background & Context)](
  https://reference.wolfram.com/language/ref/ExpGammaDistribution.html)
  models `Y=log(X)` where `X ~ Gamma`.

  ```python
  exp_gamma_distribution = TransformedDistribution(
    distribution=Gamma(concentration=1., rate=2.),
    bijector=bijector.Invert(bijector.Exp())
  ```
  """

  def __init__(self, bijector, validate_args=False, name=None):
    """Creates a `Bijector` which swaps the meaning of `inverse` and `forward`.

    Note: An inverted bijector's `inverse_log_det_jacobian` is often more
    efficient if the base bijector implements `_forward_log_det_jacobian`. If
    `_forward_log_det_jacobian` is not implemented then the following code is
    used:

    ```python
    y = self.inverse(x, **kwargs)
    return -self.inverse_log_det_jacobian(y, **kwargs)
    ```

    Args:
      bijector: Bijector instance.
      validate_args: Python `bool` indicating whether arguments should be
        checked for correctness.
      name: Python `str`, name given to ops managed by this object.
    """
    # Inverting a non-injective bijector is not well defined.
    if not bijector._is_injective:  # pylint: disable=protected-access
      raise NotImplementedError(
          "Invert is not implemented for non-injective bijectors.")
    self._bijector = bijector
    # Mirror the wrapped bijector's static properties; only the direction
    # of the transformation is swapped.
    super(Invert, self).__init__(
        event_ndims=bijector.event_ndims,
        graph_parents=bijector.graph_parents,
        is_constant_jacobian=bijector.is_constant_jacobian,
        validate_args=validate_args,
        dtype=bijector.dtype,
        name=name or "_".join(["invert", bijector.name]))

  @property
  def bijector(self):
    """The wrapped (inner) bijector."""
    return self._bijector

  # Every hook below simply delegates to the opposite-direction hook of the
  # wrapped bijector.

  def _forward_event_shape(self, input_shape):
    return self.bijector._inverse_event_shape(input_shape)  # pylint: disable=protected-access

  def _forward_event_shape_tensor(self, input_shape):
    return self.bijector._inverse_event_shape_tensor(input_shape)  # pylint: disable=protected-access

  def _inverse_event_shape(self, output_shape):
    return self.bijector._forward_event_shape(output_shape)  # pylint: disable=protected-access

  def _inverse_event_shape_tensor(self, output_shape):
    return self.bijector._forward_event_shape_tensor(output_shape)  # pylint: disable=protected-access

  def _forward(self, x, **kwargs):
    return self.bijector._inverse(x, **kwargs)  # pylint: disable=protected-access

  def _inverse(self, y, **kwargs):
    return self.bijector._forward(y, **kwargs)  # pylint: disable=protected-access

  def _inverse_log_det_jacobian(self, y, **kwargs):
    return self.bijector._forward_log_det_jacobian(y, **kwargs)  # pylint: disable=protected-access

  def _forward_log_det_jacobian(self, x, **kwargs):
    return self.bijector._inverse_log_det_jacobian(x, **kwargs)  # pylint: disable=protected-access
| apache-2.0 |
wdzhou/mantid | scripts/Calibration/tube_calib.py | 2 | 26120 | #pylint: disable=invalid-name
"""
This file is concerned with calibrating a specified set of tubes
The main function is :func:`getCalibration` which is at the end of this file.
It populates an empty Calibration Table Workspace with the new positions of the pixel detectors after calibration.
This Calibration Table Workspace can be used later to move the pixel detectors to the calibrated positions.
Users should not need to directly call any other function other than :func:`getCalibration` from this file.
"""
## Author: Karl palmen ISIS and for readPeakFile Gesner Passos ISIS
from __future__ import absolute_import, division, print_function
import numpy
from mantid.simpleapi import *
from mantid.kernel import *
from tube_spec import TubeSpec
from ideal_tube import IdealTube
import re
import os
import copy
def createTubeCalibtationWorkspaceByWorkspaceIndexList ( integratedWorkspace, outputWorkspace, workspaceIndexList,
                                                         xUnit='Pixel', showPlot=False):
    """
    Create a workspace with the integrated counts of one tube plotted against
    distance along the tube. The tube is specified by a list of workspace
    indices of its spectra.

    @param integratedWorkspace: Workspace of integrated data
    @param outputWorkspace: name of the workspace to create
    @param workspaceIndexList: list of workspace indices for the tube
    @param xUnit: unit of distance ( Pixel)
    @param showPlot: True = show plot of workspace created, False = just make the workspace.
    Return Value: Workspace created
    """
    if len(workspaceIndexList) < 1:
        return
    # Pixel numbers simply run 1..N along the tube.
    pixelNumbers = list(range(1, len(workspaceIndexList) + 1))
    integratedPixelCounts = [integratedWorkspace.dataY(index)[0] for index in workspaceIndexList]
    CreateWorkspace(dataX=pixelNumbers, dataY=integratedPixelCounts, OutputWorkspace=outputWorkspace)
    # plotSpectrum is not recognised here, so the workspace is not plotted;
    # it can be plotted afterwards if showPlot was requested.
# Return the udet number and [x,y,z] position of the detector (or virtual detector)
# corresponding to spectra spectra_number
# Thanks to Pascal Manuel for this function
def get_detector_pos(work_handle, spectra_number):
    detector = work_handle.getDetector(spectra_number)
    return detector.getID(), detector.getPos()
# Given the center of a slit in pixels return the interpolated y
# Converts from pixel coords to Y.
# If a pixel coord is not integer
# it is effectively rounded to half integer before conversion, rather than interpolated.
# It allows the pixel widths to vary (unlike correctTube).
# Thanks to Pascal Manuel for this function
def get_ypos(work_handle, pixel_float):
    """Return the Y coordinate for a (possibly fractional) pixel position.

    The Y value is interpolated linearly between the detector positions of
    the two nearest integer pixels.

    :param work_handle: workspace whose detectors define the pixel positions
    :param pixel_float: pixel coordinate, possibly non-integer
    :return: interpolated Y coordinate
    """
    # BUGFIX: 'math' is not imported at module level in this file, so the
    # floor/ceil calls below raised a NameError; import it locally.
    import math
    center_low_pixel = int(math.floor(pixel_float))
    center_high_pixel = int(math.ceil(pixel_float))
    # BUGFIX: for an exactly integral pixel_float, floor == ceil and the
    # division below was a ZeroDivisionError; return that pixel's Y directly.
    if center_high_pixel == center_low_pixel:
        _, pos = get_detector_pos(work_handle, center_low_pixel)
        return pos.getY()
    idlow, low = get_detector_pos(work_handle, center_low_pixel)  # detector position of the nearest lower pixel
    idhigh, high = get_detector_pos(work_handle, center_high_pixel)  # detector position of the nearest higher pixel
    center_y = (center_high_pixel - pixel_float)*low.getY() + (pixel_float - center_low_pixel)*high.getY()
    center_y /= (center_high_pixel - center_low_pixel)
    return center_y
def fitGaussianParams ( height, centre, sigma ):
    """Compose the Fit function string for a Gaussian peak."""
    return "name=Gaussian, Height=%s, PeakCentre=%s, Sigma=%s" % (height, centre, sigma)
def fitEndErfcParams ( B, C ):
    """Compose the Fit function string for an EndErfc edge."""
    return "name=EndErfc, B=%s, C=%s" % (B, C)
#
# definition of the functions to fit
#
def fitEdges(fitPar, index, ws, outputWs):
    """Fit one edge of the tube with an EndErfc function.

    The expected edge position (in pixels) is taken from fitPar's peak list;
    Mantid's Fit is run around it and the result stored under outputWs.
    :return: row index of the fitted edge centre (parameter B) in the parameter table
    """
    expectedCentre = fitPar.getPeaks()[index]
    outedge, inedge, endGrad = fitPar.getEdgeParameters()
    margin = fitPar.getMargin()
    # Inspect the counts around the expected centre to decide the edge direction.
    counts = ws.dataY(0)
    rightLimit = len(counts)
    window = counts[max(expectedCentre - margin, 0):min(expectedCentre + margin, len(counts))]
    # Falling values across the window indicate a descending edge.
    if window[0] > window[-1]:
        start = max(expectedCentre - outedge, 0)
        end = min(expectedCentre + inedge, rightLimit)
        edgeMode = -1
    else:
        start = max(expectedCentre - inedge, 0)
        end = min(expectedCentre + outedge, rightLimit)
        edgeMode = 1
    Fit(InputWorkspace=ws, Function=fitEndErfcParams(expectedCentre, endGrad * edgeMode),
        StartX=str(start), EndX=str(end), Output=outputWs)
    return 1  # parameter B of EndErfc holds the edge centre
def fitGaussian(fitPar, index, ws, outputWs):
    """Fit a Gaussian peak (or trough) at the expected position of a tube.

    :param fitPar: TubeCalibFitParams with expected peak positions and margins
    :param index: which expected peak to fit
    :param ws: workspace holding the tube counts in spectrum 0
    :param outputWs: base name for the Fit output workspaces
    :return: row index of the fitted PeakCentre in the Fit parameter table
    """
    # find the peak position
    centre = fitPar.getPeaks()[index]
    margin = fitPar.getMargin()
    # get values around the expected center
    all_values = ws.dataY(0)
    RIGHTLIMIT = len(all_values)
    min_index = max(centre - int(margin), 0)
    max_index = min(centre + int(margin), RIGHTLIMIT)
    values = all_values[min_index:max_index]
    # find the peak position
    if fitPar.getAutomatic():
        # find the parameters for fit dynamically
        max_value = numpy.max(values)
        min_value = numpy.min(values)
        half = (max_value - min_value) * 2 / 3 + min_value
        above_half_line = len(numpy.where(values > half)[0])
        beyond_half_line = len(values) - above_half_line
        if above_half_line < beyond_half_line:
            # means that there are few values above the middle, so it is a peak
            centre = numpy.argmax(values) + min_index
            background = min_value
            height = max_value - background
            # BUGFIX: numpy.where(cond) returns a *tuple* of index arrays, so
            # len() of it is always 1; count the matching points via [0].
            width = len(numpy.where(values > height / 2 + background)[0])
        else:
            # means that there are many values above the middle, so it is a trough
            centre = numpy.argmin(values) + min_index
            background = max_value
            height = min_value - max_value  # negative value
            # BUGFIX: same tuple-length issue as above.
            width = len(numpy.where(values < min_value + height / 2)[0])
        start = max(centre - margin, 0)
        end = min(centre + margin, RIGHTLIMIT)
        fit_msg = 'name=LinearBackground,A0=%f;name=Gaussian,Height=%f,PeakCentre=%f,Sigma=%f' % (background, height, centre, width)
        Fit(InputWorkspace=ws, Function=fit_msg,
            StartX=str(start), EndX=str(end), Output=outputWs)
        peakIndex = 3
    else:
        # get the parameters from fitParams
        background = 1000
        height, width = fitPar.getHeightAndWidth()
        start = max(centre - margin, 0)
        end = min(centre + margin, RIGHTLIMIT)
        # Fit the input data as a linear background + gaussian fit.
        # It was seen that the best result for static general fit parameters
        # is to divide the values in two fitting steps.
        Fit(InputWorkspace=ws, Function='name=LinearBackground,A0=%f' % (background),
            StartX=str(start), EndX=str(end), Output='Z1')
        Fit(InputWorkspace='Z1_Workspace', Function='name=Gaussian,Height=%f,PeakCentre=%f,Sigma=%f' % (height, centre, width),
            WorkspaceIndex=2, StartX=str(start), EndX=str(end), Output=outputWs)
        CloneWorkspace(outputWs + '_Workspace', OutputWorkspace='gauss_' + str(index))
        peakIndex = 1
    return peakIndex
def getPoints ( IntegratedWorkspace, funcForms, fitParams, whichTube, showPlot=False ):
    """
    Get the centres of N slits or edges for calibration

    It does look for the peak position in pixels by fitting the peaks and
    edges. It is the method responsible for estimating the peak position in each tube.

    .. note::
      This N slit method is suited for WISH or the five sharp peaks of MERLIN .

    :param IntegratedWorkspace: Workspace of integrated data
    :param funcForms: array of function form 1=slit/bar, 2=edge
    :param fitParams: a TubeCalibFitParams object contain the fit parameters
    :param whichTube: a list of workspace indices for one tube (define a single tube)
    :param showPlot: show plot for this tube

    :rtype: array of the slit/edge positions (-1.0 indicates failed to find position)
    """
    # Create input workspace for fitting
    ## get all the counts for the integrated workspace inside the tube
    countsY = numpy.array([IntegratedWorkspace.dataY(i)[0] for i in whichTube])
    if len(countsY) == 0:
        return
    getPointsWs = CreateWorkspace(range(len(countsY)), countsY, OutputWorkspace='TubePlot')
    calibPointWs = 'CalibPoint'
    results = []
    fitt_y_values = []
    fitt_x_values = []
    # Loop over the points
    for i in range(len(funcForms)):
        if funcForms[i] == 2:
            # find the edge position
            peakIndex = fitEdges(fitParams, i, getPointsWs, calibPointWs)
        else:
            peakIndex = fitGaussian(fitParams, i, getPointsWs, calibPointWs)
        # get the peak centre from the parameter table produced by Fit
        peakCentre = mtd[calibPointWs + '_Parameters'].row(peakIndex).items()[1][1]
        results.append(peakCentre)
        if showPlot:
            # collect the fitted (calculated) curve for later plotting
            ws = mtd[calibPointWs + '_Workspace']
            fitt_y_values.append(copy.copy(ws.dataY(1)))
            fitt_x_values.append(copy.copy(ws.dataX(1)))
    if showPlot:
        CreateWorkspace(OutputWorkspace='FittedData',
                        DataX=numpy.hstack(fitt_x_values),
                        DataY=numpy.hstack(fitt_y_values))
    return results
def getIdealTubeFromNSlits ( IntegratedWorkspace, slits ):
    """
    Given N slits for calibration on an ideal tube, convert their pixel
    positions to Y values, forming an ideal tube for correctTubeToIdealTube().

    @param IntegratedWorkspace: Workspace of integrated data
    @param slits: positions of slits for ideal tube (in pixels)
    Return Value: Ideal tube in Y-coords for use by correctTubeToIdealTube()
    """
    # Use Pascal Manuel's Y conversion for every slit position.
    return [get_ypos(IntegratedWorkspace, slit) for slit in slits]
def correctTube( AP, BP, CP, nDets ):
    """
    Corrects position errors in a tube in the same manner as is done for MERLIN,
    following an algorithm used by Rob Bewley in his MATLAB code.

    @param AP: Fit position of left (in pixels)
    @param BP: Fit position of right (in pixels)
    @param CP: Fit position of centre (in pixels)
    @param nDets: Number of pixel detectors in tube
    Return Value: Array of corrected Xs (in pixels)
    """
    leftOffset = AP / (nDets - AP)
    rightOffset = (nDets - BP) / BP
    # First correct the centre point for the offsets.
    correctedCentre = CP - (leftOffset * (nDets - CP)) + rightOffset * CP
    # Offset-corrected position of each pixel (pixels are 1-based).
    offsetCorrected = [(i + 1.0) - ((nDets - (i + 1.0)) * leftOffset) + ((i + 1.0) * rightOffset)
                       for i in range(nDets)]
    # Gain error derived from the corrected centre position.
    gainError = ((nDets + 1) / 2.0 - correctedCentre) / (correctedCentre * (nDets - correctedCentre))
    # Final bin positions corrected for both offsets and gain.
    return [xo + (xo * (nDets - xo) * gainError) for xo in offsetCorrected]
def correctTubeToIdealTube( tubePoints, idealTubePoints, nDets, TestMode=False, polinFit=2 ):
    """
    Corrects position errors in a tube given an array of points and their ideal positions.

    :param tubePoints: Array of Slit Points along tube to be fitted (in pixels)
    :param idealTubePoints: The corresponding points in an ideal tube (Y-coords advised)
    :param nDets: Number of pixel detectors in tube
    :param TestMode: If true, detectors at the position of a slit will be moved out of the way
                     to show the reckoned slit positions when the instrument is displayed.
    :param polinFit: Order of the polynomial to fit for the ideal positions

    Return Value: Array of corrected Xs (in same units as ideal tube points)

    Note that any element of tubePoints not between 0.0 and nDets is considered a rogue point and so is ignored.
    """
    # Check the arguments
    if len(tubePoints) != len(idealTubePoints):
        print("Number of points in tube", len(tubePoints), "must equal number of points in ideal tube", len(idealTubePoints))
        # BUGFIX: this branch used to 'return xResult', a name that is not
        # defined at this point (NameError). Return an empty list to signal
        # failure, consistent with the other error paths below.
        return []
    # Filter out rogue slit points
    usedTubePoints = []
    usedIdealTubePoints = []
    missedTubePoints = []  # Used for diagnostic print only
    for i in range(len(tubePoints)):
        if tubePoints[i] > 0.0 and tubePoints[i] < nDets:
            usedTubePoints.append(tubePoints[i])
            usedIdealTubePoints.append(idealTubePoints[i])
        else:
            missedTubePoints.append(i+1)
    # State number of rogue slit points, if any
    if len(tubePoints) != len(usedTubePoints):
        print("Only", len(usedTubePoints), "out of", len(tubePoints), " slit points used. Missed", missedTubePoints)
    # Check number of usable points
    if len(usedTubePoints) < 3:
        print("Too few usable points in tube", len(usedTubePoints))
        return []
    # Fit quadratic to ideal tube points
    CreateWorkspace(dataX=usedTubePoints, dataY=usedIdealTubePoints, OutputWorkspace="PolyFittingWorkspace")
    try:
        Fit(InputWorkspace="PolyFittingWorkspace", Function='name=Polynomial,n=%d' % (polinFit),
            StartX=str(0.0), EndX=str(nDets), Output="QF")
    except Exception:
        # Catch only real errors; a bare 'except:' would also swallow
        # KeyboardInterrupt/SystemExit.
        print("Fit failed")
        return []
    paramQF = mtd['QF_Parameters']
    # Get the coefficients: the 'Value' of every row except the last one,
    # which holds the fit error (it could be used to check accuracy of fit).
    c = [r['Value'] for r in paramQF][:-1]
    # Evaluate the fitted polynomial at every pixel index
    xResult = numpy.polynomial.polynomial.polyval(range(nDets), c)
    # In test mode, shove the pixels that are closest to the reckoned peaks
    # to the position of the first detector so that the resulting gaps can be seen.
    if TestMode:
        print("TestMode code")
        for i in range(len(usedTubePoints)):
            xResult[int(usedTubePoints[i])] = xResult[0]
    return xResult
def getCalibratedPixelPositions( ws, tubePts, idealTubePts, whichTube, peakTestMode=False, polinFit=2 ):
    """
    Get the calibrated detector positions for one tube.
    The tube is specified by a list of workspace indices of its spectra.
    Calibration is assumed to be done parallel to the Y-axis.

    :param ws: Workspace with tubes to be calibrated - may be integrated or raw
    :param tubePts: Array of calibration positions (in pixels)
    :param idealTubePts: Where these calibration positions should be (in Y coords)
    :param whichTube: a list of workspace indices for the tube
    :param peakTestMode: true if shoving detectors that are reckoned to be at peak away (for test purposes)
    :param polinFit: Order of the polynomial to fit for the ideal positions

    Return Array of pixel detector IDs and array of their calibrated positions
    """
    # Arrays to be returned
    detIDs = []
    detPositions = []
    # Get position of first and last pixel of tube
    nDets = len(whichTube)
    if nDets < 1:
        return detIDs, detPositions
    # Correct positions of detectors in tube by quadratic fit
    pixels = correctTubeToIdealTube ( tubePts, idealTubePts, nDets, TestMode=peakTestMode, polinFit=polinFit )
    if len(pixels) != nDets:
        print("Tube correction failed.")
        return detIDs, detPositions
    baseInstrument = ws.getInstrument().getBaseInstrument()
    # Get tube unit vector
    # get the detector from the baseInstrument, in order to get the positions
    # before any calibration being loaded.
    det0 = baseInstrument.getDetector(ws.getDetector( whichTube[0]).getID())
    detN = baseInstrument.getDetector(ws.getDetector (whichTube[-1]).getID())
    d0pos, dNpos = det0.getPos(), detN.getPos()
    ## identical to norm of vector: |dNpos - d0pos|
    tubeLength = det0.getDistance(detN)
    if tubeLength <= 0.0:
        print("Zero length tube cannot be calibrated, calibration failed.")
        return detIDs, detPositions
    # unfortunately, the operation '/' is not defined in V3D object, so
    # we have to use the multiplication.
    # unit_vectors are defined as u = (v2-v1)/|v2-v1| = (dn-d0)/length
    unit_vector = (dNpos - d0pos) * (1.0 / tubeLength)
    # Get Centre (really want to get it from IDF to allow calibration a multiple number of times)
    center = (dNpos + d0pos) * 0.5
    # Move the pixel detectors (might not work for sloping tubes)
    for i in range(nDets):
        deti = ws.getDetector( whichTube[i])
        pNew = pixels[i]
        # again, the operation float * V3D is not defined, but V3D * float is,
        # so the new position is written as center + unit_vector * (float)
        newPos = center + unit_vector * pNew
        detIDs.append( deti.getID() )
        detPositions.append( newPos )
    return detIDs, detPositions
def readPeakFile(file_name):
    """Load the file calibration.

    It returns a list of tuples, where the first value is the detector identification
    and the second value is its calibration values.

    Example of usage:
        for (det_code, cal_values) in readPeakFile('pathname/TubeDemo'):
            print(det_code)
            print(cal_values)
    """
    loaded_file = []
    # Split the entries to the main values. For example:
    #   MERLIN/door1/tube_1_1 [34.199347724575574, 525.5864438725401, 1001.7456248836971]
    # will be split as:
    #   ['MERLIN/door1/tube_1_1', '', '34.199347724575574', '', '525.5864438725401', '', '1001.7456248836971', '', '', '']
    # Use a raw string so the backslash escapes are passed to the regex
    # engine rather than being (mis)interpreted by Python.
    pattern = re.compile(r'[\[\],\s\r]')
    saveDirectory = config['defaultsave.directory']
    pfile = os.path.join(saveDirectory, file_name)
    # A context manager guarantees the file handle is closed (the previous
    # code opened the file in the loop header and never closed it).
    with open(pfile, 'r') as peak_file:
        for line in peak_file:
            # check if the entry is a comment line
            if line.startswith('#'):
                continue
            # split all values
            line_vals = re.split(pattern, line)
            id_ = line_vals[0]
            if id_ == '':
                continue
            try:
                f_values = [float(v) for v in line_vals[1:] if v != '']
            except ValueError:
                # skip lines whose values cannot be parsed as floats
                continue
            loaded_file.append((id_, f_values))
    return loaded_file
### THESE FUNCTIONS NEXT SHOULD BE THE ONLY FUNCTIONS THE USER CALLS FROM THIS FILE
def getCalibration( ws, tubeSet, calibTable, fitPar, iTube, peaksTable,
                    overridePeaks=dict(), excludeShortTubes=0.0, plotTube=[],
                    rangeList = None, polinFit=2, peaksTestMode=False):
    """
    Get the results the calibration and put them in the calibration table provided.

    :param ws: Integrated Workspace with tubes to be calibrated
    :param tubeSet: Specification of Set of tubes to be calibrated ( :class:`~tube_spec.TubeSpec` object)
    :param calibTable: Empty calibration table into which the calibration results are placed. It is composed by 'Detector ID'
        and a V3D column 'Detector Position'. It will be filled with the IDs and calibrated positions of the detectors.
    :param fitPar: A :class:`~tube_calib_fit_params.TubeCalibFitParams` object for fitting the peaks
    :param iTube: The :class:`~ideal_tube.IdealTube` which contains the positions in metres of the shadows of the slits,
        bars or edges used for calibration.
    :param peaksTable: Peaks table into wich the peaks positions will be put
    :param overridePeaks: dictionary with tube indexes keys and an array of peaks in pixels to override those that would be
        fitted for one tube
    :param excludeShortTubes: Exclude tubes shorter than specified length from calibration
    :param plotTube: List of tube indexes that will be ploted
    :param rangeList: list of the tube indexes that will be calibrated. Default None, means all the tubes in tubeSet
    :param polinFit: Order of the polynomial to fit against the known positions. Acceptable: 2, 3
    :param peaksTestMode: true if shoving detectors that are reckoned to be at peak away (for test purposes)

    This is the main method called from :func:`~tube.calibrate` to perform the calibration.
    """
    nTubes = tubeSet.getNumTubes()
    print("Number of tubes =", nTubes)
    if rangeList is None:
        rangeList = range(nTubes)
    all_skipped = set()
    for i in rangeList:
        # Deal with (i+1)st tube specified
        wht, skipped = tubeSet.getTube(i)
        all_skipped.update(skipped)
        print("Calibrating tube", i+1, "of", nTubes, tubeSet.getTubeName(i))
        if len(wht) < 1:
            print("Unable to get any workspace indices (spectra) for this tube. Tube", tubeSet.getTubeName(i), "not calibrated.")
            # skip this tube
            continue
        # Calibrate the tube, if possible
        if tubeSet.getTubeLength(i) <= excludeShortTubes:
            # skip this tube
            continue
        ##############################
        # Define Peak Position session
        ##############################
        # if this tube is to be overridden, take the peak positions supplied
        # by the caller instead of fitting them.
        if i in overridePeaks:
            actualTube = overridePeaks[i]
        else:
            # find the peaks positions
            plotThisTube = i in plotTube
            actualTube = getPoints(ws, iTube.getFunctionalForms(), fitPar, wht, showPlot = plotThisTube)
            if plotThisTube:
                # keep per-tube copies of the plotting workspaces
                RenameWorkspace('FittedData', OutputWorkspace='FittedTube%d' % (i))
                RenameWorkspace('TubePlot', OutputWorkspace='TubePlot%d' % (i))
        # Set the peak positions at the peakTable
        peaksTable.addRow([tubeSet.getTubeName(i)] + list(actualTube))
        ##########################################
        # Define the correct position of detectors
        ##########################################
        detIDList, detPosList = getCalibratedPixelPositions(ws, actualTube, iTube.getArray(), wht, peaksTestMode, polinFit)
        # save the detector positions to calibTable
        if len(detIDList) == len(wht):  # We have corrected positions
            for j in range(len(wht)):
                nextRow = {'Detector ID': detIDList[j], 'Detector Position': detPosList[j]}
                calibTable.addRow(nextRow)
    if len(all_skipped) > 0:
        print("%i histogram(s) were excluded from the calibration since they did not have an assigned detector." % len(all_skipped))
    # Delete temporary workspaces used in the calibration
    for ws_name in ('TubePlot', 'CalibPoint_NormalisedCovarianceMatrix',
                    'CalibPoint_NormalisedCovarianceMatrix', 'CalibPoint_NormalisedCovarianceMatrix',
                    'CalibPoint_Parameters', 'CalibPoint_Workspace', 'PolyFittingWorkspace',
                    'QF_NormalisedCovarianceMatrix', 'QF_Parameters', 'QF_Workspace',
                    'Z1_Workspace', 'Z1_Parameters', 'Z1_NormalisedCovarianceMatrix'):
        try:
            DeleteWorkspace(ws_name)
        except:
            pass
def getCalibrationFromPeakFile ( ws, calibTable, iTube, PeakFile ):
    """
    Get the results the calibration and put them in the calibration table provided.

    @param ws: Integrated Workspace with tubes to be calibrated
    @param calibTable: Calibration table into which the calibration results are placed
    @param iTube: The ideal tube
    @param PeakFile: File of peaks for calibration
    """
    # Get Ideal Tube
    idealTube = iTube.getArray()
    # Read Peak File
    PeakArray = readPeakFile( PeakFile )
    nTubes = len(PeakArray)
    print("Number of tubes read from file =", nTubes)
    for i in range(nTubes):
        # Deal with (i+1)st tube got from file
        TubeName = PeakArray[i][0]  # e.g. 'MERLIN/door3/tube_3_1'
        tube = TubeSpec(ws)
        tube.setTubeSpecByString(TubeName)
        actualTube = PeakArray[i][1]  # e.g. [2.0, 512.5, 1022.0]
        wht, _ = tube.getTube(0)
        print("Calibrating tube", i+1, "of", nTubes, TubeName)
        if len(wht) < 1:
            print("Unable to get any workspace indices for this tube. Calibration abandoned.")
            return
        detIDList, detPosList = getCalibratedPixelPositions( ws, actualTube, idealTube, wht)
        if len(detIDList) == len(wht):  # We have corrected positions
            for j in range(len(wht)):
                nextRow = {'Detector ID': detIDList[j], 'Detector Position': detPosList[j]}
                calibTable.addRow ( nextRow )
    if nTubes == 0:
        return
    # Delete temporary workspaces used for getting new detector positions
    DeleteWorkspace('PolyFittingWorkspace')
    DeleteWorkspace('QF_NormalisedCovarianceMatrix')
    DeleteWorkspace('QF_Parameters')
    DeleteWorkspace('QF_Workspace')
## implement this function
def constructIdealTubeFromRealTube( ws, tube, fitPar, funcForm ):
    """
    Construct an ideal tube from an actual tube (assumed ideal).

    :param ws: integrated workspace
    :param tube: specification of one tube (if several tubes, only first tube is used)
    :param fitPar: initial fit parameters for peak of the tube
    :param funcForm: listing the type of known positions 1=Gaussian; 2=edge
    :rtype: IdealTube
    """
    # Get workspace indices
    idealTube = IdealTube()
    nTubes = tube.getNumTubes()
    if nTubes < 1:
        raise RuntimeError("Invalid tube specification received by constructIdealTubeFromRealTube")
    elif nTubes > 1:
        print("Specification has several tubes. The ideal tube will be based on the first tube", tube.getTubeName(0))
    wht, _ = tube.getTube(0)
    # Check tube
    if len(wht) < 1:
        raise RuntimeError("Unable to get any workspace indices for this tube. Cannot use as ideal tube.")
    # Get actual tube on which ideal tube is based
    actualTube = getPoints ( ws, funcForm, fitPar, wht)
    print("Actual tube that ideal tube is to be based upon", actualTube)
    # Get ideal tube based on this actual tube
    try:
        idealTube.setArray(actualTube)
    except Exception:
        # BUGFIX: catch only real errors (a bare 'except:' also swallowed
        # KeyboardInterrupt/SystemExit) and separate the message parts,
        # which previously ran together with no spacing.
        msg = ("Attempted to create ideal tube based on actual tube " + str(actualTube)
               + ". Unable to create ideal tube."
               + " Please choose another tube for constructIdealTubeFromRealTube().")
        raise RuntimeError(msg)
    return idealTube
| gpl-3.0 |
shaufi/odoo | addons/website_membership/models/membership.py | 221 | 1642 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class membership_membership_line(osv.Model):
    # Extends membership lines with a website-facing company lookup.
    _inherit = 'membership.membership_line'
    def get_published_companies(self, cr, uid, ids, limit=None, context=None):
        """Return distinct ids of website-published partner companies that
        are linked to the given membership line ids.

        :param ids: membership.membership_line ids to inspect
        :param limit: optional int cap on the number of returned partners
        :return: list of res.partner ids (may be empty)
        """
        if not ids:
            return []
        # `limit` comes from server-side callers as an int; it is formatted
        # with %d (not user-controlled string interpolation), the ids tuple
        # is passed as a bound query parameter.
        limit_clause = '' if limit is None else ' LIMIT %d' % limit
        cr.execute('SELECT DISTINCT p.id \
                    FROM res_partner p INNER JOIN membership_membership_line m \
                    ON p.id = m.partner \
                    WHERE website_published AND is_company AND m.id IN %s ' + limit_clause, (tuple(ids),))
        return [partner_id[0] for partner_id in cr.fetchall()]
| agpl-3.0 |
PriceChild/ansible | lib/ansible/modules/network/f5/bigip_pool.py | 6 | 19982 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, Matt Hite <mhite@hotmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: bigip_pool
short_description: "Manages F5 BIG-IP LTM pools"
description:
- Manages F5 BIG-IP LTM pools via iControl SOAP API
version_added: 1.2
author:
- Matt Hite (@mhite)
- Tim Rupp (@caphrim007)
notes:
- Requires BIG-IP software version >= 11
- F5 developed module 'bigsuds' required (see http://devcentral.f5.com)
- Best run as a local_action in your playbook
requirements:
- bigsuds
options:
description:
description:
- Specifies descriptive text that identifies the pool.
required: false
version_added: "2.3"
state:
description:
- Pool/pool member state
required: false
default: present
choices:
- present
- absent
aliases: []
name:
description:
- Pool name
required: true
default: null
choices: []
aliases:
- pool
partition:
description:
- Partition of pool/pool member
required: false
default: 'Common'
choices: []
aliases: []
lb_method:
description:
- Load balancing method
version_added: "1.3"
required: False
default: 'round_robin'
choices:
- round_robin
- ratio_member
- least_connection_member
- observed_member
- predictive_member
- ratio_node_address
- least_connection_node_address
- fastest_node_address
- observed_node_address
- predictive_node_address
- dynamic_ratio
- fastest_app_response
- least_sessions
- dynamic_ratio_member
- l3_addr
- weighted_least_connection_member
- weighted_least_connection_node_address
- ratio_session
- ratio_least_connection_member
- ratio_least_connection_node_address
aliases: []
monitor_type:
description:
- Monitor rule type when monitors > 1
version_added: "1.3"
required: False
default: null
choices: ['and_list', 'm_of_n']
aliases: []
quorum:
description:
- Monitor quorum value when monitor_type is m_of_n
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
monitors:
description:
- Monitor template name list. Always use the full path to the monitor.
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
slow_ramp_time:
description:
- Sets the ramp-up time (in seconds) to gradually ramp up the load on
newly added or freshly detected up pool members
version_added: "1.3"
required: False
default: null
choices: []
aliases: []
reselect_tries:
description:
- Sets the number of times the system tries to contact a pool member
after a passive failure
version_added: "2.2"
required: False
default: null
choices: []
aliases: []
service_down_action:
description:
- Sets the action to take when node goes down in pool
version_added: "1.3"
required: False
default: null
choices:
- none
- reset
- drop
- reselect
aliases: []
host:
description:
- "Pool member IP"
required: False
default: null
choices: []
aliases:
- address
port:
description:
- Pool member port
required: False
default: null
choices: []
aliases: []
extends_documentation_fragment: f5
'''
EXAMPLES = '''
- name: Create pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "least_connection_member"
slow_ramp_time: 120
delegate_to: localhost
- name: Modify load balancer method
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
lb_method: "round_robin"
- name: Add pool member
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "present"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
- name: Remove pool member from pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
host: "{{ ansible_default_ipv4['address'] }}"
port: 80
- name: Delete pool
bigip_pool:
server: "lb.mydomain.com"
user: "admin"
password: "secret"
state: "absent"
name: "my-pool"
partition: "Common"
'''
RETURN = '''
'''
def pool_exists(api, pool):
    """Return True if the named pool exists on the device.

    The iControl SOAP API offers no direct "exists" call, so we probe the
    pool's object status and interpret a "was not found" failure as absence.
    Any other API failure is re-raised.
    """
    try:
        api.LocalLB.Pool.get_object_status(pool_names=[pool])
    except bigsuds.OperationFailed as e:
        if "was not found" in str(e):
            return False
        # genuine exception
        raise
    return True
def create_pool(api, pool, lb_method):
    """Create an empty pool on the device.

    The SOAP call requires a load-balancing method, so fall back to
    'round_robin' locally without persisting that default for later runs.
    """
    chosen = lb_method or 'round_robin'
    api.LocalLB.Pool.create_v2(
        pool_names=[pool],
        lb_methods=["LB_METHOD_%s" % chosen.strip().upper()],
        members=[[]])
def remove_pool(api, pool):
    """Delete the named pool from the device."""
    api.LocalLB.Pool.delete_pool(pool_names=[pool])
def get_lb_method(api, pool):
    """Return the pool's load-balancing method as a lower-case short name."""
    lb_method = api.LocalLB.Pool.get_lb_method(pool_names=[pool])[0]
    lb_method = lb_method.strip().replace('LB_METHOD_', '').lower()
    return lb_method
def set_lb_method(api, pool, lb_method):
    """Set the pool's load-balancing method from a short name (e.g. 'round_robin')."""
    lb_method = "LB_METHOD_%s" % lb_method.strip().upper()
    api.LocalLB.Pool.set_lb_method(pool_names=[pool], lb_methods=[lb_method])
def get_monitors(api, pool):
    """Return (monitor_type, quorum, monitor_templates) for the pool's monitor rule."""
    result = api.LocalLB.Pool.get_monitor_association(pool_names=[pool])[0]['monitor_rule']
    monitor_type = result['type'].split("MONITOR_RULE_TYPE_")[-1].lower()
    quorum = result['quorum']
    monitor_templates = result['monitor_templates']
    return (monitor_type, quorum, monitor_templates)
def set_monitors(api, pool, monitor_type, quorum, monitor_templates):
    """Associate a monitor rule (type, quorum, template list) with the pool."""
    monitor_type = "MONITOR_RULE_TYPE_%s" % monitor_type.strip().upper()
    monitor_rule = {'type': monitor_type, 'quorum': quorum, 'monitor_templates': monitor_templates}
    monitor_association = {'pool_name': pool, 'monitor_rule': monitor_rule}
    api.LocalLB.Pool.set_monitor_association(monitor_associations=[monitor_association])
def get_slow_ramp_time(api, pool):
    """Return the pool's slow-ramp time in seconds."""
    result = api.LocalLB.Pool.get_slow_ramp_time(pool_names=[pool])[0]
    return result
def set_slow_ramp_time(api, pool, seconds):
    """Set the pool's slow-ramp time in seconds."""
    api.LocalLB.Pool.set_slow_ramp_time(pool_names=[pool], values=[seconds])
def get_reselect_tries(api, pool):
    """Return the pool's reselect-tries count."""
    result = api.LocalLB.Pool.get_reselect_tries(pool_names=[pool])[0]
    return result
def set_reselect_tries(api, pool, tries):
    """Set the pool's reselect-tries count."""
    api.LocalLB.Pool.set_reselect_tries(pool_names=[pool], values=[tries])
def get_action_on_service_down(api, pool):
    """Return the pool's service-down action as a lower-case short name."""
    result = api.LocalLB.Pool.get_action_on_service_down(pool_names=[pool])[0]
    result = result.split("SERVICE_DOWN_ACTION_")[-1].lower()
    return result
def set_action_on_service_down(api, pool, action):
    """Set the pool's service-down action from a short name (e.g. 'reset')."""
    action = "SERVICE_DOWN_ACTION_%s" % action.strip().upper()
    api.LocalLB.Pool.set_action_on_service_down(pool_names=[pool], actions=[action])
def member_exists(api, pool, address, port):
    """Return True if address:port is currently a member of the pool.

    There is no direct "member exists" call, so we probe the member's
    object status and treat a "was not found" failure as absence.  Any
    other API failure is re-raised.
    """
    member = {'address': address, 'port': port}
    try:
        api.LocalLB.Pool.get_member_object_status(pool_names=[pool],
                                                  members=[[member]])
    except bigsuds.OperationFailed as e:
        if "was not found" in str(e):
            return False
        # genuine exception
        raise
    return True
def delete_node_address(api, address):
    """Delete a node address; return False if it is still referenced by some pool member."""
    result = False
    try:
        api.LocalLB.NodeAddressV2.delete_node_address(nodes=[address])
        result = True
    except bigsuds.OperationFailed as e:
        if "is referenced by a member of pool" in str(e):
            result = False
        else:
            # genuine exception
            raise
    return result
def remove_pool_member(api, pool, address, port):
    """Remove the address:port member from the pool."""
    members = [{'address': address, 'port': port}]
    api.LocalLB.Pool.remove_member_v2(pool_names=[pool], members=[members])
def add_pool_member(api, pool, address, port):
    """Add address:port as a member of the pool."""
    members = [{'address': address, 'port': port}]
    api.LocalLB.Pool.add_member_v2(pool_names=[pool], members=[members])
def set_description(api, pool, description):
    """Set the pool's description text."""
    api.LocalLB.Pool.set_description(
        pool_names=[pool], descriptions=[description]
    )
def get_description(api, pool):
    """Return the pool's description text."""
    return api.LocalLB.Pool.get_description(pool_names=[pool])[0]
def main():
    """Ansible module entry point: reconcile requested BIG-IP pool state.

    Creates/deletes pools, adds/removes members and synchronises pool
    attributes (lb method, monitors, slow-ramp time, reselect tries,
    service-down action, description) via the iControl SOAP API.

    Note: port 0 is a valid member port (the range check below allows
    0-65535), so every host/port presence test must use
    ``port is not None`` rather than truthiness.
    """
    lb_method_choices = ['round_robin', 'ratio_member',
                         'least_connection_member', 'observed_member',
                         'predictive_member', 'ratio_node_address',
                         'least_connection_node_address',
                         'fastest_node_address', 'observed_node_address',
                         'predictive_node_address', 'dynamic_ratio',
                         'fastest_app_response', 'least_sessions',
                         'dynamic_ratio_member', 'l3_addr',
                         'weighted_least_connection_member',
                         'weighted_least_connection_node_address',
                         'ratio_session', 'ratio_least_connection_member',
                         'ratio_least_connection_node_address']
    monitor_type_choices = ['and_list', 'm_of_n']
    service_down_choices = ['none', 'reset', 'drop', 'reselect']
    argument_spec = f5_argument_spec()
    meta_args = dict(
        name=dict(type='str', required=True, aliases=['pool']),
        lb_method=dict(type='str', choices=lb_method_choices),
        monitor_type=dict(type='str', choices=monitor_type_choices),
        quorum=dict(type='int'),
        monitors=dict(type='list'),
        slow_ramp_time=dict(type='int'),
        reselect_tries=dict(type='int'),
        service_down_action=dict(type='str', choices=service_down_choices),
        host=dict(type='str', aliases=['address']),
        port=dict(type='int'),
        description=dict(type='str')
    )
    argument_spec.update(meta_args)
    module = AnsibleModule(
        argument_spec=argument_spec,
        supports_check_mode=True
    )
    if not bigsuds_found:
        module.fail_json(msg="the python bigsuds module is required")
    if module.params['validate_certs']:
        import ssl
        if not hasattr(ssl, 'SSLContext'):
            module.fail_json(msg='bigsuds does not support verifying certificates with python < 2.7.9. Either update python or set validate_certs=False on the task')
    server = module.params['server']
    server_port = module.params['server_port']
    user = module.params['user']
    password = module.params['password']
    state = module.params['state']
    partition = module.params['partition']
    validate_certs = module.params['validate_certs']
    description = module.params['description']
    name = module.params['name']
    pool = fq_name(partition, name)
    lb_method = module.params['lb_method']
    if lb_method:
        lb_method = lb_method.lower()
    monitor_type = module.params['monitor_type']
    if monitor_type:
        monitor_type = monitor_type.lower()
    quorum = module.params['quorum']
    monitors = module.params['monitors']
    if monitors:
        # qualify each monitor name with the partition
        monitors = []
        for monitor in module.params['monitors']:
            monitors.append(fq_name(partition, monitor))
    slow_ramp_time = module.params['slow_ramp_time']
    reselect_tries = module.params['reselect_tries']
    service_down_action = module.params['service_down_action']
    if service_down_action:
        service_down_action = service_down_action.lower()
    host = module.params['host']
    address = fq_name(partition, host)
    port = module.params['port']
    # sanity check user supplied values
    if (host and port is None) or (port is not None and not host):
        module.fail_json(msg="both host and port must be supplied")
    if port is not None and (0 > port or port > 65535):
        module.fail_json(msg="valid ports must be in range 0 - 65535")
    if monitors:
        if len(monitors) == 1:
            # set default required values for single monitor
            quorum = 0
            monitor_type = 'single'
        elif len(monitors) > 1:
            if not monitor_type:
                module.fail_json(msg="monitor_type required for monitors > 1")
            if monitor_type == 'm_of_n' and not quorum:
                module.fail_json(msg="quorum value required for monitor_type m_of_n")
            if monitor_type != 'm_of_n':
                quorum = 0
    elif monitor_type:
        # no monitors specified but monitor_type exists
        module.fail_json(msg="monitor_type requires monitors parameter")
    elif quorum is not None:
        # no monitors specified but quorum exists
        module.fail_json(msg="quorum requires monitors parameter")
    try:
        api = bigip_api(server, user, password, validate_certs, port=server_port)
        result = {'changed': False}  # default
        if state == 'absent':
            # BUG FIX: this was "host and port and pool", which skipped the
            # member-removal branch for the valid member port 0 and fell
            # through to deleting the WHOLE pool instead of just the member.
            if host and port is not None and pool:
                # member removal takes precedent
                if pool_exists(api, pool) and member_exists(api, pool, address, port):
                    if not module.check_mode:
                        remove_pool_member(api, pool, address, port)
                        deleted = delete_node_address(api, address)
                        result = {'changed': True, 'deleted': deleted}
                    else:
                        result = {'changed': True}
            elif pool_exists(api, pool):
                # no host/port supplied, must be pool removal
                if not module.check_mode:
                    # hack to handle concurrent runs of module
                    # pool might be gone before we actually remove it
                    try:
                        remove_pool(api, pool)
                        result = {'changed': True}
                    except bigsuds.OperationFailed as e:
                        if "was not found" in str(e):
                            result = {'changed': False}
                        else:
                            # genuine exception
                            raise
                else:
                    # check-mode return value
                    result = {'changed': True}
        elif state == 'present':
            update = False
            if not pool_exists(api, pool):
                # pool does not exist -- need to create it
                if not module.check_mode:
                    # a bit of a hack to handle concurrent runs of this module.
                    # even though we've checked the pool doesn't exist,
                    # it may exist by the time we run create_pool().
                    # this catches the exception and does something smart
                    # about it!
                    try:
                        create_pool(api, pool, lb_method)
                        result = {'changed': True}
                    except bigsuds.OperationFailed as e:
                        if "already exists" in str(e):
                            update = True
                        else:
                            # genuine exception
                            raise
                    else:
                        if monitors:
                            set_monitors(api, pool, monitor_type, quorum, monitors)
                        if slow_ramp_time:
                            set_slow_ramp_time(api, pool, slow_ramp_time)
                        if reselect_tries:
                            set_reselect_tries(api, pool, reselect_tries)
                        if service_down_action:
                            set_action_on_service_down(api, pool, service_down_action)
                        # BUG FIX: "port is not None" so a port-0 member is
                        # also added at creation time (was "host and port")
                        if host and port is not None:
                            add_pool_member(api, pool, address, port)
                        if description:
                            set_description(api, pool, description)
                else:
                    # check-mode return value
                    result = {'changed': True}
            else:
                # pool exists -- potentially modify attributes
                update = True
            if update:
                if lb_method and lb_method != get_lb_method(api, pool):
                    if not module.check_mode:
                        set_lb_method(api, pool, lb_method)
                    result = {'changed': True}
                if monitors:
                    t_monitor_type, t_quorum, t_monitor_templates = get_monitors(api, pool)
                    if (t_monitor_type != monitor_type) or (t_quorum != quorum) or (set(t_monitor_templates) != set(monitors)):
                        if not module.check_mode:
                            set_monitors(api, pool, monitor_type, quorum, monitors)
                        result = {'changed': True}
                if slow_ramp_time and slow_ramp_time != get_slow_ramp_time(api, pool):
                    if not module.check_mode:
                        set_slow_ramp_time(api, pool, slow_ramp_time)
                    result = {'changed': True}
                if reselect_tries and reselect_tries != get_reselect_tries(api, pool):
                    if not module.check_mode:
                        set_reselect_tries(api, pool, reselect_tries)
                    result = {'changed': True}
                if service_down_action and service_down_action != get_action_on_service_down(api, pool):
                    if not module.check_mode:
                        set_action_on_service_down(api, pool, service_down_action)
                    result = {'changed': True}
                # BUG FIX: merged the former duplicated member checks for
                # "port" (truthy, i.e. != 0) and "port == 0" into a single
                # "port is not None" test covering both cases
                if (host and port is not None) and not member_exists(api, pool, address, port):
                    if not module.check_mode:
                        add_pool_member(api, pool, address, port)
                    result = {'changed': True}
                if description and description != get_description(api, pool):
                    if not module.check_mode:
                        set_description(api, pool, description)
                    result = {'changed': True}
    except Exception as e:
        module.fail_json(msg="received exception: %s" % e)
    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.f5_utils import *
if __name__ == '__main__':
main()
| gpl-3.0 |
sestrella/ansible | packaging/release/versionhelper/version_helper.py | 124 | 6838 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import argparse
import os
import re
import sys
from packaging.version import Version, VERSION_PATTERN
class AnsibleVersionMunger(object):
    """Translate an Ansible PEP 440 version string into deb/rpm packaging fields."""

    # release-stage offsets used to build monotonically increasing rpm
    # pre-release numbers: dev < alpha < beta < rc
    tag_offsets = dict(
        dev=0,
        a=100,
        b=200,
        rc=1000
    )

    # TODO: allow overrides here for packaging bump etc
    def __init__(self, raw_version, revision=None, codename=None):
        self._raw_version = raw_version
        self._revision = revision
        self._parsed_version = Version(raw_version)
        self._codename = codename
        self._parsed_regex_match = re.match(VERSION_PATTERN, raw_version, re.VERBOSE | re.IGNORECASE)

    @property
    def deb_version(self):
        """Debian upstream version: '<base>' or '<base>~<prerelease tag>'."""
        v = self._parsed_version
        match = self._parsed_regex_match
        # treat dev/post as prerelease for now; treat dev/post as equivalent and disallow together
        if v.is_prerelease or match.group('dev') or match.group('post'):
            if match.group('dev') and match.group('post'):
                raise Exception("dev and post may not currently be used together")
            if match.group('pre'):
                tag_value = match.group('pre')
                tag_type = match.group('pre_l')
                if match.group('dev'):
                    tag_value += ('~%s' % match.group('dev').strip('.'))
                if match.group('post'):
                    tag_value += ('~%s' % match.group('post').strip('.'))
            elif match.group('dev'):
                tag_type = "dev"
                tag_value = match.group('dev').strip('.')
            elif match.group('post'):
                tag_type = "dev"
                tag_value = match.group('post').strip('.')
            else:
                raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
        else:
            tag_type = None
            tag_value = ''
        # not a pre/post/dev release, just return base version
        if not tag_type:
            return '{base_version}'.format(base_version=self.base_version)
        # it is a pre/dev release, include the tag value with a ~
        return '{base_version}~{tag_value}'.format(base_version=self.base_version, tag_value=tag_value)

    @property
    def deb_release(self):
        """Debian package release number (defaults to '1')."""
        return '1' if self._revision is None else str(self._revision)

    @property
    def rpm_release(self):
        """RPM release field; pre-releases get '0.<offset+n>.<tag>' so they sort below '1'."""
        v = self._parsed_version
        match = self._parsed_regex_match
        # treat presence of dev/post as prerelease for now; treat dev/post the same and disallow together
        if v.is_prerelease or match.group('dev') or match.group('post'):
            if match.group('dev') and match.group('post'):
                raise Exception("dev and post may not currently be used together")
            if match.group('pre'):
                tag_value = match.group('pre')
                tag_type = match.group('pre_l')
                tag_ver = match.group('pre_n')
                if match.group('dev'):
                    tag_value += match.group('dev')
                if match.group('post'):
                    tag_value += match.group('post')
            elif match.group('dev'):
                tag_type = "dev"
                tag_value = match.group('dev')
                tag_ver = match.group('dev_n')
            elif match.group('post'):
                tag_type = "dev"
                tag_value = match.group('post')
                tag_ver = match.group('post_n')
            else:
                raise Exception("unknown prerelease type for version {0}".format(self._raw_version))
        else:
            tag_type = None
            tag_value = ''
            tag_ver = 0
        # not a pre/post/dev release, just append revision (default 1)
        # NOTE(review): this mutates self._revision from a property getter,
        # which later calls observe — preserved as-is for compatibility
        if not tag_type:
            if self._revision is None:
                self._revision = 1
            return '{revision}'.format(revision=self._revision)
        # cleanse tag value in case it starts with .
        tag_value = tag_value.strip('.')
        # coerce to int and None == 0
        tag_ver = int(tag_ver if tag_ver else 0)
        if self._revision is None:
            tag_offset = self.tag_offsets.get(tag_type)
            if tag_offset is None:
                raise Exception('no tag offset defined for tag {0}'.format(tag_type))
            pkgrel = '0.{0}'.format(tag_offset + tag_ver)
        else:
            pkgrel = self._revision
        return '{pkgrel}.{tag_value}'.format(pkgrel=pkgrel, tag_value=tag_value)

    @property
    def raw(self):
        """The version string exactly as supplied."""
        return self._raw_version

    # return the x.y.z version without any other modifiers present
    @property
    def base_version(self):
        return self._parsed_version.base_version

    # return the x.y version without any other modifiers present
    @property
    def major_version(self):
        # BUG FIX: the dot was previously unescaped (r'^(\d+.\d+)'), letting
        # '.' match any character between the digit runs
        return re.match(r'^(\d+\.\d+)', self._raw_version).group(1)

    @property
    def codename(self):
        return self._codename if self._codename else "UNKNOWN"
def main():
    """CLI entry point: parse one output-selector flag and print the
    corresponding representation of the in-tree Ansible version."""
    parser = argparse.ArgumentParser(description='Extract/transform Ansible versions to various packaging formats')
    # exactly one output selector must be chosen
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--raw', action='store_true')
    group.add_argument('--majorversion', action='store_true')
    group.add_argument('--baseversion', action='store_true')
    group.add_argument('--debversion', action='store_true')
    group.add_argument('--debrelease', action='store_true')
    group.add_argument('--rpmrelease', action='store_true')
    group.add_argument('--codename', action='store_true')
    group.add_argument('--all', action='store_true')
    parser.add_argument('--revision', action='store', default='auto')
    args = parser.parse_args()
    # make the in-tree 'ansible' package importable without installing it
    mydir = os.path.dirname(__file__)
    release_loc = os.path.normpath(mydir + '/../../../lib')
    sys.path.insert(0, release_loc)
    from ansible import release
    rev = None
    if args.revision != 'auto':
        rev = args.revision
    v_raw = release.__version__
    codename = release.__codename__
    v = AnsibleVersionMunger(v_raw, revision=rev, codename=codename)
    if args.raw:
        print(v.raw)
    elif args.baseversion:
        print(v.base_version)
    elif args.majorversion:
        print(v.major_version)
    elif args.debversion:
        print(v.deb_version)
    elif args.debrelease:
        print(v.deb_release)
    elif args.rpmrelease:
        print(v.rpm_release)
    elif args.codename:
        print(v.codename)
    elif args.all:
        # reflect over the class so newly added @property fields show up here
        props = [name for (name, impl) in vars(AnsibleVersionMunger).items() if isinstance(impl, property)]
        for propname in props:
            print('{0}: {1}'.format(propname, getattr(v, propname)))
| gpl-3.0 |
helix84/activae | qa/stress_queue.py | 1 | 3336 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2010 CENATIC: Centro Nacional de Referencia de
# Aplicacion de las TIC basadas en Fuentes Abiertas, Spain.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# Neither the name of the CENATIC nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You may contact the copyright holder at: Fundacion CENATIC, Edificio
# de Servicios Sociales: C/ Vistahermosa, 1, 3ra planta, 06200
# Almendralejo (Badajoz), Spain
MEDIA_PATH = '/opt/activae/src/static/private/assets'
FORMATS = ['mp3', 'ogg', 'avi', 'mp4', 'flv', 'h264']
TEST_PATH = '/tmp/activae_queue_stressing'
QUEUE_SERVER = "127.0.0.1"
QUEUE_PORT = 8001
import os
import errno
from xmlrpclib import ServerProxy
import mimetypes
def mkdir_p(path):
    """Create *path* and any missing parents, like ``mkdir -p``.

    Succeeds silently if the directory already exists; re-raises any
    other OSError (permissions, a regular file in the way, ...).
    """
    try:
        # BUG FIX: mode was the decimal literal 777 (== octal 1411); the
        # intent is rwxrwxrwx, i.e. octal 0o777 (still subject to umask)
        os.makedirs(path, mode=0o777)
    except OSError as exc:  # Python >2.5
        # only swallow EEXIST when the path really is a directory
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
def main():
    """Walk MEDIA_PATH and, for every audio/video file found, queue a
    conversion into each of FORMATS on the XML-RPC queue server
    (stress-tests the conversion queue)."""
    mkdir_p (TEST_PATH)
    # Dispatcher
    http = "http://%s:%s/" % (QUEUE_SERVER, QUEUE_PORT)
    client = ServerProxy (http)
    for media in os.listdir(MEDIA_PATH):
        source = '%s/%s' %(MEDIA_PATH,media)
        if os.path.isfile(source):
            # only queue files whose MIME type is recognisably audio/video
            mimetype, encoding = mimetypes.guess_type (source)
            if not mimetype:
                continue
            type_name = mimetype.split('/')[0]
            if type_name not in ['video', 'audio']:
                continue
            convert = client.ConvertMedia
            # NOTE(review): 'thumb' is fetched but never used below — confirm intent
            thumb = client.BuildThumbnailMedia
            name = os.path.basename (source)
            for format in FORMATS:
                target = '%s/%s.%s' % (TEST_PATH, name, format)
                # a falsy task id means the server refused/failed the job
                task_id = convert (source, target, format)
                if not task_id:
                    raise ValueError, 'Fail: %s->%s' %(source,target)
                else:
                    print 'OK (%s): %s->%s' %(task_id,source,target)
if __name__ == "__main__":
main()
| bsd-3-clause |
stackforge/python-solumclient | solumclient/tests/common/test_github.py | 1 | 6655 | # Copyright (c) 2015 Rackspace
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from solumclient.common import github
from solumclient.tests import base
class TestGitHubAuth(base.TestCase):
    """Unit tests for solumclient.common.github.GitHubAuth.

    Covers auth-header construction (basic auth, 2FA one-time password,
    repo token), webhook creation with optional workflow filters, and
    SSH key upload.  All GitHub HTTP traffic is stubbed with mock.
    """
    # shared fixtures used across the tests below
    fake_repo = "http://github.com/fakeuser/fakerepo.git"
    fake_trigger = "http://example.com/trigger/1"
    fake_username = 'fakeuser'
    fake_password = 'fakepassword'
    fake_token = 'faketoken'
    def test_invalid_repo(self):
        # a URL that is not a GitHub repository must be rejected at construction
        self.assertRaises(ValueError,
                          github.GitHubAuth,
                          "http://example.com")
    def test_auth_header_username_password(self):
        gha = github.GitHubAuth(self.fake_repo,
                                username=self.fake_username,
                                password=self.fake_password)
        # base64.b64encode('fakeuser:fakepassword') yields 'ZmFrZX...'
        expected_auth_header = {
            'Content-Type': 'application/json',
            'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
        }
        self.assertEqual(expected_auth_header, gha.auth_header)
    @mock.patch('getpass.getpass')
    def test_auth_header_username_password_2fa(self, fake_getpass):
        # with two-factor enabled, the one-time password is read via getpass
        # and sent in the x-github-otp header
        gha = github.GitHubAuth(self.fake_repo,
                                username=self.fake_username,
                                password=self.fake_password)
        gha._otp_required = True
        fake_getpass.return_value = 'fakeonetime'
        expected_auth_header = {
            'Content-Type': 'application/json',
            'Authorization': 'Basic ZmFrZXVzZXI6ZmFrZXBhc3N3b3Jk',
            'x-github-otp': 'fakeonetime',
        }
        self.assertEqual(expected_auth_header, gha.auth_header)
    def test_auth_header_repo_token(self):
        gha = github.GitHubAuth(self.fake_repo,
                                repo_token=self.fake_token)
        expected_auth_header = {
            'Content-Type': 'application/json',
            'Authorization': 'token %s' % self.fake_token,
        }
        self.assertEqual(expected_auth_header, gha.auth_header)
    @mock.patch('httplib2.Http.request')
    def test_create_webhook(self, fake_request):
        # default webhook: trigger URL used verbatim, no workflow filter
        gha = github.GitHubAuth(self.fake_repo,
                                repo_token=self.fake_token)
        fake_request.return_value = ({'status': '200'},
                                     '{"token": "%s"}' % self.fake_token)
        gha.create_repo_token = mock.MagicMock()
        gha.create_repo_token.return_value = 'token123'
        gha.create_webhook(self.fake_trigger)
        fake_request.assert_called_once_with(
            'https://api.github.com/repos/fakeuser/fakerepo/hooks',
            'POST',
            headers=mock.ANY,
            body=mock.ANY)
        expected_body = {
            "config": {
                "url": self.fake_trigger,
                "content_type": "json"},
            "name": "web",
            "events": ["pull_request", "commit_comment"]}
        actual_body = json.loads(fake_request.call_args[1]['body'])
        self.assertEqual(expected_body, actual_body)
    @mock.patch('httplib2.Http.request')
    def test_create_webhook_unittest_only(self, fake_request):
        # a single workflow item is appended as ?workflow=<item>
        gha = github.GitHubAuth(self.fake_repo,
                                username=self.fake_username,
                                password=self.fake_password)
        fake_request.return_value = ({'status': '200'},
                                     '{"token": "foo"}')
        gha.create_repo_token = mock.MagicMock()
        gha.create_repo_token.return_value = 'token123'
        gha.create_webhook(self.fake_trigger, workflow=['unittest'])
        fake_request.assert_called_once_with(
            'https://api.github.com/repos/fakeuser/fakerepo/hooks',
            'POST',
            headers=mock.ANY,
            body=mock.ANY)
        expected_body = {
            "config": {
                "url": self.fake_trigger + "?workflow=unittest",
                "content_type": "json"},
            "name": "web",
            "events": ["pull_request", "commit_comment"]}
        actual_body = json.loads(fake_request.call_args[1]['body'])
        self.assertEqual(expected_body, actual_body)
    @mock.patch('httplib2.Http.request')
    def test_create_webhook_unittest_build(self, fake_request):
        # multiple workflow items are '+'-joined in the query string
        gha = github.GitHubAuth(self.fake_repo,
                                username=self.fake_username,
                                password=self.fake_password)
        fake_request.return_value = ({'status': '200'},
                                     '{"token": "foo"}')
        gha.create_repo_token = mock.MagicMock()
        gha.create_repo_token.return_value = 'token123'
        gha.create_webhook(self.fake_trigger, workflow=['unittest', 'build'])
        fake_request.assert_called_once_with(
            'https://api.github.com/repos/fakeuser/fakerepo/hooks',
            'POST',
            headers=mock.ANY,
            body=mock.ANY)
        expected_body = {
            "config": {
                "url": self.fake_trigger + "?workflow=unittest+build",
                "content_type": "json"},
            "name": "web",
            "events": ["pull_request", "commit_comment"]}
        actual_body = json.loads(fake_request.call_args[1]['body'])
        self.assertEqual(expected_body, actual_body)
    @mock.patch('httplib2.Http.request')
    def test_add_ssh_key(self, fake_request):
        # the public key is uploaded with a fixed descriptive title
        gha = github.GitHubAuth(self.fake_repo,
                                username=self.fake_username,
                                password=self.fake_password)
        fake_request.return_value = ({'status': '200'},
                                     '{"token": "foo"}')
        fake_pub_key = 'foo'
        gha.add_ssh_key(public_key=fake_pub_key)
        fake_request.assert_called_once_with(
            'https://api.github.com/user/keys',
            'POST',
            headers=mock.ANY,
            body=mock.ANY)
        expected_body = {"key": "foo", "title": "devops@Solum"}
        actual_body = json.loads(fake_request.call_args[1]['body'])
        self.assertEqual(expected_body, actual_body)
| apache-2.0 |
arielmakestuff/loadlimit | test/unit/cli/test_loadlimitformatter.py | 1 | 1621 | # -*- coding: utf-8 -*-
# test/unit/cli/test_loadlimitformatter.py
# Copyright (C) 2016 authors and contributors (see AUTHORS file)
#
# This module is released under the MIT License.
"""Test LoadLimitFormatter"""
# ============================================================================
# Imports
# ============================================================================
# Stdlib imports
import asyncio
import logging
# Third-party imports
# Local imports
from loadlimit.cli import LoadLimitFormatter, PROGNAME
import loadlimit.channel as channel
from loadlimit.core import BaseLoop
# ============================================================================
# Tests
# ============================================================================
def test_no_datefmt(testloop, caplog):
    """Use default format"""
    # capture WARNING+ records emitted through the loadlimit logger
    caplog.set_level(logging.WARNING)
    async def run(logger):
        """Log one message, then ask the main loop to shut down."""
        logger.warning('hello world')
        await channel.shutdown.send(0)
    with BaseLoop() as main:
        logger = main.logger
        # install LoadLimitFormatter with {}-style formatting and no datefmt
        main.initlogging(fmtcls=LoadLimitFormatter,
                         style='{',
                         format='{asctime} {message}')
        asyncio.ensure_future(run(logger))
        main.start()
    # exactly one record, attributed to the loadlimit logger at WARNING level
    assert len(caplog.records) == 1
    record = caplog.records[0]
    assert record.name == PROGNAME
    assert record.levelname == 'WARNING'
    assert record.message.endswith('hello world')
# ============================================================================
#
# ============================================================================
| mit |
nanditav/15712-TensorFlow | tensorflow/python/training/ftrl_test.py | 12 | 11148 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for Ftrl operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
class FtrlOptimizerTest(tf.test.TestCase):
  """Functional tests for tf.train.FtrlOptimizer (dense and sparse updates).

  Expected values below are precomputed reference results for the exact
  hyper-parameters and step counts used in each test.
  """
  def testFtrlwithoutRegularization(self):
    """No L1/L2 regularization; variables start at zero."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([0.0, 0.0], dtype=dtype)
        var1 = tf.Variable([0.0, 0.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)
        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.0,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        v0_val, v1_val = sess.run([var0, var1])
        # Sanity-check the initial values before any update runs.
        self.assertAllClose([0.0, 0.0], v0_val)
        self.assertAllClose([0.0, 0.0], v1_val)
        # Run 3 steps FTRL
        for _ in range(3):
          update.run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-2.60260963, -4.29698515]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.28432083, -0.56694895]),
                                           v1_val)
  def testFtrlwithoutRegularization2(self):
    """No L1/L2 regularization; variables start at non-zero values."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([4.0, 3.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)
        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.0,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
        # Run 3 steps FTRL
        for _ in range(3):
          update.run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-2.55607247, -3.98729396]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.28232238, -0.56096673]),
                                           v1_val)
  def testFtrlWithL1(self):
    """L1 regularization only, over 10 steps."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([4.0, 3.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)
        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.001,
                                     l2_regularization_strength=0.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
        # Run 10 steps FTRL
        for _ in range(10):
          update.run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(
            np.array([-7.66718769, -10.91273689]),
            v0_val)
        self.assertAllCloseAccordingToType(
            np.array([-0.93460727, -1.86147261]),
            v1_val)
  def testFtrlWithL1_L2(self):
    """Both L1 and L2 regularization, over 10 steps."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session() as sess:
        var0 = tf.Variable([1.0, 2.0], dtype=dtype)
        var1 = tf.Variable([4.0, 3.0], dtype=dtype)
        grads0 = tf.constant([0.1, 0.2], dtype=dtype)
        grads1 = tf.constant([0.01, 0.02], dtype=dtype)
        opt = tf.train.FtrlOptimizer(3.0,
                                     initial_accumulator_value=0.1,
                                     l1_regularization_strength=0.001,
                                     l2_regularization_strength=2.0)
        update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
        tf.global_variables_initializer().run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType([1.0, 2.0], v0_val)
        self.assertAllCloseAccordingToType([4.0, 3.0], v1_val)
        # Run 10 steps FTRL
        for _ in range(10):
          update.run()
        v0_val, v1_val = sess.run([var0, var1])
        self.assertAllCloseAccordingToType(np.array([-0.24059935, -0.46829352]),
                                           v0_val)
        self.assertAllCloseAccordingToType(np.array([-0.02406147, -0.04830509]),
                                           v1_val)
  def applyOptimizer(self, opt, dtype, steps=5, is_sparse=False):
    """Run `opt` for `steps` on two fixed-gradient variables.

    Must be called inside an open default session. Variables start at zero
    so optimizer equivalences (see comment below) hold. Returns the final
    values of both variables.
    """
    if is_sparse:
      var0 = tf.Variable([[0.0], [0.0]], dtype=dtype)
      var1 = tf.Variable([[0.0], [0.0]], dtype=dtype)
      grads0 = tf.IndexedSlices(tf.constant([0.1], shape=[1, 1], dtype=dtype),
                                tf.constant([0]),
                                tf.constant([2, 1]))
      grads1 = tf.IndexedSlices(tf.constant([0.02], shape=[1, 1], dtype=dtype),
                                tf.constant([1]),
                                tf.constant([2, 1]))
    else:
      var0 = tf.Variable([0.0, 0.0], dtype=dtype)
      var1 = tf.Variable([0.0, 0.0], dtype=dtype)
      grads0 = tf.constant([0.1, 0.2], dtype=dtype)
      grads1 = tf.constant([0.01, 0.02], dtype=dtype)
    update = opt.apply_gradients(zip([grads0, grads1], [var0, var1]))
    tf.global_variables_initializer().run()
    sess = tf.get_default_session()
    v0_val, v1_val = sess.run([var0, var1])
    if is_sparse:
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v0_val)
      self.assertAllCloseAccordingToType([[0.0], [0.0]], v1_val)
    else:
      self.assertAllCloseAccordingToType([0.0, 0.0], v0_val)
      self.assertAllCloseAccordingToType([0.0, 0.0], v1_val)
    # Run Ftrl for a few steps
    for _ in range(steps):
      update.run()
    v0_val, v1_val = sess.run([var0, var1])
    return v0_val, v1_val
  # When variables are initialized with Zero, FTRL-Proximal has two properties:
  # 1. Without L1&L2 but with fixed learning rate, FTRL-Proximal is identical
  # with GradientDescent.
  # 2. Without L1&L2 but with adaptive learning rate, FTRL-Proximal is identical
  # with Adagrad.
  # So, basing on these two properties, we test if our implementation of
  # FTRL-Proximal performs same updates as Adagrad or GradientDescent.
  def testEquivAdagradwithoutRegularization(self):
    """FTRL with learning_rate_power=-0.5 matches Adagrad (dense)."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Adagrad learning rate
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype)
      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
            dtype)
      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3)
  def testEquivSparseAdagradwithoutRegularization(self):
    """FTRL with learning_rate_power=-0.5 matches Adagrad (sparse)."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Adagrad learning rate
                                   learning_rate_power=-0.5,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype,
            is_sparse=True)
      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.AdagradOptimizer(3.0, initial_accumulator_value=0.1),
            dtype, is_sparse=True)
      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3)
  def testEquivSparseGradientDescentwithoutRegularization(self):
    """FTRL with learning_rate_power=0 matches GradientDescent (sparse)."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Fixed learning rate
                                   learning_rate_power=-0.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype,
            is_sparse=True)
      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.GradientDescentOptimizer(3.0), dtype, is_sparse=True)
      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3)
  def testEquivGradientDescentwithoutRegularization(self):
    """FTRL with learning_rate_power=0 matches GradientDescent (dense)."""
    for dtype in [tf.half, tf.float32]:
      with self.test_session():
        val0, val1 = self.applyOptimizer(
            tf.train.FtrlOptimizer(3.0,
                                   # Fixed learning rate
                                   learning_rate_power=-0.0,
                                   initial_accumulator_value=0.1,
                                   l1_regularization_strength=0.0,
                                   l2_regularization_strength=0.0),
            dtype)
      with self.test_session():
        val2, val3 = self.applyOptimizer(
            tf.train.GradientDescentOptimizer(3.0), dtype)
      self.assertAllCloseAccordingToType(val0, val2)
      self.assertAllCloseAccordingToType(val1, val3)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
manazhao/tf_recsys | tensorflow/python/kernel_tests/bcast_ops_test.py | 102 | 4542 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.kernels.bcast_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.ops.gen_array_ops import _broadcast_args
from tensorflow.python.ops.gen_array_ops import _broadcast_gradient_args
from tensorflow.python.platform import test
class BcastOpsTest(test.TestCase):
  """Tests for the BroadcastArgs / BroadcastGradientArgs kernels."""

  def _GetBroadcastShape(self, xs, ys):
    """Evaluate and return the broadcast shape of shapes `xs` and `ys`."""
    with self.test_session() as sess:
      return sess.run(_broadcast_args(xs, ys))

  def _GetGradientArgs(self, xs, ys):
    """Evaluate and return the gradient reduction indices for `xs` and `ys`."""
    with self.test_session() as sess:
      return sess.run(_broadcast_gradient_args(xs, ys))

  def testBasic(self):
    """Every shape pair here broadcasts to [2, 3, 5]."""
    shape_pairs = [
        ([2, 3, 5], [1]),
        ([1], [2, 3, 5]),
        ([2, 3, 5], [5]),
        ([5], [2, 3, 5]),
        ([2, 3, 5], [3, 5]),
        ([3, 5], [2, 3, 5]),
        ([2, 3, 5], [3, 1]),
        ([3, 1], [2, 3, 5]),
        ([2, 1, 5], [3, 1]),
        ([3, 1], [2, 1, 5]),
    ]
    for xs, ys in shape_pairs:
      self.assertAllEqual(self._GetBroadcastShape(xs, ys), [2, 3, 5])

  def testBasicGradient(self):
    """Reduction indices for gradients of each broadcast pair."""
    cases = [
        # (xs, ys, expected_r0, expected_r1)
        ([2, 3, 5], [1], [], [0, 1, 2]),
        ([1], [2, 3, 5], [0, 1, 2], []),
        ([2, 3, 5], [5], [], [0, 1]),
        ([5], [2, 3, 5], [0, 1], []),
        ([2, 3, 5], [3, 5], [], [0]),
        ([3, 5], [2, 3, 5], [0], []),
        ([2, 3, 5], [3, 1], [], [0, 2]),
        ([3, 1], [2, 3, 5], [0, 2], []),
        ([2, 1, 5], [3, 1], [1], [0, 2]),
        ([3, 1], [2, 1, 5], [0, 2], [1]),
    ]
    for xs, ys, expected_r0, expected_r1 in cases:
      r0, r1 = self._GetGradientArgs(xs, ys)
      self.assertAllEqual(r0, expected_r0)
      self.assertAllEqual(r1, expected_r1)

  def testZeroDims(self):
    """Broadcasting still works when some dimensions are zero."""
    shape_pairs = [
        ([2, 0, 3, 0, 5], [3, 0, 5]),
        ([3, 0, 5], [2, 0, 3, 0, 5]),
        ([2, 0, 3, 0, 5], [3, 1, 5]),
        ([3, 1, 5], [2, 0, 3, 0, 5]),
    ]
    for xs, ys in shape_pairs:
      self.assertAllEqual(self._GetBroadcastShape(xs, ys), [2, 0, 3, 0, 5])

  def testZeroDimsGradient(self):
    """Gradient reduction indices in the presence of zero dimensions."""
    cases = [
        # (xs, ys, expected_r0, expected_r1)
        ([2, 0, 3, 0, 5], [3, 0, 5], [], [0, 1]),
        ([3, 0, 5], [2, 0, 3, 0, 5], [0, 1], []),
        ([2, 0, 3, 0, 5], [3, 1, 5], [], [0, 1, 3]),
        ([3, 1, 5], [2, 0, 3, 0, 5], [0, 1, 3], []),
    ]
    for xs, ys, expected_r0, expected_r1 in cases:
      r0, r1 = self._GetGradientArgs(xs, ys)
      self.assertAllEqual(r0, expected_r0)
      self.assertAllEqual(r1, expected_r1)
if __name__ == "__main__":
test.main()
| apache-2.0 |
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/marketplace/tests/test_service_providers.py | 1 | 12107 | from ddt import data, ddt
from django.conf import settings
from django.core import mail
from rest_framework import status, test
from waldur_core.structure.tests import factories as structure_factories
from waldur_core.structure.tests import fixtures
from waldur_mastermind.marketplace import models, tasks, utils
from waldur_mastermind.marketplace.tests.helpers import override_marketplace_settings
from . import factories
@ddt
class ServiceProviderGetTest(test.APITransactionTestCase):
    """Read access to the service-provider endpoints.

    Listing is open to every authenticated user; the API secret code is
    restricted to staff and the customer's owner.
    """
    def setUp(self):
        self.fixture = fixtures.ProjectFixture()
        self.service_provider = factories.ServiceProviderFactory(
            customer=self.fixture.customer
        )
    @data('staff', 'owner', 'user', 'customer_support', 'admin', 'manager')
    def test_service_provider_should_be_visible_to_all_authenticated_users(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_list_url()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), 1)
    def test_service_provider_should_be_invisible_to_unauthenticated_users(self):
        url = factories.ServiceProviderFactory.get_list_url()
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
    @data('staff', 'owner')
    def test_service_provider_api_secret_code_is_visible(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_url(
            self.service_provider, 'api_secret_code'
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertTrue('api_secret_code' in response.data.keys())
    @data('user', 'customer_support', 'admin', 'manager')
    def test_service_provider_api_secret_code_is_invisible(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_url(
            self.service_provider, 'api_secret_code'
        )
        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class ServiceProviderRegisterTest(test.APITransactionTestCase):
    """Registration permissions for service providers.

    Staff can always register; owners only when the
    OWNER_CAN_REGISTER_SERVICE_PROVIDER marketplace setting is enabled.
    """
    def setUp(self):
        self.fixture = fixtures.ProjectFixture()
        self.customer = self.fixture.customer
    @data('staff')
    def test_staff_can_register_a_service_provider(self, user):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
        self.assertTrue(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    @data('user', 'customer_support', 'admin', 'manager')
    def test_unauthorized_user_can_not_register_an_service_provider(self, user):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    @override_marketplace_settings(OWNER_CAN_REGISTER_SERVICE_PROVIDER=True)
    @data('owner')
    def test_owner_can_register_service_provider_with_settings_enabled(self, user):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_201_CREATED)
    @override_marketplace_settings(OWNER_CAN_REGISTER_SERVICE_PROVIDER=True)
    @data('user', 'customer_support', 'admin', 'manager')
    def test_unauthorized_user_can_not_register_service_provider_with_settings_enabled(
        self, user
    ):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    @data('owner')
    def test_owner_can_not_register_service_provider_with_settings_disabled(self, user):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    @data('user', 'customer_support', 'admin', 'manager')
    def test_unauthorized_user_can_not_register_service_provider_with_settings_disabled(
        self, user
    ):
        response = self.create_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def create_service_provider(self, user):
        """POST a new service provider as the named fixture user; return the response."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_list_url()
        payload = {
            'customer': structure_factories.CustomerFactory.get_url(self.customer),
        }
        return self.client.post(url, payload)
@ddt
class ServiceProviderUpdateTest(test.APITransactionTestCase):
    """Update permissions: only staff and owners may patch a provider or
    regenerate its API secret code."""
    def setUp(self):
        self.fixture = fixtures.ProjectFixture()
        self.customer = self.fixture.customer
    @data('staff', 'owner')
    def test_authorized_user_can_update_service_provider(self, user):
        response, service_provider = self.update_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
        self.assertFalse(service_provider.enable_notifications)
        self.assertTrue(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    @data('user', 'customer_support', 'admin', 'manager')
    def test_unauthorized_user_can_not_update_service_provider(self, user):
        response, service_provider = self.update_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def update_service_provider(self, user, payload=None):
        """PATCH a fresh service provider as the named fixture user.

        Returns the response together with the refreshed model instance.
        """
        if not payload:
            payload = {'enable_notifications': False}
        service_provider = factories.ServiceProviderFactory(customer=self.customer)
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_url(service_provider)
        response = self.client.patch(url, payload)
        service_provider.refresh_from_db()
        return response, service_provider
    @data('staff', 'owner')
    def test_generate_api_secret_code(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        service_provider = factories.ServiceProviderFactory(customer=self.customer)
        url = factories.ServiceProviderFactory.get_url(
            service_provider, 'api_secret_code'
        )
        old_secret_code = service_provider.api_secret_code
        response = self.client.post(url)
        service_provider.refresh_from_db()
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        # POSTing to the endpoint must rotate the secret.
        self.assertNotEqual(service_provider.api_secret_code, old_secret_code)
    @data('user', 'customer_support', 'admin', 'manager')
    def test_not_generate_api_secret_code(self, user):
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        service_provider = factories.ServiceProviderFactory(customer=self.customer)
        url = factories.ServiceProviderFactory.get_url(
            service_provider, 'api_secret_code'
        )
        response = self.client.post(url)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
@ddt
class ServiceProviderDeleteTest(test.APITransactionTestCase):
    """Deletion rules: staff/owner may delete, but only while the provider
    has no non-archived offerings."""
    def setUp(self):
        self.fixture = fixtures.ProjectFixture()
        self.customer = self.fixture.customer
        self.service_provider = factories.ServiceProviderFactory(customer=self.customer)
    @data('staff', 'owner')
    def test_authorized_user_can_delete_service_provider(self, user):
        response = self.delete_service_provider(user)
        self.assertEqual(
            response.status_code, status.HTTP_204_NO_CONTENT, response.data
        )
        self.assertFalse(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    def test_service_provider_could_not_be_deleted_if_it_has_active_offerings(self):
        factories.OfferingFactory(
            customer=self.customer, state=models.Offering.States.ACTIVE
        )
        response = self.delete_service_provider('staff')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        self.assertTrue(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    def test_service_provider_is_deleted_if_it_has_archived_offering(self):
        factories.OfferingFactory(
            customer=self.customer, state=models.Offering.States.ARCHIVED
        )
        response = self.delete_service_provider('staff')
        self.assertEqual(
            response.status_code, status.HTTP_204_NO_CONTENT, response.data
        )
        self.assertFalse(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    @data('user', 'customer_support', 'admin', 'manager')
    def test_unauthorized_user_can_not_delete_service_provider(self, user):
        response = self.delete_service_provider(user)
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertTrue(
            models.ServiceProvider.objects.filter(customer=self.customer).exists()
        )
    def delete_service_provider(self, user):
        """DELETE the fixture's service provider as the named fixture user."""
        user = getattr(self.fixture, user)
        self.client.force_authenticate(user)
        url = factories.ServiceProviderFactory.get_url(self.service_provider)
        response = self.client.delete(url)
        return response
class CustomerSerializerTest(test.APITransactionTestCase):
    """Checks the ``is_service_provider`` flag exposed by the customer API."""

    def test_service_provider_is_not_defined(self):
        plain_customer = structure_factories.CustomerFactory()
        self.assertFalse(self.get_value(plain_customer))

    def test_service_provider_is_defined(self):
        provider_customer = factories.ServiceProviderFactory().customer
        self.assertTrue(self.get_value(provider_customer))

    def get_value(self, customer):
        """Fetch *customer* as a staff user and return its serializer flag."""
        staff_user = structure_factories.UserFactory(is_staff=True)
        endpoint = structure_factories.CustomerFactory.get_url(customer)
        self.client.force_login(staff_user)
        response = self.client.get(endpoint)
        return response.data['is_service_provider']
class ServiceProviderNotificationTest(test.APITransactionTestCase):
    """Reminders sent to owners when usage-based components lack usage reports."""
    def setUp(self):
        self.fixture = fixtures.CustomerFixture()
        # Touch the lazy fixture attribute so an owner user actually exists.
        self.fixture.owner
        self.service_provider = factories.ServiceProviderFactory(
            customer=self.fixture.customer
        )
        offering = factories.OfferingFactory(customer=self.fixture.customer)
        self.component = factories.OfferingComponentFactory(
            billing_type=models.OfferingComponent.BillingTypes.USAGE, offering=offering
        )
        self.resource = factories.ResourceFactory(
            offering=offering, state=models.Resource.States.OK, name='My resource'
        )
    def test_get_customer_if_usages_are_not_exist(self):
        self.assertEqual(len(utils.get_info_about_missing_usage_reports()), 1)
        self.assertEqual(
            utils.get_info_about_missing_usage_reports()[0]['customer'],
            self.fixture.customer,
        )
    def test_do_not_get_customer_if_usages_are_exist(self):
        factories.ComponentUsageFactory(
            resource=self.resource, component=self.component
        )
        self.assertEqual(len(utils.get_info_about_missing_usage_reports()), 0)
    def test_usages_notification_message(self):
        tasks.send_notifications_about_usages()
        # Exactly one reminder goes to the customer owner.
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].to, [self.fixture.owner.email])
        self.assertEqual(
            mail.outbox[0].subject, 'Reminder about missing usage reports.'
        )
        self.assertTrue('My resource' in mail.outbox[0].body)
        # The body must link to the customer's public resources page.
        link_template = settings.WALDUR_MARKETPLACE['PUBLIC_RESOURCES_LINK_TEMPLATE']
        public_resources_url = link_template.format(
            organization_uuid=self.fixture.customer.uuid
        )
        self.assertTrue(public_resources_url in mail.outbox[0].body)
| mit |
ZachMassia/platformio | platformio/builder/scripts/frameworks/libopencm3.py | 1 | 5579 | # Copyright 2014-2016 Ivan Kravets <me@ikravets.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
libOpenCM3
The libOpenCM3 framework aims to create a free/libre/open-source
firmware library for various ARM Cortex-M0(+)/M3/M4 microcontrollers,
including ST STM32, Ti Tiva and Stellaris, NXP LPC 11xx, 13xx, 15xx,
17xx parts, Atmel SAM3, Energy Micro EFM32 and others.
http://www.libopencm3.org/wiki/Main_Page
"""
from __future__ import absolute_import
import re
from os import listdir, sep, walk
from os.path import isfile, join, normpath
from SCons.Script import DefaultEnvironment
from platformio.util import exec_command
env = DefaultEnvironment()
env.Replace(
PLATFORMFW_DIR=join("$PIOPACKAGES_DIR", "framework-libopencm3")
)
BOARD_BUILDOPTS = env.get("BOARD_OPTIONS", {}).get("build", {})
def find_ldscript(src_dir):
    """Return the linker script to use from *src_dir*.

    If exactly one ``.ld`` file exists in the directory it is chosen;
    otherwise the board's configured ``ldscript`` option is consulted.
    Raises AssertionError with a readable message when no script can be
    resolved (the original code raised KeyError/TypeError in that case).
    """
    ldscript = None
    matches = []
    for item in sorted(listdir(src_dir)):
        _path = join(src_dir, item)
        if not isfile(_path) or not item.endswith(".ld"):
            continue
        matches.append(_path)
    if len(matches) == 1:
        ldscript = matches[0]
    else:
        # Ambiguous (or empty) directory: fall back to the board's
        # configured linker script, if one is set and present.
        board_ld = BOARD_BUILDOPTS.get("ldscript")
        if board_ld and isfile(join(src_dir, board_ld)):
            ldscript = join(src_dir, board_ld)
    assert ldscript is not None and isfile(ldscript), \
        "Cannot find linker script in %s" % src_dir
    return ldscript
def generate_nvic_files():
    """Generate missing ``nvic.h`` headers from ``irq.json`` descriptions.

    Walks the framework's ``include/libopencm3`` tree and runs libopencm3's
    ``irq2nvic_h`` helper for every device directory that ships an
    ``irq.json`` but does not yet have a generated ``nvic.h``.
    """
    fw_dir = env.subst("$PLATFORMFW_DIR")
    for root, _, files in walk(join(fw_dir, "include", "libopencm3")):
        if "irq.json" not in files or isfile(join(root, "nvic.h")):
            continue
        # The helper expects a path relative to the framework root using
        # forward slashes (hence the "\\" -> "/" replacement for Windows).
        exec_command(
            ["python", join("scripts", "irq2nvic_h"),
             join("." + root.replace(fw_dir, ""),
                  "irq.json").replace("\\", "/")],
            cwd=fw_dir
        )
def parse_makefile_data(makefile):
    """Extract include files, VPATH entries and object names from a Makefile.

    Returns a dict with keys ``includes``, ``objs`` and ``vpath`` (the
    latter always starts with "./").
    """
    result = {"includes": [], "objs": [], "vpath": ["./"]}
    with open(makefile) as fp:
        text = fp.read()

    # "include <file>" directives
    for m in re.finditer(r"^include\s+([^\r\n]+)", text, re.M):
        result["includes"].append(m.group(1))

    # VPATH search directories (colon separated)
    for m in re.finditer(r"^VPATH\s+\+?=\s+([^\r\n]+)", text, re.M):
        result["vpath"].extend(m.group(1).split(":"))

    # Object file list; strip the "OBJS +=" prefix and all separators.
    objs_match = re.search(
        r"^OBJS\s+\+?=\s+([^\.]+\.o\s*(?:\s+\\s+)?)+", text, re.M)
    assert objs_match
    result["objs"] = re.sub(
        r"(OBJS|[\+=\\\s]+)", "\n", objs_match.group(0)).split()
    return result
def get_source_files(src_dir):
    """Collect framework sources referenced by *src_dir*'s Makefile.

    Parses the Makefile (merging data from its ``include`` directives),
    resolves each listed object file back to its ``.c`` source through the
    VPATH search directories, and returns the source paths rebased into
    the build directory.
    """
    mkdata = parse_makefile_data(join(src_dir, "Makefile"))
    for include in mkdata['includes']:
        _mkdata = parse_makefile_data(normpath(join(src_dir, include)))
        # Merge included makefile data, skipping duplicates.
        # .items() instead of the Python-2-only .iteritems() keeps this
        # working on both Python 2 and 3.
        for key, value in _mkdata.items():
            for v in value:
                if v not in mkdata[key]:
                    mkdata[key].append(v)
    sources = []
    lib_root = env.subst("$PLATFORMFW_DIR")
    for obj_file in mkdata['objs']:
        src_file = obj_file[:-1] + "c"  # foo.o -> foo.c
        for search_path in mkdata['vpath']:
            src_path = normpath(join(src_dir, search_path, src_file))
            if isfile(src_path):
                sources.append(join("$BUILD_DIR", "FrameworkLibOpenCM3",
                                    src_path.replace(lib_root + sep, "")))
                break
    return sources
def merge_ld_scripts(main_ld_file):
    """Inline all ``INCLUDE`` directives of a linker script, in place.

    Each ``INCLUDE foo.ld`` line is replaced with the contents of the first
    ``foo.ld`` found under the framework's ``lib`` tree; unresolved
    includes are left untouched.
    """
    def _include_callback(match):
        included_ld_file = match.group(1)
        # search included ld file in lib directories
        for root, _, files in walk(env.subst(join("$PLATFORMFW_DIR", "lib"))):
            if included_ld_file not in files:
                continue
            with open(join(root, included_ld_file)) as fp:
                return fp.read()
        # Not found: keep the original INCLUDE line.
        return match.group(0)
    content = ""
    with open(main_ld_file) as f:
        content = f.read()
    incre = re.compile(r"^INCLUDE\s+\"?([^\.]+\.ld)\"?", re.M)
    # Rewrite the script in place with the includes expanded.
    with open(main_ld_file, "w") as f:
        f.write(incre.sub(_include_callback, content))
#
# Processing ...
#
# Board-specific compile definitions.
if BOARD_BUILDOPTS.get("core") == "lm4f":
    env.Append(
        CPPDEFINES=["LM4F"]
    )
# Mirror the framework headers into the build tree (triggers nvic.h use).
env.VariantDirWrap(
    join("$BUILD_DIR", "FrameworkLibOpenCM3Variant"),
    join("$PLATFORMFW_DIR", "include")
)
env.Append(
    CPPPATH=[
        join("$BUILD_DIR", "FrameworkLibOpenCM3"),
        join("$BUILD_DIR", "FrameworkLibOpenCM3Variant")
    ]
)
# Locate the per-core library sources; STM32 parts are further split by
# family (characters 5-6 of the variant name, e.g. "f1" from "stm32f1xx").
root_dir = env.subst(
    join("$PLATFORMFW_DIR", "lib", BOARD_BUILDOPTS.get("core")))
if BOARD_BUILDOPTS.get("core") == "stm32":
    root_dir = join(root_dir, BOARD_BUILDOPTS.get("variant")[5:7])
ldscript_path = find_ldscript(root_dir)
merge_ld_scripts(ldscript_path)
generate_nvic_files()
# override ldscript by libopencm3
assert "LDSCRIPT_PATH" in env
env.Replace(
    LDSCRIPT_PATH=ldscript_path
)
# Build the framework itself as a static library and link it in.
libs = []
env.VariantDirWrap(
    join("$BUILD_DIR", "FrameworkLibOpenCM3"),
    "$PLATFORMFW_DIR"
)
libs.append(env.Library(
    join("$BUILD_DIR", "FrameworkLibOpenCM3"),
    get_source_files(root_dir)
))
env.Append(LIBS=libs)
| apache-2.0 |
Fireblend/chromium-crosswalk | chrome/test/chromedriver/chrome_paths.py | 118 | 1258 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Paths to common resources in the Chrome repository."""
import os
_THIS_DIR = os.path.abspath(os.path.dirname(__file__))
def GetSrc():
  """Returns the path to the root src directory."""
  # chrome/test/chromedriver -> three levels up is src/.
  three_up = os.path.join(_THIS_DIR, os.pardir, os.pardir, os.pardir)
  return os.path.abspath(three_up)
def GetTestData():
  """Returns the path to the src/chrome/test/data directory."""
  subdirs = ('chrome', 'test', 'data')
  return os.path.join(GetSrc(), *subdirs)
def GetBuildDir(required_paths):
  """Returns the preferred build directory that contains given paths.

  Release configurations are preferred over Debug ones; within each
  configuration, 'out', 'build' and 'xcodebuild' are tried in order.
  """
  candidates = [os.path.join(GetSrc(), root, config)
                for config in ('Release', 'Debug')
                for root in ('out', 'build', 'xcodebuild')]
  for candidate in candidates:
    if all(os.path.exists(os.path.join(candidate, path))
           for path in required_paths):
      return candidate
  raise RuntimeError('Cannot find build directory containing ' +
                     ', '.join(required_paths))
| bsd-3-clause |
ctu-osgeorel/qgis-vfk-plugin | vfkDocument.py | 2 | 2955 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
vfkPluginDialog
A QGIS plugin
Plugin umoznujici praci s daty katastru nemovitosti
-------------------
begin : 2015-06-11
git sha : $Format:%H$
copyright : (C) 2015 by Stepan Bambula
email : stepan.bambula@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from abc import ABCMeta, abstractmethod
class TPair(object):
    """A simple mutable pair of values, defaulting to empty unicode strings."""

    def __init__(self, first=u'', second=u''):
        self.first, self.second = first, second
class VfkDocument:
    """Abstract interface for document builders used to render VFK reports.

    Concrete subclasses (e.g. HTML or LaTeX writers) implement each
    primitive; callers compose them to emit a complete document.
    """
    __metaclass__ = ABCMeta
    def __init__(self):
        pass
    @abstractmethod
    def header(self):
        """Emit the opening boilerplate of the document."""
        pass
    @abstractmethod
    def footer(self):
        """Emit the closing boilerplate of the document."""
        pass
    @abstractmethod
    def heading1(self, text):
        """Emit a first-level heading."""
        pass
    @abstractmethod
    def heading2(self, text):
        """Emit a second-level heading."""
        pass
    @abstractmethod
    def heading3(self, text):
        """Emit a third-level heading."""
        pass
    @abstractmethod
    def beginItemize(self):
        """Open a bullet list."""
        pass
    @abstractmethod
    def endItemize(self):
        """Close the current bullet list."""
        pass
    @abstractmethod
    def beginItem(self):
        """Open a list item."""
        pass
    @abstractmethod
    def endItem(self):
        """Close the current list item."""
        pass
    @abstractmethod
    def item(self, text):
        """Emit a complete list item with the given text."""
        pass
    @abstractmethod
    def beginTable(self):
        """Open a table."""
        pass
    @abstractmethod
    def endTable(self):
        """Close the current table."""
        pass
    @abstractmethod
    def tableHeader(self, columns):
        """Emit a table header row from the given column titles."""
        pass
    @abstractmethod
    def tableRow(self, columns):
        """Emit a table data row from the given cell values."""
        pass
    @abstractmethod
    def tableRowOneColumnSpan(self, text):
        """Emit a row whose single cell spans all table columns."""
        pass
    @abstractmethod
    def link(self, href, text):
        """Emit a hyperlink with the given target and label."""
        pass
    @abstractmethod
    def superScript(self, text):
        """Emit superscripted text."""
        pass
    @abstractmethod
    def newLine(self):
        """Emit a line break."""
        pass
    @abstractmethod
    def keyValueTable(self, content):
        """Emit a two-column table from key/value pairs."""
        pass
    @abstractmethod
    def paragraph(self, text):
        """Emit a text paragraph."""
        pass
    @abstractmethod
    def table(self, content, header):
        """Emit a full table with the given rows and header."""
        pass
    @abstractmethod
    def text(self, text):
        """Emit plain text."""
        pass
    @abstractmethod
    def discardLastBeginTable(self):
        """Undo the most recent beginTable() (e.g. when it would stay empty)."""
        pass
    @abstractmethod
    def isLastTableEmpty(self):
        """Return True if the most recently opened table has no rows."""
        pass
| gpl-2.0 |
RicardoJohann/frappe | frappe/email/doctype/email_unsubscribe/email_unsubscribe.py | 55 | 1559 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import _
class EmailUnsubscribe(Document):
    """Records an email address opting out of emails, either globally or
    for a single reference document."""

    def validate(self):
        """Require reference fields and reject duplicate unsubscribes."""
        is_global = self.global_unsubscribe

        if not is_global and not (self.reference_doctype and self.reference_name):
            frappe.throw(_("Reference DocType and Reference Name are required"), frappe.MandatoryError)

        if not is_global and frappe.db.get_value(self.doctype, self.name, "global_unsubscribe"):
            frappe.throw(_("Delete this record to allow sending to this email address"))

        if is_global:
            duplicates = frappe.get_all("Email Unsubscribe",
                filters={"email": self.email, "global_unsubscribe": 1, "name": ["!=", self.name]})
            if duplicates:
                frappe.throw(_("{0} already unsubscribed").format(self.email), frappe.DuplicateEntryError)
        else:
            duplicates = frappe.get_all("Email Unsubscribe",
                filters={
                    "email": self.email,
                    "reference_doctype": self.reference_doctype,
                    "reference_name": self.reference_name,
                    "name": ["!=", self.name]
                })
            if duplicates:
                frappe.throw(_("{0} already unsubscribed for {1} {2}").format(
                    self.email, self.reference_doctype, self.reference_name),
                    frappe.DuplicateEntryError)

    def on_update(self):
        """After save, note the opt-out on the referenced document."""
        if self.reference_doctype and self.reference_name:
            doc = frappe.get_doc(self.reference_doctype, self.reference_name)
            doc.add_comment("Label", _("Left this conversation"), comment_by=self.email)
| mit |
0sc0d3r/enigma2 | lib/python/Screens/TimerEdit.py | 13 | 20376 | from Components.ActionMap import ActionMap
from Components.Button import Button
from Components.Label import Label
from Components.config import config
from Components.MenuList import MenuList
from Components.TimerList import TimerList
from Components.TimerSanityCheck import TimerSanityCheck
from Components.UsageConfig import preferredTimerPath
from Components.Sources.StaticText import StaticText
from RecordTimer import RecordTimerEntry, parseEvent, AFTEREVENT
from Screens.Screen import Screen
from Screens.ChoiceBox import ChoiceBox
from Screens.MessageBox import MessageBox
from ServiceReference import ServiceReference
from Screens.TimerEntry import TimerEntry, TimerLog
from Tools.BoundFunction import boundFunction
from Tools.FuzzyDate import FuzzyTime
from Tools.Directories import resolveFilename, SCOPE_HDD, fileExists
from time import time, localtime
from timer import TimerEntry as RealTimerEntry
from enigma import eServiceCenter
import Tools.CopyFiles
import os
class TimerEditList(Screen):
EMPTY = 0
ENABLE = 1
DISABLE = 2
CLEANUP = 3
DELETE = 4
def __init__(self, session):
Screen.__init__(self, session)
Screen.setTitle(self, _("Timer List"))
self.onChangedEntry = [ ]
list = [ ]
self.list = list
self.fillTimerList()
self["timerlist"] = TimerList(list)
self.key_red_choice = self.EMPTY
self.key_yellow_choice = self.EMPTY
self.key_blue_choice = self.EMPTY
self["key_red"] = Button(" ")
self["key_green"] = Button(_("Add"))
self["key_yellow"] = Button(" ")
self["key_blue"] = Button(" ")
self["description"] = Label()
self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
{
"ok": self.openEdit,
"cancel": self.leave,
"green": self.addCurrentTimer,
"log": self.showLog,
"left": self.left,
"right": self.right,
"up": self.up,
"down": self.down
}, -1)
self.setTitle(_("Timer overview"))
self.session.nav.RecordTimer.on_state_change.append(self.onStateChange)
self.onShown.append(self.updateState)
def createSummary(self):
return TimerEditListSummary
def up(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveUp)
self.updateState()
def down(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.moveDown)
self.updateState()
def left(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageUp)
self.updateState()
def right(self):
self["timerlist"].instance.moveSelection(self["timerlist"].instance.pageDown)
self.updateState()
def toggleDisabledState(self):
cur=self["timerlist"].getCurrent()
if cur:
t = cur
if t.disabled:
# print "try to ENABLE timer"
t.enable()
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, cur)
if not timersanitycheck.check():
t.disable()
print "Sanity check failed"
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, simulTimerList)
else:
print "Sanity check passed"
if timersanitycheck.doubleCheck():
t.disable()
else:
if t.isRunning():
if t.repeated:
list = (
(_("Stop current event but not coming events"), "stoponlycurrent"),
(_("Stop current event and disable coming events"), "stopall"),
(_("Don't stop current event but disable coming events"), "stoponlycoming")
)
self.session.openWithCallback(boundFunction(self.runningEventCallback, t), ChoiceBox, title=_("Repeating event currently recording... What do you want to do?"), list = list)
else:
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def runningEventCallback(self, t, result):
if result is not None:
if result[1] == "stoponlycurrent" or result[1] == "stopall":
t.enable()
t.processRepeated(findRunningEvent = False)
self.session.nav.RecordTimer.doActivate(t)
if result[1] == "stoponlycoming" or result[1] == "stopall":
t.disable()
self.session.nav.RecordTimer.timeChanged(t)
self.refill()
self.updateState()
def removeAction(self, descr):
actions = self["actions"].actions
if descr in actions:
del actions[descr]
def updateState(self):
cur = self["timerlist"].getCurrent()
if cur:
self["description"].setText(cur.description)
if self.key_red_choice != self.DELETE:
self["actions"].actions.update({"red":self.removeTimerQuestion})
self["key_red"].setText(_("Delete"))
self.key_red_choice = self.DELETE
if cur.disabled and (self.key_yellow_choice != self.ENABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Enable"))
self.key_yellow_choice = self.ENABLE
elif cur.isRunning() and not cur.repeated and (self.key_yellow_choice != self.EMPTY):
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
elif ((not cur.isRunning())or cur.repeated ) and (not cur.disabled) and (self.key_yellow_choice != self.DISABLE):
self["actions"].actions.update({"yellow":self.toggleDisabledState})
self["key_yellow"].setText(_("Disable"))
self.key_yellow_choice = self.DISABLE
else:
if self.key_red_choice != self.EMPTY:
self.removeAction("red")
self["key_red"].setText(" ")
self.key_red_choice = self.EMPTY
if self.key_yellow_choice != self.EMPTY:
self.removeAction("yellow")
self["key_yellow"].setText(" ")
self.key_yellow_choice = self.EMPTY
showCleanup = True
for x in self.list:
if (not x[0].disabled) and (x[1] == True):
break
else:
showCleanup = False
if showCleanup and (self.key_blue_choice != self.CLEANUP):
self["actions"].actions.update({"blue":self.cleanupQuestion})
self["key_blue"].setText(_("Cleanup"))
self.key_blue_choice = self.CLEANUP
elif (not showCleanup) and (self.key_blue_choice != self.EMPTY):
self.removeAction("blue")
self["key_blue"].setText(" ")
self.key_blue_choice = self.EMPTY
if len(self.list) == 0:
return
timer = self['timerlist'].getCurrent()
if timer:
try:
name = str(timer.name)
time = "%s %s ... %s" % (FuzzyTime(timer.begin)[0], FuzzyTime(timer.begin)[1], FuzzyTime(timer.end)[1])
duration = ("(%d " + _("mins") + ")") % ((timer.end - timer.begin) / 60)
service = str(timer.service_ref.getServiceName())
if timer.state == RealTimerEntry.StateWaiting:
state = _("waiting")
elif timer.state == RealTimerEntry.StatePrepared:
state = _("about to start")
elif timer.state == RealTimerEntry.StateRunning:
if timer.justplay:
state = _("zapped")
else:
state = _("recording...")
elif timer.state == RealTimerEntry.StateEnded:
state = _("done!")
else:
state = _("<unknown>")
except:
name = ""
time = ""
duration = ""
service = ""
else:
name = ""
time = ""
duration = ""
service = ""
for cb in self.onChangedEntry:
cb(name, time, duration, service, state)
def fillTimerList(self):
#helper function to move finished timers to end of list
def eol_compare(x, y):
if x[0].state != y[0].state and x[0].state == RealTimerEntry.StateEnded or y[0].state == RealTimerEntry.StateEnded:
return cmp(x[0].state, y[0].state)
return cmp(x[0].begin, y[0].begin)
list = self.list
del list[:]
list.extend([(timer, False) for timer in self.session.nav.RecordTimer.timer_list])
list.extend([(timer, True) for timer in self.session.nav.RecordTimer.processed_timers])
if config.usage.timerlist_finished_timer_position.index: #end of list
list.sort(cmp = eol_compare)
else:
list.sort(key = lambda x: x[0].begin)
def showLog(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerLog, cur)
def openEdit(self):
cur=self["timerlist"].getCurrent()
if cur:
self.session.openWithCallback(self.finishedEdit, TimerEntry, cur)
def cleanupQuestion(self):
self.session.openWithCallback(self.cleanupTimer, MessageBox, _("Really delete done timers?"))
def cleanupTimer(self, delete):
if delete:
self.session.nav.RecordTimer.cleanup()
self.refill()
self.updateState()
def removeTimerQuestion(self):
cur = self["timerlist"].getCurrent()
service = str(cur.service_ref.getServiceName())
t = localtime(cur.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + cur.name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
if not cur:
return
onhdd = False
self.moviename = f
path = resolveFilename(SCOPE_HDD)
try:
files = os.listdir(path)
except:
files = ""
for file in files:
if file.startswith(f):
onhdd = True
break
if onhdd:
message = (_("Do you really want to delete %s?") % (cur.name))
choices = [(_("No"), "no"),
(_("Yes, delete from Timerlist"), "yes"),
(_("Yes, delete from Timerlist and delete recording"), "yesremove")]
self.session.openWithCallback(self.startDelete, ChoiceBox, title=message, list=choices)
else:
self.session.openWithCallback(self.removeTimer, MessageBox, _("Do you really want to delete %s?") % (cur.name), default = False)
def startDelete(self, answer):
if not answer or not answer[1]:
self.close()
return
if answer[1] == 'no':
return
elif answer[1] == 'yes':
self.removeTimer(True)
elif answer[1] == 'yesremove':
if fileExists("/usr/lib/enigma2/python/Plugins/Extensions/EnhancedMovieCenter/plugin.pyo"):
if config.EMC.movie_trashcan_enable.value:
trashpath = config.EMC.movie_trashcan_path.value
self.MoveToTrash(trashpath)
elif config.usage.movielist_trashcan.value:
trashpath = resolveFilename(SCOPE_HDD) + '.Trash'
self.MoveToTrash(trashpath)
else:
self.session.openWithCallback(self.callbackRemoveRecording, MessageBox, _("Do you really want to delete the recording?"), default = False)
def callbackRemoveRecording(self, answer):
if not answer:
return
self.delete()
def removeTimer(self, result):
if not result:
return
list = self["timerlist"]
cur = list.getCurrent()
if cur:
timer = cur
timer.afterEvent = AFTEREVENT.NONE
self.session.nav.RecordTimer.removeEntry(timer)
self.refill()
self.updateState()
def MoveToTrash(self, trashpath):
if not os.path.exists(trashpath):
os.system("mkdir -p %s" %trashpath)
self.removeTimer(True)
moviepath = os.path.normpath(resolveFilename(SCOPE_HDD))
movedList =[]
files = os.listdir(moviepath)
for file in files:
if file.startswith(self.moviename):
movedList.append((os.path.join(moviepath, file), os.path.join(trashpath, file)))
Tools.CopyFiles.moveFiles(movedList, None)
def delete(self):
item = self["timerlist"].getCurrent()
if item is None:
return # huh?
name = item.name
service = str(item.service_ref.getServiceName())
t = localtime(item.begin)
f = str(t.tm_year) + str(t.tm_mon).zfill(2) + str(t.tm_mday).zfill(2) + " " + str(t.tm_hour).zfill(2) + str(t.tm_min).zfill(2) + " - " + service + " - " + name
f = f.replace(':','_')
f = f.replace(',','_')
f = f.replace('/','_')
path = resolveFilename(SCOPE_HDD)
self.removeTimer(True)
from enigma import eBackgroundFileEraser
files = os.listdir(path)
for file in files:
if file.startswith(f):
eBackgroundFileEraser.getInstance().erase(os.path.realpath(path + file))
def refill(self):
oldsize = len(self.list)
self.fillTimerList()
lst = self["timerlist"]
newsize = len(self.list)
if oldsize and oldsize != newsize:
idx = lst.getCurrentIndex()
lst.entryRemoved(idx)
else:
lst.invalidate()
def addCurrentTimer(self):
event = None
service = self.session.nav.getCurrentService()
if service is not None:
info = service.info()
if info is not None:
event = info.getEvent(0)
# FIXME only works if already playing a service
serviceref = ServiceReference(self.session.nav.getCurrentlyPlayingServiceOrGroup())
if event is None:
data = (int(time()), int(time() + 60), "", "", None)
else:
data = parseEvent(event, description = False)
self.addTimer(RecordTimerEntry(serviceref, checkOldTimers = True, dirname = preferredTimerPath(), *data))
def addTimer(self, timer):
self.session.openWithCallback(self.finishedAdd, TimerEntry, timer)
def finishedEdit(self, answer):
# print "finished edit"
if answer[0]:
# print "Edited timer"
entry = answer[1]
timersanitycheck = TimerSanityCheck(self.session.nav.RecordTimer.timer_list, entry)
success = False
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
if not timersanitycheck.check():
simulTimerList = timersanitycheck.getSimulTimerList()
if simulTimerList is not None:
self.session.openWithCallback(self.finishedEdit, TimerSanityConflict, timersanitycheck.getSimulTimerList())
else:
success = True
else:
success = True
if success:
print "Sanity check passed"
self.session.nav.RecordTimer.timeChanged(entry)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishedAdd(self, answer):
# print "finished add"
if answer[0]:
entry = answer[1]
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
for x in simulTimerList:
if x.setAutoincreaseEnd(entry):
self.session.nav.RecordTimer.timeChanged(x)
simulTimerList = self.session.nav.RecordTimer.record(entry)
if simulTimerList is not None:
self.session.openWithCallback(self.finishSanityCorrection, TimerSanityConflict, simulTimerList)
self.fillTimerList()
self.updateState()
# else:
# print "Timeredit aborted"
def finishSanityCorrection(self, answer):
self.finishedAdd(answer)
def leave(self):
self.session.nav.RecordTimer.on_state_change.remove(self.onStateChange)
self.close()
def onStateChange(self, entry):
self.refill()
self.updateState()
class TimerSanityConflict(Screen):
    """Dialog shown when a new/edited timer conflicts with existing ones.

    timer[0] is always the new timer; timer[1:] are the conflicting ones.
    Green toggles the new timer, blue toggles the selected conflicting timer;
    the two toggles are kept mutually exclusive in toggleTimer().
    """

    # States for key_*_choice: which action each colour key currently carries.
    EMPTY = 0
    ENABLE = 1
    DISABLE = 2
    EDIT = 3

    def __init__(self, session, timer):
        Screen.__init__(self, session)
        self.timer = timer
        print "TimerSanityConflict"

        self["timer1"] = TimerList(self.getTimerList(timer[0]))
        self.list = []
        self.list2 = []
        count = 0
        for x in timer:
            if count != 0:
                self.list.append((_("Conflicting timer") + " " + str(count), x))
                self.list2.append((timer[count], False))
            count += 1
        if count == 1:
            # NOTE(review): outer parens are redundant — this appends a plain
            # string, unlike the (label, timer) tuples above; confirm MenuList
            # accepts mixed entry types.
            self.list.append((_("Channel not in services list")))

        self["list"] = MenuList(self.list)
        self["timer2"] = TimerList(self.list2)

        self["key_red"] = Button(_("Edit new entry"))
        self["key_green"] = Button(" ")
        self["key_yellow"] = Button(" ")
        self["key_blue"] = Button(" ")

        self.key_green_choice = self.EMPTY
        self.key_yellow_choice = self.EMPTY
        self.key_blue_choice = self.EMPTY

        self["actions"] = ActionMap(["OkCancelActions", "DirectionActions", "ShortcutActions", "TimerEditActions"],
            {
                "ok": self.leave_ok,
                "cancel": self.leave_cancel,
                "red": self.editTimer1,
                "up": self.up,
                "down": self.down
            }, -1)
        self.setTitle(_("Timer sanity error"))
        self.onShown.append(self.updateState)

    def getTimerList(self, timer):
        # Wrap a single timer in the (timer, processed_flag) shape TimerList expects.
        return [(timer, False)]

    def editTimer1(self):
        """Edit the new timer (red key)."""
        self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer1"].getCurrent())

    def editTimer2(self):
        """Edit the selected conflicting timer (yellow key)."""
        self.session.openWithCallback(self.finishedEdit, TimerEntry, self["timer2"].getCurrent())

    def toggleNewTimer(self):
        """Enable/disable the new timer; a running timer cannot be disabled."""
        if self.timer[0].disabled:
            self.timer[0].disabled = False
            self.session.nav.RecordTimer.timeChanged(self.timer[0])
        elif not self.timer[0].isRunning():
            self.timer[0].disabled = True
            self.session.nav.RecordTimer.timeChanged(self.timer[0])
        self.finishedEdit((True, self.timer[0]))

    def toggleTimer(self):
        """Toggle the selected conflicting timer and flip the new timer the
        opposite way, so only one side of the conflict stays enabled."""
        x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
        if self.timer[x].disabled:
            self.timer[x].disabled = False
            self.session.nav.RecordTimer.timeChanged(self.timer[x])
            if not self.timer[0].isRunning():
                self.timer[0].disabled = True
                self.session.nav.RecordTimer.timeChanged(self.timer[0])
        elif not self.timer[x].isRunning():
            self.timer[x].disabled = True
            self.session.nav.RecordTimer.timeChanged(self.timer[x])
            if self.timer[x].disabled:
                self.timer[0].disabled = False
                self.session.nav.RecordTimer.timeChanged(self.timer[0])
        self.finishedEdit((True, self.timer[0]))

    def finishedEdit(self, answer):
        self.leave_ok()

    def leave_ok(self):
        self.close((True, self.timer[0]))

    def leave_cancel(self):
        self.close((False, self.timer[0]))

    def up(self):
        # Keep the conflicting-timer detail list in sync with the menu selection.
        self["list"].instance.moveSelection(self["list"].instance.moveUp)
        self["timer2"].moveToIndex(self["list"].getSelectedIndex())

    def down(self):
        self["list"].instance.moveSelection(self["list"].instance.moveDown)
        self["timer2"].moveToIndex(self["list"].getSelectedIndex())

    def removeAction(self, descr):
        """Unbind an action key if it is currently mapped."""
        actions = self["actions"].actions
        if descr in actions:
            del actions[descr]

    def updateState(self):
        """Rebind the green/yellow/blue keys to match the new timer's state
        and the selected conflicting timer's state."""
        if self.timer[0] is not None:
            if self.timer[0].disabled and self.key_green_choice != self.ENABLE:
                self["actions"].actions.update({"green":self.toggleTimer})
                self["key_green"].setText(_("Enable"))
                self.key_green_choice = self.ENABLE
            elif self.timer[0].isRunning() and not self.timer[0].repeated and self.key_green_choice != self.EMPTY:
                self.removeAction("green")
                self["key_green"].setText(" ")
                self.key_green_choice = self.EMPTY
            elif (not self.timer[0].isRunning() or self.timer[0].repeated ) and self.key_green_choice != self.DISABLE:
                self["actions"].actions.update({"green":self.toggleNewTimer})
                self["key_green"].setText(_("Disable"))
                self.key_green_choice = self.DISABLE

        if len(self.timer) > 1:
            x = self["list"].getSelectedIndex() + 1 # the first is the new timer so we do +1 here
            if self.timer[x] is not None:
                if self.key_yellow_choice == self.EMPTY:
                    self["actions"].actions.update({"yellow":self.editTimer2})
                    self["key_yellow"].setText(_("Edit"))
                    self.key_yellow_choice = self.EDIT
                if self.timer[x].disabled and self.key_blue_choice != self.ENABLE:
                    self["actions"].actions.update({"blue":self.toggleTimer})
                    self["key_blue"].setText(_("Enable"))
                    self.key_blue_choice = self.ENABLE
                elif self.timer[x].isRunning() and not self.timer[x].repeated and self.key_blue_choice != self.EMPTY:
                    self.removeAction("blue")
                    self["key_blue"].setText(" ")
                    self.key_blue_choice = self.EMPTY
                elif (not self.timer[x].isRunning() or self.timer[x].repeated ) and self.key_blue_choice != self.DISABLE:
                    self["actions"].actions.update({"blue":self.toggleTimer})
                    self["key_blue"].setText(_("Disable"))
                    self.key_blue_choice = self.DISABLE
        else:
            #FIXME.... this doesnt hide the buttons self.... just the text
            if self.key_yellow_choice != self.EMPTY:
                self.removeAction("yellow")
                self["key_yellow"].setText(" ")
                self.key_yellow_choice = self.EMPTY
            if self.key_blue_choice != self.EMPTY:
                self.removeAction("blue")
                self["key_blue"].setText(" ")
                self.key_blue_choice = self.EMPTY
class TimerEditListSummary(Screen):
    """LCD summary screen for TimerEditList: mirrors the details of the
    timer currently selected in the parent screen."""

    def __init__(self, session, parent):
        Screen.__init__(self, session, parent = parent)
        # One StaticText widget per mirrored detail field.
        for field in ("name", "service", "time", "duration", "state"):
            self[field] = StaticText("")
        self.onShow.append(self.addWatcher)
        self.onHide.append(self.removeWatcher)

    def addWatcher(self):
        """Subscribe to parent selection changes and pull the current state."""
        self.parent.onChangedEntry.append(self.selectionChanged)
        self.parent.updateState()

    def removeWatcher(self):
        """Unsubscribe from parent selection changes."""
        self.parent.onChangedEntry.remove(self.selectionChanged)

    def selectionChanged(self, name, time, duration, service, state):
        """Parent callback: copy the selected timer's details to the widgets."""
        updates = (("name", name), ("service", service), ("time", time),
                   ("duration", duration), ("state", state))
        for field, value in updates:
            self[field].text = value
| gpl-2.0 |
VegaDevTeam/android_kernel_pantech_ef52k | tools/perf/util/setup.py | 4998 | 1330 | #!/usr/bin/python2
from distutils.core import setup, Extension
from os import getenv
from distutils.command.build_ext import build_ext as _build_ext
from distutils.command.install_lib import install_lib as _install_lib
class build_ext(_build_ext):
    """distutils build_ext that redirects output into the directories chosen
    by the perf Makefile instead of the default ./build tree."""
    def finalize_options(self):
        _build_ext.finalize_options(self)
        # build_lib / build_tmp are module-level globals assigned below from
        # the environment; they are resolved at call time, so the forward
        # reference is fine.
        self.build_lib  = build_lib
        self.build_temp = build_tmp
class install_lib(_install_lib):
    """distutils install_lib that installs from the Makefile-provided build
    directory (see build_ext above's build_lib handling)."""
    def finalize_options(self):
        _install_lib.finalize_options(self)
        # Install from the same directory build_ext wrote into.
        self.build_dir = build_lib
# Compiler flags for the extension, extended with the caller's CFLAGS.
cflags = ['-fno-strict-aliasing', '-Wno-write-strings']
cflags += getenv('CFLAGS', '').split()

# Build output directories are supplied by the perf build system via the
# environment (see tools/perf Makefile).
build_lib = getenv('PYTHON_EXTBUILD_LIB')
build_tmp = getenv('PYTHON_EXTBUILD_TMP')

# Read the list of C source files; 'file' is the Python 2 builtin open
# (this is a python2 script — see the shebang). Skip blank lines and
# '#'-comment lines.
ext_sources = [f.strip() for f in file('util/python-ext-sources')
		if len(f.strip()) > 0 and f[0] != '#']

perf = Extension('perf',
		  sources = ext_sources,
		  include_dirs = ['util/include'],
		  extra_compile_args = cflags,
                 )

setup(name='perf',
      version='0.1',
      description='Interface with the Linux profiling infrastructure',
      author='Arnaldo Carvalho de Melo',
      author_email='acme@redhat.com',
      license='GPLv2',
      url='http://perf.wiki.kernel.org',
      ext_modules=[perf],
      cmdclass={'build_ext': build_ext, 'install_lib': install_lib})
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.