repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
i7mist/ramulator | test_ddr3.py | 4 | 5527 | #!/usr/bin/python
import random
import sys
import time
import tempfile
import subprocess
import psutil
import Queue
import thread
import shutil
class Sim(object):
    """Base class describing one DRAM simulator under test.

    Subclasses supply the command line to launch the simulator (``argv``)
    and a parser that pulls the final cycle count out of the simulator's
    captured stdout (``parse_clk``).
    """

    def __init__(self, _name, _trace):
        # Display name and the key of the trace format this simulator reads.
        self.name = _name
        self.trace = _trace

    def argv(self, trc):
        """Return the command line to run for trace file *trc* (stub)."""
        return ''

    def parse_clk(self, stdout):
        """Parse the simulated cycle count from *stdout* (stub)."""
        return 0
class Ramulator(Sim):
    """Driver for the ramulator DRAM-trace frontend."""

    def __init__(self):
        super(Ramulator, self).__init__('Ramulator', 'ramulator')

    def argv(self, trc):
        # The binary is expected in the current working directory.
        return ['./ramulator-dramtrace', trc]

    def parse_clk(self, stdout):
        """Extract the final cycle count from the captured output."""
        stdout.seek(0)
        for line in stdout.readlines():
            if not line.startswith('Simulation done'):
                continue
            # Third whitespace-separated token is the cycle count.
            return int(line.split()[2])
class DRAMSim2(Sim):
    """Driver for the DRAMSim2 simulator (shares ramulator's trace format)."""

    def __init__(self):
        super(DRAMSim2, self).__init__('DRAMSim2', 'ramulator')

    def argv(self, trc):
        base = '/Users/wky/code/DRAMSim2-2.2.2'
        return [base + '/DRAMSim', '-t', trc, '-s', base + '/system.ini',
                '-d', base + '/ini/DDR3-1600K-2Gbx8.ini']

    def parse_clk(self, stdout):
        """Cycle count is printed in parentheses on the pending-transactions line."""
        stdout.seek(0)
        for line in stdout.readlines():
            if '== Pending Transactions' not in line:
                continue
            open_paren = line.find('(')
            close_paren = line.find(')', open_paren)
            return int(line[open_paren + 1:close_paren])
class USIMM(Sim):
    """Driver for the USIMM simulator."""

    def __init__(self):
        super(USIMM, self).__init__('USIMM', 'usimm')

    def argv(self, trc):
        base = '/Users/wky/code/usimm-v1.3'
        return [base + '/bin/usimm', base + '/input/2Gb_x8.vi', trc]

    def parse_clk(self, stdout):
        stdout.seek(0)
        for line in stdout.readlines():
            if line.startswith('Total Simulation Cycles'):
                # The cycle count is the last token on the line.
                return int(line.split()[-1])
class DrSim(Sim):
    """Driver for the DrSim simulator."""

    def __init__(self):
        super(DrSim, self).__init__('DrSim', 'drsim')

    def argv(self, trc):
        base = '/Users/wky/code/drsim'
        return [base + '/drsim', '--config', base + '/configs/ddr3-1600.cfg', trc]

    def parse_clk(self, stdout):
        stdout.seek(0)
        for line in stdout.readlines():
            if line.startswith('Simulation finished'):
                # The cycle count is the last token on the line.
                return int(line.split()[-1])
class NVMain(Sim):
    """Driver for the NVMain simulator."""

    def __init__(self):
        super(NVMain, self).__init__('NVMain', 'nvmain')

    def argv(self, trc):
        base = '/Users/wky/code/nvmain-0f076410a356'
        return [base + '/nvmain.fast', base + '/Config/2D_DRAM_example.config',
                trc, '2000000000']

    def parse_clk(self, stdout):
        stdout.seek(0)
        for line in stdout.readlines():
            if line.startswith('Exiting at cycle'):
                # "Exiting at cycle <n> ..." -> fourth token is the count.
                return int(line.split()[3])
def gen_random(cb, n, rw, s, bits):
    """Generate n random memory requests in bursts of s bytes.

    cb(addr, is_read, index) is invoked once per 64-byte line.  Each burst
    shares one random 64-byte-aligned base address and one read/write flag
    (read with probability rw).

    Fix: the original used true-division ``/``, which only worked as integer
    division on Python 2; ``//`` is explicit floor division and behaves
    identically for these integer operands on both Python 2 and 3.
    """
    lines_per_burst = s // 64
    bursts = n // lines_per_burst
    for i in range(bursts):
        # Mask clears the low 6 bits so the base is 64-byte aligned.
        base = random.getrandbits(bits) & 0xffffffffffc0
        is_read = random.random() < rw
        for j in range(lines_per_burst):
            cb(base + j * 64, is_read, lines_per_burst * i + j)
def gen_stream(cb, n, rw):
    """Emit a sequential (streaming) address pattern: reads first, then writes.

    cb(addr, is_read, index) is called n times with consecutive 64-byte
    addresses; the first int(n * rw) requests are reads.
    """
    num_reads = int(n * rw)
    # Reads occupy indices [0, num_reads); writes fill the remainder.
    for idx in range(num_reads):
        cb(idx * 64, True, idx)
    for idx in range(num_reads, n):
        cb(idx * 64, False, idx)
# def collect_res(p, sim, ofile, res, q):
# proc = psutil.Process(p.pid)
# t, mem = 0, 0
# while p.poll() is None:
# try:
# mem = max(mem, proc.memory_info()[0]) # rss
# t = sum(proc.cpu_times())
# except psutil.AccessDenied, e: print "======== Oops %s %d failed ===============" % (sim.name, p.pid)
# time.sleep(0.1)
# print '%s(%d) finished.' % (sim.name, p.pid)
# clk = sim.parse_clk(ofile)
# res[sim.name] = (clk, t, mem)
# q.get()
# q.task_done()
def main(n_reqs, rw, rec):
# Build a random trace and a streaming trace in every simulator's on-disk
# format, run each simulator on both traces, and collect per-run
# (cycle count, cpu time, peak RSS bytes) results.
# NOTE(review): Python 2 code (print statements, dict.itervalues/iteritems);
# the original indentation was lost in this dump, so code is kept byte-identical.
trace_names = ['ramulator', 'usimm', 'drsim', 'nvmain']
# make_cb returns a callback that writes one request into every open trace
# file, each in that simulator's expected line format.
def make_cb(files):
def real_cb(addr, rw, i):
files['ramulator'].write('0x%x %s\n' % (addr, 'R' if rw else 'W'))
files['usimm'].write('0 %s 0x%x %s\n' % ('R' if rw else 'W', addr, '0x0' if rw else ''))
files['drsim'].write("0x%x %s %d\n" % (addr, 'READ' if rw else 'WRITE', i))
files['nvmain'].write("%d %s 0x%x %s 0\n" % (i, 'R' if rw else 'W', addr, '0' * 128))
return real_cb
s = 64
traces = []
tmps = {name: tempfile.NamedTemporaryFile() for name in trace_names}
gen_random(make_cb(tmps), n_reqs, rw, s, 31)
for f in tmps.itervalues():
f.file.seek(0)
traces.append(tmps)
# NOTE(review): "Ramdom" is a typo in the output string (left unchanged here).
print 'Ramdom trace created'
tmps = {name: tempfile.NamedTemporaryFile() for name in trace_names}
gen_stream(make_cb(tmps), n_reqs, rw)
for f in tmps.itervalues():
f.file.seek(0)
traces.append(tmps)
print 'Stream trace created'
# Optionally persist the generated traces next to the script.
if rec:
for name, tmpf in traces[0].iteritems():
shutil.copy(tmpf.name, './%s-random.trace' % name)
for name, tmpf in traces[1].iteritems():
shutil.copy(tmpf.name, './%s-stream.trace' % name)
sims = [Ramulator(), DRAMSim2(), USIMM(), DrSim(), NVMain()]
cnt = len(traces) * len(sims)
blackhole = open('/dev/null', 'w')
results = []
for v in traces:
res_dict = {}
for sim in sims:
tmp = tempfile.NamedTemporaryFile()
# Launch the simulator; stdout goes to a temp file for later parsing.
p = subprocess.Popen(sim.argv(v[sim.trace].name), stdout=tmp.file, stderr=blackhole)
print 'Starting %s %d' % (sim.name, p.pid)
proc = psutil.Process(p.pid)
t, mem = 0, 0
# Poll every 100ms, tracking peak resident memory and total cpu time.
while p.poll() is None:
try:
mem = max(mem, proc.memory_info()[0]) # RSS on mac
t = sum(proc.cpu_times())
except: print "======== Oops monitoring %s %d failed ===============" % (sim.name, p.pid)
time.sleep(0.1)
print '%s(%d) finished.' % (sim.name, p.pid)
clk = sim.parse_clk(tmp.file)
res_dict[sim.name] = (clk, t, mem)
tmp.file.close()
results.append(res_dict)
blackhole.close()
print results
# CLI entry point: test_ddr3.py <n-requests> <read proportion> [record]
# (Python 2 print statements; kept byte-identical.)
if __name__ == '__main__':
if len(sys.argv) < 3: print 'test_ddr3.py <n-requests> <read proportion> [record]'
else: main(int(sys.argv[1]), float(sys.argv[2]), (len(sys.argv) > 3 and sys.argv[3] == 'record'))
| mit |
CrimsonDev14/crimsoncoin | qa/rpc-tests/test_framework/socks5.py | 1 | 5662 | #!/usr/bin/env python3
# Copyright (c) 2015-2016 The crimson Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Dummy Socks5 server for testing.
'''
import socket, threading, queue
import traceback, sys
### Protocol constants
# SOCKS5 command codes (RFC 1928, section 4).
class Command:
CONNECT = 0x01
# SOCKS5 address-type codes (RFC 1928, section 5: ATYP field).
class AddressType:
IPV4 = 0x01
DOMAINNAME = 0x03
IPV6 = 0x04
### Utility functions
def recvall(s, n):
    """Read exactly *n* bytes from socket *s*.

    Loops over ``recv`` until the full count is accumulated and raises
    IOError if the peer closes the connection early.  Returns a bytearray.
    """
    buf = bytearray()
    remaining = n
    while remaining > 0:
        chunk = s.recv(remaining)
        if not chunk:
            # Empty recv() means the stream ended before n bytes arrived.
            raise IOError('Unexpected end of stream')
        buf.extend(chunk)
        remaining -= len(chunk)
    return buf
### Implementation classes
class Socks5Configuration(object):
    """Configuration settings for the dummy SOCKS5 proxy."""

    def __init__(self):
        # Bind address; must be set by the caller before starting the server.
        self.addr = None
        # Address family used for the listening socket.
        self.af = socket.AF_INET
        # Whether to accept unauthenticated clients.
        self.unauth = False
        # Whether to accept username/password authentication.
        self.auth = False
class Socks5Command(object):
    """Parsed SOCKS5 request as received by the dummy proxy."""

    def __init__(self, cmd, atyp, addr, port, username, password):
        self.cmd = cmd            # Command code (one of Command.*)
        self.atyp = atyp          # Address type (one of AddressType.*)
        self.addr = addr          # Target address
        self.port = port          # Target port
        self.username = username  # Auth user name, or None
        self.password = password  # Auth password, or None

    def __repr__(self):
        fields = (self.cmd, self.atyp, self.addr, self.port,
                  self.username, self.password)
        return 'Socks5Command(%s,%s,%s,%s,%s,%s)' % fields
# One accepted client connection; handle() performs the full SOCKS5 exchange.
# NOTE(review): indentation was lost in this dump; code kept byte-identical.
class Socks5Connection(object):
def __init__(self, serv, conn, peer):
self.serv = serv
self.conn = conn
self.peer = peer
def handle(self):
'''
Handle socks5 request according to RFC1928
'''
try:
# Verify socks version
ver = recvall(self.conn, 1)[0]
if ver != 0x05:
raise IOError('Invalid socks version %i' % ver)
# Choose authentication method
nmethods = recvall(self.conn, 1)[0]
methods = bytearray(recvall(self.conn, nmethods))
method = None
if 0x02 in methods and self.serv.conf.auth:
method = 0x02 # username/password
elif 0x00 in methods and self.serv.conf.unauth:
method = 0x00 # unauthenticated
if method is None:
raise IOError('No supported authentication method was offered')
# Send response
self.conn.sendall(bytearray([0x05, method]))
# Read authentication (optional)
username = None
password = None
if method == 0x02:
# Username/password sub-negotiation (RFC 1929): version, then
# length-prefixed username and password.
ver = recvall(self.conn, 1)[0]
if ver != 0x01:
raise IOError('Invalid auth packet version %i' % ver)
ulen = recvall(self.conn, 1)[0]
username = str(recvall(self.conn, ulen))
plen = recvall(self.conn, 1)[0]
password = str(recvall(self.conn, plen))
# Send authentication response
self.conn.sendall(bytearray([0x01, 0x00]))
# Read connect request
(ver,cmd,rsv,atyp) = recvall(self.conn, 4)
if ver != 0x05:
raise IOError('Invalid socks version %i in connect request' % ver)
if cmd != Command.CONNECT:
raise IOError('Unhandled command %i in connect request' % cmd)
if atyp == AddressType.IPV4:
addr = recvall(self.conn, 4)
elif atyp == AddressType.DOMAINNAME:
n = recvall(self.conn, 1)[0]
addr = recvall(self.conn, n)
elif atyp == AddressType.IPV6:
addr = recvall(self.conn, 16)
else:
raise IOError('Unknown address type %i' % atyp)
# Port is a big-endian 16-bit value.
port_hi,port_lo = recvall(self.conn, 2)
port = (port_hi << 8) | port_lo
# Send dummy response
self.conn.sendall(bytearray([0x05, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00]))
# Report the parsed command to the test client via the server queue.
cmdin = Socks5Command(cmd, atyp, addr, port, username, password)
self.serv.queue.put(cmdin)
print('Proxy: ', cmdin)
# Fall through to disconnect
except Exception as e:
# Exceptions are also pushed to the queue so the test can observe them.
traceback.print_exc(file=sys.stderr)
self.serv.queue.put(e)
finally:
self.conn.close()
# Threaded dummy SOCKS5 server: accepts connections and hands each one to a
# daemon Socks5Connection thread.  NOTE(review): indentation was lost in this
# dump; code kept byte-identical.
class Socks5Server(object):
def __init__(self, conf):
# Binds and listens immediately; call start() to begin accepting.
self.conf = conf
self.s = socket.socket(conf.af)
self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.s.bind(conf.addr)
self.s.listen(5)
self.running = False
self.thread = None
self.queue = queue.Queue() # report connections and exceptions to client
def run(self):
# Accept loop; exits when stop() flips self.running and pokes the socket.
while self.running:
(sockconn, peer) = self.s.accept()
if self.running:
conn = Socks5Connection(self, sockconn, peer)
thread = threading.Thread(None, conn.handle)
thread.daemon = True
thread.start()
def start(self):
assert(not self.running)
self.running = True
self.thread = threading.Thread(None, self.run)
self.thread.daemon = True
self.thread.start()
def stop(self):
self.running = False
# connect to self to end run loop
s = socket.socket(self.conf.af)
s.connect(self.conf.addr)
s.close()
self.thread.join()
| lgpl-3.0 |
DylanDodds/SoggySushi-py | triggers/musicChatTrigger.py | 1 | 1066 | import re
from interface.trigger import Trigger
# Tasks in this class (other then help) take an arg list consisting of the usual argument
# Passed from the input message, and the current instance of this class for player controls
# Chat trigger that dispatches "music ..." commands to registered task
# handlers.  NOTE(review): indentation was lost in this dump; code kept
# byte-identical.  self._bot and self._tasks are presumably provided by the
# Trigger base class (not visible here) -- TODO confirm.
class MusicChatTrigger(Trigger):
def __init__(self, bot, command):
# command: the chat prefix this trigger responds to (e.g. "music").
self.command = command
self.player = None
super().__init__(bot)
async def notify(self, arg):
# Ignore messages that do not start with our command prefix.
if not arg.content.startswith(self.command):
return
# Strip the prefix (first occurrence only) before parsing sub-commands.
arg.content = re.sub(self.command + ' ', '', arg.content, count=1)
command = arg.content.split(' ')
sender = arg.author
if len(command) < 1 or command[0] == '':
await self._bot.send_message(arg.channel, sender.mention + ', please use "music help" for a list of commands')
return
# Run every task registered for this sub-command, passing this trigger
# instance along for player control.
if command[0] in self._tasks:
for task in self._tasks[command[0]]:
arg.content = re.sub(command[0] + ' ', '', arg.content, count=1)
await task(self, self._bot, command[0], arg)
| gpl-3.0 |
msdx321/android_kernel_samsung_heroXqltechn | scripts/rkp_cfp/debug.py | 26 | 19345 | #!/usr/bin/env python
# Code for validating instrumention (i.e. for detecting bugs in instrument.py).
import re
import multiprocessing
import textwrap
import subprocess
import mmap
import instrument
import common
from common import pr, log
# Regex for an AArch64 register name: x0..x30 / w0..w30, the zero register, or sp.
register_re = r'(?:(x|w)\d+|xzr|sp)'
# Regex for a single lowercase hex digit (as printed by objdump).
hex_char_re = r'(?:[a-f0-9])'
# NOTE(review): the original indentation of this function was lost in this
# dump; the code below is kept byte-identical, with only comments added.
def validate_instrumentation(objdump_uninstr, skip, skip_stp, skip_asm, skip_save_lr_to_stack, skip_br, threads=1):
"""
Make sure that we instrumented vmlinux properly by checking some properties from its objdump.
Properties to check for:
- make sure there aren't any uninstrumented instructions
- i.e. a bl instruction that doesn't go through the springboard
- make sure there aren't any assembly routines that do things with LR that would keep us from re-encrypting it properly
- e.g. storing x30 in a callee saved register (instead of placing it on the stack and adjusting x29)
el1_preempt:
mov x24, x30
...
ret x24
- make sure there aren't any uninstrumented function prologues
i.e.
<assembled_c_function>:
(not a nop)
stp x29, x30, [sp,#-<frame>]!
(insns)
mov x29, sp
<assembled_c_function>:
nop
stp x29, x30, [sp,#-<frame>]!
(insns)
mov x29, sp
<assembled_c_function>:
nop
stp x29, x30, [sp,#<offset>]
add x29, sp, #<offset>
<assembled_c_function>:
(not a nop)
stp x29, x30, [sp,#<offset>]
add x29, sp, #<offset>
"""
# Shared across the worker processes: 'success' flips to False on any finding.
lock = multiprocessing.Lock()
success = multiprocessing.Value('i', True)
def insn_text(line):
"""
>>> insn_text("ffffffc000080148: d503201f nop")
"nop"
"""
m = re.search(r'^{hex_char_re}{{16}}:\s+{hex_char_re}{{8}}\s+(.*)'.format(
hex_char_re=hex_char_re), line)
if m:
return m.group(1)
return ''
#
# Error reporting functions.
#
def _msg(list_of_func_lines, msg, is_failure):
with lock:
if len(list_of_func_lines) > 0:
log(textwrap.dedent(msg))
for func_lines in list_of_func_lines:
log()
for line in func_lines:
log(line.rstrip('\n'))
success.value = False
def errmsg(list_of_func_lines, msg):
_msg(list_of_func_lines, msg, True)
def warmsg(list_of_func_lines, msg):
_msg(list_of_func_lines, msg, False)
def err(list_of_args, msg, error):
with lock:
if len(list_of_args) > 0:
log(textwrap.dedent(msg))
for args in list_of_args:
log()
log(error(*args).rstrip('\n'))
success.value = False
asm_functions = instrument.parse_all_asm_functions(objdump_uninstr.kernel_src)
c_functions = objdump_uninstr.c_functions
#
# Validation functions. Each one runs in its own thread.
#
# --- Validator: byte-level diff between original and instrumented ELF ---
def validate_bin():
# Files must differ.
# subprocess.check_call('! diff -q {vmlinux_uninstr} {vmlinux_instr} > /dev/null'.format(
# vmlinux_uninstr=objdump_uninstr.vmlinux_old, vmlinux_instr=objdump_uninstr.instr), shell=True)
cmd = 'cmp -l {vmlinux_uninstr} {vmlinux_instr}'.format(
vmlinux_uninstr=objdump_uninstr.vmlinux_old, vmlinux_instr=objdump_uninstr.instr) + \
" | gawk '{printf \"%08X %02X %02X\\n\", $1, strtonum(0$2), strtonum(0$3)}'"
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
f = instrument.each_procline(proc)
to_int = lambda x: int(x, 16)
bin_errors = []
for line in f:
# cmp -l reports 1-based byte offsets; convert to 0-based.
byte_offset, byte1, byte2 = map(to_int, re.split(r'\s+', line))
byte_offset -= 1
section = instrument.offset_to_section(byte_offset, objdump_uninstr.sections['sections'])
if section is None:
name = 'None'
bin_errors.append((byte_offset, None, None))
else:
addr = section['address'] + byte_offset - section['offset']
if 'CODE' not in section['type'] or section['name'] == '.vmm':
bin_errors.append((byte_offset, addr, section['name']))
def _to_str(byte_offset, addr, section):
if section is None:
return "byte offset 0x{byte_offset}".format(
byte_offset=instrument._hex(byte_offset))
return "0x{addr} (byte offset 0x{byte_offset}) in section {section}".format(
addr=instrument._hex(addr), byte_offset=instrument._hex(byte_offset), section=section)
err(bin_errors, """
Saw changes in binary sections of instrumented vmlinux that should not be there!
Changes should only be in the code.
""", error=_to_str)
# --- Validator: objdump of the instrumented vmlinux ---
def validate_instr():
"""
Validations to perform on the instrumented vmlinux.
"""
objdump_instr = instrument.load_and_cache_objdump(objdump_uninstr.instr,
kernel_src=objdump_uninstr.kernel_src, config_file=objdump_uninstr.config_file, make_copy=False, just_lines=True)
uninstrumented_br = []
uninstrumented_blr = []
def err_uninstr_branch(uninstr_lines):
with lock:
if len(uninstr_lines) > 0:
log()
log(textwrap.dedent("""
ERROR: instrumentation does not look right (instrument.py has a bug).
These lines in objdump of vmlinux_instr aren't instrumented correcly:
"""))
# Show at most 5 offending lines to keep the report readable.
n = min(5, len(uninstr_lines))
for line in uninstr_lines[0:n]:
log(line)
if n < len(uninstr_lines):
log("...")
success.value = False
def is_uninstr_blr_branch(func, branch_pattern, uninstr_lines):
if not func.startswith('jopp_springboard_') and (
re.search(branch_pattern, line) and not re.search(r'<jopp_springboard_\w+>', line)
):
uninstr_lines.append(line)
return True
uninstrumented_prologue_errors = []
prologue_errors = []
nargs_errors = []
#import pdb; pdb.set_trace()
check_prologue = objdump_instr.is_conf_set('CONFIG_RKP_CFP_ROPP')
for func, lines, last_insns in objdump_instr.each_func_lines(num_last_insns=2):
if instrument.skip_func(func, skip, skip_asm):
continue
prologue_error = False
# TODO: This check incorrectly goes off for cases where objdump skips showing 0 .word's.
# e.g.
# ffffffc000c25d74: d503201f nop
# ...
## ffffffc000c25d84: b3ea3bad .inst 0xb3ea3bad ; undefined
##
# ffffffc003c25d88 <vcs_init>:
# ffffffc000c25d88: a9bd7ffd stp x29, xzr, [sp,#-48]!
# ffffffc000c25d8c: 910003fd mov x29, sp
# ...
#
for i, line in enumerate(lines):
#for checking BR
if re.search(r'\s+br\t', line) and (func not in skip_br):
uninstrumented_br.append(line)
# for checking BLR
if is_uninstr_blr_branch(func, r'\s+blr\t', uninstrumented_blr):
continue
# Detect uninstrumented prologues:
# nop <--- should be eor RRX, x30, RRK
# stp x29, 30
if re.search(r'^nop', insn_text(line)) and i + 1 < len(lines) and re.search(r'stp\tx29, x30, .*sp', lines[i+1]):
uninstrumented_prologue_errors.append(lines)
continue
if check_prologue:
m = re.search(r'stp\tx29, x30, .*sp', line)
if m and func not in skip_stp:
# We are in error if "stp x29, x30, [sp ..." exists in this function.
# (hopefully this doesn't raise false alarms in any assembly functions)
prologue_error = True
continue
if prologue_error:
prologue_errors.append(lines)
err_uninstr_branch(uninstrumented_br)
err_uninstr_branch(uninstrumented_blr)
errmsg(prologue_errors, """
Saw an assembly routine(s) that looks like it is saving x29 and x30 on the stack, but
has not been instrumented to save x29, xzr FIRST.
i.e.
Saw:
stp x29, x30, [sp,#-<frame>]!
(insns)
mov x29, sp <--- might get preempted just before doing this
(won't reencrypt x30!)
Expected:
stp x29, xzr, [sp,#-<frame>]!
mov x29, sp <--- it's ok if we get preempted
(insns) (x30 not stored yet)
str x30, [sp,#<+ 8>]
""")
errmsg(nargs_errors, """
Saw a dissassembled routine that doesn't have the the "number of function
arugments" and the "function entry point magic number" annotated above it.
""")
errmsg(uninstrumented_prologue_errors, """
Saw a function that doesn't have an instrumented C prologue.
In particular, we saw:
<func>:
nop
stp x29, x30, ...
...
But we expected to see:
<func>:
eor RRX, x30, RRK
stp x29, x30, ...
...
""")
# --- Validator: raw code bytes of the uninstrumented vmlinux ---
def validate_uninstr_binary():
"""
Validations to perform on the uninstrumented vmlinux binary words.
"""
if objdump_uninstr.JOPP_CHECK_MAGIC_NUMBER_ON_BLR:
magic_errors = []
# Yield (offset, 4-byte word) pairs for every word of the section.
def each_word(section):
read_f = open(objdump_uninstr.vmlinux_old, 'rb')
read_f.seek(0)
read_mmap = mmap.mmap(read_f.fileno(), 0, access=mmap.ACCESS_READ)
try:
i = section['offset']
while i + 4 < section['size']:
word = read_mmap[i:i+4]
yield i, word
i += 4
finally:
read_mmap.close()
read_f.close()
for section in objdump_uninstr.sections['sections']:
if 'CODE' in section['type']:
# Make sure JOPP_FUNCTION_ENTRY_POINT_MAGIC_NUMBER
# doesn't appear in a word of an uninstrumented vmlinux.
for i, word in each_word(section):
if word == objdump_uninstr.JOPP_FUNCTION_ENTRY_POINT_MAGIC_NUMBER:
magic_errors.append([i, section])
err(magic_errors, """
The magic number chosen to place at the start of every function already
appears in the uninstrumented vmlinux. Find a new magic number!
(JOPP_FUNCTION_ENTRY_POINT_MAGIC_NUMBER = {JOPP_FUNCTION_ENTRY_POINT_MAGIC_NUMBER})
""",
error=lambda i, section: "0x{addr} in section {section}".format(
addr=instrument._hex(i + section['address']), section=section['name']))
# --- Validator: objdump lines of the uninstrumented vmlinux ---
def validate_uninstr_lines():
"""
Validations to perform on the uninstrumented vmlinux objdump lines.
"""
if objdump_uninstr.JOPP_FUNCTION_NOP_SPACERS:
# Assume that the key might change and require return-address reencryption. This
# means we need to have all copies of x30 either in x30 itself, or saved in memory
# and pointed to by a frame pointer.
#
# In particular, we can't allow return-addresses being saved in callee registers
# as is done in some low-level assembly routines, since when the key changes these
# registers will become invalid and not be re-encrypted.
#
# Look for and warn about:
#
# mov <rd>, x30
# ...
# ret <rd>
mov_ret_errors = []
nop_spacer_errors = []
missing_asm_annot_errors = []
c_func_br_errors = []
ldp_spacer_error_funcs = set([])
stp_spacer_error_funcs = set([])
ldp_spacer_errors = []
stp_spacer_errors = []
atomic_prologue_errors = []
atomic_prologue_error_funcs = set([])
for func_i, func, lines, last_insns in objdump_uninstr.each_func_lines(num_last_insns=2, with_func_i=True):
mov_registers = set([])
ret_registers = set([])
is_c_func = func in c_functions
saw_br = False
#if objdump_uninstr.JOPP_FUNCTION_NOP_SPACERS and \
#not instrument.skip_func(func, skip, skip_asm) and func in asm_functions:
#if any(not re.search('\tnop$', l) for l in last_insns if l is not None):
#nop_spacer_errors.append(lines)
for i, line in enumerate(lines, start=func_i):
def slice_lines(start, end):
return lines[start-func_i:end-func_i]
m = re.search(r'mov\t(?P<mov_register>{register_re}), x30'.format(register_re=register_re), line)
if m and m.group('mov_register') != 'sp':
mov_registers.add(m.group('mov_register'))
continue
m = re.search(r'ret\t(?P<ret_register>{register_re})'.format(register_re=register_re), line)
if m:
ret_registers.add(m.group('ret_register'))
continue
m = re.search(r'ldp\tx29,\s+x30,', line)
if m:
# NOTE(review): this re.search call is missing its string
# argument (re.search(r'nop$') raises TypeError if reached).
for l in lines[i+1:i+3]:
if not re.search(r'nop$'):
ldp_spacer_errors.append(lines)
ldp_spacer_error_funcs.add(func)
break
continue
m = re.search(r'stp\tx29,\s+x30,', line)
if m and func not in skip_stp:
missing_nop = False
for l in slice_lines(i-1, i):
if not re.search(r'nop$', l):
stp_spacer_errors.append(lines)
stp_spacer_error_funcs.add(func)
missing_nop = True
break
if missing_nop:
continue
if func == '__kvm_vcpu_run':
pr({'func':func})
mov_j, movx29_insn = instrument.find_add_x29_x30_imm(objdump_uninstr, func, func_i, i)
for l in slice_lines(i+1, mov_j):
if func not in atomic_prologue_error_funcs and re.search(r'\b(x29|sp)\b', insn_text(l)):
atomic_prologue_errors.append(lines)
atomic_prologue_error_funcs.add(func)
break
continue
# End of function; check for errors in that function, and if so, perserve its output.
if len(mov_registers.intersection(ret_registers)) > 0 and func not in skip_save_lr_to_stack:
mov_ret_errors.append(lines)
errmsg(c_func_br_errors, """
Saw a C function in vmlinux without information about the number of arguments it takes.
We need to know this to zero registers on BLR jumps.
""")
errmsg(missing_asm_annot_errors, """
Saw an assembly rountine(s) that hasn't been annotated with the number of
general purpose registers it uses.
Change ENTRY to FUNC_ENTRY for these assembly functions.
""")
errmsg(nop_spacer_errors, """
Saw an assembly rountine(s) that doesn't have 2 nop instruction immediately
before the function label.
We need these for any function that might be the target of a blr instruction!
""")
errmsg(mov_ret_errors, """
Saw an assembly routine(s) saving LR into a register instead of on the stack.
This would prevent us from re-encrypting it properly!
Modify these routine(s) to save LR on the stack and adjust the frame pointer (like in prologues of C functions).
e.g.
stp x29, x30, [sp,#-16]!
mov x29, sp
...
ldp x29, x30, [sp],#16
ret
NOTE: We're only reporting functions found in the compiled vmlinux
(gcc might remove dead code that needs patching as well)
""")
errmsg(ldp_spacer_errors, """
Saw a function with ldp x29, x30 but without 2 nops following it.
Either add an LDP_SPACER to this, use the right compiler, or make an exception.
""")
errmsg(stp_spacer_errors, """
Saw a function with stp x29, x30 but without 1 nop before it.
Either add an STP_SPACER to this, use the right compiler, or make an exception.
""")
warmsg(atomic_prologue_errors, """
Saw a function prologue with:
<func>:
stp x29, x30, ...
(insns)
add x29, sp, #...
BUT, one of the "(insns)" mentions either x29 or sp, so it might not be safe to turn this into:
<func>:
stp x29, x30, ...
add x29, sp, #...
(insns)
""")
# Run each validator, either inline (threads == 1) or in its own process.
procs = []
# for validate in [validate_uninstr_lines]:
for validate in [validate_bin, validate_instr, validate_uninstr_lines, validate_uninstr_binary]:
if threads == 1:
validate()
continue
proc = multiprocessing.Process(target=validate, args=())
proc.start()
procs.append(proc)
for proc in procs:
proc.join()
return bool(success.value)
# Interactive helpers, only defined when running inside IPython.
# NOTE(review): Python 2 code (xrange, map returning a list); indentation was
# lost in this dump -- code kept byte-identical.
if common.run_from_ipython():
# XOR together any number of integers and format the result as hex.
def _x(*hexints):
xored = 0
for hexint in hexints:
xored ^= hexint
return "0x{0:x}".format(xored)
def _d(*addrs):
"""
Assume key is like
0x1111111111111111
Guess key, then decrypt used guessed key.
"""
def __d(addr):
addr = re.sub('^0x', '', addr)
# Top nibble of a kernel address is assumed to be 0xf; XOR against it
# recovers the repeated key byte -- TODO confirm this assumption.
first_4bits = int(addr[0], 16)
first_byte_of_key = (0xf ^ first_4bits) << 4 | (0xf ^ first_4bits)
key = 0
for i in xrange(0, 8):
key |= first_byte_of_key << i*8
return {'decaddr':'0x' + instrument._hex(instrument._int(addr) ^ key),
'key':'0x' + instrument._hex(key)}
return map(__d, addrs)
| gpl-2.0 |
5monkeys/pytest-sftpserver | pytest_sftpserver/sftp/interface.py | 1 | 4370 | # encoding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import calendar
from datetime import datetime
import os
from os import O_CREAT
import stat
from paramiko import ServerInterface, AUTH_SUCCESSFUL, OPEN_SUCCEEDED
from paramiko.sftp import SFTP_OK, SFTP_NO_SUCH_FILE, SFTP_FAILURE, SFTP_OP_UNSUPPORTED
from paramiko.sftp_attr import SFTPAttributes
from paramiko.sftp_handle import SFTPHandle
from paramiko.sftp_si import SFTPServerInterface
from pytest_sftpserver.sftp.util import abspath
# Paramiko SFTP file handle backed by an in-memory ContentProvider instead of
# the real filesystem.  NOTE(review): indentation was lost in this dump; code
# kept byte-identical.
class VirtualSFTPHandle(SFTPHandle):
def __init__(self, path, content_provider, flags=0):
super(VirtualSFTPHandle, self).__init__()
self.path = path
self.content_provider = content_provider
if self.content_provider.get(self.path) is None and flags and flags & O_CREAT == O_CREAT:
# Create new empty "file"
self.content_provider.put(path, "")
def close(self):
# Nothing to release for in-memory content.
return SFTP_OK
def chattr(self, attr):
# Attribute changes are accepted but ignored (virtual backend).
if self.content_provider.get(self.path) is None:
return SFTP_NO_SUCH_FILE
return SFTP_OK
def write(self, offset, data):
# Only whole-file writes are supported; partial writes are rejected.
if offset != 0:
return SFTP_OP_UNSUPPORTED
return SFTP_OK if self.content_provider.put(self.path, data) else SFTP_NO_SUCH_FILE
def read(self, offset, length):
if self.content_provider.get(self.path) is None:
return SFTP_NO_SUCH_FILE
return str(self.content_provider.get(self.path))[offset:offset + length]
def stat(self):
# Synthesize SFTP attributes: world-rwx mode, uid/gid 0, "now" timestamps.
if self.content_provider.get(self.path) is None:
return SFTP_NO_SUCH_FILE
mtime = calendar.timegm(datetime.now().timetuple())
sftp_attrs = SFTPAttributes()
sftp_attrs.st_size = self.content_provider.get_size(self.path)
sftp_attrs.st_uid = 0
sftp_attrs.st_gid = 0
sftp_attrs.st_mode = (
stat.S_IRWXO |
stat.S_IRWXG |
stat.S_IRWXU |
(
stat.S_IFDIR
if self.content_provider.is_dir(self.path)
else stat.S_IFREG
)
)
sftp_attrs.st_atime = mtime
sftp_attrs.st_mtime = mtime
sftp_attrs.filename = os.path.basename(self.path)
return sftp_attrs
# Paramiko SFTP server interface that serves an in-memory ContentProvider.
# NOTE(review): indentation was lost in this dump; code kept byte-identical.
class VirtualSFTPServerInterface(SFTPServerInterface):
def __init__(self, server, *largs, **kwargs):
self.content_provider = kwargs.pop('content_provider', None)
":type: ContentProvider"
super(VirtualSFTPServerInterface, self).__init__(server, *largs, **kwargs)
@abspath
def list_folder(self, path):
# One SFTPAttributes entry per child of the directory.
return [
self.stat(os.path.join(path, fname))
for fname
in self.content_provider.list(path)
]
@abspath
def open(self, path, flags, attr):
return VirtualSFTPHandle(path, self.content_provider, flags=flags)
@abspath
def remove(self, path):
return SFTP_OK if self.content_provider.remove(path) else SFTP_NO_SUCH_FILE
@abspath
def rename(self, oldpath, newpath):
# Implemented as copy-to-new-path then remove-old-path.
content = self.content_provider.get(oldpath)
if not content:
return SFTP_NO_SUCH_FILE
res = self.content_provider.put(newpath, content)
if res:
res = res and self.content_provider.remove(oldpath)
return SFTP_OK if res else SFTP_FAILURE
@abspath
def rmdir(self, path):
return SFTP_OK if self.content_provider.remove(path) else SFTP_FAILURE
@abspath
def mkdir(self, path, attr):
# Refuse to overwrite an existing entry; a dict models a directory.
if self.content_provider.get(path) is not None:
return SFTP_FAILURE
return SFTP_OK if self.content_provider.put(path, {}) else SFTP_FAILURE
@abspath
def stat(self, path):
return VirtualSFTPHandle(path, self.content_provider).stat()
@abspath
def chattr(self, path, attr):
return VirtualSFTPHandle(path, self.content_provider).chattr(attr)
# Test-only paramiko server interface that accepts every authentication
# attempt and every channel request.  Never use outside of tests.
class AllowAllAuthHandler(ServerInterface):
def check_auth_none(self, username):
return AUTH_SUCCESSFUL
def check_auth_password(self, username, password):
return AUTH_SUCCESSFUL
def check_auth_publickey(self, username, key):
return AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return OPEN_SUCCEEDED
| mit |
vongazman/libcloud | docs/examples/compute/cloudstack/create_cloudstack_node_keypair_secgroup.py | 64 | 1035 | from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
# Example script: create a CloudStack node with an existing keypair and
# security groups via libcloud.  Placeholders below must be filled in.
ACCESS_ID = 'your access id'
SECRET_KEY = 'your secret key'
HOST = 'hostname or ip address of your management server'
PATH = 'path to the api endpoint, e.g: /client/api'
SIZE_ID = 'id of the computer offering you want to use'
IMAGE_ID = 'id of the template you want to use'
# Name of the existing keypair you want to use
KEYPAIR_NAME = 'keypairname'
# The security groups you want this node to be added to
SECURITY_GROUP_NAMES = ['secgroup1', 'secgroup2']
cls = get_driver(Provider.CLOUDSTACK)
driver = cls(key=ACCESS_ID, secret=SECRET_KEY, secure=True,
host=HOST, path=PATH)
# Resolve the chosen size (compute offering) and image (template) by id.
sizes = driver.list_sizes()
images = driver.list_images()
size = [s for s in sizes if s.id == SIZE_ID][0]
image = [i for i in images if i.id == IMAGE_ID][0]
node = driver.create_node(name='test-node-1', image=image, size=size,
ex_security_groups=SECURITY_GROUP_NAMES,
ex_keyname=KEYPAIR_NAME)
| apache-2.0 |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/sklearn/metrics/cluster/bicluster.py | 359 | 2797 | from __future__ import division
import numpy as np
from sklearn.utils.linear_assignment_ import linear_assignment
from sklearn.utils.validation import check_consistent_length, check_array
__all__ = ["consensus_score"]
def _check_rows_and_columns(a, b):
"""Unpacks the row and column arrays and checks their shape."""
# a and b are (rows, columns) tuples of indicator arrays; validate that
# each pair has matching lengths, then coerce to 1-d arrays.
check_consistent_length(*a)
check_consistent_length(*b)
checks = lambda x: check_array(x, ensure_2d=False)
a_rows, a_cols = map(checks, a)
b_rows, b_cols = map(checks, b)
return a_rows, a_cols, b_rows, b_cols
def _jaccard(a_rows, a_cols, b_rows, b_cols):
"""Jaccard coefficient on the elements of the two biclusters."""
intersection = ((a_rows * b_rows).sum() *
(a_cols * b_cols).sum())
a_size = a_rows.sum() * a_cols.sum()
b_size = b_rows.sum() * b_cols.sum()
return intersection / (a_size + b_size - intersection)
def _pairwise_similarity(a, b, similarity):
    """Computes pairwise similarity matrix.

    result[i, j] is the similarity of a's bicluster i and b's bicluster j.
    """
    a_rows, a_cols, b_rows, b_cols = _check_rows_and_columns(a, b)
    pairs = [
        [similarity(a_rows[i], a_cols[i], b_rows[j], b_cols[j])
         for j in range(b_rows.shape[0])]
        for i in range(a_rows.shape[0])
    ]
    return np.array(pairs)
def consensus_score(a, b, similarity="jaccard"):
"""The similarity of two sets of biclusters.
Similarity between individual biclusters is computed. Then the
best matching between sets is found using the Hungarian algorithm.
The final score is the sum of similarities divided by the size of
the larger set.
Read more in the :ref:`User Guide <biclustering>`.
Parameters
----------
a : (rows, columns)
Tuple of row and column indicators for a set of biclusters.
b : (rows, columns)
Another set of biclusters like ``a``.
similarity : string or function, optional, default: "jaccard"
May be the string "jaccard" to use the Jaccard coefficient, or
any function that takes four arguments, each of which is a 1d
indicator vector: (a_rows, a_columns, b_rows, b_columns).
References
----------
* Hochreiter, Bodenhofer, et. al., 2010. `FABIA: factor analysis
for bicluster acquisition
<https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2881408/>`__.
"""
if similarity == "jaccard":
similarity = _jaccard
matrix = _pairwise_similarity(a, b, similarity)
# NOTE(review): sklearn.utils.linear_assignment_ was removed in
# scikit-learn 0.23; scipy.optimize.linear_sum_assignment is the modern
# replacement (it minimizes cost, hence the 1 - matrix transform here).
indices = linear_assignment(1. - matrix)
n_a = len(a[0])
n_b = len(b[0])
return matrix[indices[:, 0], indices[:, 1]].sum() / max(n_a, n_b)
| mit |
bunbun/ruffus | ruffus/test/test_branching_dependencies.py | 1 | 13715 | #!/usr/bin/env python
from __future__ import print_function
import json
from collections import defaultdict
import re
import time
import shutil
import unittest
from ruffus import pipeline_run, pipeline_printout, Pipeline, transform, follows, posttask, merge, \
mkdir, suffix, originate, regex, inputs, jobs_limit, files
import sys
"""
branching.py
test branching dependencies
"""
import os
# Per-test working directory: this script's path without extension, plus "/".
tempdir = os.path.relpath(os.path.abspath(os.path.splitext(__file__)[0])) + "/"
# add grandparent to search path so the in-tree ruffus package is importable
grandparent_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, grandparent_dir)
# module name = script name without extension
module_name = os.path.splitext(os.path.basename(__file__))[0]
# parent directory of this script (computed for import-by-file-name; its use is not in this chunk)
parent_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# imports
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
try:
from StringIO import StringIO
except:
from io import StringIO
# use simplejson in place of json for python < 2.6
# try:
# import json
# except ImportError:
# import simplejson
# json = simplejson
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Functions
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
def check_job_io(infiles, outfiles, extra_params):
    """
    Cat the contents of all input files into every output file,
    followed by one line recording the job's parameters as
    ``json(infiles) -> json(outfiles)``.

    ``infiles``/``outfiles`` may each be a single path or a list of
    paths; ``infiles`` may also be None, meaning "no inputs".
    ``extra_params`` is accepted for decorator compatibility and
    otherwise ignored.
    """
    # Normalise both arguments to lists of paths.
    # (Removed: a dead "params = (infiles, outfiles) + extra_params"
    # assignment that was computed but never used.)
    if isinstance(infiles, str):
        infiles = [infiles]
    elif infiles is None:
        infiles = []
    if isinstance(outfiles, str):
        outfiles = [outfiles]
    output_text = list()
    for f in infiles:
        with open(f) as ii:
            output_text.append(ii.read())
    # Sort so the result is independent of input-file order.
    output_text = "".join(sorted(output_text))
    output_text += json.dumps(infiles) + " -> " + json.dumps(outfiles) + "\n"
    for f in outfiles:
        with open(f, "w") as oo:
            oo.write(output_text)
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Main logic
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
# Tasks
# 88888888888888888888888888888888888888888888888888888888888888888888888888888888888888888
#
# 1 -> 2 -> 3 ->
# -> 4 ->
# 5 -> 6
#
def do_write(file_name, what):
    """Append the string *what* to the file at *file_name*."""
    with open(file_name, "a") as sink:
        sink.write(what)
# Sentinel log file: each task's posttask callback appends a "Task N Done" line here.
test_file = tempdir + "task.done"
#
# task1
#
@originate([tempdir + d for d in ('a.1', 'b.1', 'c.1')])
@follows(mkdir(tempdir))
@posttask(lambda: do_write(test_file, "Task 1 Done\n"))
def task1(outfile, *extra_params):
    """
    First task: originate a.1 / b.1 / c.1 from nothing.

    Each job logs its parameters to jobs.start before running and to
    jobs.finish afterwards so scheduling order can be checked later.
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([None, outfile]))
    check_job_io(None, outfile, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([None, outfile]))
#
# task2
#
@posttask(lambda: do_write(test_file, "Task 2 Done\n"))
@transform(task1, suffix(".1"), ".2")
def task2(infiles, outfiles, *extra_params):
    """
    Second task: transform each *.1 from task1 into *.2,
    logging to jobs.start / jobs.finish around the work.
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
    check_job_io(infiles, outfiles, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task3
#
@transform(task2, regex('(.*).2'), inputs([r"\1.2", tempdir + "a.1"]), r'\1.3')
@posttask(lambda: do_write(test_file, "Task 3 Done\n"))
def task3(infiles, outfiles, *extra_params):
    """
    Third task: each *.2 plus the shared a.1 becomes *.3
    (inputs(...) adds a.1 as an extra dependency for every job).
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
    check_job_io(infiles, outfiles, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task4
#
@jobs_limit(1)
@transform(tempdir + "*.1", suffix(".1"), ".4")
@follows(task1)
@posttask(lambda: do_write(test_file, "Task 4 Done\n"))
def task4(infiles, outfiles, *extra_params):
    """
    Fourth task: *.1 glob -> *.4, serialised (jobs_limit(1)) and
    slowed by 0.1s per job so ordering effects are observable.
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
    time.sleep(0.1)
    check_job_io(infiles, outfiles, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task5
#
@files(None, tempdir + 'a.5')
@follows(mkdir(tempdir))
@posttask(lambda: do_write(test_file, "Task 5 Done\n"))
def task5(infiles, outfiles, *extra_params):
    """
    Fifth task: independent branch producing a.5 from nothing;
    sleeps 1s so it finishes late relative to the other branches.
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
    time.sleep(1)
    check_job_io(infiles, outfiles, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
#
# task6
#
# Old explicit form kept for reference:
# @files([[[tempdir + d for d in 'a.3', 'b.3', 'c.3', 'a.4', 'b.4', 'c.4', 'a.5'], tempdir + 'final.6']])
@merge([task3, task4, task5], tempdir + "final.6")
@follows(task3, task4, task5, )
@posttask(lambda: do_write(test_file, "Task 6 Done\n"))
def task6(infiles, outfiles, *extra_params):
    """
    Final task: merge every *.3, *.4 and a.5 into final.6.
    """
    with open(tempdir + "jobs.start", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
    check_job_io(infiles, outfiles, extra_params)
    with open(tempdir + "jobs.finish", "a") as oo:
        oo.write('job = %s\n' % json.dumps([infiles, outfiles]))
#
#   Use equivalent but new style (object-oriented) syntax
#
# Builds the same six-task pipeline as the decorators above, under the
# separate pipeline name "test", so both APIs can be exercised.
test_pipeline = Pipeline("test")
test_pipeline.originate(task_func=task1,
                        output=[tempdir + d for d in ('a.1', 'b.1', 'c.1')])\
    .follows(mkdir(tempdir))\
    .posttask(lambda: do_write(test_file, "Task 1 Done\n"))
test_pipeline.transform(task_func=task2,
                        input=task1,
                        filter=suffix(".1"),
                        output=".2") \
    .posttask(lambda: do_write(test_file, "Task 2 Done\n"))
test_pipeline.transform(task3, task2, regex('(.*).2'), inputs([r"\1.2", tempdir + "a.1"]), r'\1.3')\
    .posttask(lambda: do_write(test_file, "Task 3 Done\n"))
test_pipeline.transform(task4, tempdir + "*.1", suffix(".1"), ".4")\
    .follows(task1)\
    .posttask(lambda: do_write(test_file, "Task 4 Done\n"))\
    .jobs_limit(1)
test_pipeline.files(task5, None, tempdir + 'a.5')\
    .follows(mkdir(tempdir))\
    .posttask(lambda: do_write(test_file, "Task 5 Done\n"))
test_pipeline.merge(task_func=task6,
                    input=[task3, task4, task5],
                    output=tempdir + "final.6")\
    .follows(task3, task4, task5, ) \
    .posttask(lambda: do_write(test_file, "Task 6 Done\n"))
def check_job_order_correct(filename):
    """
    Verify that the job lines in *filename* respect the dependency graph::

        1   ->  2   ->  3 ->
            ->  4         ->
                      5   ->  6

    Each line is mapped to its task number via the trailing ``.N`` in the
    file names it mentions; for every (before, after) precedence rule the
    last line of *before* must occur earlier than the first line of *after*.

    Raises Exception on an unparseable line or an ordering violation.
    """
    precedence_rules = [[1, 2],
                        [2, 3],
                        [1, 4],
                        [5, 6],
                        [3, 6],
                        [4, 6]]
    index_re = re.compile(r'.*\.([0-9])["\]\n]*$')
    job_indices = defaultdict(list)
    with open(filename) as ii:
        for linenum, l in enumerate(ii):
            m = index_re.search(l)
            if not m:
                # BUG FIX: raising a plain string is a TypeError on
                # Python 2.6+/3; raise a real exception instead.
                raise Exception("Non-matching line in [%s]" % filename)
            job_indices[int(m.group(1))].append(linenum)
    for job_index in job_indices:
        job_indices[job_index].sort()
    for before, after in precedence_rules:
        if before not in job_indices or after not in job_indices:
            continue
        if job_indices[before][-1] >= job_indices[after][0]:
            raise Exception("Precedence violated for job %d [line %d] and job %d [line %d] of [%s]"
                            % (before, job_indices[before][-1],
                               after, job_indices[after][0],
                               filename))
def check_final_output_correct(after_touch_files=False):
    """
    Check that DIR/final.6 contains exactly the expected, sorted set of
    ``inputs -> outputs`` records accumulated by the pipeline.

    If *after_touch_files* is true, one "[] -> [DIR/a.1]" record is
    dropped from the expectation (touching files causes one fewer
    task1 re-run).  Raises Exception, after printing a diff to stderr,
    on any mismatch.
    """
    expected_output = \
        """ ["DIR/a.1"] -> ["DIR/a.2"]
        ["DIR/a.1"] -> ["DIR/a.4"]
        ["DIR/a.2", "DIR/a.1"] -> ["DIR/a.3"]
        ["DIR/a.3", "DIR/b.3", "DIR/c.3", "DIR/a.4", "DIR/b.4", "DIR/c.4", "DIR/a.5"] -> ["DIR/final.6"]
        ["DIR/b.1"] -> ["DIR/b.2"]
        ["DIR/b.1"] -> ["DIR/b.4"]
        ["DIR/b.2", "DIR/a.1"] -> ["DIR/b.3"]
        ["DIR/c.1"] -> ["DIR/c.2"]
        ["DIR/c.1"] -> ["DIR/c.4"]
        ["DIR/c.2", "DIR/a.1"] -> ["DIR/c.3"]
        [] -> ["DIR/a.1"]
        [] -> ["DIR/a.1"]
        [] -> ["DIR/a.1"]
        [] -> ["DIR/a.1"]
        [] -> ["DIR/a.1"]
        [] -> ["DIR/a.5"]
        [] -> ["DIR/b.1"]
        [] -> ["DIR/b.1"]
        [] -> ["DIR/c.1"]
        [] -> ["DIR/c.1"]"""
    expected_output = expected_output.replace(
        " ", "").replace("DIR/", tempdir).split("\n")
    # BUG FIX: take a copy -- previously this aliased expected_output,
    # so the pop() below also mutated the "original" list used for the
    # "Expected:" diagnostic printout.
    orig_expected_output = list(expected_output)
    if after_touch_files:
        expected_output.pop(-3)
    with open(tempdir + "final.6", "r") as ii:
        final_6_contents = sorted([l.rstrip() for l in ii.readlines()])
    if final_6_contents != expected_output:
        print("Actual:", file=sys.stderr)
        for ll in final_6_contents:
            print(ll, file=sys.stderr)
        print("_" * 80, file=sys.stderr)
        print("Expected:", file=sys.stderr)
        for ll in orig_expected_output:
            print(ll, file=sys.stderr)
        print("_" * 80, file=sys.stderr)
        for i, (l1, l2) in enumerate(zip(final_6_contents, expected_output)):
            if l1 != l2:
                sys.stderr.write(
                    "%d\nActual:\n    >%s<\nExpected:\n    >%s<\n" % (i, l1, l2))
        raise Exception("Final.6 output is not as expected\n")
class Test_ruffus(unittest.TestCase):
    """End-to-end checks of the branching pipeline, once through the
    decorator API (pipeline "main") and once through the OO API."""
    def tearDown(self):
        # Best-effort cleanup: the directory may already be gone.
        try:
            shutil.rmtree(tempdir)
        except:
            pass
    def setUp(self):
        # Start each test from an empty working directory.
        try:
            shutil.rmtree(tempdir)
        except:
            pass
        os.makedirs(tempdir)
    def test_ruffus(self):
        """Run the decorator-defined pipeline, then exercise touch_files_only."""
        print("\n\n     Run pipeline normally...")
        pipeline_run(multiprocess=10, verbose=0, pipeline="main")
        check_final_output_correct()
        check_job_order_correct(tempdir + "jobs.start")
        check_job_order_correct(tempdir + "jobs.finish")
        print("     OK")
        print("\n\n     Touch task2 only:")
        os.unlink(os.path.join(tempdir, "jobs.start"))
        os.unlink(os.path.join(tempdir, "jobs.finish"))
        print("       First delete b.1 for task2...")
        os.unlink(os.path.join(tempdir, "b.1"))
        print("       Then run with touch_file_only...")
        pipeline_run([task2], multiprocess=10,
                     touch_files_only=True, verbose=0, pipeline="main")
        # check touching has made task2 up to date
        s = StringIO()
        pipeline_printout(s, [task2], verbose=4,
                          wrap_width=10000, pipeline="main")
        output_str = s.getvalue()
        #print (">>>\n", output_str, "<<<\n", file=sys.stderr)
        if "b.1" in output_str:
            raise Exception("Expected b.1 created by touching...")
        if "b.2" in output_str:
            raise Exception("Expected b.2 created by touching...")
        print("     Touching has made task2 up to date...\n")
        print("     Then run normally again...")
        pipeline_run(multiprocess=10, verbose=0, pipeline="main")
        check_final_output_correct(True)
        check_job_order_correct(tempdir + "jobs.start")
        check_job_order_correct(tempdir + "jobs.finish")
    def test_ruffus_new_syntax(self):
        """Same scenario as test_ruffus, via the Pipeline("test") object."""
        print("\n\n     Run pipeline normally...")
        test_pipeline.run(multiprocess=10, verbose=0)
        check_final_output_correct()
        check_job_order_correct(tempdir + "jobs.start")
        check_job_order_correct(tempdir + "jobs.finish")
        print("     OK")
        print("\n\n     Touch task2 only:")
        os.unlink(os.path.join(tempdir, "jobs.start"))
        os.unlink(os.path.join(tempdir, "jobs.finish"))
        print("       First delete b.1 for task2...")
        os.unlink(os.path.join(tempdir, "b.1"))
        print("       Then run with touch_file_only...")
        test_pipeline.run([task2], multiprocess=10,
                          touch_files_only=True, verbose=0)
        # check touching has made task2 up to date
        s = StringIO()
        test_pipeline.printout(s, [task2], verbose=4, wrap_width=10000)
        output_str = s.getvalue()
        #print (">>>\n", output_str, "<<<\n", file=sys.stderr)
        if "b.1" in output_str:
            raise Exception("Expected b.1 created by touching...")
        if "b.2" in output_str:
            raise Exception("Expected b.2 created by touching...")
        print("     Touching has made task2 up to date...\n")
        print("     Then run normally again...")
        test_pipeline.run(multiprocess=10, verbose=0)
        check_final_output_correct(True)
        check_job_order_correct(tempdir + "jobs.start")
        check_job_order_correct(tempdir + "jobs.finish")
if __name__ == '__main__':
unittest.main()
| mit |
Naeka/cmsplugin-dailymotion | cmsplugin_dailymotion/migrations/0001_initial.py | 1 | 3874 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0001: create the DailymotionViewer plugin
    table, a one-to-one extension of django-cms's CMSPlugin."""
    def forwards(self, orm):
        # Adding model 'DailymotionViewer'
        db.create_table(u'cmsplugin_dailymotion_dailymotionviewer', (
            (u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
            ('video_src', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('width', self.gf('django.db.models.fields.CharField')(default='100%', max_length=6)),
            ('allow_fullscreen', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('start_at', self.gf('django.db.models.fields.PositiveIntegerField')(null=True, blank=True)),
            ('auto_start', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'cmsplugin_dailymotion', ['DailymotionViewer'])
    def backwards(self, orm):
        # Deleting model 'DailymotionViewer'
        db.delete_table(u'cmsplugin_dailymotion_dailymotionviewer')
    # Frozen ORM description used by South to reconstruct the models as
    # they existed at this migration.
    models = {
        'cms.cmsplugin': {
            'Meta': {'object_name': 'CMSPlugin'},
            'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
            'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
            'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
            'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
            'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
        },
        'cms.placeholder': {
            'Meta': {'object_name': 'Placeholder'},
            'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
        },
        u'cmsplugin_dailymotion.dailymotionviewer': {
            'Meta': {'object_name': 'DailymotionViewer', '_ormbases': ['cms.CMSPlugin']},
            'allow_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'auto_start': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
            'start_at': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'video_src': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
            'width': ('django.db.models.fields.CharField', [], {'default': "'100%'", 'max_length': '6'})
        }
    }
complete_apps = ['cmsplugin_dailymotion'] | mit |
JimCircadian/ansible | lib/ansible/modules/cloud/amazon/ec2_snapshot.py | 71 | 9675 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: ec2_snapshot
short_description: creates a snapshot from an existing volume
description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
volume_id:
description:
- volume from which to take the snapshot
required: false
description:
description:
- description to be applied to the snapshot
required: false
instance_id:
description:
- instance that has the required volume to snapshot mounted
required: false
device_name:
description:
- device name of a mounted volume to be snapshotted
required: false
snapshot_tags:
description:
- a hash/dictionary of tags to add to the snapshot
required: false
version_added: "1.6"
wait:
description:
- wait for the snapshot to be ready
type: bool
required: false
default: yes
version_added: "1.5.1"
wait_timeout:
description:
- how long before wait gives up, in seconds
- specify 0 to wait forever
required: false
default: 0
version_added: "1.5.1"
state:
description:
- whether to add or create a snapshot
required: false
default: present
choices: ['absent', 'present']
version_added: "1.9"
snapshot_id:
description:
- snapshot id to remove
required: false
version_added: "1.9"
last_snapshot_min_age:
description:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "2.0"
author: "Will Thames (@willthames)"
extends_documentation_fragment:
- aws
- ec2
'''
EXAMPLES = '''
# Simple snapshot of volume using volume_id
- ec2_snapshot:
volume_id: vol-abcdef12
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume mounted on device_name attached to instance_id
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
description: snapshot of /data from DB123 taken 2013/11/28 12:18:32
# Snapshot of volume with tagging
- ec2_snapshot:
instance_id: i-12345678
device_name: /dev/sdb1
snapshot_tags:
frequency: hourly
source: /data
# Remove a snapshot
- local_action:
module: ec2_snapshot
snapshot_id: snap-abcd1234
state: absent
# Create a snapshot only if the most recent one is older than 1 hour
- local_action:
module: ec2_snapshot
volume_id: vol-abcdef12
last_snapshot_min_age: 60
'''
import time
import datetime
try:
import boto.exception
except ImportError:
pass # Taken care of by ec2.HAS_BOTO
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import HAS_BOTO, ec2_argument_spec, ec2_connect
# Find the most recent snapshot
def _get_snapshot_starttime(snap):
return datetime.datetime.strptime(snap.start_time, '%Y-%m-%dT%H:%M:%S.000Z')
def _get_most_recent_snapshot(snapshots, max_snapshot_age_secs=None, now=None):
    """
    Gets the most recently created snapshot and optionally filters the result
    if the snapshot is too old

    :param snapshots: list of snapshots to search
    :param max_snapshot_age_secs: filter the result if its older than this
    :param now: simulate time -- used for unit testing
    :return: the youngest snapshot, or None when there are no snapshots
        or the youngest one exceeds the age limit
    """
    # Idiomatic emptiness test (was: len(snapshots) == 0).
    if not snapshots:
        return None
    if not now:
        now = datetime.datetime.utcnow()
    youngest_snapshot = max(snapshots, key=_get_snapshot_starttime)
    # Reuse the shared parser instead of duplicating the strptime format.
    snapshot_age = now - _get_snapshot_starttime(youngest_snapshot)
    if max_snapshot_age_secs is not None:
        if snapshot_age.total_seconds() > max_snapshot_age_secs:
            return None
    return youngest_snapshot
def _create_with_wait(snapshot, wait_timeout_secs, sleep_func=time.sleep):
"""
Wait for the snapshot to be created
:param snapshot:
:param wait_timeout_secs: fail this step after this many seconds
:param sleep_func:
:return:
"""
time_waited = 0
snapshot.update()
while snapshot.status != 'completed':
sleep_func(3)
snapshot.update()
time_waited += 3
if wait_timeout_secs and time_waited > wait_timeout_secs:
return False
return True
def create_snapshot(module, ec2, state=None, description=None, wait=None,
                    wait_timeout=None, volume_id=None, instance_id=None,
                    snapshot_id=None, device_name=None, snapshot_tags=None,
                    last_snapshot_min_age=None):
    """
    Create or delete an EBS snapshot, then exit the Ansible module.

    Exactly one of volume_id / instance_id / snapshot_id must be given;
    instance_id additionally requires device_name.  state='absent'
    deletes snapshot_id.  Otherwise a snapshot of the target volume is
    created unless one younger than last_snapshot_min_age (minutes)
    already exists.  Never returns normally: every path ends in
    module.exit_json() or module.fail_json().
    """
    snapshot = None
    changed = False
    required = [volume_id, snapshot_id, instance_id]
    if required.count(None) != len(required) - 1:  # only 1 must be set
        module.fail_json(msg='One and only one of volume_id or instance_id or snapshot_id must be specified')
    if instance_id and not device_name or device_name and not instance_id:
        module.fail_json(msg='Instance ID and device name must both be specified')
    # Resolve (instance_id, device_name) to the attached volume's id.
    if instance_id:
        try:
            volumes = ec2.get_all_volumes(filters={'attachment.instance-id': instance_id, 'attachment.device': device_name})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
        if not volumes:
            module.fail_json(msg="Could not find volume with name %s attached to instance %s" % (device_name, instance_id))
        volume_id = volumes[0].id
    if state == 'absent':
        if not snapshot_id:
            module.fail_json(msg='snapshot_id must be set when state is absent')
        try:
            ec2.delete_snapshot(snapshot_id)
        except boto.exception.BotoServerError as e:
            # exception is raised if snapshot does not exist
            if e.error_code == 'InvalidSnapshot.NotFound':
                # Already gone: idempotent success, nothing changed.
                module.exit_json(changed=False)
            else:
                module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
        # successful delete
        module.exit_json(changed=True)
    # Reuse a sufficiently recent existing snapshot if one exists.
    if last_snapshot_min_age > 0:
        try:
            current_snapshots = ec2.get_all_snapshots(filters={'volume_id': volume_id})
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
        last_snapshot_min_age = last_snapshot_min_age * 60  # Convert to seconds
        snapshot = _get_most_recent_snapshot(current_snapshots,
                                             max_snapshot_age_secs=last_snapshot_min_age)
    try:
        # Create a new snapshot if we didn't find an existing one to use
        if snapshot is None:
            snapshot = ec2.create_snapshot(volume_id, description=description)
            changed = True
        if wait:
            if not _create_with_wait(snapshot, wait_timeout):
                module.fail_json(msg='Timed out while creating snapshot.')
        if snapshot_tags:
            for k, v in snapshot_tags.items():
                snapshot.add_tag(k, v)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
    module.exit_json(changed=changed,
                     snapshot_id=snapshot.id,
                     volume_id=snapshot.volume_id,
                     volume_size=snapshot.volume_size,
                     tags=snapshot.tags.copy())
def create_snapshot_ansible_module():
    """
    Build the AnsibleModule: the shared EC2 argument spec extended with
    this module's snapshot-specific parameters.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            volume_id=dict(),
            description=dict(),
            instance_id=dict(),
            snapshot_id=dict(),
            device_name=dict(),
            wait=dict(type='bool', default=True),
            wait_timeout=dict(type='int', default=0),
            last_snapshot_min_age=dict(type='int', default=0),
            snapshot_tags=dict(type='dict', default=dict()),
            state=dict(choices=['absent', 'present'], default='present'),
        )
    )
    module = AnsibleModule(argument_spec=argument_spec)
    return module
def main():
    """Module entry point: build the module, check boto, connect, dispatch."""
    module = create_snapshot_ansible_module()
    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')
    volume_id = module.params.get('volume_id')
    snapshot_id = module.params.get('snapshot_id')
    description = module.params.get('description')
    instance_id = module.params.get('instance_id')
    device_name = module.params.get('device_name')
    wait = module.params.get('wait')
    wait_timeout = module.params.get('wait_timeout')
    last_snapshot_min_age = module.params.get('last_snapshot_min_age')
    snapshot_tags = module.params.get('snapshot_tags')
    state = module.params.get('state')
    ec2 = ec2_connect(module)
    # create_snapshot() exits the module itself via exit_json/fail_json.
    create_snapshot(
        module=module,
        state=state,
        description=description,
        wait=wait,
        wait_timeout=wait_timeout,
        ec2=ec2,
        volume_id=volume_id,
        instance_id=instance_id,
        snapshot_id=snapshot_id,
        device_name=device_name,
        snapshot_tags=snapshot_tags,
        last_snapshot_min_age=last_snapshot_min_age
    )
if __name__ == '__main__':
main()
| gpl-3.0 |
axbaretto/beam | sdks/python/.eggs/nose-1.3.7-py2.7.egg/nose/plugins/deprecated.py | 107 | 1551 | """
This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
exception. When :class:`DeprecatedTest` is raised, the exception will be logged
in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
will be output, and the exception will not be counted as an error or failure.
It is enabled by default, but can be turned off by using ``--no-deprecated``.
"""
from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
class DeprecatedTest(Exception):
    """Raise this exception to mark a test as deprecated.

    The Deprecated plugin reports it as 'DEPRECATED' instead of
    counting it as an error or failure.
    """
    pass
class Deprecated(ErrorClassPlugin):
    """
    Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
    by default.
    """
    enabled = True
    # isfailure=False: deprecated tests are reported but do not fail the run.
    deprecated = ErrorClass(DeprecatedTest,
                            label='DEPRECATED',
                            isfailure=False)
    def options(self, parser, env):
        """Register commandline options.
        """
        env_opt = 'NOSE_WITHOUT_DEPRECATED'
        parser.add_option('--no-deprecated', action='store_true',
                          dest='noDeprecated', default=env.get(env_opt, False),
                          help="Disable special handling of DeprecatedTest "
                          "exceptions.")
    def configure(self, options, conf):
        """Configure plugin.
        """
        if not self.can_configure:
            return
        self.conf = conf
        # --no-deprecated (or NOSE_WITHOUT_DEPRECATED) turns the plugin off.
        disable = getattr(options, 'noDeprecated', False)
        if disable:
            self.enabled = False
| apache-2.0 |
SOKP/external_chromium_org | mojo/python/tests/system_unittest.py | 26 | 11412 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import random
import sys
import time
import unittest
# pylint: disable=F0401
import mojo.embedder
from mojo import system
DATA_SIZE = 1024
def _GetRandomBuffer(size):
random.seed(size)
return bytearray(''.join(chr(random.randint(0, 255)) for i in xrange(size)))
class BaseMojoTest(unittest.TestCase):
  """Base fixture: initialises the mojo embedder before each test."""
  def setUp(self):
    mojo.embedder.Init()
class CoreTest(BaseMojoTest):
  def testResults(self):
    # RESULT_OK is zero; every error code is strictly negative.
    self.assertEquals(system.RESULT_OK, 0)
    self.assertLess(system.RESULT_CANCELLED, 0)
    self.assertLess(system.RESULT_UNKNOWN, 0)
    self.assertLess(system.RESULT_INVALID_ARGUMENT, 0)
    self.assertLess(system.RESULT_DEADLINE_EXCEEDED, 0)
    self.assertLess(system.RESULT_NOT_FOUND, 0)
    self.assertLess(system.RESULT_ALREADY_EXISTS, 0)
    self.assertLess(system.RESULT_PERMISSION_DENIED, 0)
    self.assertLess(system.RESULT_RESOURCE_EXHAUSTED, 0)
    self.assertLess(system.RESULT_FAILED_PRECONDITION, 0)
    self.assertLess(system.RESULT_ABORTED, 0)
    self.assertLess(system.RESULT_OUT_OF_RANGE, 0)
    self.assertLess(system.RESULT_UNIMPLEMENTED, 0)
    self.assertLess(system.RESULT_INTERNAL, 0)
    self.assertLess(system.RESULT_UNAVAILABLE, 0)
    self.assertLess(system.RESULT_DATA_LOSS, 0)
    self.assertLess(system.RESULT_BUSY, 0)
    self.assertLess(system.RESULT_SHOULD_WAIT, 0)
  def testConstants(self):
    # All deadline/signal/flag constants are non-negative.
    self.assertGreaterEqual(system.DEADLINE_INDEFINITE, 0)
    self.assertGreaterEqual(system.HANDLE_SIGNAL_NONE, 0)
    self.assertGreaterEqual(system.HANDLE_SIGNAL_READABLE, 0)
    self.assertGreaterEqual(system.HANDLE_SIGNAL_WRITABLE, 0)
    self.assertGreaterEqual(system.WRITE_MESSAGE_FLAG_NONE, 0)
    self.assertGreaterEqual(system.READ_MESSAGE_FLAG_NONE, 0)
    self.assertGreaterEqual(system.READ_MESSAGE_FLAG_MAY_DISCARD, 0)
    self.assertGreaterEqual(system.WRITE_DATA_FLAG_NONE, 0)
    self.assertGreaterEqual(system.WRITE_DATA_FLAG_ALL_OR_NONE, 0)
    self.assertGreaterEqual(system.READ_DATA_FLAG_NONE, 0)
    self.assertGreaterEqual(system.READ_DATA_FLAG_ALL_OR_NONE, 0)
    self.assertGreaterEqual(system.READ_DATA_FLAG_DISCARD, 0)
    self.assertGreaterEqual(system.READ_DATA_FLAG_QUERY, 0)
    self.assertGreaterEqual(system.MAP_BUFFER_FLAG_NONE, 0)
  def testGetTimeTicksNow(self):
    # Ticks must be positive and advance by more than 1000 over a 1 ms
    # sleep, but by no more than the elapsed wall-clock microseconds --
    # consistent with a microsecond-resolution monotonic counter.
    pt1 = time.time()
    v1 = system.GetTimeTicksNow()
    time.sleep(1e-3)
    v2 = system.GetTimeTicksNow()
    pt2 = time.time()
    self.assertGreater(v1, 0)
    self.assertGreater(v2, v1 + 1000)
    self.assertGreater(1e6 * (pt2 - pt1), v2 - v1)
  def _testHandlesCreation(self, *args):
    # Every handle must be valid on creation and invalid after Close().
    for handle in args:
      self.assertTrue(handle.IsValid())
      handle.Close()
      self.assertFalse(handle.IsValid())
  def _TestMessageHandleCreation(self, handles):
    # Both ends of a message pipe open valid and close cleanly.
    self._testHandlesCreation(handles.handle0, handles.handle1)
  def testCreateMessagePipe(self):
    self._TestMessageHandleCreation(system.MessagePipe())
  def testCreateMessagePipeWithNoneOptions(self):
    self._TestMessageHandleCreation(system.MessagePipe(None))
  def testCreateMessagePipeWithOptions(self):
    self._TestMessageHandleCreation(
        system.MessagePipe(system.CreateMessagePipeOptions()))
  def testWaitOverMessagePipe(self):
    # A fresh pipe end is immediately writable but not readable; it
    # becomes readable only after the peer writes a message.
    handles = system.MessagePipe()
    handle = handles.handle0
    self.assertEquals(system.RESULT_OK, handle.Wait(
        system.HANDLE_SIGNAL_WRITABLE, system.DEADLINE_INDEFINITE))
    self.assertEquals(system.RESULT_DEADLINE_EXCEEDED,
                      handle.Wait(system.HANDLE_SIGNAL_READABLE, 0))
    handles.handle1.WriteMessage()
    self.assertEquals(
        system.RESULT_OK,
        handle.Wait(
            system.HANDLE_SIGNAL_READABLE,
            system.DEADLINE_INDEFINITE))
  def testWaitOverManyMessagePipe(self):
    # WaitMany returns the index of the satisfied (handle, signal) entry,
    # or RESULT_DEADLINE_EXCEEDED when nothing is satisfied in time.
    handles = system.MessagePipe()
    handle0 = handles.handle0
    handle1 = handles.handle1
    self.assertEquals(
        0,
        system.WaitMany(
            [(handle0, system.HANDLE_SIGNAL_WRITABLE),
             (handle1, system.HANDLE_SIGNAL_WRITABLE)],
            system.DEADLINE_INDEFINITE))
    self.assertEquals(
        system.RESULT_DEADLINE_EXCEEDED,
        system.WaitMany(
            [(handle0, system.HANDLE_SIGNAL_READABLE),
             (handle1, system.HANDLE_SIGNAL_READABLE)], 0))
    handle0.WriteMessage()
    # Writing on handle0 makes the peer end (index 1) readable.
    self.assertEquals(
        1,
        system.WaitMany(
            [(handle0, system.HANDLE_SIGNAL_READABLE),
             (handle1, system.HANDLE_SIGNAL_READABLE)],
            system.DEADLINE_INDEFINITE))
  def testSendBytesOverMessagePipe(self):
    handles = system.MessagePipe()
    data = _GetRandomBuffer(DATA_SIZE)
    handles.handle0.WriteMessage(data)
    # Reading with no buffer reports RESOURCE_EXHAUSTED plus the pending
    # message's (byte count, handle count).
    (res, buffers, next_message) = handles.handle1.ReadMessage()
    self.assertEquals(system.RESULT_RESOURCE_EXHAUSTED, res)
    self.assertEquals(None, buffers)
    self.assertEquals((DATA_SIZE, 0), next_message)
    result = bytearray(DATA_SIZE)
    (res, buffers, next_message) = handles.handle1.ReadMessage(result)
    self.assertEquals(system.RESULT_OK, res)
    self.assertEquals(None, next_message)
    self.assertEquals((data, []), buffers)
  def testSendEmptyDataOverMessagePipe(self):
    # A payload-less message is still delivered (empty buffers tuple).
    handles = system.MessagePipe()
    handles.handle0.WriteMessage(None)
    (res, buffers, next_message) = handles.handle1.ReadMessage()
    self.assertEquals(system.RESULT_OK, res)
    self.assertEquals(None, next_message)
    self.assertEquals((None, []), buffers)
  def testSendHandleOverMessagePipe(self):
    # Handles sent through a pipe are invalidated on the sending side
    # and arrive as new, usable handles on the receiving side.
    handles = system.MessagePipe()
    handles_to_send = system.MessagePipe()
    handles.handle0.WriteMessage(handles=[handles_to_send.handle0,
                                          handles_to_send.handle1])
    (res, buffers, next_message) = handles.handle1.ReadMessage(
        max_number_of_handles=2)
    self.assertFalse(handles_to_send.handle0.IsValid())
    self.assertFalse(handles_to_send.handle1.IsValid())
    self.assertEquals(system.RESULT_OK, res)
    self.assertEquals(None, next_message)
    self.assertEquals(None, buffers[0])
    self.assertEquals(2, len(buffers[1]))
    handles = buffers[1]
    for handle in handles:
      self.assertTrue(handle.IsValid())
      (res, buffers, next_message) = handle.ReadMessage()
      self.assertEquals(system.RESULT_SHOULD_WAIT, res)
    for handle in handles:
      handle.WriteMessage()
    for handle in handles:
      (res, buffers, next_message) = handle.ReadMessage()
      self.assertEquals(system.RESULT_OK, res)
  def _TestDataHandleCreation(self, handles):
    # Producer and consumer ends open valid and close cleanly.
    self._testHandlesCreation(
        handles.producer_handle, handles.consumer_handle)
  def testCreateDataPipe(self):
    self._TestDataHandleCreation(system.DataPipe())
  def testCreateDataPipeWithNoneOptions(self):
    self._TestDataHandleCreation(system.DataPipe(None))
  def testCreateDataPipeWithDefaultOptions(self):
    self._TestDataHandleCreation(
        system.DataPipe(system.CreateDataPipeOptions()))
  def testCreateDataPipeWithDiscardFlag(self):
    options = system.CreateDataPipeOptions()
    options.flags = system.CreateDataPipeOptions.FLAG_MAY_DISCARD
    self._TestDataHandleCreation(system.DataPipe(options))
  def testCreateDataPipeWithElementSize(self):
    options = system.CreateDataPipeOptions()
    options.element_num_bytes = 5
    self._TestDataHandleCreation(system.DataPipe(options))
  def testCreateDataPipeWithCapacity(self):
    # NOTE(review): this sets 'element_capacity_num_bytes' while the
    # invalid-parameters test below sets 'capacity_num_bytes' -- verify
    # which attribute name CreateDataPipeOptions actually defines.
    options = system.CreateDataPipeOptions()
    options.element_capacity_num_bytes = DATA_SIZE
    self._TestDataHandleCreation(system.DataPipe(options))
  def testCreateDataPipeWithIncorrectParameters(self):
    # A capacity that is not a multiple of the element size must be
    # rejected with RESULT_INVALID_ARGUMENT.
    options = system.CreateDataPipeOptions()
    options.element_num_bytes = 5
    options.capacity_num_bytes = DATA_SIZE
    with self.assertRaises(system.MojoException) as cm:
      self._TestDataHandleCreation(system.DataPipe(options))
    self.assertEquals(system.RESULT_INVALID_ARGUMENT, cm.exception.mojo_result)
def testSendEmptyDataOverDataPipe(self):
pipes = system.DataPipe()
self.assertEquals((system.RESULT_OK, 0), pipes.producer_handle.WriteData())
self.assertEquals(
(system.RESULT_OK, None), pipes.consumer_handle.ReadData())
  def testSendDataOverDataPipe(self):
    """Tests that a buffer written to the producer is read back unchanged."""
    pipes = system.DataPipe()
    data = _GetRandomBuffer(DATA_SIZE)
    self.assertEquals((system.RESULT_OK, DATA_SIZE),
                      pipes.producer_handle.WriteData(data))
    self.assertEquals((system.RESULT_OK, data),
                      pipes.consumer_handle.ReadData(bytearray(DATA_SIZE)))
  def testTwoPhaseWriteOnDataPipe(self):
    """Tests the two-phase (begin/end) write API of a data pipe."""
    pipes = system.DataPipe()
    (res, buf) = pipes.producer_handle.BeginWriteData(DATA_SIZE)
    self.assertEquals(system.RESULT_OK, res)
    # The pipe may hand back a larger buffer than requested.
    self.assertGreaterEqual(len(buf.buffer), DATA_SIZE)
    data = _GetRandomBuffer(DATA_SIZE)
    buf.buffer[0:DATA_SIZE] = data
    self.assertEquals(system.RESULT_OK, buf.End(DATA_SIZE))
    self.assertEquals((system.RESULT_OK, data),
                      pipes.consumer_handle.ReadData(bytearray(DATA_SIZE)))
  def testTwoPhaseReadOnDataPipe(self):
    """Tests the two-phase (begin/end) read API of a data pipe."""
    pipes = system.DataPipe()
    data = _GetRandomBuffer(DATA_SIZE)
    self.assertEquals((system.RESULT_OK, DATA_SIZE),
                      pipes.producer_handle.WriteData(data))
    (res, buf) = pipes.consumer_handle.BeginReadData()
    self.assertEquals(system.RESULT_OK, res)
    self.assertEquals(DATA_SIZE, len(buf.buffer))
    self.assertEquals(data, buf.buffer)
    self.assertEquals(system.RESULT_OK, buf.End(DATA_SIZE))
  def testCreateSharedBuffer(self):
    """Tests shared buffer creation with no options argument."""
    self._testHandlesCreation(system.CreateSharedBuffer(DATA_SIZE))
  def testCreateSharedBufferWithNoneOptions(self):
    """Tests shared buffer creation with an explicit None options argument."""
    self._testHandlesCreation(system.CreateSharedBuffer(DATA_SIZE, None))
  def testCreateSharedBufferWithDefaultOptions(self):
    """Tests shared buffer creation with a default-constructed options."""
    self._testHandlesCreation(
        system.CreateSharedBuffer(
            DATA_SIZE,
            system.CreateSharedBufferOptions()))
  def testDuplicateSharedBuffer(self):
    """Tests duplicating a shared buffer handle with no options."""
    handle = system.CreateSharedBuffer(DATA_SIZE)
    self._testHandlesCreation(handle.Duplicate())
  def testDuplicateSharedBufferWithNoneOptions(self):
    """Tests duplicating a shared buffer handle with None options."""
    handle = system.CreateSharedBuffer(DATA_SIZE)
    self._testHandlesCreation(handle.Duplicate(None))
  def testDuplicateSharedBufferWithDefaultOptions(self):
    """Tests duplicating a shared buffer handle with default options."""
    handle = system.CreateSharedBuffer(DATA_SIZE)
    self._testHandlesCreation(
        handle.Duplicate(system.DuplicateSharedBufferOptions()))
  def testSendBytesOverSharedBuffer(self):
    """Tests that two mappings of a shared buffer view the same memory."""
    handle = system.CreateSharedBuffer(DATA_SIZE)
    duplicated = handle.Duplicate()
    data = _GetRandomBuffer(DATA_SIZE)
    (res1, buf1) = handle.Map(0, DATA_SIZE)
    (res2, buf2) = duplicated.Map(0, DATA_SIZE)
    self.assertEquals(system.RESULT_OK, res1)
    self.assertEquals(system.RESULT_OK, res2)
    self.assertEquals(DATA_SIZE, len(buf1.buffer))
    self.assertEquals(DATA_SIZE, len(buf2.buffer))
    self.assertEquals(buf1.buffer, buf2.buffer)
    # Writing through one mapping must be visible through the other.
    buf1.buffer[:] = data
    self.assertEquals(data, buf1.buffer)
    self.assertEquals(data, buf2.buffer)
    self.assertEquals(buf1.buffer, buf2.buffer)
if __name__ == '__main__':
  # Build and run the CoreTest suite, exiting non-zero on any failure.
  suite = unittest.TestLoader().loadTestsFromTestCase(CoreTest)
  outcome = unittest.TextTestRunner(verbosity=0).run(suite)
  sys.exit(0 if outcome.wasSuccessful() else 1)
| bsd-3-clause |
omapzoom/platform-external-chromium | net/tools/testserver/chromiumsync.py | 66 | 33589 | #!/usr/bin/python2.4
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An implementation of the server side of the Chromium sync protocol.
The details of the protocol are described mostly by comments in the protocol
buffer definition at chrome/browser/sync/protocol/sync.proto.
"""
import cgi
import copy
import operator
import random
import sys
import threading
import app_specifics_pb2
import autofill_specifics_pb2
import bookmark_specifics_pb2
import extension_specifics_pb2
import nigori_specifics_pb2
import password_specifics_pb2
import preference_specifics_pb2
import session_specifics_pb2
import sync_pb2
import theme_specifics_pb2
import typed_url_specifics_pb2
# An enumeration of the various kinds of data that can be synced.
# Over the wire, this enumeration is not used: a sync object's type is
# inferred by which EntitySpecifics extension it has. But in the context
# of a program, it is useful to have an enumeration.
# NOTE: the chained tuple assignment both names each constant and collects
# them all in ALL_TYPES; the values are consecutive ints starting at 0.
ALL_TYPES = (
    TOP_LEVEL,  # The type of the 'Google Chrome' folder.
    APPS,
    AUTOFILL,
    AUTOFILL_PROFILE,
    BOOKMARK,
    EXTENSIONS,
    NIGORI,
    PASSWORD,
    PREFERENCE,
    SESSION,
    THEME,
    TYPED_URL) = range(12)
# Well-known server tag of the top level "Google Chrome" folder.
TOP_LEVEL_FOLDER_TAG = 'google_chrome'
# Given a sync type from ALL_TYPES, find the extension token corresponding
# to that datatype. Note that TOP_LEVEL has no such token.
SYNC_TYPE_TO_EXTENSION = {
    APPS: app_specifics_pb2.app,
    AUTOFILL: autofill_specifics_pb2.autofill,
    AUTOFILL_PROFILE: autofill_specifics_pb2.autofill_profile,
    BOOKMARK: bookmark_specifics_pb2.bookmark,
    EXTENSIONS: extension_specifics_pb2.extension,
    NIGORI: nigori_specifics_pb2.nigori,
    PASSWORD: password_specifics_pb2.password,
    PREFERENCE: preference_specifics_pb2.preference,
    SESSION: session_specifics_pb2.session,
    THEME: theme_specifics_pb2.theme,
    TYPED_URL: typed_url_specifics_pb2.typed_url,
    }
# The parent ID used to indicate a top-level node.
ROOT_ID = '0'
class Error(Exception):
  """Base class for all errors raised by this module."""
class ProtobufExtensionNotUnique(Error):
  """An entry should not have more than one protobuf extension present."""
class DataTypeIdNotRecognized(Error):
  """The requested data type id matches no known sync type."""
def GetEntryType(entry):
  """Extract the sync type from a SyncEntry.

  Args:
    entry: A SyncEntity protobuf object whose type to determine.
  Returns:
    A value from ALL_TYPES if the entry's type can be determined, or None
    if the type cannot be determined.
  Raises:
    ProtobufExtensionNotUnique: More than one type was indicated by the entry.
  """
  # The top-level folder is identified by its server tag, not an extension.
  if entry.server_defined_unique_tag == TOP_LEVEL_FOLDER_TAG:
    return TOP_LEVEL
  detected_types = GetEntryTypesFromSpecifics(entry.specifics)
  # More than one extension is a caller error; use GetEntryTypes instead.
  if len(detected_types) > 1:
    raise ProtobufExtensionNotUnique
  if detected_types:
    return detected_types[0]
  return None
def GetEntryTypesFromSpecifics(specifics):
  """Determine the sync types indicated by an EntitySpecifics's extension(s).

  If the specifics have more than one recognized extension (as commonly
  happens with the requested_types field of GetUpdatesMessage), all types
  will be returned. Callers must handle the possibility of the returned
  value having more than one item.

  Args:
    specifics: A EntitySpecifics protobuf message whose extensions to
      enumerate.
  Returns:
    A list of the sync types (values from ALL_TYPES) associated with each
    recognized extension of the specifics message.
  """
  matching_types = []
  for sync_type, extension in SYNC_TYPE_TO_EXTENSION.iteritems():
    if specifics.HasExtension(extension):
      matching_types.append(sync_type)
  return matching_types
def SyncTypeToProtocolDataTypeId(data_type):
  """Convert from a sync type (python enum) to the protocol's data type id."""
  extension = SYNC_TYPE_TO_EXTENSION[data_type]
  return extension.number
def ProtocolDataTypeIdToSyncType(protocol_data_type_id):
  """Convert from the protocol's data type id to a sync type (python enum).

  Raises:
    DataTypeIdNotRecognized: No sync type maps to the given id.
  """
  for sync_type, extension in SYNC_TYPE_TO_EXTENSION.iteritems():
    if extension.number == protocol_data_type_id:
      return sync_type
  raise DataTypeIdNotRecognized
def GetDefaultEntitySpecifics(data_type):
  """Get an EntitySpecifics having a sync type's default extension value.

  For TOP_LEVEL (or any unmapped type) the specifics are returned empty.
  """
  specifics = sync_pb2.EntitySpecifics()
  extension_handle = SYNC_TYPE_TO_EXTENSION.get(data_type)
  if extension_handle is not None:
    # Mark the extension present without setting any of its fields.
    specifics.Extensions[extension_handle].SetInParent()
  return specifics
def DeepCopyOfProto(proto):
  """Return a deep copy of a protocol buffer.

  Works by default-constructing a message of the same type and merging the
  original into it.
  """
  duplicate = type(proto)()
  duplicate.MergeFrom(proto)
  return duplicate
class PermanentItem(object):
  """A specification of one server-created permanent item.

  Attributes:
    tag: A known-to-the-client value that uniquely identifies a server-created
        permanent item.
    name: The human-readable display name for this item.
    parent_tag: The tag of the permanent item's parent. If ROOT_ID, indicates
        a top-level item. Otherwise, this must be the tag value of some other
        server-created permanent item.
    sync_type: A value from ALL_TYPES, giving the datatype of this permanent
        item. This controls which types of client GetUpdates requests will
        cause the permanent item to be created and returned.
  """
  def __init__(self, tag, name, parent_tag, sync_type):
    # Plain value object: store every constructor argument verbatim.
    self.tag, self.name = tag, name
    self.parent_tag, self.sync_type = parent_tag, sync_type
class UpdateSieve(object):
  """A filter to remove items the client has already seen."""
  def __init__(self, request):
    # Maps sync type -> last timestamp the client reports having seen.
    self._original_request = request
    self._state = {}
    if request.from_progress_marker:
      for marker in request.from_progress_marker:
        if marker.HasField("timestamp_token_for_migration"):
          # Marker from a pre-token client being migrated.
          timestamp = marker.timestamp_token_for_migration
        elif marker.token:
          # Non-empty token: the token encodes the timestamp directly.
          timestamp = int(marker.token)
        elif marker.HasField("token"):
          # Token present but empty: treat as a first-time (zero) request.
          timestamp = 0
        else:
          raise ValueError("No timestamp information in progress marker.")
        data_type = ProtocolDataTypeIdToSyncType(marker.data_type_id)
        self._state[data_type] = timestamp
    elif request.HasField("from_timestamp"):
      # Old-style request: one timestamp shared by all requested types.
      for data_type in GetEntryTypesFromSpecifics(request.requested_types):
        self._state[data_type] = request.from_timestamp
    if self._state:
      # The TOP_LEVEL folder must be considered as old as the oldest type.
      self._state[TOP_LEVEL] = min(self._state.itervalues())
  def ClientWantsItem(self, item):
    """Return true if the client hasn't already seen an item."""
    return self._state.get(GetEntryType(item), sys.maxint) < item.version
  def HasAnyTimestamp(self):
    """Return true if at least one datatype was requested."""
    return bool(self._state)
  def GetMinTimestamp(self):
    """Return the smallest timestamp requested across all datatypes."""
    return min(self._state.itervalues())
  def GetFirstTimeTypes(self):
    """Return a list of datatypes requesting updates from timestamp zero."""
    return [datatype for datatype, timestamp in self._state.iteritems()
            if timestamp == 0]
  def SaveProgress(self, new_timestamp, get_updates_response):
    """Write the new_timestamp or new_progress_marker fields to a response."""
    if self._original_request.from_progress_marker:
      for data_type, old_timestamp in self._state.iteritems():
        if data_type == TOP_LEVEL:
          continue
        new_marker = sync_pb2.DataTypeProgressMarker()
        new_marker.data_type_id = SyncTypeToProtocolDataTypeId(data_type)
        new_marker.token = str(max(old_timestamp, new_timestamp))
        # Only echo back a marker if it actually advances the client.
        if new_marker not in self._original_request.from_progress_marker:
          get_updates_response.new_progress_marker.add().MergeFrom(new_marker)
    elif self._original_request.HasField("from_timestamp"):
      if self._original_request.from_timestamp < new_timestamp:
        get_updates_response.new_timestamp = new_timestamp
class SyncDataModel(object):
  """Models the account state of one sync user."""
  # Maximum number of changed entries returned per GetChanges batch.
  _BATCH_SIZE = 100
  # Specify all the permanent items that a model might need.
  _PERMANENT_ITEM_SPECS = [
      PermanentItem('google_chrome', name='Google Chrome',
                    parent_tag=ROOT_ID, sync_type=TOP_LEVEL),
      PermanentItem('google_chrome_bookmarks', name='Bookmarks',
                    parent_tag='google_chrome', sync_type=BOOKMARK),
      PermanentItem('bookmark_bar', name='Bookmark Bar',
                    parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
      PermanentItem('other_bookmarks', name='Other Bookmarks',
                    parent_tag='google_chrome_bookmarks', sync_type=BOOKMARK),
      PermanentItem('google_chrome_preferences', name='Preferences',
                    parent_tag='google_chrome', sync_type=PREFERENCE),
      PermanentItem('google_chrome_autofill', name='Autofill',
                    parent_tag='google_chrome', sync_type=AUTOFILL),
      PermanentItem('google_chrome_autofill_profiles', name='Autofill Profiles',
                    parent_tag='google_chrome', sync_type=AUTOFILL_PROFILE),
      PermanentItem('google_chrome_extensions', name='Extensions',
                    parent_tag='google_chrome', sync_type=EXTENSIONS),
      PermanentItem('google_chrome_passwords', name='Passwords',
                    parent_tag='google_chrome', sync_type=PASSWORD),
      PermanentItem('google_chrome_sessions', name='Sessions',
                    parent_tag='google_chrome', sync_type=SESSION),
      PermanentItem('google_chrome_themes', name='Themes',
                    parent_tag='google_chrome', sync_type=THEME),
      PermanentItem('google_chrome_typed_urls', name='Typed URLs',
                    parent_tag='google_chrome', sync_type=TYPED_URL),
      PermanentItem('google_chrome_nigori', name='Nigori',
                    parent_tag='google_chrome', sync_type=NIGORI),
      PermanentItem('google_chrome_apps', name='Apps',
                    parent_tag='google_chrome', sync_type=APPS),
      ]
  def __init__(self):
    # Monotonically increasing version number. The next object change will
    # take on this value + 1.
    self._version = 0
    # The definitive copy of this client's items: a map from ID string to a
    # SyncEntity protocol buffer.
    self._entries = {}
    # TODO(nick): uuid.uuid1() is better, but python 2.5 only.
    self.store_birthday = '%0.30f' % random.random()
  def _SaveEntry(self, entry):
    """Insert or update an entry in the change log, and give it a new version.

    The ID fields of this entry are assumed to be valid server IDs. This
    entry will be updated with a new version number and sync_timestamp.

    Args:
      entry: The entry to be added or updated.
    """
    self._version += 1
    # Maintain a global (rather than per-item) sequence number and use it
    # both as the per-entry version as well as the update-progress timestamp.
    # This simulates the behavior of the original server implementation.
    entry.version = self._version
    entry.sync_timestamp = self._version
    # Preserve the originator info, which the client is not required to send
    # when updating.
    base_entry = self._entries.get(entry.id_string)
    if base_entry:
      entry.originator_cache_guid = base_entry.originator_cache_guid
      entry.originator_client_item_id = base_entry.originator_client_item_id
    # Store a copy so later mutation of the caller's object can't corrupt us.
    self._entries[entry.id_string] = DeepCopyOfProto(entry)
  def _ServerTagToId(self, tag):
    """Determine the server ID from a server-unique tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      tag: The unique, known-to-the-client tag of a server-generated item.
    Returns:
      The string value of the computed server ID.
    """
    if tag and tag != ROOT_ID:
      return '<server tag>%s' % tag
    else:
      return tag
  def _ClientTagToId(self, tag):
    """Determine the server ID from a client-unique tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      tag: The unique, opaque-to-the-server tag of a client-tagged item.
    Returns:
      The string value of the computed server ID.
    """
    return '<client tag>%s' % tag
  def _ClientIdToId(self, client_guid, client_item_id):
    """Compute a unique server ID from a client-local ID tag.

    The resulting value is guaranteed not to collide with the other ID
    generation methods.

    Args:
      client_guid: A globally unique ID that identifies the client which
        created this item.
      client_item_id: An ID that uniquely identifies this item on the client
        which created it.
    Returns:
      The string value of the computed server ID.
    """
    # Using the client ID info is not required here (we could instead generate
    # a random ID), but it's useful for debugging.
    return '<server ID originally>%s/%s' % (client_guid, client_item_id)
  def _WritePosition(self, entry, parent_id, prev_id=None):
    """Convert from a relative position into an absolute, numeric position.

    Clients specify positions using the predecessor-based references; the
    server stores and reports item positions using sparse integer values.
    This method converts from the former to the latter.

    Args:
      entry: The entry for which to compute a position. Its ID field are
        assumed to be server IDs. This entry will have its parent_id_string
        and position_in_parent fields updated; its insert_after_item_id field
        will be cleared.
      parent_id: The ID of the entry intended as the new parent.
      prev_id: The ID of the entry intended as the new predecessor. If this
        is None, or an ID of an object which is not a child of the new parent,
        the entry will be positioned at the end (right) of the ordering. If
        the empty ID (''), this will be positioned at the front (left) of the
        ordering. Otherwise, the entry will be given a position_in_parent
        value placing it just after (to the right of) the new predecessor.
    """
    # Gap left between adjacent positions so later insertions can
    # interpolate without renumbering siblings.
    preferred_gap = 2 ** 20
    def ExtendRange(current_limit_entry, sign_multiplier):
      """Compute values at the beginning or end."""
      if current_limit_entry.id_string == entry.id_string:
        step = 0
      else:
        step = sign_multiplier * preferred_gap
      return current_limit_entry.position_in_parent + step
    siblings = [x for x in self._entries.values()
                if x.parent_id_string == parent_id and not x.deleted]
    siblings = sorted(siblings, key=operator.attrgetter('position_in_parent'))
    if prev_id == entry.id_string:
      prev_id = ''
    if not siblings:
      # First item in this container; start in the middle.
      entry.position_in_parent = 0
    elif not prev_id:
      # A special value in the protocol. Insert at first position.
      entry.position_in_parent = ExtendRange(siblings[0], -1)
    else:
      # Handle mid-insertion; consider items along with their successors.
      for item, successor in zip(siblings, siblings[1:]):
        if item.id_string != prev_id:
          continue
        elif successor.id_string == entry.id_string:
          # We're already in place; don't change anything.
          entry.position_in_parent = successor.position_in_parent
        else:
          # Interpolate new position between the previous item and its
          # existing successor.
          entry.position_in_parent = (item.position_in_parent * 7 +
                                      successor.position_in_parent) / 8
        break
      else:
        # Insert at end. Includes the case where prev_id is None.
        entry.position_in_parent = ExtendRange(siblings[-1], +1)
    entry.parent_id_string = parent_id
    entry.ClearField('insert_after_item_id')
  def _ItemExists(self, id_string):
    """Determine whether an item exists in the changelog."""
    return id_string in self._entries
  def _CreatePermanentItem(self, spec):
    """Create one permanent item from its spec, if it doesn't exist.

    The resulting item is added to the changelog.

    Args:
      spec: A PermanentItem object holding the properties of the item to create.
    """
    id_string = self._ServerTagToId(spec.tag)
    if self._ItemExists(id_string):
      return
    print 'Creating permanent item: %s' % spec.name
    entry = sync_pb2.SyncEntity()
    entry.id_string = id_string
    entry.non_unique_name = spec.name
    entry.name = spec.name
    entry.server_defined_unique_tag = spec.tag
    entry.folder = True
    entry.deleted = False
    entry.specifics.CopyFrom(GetDefaultEntitySpecifics(spec.sync_type))
    self._WritePosition(entry, self._ServerTagToId(spec.parent_tag))
    self._SaveEntry(entry)
  def _CreatePermanentItems(self, requested_types):
    """Ensure creation of all permanent items for a given set of sync types.

    Args:
      requested_types: A list of sync data types from ALL_TYPES.
        Permanent items of only these types will be created.
    """
    for spec in self._PERMANENT_ITEM_SPECS:
      if spec.sync_type in requested_types:
        self._CreatePermanentItem(spec)
  def GetChanges(self, sieve):
    """Get entries which have changed, oldest first.

    The returned entries are limited to being _BATCH_SIZE many. The entries
    are returned in strict version order.

    Args:
      sieve: An update sieve to use to filter out updates the client
        has already seen.
    Returns:
      A tuple of (version, entries, changes_remaining). Version is a new
      timestamp value, which should be used as the starting point for the
      next query. Entries is the batch of entries meeting the current
      timestamp query. Changes_remaining indicates the number of changes
      left on the server after this batch.
    """
    if not sieve.HasAnyTimestamp():
      return (0, [], 0)
    min_timestamp = sieve.GetMinTimestamp()
    self._CreatePermanentItems(sieve.GetFirstTimeTypes())
    change_log = sorted(self._entries.values(),
                        key=operator.attrgetter('version'))
    new_changes = [x for x in change_log if x.version > min_timestamp]
    # Pick batch_size new changes, and then filter them. This matches
    # the RPC behavior of the production sync server.
    batch = new_changes[:self._BATCH_SIZE]
    if not batch:
      # Client is up to date.
      return (min_timestamp, [], 0)
    # Restrict batch to requested types. Tombstones are untyped
    # and will always get included.
    filtered = [DeepCopyOfProto(item) for item in batch
                if item.deleted or sieve.ClientWantsItem(item)]
    # The new client timestamp is the timestamp of the last item in the
    # batch, even if that item was filtered out.
    return (batch[-1].version, filtered, len(new_changes) - len(batch))
  def _CopyOverImmutableFields(self, entry):
    """Preserve immutable fields by copying pre-commit state.

    Args:
      entry: A sync entity from the client.
    """
    if entry.id_string in self._entries:
      if self._entries[entry.id_string].HasField(
          'server_defined_unique_tag'):
        entry.server_defined_unique_tag = (
            self._entries[entry.id_string].server_defined_unique_tag)
  def _CheckVersionForCommit(self, entry):
    """Perform an optimistic concurrency check on the version number.

    Clients are only allowed to commit if they report having seen the most
    recent version of an object.

    Args:
      entry: A sync entity from the client. It is assumed that ID fields
        have been converted to server IDs.
    Returns:
      A boolean value indicating whether the client's version matches the
      newest server version for the given entry.
    """
    if entry.id_string in self._entries:
      # Allow edits/deletes if the version matches, and any undeletion.
      return (self._entries[entry.id_string].version == entry.version or
              self._entries[entry.id_string].deleted)
    else:
      # Allow unknown ID only if the client thinks it's new too.
      return entry.version == 0
  def _CheckParentIdForCommit(self, entry):
    """Check that the parent ID referenced in a SyncEntity actually exists.

    Args:
      entry: A sync entity from the client. It is assumed that ID fields
        have been converted to server IDs.
    Returns:
      A boolean value indicating whether the entity's parent ID is an object
      that actually exists (and is not deleted) in the current account state.
    """
    if entry.parent_id_string == ROOT_ID:
      # This is generally allowed.
      return True
    if entry.parent_id_string not in self._entries:
      print 'Warning: Client sent unknown ID. Should never happen.'
      return False
    if entry.parent_id_string == entry.id_string:
      print 'Warning: Client sent circular reference. Should never happen.'
      return False
    if self._entries[entry.parent_id_string].deleted:
      # This can happen in a race condition between two clients.
      return False
    if not self._entries[entry.parent_id_string].folder:
      print 'Warning: Client sent non-folder parent. Should never happen.'
      return False
    return True
  def _RewriteIdsAsServerIds(self, entry, cache_guid, commit_session):
    """Convert ID fields in a client sync entry to server IDs.

    A commit batch sent by a client may contain new items for which the
    server has not generated IDs yet. And within a commit batch, later
    items are allowed to refer to earlier items. This method will
    generate server IDs for new items, as well as rewrite references
    to items whose server IDs were generated earlier in the batch.

    Args:
      entry: The client sync entry to modify.
      cache_guid: The globally unique ID of the client that sent this
        commit request.
      commit_session: A dictionary mapping the original IDs to the new server
        IDs, for any items committed earlier in the batch.
    """
    if entry.version == 0:
      if entry.HasField('client_defined_unique_tag'):
        # When present, this should determine the item's ID.
        new_id = self._ClientTagToId(entry.client_defined_unique_tag)
      else:
        new_id = self._ClientIdToId(cache_guid, entry.id_string)
        entry.originator_cache_guid = cache_guid
        entry.originator_client_item_id = entry.id_string
      commit_session[entry.id_string] = new_id  # Remember the remapping.
      entry.id_string = new_id
    # Rewrite references to IDs that were remapped earlier in this batch.
    if entry.parent_id_string in commit_session:
      entry.parent_id_string = commit_session[entry.parent_id_string]
    if entry.insert_after_item_id in commit_session:
      entry.insert_after_item_id = commit_session[entry.insert_after_item_id]
  def CommitEntry(self, entry, cache_guid, commit_session):
    """Attempt to commit one entry to the user's account.

    Args:
      entry: A SyncEntity protobuf representing desired object changes.
      cache_guid: A string value uniquely identifying the client; this
        is used for ID generation and will determine the originator_cache_guid
        if the entry is new.
      commit_session: A dictionary mapping client IDs to server IDs for any
        objects committed earlier this session. If the entry gets a new ID
        during commit, the change will be recorded here.
    Returns:
      A SyncEntity reflecting the post-commit value of the entry, or None
      if the entry was not committed due to an error.
    """
    entry = DeepCopyOfProto(entry)
    # Generate server IDs for this entry, and write generated server IDs
    # from earlier entries into the message's fields, as appropriate. The
    # ID generation state is stored in 'commit_session'.
    self._RewriteIdsAsServerIds(entry, cache_guid, commit_session)
    # Perform the optimistic concurrency check on the entry's version number.
    # Clients are not allowed to commit unless they indicate that they've seen
    # the most recent version of an object.
    if not self._CheckVersionForCommit(entry):
      return None
    # Check the validity of the parent ID; it must exist at this point.
    # TODO(nick): Implement cycle detection and resolution.
    if not self._CheckParentIdForCommit(entry):
      return None
    self._CopyOverImmutableFields(entry);
    # At this point, the commit is definitely going to happen.
    # Deletion works by storing a limited record for an entry, called a
    # tombstone. A sync server must track deleted IDs forever, since it does
    # not keep track of client knowledge (there's no deletion ACK event).
    if entry.deleted:
      def MakeTombstone(id_string):
        """Make a tombstone entry that will replace the entry being deleted.

        Args:
          id_string: Index of the SyncEntity to be deleted.
        Returns:
          A new SyncEntity reflecting the fact that the entry is deleted.
        """
        # Only the ID, version and deletion state are preserved on a tombstone.
        # TODO(nick): Does the production server not preserve the type? Not
        # doing so means that tombstones cannot be filtered based on
        # requested_types at GetUpdates time.
        tombstone = sync_pb2.SyncEntity()
        tombstone.id_string = id_string
        tombstone.deleted = True
        tombstone.name = ''
        return tombstone
      def IsChild(child_id):
        """Check if a SyncEntity is a child of entry, or any of its children.

        Recurses up the parent chain until it reaches entry or runs out of
        known ancestors.

        Args:
          child_id: Index of the SyncEntity that is a possible child of entry.
        Returns:
          True if it is a child; false otherwise.
        """
        if child_id not in self._entries:
          return False
        if self._entries[child_id].parent_id_string == entry.id_string:
          return True
        return IsChild(self._entries[child_id].parent_id_string)
      # Identify any children entry might have.
      child_ids = [child.id_string for child in self._entries.itervalues()
                   if IsChild(child.id_string)]
      # Mark all children that were identified as deleted.
      for child_id in child_ids:
        self._SaveEntry(MakeTombstone(child_id))
      # Delete entry itself.
      entry = MakeTombstone(entry.id_string)
    else:
      # Comments in sync.proto detail how the representation of positional
      # ordering works: the 'insert_after_item_id' field specifies a
      # predecessor during Commit operations, but the 'position_in_parent'
      # field provides an absolute ordering in GetUpdates contexts. Here
      # we convert from the former to the latter. Specifically, we'll
      # generate a numeric position placing the item just after the object
      # identified by 'insert_after_item_id', and then clear the
      # 'insert_after_item_id' field so that it's not sent back to the client
      # during later GetUpdates requests.
      if entry.HasField('insert_after_item_id'):
        self._WritePosition(entry, entry.parent_id_string,
                            entry.insert_after_item_id)
      else:
        self._WritePosition(entry, entry.parent_id_string)
    # Preserve the originator info, which the client is not required to send
    # when updating.
    base_entry = self._entries.get(entry.id_string)
    if base_entry and not entry.HasField('originator_cache_guid'):
      entry.originator_cache_guid = base_entry.originator_cache_guid
      entry.originator_client_item_id = base_entry.originator_client_item_id
    # Commit the change. This also updates the version number.
    self._SaveEntry(entry)
    return entry
class TestServer(object):
  """An object to handle requests for one (and only one) Chrome Sync account.

  TestServer consumes the sync command messages that are the outermost
  layers of the protocol, performs the corresponding actions on its
  SyncDataModel, and constructs an appropriate response message.
  """
  def __init__(self):
    # The implementation supports exactly one account; its state is here.
    self.account = SyncDataModel()
    self.account_lock = threading.Lock()
    # Clients that have talked to us: a map from the full client ID
    # to its nickname.
    self.clients = {}
    # Generates nicknames A..Y, then +A..+Y, ++A.., and so on.
    # NOTE(review): xrange(ord('A'), ord('Z')) excludes 'Z' itself --
    # confirm whether that is intended.
    self.client_name_generator = ('+' * times + chr(c)
        for times in xrange(0, sys.maxint) for c in xrange(ord('A'),ord('Z')))
  def GetShortClientName(self, query):
    """Return a short, stable nickname for the client_id found in query."""
    parsed = cgi.parse_qs(query[query.find('?')+1:])
    client_id = parsed.get('client_id')
    if not client_id:
      return '?'
    client_id = client_id[0]
    if client_id not in self.clients:
      self.clients[client_id] = self.client_name_generator.next()
    return self.clients[client_id]
  def HandleCommand(self, query, raw_request):
    """Decode and handle a sync command from a raw input of bytes.

    This is the main entry point for this class. It is safe to call this
    method from multiple threads.

    Args:
      query: The query string of the request URL; used only to derive a
        short client nickname for logging.
      raw_request: An iterable byte sequence to be interpreted as a sync
        protocol command.
    Returns:
      A tuple (response_code, raw_response); the first value is an HTTP
      result code, while the second value is a string of bytes which is the
      serialized reply to the command.
    """
    self.account_lock.acquire()
    try:
      request = sync_pb2.ClientToServerMessage()
      request.MergeFromString(raw_request)
      contents = request.message_contents
      response = sync_pb2.ClientToServerResponse()
      response.error_code = sync_pb2.ClientToServerResponse.SUCCESS
      response.store_birthday = self.account.store_birthday
      log_context = "[Client %s -> %s.py]" % (self.GetShortClientName(query),
                                              __name__)
      if contents == sync_pb2.ClientToServerMessage.AUTHENTICATE:
        print '%s Authenticate' % log_context
        # We accept any authentication token, and support only one account.
        # TODO(nick): Mock out the GAIA authentication as well; hook up here.
        response.authenticate.user.email = 'syncjuser@chromium'
        response.authenticate.user.display_name = 'Sync J User'
      elif contents == sync_pb2.ClientToServerMessage.COMMIT:
        print '%s Commit' % log_context
        self.HandleCommit(request.commit, response.commit)
      elif contents == sync_pb2.ClientToServerMessage.GET_UPDATES:
        print ('%s GetUpdates from timestamp %d' %
               (log_context, request.get_updates.from_timestamp))
        self.HandleGetUpdates(request.get_updates, response.get_updates)
      return (200, response.SerializeToString())
    finally:
      self.account_lock.release()
  def HandleCommit(self, commit_message, commit_response):
    """Respond to a Commit request by updating the user's account state.

    Commit attempts stop after the first error, returning a CONFLICT result
    for any unattempted entries.

    Args:
      commit_message: A sync_pb.CommitMessage protobuf holding the content
        of the client's request.
      commit_response: A sync_pb.CommitResponse protobuf into which a reply
        to the client request will be written.
    """
    commit_response.SetInParent()
    batch_failure = False
    session = {}  # Tracks ID renaming during the commit operation.
    guid = commit_message.cache_guid
    for entry in commit_message.entries:
      server_entry = None
      if not batch_failure:
        # Try to commit the change to the account.
        server_entry = self.account.CommitEntry(entry, guid, session)
      # An entryresponse is returned in both success and failure cases.
      reply = commit_response.entryresponse.add()
      if not server_entry:
        reply.response_type = sync_pb2.CommitResponse.CONFLICT
        reply.error_message = 'Conflict.'
        batch_failure = True  # One failure halts the batch.
      else:
        reply.response_type = sync_pb2.CommitResponse.SUCCESS
        # These are the properties that the server is allowed to override
        # during commit; the client wants to know their values at the end
        # of the operation.
        reply.id_string = server_entry.id_string
        if not server_entry.deleted:
          # Note: the production server doesn't actually send the
          # parent_id_string on commit responses, so we don't either.
          reply.position_in_parent = server_entry.position_in_parent
          reply.version = server_entry.version
          reply.name = server_entry.name
          reply.non_unique_name = server_entry.non_unique_name
        else:
          reply.version = entry.version + 1
  def HandleGetUpdates(self, update_request, update_response):
    """Respond to a GetUpdates request by querying the user's account.

    Args:
      update_request: A sync_pb.GetUpdatesMessage protobuf holding the content
        of the client's request.
      update_response: A sync_pb.GetUpdatesResponse protobuf into which a reply
        to the client request will be written.
    """
    update_response.SetInParent()
    update_sieve = UpdateSieve(update_request)
    new_timestamp, entries, remaining = self.account.GetChanges(update_sieve)
    update_response.changes_remaining = remaining
    for entry in entries:
      reply = update_response.entries.add()
      reply.CopyFrom(entry)
    update_sieve.SaveProgress(new_timestamp, update_response)
| bsd-3-clause |
semonte/intellij-community | python/lib/Lib/site-packages/django/utils/translation/trans_real.py | 71 | 21793 | """Translation helper functions."""
import locale
import os
import re
import sys
import warnings
import gettext as gettext_module
from cStringIO import StringIO
from django.utils.importlib import import_module
from django.utils.safestring import mark_safe, SafeData
from django.utils.thread_support import currentThread
# Translations are cached in a dictionary for every language+app tuple.
# The active translations are stored by threadid to make them thread local.
_translations = {}
_active = {}
# The default translation is based on the settings file.
_default = None
# This is a cache for normalized accept-header languages to prevent multiple
# file lookups when checking the same locale on repeated requests.
_accepted = {}
# magic gettext number to separate context from message
CONTEXT_SEPARATOR = u"\x04"
# Format of Accept-Language header values. From RFC 2616, section 14.4 and 3.9.
# Bug fix: the dot in the q=1 alternative was unescaped ("1(?:.0{,3})?"),
# so malformed values such as "q=1x00" matched and were later passed to
# float(), raising ValueError instead of being rejected as a format error.
accept_language_re = re.compile(r'''
        ([A-Za-z]{1,8}(?:-[A-Za-z]{1,8})*|\*)   # "en", "en-au", "x-y-z", "*"
        (?:;q=(0(?:\.\d{,3})?|1(?:\.0{,3})?))?  # Optional "q=1.00", "q=0.8"
        (?:\s*,\s*|$)                           # Multiple accepts per header.
        ''', re.VERBOSE)
def to_locale(language, to_lower=False):
    """
    Turns a language name (en-us) into a locale name (en_US). If 'to_lower' is
    True, the last component is lower-cased (en_us).
    """
    lang, sep, tail = language.partition('-')
    if not sep:
        # No country/variant part at all.
        return language.lower()
    if to_lower:
        return lang.lower() + '_' + tail.lower()
    if len(tail) > 2:
        # Long subtags are title-cased, e.g. sr-latn -> sr_Latn.
        return lang.lower() + '_' + tail[0].upper() + tail[1:].lower()
    return lang.lower() + '_' + tail.upper()
def to_language(locale):
    """Turns a locale name (en_US) into a language name (en-us)."""
    head, sep, tail = locale.partition('_')
    if sep:
        return head.lower() + '-' + tail.lower()
    return locale.lower()
class DjangoTranslation(gettext_module.GNUTranslations):
    """
    This class sets up the GNUTranslations context with regard to output
    charset. Django uses a defined DEFAULT_CHARSET as the output charset on
    Python 2.4.
    """
    def __init__(self, *args, **kw):
        # NOTE(review): 'settings' is imported but not referenced below --
        # presumably kept for its side effect of configuring settings; confirm.
        from django.conf import settings
        gettext_module.GNUTranslations.__init__(self, *args, **kw)
        # Starting with Python 2.4, there's a function to define
        # the output charset. Before 2.4, the output charset is
        # identical with the translation file charset.
        try:
            self.set_output_charset('utf-8')
        except AttributeError:
            pass
        self.django_output_charset = 'utf-8'
        # Placeholder until set_language() is called.
        self.__language = '??'
    def merge(self, other):
        # Fold another catalog's messages into this one (later wins).
        self._catalog.update(other._catalog)
    def set_language(self, language):
        # Record both the raw language and its "en-us" style form.
        self.__language = language
        self.__to_language = to_language(language)
    def language(self):
        return self.__language
    def to_language(self):
        return self.__to_language
    def __repr__(self):
        return "<DjangoTranslation lang:%s>" % self.__language
def translation(language):
    """
    Returns a translation object.
    This translation object will be constructed out of multiple GNUTranslations
    objects by merging their catalogs. It will construct a object for the
    requested language and add a fallback to the default language, if it's
    different from the requested language.
    """
    global _translations
    t = _translations.get(language, None)
    if t is not None:
        return t
    from django.conf import settings
    # Directory holding Django's own bundled locale files.
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    if settings.SETTINGS_MODULE is not None:
        parts = settings.SETTINGS_MODULE.split('.')
        project = import_module(parts[0])
        projectpath = os.path.join(os.path.dirname(project.__file__), 'locale')
    else:
        projectpath = None
    def _fetch(lang, fallback=None):
        # Build (and cache) the merged translation for one language code.
        global _translations
        loc = to_locale(lang)
        res = _translations.get(lang, None)
        if res is not None:
            return res
        def _translation(path):
            # Load a DjangoTranslation from one locale dir, or None on failure.
            try:
                t = gettext_module.translation('django', path, [loc], DjangoTranslation)
                t.set_language(lang)
                return t
            except IOError, e:
                return None
        res = _translation(globalpath)
        # We want to ensure that, for example, "en-gb" and "en-us" don't share
        # the same translation object (thus, merging en-us with a local update
        # doesn't affect en-gb), even though they will both use the core "en"
        # translation. So we have to subvert Python's internal gettext caching.
        base_lang = lambda x: x.split('-', 1)[0]
        if base_lang(lang) in [base_lang(trans) for trans in _translations]:
            res._info = res._info.copy()
            res._catalog = res._catalog.copy()
        def _merge(path):
            # Merge the catalog found at `path` into res (or become res).
            t = _translation(path)
            if t is not None:
                if res is None:
                    return t
                else:
                    res.merge(t)
            return res
        # Precedence (later merges win): global < LOCALE_PATHS < apps < project.
        for localepath in settings.LOCALE_PATHS:
            if os.path.isdir(localepath):
                res = _merge(localepath)
        for appname in settings.INSTALLED_APPS:
            app = import_module(appname)
            apppath = os.path.join(os.path.dirname(app.__file__), 'locale')
            if os.path.isdir(apppath):
                res = _merge(apppath)
        if projectpath and os.path.isdir(projectpath):
            res = _merge(projectpath)
        if res is None:
            if fallback is not None:
                res = fallback
            else:
                return gettext_module.NullTranslations()
        _translations[lang] = res
        return res
    default_translation = _fetch(settings.LANGUAGE_CODE)
    current_translation = _fetch(language, fallback=default_translation)
    return current_translation
def activate(language):
    """
    Fetches the translation object for a given tuple of application name and
    language and installs it as the current translation object for the current
    thread.
    """
    deprecated_code = isinstance(language, basestring) and language == 'no'
    if deprecated_code:
        warnings.warn(
            "The use of the language code 'no' is deprecated. "
            "Please use the 'nb' translation instead.",
            DeprecationWarning
        )
    _active[currentThread()] = translation(language)
def deactivate():
    """
    Deinstalls the currently active translation object so that further _ calls
    will resolve against the default translation object, again.
    """
    # pop() with a default removes the entry if present, quietly otherwise.
    _active.pop(currentThread(), None)
def deactivate_all():
    """
    Makes the active translation object a NullTranslations() instance. This is
    useful when we want delayed translations to appear as the original string
    for some reason.
    """
    thread_key = currentThread()
    _active[thread_key] = gettext_module.NullTranslations()
def get_language():
    """Returns the currently selected language."""
    active = _active.get(currentThread(), None)
    if active is not None:
        try:
            return active.to_language()
        except AttributeError:
            # Not a DjangoTranslation (e.g. NullTranslations): fall through.
            pass
    # If we don't have a real translation object, assume it's the default
    # language.
    from django.conf import settings
    return settings.LANGUAGE_CODE
def get_language_bidi():
    """
    Returns selected language's BiDi layout.
    * False = left-to-right layout
    * True = right-to-left layout
    """
    from django.conf import settings
    current = get_language()
    return current.split('-', 1)[0] in settings.LANGUAGES_BIDI
def catalog():
    """
    Returns the current active catalog for further processing.
    This can be used if you need to modify the catalog or want to access the
    whole message catalog instead of just translating one string.
    """
    global _default
    active = _active.get(currentThread(), None)
    if active is not None:
        return active
    # Lazily build the default-language translation on first use.
    if _default is None:
        from django.conf import settings
        _default = translation(settings.LANGUAGE_CODE)
    return _default
def do_translate(message, translation_function):
    """
    Translates 'message' using the given 'translation_function' name -- which
    will be either gettext or ugettext. It uses the current thread to find the
    translation object to use. If no current translation is activated, the
    message will be run through the default translation object.
    """
    global _default
    # Normalize all line endings to '\n' before lookup.
    eol_message = message.replace('\r\n', '\n').replace('\r', '\n')
    translator = _active.get(currentThread(), None)
    if translator is None:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        translator = _default
    result = getattr(translator, translation_function)(eol_message)
    # Preserve safe-string marking across translation.
    if isinstance(message, SafeData):
        result = mark_safe(result)
    return result
def gettext(message):
    # Bytestring translation via the active (or default) catalog.
    return do_translate(message, 'gettext')
def ugettext(message):
    # Unicode translation via the active (or default) catalog.
    return do_translate(message, 'ugettext')
def pgettext(context, message):
    """Translate `message`, disambiguated by gettext-style `context`."""
    contextual = u"%s%s%s" % (context, CONTEXT_SEPARATOR, message)
    result = do_translate(contextual, 'ugettext')
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        return message
    return result
def gettext_noop(message):
    """
    Marks strings for translation but doesn't translate them now. This can be
    used to store strings in global variables that should stay in the base
    language (because they might be used externally) and will be translated
    later.
    """
    return message
def do_ntranslate(singular, plural, number, translation_function):
    """Run the named plural-aware translation function against the active
    catalog, falling back to the lazily-built default translation."""
    global _default
    translator = _active.get(currentThread(), None)
    if translator is None:
        if _default is None:
            from django.conf import settings
            _default = translation(settings.LANGUAGE_CODE)
        translator = _default
    return getattr(translator, translation_function)(singular, plural, number)
def ngettext(singular, plural, number):
    """
    Returns a UTF-8 bytestring of the translation of either the singular or
    plural, based on the number.
    """
    # Thin wrapper around do_ntranslate with the bytestring variant.
    return do_ntranslate(singular, plural, number, 'ngettext')
def ungettext(singular, plural, number):
    """
    Returns a unicode strings of the translation of either the singular or
    plural, based on the number.
    """
    # Thin wrapper around do_ntranslate with the unicode variant.
    return do_ntranslate(singular, plural, number, 'ungettext')
def npgettext(context, singular, plural, number):
    """Plural-aware pgettext: translate with `context`, falling back to the
    context-free translation when no contextual entry exists."""
    prefixed_singular = u"%s%s%s" % (context, CONTEXT_SEPARATOR, singular)
    prefixed_plural = u"%s%s%s" % (context, CONTEXT_SEPARATOR, plural)
    result = do_ntranslate(prefixed_singular, prefixed_plural,
                           number, 'ungettext')
    if CONTEXT_SEPARATOR in result:
        # Translation not found
        result = do_ntranslate(singular, plural, number, 'ungettext')
    return result
def check_for_language(lang_code):
    """
    Checks whether there is a global language file for the given language
    code. This is used to decide whether a user-provided language is
    available. This is only used for language codes from either the cookies or
    session.
    """
    from django.conf import settings
    globalpath = os.path.join(
        os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    # gettext_module.find() returns None when no MO file exists for the code.
    found = gettext_module.find('django', globalpath, [to_locale(lang_code)])
    return found is not None
def get_language_from_request(request):
    """
    Analyzes the request to find what language the user wants the system to
    show. Only languages listed in settings.LANGUAGES are taken into account.
    If the user requests a sublanguage where we have a main language, we send
    out the main language.

    Lookup order: session, cookie, Accept-Language header, then
    settings.LANGUAGE_CODE.
    """
    global _accepted
    from django.conf import settings
    globalpath = os.path.join(os.path.dirname(sys.modules[settings.__module__].__file__), 'locale')
    supported = dict(settings.LANGUAGES)
    if hasattr(request, 'session'):
        lang_code = request.session.get('django_language', None)
        if lang_code in supported and lang_code is not None and check_for_language(lang_code):
            return lang_code
    lang_code = request.COOKIES.get(settings.LANGUAGE_COOKIE_NAME)
    if lang_code and lang_code not in supported:
        lang_code = lang_code.split('-')[0] # e.g. if fr-ca is not supported fallback to fr
    if lang_code and lang_code in supported and check_for_language(lang_code):
        return lang_code
    accept = request.META.get('HTTP_ACCEPT_LANGUAGE', '')
    for accept_lang, unused in parse_accept_lang_header(accept):
        if accept_lang == '*':
            break
        # We have a very restricted form for our language files (no encoding
        # specifier, since they all must be UTF-8 and only one possible
        # language each time. So we avoid the overhead of gettext.find() and
        # work out the MO file manually.
        # 'normalized' is the root name of the locale in POSIX format (which is
        # the format used for the directories holding the MO files).
        normalized = locale.locale_alias.get(to_locale(accept_lang, True))
        if not normalized:
            continue
        # Remove the default encoding from locale_alias.
        normalized = normalized.split('.')[0]
        if normalized in _accepted:
            # We've seen this locale before and have an MO file for it, so no
            # need to check again.
            return _accepted[normalized]
        # Try the full code first, then its base language (fr-ca -> fr).
        for lang, dirname in ((accept_lang, normalized),
                (accept_lang.split('-')[0], normalized.split('_')[0])):
            if lang.lower() not in supported:
                continue
            langfile = os.path.join(globalpath, dirname, 'LC_MESSAGES',
                                    'django.mo')
            if os.path.exists(langfile):
                _accepted[normalized] = lang
                return lang
    return settings.LANGUAGE_CODE
# Matches any single non-whitespace character; used by blankout() below.
dot_re = re.compile(r'\S')

def blankout(src, char):
    """Replace every non-whitespace character of `src` with `char`,
    preserving the whitespace layout. Used in the templatize function."""
    return dot_re.sub(char, src)
# Template-tag recognizers used by templatize() below.
# {% trans "..." %} / {% trans '...' %}
inline_re = re.compile(r"""^\s*trans\s+((?:".*?")|(?:'.*?'))\s*""")
# {% blocktrans %} opener
block_re = re.compile(r"""^\s*blocktrans(?:\s+|$)""")
# {% endblocktrans %}
endblock_re = re.compile(r"""^\s*endblocktrans$""")
# {% plural %} inside a blocktrans
plural_re = re.compile(r"""^\s*plural$""")
# _("...") constants embedded in tags/variables
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")
def templatize(src, origin=None):
    """
    Turns a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.
    """
    from django.template import Lexer, TOKEN_TEXT, TOKEN_VAR, TOKEN_BLOCK, TOKEN_COMMENT
    out = StringIO()
    # State machine flags: inside {% blocktrans %}, inside its {% plural %}
    # branch, and inside {% comment %} blocks, respectively.
    intrans = False
    inplural = False
    singular = []
    plural = []
    incomment = False
    comment = []
    for t in Lexer(src, origin).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                out.write(' # %s' % ''.join(comment))
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        out.write(' ngettext(%r,%r,count) ' % (''.join(singular), ''.join(plural)))
                        # Emit blanked-out filler so xgettext line numbers
                        # stay aligned with the template source.
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        out.write(' gettext(%r) ' % ''.join(singular))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError("Translation blocks must not include other block tags: %s (%sline %d)" % (t.contents, filemsg, t.lineno))
            elif t.token_type == TOKEN_VAR:
                # Template variables become named string placeholders.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    g = imatch.group(1)
                    if g[0] == '"': g = g.strip('"')
                    elif g[0] == "'": g = g.strip("'")
                    out.write(' gettext(%r) ' % g)
                elif bmatch:
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    intrans = True
                    inplural = False
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':',1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                out.write(' # %s' % t.contents)
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
def parse_accept_lang_header(lang_string):
    """
    Parses the lang_string, which is the body of an HTTP Accept-Language
    header, and returns a list of (lang, q-value), ordered by 'q' values.
    Any format errors in lang_string results in an empty list being returned.
    """
    result = []
    pieces = accept_language_re.split(lang_string)
    if pieces[-1]:
        return []
    for i in range(0, len(pieces) - 1, 3):
        first, lang, priority = pieces[i : i + 3]
        if first:
            return []
        # Bug fix: the old "priority and float(priority) or 1.0" idiom turned
        # an explicit q=0 ("not acceptable" per RFC 2616) into 1.0, the
        # highest possible priority. Only default to 1.0 when no q-value was
        # given at all.
        if priority:
            priority = float(priority)
        else:
            priority = 1.0
        result.append((lang, priority))
    result.sort(key=lambda k: k[1], reverse=True)
    return result
# get_date_formats and get_partial_date_formats aren't used anymore by Django
# and are kept for backward compatibility.
# Note, it's also important to keep format names marked for translation.
# For compatibility we still want to have formats on translation catalogs.
# That makes template code like {{ my_date|date:_('DATE_FORMAT') }} still work
def get_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store date and time formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    warnings.warn(
        "'django.utils.translation.get_date_formats' is deprecated. "
        "Please update your code to use the new i18n aware formatting.",
        DeprecationWarning
    )
    from django.conf import settings
    resolved = []
    # An untranslated msgid comes back unchanged; fall back to settings then.
    for msgid in ('DATE_FORMAT', 'DATETIME_FORMAT', 'TIME_FORMAT'):
        value = ugettext(msgid)
        if value == msgid:
            value = getattr(settings, msgid)
        resolved.append(value)
    return tuple(resolved)
def get_partial_date_formats():
    """
    Checks whether translation files provide a translation for some technical
    message ID to store partial date formats. If it doesn't contain one, the
    formats provided in the settings will be used.
    """
    warnings.warn(
        "'django.utils.translation.get_partial_date_formats' is deprecated. "
        "Please update your code to use the new i18n aware formatting.",
        DeprecationWarning
    )
    from django.conf import settings
    resolved = []
    # An untranslated msgid comes back unchanged; fall back to settings then.
    for msgid in ('YEAR_MONTH_FORMAT', 'MONTH_DAY_FORMAT'):
        value = ugettext(msgid)
        if value == msgid:
            value = getattr(settings, msgid)
        resolved.append(value)
    return tuple(resolved)
| apache-2.0 |
southpawtech/TACTIC-DEV | src/pyasm/search/database_impl.py | 1 | 100473 | ###########################################################
#
# Copyright (c) 2005, Southpaw Technology
# All Rights Reserved
#
# PROPRIETARY INFORMATION. This software is proprietary to
# Southpaw Technology, and is not to be reproduced, transmitted,
# or disclosed in any way without written permission.
#
#
#
__all__ = ['DatabaseImpl', 'PostgresImpl', 'OracleImpl', 'SqliteImpl', 'MySQLImpl', 'SQLServerImpl', 'TacticImpl']
import os, sys, types, re
import subprocess
import datetime
from pyasm.common import Environment, SetupException, Config, Container, TacticException
class DatabaseImplException(TacticException):
    '''Raised for database-implementation errors: unsupported vendors,
    missing schema directories, or unimplemented vendor hooks.'''
    pass
class DatabaseImplInterface(object):
    '''Abstract interface listing the hooks a vendor-specific database
    implementation is expected to provide. All methods here are stubs.'''
    # NOTE(review): the stubs mix 'cls' and 'my' (this codebase's name for
    # 'self') as first arguments and none are declared classmethods here;
    # the binding behavior is decided by subclasses -- confirm before use.
    def get_columns(cls, db_resource, table):
        pass
    def get_column_info(cls, db_resource, table, use_cache=True):
        pass
    def is_column_sortable(my, db_resource, table, column):
        pass
    def get_id_col(db_resource, search_type):
        pass
    def get_code_col(db_resource, search_type):
        pass
    def get_page(my, limit=None, offset=0):
        pass
    def get_table_info(my, db_resource):
        pass
    def table_exists(my, db_resource, table):
        pass
    def execute_query(my, sql, select):
        pass
    def execute_update(my, sql, update):
        pass
class DatabaseImpl(DatabaseImplInterface):
'''Provides an abstraction layer for the various databases'''
    def get_database_type(my):
        # Base class has no vendor; subclasses return their vendor name.
        return None
    def get_version(my):
        # Base class reports an unknown version triple.
        return (0,0,0)
    def get(vendor=None):
        '''Get the current database implementation'''
        from sql import Sql
        if not vendor:
            # No vendor supplied: resolve the configured default and recurse.
            vendor = Sql.get_default_database_type()
            return DatabaseImpl.get(vendor)
        if vendor == "PostgreSQL":
            return PostgresImpl()
        elif vendor == "Sqlite":
            return SqliteImpl()
        elif vendor == "MySQL":
            return MySQLImpl()
        elif vendor == "SQLServer":
            return SQLServerImpl()
        elif vendor == "Oracle":
            return OracleImpl()
        # TEST
        elif vendor == "MongoDb":
            from mongodb import MongoDbImpl
            return MongoDbImpl()
        elif vendor == "TACTIC":
            return TacticImpl()
        raise DatabaseImplException("Vendor [%s] not supported" % vendor)
    get = staticmethod(get)
    def preprocess_sql(my, data, unquoted_cols):
        # Vendor hook run before statement construction; base does nothing.
        pass
    def postprocess_sql(my, statement):
        # Vendor hook to rewrite a finished statement; base passes it through.
        return statement
    def process_value(my, name, value, column_type="varchar"):
        '''process a database value based on column type.
        @params:
        value: current input value
        column_type: the database column type
        @return
        dict:
            value: the new value
            quoted: True|False - determines whether the value is quoted or not

        Base implementation performs no processing and returns None.
        '''
        return None
    def get_id_col(my, db_resource, search_type):
        '''Return the name of the id column configured for `search_type`.'''
        from pyasm.search import SearchType
        search_type_obj = SearchType.get(search_type)
        id_col = search_type_obj.get_search_type_id_col()
        return id_col
def get_code_col(my, db_resource, search_type):
from pyasm.search import SearchType
search_type = SearchType.get(search_type)
code_col = search_type.get_search_type_code_col()
return code_col
#
# Column type functions
#
def get_text(my, not_null=False):
parts = []
parts.append("text")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
#
# Schema functions
#
    def get_schema_dir(my):
        '''Return the directory containing the schema SQL files for this
        vendor; raises DatabaseImplException when it does not exist.'''
        # get the schema directory for the appropriate database type
        install_dir = Environment.get_install_dir()
        schema_dir = "%s/src/pyasm/search/upgrade/%s" % (install_dir, my.get_database_type().lower() )
        if not os.path.exists(schema_dir):
            raise DatabaseImplException("Schema '%s' does not exist" % schema_dir)
        return schema_dir
    def import_sql_file(my, db_resource, path):
        '''Execute every ";"-separated statement from the SQL file at `path`
        against the database identified by `db_resource`.'''
        from pyasm.search import DbResource, DbContainer
        if isinstance(db_resource, basestring):
            database = db_resource
        else:
            database = db_resource.get_database()
        sql = DbContainer.get(db_resource)
        # NOTE(review): file is not closed on read failure; with-statement
        # or try/finally would be safer -- confirm minimum Python version.
        f = open(path, 'r')
        data = f.read()
        f.close()
        print "Importing sql to database [%s]..." % (database)
        print "    using path [%s]" % (path)
        cmds = data.split(";")
        for cmd in cmds:
            cmd = cmd.strip()
            sql.do_update(cmd)
        # Schema changed: drop cached table metadata and refresh the handle.
        my.clear_table_cache()
        sql = DbContainer.get(db_resource)
    def import_schema(my, db_resource, type):
        '''import the schema of certain type to the given database'''
        # import the necessary schema
        types = ['config', type]
        # sthpw schema is composed of 2 files
        # NOTE(review): this compares db_resource to the string 'sthpw';
        # only true when a plain database name is passed -- confirm callers.
        if db_resource == 'sthpw':
            types.insert(0, 'bootstrap')
        for schema_type in types:
            schema_dir = my.get_schema_dir()
            schema_path = "%s/%s_schema.sql" % (schema_dir, schema_type)
            if not os.path.exists(schema_path):
                # This warning occurs too often in harmless places
                #Environment.add_warning("Schema does not exist", "Schema '%s' does not exist" % schema_path)
                continue
            my.import_sql_file(db_resource, schema_path)
    def import_default_data(my, db_resource, type):
        '''import the data of certain type to the given database'''
        # import the necessary schema
        schema_dir = my.get_schema_dir()
        data_path = "%s/%s_data.sql" % (schema_dir, type)
        data_path = os.path.normpath(data_path)
        if not os.path.exists(data_path):
            # Silently skip: many types legitimately ship no default data.
            #Environment.add_warning("Default data does not exist", "Data '%s' does not exist" % data_path)
            return
        my.import_sql_file(db_resource, data_path)
    #
    # Database methods for base database implementation
    #
    def has_sequences(my):
        # Each vendor must declare whether it uses sequences for id generation.
        raise DatabaseImplException("TACTIC database implementation for current database vendor does not have method has_sequences() defined.")
    def get_table_info(my, database):
        # NOTE(review): my.vendor is not assigned anywhere in this base class;
        # a subclass missing the override would hit AttributeError before
        # this exception is raised -- confirm.
        raise DatabaseImplException("Must override 'get_table_info' for [%s]" % my.vendor)
    def database_exists(my, database, host=None, port=None):
        '''Return True when a connection to the database can be made.
        @param:
            database - if string, it's just a database name (old)
                       if DbResource, it could contain the host already
            host - can be localhost or a different server
            port - port number that the database is listen on'''
        try:
            db_resource = database
            #from pyasm.search import DbContainer, DbResource
            from sql import DbContainer, DbResource
            if isinstance(database, basestring):
                # Old-style call: build a DbResource from config (or args).
                if host == None:
                    vendor = Config.get_value("database", "vendor")
                    host = Config.get_value("database", "server")
                    port = Config.get_value("database", "port")
                else:
                    vendor = my.get_database_type()
                db_resource = DbResource(database=database, host=host, vendor=vendor, port=port)
            cached = Container.get("Sql:database_exists:%s"%db_resource.get_key())
            if cached != None:
                return cached
            sql = DbContainer.get(db_resource, connect=True)
            if sql:
                if not sql.get_connection():
                    sql.connect()
            else:
                return False
            # cache it for repeated use
            Container.put("Sql:database_exists:%s"%db_resource.get_key(), True)
        except Exception, e:
            # Any failure (bad config, connection refused) means "not there".
            #print "Error: ", str(e)
            return False
        else:
            return True
    def clear_table_cache(cls):
        '''Drop all cached table/column metadata (call after schema changes).'''
        # this relies on the __str__ method of db_resource
        key = "DatabaseImpl:table_exists"
        Container.remove(key)
        key = "DatabaseImpl:table_info"
        Container.remove(key)
        key = "DatabaseImpl:column_info"
        Container.remove(key)
    clear_table_cache = classmethod(clear_table_cache)
def table_exists(my, db_resource, table):
key = "DatabaseImpl:table_exists"
cached_dict = Container.get(key)
if cached_dict == None:
cached_dict = {}
Container.put(key, cached_dict)
# this relies on the __str__ method of db_resource
key2 = "%s:%s" % (db_resource, table)
cached = cached_dict.get(key2)
if cached != None:
return cached
table_info = my.get_table_info(db_resource)
if table_info.has_key(table):
exists = True
else:
exists = False
cached_dict[key2] = exists
return exists
    def get_column_types(my, db_resource, table):
        # Base implementation: no column type information available.
        return {}
    #
    # Save point methods
    #
    def has_savepoint(my):
        # Most vendors support SAVEPOINT; subclasses may override to False.
        return True
def set_savepoint(my, name='save_pt'):
'''set a savepoint'''
if not my.has_savepoint():
return None
return "SAVEPOINT %s" %name
def rollback_savepoint(my, name='save_pt', release=False):
if not my.has_savepoint():
return None
stmt = "ROLLBACK TO SAVEPOINT %s"%name
return stmt
def release_savepoint(my, name='save_pt'):
if not my.has_savepoint():
return None
stmt = "RELEASE SAVEPOINT %s"%name
return stmt
    def get_constraints(my, db_resource, table):
        # Base implementation: no constraint introspection.
        return []
    def handle_pagination(my, statement, limit, offset):
        # Base implementation: no statement rewriting needed for paging.
        return statement
    def get_id_override_statement(my, table, override=False):
        # Only vendors with identity columns (see SQLServerImpl) need this.
        return ''
def get_constraint(my, mode, name='', columns=[], table=None):
if not name and table:
if mode == 'PRIMARY KEY':
name = '%s_pkey' %table
return 'CONSTRAINT %s %s (%s)' %(name, mode, ','.join(columns))
    def get_regex_filter(my, column, regex, op='EQI'):
        # Base implementation: regex filtering unsupported for this vendor.
        return None
    def get_text_search_filter(cls, column, keywords, column_type, table=None):
        '''default impl works with Postgres: builds a
        to_tsvector(...) @@ to_tsquery(...) WHERE fragment.'''
        if isinstance(keywords, basestring):
            def split_keywords(keywords):
                # NOTE(review): replace(" ", "") strips ALL spaces, so the
                # following split(" ") always yields a single token and the
                # ' | ' OR-join never fires for multi-word input. A
                # whitespace-collapsing split() was presumably intended --
                # confirm before changing.
                keywords = keywords.strip()
                keywords = keywords.replace(" ", "")
                parts = keywords.split(" ")
                value = ' | '.join(parts)
                return value
            if keywords.find("|") != -1 or keywords.find("&") != -1:
                # prevent syntax error from multiple | or &
                keywords = re.sub( r'\|+', r'|', keywords)
                keywords = re.sub( r'\&+', r'&', keywords)
                keywords = keywords.rstrip('&')
                value = keywords
                if keywords.find("|") == -1 and keywords.find("&") == -1:
                    value = split_keywords(keywords)
            else:
                value = split_keywords(keywords)
        elif type(keywords) == types.ListType:
            # remove empty strings from the list
            keywords = filter(None, keywords)
            value = ' & '.join(keywords)
        else:
            value = str(keywords)
        # explicitly set the config in case there is an index available
        # TODO: this should be configurable
        config = 'english'
        # for multiple columns
        #coalesce(title,'') || ' ' || coalesce(body,'')
        # avoid syntax error
        value = value.replace("'", "''")
        if table:
            column = '"%s"."%s"' % (table, column)
        else:
            column = '"%s"' % column
        if column_type in ['integer','serial']:
            column = "CAST(%s AS varchar(10))" %column
        else:
            # prefix matching
            value = '%s:*'%value
        wheres = []
        if column_type == 'tsvector':
            # Column is already a tsvector; use it directly.
            wheres.append(column)
        else:
            wheres.append('''to_tsvector('%s', %s)''' % (config, column) )
        wheres.append("@@")
        wheres.append("to_tsquery('%s', '%s')" % (config, value) )
        where = " ".join(wheres)
        return where
    get_text_search_filter = classmethod(get_text_search_filter)
    def get_columns(cls, db_resource, table):
        '''get ordered column names'''
        # do a dummy select (LIMIT 0) to get the ordered columns cheaply
        from sql import Select, DbContainer
        sql = DbContainer.get(db_resource)
        select = Select()
        select.set_database(db_resource)
        select.add_table(table)
        select.set_limit(0)
        statement = select.get_statement()
        sql.do_query(statement)
        columns = []
        for description in sql.description:
            # convert to unicode
            value = unicode(description[0], 'utf-8')
            columns.append(value)
        return columns
    get_columns = classmethod(get_columns)
def can_join(db_resource1, db_resource2):
# if the hosts are difference, joins cannot happen
host1 = db_resource1.get_host()
host2 = db_resource2.get_host()
if host1 != host2:
return False
# if the database types are differenct, joins cannot happen
database_type1 = db_resource1.get_database_type()
database_type2 = db_resource2.get_database_type()
if database_type1 != database_type2:
return False
# if the host is the same and the database is the same, then joins
# can happen
database1 = db_resource1.get_database()
database2 = db_resource2.get_database()
if database1 == database2:
return True
# multi database joins are not support in Postgres or Sqlite
if database_type1 in ["PostgreSQL", "Sqlite"]:
return False
if database_type2 in ["PostgreSQL", "Sqlite"]:
return False
# otherwise joins can happen between the two resources
return True
can_join = staticmethod(can_join)
    def can_search_types_join(search_type1, search_type2):
        '''Return True when the db resources behind the two search types can
        be joined (delegates to DatabaseImpl.can_join).'''
        from search import SearchType
        db_resource = SearchType.get_db_resource_by_search_type(search_type1)
        db_resource2 = SearchType.get_db_resource_by_search_type(search_type2)
        can_join = DatabaseImpl.can_join(db_resource, db_resource2)
        return can_join
    can_search_types_join = staticmethod(can_search_types_join)
    # Defines temporary column name to be used. Only SQLServerImpl implements
    # this
    def get_temp_column_name(my):
        # Base implementation: no temporary column is needed.
        return ""
class BaseSQLDatabaseImpl(DatabaseImpl):
    '''Shared behavior for concrete SQL vendor implementations.'''
    def is_column_sortable(my, db_resource, table, column):
        '''A column is sortable iff it actually exists on the table.'''
        from sql import DbContainer
        sql = DbContainer.get(db_resource)
        return column in sql.get_columns(table)
class SQLServerImpl(BaseSQLDatabaseImpl):
'''Implementation for Microsoft SQL Server's SQL'''
    def get_database_type(my):
        # Vendor identifier used by DatabaseImpl.get() dispatch.
        return "SQLServer"
    def __init__(my):
        '''Read the global connection settings from the TACTIC config.'''
        # NOTE: This will not work in mixed db cases because it assumes a
        # global single database
        my.server = Config.get_value("database", "server")
        my.port = Config.get_value("database", "port")
        my.user = Config.get_value("database", "user")
        my.password = Config.get_value("database", "password")
    def get_version(my):
        '''Return the server version as a list of ints, e.g. [10, 50, 1600, 1].'''
        from sql import DbContainer
        sql = DbContainer.get("sthpw")
        result = sql.do_query("select @@version")
        # result is [(u'Microsoft SQL Server 2008 R2 (RTM) - 10.50.1600.1 (X64) \n\tApr 2 2010 15:48:46 \n\tCopyright (c) Microsoft Corporation\n\tExpress Edition (64-bit) on Windows NT 6.0 <X64> (Build 6002: Service Pack 2) (Hypervisor)\n', )]
        version_str = result[0][0]
        # NOTE(review): assumes the dotted version is always the 8th
        # space-separated token of the banner -- fragile across editions;
        # confirm against the target server's @@version format.
        parts = version_str.split(" ")
        version_parts = parts[7].split(".")
        version_parts = [int(x) for x in version_parts]
        return version_parts
    def get_create_sequence(my, sequence_name):
        # SQL Server uses IDENTITY columns instead of sequences, so this
        # intentionally returns None (the commented code documents the
        # abandoned ALTER TABLE approach).
        #return 'CREATE SEQUENCE "%s" START WITH 1 INCREMENT BY 1 NO MAXVALUE CACHE 1' % name
        # SQL Server specific implementation.
        #postfix_len = '_id_seq'.__len__()
        #table_name_len = sequence_name.__len__() - postfix_len
        #table_name = sequence_name[0:table_name_len]
        #print ' get_create_sequence: table_name = ', table_name
        #return 'ALTER TABLE %s ADD %s INT IDENTITY(100, 5) ' % (table_name, sequence_name)
        #return 'ALTER COLUMN %s ADD %s INT IDENTITY(100, 5) ' % (table_name, sequence_name)
        return
    def get_sequence_name(my, table, database=None):
        '''Return the "sequence" name for a table. On SQL Server the ID
        column's IDENTITY acts as the sequence, so the table name itself
        is returned. Accepts either a table name or a SearchType.'''
        # SQL Server specific implementation: use the ID column as the sequence.
        # OLD return "%s_id_seq" % table
        from pyasm.search import SearchType
        if isinstance(table, SearchType):
            search_type = table
            table = search_type.get_table()
        return table
def get_page(my, limit=None, offset=0, table_input=0, already_in_where_clause=0):
'''get the pagination sql based on limit and offset'''
#
# SQL Server implementation
#
return None
def handle_pagination(my, statement, limit, offset):
'''specific method to handle MS SQL Server's pagination'''
if limit == None:
return statement
# SQL Server implementation
#
# Example:
# SELECT * from (SELECT TOP 100 *,
# ROW_NUMBER() over (ORDER BY id) as _tmp_spt_rownum FROM
# [tblCatalogCrossReference] WHERE code='abc'
# ) tmp_spt_table
# WHERE tmp_spt_table._tmp_spt_rownum between (5) and (10)
start = offset + 1
end = start + int(limit) - 1
# one can set the limit to be 0 to return 0 row back
if end < start and limit > 0:
raise DatabaseImplException('start [%s] must be larger than end [%s] for limit' %(start, end))
#order_by = ''
#if order_bys:
# order_by = "ORDER BY %s" % ", ".join( order_bys )
tmp_col = my.get_temp_column_name()
page = "tmp_spt_table.%s BETWEEN (%s) AND (%s)" % (tmp_col, start, end)
statement = "SELECT * FROM ( \
%s ) tmp_spt_table \
WHERE %s" % (statement, page)
return statement
def get_id_override_statement(my, table, override=True):
'''SQL Server needs to manually turn on an off the auto id generation feature'''
if override:
return "SET IDENTITY_INSERT %s ON" % table
else:
return "SET IDENTITY_INSERT %s OFF" % table
#
# Column methods
#
def get_boolean(my, not_null=False):
parts = []
parts.append("bit")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_serial(my, not_null=False):
parts = []
# For SQL Server, replaced "serial" type with "identity".
# create table "hi" ("colA" int, "id" int identity(1,1) primary key("id") );
#
parts.append("int identity(1,1)")
return " ".join(parts)
def get_int(my, length=4, not_null=False):
"""
http://technet.microsoft.com/en-us/library/cc917573.aspx
If the integer is from 1 through 255, use tinyint.
If the integer is from -32768 through 32767, use smallint.
If the integer is from -2,147,483,648 through 2,147,483,647 use int.
If you require a number with decimal places, use decimal. Do not use float or real, because rounding may occur (Oracle NUMBER and SQL Server decimal do not round).
If you are not sure, use decimal; it most closely resembles Oracle NUMBER data type.
"""
parts = []
parts.append("int")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_text(my, not_null=False):
parts = []
parts.append("varchar(max)")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_varchar(my, length=256, not_null=False):
if not length:
length = 256
if length == -1:
length = 'max'
parts = []
parts.append("varchar(%s)" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_nvarchar(my, length=256, not_null=False):
assert length
if length == -1:
length = 'max'
parts = []
parts.append("nvarchar(%s)" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_timestamp(my, default="now", not_null=False, timezone=False):
# SQL Server implementation.
parts = []
if timezone:
parts.append("datetimeoffset(6)")
else:
parts.append("datetime2(6)")
if default:
if default == "now":
parts.append("DEFAULT(%s)" % my.get_timestamp_now())
else:
parts.append("DEFAULT(%S)" % default)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_timestamp_now(my, offset=None, type=None, op='+'):
# SQL Server implementation.
if not type:
type = 'day'
if offset:
# Postgres: parts.append("'%s %s'::interval" % (offset, type) )
# Postgres: eg. now() + '10 hour'::interval
# SQL Server: DATEADD(hour, +10, CURRENT_TIMESTAMP)
part = "DATEADD(%s, %s%s, GETDATE())" % (type, op, offset)
else:
part = "GETDATE()"
return part
#
# Sequence methods for SQL Server
#
def has_sequences(my):
return True
    def get_nextval(my, sequence):
        # NOTE(review): this emits Oracle-style '"seq".nextval' syntax,
        # which SQL Server itself does not parse -- confirm whether this
        # method is actually reachable for SQL Server (get_nextval_select
        # below provides the working emulation).
        return '"%s".nextval' % sequence
def get_currval(my, sequence):
# IDENT_CURRENT returns the last identity value
# generated for a specified table[
return 'SELECT IDENT_CURRENT(\'' + sequence + '\')'
def get_currval_select(my, sequence):
# IDENT_CURRENT returns the last identity value
# generated for a specified table[
return 'SELECT IDENT_CURRENT(\'' + sequence + '\')'
def get_nextval_select(my, sequence):
# In Postgres, when a table is created, currval is undefined and nextval is 1.
# SQL Server doesn't have a concept of nextval.
# When the table is created, the currval *is already 1*,
# and so nextval becomes 2. This poses a problem.
# The solution is to check if
# ident_current('table_name') = 1 then just return 1.
cmd = "declare @cur_id int;"
cmd += "select @cur_id = ident_current('" + sequence + "');"
cmd += "if @cur_id = 1"
cmd += " select @cur_id;"
cmd += "else"
cmd += " select @cur_id + 1;"
return cmd
def get_setval_select(my, sequence, num):
# Set the current identity value for the specified table.
cmd = "DBCC CHECKIDENT ('" + sequence + "', RESEED, " + str(num) + ");"
return cmd
    # Method to build and return an SQL statement that can be run to reset the ID sequence for a table to a number
    # that is one greater than the highest index found in the given table. NOTE: this ASSUMES that there are rows
    # in the table to provide a MAX id value from. TODO: provide handling for a table with no data rows.
    # NOTE(review): 'ALTER SEQUENCE ... RESTART' is Postgres syntax (this
    # method is identical to PostgresImpl's); SQL Server would be expected
    # to use DBCC CHECKIDENT(..., RESEED, n) as in get_setval_select --
    # confirm whether this is ever executed against SQL Server.
    def get_reset_table_sequence_statement(my, table, database=None):
        from sql import DbContainer
        sql = DbContainer.get(database)
        # MAX(id) is NULL for an empty table (see TODO above)
        query = "SELECT MAX(id) + 1 FROM %s ;" % table
        result = sql.do_query(query)
        max_id = result[0][0]
        reset_seq_sql = "ALTER SEQUENCE %s_id_seq RESTART WITH %d ;" % (table, max_id)
        return reset_seq_sql
#
# Regular Expressions
#
def get_regex_filter(my, column, regex, op='EQI'):
if op == 'EQI':
op = 'LIKE'
column = 'lower(CAST("%s" AS varchar(max)))' %column
regex = "lower('%%%s%%')"%regex
elif op == 'EQ':
op = 'LIKE'
regex = "'%%%s%%'" %regex
elif op == 'NEQI':
op = 'NOT LIKE'
regex = "lower('%%%s%%')"%regex
elif op == 'NEQ':
op = 'NOT LIKE'
regex = "'%%%s%%'" %regex
else:
raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
return "%s %s %s" %(column, op, regex)
def get_text_search_filter(cls, column, keywords, column_type, table=None):
'''When Full Text Index is created in the db for the table, it works with SQLServer 2008 and above'''
if isinstance(keywords, basestring):
value = keywords
elif type(keywords) == types.ListType:
# remove empty strings from the list
keywords = filter(None, keywords)
value = " ".join(keywords)
else:
value = str(keywords)
# avoid syntax error
value = value.replace("'", "''")
if table:
column = '"%s"."%s"' % (table, column)
else:
column = '"%s"' % column
"""
if column_type in ['integer','serial']:
column = "CAST(%s AS varchar(10))" %column
"""
wheres = []
# use FREETEXT() or CONTAINS(), CONTAINS() takes OR AND operator
wheres.append("FREETEXT(%s, '%s')" % (column, value) )
where = " ".join(wheres)
return where
get_text_search_filter = classmethod(get_text_search_filter)
#
# Type process methods
#
    def process_value(my, name, value, column_type="varchar"):
        '''Convert *value* to the literal form SQL Server expects for the
        given column type.  Returns {"value": ..., "quoted": bool} for
        timestamp/datetime and uniqueidentifier-with-"NEWID" values;
        implicitly returns None for everything else (presumably handled
        by the caller/base class -- confirm).'''
        if column_type in ['timestamp','datetime']:
            quoted = False
            lower_value = ''
            if isinstance(value, datetime.datetime):
                pass
            elif not value:
                pass
            else:
                lower_value = value.lower()
            if value == "NOW":
                value = "getdate()"
            #return {"value": value, "quoted": quoted}
            # FIXME: this is implemented this way because set_value
            # can be called twice. This method should called from commit
            # and not set_value
            # skip values that are already server-side expressions
            elif not lower_value.startswith("convert") and not lower_value.startswith("getdate") and not lower_value.startswith("dateadd") :
                if value == 'NULL':
                    pass
                else:
                    value = "convert(datetime2, '%s', 0)" % value
            return {"value": value, "quoted": quoted}
        elif column_type in ['uniqueidentifier'] and value == "NEWID":
            value = "newid()"
            quoted = False
            return {"value": value, "quoted": quoted}
#
# Database methods
#
    def _get_db_info(my, db_resource):
        ''' get the database info from the config file (or from the given
        DbResource) and format it as sqlcmd command-line arguments'''
        if isinstance(db_resource, DbResource):
            host = db_resource.get_host()
            user = db_resource.get_user()
            password = db_resource.get_password()
            port = db_resource.get_port()
        else:
            host = Config.get_value("database", "server")
            user = Config.get_value("database", "user")
            password = Config.get_value("database", "password")
            port = Config.get_value("database", "port")
        parts = []
        # sqlcmd addresses a port as "-S host,port"
        host_str ="-S %s" % host
        if port:
            host_str = '%s,%s'%(host_str, port)
        parts.append(host_str)
        parts.append("-U %s" % user)
        parts.append("-P %s" % password)
        # NOTE(review): the port is emitted a second time here as "-p";
        # for sqlcmd -p controls performance statistics, not the port --
        # confirm whether this flag is intentional
        parts.append("-p %s" % port)
        return " ".join(parts)
    def create_database(my, database):
        '''create a database. This is done by a system command'''
        # if the database already exists, do nothing
        if my.database_exists(database):
            return
        if not isinstance(database, basestring):
            database = database.get_database()
        # TODO: Retrieve server, username, password from TACTIC config file.
        # eg. sqlcmd -S localhost -U tactic -P south123paw -d sthpw -Q "create database test1"
        # note: The database we are connecting to must be 'sthpw'
        create_SQL_arg = '"CREATE DATABASE ' + database + '"'
        create = 'sqlcmd -S %s,%s -U %s -P %s -Q %s' % \
            (my.server, my.port, my.user, my.password, create_SQL_arg)
        cmd = os.popen(create)
        result = cmd.readlines()
        # sqlcmd produces no output on success
        if not result:
            print "No output from sql command to create db [%s], assumed success" % database
            cmd.close()
            return
            #raise Exception("Error creating database '%s'" % database)
        cmd.close()
        if result[0].find("already exists") != -1:
            print "already exists"
            print " Try deleting C:\Program Files\Microsoft SQL Server\MSSQL10_50.SQLEXPRESS\MSSQL\DATA\<database_name>.mdf"
        else:
            print "no returned result from database creation (sqlcmd)"
    def drop_database(my, database):
        '''remove a database in SQL Server. Note this is a very dangerous
        operation. Use with care.'''
        # if the database does not exist, do nothing
        #if not database_exists(database):
        #    return
        # TODO: Retrieve server, username, password from TACTIC config file.
        # eg. sqlcmd -S localhost -U tactic -P south123paw -d sthpw -Q "dropdatabase test1"
        # note: The database we are connecting to must be 'sthpw'
        drop_SQL_arg = '"DROP DATABASE %s"' %database
        create = 'sqlcmd -S %s,%s -U %s -P %s -Q %s' % \
            (my.server, my.port, my.user, my.password, drop_SQL_arg)
        cmd = os.popen(create)
        result = cmd.readlines()
        # sqlcmd produces no output on success
        if not result:
            print "No output from sql command to drop db [%s], assumed success" % database
            cmd.close()
            return
        else:
            print result
        cmd.close()
def get_modify_column(my, table, column, type, not_null=None):
''' get the statement for setting the column type '''
# this may not return the type exacty like before like varchar is in place of
# varchar(256) due to the column type returned from the sql impl
statements = []
statements.append('ALTER TABLE "%s" ALTER COLUMN "%s" %s' \
% (table,column,type))
if not_null == None:
return statements
if not_null:
# In order to set the column to disallow NULL values,
# we must first set any current NULL values to the empty string ''.
statements.append('UPDATE "%s" SET "%s"=\'\' WHERE %s IS NULL' \
% (table,column, column))
# Now that any existing NULL values for that column are set to the empty string,
# proceed to alter the column so that it disallows NULL values.
statements.append('ALTER TABLE "%s" ALTER COLUMN "%s" %s NOT NULL' \
% (table,column,type))
else:
statements.append('ALTER TABLE "%s" ALTER COLUMN "%s" %s' \
% (table, column, type))
return statements
    def import_schema(my, db_resource, type):
        '''import the schema of certain type to the given database'''
        # DEPRECATED: this shouldn't be necessary anymore as the base class
        # should be general enough
        from pyasm.search import DbResource, DbContainer
        if isinstance(db_resource, basestring):
            database = db_resource
        else:
            database = db_resource.get_database()
        sql = DbContainer.get(db_resource)
        # the config schema is always imported before the requested type
        types = ['config', type]
        for schema_type in types:
            schema_dir = my.get_schema_dir()
            schema_path = "%s/%s_schema.sql" % (schema_dir, schema_type)
            schema_path = os.path.normpath(schema_path)
            if not os.path.exists(schema_path):
                Environment.add_warning("Schema does not exist", "Schema '%s' does not exist" % schema_path)
                # NOTE: returns (not continues), so the remaining schema
                # types are skipped as well
                return
                #raise Exception("Schema '%s' does not exist" % schema_path)
            # cmd = 'psql -q %s %s < "%s"' % (my._get_db_info(database), database, schema_path)
            # TODO: Retrieve server, username, password from TACTIC config file.
            # eg. sqlcmd -S localhost -U tactic -P south123paw -d sthpw -i c:/schema.py
            cmd = 'sqlcmd -S %s,%s -U %s -P %s -d %s -i "%s"' % \
                (my.server, my.port, my.user, my.password, database, schema_path)
            print "Importing schema ..."
            print cmd
            os.system(cmd)
            #program = subprocess.call(cmd, shell=True)
        #print "FINSIHED importing schema"
    def import_default_data(my, db_resource, type):
        '''import the data of certain type to the given database'''
        from sql import DbResource, DbContainer
        if isinstance(db_resource, DbResource):
            database = db_resource.get_database()
        else:
            database = db_resource
        # import the necessary schema
        schema_dir = my.get_schema_dir()
        data_path = "%s/%s_data.sql" % (schema_dir, type)
        data_path = os.path.normpath(data_path)
        # silently skip when there is no default data file for this type
        if not os.path.exists(data_path):
            #Environment.add_warning("Default data does not exist", "Data '%s' does not exist" % data_path)
            return
        #cmd = 'psql -q %s %s < "%s"' % (my._get_db_info(database), database, data_path)
        # TODO: Retrieve server, username, password from TACTIC config file.
        # eg. sqlcmd -S localhost -U tactic -P south123paw -d sthpw -i c:/schema.py
        cmd = 'sqlcmd -S %s,%s -U %s -P %s -d %s -i "%s"' % \
            (my.server, my.port, my.user, my.password, database, data_path)
        print "Importing data ..."
        print cmd
        os.system(cmd)
def get_table_info(my, db_resource):
key = "DatabaseImpl:table_info"
cache_dict = Container.get(key)
if cache_dict == None:
cache_dict = {}
Container.put(key, cache_dict)
from sql import DbContainer, Sql
if isinstance(db_resource, Sql):
key2 = "%s" % (db_resource.get_db_resource())
else:
key2 = "%s" % (db_resource)
cache = cache_dict.get(key2)
if cache != None:
return cache
from sql import Select, DbContainer
sql = DbContainer.get(db_resource)
statement = 'SELECT * from sys.Tables'
results = sql.do_query(statement)
info = {}
for result in results:
table = result[0]
info[table] = table
Container.put(key2, info)
return info
    def get_column_info(cls, db_resource, table, use_cache=True):
        '''SQLServer: get column info like data types, is_nullable in a dict
        keyed by column name; values look like
        {'data_type': ..., 'nullable': bool, 'size': ...}.'''
        from sql import DbContainer, Sql
        if isinstance(db_resource, Sql):
            prefix = "%s" % db_resource.get_db_resource()
        else:
            prefix = "%s" % db_resource
        if use_cache:
            # use global cache
            if prefix.endswith(':sthpw'):
                from pyasm.biz import CacheContainer
                cache = CacheContainer.get("sthpw_column_info")
                if cache:
                    dict = cache.get_value_by_key("data", table)
                    if dict != None:
                        return dict
        # fall back to the per-request Container cache
        key2 = "%s:%s" % (prefix, table)
        key = "DatabaseImpl:column_info"
        cache_dict = Container.get(key)
        if cache_dict == None:
            cache_dict = {}
            Container.put(key, cache_dict)
        if use_cache:
            cache = cache_dict.get(key2)
            if cache != None:
                return cache
        cache = {}
        cache_dict[key2] = cache
        # get directly from the database
        from sql import DbContainer
        # get directly from the database
        if isinstance(db_resource, Sql):
            sql = db_resource
        else:
            sql = DbContainer.get(db_resource)
        query = "select column_name, data_type, \
            is_nullable, character_maximum_length from \
            information_schema.columns where table_name = '%s' \
            " % table
        #order by ordinal_position" % table
        result = sql.do_query(query)
        # convert to the proper data structure, normalizing SQL Server
        # type names to TACTIC's generic type names
        if len(result) > 0:
            for k in range(len(result)):
                name = result[k][0]
                data_type = result[k][1]
                is_nullable = result[k][2] == 'YES'
                size = result[k][3]
                if data_type in ['character varying', 'varchar','nvarchar']:
                    data_type = 'varchar'
                elif data_type in ['integer', 'smallint', 'int']:
                    data_type = 'integer'
                elif data_type in ['text','ntext']:
                    data_type = "text"
                elif data_type == 'boolean':
                    data_type = "boolean"
                elif data_type == 'bit':
                    data_type = "boolean"
                elif data_type == 'uniqueidentifier':
                    data_type = "uniqueidentifier"
                # sqlserver handles timestamp different and is *not the same
                # timestamp from other databases
                elif data_type == 'timestamp':
                    data_type = "sqlserver_timestamp"
                elif data_type.startswith('int identity'):
                    data_type = "serial"
                elif data_type.startswith("datetime"):
                    data_type = "timestamp"
                # for time with/wihtout time zone
                elif data_type.startswith("time "):
                    data_type = "time"
                info_dict = {'data_type': data_type, 'nullable': is_nullable, 'size': size}
                cache[name] = info_dict
        return cache
    get_column_info = classmethod(get_column_info)
def get_column_types(my, database, table):
''' get column data types. Note: can potentially get
character_maximum_length, numeric_precision, and udt_name '''
info = my.get_column_info(database, table)
column_dict = {}
for key, value in info.items():
column_dict[key] = value.get('data_type')
return column_dict
def get_column_nullables(my, database, table):
''' get column data nullables '''
info = my.get_column_info(database, table)
column_dict = {}
for key, value in info.items():
column_dict[key] = value.get('nullable')
return column_dict
def set_savepoint(my, name='save_pt'):
'''set a savepoint'''
stmt = 'if @@TRANCOUNT > 0 SAVE TRANSACTION %s'%name
return stmt
"""
from sql import DbContainer
sql = DbContainer.get(database)
query = 'SAVE TRANSACTION %s'%name
sql.execute(query)
"""
def rollback_savepoint(my, name='save_pt', release=False):
stmt = "ROLLBACK TRANSACTION %s"%name
return stmt
def release_savepoint(my, name='save_pt'):
# does not apply in MS SQL
return None
def get_temp_column_name(my):
return "_tmp_spt_rownum"
    def get_columns(cls, db_resource, table):
        '''SQLServer get ordered column names for *table* via sp_columns
        (result column index 3 is COLUMN_NAME).'''
        from sql import DbContainer
        sql = DbContainer.get(db_resource)
        statement = "EXEC sp_columns @table_name = '%s'"%table
        results = sql.do_query(statement)
        columns = []
        if len(results) > 0:
            for k in range(len(results)):
                name = results[k][3]
                columns.append(name)
        # remove temp columns injected by pagination
        columns = cls.remove_temp_column(columns, sql)
        return columns
    get_columns = classmethod(get_columns)
def remove_temp_column(cls, columns, sql):
# SQL Server temp columns are put in by ROW_NUMBER()
# in database_impl.handle_pagination()
impl = sql.get_database_impl()
temp_column_name = impl.get_temp_column_name()
if temp_column_name and temp_column_name in columns:
columns.remove(temp_column_name)
return columns
remove_temp_column = classmethod(remove_temp_column)
class PostgresImpl(BaseSQLDatabaseImpl):
'''Implementation for PostgreSQL'''
def get_database_type(my):
return "PostgreSQL"
    def get_version(my):
        '''Return the Postgres version as a list of ints parsed from
        version(), e.g. [8, 2, 11].'''
        from sql import DbContainer
        sql = DbContainer.get("sthpw")
        result = sql.do_query("select version()")
        version_str = result[0][0]
        #eg. result = PostgreSQL 8.2.11 on i386-redhat-linux-gnu, compiled by GCC gcc (GCC) 4.1.2 20070925 (Red Hat 4.1.2-33)
        #eg. result = PostgreSQL 9.1.3, compiled by Visual C++ build 1500, 64-bit
        parts = version_str.split(" ")
        version_parts = parts[1].split(".")
        # strip a trailing comma (second banner form above) before int()
        version_parts = [int(re.split('[\.,]',x)[0]) for x in version_parts]
        return version_parts
def get_page(my, limit=None, offset=0, table_input=0, already_in_where_clause=0):
'''get the pagination sql based on limit and offset'''
if limit == None:
return None
parts = []
parts.append("LIMIT %s" % limit)
if offset:
parts.append("OFFSET %s" % offset)
return " ".join(parts)
#
# Column methods
#
def get_boolean(my, not_null=False):
parts = []
parts.append("boolean")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_serial(my, length=4, not_null=False):
parts = []
parts.append("serial")
return " ".join(parts)
def get_int(my, length=4, not_null=False):
parts = []
parts.append("int%s" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_float(my, not_null=False):
parts = []
parts.append("float")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_text(my, not_null=False):
parts = []
parts.append("text")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_char(my, length=256, not_null=False):
assert length
parts = []
parts.append("char(%s)" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_varchar(my, length=256, not_null=False):
if not length:
length = 256
if length in [-1, 'max']:
return my.get_text(not_null=not_null)
parts = []
parts.append("varchar(%s)" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
    def get_nvarchar(my, length=256, not_null=False):
        '''Postgres has no separate nvarchar type; delegate to
        get_varchar().'''
        return my.get_varchar(length=length, not_null=not_null)
def get_timestamp(my, default=None, not_null=False, timezone=False):
parts = []
if timezone:
parts.append("timestamp with time zone")
else:
parts.append("timestamp")
if default:
if default == "now":
parts.append("DEFAULT %s" % my.get_timestamp_now())
else:
parts.append("DEFAULT %s" % default)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_timestamp_now(my, offset=None, type=None, op='+'):
parts = []
parts.append("now()")
if offset:
if not type:
type = "day"
parts.append("'%s %s'::interval" % (offset, type) )
op = ' %s ' % op
return op.join(parts)
#
# Sequence methods for Postgres
#
def has_sequences(my):
return True
def get_create_sequence(my, name):
return 'CREATE SEQUENCE "%s" START WITH 1 INCREMENT BY 1 NO MAXVALUE CACHE 1' % name
    def get_sequence_name(my, table, database=None):
        '''Return the sequence backing *table*'s id column, e.g.
        "job_id_seq".  *table* may be a table name string or a SearchType
        (whose configured id column is then used).'''
        from pyasm.search import SearchType
        if isinstance(table, SearchType):
            search_type = table
            table = search_type.get_table()
            id_col = search_type.get_search_type_id_col()
            if id_col:
                return "%s_%s_seq" % (table, id_col)
        return "%s_id_seq" % table
def get_nextval(my, sequence):
return '"%s".nextval' % sequence
def get_currval(my, sequence):
return '"%s".currval' % sequence
def get_currval_select(my, sequence):
return "select currval('\"%s\"')" % sequence
def get_nextval_select(my, sequence):
return "select nextval('\"%s\"')" % sequence
def get_setval_select(my, sequence, num):
return "select setval('\"%s\"', %s)" % (sequence, num)
# Method to build and return an SQL statement that can be run to reset the ID sequence for a table to a number
# that is one greater than the highest index found in the given table. NOTE: this ASSUMES that there are rows
# in the table to provide a MAX id value from. TODO: provide handling for a table with no data rows.
    def get_reset_table_sequence_statement(my, table, database=None):
        '''Build the ALTER SEQUENCE statement that restarts *table*'s id
        sequence at MAX(id)+1.  NOTE: assumes the table has rows --
        MAX(id) is NULL for an empty table (see TODO in the comment
        above this method).'''
        from sql import DbContainer
        sql = DbContainer.get(database)
        query = "SELECT MAX(id) + 1 FROM %s ;" % table
        result = sql.do_query(query)
        max_id = result[0][0]
        reset_seq_sql = "ALTER SEQUENCE %s_id_seq RESTART WITH %d ;" % (table, max_id)
        return reset_seq_sql
#
# Regular Expressions
#
def get_regex_filter(my, column, regex, op='EQI'):
if op == 'EQI':
op = '~*'
elif op == 'EQ':
op = '~'
elif op == 'NEQI':
op = '!~*'
elif op == 'NEQ':
op = '!~'
else:
raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
return "\"%s\" %s '%s'" %(column, op, regex)
#
# Type process methods
#
def process_value(my, name, value, column_type="varchar"):
if column_type == 'timestamp':
quoted = True
if value == "NOW":
value = "now()"
return {"value": value, "quoted": quoted}
elif column_type == 'boolean':
quoted = False
if value in ['true', 'True', 1 ,'1', True]:
value = True
else:
value = False
return {"value": value, "quoted": quoted}
elif column_type in ['decimal', 'numeric']:
quoted = False
if isinstance(value, basestring):
value = value.replace("Decimal('", '')
value = value.replace("')", '')
return {"value": value, "quoted": quoted}
#
# Database methods
#
    def _get_db_info(my, db_resource, host=None, port=None):
        ''' get the database info from the config file if db_resource object
        is not given. e.g. during install.  Returns psql/createdb-style
        command-line arguments ("-h host -U user [-p port]").'''
        from sql import DbResource
        if isinstance(db_resource, DbResource):
            host = db_resource.get_host()
            user = db_resource.get_user()
            password = db_resource.get_password()
            port = db_resource.get_port()
        else:
            if not host:
                host = Config.get_value("database", "server")
            user = Config.get_value("database", "user")
            password = Config.get_value("database", "password")
            if not port:
                port = Config.get_value("database", "port")
        # put in some defaults
        if not host:
            host = '127.0.0.1'
        if not user:
            user = 'postgres'
        if not port:
            port = '5432'
        # NOTE: password is looked up but not emitted; the psql tools take
        # the password via prompt/.pgpass, not an argument
        parts = []
        parts.append("-h %s" % host)
        parts.append("-U %s" % user)
        if port:
            parts.append("-p %s" % port)
        return " ".join(parts)
def create_database(my, database):
'''create a database. This is done by a system command'''
# if the database already exists, do nothing
if my.database_exists(database):
return
if not isinstance(database, basestring):
database = database.get_database()
from pyasm.search import DbResource
db_resource = DbResource.get_default(database)
create = 'createdb %s -E UNICODE "%s"' % (my._get_db_info(db_resource), database)
cmd = os.popen(create)
result = cmd.readlines()
# Psql 8.3 doesn't have outputs on creation
if not result:
print "no output, assumed success"
return
#raise Exception("Error creating database '%s'" % database)
cmd.close()
if result[0] == "CREATE DATABASE":
print "success"
elif result[0].endswith("already exists"):
print "already exists"
else:
print "no returned result from database creation (psql 8.2+)"
def drop_database(my, db_resource):
'''remove a postgres database . Note this is a very dangerous
operation. Use with care.'''
# if the database already exists, do nothing
if not my.database_exists(db_resource):
return
from sql import DbResource, DbContainer, Sql
if isinstance(db_resource, DbResource):
database = db_resource.get_database()
else:
database = db_resource
info = my._get_db_info(db_resource)
database_version = Sql.get_default_database_version()
major = database_version[0]
minor = database_version[1]
# connect from the main db
sql = DbContainer.get('sthpw')
# try to kill the connections first
version = '%s.%s' %(major, minor)
if version >= '8.4':
col_name = 'procpid'
if version >= '9.2':
col_name = 'pid'
sql.do_query("""SELECT pg_terminate_backend(pg_stat_activity.%s)
FROM pg_stat_activity WHERE pg_stat_activity.datname = '%s'"""%(col_name, database))
print "Dropping Database [%s] ..." % database
"""
isolation_level = sql.conn.isolation_level
sql.conn.set_isolation_level(0)
sql.execute('''DROP DATABASE "%s";''' % str(database) )
# FIXME: this creates a warning
DbContainer.release_thread_sql()
"""
cmds = ['dropdb']
cmds.extend(info.split(' '))
# has to str() to avoid unicode str
cmds.append(str(database))
#drop_cmd = "dropdb %s %s" % (info, database)
#cmd = os.popen(drop_cmd, 'r')
#result = cmd.readlines()
#cmd.close()
popen = subprocess.Popen(cmds, shell=False, stdout=subprocess.PIPE)
popen.wait()
output = ''
value = popen.communicate()
if value:
output = value[0].strip()
if not output:
err = value[1]
if err:
output = err
return output
def get_modify_column(my, table, column, type, not_null=None):
''' get the statement for setting the column type '''
# this may not return the type exacty like before like varchar is in place of
# varchar(256) due to the column type returned from the sql impl
statements = []
statements.append('ALTER TABLE "%s" ALTER "%s" TYPE %s' \
% (table,column,type))
if not_null == None:
return statements
if not_null:
statements.append('ALTER TABLE "%s" ALTER "%s" SET NOT NULL' \
% (table,column))
else:
statements.append('ALTER TABLE "%s" ALTER "%s" DROP NOT NULL' \
% (table,column))
return statements
"""
def import_default_data(my, db_resource, type):
'''import the data of certain type to the given database'''
from sql import DbResource, DbContainer
if isinstance(db_resource, DbResource):
database = db_resource.get_database()
else:
database = db_resource
# import the necessary schema
schema_dir = my.get_schema_dir()
schema_path = "%s/%s_data.sql" % (schema_dir, type)
if not os.path.exists(schema_path):
if type != 'simple':
#Environment.add_warning("Default data does not exist", "Data '%s' does not exist" % schema_path)
return
schema = 'psql -q %s %s < "%s"' % (my._get_db_info(db_resource), database, schema_path)
print "Importing data ..."
print schema
os.system(schema)
"""
    def get_constraints(my, db_resource, table):
        '''Get contraints primarily UNIQUE for PostgreSQL.  Returns a list
        of {'mode': ..., 'name': ..., 'columns': [...]} dicts.'''
        from sql import Select, DbContainer
        constraints = []
        try:
            db = DbContainer.get(db_resource)
            statement = '''SELECT * from information_schema.table_constraints where table_name='%s';''' % table
            results = db.do_query(statement)
            # ignore Primary Key and CHECK CONSTRAINT for now
            if len(results) > 0:
                for k in range(len(results)):
                    mode = results[k][6]
                    name = results[k][2]
                    if mode in ['PRIMARY KEY', 'CHECK']:
                        continue
                    constraints.append({'mode':mode, 'name': name})
            # resolve the column list of each constraint from its index
            # definition, e.g. "... (code, project_code)"
            for constraint in constraints:
                name = constraint.get('name')
                statement = '''select pg_get_indexdef(oid) from pg_class where relname='%s';''' % name
                sub_result = db.do_query(statement)
                value = sub_result[0][0]
                m = re.search(r'\((.*)\)', value, re.M)
                group = m.group()
                columns = []
                if group:
                    columns = group.lstrip('(').rstrip(')')
                    columns = columns.split(',')
                constraint['columns'] = columns
        except Exception, e:
            # best-effort: log and return what was collected so far
            print e
        return constraints
    def get_table_info(my, db_resource):
        '''Return {name: name} for all user tables and public views,
        cached per db_resource in the Container.'''
        key = "DatabaseImpl:table_info"
        cache_dict = Container.get(key)
        if cache_dict == None:
            cache_dict = {}
            Container.put(key, cache_dict)
        from sql import DbContainer, Sql
        if isinstance(db_resource, Sql):
            key2 = "%s" % (db_resource.get_db_resource())
        else:
            key2 = "%s" % (db_resource)
        cache = cache_dict.get(key2)
        if cache != None:
            return cache
        info = {}
        cache_dict[key2] = info
        from sql import Select, DbContainer
        sql = DbContainer.get(db_resource)
        # user tables (skip the pg_ and sql_ system tables)
        statement = '''SELECT tablename FROM pg_tables
        WHERE tablename NOT LIKE 'pg_%'
        AND tablename NOT LIKE 'sql_%'
        '''
        results = sql.do_query(statement)
        for result in results:
            table = result[0]
            info[table] = table
        #statement = '''SELECT viewname FROM pg_views
        #WHERE schemaname NOT IN ['information_schema', 'pg_catalog']
        #'''
        # or (this will not work if we define schema for projects
        statement = '''SELECT viewname FROM pg_views
        WHERE schemaname = 'public'
        '''
        results = sql.do_query(statement)
        for result in results:
            table = result[0]
            info[table] = table
        return info
    def get_column_info(cls, db_resource, table, use_cache=True):
        '''get column info like data types, is_nullable in a dict keyed by
        column name; values look like
        {'data_type': ..., 'nullable': bool, 'size': ...}.'''
        from sql import DbContainer, Sql
        if isinstance(db_resource, Sql):
            prefix = "%s" % db_resource.get_db_resource()
        else:
            prefix = "%s" % db_resource
        if use_cache:
            # use global cache
            if prefix.endswith(':sthpw'):
                from pyasm.biz import CacheContainer
                cache = CacheContainer.get("sthpw_column_info")
                if cache:
                    dict = cache.get_value_by_key("data", table)
                    if dict != None:
                        return dict
        # fall back to the per-request Container cache
        key2 = "%s:%s" % (prefix, table)
        key = "DatabaseImpl:column_info"
        cache_dict = Container.get(key)
        if cache_dict == None:
            cache_dict = {}
            Container.put(key, cache_dict)
        if use_cache:
            cache = cache_dict.get(key2)
            if cache != None:
                return cache
        cache = {}
        cache_dict[key2] = cache
        # get directly from the database
        if isinstance(db_resource, Sql):
            sql = db_resource
        else:
            sql = DbContainer.get(db_resource)
        query = "select column_name, data_type, \
            is_nullable, character_maximum_length from \
            information_schema.columns where table_name = '%s' \
            " % table
        #order by ordinal_position" % table
        result = sql.do_query(query)
        # convert to the proper data structure, normalizing Postgres type
        # names to TACTIC's generic type names
        if len(result) > 0:
            for k in range(len(result)):
                name = result[k][0]
                data_type = result[k][1]
                is_nullable = result[k][2] == 'YES'
                size = result[k][3]
                if data_type == 'character varying':
                    data_type = 'varchar'
                elif data_type in ['integer', 'smallint']:
                    data_type = 'integer'
                elif data_type == 'text':
                    data_type = "text"
                elif data_type == 'boolean':
                    data_type = "boolean"
                elif data_type.startswith("timestamp"):
                    data_type = "timestamp"
                # for time with/wihtout time zone
                elif data_type.startswith("time "):
                    data_type = "time"
                info_dict = {'data_type': data_type, 'nullable': is_nullable, 'size': size}
                cache[name] = info_dict
        return cache
    get_column_info = classmethod(get_column_info)
def get_column_types(my, database, table, use_cache=True):
    '''Return a dict mapping each column name of "table" to its data
    type.  Note: can potentially get character_maximum_length,
    numeric_precision, and udt_name from the underlying column info.'''
    column_info = my.get_column_info(database, table)
    return dict(
        (column, details.get('data_type'))
        for column, details in column_info.items()
    )
def get_column_nullables(my, database, table):
    '''Return a dict mapping each column name of "table" to its
    nullable flag, derived from the cached column info.'''
    column_info = my.get_column_info(database, table)
    return dict(
        (column, details.get('nullable'))
        for column, details in column_info.items()
    )
class OracleImpl(PostgresImpl):
def get_database_type(my):
    '''Identifier string for this database implementation.'''
    return "Oracle"

def create_database(my, database):
    '''create a database. This is done by a system command'''
    # get the system user
    from pyasm.search import DbPasswordUtil, DbContainer
    password = DbPasswordUtil.get_password()
    # NOTE(review): in Oracle a TACTIC "database" is implemented as a
    # user/schema, hence CREATE USER rather than CREATE DATABASE
    statement = 'create user %s identified by %s' % (database, password)
    sql = DbContainer.get("system")
    sql.do_update(statement)
def get_page(my, limit=None, offset=0):
    '''get the pagination sql based on limit and offset'''
    if limit is None:
        return ""
    # This is not used
    upper = offset + limit
    return "rownum between %s and %s" % (offset, upper)
def handle_pagination(my, statement, limit, offset):
    '''specific method to handle Oracle's insane pagination

    Wraps "statement" in a rownum range subquery.  Note that rownum
    starts at 1 in Oracle, so the range is [offset+1, offset+limit].
    '''
    if limit is None:
        return statement
    start = offset + 1
    end = offset + limit
    if not offset:
        # single wrap is enough when starting from the first row
        clause = "rownum between %s and %s" % (start, end)
        return "SELECT * FROM (%s) WHERE %s" % (statement, clause)
    # rownum is evaluated before ORDER BY, so a second wrap is needed
    # to filter on a materialized spt_rownum alias
    clause = "spt_rownum between %s and %s" % (start, end)
    return "SELECT * FROM ( SELECT spt.*, rownum as spt_rownum FROM (%s ) spt ) WHERE %s" % (statement, clause)
#
# Sequence methods for Oracle
#
def has_sequences(my):
    '''Oracle generates ids from sequences.'''
    return True

def get_create_sequence(my, name):
    '''Return the DDL to create an id sequence named "name".'''
    # FIXME: sequence names have quote in them. This needs to be fixed!!!
    return 'CREATE SEQUENCE %s START WITH 1 NOMAXVALUE' % name

def get_sequence_name(my, table, database=None):
    '''Return the quoted "<table>_id_seq" sequence identifier,
    optionally schema-qualified.  "table" may be a table name or a
    SearchType.'''
    from pyasm.search import SearchType
    if isinstance(table, SearchType):
        search_type = table
        table = search_type.get_table()
    if database:
        return '''%s."%s_id_seq"''' % (database, table)
    else:
        return '''"%s_id_seq"''' % (table)
# Method to build and return a PL/SQL that can be run to reset the ID sequence for a table to a number that
# is one greater than the highest index found in the given table. NOTE: this ASSUMES that there are rows
# in the table to provide a MAX id value from. TODO: provide handling for a table with no data rows.
def get_reset_table_sequence_statement(my, table, database=None):
    '''Build a PL/SQL block that realigns "<table>_id_seq" with the
    current MAX(id) of the table.  The [DB] and [table] placeholders in
    the template are substituted at the bottom.'''
    template_stmt_arr = [
        '''declare''',
        '''next_val NUMBER;''',
        '''new_next_val NUMBER;''',
        '''incr NUMBER;''',
        '''highest_id NUMBER;''',
        '''v_code NUMBER;''',
        '''v_errmsg VARCHAR2(64);''',
        '''BEGIN''',
        '''SAVEPOINT start_transaction;''',
        ''' -- get the max PK from the table that's using the sequence''',
        ''' select max("id") into highest_id from [DB]"[table]";''',
        ''' -- then read nextval from the sequence''',
        ''' EXECUTE IMMEDIATE 'select [DB]"[table]_id_seq".nextval from dual' into next_val;''',
        ''' DBMS_OUTPUT.PUT_LINE('[DB]"[table]_id_seq" next_val obtained is ' || next_val);''',
        ''' -- calculate the desired next increment for the sequence''',
        ''' -- incr := highest_id - next_val + 1; -- ORIGINAL LINE THAT ADDS ONE TOO MANY''',
        ''' incr := highest_id - next_val ;''',
        ''' EXECUTE IMMEDIATE 'ALTER SEQUENCE [DB]"[table]_id_seq" increment by ' || incr;''',
        ''' EXECUTE IMMEDIATE 'select [DB]"[table]_id_seq".nextval from dual' into new_next_val;''',
        ''' EXECUTE IMMEDIATE 'ALTER SEQUENCE [DB]"[table]_id_seq" increment by 1';''',
        ''' DBMS_OUTPUT.PUT_LINE('[DB]"[table]_id_seq" new_next_val is ' || new_next_val);''',
        '''commit;''',
        '''EXCEPTION''',
        ''' WHEN OTHERS THEN''',
        ''' ROLLBACK to start_transaction;''',
        ''' DBMS_OUTPUT.PUT_LINE('Error code ' || v_code || ': ' || v_errmsg);''',
        '''end;''',
    ]
    template_stmt = '\n'.join( template_stmt_arr )
    # substitute the table name first, then the optional schema prefix
    pl_sql_stmt = template_stmt.replace("[table]", table)
    if database:
        pl_sql_stmt = pl_sql_stmt.replace("[DB]", "%s." % database)
    else:
        pl_sql_stmt = pl_sql_stmt.replace("[DB]", "")
    return pl_sql_stmt
#
# Column methods
#
def get_boolean(my, not_null=False):
    '''Oracle has no native boolean type; a CHAR(1) stands in for it.'''
    # No boolean in Oracle??!??
    decl = "CHAR(1)"
    if not_null:
        decl += " NOT NULL"
    return decl

def get_serial(my, length=4, not_null=False):
    '''oracle does not have auto serial; ids are plain NUMBERs fed by
    sequences (see the sequence methods above)'''
    decl = "NUMBER"
    if not_null:
        decl += " NOT NULL"
    return decl

def get_int(my, length=4, not_null=False):
    '''Integer column declaration; Oracle uses NUMBER for all ints.'''
    decl = "NUMBER"
    if not_null:
        decl += " NOT NULL"
    return decl

def get_text(my, not_null=False):
    '''Unbounded text maps to CLOB in Oracle.'''
    decl = "CLOB"
    if not_null:
        decl += " NOT NULL"
    return decl

def get_varchar(my, length=256, not_null=False):
    '''VARCHAR2 declaration; a length of -1 or 'max' falls back to
    CLOB via get_text().'''
    if not length:
        length = 256
    if length in [-1, 'max']:
        return my.get_text(not_null=not_null)
    decl = "VARCHAR2(%s)" % length
    if not_null:
        decl += " NOT NULL"
    return decl

def get_timestamp(my, default="now", not_null=False, timezone=False):
    '''TIMESTAMP declaration; default "now" becomes SYSTIMESTAMP.'''
    tokens = ["TIMESTAMP"]
    if default:
        default_expr = my.get_timestamp_now() if default == "now" else default
        tokens.append("DEFAULT %s" % default_expr)
    if not_null:
        tokens.append("NOT NULL")
    return " ".join(tokens)

def get_timestamp_now(my, offset=None, type=None, op='+'):
    '''SQL expression for "now", optionally offset by an interval,
    eg. SYSTIMESTAMP + INTERVAL '2' day.'''
    tokens = ["SYSTIMESTAMP"]
    if offset:
        unit = type if type else "day"
        tokens.append("INTERVAL '%s' %s" % (offset, unit))
    separator = ' %s ' % op
    return separator.join(tokens)
#
# Sequence methods -- FIXME: quotes around sequence identifier needed?
#
def get_nextval(my, sequence):
    '''Expression yielding the next value of "sequence".'''
    return "%s.nextval" % sequence

def get_currval(my, sequence):
    '''Expression yielding the current value of "sequence".'''
    return "%s.currval" % sequence

def get_currval_select(my, sequence):
    '''Full statement reading the current sequence value from dual.'''
    return "select %s.currval from dual" % sequence

def get_nextval_select(my, sequence):
    '''Full statement advancing and reading the sequence from dual.'''
    return "select %s.nextval from dual" % sequence

def get_setval_select(my, sequence):
    '''Oracle sequences have no setval; callers must handle None.'''
    return None
#
# Regular expressions
#
def get_regex_filter(my, column, regex, op='EQI'):
    '''Build an Oracle REGEXP_LIKE() where-clause fragment.

    op is one of EQ (case sensitive), EQI (case insensitive) and their
    negations NEQ / NEQI.  Raises SetupException on any other op.
    '''
    # map TACTIC ops onto REGEXP_LIKE match parameter + negation flag
    flag_map = {'EQI': 'i', 'EQ': 'c', 'NEQI': 'i', 'NEQ': 'c'}
    if op not in flag_map:
        raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
    expr = "REGEXP_LIKE(\"%s\", '%s', '%s')" % (column, regex, flag_map[op])
    if op in ('NEQI', 'NEQ'):
        return "NOT " + expr
    return expr
#
# Database methods
#
# Class-level cache of table info keyed by database/schema name;
# persists for the lifetime of the process (see FIXME below).
info = {}

def get_table_info(my, database):
    '''Return {table_name: {}} for every table owned by "database".'''
    # FIXME: this function needs to handle DbResource class
    #key = "Oracle:table_info:%s" % database
    #info = Container.get(key)
    # FIXME: this doesn't work too well as it gets called once per session
    # If the schema changes, TACTIC needs to be restarted
    info = OracleImpl.info.get(database)
    if not info:
        from sql import Select, DbContainer
        sql = DbContainer.get(database)
        select = Select()
        select.set_database(sql)
        select.add_table("ALL_TABLES")
        select.add_column("TABLE_NAME")
        # the owner may be stored in either the given or upper case
        select.add_where('''"OWNER" in ('%s','%s')''' % (database, database.upper()))
        statement = select.get_statement()
        results = sql.do_query(statement)
        #print results
        info = {}
        for result in results:
            table_name = result[0]
            # skip dropped tables parked in Oracle's recycle bin
            if table_name.startswith("BIN$"):
                continue
            table_info = {}
            info[table_name] = table_info
        #Container.put(key, info)
        OracleImpl.info[database] = info
    return info
#
# Table definitions
#
def get_column_description(my, database, table):
    '''NOTE: this is not very useful in postgres, use get_column_info()
    instead'''
    from sql import DbContainer, Sql, Select
    sql = DbContainer.get(database)
    # issue a zero-row select purely so that the driver populates its
    # cursor description for this table
    select = Select()
    select.set_database(sql)
    select.add_table(table)
    select.add_limit(0)
    query = select.get_statement()
    result = sql.do_query(query)
    return sql.get_table_description()
def get_column_info(my, database, table):
    '''get column info like data types and nullable

    Derived from the cx_Oracle cursor description rather than the data
    dictionary; cached per database/table in the session Container.
    '''
    dict = {}
    key = "OracleImpl:column_info:%s:%s" % (database, table)
    description = Container.get(key)
    if not description:
        description = my.get_column_description(database, table)
        Container.put(key, description)
    import cx_Oracle
    # each description item is the DB-API 7-tuple:
    # (name, type, display_size, ..., null_ok)
    for item in description:
        name = item[0]
        data_type = item[1]
        size = item[2]
        nullable = bool(item[6])
        # FIXME: for whatever reason, type(data_type) returns <type 'type'>
        # and isinstance(data_type, cx_Oracle.XXX) always returns false
        #if isinstance(data_type,cx_Oracle.CLOB):
        #    data_type = "text"
        #elif isinstance(data_type,cx_Oracle.STRING):
        #    data_type = "character"
        #elif isinstance(data_type,cx_Oracle.NUMBER):
        #    data_type = "integer"
        data_type_str = str(data_type)
        if data_type_str == "<type 'cx_Oracle.CLOB'>":
            data_type = "text"
        elif data_type_str == "<type 'cx_Oracle.STRING'>":
            data_type = "varchar"
        elif data_type_str == "<type 'cx_Oracle.FIXED_CHAR'>":
            # NOTE:big assumption here that character of size 1 are booleans
            if size == 1:
                data_type = "boolean"
                #??
                # NOTE(review): the "boolean" assignment above is dead --
                # it is immediately overwritten by "string"; confirm intent
                data_type = "string"
            else:
                data_type = "string"
        elif data_type_str == "<type 'cx_Oracle.NUMBER'>":
            data_type = "integer"
        elif data_type_str == "<type 'cx_Oracle.TIMESTAMP'>":
            data_type = "timestamp"
        elif data_type_str == "<type 'cx_Oracle.DATETIME'>":
            data_type = "timestamp"
        else:
            raise DatabaseImplException("Unknown type [%s] for column [%s] in table [%s]" % (data_type_str, name, table) )
        info_dict = {'data_type': data_type, 'nullable': nullable,
                'size': size}
        dict[name] = info_dict
    return dict
def get_column_types(my, database, table):
    ''' get column data types in a dict '''
    # defer to the shared PostgresImpl implementation
    return super(OracleImpl, my).get_column_types(database, table)

# schema manipulation
def get_modify_column(my, table, column, type, not_null=False):
    ''' get the list of statements for setting the column type '''
    # this may not return the type exactly like before -- eg. varchar in
    # place of varchar(256) -- due to the column type returned from the
    # sql impl
    statement = 'ALTER TABLE "%s" MODIFY "%s" %s' % (table, column, type)
    if not_null:
        statement += ' NOT NULL'
    return [statement]
#
# Sql manipulation functions
#
# This deals with Oracles absurdly low 4000 byte limit on sql statements
#
def preprocess_sql(my, data, unquoted_cols):
    '''Replace any string value longer than 4000 bytes with a PL/SQL
    variable reference.  The (varname, value) pairs are remembered in
    my.plsql_vars so postprocess_sql() can declare and fill them.
    Mutates "data" and "unquoted_cols" in place.'''
    my.plsql_vars = []
    values = data.values()
    cols = data.keys()
    for i in range(0, len(cols)):
        # plsql code to get around oracles stupid 4000 byte limit
        value = values[i]
        if value and type(value) in types.StringTypes and len(value) > 4000:
            # remember this column
            varname = "%s__var" %cols[i]
            my.plsql_vars.append((varname, value))
            value = varname
            data[cols[i]] = value
            # the variable reference itself must not be quoted in SQL
            if cols[i] not in unquoted_cols:
                unquoted_cols.append(cols[i])

def postprocess_sql(my, statement):
    '''Wrap "statement" in an anonymous PL/SQL block that declares the
    variables recorded by preprocess_sql(), streaming values of 30k+
    into temporary CLOBs in 16k chunks.  Returns the statement
    unchanged when no oversized values were found.'''
    from sql import Sql
    if not my.plsql_vars:
        return statement
    expr = []
    # inspired from:
    # http://www.uaex.edu/srea/Huge_Strings_Using_LOBs.htm
    l_varname = []
    # do 16k chunks
    chunk_length = 16*1024
    # pl/sql code to get aroung oracles stupid 4000 byte limit
    expr.append("declare")
    for varname, value in my.plsql_vars:
        length = len(value)
        if length >= 30*1024:
            # too big even for a varchar2 variable: stage into a CLOB
            expr.append("tmp varchar2(%s) := '';" % chunk_length)
            expr.append("%s clob := empty_clob;" % varname)
            l_varname.append((varname, value))
        else:
            expr.append("%s varchar2(%s) := %s;" % (varname, length, Sql.quote(value)))
    expr.append("begin")
    for varname, value in l_varname:
        chunks = int(float(len(value)) / chunk_length) + 1
        expr.append("dbms_lob.createTemporary(%s, true);" % varname)
        expr.append("dbms_lob.open(%s, dbms_lob.lob_readwrite);" % varname)
        # add to the temporary log variable in chunks
        for i in range(0, chunks):
            start = i*chunk_length
            end = (i+1)*chunk_length
            part = value[start:end]
            if part == '':
                continue
            quoted = Sql.quote(part)
            expr.append("tmp := %s;" % quoted)
            expr.append("dbms_lob.writeAppend(%s, length(tmp), tmp);" % (varname) )
        expr.append("dbms_lob.close(%s);" % varname)
    expr.append(statement)
    expr.append(";")
    # free up the lob memory
    for varname, value in l_varname:
        expr.append("dbms_lob.freeTemporary(%s);" % varname)
    expr.append("end;")
    statement = "\n".join(expr)
    return statement
def process_value(my, column, value, column_type="varchar"):
    '''Some values need to be preprocessed before going to an sql
    statement depending on type

    Returns {"value": <normalized value>, "quoted": <bool>} where
    "quoted" tells the caller whether the value still needs quoting.
    Timestamps are rewritten into TO_DATE(...) expressions.
    '''
    quoted = True
    if value == "NULL":
        quoted = False
    elif column_type == "integer":
        quoted = False
    elif column_type == "timestamp":
        orig_value = value
        value = str(value)
        if orig_value == None:
            quoted = False
            value = "NULL"
        elif value.startswith('SYSTIMESTAMP'):
            # already a raw SQL expression; pass through unquoted
            quoted = False
        else:
            # try to match the date with regular expressions
            # Feb 20, 1999
            pattern1 = re.compile("^(\w{3}) (\d+), (\d+)$")
            # 1999-02-20
            pattern2 = re.compile("^(\d{4})-(\d{1,2})-(\d{1,2})$")
            # 02/20/2005 10:30
            pattern3 = re.compile("^(\d{2})/(\d{2})/(\d{4}) (\d{2}):(\d{2})$")
            # 02/20/1999
            pattern4 = re.compile("^(\d{1,2})/(\d{2})/(\d{2,4})$")
            # Wed Apr 15 07:29:41 2009
            pattern5 = re.compile("^(\w{3}) (\w{3}) (\d{2}) (\d{2}):(\d{2}):(\d{2}) (\d{4})$")
            # 2008-03-01 00:00:00
            pattern6 = re.compile("^(\d{4})-(\d{2})-(\d{2}) (\d{2}):(\d{2}):(\d{2})$")
            # Wed Jun 3 18:13:13 2009
            pattern7 = re.compile("^(\w{3}) (\w{3}) \ ?(\d{1,2}) (\d{1,2}):(\d{2}):(\d{2}) (\d{4})$")
            # 18:13
            pattern8 = re.compile("^(\d{1,2}):(\d{2})$")
            # 18:13:15
            pattern9 = re.compile("^(\d{1,2}):(\d{2}):(\w{2})$")
            # remove the fractional seconds
            if value.find(".") != -1:
                value, tmp = value.split(".", 1)
            # convert this using dateutil ... this makes all the
            # pattern matching unnecessary
            from dateutil import parser
            xx = parser.parse(value)
            value = xx.strftime("%Y-%m-%d %H:%M:%S")
            # put this one first ... all others are probably unnecessary
            # NOTE(review): after the strftime above, pattern6 should
            # always match, leaving the remaining branches effectively
            # dead -- kept for safety
            if pattern6.match(value):
                date_pattern = "YYYY-MM-DD HH24:MI:SS"
            elif pattern1.match(value):
                date_pattern = "MON DD, YYYY"
            elif pattern2.match(value):
                date_pattern = "YYYY-MM-DD"
            elif pattern3.match(value):
                date_pattern = "MM/DD/YYYY HH24:MI"
            elif pattern4.match(value):
                date_pattern = "MM/DD/YYYY"
            elif pattern5.match(value):
                value = value[4:]
                date_pattern = "MON DD HH24:MI:SS YYYY"
            elif pattern7.match(value):
                date_pattern = "DY MON DD HH24:MI::SS YYYY"
            elif pattern8.match(value):
                value = '1900-01-01 %s' % value
                date_pattern = "YYYY-MM-DD HH24:MI"
            elif pattern9.match(value):
                value = '1900-01-01 %s' % value
                date_pattern = "YYYY-MM-DD HH24:MI:SS"
            else:
                raise DatabaseImplException("Cannot match timestamp format for value [%s] in column [%s]" % (value, column))
            value = "TO_DATE('%s', '%s')" % (value, date_pattern)
            quoted = False
    return {"value": value, "quoted": quoted}
class SqliteImpl(PostgresImpl):
def get_database_type(my):
    '''Identifier string for this database implementation.'''
    return "Sqlite"

# disabled implementation, inherited from PostgresImpl instead;
# kept for reference
"""
def get_version(my):
    from sql import DbContainer
    sql = DbContainer.get("sthpw")
    result = sql.do_query("select version()")
    version_str = result[0][0]
    #PostgreSQL 8.2.11 on i386-redhat-linux-gnu, compiled by GCC gcc (GCC) 4.1.2 20070925 (Red Hat 4.1.2-33)
    parts = version_str.split(" ")
    version_parts = parts[1].split(".")
    version_parts = [int(x) for x in version_parts]
    return version_parts
"""

#
# Column methods
#
# disabled: the inherited PostgresImpl version is used
"""
def get_boolean(my, not_null=False):
    parts = []
    parts.append("boolean")
    if not_null:
        parts.append("NOT NULL")
    return " ".join(parts)
"""
def get_serial(my, length=4, not_null=False):
    '''Sqlite id columns are declared plain "integer"; length and
    not_null are accepted for interface compatibility but ignored.'''
    return " ".join(["integer"])
"""
def get_int(my, length=4, not_null=False):
parts = []
parts.append("int%s" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_text(my, not_null=False):
parts = []
parts.append("text")
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
def get_varchar(my, length=256, not_null=False):
if not length:
length = 256
if length in [-1, 'max']:
return my.get_text(not_null=not_null)
parts = []
parts.append("varchar(%s)" % length)
if not_null:
parts.append("NOT NULL")
return " ".join(parts)
"""
def has_savepoint(my):
return False
def get_timestamp(my, default='now', not_null=False):
    '''Sqlite timestamp column declaration; default "now" expands to
    the expression returned by get_timestamp_now().'''
    tokens = ["timestamp"]
    if default:
        default_expr = my.get_timestamp_now() if default == "now" else default
        tokens.append("DEFAULT %s" % default_expr)
    if not_null:
        tokens.append("NOT NULL")
    return " ".join(tokens)

def get_timestamp_now(my, offset=None, type=None, op='+'):
    '''Expression for "now", optionally offset via DATETIME('now', ...).

    The op sign is folded into the offset because sqlite expresses
    negative offsets inside the modifier string.
    '''
    if not offset:
        return "CURRENT_TIMESTAMP"
    delta = offset
    if op == '-':
        delta = delta * -1
    unit = type
    if not unit:
        unit = "days"
    elif unit.lower() in ['week', 'weeks']:
        # sqlite doesn't understand week, but month, year are fine
        unit = "days"
        delta = delta * 7
    elif not unit.endswith('s'):
        unit = '%ss' % unit
    return "DATETIME('now','%s %s')" % (delta, unit)
#
# Sequence methods
#
# Sequences are not used in Sqlite
def has_sequences(my):
    '''Sqlite relies on rowid/autoincrement, not sequences.'''
    return False

def get_reset_table_sequence_statement(my, table, database=None):
    '''No-op: there are no sequences to reset in Sqlite.'''
    return ""

#
# Regular Expressions
#
def get_regex_filter(my, column, regex, op='EQI'):
    '''Approximate regex filtering with LIKE substring matches, since
    sqlite has no REGEXP operator by default.  op is one of EQ, EQI,
    NEQ, NEQI; raises SetupException otherwise.'''
    templates = {
        'EQI': "\"%s\" LIKE '%%%s%%'",
        'EQ': "\"%s\" LIKE '%%%s%%'",
        'NEQI': "\"%s\" NOT LIKE '%%%s%%'",
        'NEQ': "\"%s\" NOT LIKE '%%%s%%'",
    }
    if op not in templates:
        raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
    return templates[op] % (column, regex)
#
# Type process methods
#
def process_value(my, name, value, column_type="varchar"):
    '''Normalize "value" for use in an sqlite statement.

    Returns {"value": <normalized value>, "quoted": <bool>} where
    "quoted" tells the caller whether the value still needs quoting.
    '''
    quoted = True
    if value == "NULL":
        quoted = False
    elif column_type == 'boolean':
        # sqlite stores booleans as 0/1 integers
        quoted = False
        value = 1 if value in ['true', 'True', 1, '1', True] else 0
    elif column_type == 'timestamp':
        if value == "NOW":
            quoted = False
            value = my.get_timestamp_now()
        elif isinstance(value, datetime.datetime):
            # datetime objects are handled by the driver as-is
            pass
        elif value.startswith(("CURRENT_TIMESTAMP", "DATETIME(")):
            # already a raw SQL expression; pass through unquoted
            quoted = False
    return {"value": value, "quoted": quoted}
#
# Database methods
#
def _get_database_path(my, database):
    '''Resolve the .db file path for "database" (a name or a resource
    object exposing get_database()).'''
    if not isinstance(database,basestring):
        database = database.get_database()
    # dropping a database means deleting the database file
    db_dir = Config.get_value("database", "sqlite_db_dir")
    if not db_dir:
        # default location under the TACTIC data directory
        data_dir = Environment.get_data_dir()
        db_dir = "%s/db" % data_dir
    db_path = "%s/%s.db" % (db_dir, database)
    return db_path

def database_exists(my, database, host=None):
    '''A database exists iff its .db file exists; host is ignored.'''
    db_path = my._get_database_path(database)
    if os.path.exists(db_path):
        return True
    else:
        return False

def create_database(my, database):
    '''create a database'''
    # if the database already exists, do nothing
    if my.database_exists(database):
        return
    # nothing needs to be done ... databases are created automatically
    # on connection
    pass

def drop_database(my, database):
    '''remove a database on disk. Note this is a very dangerous
    operation. Use with care.'''
    # if the database already exists, do nothing
    if not my.database_exists(database):
        return
    # dropping a database means deleting the database file
    db_path = my._get_database_path(database)
    if os.path.exists(db_path):
        os.unlink(db_path)
def get_modify_column(my, table, column, type, not_null=None):
    '''This is the same as postgres: delegate to the inherited
    PostgresImpl implementation.'''
    # BUG FIX: the original called super(Sqlite, my), but no class named
    # "Sqlite" exists (this class is SqliteImpl), so every invocation
    # raised NameError.
    return super(SqliteImpl, my).get_modify_column(table, column, type, not_null)
# Although this is a general function, it is presently only use for
# Sqlite. All table info is cached immediately with Sqlite because
# the PRAGMA statement below causes transactions to commit
#
def cache_database_info(cls, sql):
    '''Warm the table and column caches for every table in one pass.'''
    table_info = cls.get_table_info(sql)
    for table in table_info.keys():
        cls.get_column_info(sql, table)
cache_database_info = classmethod(cache_database_info)
def get_column_info(cls, db_resource, table, use_cache=True):
    '''Return {column_name: {'data_type':..., 'nullable':..., 'size':...}}
    for "table", derived from PRAGMA table_info and cached per
    db_resource in the session Container.'''
    key = "DatabaseImpl:column_info"
    cache_dict = Container.get(key)
    if cache_dict == None:
        cache_dict = {}
        Container.put(key, cache_dict)
    from sql import DbContainer, Sql
    if isinstance(db_resource, Sql):
        key2 = "%s:%s" % (db_resource.get_db_resource(), table)
    else:
        key2 = "%s:%s" % (db_resource, table)
    cache = cache_dict.get(key2)
    if cache != None:
        return cache
    # register the (still empty) cache entry before filling it
    cache = {}
    cache_dict[key2] = cache
    # get directly from the database
    if isinstance(db_resource, Sql):
        sql = db_resource
    else:
        sql = DbContainer.get(db_resource)
    query = "PRAGMA table_info(%s)" % table
    results = sql.do_query(query)
    # data return is a list of the following
    #(0, u'id', u'integer', 1, None, 0)
    # ie. (cid, name, type, notnull, dflt_value, pk)
    for result in results:
        name = result[1]
        data_type = result[2]
        # notnull could be 1 or 99 which equals True
        nullable = result[3] not in [1, 99]
        #nullable = True
        # parse the declared size out of varchar-like types
        if data_type.startswith("character varying"):
            size = data_type.replace("character varying", "")
            size = size.replace("(", "")
            size = size.replace(")", "")
            size = int(size)
            data_type = 'varchar'
        elif data_type.startswith("varchar"):
            size = data_type.replace("varchar", "")
            size = size.replace("(", "")
            size = size.replace(")", "")
            if size:
                size = int(size)
            else:
                size = 256
            data_type = 'varchar'
        elif data_type.startswith("timestamp"):
            data_type = 'timestamp'
            size = 0
        else:
            size = 0
        info_dict = {'data_type': data_type, 'nullable': nullable,
                'size': size}
        cache[name] = info_dict
    return cache
get_column_info = classmethod(get_column_info)
def get_table_info(cls, db_resource):
    '''Return {table_name: table_name} for the sqlite database behind
    "db_resource", cached per resource in the session Container.'''
    cache_key = "DatabaseImpl:table_info"
    cache_dict = Container.get(cache_key)
    if cache_dict is None:
        cache_dict = {}
        Container.put(cache_key, cache_dict)
    from sql import DbContainer, Sql
    # cache entries are keyed by the stringified resource
    if isinstance(db_resource, Sql):
        resource_key = "%s" % db_resource.get_db_resource()
    else:
        resource_key = "%s" % db_resource
    cached = cache_dict.get(resource_key)
    if cached is not None:
        return cached
    # register the (still empty) entry, then fill it from sqlite_master
    info = {}
    cache_dict[resource_key] = info
    if isinstance(db_resource, Sql):
        sql = db_resource
    else:
        sql = DbContainer.get(db_resource)
    statement = '''SELECT name FROM sqlite_master where type='table';'''
    for row in sql.do_query(statement):
        info[row[0]] = row[0]
    return info
get_table_info = classmethod(get_table_info)
def get_constraints(my, db_resource, table):
    '''Parse CONSTRAINT definitions for "table" out of the CREATE TABLE
    source stored in sqlite_master.  Returns a list of dicts with
    'name', 'columns' and 'mode' keys.'''
    # FIXME: this only works with Sqlite!!!
    # FIXME: this only works with Sqlite!!!
    # FIXME: this only works with Sqlite!!!
    from sql import Select, DbContainer
    db = DbContainer.get(db_resource)
    # sqlite_master keeps the verbatim CREATE TABLE statement
    statement = '''SELECT sql FROM sqlite_master where name='%s';''' % table
    results = db.do_query(statement)
    constraints = []
    if not results:
        return constraints
    sql = results[0][0]
    for line in sql.split("\n"):
        line = line.strip()
        if line.startswith("CONSTRAINT"):
            # expects: CONSTRAINT "<name>" <MODE> (col1,col2)
            # NOTE(review): splitting on spaces truncates a column list
            # written with spaces after the commas -- verify inputs
            parts = line.split(" ")
            name = parts[1].strip('"')
            mode = parts[2]
            columns = parts[3].strip("(").strip(")").split(",")
            # remove unicode
            columns = [str(x) for x in columns]
            info = {
                'name': name,
                'columns': columns,
                'mode': mode
            }
            constraints.append(info)
    return constraints
class MySQLImpl(PostgresImpl):
def __init__(my):
    '''Snapshot the MySQL connection settings from the TACTIC config.'''
    # FIXME: this will not work in mixed db cases because it assumes a global
    # single database
    my.server = Config.get_value("database", "server")
    my.port = Config.get_value("database", "port")
    my.user = Config.get_value("database", "user")
    my.password = Config.get_value("database", "password")

def get_database_type(my):
    '''Identifier string for this database implementation.'''
    return "MySQL"

def get_version(my):
    '''Return the MySQL server version as a list of ints, eg [5, 1, 47].'''
    from sql import DbContainer
    sql = DbContainer.get("sthpw")
    # eg. result is (('5.1.47',),)
    result = sql.do_query("select @@version")
    version_str = result[0][0]
    version_parts = version_str.split(".")
    # NOTE(review): version strings with suffixes (eg. '5.5.5-10.1.2')
    # would make int() raise here -- verify against deployed servers
    version_parts = [int(x) for x in version_parts]
    # eg. result is [5, 1, 47]
    return version_parts
def process_value(my, name, value, column_type="varchar"):
    '''Normalize "value" for use in a MySQL statement.

    Returns {"value": <normalized value>, "quoted": <bool>} where
    "quoted" tells the caller whether the value still needs quoting.
    '''
    # BUG FIX: "quoted" was only assigned inside the boolean branch, so
    # any other column_type raised UnboundLocalError on return.  Default
    # to quoting, matching the Postgres/Sqlite/Oracle implementations.
    quoted = True
    if column_type == 'boolean':
        # MySQL stores booleans as tinyint 0/1
        quoted = False
        if value in ['true', 1, True]:
            value = 1
        else:
            value = 0
    return {"value": value, "quoted": quoted}
def get_table_info(my, db_resource):
    '''Return {table_name: table_name} for the MySQL database behind
    "db_resource", cached per resource in the session Container.'''
    key = "DatabaseImpl:table_info"
    cache_dict = Container.get(key)
    if cache_dict == None:
        cache_dict = {}
        Container.put(key, cache_dict)
    from sql import DbContainer, Sql
    if isinstance(db_resource, Sql):
        key2 = "%s" % (db_resource.get_db_resource())
    else:
        key2 = "%s" % (db_resource)
    cache = cache_dict.get(key2)
    if cache != None:
        return cache
    # register the (still empty) cache entry before filling it
    info = {}
    cache_dict[key2] = info
    if not isinstance(db_resource, basestring):
        database_name = db_resource.get_database()
    else:
        database_name = db_resource
    from sql import Select, DbContainer
    sql = DbContainer.get(db_resource)
    # NOTE(review): double-quoted identifiers require the ANSI_QUOTES
    # sql_mode; stock MySQL expects backticks -- verify server config
    statement = '''SHOW TABLES FROM "%s"''' % database_name
    results = sql.do_query(statement)
    for result in results:
        table = result[0]
        info[table] = table
    return info
def get_column_info(cls, db_resource, table, use_cache=True):
    '''Return {column_name: {'data_type':..., 'nullable':..., 'size':...}}
    for "table", derived from SHOW COLUMNS and cached per db_resource
    in the session Container.

    NOTE: unlike the Sqlite version, this is not wrapped as a
    classmethod in the original source; cls is effectively unused.
    '''
    key = "DatabaseImpl:column_info"
    cache_dict = Container.get(key)
    if cache_dict == None:
        cache_dict = {}
        Container.put(key, cache_dict)
    from sql import DbContainer, Sql
    if isinstance(db_resource, Sql):
        key2 = "%s:%s" % (db_resource.get_db_resource(), table)
    else:
        key2 = "%s:%s" % (db_resource, table)
    cache = cache_dict.get(key2)
    if cache != None:
        return cache
    # register the (still empty) cache entry before filling it
    column_info = {}
    cache_dict[key2] = column_info
    # get directly from the database
    sql = DbContainer.get(db_resource)
    # NOTE(review): double-quoted identifier requires ANSI_QUOTES mode
    query = '''SHOW COLUMNS FROM "%s"''' % table
    results = sql.do_query(query)
    # each row is (Field, Type, Null, Key, Default, Extra)
    for result in results:
        name = result[0]
        data_type = result[1]
        # NOTE(review): the Null column (result[2]) has always been
        # ignored here; nullable is hard-coded True -- verify intent
        nullable = True
        if data_type.startswith("character varying"):
            size = data_type.replace("character varying", "")
            size = int(size.replace("(", "").replace(")", ""))
            data_type = 'varchar'
        elif data_type.startswith("varchar"):
            size = data_type.replace("varchar", "")
            size = size.replace("(", "").replace(")", "")
            size = int(size) if size else 256
            data_type = 'varchar'
        # rather big assumption that tinyint == boolean
        elif data_type.startswith("tinyint"):
            size = data_type.replace("tinyint", "")
            size = size.replace("(", "").replace(")", "")
            size = int(size) if size else 4
            data_type = 'boolean'
        elif data_type.startswith("longtext"):
            data_type = 'text'
            size = 0
        elif data_type.startswith("mediumtext"):
            data_type = 'text'
            size = 0
        # BUG FIX: removed an unreachable duplicate
        # `elif data_type.startswith("varchar")` branch (mapping varchar
        # to 'text') -- the varchar branch above always matched first.
        elif data_type.startswith("int"):
            parts = data_type.split(" ")
            size = parts[0].replace("int", "")
            size = size.replace("(", "").replace(")", "")
            size = int(size) if size else 4
            data_type = 'integer'
        elif data_type.startswith("timestamp"):
            data_type = 'timestamp'
            size = 0
        else:
            size = 0
        column_info[name] = {
            'data_type': data_type,
            'nullable': nullable,
            'size': size,
        }
    return column_info
#
# Column methods
#
def get_serial(my, length=4, not_null=False):
    '''MySQL id columns use the serial alias; arguments are accepted
    for interface compatibility but ignored.'''
    return " ".join(["serial"])

def get_boolean(my, not_null=False):
    '''MySQL has no real boolean; tinyint stands in for it.'''
    decl = "tinyint"
    if not_null:
        decl += " NOT NULL"
    return decl

def get_varchar(my, length=191, not_null=False):
    '''varchar declaration; default 191 fits utf8mb4 indexed columns.
    A length of -1 or 'max' falls back to text via get_text().'''
    if not length:
        length = 191
    if length in [-1, 'max']:
        return my.get_text(not_null=not_null)
    decl = "varchar(%s)" % length
    if not_null:
        decl += " NOT NULL"
    return decl

def get_timestamp(my, default=None, not_null=False, timezone=False):
    '''timestamp declaration.  A default of "now" is deliberately
    ignored: MySQL historically allows only one column per table with a
    CURRENT_TIMESTAMP default (it errors with "Incorrect table
    definition; there can be only one TIMESTAMP column with
    CURRENT_TIMESTAMP in DEFAULT or ON UPDATE clause"), so the now()
    default is skipped until that MySQL limitation is addressed.'''
    tokens = ["timestamp with time zone" if timezone else "timestamp"]
    if default and default != "now":
        tokens.append("DEFAULT %s" % default)
    if not_null:
        tokens.append("NOT NULL")
    return " ".join(tokens)

def get_timestamp_now(my, offset=None, type=None, op='+'):
    '''MySQL get current / offset timestamp from now'''
    tokens = ["NOW()"]
    if offset:
        unit = type or "DAY"
        tokens.append(" INTERVAL %s %s" % (offset, unit))
    separator = ' %s ' % op
    return separator.join(tokens)
#
# Sequence methods
#
# Sequences are not used in MySQL
def has_sequences(my):
    '''MySQL relies on auto_increment, not sequences.'''
    return False

def get_reset_table_sequence_statement(my, table, database=None):
    # We do not use sequences in MySQL (comment originally copied from
    # the Sqlite impl)
    return ""

#
# Regular Expressions
#
# NOTE(review): this definition is dead code -- a second
# get_regex_filter defined later in this class replaces it at
# class-creation time.  It approximates regex matching with LIKE.
def get_regex_filter(my, column, regex, op='EQI'):
    if op == 'EQI':
        #op = '~*'
        return "\"%s\" LIKE '%%%s%%'" %(column, regex)
    elif op == 'EQ':
        #op = '~'
        return "\"%s\" LIKE '%%%s%%'" %(column, regex)
    elif op == 'NEQI':
        #op = '!~*'
        return "\"%s\" NOT LIKE '%%%s%%'" %(column, regex)
    elif op == 'NEQ':
        #op = '!~'
        return "\"%s\" NOT LIKE '%%%s%%'" %(column, regex)
    else:
        raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
    # unreachable: every branch above returns or raises
    return "\"%s\" %s '%s'" %(column, op, regex)
#
# Regex expressions
#
def get_regex_filter(my, column, regex, op='EQI'):
    '''Build a MySQL REGEXP where-clause fragment.

    op is one of EQ (case sensitive), EQI (case insensitive) and their
    negations NEQ / NEQI; case-insensitivity is emulated by lowercasing
    both the column and the pattern.  Raises SetupException otherwise.
    '''
    op_map = {
        'EQI': ('REGEXP', False),
        'EQ': ('REGEXP', True),
        'NEQI': ('NOT REGEXP', False),
        'NEQ': ('NOT REGEXP', True),
    }
    if op not in op_map:
        raise SetupException('Invalid op [%s]. Try EQ, EQI, NEQ, or NEQI' %op)
    sql_op, case_sensitive = op_map[op]
    if case_sensitive:
        return "\"%s\" %s '%s'" % (column, sql_op, regex)
    return "LOWER(\"%s\") %s '%s'" % (column, sql_op, regex.lower())
#
# Database methods
#
def create_database(my, database):
    '''create a database'''
    from sql import DbContainer, DbResource
    db_resource = DbResource.get_default("")
    sql = DbContainer.get(db_resource)
    # NOTE(review): double-quoted identifier requires ANSI_QUOTES
    # sql_mode; stock MySQL expects backticks -- verify server config
    statement = '''CREATE DATABASE IF NOT EXISTS "%s";''' % database
    results = sql.do_update(statement)

def drop_database(my, database):
    '''Drop the database by shelling out to the mysql client.'''
    # TODO: if the database does not exist, do nothing
    # if not database_exists(database):
    #     return
    # TODO: Retrieve server, username, password from TACTIC config file.
    # eg. mysql --host=localhost --port=5432 --user=root --password=south123paw --execute="create database unittest"
    # SECURITY NOTE: the password appears on the command line (visible
    # in the process list) and the database name is not shell-escaped
    drop_SQL_arg = 'DROP DATABASE %s' % database.get_database()
    create = 'mysql --host=%s --port=%s --user=%s --password=%s --execute="%s"' % \
        (my.server, my.port, my.user, my.password, drop_SQL_arg)
    cmd = os.popen(create)
    result = cmd.readlines()
    # the mysql client is silent on success
    if not result:
        print "No output from sql command to drop db [%s], assumed success" % database
        cmd.close()
        return
    else:
        print result
        cmd.close()
class TacticImpl(PostgresImpl):
class TacticCursor(object):
def execute():
print "execute"
# Mimic DB2 API
OperationalError = Exception
def cursor():
return TacticCursor()
cursor = staticmethod(cursor)
def __init__(my):
from tactic_client_lib import TacticServerStub
my.server = TacticServerStub.get(protocol='xmlrpc')
def get_database_type(my):
return "TACTIC"
def get_table_info(my, db_resource):
search_type = "table/whatever?project=fifi"
table_info = my.server.get_table_info(search_type)
print "xxx: ", table_info
return table_info
| epl-1.0 |
trevor/mailman3 | src/mailman/commands/eml_help.py | 2 | 3007 | # Copyright (C) 2012-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
"""The email command 'help'."""
from __future__ import absolute_import, print_function, unicode_literals
__metaclass__ = type
__all__ = [
'Help',
]
from zope.interface import implementer
from mailman.config import config
from mailman.core.i18n import _
from mailman.interfaces.command import ContinueProcessing, IEmailCommand
from mailman.utilities.string import wrap
SPACE = ' '
@implementer(IEmailCommand)
class Help:
    """The email 'help' command."""

    name = 'help'
    argument_description = '[command]'
    description = _('Get help about available email commands.')
    short_description = description

    def process(self, mlist, msg, msgdata, arguments, results):
        """See `IEmailCommand`."""
        # With no argument, print the command and a short description, which
        # is contained in the short_description attribute.
        if len(arguments) == 0:
            # size the first column to the longest command name
            length = max(len(command) for command in config.commands)
            format = '{{0: <{0}s}} - {{1}}'.format(length)
            for command_name in sorted(config.commands):
                command = config.commands[command_name]
                short_description = getattr(
                    command, 'short_description', _('n/a'))
                print(format.format(command.name, short_description),
                      file=results)
            return ContinueProcessing.yes
        elif len(arguments) == 1:
            # with one argument, print detailed help for that command
            command_name = arguments[0]
            command = config.commands.get(command_name)
            if command is None:
                print(_('$self.name: no such command: $command_name'),
                      file=results)
                return ContinueProcessing.no
            # usage line, short description, then the long description
            # only when it adds information
            print('{0} {1}'.format(command.name, command.argument_description),
                  file=results)
            print(command.short_description, file=results)
            if command.short_description != command.description:
                print(wrap(command.description), file=results)
            return ContinueProcessing.yes
        else:
            # more than one argument is an error
            printable_arguments = SPACE.join(arguments)
            print(_('$self.name: too many arguments: $printable_arguments'),
                  file=results)
            return ContinueProcessing.no
| gpl-3.0 |
franciscovn/terrama2 | scripts/check-translations.py | 5 | 1660 | import json, sys, unicodedata
def _load_locale(path):
    """Read and parse one locale JSON file, closing the handle afterwards."""
    # The original used open(path).read() and leaked the file handle.
    with open(path) as handle:
        return json.loads(handle.read())

def _missing_keys(reference, translation):
    """Return keys of `reference` absent from `translation`, folded to ASCII
    so they can be written safely to the console."""
    missing = []
    for key in reference:
        if key not in translation:
            missing.append(
                unicodedata.normalize("NFKD", key).encode('ascii', 'ignore'))
    return missing

def _report(language, missing):
    """Print either a success banner or the list of missing keys."""
    sys.stdout.write("\n\n-------------------------------------------------\n\n")
    if len(missing) == 0:
        sys.stdout.write("The " + language + " translation file is correct.\n\n")
    else:
        sys.stdout.write("Missing translations in " + language + ":\n\n")
        for item in missing:
            sys.stdout.write(" " + item + "\n")

dataEn = _load_locale('../webapp/locales/en_US.json')
dataPt = _load_locale('../webapp/locales/pt_BR.json')
dataEs = _load_locale('../webapp/locales/es_ES.json')
dataFr = _load_locale('../webapp/locales/fr_FR.json')

ptItens = _missing_keys(dataEn, dataPt)
esItens = _missing_keys(dataEn, dataEs)
# The French check was disabled in the original; keep the empty list so the
# report below still runs (French always reports "correct").
frItens = []
# frItens = _missing_keys(dataEn, dataFr)

_report("Portuguese", ptItens)
_report("Spanish", esItens)
_report("French", frItens)
newerthcom/savagerebirth | libs/python-2.72/Lib/distutils/versionpredicate.py | 397 | 5095 | """Module for parsing and testing package version predicate strings.
"""
import re
import distutils.version
import operator
re_validPackage = re.compile(r"(?i)^\s*([a-z_]\w*(?:\.[a-z_]\w*)*)(.*)")
# (package) (rest)
re_paren = re.compile(r"^\s*\((.*)\)\s*$") # (list) inside of parentheses
re_splitComparison = re.compile(r"^\s*(<=|>=|<|>|!=|==)\s*([^\s,]+)\s*$")
# (comp) (version)
def splitUp(pred):
    """Parse a single version comparison.

    Return (comparison string, StrictVersion)
    """
    match = re_splitComparison.match(pred)
    if match is None:
        raise ValueError("bad package restriction syntax: %r" % pred)
    comparison, version_text = match.groups()
    return (comparison, distutils.version.StrictVersion(version_text))

# Map each comparison token onto the operator function implementing it.
compmap = {"<": operator.lt, "<=": operator.le, "==": operator.eq,
           ">": operator.gt, ">=": operator.ge, "!=": operator.ne}
class VersionPredicate:
    """Parse and test package version predicates.
    >>> v = VersionPredicate('pyepat.abc (>1.0, <3333.3a1, !=1555.1b3)')
    The `name` attribute provides the full dotted name that is given::
    >>> v.name
    'pyepat.abc'
    The str() of a `VersionPredicate` provides a normalized
    human-readable version of the expression::
    >>> print v
    pyepat.abc (> 1.0, < 3333.3a1, != 1555.1b3)
    The `satisfied_by()` method can be used to determine with a given
    version number is included in the set described by the version
    restrictions::
    >>> v.satisfied_by('1.1')
    True
    >>> v.satisfied_by('1.4')
    True
    >>> v.satisfied_by('1.0')
    False
    >>> v.satisfied_by('4444.4')
    False
    >>> v.satisfied_by('1555.1b3')
    False
    `VersionPredicate` is flexible in accepting extra whitespace::
    >>> v = VersionPredicate(' pat( == 0.1 ) ')
    >>> v.name
    'pat'
    >>> v.satisfied_by('0.1')
    True
    >>> v.satisfied_by('0.2')
    False
    If any version numbers passed in do not conform to the
    restrictions of `StrictVersion`, a `ValueError` is raised::
    >>> v = VersionPredicate('p1.p2.p3.p4(>=1.0, <=1.3a1, !=1.2zb3)')
    Traceback (most recent call last):
    ...
    ValueError: invalid version number '1.2zb3'
    It the module or package name given does not conform to what's
    allowed as a legal module or package name, `ValueError` is
    raised::
    >>> v = VersionPredicate('foo-bar')
    Traceback (most recent call last):
    ...
    ValueError: expected parenthesized list: '-bar'
    >>> v = VersionPredicate('foo bar (12.21)')
    Traceback (most recent call last):
    ...
    ValueError: expected parenthesized list: 'bar (12.21)'
    """
    def __init__(self, versionPredicateStr):
        """Parse a version predicate string.
        """
        # Fields:
        # name: package name
        # pred: list of (comparison string, StrictVersion)
        versionPredicateStr = versionPredicateStr.strip()
        if not versionPredicateStr:
            raise ValueError("empty package restriction")
        # re_validPackage captures (dotted package name, trailing rest).
        match = re_validPackage.match(versionPredicateStr)
        if not match:
            raise ValueError("bad package name in %r" % versionPredicateStr)
        self.name, paren = match.groups()
        paren = paren.strip()
        if paren:
            # The rest, if any, must be a parenthesized comma-separated
            # list of "<op> <version>" clauses.
            match = re_paren.match(paren)
            if not match:
                raise ValueError("expected parenthesized list: %r" % paren)
            # NOTE: 'str' here shadows the builtin (kept byte-identical).
            str = match.groups()[0]
            self.pred = [splitUp(aPred) for aPred in str.split(",")]
            if not self.pred:
                # (str.split(",") always yields at least one item, so this
                # branch appears unreachable; kept for safety.)
                raise ValueError("empty parenthesized list in %r"
                                 % versionPredicateStr)
        else:
            self.pred = []
    def __str__(self):
        if self.pred:
            # Normalized form: "name (op ver, op ver, ...)".
            seq = [cond + " " + str(ver) for cond, ver in self.pred]
            return self.name + " (" + ", ".join(seq) + ")"
        else:
            return self.name
    def satisfied_by(self, version):
        """True if version is compatible with all the predicates in self.
        The parameter version must be acceptable to the StrictVersion
        constructor. It may be either a string or StrictVersion.
        """
        # Logical AND over all clauses: every comparison must hold.
        for cond, ver in self.pred:
            if not compmap[cond](version, ver):
                return False
        return True
# Lazily-compiled pattern for "name" or "name (version)"; cached at module
# level after the first call to split_provision().
_provision_rx = None
def split_provision(value):
    """Return the name and optional version number of a provision.
    The version number, if given, will be returned as a `StrictVersion`
    instance, otherwise it will be `None`.
    >>> split_provision('mypkg')
    ('mypkg', None)
    >>> split_provision(' mypkg( 1.2 ) ')
    ('mypkg', StrictVersion ('1.2'))
    """
    global _provision_rx
    if _provision_rx is None:
        _provision_rx = re.compile(
            "([a-zA-Z_]\w*(?:\.[a-zA-Z_]\w*)*)(?:\s*\(\s*([^)\s]+)\s*\))?$")
    value = value.strip()
    match = _provision_rx.match(value)
    if match is None:
        raise ValueError("illegal provides specification: %r" % value)
    version = match.group(2) or None
    if version:
        version = distutils.version.StrictVersion(version)
    return match.group(1), version
| gpl-2.0 |
crmccreary/openerp_server | openerp/pychart/afm/Symbol.py | 15 | 1508 | # -*- coding: utf-8 -*-
# AFM font Symbol (path: /usr/share/fonts/afms/adobe/psyr.afm).
# Derived from Ghostscript distribution.
# Go to www.cs.wisc.edu/~ghost to get the Ghostcript source code.
import dir
dir.afm["Symbol"] = (500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 250, 333, 713, 500, 549, 833, 778, 439, 333, 333, 500, 549, 250, 549, 250, 278, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 278, 278, 549, 549, 549, 444, 549, 722, 667, 722, 612, 611, 763, 603, 722, 333, 631, 722, 686, 889, 722, 722, 768, 741, 556, 592, 611, 690, 439, 768, 645, 795, 611, 333, 863, 333, 658, 500, 500, 631, 549, 549, 494, 439, 521, 411, 603, 329, 603, 549, 549, 576, 521, 549, 549, 521, 549, 603, 439, 576, 713, 686, 493, 686, 494, 480, 200, 480, 549, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 500, 620, 247, 549, 167, 713, 500, 753, 753, 753, 753, 1042, 987, 603, 987, 603, 400, 549, 411, 549, 549, 713, 494, 460, 549, 549, 549, 549, 1000, 603, 1000, 658, 823, 686, 795, 987, 768, 768, 823, 768, 768, 713, 713, 713, 713, 713, 713, 713, 768, 713, 790, 790, 890, 823, 549, 250, 713, 603, 603, 1042, 987, 603, 987, 603, 494, 329, 790, 790, 786, 713, 384, 384, 384, 384, 384, 384, 494, 494, 494, 494, 500, 329, 274, 686, 686, 686, 384, 384, 384, 384, 384, 384, 494, 494, 494, )
| agpl-3.0 |
RSEmail/tina | tina/cookbook_metadata.py | 1 | 3368 | import os
import re
from tag import Tag
from version_requirement import VersionRequirement
class CookbookMetadata:
    """Parsed view of a Chef cookbook's metadata.rb file.

    Extracts the cookbook name, version and dependency list from
    .tina/<local_dir>/metadata.rb, and can rewrite that file with pinned
    dependency versions.
    """
    def __init__(self, local_dir):
        # The cookbook name will default to the local directory name,
        # since we can't guarantee community cookbooks will have the
        # name in the metadata file.
        self.cookbook_name = local_dir
        self.filename = os.path.join(".tina", local_dir, "metadata.rb")
        self.version = None       # Tag from the 'version' line, if present
        self.depends = []         # cookbook names from 'depends' lines
        self.requirements = {}    # name -> VersionRequirement for pinned deps
        self.parse_metadata()

    def parse_metadata(self):
        """Scan metadata.rb for 'name', 'depends' and 'version' directives.

        Raises IOError if the file cannot be opened (after printing a
        diagnostic) and Exception if multiple 'version' lines are found.
        """
        regex_name = re.compile("name\s+[\'\"](.*?)[\'\"]")
        regex_depends = re.compile("depends\s+[\'\"](.*?)[\'\"]"
                                   "(,\s*[\'\"]([~<>=]+)\s+([\d\.]+)[\'\"])?")
        regex_version = re.compile("version\s+[\'\"](.*?)[\'\"]")
        try:
            # BUG FIX: the original only closed the file on the success
            # path ('else: raw.close()'); 'with' closes it even when the
            # parsing below raises.
            with open(self.filename, "r") as raw:
                for line in raw:
                    # Find the name of the cookbook.
                    matches = regex_name.findall(line)
                    for word in matches:
                        self.cookbook_name = word
                    # Find the list of dependencies.
                    match = regex_depends.match(line)
                    if match:
                        name = match.group(1)
                        self.depends.append(name)
                        if match.group(2):
                            # A constrained dependency, e.g. depends 'x', '>= 1.0'
                            op = match.group(3)
                            version = match.group(4)
                            self.requirements[name] = VersionRequirement(
                                self.cookbook_name, name, op, version)
                    # Find the current version of the cookbook.
                    matches = regex_version.match(line)
                    if matches:
                        if self.version:
                            raise Exception("Metadata file has multiple 'version' "
                                            "sections: '%s'" % self.filename)
                        self.version = Tag(matches.group(1))
        except IOError as e:
            print("Unable to open file to parse it '{0}': "
                  "'{1}'".format(self.filename, e.strerror))
            raise

    def inject_versions(self, tag, versions):
        """Rewrite metadata.rb in place: set the cookbook's version to `tag`
        and pin every dependency to its entry in `versions`.

        Raises Exception when a dependency has no entry in `versions`.
        """
        with open(self.filename, "r") as metadata:
            content = metadata.readlines()
        regex_depends = re.compile("depends\s+[\'\"](.*?)[\'\"]")
        regex_version = re.compile("version\s+[\'\"](.*?)[\'\"]")
        new_content = []
        for line in content:
            version_match = regex_version.match(line)
            if version_match:
                # Replace the old version string with the new tag's.
                line = line.replace(version_match.group(1), tag.version_str())
            depends_match = regex_depends.match(line)
            if depends_match:
                cookbook = depends_match.group(1)
                if cookbook not in versions:
                    raise Exception("Missing version number for cookbook '%s' "
                                    % cookbook)
                version = versions[cookbook].version_str()
                # Rewrite the depends line with an exact-version pin.
                line = re.sub("[\'\"]%s[\'\"].*" % cookbook,
                              "\"%s\", \"= %s\"" % (cookbook, version), line)
            new_content.append(line)
        with open(self.filename, "w") as metadata:
            metadata.write("".join(new_content))
| gpl-3.0 |
hasadna/django | tests/regressiontests/utils/termcolors.py | 138 | 7367 | from django.utils import unittest
from django.utils.termcolors import parse_color_setting, PALETTES, DEFAULT_PALETTE, LIGHT_PALETTE, DARK_PALETTE, NOCOLOR_PALETTE
class TermColorTests(unittest.TestCase):
    """Behavioural tests for django.utils.termcolors.parse_color_setting()."""

    def assertParsed(self, setting, palette=None, **roles):
        """Assert that `setting` parses to the named base palette with the
        given role overrides applied, or to None when no palette is given."""
        if palette is None:
            expected = None
        else:
            expected = dict(PALETTES[palette], **roles)
        self.assertEqual(parse_color_setting(setting), expected)

    def test_empty_string(self):
        self.assertParsed('', DEFAULT_PALETTE)

    def test_simple_palette(self):
        self.assertParsed('light', LIGHT_PALETTE)
        self.assertParsed('dark', DARK_PALETTE)
        self.assertParsed('nocolor')

    def test_fg(self):
        self.assertParsed('error=green', NOCOLOR_PALETTE, ERROR={'fg': 'green'})

    def test_fg_bg(self):
        self.assertParsed('error=green/blue', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue'})

    def test_fg_opts(self):
        self.assertParsed('error=green,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink',)})
        self.assertParsed('error=green,bold,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink', 'bold')})

    def test_fg_bg_opts(self):
        self.assertParsed('error=green/blue,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})
        self.assertParsed('error=green/blue,bold,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink', 'bold')})

    def test_override_palette(self):
        self.assertParsed('light;error=green', LIGHT_PALETTE, ERROR={'fg': 'green'})

    def test_override_nocolor(self):
        self.assertParsed('nocolor;error=green', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green'})

    def test_reverse_override(self):
        # A palette named after a role definition wins outright.
        self.assertParsed('error=green;light', LIGHT_PALETTE)

    def test_multiple_roles(self):
        self.assertParsed('error=green;sql_field=blue', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})

    def test_override_with_multiple_roles(self):
        self.assertParsed('light;error=green;sql_field=blue', LIGHT_PALETTE,
                          ERROR={'fg': 'green'}, SQL_FIELD={'fg': 'blue'})

    def test_empty_definition(self):
        self.assertParsed(';')
        self.assertParsed('light;', LIGHT_PALETTE)
        self.assertParsed(';;;')

    def test_empty_options(self):
        self.assertParsed('error=green,', NOCOLOR_PALETTE, ERROR={'fg': 'green'})
        self.assertParsed('error=green,,,', NOCOLOR_PALETTE, ERROR={'fg': 'green'})
        self.assertParsed('error=green,,blink,,', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink',)})

    def test_bad_palette(self):
        self.assertParsed('unknown')

    def test_bad_role(self):
        self.assertParsed('unknown=')
        self.assertParsed('unknown=green')
        self.assertParsed('unknown=green;sql_field=blue', NOCOLOR_PALETTE,
                          SQL_FIELD={'fg': 'blue'})

    def test_bad_color(self):
        self.assertParsed('error=')
        self.assertParsed('error=;sql_field=blue', NOCOLOR_PALETTE,
                          SQL_FIELD={'fg': 'blue'})
        self.assertParsed('error=unknown')
        self.assertParsed('error=unknown;sql_field=blue', NOCOLOR_PALETTE,
                          SQL_FIELD={'fg': 'blue'})
        self.assertParsed('error=green/unknown', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green'})
        self.assertParsed('error=green/blue/something', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue'})
        self.assertParsed('error=green/blue/something,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue', 'opts': ('blink',)})

    def test_bad_option(self):
        self.assertParsed('error=green,unknown', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green'})
        self.assertParsed('error=green,unknown,blink', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink',)})

    def test_role_case(self):
        self.assertParsed('ERROR=green', NOCOLOR_PALETTE, ERROR={'fg': 'green'})
        self.assertParsed('eRrOr=green', NOCOLOR_PALETTE, ERROR={'fg': 'green'})

    def test_color_case(self):
        self.assertParsed('error=GREEN', NOCOLOR_PALETTE, ERROR={'fg': 'green'})
        self.assertParsed('error=GREEN/BLUE', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue'})
        self.assertParsed('error=gReEn', NOCOLOR_PALETTE, ERROR={'fg': 'green'})
        self.assertParsed('error=gReEn/bLuE', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'bg': 'blue'})

    def test_opts_case(self):
        self.assertParsed('error=green,BLINK', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink',)})
        self.assertParsed('error=green,bLiNk', NOCOLOR_PALETTE,
                          ERROR={'fg': 'green', 'opts': ('blink',)})
| bsd-3-clause |
baoson2211/ardupilot | mk/PX4/Tools/genmsg/src/genmsg/srvs.py | 216 | 3017 | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
ROS Service Description Language Spec
Implements http://ros.org/wiki/srv
"""
import os
import sys
from . names import is_legal_resource_name, is_legal_resource_base_name, package_resource_name, resource_name
class SrvSpec(object):
    """In-memory representation of a parsed ROS .srv service description."""

    def __init__(self, request, response, text, full_name = '', short_name = '', package = ''):
        # Derive package / short name from the full resource name whenever
        # the caller did not supply them explicitly.
        derived_package, derived_short = package_resource_name(full_name)
        self.request = request
        self.response = response
        self.text = text
        self.full_name = full_name
        self.short_name = short_name or derived_short
        self.package = package or derived_package

    def __eq__(self, other):
        # Only another SrvSpec with all fields matching compares equal.
        if not isinstance(other, SrvSpec):
            return False
        return (self.request, self.response, self.text,
                self.full_name, self.short_name, self.package) == \
               (other.request, other.response, other.text,
                other.full_name, other.short_name, other.package)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "SrvSpec[%s, %s]" % (repr(self.request), repr(self.response))
| gpl-3.0 |
nagyistoce/odoo-dev-odoo | openerp/tools/__init__.py | 337 | 1447 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import copy
import win32
import appdirs
from config import config
from misc import *
from convert import *
from translate import *
from graph import graph
from image import *
from amount_to_text import *
from amount_to_text_en import *
from pdf_utils import *
from yaml_import import *
from sql import *
from float_utils import *
from mail import *
from func import *
from debugger import *
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
0Chencc/CTFCrackTools | Lib/json/tests/test_recursion.py | 6 | 3945 | from json.tests import PyTest, CTest
import unittest
from test import test_support
class JSONTestObject:
    # Sentinel class used by the recursion tests below; never instantiated —
    # the class object itself is what the encoders are asked to serialize.
    pass
class TestRecursion(object):
    """Recursion and deep-nesting behaviour of the json package.

    Mixed into TestPyRecursion / TestCRecursion below, which supply
    self.json, self.dumps and self.loads bound to the pure-Python and the
    C-accelerated implementations respectively.
    """
    def test_listrecursion(self):
        x = []
        x.append(x)  # direct self-reference
        try:
            self.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on list recursion")
        x = []
        y = [x]
        x.append(y)  # mutual (alternating) self-reference
        try:
            self.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on alternating list recursion")
        y = []
        x = [y, y]
        # ensure that the marker is cleared
        self.dumps(x)

    def test_dictrecursion(self):
        x = {}
        x["test"] = x  # dict referencing itself
        try:
            self.dumps(x)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on dict recursion")
        x = {}
        y = {"a": x, "b": x}
        # ensure that the marker is cleared
        self.dumps(x)

    def test_defaultrecursion(self):
        class RecursiveJSONEncoder(self.json.JSONEncoder):
            recurse = False
            def default(self, o):
                if o is JSONTestObject:
                    if self.recurse:
                        return [JSONTestObject]
                    else:
                        return 'JSONTestObject'
                # BUG FIX: the original read `pyjson.JSONEncoder.default(o)`,
                # which raises NameError (`pyjson` is never defined) and is
                # missing the `self` argument.  Defer to the base class,
                # whose default() raises TypeError for unserializable input.
                return super(RecursiveJSONEncoder, self).default(o)
        enc = RecursiveJSONEncoder()
        self.assertEqual(enc.encode(JSONTestObject), '"JSONTestObject"')
        enc.recurse = True
        try:
            enc.encode(JSONTestObject)
        except ValueError:
            pass
        else:
            self.fail("didn't raise ValueError on default recursion")

    @unittest.skipIf(test_support.is_jython, "See http://bugs.jython.org/issue2536.")
    def test_highly_nested_objects_decoding(self):
        # test that loading highly-nested objects doesn't segfault when C
        # accelerations are used. See #12017
        # str
        with self.assertRaises(RuntimeError):
            self.loads('{"a":' * 100000 + '1' + '}' * 100000)
        with self.assertRaises(RuntimeError):
            self.loads('{"a":' * 100000 + '[1]' + '}' * 100000)
        with self.assertRaises(RuntimeError):
            self.loads('[' * 100000 + '1' + ']' * 100000)
        # unicode
        with self.assertRaises(RuntimeError):
            self.loads(u'{"a":' * 100000 + u'1' + u'}' * 100000)
        with self.assertRaises(RuntimeError):
            self.loads(u'{"a":' * 100000 + u'[1]' + u'}' * 100000)
        with self.assertRaises(RuntimeError):
            self.loads(u'[' * 100000 + u'1' + u']' * 100000)

    @unittest.skipIf(test_support.is_jython, "See http://bugs.jython.org/issue2536.")
    def test_highly_nested_objects_encoding(self):
        # See #12051
        l, d = [], {}
        for x in xrange(100000):
            l, d = [l], {'k':d}
        with self.assertRaises(RuntimeError):
            self.dumps(l)
        with self.assertRaises(RuntimeError):
            self.dumps(d)

    @unittest.skipIf(test_support.is_jython, "See http://bugs.jython.org/issue2536.")
    def test_endless_recursion(self):
        # See #12051
        class EndlessJSONEncoder(self.json.JSONEncoder):
            def default(self, o):
                """If check_circular is False, this will keep adding another list."""
                return [o]
        # NB: Jython interacts with overflows differently than CPython;
        # given that the default function normally raises a ValueError upon
        # an overflow, this seems reasonable.
        with self.assertRaises(Exception) as cm:
            EndlessJSONEncoder(check_circular=False).encode(5j)
        self.assertIn(type(cm.exception), [RuntimeError, ValueError])
# Concrete test classes: the same cases run against the pure-Python and the
# C-accelerated json implementations via the PyTest / CTest mixins.
class TestPyRecursion(TestRecursion, PyTest): pass
class TestCRecursion(TestRecursion, CTest): pass
| gpl-3.0 |
Mobytes/django-tenant-schemas | dts_test_project/dts_test_project/settings.py | 6 | 3257 | """
Django settings for dts_test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'cl1)b#c&xmm36z3e(quna-vb@ab#&gpjtdjtpyzh!qn%bc^xxn'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
SHARED_APPS = (
'tenant_schemas', # mandatory
'customers', # you must list the app where your tenant model resides in
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
)
TENANT_APPS = (
'dts_test_app',
)
TENANT_MODEL = "customers.Client" # app.Model
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
import django
if django.VERSION >= (1, 7, 0):
INSTALLED_APPS = list(set(TENANT_APPS + SHARED_APPS))
else:
INSTALLED_APPS = TENANT_APPS + SHARED_APPS + ('tenant_schemas',)
# NOTE(review): this MIDDLEWARE_CLASSES assignment is dead code — the name
# is re-assigned later in this settings module, so the tuple below is never
# the effective value. TODO: merge the two definitions into one.
MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'dts_test_project.urls'
WSGI_APPLICATION = 'dts_test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'tenant_schemas.postgresql_backend',
'NAME': 'dts_test_project',
'USER': 'postgres',
'PASSWORD': 'root',
'HOST': 'localhost',
'PORT': '',
}
}
DATABASE_ROUTERS = (
'tenant_schemas.routers.TenantSyncRouter',
)
MIDDLEWARE_CLASSES = (
'tenant_tutorial.middleware.TenantTutorialMiddleware',
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS = (
'django.core.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.core.context_processors.debug',
'django.core.context_processors.media',
'django.core.context_processors.static',
'django.contrib.messages.context_processors.messages',
)
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
| mit |
CYBAI/servo | tests/wpt/web-platform-tests/css/tools/apiclient/apiclient/apiclient.py | 79 | 10723 | # coding=utf-8
#
# Copyright © 2013 Hewlett-Packard Development Company, L.P.
#
# This work is distributed under the W3C® Software License [1]
# in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# [1] http://www.w3.org/Consortium/Legal/2002/copyright-software-20021231
#
# Process URI templates per http://tools.ietf.org/html/rfc6570
import urllib2
import urlparse
import json
import base64
import contextlib
import collections
import UserString
import uritemplate
class MimeType(UserString.MutableString):
    """Mutable MIME-type string of the form "type/subtype+structure"
    (e.g. "application/home+json"), keeping the string value and its
    parsed components in sync whenever either is modified.
    """
    def __init__(self, mimeType):
        UserString.MutableString.__init__(self, mimeType)
        self._type = None
        self._subtype = None
        self._structure = None
        # Split "type/rest"; without a '/' the whole value is the type.
        slashIndex = mimeType.find('/')
        if (-1 < slashIndex):
            self._type = mimeType[:slashIndex]
            mimeType = mimeType[slashIndex + 1:]
            # Split "subtype+structure"; without a '+' the rest is the
            # structure alone.
            plusIndex = mimeType.find('+')
            if (-1 < plusIndex):
                self._subtype = mimeType[:plusIndex]
                self._structure = mimeType[plusIndex + 1:]
            else:
                self._structure = mimeType
        else:
            self._type = mimeType
    def _update(self):
        # Rebuild the underlying string value after a component change.
        if (self._structure):
            if (self._subtype):
                self.data = self._type + '/' + self._subtype + '+' + self._structure
            else:
                self.data = self._type + '/' + self._structure
        else:
            self.data = self._type
    def set(self, type, structure, subtype = None):
        # Replace all components at once.
        self._type = type
        self._subtype = subtype
        self._structure = structure
        self._update()
    @property
    def type(self):
        return self._type
    @type.setter
    def type(self, value):
        self._type = value
        self._update()
    @property
    def subtype(self):
        return self._subtype
    @subtype.setter
    def subtype(self, value):
        self._subtype = value
        self._update()
    @property
    def structure(self):
        return self._structure
    @structure.setter
    def structure(self, value):
        self._structure = value
        self._update()
class APIResponse(object):
    """Wrapper around an HTTP response: status, headers and decoded body.

    The body is read only for status 200 and, when the Content-Type is a
    JSON structure, parsed in place (preserving key order).
    """
    def __init__(self, response):
        self.status = response.getcode() if (response) else 0
        self.headers = response.info() if (response) else {}
        self.data = response.read() if (200 == self.status) else None
        if (self.data and
            (('json' == self.contentType.structure) or ('json-home' == self.contentType.structure))):
            try:
                self.data = json.loads(self.data, object_pairs_hook = collections.OrderedDict)
            except Exception:
                # BUG FIX: was a bare 'except:', which also swallowed
                # KeyboardInterrupt/SystemExit. Keep the raw bytes when the
                # payload is not valid JSON.
                pass
    @property
    def contentType(self):
        contentType = self.headers.get('content-type') if (self.headers) else None
        # BUG FIX: the original passed None straight into MimeType() when
        # the header was absent, which crashed on None.find(); fall back to
        # an empty MIME type instead.
        if (not contentType):
            return MimeType('')
        return MimeType(contentType.split(';')[0]) if (';' in contentType) else MimeType(contentType)
    @property
    def encoding(self):
        # Charset from "Content-Type: x/y; charset=...", defaulting to utf-8.
        contentType = self.headers.get('content-type') if (self.headers) else None
        if (contentType and (';' in contentType)):
            encoding = contentType.split(';', 1)[1]
            if ('=' in encoding):
                return encoding.split('=', 1)[1].strip()
        return 'utf-8'
class APIHints(object):
    """Protocol hints for one resource, per the json-home 'hints' object."""

    def __init__(self, data):
        # Allowed HTTP verbs, upper-cased; GET is assumed when unspecified.
        self.httpMethods = ([method.upper() for method in data['allow'] if method]
                            if ('allow' in data) else ['GET'])
        # Representation formats per verb.
        self.formats = {}
        declared = ([MimeType(fmt) for fmt in data['formats']]
                    if ('formats' in data) else [])
        if declared:
            for verb in ('GET', 'PUT'):
                if verb in self.httpMethods:
                    self.formats[verb] = declared
        if (('PATCH' in self.httpMethods) and ('accept-patch' in data)):
            self.formats['PATCH'] = [MimeType(fmt) for fmt in data['accept-patch']]
        if (('POST' in self.httpMethods) and ('accept-post' in data)):
            self.formats['POST'] = [MimeType(fmt) for fmt in data['accept-post']]
        # TODO: ranges from 'accept-ranges'; preferece tokens from 'accept-prefer';
        # preconditions from 'precondition-req'; auth from 'auth-req'
        self.ranges = None
        self.preferences = None
        self.preconditions = None
        self.auth = None
        self.docs = data.get('docs')
        self.status = data.get('status')
class APIResource(object):
    """One resource from the home document: a URI template, its variables
    and optional protocol hints."""

    def __init__(self, baseURI, uri, variables = None, hints = None):
        try:
            self.template = uritemplate.URITemplate(urlparse.urljoin(baseURI, uri))
            if variables:
                # Resolve every declared variable value against the base URI.
                self.variables = {name: urlparse.urljoin(baseURI, variables[name])
                                  for name in variables}
            else:
                # No values supplied: default every template variable to ''.
                self.variables = {name: '' for name in self.template.variables}
            self.hints = hints
        except Exception:
            # Malformed template: fall back to an empty, hint-less resource.
            self.template = uritemplate.URITemplate('')
            self.variables = {}
            self.hints = None
class APIClient(object):
    """Client for an HTTP API described by a JSON home document.

    Python 2 style code (urllib2/urlparse). Resources are keyed internally
    by their absolute URI ("apiKey"), obtained by joining the resource name
    against baseURI.
    """
    def __init__(self, baseURI, version = None, username = None, password = None):
        self._baseURI = baseURI
        self.defaultVersion = version
        self.defaultAccept = 'application/json'
        self.username = username
        self.password = password
        self._resources = {}    # apiKey -> APIResource
        self._versions = {}     # apiKey -> per-resource version override
        self._accepts = {}      # apiKey -> per-resource accept override
        self._loadHome()

    @property
    def baseURI(self):
        return self._baseURI

    def _loadHome(self):
        """Fetch the home document and populate the resource table."""
        home = self._callURI('GET', self.baseURI, 'application/home+json, application/json-home, application/json')
        if (home):
            if ('application/json' == home.contentType):
                # Plain JSON: a flat mapping of name -> URI.
                for name in home.data:
                    apiKey = urlparse.urljoin(self.baseURI, name)
                    self._resources[apiKey] = APIResource(self.baseURI, home.data[name])
            elif (('application/home+json' == home.contentType) or
                  ('application/json-home' == home.contentType)):
                # json-home format: resources carry href/href-template,
                # variable defaults and hints.
                resources = home.data.get('resources')
                if (resources):
                    for name in resources:
                        apiKey = urlparse.urljoin(self.baseURI, name)
                        data = resources[name]
                        uri = data['href'] if ('href' in data) else data.get('href-template')
                        variables = data.get('href-vars')
                        hints = APIHints(data['hints']) if ('hints' in data) else None
                        self._resources[apiKey] = APIResource(self.baseURI, uri, variables, hints)

    def relativeURI(self, uri):
        """Return uri relative to baseURI when it lies under it, else unchanged."""
        if (uri.startswith(self.baseURI)):
            relative = uri[len(self.baseURI):]
            if (relative.startswith('/') and not self.baseURI.endswith('/')):
                relative = relative[1:]
            return relative
        return uri

    @property
    def resourceNames(self):
        return [self.relativeURI(apiKey) for apiKey in self._resources]

    def resource(self, name):
        return self._resources.get(urlparse.urljoin(self.baseURI, name))

    def addResource(self, name, uri):
        resource = APIResource(self.baseURI, uri)
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._resources[apiKey] = resource

    def _accept(self, resource):
        """Build an Accept header value for the given resource key.

        BUG FIX: the body previously referenced an undefined name 'api'
        (guaranteed NameError when called); it now consistently uses the
        'resource' parameter, which is an apiKey into self._versions.
        """
        version = None
        if (resource and (resource in self._versions)):
            version = self._versions[resource]
        if (not version):
            version = self.defaultVersion
        return ('application/' + version + '+json, application/json') if (version) else 'application/json'

    def _callURI(self, method, uri, accept, payload = None, payloadType = None):
        """Perform a single HTTP request; return APIResponse or None on error."""
        try:
            request = urllib2.Request(uri, data = payload, headers = { 'Accept' : accept })
            if (self.username and self.password):
                request.add_header('Authorization', b'Basic ' + base64.b64encode(self.username + b':' + self.password))
            if (payload and payloadType):
                request.add_header('Content-Type', payloadType)
            request.get_method = lambda: method     # force the HTTP verb
            with contextlib.closing(urllib2.urlopen(request)) as response:
                return APIResponse(response)
        except Exception as e:
            # Best-effort client: network/HTTP failures yield None.
            pass
        return None

    def _call(self, method, name, arguments, payload = None, payloadType = None):
        """Expand the named resource's template and issue the request."""
        apiKey = urlparse.urljoin(self.baseURI, name)
        resource = self._resources.get(apiKey)
        if (resource):
            uri = resource.template.expand(**arguments)
            if (uri):
                version = self._versions.get(apiKey) if (apiKey in self._versions) else self.defaultVersion
                # BUG FIX: self._accepts is a dict; it was previously
                # *called* (self._accepts(apiKey)) which raised TypeError
                # whenever a per-resource accept type had been set.
                accept = MimeType(self._accepts[apiKey] if (apiKey in self._accepts) else self.defaultAccept)
                if (version):
                    accept.subtype = version
                return self._callURI(method, uri, accept, payload, payloadType)
        return None

    def setVersion(self, name, version):
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._versions[apiKey] = version

    def setAccept(self, name, mimeType):
        apiKey = urlparse.urljoin(self.baseURI, name)
        self._accepts[apiKey] = mimeType

    def get(self, name, **kwargs):
        return self._call('GET', name, kwargs)

    def post(self, name, payload = None, payloadType = None, **kwargs):
        return self._call('POST', name, kwargs, payload, payloadType)

    def postForm(self, name, payload = None, **kwargs):
        return self._call('POST', name, kwargs, urllib.urlencode(payload), 'application/x-www-form-urlencoded')

    def postJSON(self, name, payload = None, **kwargs):
        return self._call('POST', name, kwargs, json.dumps(payload), 'application/json')

    def put(self, name, payload = None, payloadType = None, **kwargs):
        return self._call('PUT', name, kwargs, payload, payloadType)

    def patch(self, name, patch = None, **kwargs):
        return self._call('PATCH', name, kwargs, json.dumps(patch), 'application/json-patch')

    def delete(self, name, **kwargs):
        return self._call('DELETE', name, kwargs)
| mpl-2.0 |
akhan7/servo | tests/wpt/web-platform-tests/conformance-checkers/tools/url.py | 125 | 23557 | # -*- coding: utf-8 -*-
import os
# Root of the conformance-checkers directory tree (two levels up from here).
ccdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# based on https://github.com/w3c/web-platform-tests/blob/275544eab54a0d0c7f74ccc2baae9711293d8908/url/urltestdata.txt
# URLs the checker must reject, keyed by a short test name that becomes part
# of the generated fixture filename.
# FIX: removed duplicate dict keys (scheme-trailing-tab/newline/cr appeared
# twice with identical values; duplicates were silently discarded anyway).
invalid = {
    "scheme-trailing-tab": "a:\tfoo.com",
    "scheme-trailing-newline": "a:\nfoo.com",
    "scheme-trailing-cr": "a:\rfoo.com",
    "scheme-trailing-space": "a: foo.com",
    "scheme-http-no-slash": "http:foo.com",
    "scheme-http-no-slash-colon": "http::@c:29",
    "scheme-http-no-slash-square-bracket": "http:[61:27]/:foo",
    "scheme-http-backslash": "http:\\\\foo.com\\",
    "scheme-http-single-slash": "http:/example.com/",
    "scheme-ftp-single-slash": "ftp:/example.com/",
    "scheme-https-single-slash": "https:/example.com/",
    "scheme-data-single-slash": "data:/example.com/",
    "scheme-ftp-no-slash": "ftp:example.com/",
    "scheme-https-no-slash": "https:example.com/",
    "scheme-javascript-no-slash-malformed": "javascript:example.com/",
    "userinfo-password-bad-chars": "http://&a:foo(b]c@d:2/",
    "userinfo-username-contains-at-sign": "http://::@c@d:2",
    "userinfo-backslash": "http://a\\b:c\\d@foo.com",
    "host-space": "http://example .org",
    "host-tab": "http://example\t.org",
    "host-newline": "http://example.\norg",
    "host-cr": "http://example.\rorg",
    "host-square-brackets-port-contains-colon": "http://[1::2]:3:4",
    "port-single-letter": "http://f:b/c",
    "port-multiple-letters": "http://f:fifty-two/c",
    "port-leading-colon": "http://2001::1",
    "port-leading-colon-bracket-colon": "http://2001::1]:80",
    "path-leading-backslash-at-sign": "http://foo.com/\\@",
    "path-leading-colon-backslash": ":\\",
    "path-leading-colon-chars-backslash": ":foo.com\\",
    "path-relative-square-brackets": "[61:24:74]:98",
    "fragment-contains-hash": "http://foo/path#f#g",
    "path-percent-encoded-malformed": "http://example.com/foo/%2e%2",
    "path-bare-percent-sign": "http://example.com/foo%",
    "path-u0091": u"http://example.com/foo\u0091".encode('utf-8'),
    "userinfo-username-contains-pile-of-poo": "http://💩:foo@example.com",
    "userinfo-password-contains-pile-of-poo": "http://foo:💩@example.com",
    "host-hostname-in-brackets": "http://[www.google.com]/",
    "host-empty": "http://",
    "host-empty-with-userinfo": "http://user:pass@/",
    "port-leading-dash": "http://foo:-80/",
    "host-empty-userinfo-empty": "http://@/www.example.com",
    "host-invalid-unicode": u"http://\ufdd0zyx.com".encode('utf-8'),
    "host-invalid-unicode-percent-encoded": "http://%ef%b7%90zyx.com",
    "host-double-percent-encoded": u"http://\uff05\uff14\uff11.com".encode('utf-8'),
    "host-double-percent-encoded-percent-encoded": "http://%ef%bc%85%ef%bc%94%ef%bc%91.com",
    "host-u0000-percent-encoded": u"http://\uff05\uff10\uff10.com".encode('utf-8'),
    "host-u0000-percent-encoded-percent-encoded": "http://%ef%bc%85%ef%bc%90%ef%bc%90.com",
}
# URLs invalid specifically where an *absolute* URL is required; starts as a
# copy of the generic invalid set and is extended below.
invalid_absolute = invalid.copy()
# URLs containing "invalid URL code points" (raw spaces, tabs, CR/LF,
# backslashes); these are invalid in both relative and absolute contexts.
invalid_url_code_points = {
    "fragment-backslash": "#\\",
    "fragment-leading-space": "http://f:21/b# e",
    "path-contains-space": "/a/ /c",
    "path-leading-space": "http://f:21/ b",
    "path-tab": "http://example.com/foo\tbar",
    "path-trailing-space": "http://f:21/b ?",
    "port-cr": "http://f:\r/c",
    "port-newline": "http://f:\n/c",
    "port-space": "http://f: /c",
    "port-tab": "http://f:\t/c",
    "query-leading-space": "http://f:21/b? d",
    "query-trailing-space": "http://f:21/b?d #",
}
invalid.update(invalid_url_code_points)
invalid_absolute.update(invalid_url_code_points)
# Absolute URLs the checker must accept.
# FIX: removed the duplicated "port-00000000000000000000080" entry (the
# second, identical occurrence was silently discarded by the dict literal).
valid_absolute = {
    "scheme-private": "a:foo.com",
    "scheme-private-slash": "foo:/",
    "scheme-private-slash-slash": "foo://",
    "scheme-private-path": "foo:/bar.com/",
    "scheme-private-path-leading-slashes-only": "foo://///////",
    "scheme-private-path-leading-slashes-chars": "foo://///////bar.com/",
    "scheme-private-path-leading-slashes-colon-slashes": "foo:////://///",
    "scheme-private-single-letter": "c:/foo",
    "scheme-private-single-slash": "madeupscheme:/example.com/",
    "scheme-file-single-slash": "file:/example.com/",
    "scheme-ftps-single-slash": "ftps:/example.com/",
    "scheme-gopher-single-slash": "gopher:/example.com/",
    "scheme-ws-single-slash": "ws:/example.com/",
    "scheme-wss-single-slash": "wss:/example.com/",
    "scheme-javascript-single-slash": "javascript:/example.com/",
    "scheme-mailto-single-slash": "mailto:/example.com/",
    "scheme-private-no-slash": "madeupscheme:example.com/",
    "scheme-ftps-no-slash": "ftps:example.com/",
    "scheme-gopher-no-slash": "gopher:example.com/",
    "scheme-wss-no-slash": "wss:example.com/",
    "scheme-mailto-no-slash": "mailto:example.com/",
    "scheme-data-no-slash": "data:text/plain,foo",
    "userinfo": "http://user:pass@foo:21/bar;par?b#c",
    "host-ipv6": "http://[2001::1]",
    "host-ipv6-port": "http://[2001::1]:80",
    "port-none-but-colon": "http://f:/c",
    "port-0": "http://f:0/c",
    "port-00000000000000": "http://f:00000000000000/c",
    "port-00000000000000000000080": "http://f:00000000000000000000080/c",
    "userinfo-host-port-path": "http://a:b@c:29/d",
    "userinfo-username-non-alpha": "http://foo.com:b@d/",
    "query-contains-question-mark": "http://foo/abcd?efgh?ijkl",
    "fragment-contains-question-mark": "http://foo/abcd#foo?bar",
    "path-percent-encoded-dot": "http://example.com/foo/%2e",
    "path-percent-encoded-space": "http://example.com/%20foo",
    "path-non-ascii": u"http://example.com/\u00C2\u00A9zbar".encode('utf-8'),
    "path-percent-encoded-multiple": "http://example.com/foo%41%7a",
    "path-percent-encoded-u0091": "http://example.com/foo%91",
    "path-percent-encoded-u0000": "http://example.com/foo%00",
    "path-percent-encoded-mixed-case": "http://example.com/%3A%3a%3C%3c",
    "path-unicode-han": u"http://example.com/\u4F60\u597D\u4F60\u597D".encode('utf-8'),
    "path-uFEFF": u"http://example.com/\uFEFF/foo".encode('utf-8'),
    "path-u202E-u202D": u"http://example.com/\u202E/foo/\u202D/bar".encode('utf-8'),
    "host-is-pile-of-poo": "http://💩",
    "path-contains-pile-of-poo": "http://example.com/foo/💩",
    "query-contains-pile-of-poo": "http://example.com/foo?💩",
    "fragment-contains-pile-of-poo": "http://example.com/foo#💩",
    "host-192.0x00A80001": "http://192.0x00A80001",
    "userinfo-username-contains-percent-encoded": "http://%25DOMAIN:foobar@foodomain.com",
    "userinfo-empty": "http://@www.example.com",
    "userinfo-user-empty": "http://:b@www.example.com",
    "userinfo-password-empty": "http://a:@www.example.com",
    "host-exotic-whitespace": u"http://GOO\u200b\u2060\ufeffgoo.com".encode('utf-8'),
    "host-exotic-dot": u"http://www.foo\u3002bar.com".encode('utf-8'),
    "host-fullwidth": u"http://\uff27\uff4f.com".encode('utf-8'),
    "host-idn-unicode-han": u"http://\u4f60\u597d\u4f60\u597d".encode('utf-8'),
    "host-IP-address-broken": "http://192.168.0.257/",
}
# All URLs the checker accepts (absolute plus relative forms).
valid = valid_absolute.copy()
# Relative URL forms: valid in ordinary URL attributes...
valid_relative = {
    "scheme-schemeless-relative": "//foo/bar",
    "path-slash-only-relative": "/",
    "path-simple-relative": "/a/b/c",
    "path-percent-encoded-slash-relative": "/a%2fc",
    "path-percent-encoded-slash-plus-slashes-relative": "/a/%2f/c",
    "query-empty-no-path-relative": "?",
    "fragment-empty-hash-only-no-path-relative": "#",
    "fragment-slash-relative": "#/",
    "fragment-semicolon-question-mark-relative": "#;?",
    "fragment-non-ascii-relative": u"#\u03B2".encode('utf-8'),
}
valid.update(valid_relative)
# ...but invalid wherever an absolute URL is required (e.g. itemtype).
invalid_absolute.update(valid_relative)
# Relative URLs whose first path segment starts with a colon or similar.
valid_relative_colon_dot = {
    "scheme-none-relative": "foo.com",
    "path-colon-relative": ":",
    "path-leading-colon-letter-relative": ":a",
    "path-leading-colon-chars-relative": ":foo.com",
    "path-leading-colon-slash-relative": ":/",
    "path-leading-colon-hash-relative": ":#",
    "path-leading-colon-number-relative": ":23",
    "path-slash-colon-number-relative": "/:23",
    "path-leading-colon-colon-relative": "::",
    "path-colon-colon-number-relative": "::23",
    "path-starts-with-pile-of-poo": "💩http://foo",
    # NOTE(review): the key below also exists in valid_absolute with a
    # different URL value, so this entry overwrites it in `valid` - confirm
    # the duplicated test name is intentional.
    "path-contains-pile-of-poo": "http💩//:foo",
}
valid.update(valid_relative_colon_dot)
# file: URL forms the checker rejects (backslashes, drive-letter variants).
invalid_file = {
    "scheme-file-backslash": "file:c:\\foo\\bar.html",
    "scheme-file-single-slash-c-bar": "file:/C|/foo/bar",
    "scheme-file-triple-slash-c-bar": "file:///C|/foo/bar",
}
invalid.update(invalid_file)
# file: URL forms the checker accepts.
valid_file = {
    "scheme-file-uppercase": "File://foo/bar.html",
    "scheme-file-slash-slash-c-bar": "file://C|/foo/bar",
    "scheme-file-slash-slash-abc-bar": "file://abc|/foo/bar",
    "scheme-file-host-included": "file://server/foo/bar",
    "scheme-file-host-empty": "file:///foo/bar.txt",
    "scheme-file-scheme-only": "file:",
    "scheme-file-slash-only": "file:/",
    "scheme-file-slash-slash-only": "file://",
    "scheme-file-slash-slash-slash-only": "file:///",
    "scheme-file-no-slash": "file:test",
}
valid.update(valid_file)
valid_absolute.update(valid_file)
# URLs that are valid but should make the checker emit a warning.
warnings = {
    "scheme-data-contains-fragment": "data:text/html,test#test",
}
# Every "element attribute" combination that takes a URL value; each pair is
# split on the space when the fixture files are generated.
element_attribute_pairs = [
    "a href",
    # "a ping", space-separated list of URLs; tested elsewhere
    "area href",
    # "area ping", space-separated list of URLs; tested elsewhere
    "audio src",
    "base href",
    "blockquote cite",
    "button formaction",
    "del cite",
    "embed src",
    "form action",
    "html manifest",
    "iframe src",
    "img src",  # srcset is tested elsewhere
    "input formaction",  # type=submit, type=image
    "input src",  # type=image
    "input value",  # type=url
    "ins cite",
    "link href",
    #"menuitem icon", # skip until parser is updated
    "object data",
    "q cite",
    "script src",
    "source src",
    "track src",
    "video poster",
    "video src",
]
# Common document prologue for every generated fixture.
template = "<!DOCTYPE html>\n<meta charset=utf-8>\n"
def write_novalid_files():
    """Emit one *-novalid.html fixture per (element attribute, invalid URL).

    Each fixture embeds the bad URL in the given attribute and is expected
    to be rejected by the conformance checker. Files are opened in 'wb'
    (Python 2 str output); output bytes are part of the test contract.
    """
    for el, attr in (pair.split() for pair in element_attribute_pairs):
        for desc, url in invalid.items():
            if ("area" == el):
                # <area> must live inside a named <map> to be conforming.
                f = open(os.path.join(ccdir, "html/elements/area/href/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid href: %s</title>\n' % desc)
                f.write('<map name=foo><%s %s="%s" alt></map>\n' % (el, attr, url))
                f.close()
            elif ("base" == el or "embed" == el):
                # Void elements: no closing tag.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb')
                f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("html" == el):
                # manifest sits on <html> itself, so build the whole document.
                f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-novalid.html" % desc), 'wb')
                f.write('<!DOCTYPE html>\n')
                f.write('<html manifest="%s">\n' % url)
                f.write('<meta charset=utf-8>\n')
                f.write('<title>invalid manifest: %s</title>\n' % desc)
                f.write('</html>\n')
                f.close()
            elif ("img" == el):
                # <img> needs an alt attribute to be otherwise conforming.
                f = open(os.path.join(ccdir, "html/elements/img/src/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid src: %s</title>\n' % desc)
                f.write('<img src="%s" alt>\n' % url)
                f.close()
            elif ("input" == el and "src" == attr):
                f = open(os.path.join(ccdir, "html/elements/input/type-image-src/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid src: %s</title>\n' % desc)
                f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("input" == el and "formaction" == attr):
                # formaction applies to both submit and image inputs.
                f = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid formaction: %s</title>\n' % desc)
                f.write('<%s type=submit %s="%s">\n' % (el, attr, url))
                f.close()
                f = open(os.path.join(ccdir, "html/elements/input/type-image-formaction/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid formaction: %s</title>\n' % desc)
                f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("input" == el and "value" == attr):
                f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid value attribute: %s</title>\n' % desc)
                f.write('<%s type=url %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("link" == el):
                # <link> needs a rel attribute to be otherwise conforming.
                f = open(os.path.join(ccdir, "html/elements/link/href/%s-novalid.html" % desc), 'wb')
                f.write(template + '<title>invalid href: %s</title>\n' % desc)
                f.write('<link href="%s" rel=help>\n' % url)
                f.close()
            elif ("source" == el or "track" == el):
                # These are only conforming inside a media element.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb')
                f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc))
                f.write('<video><%s %s="%s"></video>\n' % (el, attr, url))
                f.close()
            else:
                # Generic container element with a closing tag.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-novalid.html" % (el, attr, desc)), 'wb')
                f.write(template + '<title>invalid %s: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s"></%s>\n' % (el, attr, url, el))
                f.close()
    # itemid accepts any URL; itemtype requires an absolute URL.
    for desc, url in invalid.items():
        f = open(os.path.join(ccdir, "html/microdata/itemid/%s-novalid.html" % desc), 'wb')
        f.write(template + '<title>invalid itemid: %s</title>\n' % desc)
        f.write('<div itemid="%s" itemtype="http://foo" itemscope></div>\n' % url)
        f.close()
    for desc, url in invalid_absolute.items():
        f = open(os.path.join(ccdir, "html/microdata/itemtype/%s-novalid.html" % desc), 'wb')
        f.write(template + '<title>invalid itemtype: %s</title>\n' % desc)
        f.write('<div itemtype="%s" itemscope></div>\n' % url)
        f.close()
        # NOTE(review): this re-writes input type-url-value files already
        # produced by the element loop above (but from invalid_absolute
        # rather than invalid) - confirm the overwrite is intentional.
        f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-novalid.html" % desc), 'wb')
        f.write(template + '<title>invalid value attribute: %s</title>\n' % desc)
        f.write('<input type=url value="%s">\n' % url)
        f.close()
def write_haswarn_files():
    """Emit one *-haswarn.html fixture per (element attribute, warning URL).

    Each fixture embeds a URL that is valid but should make the checker
    emit a warning. Mirrors write_novalid_files' per-element branches.

    FIX: the generic (else) branch formatted its <title> with (url, desc)
    instead of (attr, desc), unlike every other branch; titles for generic
    elements now correctly name the attribute.
    """
    for el, attr in (pair.split() for pair in element_attribute_pairs):
        for desc, url in warnings.items():
            if ("area" == el):
                # <area> must live inside a named <map> to be conforming.
                f = open(os.path.join(ccdir, "html/elements/area/href/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<map name=foo><%s %s="%s" alt></map>\n' % (el, attr, url))
                f.close()
            elif ("base" == el or "embed" == el):
                # Void elements: no closing tag.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("html" == el):
                # manifest sits on <html> itself, so build the whole document.
                f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-haswarn.html" % desc), 'wb')
                f.write('<!DOCTYPE html>\n')
                f.write('<html manifest="%s">\n' % url)
                f.write('<meta charset=utf-8>\n')
                f.write('<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('</html>\n')
                f.close()
            elif ("img" == el):
                f = open(os.path.join(ccdir, "html/elements/img/src/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s" alt>\n' % (el, attr, url))
                f.close()
            elif ("input" == el and "src" == attr):
                f = open(os.path.join(ccdir, "html/elements/input/type-image-src/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("input" == el and "formaction" == attr):
                # formaction applies to both submit and image inputs.
                f = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s type=submit %s="%s">\n' % (el, attr, url))
                f.close()
                f = open(os.path.join(ccdir, "html/elements/input/type-image-formaction/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s type=image alt="foo" %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("input" == el and "value" == attr):
                f = open(os.path.join(ccdir, "html/elements/input/type-url-value/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s type=url %s="%s">\n' % (el, attr, url))
                f.close()
            elif ("link" == el):
                # <link> needs a rel attribute to be otherwise conforming.
                f = open(os.path.join(ccdir, "html/elements/link/href/%s-haswarn.html" % desc), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s" rel=help>\n' % (el, attr, url))
                f.close()
            elif ("source" == el or "track" == el):
                # Only conforming inside a media element.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb')
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<video><%s %s="%s"></video>\n' % (el, attr, url))
                f.close()
            else:
                # Generic container element with a closing tag.
                f = open(os.path.join(ccdir, "html/elements/%s/%s/%s-haswarn.html" % (el, attr, desc)), 'wb')
                # was: % (url, desc) - inconsistent with all other branches
                f.write(template + '<title>%s warning: %s</title>\n' % (attr, desc))
                f.write('<%s %s="%s"></%s>\n' % (el, attr, url, el))
                f.close()
    for desc, url in warnings.items():
        f = open(os.path.join(ccdir, "html/microdata/itemtype-%s-haswarn.html" % desc), 'wb')
        f.write(template + '<title>warning: %s</title>\n' % desc)
        f.write('<div itemtype="%s" itemscope></div>\n' % url)
        f.close()
        f = open(os.path.join(ccdir, "html/microdata/itemid-%s-haswarn.html" % desc), 'wb')
        f.write(template + '<title>warning: %s</title>\n' % desc)
        f.write('<div itemid="%s" itemtype="http://foo" itemscope></div>\n' % url)
        f.close()
def write_isvalid_files():
    """Emit *-isvalid.html fixtures exercising every valid URL.

    Most element/attribute pairs get a single file containing all valid
    URLs. <base href> and <html manifest> may appear only once per
    document, so they get one file per URL (handled after the main loop).
    """
    for el, attr in (pair.split() for pair in element_attribute_pairs):
        if ("base" == el):
            # Handled separately below (one file per URL).
            continue
        if ("html" == el):
            # Handled separately below (one file per URL).
            continue
        elif ("input" == el and "value" == attr):
            # Handled at the end with the valid_absolute set.
            continue
        elif ("input" == el and "formaction" == attr):
            # Two files written in parallel: submit and image variants.
            fs = open(os.path.join(ccdir, "html/elements/input/type-submit-formaction-isvalid.html"), 'wb')
            fs.write(template + '<title>valid formaction</title>\n')
            fi = open(os.path.join(ccdir, "html/elements/input/type-image-formaction-isvalid.html"), 'wb')
            fi.write(template + '<title>valid formaction</title>\n')
        elif ("input" == el and "src" == attr):
            f = open(os.path.join(ccdir, "html/elements/input/type-image-src-isvalid.html"), 'wb')
            f.write(template + '<title>valid src</title>\n')
        else:
            f = open(os.path.join(ccdir, "html/elements/%s/%s-isvalid.html" % (el, attr)), 'wb')
            f.write(template + '<title>valid %s</title>\n' % attr)
        for desc, url in valid.items():
            if ("area" == el):
                f.write('<map name=foo><%s %s="%s" alt></map><!-- %s -->\n' % (el, attr, url, desc))
            elif ("embed" == el):
                f.write('<%s %s="%s"><!-- %s -->\n' % (el, attr, url, desc))
            elif ("img" == el):
                f.write('<%s %s="%s" alt><!-- %s -->\n' % (el, attr, url, desc))
            elif ("input" == el and "src" == attr):
                f.write('<%s type=image alt="foo" %s="%s"><!-- %s -->\n' % (el, attr, url, desc))
            elif ("input" == el and "formaction" == attr):
                fs.write('<%s type=submit %s="%s"><!-- %s -->\n' % (el, attr, url, desc))
                fi.write('<%s type=image alt="foo" %s="%s"><!-- %s -->\n' % (el, attr, url, desc))
            elif ("link" == el):
                f.write('<%s %s="%s" rel=help><!-- %s -->\n' % (el, attr, url, desc))
            elif ("source" == el or "track" == el):
                f.write('<video><%s %s="%s"></video><!-- %s -->\n' % (el, attr, url, desc))
            else:
                f.write('<%s %s="%s"></%s><!-- %s -->\n' % (el, attr, url, el, desc))
        if ("input" == el and "formaction" == attr):
            fs.close()
            fi.close()
        else:
            if ("a" == el and "href" == attr):
                # The empty string is a valid URL reference on <a href>.
                f.write('<a href=""></a><!-- empty-href -->\n')
            f.close()
    # One document per URL for attributes limited to one occurrence.
    for desc, url in valid.items():
        f = open(os.path.join(ccdir, "html/elements/base/href/%s-isvalid.html" % desc), 'wb')
        f.write(template + '<title>valid href: %s</title>\n' % desc)
        f.write('<base href="%s">\n' % url)
        f.close()
        f = open(os.path.join(ccdir, "html/elements/html/manifest/%s-isvalid.html" % desc), 'wb')
        f.write('<!DOCTYPE html>\n')
        f.write('<html manifest="%s">\n' % url)
        f.write('<meta charset=utf-8>\n')
        f.write('<title>valid manifest: %s</title>\n' % desc)
        f.write('</html>\n')
        f.close()
    f = open(os.path.join(ccdir, "html/elements/meta/refresh-isvalid.html"), 'wb')
    f.write(template + '<title>valid meta refresh</title>\n')
    for desc, url in valid.items():
        f.write('<meta http-equiv=refresh content="0; URL=%s"><!-- %s -->\n' % (url, desc))
    f.close()
    f = open(os.path.join(ccdir, "html/microdata/itemid-isvalid.html"), 'wb')
    f.write(template + '<title>valid itemid</title>\n')
    for desc, url in valid.items():
        f.write('<div itemid="%s" itemtype="http://foo" itemscope></div><!-- %s -->\n' % (url, desc))
    f.close()
    # itemtype and input[type=url] values must be absolute URLs.
    f = open(os.path.join(ccdir, "html/microdata/itemtype-isvalid.html"), 'wb')
    f.write(template + '<title>valid itemtype</title>\n')
    for desc, url in valid_absolute.items():
        f.write('<div itemtype="%s" itemscope></div><!-- %s -->\n' % (url, desc))
    f.close()
    f = open(os.path.join(ccdir, "html/elements/input/type-url-value-isvalid.html"), 'wb')
    f.write(template + '<title>valid value attribute</title>\n')
    for desc, url in valid_absolute.items():
        f.write('<input type=url value="%s"><!-- %s -->\n' % (url, desc))
    f.close()
# Generate every fixture category when this module is executed (or imported).
write_novalid_files()
write_haswarn_files()
write_isvalid_files()
# vim: ts=4:sw=4
| mpl-2.0 |
pv/scikit-learn | sklearn/cluster/tests/test_bicluster.py | 226 | 9457 | """Testing for Spectral Biclustering methods"""
import numpy as np
from scipy.sparse import csr_matrix, issparse
from sklearn.grid_search import ParameterGrid
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import SkipTest
from sklearn.base import BaseEstimator, BiclusterMixin
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.cluster.bicluster import _scale_normalize
from sklearn.cluster.bicluster import _bistochastic_normalize
from sklearn.cluster.bicluster import _log_normalize
from sklearn.metrics import consensus_score
from sklearn.datasets import make_biclusters, make_checkerboard
class MockBiclustering(BaseEstimator, BiclusterMixin):
    # Minimal stand-in used to exercise BiclusterMixin.get_submatrix.
    def __init__(self):
        pass

    def get_indices(self, i):
        # Fixed selection reproducing the old get_submatrix test:
        # rows 0, 1, 4 and columns 2, 3.
        rows = np.flatnonzero([True, True, False, False, True])
        cols = np.flatnonzero([False, False, True, True])
        return rows, cols
def test_get_submatrix():
    # get_submatrix must accept an ndarray, a sparse matrix and nested
    # lists, and must return a copy: mutating the result leaves X intact.
    data = np.arange(20).reshape(5, 4)
    model = MockBiclustering()
    for X in (data, csr_matrix(data), data.tolist()):
        submatrix = model.get_submatrix(0, X)
        if issparse(submatrix):
            submatrix = submatrix.toarray()
        # Rows (0, 1, 4) x columns (2, 3) of the 5x4 arange matrix.
        assert_array_equal(submatrix, [[2, 3],
                                       [6, 7],
                                       [18, 19]])
        submatrix[:] = -1
        if issparse(X):
            X = X.toarray()
        # The original input must not have been modified.
        assert_true(np.all(X != -1))
def _test_shape_indices(model):
# Test get_shape and get_indices on fitted model.
for i in range(model.n_clusters):
m, n = model.get_shape(i)
i_ind, j_ind = model.get_indices(i)
assert_equal(len(i_ind), m)
assert_equal(len(j_ind), n)
def test_spectral_coclustering():
    # Test Dhillon's Spectral CoClustering on a simple problem.
    # Every svd/kmeans configuration must recover the planted biclusters
    # exactly, for both dense and sparse input.
    param_grid = {'svd_method': ['randomized', 'arpack'],
                  'n_svd_vecs': [None, 20],
                  'mini_batch': [False, True],
                  'init': ['k-means++'],
                  'n_init': [10],
                  'n_jobs': [1]}
    random_state = 0
    S, rows, cols = make_biclusters((30, 30), 3, noise=0.5,
                                    random_state=random_state)
    S -= S.min()  # needs to be nonnegative before making it sparse
    S = np.where(S < 1, 0, S)  # threshold some values
    for mat in (S, csr_matrix(S)):
        for kwargs in ParameterGrid(param_grid):
            model = SpectralCoclustering(n_clusters=3,
                                         random_state=random_state,
                                         **kwargs)
            model.fit(mat)
            # Co-clustering partitions: each row/column is in exactly one
            # of the 3 biclusters.
            assert_equal(model.rows_.shape, (3, 30))
            assert_array_equal(model.rows_.sum(axis=0), np.ones(30))
            assert_array_equal(model.columns_.sum(axis=0), np.ones(30))
            # Perfect recovery of the planted structure.
            assert_equal(consensus_score(model.biclusters_,
                                         (rows, cols)), 1)
            _test_shape_indices(model)
def test_spectral_biclustering():
    # Test Kluger methods on a checkerboard dataset.
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0.5,
                                      random_state=0)
    # Each non-default parameter is exercised one at a time.
    non_default_params = {'method': ['scale', 'log'],
                          'svd_method': ['arpack'],
                          'n_svd_vecs': [20],
                          'mini_batch': [True]}
    for mat in (S, csr_matrix(S)):
        for param_name, param_values in non_default_params.items():
            for param_value in param_values:
                model = SpectralBiclustering(
                    n_clusters=3,
                    n_init=3,
                    init='k-means++',
                    random_state=0,
                )
                model.set_params(**dict([(param_name, param_value)]))
                if issparse(mat) and model.get_params().get('method') == 'log':
                    # cannot take log of sparse matrix
                    assert_raises(ValueError, model.fit, mat)
                    continue
                else:
                    model.fit(mat)
                # 3x3 checkerboard -> 9 biclusters; every row/column
                # belongs to exactly 3 of them.
                assert_equal(model.rows_.shape, (9, 30))
                assert_equal(model.columns_.shape, (9, 30))
                assert_array_equal(model.rows_.sum(axis=0),
                                   np.repeat(3, 30))
                assert_array_equal(model.columns_.sum(axis=0),
                                   np.repeat(3, 30))
                # Perfect recovery of the planted structure.
                assert_equal(consensus_score(model.biclusters_,
                                             (rows, cols)), 1)
                _test_shape_indices(model)
def _do_scale_test(scaled):
"""Check that rows sum to one constant, and columns to another."""
row_sum = scaled.sum(axis=1)
col_sum = scaled.sum(axis=0)
if issparse(scaled):
row_sum = np.asarray(row_sum).squeeze()
col_sum = np.asarray(col_sum).squeeze()
assert_array_almost_equal(row_sum, np.tile(row_sum.mean(), 100),
decimal=1)
assert_array_almost_equal(col_sum, np.tile(col_sum.mean(), 100),
decimal=1)
def _do_bistochastic_test(scaled):
    """Check that rows and columns sum to the same constant."""
    _do_scale_test(scaled)
    col_mean = scaled.sum(axis=0).mean()
    row_mean = scaled.sum(axis=1).mean()
    # Bistochastic: both constants must coincide (loosely).
    assert_almost_equal(col_mean, row_mean, decimal=1)
def test_scale_normalize():
    # Scale normalization must work for dense and sparse input alike.
    rng = np.random.RandomState(0)
    dense = rng.rand(100, 100)
    for mat in (dense, csr_matrix(dense)):
        scaled = _scale_normalize(mat)[0]
        _do_scale_test(scaled)
        if issparse(mat):
            # Sparsity must be preserved.
            assert issparse(scaled)
def test_bistochastic_normalize():
    # Bistochastic normalization for both dense and sparse input.
    rng = np.random.RandomState(0)
    dense = rng.rand(100, 100)
    for mat in (dense, csr_matrix(dense)):
        scaled = _bistochastic_normalize(mat)
        _do_bistochastic_test(scaled)
        if issparse(mat):
            # Sparsity must be preserved.
            assert issparse(scaled)
def test_log_normalize():
    """Adding any constant to a log-scaled matrix makes it bistochastic."""
    rng = np.random.RandomState(0)
    mat = rng.rand(100, 100)
    _do_bistochastic_test(_log_normalize(mat) + 1)
def test_fit_best_piecewise():
    """_fit_best_piecewise must rank piecewise-constant vectors first."""
    piecewise = [[0, 0, 0, 1, 1, 1],
                 [2, 2, 2, 3, 3, 3]]
    ramp = [[0, 1, 2, 3, 4, 5]]
    vectors = np.array(piecewise + ramp)
    model = SpectralBiclustering(random_state=0)
    # The two step-shaped rows should be selected over the ramp.
    best = model._fit_best_piecewise(vectors, n_best=2, n_clusters=2)
    assert_array_equal(best, vectors[:2])
def test_project_and_cluster():
    """Projecting onto the vectors separates the two row groups.

    BUG FIX: the loop iterated over (data, csr_matrix(data)) but always
    passed the dense `data` to _project_and_cluster, so the sparse code
    path was never actually exercised; it now passes `mat`.
    """
    model = SpectralBiclustering(random_state=0)
    data = np.array([[1, 1, 1],
                     [1, 1, 1],
                     [3, 6, 3],
                     [3, 6, 3]])
    vectors = np.array([[1, 0],
                        [0, 1],
                        [0, 0]])
    for mat in (data, csr_matrix(data)):
        labels = model._project_and_cluster(mat, vectors,
                                            n_clusters=2)
        assert_array_equal(labels, [0, 0, 1, 1])
def test_perfect_checkerboard():
    # Deliberately disabled: the unconditional raise makes everything
    # below unreachable until the buildbot failure is reproduced.
    raise SkipTest("This test is failing on the buildbot, but cannot"
                   " reproduce. Temporarily disabling it until it can be"
                   " reproduced and fixed.")
    # Noise-free checkerboards (square and both rectangular orientations)
    # should be recovered perfectly.
    model = SpectralBiclustering(3, svd_method="arpack", random_state=0)
    S, rows, cols = make_checkerboard((30, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
    S, rows, cols = make_checkerboard((40, 30), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
    S, rows, cols = make_checkerboard((30, 40), 3, noise=0,
                                      random_state=0)
    model.fit(S)
    assert_equal(consensus_score(model.biclusters_,
                                 (rows, cols)), 1)
def test_errors():
    """Every invalid parameter combination must make fit() raise ValueError."""
    data = np.arange(25).reshape((5, 5))
    bad_params = ({'n_clusters': (3, 3, 3)},
                  {'n_clusters': 'abc'},
                  {'n_clusters': (3, 'abc')},
                  {'method': 'unknown'},
                  {'svd_method': 'unknown'},
                  {'n_components': 0},
                  {'n_best': 0},
                  {'n_components': 3, 'n_best': 4})
    for params in bad_params:
        model = SpectralBiclustering(**params)
        assert_raises(ValueError, model.fit, data)
    # Input must be 2-D.
    model = SpectralBiclustering()
    data = np.arange(27).reshape((3, 3, 3))
    assert_raises(ValueError, model.fit, data)
| bsd-3-clause |
jiangzhuo/kbengine | kbe/res/scripts/common/Lib/lib2to3/main.py | 71 | 11622 | """
Main program for 2to3.
"""
from __future__ import with_statement
import sys
import os
import difflib
import logging
import shutil
import optparse
from . import refactor
def diff_texts(a, b, filename):
    """Return a unified diff of two strings."""
    # Same filename on both sides; the date fields mark which is which.
    return difflib.unified_diff(a.splitlines(), b.splitlines(),
                                filename, filename,
                                "(original)", "(refactored)",
                                lineterm="")
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
    """
    A refactoring tool that can avoid overwriting its input files.
    Prints output to stdout.

    Output files can optionally be written to a different directory and or
    have an extra file suffix appended to their name for use in situations
    where you do not want to replace the input files.
    """

    def __init__(self, fixers, options, explicit, nobackups, show_diffs,
                 input_base_dir='', output_dir='', append_suffix=''):
        """
        Args:
            fixers: A list of fixers to import.
            options: A dict with RefactoringTool configuration.
            explicit: A list of fixers to run even if they are explicit.
            nobackups: If true no backup '.bak' files will be created for those
                files that are being refactored.
            show_diffs: Should diffs of the refactoring be printed to stdout?
            input_base_dir: The base directory for all input files.  This class
                will strip this path prefix off of filenames before substituting
                it with output_dir.  Only meaningful if output_dir is supplied.
                All files processed by refactor() must start with this path.
            output_dir: If supplied, all converted files will be written into
                this directory tree instead of input_base_dir.
            append_suffix: If supplied, all files output by this tool will have
                this appended to their filename.  Useful for changing .py to
                .py3 for example by passing append_suffix='3'.
        """
        self.nobackups = nobackups
        self.show_diffs = show_diffs
        # Normalize with a trailing separator so the prefix strip in
        # write_file() removes the separator as well.
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        self._input_base_dir = input_base_dir
        self._output_dir = output_dir
        self._append_suffix = append_suffix
        super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)

    def log_error(self, msg, *args, **kwargs):
        # Record the error so main() can report a non-zero exit status.
        self.errors.append((msg, args, kwargs))
        self.logger.error(msg, *args, **kwargs)

    def write_file(self, new_text, filename, old_text, encoding):
        orig_filename = filename
        if self._output_dir:
            if filename.startswith(self._input_base_dir):
                # Mirror the input directory layout under output_dir.
                filename = os.path.join(self._output_dir,
                                        filename[len(self._input_base_dir):])
            else:
                raise ValueError('filename %s does not start with the '
                                 'input_base_dir %s' % (
                                 filename, self._input_base_dir))
        if self._append_suffix:
            filename += self._append_suffix
        if orig_filename != filename:
            output_dir = os.path.dirname(filename)
            if not os.path.isdir(output_dir):
                os.makedirs(output_dir)
            self.log_message('Writing converted %s to %s.', orig_filename,
                             filename)
        if not self.nobackups:
            # Make backup
            backup = filename + ".bak"
            if os.path.lexists(backup):
                try:
                    os.remove(backup)
                except OSError as err:
                    self.log_message("Can't remove backup %s", backup)
            try:
                os.rename(filename, backup)
            except OSError as err:
                self.log_message("Can't rename %s to %s", filename, backup)
        # Actually write the new file
        write = super(StdoutRefactoringTool, self).write_file
        write(new_text, filename, old_text, encoding)
        if not self.nobackups:
            # Restore the original file's mode on the refactored copy.
            shutil.copymode(backup, filename)
        if orig_filename != filename:
            # Preserve the file mode in the new output directory.
            shutil.copymode(orig_filename, filename)

    def print_output(self, old, new, filename, equal):
        if equal:
            self.log_message("No changes to %s", filename)
        else:
            self.log_message("Refactored %s", filename)
            if self.show_diffs:
                diff_lines = diff_texts(old, new, filename)
                try:
                    if self.output_lock is not None:
                        # Serialize diff output when refactoring with -j > 1.
                        with self.output_lock:
                            for line in diff_lines:
                                print(line)
                            sys.stdout.flush()
                    else:
                        for line in diff_lines:
                            print(line)
                except UnicodeEncodeError:
                    warn("couldn't encode %s's diff for your terminal" %
                         (filename,))
                    return
def warn(msg):
    """Print a warning message to standard error."""
    sys.stderr.write("WARNING: %s\n" % (msg,))
def main(fixer_pkg, args=None):
    """Main program.

    Args:
        fixer_pkg: the name of a package where the fixers are located.
        args: optional; a list of command line arguments. If omitted,
              sys.argv[1:] is used.

    Returns a suggested exit status (0, 1, 2).
    """
    # Set up option parser
    parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
    parser.add_option("-d", "--doctests_only", action="store_true",
                      help="Fix up doctests only")
    parser.add_option("-f", "--fix", action="append", default=[],
                      help="Each FIX specifies a transformation; default: all")
    parser.add_option("-j", "--processes", action="store", default=1,
                      type="int", help="Run 2to3 concurrently")
    parser.add_option("-x", "--nofix", action="append", default=[],
                      help="Prevent a transformation from being run")
    parser.add_option("-l", "--list-fixes", action="store_true",
                      help="List available transformations")
    parser.add_option("-p", "--print-function", action="store_true",
                      help="Modify the grammar so that print() is a function")
    parser.add_option("-v", "--verbose", action="store_true",
                      help="More verbose logging")
    parser.add_option("--no-diffs", action="store_true",
                      help="Don't show diffs of the refactoring")
    parser.add_option("-w", "--write", action="store_true",
                      help="Write back modified files")
    parser.add_option("-n", "--nobackups", action="store_true", default=False,
                      help="Don't write backups for modified files")
    parser.add_option("-o", "--output-dir", action="store", type="str",
                      default="", help="Put output files in this directory "
                      "instead of overwriting the input files.  Requires -n.")
    parser.add_option("-W", "--write-unchanged-files", action="store_true",
                      help="Also write files even if no changes were required"
                      " (useful with --output-dir); implies -w.")
    parser.add_option("--add-suffix", action="store", type="str", default="",
                      help="Append this string to all output filenames."
                      " Requires -n if non-empty. "
                      "ex: --add-suffix='3' will generate .py3 files.")

    # Parse command line arguments
    refactor_stdin = False
    flags = {}
    options, args = parser.parse_args(args)
    if options.write_unchanged_files:
        flags["write_unchanged_files"] = True
        if not options.write:
            warn("--write-unchanged-files/-W implies -w.")
        options.write = True
    # If we allowed these, the original files would be renamed to backup names
    # but not replaced.
    if options.output_dir and not options.nobackups:
        parser.error("Can't use --output-dir/-o without -n.")
    if options.add_suffix and not options.nobackups:
        parser.error("Can't use --add-suffix without -n.")
    if not options.write and options.no_diffs:
        warn("not writing files and not printing diffs; that's not very useful")
    if not options.write and options.nobackups:
        parser.error("Can't use -n without -w")
    if options.list_fixes:
        print("Available transformations for the -f/--fix option:")
        for fixname in refactor.get_all_fix_names(fixer_pkg):
            print(fixname)
        if not args:
            return 0
    if not args:
        print("At least one file or directory argument required.", file=sys.stderr)
        print("Use --help to show usage.", file=sys.stderr)
        return 2
    if "-" in args:
        # A lone "-" argument means: refactor standard input (diff-only).
        refactor_stdin = True
        if options.write:
            print("Can't write to stdin.", file=sys.stderr)
            return 2
    if options.print_function:
        flags["print_function"] = True

    # Set up logging handler
    level = logging.DEBUG if options.verbose else logging.INFO
    logging.basicConfig(format='%(name)s: %(message)s', level=level)
    logger = logging.getLogger('lib2to3.main')

    # Initialize the refactoring tool
    avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
    unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
    explicit = set()
    if options.fix:
        all_present = False
        for fix in options.fix:
            if fix == "all":
                all_present = True
            else:
                explicit.add(fixer_pkg + ".fix_" + fix)
        requested = avail_fixes.union(explicit) if all_present else explicit
    else:
        requested = avail_fixes.union(explicit)
    fixer_names = requested.difference(unwanted_fixes)
    input_base_dir = os.path.commonprefix(args)
    if (input_base_dir and not input_base_dir.endswith(os.sep)
        and not os.path.isdir(input_base_dir)):
        # One or more similar names were passed, their directory is the base.
        # os.path.commonprefix() is ignorant of path elements, this corrects
        # for that weird API.
        input_base_dir = os.path.dirname(input_base_dir)
    if options.output_dir:
        input_base_dir = input_base_dir.rstrip(os.sep)
        logger.info('Output in %r will mirror the input directory %r layout.',
                    options.output_dir, input_base_dir)
    rt = StdoutRefactoringTool(
            sorted(fixer_names), flags, sorted(explicit),
            options.nobackups, not options.no_diffs,
            input_base_dir=input_base_dir,
            output_dir=options.output_dir,
            append_suffix=options.add_suffix)

    # Refactor all files and directories passed as arguments
    if not rt.errors:
        if refactor_stdin:
            rt.refactor_stdin()
        else:
            try:
                rt.refactor(args, options.write, options.doctests_only,
                            options.processes)
            except refactor.MultiprocessingUnsupported:
                assert options.processes > 1
                print("Sorry, -j isn't supported on this platform.",
                      file=sys.stderr)
                return 1
        rt.summarize()

    # Return error status (0 if rt.errors is zero)
    return int(bool(rt.errors))
| lgpl-3.0 |
free2tedy/Licenta | gui/main.py | 1 | 27389 | from threading import Thread
from PyQt5.QtCore import Qt, pyqtSignal, QDate
from PyQt5.QtWidgets import QPushButton, QApplication, QWidget, QDesktopWidget
from PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QLabel, QGroupBox
from PyQt5.QtWidgets import QSizePolicy, QLineEdit, QDialog, QListWidget
from PyQt5.QtWidgets import QGridLayout, QComboBox, QDateEdit
from articol import Article
from MySQLdb import IntegrityError
from crawler.settings import mysql_conn
from twisted.internet import reactor
from scrapy.crawler import Crawler
from scrapy import log, signals
from crawler.spiders.hotnewsPoliticSumar import HotnewspoliticSpiderSumar
from scrapy.utils.project import get_project_settings
import re
import sys
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB, GaussianNB, BernoulliNB
from sklearn.linear_model import SGDClassifier
from sklearn.pipeline import Pipeline
import plotly.plotly as py
from plotly.graph_objs import *
from PIL import Image
classifiers = {
'Multinomial NB': MultinomialNB(),
'Gaussian NB': GaussianNB(),
'Bernoulli NB': BernoulliNB(),
'SVM': SGDClassifier(loss='hinge', penalty='l2', alpha=1e-3, n_iter=5, random_state=42)
}
monthIntToString = {
1: 'Ian',
2: 'Feb',
3: 'Mar',
4: 'Apr',
5: 'Mai',
6: 'Iun',
7: 'Iul',
8: 'Aug',
9: 'Sep',
10: 'Oct',
11: 'Noi',
12: 'Dec'
}
reportTypes = [
"Opinions/Entity Selected",
"Opinions/All Entities",
"Appearances/Entity Selected",
"Appearances/All Entities"
]
class MainWindow(QWidget):
    """Top-level window: DB statistics, entity management, charts and
    database maintenance tools for the opinion-mining GUI."""

    # Signals emitted after database changes so the UI can refresh itself.
    articleInfoUpdate = pyqtSignal()
    entityUpdate = pyqtSignal()
    crawlerUpdate = pyqtSignal()  # NOTE(review): never connected/emitted here

    def __init__(self):
        super(MainWindow, self).__init__()
        self.initUI()
        self.articleInfoUpdate.connect(self.updateArticleInfo)
        self.entityUpdate.connect(self.updateEntityList)

    def initUI(self):
        """Build all widget groups and stack them vertically."""
        self.setGeometry(0, 0, 500, 700)
        self.center()
        self.setWindowTitle('PView')
        mainLayout = QVBoxLayout()
        self.createArticleInfoBox()
        self.createViewArticleBox()
        self.createEntityBox()
        self.createReportBox()
        self.createDatabaseBox()
        mainLayout.addWidget(self.infoBox)
        mainLayout.addWidget(self.viewArticleBox)
        mainLayout.addWidget(self.entityBox)
        mainLayout.addWidget(self.raportBox)
        mainLayout.addWidget(self.databaseBox)
        self.setLayout(mainLayout)
        self.show()
    def createArticleInfoBox(self):
        """Build the "Statistics" group box with DB row-count labels."""
        # Cached so viewArticle() can validate typed ids without a query.
        self.articleCount = self.selectCountArticles()
        entityCount = self.selectCountEntities()
        associationsCount = self.selectCountAssociations()
        classifiedCount = self.selectCountClassifiedAssociations()
        label = "Number of articles: " + str(self.articleCount)
        self.articleCountLabel = QLabel(label)
        label = "Number of entities: " + str(entityCount)
        self.entitiesCountLabel = QLabel(label)
        label = "Number of associations: " + str(associationsCount)
        self.associationsCountLabel = QLabel(label)
        label = "Number of classified associations: " + str(classifiedCount)
        self.classifiedCountLabel = QLabel(label)
        layout = QVBoxLayout()
        layout.addWidget(self.articleCountLabel)
        layout.addWidget(self.entitiesCountLabel)
        layout.addWidget(self.associationsCountLabel)
        layout.addWidget(self.classifiedCountLabel)
        self.infoBox = QGroupBox("Statistics")
        self.infoBox.setLayout(layout)

    def createCrawlerBox(self):
        """Build the "Crawler" group box.

        NOTE(review): initUI() never calls this method and crawlButton has
        no slot connected, so the box appears to be dead code -- confirm.
        """
        self.crawlButton = QPushButton("Crawl")
        self.crawlButton.setFocusPolicy(Qt.NoFocus)
        self.websiteList = QComboBox()
        self.websiteList.addItem("HotNews")
        layout = QGridLayout()
        layout.addWidget(self.websiteList, 0, 0, 1, 1)
        layout.addWidget(self.crawlButton, 0, 1, 1, 1)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 1)
        self.crawlerBox = QGroupBox("Crawler")
        self.crawlerBox.setLayout(layout)
    def createViewArticleBox(self):
        """Build the "View Article" box: an id field plus an Open button."""
        self.articleNumberLineEdit = QLineEdit("")
        self.articleNumberLineEdit.setAlignment(Qt.AlignHCenter)
        self.viewArticleButton = QPushButton("Open")
        self.viewArticleButton.clicked.connect(self.viewArticle)
        layout = QGridLayout()
        layout.addWidget(self.articleNumberLineEdit, 0, 0, 1, 1)
        layout.addWidget(self.viewArticleButton, 0, 1, 1, 1)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 1)
        self.viewArticleBox = QGroupBox("View Article")
        self.viewArticleBox.setLayout(layout)

    def createReportBox(self):
        """Build the "Charts" box: date range, report type and View button."""
        # Default the date range to the span of articles in the database.
        minDate, maxDate = self.selectMinAndMaxDate()
        minDate = QDate(minDate.year, minDate.month, minDate.day)
        maxDate = QDate(maxDate.year, maxDate.month, maxDate.day)
        self.fromDateEdit = QDateEdit()
        self.fromDateEdit.setDate(minDate)
        self.fromDateEdit.setDisplayFormat('d MMM yyyy')
        self.fromDateEdit.setAlignment(Qt.AlignHCenter)
        self.toDateEdit = QDateEdit()
        self.toDateEdit.setDate(maxDate)
        self.toDateEdit.setDisplayFormat('d MMM yyyy')
        self.toDateEdit.setAlignment(Qt.AlignHCenter)
        self.reportTypeComboBox = QComboBox()
        for item in reportTypes:
            self.reportTypeComboBox.addItem(item)
        monthlyButton = QPushButton("View")
        monthlyButton.clicked.connect(self.createReport)
        layout = QGridLayout()
        layout.addWidget(self.fromDateEdit, 0, 0, 1, 1)
        layout.addWidget(self.toDateEdit, 0, 1, 1, 1)
        layout.addWidget(self.reportTypeComboBox, 1, 0, 1, 1)
        layout.addWidget(monthlyButton, 1, 1, 1, 1)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 1)
        self.raportBox = QGroupBox("Charts")
        self.raportBox.setLayout(layout)
    def createEntityBox(self):
        """Build the "Entities" box: entity list, add/remove controls,
        article browsing and classifier selection."""
        rows = self.selectCountEntities()  # NOTE(review): unused -- confirm
        self.entityList = QListWidget()
        entities = self.selectAllEntities()
        for entity in entities:
            # Rows are (entity_id, entity) tuples; ensure associations exist
            # before listing the entity name.
            self.doAssociationsForEntity(entity[1])
            self.entityList.addItem(entity[1])
        addButton = QPushButton("Add")
        addButton.clicked.connect(self.addEntity)
        removeButton = QPushButton("Delete")
        removeButton.clicked.connect(self.removeEntity)
        self.addEntityLineEdit = QLineEdit("")
        viewArticlesButton = QPushButton("View articles")
        viewArticlesButton.clicked.connect(self.viewArticleByEntity)
        self.algorithmComboBox = QComboBox()
        for key in classifiers.keys():
            self.algorithmComboBox.addItem(key)
        classifyButton = QPushButton("Classify")
        classifyButton.clicked.connect(self.classifyAllAssociations)
        layout = QGridLayout()
        layout.addWidget(self.entityList, 0, 0, 1, 4)
        layout.addWidget(self.addEntityLineEdit, 1, 0, 1, 2)
        layout.addWidget(addButton, 1, 2, 1, 1)
        layout.addWidget(removeButton, 1, 3, 1, 1)
        layout.addWidget(viewArticlesButton, 2, 0, 1, 4)
        layout.addWidget(self.algorithmComboBox, 3, 0, 1, 2)
        layout.addWidget(classifyButton, 3, 2, 1, 2)
        layout.setColumnStretch(0, 1)
        layout.setColumnStretch(1, 1)
        layout.setColumnStretch(2, 1)
        layout.setColumnStretch(3, 1)
        self.entityBox = QGroupBox("Entities")
        self.entityBox.setLayout(layout)

    def createDatabaseBox(self):
        """Build the "Database" box with bulk-delete maintenance actions."""
        deleteClassificationsButton = QPushButton("Remove all classifications")
        deleteClassificationsButton.clicked.connect(self.clearAllCalculatedPolarities)
        deleteEntitiesButton = QPushButton("Remove all entities")
        deleteEntitiesButton.clicked.connect(self.deleteAllEntities)
        deleteAssociationsButton = QPushButton("Remove all associations")
        deleteAssociationsButton.clicked.connect(self.deleteAllAssociations)
        layout = QVBoxLayout()
        layout.addWidget(deleteClassificationsButton)
        layout.addWidget(deleteAssociationsButton)
        layout.addWidget(deleteEntitiesButton)
        self.databaseBox = QGroupBox("Database")
        self.databaseBox.setLayout(layout)
    def center(self):
        """Move the window to the center of the available screen area."""
        qr = self.frameGeometry()
        cp = QDesktopWidget().availableGeometry().center()
        qr.moveCenter(cp)
        self.move(qr.topLeft())

    def monthsBetweenDates(self, fromDate, toDate):
        """Return one QDate per month from fromDate (inclusive) up to
        toDate (exclusive), stepping a month at a time."""
        curDate = QDate(fromDate)
        months = []
        while curDate < toDate:
            months.append(curDate)
            curDate = curDate.addMonths(1)
        return months
    def makeMonthlyPolarityChart(self, entities, fromDate, toDate):
        """Plot, per entity, the monthly share of positive opinions.

        Renders a grouped bar chart with plotly, saves it as polarities.png
        and opens it in the default image viewer.
        """
        cursor = mysql_conn.cursor()  # NOTE(review): unused -- confirm
        chartData = []
        for (entityId, entity) in entities:
            # Items are (month, article_count, polarity_rows) tuples.
            monthlyPol = self.selectAllPolaritiesForEntity(entityId, fromDate, toDate)
            trace0 = Bar(
                x = [self.monthYearLabel(month) for (month, _, _) in monthlyPol],
                # Percentage of positive (polarity == 1) classifications.
                # NOTE(review): the l+1 denominator avoids division by zero
                # for empty months but slightly deflates every percentage --
                # confirm this is intentional.
                y = [(0.0 + rows.count(1L)) / (l+1) * 100 for (_, l, rows) in monthlyPol],
                name = entity,
                marker = Marker(
                    color = 'rgb(204,204,204)',
                    opacity = 0.5,
                ),
            )
            chartData.append(trace0)
        chartData = Data(chartData)
        layout = Layout(
            xaxis=XAxis(
                #set x-axis' labels direction at 45 degree angle
                tickangle=-45,
            ),
            barmode='group',
        )
        fig = Figure(data = chartData, layout = layout)
        py.image.save_as({'data': chartData}, "polarities.png")
        img = Image.open("polarities.png")
        img.show()

    def makeMonthlyAppearanceChart(self, entities, fromDate, toDate):
        """Plot, per entity, the number of monthly article appearances.

        Saves the grouped bar chart as appearances.png and opens it.
        """
        cursor = mysql_conn.cursor()  # NOTE(review): unused -- confirm
        chartData = []
        for (entityId, entity) in entities:
            # Items are (month, association_count) tuples.
            monthlyApp = self.selectCountAssociationsForEntityBetweenDates(entityId, fromDate, toDate)
            trace0 = Bar(
                x = [self.monthYearLabel(month) for (month, _) in monthlyApp],
                y = [count for (_, count) in monthlyApp],
                name = entity,
                marker = Marker(
                    color = 'rgb(204,204,204)',
                    opacity = 0.5,
                ),
            )
            chartData.append(trace0)
        chartData = Data(chartData)
        layout = Layout(
            xaxis=XAxis(
                #set x-axis' labels direction at 45 degree angle
                tickangle=-45,
            ),
            barmode='group',
        )
        fig = Figure(data = chartData, layout = layout)
        py.image.save_as({'data': chartData}, "appearances.png")
        img = Image.open("appearances.png")
        img.show()
def getStringDate(self, date):
sDate = str(date.year())
sDate += '-'+str(date.month())
sDate += '-'+'01'
time = '00:00:00'
return sDate + ' ' + time
def monthYearLabel(self, date):
label = monthIntToString[date.month()] + ' '
label += str(date.year())
return label
def createReport(self):
reportType = self.reportTypeComboBox.currentText()
fromDate = self.fromDateEdit.date()
toDate = self.toDateEdit.date()
entities = []
if "All entities" in reportType:
entities = self.selectAllEntities()
else:
selected = self.entityList.selectedItems()
if len(selected) == 1:
entity = selected[0].text()
entities = [(self.selectEntityId(entity), entity)]
if len(entities) > 0:
if "Opinions" in reportType:
self.makeMonthlyPolarityChart(entities, fromDate, toDate)
else:
print entities
self.makeMonthlyAppearanceChart(entities, fromDate, toDate)
def viewArticle(self):
try:
articleId = int(self.articleNumberLineEdit.text())
if articleId > 0 and articleId < self.articleCount:
self.viewArticleButton.setEnabled(False)
self.articleNumberLineEdit.setDisabled(True)
articleList = [i+1 for i in xrange(self.articleCount)]
articleView = Article(articleId-1, articleList, parentW=self)
articleView.exec_()
self.viewArticleButton.setEnabled(True)
self.articleNumberLineEdit.setDisabled(False)
except ValueError:
print "Invalid article id"
    def viewArticleByEntity(self):
        """Open an article browser over all articles mentioning the single
        selected entity (shuffle_=True presumably randomizes the reading
        order -- confirm in articol.Article)."""
        selected = self.entityList.selectedItems()
        if len(selected) == 1:
            articles = self.selectAllArticlesByEntity(selected[0].text())
            # Article expects a list of article ids (first tuple column).
            articleList = [a[0] for a in articles]
            articleView = Article(0, articleList, shuffle_=True, parentW=self)
            articleView.exec_()
    def addEntity(self):
        """Insert the entity typed in the line edit (if not already present),
        create its article associations and refresh the entity list."""
        newEntity = self.addEntityLineEdit.text().strip()
        # Collapse runs of spaces so duplicates differing only in internal
        # whitespace are detected.
        newEntity = re.sub(r' +', ' ', newEntity)
        cursor = mysql_conn.cursor()
        if len(newEntity) != 0:
            selectStmt = """SELECT *
                            FROM entities
                            WHERE entity=%s"""
            data = (newEntity,)
            cursor.execute(selectStmt, data)
            rows = cursor.fetchall()
            if len(rows) == 0:
                insertStmt = """INSERT INTO entities (entity)
                                VALUES (%s)"""
                data = (newEntity,)
                cursor.execute(insertStmt, data)
                cursor.execute("""COMMIT""")
                self.entityUpdate.emit()
                self.doAssociationsForEntity(newEntity)
        self.addEntityLineEdit.setText("")

    def removeEntity(self):
        """Delete every selected entity together with its associations."""
        selected = self.entityList.selectedItems()
        cursor = mysql_conn.cursor()
        for item in selected:
            # Associations reference entity_id, so remove them first.
            self.deleteAssciationsForEntity(item.text())
            selectStmt = """SELECT entity_id
                            FROM entities
                            WHERE entity=%s"""
            data = (item.text(),)
            cursor.execute(selectStmt, data)
            entityId = cursor.fetchone()
            deleteStmt = """DELETE FROM entities
                            WHERE entity_id=%s"""
            data = (entityId[0],)
            cursor.execute(deleteStmt, data)
            cursor.execute("""COMMIT""")
        self.entityUpdate.emit()
    def updateEntityList(self):
        """Slot for entityUpdate: rebuild the list widget from the DB."""
        self.entityList.clear()
        entities = self.selectAllEntities()
        for entity in entities:
            self.entityList.addItem(entity[1])
        label = "Number of entities: " + str(len(entities))
        self.entitiesCountLabel.setText(label)

    def updateArticleInfo(self):
        """Slot for articleInfoUpdate: refresh all statistics labels."""
        self.articleCount = self.selectCountArticles()
        entityCount = self.selectCountEntities()
        associationsCount = self.selectCountAssociations()
        classifiedCount = self.selectCountClassifiedAssociations()
        label = "Number of articles: " + str(self.articleCount)
        self.articleCountLabel.setText(label)
        label = "Number of entities: " + str(entityCount)
        self.entitiesCountLabel.setText(label)
        label = "Number of classified associations: " + str(classifiedCount)
        self.classifiedCountLabel.setText(label)
        label = "Number of associations: " + str(associationsCount)
        self.associationsCountLabel.setText(label)
    def classifyAllAssociations(self):
        """Train one text classifier per entity on its manually labelled
        associations, then fill polarity_calculated for unlabelled ones."""
        cursor = mysql_conn.cursor()
        entities = self.selectAllEntities()
        for (entityId, entity) in entities:
            # Training set: article bodies (column 4 of the articles row)
            # paired with their manual polarity labels.
            manualPol = self.selectManualPolaritiesForEntity(entityId)
            trainingData = [self.selectArticle(id_)[4] for (id_, _) in manualPol]
            trainingTarget = [polarity for (_, polarity) in manualPol]
            algorithm = self.algorithmComboBox.currentText()
            textClf = Pipeline([('vect', CountVectorizer()),
                                ('tfidf', TfidfTransformer()),
                                ('clf', classifiers[algorithm]),
                                ])
            # NOTE(review): fit() will raise if an entity has no manually
            # labelled association (empty training set) -- confirm callers
            # guarantee at least one label per entity.
            textClf.fit(trainingData, trainingTarget)
            # select all articles associated with entity that need to be classified
            selectStmt = """SELECT article_id
                            FROM assocEntityArticle
                            WHERE polarity_manual IS NULL
                            AND polarity_calculated IS NULL
                            AND entity_id=%s"""
            data = (entityId,)
            cursor.execute(selectStmt, data)
            ids = cursor.fetchall()
            if len(ids) > 0:
                ids = [a[0] for a in ids]
                testData = [self.selectArticle(id_)[4] for id_ in ids]
                predicted = textClf.predict(testData)
                # NOTE(review): debug output (count of positive predictions)
                print [x for x in predicted].count(1)
                updateData = zip(predicted, ids)
                updateData = [(polarity, entityId, id_) for (polarity, id_) in updateData]
                updateStmt = """UPDATE assocEntityArticle
                                SET polarity_calculated=%s
                                WHERE entity_id=%s AND article_id=%s"""
                cursor.executemany(updateStmt, updateData)
                cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()
    def selectArticle(self, articleId):
        """Return the full articles row for the given id, or None."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT *
                        FROM articles
                        WHERE article_id=%s"""
        data = (articleId,)
        cursor.execute(selectStmt, data)
        row = cursor.fetchone()
        return row

    def selectEntityId(self, entityId):
        """Return the entity_id for an entity name.

        NOTE(review): raises TypeError (fetchone() is None) if the entity
        does not exist -- callers must pass a known entity.
        """
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT entity_id
                        FROM entities
                        WHERE entity=%s"""
        data = (entityId,)
        cursor.execute(selectStmt, data)
        entityId = cursor.fetchone()[0]
        return entityId

    def selectAllArticlesByEntity(self, entity):
        """Return all article rows whose content mentions the entity
        (case-insensitivity depends on the column collation)."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT *
                        FROM articles
                        WHERE content LIKE %s"""
        data = ("%" + entity + "%",)
        cursor.execute(selectStmt, data)
        rows = cursor.fetchall()
        return rows

    def selectAllEntities(self):
        """Return all (entity_id, entity) rows."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT *
                        FROM entities"""
        cursor.execute(selectStmt)
        rows = cursor.fetchall()
        return rows

    def selectMinAndMaxDate(self):
        """Return (oldest, newest) article dates as a single row."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT MIN(date), MAX(date)
                        FROM articles"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row
    def selectCountArticles(self):
        """Return the number of rows in articles."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT count(*)
                        FROM articles"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row[0]

    def selectCountAuthors(self):
        """Return the number of rows in authors."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT count(*)
                        FROM authors"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row[0]

    def selectCountEntities(self):
        """Return the number of rows in entities."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT count(*)
                        FROM entities"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row[0]

    def selectCountAssociations(self):
        """Return the number of rows in assocEntityArticle."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT count(*)
                        FROM assocEntityArticle"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row[0]

    def selectCountAssociationsForEntityBetweenDates(self, entityId, fromDate, toDate):
        """Return [(month, association_count)] for each month in the range
        [fromDate, toDate); empty input range yields an empty list."""
        cursor = mysql_conn.cursor()
        months = self.monthsBetweenDates(fromDate, toDate)
        selectStmt = """SELECT count(*)
                        FROM assocEntityArticle a, articles b
                        WHERE a.article_id = b.article_id
                        AND b.date BETWEEN %s AND %s
                        AND a.entity_id=%s"""
        associations = []
        if len(months) != 0:
            for month in months:
                # Query one calendar month at a time: [month, month+1).
                fromDateString = self.getStringDate(month)
                toDateString = self.getStringDate(month.addMonths(1))
                data = (fromDateString, toDateString, entityId)
                cursor.execute(selectStmt, data)
                count = cursor.fetchone()[0]
                associations.append((month, count))
        return associations

    def selectCountClassifiedAssociations(self):
        """Return the number of associations having any polarity label,
        manual or calculated."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT count(*)
                        FROM assocEntityArticle
                        WHERE polarity_calculated IS NOT NULL
                        OR polarity_manual IS NOT NULL"""
        cursor.execute(selectStmt)
        row = cursor.fetchone()
        return row[0]
    def selectManualPolaritiesForEntity(self, entityId):
        """Return [(article_id, polarity_manual)] for all manually labelled
        associations of the entity."""
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT article_id, polarity_manual
                        FROM assocEntityArticle
                        WHERE polarity_manual IS NOT NULL
                        AND entity_id=%s"""
        data = (entityId,)
        cursor.execute(selectStmt, data)
        rows = cursor.fetchall()
        return rows

    def selectAllPolaritiesForEntity(self, entityId, fromDate, toDate):
        """Return [(month, label_count, labels)] per month in the range,
        where each label is the manual polarity when present, otherwise
        the calculated one (`a or b` prefers the manual column)."""
        cursor = mysql_conn.cursor()
        months = self.monthsBetweenDates(fromDate, toDate)
        selectStmt = """SELECT a.polarity_manual, a.polarity_calculated
                        FROM assocEntityArticle a, articles b
                        WHERE (a.polarity_manual IS NOT NULL
                        OR a.polarity_calculated IS NOT NULL)
                        AND a.article_id = b.article_id
                        AND b.date BETWEEN %s AND %s
                        AND a.entity_id=%s"""
        polarities = []
        if len(months) != 0:
            for month in months:
                fromDateString = self.getStringDate(month)
                toDateString = self.getStringDate(month.addMonths(1))
                data = (fromDateString, toDateString, entityId)
                cursor.execute(selectStmt, data)
                rows = cursor.fetchall()
                # Manual label wins; falls back to the calculated one.
                rows = [a or b for a, b in rows]
                polarities.append((month, len(rows), rows))
        return polarities
    def doAssociationsForEntity(self, entity):
        """Create missing (article, entity) association rows for every
        article whose content mentions the entity (SQL LIKE substring)."""
        cursor = mysql_conn.cursor()
        # select entity_id for entity given as parameter
        entityId = self.selectEntityId(entity)
        # select list of article_id for which associations exist
        # in database for entity given as param
        selectStmt = """SELECT article_id
                        FROM assocEntityArticle
                        WHERE entity_id=%s"""
        data = (entityId,)
        cursor.execute(selectStmt, data)
        articleIdsInDB = cursor.fetchall()
        articleIdsInDB = [pair[0] for pair in articleIdsInDB]
        # select all articles that contain entity in content
        selectStmt = """SELECT article_id
                        FROM articles
                        WHERE content LIKE %s"""
        data = ("%" + entity + "%",)
        cursor.execute(selectStmt, data)
        rows = cursor.fetchall()
        rows = [pair[0] for pair in rows]
        # find articles for which associations don't exist in the database
        diff = list(set(rows) - set(articleIdsInDB))
        if len(diff) != 0:
            insertStmt = """INSERT INTO assocEntityArticle (article_id, entity_id)
                            VALUES (%s, %s)"""
            data = [(articleId, entityId) for articleId in diff]
            cursor.executemany(insertStmt, data)
            cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()
    def deleteAssciationsForEntity(self, entity):
        """Delete all association rows for the given entity name.

        NOTE(review): method name has a typo ("Assciations"); kept as-is to
        avoid breaking callers.
        """
        cursor = mysql_conn.cursor()
        selectStmt = """SELECT entity_id
                        FROM entities
                        WHERE entity=%s"""
        data = (entity,)
        cursor.execute(selectStmt, data)
        entityId = cursor.fetchone()[0]
        deleteStmt = """DELETE FROM assocEntityArticle
                        WHERE entity_id=%s"""
        data = (entityId,)
        cursor.execute(deleteStmt, data)
        cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()
def doAllAssociations(self):
cursor = mysql_conn.cursor()
entities = self.selectAllEntities()
for entity in entities:
self.doAssociationsForEntity(entity)
self.articleInfoUpdate.emit()
    def deleteAllAssociations(self):
        """Delete every association row and refresh the statistics."""
        cursor = mysql_conn.cursor()
        deleteStmt = """DELETE FROM assocEntityArticle
                        WHERE article_id > 0"""
        cursor.execute(deleteStmt)
        cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()

    def clearAllCalculatedPolarities(self):
        """Reset polarity_calculated to NULL everywhere; manual labels
        are untouched."""
        cursor = mysql_conn.cursor()
        updateStmt = """UPDATE assocEntityArticle
                        SET polarity_calculated=%s
                        WHERE polarity_calculated IS NOT NULL"""
        data = (None,)  # Python None is sent as SQL NULL by the driver
        cursor.execute(updateStmt, data)
        cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()

    def deleteAllArticles(self):
        """Delete every article and reset the AUTO_INCREMENT counter.

        IntegrityError (articles still referenced elsewhere) is silently
        ignored -- deliberate best-effort behavior.
        """
        try:
            cursor = mysql_conn.cursor()
            deleteStmt = """DELETE FROM articles
                            WHERE article_id > 0"""
            cursor.execute(deleteStmt)
            alterTableStmt = """ALTER TABLE articles AUTO_INCREMENT = 1"""
            cursor.execute(alterTableStmt)
            cursor.execute("""COMMIT""")
            self.articleInfoUpdate.emit()
        except IntegrityError:
            pass

    def deleteAllAuthors(self):
        """Delete every author and reset the AUTO_INCREMENT counter."""
        cursor = mysql_conn.cursor()
        deleteStmt = """DELETE FROM authors
                        WHERE author_id > 0"""
        cursor.execute(deleteStmt)
        alterTableStmt = """ALTER TABLE authors AUTO_INCREMENT = 1"""
        cursor.execute(alterTableStmt)
        cursor.execute("""COMMIT""")

    def deleteAllArticlesAndAuthors(self):
        """Convenience wrapper: wipe articles, then authors."""
        self.deleteAllArticles()
        self.deleteAllAuthors()

    def deleteAllEntities(self):
        """Delete every entity, reset the id counter and refresh the UI."""
        cursor = mysql_conn.cursor()
        deleteStmt = """DELETE FROM entities
                        WHERE entity_id > 0"""
        cursor.execute(deleteStmt)
        alterTableStmt = """ALTER TABLE entities AUTO_INCREMENT = 1"""
        cursor.execute(alterTableStmt)
        cursor.execute("""COMMIT""")
        self.articleInfoUpdate.emit()
        self.entityUpdate.emit()
if __name__ == '__main__':
    # Create the Qt application, show the main window and run the event
    # loop, exiting with its return code.
    app = QApplication(sys.argv)
    ex = MainWindow()
    sys.exit(app.exec_())
| gpl-2.0 |
kustodian/ansible | lib/ansible/modules/cloud/docker/docker_container.py | 2 | 147492 | #!/usr/bin/python
#
# Copyright 2016 Red Hat | Ansible
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: docker_container
short_description: manage docker containers
description:
- Manage the life cycle of docker containers.
- Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
version_added: "2.1"
notes:
- For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
prevent this.
- If the module needs to recreate the container, it will only use the options provided to the module to create the
new container (except I(image)). Therefore, always specify *all* options relevant to the container.
- When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
Please note that several options have default values; if the container to be restarted uses different values for
these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
Ansible 2.14 on.
options:
auto_remove:
description:
- Enable auto-removal of the container on daemon side when the container's process exits.
- If I(container_default_behavior) is set to C(compatiblity) (the default value), this
option has a default of C(no).
type: bool
version_added: "2.4"
blkio_weight:
description:
- Block IO (relative weight), between 10 and 1000.
type: int
capabilities:
description:
- List of capabilities to add to the container.
type: list
elements: str
cap_drop:
description:
- List of capabilities to drop from the container.
type: list
elements: str
version_added: "2.7"
cleanup:
description:
- Use with I(detach=false) to remove the container after successful execution.
type: bool
default: no
version_added: "2.2"
command:
description:
- Command to execute when the container starts. A command may be either a string or a list.
- Prior to version 2.4, strings were split on commas.
type: raw
comparisons:
description:
- Allows to specify how properties of existing containers are compared with
module options to decide whether the container should be recreated / updated
or not.
- Only options which correspond to the state of a container as handled by the
Docker daemon can be specified, as well as C(networks).
- Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
and C(allow_more_present).
- If C(strict) is specified, values are tested for equality, and changes always
result in updating or restarting. If C(ignore) is specified, changes are ignored.
- C(allow_more_present) is allowed only for lists, sets and dicts. If it is
specified for lists or sets, the container will only be updated or restarted if
the module option contains a value which is not present in the container's
options. If the option is specified for a dict, the container will only be updated
or restarted if the module option contains a key which isn't present in the
container's option, or if the value of a key present differs.
- The wildcard option C(*) can be used to set one of the default values C(strict)
or C(ignore) to *all* comparisons which are not explicitly set to other values.
- See the examples for details.
type: dict
version_added: "2.8"
container_default_behavior:
description:
- Various module options used to have default values. This causes problems with
containers which use different values for these options.
- The default value is C(compatibility), which will ensure that the default values
are used when the values are not explicitly specified by the user.
- From Ansible 2.14 on, the default value will switch to C(no_defaults). To avoid
deprecation warnings, please set I(container_default_behavior) to an explicit
value.
- This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
I(paused), I(privileged), I(read_only) and I(tty) options.
type: str
choices:
- compatibility
- no_defaults
version_added: "2.10"
cpu_period:
description:
- Limit CPU CFS (Completely Fair Scheduler) period.
- See I(cpus) for an easier to use alternative.
type: int
cpu_quota:
description:
- Limit CPU CFS (Completely Fair Scheduler) quota.
- See I(cpus) for an easier to use alternative.
type: int
cpus:
description:
- Specify how much of the available CPU resources a container can use.
- A value of C(1.5) means that at most one and a half CPU (core) will be used.
type: float
version_added: '2.10'
cpuset_cpus:
description:
- CPUs in which to allow execution C(1,3) or C(1-3).
type: str
cpuset_mems:
description:
- Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
type: str
cpu_shares:
description:
- CPU shares (relative weight).
type: int
detach:
description:
- Enable detached mode to leave the container running in background.
- If disabled, the task will reflect the status of the container run (failed if the command failed).
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(yes).
type: bool
devices:
description:
- List of host device bindings to add to the container.
- "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
type: list
elements: str
device_read_bps:
description:
- "List of device path and read rate (bytes per second) from device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit in format C(<number>[<unit>])."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
required: yes
version_added: "2.8"
device_write_bps:
description:
- "List of device and write rate (bytes per second) to device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
          - "Device write limit in format C(<number>[<unit>])."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
required: yes
version_added: "2.8"
device_read_iops:
description:
- "List of device and read rate (IO per second) from device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
- "Device read limit."
- "Must be a positive integer."
type: int
required: yes
version_added: "2.8"
device_write_iops:
description:
- "List of device and write rate (IO per second) to device."
type: list
elements: dict
suboptions:
path:
description:
- Device path in the container.
type: str
required: yes
rate:
description:
          - "Device write limit."
- "Must be a positive integer."
type: int
required: yes
version_added: "2.8"
dns_opts:
description:
- List of DNS options.
type: list
elements: str
dns_servers:
description:
- List of custom DNS servers.
type: list
elements: str
dns_search_domains:
description:
- List of custom DNS search domains.
type: list
elements: str
domainname:
description:
- Container domainname.
type: str
version_added: "2.5"
env:
description:
- Dictionary of key,value pairs.
- Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
type: dict
env_file:
description:
- Path to a file, present on the target, containing environment variables I(FOO=BAR).
- If variable also present in I(env), then the I(env) value will override.
type: path
version_added: "2.2"
entrypoint:
description:
- Command that overwrites the default C(ENTRYPOINT) of the image.
type: list
elements: str
etc_hosts:
description:
- Dict of host-to-IP mappings, where each host name is a key in the dictionary.
Each host name will be added to the container's C(/etc/hosts) file.
type: dict
exposed_ports:
description:
- List of additional container ports which informs Docker that the container
listens on the specified network ports at runtime.
- If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
need to be exposed again.
type: list
elements: str
aliases:
- exposed
- expose
force_kill:
description:
- Use the kill command when stopping a running container.
type: bool
default: no
aliases:
- forcekill
groups:
description:
- List of additional group names and/or IDs that the container process will run as.
type: list
elements: str
healthcheck:
description:
- Configure a check that is run to determine whether or not containers for this service are "healthy".
- "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
for details on how healthchecks work."
- "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
type: dict
suboptions:
test:
description:
- Command to run to check health.
- Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
type: raw
interval:
description:
- Time between running the check.
- The default used by the Docker daemon is C(30s).
type: str
timeout:
description:
- Maximum time to allow one check to run.
- The default used by the Docker daemon is C(30s).
type: str
retries:
description:
- Consecutive number of failures needed to report unhealthy.
- The default used by the Docker daemon is C(3).
type: int
start_period:
description:
- Start period for the container to initialize before starting health-retries countdown.
- The default used by the Docker daemon is C(0s).
type: str
version_added: "2.8"
hostname:
description:
- The container's hostname.
type: str
ignore_image:
description:
- When I(state) is C(present) or C(started), the module compares the configuration of an existing
container to requested configuration. The evaluation includes the image version. If the image
version in the registry does not match the container, the container will be recreated. You can
stop this behavior by setting I(ignore_image) to C(True).
- "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
I(comparisons) option."
type: bool
default: no
version_added: "2.2"
image:
description:
- Repository path and tag used to create the container. If an image is not found or pull is true, the image
will be pulled from the registry. If no tag is included, C(latest) will be used.
- Can also be an image ID. If this is the case, the image is assumed to be available locally.
The I(pull) option is ignored for this case.
type: str
init:
description:
- Run an init inside the container that forwards signals and reaps processes.
- This option requires Docker API >= 1.25.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
version_added: "2.6"
interactive:
description:
- Keep stdin open after a container is launched, even if not attached.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
ipc_mode:
description:
- Set the IPC mode for the container.
- Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
the host's IPC namespace within the container.
type: str
keep_volumes:
description:
- Retain volumes associated with a removed container.
type: bool
default: yes
kill_signal:
description:
- Override default signal used to kill a running container.
type: str
kernel_memory:
description:
- "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
- Omitting the unit defaults to bytes.
type: str
labels:
description:
- Dictionary of key value pairs.
type: dict
links:
description:
- List of name aliases for linked containers in the format C(container_name:alias).
- Setting this will force container to be restarted.
type: list
elements: str
log_driver:
description:
- Specify the logging driver. Docker uses C(json-file) by default.
- See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
type: str
log_options:
description:
- Dictionary of options specific to the chosen I(log_driver).
- See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
type: dict
aliases:
- log_opt
mac_address:
description:
- Container MAC address (e.g. 92:d0:c6:0a:29:33).
type: str
memory:
description:
- "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C("0").
type: str
memory_reservation:
description:
- "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swap:
description:
- "Total memory limit (memory + swap) in format C(<number>[<unit>]).
Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes.
type: str
memory_swappiness:
description:
- Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
      - If not set, the value will remain the same if container exists and will be inherited
from the host machine if it is (re-)created.
type: int
mounts:
version_added: "2.9"
type: list
elements: dict
description:
- Specification for mounts to be added to the container. More powerful alternative to I(volumes).
suboptions:
target:
description:
- Path inside the container.
type: str
required: true
source:
description:
- Mount source (e.g. a volume name or a host path).
type: str
type:
description:
- The mount type.
- Note that C(npipe) is only supported by Docker for Windows.
type: str
choices:
- bind
- npipe
- tmpfs
- volume
default: volume
read_only:
description:
- Whether the mount should be read-only.
type: bool
consistency:
description:
- The consistency requirement for the mount.
type: str
choices:
- cached
- consistent
- default
- delegated
propagation:
description:
- Propagation mode. Only valid for the C(bind) type.
type: str
choices:
- private
- rprivate
- shared
- rshared
- slave
- rslave
no_copy:
description:
- False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
- The default value is C(false).
type: bool
labels:
description:
- User-defined name and labels for the volume. Only valid for the C(volume) type.
type: dict
volume_driver:
description:
- Specify the volume driver. Only valid for the C(volume) type.
- See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
type: str
volume_options:
description:
- Dictionary of options specific to the chosen volume_driver. See
L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
type: dict
tmpfs_size:
description:
- "The size for the tmpfs mount in bytes in format <number>[<unit>]."
- "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- "Omitting the unit defaults to bytes."
type: str
tmpfs_mode:
description:
- The permission mode for the tmpfs mount.
type: str
name:
description:
- Assign a name to a new container or match an existing container.
- When identifying an existing container name may be a name or a long or short container ID.
type: str
required: yes
network_mode:
description:
- Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
- "*Note* that from Ansible 2.14 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
I(network_mode) is not specified."
type: str
userns_mode:
description:
      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
type: str
version_added: "2.5"
networks:
description:
- List of networks the container belongs to.
- For examples of the data structure and usage see EXAMPLES below.
- To remove a container from one or more networks, use the I(purge_networks) option.
- Note that as opposed to C(docker run ...), M(docker_container) does not remove the default
network if I(networks) is specified. You need to explicitly use I(purge_networks) to enforce
the removal of the default network (and all other networks not explicitly mentioned in I(networks)).
Alternatively, use the I(networks_cli_compatible) option, which will be enabled by default from Ansible 2.12 on.
type: list
elements: dict
suboptions:
name:
description:
- The network's name.
type: str
required: yes
ipv4_address:
description:
- The container's IPv4 address in this network.
type: str
ipv6_address:
description:
- The container's IPv6 address in this network.
type: str
links:
description:
- A list of containers to link to.
type: list
elements: str
aliases:
description:
- List of aliases for this container in this network. These names
can be used in the network to reach this container.
type: list
elements: str
version_added: "2.2"
networks_cli_compatible:
description:
- "When networks are provided to the module via the I(networks) option, the module
behaves differently than C(docker run --network): C(docker run --network other)
will create a container with network C(other) attached, but the default network
not attached. This module with I(networks: {name: other}) will create a container
with both C(default) and C(other) attached. If I(purge_networks) is set to C(yes),
the C(default) network will be removed afterwards."
- "If I(networks_cli_compatible) is set to C(yes), this module will behave as
C(docker run --network) and will *not* add the default network if I(networks) is
specified. If I(networks) is not specified, the default network will be attached."
- "*Note* that docker CLI also sets I(network_mode) to the name of the first network
added if C(--network) is specified. For more compatibility with docker CLI, you
explicitly have to set I(network_mode) to the name of the first network you're
adding. This behavior will change for Ansible 2.14: then I(network_mode) will
automatically be set to the first network name in I(networks) if I(network_mode)
is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
is C(true)."
- Current value is C(no). A new default of C(yes) will be set in Ansible 2.12.
type: bool
version_added: "2.8"
oom_killer:
description:
- Whether or not to disable OOM Killer for the container.
type: bool
oom_score_adj:
description:
- An integer value containing the score given to the container in order to tune
OOM killer preferences.
type: int
version_added: "2.2"
output_logs:
description:
- If set to true, output of the container command will be printed.
- Only effective when I(log_driver) is set to C(json-file) or C(journald).
type: bool
default: no
version_added: "2.7"
paused:
description:
- Use with the started state to pause running processes inside the container.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
pid_mode:
description:
- Set the PID namespace mode for the container.
- Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
Docker SDK for Python (docker) allow all values supported by the Docker daemon.
type: str
pids_limit:
description:
- Set PIDs limit for the container. It accepts an integer value.
- Set C(-1) for unlimited PIDs.
type: int
version_added: "2.8"
privileged:
description:
- Give extended privileges to the container.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
published_ports:
description:
- List of ports to publish from the container to the host.
- "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
container port, 9000 is a host port, and 0.0.0.0 is a host interface."
- Port ranges can be used for source and destination ports. If two ranges with
different lengths are specified, the shorter range will be used.
- "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
to resolve hostnames."
- A value of C(all) will publish all exposed container ports to random host ports, ignoring
any other mappings.
- If I(networks) parameter is provided, will inspect each network to see if there exists
a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
If such a network is found, then published ports where no host IP address is specified
will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
value encountered in the list of I(networks) is the one that will be used.
type: list
elements: str
aliases:
- ports
pull:
description:
- If true, always pull the latest version of an image. Otherwise, will only pull an image
when missing.
- "*Note:* images are only pulled when specified by name. If the image is specified
as a image ID (hash), it cannot be pulled."
type: bool
default: no
purge_networks:
description:
- Remove the container from ALL networks not included in I(networks) parameter.
- Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
type: bool
default: no
version_added: "2.2"
read_only:
description:
- Mount the container's root file system as read-only.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
recreate:
description:
- Use with present and started states to force the re-creation of an existing container.
type: bool
default: no
removal_wait_timeout:
description:
    - When removing an existing container, the docker daemon API call returns after the container
is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
load, removal can take longer. By default, the module will wait until the container has been
removed before trying to (re-)create it, however long this takes.
- By setting this option, the module will wait at most this many seconds for the container to be
removed. If the container is still in the removal phase after this many seconds, the module will
fail.
type: float
version_added: "2.10"
restart:
description:
- Use with started state to force a matching container to be stopped and restarted.
type: bool
default: no
restart_policy:
description:
- Container restart policy.
- Place quotes around C(no) option.
type: str
choices:
- 'no'
- 'on-failure'
- 'always'
- 'unless-stopped'
restart_retries:
description:
- Use with restart policy to control maximum number of restart attempts.
type: int
runtime:
description:
- Runtime to use for the container.
type: str
version_added: "2.8"
shm_size:
description:
- "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
C(T) (tebibyte), or C(P) (pebibyte)."
- Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
type: str
security_opts:
description:
- List of security options in the form of C("label:user:User").
type: list
elements: str
state:
description:
- 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
rather than stopping it. Use I(keep_volumes) to retain volumes associated with the removed container.'
- 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
container matches the name, a container will be created. If a container matches the name but the provided configuration
does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
with the requested config.'
- 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
state. Use I(restart) to force a matching container to be stopped and restarted.'
- 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
state.'
- To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
image version will be taken into account, you can also use the I(ignore_image) option.
- Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
- If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain volumes associated with a removed container.
- Use I(keep_volumes) to retain volumes associated with a removed container.
type: str
default: started
choices:
- absent
- present
- stopped
- started
stop_signal:
description:
- Override default signal used to stop the container.
type: str
stop_timeout:
description:
- Number of seconds to wait for the container to stop before sending C(SIGKILL).
When the container is created by this module, its C(StopTimeout) configuration
will be set to this value.
- When the container is stopped, will be used as a timeout for stopping the
container. In case the container has a custom C(StopTimeout) configuration,
the behavior depends on the version of the docker daemon. New versions of
the docker daemon will always use the container's configured C(StopTimeout)
value if it has been configured.
type: int
trust_image_content:
description:
- If C(yes), skip image verification.
- The option has never been used by the module. It will be removed in Ansible 2.14.
type: bool
default: no
tmpfs:
description:
- Mount a tmpfs directory.
type: list
elements: str
version_added: 2.4
tty:
description:
- Allocate a pseudo-TTY.
      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
option has a default of C(no).
type: bool
ulimits:
description:
- "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
type: list
elements: str
sysctls:
description:
- Dictionary of key,value pairs.
type: dict
version_added: 2.4
user:
description:
- Sets the username or UID used and optionally the groupname or GID for the specified command.
- "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
type: str
uts:
description:
- Set the UTS namespace mode for the container.
type: str
volumes:
description:
- List of volumes to mount within the container.
- "Use docker CLI-style syntax: C(/host:/container[:mode])"
- "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
- SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
- "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
C(z), and C(Z)."
type: list
elements: str
volume_driver:
description:
- The container volume driver.
type: str
volumes_from:
description:
- List of container names or IDs to get volumes from.
type: list
elements: str
working_dir:
description:
- Path to the working directory.
type: str
version_added: "2.4"
extends_documentation_fragment:
- docker
- docker.docker_py_1_documentation
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
- "Daan Oosterveld (@dusdanig)"
- "Chris Houseknecht (@chouseknecht)"
- "Kassian Sun (@kassiansun)"
- "Felix Fontein (@felixfontein)"
requirements:
- "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
- "Docker API >= 1.20"
'''
EXAMPLES = '''
- name: Create a data container
docker_container:
name: mydata
image: busybox
volumes:
- /data
- name: Re-create a redis container
docker_container:
name: myredis
image: redis
command: redis-server --appendonly yes
state: present
recreate: yes
exposed_ports:
- 6379
volumes_from:
- mydata
- name: Restart a container
docker_container:
name: myapplication
image: someuser/appimage
state: started
restart: yes
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
env:
SECRET_KEY: "ssssh"
# Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
BOOLEAN_KEY: "yes"
- name: Container present
docker_container:
name: mycontainer
state: present
image: ubuntu:14.04
command: sleep infinity
- name: Stop a container
docker_container:
name: mycontainer
state: stopped
- name: Start 4 load-balanced containers
docker_container:
name: "container{{ item }}"
recreate: yes
image: someuser/anotherappimage
command: sleep 1d
with_sequence: count=4
- name: remove container
docker_container:
name: ohno
state: absent
- name: Syslogging output
docker_container:
name: myservice
image: busybox
log_driver: syslog
log_options:
syslog-address: tcp://my-syslog-server:514
syslog-facility: daemon
# NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag" for
# older docker installs, use "syslog-tag" instead
tag: myservice
- name: Create db container and connect to network
docker_container:
name: db_test
image: "postgres:latest"
networks:
- name: "{{ docker_network_name }}"
- name: Start container, connect to network and link
docker_container:
name: sleeper
image: ubuntu:14.04
networks:
- name: TestingNet
ipv4_address: "172.1.1.100"
aliases:
- sleepyzz
links:
- db_test:db
- name: TestingNet2
- name: Start a container with a command
docker_container:
name: sleepy
image: ubuntu:14.04
command: ["sleep", "infinity"]
- name: Add container to networks
docker_container:
name: sleepy
networks:
- name: TestingNet
ipv4_address: 172.1.1.18
links:
- sleeper
- name: TestingNet2
ipv4_address: 172.1.10.20
- name: Update network with aliases
docker_container:
name: sleepy
networks:
- name: TestingNet
aliases:
- sleepyz
- zzzz
- name: Remove container from one network
docker_container:
name: sleepy
networks:
- name: TestingNet2
purge_networks: yes
- name: Remove container from all networks
docker_container:
name: sleepy
purge_networks: yes
- name: Start a container and use an env file
docker_container:
name: agent
image: jenkinsci/ssh-slave
env_file: /var/tmp/jenkins/agent.env
- name: Create a container with limited capabilities
docker_container:
name: sleepy
image: ubuntu:16.04
command: sleep infinity
capabilities:
- sys_time
cap_drop:
- all
- name: Finer container restart/update control
docker_container:
name: test
image: ubuntu:18.04
env:
arg1: "true"
arg2: "whatever"
volumes:
- /tmp:/tmp
comparisons:
image: ignore # don't restart containers with older versions of the image
env: strict # we want precisely this environment
volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
- name: Finer container restart/update control II
docker_container:
name: test
image: ubuntu:18.04
env:
arg1: "true"
arg2: "whatever"
comparisons:
'*': ignore # by default, ignore *all* options (including image)
env: strict # except for environment variables; there, we want to be strict
- name: Start container with healthstatus
docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# Check if nginx server is healthy by curl'ing the server.
# If this fails or timeouts, the healthcheck fails.
test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
interval: 1m30s
timeout: 10s
retries: 3
start_period: 30s
- name: Remove healthcheck from container
docker_container:
name: nginx-proxy
image: nginx:1.13
state: started
healthcheck:
# The "NONE" check needs to be specified
test: ["NONE"]
- name: start container with block device read limit
docker_container:
name: test
image: ubuntu:18.04
state: started
device_read_bps:
# Limit read rate for /dev/sda to 20 mebibytes per second
- path: /dev/sda
rate: 20M
device_read_iops:
# Limit read rate for /dev/sdb to 300 IO per second
- path: /dev/sdb
rate: 300
'''
RETURN = '''
container:
description:
- Facts representing the current state of the container. Matches the docker inspection output.
- Note that facts are part of the registered vars since Ansible 2.8. For compatibility reasons, the facts
are also accessible directly as C(docker_container). Note that the returned fact will be removed in Ansible 2.12.
- Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
conflicts with the connection plugin.
- Empty if I(state) is C(absent)
    - If I(detach) is C(false), will include C(Output) attribute containing any output from container run.
returned: always
type: dict
sample: '{
"AppArmorProfile": "",
"Args": [],
"Config": {
"AttachStderr": false,
"AttachStdin": false,
"AttachStdout": false,
"Cmd": [
"/usr/bin/supervisord"
],
"Domainname": "",
"Entrypoint": null,
"Env": [
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
],
"ExposedPorts": {
"443/tcp": {},
"80/tcp": {}
},
"Hostname": "8e47bf643eb9",
"Image": "lnmp_nginx:v1",
"Labels": {},
"OnBuild": null,
"OpenStdin": false,
"StdinOnce": false,
"Tty": false,
"User": "",
"Volumes": {
"/tmp/lnmp/nginx-sites/logs/": {}
},
...
}'
'''
import os
import re
import shlex
import traceback
from distutils.version import LooseVersion
from time import sleep
from ansible.module_utils.common.text.formatters import human_to_bytes
from ansible.module_utils.docker.common import (
AnsibleDockerClient,
DifferenceTracker,
DockerBaseClass,
compare_generic,
is_image_name_id,
sanitize_result,
clean_dict_booleans_for_docker_api,
omit_none_from_dict,
parse_healthcheck,
DOCKER_COMMON_ARGS,
RequestException,
)
from ansible.module_utils.six import string_types
try:
from docker import utils
from ansible.module_utils.docker.common import docker_version
if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
from docker.types import Ulimit, LogConfig
from docker import types as docker_types
else:
from docker.utils.types import Ulimit, LogConfig
from docker.errors import DockerException, APIError, NotFound
except Exception:
# missing Docker SDK for Python handled in ansible.module_utils.docker.common
pass
# Module parameters whose values are human-readable sizes (e.g. '512M', '1G')
# and must be converted to an integer byte count (via human_to_bytes) before
# being handed to the Docker daemon.
REQUIRES_CONVERSION_TO_BYTES = [
    'kernel_memory',
    'memory',
    'memory_reservation',
    'memory_swap',
    'shm_size'
]
def is_volume_permissions(mode):
    """Return True if every comma-separated entry in *mode* is a valid Docker volume option."""
    valid_options = frozenset((
        'rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached',
        'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy',
    ))
    return all(option in valid_options for option in mode.split(','))
def parse_port_range(range_or_port, client):
    '''
    Parses a string containing either a single port or a range of ports.

    Returns a list of integers for each port in the list.
    '''
    if '-' not in range_or_port:
        # Single port.
        try:
            return [int(range_or_port)]
        except Exception:
            client.fail('Invalid port: "{0}"'.format(range_or_port))
    # Port range "start-end", both bounds inclusive.
    try:
        start, end = (int(port) for port in range_or_port.split('-'))
    except Exception:
        client.fail('Invalid port range: "{0}"'.format(range_or_port))
    if end < start:
        client.fail('Invalid port range: "{0}"'.format(range_or_port))
    return list(range(start, end + 1))
def split_colon_ipv6(text, client):
    '''
    Split string by ':', while keeping IPv6 addresses in square brackets in one component.

    Behaves like ``text.split(':')`` except that a bracketed IPv6 address such
    as ``[2001:db8::1]`` stays (part of) a single component.
    '''
    if '[' not in text:
        return text.split(':')
    start = 0
    result = []
    while start < len(text):
        i = text.find('[', start)
        if i < 0:
            # No more brackets; the remainder splits normally.
            result.extend(text[start:].split(':'))
            break
        j = text.find(']', i)
        if j < 0:
            client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
        # Split the text before the bracket, then glue the bracketed address
        # onto the last component (so "a:[::1]" -> ['a', '[::1]']).
        result.extend(text[start:i].split(':'))
        k = text.find(':', j)
        if k < 0:
            result[-1] += text[i:]
            start = len(text)
        else:
            result[-1] += text[i:k]
            if k == len(text) - 1:
                # A trailing ':' directly after the closing bracket yields a
                # final empty component, consistent with 'a:'.split(':') ==
                # ['a', '']. (The previous comparison against len(text) could
                # never be true since find() returns an index < len(text),
                # silently dropping the empty component.)
                result.append('')
                break
            start = k + 1
    return result
class TaskParameters(DockerBaseClass):
'''
Access and parse module parameters
'''
    def __init__(self, client):
        """Copy all module parameters onto this object and normalize them.

        Every module parameter becomes an attribute of the instance. Unless
        state is 'absent', values are then parsed/converted into the forms
        expected by docker-py (ports, links, volumes, ulimits, mounts, ...).

        :param client: AnsibleDockerClient instance providing module params,
            option version support info, and failure reporting.
        """
        super(TaskParameters, self).__init__()
        self.client = client
        # Pre-declare all supported parameters so attribute access is always
        # safe even when a parameter is absent from module.params.
        self.auto_remove = None
        self.blkio_weight = None
        self.capabilities = None
        self.cap_drop = None
        self.cleanup = None
        self.command = None
        self.cpu_period = None
        self.cpu_quota = None
        self.cpus = None
        self.cpuset_cpus = None
        self.cpuset_mems = None
        self.cpu_shares = None
        self.detach = None
        self.debug = None
        self.devices = None
        self.device_read_bps = None
        self.device_write_bps = None
        self.device_read_iops = None
        self.device_write_iops = None
        self.dns_servers = None
        self.dns_opts = None
        self.dns_search_domains = None
        self.domainname = None
        self.env = None
        self.env_file = None
        self.entrypoint = None
        self.etc_hosts = None
        self.exposed_ports = None
        self.force_kill = None
        self.groups = None
        self.healthcheck = None
        self.hostname = None
        self.ignore_image = None
        self.image = None
        self.init = None
        self.interactive = None
        self.ipc_mode = None
        self.keep_volumes = None
        self.kernel_memory = None
        self.kill_signal = None
        self.labels = None
        self.links = None
        self.log_driver = None
        self.output_logs = None
        self.log_options = None
        self.mac_address = None
        self.memory = None
        self.memory_reservation = None
        self.memory_swap = None
        self.memory_swappiness = None
        self.mounts = None
        self.name = None
        self.network_mode = None
        self.userns_mode = None
        self.networks = None
        self.networks_cli_compatible = None
        self.oom_killer = None
        self.oom_score_adj = None
        self.paused = None
        self.pid_mode = None
        self.pids_limit = None
        self.privileged = None
        self.purge_networks = None
        self.pull = None
        self.read_only = None
        self.recreate = None
        self.removal_wait_timeout = None
        self.restart = None
        self.restart_retries = None
        self.restart_policy = None
        self.runtime = None
        self.shm_size = None
        self.security_opts = None
        self.state = None
        self.stop_signal = None
        self.stop_timeout = None
        self.tmpfs = None
        self.trust_image_content = None
        self.tty = None
        self.user = None
        self.uts = None
        self.volumes = None
        self.volume_binds = dict()
        self.volumes_from = None
        self.volume_driver = None
        self.working_dir = None
        # Overwrite the defaults above with the actual module parameters.
        for key, value in client.module.params.items():
            setattr(self, key, value)
        self.comparisons = client.comparisons
        # If state is 'absent', parameters do not have to be parsed or interpreted.
        # Only the container's name is needed.
        if self.state == 'absent':
            return
        if self.cpus is not None:
            # Docker's NanoCpus field expects units of 1e-9 CPUs.
            self.cpus = int(round(self.cpus * 1E9))
        if self.groups:
            # In case integers are passed as groups, we need to convert them to
            # strings as docker internally treats them as strings.
            self.groups = [str(g) for g in self.groups]
        for param_name in REQUIRES_CONVERSION_TO_BYTES:
            if client.module.params.get(param_name):
                try:
                    setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
                except ValueError as exc:
                    self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
        self.publish_all_ports = False
        self.published_ports = self._parse_publish_ports()
        # The special value 'all' selects publish_all_ports instead of explicit bindings.
        if self.published_ports in ('all', 'ALL'):
            self.publish_all_ports = True
            self.published_ports = None
        self.ports = self._parse_exposed_ports(self.published_ports)
        self.log("expose ports:")
        self.log(self.ports, pretty_print=True)
        self.links = self._parse_links(self.links)
        if self.volumes:
            self.volumes = self._expand_host_paths()
        self.tmpfs = self._parse_tmpfs()
        self.env = self._get_environment()
        self.ulimits = self._parse_ulimits()
        self.sysctls = self._parse_sysctls()
        self.log_config = self._parse_log_config()
        try:
            # parse_healthcheck returns (healthcheck dict or None, bool whether
            # the healthcheck should be disabled via test == ['NONE']).
            self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
        except ValueError as e:
            self.fail(str(e))
        self.exp_links = None
        self.volume_binds = self._get_volume_binds(self.volumes)
        # 'container:NAME' references are resolved to 'container:ID'.
        self.pid_mode = self._replace_container_names(self.pid_mode)
        self.ipc_mode = self._replace_container_names(self.ipc_mode)
        self.network_mode = self._replace_container_names(self.network_mode)
        self.log("volumes:")
        self.log(self.volumes, pretty_print=True)
        self.log("volume binds:")
        self.log(self.volume_binds, pretty_print=True)
        if self.networks:
            for network in self.networks:
                network['id'] = self._get_network_id(network['name'])
                if not network['id']:
                    self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
                if network.get('links'):
                    network['links'] = self._parse_links(network['links'])
        if self.mac_address:
            # Ensure the MAC address uses colons instead of hyphens for later comparison
            self.mac_address = self.mac_address.replace('-', ':')
        if self.entrypoint:
            # convert from list to str.
            self.entrypoint = ' '.join([str(x) for x in self.entrypoint])
        if self.command:
            # convert from list to str
            if isinstance(self.command, list):
                self.command = ' '.join([str(x) for x in self.command])
        self.mounts_opt, self.expected_mounts = self._process_mounts()
        self._check_mount_target_collisions()
        # Normalize device throttle options into the dict shape docker expects.
        for param_name in ["device_read_bps", "device_write_bps"]:
            if client.module.params.get(param_name):
                self._process_rate_bps(option=param_name)
        for param_name in ["device_read_iops", "device_write_iops"]:
            if client.module.params.get(param_name):
                self._process_rate_iops(option=param_name)
    def fail(self, msg):
        # Delegate failure reporting to the Docker client, which exits the module.
        self.client.fail(msg)
@property
def update_parameters(self):
'''
Returns parameters used to update a container
'''
update_parameters = dict(
blkio_weight='blkio_weight',
cpu_period='cpu_period',
cpu_quota='cpu_quota',
cpu_shares='cpu_shares',
cpuset_cpus='cpuset_cpus',
cpuset_mems='cpuset_mems',
mem_limit='memory',
mem_reservation='memory_reservation',
memswap_limit='memory_swap',
kernel_memory='kernel_memory',
restart_policy='restart_policy',
)
result = dict()
for key, value in update_parameters.items():
if getattr(self, value, None) is not None:
if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
restart_policy = dict(Name=self.restart_policy,
MaximumRetryCount=self.restart_retries)
result[key] = restart_policy
elif self.client.option_minimal_versions[value]['supported']:
result[key] = getattr(self, value)
return result
@property
def create_parameters(self):
'''
Returns parameters used to create a container
'''
create_params = dict(
command='command',
domainname='domainname',
hostname='hostname',
user='user',
detach='detach',
stdin_open='interactive',
tty='tty',
ports='ports',
environment='env',
name='name',
entrypoint='entrypoint',
mac_address='mac_address',
labels='labels',
stop_signal='stop_signal',
working_dir='working_dir',
stop_timeout='stop_timeout',
healthcheck='healthcheck',
)
if self.client.docker_py_version < LooseVersion('3.0'):
# cpu_shares and volume_driver moved to create_host_config in > 3
create_params['cpu_shares'] = 'cpu_shares'
create_params['volume_driver'] = 'volume_driver'
result = dict(
host_config=self._host_config(),
volumes=self._get_mounts(),
)
for key, value in create_params.items():
if getattr(self, value, None) is not None:
if self.client.option_minimal_versions[value]['supported']:
result[key] = getattr(self, value)
if self.networks_cli_compatible and self.networks:
network = self.networks[0]
params = dict()
for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
if network.get(para):
params[para] = network[para]
network_config = dict()
network_config[network['name']] = self.client.create_endpoint_config(**params)
result['networking_config'] = self.client.create_networking_config(network_config)
return result
def _expand_host_paths(self):
new_vols = []
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
if re.match(r'[.~]', host):
host = os.path.abspath(os.path.expanduser(host))
new_vols.append("%s:%s:%s" % (host, container, mode))
continue
elif len(vol.split(':')) == 2:
parts = vol.split(':')
if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
host = os.path.abspath(os.path.expanduser(parts[0]))
new_vols.append("%s:%s:rw" % (host, parts[1]))
continue
new_vols.append(vol)
return new_vols
def _get_mounts(self):
'''
Return a list of container mounts.
:return:
'''
result = []
if self.volumes:
for vol in self.volumes:
if ':' in vol:
if len(vol.split(':')) == 3:
dummy, container, dummy = vol.split(':')
result.append(container)
continue
if len(vol.split(':')) == 2:
parts = vol.split(':')
if not is_volume_permissions(parts[1]):
result.append(parts[1])
continue
result.append(vol)
self.log("mounts:")
self.log(result, pretty_print=True)
return result
    def _host_config(self):
        '''
        Returns parameters used to create a HostConfig object

        Maps docker-py create_host_config() keywords to module parameter
        names, filters out unset/unsupported options, and adds the options
        that need special treatment (restart policy, mounts).
        '''
        # key: create_host_config() keyword, value: module parameter name.
        host_config_params = dict(
            port_bindings='published_ports',
            publish_all_ports='publish_all_ports',
            links='links',
            privileged='privileged',
            dns='dns_servers',
            dns_opt='dns_opts',
            dns_search='dns_search_domains',
            binds='volume_binds',
            volumes_from='volumes_from',
            network_mode='network_mode',
            userns_mode='userns_mode',
            cap_add='capabilities',
            cap_drop='cap_drop',
            extra_hosts='etc_hosts',
            read_only='read_only',
            ipc_mode='ipc_mode',
            security_opt='security_opts',
            ulimits='ulimits',
            sysctls='sysctls',
            log_config='log_config',
            mem_limit='memory',
            memswap_limit='memory_swap',
            mem_swappiness='memory_swappiness',
            oom_score_adj='oom_score_adj',
            oom_kill_disable='oom_killer',
            shm_size='shm_size',
            group_add='groups',
            devices='devices',
            pid_mode='pid_mode',
            tmpfs='tmpfs',
            init='init',
            uts_mode='uts',
            runtime='runtime',
            auto_remove='auto_remove',
            device_read_bps='device_read_bps',
            device_write_bps='device_write_bps',
            device_read_iops='device_read_iops',
            device_write_iops='device_write_iops',
            pids_limit='pids_limit',
            mounts='mounts',
            nano_cpus='cpus',
        )
        if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
            # blkio_weight can always be updated, but can only be set on creation
            # when Docker SDK for Python and Docker API are new enough
            host_config_params['blkio_weight'] = 'blkio_weight'
        if self.client.docker_py_version >= LooseVersion('3.0'):
            # cpu_shares and volume_driver moved to create_host_config in > 3
            host_config_params['cpu_shares'] = 'cpu_shares'
            host_config_params['volume_driver'] = 'volume_driver'
        params = dict()
        # Only forward options that are set and supported by the installed
        # docker-py / Docker API versions.
        for key, value in host_config_params.items():
            if getattr(self, value, None) is not None:
                if self.client.option_minimal_versions[value]['supported']:
                    params[key] = getattr(self, value)
        if self.restart_policy:
            params['restart_policy'] = dict(Name=self.restart_policy,
                                            MaximumRetryCount=self.restart_retries)
        if 'mounts' in params:
            # Use the pre-built docker_types.Mount objects from _process_mounts()
            # instead of the raw module parameter.
            params['mounts'] = self.mounts_opt
        return self.client.create_host_config(**params)
@property
def default_host_ip(self):
ip = '0.0.0.0'
if not self.networks:
return ip
for net in self.networks:
if net.get('name'):
try:
network = self.client.inspect_network(net['name'])
if network.get('Driver') == 'bridge' and \
network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
break
except NotFound as nfe:
self.client.fail(
"Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
exception=traceback.format_exc()
)
return ip
    def _parse_publish_ports(self):
        '''
        Parse ports from docker CLI syntax

        Accepted per-entry forms (docker CLI compatible):
          CONTAINER_PORT[/proto]
          HOST_PORT:CONTAINER_PORT[/proto]
          IP:HOST_PORT:CONTAINER_PORT[/proto]  (HOST_PORT may be empty)
        Ports may also be ranges like '8000-8010'. Returns 'all' for the
        special value, or a dict mapping 'port[/proto]' to one or more
        (ip[, host_port]) bind tuples.
        '''
        if self.published_ports is None:
            return None
        if 'all' in self.published_ports:
            return 'all'
        default_ip = self.default_host_ip
        binds = {}
        for port in self.published_ports:
            # split_colon_ipv6 keeps bracketed IPv6 bind addresses intact.
            parts = split_colon_ipv6(str(port), self.client)
            container_port = parts[-1]
            protocol = ''
            if '/' in container_port:
                container_port, protocol = parts[-1].split('/')
            container_ports = parse_port_range(container_port, self.client)
            p_len = len(parts)
            if p_len == 1:
                # Only a container port: bind to the default IP, ephemeral host port.
                port_binds = len(container_ports) * [(default_ip,)]
            elif p_len == 2:
                # host_port:container_port
                port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
            elif p_len == 3:
                # We only allow IPv4 and IPv6 addresses for the bind address
                ipaddr = parts[0]
                if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                    self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
                               'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
                if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                    # Strip the brackets from an IPv6 address.
                    ipaddr = ipaddr[1:-1]
                if parts[1]:
                    port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
                else:
                    # Empty host port ('ip::port'): ephemeral host port on that IP.
                    port_binds = len(container_ports) * [(ipaddr,)]
            # Collect binds per container port; multiple binds for the same
            # port are accumulated into a list.
            for bind, container_port in zip(port_binds, container_ports):
                idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
                if idx in binds:
                    old_bind = binds[idx]
                    if isinstance(old_bind, list):
                        old_bind.append(bind)
                    else:
                        binds[idx] = [old_bind, bind]
                else:
                    binds[idx] = bind
        return binds
def _get_volume_binds(self, volumes):
'''
Extract host bindings, if any, from list of volume mapping strings.
:return: dictionary of bind mappings
'''
result = dict()
if volumes:
for vol in volumes:
host = None
if ':' in vol:
parts = vol.split(':')
if len(parts) == 3:
host, container, mode = parts
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
elif len(parts) == 2:
if not is_volume_permissions(parts[1]):
host, container, mode = (vol.split(':') + ['rw'])
if host is not None:
result[host] = dict(
bind=container,
mode=mode
)
return result
def _parse_exposed_ports(self, published_ports):
'''
Parse exposed ports from docker CLI-style ports syntax.
'''
exposed = []
if self.exposed_ports:
for port in self.exposed_ports:
port = str(port).strip()
protocol = 'tcp'
match = re.search(r'(/.+$)', port)
if match:
protocol = match.group(1).replace('/', '')
port = re.sub(r'/.+$', '', port)
exposed.append((port, protocol))
if published_ports:
# Any published port should also be exposed
for publish_port in published_ports:
match = False
if isinstance(publish_port, string_types) and '/' in publish_port:
port, protocol = publish_port.split('/')
port = int(port)
else:
protocol = 'tcp'
port = int(publish_port)
for exposed_port in exposed:
if exposed_port[1] != protocol:
continue
if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
start_port, end_port = exposed_port[0].split('-')
if int(start_port) <= port <= int(end_port):
match = True
elif exposed_port[0] == port:
match = True
if not match:
exposed.append((port, protocol))
return exposed
@staticmethod
def _parse_links(links):
'''
Turn links into a dictionary
'''
if links is None:
return None
result = []
for link in links:
parsed_link = link.split(':', 1)
if len(parsed_link) == 2:
result.append((parsed_link[0], parsed_link[1]))
else:
result.append((parsed_link[0], parsed_link[0]))
return result
def _parse_ulimits(self):
'''
Turn ulimits into an array of Ulimit objects
'''
if self.ulimits is None:
return None
results = []
for limit in self.ulimits:
limits = dict()
pieces = limit.split(':')
if len(pieces) >= 2:
limits['name'] = pieces[0]
limits['soft'] = int(pieces[1])
limits['hard'] = int(pieces[1])
if len(pieces) == 3:
limits['hard'] = int(pieces[2])
try:
results.append(Ulimit(**limits))
except ValueError as exc:
self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
return results
    def _parse_sysctls(self):
        '''
        Turn sysctls into an hash of Sysctl objects
        '''
        # Sysctls are already a plain key/value dict as expected by docker-py,
        # so no conversion is necessary.
        return self.sysctls
def _parse_log_config(self):
'''
Create a LogConfig object
'''
if self.log_driver is None:
return None
options = dict(
Type=self.log_driver,
Config=dict()
)
if self.log_options is not None:
options['Config'] = dict()
for k, v in self.log_options.items():
if not isinstance(v, string_types):
self.client.module.warn(
"Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
"If this is not correct, or you want to avoid such warnings, please quote the value." % (k, str(v))
)
v = str(v)
self.log_options[k] = v
options['Config'][k] = v
try:
return LogConfig(**options)
except ValueError as exc:
self.fail('Error parsing logging options - %s' % (exc))
def _parse_tmpfs(self):
'''
Turn tmpfs into a hash of Tmpfs objects
'''
result = dict()
if self.tmpfs is None:
return result
for tmpfs_spec in self.tmpfs:
split_spec = tmpfs_spec.split(":", 1)
if len(split_spec) > 1:
result[split_spec[0]] = split_spec[1]
else:
result[split_spec[0]] = ""
return result
def _get_environment(self):
"""
If environment file is combined with explicit environment variables, the explicit environment variables
take precedence.
"""
final_env = {}
if self.env_file:
parsed_env_file = utils.parse_env_file(self.env_file)
for name, value in parsed_env_file.items():
final_env[name] = str(value)
if self.env:
for name, value in self.env.items():
if not isinstance(value, string_types):
self.fail("Non-string value found for env option. Ambiguous env options must be "
"wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
final_env[name] = str(value)
return final_env
def _get_network_id(self, network_name):
network_id = None
try:
for network in self.client.networks(names=[network_name]):
if network['Name'] == network_name:
network_id = network['Id']
break
except Exception as exc:
self.fail("Error getting network id for %s - %s" % (network_name, str(exc)))
return network_id
    def _process_mounts(self):
        # Validate the 'mounts' parameter and build two parallel lists:
        # the docker_types.Mount objects to hand to the daemon, and the plain
        # dicts later used to compare against an existing container's config.
        # Returns (None, None) when no mounts were requested.
        if self.mounts is None:
            return None, None
        mounts_list = []
        mounts_expected = []
        for mount in self.mounts:
            target = mount['target']
            datatype = mount['type']
            mount_dict = dict(mount)
            # Sanity checks (so we don't wait for docker-py to barf on input)
            if mount_dict.get('source') is None and datatype != 'tmpfs':
                self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
            # Each of these options is only valid for one specific mount type.
            mount_option_types = dict(
                volume_driver='volume',
                volume_options='volume',
                propagation='bind',
                no_copy='volume',
                labels='volume',
                tmpfs_size='tmpfs',
                tmpfs_mode='tmpfs',
            )
            for option, req_datatype in mount_option_types.items():
                if mount_dict.get(option) is not None and datatype != req_datatype:
                    self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
            # Handle volume_driver and volume_options
            volume_driver = mount_dict.pop('volume_driver')
            volume_options = mount_dict.pop('volume_options')
            if volume_driver:
                if volume_options:
                    volume_options = clean_dict_booleans_for_docker_api(volume_options)
                mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
            if mount_dict['labels']:
                mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
            # Convert human-readable tmpfs_size ('64M') to bytes and the octal
            # tmpfs_mode string to an int, as docker-py expects.
            if mount_dict.get('tmpfs_size') is not None:
                try:
                    mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
                except ValueError as exc:
                    self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
            if mount_dict.get('tmpfs_mode') is not None:
                try:
                    mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
                except Exception as dummy:
                    self.client.fail('tmp_fs mode of mount "{0}" is not an octal string!'.format(target))
            # Fill expected mount dict
            mount_expected = dict(mount)
            mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
            mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
            # Add result to lists
            mounts_list.append(docker_types.Mount(**mount_dict))
            mounts_expected.append(omit_none_from_dict(mount_expected))
        return mounts_list, mounts_expected
def _process_rate_bps(self, option):
"""
Format device_read_bps and device_write_bps option
"""
devices_list = []
for v in getattr(self, option):
device_dict = dict((x.title(), y) for x, y in v.items())
device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
devices_list.append(device_dict)
setattr(self, option, devices_list)
def _process_rate_iops(self, option):
"""
Format device_read_iops and device_write_iops option
"""
devices_list = []
for v in getattr(self, option):
device_dict = dict((x.title(), y) for x, y in v.items())
devices_list.append(device_dict)
setattr(self, option, devices_list)
def _replace_container_names(self, mode):
"""
Parse IPC and PID modes. If they contain a container name, replace
with the container's ID.
"""
if mode is None or not mode.startswith('container:'):
return mode
container_name = mode[len('container:'):]
# Try to inspect container to see whether this is an ID or a
# name (and in the latter case, retrieve it's ID)
container = self.client.get_container(container_name)
if container is None:
# If we can't find the container, issue a warning and continue with
# what the user specified.
self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
return mode
return 'container:{0}'.format(container['Id'])
def _check_mount_target_collisions(self):
last = dict()
def f(t, name):
if t in last:
if name == last[t]:
self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
else:
self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
last[t] = name
if self.expected_mounts:
for t in [m['target'] for m in self.expected_mounts]:
f(t, 'mounts')
if self.volumes:
for v in self.volumes:
vs = v.split(':')
f(vs[0 if len(vs) == 1 else 1], 'volumes')
class Container(DockerBaseClass):
    def __init__(self, container, parameters):
        """Wrap a docker inspect result together with the requested parameters.

        :param container: docker inspect dict for the container, or a falsy
            value when the container does not exist.
        :param parameters: the TaskParameters instance for this run; several
            'expected_*' attributes are initialized on it here and filled in
            later by has_different_configuration().
        """
        super(Container, self).__init__()
        self.raw = container
        self.Id = None
        self.container = container
        if container:
            self.Id = container['Id']
            self.Image = container['Image']
        self.log(self.container, pretty_print=True)
        self.parameters = parameters
        self.parameters.expected_links = None
        self.parameters.expected_ports = None
        self.parameters.expected_exposed = None
        self.parameters.expected_volumes = None
        self.parameters.expected_ulimits = None
        self.parameters.expected_sysctls = None
        self.parameters.expected_etc_hosts = None
        self.parameters.expected_env = None
        # Maps internal 'expected_*' attribute names back to the module
        # parameter whose comparison rule should be applied to them.
        self.parameters_map = dict()
        self.parameters_map['expected_links'] = 'links'
        # NOTE(review): 'expected_ports' maps to itself, unlike the other
        # entries which map back to a module parameter — presumably because
        # the comparison is registered under that name; confirm before changing.
        self.parameters_map['expected_ports'] = 'expected_ports'
        self.parameters_map['expected_exposed'] = 'exposed_ports'
        self.parameters_map['expected_volumes'] = 'volumes'
        self.parameters_map['expected_ulimits'] = 'ulimits'
        self.parameters_map['expected_sysctls'] = 'sysctls'
        self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
        self.parameters_map['expected_env'] = 'env'
        self.parameters_map['expected_entrypoint'] = 'entrypoint'
        self.parameters_map['expected_binds'] = 'volumes'
        self.parameters_map['expected_cmd'] = 'command'
        self.parameters_map['expected_devices'] = 'devices'
        self.parameters_map['expected_healthcheck'] = 'healthcheck'
        self.parameters_map['expected_mounts'] = 'mounts'
    def fail(self, msg):
        # Delegate failure reporting to the Docker client, which exits the module.
        self.parameters.client.fail(msg)
@property
def exists(self):
return True if self.container else False
@property
def removing(self):
if self.container and self.container.get('State'):
return self.container['State'].get('Status') == 'removing'
return False
@property
def running(self):
if self.container and self.container.get('State'):
if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
return True
return False
@property
def paused(self):
if self.container and self.container.get('State'):
return self.container['State'].get('Paused', False)
return False
    def _compare(self, a, b, compare):
        '''
        Compare values a and b as described in compare.

        ``compare`` is a dict with 'comparison' and 'type' keys as produced by
        the client's comparison setup; the actual work is done by
        compare_generic().
        '''
        return compare_generic(a, b, compare['comparison'], compare['type'])
def _decode_mounts(self, mounts):
if not mounts:
return mounts
result = []
empty_dict = dict()
for mount in mounts:
res = dict()
res['type'] = mount.get('Type')
res['source'] = mount.get('Source')
res['target'] = mount.get('Target')
res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
res['consistency'] = mount.get('Consistency')
res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
result.append(res)
return result
    def has_different_configuration(self, image):
        '''
        Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
        '''
        self.log('Starting has_different_configuration')
        # Normalize the requested parameters into the same shape as the
        # container's inspect output so they can be compared directly.
        self.parameters.expected_entrypoint = self._get_expected_entrypoint()
        self.parameters.expected_links = self._get_expected_links()
        self.parameters.expected_ports = self._get_expected_ports()
        self.parameters.expected_exposed = self._get_expected_exposed(image)
        self.parameters.expected_volumes = self._get_expected_volumes(image)
        self.parameters.expected_binds = self._get_expected_binds(image)
        self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
        self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
        self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
        self.parameters.expected_env = self._get_expected_env(image)
        self.parameters.expected_cmd = self._get_expected_cmd()
        self.parameters.expected_devices = self._get_expected_devices()
        self.parameters.expected_healthcheck = self._get_expected_healthcheck()
        if not self.container.get('HostConfig'):
            self.fail("has_config_diff: Error parsing container properties. HostConfig missing.")
        if not self.container.get('Config'):
            self.fail("has_config_diff: Error parsing container properties. Config missing.")
        if not self.container.get('NetworkSettings'):
            self.fail("has_config_diff: Error parsing container properties. NetworkSettings missing.")
        host_config = self.container['HostConfig']
        log_config = host_config.get('LogConfig', dict())
        config = self.container['Config']
        network = self.container['NetworkSettings']
        # The previous version of the docker module ignored the detach state by
        # assuming if the container was running, it must have been detached.
        detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
        # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
        if config.get('ExposedPorts') is not None:
            expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
        else:
            expected_exposed = []
        # Map parameters to container inspect results
        config_mapping = dict(
            expected_cmd=config.get('Cmd'),
            domainname=config.get('Domainname'),
            hostname=config.get('Hostname'),
            user=config.get('User'),
            detach=detach,
            init=host_config.get('Init'),
            interactive=config.get('OpenStdin'),
            capabilities=host_config.get('CapAdd'),
            cap_drop=host_config.get('CapDrop'),
            expected_devices=host_config.get('Devices'),
            dns_servers=host_config.get('Dns'),
            dns_opts=host_config.get('DnsOptions'),
            dns_search_domains=host_config.get('DnsSearch'),
            expected_env=(config.get('Env') or []),
            expected_entrypoint=config.get('Entrypoint'),
            expected_etc_hosts=host_config['ExtraHosts'],
            expected_exposed=expected_exposed,
            groups=host_config.get('GroupAdd'),
            ipc_mode=host_config.get("IpcMode"),
            labels=config.get('Labels'),
            expected_links=host_config.get('Links'),
            mac_address=network.get('MacAddress'),
            memory_swappiness=host_config.get('MemorySwappiness'),
            network_mode=host_config.get('NetworkMode'),
            userns_mode=host_config.get('UsernsMode'),
            oom_killer=host_config.get('OomKillDisable'),
            oom_score_adj=host_config.get('OomScoreAdj'),
            pid_mode=host_config.get('PidMode'),
            privileged=host_config.get('Privileged'),
            expected_ports=host_config.get('PortBindings'),
            read_only=host_config.get('ReadonlyRootfs'),
            runtime=host_config.get('Runtime'),
            shm_size=host_config.get('ShmSize'),
            security_opts=host_config.get("SecurityOpt"),
            stop_signal=config.get("StopSignal"),
            tmpfs=host_config.get('Tmpfs'),
            tty=config.get('Tty'),
            expected_ulimits=host_config.get('Ulimits'),
            expected_sysctls=host_config.get('Sysctls'),
            uts=host_config.get('UTSMode'),
            expected_volumes=config.get('Volumes'),
            expected_binds=host_config.get('Binds'),
            volume_driver=host_config.get('VolumeDriver'),
            volumes_from=host_config.get('VolumesFrom'),
            working_dir=config.get('WorkingDir'),
            publish_all_ports=host_config.get('PublishAllPorts'),
            expected_healthcheck=config.get('Healthcheck'),
            disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
            device_read_bps=host_config.get('BlkioDeviceReadBps'),
            device_write_bps=host_config.get('BlkioDeviceWriteBps'),
            device_read_iops=host_config.get('BlkioDeviceReadIOps'),
            device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
            pids_limit=host_config.get('PidsLimit'),
            # According to https://github.com/moby/moby/, support for HostConfig.Mounts
            # has been included at least since v17.03.0-ce, which has API version 1.26.
            # The previous tag, v1.9.1, has API version 1.21 and does not have
            # HostConfig.Mounts. I have no idea what about API 1.25...
            expected_mounts=self._decode_mounts(host_config.get('Mounts')),
            cpus=host_config.get('NanoCpus'),
        )
        # Options which don't make sense without their accompanying option
        if self.parameters.log_driver:
            config_mapping['log_driver'] = log_config.get('Type')
            config_mapping['log_options'] = log_config.get('Config')
        if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
            # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
            # it has a default value, that's why we have to jump through the hoops here
            config_mapping['auto_remove'] = host_config.get('AutoRemove')
        if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
            # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
            # stop_timeout has a hybrid role, in that it used to be something only used
            # for stopping containers, and is now also used as a container property.
            # That's why it needs special handling here.
            config_mapping['stop_timeout'] = config.get('StopTimeout')
        if self.parameters.client.docker_api_version < LooseVersion('1.22'):
            # For docker API < 1.22, update_container() is not supported. Thus
            # we need to handle all limits which are usually handled by
            # update_container() as configuration changes which require a container
            # restart.
            restart_policy = host_config.get('RestartPolicy', dict())
            # Options which don't make sense without their accompanying option
            if self.parameters.restart_policy:
                config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
            config_mapping.update(dict(
                blkio_weight=host_config.get('BlkioWeight'),
                cpu_period=host_config.get('CpuPeriod'),
                cpu_quota=host_config.get('CpuQuota'),
                cpu_shares=host_config.get('CpuShares'),
                cpuset_cpus=host_config.get('CpusetCpus'),
                cpuset_mems=host_config.get('CpusetMems'),
                kernel_memory=host_config.get("KernelMemory"),
                memory=host_config.get('Memory'),
                memory_reservation=host_config.get('MemoryReservation'),
                memory_swap=host_config.get('MemorySwap'),
                restart_policy=restart_policy.get('Name')
            ))
        differences = DifferenceTracker()
        for key, value in config_mapping.items():
            # Skip options not supported by the installed docker-py/API version.
            minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
            if not minimal_version.get('supported', True):
                continue
            compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
            self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), str(value), compare))
            if getattr(self.parameters, key, None) is not None:
                match = self._compare(getattr(self.parameters, key), value, compare)
                if not match:
                    # no match. record the differences
                    p = getattr(self.parameters, key)
                    c = value
                    if compare['type'] == 'set':
                        # Since the order does not matter, sort so that the diff output is better.
                        if p is not None:
                            p = sorted(p)
                        if c is not None:
                            c = sorted(c)
                    elif compare['type'] == 'set(dict)':
                        # Since the order does not matter, sort so that the diff output is better.
                        if key == 'expected_mounts':
                            # For selected values, use one entry as key
                            def sort_key_fn(x):
                                return x['target']
                        else:
                            # We sort the list of dictionaries by using the sorted items of a dict as its key.
                            def sort_key_fn(x):
                                return sorted((a, str(b)) for a, b in x.items())
                        if p is not None:
                            p = sorted(p, key=sort_key_fn)
                        if c is not None:
                            c = sorted(c, key=sort_key_fn)
                    differences.add(key, parameter=p, active=c)
        has_differences = not differences.empty
        return has_differences, differences
    def has_different_resource_limits(self):
        '''
        Diff parameters and container resource limits.

        Only limits that docker's update_container() can change are compared.
        Returns a tuple (bool, DifferenceTracker): whether any limit differs,
        and the recorded differences.
        '''
        if not self.container.get('HostConfig'):
            self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
        if self.parameters.client.docker_api_version < LooseVersion('1.22'):
            # update_container() call not supported
            return False, []
        host_config = self.container['HostConfig']
        restart_policy = host_config.get('RestartPolicy') or dict()
        # Map module parameter names to the container's current limit values.
        config_mapping = dict(
            blkio_weight=host_config.get('BlkioWeight'),
            cpu_period=host_config.get('CpuPeriod'),
            cpu_quota=host_config.get('CpuQuota'),
            cpu_shares=host_config.get('CpuShares'),
            cpuset_cpus=host_config.get('CpusetCpus'),
            cpuset_mems=host_config.get('CpusetMems'),
            kernel_memory=host_config.get("KernelMemory"),
            memory=host_config.get('Memory'),
            memory_reservation=host_config.get('MemoryReservation'),
            memory_swap=host_config.get('MemorySwap'),
            restart_policy=restart_policy.get('Name')
        )
        # Options which don't make sense without their accompanying option
        if self.parameters.restart_policy:
            config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
        differences = DifferenceTracker()
        for key, value in config_mapping.items():
            # Only compare options the user actually set (truthy values).
            if getattr(self.parameters, key, None):
                compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
                match = self._compare(getattr(self.parameters, key), value, compare)
                if not match:
                    # no match. record the differences
                    differences.add(key, parameter=getattr(self.parameters, key), active=value)
        different = not differences.empty
        return different, differences
    def has_network_differences(self):
        '''
        Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6

        Returns a tuple (bool, list of dicts); each dict carries the requested
        ``parameter`` network and the currently connected ``container`` view
        (or None when not connected at all).
        '''
        different = False
        differences = []
        if not self.parameters.networks:
            return different, differences
        if not self.container.get('NetworkSettings'):
            self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
        connected_networks = self.container['NetworkSettings']['Networks']
        for network in self.parameters.networks:
            network_info = connected_networks.get(network['name'])
            if network_info is None:
                # Not connected at all: record the whole requested network.
                different = True
                differences.append(dict(
                    parameter=network,
                    container=None
                ))
            else:
                diff = False
                network_info_ipam = network_info.get('IPAMConfig') or {}
                # Fixed IPs must match exactly when requested.
                if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
                    diff = True
                if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
                    diff = True
                # Aliases/links use 'allow_more_present': extra entries on the
                # container do not count as a difference.
                if network.get('aliases'):
                    if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
                        diff = True
                if network.get('links'):
                    expected_links = []
                    for link, alias in network['links']:
                        expected_links.append("%s:%s" % (link, alias))
                    if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
                        diff = True
                if diff:
                    different = True
                    differences.append(dict(
                        parameter=network,
                        container=dict(
                            name=network['name'],
                            ipv4_address=network_info_ipam.get('IPv4Address'),
                            ipv6_address=network_info_ipam.get('IPv6Address'),
                            aliases=network_info.get('Aliases'),
                            links=network_info.get('Links')
                        )
                    ))
        return different, differences
def has_extra_networks(self):
'''
Check if the container is connected to non-requested networks
'''
extra_networks = []
extra = False
if not self.container.get('NetworkSettings'):
self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
connected_networks = self.container['NetworkSettings'].get('Networks')
if connected_networks:
for network, network_config in connected_networks.items():
keep = False
if self.parameters.networks:
for expected_network in self.parameters.networks:
if expected_network['name'] == network:
keep = True
if not keep:
extra = True
extra_networks.append(dict(name=network, id=network_config['NetworkID']))
return extra, extra_networks
def _get_expected_devices(self):
if not self.parameters.devices:
return None
expected_devices = []
for device in self.parameters.devices:
parts = device.split(':')
if len(parts) == 1:
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[0],
PathOnHost=parts[0]
))
elif len(parts) == 2:
parts = device.split(':')
expected_devices.append(
dict(
CgroupPermissions='rwm',
PathInContainer=parts[1],
PathOnHost=parts[0]
)
)
else:
expected_devices.append(
dict(
CgroupPermissions=parts[2],
PathInContainer=parts[1],
PathOnHost=parts[0]
))
return expected_devices
def _get_expected_entrypoint(self):
if not self.parameters.entrypoint:
return None
return shlex.split(self.parameters.entrypoint)
def _get_expected_ports(self):
if not self.parameters.published_ports:
return None
expected_bound_ports = {}
for container_port, config in self.parameters.published_ports.items():
if isinstance(container_port, int):
container_port = "%s/tcp" % container_port
if len(config) == 1:
if isinstance(config[0], int):
expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
elif isinstance(config[0], tuple):
expected_bound_ports[container_port] = []
for host_ip, host_port in config:
expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': str(host_port)})
else:
expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': str(config[1])}]
return expected_bound_ports
def _get_expected_links(self):
if self.parameters.links is None:
return None
self.log('parameter links:')
self.log(self.parameters.links, pretty_print=True)
exp_links = []
for link, alias in self.parameters.links:
exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
return exp_links
def _get_expected_binds(self, image):
self.log('_get_expected_binds')
image_vols = []
if image:
image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
param_vols = []
if self.parameters.volumes:
for vol in self.parameters.volumes:
host = None
if ':' in vol:
if len(vol.split(':')) == 3:
host, container, mode = vol.split(':')
if not is_volume_permissions(mode):
self.fail('Found invalid volumes mode: {0}'.format(mode))
if len(vol.split(':')) == 2:
parts = vol.split(':')
if not is_volume_permissions(parts[1]):
host, container, mode = vol.split(':') + ['rw']
if host:
param_vols.append("%s:%s:%s" % (host, container, mode))
result = list(set(image_vols + param_vols))
self.log("expected_binds:")
self.log(result, pretty_print=True)
return result
def _get_image_binds(self, volumes):
'''
Convert array of binds to array of strings with format host_path:container_path:mode
:param volumes: array of bind dicts
:return: array of strings
'''
results = []
if isinstance(volumes, dict):
results += self._get_bind_from_dict(volumes)
elif isinstance(volumes, list):
for vol in volumes:
results += self._get_bind_from_dict(vol)
return results
@staticmethod
def _get_bind_from_dict(volume_dict):
results = []
if volume_dict:
for host_path, config in volume_dict.items():
if isinstance(config, dict) and config.get('bind'):
container_path = config.get('bind')
mode = config.get('mode', 'rw')
results.append("%s:%s:%s" % (host_path, container_path, mode))
return results
    def _get_expected_volumes(self, image):
        """Compute the volume set the container is expected to have.

        Merges the image's declared volumes with the module's ``volumes``
        parameter: for host:container[:mode] entries only the container-side
        path is kept, plain entries are used verbatim.

        :param image: image inspect data (or a falsy value)
        :return: dict mapping container paths to empty dicts, or None when empty
        """
        self.log('_get_expected_volumes')
        expected_vols = dict()
        if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
            expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
        if self.parameters.volumes:
            for vol in self.parameters.volumes:
                container = None
                if ':' in vol:
                    if len(vol.split(':')) == 3:
                        # host:container:mode — validate the mode part.
                        dummy, container, mode = vol.split(':')
                        if not is_volume_permissions(mode):
                            self.fail('Found invalid volumes mode: {0}'.format(mode))
                    if len(vol.split(':')) == 2:
                        parts = vol.split(':')
                        # host:container only when the second piece is not a mode.
                        if not is_volume_permissions(parts[1]):
                            dummy, container, mode = vol.split(':') + ['rw']
                new_vol = dict()
                if container:
                    new_vol[container] = dict()
                else:
                    # Plain volume entry (no host part).
                    new_vol[vol] = dict()
                expected_vols.update(new_vol)
        if not expected_vols:
            expected_vols = None
        self.log("expected_volumes:")
        self.log(expected_vols, pretty_print=True)
        return expected_vols
def _get_expected_env(self, image):
self.log('_get_expected_env')
expected_env = dict()
if image and image[self.parameters.client.image_inspect_source].get('Env'):
for env_var in image[self.parameters.client.image_inspect_source]['Env']:
parts = env_var.split('=', 1)
expected_env[parts[0]] = parts[1]
if self.parameters.env:
expected_env.update(self.parameters.env)
param_env = []
for key, value in expected_env.items():
param_env.append("%s=%s" % (key, value))
return param_env
def _get_expected_exposed(self, image):
self.log('_get_expected_exposed')
image_ports = []
if image:
image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
param_ports = []
if self.parameters.ports:
param_ports = [str(p[0]) + '/' + p[1] for p in self.parameters.ports]
result = list(set(image_ports + param_ports))
self.log(result, pretty_print=True)
return result
def _get_expected_ulimits(self, config_ulimits):
self.log('_get_expected_ulimits')
if config_ulimits is None:
return None
results = []
for limit in config_ulimits:
results.append(dict(
Name=limit.name,
Soft=limit.soft,
Hard=limit.hard
))
return results
def _get_expected_sysctls(self, config_sysctls):
self.log('_get_expected_sysctls')
if config_sysctls is None:
return None
result = dict()
for key, value in config_sysctls.items():
result[key] = str(value)
return result
def _get_expected_cmd(self):
self.log('_get_expected_cmd')
if not self.parameters.command:
return None
return shlex.split(self.parameters.command)
def _convert_simple_dict_to_list(self, param_name, join_with=':'):
if getattr(self.parameters, param_name, None) is None:
return None
results = []
for key, value in getattr(self.parameters, param_name).items():
results.append("%s%s%s" % (key, join_with, value))
return results
def _normalize_port(self, port):
if '/' not in port:
return port + '/tcp'
return port
def _get_expected_healthcheck(self):
self.log('_get_expected_healthcheck')
expected_healthcheck = dict()
if self.parameters.healthcheck:
expected_healthcheck.update([(k.title().replace("_", ""), v)
for k, v in self.parameters.healthcheck.items()])
return expected_healthcheck
class ContainerManager(DockerBaseClass):
    '''
    Perform container management tasks

    Instantiating this class runs the whole task: it reads the desired state
    from the module parameters, converges the container towards it (present/
    started/stopped/absent), and fills in ``self.results`` for module exit.
    '''
    def __init__(self, client):
        # client: the AnsibleDockerClient wrapper around the docker SDK.
        super(ContainerManager, self).__init__()
        # Warn about option combinations that are silently ignored.
        if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
            client.module.warn('log_options is ignored when log_driver is not specified')
        if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
            client.module.warn('healthcheck is ignored when test is not specified')
        if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
            client.module.warn('restart_retries is ignored when restart_policy is not specified')
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {'changed': False, 'actions': []}
        self.diff = {}
        self.diff_tracker = DifferenceTracker()
        self.facts = {}
        state = self.parameters.state
        # Dispatch on the requested state; this performs all the work.
        if state in ('stopped', 'started', 'present'):
            self.present(state)
        elif state == 'absent':
            self.absent()
        # 'actions' is debugging detail; hide it unless check mode or debug.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')
        if self.client.module._diff or self.parameters.debug:
            self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff
        if self.facts:
            self.results['ansible_facts'] = {'docker_container': self.facts}
            self.results['container'] = self.facts
def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
delay = 1.0
total_wait = 0
while True:
# Inspect container
result = self.client.get_container_by_id(container_id)
if result is None:
if accept_removal:
return
msg = 'Encontered vanished container while waiting for container "{0}"'
self.fail(msg.format(container_id))
# Check container state
state = result.get('State', {}).get('Status')
if complete_states is not None and state in complete_states:
return
if wait_states is not None and state not in wait_states:
msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"'
self.fail(msg.format(container_id, state))
# Wait
if max_wait is not None:
if total_wait > max_wait:
msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
self.fail(msg.format(container_id, max_wait))
if total_wait + delay > max_wait:
delay = max_wait - total_wait
sleep(delay)
total_wait += delay
# Exponential backoff, but never wait longer than 10 seconds
# (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
# until the maximal 10 seconds delay is reached. By then, the
# code will have slept for ~1.5 minutes.)
delay = min(delay * 1.1, 10)
    def present(self, state):
        """Converge towards states 'present', 'started' or 'stopped'.

        Creates or recreates the container as needed, applies limit and
        network updates, and finally adjusts running/paused state.
        """
        container = self._get_container(self.parameters.name)
        # Remember the pre-change state for diff reporting.
        was_running = container.running
        was_paused = container.paused
        container_created = False
        # If the image parameter was passed then we need to deal with the image
        # version comparison. Otherwise we handle this depending on whether
        # the container already runs or not; in the former case, in case the
        # container needs to be restarted, we use the existing container's
        # image ID.
        image = self._get_image()
        self.log(image, pretty_print=True)
        if not container.exists or container.removing:
            # New container
            if container.removing:
                self.log('Found container in removal phase')
            else:
                self.log('No container found')
            if not self.parameters.image:
                self.fail('Cannot create container when image is not specified!')
            self.diff_tracker.add('exists', parameter=True, active=False)
            if container.removing and not self.check_mode:
                # Wait for container to be removed before trying to create it
                self.wait_for_state(
                    container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
            new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
            if new_container:
                container = new_container
                container_created = True
        else:
            # Existing container
            different, differences = container.has_different_configuration(image)
            image_different = False
            if self.parameters.comparisons['image']['comparison'] == 'strict':
                image_different = self._image_is_different(image, container)
            if image_different or different or self.parameters.recreate:
                # Something forces a recreate: record diffs, then replace the
                # container (stop -> remove -> create).
                self.diff_tracker.merge(differences)
                self.diff['differences'] = differences.get_legacy_docker_container_diffs()
                if image_different:
                    self.diff['image_different'] = True
                self.log("differences")
                self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
                image_to_use = self.parameters.image
                if not image_to_use and container and container.Image:
                    # Fall back to the currently used image ID.
                    image_to_use = container.Image
                if not image_to_use:
                    self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
                if container.running:
                    self.container_stop(container.Id)
                self.container_remove(container.Id)
                if not self.check_mode:
                    self.wait_for_state(
                        container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
                new_container = self.container_create(image_to_use, self.parameters.create_parameters)
                if new_container:
                    container = new_container
                container_created = True
        if container and container.exists:
            container = self.update_limits(container)
            container = self.update_networks(container, container_created)
            if state == 'started' and not container.running:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                container = self.container_start(container.Id)
            elif state == 'started' and self.parameters.restart:
                self.diff_tracker.add('running', parameter=True, active=was_running)
                self.diff_tracker.add('restarted', parameter=True, active=False)
                container = self.container_restart(container.Id)
            elif state == 'stopped' and container.running:
                self.diff_tracker.add('running', parameter=False, active=was_running)
                self.container_stop(container.Id)
                container = self._get_container(container.Id)
            # Pause state is only managed for started containers.
            if state == 'started' and container.paused is not None and container.paused != self.parameters.paused:
                self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
                if not self.check_mode:
                    try:
                        if self.parameters.paused:
                            self.client.pause(container=container.Id)
                        else:
                            self.client.unpause(container=container.Id)
                    except Exception as exc:
                        self.fail("Error %s container %s: %s" % (
                            "pausing" if self.parameters.paused else "unpausing", container.Id, str(exc)
                        ))
                    container = self._get_container(container.Id)
                self.results['changed'] = True
                self.results['actions'].append(dict(set_paused=self.parameters.paused))
        self.facts = container.raw
def absent(self):
container = self._get_container(self.parameters.name)
if container.exists:
if container.running:
self.diff_tracker.add('running', parameter=False, active=True)
self.container_stop(container.Id)
self.diff_tracker.add('exists', parameter=False, active=True)
self.container_remove(container.Id)
    def fail(self, msg, **kwargs):
        # Delegate failure handling to the client, which exits the module.
        self.client.fail(msg, **kwargs)
    def _output_logs(self, msg):
        # Forward collected container output to the Ansible module log.
        self.client.module.log(msg=msg)
    def _get_container(self, container):
        '''
        Expects container ID or Name. Returns a container object
        (freshly inspected via the client; the object also represents
        a non-existing container when inspection returns nothing).
        '''
        return Container(self.client.get_container(container), self.parameters)
    def _get_image(self):
        """Inspect (and when requested, pull) the configured image.

        Returns the image inspect dict or None when no image was specified.
        Updates self.results: records a pull, and in check mode claims a
        pull would happen when the image is absent.
        """
        if not self.parameters.image:
            self.log('No image specified')
            return None
        if is_image_name_id(self.parameters.image):
            # An explicit image ID: never pull, just look it up.
            image = self.client.find_image_by_id(self.parameters.image)
        else:
            repository, tag = utils.parse_repository_tag(self.parameters.image)
            if not tag:
                tag = "latest"
            image = self.client.find_image(repository, tag)
            if not image or self.parameters.pull:
                if not self.check_mode:
                    self.log("Pull the image.")
                    image, alreadyToLatest = self.client.pull_image(repository, tag)
                    if alreadyToLatest:
                        self.results['changed'] = False
                    else:
                        self.results['changed'] = True
                        self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
                elif not image:
                    # If the image isn't there, claim we'll pull.
                    # (Implicitly: if the image is there, claim it already was latest.)
                    self.results['changed'] = True
                    self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
        self.log("image")
        self.log(image, pretty_print=True)
        return image
def _image_is_different(self, image, container):
if image and image.get('Id'):
if container and container.Image:
if image.get('Id') != container.Image:
self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
return True
return False
    def update_limits(self, container):
        """Apply an in-place 'docker update' when resource limits differ.

        :param container: current Container object
        :return: a refreshed Container after an update, else the input
        """
        limits_differ, different_limits = container.has_different_resource_limits()
        if limits_differ:
            self.log("limit differences:")
            self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
            self.diff_tracker.merge(different_limits)
        if limits_differ and not self.check_mode:
            self.container_update(container.Id, self.parameters.update_parameters)
            return self._get_container(container.Id)
        return container
    def update_networks(self, container, container_created):
        """Converge the container's network connections.

        Connects missing or mismatched networks; when the networks
        comparison is 'strict' (or purge_networks is set), also disconnects
        networks that were not requested.

        :param container: current Container object
        :param container_created: True when the container was just
            (re)created, which forces the check even for comparison 'ignore'
        :return: possibly refreshed Container object
        """
        updated_container = container
        if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
            has_network_differences, network_differences = container.has_network_differences()
            if has_network_differences:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(network_differences=network_differences))
                else:
                    self.diff['differences'] = [dict(network_differences=network_differences)]
                for netdiff in network_differences:
                    self.diff_tracker.add(
                        'network.{0}'.format(netdiff['parameter']['name']),
                        parameter=netdiff['parameter'],
                        active=netdiff['container']
                    )
                self.results['changed'] = True
                updated_container = self._add_networks(container, network_differences)
        if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
            has_extra_networks, extra_networks = container.has_extra_networks()
            if has_extra_networks:
                if self.diff.get('differences'):
                    self.diff['differences'].append(dict(purge_networks=extra_networks))
                else:
                    self.diff['differences'] = [dict(purge_networks=extra_networks)]
                for extra_network in extra_networks:
                    self.diff_tracker.add(
                        'network.{0}'.format(extra_network['name']),
                        active=extra_network
                    )
                self.results['changed'] = True
                updated_container = self._purge_networks(container, extra_networks)
        return updated_container
    def _add_networks(self, container, differences):
        """(Re)connect the container to each differing network.

        For each difference: first disconnect when currently connected with
        wrong options, then connect with the requested parameters.
        """
        for diff in differences:
            # remove the container from the network, if connected
            if diff.get('container'):
                self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
                if not self.check_mode:
                    try:
                        self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
                    except Exception as exc:
                        self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
                                                                                          str(exc)))
            # connect to the network
            params = dict()
            # Only pass options the user actually requested.
            for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
                if diff['parameter'].get(para):
                    params[para] = diff['parameter'][para]
            self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
            if not self.check_mode:
                try:
                    self.log("Connecting container to network %s" % diff['parameter']['id'])
                    self.log(params, pretty_print=True)
                    self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
                except Exception as exc:
                    self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], str(exc)))
        return self._get_container(container.Id)
def _purge_networks(self, container, networks):
for network in networks:
self.results['actions'].append(dict(removed_from_network=network['name']))
if not self.check_mode:
try:
self.client.disconnect_container_from_network(container.Id, network['name'])
except Exception as exc:
self.fail("Error disconnecting container from network %s - %s" % (network['name'],
str(exc)))
return self._get_container(container.Id)
    def container_create(self, image, create_parameters):
        """Create (but do not start) a new container; honours check mode.

        :param image: image name/ID to create from
        :param create_parameters: kwargs for the docker SDK create_container()
        :return: new Container object, or None in check mode
        """
        self.log("create container")
        self.log("image: %s parameters:" % image)
        self.log(create_parameters, pretty_print=True)
        self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
        self.results['changed'] = True
        new_container = None
        if not self.check_mode:
            try:
                new_container = self.client.create_container(image, **create_parameters)
                self.client.report_warnings(new_container)
            except Exception as exc:
                self.fail("Error creating container: %s" % str(exc))
            return self._get_container(new_container['Id'])
        # Check mode: nothing was created.
        return new_container
    def container_start(self, container_id):
        """Start the container; with detach=False, wait for it to finish and collect output.

        :param container_id: ID of the container to start
        :return: a Container object; when not detached, its raw data carries
            the container's output under 'Output'
        """
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.client.start(container=container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, str(exc)))
            if self.parameters.detach is False:
                # Non-detached: block until the container exits.
                if self.client.docker_py_version >= LooseVersion('3.0'):
                    # docker SDK >= 3.0 returns a dict from wait().
                    status = self.client.wait(container_id)['StatusCode']
                else:
                    status = self.client.wait(container_id)
                if self.parameters.auto_remove:
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.parameters.output_logs:
                        self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    config = self.client.inspect_container(container_id)
                    logging_driver = config['HostConfig']['LogConfig']['Type']
                    # Logs can only be fetched for these drivers.
                    if logging_driver in ('json-file', 'journald'):
                        output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
                        if self.parameters.output_logs:
                            self._output_logs(msg=output)
                    else:
                        output = "Result logged using `%s` driver" % logging_driver
                if status != 0:
                    # Non-zero exit status of the container fails the task.
                    self.fail(output, status=status)
                if self.parameters.cleanup:
                    self.container_remove(container_id, force=True)
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                return insp
        return self._get_container(container_id)
def container_remove(self, container_id, link=False, force=False):
volume_state = (not self.parameters.keep_volumes)
self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
self.results['changed'] = True
response = None
if not self.check_mode:
count = 0
while True:
try:
response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
except NotFound as dummy:
pass
except APIError as exc:
if 'Unpause the container before stopping or killing' in exc.explanation:
# New docker daemon versions do not allow containers to be removed
# if they are paused. Make sure we don't end up in an infinite loop.
if count == 3:
self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc)))
count += 1
# Unpause
try:
self.client.unpause(container=container_id)
except Exception as exc2:
self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2)))
# Now try again
continue
if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
pass
else:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
except Exception as exc:
self.fail("Error removing container %s: %s" % (container_id, str(exc)))
# We only loop when explicitly requested by 'continue'
break
return response
def container_update(self, container_id, update_parameters):
if update_parameters:
self.log("update container %s" % (container_id))
self.log(update_parameters, pretty_print=True)
self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
self.results['changed'] = True
if not self.check_mode and callable(getattr(self.client, 'update_container')):
try:
result = self.client.update_container(container_id, **update_parameters)
self.client.report_warnings(result)
except Exception as exc:
self.fail("Error updating container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
def container_kill(self, container_id):
self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
self.results['changed'] = True
response = None
if not self.check_mode:
try:
if self.parameters.kill_signal:
response = self.client.kill(container_id, signal=self.parameters.kill_signal)
else:
response = self.client.kill(container_id)
except Exception as exc:
self.fail("Error killing container %s: %s" % (container_id, exc))
return response
def container_restart(self, container_id):
self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
self.results['changed'] = True
if not self.check_mode:
try:
if self.parameters.stop_timeout:
dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
else:
dummy = self.client.restart(container_id)
except Exception as exc:
self.fail("Error restarting container %s: %s" % (container_id, str(exc)))
return self._get_container(container_id)
    def container_stop(self, container_id):
        """Stop the container (or kill it when force_kill is set), retrying while paused.

        :param container_id: ID of the container to stop
        :return: the docker API response (None in check mode or when killed)
        """
        if self.parameters.force_kill:
            self.container_kill(container_id)
            return
        self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
        self.results['changed'] = True
        response = None
        if not self.check_mode:
            count = 0
            while True:
                try:
                    if self.parameters.stop_timeout:
                        response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
                    else:
                        response = self.client.stop(container_id)
                except APIError as exc:
                    if 'Unpause the container before stopping or killing' in exc.explanation:
                        # New docker daemon versions do not allow containers to be removed
                        # if they are paused. Make sure we don't end up in an infinite loop.
                        if count == 3:
                            self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, str(exc)))
                        count += 1
                        # Unpause
                        try:
                            self.client.unpause(container=container_id)
                        except Exception as exc2:
                            self.fail("Error unpausing container %s for removal: %s" % (container_id, str(exc2)))
                        # Now try again
                        continue
                    self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
                except Exception as exc:
                    self.fail("Error stopping container %s: %s" % (container_id, str(exc)))
                # We only loop when explicitly requested by 'continue'
                break
        return response
def detect_ipvX_address_usage(client):
    '''
    Helper function to detect whether any specified network uses ipv4_address or ipv6_address
    '''
    networks = client.module.params.get("networks") or []
    return any(
        net.get('ipv4_address') is not None or net.get('ipv6_address') is not None
        for net in networks
    )
class AnsibleDockerClientContainer(AnsibleDockerClient):
    # A list of module options which are not docker container properties
    # (they control module behaviour instead), plus the common docker
    # connection arguments; these are skipped when building comparisons.
    __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
        'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
        'recreate', 'restart', 'state', 'trust_image_content', 'networks', 'cleanup', 'kill_signal',
        'output_logs', 'paused', 'removal_wait_timeout'
    ] + list(DOCKER_COMMON_ARGS.keys()))
def _parse_comparisons(self):
comparisons = {}
comp_aliases = {}
# Put in defaults
explicit_types = dict(
command='list',
devices='set(dict)',
dns_search_domains='list',
dns_servers='list',
env='set',
entrypoint='list',
etc_hosts='set',
mounts='set(dict)',
networks='set(dict)',
ulimits='set(dict)',
device_read_bps='set(dict)',
device_write_bps='set(dict)',
device_read_iops='set(dict)',
device_write_iops='set(dict)',
)
all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
default_values = dict(
stop_timeout='ignore',
)
for option, data in self.module.argument_spec.items():
all_options.add(option)
for alias in data.get('aliases', []):
all_options.add(alias)
# Ignore options which aren't used as container properties
if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
continue
# Determine option type
if option in explicit_types:
datatype = explicit_types[option]
elif data['type'] == 'list':
datatype = 'set'
elif data['type'] == 'dict':
datatype = 'dict'
else:
datatype = 'value'
# Determine comparison type
if option in default_values:
comparison = default_values[option]
elif datatype in ('list', 'value'):
comparison = 'strict'
else:
comparison = 'allow_more_present'
comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
# Keep track of aliases
comp_aliases[option] = option
for alias in data.get('aliases', []):
comp_aliases[alias] = option
# Process legacy ignore options
if self.module.params['ignore_image']:
comparisons['image']['comparison'] = 'ignore'
if self.module.params['purge_networks']:
comparisons['networks']['comparison'] = 'strict'
# Process options
if self.module.params.get('comparisons'):
# If '*' appears in comparisons, process it first
if '*' in self.module.params['comparisons']:
value = self.module.params['comparisons']['*']
if value not in ('strict', 'ignore'):
self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
for option, v in comparisons.items():
if option == 'networks':
# `networks` is special: only update if
# some value is actually specified
if self.module.params['networks'] is None:
continue
v['comparison'] = value
# Now process all other comparisons.
comp_aliases_used = {}
for key, value in self.module.params['comparisons'].items():
if key == '*':
continue
# Find main key
key_main = comp_aliases.get(key)
if key_main is None:
if key_main in all_options:
self.fail("The module option '%s' cannot be specified in the comparisons dict, "
"since it does not correspond to container's state!" % key)
self.fail("Unknown module option '%s' in comparisons dict!" % key)
if key_main in comp_aliases_used:
self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
comp_aliases_used[key_main] = key
# Check value and update accordingly
if value in ('strict', 'ignore'):
comparisons[key_main]['comparison'] = value
elif value == 'allow_more_present':
if comparisons[key_main]['type'] == 'value':
self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
comparisons[key_main]['comparison'] = value
else:
self.fail("Unknown comparison mode '%s'!" % value)
# Add implicit options
comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
comparisons['disable_healthcheck'] = dict(type='value',
comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
name='disable_healthcheck')
# Check legacy values
if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
self.module.warn('The ignore_image option has been overridden by the comparisons option!')
if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
self.module.warn('The purge_networks option has been overridden by the comparisons option!')
self.comparisons = comparisons
def _get_additional_minimal_versions(self):
stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
if stop_timeout_supported:
stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
if stop_timeout_needed_for_update and not stop_timeout_supported:
# We warn (instead of fail) since in older versions, stop_timeout was not used
# to update the container's configuration, but only when stopping a container.
self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
"the container's stop_timeout configuration. "
"If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
else:
if stop_timeout_needed_for_update and not stop_timeout_supported:
# We warn (instead of fail) since in older versions, stop_timeout was not used
# to update the container's configuration, but only when stopping a container.
self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
"update the container's stop_timeout configuration." % (self.docker_api_version_str,))
self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
    def __init__(self, **kwargs):
        """Docker client specialized for the docker_container module.

        Declares the minimal Docker API / Docker SDK for Python versions
        required per module option, wires up comparison parsing, and applies
        the legacy option defaults when ``container_default_behavior`` is
        'compatibility'.
        """
        # Per-option minimal version requirements; empty dicts mean "no
        # special requirement" but still register the option for tracking.
        option_minimal_versions = dict(
            # internal options
            log_config=dict(),
            publish_all_ports=dict(),
            ports=dict(),
            volume_binds=dict(),
            name=dict(),
            # normal options
            device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
            dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
            ipc_mode=dict(docker_api_version='1.25'),
            mac_address=dict(docker_api_version='1.25'),
            oom_score_adj=dict(docker_api_version='1.22'),
            shm_size=dict(docker_api_version='1.22'),
            stop_signal=dict(docker_api_version='1.21'),
            tmpfs=dict(docker_api_version='1.22'),
            volume_driver=dict(docker_api_version='1.21'),
            memory_reservation=dict(docker_api_version='1.21'),
            kernel_memory=dict(docker_api_version='1.21'),
            auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
            healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
            init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
            runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
            sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
            userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
            pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
            mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
            cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
            # specials
            ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
                                        detect_usage=detect_ipvX_address_usage,
                                        usage_msg='ipv4_address or ipv6_address in networks'),
            stop_timeout=dict(),  # see _get_additional_minimal_versions()
        )

        super(AnsibleDockerClientContainer, self).__init__(
            option_minimal_versions=option_minimal_versions,
            option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
            **kwargs
        )

        # Older daemons (API < 1.21) expose the image configuration under
        # 'ContainerConfig' instead of 'Config'.
        self.image_inspect_source = 'Config'
        if self.docker_api_version < LooseVersion('1.21'):
            self.image_inspect_source = 'ContainerConfig'

        self._get_additional_minimal_versions()
        self._parse_comparisons()

        if self.module.params['container_default_behavior'] is None:
            self.module.params['container_default_behavior'] = 'compatibility'
            self.module.deprecate(
                'The container_default_behavior option will change its default value from "compatibility" to '
                '"no_defaults" in Ansible 2.14. To remove this warning, please specify an explicit value for it now',
                version='2.14'
            )
        if self.module.params['container_default_behavior'] == 'compatibility':
            # Re-apply the historical implicit defaults for these options so
            # existing playbooks keep their pre-deprecation behavior.
            old_default_values = dict(
                auto_remove=False,
                detach=True,
                init=False,
                interactive=False,
                memory="0",
                paused=False,
                privileged=False,
                read_only=False,
                tty=False,
            )
            for param, value in old_default_values.items():
                if self.module.params[param] is None:
                    self.module.params[param] = value
def main():
    """Entry point: declare the module interface and run the container manager.

    Builds the Ansible argument spec, instantiates the specialized Docker
    client, emits deprecation warnings for network-related behavior changes,
    and delegates all container state handling to ContainerManager.
    """
    # Full module interface; option semantics are documented in the module's
    # DOCUMENTATION block elsewhere in this file.
    argument_spec = dict(
        auto_remove=dict(type='bool'),
        blkio_weight=dict(type='int'),
        capabilities=dict(type='list', elements='str'),
        cap_drop=dict(type='list', elements='str'),
        cleanup=dict(type='bool', default=False),
        command=dict(type='raw'),
        comparisons=dict(type='dict'),
        container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
        cpu_period=dict(type='int'),
        cpu_quota=dict(type='int'),
        cpus=dict(type='float'),
        cpuset_cpus=dict(type='str'),
        cpuset_mems=dict(type='str'),
        cpu_shares=dict(type='int'),
        detach=dict(type='bool'),
        devices=dict(type='list', elements='str'),
        device_read_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_write_bps=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='str'),
        )),
        device_read_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        device_write_iops=dict(type='list', elements='dict', options=dict(
            path=dict(required=True, type='str'),
            rate=dict(required=True, type='int'),
        )),
        dns_servers=dict(type='list', elements='str'),
        dns_opts=dict(type='list', elements='str'),
        dns_search_domains=dict(type='list', elements='str'),
        domainname=dict(type='str'),
        entrypoint=dict(type='list', elements='str'),
        env=dict(type='dict'),
        env_file=dict(type='path'),
        etc_hosts=dict(type='dict'),
        exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
        force_kill=dict(type='bool', default=False, aliases=['forcekill']),
        groups=dict(type='list', elements='str'),
        healthcheck=dict(type='dict', options=dict(
            test=dict(type='raw'),
            interval=dict(type='str'),
            timeout=dict(type='str'),
            start_period=dict(type='str'),
            retries=dict(type='int'),
        )),
        hostname=dict(type='str'),
        ignore_image=dict(type='bool', default=False),
        image=dict(type='str'),
        init=dict(type='bool'),
        interactive=dict(type='bool'),
        ipc_mode=dict(type='str'),
        keep_volumes=dict(type='bool', default=True),
        kernel_memory=dict(type='str'),
        kill_signal=dict(type='str'),
        labels=dict(type='dict'),
        links=dict(type='list', elements='str'),
        log_driver=dict(type='str'),
        log_options=dict(type='dict', aliases=['log_opt']),
        mac_address=dict(type='str'),
        memory=dict(type='str'),
        memory_reservation=dict(type='str'),
        memory_swap=dict(type='str'),
        memory_swappiness=dict(type='int'),
        mounts=dict(type='list', elements='dict', options=dict(
            target=dict(type='str', required=True),
            source=dict(type='str'),
            type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
            read_only=dict(type='bool'),
            consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
            propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
            no_copy=dict(type='bool'),
            labels=dict(type='dict'),
            volume_driver=dict(type='str'),
            volume_options=dict(type='dict'),
            tmpfs_size=dict(type='str'),
            tmpfs_mode=dict(type='str'),
        )),
        name=dict(type='str', required=True),
        network_mode=dict(type='str'),
        networks=dict(type='list', elements='dict', options=dict(
            name=dict(type='str', required=True),
            ipv4_address=dict(type='str'),
            ipv6_address=dict(type='str'),
            aliases=dict(type='list', elements='str'),
            links=dict(type='list', elements='str'),
        )),
        networks_cli_compatible=dict(type='bool'),
        oom_killer=dict(type='bool'),
        oom_score_adj=dict(type='int'),
        output_logs=dict(type='bool', default=False),
        paused=dict(type='bool'),
        pid_mode=dict(type='str'),
        pids_limit=dict(type='int'),
        privileged=dict(type='bool'),
        published_ports=dict(type='list', elements='str', aliases=['ports']),
        pull=dict(type='bool', default=False),
        purge_networks=dict(type='bool', default=False),
        read_only=dict(type='bool'),
        recreate=dict(type='bool', default=False),
        removal_wait_timeout=dict(type='float'),
        restart=dict(type='bool', default=False),
        restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
        restart_retries=dict(type='int'),
        runtime=dict(type='str'),
        security_opts=dict(type='list', elements='str'),
        shm_size=dict(type='str'),
        state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
        stop_signal=dict(type='str'),
        stop_timeout=dict(type='int'),
        sysctls=dict(type='dict'),
        tmpfs=dict(type='list', elements='str'),
        trust_image_content=dict(type='bool', default=False, removed_in_version='2.14'),
        tty=dict(type='bool'),
        ulimits=dict(type='list', elements='str'),
        user=dict(type='str'),
        userns_mode=dict(type='str'),
        uts=dict(type='str'),
        volume_driver=dict(type='str'),
        volumes=dict(type='list', elements='str'),
        volumes_from=dict(type='list', elements='str'),
        working_dir=dict(type='str'),
    )

    # 'image' is only mandatory when the container must exist.
    required_if = [
        ('state', 'present', ['image'])
    ]

    client = AnsibleDockerClientContainer(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_api_version='1.20',
    )

    # Deprecation warnings for upcoming changes in network handling defaults.
    if client.module.params['networks_cli_compatible'] is None and client.module.params['networks']:
        client.module.deprecate(
            'Please note that docker_container handles networks slightly different than docker CLI. '
            'If you specify networks, the default network will still be attached as the first network. '
            '(You can specify purge_networks to remove all networks not explicitly listed.) '
            'This behavior will change in Ansible 2.12. You can change the behavior now by setting '
            'the new `networks_cli_compatible` option to `yes`, and remove this warning by setting '
            'it to `no`',
            version='2.12'
        )
    if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
        client.module.deprecate(
            'Please note that the default value for `network_mode` will change from not specified '
            '(which is equal to `default`) to the name of the first network in `networks` if '
            '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
            'change the behavior now by explicitly setting `network_mode` to the name of the first '
            'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
            'Please make sure that the value you set to `network_mode` equals the inspection result '
            'for existing containers, otherwise the module will recreate them. You can find out the '
            'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
            version='2.14'
        )

    try:
        # ContainerManager performs all state reconciliation and fills
        # cm.results with the data to report back to Ansible.
        cm = ContainerManager(client)
        client.module.exit_json(**sanitize_result(cm.results))
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
    except RequestException as e:
        client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
# Module entry point when executed by Ansible.
if __name__ == '__main__':
    main()
| gpl-3.0 |
andrewgribben/ebook-experiments | python-xml-gen/xml-gen.py | 2 | 2180 | #!/usr/bin/env python
import mimetypes

import glob
import os
import os.path

# Initialize the mimetypes database so guess_type() can map file
# extensions to OPF media-types.
mimetypes.init()

# Collect everything under OEBPS and keep only regular files: directories
# must not appear in an OPF manifest, so filtering here removes the manual
# clean-up step the old version required.
# NOTE(review): glob's '**' is not recursive without recursive=True
# (Python 3.5+); as before, this matches exactly one directory level.
package_content = [item for item in glob.glob('OEBPS/**/*')
                   if os.path.isfile(item)]

# Rather than use a templating system we build the XML portion by hand.
template_top = '''<package xmlns="http://www.idpf.org/2007/opf"
unique-identifier="book-id"
version="3.0" xml:lang="en">
<metadata >
<!-- TITLE -->
<dc:title></dc:title>
<!-- AUTHOR, PUBLISHER AND PUBLICATION DATES-->
<dc:creator></dc:creator>
<dc:publisher></dc:publisher>
<dc:date></dc:date>
<meta property="dcterms:modified"></meta>
<!-- MISC INFORMATION -->
<dc:language>en</dc:language>
<dc:identifier id="book-id"></dc:identifier>
<meta name="cover" content="img-cov" />
</metadata>
<manifest>
'''
template_transition = '''</manifest>
<spine toc="ncx">'''
template_bottom = '''</spine>
</package>'''

manifest = ""
spine = ""

# One <item> per content file in the manifest, with a matching <itemref>
# in the spine; ids are 1-based to mirror the enumeration order.
for i, item in enumerate(package_content):
    basename = os.path.basename(item)
    mime = mimetypes.guess_type(item, strict=True)
    manifest += '\t<item id="file_%s" href="%s" media-type="%s"/>\n' % (i + 1, basename, mime[0])
    spine += '\n\t<itemref idref="file_%s" />' % (i + 1)

# The context manager guarantees the package document is flushed and
# closed (the previous version left the handle open).
with open('package.opf', 'w') as package:
    package.write(template_top)
    package.write(manifest)
    package.write(template_transition)
    package.write(spine)
    package.write(template_bottom)
| mit |
dbaxa/django | django/core/paginator.py | 347 | 5031 | import collections
from math import ceil
from django.utils import six
class InvalidPage(Exception):
    """Base exception for all invalid page requests."""
    pass
class PageNotAnInteger(InvalidPage):
    """Raised when the requested page number cannot be converted to int."""
    pass
class EmptyPage(InvalidPage):
    """Raised when the requested page is out of range or has no results."""
    pass
class Paginator(object):
    """Splits ``object_list`` into pages of ``per_page`` items.

    ``count`` and ``num_pages`` are computed lazily and cached for the
    instance's lifetime, so a QuerySet's COUNT query runs at most once.
    ``orphans`` merges a short trailing page into the previous one.
    """

    def __init__(self, object_list, per_page, orphans=0,
                 allow_empty_first_page=True):
        # object_list may be any sliceable sequence, including a QuerySet.
        self.object_list = object_list
        self.per_page = int(per_page)
        self.orphans = int(orphans)
        self.allow_empty_first_page = allow_empty_first_page
        # Lazy caches filled by _get_count() / _get_num_pages().
        self._num_pages = self._count = None

    def validate_number(self, number):
        """
        Validates the given 1-based page number.
        """
        try:
            number = int(number)
        except (TypeError, ValueError):
            raise PageNotAnInteger('That page number is not an integer')
        if number < 1:
            raise EmptyPage('That page number is less than 1')
        if number > self.num_pages:
            # An empty first page is tolerated when configured; any other
            # page past the end is an error.
            if number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return number

    def page(self, number):
        """
        Returns a Page object for the given 1-based page number.
        """
        number = self.validate_number(number)
        bottom = (number - 1) * self.per_page
        top = bottom + self.per_page
        # Absorb trailing orphans into the last page.
        if top + self.orphans >= self.count:
            top = self.count
        return self._get_page(self.object_list[bottom:top], number, self)

    def _get_page(self, *args, **kwargs):
        """
        Returns an instance of a single page.

        This hook can be used by subclasses to use an alternative to the
        standard :cls:`Page` object.
        """
        return Page(*args, **kwargs)

    def _get_count(self):
        """
        Returns the total number of objects, across all pages.
        """
        if self._count is None:
            try:
                self._count = self.object_list.count()
            except (AttributeError, TypeError):
                # AttributeError if object_list has no count() method.
                # TypeError if object_list.count() requires arguments
                # (i.e. is of type list).
                self._count = len(self.object_list)
        return self._count
    count = property(_get_count)

    def _get_num_pages(self):
        """
        Returns the total number of pages.
        """
        if self._num_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self._num_pages = 0
            else:
                # Orphans reduce the effective item count, but never below 1
                # so at least one page always exists here.
                hits = max(1, self.count - self.orphans)
                self._num_pages = int(ceil(hits / float(self.per_page)))
        return self._num_pages
    num_pages = property(_get_num_pages)

    def _get_page_range(self):
        """
        Returns a 1-based range of pages for iterating through within
        a template for loop.
        """
        return six.moves.range(1, self.num_pages + 1)
    page_range = property(_get_page_range)
QuerySetPaginator = Paginator # For backwards-compatibility.
class Page(collections.Sequence):
    """One page of a paginated object list, exposed as a read-only sequence."""

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.num_pages)

    def __len__(self):
        return len(self.object_list)

    def __getitem__(self, index):
        # Only integers and slices are valid sequence indexes.
        if not isinstance(index, (slice,) + six.integer_types):
            raise TypeError
        # Materialize object_list once so a QuerySet is not re-queried on
        # every __getitem__ call.
        if not isinstance(self.object_list, list):
            self.object_list = list(self.object_list)
        return self.object_list[index]

    def has_previous(self):
        return self.number > 1

    def has_next(self):
        return self.number < self.paginator.num_pages

    def has_other_pages(self):
        return self.has_previous() or self.has_next()

    def next_page_number(self):
        return self.paginator.validate_number(self.number + 1)

    def previous_page_number(self):
        return self.paginator.validate_number(self.number - 1)

    def start_index(self):
        """
        Returns the 1-based index of the first object on this page,
        relative to total objects in the paginator.
        """
        # With no items at all there is no meaningful first index.
        if self.paginator.count == 0:
            return 0
        return self.paginator.per_page * (self.number - 1) + 1

    def end_index(self):
        """
        Returns the 1-based index of the last object on this page,
        relative to total objects found (hits).
        """
        # The final page may be shorter than per_page because of orphans.
        if self.number == self.paginator.num_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page
| bsd-3-clause |
emma-d-cotter/SampleProject | doc/tools/build_modref_templates.py | 1 | 2094 | #!/usr/bin/env python
"""Script to auto-generate API docs.
"""
from __future__ import print_function, division
# stdlib imports
import sys
import re
from os.path import join as pjoin
# local imports
from apigen import ApiDocWriter
# version comparison
from distutils.version import LooseVersion as V
#*****************************************************************************
def abort(error):
    """Print a warning explaining why API docs were skipped, then exit.

    Exits with status 0 on purpose: a missing or mismatched package means
    the pre-existing API documentation sources are used instead, which is
    not a build failure.
    """
    print('*WARNING* API documentation not generated: %s' % error)
    # sys.exit() instead of the bare exit() builtin: exit() is injected by
    # the site module for interactive use and is absent under `python -S`.
    sys.exit()
if __name__ == '__main__':
    # Usage: build_modref_templates.py <package> <outdir> [other_defines]
    package = sys.argv[1]
    outdir = sys.argv[2]
    try:
        other_defines = sys.argv[3]
    except IndexError:
        other_defines = True
    else:
        # Interpret the optional third argument as a boolean flag.
        other_defines = other_defines in ('True', 'true', '1')

    # Check that the package is available. If not, the API documentation is not
    # (re)generated and existing API documentation sources will be used.
    try:
        __import__(package)
    except ImportError:
        abort("Can not import " + package)

    module = sys.modules[package]

    # Check that the source version is equal to the installed
    # version. If the versions mismatch the API documentation sources
    # are not (re)generated. This avoids automatic generation of documentation
    # for older or newer versions if such versions are installed on the system.
    installed_version = V(module.__version__)

    # Executing version.py defines __version__ in the current namespace.
    ver_file = pjoin('..', package, 'version.py')
    with open(ver_file) as f:
        exec(f.read())
    source_version = __version__
    print('***', source_version)

    if source_version != installed_version:
        abort("Installed version does not match source version")

    docwriter = ApiDocWriter(package, rst_extension='.rst',
                             other_defines=other_defines)
    # Skip internal, test, and version modules when generating stubs.
    docwriter.package_skip_patterns += [r'\.myproject$',
                                        r'.*test.*$',
                                        r'\.version.*$']
    docwriter.write_api_docs(outdir)
    docwriter.write_index(outdir, 'index', relative_to=outdir)
    print('%d files written' % len(docwriter.written_modules))
| bsd-2-clause |
italomaia/turtle-linux | games/TypusPocus/interpol.py | 1 | 2057 | import random
class PInterpolator:
    """Base point interpolator.

    Stores start and end points as complex numbers (real part = x,
    imaginary part = y) so subclasses can use complex arithmetic.
    """
    def __init__(self, pinicio, pfin):
        self.inicio = complex(pinicio[0],pinicio[1])
        self.fin = complex(pfin[0],pfin[1])
class MadamBezier(PInterpolator):
    """Cubic Bezier interpolator between two points.

    Control points m1/m2 are either supplied by the caller or generated
    with random vertical jitter bounded by the segment's height.
    """

    def __init__(self, pinicio, pfin, prev=None, aux2=None):
        """ We use 2 auxiliary points for bezier interpolation (them are bezier
            interpolation standard points).
            We can generate them or them can be especified by the caller.
            In the especification the order is:
                MadamBezier( initial_point, end_point [, prev [, aux_2]])
            where the points, if are in a line should be in this order:
                initial_point .... aux_2    end_point
            In the case you set prev to an instance of this class, it
            will take the aux_2 point from them and mirror it to use
            as aux_1 point for the initial_point of the newly instance.
        """
        PInterpolator.__init__(self, pinicio, pfin)
        d = self.fin - self.inicio
        # Bound for the random vertical jitter of generated control points.
        ycoef = abs(int(d.imag))
        # NOTE: the removed Python 2 '<>' operator has been replaced with
        # 'is not None' / '!=', which is valid on both Python 2 and 3.
        if prev is not None:
            if isinstance(prev, MadamBezier):
                # It's an instance: take its second control point and mirror
                # it about our start, giving a smooth (C1-like) join.
                self.m1 = 2 * self.inicio - prev.getPrev()
            else:
                # Just use it as a normal point.
                self.m1 = prev
        else:
            self.m1 = self.inicio + d / 4 + complex(0,
                random.randint(-ycoef, ycoef))
        if aux2 is not None:
            self.m2 = aux2
        else:
            self.m2 = self.fin - d / 4 + complex(0, random.randint(-ycoef, ycoef))

    def getAt(self, t):
        """Return the (x, y) point on the curve at parameter t in [0, 1]."""
        c = ((1 - t) ** 3) * self.inicio + \
            3 * t * ((1 - t) ** 2) * self.m1 + \
            3 * (t ** 2) * (1 - t) * self.m2 + \
            (t ** 3) * self.fin
        return c.real, c.imag

    def getPrev(self):
        """Return the second control point (mirrored by chained curves)."""
        return self.m2
if __name__=="__main__":
inter = MadamBezier( (0,0), (4,4) )
for r in range(10):
print inter.getAt( 1.0/(r+1) )
| gpl-3.0 |
hgsoft/hgsoft-addons | product_sale_price_reseller/models/__init__.py | 1 | 1405 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2016 HGSOFT - www.hgsoft.com.br #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
from . import product
| gpl-3.0 |
DenisMinich/parabox | parabox/behaviour/bitmap_collidable/bitmap_collidable.py | 1 | 1563 | from parabox.behaviour.bitmap import Bitmap
from parabox.behaviour.collidable import Collidable
class BitmapCollidable(Bitmap, Collidable):
    """Mix Bitmap and Collidable behaviour"""

    def collide_widget(self, widget):
        """Check if collide another widget

        :param widget: widget to check collission
        :type widget: kivy.uix.widget.Widget
        :returns: collide result
        :rtype: bool
        """
        # NOTE(review): super(Bitmap, self) starts MRO lookup *after* Bitmap,
        # i.e. it deliberately skips Bitmap's override and uses the next
        # class's bounding-box check — confirm this is intentional.
        if super(Bitmap, self).collide_widget(widget):
            # Bounding boxes overlap; refine with a bitmap intersection test.
            return Collidable.get_intersection(self, widget) is not None
        return False

    def collide_point(self, x_coord, y_coord):
        """Check if widget collide point

        :param x_coord: x coord of point to check
        :type y_coord: y coord of point to check
        :returns: collide result
        :rtype: bool
        """
        # Same MRO skip as collide_widget above: coarse point-in-rect check
        # first, then the per-pixel bitmap lookup.
        if super(Bitmap, self).collide_point(x_coord, y_coord):
            relative_x, relative_y = self._get_relative_coords_by_absolute(
                x_coord, y_coord)
            if (0 <= relative_x < self.size[0] and
                    0 <= relative_y < self.size[1]):
                # bitmap is indexed [row, column], i.e. [y, x].
                return self.bitmap[relative_y, relative_x]
        return False

    def get_collide_check_pixels(self):
        """Get points to use for checking collission

        :returns: points for check collission
        :rtype: array of sets
        """
        # NOTE(review): ranges are inclusive of size (size+1 values per
        # axis), so the right/top edge pixels are included — confirm this
        # off-by-one is intentional.
        check_pixels = []
        for x in range(self.size[0] + 1):
            for y in range(self.size[1] + 1):
                check_pixels.append((x, y))
        return check_pixels
| mit |
praneethkumarpidugu/matchmaking | lib/python2.7/site-packages/requests/adapters.py | 573 | 16810 | # -*- coding: utf-8 -*-
"""
requests.adapters
~~~~~~~~~~~~~~~~~
This module contains the transport adapters that Requests uses to define
and maintain connections.
"""
import socket
from .models import Response
from .packages.urllib3.poolmanager import PoolManager, proxy_from_url
from .packages.urllib3.response import HTTPResponse
from .packages.urllib3.util import Timeout as TimeoutSauce
from .packages.urllib3.util.retry import Retry
from .compat import urlparse, basestring
from .utils import (DEFAULT_CA_BUNDLE_PATH, get_encoding_from_headers,
prepend_scheme_if_needed, get_auth_from_url, urldefragauth)
from .structures import CaseInsensitiveDict
from .packages.urllib3.exceptions import ConnectTimeoutError
from .packages.urllib3.exceptions import HTTPError as _HTTPError
from .packages.urllib3.exceptions import MaxRetryError
from .packages.urllib3.exceptions import ProxyError as _ProxyError
from .packages.urllib3.exceptions import ProtocolError
from .packages.urllib3.exceptions import ReadTimeoutError
from .packages.urllib3.exceptions import SSLError as _SSLError
from .packages.urllib3.exceptions import ResponseError
from .cookies import extract_cookies_to_jar
from .exceptions import (ConnectionError, ConnectTimeout, ReadTimeout, SSLError,
ProxyError, RetryError)
from .auth import _basic_auth_str
DEFAULT_POOLBLOCK = False
DEFAULT_POOLSIZE = 10
DEFAULT_RETRIES = 0
class BaseAdapter(object):
    """The Base Transport Adapter"""

    def __init__(self):
        super(BaseAdapter, self).__init__()

    def send(self):
        # Abstract: subclasses must implement the actual request dispatch.
        raise NotImplementedError

    def close(self):
        # Abstract: subclasses must release any pooled resources here.
        raise NotImplementedError
class HTTPAdapter(BaseAdapter):
"""The built-in HTTP Adapter for urllib3.
Provides a general-case interface for Requests sessions to contact HTTP and
HTTPS urls by implementing the Transport Adapter interface. This class will
usually be created by the :class:`Session <Session>` class under the
covers.
:param pool_connections: The number of urllib3 connection pools to cache.
:param pool_maxsize: The maximum number of connections to save in the pool.
:param int max_retries: The maximum number of retries each connection
should attempt. Note, this applies only to failed DNS lookups, socket
connections and connection timeouts, never to requests where data has
made it to the server. By default, Requests does not retry failed
connections. If you need granular control over the conditions under
which we retry a request, import urllib3's ``Retry`` class and pass
that instead.
:param pool_block: Whether the connection pool should block for connections.
Usage::
>>> import requests
>>> s = requests.Session()
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
>>> s.mount('http://', a)
"""
__attrs__ = ['max_retries', 'config', '_pool_connections', '_pool_maxsize',
'_pool_block']
    def __init__(self, pool_connections=DEFAULT_POOLSIZE,
                 pool_maxsize=DEFAULT_POOLSIZE, max_retries=DEFAULT_RETRIES,
                 pool_block=DEFAULT_POOLBLOCK):
        """Set up the retry policy and the urllib3 pool manager.

        Parameters are documented on the class docstring.
        """
        if max_retries == DEFAULT_RETRIES:
            # Default policy: no retries, and never retry after a read error
            # (data may already have reached the server).
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}

        super(HTTPAdapter, self).__init__()

        # Keep the pool parameters for pickling (see __getstate__/__setstate__).
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block

        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
def __getstate__(self):
return dict((attr, getattr(self, attr, None)) for attr in
self.__attrs__)
def __setstate__(self, state):
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
# because self.poolmanager uses a lambda function, which isn't pickleable.
self.proxy_manager = {}
self.config = {}
for attr, value in state.items():
setattr(self, attr, value)
self.init_poolmanager(self._pool_connections, self._pool_maxsize,
block=self._pool_block)
    def init_poolmanager(self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs):
        """Initializes a urllib3 PoolManager.

        This method should not be called from user code, and is only
        exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param connections: The number of urllib3 connection pools to cache.
        :param maxsize: The maximum number of connections to save in the pool.
        :param block: Block when no free connections are available.
        :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
        """
        # save these values for pickling
        self._pool_connections = connections
        self._pool_maxsize = maxsize
        self._pool_block = block

        # strict=True makes urllib3 reject malformed HTTP status lines.
        self.poolmanager = PoolManager(num_pools=connections, maxsize=maxsize,
                                       block=block, strict=True, **pool_kwargs)
def proxy_manager_for(self, proxy, **proxy_kwargs):
"""Return urllib3 ProxyManager for the given proxy.
This method should not be called from user code, and is only
exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param proxy: The proxy to return a urllib3 ProxyManager for.
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
:returns: ProxyManager
"""
if not proxy in self.proxy_manager:
proxy_headers = self.proxy_headers(proxy)
self.proxy_manager[proxy] = proxy_from_url(
proxy,
proxy_headers=proxy_headers,
num_pools=self._pool_connections,
maxsize=self._pool_maxsize,
block=self._pool_block,
**proxy_kwargs)
return self.proxy_manager[proxy]
def cert_verify(self, conn, url, verify, cert):
"""Verify a SSL certificate. This method should not be called from user
code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param conn: The urllib3 connection object associated with the cert.
:param url: The requested URL.
:param verify: Whether we should actually verify the certificate.
:param cert: The SSL certificate to verify.
"""
if url.lower().startswith('https') and verify:
cert_loc = None
# Allow self-specified cert location.
if verify is not True:
cert_loc = verify
if not cert_loc:
cert_loc = DEFAULT_CA_BUNDLE_PATH
if not cert_loc:
raise Exception("Could not find a suitable SSL CA certificate bundle.")
conn.cert_reqs = 'CERT_REQUIRED'
conn.ca_certs = cert_loc
else:
conn.cert_reqs = 'CERT_NONE'
conn.ca_certs = None
if cert:
if not isinstance(cert, basestring):
conn.cert_file = cert[0]
conn.key_file = cert[1]
else:
conn.cert_file = cert
def build_response(self, req, resp):
"""Builds a :class:`Response <requests.Response>` object from a urllib3
response. This should not be called from user code, and is only exposed
for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`
:param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
:param resp: The urllib3 response object.
"""
response = Response()
# Fallback to None if there's no status_code, for whatever reason.
response.status_code = getattr(resp, 'status', None)
# Make headers case-insensitive.
response.headers = CaseInsensitiveDict(getattr(resp, 'headers', {}))
# Set encoding.
response.encoding = get_encoding_from_headers(response.headers)
response.raw = resp
response.reason = response.raw.reason
if isinstance(req.url, bytes):
response.url = req.url.decode('utf-8')
else:
response.url = req.url
# Add new cookies from the server.
extract_cookies_to_jar(response.cookies, req, resp)
# Give the Response some context.
response.request = req
response.connection = self
return response
def get_connection(self, url, proxies=None):
"""Returns a urllib3 connection for the given URL. This should not be
called from user code, and is only exposed for use when subclassing the
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
:param url: The URL to connect to.
:param proxies: (optional) A Requests-style dictionary of proxies used on this request.
"""
proxies = proxies or {}
proxy = proxies.get(urlparse(url.lower()).scheme)
if proxy:
proxy = prepend_scheme_if_needed(proxy, 'http')
proxy_manager = self.proxy_manager_for(proxy)
conn = proxy_manager.connection_from_url(url)
else:
# Only scheme should be lower case
parsed = urlparse(url)
url = parsed.geturl()
conn = self.poolmanager.connection_from_url(url)
return conn
    def close(self):
        """Disposes of any internal state.

        Currently, this just closes the PoolManager, which closes pooled
        connections.
        """
        # clear() shuts down every cached connection pool.
        self.poolmanager.clear()
def request_url(self, request, proxies):
    """Obtain the url to use when making the final request.

    If the message is being sent through a HTTP proxy, the full URL has to
    be used. Otherwise, we should only use the path portion of the URL.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: A dictionary of schemes to proxy URLs.
    :rtype: str
    """
    scheme = urlparse(request.url).scheme
    proxied = (proxies or {}).get(scheme)

    if proxied and scheme != 'https':
        # Plain-HTTP requests through a proxy need the absolute URL,
        # with any fragment and auth component stripped.
        return urldefragauth(request.url)

    # Direct (or tunnelled HTTPS) requests use only the path portion.
    return request.path_url
def add_headers(self, request, **kwargs):
    """Add any headers needed by the connection. As of v2.0 this does
    nothing by default, but is left for overriding by users that subclass
    the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
    :param kwargs: The keyword arguments from the call to send().
    """
    # Intentionally a no-op: a hook point for subclasses.
    pass
def proxy_headers(self, proxy):
    """Returns a dictionary of the headers to add to any request sent
    through a proxy. This works with urllib3 magic to ensure that they are
    correctly sent to the proxy, rather than in a tunnelled request if
    CONNECT is being used.

    This should not be called from user code, and is only exposed for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param proxy: The url of the proxy being used for this request.
    :rtype: dict
    """
    headers = {}
    # Proxy credentials, when present, are embedded in the proxy URL itself.
    username, password = get_auth_from_url(proxy)

    if username and password:
        headers['Proxy-Authorization'] = _basic_auth_str(username,
                                                         password)

    return headers
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a (`connect timeout, read
        timeout <user/advanced.html#timeouts>`_) tuple.
    :type timeout: float or tuple
    :param verify: (optional) Whether to verify SSL certificates.
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    :rtype: requests.Response
    """
    conn = self.get_connection(request.url, proxies)

    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(request)

    # The request is sent chunked when it has a body whose length is not
    # known up front (no Content-Length header).
    chunked = not (request.body is None or 'Content-Length' in request.headers)

    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError as e:
            # this may raise a string formatting error.
            err = ("Invalid timeout {0}. Pass a (connect, read) "
                   "timeout tuple, or a single float to set "
                   "both timeouts to the same value".format(timeout))
            raise ValueError(err)
    else:
        # A single value applies to both the connect and the read timeout.
        timeout = TimeoutSauce(connect=timeout, read=timeout)

    try:
        if not chunked:
            # Content-Length is known: let urllib3 send the whole request.
            resp = conn.urlopen(
                method=request.method,
                url=url,
                body=request.body,
                headers=request.headers,
                redirect=False,
                assert_same_host=False,
                preload_content=False,
                decode_content=False,
                retries=self.max_retries,
                timeout=timeout
            )

        # Send the request.
        else:
            if hasattr(conn, 'proxy_pool'):
                conn = conn.proxy_pool

            # Drop below the pool abstraction to drive the socket directly,
            # since we must hand-encode the chunked body.
            low_conn = conn._get_conn(timeout=timeout)

            try:
                low_conn.putrequest(request.method,
                                    url,
                                    skip_accept_encoding=True)

                for header, value in request.headers.items():
                    low_conn.putheader(header, value)

                low_conn.endheaders()

                # HTTP/1.1 chunked transfer encoding:
                # <hex length>\r\n<chunk>\r\n ... terminated by 0\r\n\r\n.
                for i in request.body:
                    low_conn.send(hex(len(i))[2:].encode('utf-8'))
                    low_conn.send(b'\r\n')
                    low_conn.send(i)
                    low_conn.send(b'\r\n')
                low_conn.send(b'0\r\n\r\n')

                r = low_conn.getresponse()
                resp = HTTPResponse.from_httplib(
                    r,
                    pool=conn,
                    connection=low_conn,
                    preload_content=False,
                    decode_content=False
                )
            except:
                # If we hit any problems here, clean up the connection.
                # Then, reraise so that we can handle the actual exception.
                low_conn.close()
                raise
            else:
                # All is well, return the connection to the pool.
                conn._put_conn(low_conn)

    # Map low-level transport failures onto requests' exception hierarchy.
    except (ProtocolError, socket.error) as err:
        raise ConnectionError(err, request=request)

    except MaxRetryError as e:
        if isinstance(e.reason, ConnectTimeoutError):
            raise ConnectTimeout(e, request=request)

        if isinstance(e.reason, ResponseError):
            raise RetryError(e, request=request)

        raise ConnectionError(e, request=request)

    except _ProxyError as e:
        raise ProxyError(e)

    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        else:
            raise

    return self.build_response(request, resp)
| mit |
alvinhochun/sony-xperia-m-kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# The cxacru-cf.bin stream is binary: read raw bytes so that struct.unpack
# works on Python 3 as well (text-mode stdin there yields str, which
# struct.unpack rejects).  On Python 2, sys.stdin has no ``buffer``
# attribute and is already a byte stream, so fall back to it.
stdin = getattr(sys.stdin, 'buffer', sys.stdin)

i = 0
while True:
    buf = stdin.read(4)

    if len(buf) == 0:
        # Clean EOF on a 4-byte boundary: all values consumed.
        break
    elif len(buf) != 4:
        # Truncated trailing value: finish the output line and fail.
        sys.stdout.write("\n")
        sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
        sys.exit(1)

    if i > 0:
        sys.stdout.write(" ")
    # Emit "<index in hex>=<little-endian u32 value>" for the sysfs
    # adsl_config attribute.
    sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
    i += 1

sys.stdout.write("\n")
| gpl-2.0 |
dongjiaqiang/hadoop-20 | src/contrib/thriftfs/scripts/hdfs.py | 116 | 14991 | #!/usr/bin/env python
"""
hdfs.py is a python client for the thrift interface to HDFS.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied. See the License for the specific language governing permissions
and limitations under the License.
"""
import sys
sys.path.append('../gen-py')
from optparse import OptionParser
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from hadoopfs import ThriftHadoopFileSystem
from hadoopfs.ttypes import *
from readline import *
from cmd import *
import os
import re
import readline
import subprocess
#
# The address of the FileSystemClientProxy. If the host and port are
# not specified, then a proxy server is automatically spawned.
#
host = 'localhost'  # proxy host (overridden by --proxyclient)
port = 4677 # use any port (overridden by --port)
# Script that spawns the local proxy; its stdout reports the chosen port.
proxyStartScript = './start_thrift_server.sh'
startServer = True # shall we start a proxy server?
#
# The hdfs interactive shell. The Cmd class is a builtin that uses readline + implements
# a whole bunch of utility stuff like help and custom tab completions.
# It makes everything real easy.
#
class hadoopthrift_cli(Cmd):
# my custom prompt looks better than the default
prompt = 'hdfs>> '
#############################
# Class constructor
#############################
def __init__(self, server_name, server_port):
Cmd.__init__(self)
self.server_name = server_name
self.server_port = server_port
#############################
# Start the ClientProxy Server if we can find it.
# Read in its stdout to determine what port it is running on
#############################
def startProxyServer(self):
try:
p = subprocess.Popen(proxyStartScript, self.server_port, stdout=subprocess.PIPE)
content = p.stdout.readline()
p.stdout.close()
val = re.split( '\[|\]', content)
print val[1]
self.server_port = val[1]
return True
except Exception, ex:
print "ERROR in starting proxy server " + proxyStartScript
print '%s' % (ex.message)
return False
#############################
# Connect to clientproxy
#############################
def connect(self):
try:
# connect to hdfs thrift server
self.transport = TSocket.TSocket(self.server_name, self.server_port)
self.transport = TTransport.TBufferedTransport(self.transport)
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
# Create a client to use the protocol encoder
self.client = ThriftHadoopFileSystem.Client(self.protocol)
self.transport.open()
# tell the HadoopThrift server to die after 60 minutes of inactivity
self.client.setInactivityTimeoutPeriod(60*60)
return True
except Thrift.TException, tx:
print "ERROR in connecting to ", self.server_name, ":", self.server_port
print '%s' % (tx.message)
return False
#
# Disconnect from client proxy
#
def shutdown(self):
try :
self.transport.close()
except Exception, tx:
return False
#############################
# Create the specified file. Returns a handle to write data.
#############################
def do_create(self, name):
if name == "":
print " ERROR usage: create <pathname>"
print
return 0
# Create the file, and immediately closes the handle
path = Pathname();
path.pathname = name;
status = self.client.create(path)
self.client.close(status)
return 0
#############################
# Delete the specified file.
#############################
def do_rm(self, name):
if name == "":
print " ERROR usage: rm <pathname>\n"
return 0
# delete file
path = Pathname();
path.pathname = name;
status = self.client.rm(path, False)
if status == False:
print " ERROR in deleting path: " + name
return 0
#############################
# Rename the specified file/dir
#############################
def do_mv(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
src = params[0].strip()
dest = params[1].strip()
if src == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
if dest == "":
print " ERROR usage: mv <srcpathname> <destpathname>\n"
return 0
# move file
path = Pathname();
path.pathname = src;
destpath = Pathname();
destpath.pathname = dest;
status = self.client.rename(path, destpath)
if status == False:
print " ERROR in renaming path: " + name
return 0
#############################
# Delete the specified file.
#############################
def do_mkdirs(self, name):
if name == "":
print " ERROR usage: mkdirs <pathname>\n"
return 0
# create directory
path = Pathname();
path.pathname = name;
fields = self.client.mkdirs(path)
return 0
#############################
# does the pathname exist?
#############################
def do_exists(self, name):
if name == "":
print " ERROR usage: exists <pathname>\n"
return 0
# check existence of pathname
path = Pathname();
path.pathname = name;
fields = self.client.exists(path)
if (fields == True):
print name + " exists."
else:
print name + " does not exist."
return 0
#############################
# copy local file into hdfs
#############################
def do_put(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
local = params[0].strip()
hdfs = params[1].strip()
if local == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
if hdfs == "":
print " ERROR usage: put <localpathname> <hdfspathname>\n"
return 0
# open local file
input = open(local, 'rb')
# open output file
path = Pathname();
path.pathname = hdfs;
output = self.client.create(path)
# read 1MB at a time and upload to hdfs
while True:
chunk = input.read(1024*1024)
if not chunk: break
self.client.write(output, chunk)
self.client.close(output)
input.close()
#############################
# copy hdfs file into local
#############################
def do_get(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
hdfs = params[0].strip()
local = params[1].strip()
if local == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
if hdfs == "":
print " ERROR usage: get <hdfspathname> <localpathname>\n"
return 0
# open output local file
output = open(local, 'wb')
# open input hdfs file
path = Pathname();
path.pathname = hdfs;
input = self.client.open(path)
# find size of hdfs file
filesize = self.client.stat(path).length
# read 1MB bytes at a time from hdfs
offset = 0
chunksize = 1024 * 1024
while True:
chunk = self.client.read(input, offset, chunksize)
if not chunk: break
output.write(chunk)
offset += chunksize
if (offset >= filesize): break
self.client.close(input)
output.close()
#############################
# List attributes of this path
#############################
def do_ls(self, name):
if name == "":
print " ERROR usage: list <pathname>\n"
return 0
# list file status
path = Pathname();
path.pathname = name;
status = self.client.stat(path)
if (status.isdir == False):
self.printStatus(status)
return 0
# This is a directory, fetch its contents
liststatus = self.client.listStatus(path)
for item in liststatus:
self.printStatus(item)
#############################
# Set permissions for a file
#############################
def do_chmod(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chmod 774 <pathname>\n"
return 0
perm = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
if perm == "":
print " ERROR usage: chmod 774 <pathname>\n"
return 0
# set permissions (in octal)
path = Pathname();
path.pathname = name;
status = self.client.chmod(path, int(perm,8))
return 0
#############################
# Set owner for a file. This is not an atomic operation.
# A change to the group of a file may be overwritten by this one.
#############################
def do_chown(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
owner = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: chown <ownername> <pathname>\n"
return 0
# get the current owner and group
path = Pathname();
path.pathname = name;
cur = self.client.stat(path)
# set new owner, keep old group
status = self.client.chown(path, owner, cur.group)
return 0
#######################################
# Set the replication factor for a file
######################################
def do_setreplication(self, line):
params = line.split()
if (len(params) != 2):
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
repl = params[0].strip()
name = params[1].strip()
if name == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
if repl == "":
print " ERROR usage: setreplication <replication factor> <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
status = self.client.setReplication(path, int(repl))
return 0
#############################
# Display the locations of the blocks of this file
#############################
def do_getlocations(self, name):
if name == "":
print " ERROR usage: getlocations <pathname>\n"
return 0
path = Pathname();
path.pathname = name;
# find size of hdfs file
filesize = self.client.stat(path).length
# getlocations file
blockLocations = self.client.getFileBlockLocations(path, 0, filesize)
for item in blockLocations:
self.printLocations(item)
return 0
#############################
# Utility methods from here
#############################
#
# If I don't do this, the last command is always re-executed which is annoying.
#
def emptyline(self):
pass
#
# print the status of a path
#
def printStatus(self, stat):
print str(stat.block_replication) + "\t" + str(stat.length) + "\t" + str(stat.modification_time) + "\t" + stat.permission + "\t" + stat.owner + "\t" + stat.group + "\t" + stat.path
#
# print the locations of a block
#
def printLocations(self, location):
print str(location.names) + "\t" + str(location.offset) + "\t" + str(location.length)
#
# Various ways to exit the hdfs shell
#
def do_quit(self,ignored):
try:
if startServer:
self.client.shutdown(1)
return -1
except Exception, ex:
return -1
def do_q(self,ignored):
return self.do_quit(ignored)
# ctl-d
def do_EOF(self,ignored):
return self.do_quit(ignored)
#
# Give the user some amount of help - I am a nice guy
#
def help_create(self):
print "create <pathname>"
def help_rm(self):
print "rm <pathname>"
def help_mv(self):
print "mv <srcpathname> <destpathname>"
def help_mkdirs(self):
print "mkdirs <pathname>"
def help_exists(self):
print "exists <pathname>"
def help_put(self):
print "put <localpathname> <hdfspathname>"
def help_get(self):
print "get <hdfspathname> <localpathname>"
def help_ls(self):
print "ls <hdfspathname>"
def help_chmod(self):
print "chmod 775 <hdfspathname>"
def help_chown(self):
print "chown <ownername> <hdfspathname>"
def help_setreplication(self):
print "setrep <replication factor> <hdfspathname>"
def help_getlocations(self):
print "getlocations <pathname>"
def help_EOF(self):
print '<ctl-d> will quit this program.'
def help_quit(self):
print 'if you need to know what quit does, you shouldn\'t be using a computer.'
def help_q(self):
print 'quit and if you need to know what quit does, you shouldn\'t be using a computer.'
def help_help(self):
print 'duh'
def usage(exec_name):
    """Print command-line usage (kept for reference; optparse supplies -h)."""
    print "Usage: "
    print " %s [proxyclientname [proxyclientport]]" % exec_name
    print " %s -v" % exec_name
    print " %s --help" % exec_name
    print " %s -h" % exec_name
if __name__ == "__main__":
    #
    # Rudimentary command line processing.
    #

    # real parsing:
    parser = OptionParser()
    parser.add_option("-e", "--execute", dest="command_str",
                      help="execute this command and exit")
    parser.add_option("-s","--proxyclient",dest="host",help="the proxyclient's hostname")
    parser.add_option("-p","--port",dest="port",help="the proxyclient's port number")

    (options, args) = parser.parse_args()

    #
    # Save host and port information of the proxy server.  Supplying either
    # implies an already-running proxy, so we must not spawn our own.
    #
    if (options.host):
        host = options.host
        startServer = False
    if (options.port):
        port = options.port
        startServer = False

    #
    # Retrieve the user's readline history.
    #
    historyFileName = os.path.expanduser("~/.hdfs_history")
    if (os.path.exists(historyFileName)):
        readline.read_history_file(historyFileName)

    #
    # Create class and connect to proxy server
    #
    c = hadoopthrift_cli(host,port)

    if startServer:
        if c.startProxyServer() == False:
            sys.exit(1)

    if c.connect() == False:
        sys.exit(1)

    #
    # If this utility was invoked with one argument, process it
    #
    if (options.command_str):
        c.onecmd(options.command_str)
        sys.exit(0)

    #
    # Start looping over user commands.
    #
    c.cmdloop('Welcome to the Thrift interactive shell for Hadoop File System. - how can I help you? ' + '\n'
              'Press tab twice to see the list of commands. ' + '\n' +
              'To complete the name of a command press tab once. \n'
              )
    c.shutdown();

    readline.write_history_file(historyFileName)
    print '' # I am nothing if not courteous.

    sys.exit(0)
| apache-2.0 |
amirsgh/enews | web/bundles/elearning/nodejs/node_modules/socket.io/node_modules/engine.io/node_modules/engine.io-parser/node_modules/utf8/tests/generate-test-data.py | 2214 | 1347 | #!/usr/bin/env python
import re
import json
# http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
# http://stackoverflow.com/a/13436167/96656
def unisymbol(codePoint):
    """Return the unicode symbol for a code point.

    BMP code points map straight through unichr(); astral code points
    (U+010000..U+10FFFF) are returned as a UTF-16 surrogate pair so the
    result is also valid on narrow Python 2 builds.
    See http://mathiasbynens.be/notes/javascript-encoding#surrogate-formulae
    """
    if codePoint >= 0x0000 and codePoint <= 0xFFFF:
        return unichr(codePoint)
    elif codePoint >= 0x010000 and codePoint <= 0x10FFFF:
        # Split the supplementary code point into high/low surrogates.
        highSurrogate = int((codePoint - 0x10000) / 0x400) + 0xD800
        lowSurrogate = int((codePoint - 0x10000) % 0x400) + 0xDC00
        return unichr(highSurrogate) + unichr(lowSurrogate)
    else:
        # Out of Unicode range entirely.
        return 'Error'
def hexify(codePoint):
    """Format a code point as a zero-padded 'U+XXXXXX' string.

    Uses a single format spec instead of the hand-rolled
    hex()[2:].upper().zfill(6) chain; output is identical for all valid
    code points (0..0x10FFFF).
    """
    return 'U+{0:06X}'.format(codePoint)
def writeFile(filename, contents):
    """Write `contents` to `filename`, stripped, with one trailing newline."""
    # Echo the filename so the caller can see what was produced.
    print filename
    with open(filename, 'w') as f:
        f.write(contents.strip() + '\n')
# Build one record per Unicode code point: the code point number, its
# decoded symbol, and its UTF-8 byte sequence re-read as latin1 so that
# each byte maps to exactly one code unit.
data = []
for codePoint in range(0x000000, 0x10FFFF + 1):
    symbol = unisymbol(codePoint)
    # http://stackoverflow.com/a/17199950/96656
    bytes = symbol.encode('utf8').decode('latin1')
    data.append({
        'codePoint': codePoint,
        'decoded': symbol,
        'encoded': bytes
    });

jsonData = json.dumps(data, sort_keys=False, indent=2, separators=(',', ': '))
# Use tabs instead of double spaces for indentation.
# BUGFIX: replace two-space indent units, not every single space --
# replacing ' ' would also mangle the ', ' and ': ' separators.
jsonData = jsonData.replace('  ', '\t')
# Escape hexadecimal digits in escape sequences
jsonData = re.sub(
    r'\\u([a-fA-F0-9]{4})',
    lambda match: r'\u{}'.format(match.group(1).upper()),
    jsonData
)
writeFile('data.json', jsonData)
| mit |
atsolakid/edx-platform | openedx/core/djangoapps/course_groups/migrations/0002_add_model_CourseUserGroupPartitionGroup.py | 112 | 5804 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: adds the CourseUserGroupPartitionGroup model,
    which maps a CourseUserGroup one-to-one onto a (partition_id, group_id)
    pair."""

    def forwards(self, orm):
        """Apply: create the new table and emit South's post-create signal."""
        # Adding model 'CourseUserGroupPartitionGroup'
        db.create_table('course_groups_courseusergrouppartitiongroup', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('course_user_group', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['course_groups.CourseUserGroup'], unique=True)),
            ('partition_id', self.gf('django.db.models.fields.IntegerField')()),
            ('group_id', self.gf('django.db.models.fields.IntegerField')()),
            ('created_at', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
            ('updated_at', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, blank=True)),
        ))
        db.send_create_signal('course_groups', ['CourseUserGroupPartitionGroup'])

    def backwards(self, orm):
        """Revert: drop the table created by forwards()."""
        # Deleting model 'CourseUserGroupPartitionGroup'
        db.delete_table('course_groups_courseusergrouppartitiongroup')

    # Frozen ORM snapshot captured by South when this migration was
    # generated.  Auto-generated -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'course_groups.courseusergroup': {
            'Meta': {'unique_together': "(('name', 'course_id'),)", 'object_name': 'CourseUserGroup'},
            'course_id': ('xmodule_django.models.CourseKeyField', [], {'max_length': '255', 'db_index': 'True'}),
            'group_type': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'users': ('django.db.models.fields.related.ManyToManyField', [], {'db_index': 'True', 'related_name': "'course_groups'", 'symmetrical': 'False', 'to': "orm['auth.User']"})
        },
        'course_groups.courseusergrouppartitiongroup': {
            'Meta': {'object_name': 'CourseUserGroupPartitionGroup'},
            'course_user_group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['course_groups.CourseUserGroup']", 'unique': 'True'}),
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'group_id': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'partition_id': ('django.db.models.fields.IntegerField', [], {}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['course_groups']
| agpl-3.0 |
flar2/flo-ElementalX | tools/perf/scripts/python/syscall-counts-by-pid.py | 11180 | 1927 | # system call counts, by pid
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";

# Optional single argument restricts output to one process: a numeric
# argument is treated as a pid, anything else as a command name.
for_comm = None
for_pid = None

if len(sys.argv) > 2:
    sys.exit(usage)

if len(sys.argv) > 1:
    try:
        for_pid = int(sys.argv[1])
    except:
        for_comm = sys.argv[1]

# syscalls[comm][pid][syscall_id] -> call count (autodict nests on demand)
syscalls = autodict()
def trace_begin():
    """Called by perf before event processing starts."""
    print "Press control+C to stop and show the summary"
def trace_end():
    """Called by perf after the last event; emit the summary table."""
    print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
        common_secs, common_nsecs, common_pid, common_comm,
        id, args):
    """Per-event handler: count one sys_enter for (comm, pid, syscall id)."""
    # Apply the optional comm/pid filter from the command line.
    if (for_comm and common_comm != for_comm) or \
       (for_pid and common_pid != for_pid ):
        return
    try:
        syscalls[common_comm][common_pid][id] += 1
    except TypeError:
        # First event for this (comm, pid, id): the autodict leaf does not
        # exist yet, so += raises TypeError; initialise the counter.
        syscalls[common_comm][common_pid][id] = 1
def print_syscall_totals():
    """Print per-comm/pid syscall counts, highest count first."""
    if for_comm is not None:
        print "\nsyscall events for %s:\n\n" % (for_comm),
    else:
        print "\nsyscall events by comm/pid:\n\n",

    print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
    print "%-40s %10s\n" % ("----------------------------------------", \
        "----------"),

    comm_keys = syscalls.keys()
    for comm in comm_keys:
        pid_keys = syscalls[comm].keys()
        for pid in pid_keys:
            print "\n%s [%d]\n" % (comm, pid),
            id_keys = syscalls[comm][pid].keys()
            # Sort by count (descending), then syscall id.
            for id, val in sorted(syscalls[comm][pid].iteritems(), \
                key = lambda(k, v): (v, k), reverse = True):
                print " %-38s %10d\n" % (syscall_name(id), val),
| gpl-2.0 |
zfil/ansible | lib/ansible/parsing/utils/jsonify.py | 203 | 1451 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
try:
import json
except ImportError:
import simplejson as json
def jsonify(result, format=False):
    ''' format JSON output (compressed or uncompressed)

    :param result: dict to serialise, or None (rendered as "{}")
    :param format: when True, pretty-print with a 4-space indent
    :returns: JSON string
    '''

    if result is None:
        return "{}"

    result2 = result.copy()
    for key, value in result2.items():
        # Decode byte strings to text.  On Python 2 ``bytes`` is ``str``,
        # so this matches the original ``type(value) is str`` behaviour;
        # on Python 3 it avoids calling .decode() on text (AttributeError).
        if isinstance(value, bytes):
            result2[key] = value.decode('utf-8', 'ignore')

    indent = 4 if format else None

    try:
        return json.dumps(result2, sort_keys=True, indent=indent, ensure_ascii=False)
    except UnicodeDecodeError:
        # Fall back to ASCII-escaped output for undecodable data.
        return json.dumps(result2, sort_keys=True, indent=indent)
| gpl-3.0 |
lightbase/LBIndex | lbiapi/views/command.py | 2 | 1431 | # -*- coding: utf-8 -*-
from . import CustomView
from response import Cmd
from error import HTTPServiceException
from response import HTTPCode
class CommandView(CustomView):
    """Handle the view for commands.

    Extends CustomView.

    Args:
        context (instance): Context instance to use with the view.
        request (pyramid.request.Request): Request produced by the Pyramid
            package.

    Returns:
        CommandView: CommandView instance.
    """

    def __init__(self, context, request):
        super(CommandView, self).__init__(context, request)

    def post_command(self):
        """Handle the HTTP POST verb: run the command and render its result."""
        params, method = self.split_req(self.request)
        result = self.context.post_command(params)
        cmds = Cmd(result, HTTPCode().CODE200)
        self.cmds = cmds
        return self.render_response()

    def get_command(self):
        """Handle the HTTP GET verb (not implemented: 501)."""
        raise HTTPServiceException(HTTPCode().CODE501)

    def put_command(self):
        """Handle the HTTP PUT verb (not implemented: 501)."""
        raise HTTPServiceException(HTTPCode().CODE501)

    def delete_command(self):
        """Handle the HTTP DELETE verb (not implemented: 501)."""
        raise HTTPServiceException(HTTPCode().CODE501)
| gpl-2.0 |
github-account-because-they-want-it/django | tests/expressions_case/tests.py | 102 | 49774 | from __future__ import unicode_literals
import unittest
from datetime import date, datetime, time, timedelta
from decimal import Decimal
from operator import attrgetter, itemgetter
from uuid import UUID
from django.core.exceptions import FieldError
from django.db import connection, models
from django.db.models import F, Q, Max, Min, Value
from django.db.models.expressions import Case, When
from django.test import TestCase
from django.utils import six
from .models import CaseTestModel, Client, FKCaseTestModel, O2OCaseTestModel
try:
from PIL import Image
except ImportError:
Image = None
class CaseExpressionTests(TestCase):
@classmethod
def setUpTestData(cls):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=1)
o = CaseTestModel.objects.create(integer=2, integer2=3, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=2, integer2=2, string='2')
O2OCaseTestModel.objects.create(o2o=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=2)
FKCaseTestModel.objects.create(fk=o, integer=3)
o = CaseTestModel.objects.create(integer=3, integer2=4, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=3, integer2=3, string='3')
O2OCaseTestModel.objects.create(o2o=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=3)
FKCaseTestModel.objects.create(fk=o, integer=4)
o = CaseTestModel.objects.create(integer=4, integer2=5, string='4')
O2OCaseTestModel.objects.create(o2o=o, integer=1)
FKCaseTestModel.objects.create(fk=o, integer=5)
# GROUP BY on Oracle fails with TextField/BinaryField; see #24096.
cls.non_lob_fields = [
f.name for f in CaseTestModel._meta.get_fields()
if not (f.is_relation and f.auto_created) and not isinstance(f, (models.BinaryField, models.TextField))
]
def test_annotate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=1),
When(integer=2, then=2),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer=1, then=F('o2o_rel__integer') + 1),
When(integer=2, then=F('o2o_rel__integer') + 3),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 1)],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(integer2=F('o2o_rel__integer'), then=Value('equal')),
When(integer2=F('o2o_rel__integer') + 1, then=Value('+1')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, 'other')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(join_test=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
)).order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'three'), (2, 'two'), (3, 'three'), (3, 'three'), (4, 'one')],
transform=attrgetter('integer', 'join_test')
)
def test_annotate_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
f_plus_3=F('integer') + 3,
).annotate(
f_test=Case(
When(integer=1, then='f_plus_1'),
When(integer=2, then='f_plus_3'),
default='integer',
),
).order_by('pk'),
[(1, 2), (2, 5), (3, 3), (2, 5), (3, 3), (3, 3), (4, 4)],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).annotate(
f_test=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('f_plus_1'), then=Value('+1')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'f_test')
)
def test_annotate_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_minus_2=F('integer') - 2,
).annotate(
test=Case(
When(f_minus_2=-1, then=Value('negative one')),
When(f_minus_2=0, then=Value('zero')),
When(f_minus_2=1, then=Value('one')),
default=Value('other'),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 'negative one'), (2, 'zero'), (3, 'one'), (2, 'zero'), (3, 'one'), (3, 'one'), (4, 'other')],
transform=attrgetter('integer', 'test')
)
def test_annotate_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(1, None, 1, 1), (2, 2, 2, 3), (3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4), (3, 4, 3, 4), (4, None, 5, 5)],
transform=itemgetter('integer', 'test', 'min', 'max')
)
def test_annotate_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(integer2=F('min'), then=Value('min')),
When(integer2=F('max'), then=Value('max')),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, 'min'), (2, 3, 'max'), (3, 4, 'max'), (2, 2, 'min'), (3, 4, 'max'), (3, 3, 'min'), (4, 5, 'min')],
transform=itemgetter('integer', 'integer2', 'test')
)
def test_annotate_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).annotate(
test=Case(
When(max=3, then=Value('max = 3')),
When(max=4, then=Value('max = 4')),
default=Value(''),
output_field=models.CharField(),
),
).order_by('pk'),
[(1, 1, ''), (2, 3, 'max = 3'), (3, 4, 'max = 4'), (2, 3, 'max = 3'),
(3, 4, 'max = 4'), (3, 4, 'max = 4'), (4, 5, '')],
transform=itemgetter('integer', 'max', 'test')
)
def test_annotate_exclude(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(test=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
output_field=models.CharField(),
)).exclude(test='other').order_by('pk'),
[(1, 'one'), (2, 'two'), (2, 'two')],
transform=attrgetter('integer', 'test')
)
def test_combined_expression(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
) + 1,
).order_by('pk'),
[(1, 3), (2, 2), (3, 4), (2, 2), (3, 4), (3, 4), (4, 4)],
transform=attrgetter('integer', 'test')
)
if connection.vendor == 'sqlite' and connection.Database.sqlite_version_info < (3, 7, 0):
# There is a bug in sqlite < 3.7.0, where placeholder order is lost.
# Thus, the above query returns <condition_value> + <result_value>
# for each matching case instead of <result_value> + 1 (#24148).
test_combined_expression = unittest.expectedFailure(test_combined_expression)
def test_in_subquery(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(
pk__in=CaseTestModel.objects.annotate(
test=Case(
When(integer=F('integer2'), then='pk'),
When(integer=4, then='pk'),
output_field=models.IntegerField(),
),
).values('test')).order_by('pk'),
[(1, 1), (2, 2), (3, 3), (4, 5)],
transform=attrgetter('integer', 'integer2')
)
def test_case_reuse(self):
SOME_CASE = Case(
When(pk=0, then=Value('0')),
default=Value('1'),
output_field=models.CharField(),
)
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk'),
CaseTestModel.objects.annotate(somecase=SOME_CASE).order_by('pk').values_list('pk', 'somecase'),
lambda x: (x.pk, x.somecase)
)
def test_aggregate(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(
When(integer=1, then=1),
output_field=models.IntegerField(),
)),
two=models.Sum(Case(
When(integer=2, then=1),
output_field=models.IntegerField(),
)),
three=models.Sum(Case(
When(integer=3, then=1),
output_field=models.IntegerField(),
)),
four=models.Sum(Case(
When(integer=4, then=1),
output_field=models.IntegerField(),
)),
),
{'one': 1, 'two': 2, 'three': 3, 'four': 1}
)
def test_aggregate_with_expression_as_value(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
one=models.Sum(Case(When(integer=1, then='integer'))),
two=models.Sum(Case(When(integer=2, then=F('integer') - 1))),
three=models.Sum(Case(When(integer=3, then=F('integer') + 1))),
),
{'one': 1, 'two': 2, 'three': 12}
)
def test_aggregate_with_expression_as_condition(self):
self.assertEqual(
CaseTestModel.objects.aggregate(
equal=models.Sum(Case(
When(integer2=F('integer'), then=1),
output_field=models.IntegerField(),
)),
plus_one=models.Sum(Case(
When(integer2=F('integer') + 1, then=1),
output_field=models.IntegerField(),
)),
),
{'equal': 3, 'plus_one': 4}
)
def test_filter(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
default=1,
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_without_default(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=3),
When(integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('integer') + 1),
When(integer=3, then=F('integer')),
default='integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_expression_as_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string=Case(
When(integer2=F('integer'), then=Value('2')),
When(integer2=F('integer') + 1, then=Value('3')),
output_field=models.CharField(),
)).order_by('pk'),
[(3, 4, '3'), (2, 2, '2'), (3, 4, '3')],
transform=attrgetter('integer', 'integer2', 'string')
)
def test_filter_with_join_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(integer=2, then=F('o2o_rel__integer') + 1),
When(integer=3, then=F('o2o_rel__integer')),
default='o2o_rel__integer',
)).order_by('pk'),
[(1, 1), (2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
)).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_join_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer2=Case(
When(o2o_rel__integer=1, then=1),
When(o2o_rel__integer=2, then=3),
When(o2o_rel__integer=3, then=4),
output_field=models.IntegerField(),
)).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f=F('integer'),
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(integer=2, then='f_plus_1'),
When(integer=3, then='f'),
),
).order_by('pk'),
[(2, 3), (3, 3)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer=Case(
When(integer2=F('integer'), then=2),
When(integer2=F('f_plus_1'), then=3),
output_field=models.IntegerField(),
),
).order_by('pk'),
[(3, 4), (2, 2), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_annotation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
f_plus_1=F('integer') + 1,
).filter(
integer2=Case(
When(f_plus_1=3, then=3),
When(f_plus_1=4, then=4),
default=1,
output_field=models.IntegerField(),
),
).order_by('pk'),
[(1, 1), (2, 3), (3, 4), (3, 4)],
transform=attrgetter('integer', 'integer2')
)
def test_filter_with_aggregation_in_value(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer2=Case(
When(integer=2, then='min'),
When(integer=3, then='max'),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
min=Min('fk_rel__integer'),
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(integer2=F('min'), then=2),
When(integer2=F('max'), then=3),
),
).order_by('pk'),
[(3, 4, 3, 4), (2, 2, 2, 3), (3, 4, 3, 4)],
transform=itemgetter('integer', 'integer2', 'min', 'max')
)
def test_filter_with_aggregation_in_predicate(self):
self.assertQuerysetEqual(
CaseTestModel.objects.values(*self.non_lob_fields).annotate(
max=Max('fk_rel__integer'),
).filter(
integer=Case(
When(max=3, then=2),
When(max=4, then=3),
),
).order_by('pk'),
[(2, 3, 3), (3, 4, 4), (2, 2, 3), (3, 4, 4), (3, 3, 4)],
transform=itemgetter('integer', 'integer2', 'max')
)
def test_update(self):
CaseTestModel.objects.update(
string=Case(
When(integer=1, then=Value('one')),
When(integer=2, then=Value('two')),
default=Value('other'),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'one'), (2, 'two'), (3, 'other'), (2, 'two'), (3, 'other'), (3, 'other'), (4, 'other')],
transform=attrgetter('integer', 'string')
)
def test_update_without_default(self):
CaseTestModel.objects.update(
integer2=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'integer2')
)
def test_update_with_expression_as_value(self):
CaseTestModel.objects.update(
integer=Case(
When(integer=1, then=F('integer') + 1),
When(integer=2, then=F('integer') + 3),
default='integer',
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[('1', 2), ('2', 5), ('3', 3), ('2', 5), ('3', 3), ('3', 3), ('4', 4)],
transform=attrgetter('string', 'integer')
)
def test_update_with_expression_as_condition(self):
CaseTestModel.objects.update(
string=Case(
When(integer2=F('integer'), then=Value('equal')),
When(integer2=F('integer') + 1, then=Value('+1')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 'equal'), (2, '+1'), (3, '+1'), (2, 'equal'), (3, '+1'), (3, 'equal'), (4, '+1')],
transform=attrgetter('integer', 'string')
)
def test_update_with_join_in_condition_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
integer=Case(
When(integer2=F('o2o_rel__integer') + 1, then=2),
When(integer2=F('o2o_rel__integer'), then=3),
output_field=models.IntegerField(),
),
)
def test_update_with_join_in_predicate_raise_field_error(self):
with self.assertRaisesMessage(FieldError, 'Joined field references are not permitted in this query'):
CaseTestModel.objects.update(
string=Case(
When(o2o_rel__integer=1, then=Value('one')),
When(o2o_rel__integer=2, then=Value('two')),
When(o2o_rel__integer=3, then=Value('three')),
default=Value('other'),
output_field=models.CharField(),
),
)
def test_update_big_integer(self):
CaseTestModel.objects.update(
big_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'big_integer')
)
def test_update_binary(self):
CaseTestModel.objects.update(
binary=Case(
# fails on postgresql on Python 2.7 if output_field is not
# set explicitly
When(integer=1, then=Value(b'one', output_field=models.BinaryField())),
When(integer=2, then=Value(b'two', output_field=models.BinaryField())),
default=Value(b'', output_field=models.BinaryField()),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, b'one'), (2, b'two'), (3, b''), (2, b'two'), (3, b''), (3, b''), (4, b'')],
transform=lambda o: (o.integer, six.binary_type(o.binary))
)
def test_update_boolean(self):
CaseTestModel.objects.update(
boolean=Case(
When(integer=1, then=True),
When(integer=2, then=True),
default=False,
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, True), (3, False), (2, True), (3, False), (3, False), (4, False)],
transform=attrgetter('integer', 'boolean')
)
def test_update_comma_separated_integer(self):
CaseTestModel.objects.update(
comma_separated_integer=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2,2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2,2'), (3, ''), (2, '2,2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'comma_separated_integer')
)
def test_update_date(self):
CaseTestModel.objects.update(
date=Case(
When(integer=1, then=date(2015, 1, 1)),
When(integer=2, then=date(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, date(2015, 1, 1)), (2, date(2015, 1, 2)), (3, None), (2, date(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date')
)
def test_update_date_time(self):
CaseTestModel.objects.update(
date_time=Case(
When(integer=1, then=datetime(2015, 1, 1)),
When(integer=2, then=datetime(2015, 1, 2)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, datetime(2015, 1, 1)), (2, datetime(2015, 1, 2)), (3, None), (2, datetime(2015, 1, 2)),
(3, None), (3, None), (4, None)
],
transform=attrgetter('integer', 'date_time')
)
def test_update_decimal(self):
CaseTestModel.objects.update(
decimal=Case(
When(integer=1, then=Decimal('1.1')),
When(integer=2, then=Decimal('2.2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, Decimal('1.1')),
(2, Decimal('2.2')),
(3, None),
(2, Decimal('2.2')),
(3, None),
(3, None),
(4, None)
],
transform=attrgetter('integer', 'decimal')
)
def test_update_duration(self):
CaseTestModel.objects.update(
duration=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing timedeltas
When(integer=1, then=Value(timedelta(1), output_field=models.DurationField())),
When(integer=2, then=Value(timedelta(2), output_field=models.DurationField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, timedelta(1)), (2, timedelta(2)), (3, None), (2, timedelta(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'duration')
)
def test_update_email(self):
CaseTestModel.objects.update(
email=Case(
When(integer=1, then=Value('1@example.com')),
When(integer=2, then=Value('2@example.com')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1@example.com'), (2, '2@example.com'), (3, ''), (2, '2@example.com'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'email')
)
def test_update_file(self):
CaseTestModel.objects.update(
file=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.file))
)
def test_update_file_path(self):
CaseTestModel.objects.update(
file_path=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'file_path')
)
def test_update_float(self):
CaseTestModel.objects.update(
float=Case(
When(integer=1, then=1.1),
When(integer=2, then=2.2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1.1), (2, 2.2), (3, None), (2, 2.2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'float')
)
@unittest.skipUnless(Image, "Pillow not installed")
def test_update_image(self):
CaseTestModel.objects.update(
image=Case(
When(integer=1, then=Value('~/1')),
When(integer=2, then=Value('~/2')),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '~/1'), (2, '~/2'), (3, ''), (2, '~/2'), (3, ''), (3, ''), (4, '')],
transform=lambda o: (o.integer, six.text_type(o.image))
)
def test_update_generic_ip_address(self):
CaseTestModel.objects.update(
generic_ip_address=Case(
# fails on postgresql if output_field is not set explicitly
When(integer=1, then=Value('1.1.1.1')),
When(integer=2, then=Value('2.2.2.2')),
output_field=models.GenericIPAddressField(),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1.1.1.1'), (2, '2.2.2.2'), (3, None), (2, '2.2.2.2'), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'generic_ip_address')
)
def test_update_null_boolean(self):
CaseTestModel.objects.update(
null_boolean=Case(
When(integer=1, then=True),
When(integer=2, then=False),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, True), (2, False), (3, None), (2, False), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'null_boolean')
)
def test_update_positive_integer(self):
CaseTestModel.objects.update(
positive_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_integer')
)
def test_update_positive_small_integer(self):
CaseTestModel.objects.update(
positive_small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'positive_small_integer')
)
def test_update_slug(self):
CaseTestModel.objects.update(
slug=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'slug')
)
def test_update_small_integer(self):
CaseTestModel.objects.update(
small_integer=Case(
When(integer=1, then=1),
When(integer=2, then=2),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, 1), (2, 2), (3, None), (2, 2), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'small_integer')
)
def test_update_string(self):
CaseTestModel.objects.filter(string__in=['1', '2']).update(
string=Case(
When(integer=1, then=Value('1', output_field=models.CharField())),
When(integer=2, then=Value('2', output_field=models.CharField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.filter(string__in=['1', '2']).order_by('pk'),
[(1, '1'), (2, '2'), (2, '2')],
transform=attrgetter('integer', 'string')
)
def test_update_text(self):
CaseTestModel.objects.update(
text=Case(
When(integer=1, then=Value('1')),
When(integer=2, then=Value('2')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, '1'), (2, '2'), (3, ''), (2, '2'), (3, ''), (3, ''), (4, '')],
transform=attrgetter('integer', 'text')
)
def test_update_time(self):
CaseTestModel.objects.update(
time=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing times
When(integer=1, then=Value(time(1), output_field=models.TimeField())),
When(integer=2, then=Value(time(2), output_field=models.TimeField())),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, time(1)), (2, time(2)), (3, None), (2, time(2)), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'time')
)
def test_update_url(self):
CaseTestModel.objects.update(
url=Case(
When(integer=1, then=Value('http://1.example.com/')),
When(integer=2, then=Value('http://2.example.com/')),
default=Value(''),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, 'http://1.example.com/'), (2, 'http://2.example.com/'), (3, ''), (2, 'http://2.example.com/'),
(3, ''), (3, ''), (4, '')
],
transform=attrgetter('integer', 'url')
)
def test_update_uuid(self):
CaseTestModel.objects.update(
uuid=Case(
# fails on sqlite if output_field is not set explicitly on all
# Values containing UUIDs
When(integer=1, then=Value(
UUID('11111111111111111111111111111111'),
output_field=models.UUIDField(),
)),
When(integer=2, then=Value(
UUID('22222222222222222222222222222222'),
output_field=models.UUIDField(),
)),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[
(1, UUID('11111111111111111111111111111111')),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(2, UUID('22222222222222222222222222222222')),
(3, None),
(3, None),
(4, None),
],
transform=attrgetter('integer', 'uuid')
)
def test_update_fk(self):
obj1, obj2 = CaseTestModel.objects.all()[:2]
CaseTestModel.objects.update(
fk=Case(
When(integer=1, then=obj1.pk),
When(integer=2, then=obj2.pk),
),
)
self.assertQuerysetEqual(
CaseTestModel.objects.all().order_by('pk'),
[(1, obj1.pk), (2, obj2.pk), (3, None), (2, obj2.pk), (3, None), (3, None), (4, None)],
transform=attrgetter('integer', 'fk_id')
)
def test_lookup_in_condition(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer__lt=2, then=Value('less than 2')),
When(integer__gt=2, then=Value('greater than 2')),
default=Value('equal to 2'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 'less than 2'), (2, 'equal to 2'), (3, 'greater than 2'), (2, 'equal to 2'), (3, 'greater than 2'),
(3, 'greater than 2'), (4, 'greater than 2')
],
transform=attrgetter('integer', 'test')
)
def test_lookup_different_fields(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(integer=2, integer2=3, then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'default'), (3, 4, 'default'),
(3, 3, 'default'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_combined_q_object(self):
self.assertQuerysetEqual(
CaseTestModel.objects.annotate(
test=Case(
When(Q(integer=2) | Q(integer2=3), then=Value('when')),
default=Value('default'),
output_field=models.CharField(),
),
).order_by('pk'),
[
(1, 1, 'default'), (2, 3, 'when'), (3, 4, 'default'), (2, 2, 'when'), (3, 4, 'default'),
(3, 3, 'when'), (4, 5, 'default')
],
transform=attrgetter('integer', 'integer2', 'test')
)
def test_order_by_conditional_implicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by('test', 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_order_by_conditional_explicit(self):
self.assertQuerysetEqual(
CaseTestModel.objects.filter(integer__lte=2).annotate(test=Case(
When(integer=1, then=2),
When(integer=2, then=1),
default=3,
output_field=models.IntegerField(),
)).order_by(F('test').asc(), 'pk'),
[(2, 1), (2, 1), (1, 2)],
transform=attrgetter('integer', 'test')
)
def test_join_promotion(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
output_field=models.IntegerField()
),
),
[(o, 3)],
lambda x: (x, x.foo)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
output_field=models.IntegerField()
),
),
[(o, 2)],
lambda x: (x, x.foo)
)
def test_join_promotion_multiple_annonations(self):
o = CaseTestModel.objects.create(integer=1, integer2=1, string='1')
# Testing that:
# 1. There isn't any object on the remote side of the fk_rel
# relation. If the query used inner joins, then the join to fk_rel
# would remove o from the results. So, in effect we are testing that
# we are promoting the fk_rel join to a left outer join here.
# 2. The default value of 3 is generated for the case expression.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__pk=1, then=2),
default=3,
output_field=models.IntegerField()
),
bar=Case(
When(fk_rel__pk=1, then=4),
default=5,
output_field=models.IntegerField()
),
),
[(o, 3, 5)],
lambda x: (x, x.foo, x.bar)
)
# Now 2 should be generated, as the fk_rel is null.
self.assertQuerysetEqual(
CaseTestModel.objects.filter(pk=o.pk).annotate(
foo=Case(
When(fk_rel__isnull=True, then=2),
default=3,
output_field=models.IntegerField()
),
bar=Case(
When(fk_rel__isnull=True, then=4),
default=5,
output_field=models.IntegerField()
),
),
[(o, 2, 4)],
lambda x: (x, x.foo, x.bar)
)
def test_m2m_exclude(self):
CaseTestModel.objects.create(integer=10, integer2=1, string='1')
qs = CaseTestModel.objects.values_list('id', 'integer').annotate(
cnt=models.Sum(
Case(When(~Q(fk_rel__integer=1), then=1), default=2),
output_field=models.IntegerField()
),
).order_by('integer')
# The first o has 2 as its fk_rel__integer=1, thus it hits the
# default=2 case. The other ones have 2 as the result as they have 2
# fk_rel objects, except for integer=4 and integer=10 (created above).
# The integer=4 case has one integer, thus the result is 1, and
# integer=10 doesn't have any and this too generates 1 (instead of 0)
# as ~Q() also matches nulls.
self.assertQuerysetEqual(
qs,
[(1, 2), (2, 2), (2, 2), (3, 2), (3, 2), (3, 2), (4, 1), (10, 1)],
lambda x: x[1:]
)
def test_m2m_reuse(self):
    """Two identical Case annotations must share a single m2m JOIN."""
    CaseTestModel.objects.create(integer=10, integer2=1, string='1')
    # values_list() comes before annotate() so that Oracle will not group
    # by fields it isn't capable of grouping by.
    qs = CaseTestModel.objects.values_list('id', 'integer')
    qs = qs.annotate(
        cnt=models.Sum(
            Case(When(~Q(fk_rel__integer=1), then=1), default=2),
            output_field=models.IntegerField()
        ),
    )
    qs = qs.annotate(
        cnt2=models.Sum(
            Case(When(~Q(fk_rel__integer=1), then=1), default=2),
            output_field=models.IntegerField()
        ),
    )
    qs = qs.order_by('integer')
    # Both annotations reference the same relation; the compiler must
    # reuse one join rather than add a second one.
    self.assertEqual(str(qs.query).count(' JOIN '), 1)
    self.assertQuerysetEqual(
        qs,
        [(1, 2, 2), (2, 2, 2), (2, 2, 2), (3, 2, 2), (3, 2, 2), (3, 2, 2), (4, 1, 1), (10, 1, 1)],
        lambda row: row[1:]
    )
class CaseDocumentationExamples(TestCase):
    """Exercise the Case()/When() examples from the conditional-expressions docs."""

    @classmethod
    def setUpTestData(cls):
        # One client per account type, registered at different times so the
        # lookup examples can discriminate by registration age.
        fixtures = (
            ('Jane Doe', Client.REGULAR, 36),
            ('James Smith', Client.GOLD, 5),
            ('Jack Black', Client.PLATINUM, 10 * 365),
        )
        for name, account_type, days_ago in fixtures:
            Client.objects.create(
                name=name,
                account_type=account_type,
                registered_on=date.today() - timedelta(days=days_ago),
            )

    def test_simple_example(self):
        # GOLD gets 5%, PLATINUM gets 10%, everyone else the 0% default.
        discounted = Client.objects.annotate(
            discount=Case(
                When(account_type=Client.GOLD, then=Value('5%')),
                When(account_type=Client.PLATINUM, then=Value('10%')),
                default=Value('0%'),
                output_field=models.CharField(),
            ),
        ).order_by('pk')
        self.assertQuerysetEqual(
            discounted,
            [('Jane Doe', '0%'), ('James Smith', '5%'), ('Jack Black', '10%')],
            transform=attrgetter('name', 'discount')
        )

    def test_lookup_example(self):
        a_month_ago = date.today() - timedelta(days=30)
        a_year_ago = date.today() - timedelta(days=365)
        # Registration age drives the discount: more than a year -> 10%,
        # more than a month -> 5%, otherwise the 0% default.
        discounted = Client.objects.annotate(
            discount=Case(
                When(registered_on__lte=a_year_ago, then=Value('10%')),
                When(registered_on__lte=a_month_ago, then=Value('5%')),
                default=Value('0%'),
                output_field=models.CharField(),
            ),
        ).order_by('pk')
        self.assertQuerysetEqual(
            discounted,
            [('Jane Doe', '5%'), ('James Smith', '0%'), ('Jack Black', '10%')],
            transform=attrgetter('name', 'discount')
        )

    def test_conditional_update_example(self):
        a_month_ago = date.today() - timedelta(days=30)
        a_year_ago = date.today() - timedelta(days=365)
        # Recompute every client's account type from its registration age.
        Client.objects.update(
            account_type=Case(
                When(registered_on__lte=a_year_ago, then=Value(Client.PLATINUM)),
                When(registered_on__lte=a_month_ago, then=Value(Client.GOLD)),
                default=Value(Client.REGULAR),
            ),
        )
        self.assertQuerysetEqual(
            Client.objects.all().order_by('pk'),
            [('Jane Doe', 'G'), ('James Smith', 'R'), ('Jack Black', 'P')],
            transform=attrgetter('name', 'account_type')
        )

    def test_conditional_aggregation_example(self):
        # Add a few more clients registered today.
        extra = (
            ('Jean Grey', Client.REGULAR),
            ('James Bond', Client.PLATINUM),
            ('Jane Porter', Client.PLATINUM),
        )
        for name, account_type in extra:
            Client.objects.create(
                name=name,
                account_type=account_type,
                registered_on=date.today(),
            )

        def count_of(account_type):
            # Sum of 1-per-matching-row; non-matching rows contribute NULL
            # (ignored by SUM), giving a per-type count.
            return models.Sum(Case(
                When(account_type=account_type, then=1),
                output_field=models.IntegerField(),
            ))

        self.assertEqual(
            Client.objects.aggregate(
                regular=count_of(Client.REGULAR),
                gold=count_of(Client.GOLD),
                platinum=count_of(Client.PLATINUM),
            ),
            {'regular': 2, 'gold': 1, 'platinum': 3}
        )
| bsd-3-clause |
ntt-sic/nova | nova/tests/virt/powervm/test_powervm.py | 1 | 52487 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for PowerVMDriver.
"""
import contextlib
import mock
import os
import paramiko
from nova import context
from nova import db
from nova import exception as n_exc
from nova import test
from nova.compute import flavors
from nova.compute import power_state
from nova.compute import task_states
from nova.network import model as network_model
from nova.openstack.common import processutils
from nova.tests import fake_network_cache_model
from nova.tests.image import fake
from nova.virt import images
from nova.virt.powervm import blockdev as powervm_blockdev
from nova.virt.powervm import common
from nova.virt.powervm import constants
from nova.virt.powervm import driver as powervm_driver
from nova.virt.powervm import exception
from nova.virt.powervm import lpar
from nova.virt.powervm import operator as powervm_operator
def fake_lpar(instance_name, state=constants.POWERVM_RUNNING):
    """Build an LPAR test double named after *instance_name*.

    The resource figures (1 GiB desired / 2 GiB max memory, 2 procs) are
    canned values that several tests assert against.
    """
    return lpar.LPAR(
        name=instance_name,
        lpar_id=1,
        desired_mem=1024,
        max_mem=2048,
        max_procs=2,
        uptime=939395,
        state=state,
    )
def fake_ssh_connect(connection):
    """Stub for common.ssh_connect: ignore *connection* and hand back a
    fresh, unconnected paramiko.SSHClient.
    """
    client = paramiko.SSHClient()
    return client
def raise_(ex):
    """Raise *ex* — lets a lambda (expression-only) trigger an exception."""
    raise ex
class FakePowerVMOperator(powervm_operator.PowerVMOperator):
    """PowerVMOperator double: canned LPAR lookups and no-op VIOS commands."""

    def get_lpar(self, instance_name, resource_type='lpar'):
        # Every lookup reports a running LPAR named after the instance.
        return fake_lpar(instance_name)

    def run_vios_command(self, cmd):
        # Nothing to execute in tests; swallow the command.
        pass
class FakeIVMOperator(powervm_operator.IVMOperator):
    """IVMOperator double: canned inventory data, no-op VIOS interaction."""

    def get_lpar(self, instance_name, resource_type='lpar'):
        # Every lookup reports a running LPAR named after the instance.
        return fake_lpar(instance_name)

    def list_lpar_instances(self):
        return ['instance-00000001', 'instance-00000002']

    def create_lpar(self, lpar):
        pass

    def start_lpar(self, instance_name):
        pass

    def stop_lpar(self, instance_name, time_out=30):
        pass

    def remove_lpar(self, instance_name):
        pass

    def get_vhost_by_instance_id(self, instance_id):
        return 'vhostfake'

    def get_virtual_eth_adapter_id(self):
        return 1

    def get_disk_name_by_vhost(self, vhost):
        return 'lvfake01'

    def remove_disk(self, disk_name):
        pass

    def run_cfg_dev(self, device_name):
        pass

    def attach_disk_to_vhost(self, disk, vhost):
        pass

    def get_memory_info(self):
        # 64 GiB total, ~45 GiB available (asserted by host-stats tests).
        return {'total_mem': 65536, 'avail_mem': 46336}

    def get_cpu_info(self):
        return {'total_procs': 8.0, 'avail_procs': 6.3}

    def get_disk_info(self):
        return {
            'disk_total': 10168,
            'disk_used': 0,
            'disk_avail': 10168,
        }

    def get_hostname(self):
        return 'fake-powervm'

    def rename_lpar(self, old, new):
        pass

    def set_lpar_mac_base_value(self, instance_name, mac):
        pass

    def get_logical_vol_size(self, diskname):
        pass

    def macs_for_instance(self, instance):
        return {'FA:98:64:2B:29:39'}

    def run_vios_command(self, cmd):
        pass
class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter):
    """PowerVMLocalVolumeAdapter double that fakes all VIOS disk operations.

    Skips the parent constructor (which would set up a real VIOS
    connection) and supplies canned connection data directly.
    """

    def __init__(self):
        self.connection_data = common.Connection(host='fake_compute_1',
                                                 username='fake_user',
                                                 password='fake_pass')

    def _create_logical_volume(self, size):
        # Any requested size "succeeds" and yields the same fake LV name.
        return 'lvfake01'

    def _remove_logical_volume(self, lv_name):
        pass

    def _copy_file_to_device(self, sourcePath, device, decompress=True):
        # BUG FIX: the keyword was previously misspelled 'decrompress';
        # callers passing decompress=... by keyword (as the overridden
        # PowerVMLocalVolumeAdapter method is declared) would have raised
        # TypeError. Renamed to match the parent signature.
        pass

    def _copy_image_file(self, sourcePath, remotePath, decompress=False):
        # Pretend a 4 GiB image landed at a fixed staging path.
        finalPath = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
        size = 4294967296
        return finalPath, size

    def _copy_device_to_file(self, device_name, file_path):
        pass

    def _copy_image_file_from_host(self, remote_source_path, local_dest_dir,
                                   compress=False):
        # Create an empty file so callers can open()/remove() the returned
        # snapshot path.
        snapshot_file = '/tmp/rhel62.raw.7e358754160433febd6f3318b7c9e335'
        snap_ref = open(snapshot_file, 'w+')
        snap_ref.close()
        return snapshot_file
def fake_get_powervm_operator():
    """Return a FakeIVMOperator wired with throw-away connection data."""
    fake_conn = common.Connection('fake_host', 'fake_user', 'fake_password')
    return FakeIVMOperator(fake_conn)
def create_instance(testcase):
    """Create and return a DB-backed test instance (1 GiB RAM, 2 vCPUs).

    Also stubs out the image service on *testcase* so image lookups during
    the test never touch a real Glance.
    """
    fake.stub_out_image_service(testcase.stubs)
    ctxt = context.get_admin_context()
    instance_type = db.flavor_get(ctxt, 1)
    sys_meta = flavors.save_flavor_info({}, instance_type)
    values = {
        'user_id': 'fake',
        'project_id': 'fake',
        'instance_type_id': 1,
        'memory_mb': 1024,
        'vcpus': 2,
        'image_ref': '155d900f-4e14-4e4c-a73d-069cbf4541e6',
        'system_metadata': sys_meta,
    }
    return db.instance_create(ctxt, values)
class PowerVMDriverTestCase(test.TestCase):
    """Unit tests for PowerVM connection calls."""

    # Sentinel passed around in place of a real network_info structure.
    fake_network_info = 'fake_network_info'
    # Flipped to True by fake_create_lpar_instance so tests can assert
    # the stub was actually invoked.
    fake_create_lpar_instance_called = False

    def fake_create_lpar_instance(self, instance, network_info,
                                  host_stats=None):
        """Stub for the _create_lpar_instance method.

        This stub assumes that 'instance' is the one created in the test case
        setUp method and 'network_info' is equal to self.fake_network_info.

        @return: fake LPAR based on instance parameter where the name of the
                 LPAR is the uuid of the instance
        """
        self.fake_create_lpar_instance_called = True
        self.assertEqual(self.instance, instance)
        self.assertEqual(self.fake_network_info, network_info)
        return self.powervm_connection._powervm._operator.get_lpar(
            instance['uuid'])

    def setUp(self):
        super(PowerVMDriverTestCase, self).setUp()
        # Route the driver to the fake operator/disk adapter so no real
        # VIOS connection is ever attempted.
        self.stubs.Set(powervm_operator, 'get_powervm_operator',
                       fake_get_powervm_operator)
        self.stubs.Set(powervm_operator, 'get_powervm_disk_adapter',
                       lambda: FakeBlockAdapter())
        self.powervm_connection = powervm_driver.PowerVMDriver(None)
        self.instance = create_instance(self)

    def test_list_instances(self):
        instances = self.powervm_connection.list_instances()
        self.assertTrue('instance-00000001' in instances)
        self.assertTrue('instance-00000002' in instances)

    def test_instance_exists(self):
        name = self.instance['name']
        self.assertTrue(self.powervm_connection.instance_exists(name))

    def test_spawn(self):
        def fake_image_fetch(context, image_id, file_path,
                             user_id, project_id):
            # No-op image download.
            pass
        self.flags(powervm_img_local_path='/images/')
        self.stubs.Set(images, 'fetch', fake_image_fetch)
        image_meta = {}
        image_meta['id'] = '666'
        fake_net_info = network_model.NetworkInfo([
            fake_network_cache_model.new_vif()])
        self.powervm_connection.spawn(context.get_admin_context(),
                                      self.instance, image_meta, [], 's3cr3t',
                                      fake_net_info)
        state = self.powervm_connection.get_info(self.instance)['state']
        self.assertEqual(state, power_state.RUNNING)

    def test_spawn_create_lpar_fail(self):
        # A ProcessExecutionError while fetching host stats must surface
        # as PowerVMLPARCreationFailed.
        self.flags(powervm_img_local_path='/images/')
        self.stubs.Set(images, 'fetch', lambda *x, **y: None)
        self.stubs.Set(
            self.powervm_connection._powervm,
            'get_host_stats',
            lambda *x, **y: raise_(
                (processutils.ProcessExecutionError('instance_name'))))
        fake_net_info = network_model.NetworkInfo([
            fake_network_cache_model.new_vif()])
        self.assertRaises(exception.PowerVMLPARCreationFailed,
                          self.powervm_connection.spawn,
                          context.get_admin_context(),
                          self.instance,
                          {'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)

    def test_spawn_cleanup_on_fail(self):
        # Even if cleanup itself raises, the original image-creation
        # failure must be the exception that propagates.
        self.flags(powervm_img_local_path='/images/')
        self.stubs.Set(images, 'fetch', lambda *x, **y: None)
        self.stubs.Set(
            self.powervm_connection._powervm._disk_adapter,
            'create_volume_from_image',
            lambda *x, **y: raise_(exception.PowerVMImageCreationFailed()))
        self.stubs.Set(
            self.powervm_connection._powervm, '_cleanup',
            lambda *x, **y: raise_(Exception('This should be logged.')))
        fake_net_info = network_model.NetworkInfo([
            fake_network_cache_model.new_vif()])
        self.assertRaises(exception.PowerVMImageCreationFailed,
                          self.powervm_connection.spawn,
                          context.get_admin_context(),
                          self.instance,
                          {'id': 'ANY_ID'}, [], 's3cr3t', fake_net_info)

    def test_snapshot(self):
        def update_task_state(task_state, expected_state=None):
            # Record the last task-state transition for assertions below.
            self._loc_task_state = task_state
            self._loc_expected_task_state = expected_state

        loc_context = context.get_admin_context()
        arch = 'fake_arch'
        properties = {'instance_id': self.instance['id'],
                      'user_id': str(loc_context.user_id),
                      'architecture': arch}
        snapshot_name = 'fake_snap'
        sent_meta = {'name': snapshot_name, 'is_public': False,
                     'status': 'creating', 'properties': properties}
        image_service = fake.FakeImageService()
        recv_meta = image_service.create(loc_context, sent_meta)
        self.powervm_connection.snapshot(loc_context,
                                         self.instance, recv_meta['id'],
                                         update_task_state)
        self.assertTrue(
            self._loc_task_state == task_states.IMAGE_UPLOADING and
            self._loc_expected_task_state == task_states.IMAGE_PENDING_UPLOAD)
        # NOTE(review): this passes the `context` *module*, not loc_context;
        # FakeImageService appears to ignore it, but it looks accidental —
        # confirm before relying on it.
        snapshot = image_service.show(context, recv_meta['id'])
        self.assertEqual(snapshot['properties']['image_state'], 'available')
        self.assertEqual(snapshot['properties']['architecture'], arch)
        self.assertEqual(snapshot['status'], 'active')
        self.assertEqual(snapshot['name'], snapshot_name)

    def _set_get_info_stub(self, state):
        # Make the driver report an instance in the given LPAR state string.
        def fake_get_instance(instance_name):
            return {'state': state,
                    'max_mem': 512,
                    'desired_mem': 256,
                    'max_procs': 2,
                    'uptime': 2000}
        self.stubs.Set(self.powervm_connection._powervm, '_get_instance',
                       fake_get_instance)

    def test_get_info_state_nostate(self):
        self._set_get_info_stub('')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.NOSTATE)

    def test_get_info_state_running(self):
        self._set_get_info_stub('Running')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.RUNNING)

    def test_get_info_state_starting(self):
        self._set_get_info_stub('Starting')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.RUNNING)

    def test_get_info_state_shutdown(self):
        self._set_get_info_stub('Not Activated')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.SHUTDOWN)

    def test_get_info_state_shutting_down(self):
        self._set_get_info_stub('Shutting Down')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.SHUTDOWN)

    def test_get_info_state_error(self):
        self._set_get_info_stub('Error')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.CRASHED)

    def test_get_info_state_not_available(self):
        self._set_get_info_stub('Not Available')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.CRASHED)

    def test_get_info_state_open_firmware(self):
        self._set_get_info_stub('Open Firmware')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.CRASHED)

    def test_get_info_state_unmapped(self):
        # Unknown LPAR state strings map to NOSTATE.
        self._set_get_info_stub('The Universe')
        info_dict = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info_dict['state'], power_state.NOSTATE)

    def test_destroy(self):
        self.powervm_connection.destroy(self.instance, None)
        # After destroy, make lookups report "no such LPAR" so
        # instance_exists() returns False.
        self.stubs.Set(FakeIVMOperator, 'get_lpar', lambda x, y: None)
        name = self.instance['name']
        self.assertFalse(self.powervm_connection.instance_exists(name))

    def test_get_info(self):
        # Values come from fake_lpar() above.
        info = self.powervm_connection.get_info(self.instance)
        self.assertEqual(info['state'], power_state.RUNNING)
        self.assertEqual(info['max_mem'], 2048)
        self.assertEqual(info['mem'], 1024)
        self.assertEqual(info['num_cpu'], 2)
        self.assertEqual(info['cpu_time'], 939395)

    def test_remote_utility_1(self):
        # Trailing slash on first part, leading slash on second.
        path_one = '/some/file/'
        path_two = '/path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_2(self):
        # Trailing slash on first part only.
        path_one = '/some/file/'
        path_two = 'path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_3(self):
        # Leading slash on second part only.
        path_one = '/some/file'
        path_two = '/path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def test_remote_utility_4(self):
        # No slashes at the join point.
        path_one = '/some/file'
        path_two = 'path/filename'
        joined_path = common.aix_path_join(path_one, path_two)
        expected_path = '/some/file/path/filename'
        self.assertEqual(joined_path, expected_path)

    def _test_finish_revert_migration_after_crash(self, backup_made,
                                                  new_made,
                                                  power_on):
        # Records mox expectations for the revert flow, then drives
        # finish_revert_migration; ReplayAll enforces call order/count.
        inst = {'name': 'foo'}
        network_info = []
        network_info.append({'address': 'fa:89:f0:8b:9b:39'})
        self.mox.StubOutWithMock(self.powervm_connection, 'instance_exists')
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'destroy')
        self.mox.StubOutWithMock(self.powervm_connection._powervm._operator,
                                 'rename_lpar')
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_on')
        self.mox.StubOutWithMock(self.powervm_connection._powervm._operator,
                                 'set_lpar_mac_base_value')
        self.powervm_connection.instance_exists('rsz_foo').AndReturn(
            backup_made)
        if backup_made:
            self.powervm_connection._powervm._operator.set_lpar_mac_base_value(
                'rsz_foo', 'fa:89:f0:8b:9b:39')
            self.powervm_connection.instance_exists('foo').AndReturn(new_made)
            if new_made:
                self.powervm_connection._powervm.destroy('foo')
            self.powervm_connection._powervm._operator.rename_lpar('rsz_foo',
                                                                   'foo')
        if power_on:
            self.powervm_connection._powervm.power_on('foo')
        self.mox.ReplayAll()
        self.powervm_connection.finish_revert_migration(
            inst, network_info,
            block_device_info=None,
            power_on=power_on)

    def test_finish_revert_migration_after_crash(self):
        self._test_finish_revert_migration_after_crash(True, True, True)

    def test_finish_revert_migration_after_crash_before_new(self):
        self._test_finish_revert_migration_after_crash(True, False, True)

    def test_finish_revert_migration_after_crash_before_backup(self):
        # NOTE(mriedem): tests the power_on=False case also
        self._test_finish_revert_migration_after_crash(False, False, False)

    def test_migrate_volume_use_instance_name(self):
        # When an instance name is given, the resize copy is named after it.
        inst_name = 'instance-00000000'
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_1'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       '_copy_device_to_file', fake_noop)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path, inst_name)
        expected_path = 'some/image/path/instance-00000000_rsz.gz'
        self.assertEqual(file_path, expected_path)

    def test_migrate_volume_use_lv_name(self):
        # Without an instance name, fall back to the logical volume name.
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_1'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       '_copy_device_to_file', fake_noop)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path)
        expected_path = 'some/image/path/logical-vol-name_rsz.gz'
        self.assertEqual(file_path, expected_path)

    def _test_deploy_from_migrated_file(self, power_on):
        instance = self.instance
        context = 'fake_context'
        network_info = []
        network_info.append({'address': 'fa:89:f0:8b:9b:39'})
        dest = '10.8.46.20'
        disk_info = {}
        disk_info['root_disk_file'] = 'some/file/path.gz'
        disk_info['old_lv_size'] = 30
        self.flags(powervm_mgr=dest)
        fake_op = self.powervm_connection._powervm
        self.deploy_from_vios_file_called = False
        self.power_on = power_on
        exp_file_path = 'some/file/path.gz'

        def fake_deploy_from_vios_file(lpar, file_path, size,
                                       decompress, power_on):
            # 40 GiB: the flavor's root_gb (from create_instance) in bytes.
            exp_size = 40 * 1024 ** 3
            exp_decompress = True
            self.deploy_from_vios_file_called = True
            self.assertEqual(exp_file_path, file_path)
            self.assertEqual(exp_size, size)
            self.assertEqual(exp_decompress, decompress)
            self.assertEqual(self.power_on, power_on)

        self.stubs.Set(fake_op, '_deploy_from_vios_file',
                       fake_deploy_from_vios_file)
        # mock out the rm -f command call to vios
        with mock.patch.object(self.powervm_connection._powervm._operator,
                               'run_vios_command_as_root',
                               return_value=[]) as run_vios_cmd:
            self.powervm_connection.finish_migration(
                context, None,
                instance, disk_info, network_info,
                None, resize_instance=True,
                block_device_info=None,
                power_on=power_on)
        run_vios_cmd.assert_called_once_with('rm -f %s' % exp_file_path)
        self.assertEqual(self.deploy_from_vios_file_called, True)

    def test_deploy_from_migrated_file_power_on(self):
        self._test_deploy_from_migrated_file(True)

    def test_deploy_from_migrated_file_power_off(self):
        self._test_deploy_from_migrated_file(False)

    def test_set_lpar_mac_base_value(self):
        instance = self.instance
        context = 'fake_context'
        dest = '10.8.46.20'  # Some fake dest IP
        instance_type = 'fake_instance_type'
        network_info = []
        network_info.append({'address': 'fa:89:f0:8b:9b:39'})
        block_device_info = None
        self.flags(powervm_mgr=dest)
        fake_noop = lambda *args, **kwargs: None
        fake_op = self.powervm_connection._powervm._operator
        self.stubs.Set(fake_op, 'get_vhost_by_instance_id', fake_noop)
        self.stubs.Set(fake_op, 'get_disk_name_by_vhost', fake_noop)
        self.stubs.Set(self.powervm_connection._powervm, 'power_off',
                       fake_noop)
        self.stubs.Set(fake_op, 'get_logical_vol_size',
                       lambda *args, **kwargs: '20')
        self.stubs.Set(self.powervm_connection, '_get_resize_name', fake_noop)
        self.stubs.Set(fake_op, 'rename_lpar', fake_noop)

        def fake_migrate_disk(*args, **kwargs):
            disk_info = {}
            disk_info['fake_dict'] = 'some/file/path.gz'
            return disk_info

        def fake_set_lpar_mac_base_value(inst_name, mac, *args, **kwargs):
            # get expected mac address from FakeIVM set
            fake_ivm = FakeIVMOperator(None)
            exp_mac = fake_ivm.macs_for_instance(inst_name).pop()
            self.assertEqual(exp_mac, mac)

        self.stubs.Set(self.powervm_connection._powervm, 'migrate_disk',
                       fake_migrate_disk)
        self.stubs.Set(fake_op, 'set_lpar_mac_base_value',
                       fake_set_lpar_mac_base_value)
        disk_info = self.powervm_connection.migrate_disk_and_power_off(
            context, instance,
            dest, instance_type, network_info, block_device_info)

    def test_migrate_build_scp_command(self):
        # Verifies the exact scp command built when migrating a volume to a
        # *different* host (src_host != dest triggers the remote copy).
        lv_name = 'logical-vol-name'
        src_host = 'compute_host_1'
        dest = 'compute_host_2'
        image_path = 'some/image/path'
        fake_noop = lambda *args, **kwargs: None

        @contextlib.contextmanager
        def fake_vios_to_vios_auth(*args, **kwargs):
            key_name = 'some_key'
            yield key_name
        self.stubs.Set(common, 'vios_to_vios_auth',
                       fake_vios_to_vios_auth)
        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command_as_root', fake_noop)

        def fake_run_vios_command(*args, **kwargs):
            cmd = args[0]
            exp_cmd = ' '.join(['scp -o "StrictHostKeyChecking no" -i',
                                'some_key',
                                'some/image/path/logical-vol-name_rsz.gz',
                                'fake_user@compute_host_2:some/image/path'])
            self.assertEqual(exp_cmd, cmd)

        self.stubs.Set(self.powervm_connection._powervm._disk_adapter,
                       'run_vios_command',
                       fake_run_vios_command)
        blockdev_op = self.powervm_connection._powervm._disk_adapter
        file_path = blockdev_op.migrate_volume(lv_name, src_host, dest,
                                               image_path)

    def test_get_resize_name(self):
        inst_name = 'instance-00000001'
        expected_name = 'rsz_instance-00000001'
        result = self.powervm_connection._get_resize_name(inst_name)
        self.assertEqual(expected_name, result)

    def test_get_long_resize_name(self):
        # Long names are truncated from the left to fit the rsz_ prefix.
        inst_name = 'some_really_long_instance_name_00000001'
        expected_name = 'rsz__really_long_instance_name_00000001'
        result = self.powervm_connection._get_resize_name(inst_name)
        self.assertEqual(expected_name, result)

    def test_finish_migration_raises_exception(self):
        # Tests that the finish_migration method will raise an exception
        # if the 'root_disk_file' key is not found in the disk_info
        # parameter.
        self.stubs.Set(self.powervm_connection._powervm,
                       '_create_lpar_instance', self.fake_create_lpar_instance)
        self.assertRaises(exception.PowerVMUnrecognizedRootDevice,
                          self.powervm_connection.finish_migration,
                          context.get_admin_context(), None,
                          self.instance, {'old_lv_size': '20'},
                          self.fake_network_info, None, True)
        self.assertTrue(self.fake_create_lpar_instance_called)

    def test_finish_migration_successful(self):
        # Tests a successful migration (resize) flow and asserts various
        # methods called along the way with expected argument values.
        fake_file_path = 'some/file/path.py'
        disk_info = {'root_disk_file': fake_file_path,
                     'old_lv_size': '10'}
        fake_flavor = {'root_gb': 20}
        fake_extract_flavor = lambda *args, **kwargs: fake_flavor
        self.fake_deploy_from_migrated_file_called = False

        def fake_deploy_from_migrated_file(lpar, file_path, size,
                                           power_on=True):
            self.fake_deploy_from_migrated_file_called = True
            # assert the lpar is the one created for this test
            self.assertEqual(self.instance['uuid'], lpar['name'])
            self.assertEqual(fake_file_path, file_path)
            # this tests that the 20GB fake_flavor was used
            self.assertEqual(fake_flavor['root_gb'] * pow(1024, 3), size)
            self.assertTrue(power_on)

        self.stubs.Set(self.powervm_connection._powervm,
                       '_create_lpar_instance',
                       self.fake_create_lpar_instance)
        self.stubs.Set(flavors, 'extract_flavor', fake_extract_flavor)
        self.stubs.Set(self.powervm_connection._powervm,
                       'deploy_from_migrated_file',
                       fake_deploy_from_migrated_file)
        self.powervm_connection.finish_migration(context.get_admin_context(),
                                                 None, self.instance,
                                                 disk_info,
                                                 self.fake_network_info,
                                                 None, True)
        self.assertTrue(self.fake_create_lpar_instance_called)
        self.assertTrue(self.fake_deploy_from_migrated_file_called)

    def test_check_host_resources_insufficient_memory(self):
        # Tests that the _check_host_resources method will raise an
        # exception when the host has insufficient memory for the request.
        host_stats = {'host_memory_free': 512,
                      'vcpus': 12,
                      'vcpus_used': 1}
        self.assertRaises(
            exception.PowerVMInsufficientFreeMemory,
            self.powervm_connection._powervm._check_host_resources,
            self.instance, vcpus=2, mem=4096, host_stats=host_stats)

    def test_check_host_resources_insufficient_vcpus(self):
        # Tests that the _check_host_resources method will raise an
        # exception when the host has insufficient CPU for the request.
        host_stats = {'host_memory_free': 4096,
                      'vcpus': 2,
                      'vcpus_used': 1}
        self.assertRaises(
            exception.PowerVMInsufficientCPU,
            self.powervm_connection._powervm._check_host_resources,
            self.instance, vcpus=12, mem=512, host_stats=host_stats)

    def test_create_lpar_instance_raise_insufficient_memory(self):
        # This test will raise an exception because we use the instance
        # created for this test case which requires 1024 MB of memory
        # but the host only has 512 free.
        host_stats = {'host_memory_free': 512,
                      'vcpus': 12,
                      'vcpus_used': 1}
        self.assertRaises(
            exception.PowerVMInsufficientFreeMemory,
            self.powervm_connection._powervm._create_lpar_instance,
            self.instance, self.fake_network_info, host_stats)

    def test_create_lpar_instance_raise_insufficient_vcpus(self):
        # This test will raise an exception because we use the instance
        # created for this test case which requires 2 CPUs but the host
        # only has 1 CPU free.
        host_stats = {'host_memory_free': 4096,
                      'vcpus': 1,
                      'vcpus_used': 1}
        self.assertRaises(
            exception.PowerVMInsufficientCPU,
            self.powervm_connection._powervm._create_lpar_instance,
            self.instance, self.fake_network_info, host_stats)

    def test_confirm_migration_old_instance_destroyed(self):
        # Tests that the source instance is destroyed when a migration
        # is confirmed.
        resize_name = 'rsz_instance'
        self.fake_destroy_called = False

        def fake_get_resize_name(instance_name):
            self.assertEqual(self.instance['name'], instance_name)
            return resize_name

        def fake_destroy(instance_name, destroy_disks=True):
            self.fake_destroy_called = True
            self.assertEqual(resize_name, instance_name)
            self.assertTrue(destroy_disks)

        self.stubs.Set(self.powervm_connection, '_get_resize_name',
                       fake_get_resize_name)
        self.stubs.Set(self.powervm_connection._powervm, 'destroy',
                       fake_destroy)
        self.powervm_connection.confirm_migration(True, self.instance,
                                                  self.fake_network_info)
        self.assertTrue(self.fake_destroy_called)

    def test_get_host_stats(self):
        # All expected values trace back to FakeIVMOperator's canned data.
        host_stats = self.powervm_connection.get_host_stats(True)
        self.assertIsNotNone(host_stats)
        self.assertEqual(host_stats['vcpus'], 8.0)
        self.assertEqual(round(host_stats['vcpus_used'], 1), 1.7)
        self.assertEqual(host_stats['host_memory_total'], 65536)
        self.assertEqual(host_stats['host_memory_free'], 46336)
        self.assertEqual(host_stats['disk_total'], 10168)
        self.assertEqual(host_stats['disk_used'], 0)
        self.assertEqual(host_stats['disk_available'], 10168)
        self.assertEqual(host_stats['disk_total'],
                         host_stats['disk_used'] +
                         host_stats['disk_available'])
        self.assertEqual(host_stats['cpu_info'], ('ppc64', 'powervm', '3940'))
        self.assertEqual(host_stats['hypervisor_type'], 'powervm')
        self.assertEqual(host_stats['hypervisor_version'], '7.1')
        self.assertEqual(host_stats['hypervisor_hostname'], "fake-powervm")
        self.assertEqual(host_stats['supported_instances'][0][0], "ppc64")
        self.assertEqual(host_stats['supported_instances'][0][1], "powervm")
        self.assertEqual(host_stats['supported_instances'][0][2], "hvm")

    def test_get_available_resource(self):
        # pop() every key so the final length check proves no key went
        # untested.
        res = self.powervm_connection.get_available_resource(nodename='fake')
        self.assertIsNotNone(res)
        self.assertEqual(8.0, res.pop('vcpus'))
        self.assertEqual(65536, res.pop('memory_mb'))
        # NOTE(review): 10168 / 1024 truncates to 9 under Python 2's
        # integer division — presumably intentional for this era of nova.
        self.assertEqual((10168 / 1024), res.pop('local_gb'))
        self.assertEqual(1.7, round(res.pop('vcpus_used'), 1))
        self.assertEqual((65536 - 46336), res.pop('memory_mb_used'))
        self.assertEqual(0, res.pop('local_gb_used'))
        self.assertEqual('powervm', res.pop('hypervisor_type'))
        self.assertEqual('7.1', res.pop('hypervisor_version'))
        self.assertEqual('fake-powervm', res.pop('hypervisor_hostname'))
        self.assertEqual('ppc64,powervm,3940', res.pop('cpu_info'))
        self.assertEqual(10168, res.pop('disk_available_least'))
        self.assertEqual('[["ppc64", "powervm", "hvm"]]',
                         res.pop('supported_instances'))
        self.assertEqual(0, len(res), 'Did not test all keys.')

    def test_get_host_uptime(self):
        # Tests that the get_host_uptime method issues the proper sysstat
        # command and parses the output correctly.
        exp_cmd = "ioscli sysstat -short fake_user"
        output = [("02:54PM up 24 days, 5:41, 1 user, "
                   "load average: 0.06, 0.03, 0.02")]
        fake_op = self.powervm_connection._powervm
        self.mox.StubOutWithMock(fake_op._operator, 'run_vios_command')
        fake_op._operator.run_vios_command(exp_cmd).AndReturn(output)
        self.mox.ReplayAll()
        # the host parameter isn't used so we just pass None
        uptime = self.powervm_connection.get_host_uptime(None)
        self.assertEqual(output[0], uptime)

    def test_plug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError,
                          self.powervm_connection.plug_vifs,
                          instance=self.instance, network_info=None)

    def test_unplug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError,
                          self.powervm_connection.unplug_vifs,
                          instance=self.instance, network_info=None)

    def test_manage_image_cache(self):
        # Check to make sure the method passes (does nothing) since
        # it's not implemented in the powervm driver and it passes
        # in the driver base class.
        self.powervm_connection.manage_image_cache(
            context.get_admin_context(), True)

    def test_init_host(self):
        # Check to make sure the method passes (does nothing) since
        # it simply passes in the powervm driver but it raises a
        # NotImplementedError in the base driver class.
        self.powervm_connection.init_host(host='fake')

    def test_pause(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError, self.powervm_connection.pause,
                          instance=None)

    def test_unpause(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError, self.powervm_connection.unpause,
                          instance=None)

    def test_suspend(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError, self.powervm_connection.suspend,
                          instance=None)

    def test_resume(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError, self.powervm_connection.resume,
                          instance=None, network_info=None)

    def test_host_power_action(self):
        # Check to make sure the method raises NotImplementedError.
        self.assertRaises(NotImplementedError,
                          self.powervm_connection.host_power_action,
                          host='fake', action='die!')

    def test_soft_reboot(self):
        # Tests that soft reboot is a no-op (mock out the power_off/power_on
        # methods to ensure they aren't called)
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_off')
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_on')
        # No expectations recorded: any call would fail the replay.
        self.mox.ReplayAll()
        self.powervm_connection.reboot(context.get_admin_context(),
                                       self.instance, network_info=None,
                                       reboot_type='SOFT')

    def test_hard_reboot(self):
        # Tests that hard reboot is performed by stopping the lpar and then
        # starting it.
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_off')
        self.powervm_connection._powervm.power_off(self.instance['name'])
        self.mox.StubOutWithMock(self.powervm_connection._powervm, 'power_on')
        self.powervm_connection._powervm.power_on(self.instance['name'])
        self.mox.ReplayAll()
        self.powervm_connection.reboot(context.get_admin_context(),
                                       self.instance, network_info=None,
                                       reboot_type='HARD')
class PowerVMDriverLparTestCase(test.TestCase):
"""Unit tests for PowerVM connection calls."""
def setUp(self):
super(PowerVMDriverLparTestCase, self).setUp()
self.stubs.Set(powervm_operator.PowerVMOperator, '_update_host_stats',
lambda self: None)
self.powervm_connection = powervm_driver.PowerVMDriver(None)
def test_set_lpar_mac_base_value_command(self):
inst_name = 'some_instance'
mac = 'FA:98:64:2B:29:39'
exp_mac_str = mac[:-2].replace(':', '')
exp_cmd = ('chsyscfg -r lpar -i "name=%(inst_name)s, '
'virtual_eth_mac_base_value=%(exp_mac_str)s"'
% {'inst_name': inst_name, 'exp_mac_str': exp_mac_str})
fake_op = self.powervm_connection._powervm
self.mox.StubOutWithMock(fake_op._operator, 'run_vios_command')
fake_op._operator.run_vios_command(exp_cmd)
self.mox.ReplayAll()
fake_op._operator.set_lpar_mac_base_value(inst_name, mac)
class PowerVMDriverCommonTestCase(test.TestCase):
"""Unit tests for the nova.virt.powervm.common module."""
def setUp(self):
super(PowerVMDriverCommonTestCase, self).setUp()
# our fake connection information never changes since we can't
# actually connect to anything for these tests
self.connection = common.Connection('fake_host', 'user', 'password')
def test_check_connection_ssh_is_none(self):
"""
Passes a null ssh object to the check_connection method.
The method should create a new ssh connection using the
Connection object and return it.
"""
self.stubs.Set(common, 'ssh_connect', fake_ssh_connect)
ssh = common.check_connection(None, self.connection)
self.assertIsNotNone(ssh)
def test_check_connection_transport_is_dead(self):
"""
Passes an ssh object to the check_connection method which
does not have a transport set.
The method should create a new ssh connection using the
Connection object and return it.
"""
self.stubs.Set(common, 'ssh_connect', fake_ssh_connect)
ssh1 = fake_ssh_connect(self.connection)
ssh2 = common.check_connection(ssh1, self.connection)
self.assertIsNotNone(ssh2)
self.assertNotEqual(ssh1, ssh2)
def test_check_connection_raise_ssh_exception(self):
"""
Passes an ssh object to the check_connection method which
does not have a transport set.
The method should raise an SSHException.
"""
self.stubs.Set(common, 'ssh_connect',
lambda *x, **y: raise_(paramiko.SSHException(
'Error connecting to host.')))
ssh = fake_ssh_connect(self.connection)
self.assertRaises(paramiko.SSHException,
common.check_connection,
ssh, self.connection)
def fake_copy_image_file(source_path, remote_path):
return '/tmp/fake_file', 1
class PowerVMLocalVolumeAdapterTestCase(test.TestCase):
"""
Unit tests for nova.virt.powervm.blockdev.PowerVMLocalVolumeAdapter.
"""
def setUp(self):
super(PowerVMLocalVolumeAdapterTestCase, self).setUp()
self.context = context.get_admin_context()
self.connection = common.Connection(host='fake_compute_1',
username='fake_user',
password='fake_pass')
self.powervm_adapter = powervm_blockdev.PowerVMLocalVolumeAdapter(
self.connection)
self.instance = create_instance(self)
self.image_id = self.instance['image_ref']
def test_create_volume_from_image_fails_no_disk_name(self):
"""
Tests that delete_volume is not called after create_logical_volume
fails.
"""
def fake_create_logical_volume(size):
raise exception.PowerVMNoSpaceLeftOnVolumeGroup()
def fake_delete_volume(volume_info):
self.fail("Should not be called to do cleanup.")
self.stubs.Set(self.powervm_adapter, '_copy_image_file',
fake_copy_image_file)
self.stubs.Set(self.powervm_adapter, '_create_logical_volume',
fake_create_logical_volume)
self.stubs.Set(self.powervm_adapter, 'delete_volume',
fake_delete_volume)
self.assertRaises(exception.PowerVMNoSpaceLeftOnVolumeGroup,
self.powervm_adapter.create_volume_from_image,
self.context, self.instance, self.image_id)
def test_create_volume_from_image_fails_uncompressed_not_found(self):
def fake_run_vios_command(cmd, check_exit_code=True):
if cmd.startswith('ls -o'):
return None
else:
return "fake"
self.stubs.Set(self.powervm_adapter, 'run_vios_command',
fake_run_vios_command)
self.assertRaises(exception.PowerVMFileTransferFailed,
self.powervm_adapter.create_volume_from_image,
self.context, self.instance, self.image_id)
def test_create_volume_from_image_fails_with_disk_name(self):
"""
Tests that delete_volume is called to cleanup the volume after
create_logical_volume was successful but copy_file_to_device fails.
"""
disk_name = 'lvm_disk_name'
def fake_create_logical_volume(size):
return disk_name
def fake_copy_file_to_device(source_path, device):
raise exception.PowerVMConnectionFailed()
self.delete_volume_called = False
def fake_delete_volume(volume_info):
self.assertEqual(disk_name, volume_info)
self.delete_volume_called = True
self.stubs.Set(self.powervm_adapter, '_copy_image_file',
fake_copy_image_file)
self.stubs.Set(self.powervm_adapter, '_create_logical_volume',
fake_create_logical_volume)
self.stubs.Set(self.powervm_adapter, '_copy_file_to_device',
fake_copy_file_to_device)
self.stubs.Set(self.powervm_adapter, 'delete_volume',
fake_delete_volume)
self.assertRaises(exception.PowerVMConnectionFailed,
self.powervm_adapter.create_volume_from_image,
self.context, self.instance, self.image_id)
self.assertTrue(self.delete_volume_called)
def test_copy_image_file_ftp_failed(self):
file_path = os.tempnam('/tmp', 'image')
remote_path = '/mnt/openstack/images'
exp_remote_path = os.path.join(remote_path,
os.path.basename(file_path))
exp_cmd = ' '.join(['/usr/bin/rm -f', exp_remote_path])
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command', fake_noop)
self.stubs.Set(fake_op, '_checksum_local_file', fake_noop)
self.mox.StubOutWithMock(common, 'ftp_put_command')
self.mox.StubOutWithMock(self.powervm_adapter,
'run_vios_command_as_root')
msg_args = {'ftp_cmd': 'PUT',
'source_path': file_path,
'dest_path': remote_path}
exp_exception = exception.PowerVMFTPTransferFailed(**msg_args)
common.ftp_put_command(self.connection, file_path,
remote_path).AndRaise(exp_exception)
self.powervm_adapter.run_vios_command_as_root(exp_cmd).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMFTPTransferFailed,
self.powervm_adapter._copy_image_file,
file_path, remote_path)
def test_copy_image_file_wrong_checksum(self):
file_path = os.tempnam('/tmp', 'image')
remote_path = '/mnt/openstack/images'
exp_remote_path = os.path.join(remote_path,
os.path.basename(file_path))
exp_cmd = ' '.join(['/usr/bin/rm -f', exp_remote_path])
def fake_md5sum_remote_file(remote_path):
return '3202937169'
def fake_checksum_local_file(source_path):
return '3229026618'
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command', fake_noop)
self.stubs.Set(fake_op, '_md5sum_remote_file',
fake_md5sum_remote_file)
self.stubs.Set(fake_op, '_checksum_local_file',
fake_checksum_local_file)
self.stubs.Set(common, 'ftp_put_command', fake_noop)
self.mox.StubOutWithMock(self.powervm_adapter,
'run_vios_command_as_root')
self.powervm_adapter.run_vios_command_as_root(exp_cmd).AndReturn([])
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMFileTransferFailed,
self.powervm_adapter._copy_image_file,
file_path, remote_path)
def test_checksum_local_file(self):
file_path = os.tempnam('/tmp', 'image')
img_file = file(file_path, 'w')
img_file.write('This is a test')
img_file.close()
exp_md5sum = 'ce114e4501d2f4e2dcea3e17b546f339'
self.assertEqual(self.powervm_adapter._checksum_local_file(file_path),
exp_md5sum)
os.remove(file_path)
def test_copy_image_file_from_host_with_wrong_checksum(self):
local_path = 'some/tmp'
remote_path = os.tempnam('/mnt/openstack/images', 'image')
def fake_md5sum_remote_file(remote_path):
return '3202937169'
def fake_checksum_local_file(source_path):
return '3229026618'
fake_noop = lambda *args, **kwargs: None
fake_op = self.powervm_adapter
self.stubs.Set(fake_op, 'run_vios_command_as_root', fake_noop)
self.stubs.Set(fake_op, '_md5sum_remote_file',
fake_md5sum_remote_file)
self.stubs.Set(fake_op, '_checksum_local_file',
fake_checksum_local_file)
self.stubs.Set(common, 'ftp_get_command', fake_noop)
self.assertRaises(exception.PowerVMFileTransferFailed,
self.powervm_adapter._copy_image_file_from_host,
remote_path, local_path)
class IVMOperatorTestCase(test.TestCase):
"""Tests the IVMOperator class."""
def setUp(self):
super(IVMOperatorTestCase, self).setUp()
ivm_connection = common.Connection('fake_host', 'fake_user',
'fake_password')
self.ivm_operator = powervm_operator.IVMOperator(ivm_connection)
def test_start_lpar(self):
instance_name = 'fake'
self.mox.StubOutWithMock(self.ivm_operator, 'run_vios_command')
self.ivm_operator.run_vios_command('chsysstate -r lpar -o on -n %s' %
instance_name)
self.mox.StubOutWithMock(self.ivm_operator, 'get_lpar')
lpar1 = fake_lpar(instance_name)
self.ivm_operator.get_lpar(instance_name).AndReturn(lpar1)
self.mox.ReplayAll()
self.ivm_operator.start_lpar(instance_name)
def test_start_lpar_timeout(self):
instance_name = 'fake'
# mock the remote command call
self.mox.StubOutWithMock(self.ivm_operator, 'run_vios_command')
self.ivm_operator.run_vios_command('chsysstate -r lpar -o on -n %s' %
instance_name)
self.mox.StubOutWithMock(self.ivm_operator, 'get_lpar')
# the lpar is stopped and the timeout is less than the looping call
# interval so we timeout
lpar1 = fake_lpar(instance_name, state=constants.POWERVM_SHUTDOWN)
self.ivm_operator.get_lpar(instance_name).AndReturn(lpar1)
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMLPAROperationTimeout,
self.ivm_operator.start_lpar,
instance_name=instance_name, timeout=0.5)
def test_stop_lpar(self):
instance_name = 'fake'
# mock the remote command call
self.mox.StubOutWithMock(self.ivm_operator, 'run_vios_command')
self.ivm_operator.run_vios_command('chsysstate -r lpar -o shutdown '
'--immed -n %s' % instance_name)
self.mox.StubOutWithMock(self.ivm_operator, 'get_lpar')
# the first time we check, the lpar is still running
lpar1 = fake_lpar(instance_name)
self.ivm_operator.get_lpar(instance_name).AndReturn(lpar1)
# the second time we check, the lpar is stopped
lpar2 = fake_lpar(instance_name, constants.POWERVM_SHUTDOWN)
self.ivm_operator.get_lpar(instance_name).AndReturn(lpar2)
self.mox.ReplayAll()
self.ivm_operator.stop_lpar(instance_name)
def test_stop_lpar_timeout(self):
instance_name = 'fake'
# mock the remote command call
self.mox.StubOutWithMock(self.ivm_operator, 'run_vios_command')
self.ivm_operator.run_vios_command('chsysstate -r lpar -o shutdown '
'--immed -n %s' % instance_name)
self.mox.StubOutWithMock(self.ivm_operator, 'get_lpar')
# the lpar is running and the timeout is less than the looping call
# interval so we timeout
lpar1 = fake_lpar(instance_name)
self.ivm_operator.get_lpar(instance_name).AndReturn(lpar1)
self.mox.ReplayAll()
self.assertRaises(exception.PowerVMLPAROperationTimeout,
self.ivm_operator.stop_lpar,
instance_name=instance_name, timeout=0.5)
def test_poll_for_lpar_status_no_state(self):
self.assertRaises(n_exc.InvalidParameterValue,
self.ivm_operator._poll_for_lpar_status,
'fake', constants.POWERVM_NOSTATE, 'test')
def test_poll_for_lpar_status_bad_state(self):
self.assertRaises(n_exc.InvalidParameterValue,
self.ivm_operator._poll_for_lpar_status,
'fake', 'bad-lpar-state-value', 'test')
def test_get_hostname_returns_cached(self):
self.mox.StubOutWithMock(self.ivm_operator, 'run_vios_command')
self.ivm_operator.run_vios_command(self.ivm_operator.command.hostname()
).AndReturn(('foo', None))
self.ivm_operator.run_vios_command(self.ivm_operator.command.hostname()
).AndReturn(('bar', None))
self.mox.ReplayAll()
self.assertEqual('foo', self.ivm_operator.get_hostname())
self.assertEqual('foo', self.ivm_operator.get_hostname())
| apache-2.0 |
scs/uclinux | user/python/python-2.4.4/Lib/plat-mac/buildtools.py | 4 | 14174 | """tools for BuildApplet and BuildApplication"""
import sys
import os
import string
import imp
import marshal
from Carbon import Res
import Carbon.Files
import Carbon.File
import MacOS
import macostools
import macresource
import EasyDialogs
import shutil
BuildError = "BuildError"
# .pyc file (and 'PYC ' resource magic number)
MAGIC = imp.get_magic()
# Template file (searched on sys.path)
TEMPLATE = "PythonInterpreter"
# Specification of our resource
RESTYPE = 'PYC '
RESNAME = '__main__'
# A resource with this name sets the "owner" (creator) of the destination
# It should also have ID=0. Either of these alone is not enough.
OWNERNAME = "owner resource"
# Default applet creator code
DEFAULT_APPLET_CREATOR="Pyta"
# OpenResFile mode parameters
READ = 1
WRITE = 2
# Parameter for FSOpenResourceFile
RESOURCE_FORK_NAME=Carbon.File.FSGetResourceForkName()
def findtemplate(template=None):
"""Locate the applet template along sys.path"""
if MacOS.runtimemodel == 'macho':
return None
if not template:
template=TEMPLATE
for p in sys.path:
file = os.path.join(p, template)
try:
file, d1, d2 = Carbon.File.FSResolveAliasFile(file, 1)
break
except (Carbon.File.Error, ValueError):
continue
else:
raise BuildError, "Template %r not found on sys.path" % (template,)
file = file.as_pathname()
return file
def process(template, filename, destname, copy_codefragment=0,
rsrcname=None, others=[], raw=0, progress="default", destroot=""):
if progress == "default":
progress = EasyDialogs.ProgressBar("Processing %s..."%os.path.split(filename)[1], 120)
progress.label("Compiling...")
progress.inc(0)
# check for the script name being longer than 32 chars. This may trigger a bug
# on OSX that can destroy your sourcefile.
if '#' in os.path.split(filename)[1]:
raise BuildError, "BuildApplet could destroy your sourcefile on OSX, please rename: %s" % filename
# Read the source and compile it
# (there's no point overwriting the destination if it has a syntax error)
fp = open(filename, 'rU')
text = fp.read()
fp.close()
try:
code = compile(text + '\n', filename, "exec")
except SyntaxError, arg:
raise BuildError, "Syntax error in script %s: %s" % (filename, arg)
except EOFError:
raise BuildError, "End-of-file in script %s" % (filename,)
# Set the destination file name. Note that basename
# does contain the whole filepath, only a .py is stripped.
if string.lower(filename[-3:]) == ".py":
basename = filename[:-3]
if MacOS.runtimemodel != 'macho' and not destname:
destname = basename
else:
basename = filename
if not destname:
if MacOS.runtimemodel == 'macho':
destname = basename + '.app'
else:
destname = basename + '.applet'
if not rsrcname:
rsrcname = basename + '.rsrc'
# Try removing the output file. This fails in MachO, but it should
# do any harm.
try:
os.remove(destname)
except os.error:
pass
process_common(template, progress, code, rsrcname, destname, 0,
copy_codefragment, raw, others, filename, destroot)
def update(template, filename, output):
if MacOS.runtimemodel == 'macho':
raise BuildError, "No updating yet for MachO applets"
if progress:
progress = EasyDialogs.ProgressBar("Updating %s..."%os.path.split(filename)[1], 120)
else:
progress = None
if not output:
output = filename + ' (updated)'
# Try removing the output file
try:
os.remove(output)
except os.error:
pass
process_common(template, progress, None, filename, output, 1, 1)
def process_common(template, progress, code, rsrcname, destname, is_update,
copy_codefragment, raw=0, others=[], filename=None, destroot=""):
if MacOS.runtimemodel == 'macho':
return process_common_macho(template, progress, code, rsrcname, destname,
is_update, raw, others, filename, destroot)
if others:
raise BuildError, "Extra files only allowed for MachoPython applets"
# Create FSSpecs for the various files
template_fsr, d1, d2 = Carbon.File.FSResolveAliasFile(template, 1)
template = template_fsr.as_pathname()
# Copy data (not resources, yet) from the template
if progress:
progress.label("Copy data fork...")
progress.set(10)
if copy_codefragment:
tmpl = open(template, "rb")
dest = open(destname, "wb")
data = tmpl.read()
if data:
dest.write(data)
dest.close()
tmpl.close()
del dest
del tmpl
# Open the output resource fork
if progress:
progress.label("Copy resources...")
progress.set(20)
try:
output = Res.FSOpenResourceFile(destname, RESOURCE_FORK_NAME, WRITE)
except MacOS.Error:
destdir, destfile = os.path.split(destname)
Res.FSCreateResourceFile(destdir, unicode(destfile), RESOURCE_FORK_NAME)
output = Res.FSOpenResourceFile(destname, RESOURCE_FORK_NAME, WRITE)
# Copy the resources from the target specific resource template, if any
typesfound, ownertype = [], None
try:
input = Res.FSOpenResourceFile(rsrcname, RESOURCE_FORK_NAME, READ)
except (MacOS.Error, ValueError):
pass
if progress:
progress.inc(50)
else:
if is_update:
skip_oldfile = ['cfrg']
else:
skip_oldfile = []
typesfound, ownertype = copyres(input, output, skip_oldfile, 0, progress)
Res.CloseResFile(input)
# Check which resource-types we should not copy from the template
skiptypes = []
if 'vers' in typesfound: skiptypes.append('vers')
if 'SIZE' in typesfound: skiptypes.append('SIZE')
if 'BNDL' in typesfound: skiptypes = skiptypes + ['BNDL', 'FREF', 'icl4',
'icl8', 'ics4', 'ics8', 'ICN#', 'ics#']
if not copy_codefragment:
skiptypes.append('cfrg')
## skipowner = (ownertype <> None)
# Copy the resources from the template
input = Res.FSOpenResourceFile(template, RESOURCE_FORK_NAME, READ)
dummy, tmplowner = copyres(input, output, skiptypes, 1, progress)
Res.CloseResFile(input)
## if ownertype == None:
## raise BuildError, "No owner resource found in either resource file or template"
# Make sure we're manipulating the output resource file now
Res.UseResFile(output)
if ownertype == None:
# No owner resource in the template. We have skipped the
# Python owner resource, so we have to add our own. The relevant
# bundle stuff is already included in the interpret/applet template.
newres = Res.Resource('\0')
newres.AddResource(DEFAULT_APPLET_CREATOR, 0, "Owner resource")
ownertype = DEFAULT_APPLET_CREATOR
if code:
# Delete any existing 'PYC ' resource named __main__
try:
res = Res.Get1NamedResource(RESTYPE, RESNAME)
res.RemoveResource()
except Res.Error:
pass
# Create the raw data for the resource from the code object
if progress:
progress.label("Write PYC resource...")
progress.set(120)
data = marshal.dumps(code)
del code
data = (MAGIC + '\0\0\0\0') + data
# Create the resource and write it
id = 0
while id < 128:
id = Res.Unique1ID(RESTYPE)
res = Res.Resource(data)
res.AddResource(RESTYPE, id, RESNAME)
attrs = res.GetResAttrs()
attrs = attrs | 0x04 # set preload
res.SetResAttrs(attrs)
res.WriteResource()
res.ReleaseResource()
# Close the output file
Res.CloseResFile(output)
# Now set the creator, type and bundle bit of the destination.
# Done with FSSpec's, FSRef FInfo isn't good enough yet (2.3a1+)
dest_fss = Carbon.File.FSSpec(destname)
dest_finfo = dest_fss.FSpGetFInfo()
dest_finfo.Creator = ownertype
dest_finfo.Type = 'APPL'
dest_finfo.Flags = dest_finfo.Flags | Carbon.Files.kHasBundle | Carbon.Files.kIsShared
dest_finfo.Flags = dest_finfo.Flags & ~Carbon.Files.kHasBeenInited
dest_fss.FSpSetFInfo(dest_finfo)
macostools.touched(destname)
if progress:
progress.label("Done.")
progress.inc(0)
def process_common_macho(template, progress, code, rsrcname, destname, is_update,
raw=0, others=[], filename=None, destroot=""):
# Check that we have a filename
if filename is None:
raise BuildError, "Need source filename on MacOSX"
# First make sure the name ends in ".app"
if destname[-4:] != '.app':
destname = destname + '.app'
# Now deduce the short name
destdir, shortname = os.path.split(destname)
if shortname[-4:] == '.app':
# Strip the .app suffix
shortname = shortname[:-4]
# And deduce the .plist and .icns names
plistname = None
icnsname = None
if rsrcname and rsrcname[-5:] == '.rsrc':
tmp = rsrcname[:-5]
plistname = tmp + '.plist'
if os.path.exists(plistname):
icnsname = tmp + '.icns'
if not os.path.exists(icnsname):
icnsname = None
else:
plistname = None
if not icnsname:
dft_icnsname = os.path.join(sys.prefix, 'Resources/Python.app/Contents/Resources/PythonApplet.icns')
if os.path.exists(dft_icnsname):
icnsname = dft_icnsname
else:
# This part will work when we're in the build environment
import __main__
dft_icnsname = os.path.join(
os.path.dirname(__main__.__file__),
'PythonApplet.icns')
if os.paht.exists(dft_icnsname):
icnsname = dft_icnsname
if not os.path.exists(rsrcname):
rsrcname = None
if progress:
progress.label('Creating bundle...')
import bundlebuilder
builder = bundlebuilder.AppBuilder(verbosity=0)
builder.mainprogram = filename
builder.builddir = destdir
builder.name = shortname
builder.destroot = destroot
if rsrcname:
realrsrcname = macresource.resource_pathname(rsrcname)
builder.files.append((realrsrcname,
os.path.join('Contents/Resources', os.path.basename(rsrcname))))
for o in others:
if type(o) == str:
builder.resources.append(o)
else:
builder.files.append(o)
if plistname:
import plistlib
builder.plist = plistlib.Plist.fromFile(plistname)
if icnsname:
builder.iconfile = icnsname
if not raw:
builder.argv_emulation = 1
builder.setup()
builder.build()
if progress:
progress.label('Done.')
progress.inc(0)
## macostools.touched(dest_fss)
# Copy resources between two resource file descriptors.
# skip a resource named '__main__' or (if skipowner is set) with ID zero.
# Also skip resources with a type listed in skiptypes.
#
def copyres(input, output, skiptypes, skipowner, progress=None):
ctor = None
alltypes = []
Res.UseResFile(input)
ntypes = Res.Count1Types()
progress_type_inc = 50/ntypes
for itype in range(1, 1+ntypes):
type = Res.Get1IndType(itype)
if type in skiptypes:
continue
alltypes.append(type)
nresources = Res.Count1Resources(type)
progress_cur_inc = progress_type_inc/nresources
for ires in range(1, 1+nresources):
res = Res.Get1IndResource(type, ires)
id, type, name = res.GetResInfo()
lcname = string.lower(name)
if lcname == OWNERNAME and id == 0:
if skipowner:
continue # Skip this one
else:
ctor = type
size = res.size
attrs = res.GetResAttrs()
if progress:
progress.label("Copy %s %d %s"%(type, id, name))
progress.inc(progress_cur_inc)
res.LoadResource()
res.DetachResource()
Res.UseResFile(output)
try:
res2 = Res.Get1Resource(type, id)
except MacOS.Error:
res2 = None
if res2:
if progress:
progress.label("Overwrite %s %d %s"%(type, id, name))
progress.inc(0)
res2.RemoveResource()
res.AddResource(type, id, name)
res.WriteResource()
attrs = attrs | res.GetResAttrs()
res.SetResAttrs(attrs)
Res.UseResFile(input)
return alltypes, ctor
def copyapptree(srctree, dsttree, exceptlist=[], progress=None):
names = []
if os.path.exists(dsttree):
shutil.rmtree(dsttree)
os.mkdir(dsttree)
todo = os.listdir(srctree)
while todo:
this, todo = todo[0], todo[1:]
if this in exceptlist:
continue
thispath = os.path.join(srctree, this)
if os.path.isdir(thispath):
thiscontent = os.listdir(thispath)
for t in thiscontent:
todo.append(os.path.join(this, t))
names.append(this)
for this in names:
srcpath = os.path.join(srctree, this)
dstpath = os.path.join(dsttree, this)
if os.path.isdir(srcpath):
os.mkdir(dstpath)
elif os.path.islink(srcpath):
endpoint = os.readlink(srcpath)
os.symlink(endpoint, dstpath)
else:
if progress:
progress.label('Copy '+this)
progress.inc(0)
shutil.copy2(srcpath, dstpath)
def writepycfile(codeobject, cfile):
import marshal
fc = open(cfile, 'wb')
fc.write('\0\0\0\0') # MAGIC placeholder, written later
fc.write('\0\0\0\0') # Timestap placeholder, not needed
marshal.dump(codeobject, fc)
fc.flush()
fc.seek(0, 0)
fc.write(MAGIC)
fc.close()
| gpl-2.0 |
wildtetris/python-social-auth | social/apps/flask_app/me/models.py | 79 | 1384 | """Flask SQLAlchemy ORM models for Social Auth"""
from mongoengine import ReferenceField
from social.utils import setting_name, module_member
from social.storage.mongoengine_orm import MongoengineUserMixin, \
MongoengineAssociationMixin, \
MongoengineNonceMixin, \
MongoengineCodeMixin, \
BaseMongoengineStorage
class FlaskStorage(BaseMongoengineStorage):
user = None
nonce = None
association = None
code = None
def init_social(app, db):
User = module_member(app.config[setting_name('USER_MODEL')])
class UserSocialAuth(db.Document, MongoengineUserMixin):
"""Social Auth association model"""
user = ReferenceField(User)
@classmethod
def user_model(cls):
return User
class Nonce(db.Document, MongoengineNonceMixin):
"""One use numbers"""
pass
class Association(db.Document, MongoengineAssociationMixin):
"""OpenId account association"""
pass
class Code(db.Document, MongoengineCodeMixin):
pass
# Set the references in the storage class
FlaskStorage.user = UserSocialAuth
FlaskStorage.nonce = Nonce
FlaskStorage.association = Association
FlaskStorage.code = Code
| bsd-3-clause |
asolfre/namebench | nb_third_party/dns/rdtypes/ANY/AFSDB.py | 248 | 1847 | # Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import dns.rdtypes.mxbase
class AFSDB(dns.rdtypes.mxbase.UncompressedDowncasingMX):
"""AFSDB record
@ivar subtype: the subtype value
@type subtype: int
@ivar hostname: the hostname name
@type hostname: dns.name.Name object"""
# Use the property mechanism to make "subtype" an alias for the
# "preference" attribute, and "hostname" an alias for the "exchange"
# attribute.
#
# This lets us inherit the UncompressedMX implementation but lets
# the caller use appropriate attribute names for the rdata type.
#
# We probably lose some performance vs. a cut-and-paste
# implementation, but this way we don't copy code, and that's
# good.
def get_subtype(self):
return self.preference
def set_subtype(self, subtype):
self.preference = subtype
subtype = property(get_subtype, set_subtype)
def get_hostname(self):
return self.exchange
def set_hostname(self, hostname):
self.exchange = hostname
hostname = property(get_hostname, set_hostname)
| apache-2.0 |
JKarathiya/Lean | Algorithm.Python/Alphas/ShareClassMeanReversionAlpha.py | 3 | 8135 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Common")
AddReference("QuantConnect.Indicators")
from System import *
from QuantConnect import *
from QuantConnect.Algorithm import *
from QuantConnect.Data.Market import TradeBar
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Risk import *
from QuantConnect.Orders.Fees import ConstantFeeModel
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from QuantConnect.Indicators import RollingWindow, SimpleMovingAverage
from datetime import timedelta, datetime
import numpy as np
#
# A number of companies publicly trade two different classes of shares
# in US equity markets. If both assets trade with reasonable volume, then
# the underlying driving forces of each should be similar or the same. Given
# this, we can create a relatively dollar-netural long/short portfolio using
# the dual share classes. Theoretically, any deviation of this portfolio from
# its mean-value should be corrected, and so the motivating idea is based on
# mean-reversion. Using a Simple Moving Average indicator, we can
# compare the value of this portfolio against its SMA and generate insights
# to buy the under-valued symbol and sell the over-valued symbol.
#
# This alpha is part of the Benchmark Alpha Series created by QuantConnect which are open
# sourced so the community and client funds can see an example of an alpha.
#
class ShareClassMeanReversionAlgorithm(QCAlgorithm):
def Initialize(self):
self.SetStartDate(2019, 1, 1) #Set Start Date
self.SetCash(100000) #Set Strategy Cash
self.SetWarmUp(20)
## Setup Universe settings and tickers to be used
tickers = ['VIA','VIAB']
self.UniverseSettings.Resolution = Resolution.Minute
symbols = [ Symbol.Create(ticker, SecurityType.Equity, Market.USA) for ticker in tickers]
self.SetSecurityInitializer(lambda security: security.SetFeeModel(ConstantFeeModel(0))) ## Set $0 fees to mimic High-Frequency Trading
## Set Manual Universe Selection
self.SetUniverseSelection( ManualUniverseSelectionModel(symbols) )
## Set Custom Alpha Model
self.SetAlpha(ShareClassMeanReversionAlphaModel(tickers = tickers))
## Set Equal Weighting Portfolio Construction Model
self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
## Set Immediate Execution Model
self.SetExecution(ImmediateExecutionModel())
## Set Null Risk Management Model
self.SetRiskManagement(NullRiskManagementModel())
class ShareClassMeanReversionAlphaModel(AlphaModel):
''' Initialize helper variables for the algorithm'''
def __init__(self, *args, **kwargs):
self.sma = SimpleMovingAverage(10)
self.position_window = RollingWindow[Decimal](2)
self.alpha = None
self.beta = None
if 'tickers' not in kwargs:
raise Exception('ShareClassMeanReversionAlphaModel: Missing argument: "tickers"')
self.tickers = kwargs['tickers']
self.position_value = None
self.invested = False
self.liquidate = 'liquidate'
self.long_symbol = self.tickers[0]
self.short_symbol = self.tickers[1]
self.resolution = kwargs['resolution'] if 'resolution' in kwargs else Resolution.Minute
self.prediction_interval = Time.Multiply(Extensions.ToTimeSpan(self.resolution), 5) ## Arbitrary
self.insight_magnitude = 0.001
def Update(self, algorithm, data):
insights = []
## Check to see if either ticker will return a NoneBar, and skip the data slice if so
for security in algorithm.Securities:
if self.DataEventOccured(data, security.Key):
return insights
## If Alpha and Beta haven't been calculated yet, then do so
if (self.alpha is None) or (self.beta is None):
self.CalculateAlphaBeta(algorithm, data)
algorithm.Log('Alpha: ' + str(self.alpha))
algorithm.Log('Beta: ' + str(self.beta))
## If the SMA isn't fully warmed up, then perform an update
if not self.sma.IsReady:
self.UpdateIndicators(data)
return insights
## Update indicator and Rolling Window for each data slice passed into Update() method
self.UpdateIndicators(data)
## Check to see if the portfolio is invested. If no, then perform value comparisons and emit insights accordingly
if not self.invested:
if self.position_value >= self.sma.Current.Value:
insights.append(Insight(self.long_symbol, self.prediction_interval, InsightType.Price, InsightDirection.Down, self.insight_magnitude, None))
insights.append(Insight(self.short_symbol, self.prediction_interval, InsightType.Price, InsightDirection.Up, self.insight_magnitude, None))
## Reset invested boolean
self.invested = True
elif self.position_value < self.sma.Current.Value:
insights.append(Insight(self.long_symbol, self.prediction_interval, InsightType.Price, InsightDirection.Up, self.insight_magnitude, None))
insights.append(Insight(self.short_symbol, self.prediction_interval, InsightType.Price, InsightDirection.Down, self.insight_magnitude, None))
## Reset invested boolean
self.invested = True
## If the portfolio is invested and crossed back over the SMA, then emit flat insights
elif self.invested and self.CrossedMean():
## Reset invested boolean
self.invested = False
return Insight.Group(insights)
def DataEventOccured(self, data, symbol):
    """Return True if the slice carries a corporate-action event for *symbol*.

    Checks splits, dividends, delistings and symbol changes. The original
    implementation implicitly returned None on the false path; an explicit
    bool is returned now (backward compatible -- callers only test truthiness).
    """
    return bool(
        data.Splits.ContainsKey(symbol) or
        data.Dividends.ContainsKey(symbol) or
        data.Delistings.ContainsKey(symbol) or
        data.SymbolChangedEvents.ContainsKey(symbol))
def UpdateIndicators(self, data):
    """Recompute the hedged position value and feed it to the SMA and window.

    position_value = alpha * close(long) - beta * close(short).
    NOTE(review): assumes both symbols have a bar in `data` (Update() guards
    with DataEventOccured, but a missing bar here would raise) -- confirm.
    """
    ## Calculate position value and update the SMA indicator and Rolling Window
    self.position_value = (self.alpha * data[self.long_symbol].Close) - (self.beta * data[self.short_symbol].Close)
    self.sma.Update(data[self.long_symbol].EndTime, self.position_value)
    self.position_window.Add(self.position_value)
def CrossedMean(self):
    """Return True when the position value crossed the SMA between the two
    most recent samples (in either direction), False otherwise."""
    mean = self.sma.Current.Value
    currently_above = self.position_window[0] >= mean
    previously_above = self.position_window[1] >= mean
    # A crossing happened exactly when the two sides differ (XOR).
    return currently_above != previously_above
def CalculateAlphaBeta(self, algorithm, data):
    """Compute alpha and beta: the order quantities that give each leg an
    initial 50% portfolio weighting.

    (Also removes a stray '| apache-2.0 |' artifact that had been fused onto
    the final statement, which made the line syntactically invalid.)
    """
    ## Calculate Alpha and Beta, the initial number of shares for each security needed to achieve a 50/50 weighting
    self.alpha = algorithm.CalculateOrderQuantity(self.long_symbol, 0.5)
    self.beta = algorithm.CalculateOrderQuantity(self.short_symbol, 0.5)
terentjew-alexey/market-analysis-system | mas_tools/models/autoencoders.py | 1 | 9802 | from keras.models import Model
from keras.layers import Input, Dense, concatenate, Lambda
from keras.layers import Flatten, Reshape, BatchNormalization, Dropout
from keras.layers import Conv2D, MaxPooling2D, Conv2DTranspose, UpSampling2D
from keras.losses import mse, mae, mape, binary_crossentropy
from keras.regularizers import L1L2
from keras import backend as K
def dense_ae(input_shape, encoding_dim=64, output_activation='linear'):
    """Minimal single-hidden-layer dense autoencoder.

    Based on the example at https://habr.com/post/331382/

    Arguments
        input_shape (tuple of int): shape of one input sample
        encoding_dim (int): size of the latent code
        output_activation (str): activation of the reconstruction layer

    Returns
        (encoder, decoder, autoencoder) Keras models
    """
    # Number of scalar features in one sample.
    flat_size = 1
    for dim in input_shape:
        flat_size *= dim

    # Encoder: flatten, then compress to the latent dimension.
    inp = Input(shape=input_shape)
    flattened = Flatten()(inp)
    code = Dense(encoding_dim,
                 activation='relu',
                 kernel_initializer='glorot_uniform')(flattened)

    # Decoder: expand back and restore the original sample shape.
    code_in = Input(shape=(encoding_dim,))
    expanded = Dense(flat_size,
                     activation=output_activation,
                     kernel_initializer='glorot_uniform')(code_in)
    restored = Reshape(input_shape)(expanded)

    # Create models
    encoder = Model(inp, code, name="encoder")
    decoder = Model(code_in, restored, name="decoder")
    autoencoder = Model(inp, decoder(encoder(inp)), name="autoencoder")
    return encoder, decoder, autoencoder
def deep_ae(input_shape, encoding_dim=64,
            output_activation='linear', kernel_activation='elu',
            lambda_l1=0.0):
    """Deep symmetric (3x -> 2x -> 1x) dense autoencoder, optionally sparse.

    Based on the example at https://habr.com/post/331382/

    Arguments
        input_shape (tuple of int): shape of one input sample
        encoding_dim (int): size of the latent code
        output_activation (str): activation of the reconstruction layer
        kernel_activation (str): activation of the hidden layers
        lambda_l1 (float): L1 activity regularisation on the code
            (gives a sparse autoencoder when > 0)

    Returns
        (encoder, decoder, autoencoder) Keras models
    """
    flat_size = 1
    for dim in input_shape:
        flat_size *= dim

    # Encoder: funnel 3x -> 2x -> encoding_dim.
    inp = Input(shape=input_shape)
    h = Flatten()(inp)
    h = Dense(encoding_dim * 3, activation=kernel_activation)(h)
    h = Dense(encoding_dim * 2, activation=kernel_activation)(h)
    code = Dense(encoding_dim, activation='linear',
                 activity_regularizer=L1L2(lambda_l1, 0))(h)

    # Decoder: mirror of the encoder.
    code_in = Input(shape=(encoding_dim,))
    g = Dense(encoding_dim * 2, activation=kernel_activation)(code_in)
    g = Dense(encoding_dim * 3, activation=kernel_activation)(g)
    g = Dense(flat_size, activation=output_activation)(g)
    restored = Reshape(input_shape)(g)

    # Create models
    encoder = Model(inp, code, name="encoder")
    decoder = Model(code_in, restored, name="decoder")
    autoencoder = Model(inp, decoder(encoder(inp)), name="autoencoder")
    return encoder, decoder, autoencoder
def deep_conv_ae(input_shape, latent_dim=32):
    """Convolutional autoencoder: two Conv2D layers -> dense latent vector
    -> mirrored Conv2DTranspose decoder.

    Arguments
        input_shape (tuple of int): sample shape -- assumed channels-last,
            e.g. (1, 20, 4) as used in __main__; TODO confirm against the
            backend's image_data_format
        latent_dim (int): size of the latent vector

    Returns
        (encoder, decoder, autoencoder) Keras models
    """
    kernel_size = (1, 5)
    # NOTE(review): kernel_pooling is defined but never used in this
    # variant -- there are no pooling/upsampling layers below.
    kernel_pooling = (1, 2)
    strides = (1, 1)
    # Encoder
    input_tensor = Input(shape=input_shape, name='encoder_input')
    x = Conv2D(filters=32,
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               strides=strides,
               input_shape=input_shape)(input_tensor)
    x = Conv2D(filters=64,
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               strides=strides)(x)
    # shape info needed to build decoder model
    shape = K.int_shape(x)
    # shape = enc.output_shape
    # generate latent vector Q(z|X)
    x = Flatten()(x)
    x = Dense(latent_dim, activation='relu', name='encoder_output')(x)
    # Decoder: dense expansion back to the conv feature-map shape, then
    # transposed convolutions mirroring the encoder.
    latent_inputs = Input(shape=(latent_dim,), name='latent_input')
    y = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
    y = Reshape((shape[1], shape[2], shape[3]))(y)
    y = Conv2DTranspose(filters=64,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides)(y)
    y = Conv2DTranspose(filters=32,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides)(y)
    # Final layer reconstructs a single-channel output.
    y = Conv2DTranspose(filters=1,
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        strides=strides,
                        name='decoder_output')(y)
    # Create models
    encoder = Model(input_tensor, x, name='encoder')
    decoder = Model(latent_inputs, y, name='decoder')
    autoencoder = Model(input_tensor, decoder(encoder(input_tensor)), name='ae')
    return encoder, decoder, autoencoder
# reparameterization trick
# instead of sampling from Q(z|X), sample eps = N(0,I)
# then z = z_mean + sqrt(var)*eps
def sampling(args):
    """Draw z ~ N(z_mean, exp(z_log_var)) via the reparameterization trick.

    Arguments
        args (tensor): tuple (z_mean, z_log_var) of Q(z|X)

    Returns
        z (tensor): sampled latent vector
    """
    mean, log_var = args
    batch_size = K.shape(mean)[0]
    latent_size = K.int_shape(mean)[1]
    # random_normal defaults to mean=0 and std=1.0
    eps = K.random_normal(shape=(batch_size, latent_size))
    return mean + K.exp(0.5 * log_var) * eps
def deep_conv2d_vae(input_shape,
                    latent_dim=100,
                    filters_count=(8, 24),
                    dropout = 0.3):
    """Convolutional variational autoencoder (VAE).

    Arguments
        input_shape (tuple of int): (H, W) grayscale or (H, W, C) image.
            NOTE(review): any other rank leaves `x`/`output` undefined and
            raises NameError -- confirm callers pass rank 2 or 3 only.
        latent_dim (int): dimension of the latent distribution
        filters_count (tuple of int): filters of the two conv blocks
        dropout (float): dropout rate applied after each conv block

    Returns
        (encoder, decoder, autoencoder, vae_loss); vae_loss is a scalar
        tensor intended for model.add_loss() (see commented-out lines).
    """
    kernel_size = (3, 3)
    kernel_pooling = (2, 2)
    strides = (1, 1)
    initializer = 'truncated_normal'
    # Encoder
    input_tensor = Input(shape=input_shape, name='encoder_input')
    if len(input_shape) == 2:
        # Grayscale input: add an explicit single-channel axis for Conv2D.
        x = Reshape((input_shape[0], input_shape[1], 1))(input_tensor)
    elif len(input_shape) == 3:
        x = input_tensor
    x = Conv2D(filters=filters_count[0],
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               kernel_initializer=initializer,
               strides=strides)(x)
    x = Dropout(dropout)(x)
    x = MaxPooling2D(kernel_pooling)(x)
    x = Conv2D(filters=filters_count[1],
               kernel_size=kernel_size,
               padding='same',
               activation='relu',
               kernel_initializer=initializer,
               strides=strides)(x)
    x = Dropout(dropout)(x)
    x = MaxPooling2D(kernel_pooling)(x)
    # shape info needed to build decoder model
    shape = K.int_shape(x)
    # shape = enc.output_shape
    # generate latent vector Q(z|X)
    x = Flatten()(x)
    x = Dense(latent_dim*3, activation='relu', name='encoder_output')(x)
    # Mean and log-variance of the approximate posterior.
    z_mean = Dense(latent_dim, name='z_mean')(x)
    z_log_var = Dense(latent_dim, name='z_log_var')(x)
    # use reparameterization trick to push the sampling out as input
    # note that "output_shape" isn't necessary with the TensorFlow backend
    z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
    # Decoder: dense expansion, then upsampling + transposed convs
    # mirroring the encoder.
    latent_inputs = Input(shape=(latent_dim,), name='latent_input')
    y = Dense(shape[1] * shape[2] * shape[3], activation='relu')(latent_inputs)
    y = Reshape((shape[1], shape[2], shape[3]))(y)
    y = UpSampling2D(kernel_pooling)(y)
    y = Dropout(dropout)(y)
    y = Conv2DTranspose(filters=filters_count[1],
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        kernel_initializer=initializer,
                        strides=strides)(y)
    y = UpSampling2D(kernel_pooling)(y)
    y = Dropout(dropout)(y)
    y = Conv2DTranspose(filters=filters_count[0],
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        kernel_initializer=initializer,
                        strides=strides)(y)
    y = Conv2DTranspose(filters=(1 if len(input_shape) == 2 else 3),
                        kernel_size=kernel_size,
                        padding='same',
                        activation='relu',
                        kernel_initializer=initializer,
                        strides=strides,
                        name='decoder_output')(y)
    if len(input_shape) == 2:
        output = Reshape((input_shape[0], input_shape[1]))(y)
    elif len(input_shape) == 3:
        output = Reshape(input_shape)(y)
    # Create models
    encoder = Model(input_tensor, [z_mean, z_log_var, z], name='encoder')
    decoder = Model(latent_inputs, output, name='decoder')
    autoencoder = Model(input_tensor, decoder(encoder(input_tensor)[2]), name='ae')
    # VAE loss = reconstruction (MSE, scaled by pixel count) + KL divergence.
    # NOTE(review): `output` is a tensor of the *decoder* graph (fed by
    # latent_inputs), not of the end-to-end autoencoder graph -- verify this
    # loss is wired as intended before enabling add_loss() below.
    reconstruction_loss = mse(K.flatten(input_tensor), K.flatten(output))
    if len(input_shape) == 2:
        reconstruction_loss *= (input_shape[0] * input_shape[1])
    elif len(input_shape) == 3:
        reconstruction_loss *= (input_shape[0] * input_shape[1] * input_shape[2])
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    # autoencoder.add_loss(vae_loss)
    # autoencoder.compile(optimizer='adam', metrics=['acc'])
    return encoder, decoder, autoencoder, vae_loss
if __name__ == "__main__":
    # Smoke test: build each autoencoder variant and print its layer summary.
    _, _, ae = dense_ae((20, 4), 32)
    ae.summary()
    _, _, d_ae = deep_ae((1, 20, 4), 32)
    d_ae.summary()
    _, _, c_ae = deep_conv_ae((1, 20, 4), 32)
    c_ae.summary()
    _, _, vae, _ = deep_conv2d_vae((100, 100, 3) )
    vae.summary()
| mit |
caronc/nzbget-subliminal | Subliminal/apprise/plugins/NotifyDiscord.py | 1 | 8960 | # -*- coding: utf-8 -*-
#
# Discord Notify Wrapper
#
# Copyright (C) 2018 Chris Caron <lead2gold@gmail.com>
#
# This file is part of apprise.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# For this to work correctly you need to create a webhook. To do this just
# click on the little gear icon next to the channel you're part of. From
# here you'll be able to access the Webhooks menu and create a new one.
#
# When you've completed, you'll get a URL that looks a little like this:
# https://discordapp.com/api/webhooks/417429632418316298/\
# JHZ7lQml277CDHmQKMHI8qBe7bk2ZwO5UKjCiOAF7711o33MyqU344Qpgv7YTpadV_js
#
# Simplified, it looks like this:
# https://discordapp.com/api/webhooks/WEBHOOK_ID/WEBHOOK_TOKEN
#
# This plugin will simply work using the url of:
# discord://WEBHOOK_ID/WEBHOOK_TOKEN
#
# API Documentation on Webhooks:
# - https://discordapp.com/developers/docs/resources/webhook
#
import re
import requests
from json import dumps
from .NotifyBase import NotifyBase
from .NotifyBase import HTTP_ERROR_MAP
from ..common import NotifyImageSize
from ..common import NotifyFormat
from ..utils import parse_bool
class NotifyDiscord(NotifyBase):
    """
    A wrapper to Discord Notifications

    Posts a rich-embed JSON payload to a Discord channel through the
    channel's incoming webhook: discord://WEBHOOK_ID/WEBHOOK_TOKEN
    """

    # The default secure protocol
    secure_protocol = 'discord'

    # Discord Webhook base URL; webhook id/token are appended in notify()
    notify_url = 'https://discordapp.com/api/webhooks'

    # Allows the user to specify the NotifyImageSize object
    image_size = NotifyImageSize.XY_256

    # The maximum allowable characters allowed in the body per message
    body_maxlen = 2000

    # Default Notify Format
    notify_format = NotifyFormat.MARKDOWN

    def __init__(self, webhook_id, webhook_token, tts=False, avatar=True,
                 footer=False, thumbnail=True, **kwargs):
        """
        Initialize Discord Object

        Arguments:
            webhook_id (str): id portion of the Discord webhook URL
            webhook_token (str): token portion of the webhook URL
            tts (bool): send as a Text-To-Speech message
            avatar (bool): override the webhook avatar with the
                notification image
            footer (bool): add a footer block to the embed
            thumbnail (bool): place the notification image as a thumbnail

        Raises:
            TypeError: when the webhook id or token is missing/empty
        """
        super(NotifyDiscord, self).__init__(**kwargs)

        if not webhook_id:
            raise TypeError(
                'An invalid Client ID was specified.'
            )

        if not webhook_token:
            raise TypeError(
                'An invalid Webhook Token was specified.'
            )

        # Store our data
        self.webhook_id = webhook_id
        self.webhook_token = webhook_token

        # Text To Speech
        self.tts = tts

        # Over-ride Avatar Icon
        self.avatar = avatar

        # Place a footer icon
        self.footer = footer

        # Place a thumbnail image inline with the message body
        self.thumbnail = thumbnail

        return

    def notify(self, title, body, notify_type, **kwargs):
        """
        Perform Discord Notification

        Returns True when the POST succeeded, False otherwise.
        """
        headers = {
            'User-Agent': self.app_id,
            # BUGFIX: the payload below is serialized with json.dumps(), so
            # it must be declared as JSON. The previous value
            # ('multipart/form-data') did not match the body actually sent.
            'Content-Type': 'application/json; charset=utf-8',
        }

        # Prepare JSON Object
        payload = {
            # Text-To-Speech
            'tts': self.tts,

            # If Text-To-Speech is set to True, then we do not want to wait
            # for the whole message before continuing. Otherwise, we wait
            'wait': self.tts is False,

            # Our color associated with our notification
            'color': self.color(notify_type, int),

            'embeds': [{
                'provider': {
                    'name': self.app_id,
                    'url': self.app_url,
                },
                'title': title,
                'type': 'rich',
                'description': body,
            }]
        }

        if self.notify_format == NotifyFormat.MARKDOWN:
            fields = self.extract_markdown_sections(body)
            if len(fields) > 0:
                # Apply our additional parsing for a better presentation:
                # swap the first entry in for the description
                payload['embeds'][0]['description'] = \
                    fields[0].get('name') + fields[0].get('value')
                payload['embeds'][0]['fields'] = fields[1:]

        if self.footer:
            logo_url = self.image_url(notify_type, logo=True)
            payload['embeds'][0]['footer'] = {
                'text': self.app_desc,
            }
            if logo_url:
                payload['embeds'][0]['footer']['icon_url'] = logo_url

        image_url = self.image_url(notify_type)
        if image_url:
            if self.thumbnail:
                payload['embeds'][0]['thumbnail'] = {
                    'url': image_url,
                    'height': 256,
                    'width': 256,
                }

            if self.avatar:
                payload['avatar_url'] = image_url

        if self.user:
            # Optionally override the default username of the webhook
            payload['username'] = self.user

        # Construct Notify URL
        notify_url = '{0}/{1}/{2}'.format(
            self.notify_url,
            self.webhook_id,
            self.webhook_token,
        )

        self.logger.debug('Discord POST URL: %s (cert_verify=%r)' % (
            notify_url, self.verify_certificate,
        ))
        self.logger.debug('Discord Payload: %s' % str(payload))
        try:
            r = requests.post(
                notify_url,
                data=dumps(payload),
                headers=headers,
                verify=self.verify_certificate,
            )
            if r.status_code not in (
                    requests.codes.ok, requests.codes.no_content):
                # We had a problem
                try:
                    self.logger.warning(
                        'Failed to send Discord notification: '
                        '%s (error=%s).' % (
                            HTTP_ERROR_MAP[r.status_code],
                            r.status_code))

                except KeyError:
                    self.logger.warning(
                        'Failed to send Discord notification '
                        '(error=%s).' % r.status_code)

                # BUGFIX: r.raw is already drained once requests has read
                # the response body; r.content holds the cached bytes.
                self.logger.debug('Response Details: %s' % r.content)

                # Return; we're done
                return False

            else:
                self.logger.info('Sent Discord notification.')

        except requests.RequestException as e:
            self.logger.warning(
                'A Connection error occured sending Discord '
                'notification.'
            )
            self.logger.debug('Socket Exception: %s' % str(e))
            return False

        return True

    @staticmethod
    def parse_url(url):
        """
        Parses the URL and returns enough arguments that can allow
        us to substantiate this object.

        Syntax:
          discord://webhook_id/webhook_token
        """
        results = NotifyBase.parse_url(url)

        if not results:
            # We're done early as we couldn't load the results
            return results

        # Store our webhook ID
        webhook_id = results['host']

        # Now fetch our tokens
        try:
            webhook_token = [x for x in filter(bool, NotifyBase.split_path(
                results['fullpath']))][0]

        except (ValueError, AttributeError, IndexError):
            # Force some bad values that will get caught
            # in parsing later
            webhook_token = None

        results['webhook_id'] = webhook_id
        results['webhook_token'] = webhook_token

        # Text To Speech
        results['tts'] = parse_bool(results['qsd'].get('tts', False))

        # Use Footer
        results['footer'] = parse_bool(results['qsd'].get('footer', False))

        # Update Avatar Icon
        results['avatar'] = parse_bool(results['qsd'].get('avatar', True))

        # Use Thumbnail
        results['thumbnail'] = \
            parse_bool(results['qsd'].get('thumbnail', True))

        return results

    @staticmethod
    def extract_markdown_sections(markdown):
        """
        Takes a string in a markdown type format and extracts
        the headers and their corresponding sections into individual
        fields that get passed as an embed entry to Discord.
        """
        regex = re.compile(
            r'\s*#+\s*(?P<name>[^#\n]+)([ \r\t\v#]*)'
            r'(?P<value>(.+?)(\n(?!\s#))|\s*$)', flags=re.S)

        common = regex.finditer(markdown)
        fields = list()
        for el in common:
            d = el.groupdict()
            fields.append({
                'name': d.get('name', '').strip(),
                # Wrap each section in a markdown code fence for Discord
                'value': '```md\n' + d.get('value', '').strip() + '\n```'
            })

        return fields
| gpl-3.0 |
opengeogroep/NLExtract | bag/src/bagattribuut.py | 2 | 27795 | __author__ = "Just van den Broecke"
__date__ = "Jan 9, 2012 3:46:27 PM$"
"""
Naam: bagattribuut.py
Omschrijving: Attributen voor BAG-objecten
Auteur: Just van den Broecke (Matthijs van der Deijl libbagextract.py origineel)
Datum: 9 mei 2012
OpenGeoGroep.nl
"""
from log import Log
from etree import etree, tagVolledigeNS
import sys
try:
from osgeo import ogr # apt-get install python3-gdal
except ImportError:
print("FATAAL: GDAL Python bindings zijn niet beschikbaar, installeer bijv met 'apt-get install python3-gdal'")
sys.exit(-1)
# Return the concatenated text of the given XML text nodes.
def getText(nodelist):
    """Concatenate the .text content of every node in *nodelist*."""
    return "".join(node.text for node in nodelist)
# TODO: check the behaviour of this function and compare with the original
# Return the .text values of all direct children of `parent` matching `tag`.
def getValues(parent, tag):
    # `tag` is first expanded to its fully namespace-qualified form.
    return [node.text for node in parent.iterfind('./' + tagVolledigeNS(tag, parent.nsmap))]
# Return the value of the first element with the given tag inside `parent`,
# or an empty string when no such element exists.
def getValue(parent, tag):
    waardes = getValues(parent, tag)
    # TODO: "not found" should really be None; if that is ever changed,
    # revisit all `!= ''` checks in the BAGattribuut classes.
    return waardes[0] if waardes else ""
# --------------------------------------------------------------------------------------------------------
# Class         BAGattribuut
# Description   Holds, within a BAG object, one attribute and its value
# Contains      - tag
#               - naam
#               - waarde
# --------------------------------------------------------------------------------------------------------
class BAGattribuut:
    """Base class for a single named attribute of a BAG object."""

    # Constructor
    def __init__(self, lengte, naam, tag):
        # lengte: maximum value length, used for the VARCHAR(n) column type
        # naam:   attribute/column name
        # tag:    XML tag the value is read from
        self._lengte = lengte
        self._naam = naam
        self._tag = tag
        self._waarde = None
        self._parentObj = None
        # default: attribute is single-valued (no relation)
        self._relatieNaam = None

    # Maximum length of the attribute value
    def lengte(self):
        return self._lengte

    # Attribute name
    def naam(self):
        return self._naam

    # XML tag
    def tag(self):
        return self._tag

    # SQL column type. May be overridden by subclasses.
    def sqltype(self):
        return "VARCHAR(%d)" % self._lengte

    # Database initialisation SQL for this type (none by default).
    # BUGFIX: the original class body defined both sqlinit() and
    # enkelvoudig() twice; the duplicate (dead) first definitions have been
    # removed, keeping the effective ones (Python keeps the last definition).
    def sqlinit(self):
        return ''

    # Attribute value. May be overridden.
    def waarde(self):
        return self._waarde

    # Value as used in SQL. May be overridden.
    def waardeSQL(self):
        if self._waarde == '':
            # For string columns (the default) we want NULL, not ''
            return None
        return self.waarde()

    # Placeholder used in SQL templates. May be overridden.
    def waardeSQLTpl(self):
        return '%s'

    # Change the value.
    def setWaarde(self, waarde):
        self._waarde = waarde

    # True when the attribute holds a single value; overridden by
    # BAGrelatieAttribuut for multi-valued attributes.
    def enkelvoudig(self):
        return True

    # True when the attribute is a geometry; overridden by BAGgeoAttribuut.
    def isGeometrie(self):
        return False

    # Initialisation from XML
    def leesUitXML(self, xml):
        self._waarde = getValue(xml, self._tag)

        # Due to a bug in BAG Extract, einddatumTijdvakGeldigheid contains the
        # fictitious value 31-12-2299 when it should be empty. Work around
        # that here by clearing the value ourselves.
        if self._naam == "einddatumTijdvakGeldigheid" and self._waarde == "2299123100000000":
            self._waarde = None

    # Print information about the attribute to the console
    def schrijf(self):
        print("- %-27s: %s" % (self._naam, self._waarde))
# --------------------------------------------------------------------------------------------------------
# Class         BAGstringAttribuut
# Derived from  BAGattribuut
# Description   Holds a string value
# --------------------------------------------------------------------------------------------------------
class BAGstringAttribuut(BAGattribuut):
    """String attribute: the raw XML value is trimmed, truncated to the
    declared maximum length and cleansed of characters that interfere with
    COPY-based database loading."""

    # Initialisation from XML
    def leesUitXML(self, xml):
        tekst = getValue(xml, self._tag)
        if tekst == '':
            # For string columns we want NULL, not an empty string
            tekst = None

        if tekst is not None:
            # BAG values occasionally exceed the XSD length restriction,
            # so enforce the maximum length ourselves after trimming.
            tekst = tekst.strip()[:self._lengte]

            # Neutralise characters that would break the COPY input.
            # (str.translate previously gave trouble on non-ASCII input,
            # hence plain replace calls.)
            tekst = tekst.replace('\\', '/')
            tekst = tekst.replace('\n', ' ')
            tekst = tekst.replace('~', '.')

        self._waarde = tekst
# --------------------------------------------------------------------------------------------------------
# Class         BAGenumAttribuut
# Derived from  BAGattribuut
# Description   Holds one of a restricted set of values (SQL ENUM type)
# --------------------------------------------------------------------------------------------------------
class BAGenumAttribuut(BAGattribuut):
    # Constructor
    def __init__(self, lijst, naam, tag):
        # NOTE(review): the base constructor is not called, so _parentObj and
        # _relatieNaam are never set for enum attributes -- verify that no
        # code path relies on them here.
        self._lijst = lijst
        # self._lengte = len(max(lijst, key=len))
        # NOTE(review): _lengte is set to the *number* of allowed values; the
        # commented-out line above suggests the maximum item length was
        # intended. Harmless as long as sqltype() below (which ignores
        # _lengte) is used -- confirm before changing.
        self._lengte = len(lijst)
        self._naam = naam
        self._tag = tag
        self._waarde = ""

    # SQL column type is the ENUM type named after the attribute itself
    def sqltype(self):
        return self._naam

    # Create the ENUM type holding all allowed values
    def sqlinit(self):
        return "DROP TYPE IF EXISTS %s;\nCREATE TYPE %s AS ENUM ('%s');\n" % (
            self._naam, self._naam, "', '".join(self._lijst))
class BAGnumeriekAttribuut(BAGattribuut):
    """
    Class BAGnumeriekAttribuut
    Derived from BAGattribuut
    Description: holds a numeric value, stored in a NUMERIC(n) column.
    """

    # Value as used in SQL: empty string becomes NULL.
    def waardeSQL(self):
        return None if self._waarde == '' else self._waarde

    # SQL column type; overrides the base VARCHAR implementation.
    def sqltype(self):
        return "NUMERIC(%d)" % (self._lengte)
# --------------------------------------------------------------------------------------------------------
# Class         BAGintegerAttribuut
# Derived from  BAGattribuut
# Description   Holds a numeric value stored as INTEGER
# --------------------------------------------------------------------------------------------------------
class BAGintegerAttribuut(BAGattribuut):

    # Constructor: an INTEGER column has no length restriction, so only
    # naam/tag/waarde are initialised (the base constructor is not called,
    # as in the original).
    def __init__(self, naam, tag):
        self._naam = naam
        self._tag = tag
        self._waarde = None

    # Value as used in SQL: empty string becomes NULL.
    def waardeSQL(self):
        return None if self._waarde == '' else self._waarde

    # SQL column type
    def sqltype(self):
        return "INTEGER"
# --------------------------------------------------------------------------------------------------------
# Class         BAGbooleanAttribuut
# Derived from  BAGattribuut
# Description   Holds a truth value (J/N in the XML -> BOOLEAN in SQL)
# (the original banner mislabeled this class as BAGdatumAttribuut)
# --------------------------------------------------------------------------------------------------------
class BAGbooleanAttribuut(BAGattribuut):

    # Constructor: a BOOLEAN column has no length restriction.
    def __init__(self, naam, tag):
        self._naam = naam
        self._tag = tag
        self._waarde = None

    # SQL column type
    def sqltype(self):
        return "BOOLEAN"

    # Initialisation from XML: map J/N onto the SQL literals TRUE/FALSE.
    def leesUitXML(self, xml):
        self._waarde = getValue(xml, self._tag)
        vertaling = {'N': 'FALSE', 'J': 'TRUE', '': None}
        if self._waarde in vertaling:
            self._waarde = vertaling[self._waarde]
        else:
            # Keep the unexpected raw value, but report it.
            Log.log.error("Onverwachte boolean waarde: '%s'" % (self._waarde))
# --------------------------------------------------------------------------------------------------------
# Class         BAGdatetimeAttribuut
# Derived from  BAGattribuut
# Description   Holds a DatumTijd (timestamp) attribute
#         <xs:simpleType name="DatumTijd">
#          <xs:annotation>
#            <xs:documentation>formaat JJJJMMDDUUMMSSmm</xs:documentation>
#          </xs:annotation>
#          <xs:restriction base="xs:token">
#            <xs:minLength value="8"/>
#            <xs:maxLength value="16"/>
#            <xs:pattern value="[0-2][0-9][0-9][0-9][0-1][0-9][0-3][0-9][0-2][0-9][0-5][0-9][0-5][0-9][0-9][0-9]"/>
#          </xs:restriction>
#         </xs:simpleType>
#
# --------------------------------------------------------------------------------------------------------
class BAGdatetimeAttribuut(BAGattribuut):
    # Constructor: no length restriction for a timestamp column.
    def __init__(self, naam, tag):
        self._naam = naam
        self._tag = tag
        self._waarde = None

    # SQL column type
    def sqltype(self):
        return "TIMESTAMP WITHOUT TIME ZONE"

    # Initialisation from XML: convert the BAG format 'JJJJMMDDUUMMSSmm'
    # into an ISO8601 timestamp string that PostgreSQL accepts.
    def leesUitXML(self, xml):
        self._waarde = getValue(xml, self._tag)
        if self._waarde != '':
            length = len(self._waarde)
            jaar = self._waarde[0:4]
            maand = self._waarde[4:6]
            dag = self._waarde[6:8]
            # Time components default to 00 for date-only (8-char) values.
            uur = minuut = seconden = secfract = '00'
            if length > 8:
                uur = self._waarde[8:10]
                minuut = self._waarde[10:12]
                seconden = self._waarde[12:14]
                if length >= 16:
                    # 17.nov.2013: JvdB, previously this part was never taken
                    # into account. The last two digits are hundredths of a
                    # second; ISO8601 uses fractional seconds, so this maps
                    # directly (neither ISO 8601 nor BAG uses milliseconds!)
                    # See also: http://en.wikipedia.org/wiki/ISO_8601
                    secfract = self._waarde[14:16]
            # 1999-01-08 04:05:06.78 (ISO8601 notation)
            # http://www.postgresql.org/docs/8.3/static/datatype-datetime.html
            if jaar != '2299':
                # conversion to ISO8601 notation
                self._waarde = '%s-%s-%s %s:%s:%s.%s' % (jaar, maand, dag, uur, minuut, seconden, secfract)
            else:
                # Year 2299 is the BAG Extract placeholder for "no end
                # date": store NULL instead.
                self._waarde = None
        else:
            self._waarde = None
# --------------------------------------------------------------------------------------------------------
# Class         BAGdateAttribuut
# Derived from  BAGattribuut
# Description   Holds a date attribute
# --------------------------------------------------------------------------------------------------------
class BAGdateAttribuut(BAGattribuut):
    # Constructor: no length restriction for a DATE column.
    def __init__(self, naam, tag):
        self._naam = naam
        self._tag = tag
        self._waarde = None

    # SQL column type
    def sqltype(self):
        return "DATE"

    # Initialisation from XML.
    # The value is kept in its raw 'JJJJMMDD...' text form; presumably
    # PostgreSQL parses that directly into a DATE -- TODO confirm against
    # the loader. Year 2299 (the BAG "no date" placeholder) and empty
    # values are both mapped to NULL.
    def leesUitXML(self, xml):
        self._waarde = getValue(xml, self._tag)
        if self._waarde != '':
            jaar = self._waarde[0:4]
            if jaar == '2299':
                self._waarde = None
        else:
            self._waarde = None
# --------------------------------------------------------------------------------------------------------
# Class         BAGgeoAttribuut
# Derived from  BAGattribuut
# Description   Holds a geometry attribute (base class of the geo types)
# --------------------------------------------------------------------------------------------------------
class BAGgeoAttribuut(BAGattribuut):
    def __init__(self, dimensie, naam, tag):
        # dimensie: coordinate dimension (2D/3D) forced onto the geometry
        self._dimensie = dimensie
        # OGR geometry object, filled by the subclass leesUitXML()
        self._geometrie = None
        BAGattribuut.__init__(self, -1, naam, tag)

    # Coordinate dimension (2D or 3D)
    def dimensie(self):
        return self._dimensie

    # This attribute is a geometry.
    def isGeometrie(self):
        return True

    # Geometry validation via OGR; an absent geometry counts as valid.
    def isValide(self):
        if self._geometrie:
            return self._geometrie.IsValid()
        else:
            return True

    # EWKT value as used in SQL, or None when there is no geometry.
    # Side effect: forces the configured coordinate dimension on the OGR
    # geometry and caches its WKT in self._waarde.
    def waardeSQL(self):
        if self._geometrie is not None:
            # Force the specified coordinate dimension
            self._geometrie.SetCoordinateDimension(self._dimensie)
            self._waarde = self._geometrie.ExportToWkt()
        if self._waarde:
            # SRID 28992 = RD New (Dutch national grid)
            return 'SRID=28992;' + self._waarde
        else:
            return None

    # Change the value (WKT string); also rebuilds the OGR geometry.
    def setWaarde(self, waarde):
        self._waarde = waarde
        if self._waarde is not None:
            self._geometrie = ogr.CreateGeometryFromWkt(self._waarde)

    # SQL template: wrap the EWKT value in GeomFromEWKT().
    def waardeSQLTpl(self):
        if self._waarde:
            # For later: if we want to validate/log/repair geometries...
            # return 'validateGeometry(\'' + self._parentObj.naam() + '\', ' + str(self._parentObj.identificatie()) +', GeomFromEWKT(%s))'
            return 'GeomFromEWKT(%s)'
        else:
            return '%s'

    # Geometry kind (e.g. POINT/POLYGON); filled in by subclasses.
    def soort(self):
        return ""
# --------------------------------------------------------------------------------------------------------
# Class         BAGpoint
# Derived from  BAGgeoAttribuut
# Description   Holds a point geometry attribute (geometry of a verblijfsobject)
# --------------------------------------------------------------------------------------------------------
class BAGpoint(BAGgeoAttribuut):
    # Geometry kind
    def soort(self):
        return "POINT"

    # Points are not validated.
    def isValide(self):
        return True

    # Initialisation from XML: read a gml:Point, or derive a point from the
    # centroid of a gml:Polygon when no point is present.
    def leesUitXML(self, xml):
        self.polygonAttr = None
        try:
            xmlGeometrie = xml.find('./' + tagVolledigeNS(self._tag, xml.nsmap))
            if xmlGeometrie is not None:
                gmlNode = xmlGeometrie.find('./' + tagVolledigeNS("gml:Point", xml.nsmap))
                if gmlNode is not None:
                    gmlStr = etree.tostring(gmlNode)
                    self._geometrie = ogr.CreateGeometryFromGML(gmlStr.decode())
                else:
                    # Force a point out of the Polygon (its centroid)
                    gmlNode = xmlGeometrie.find('./' + tagVolledigeNS("gml:Polygon", xml.nsmap))
                    if gmlNode is not None:
                        gmlStr = etree.tostring(gmlNode)
                        self._geometrie = ogr.CreateGeometryFromGML(gmlStr.decode())
                        self._geometrie = self._geometrie.Centroid()
        except Exception as e:
            # BUGFIX: the original handler referenced `point.text` while
            # `point` was always None, so the error path itself crashed with
            # an AttributeError and masked the real problem. Log the actual
            # exception and the tag instead.
            Log.log.error(
                "ik kan hier echt geen POINT van maken: %s (tag=%s)" % (str(e), self._tag))
# --------------------------------------------------------------------------------------------------------
# Class         BAGpolygoon
# Derived from  BAGgeoAttribuut
# Description   Holds a polygon geometry attribute (pand, ligplaats,
#               standplaats or woonplaats). The dimension (2D/3D) varies.
# --------------------------------------------------------------------------------------------------------
class BAGpolygoon(BAGgeoAttribuut):
    # Geometry kind
    def soort(self):
        return "POLYGON"

    # Initialisation from XML: locate the attribute element, then the
    # gml:Polygon inside it, and build an OGR geometry from the GML.
    def leesUitXML(self, xml):
        geo_node = xml.find('./' + tagVolledigeNS(self._tag, xml.nsmap))
        if geo_node is None:
            return
        gml_node = geo_node.find('./' + tagVolledigeNS("gml:Polygon", geo_node.nsmap))
        if gml_node is None:
            return
        self._geometrie = ogr.CreateGeometryFromGML(etree.tostring(gml_node).decode())
# --------------------------------------------------------------------------------------------------------
# Class         BAGmultiPolygoon
# Derived from  BAGpolygoon
# Description   Holds a multi-polygon geometry attribute (woonplaats)
# --------------------------------------------------------------------------------------------------------
class BAGmultiPolygoon(BAGpolygoon):
    # Geometry kind
    def soort(self):
        return "MULTIPOLYGON"

    # Initialisation from XML: accept either a gml:MultiSurface or a plain
    # gml:Polygon (which is then wrapped into a single-part MultiPolygon).
    def leesUitXML(self, xml):
        gmlNode = None
        # Locate the attribute element, e.g. bag_LVC:woonplaatsGeometrie
        xmlGeometrie = xml.find('./' + tagVolledigeNS(self._tag, xml.nsmap))
        if xmlGeometrie is not None:
            # First try to find a MultiSurface
            gmlNode = xmlGeometrie.find('./' + tagVolledigeNS("gml:MultiSurface", xml.nsmap))
            if gmlNode is None:
                # No MultiSurface: try to find a single Polygon
                gmlNode = xmlGeometrie.find('./' + tagVolledigeNS("gml:Polygon", xml.nsmap))
                if gmlNode is not None:
                    gmlStr = etree.tostring(gmlNode)
                    polygon = ogr.CreateGeometryFromGML(gmlStr.decode())
                    # Wrap the single polygon into a MultiPolygon so the
                    # column type is uniform.
                    self._geometrie = ogr.Geometry(ogr.wkbMultiPolygon)
                    self._geometrie.AddGeometryDirectly(polygon)
            else:
                # MultiSurface
                gmlStr = etree.tostring(gmlNode)
                self._geometrie = ogr.CreateGeometryFromGML(gmlStr.decode())
                if self._geometrie is None:
                    Log.log.warn("Null MultiSurface in BAGmultiPolygoon: tag=%s parent=%s" % (
                        self._tag, self._parentObj.identificatie()))
        if self._geometrie is None:
            Log.log.warn("Null geometrie in BAGmultiPolygoon: tag=%s identificatie=%s" % (
                self._tag, self._parentObj.identificatie()))
# --------------------------------------------------------------------------------------------------------
# Class BAGgeometrieValidatie
# Afgeleid van BAGattribuut
# Omschrijving Bevat de validatie waarde (true,false) van een geometrie attribuut
# --------------------------------------------------------------------------------------------------------
class BAGgeometrieValidatie(BAGattribuut):
    """Boolean attribute holding the validity (true/false) of a geometry attribute."""

    def __init__(self, naam, naam_geo_attr):
        BAGattribuut.__init__(self, -1, naam, None)
        # Column name of the geometry attribute that is to be validated.
        self._naam_geo_attr = naam_geo_attr

    def sqltype(self):
        """SQL column type of this attribute."""
        return "BOOLEAN"

    def leesUitXML(self, xml):
        """No XML representation exists: the value is derived, never read."""
        self._waarde = None

    def waardeSQL(self):
        """Derive the SQL value from the referenced geometry attribute."""
        geo_attr = self._parentObj.attribuut(self._naam_geo_attr)
        return str(geo_attr.isValide()) if geo_attr else 'TRUE'

    def soort(self):
        """This is not a geometry attribute itself, so it has no kind."""
        return ""
# --------------------------------------------------------------------------------------------------------
# Class BAGrelatieAttribuut
# Afgeleid van BAGattribuut
# Omschrijving Bevat een attribuut dat meer dan 1 waarde kan hebben.
# --------------------------------------------------------------------------------------------------------
class BAGrelatieAttribuut(BAGattribuut):
    """Multivalued (1:n) attribute of a BAG object, stored in its own relation table.

    Every generated row carries the parent object's key columns
    (identificatie, record-status columns, validity period), any configured
    extra attributes, and one of the values.
    """

    # Constructor
    def __init__(self, parent, relatieNaam, lengte, naam, tag, extraAttributes):
        """
        parent          -- BAGObject that owns this multivalued attribute
        relatieNaam     -- name of the relation (table) rows are written to
        lengte          -- maximum length of a single value
        naam            -- attribute (column) name
        tag             -- XML tag the values are read from
        extraAttributes -- names of parent attributes copied into each row
        """
        BAGattribuut.__init__(self, lengte, naam, tag)
        # BAGObject that this multivalued attribute belongs to.
        self._parent = parent
        self._relatieNaam = relatieNaam
        self._waarde = []
        # Extra attributes copied from the parent object into each row.
        self._extraAttributes = extraAttributes

    # Attribute relatienaam
    def relatieNaam(self):
        """Name of the relation (table) for this attribute."""
        return self._relatieNaam

    # Attribute waarde. Overloads the single value in the base class.
    def waarde(self):
        return self._waarde

    # Change the value.
    def setWaarde(self, waarde):
        # Values accumulate, because the attribute can occur multiple times
        # within the parent object.
        self._waarde.append(waarde)

    # Indicate that this attribute is not single-valued.
    def enkelvoudig(self):
        return False

    # Initialisation from XML
    def leesUitXML(self, xml):
        self._waarde = getValues(xml, self.tag())

    # Build INSERT SQL for this relation
    def maakInsertSQL(self, append=None):
        """Build INSERT statements into self.sql/self.inhoud, one per value.

        Default (append=None) starts fresh; a truthy append adds to already
        generated SQL (used by maakUpdateSQL).
        """
        if not append:
            self.sql = []
            self.inhoud = []
        for waarde in self._waarde:
            sql = "INSERT INTO " + self.relatieNaam() + " "
            sql += "(identificatie,aanduidingrecordinactief,aanduidingrecordcorrectie,begindatumtijdvakgeldigheid,einddatumtijdvakgeldigheid,"
            sql += ",".join(self._extraAttributes) + ","
            # Fix issue #199: the number of placeholders must grow with the
            # number of values in self._extraAttributes.
            sql += self.naam() + ") VALUES (%s, %s, %s, %s, %s"
            sql += ", %s" * len(self._extraAttributes) + ", %s)"
            inhoud = [self._parent.attribuut('identificatie').waardeSQL(),
                      self._parent.attribuut('aanduidingRecordInactief').waardeSQL(),
                      self._parent.attribuut('aanduidingRecordCorrectie').waardeSQL(),
                      self._parent.attribuut('begindatumTijdvakGeldigheid').waardeSQL(),
                      self._parent.attribuut('einddatumTijdvakGeldigheid').waardeSQL()]
            for attr in self._extraAttributes:
                if self._parent.heeftAttribuut(attr):
                    inhoud.append(self._parent.attribuut(attr).waardeSQL())
                else:
                    inhoud.append('')
            inhoud.append(waarde)
            self.inhoud.append(tuple(inhoud))
            self.sql.append(sql)

    # Build COPY input for this relation
    def maakCopySQL(self):
        """Build one '~'-separated COPY input line per value into self.sql."""
        velden = ["identificatie", "aanduidingrecordinactief", "aanduidingrecordcorrectie", "begindatumtijdvakgeldigheid",
                  "einddatumtijdvakgeldigheid"]
        velden += self._extraAttributes
        velden.append(self.naam())
        self.velden = tuple(velden)
        self.sql = ""
        for waarde in self._waarde:
            self.sql += self._parent.attribuut('identificatie').waardeSQL() + "~"
            self.sql += self._parent.attribuut('aanduidingRecordInactief').waardeSQL() + "~"
            self.sql += self._parent.attribuut('aanduidingRecordCorrectie').waardeSQL() + "~"
            self.sql += self._parent.attribuut('begindatumTijdvakGeldigheid').waardeSQL() + "~"
            # The end date may be empty: TODO handle this in waardeSQL().
            # BUGFIX: the original tested "x is ''" — identity comparison with
            # a string literal is implementation-dependent (SyntaxWarning in
            # modern CPython). A plain falsy test covers both None and ''.
            einddatumWaardeSQL = self._parent.attribuut('einddatumTijdvakGeldigheid').waardeSQL()
            if not einddatumWaardeSQL:
                einddatumWaardeSQL = r'\N'
            self.sql += einddatumWaardeSQL + "~"
            for attr in self._extraAttributes:
                # The extra attributes may be empty as well.
                if self._parent.heeftAttribuut(attr):
                    attrWaarde = self._parent.attribuut(attr).waardeSQL()
                else:
                    attrWaarde = ''
                if not attrWaarde:
                    attrWaarde = r'\N'
                self.sql += attrWaarde + "~"
            if not waarde:
                waarde = r'\N'
            self.sql += waarde + "\n"

    # Build UPDATE SQL for this relation
    def maakUpdateSQL(self):
        """Refresh the relation: delete all existing rows, then re-insert."""
        self.sql = []
        self.inhoud = []
        # Relations have no unique key, and rows may have appeared or
        # disappeared, so we delete all existing rows first and re-insert
        # the current ones. Unfortunate but true.
        sql = "DELETE FROM " + self.relatieNaam() + " WHERE "
        sql += "identificatie = %s AND aanduidingrecordinactief = %s AND aanduidingrecordcorrectie = %s AND begindatumtijdvakgeldigheid "
        # Tricky: when the begin date is empty (should not normally occur) the
        # WHERE clause must use "is NULL" because "= NULL" never matches:
        # http://stackoverflow.com/questions/4476172/postgresql-select-where-timestamp-is-empty
        beginDatum = self._parent.attribuut('begindatumTijdvakGeldigheid').waardeSQL()
        if beginDatum:
            sql += "= %s"
        else:
            # NOTE(review): binding an empty string here renders "is ''", not
            # "is NULL" — verify the driver/caller handles this as intended.
            sql += "is %s"
        self.sql.append(sql)
        self.inhoud.append((self._parent.attribuut('identificatie').waardeSQL(),
                            self._parent.attribuut('aanduidingRecordInactief').waardeSQL(),
                            self._parent.attribuut('aanduidingRecordCorrectie').waardeSQL(),
                            beginDatum))
        # Reuse the INSERT generation for the new rows, appended to the DELETE.
        self.maakInsertSQL(True)

    # Print information about the attribute on screen
    def schrijf(self):
        first = True
        for waarde in self._waarde:
            if first:
                print("- %-27s: %s" % (self.naam(), waarde))
                first = False
            else:
                print("- %-27s %s" % ("", waarde))
# --------------------------------------------------------------------------------------------------------
# Class BAGenumRelatieAttribuut
# Afgeleid van BAGrelatieAttribuut
# Omschrijving Bevat een relatie attribuut van type enum.
# --------------------------------------------------------------------------------------------------------
class BAGenumRelatieAttribuut(BAGrelatieAttribuut):
    """Multivalued relation attribute whose values come from an SQL ENUM type."""

    def __init__(self, parent, relatieNaam, naam, tag, extraAttributes, lijst):
        BAGrelatieAttribuut.__init__(self, parent, relatieNaam, len(lijst), naam, tag, extraAttributes)
        self._lijst = lijst
        # self._lengte = len(max(lijst, key=len))
        self._lengte = len(lijst)

    def sqltype(self):
        """The SQL type is the ENUM type named after this attribute."""
        return self._naam

    def sqlinit(self):
        """Return SQL that (re)creates the ENUM type for this attribute."""
        toegestaneWaarden = "', '".join(self._lijst)
        return "DROP TYPE IF EXISTS %s;\nCREATE TYPE %s AS ENUM ('%s');\n" % (
            self._naam, self._naam, toegestaneWaarden)
| gpl-3.0 |
jvkops/django | tests/modeladmin/models.py | 108 | 1603 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class Band(models.Model):
    """Test model representing a band; ordered by name by default (see Meta)."""
    name = models.CharField(max_length=100)
    bio = models.TextField()
    sign_date = models.DateField()

    class Meta:
        ordering = ('name',)

    def __str__(self):
        # Human-readable representation used throughout the admin tests.
        return self.name
class Concert(models.Model):
    """Test model linking two bands; the opening act is optional (blank=True)."""
    main_band = models.ForeignKey(Band, models.CASCADE, related_name='main_concerts')
    opening_band = models.ForeignKey(Band, models.CASCADE, related_name='opening_concerts',
                                     blank=True)
    # NOTE(review): both choice sets use integer keys on CharFields —
    # presumably deliberate fodder for modeladmin validation tests; confirm
    # before "fixing".
    day = models.CharField(max_length=3, choices=((1, 'Fri'), (2, 'Sat')))
    transport = models.CharField(max_length=100, choices=(
        (1, 'Plane'),
        (2, 'Train'),
        (3, 'Bus')
    ), blank=True)
class ValidationTestModel(models.Model):
    """Exercises a broad mix of field types for ModelAdmin validation tests."""
    name = models.CharField(max_length=100)
    slug = models.SlugField()
    users = models.ManyToManyField(User)
    state = models.CharField(max_length=2, choices=(("CO", "Colorado"), ("WA", "Washington")))
    is_active = models.BooleanField(default=False)
    pub_date = models.DateTimeField()
    band = models.ForeignKey(Band, models.CASCADE)
    no = models.IntegerField(verbose_name="Number", blank=True, null=True)  # This field is intentionally 2 characters long. See #16080.

    def decade_published_in(self):
        # e.g. a pub_date in 1999 yields "1990's".
        return self.pub_date.strftime('%Y')[:3] + "0's"
class ValidationTestInlineModel(models.Model):
    """Child model used to validate inline admin configuration."""
    parent = models.ForeignKey(ValidationTestModel, models.CASCADE)
| bsd-3-clause |
ogenstad/ansible | lib/ansible/plugins/action/synchronize.py | 22 | 19616 | # -*- coding: utf-8 -*-
# (c) 2012-2013, Timothy Appnel <tim@appnel.com>
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os.path
from collections import MutableSequence
from ansible import constants as C
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_text
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.plugins.action import ActionBase
from ansible.plugins.loader import connection_loader
class ActionModule(ActionBase):
def _get_absolute_path(self, path):
    """Resolve *path* against the role's (or playbook's) 'files' directory.

    rsync:// URLs pass through untouched; a trailing slash on the input is
    preserved on the resolved path (it is significant to rsync).
    """
    had_trailing_slash = bool(path) and path[-1] == '/'

    if path.startswith('rsync://'):
        return path

    role = self._task._role
    if role is not None:
        base_dir = role._role_path
    else:
        base_dir = self._loader.get_basedir()
    resolved = self._loader.path_dwim_relative(base_dir, 'files', path)

    if had_trailing_slash and resolved[-1] != '/':
        # make sure the dwim'd path ends in a trailing "/" if the original did
        resolved += '/'
    return resolved
def _host_is_ipv6_address(self, host):
    """Return True when *host* looks like an IPv6 literal (contains a colon)."""
    return host.find(':') != -1
def _format_rsync_rsh_target(self, host, path, user):
    ''' formats rsync rsh target, escaping ipv6 addresses if needed '''
    if path.startswith('rsync://'):
        return path

    # When using docker or buildah, do not add user information.
    user_prefix = ''
    if user and self._remote_transport not in ('docker', 'buildah'):
        user_prefix = '%s@' % (user,)

    # IPv6 literals must be bracketed so the port/path colon stays unambiguous.
    if self._host_is_ipv6_address(host):
        template = '[%s%s]:%s'
    else:
        template = '%s%s:%s'
    return template % (user_prefix, host, path)
def _process_origin(self, host, path, user):
    """Return the rsync path for the 'origin' (local) side of the transfer."""
    if host in C.LOCALHOST:
        # Local path: resolve role-relative paths to absolute ones, unless it
        # is already absolute or carries an explicit host: prefix.
        if ':' not in path and not path.startswith('/'):
            path = self._get_absolute_path(path=path)
        return path
    return self._format_rsync_rsh_target(host, path, user)
def _process_remote(self, task_args, host, path, user, port_matches_localhost_port):
    """
    :arg task_args: module args dict; may gain '_substitute_controller'
    :arg host: hostname for the path
    :arg path: file path
    :arg user: username for the transfer
    :arg port_matches_localhost_port: boolean whether the remote port
        matches the port used by localhost's sshd.  This is used in
        conjunction with seeing whether the host is localhost to know
        if we need to have the module substitute the pathname or if it
        is a different host (for instance, an ssh tunnelled port or an
        alternative ssh port to a vagrant host.)
    """
    transport = self._connection.transport
    # If we're connecting to a remote host, or we're delegating to another
    # host, or we're connecting to a different ssh instance on the
    # localhost, then we have to format the path as a remote rsync path.
    if host not in C.LOCALHOST or transport != "local" or \
            (host in C.LOCALHOST and not port_matches_localhost_port):
        # If we're delegating to non-localhost but the inventory_hostname
        # host is localhost, then we need the module to fix up the rsync
        # path to use the controller's public DNS/IP instead of "localhost".
        if port_matches_localhost_port and host in C.LOCALHOST:
            task_args['_substitute_controller'] = True
        return self._format_rsync_rsh_target(host, path, user)

    # Local path: resolve role-relative paths to absolute ones.
    if ':' not in path and not path.startswith('/'):
        path = self._get_absolute_path(path=path)

    return path
def _override_module_replaced_vars(self, task_vars):
    """ Some vars are substituted into the modules.  Have to make sure
    that those are correct for localhost when synchronize creates its own
    connection to localhost."""

    # Clear the current definition of these variables as they came from the
    # connection to the remote host
    if 'ansible_syslog_facility' in task_vars:
        del task_vars['ansible_syslog_facility']
    for key in list(task_vars.keys()):
        if key.startswith("ansible_") and key.endswith("_interpreter"):
            del task_vars[key]

    # Add the definitions from localhost
    for host in C.LOCALHOST:
        if host in task_vars['hostvars']:
            localhost = task_vars['hostvars'][host]
            break
    # NOTE(review): if hostvars contains no C.LOCALHOST entry, 'localhost'
    # is unbound here and the next line raises NameError — presumably an
    # implicit localhost always exists; verify.
    if 'ansible_syslog_facility' in localhost:
        task_vars['ansible_syslog_facility'] = localhost['ansible_syslog_facility']
    for key in localhost:
        if key.startswith("ansible_") and key.endswith("_interpreter"):
            task_vars[key] = localhost[key]
def run(self, tmp=None, task_vars=None):
    ''' generates params and passes them on to the rsync module '''
    # When modifying this function be aware of the tricky convolutions
    # your thoughts have to go through:
    #
    # In normal ansible, we connect from controller to inventory_hostname
    # (playbook's hosts: field) or controller to delegate_to host and run
    # a module on one of those hosts.
    #
    # So things that are directly related to the core of ansible are in
    # terms of that sort of connection that always originate on the
    # controller.
    #
    # In synchronize we use ansible to connect to either the controller or
    # to the delegate_to host and then run rsync which makes its own
    # connection from controller to inventory_hostname or delegate_to to
    # inventory_hostname.
    #
    # That means synchronize needs to have some knowledge of the
    # controller to inventory_host/delegate host that ansible typically
    # establishes and use those to construct a command line for rsync to
    # connect from the inventory_host to the controller/delegate.  The
    # challenge for coders is remembering which leg of the trip is
    # associated with the conditions that you're checking at any one time.
    if task_vars is None:
        task_vars = dict()

    # We make a copy of the args here because we may fail and be asked to
    # retry. If that happens we don't want to pass the munged args through
    # to our next invocation. Munged args are single use only.
    _tmp_args = self._task.args.copy()

    result = super(ActionModule, self).run(tmp, task_vars)
    del tmp  # tmp no longer has any effect

    # Store remote connection type
    self._remote_transport = self._connection.transport

    # Handle docker connection options
    if self._remote_transport == 'docker':
        self._docker_cmd = self._connection.docker_cmd
        if self._play_context.docker_extra_args:
            self._docker_cmd = "%s %s" % (self._docker_cmd, self._play_context.docker_extra_args)

    # self._connection accounts for delegate_to so
    # remote_transport is the transport ansible thought it would need
    # between the controller and the delegate_to host or the controller
    # and the remote_host if delegate_to isn't set.
    remote_transport = False
    if self._connection.transport != 'local':
        remote_transport = True

    try:
        delegate_to = self._task.delegate_to
    except (AttributeError, KeyError):
        delegate_to = None

    # ssh paramiko docker buildah and local are fully supported transports. Anything
    # else only works with delegate_to
    if delegate_to is None and self._connection.transport not in \
            ('ssh', 'paramiko', 'local', 'docker', 'buildah'):
        result['failed'] = True
        result['msg'] = (
            "synchronize uses rsync to function. rsync needs to connect to the remote "
            "host via ssh, docker client or a direct filesystem "
            "copy. This remote host is being accessed via %s instead "
            "so it cannot work." % self._connection.transport)
        return result

    use_ssh_args = _tmp_args.pop('use_ssh_args', None)

    # Parameter name needed by the ansible module
    _tmp_args['_local_rsync_path'] = task_vars.get('ansible_rsync_path') or 'rsync'

    # rsync thinks that one end of the connection is localhost and the
    # other is the host we're running the task for  (Note: We use
    # ansible's delegate_to mechanism to determine which host rsync is
    # running on so localhost could be a non-controller machine if
    # delegate_to is used)
    src_host = '127.0.0.1'
    inventory_hostname = task_vars.get('inventory_hostname')
    dest_host_inventory_vars = task_vars['hostvars'].get(inventory_hostname)
    try:
        dest_host = dest_host_inventory_vars['ansible_host']
    except KeyError:
        dest_host = dest_host_inventory_vars.get('ansible_ssh_host', inventory_hostname)

    # All identifiers under which the destination host may be known.
    dest_host_ids = [hostid for hostid in (dest_host_inventory_vars.get('inventory_hostname'),
                                           dest_host_inventory_vars.get('ansible_host'),
                                           dest_host_inventory_vars.get('ansible_ssh_host'))
                     if hostid is not None]

    # Collect every port configured for a localhost alias (first magic port
    # var wins per host; default otherwise).
    localhost_ports = set()
    for host in C.LOCALHOST:
        localhost_vars = task_vars['hostvars'].get(host, {})
        for port_var in C.MAGIC_VARIABLE_MAPPING['port']:
            port = localhost_vars.get(port_var, None)
            if port:
                break
        else:
            port = C.DEFAULT_REMOTE_PORT
        localhost_ports.add(port)

    # dest_is_local tells us if the host rsync runs on is the same as the
    # host rsync puts the files on.  This is about *rsync's connection*,
    # not about the ansible connection to run the module.
    dest_is_local = False
    if delegate_to is None and remote_transport is False:
        dest_is_local = True
    elif delegate_to is not None and delegate_to in dest_host_ids:
        dest_is_local = True

    # CHECK FOR NON-DEFAULT SSH PORT
    inv_port = task_vars.get('ansible_ssh_port', None) or C.DEFAULT_REMOTE_PORT
    if _tmp_args.get('dest_port', None) is None:
        if inv_port is not None:
            _tmp_args['dest_port'] = inv_port

    # Set use_delegate if we are going to run rsync on a delegated host
    # instead of localhost
    use_delegate = False
    if delegate_to is not None and delegate_to in dest_host_ids:
        # edge case: explicit delegate and dest_host are the same
        # so we run rsync on the remote machine targeting its localhost
        # (itself)
        dest_host = '127.0.0.1'
        use_delegate = True
    elif delegate_to is not None and remote_transport:
        # If we're delegating to a remote host then we need to use the
        # delegate_to settings
        use_delegate = True

    # Delegate to localhost as the source of the rsync unless we've been
    # told (via delegate_to) that a different host is the source of the
    # rsync
    if not use_delegate and remote_transport:
        # Create a connection to localhost to run rsync on
        new_stdin = self._connection._new_stdin

        # Unlike port, there can be only one shell
        localhost_shell = None
        for host in C.LOCALHOST:
            localhost_vars = task_vars['hostvars'].get(host, {})
            for shell_var in C.MAGIC_VARIABLE_MAPPING['shell']:
                localhost_shell = localhost_vars.get(shell_var, None)
                if localhost_shell:
                    break
            if localhost_shell:
                break
        else:
            localhost_shell = os.path.basename(C.DEFAULT_EXECUTABLE)
        self._play_context.shell = localhost_shell

        # Unlike port, there can be only one executable
        localhost_executable = None
        for host in C.LOCALHOST:
            localhost_vars = task_vars['hostvars'].get(host, {})
            for executable_var in C.MAGIC_VARIABLE_MAPPING['executable']:
                localhost_executable = localhost_vars.get(executable_var, None)
                if localhost_executable:
                    break
            if localhost_executable:
                break
        else:
            localhost_executable = C.DEFAULT_EXECUTABLE
        self._play_context.executable = localhost_executable

        new_connection = connection_loader.get('local', self._play_context, new_stdin)
        self._connection = new_connection
        self._override_module_replaced_vars(task_vars)

    # SWITCH SRC AND DEST HOST PER MODE
    if _tmp_args.get('mode', 'push') == 'pull':
        (dest_host, src_host) = (src_host, dest_host)

    # MUNGE SRC AND DEST PER REMOTE_HOST INFO
    src = _tmp_args.get('src', None)
    dest = _tmp_args.get('dest', None)
    if src is None or dest is None:
        return dict(failed=True, msg="synchronize requires both src and dest parameters are set")

    # Determine if we need a user@
    user = None
    if not dest_is_local:
        # Src and dest rsync "path" handling
        if boolean(_tmp_args.get('set_remote_user', 'yes'), strict=False):
            if use_delegate:
                user = task_vars.get('ansible_delegated_vars', dict()).get('ansible_ssh_user', None)
                if not user:
                    user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user
                if not user:
                    user = C.DEFAULT_REMOTE_USER
            else:
                user = task_vars.get('ansible_ssh_user') or self._play_context.remote_user

        # Private key handling
        private_key = self._play_context.private_key_file
        if private_key is not None:
            _tmp_args['private_key'] = private_key

        # use the mode to define src and dest's url
        if _tmp_args.get('mode', 'push') == 'pull':
            # src is a remote path: <user>@<host>, dest is a local path
            src = self._process_remote(_tmp_args, src_host, src, user, inv_port in localhost_ports)
            dest = self._process_origin(dest_host, dest, user)
        else:
            # src is a local path, dest is a remote path: <user>@<host>
            src = self._process_origin(src_host, src, user)
            dest = self._process_remote(_tmp_args, dest_host, dest, user, inv_port in localhost_ports)
    else:
        # Still need to munge paths (to account for roles) even if we aren't
        # copying files between hosts
        if not src.startswith('/'):
            src = self._get_absolute_path(path=src)
        if not dest.startswith('/'):
            dest = self._get_absolute_path(path=dest)

    _tmp_args['src'] = src
    _tmp_args['dest'] = dest

    # Allow custom rsync path argument
    rsync_path = _tmp_args.get('rsync_path', None)

    # backup original become as we are probably about to unset it
    become = self._play_context.become

    if not dest_is_local:
        # don't escalate for docker. doing --rsync-path with docker exec fails
        # and we can switch directly to the user via docker arguments
        if self._play_context.become and not rsync_path and self._remote_transport != 'docker':
            # If no rsync_path is set, become was originally set, and dest is
            # remote then add privilege escalation here.
            if self._play_context.become_method == 'sudo':
                rsync_path = 'sudo rsync'
            # TODO: have to add in the rest of the become methods here

        # We cannot use privilege escalation on the machine running the
        # module.  Instead we run it on the machine rsync is connecting
        # to.
        self._play_context.become = False

    _tmp_args['rsync_path'] = rsync_path

    if use_ssh_args:
        ssh_args = [
            getattr(self._play_context, 'ssh_args', ''),
            getattr(self._play_context, 'ssh_common_args', ''),
            getattr(self._play_context, 'ssh_extra_args', ''),
        ]
        _tmp_args['ssh_args'] = ' '.join([a for a in ssh_args if a])

    # If launching synchronize against docker container
    # use rsync_opts to support container to override rsh options
    if self._remote_transport in ['docker', 'buildah']:
        # Replicate what we do in the module argumentspec handling for lists
        if not isinstance(_tmp_args.get('rsync_opts'), MutableSequence):
            tmp_rsync_opts = _tmp_args.get('rsync_opts', [])
            if isinstance(tmp_rsync_opts, string_types):
                tmp_rsync_opts = tmp_rsync_opts.split(',')
            elif isinstance(tmp_rsync_opts, (int, float)):
                tmp_rsync_opts = [to_text(tmp_rsync_opts)]
            _tmp_args['rsync_opts'] = tmp_rsync_opts

        if '--blocking-io' not in _tmp_args['rsync_opts']:
            _tmp_args['rsync_opts'].append('--blocking-io')

        if self._remote_transport in ['docker']:
            if become and self._play_context.become_user:
                _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, self._play_context.become_user))
            elif user is not None:
                _tmp_args['rsync_opts'].append("--rsh=%s exec -u %s -i" % (self._docker_cmd, user))
            else:
                _tmp_args['rsync_opts'].append("--rsh=%s exec -i" % self._docker_cmd)
        elif self._remote_transport in ['buildah']:
            _tmp_args['rsync_opts'].append("--rsh=buildah run --")

    # run the module and store the result
    result.update(self._execute_module('synchronize', module_args=_tmp_args, task_vars=task_vars))

    if 'SyntaxError' in result.get('exception', result.get('msg', '')):
        # Emit a warning about using python3 because synchronize is
        # somewhat unique in running on localhost
        result['exception'] = result['msg']
        result['msg'] = ('SyntaxError parsing module. Perhaps invoking "python" on your local (or delegate_to) machine invokes python3. '
                         'You can set ansible_python_interpreter for localhost (or the delegate_to machine) to the location of python2 to fix this')

    return result
| gpl-3.0 |
shinate/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/style/checkers/common.py | 203 | 3229 | # Copyright (C) 2010 Chris Jerdonek (cjerdonek@webkit.org)
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports style checking not specific to any one file type."""
# FIXME: Test this list in the same way that the list of CppChecker
# categories is tested, for example by checking that all of its
# elements appear in the unit tests. This should probably be done
# after moving the relevant cpp_unittest.ErrorCollector code
# into a shared location and refactoring appropriately.
# Style-error categories reported by the checkers defined in this module.
categories = set([
    "whitespace/carriage_return",
    "whitespace/tab"])
class CarriageReturnChecker(object):
    """Supports checking for and handling carriage returns."""

    def __init__(self, handle_style_error):
        self._handle_style_error = handle_style_error

    def check(self, lines):
        """Report trailing carriage returns, strip them in place, return lines."""
        for index, line in enumerate(lines):
            if not line.endswith("\r"):
                continue
            self._handle_style_error(index + 1,  # Correct for offset.
                                     "whitespace/carriage_return",
                                     1,
                                     "One or more unexpected \\r (^M) found; "
                                     "better to use only a \\n")
            lines[index] = line.rstrip("\r")
        return lines
class TabChecker(object):
    """Supports checking for and handling tabs."""

    def __init__(self, file_path, handle_style_error):
        self.file_path = file_path
        self.handle_style_error = handle_style_error

    def check(self, lines):
        # FIXME: share with cpp_style.
        for line_number, line in enumerate(lines, start=1):
            if line.find("\t") == -1:
                continue
            self.handle_style_error(line_number,
                                    "whitespace/tab", 5,
                                    "Line contains tab character.")
| bsd-3-clause |
vikatory/kbengine | kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest.
# Each of the five letters below has a distinct "final" form used only at
# the end of a word, alongside its normal form.
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6

# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5

# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01

VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Decide between logical Hebrew (windows-1255) and visual Hebrew
    (ISO-8859-8) by accumulating final-letter position evidence.

    This prober never identifies a charset on its own (confidence is
    always 0.0); the two model probers forward the final decision here.
    See the module-level comments above for the full algorithm.
    """

    # Membership is tested once per input character in feed(); build these
    # sets once at class-creation time instead of recreating a list literal
    # on every call (frozenset also gives O(1) lookups).
    FINAL_LETTERS = frozenset([FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI])
    NON_FINAL_LETTERS = frozenset([NORMAL_KAF, NORMAL_MEM, NORMAL_NUN,
                                   NORMAL_PE])

    def __init__(self):
        CharSetProber.__init__(self)
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._mPrev = ' '
        self._mBeforePrev = ' '

    # These probers are owned by the group prober.
    def set_model_probers(self, logicalProber, visualProber):
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """Return True if *c* is one of the five final-form Hebrew letters."""
        return wrap_ord(c) in self.FINAL_LETTERS

    def is_non_final(self, c):
        """Return True if *c* is a non-final form that should not end a word."""
        # The normal Tsadi is not a good Non-Final letter due to words like
        # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
        # apostrophe is converted to a space in FilterWithoutEnglishLetters
        # causing the Non-Final tsadi to appear at an end of a word even
        # though this is not the case in the original text.
        # The letters Pe and Kaf rarely display a related behavior of not being
        # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
        # for example legally end with a Non-Final Pe or Kaf. However, the
        # benefit of these letters as Non-Final letters outweighs the damage
        # since these words are quite rare.
        return wrap_ord(c) in self.NON_FINAL_LETTERS

    def feed(self, aBuf):
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe

        aBuf = self.filter_high_bit_only(aBuf)

        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is not a
                    # 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        #  cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur

        # Forever detecting, till the end or until both model probers return
        # eNotMe (handled above)
        return eDetecting

    def get_charset_name(self):
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME

        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME

        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME

        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| lgpl-3.0 |
tal-nino/ansible | lib/ansible/plugins/callback/__init__.py | 71 | 10188 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
import difflib
import warnings
from copy import deepcopy
from six import string_types
from ansible import constants as C
from ansible.utils.unicode import to_unicode
__all__ = ["CallbackBase"]
class CallbackBase:

    '''
    This is a base ansible callback class that does nothing. New callbacks should
    use this class as a base and override any callback methods they wish to execute
    custom actions.
    '''

    # FIXME: the list of functions here needs to be updated once we have
    # finalized the list of callback methods used in the default callback

    def __init__(self, display):
        self._display = display
        if self._display.verbosity >= 4:
            name = getattr(self, 'CALLBACK_NAME', 'unnamed')
            ctype = getattr(self, 'CALLBACK_TYPE', 'old')
            version = getattr(self, 'CALLBACK_VERSION', '1.0')
            self._display.vvvv('Loaded callback %s of type %s, v%s' % (name, ctype, version))

    def _dump_results(self, result, indent=None, sort_keys=True):
        '''Return *result* serialized as JSON, honoring no_log/verbose flags.

        Note: internal ``_ansible_*`` keys are removed from *result* in place.
        '''
        if result.get('_ansible_no_log', False):
            return json.dumps(dict(censored="the output has been hidden due to the fact that 'no_log: true' was specified for this result"))

        if not indent and '_ansible_verbose_always' in result and result['_ansible_verbose_always']:
            indent = 4

        # All result keys stating with _ansible_ are internal, so remove them from the result before we output anything.
        # Iterate over a snapshot of the keys: deleting from a dict while
        # iterating its live keys() view raises RuntimeError on Python 3.
        for k in list(result.keys()):
            if isinstance(k, string_types) and k.startswith('_ansible_'):
                del result[k]

        return json.dumps(result, indent=indent, ensure_ascii=False, sort_keys=sort_keys)

    def _handle_warnings(self, res):
        ''' display warnings, if enabled and any exist in the result '''
        if C.COMMAND_WARNINGS and 'warnings' in res and res['warnings']:
            for warning in res['warnings']:
                self._display.warning(warning)

    def _get_diff(self, difflist):
        '''Render one or more diff dicts as a single unified-diff string.

        Previously this returned from inside the loop, so only the first
        diff was ever rendered, and if every diff raised UnicodeDecodeError
        the method implicitly returned None.  All diffs are now accumulated
        and the joined text is returned at the end.
        '''
        if not isinstance(difflist, list):
            difflist = [difflist]

        ret = []
        for diff in difflist:
            try:
                with warnings.catch_warnings():
                    warnings.simplefilter('ignore')
                    if 'dst_binary' in diff:
                        ret.append("diff skipped: destination file appears to be binary\n")
                    if 'src_binary' in diff:
                        ret.append("diff skipped: source file appears to be binary\n")
                    if 'dst_larger' in diff:
                        ret.append("diff skipped: destination file size is greater than %d\n" % diff['dst_larger'])
                    if 'src_larger' in diff:
                        ret.append("diff skipped: source file size is greater than %d\n" % diff['src_larger'])
                    if 'before' in diff and 'after' in diff:
                        if 'before_header' in diff:
                            before_header = "before: %s" % diff['before_header']
                        else:
                            before_header = 'before'
                        if 'after_header' in diff:
                            after_header = "after: %s" % diff['after_header']
                        else:
                            after_header = 'after'
                        differ = difflib.unified_diff(to_unicode(diff['before']).splitlines(True), to_unicode(diff['after']).splitlines(True), before_header, after_header, '', '', 10)
                        ret.extend(list(differ))
                        ret.append('\n')
            except UnicodeDecodeError:
                ret.append(">> the files are different, but the diff library cannot compare unicode strings\n\n")
        return u"".join(ret)

    def _process_items(self, result):
        # Fan a with_items result out into one synthetic per-item result so
        # the v2 item callbacks can be fired individually.
        for res in result._result['results']:
            newres = deepcopy(result)
            newres._result = res
            if 'failed' in res and res['failed']:
                self.v2_playbook_item_on_failed(newres)
            elif 'skipped' in res and res['skipped']:
                self.v2_playbook_item_on_skipped(newres)
            else:
                self.v2_playbook_item_on_ok(newres)
        #del result._result['results']

    def set_play_context(self, play_context):
        pass

    def on_any(self, *args, **kwargs):
        pass

    def runner_on_failed(self, host, res, ignore_errors=False):
        pass

    def runner_on_ok(self, host, res):
        pass

    def runner_on_skipped(self, host, item=None):
        pass

    def runner_on_unreachable(self, host, res):
        pass

    def runner_on_no_hosts(self):
        pass

    def runner_on_async_poll(self, host, res, jid, clock):
        pass

    def runner_on_async_ok(self, host, res, jid):
        pass

    def runner_on_async_failed(self, host, res, jid):
        pass

    def playbook_on_start(self):
        pass

    def playbook_on_notify(self, host, handler):
        pass

    def playbook_on_no_hosts_matched(self):
        pass

    def playbook_on_no_hosts_remaining(self):
        pass

    def playbook_on_task_start(self, name, is_conditional):
        pass

    def playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def playbook_on_setup(self):
        pass

    def playbook_on_import_for_host(self, host, imported_file):
        pass

    def playbook_on_not_import_for_host(self, host, missing_file):
        pass

    def playbook_on_play_start(self, name):
        pass

    def playbook_on_stats(self, stats):
        pass

    def on_file_diff(self, host, diff):
        pass

    ####### V2 METHODS, by default they call v1 counterparts if possible ######
    def v2_on_any(self, *args, **kwargs):
        self.on_any(args, kwargs)

    def v2_runner_on_failed(self, result, ignore_errors=False):
        host = result._host.get_name()
        self.runner_on_failed(host, result._result, ignore_errors)

    def v2_runner_on_ok(self, result):
        host = result._host.get_name()
        self.runner_on_ok(host, result._result)

    def v2_runner_on_skipped(self, result):
        if C.DISPLAY_SKIPPED_HOSTS:
            host = result._host.get_name()
            #FIXME, get item to pass through
            item = None
            self.runner_on_skipped(host, item)

    def v2_runner_on_unreachable(self, result):
        host = result._host.get_name()
        self.runner_on_unreachable(host, result._result)

    def v2_runner_on_no_hosts(self, task):
        self.runner_on_no_hosts()

    def v2_runner_on_async_poll(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        #FIXME, get real clock
        clock = 0
        self.runner_on_async_poll(host, result._result, jid, clock)

    def v2_runner_on_async_ok(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_ok(host, result._result, jid)

    def v2_runner_on_async_failed(self, result):
        host = result._host.get_name()
        jid = result._result.get('ansible_job_id')
        self.runner_on_async_failed(host, result._result, jid)

    def v2_runner_on_file_diff(self, result, diff):
        pass #no v1 correspondance

    def v2_playbook_on_start(self):
        self.playbook_on_start()

    def v2_playbook_on_notify(self, result, handler):
        host = result._host.get_name()
        self.playbook_on_notify(host, handler)

    def v2_playbook_on_no_hosts_matched(self):
        self.playbook_on_no_hosts_matched()

    def v2_playbook_on_no_hosts_remaining(self):
        self.playbook_on_no_hosts_remaining()

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.playbook_on_task_start(task, is_conditional)

    def v2_playbook_on_cleanup_task_start(self, task):
        pass #no v1 correspondance

    def v2_playbook_on_handler_task_start(self, task):
        pass #no v1 correspondance

    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        self.playbook_on_vars_prompt(varname, private, prompt, encrypt, confirm, salt_size, salt, default)

    def v2_playbook_on_setup(self):
        self.playbook_on_setup()

    def v2_playbook_on_import_for_host(self, result, imported_file):
        host = result._host.get_name()
        self.playbook_on_import_for_host(host, imported_file)

    def v2_playbook_on_not_import_for_host(self, result, missing_file):
        host = result._host.get_name()
        self.playbook_on_not_import_for_host(host, missing_file)

    def v2_playbook_on_play_start(self, play):
        self.playbook_on_play_start(play.name)

    def v2_playbook_on_stats(self, stats):
        self.playbook_on_stats(stats)

    def v2_on_file_diff(self, result):
        host = result._host.get_name()
        if 'diff' in result._result:
            self.on_file_diff(host, result._result['diff'])

    def v2_playbook_on_item_ok(self, result):
        pass # no v1

    def v2_playbook_on_item_failed(self, result):
        pass # no v1

    def v2_playbook_on_item_skipped(self, result):
        pass # no v1
| gpl-3.0 |
indico/indico | indico/modules/users/models/settings.py | 4 | 4692 | # This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from functools import wraps
from indico.core.db import db
from indico.core.settings import SettingsProxyBase
from indico.core.settings.models.base import JSONSettingsBase
from indico.core.settings.util import get_all_settings, get_setting
class UserSetting(JSONSettingsBase, db.Model):
    """User-specific settings."""

    # Composite indexes cover the (user, module[, name]) lookup patterns used
    # by UserSettingsProxy; the unique constraint makes each setting a single
    # row, and the check constraints force module/name to be stored lowercase.
    __table_args__ = (db.Index(None, 'user_id', 'module', 'name'),
                      db.Index(None, 'user_id', 'module'),
                      db.UniqueConstraint('user_id', 'module', 'name'),
                      db.CheckConstraint('module = lower(module)', 'lowercase_module'),
                      db.CheckConstraint('name = lower(name)', 'lowercase_name'),
                      {'schema': 'users'})

    # Owning user; the 'all, delete-orphan' cascade on the backref removes a
    # user's setting rows together with the user.
    user_id = db.Column(
        db.Integer,
        db.ForeignKey('users.users.id'),
        nullable=False,
        index=True
    )
    user = db.relationship(
        'User',
        lazy=True,
        backref=db.backref(
            '_all_settings',
            lazy='dynamic',
            cascade='all, delete-orphan'
        )
    )

    def __repr__(self):
        return f'<UserSetting({self.user_id}, {self.module}, {self.name}, {self.value!r})>'
def user_or_id(f):
    """Decorator normalizing a proxy method's ``user`` argument.

    The wrapped method receives a dict of filter criteria instead of the
    raw argument: ``{'user': user}`` for a persisted ``User`` object or
    ``{'user_id': ...}`` otherwise.
    """
    @wraps(f)
    def wrapper(self, user, *args, **kwargs):
        if not isinstance(user, db.m.User):
            # XXX: this appears to be unused, since the code
            # was previously broken and did not fail anywhere
            criteria = {'user_id': user}
        elif user.id is None:
            # SQLAlchemy 1.3 fails when filtering by a User with no ID, so we
            # just use a filter that is known to not return any results...
            criteria = {'user_id': None}
        else:
            criteria = {'user': user}
        return f(self, criteria, *args, **kwargs)

    return wrapper
class UserSettingsProxy(SettingsProxyBase):
    """Proxy class to access user-specific settings for a certain module."""

    @property
    def query(self):
        """Return a query over this module's setting rows."""
        return UserSetting.query.filter_by(module=self.module)

    @user_or_id
    def get_all(self, user, no_defaults=False):
        """Retrieve every setting of this module for a user.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        :param no_defaults: Only return existing settings and ignore defaults.
        :return: Dict containing the settings
        """
        return get_all_settings(UserSetting, None, self, no_defaults, **user)

    @user_or_id
    def get(self, user, name, default=SettingsProxyBase.default_sentinel):
        """Retrieve one setting value, falling back to *default*.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        :param name: Setting name
        :param default: Default value in case the setting does not exist
        :return: The settings's value or the default value
        """
        self._check_name(name)
        return get_setting(UserSetting, self, name, default, self._cache, **user)

    @user_or_id
    def set(self, user, name, value):
        """Store one setting.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        :param name: Setting name
        :param value: Setting value; must be JSON-serializable
        """
        self._check_name(name)
        UserSetting.set(self.module, name, self._convert_from_python(name, value), **user)
        self._flush_cache()

    @user_or_id
    def set_multi(self, user, items):
        """Store several settings in one go.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        :param items: Dict containing the new settings
        """
        # Validate every name before converting/writing anything.
        for name in items:
            self._check_name(name)
        converted = {name: self._convert_from_python(name, value)
                     for name, value in items.items()}
        UserSetting.set_multi(self.module, converted, **user)
        self._flush_cache()

    @user_or_id
    def delete(self, user, *names):
        """Remove the given settings.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        :param names: One or more names of settings to delete
        """
        for name in names:
            self._check_name(name)
        UserSetting.delete(self.module, names, **user)
        self._flush_cache()

    @user_or_id
    def delete_all(self, user):
        """Remove every setting of this module for the user.

        :param user: ``{'user': user}`` or ``{'user_id': id}``
        """
        UserSetting.delete_all(self.module, **user)
        self._flush_cache()
| mit |
ProfessionalIT/professionalit-webiste | sdk/google_appengine/lib/django-1.4/django/contrib/admin/__init__.py | 314 | 1824 | # ACTION_CHECKBOX_NAME is unused, but should stay since its import from here
# has been referenced in documentation.
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import ModelAdmin, HORIZONTAL, VERTICAL
from django.contrib.admin.options import StackedInline, TabularInline
from django.contrib.admin.sites import AdminSite, site
from django.contrib.admin.filters import (ListFilter, SimpleListFilter,
FieldListFilter, BooleanFieldListFilter, RelatedFieldListFilter,
ChoicesFieldListFilter, DateFieldListFilter, AllValuesFieldListFilter)
def autodiscover():
    """
    Import every INSTALLED_APPS app's ``admin`` module so it can register
    its admin bits; apps without an admin module are silently skipped.
    """
    import copy
    from django.conf import settings
    from django.utils.importlib import import_module
    from django.utils.module_loading import module_has_submodule

    for app in settings.INSTALLED_APPS:
        mod = import_module(app)
        try:
            # Snapshot the registry so a failed admin import can be rolled
            # back cleanly.
            registry_snapshot = copy.copy(site._registry)
            import_module('%s.admin' % app)
        except:
            # Restore the pre-import registry: the import will reoccur on
            # the next request and could otherwise raise NotRegistered and
            # AlreadyRegistered exceptions (see #8245).
            site._registry = registry_snapshot

            # Only swallow the error when the app simply has no admin
            # module; a genuine failure inside an existing admin module
            # must bubble up.
            if module_has_submodule(mod, 'admin'):
                raise
| lgpl-3.0 |
av8ramit/tensorflow | tensorflow/contrib/boosted_trees/python/kernel_tests/split_handler_ops_test.py | 18 | 25987 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the GTFlow split handler Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.boosted_trees.proto import learner_pb2
from tensorflow.contrib.boosted_trees.proto import split_info_pb2
from tensorflow.contrib.boosted_trees.python.ops import split_handler_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class SplitHandlerOpsTest(test_util.TensorFlowTestCase):
  def testMakeDenseSplit(self):
    """Tests building inequality splits for a dense feature column."""
    with self.test_session() as sess:
      # The data looks like the following after dividing by number of steps (2).
      # Gradients | Partition | Dense Quantile |
      # (1.2, 0.2) | 0 | 0 |
      # (-0.3, 0.19) | 0 | 1 |
      # (4.0, 0.13) | 1 | 1 |
      partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
      bucket_ids = array_ops.constant(
          [[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64)
      gradients = array_ops.constant([2.4, -0.6, 8.0])
      hessians = array_ops.constant([0.4, 0.38, 0.26])
      bucket_boundaries = [0.3, 0.52]
      partitions, gains, splits = (
          split_handler_ops.build_dense_inequality_splits(
              num_minibatches=2,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0.1,
              l2_regularization=1,
              tree_complexity_regularization=0,
              min_node_weight=0,
              class_id=-1,
              feature_column_group_id=0,
              multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
      partitions, gains, splits = sess.run([partitions, gains, splits])
      self.assertAllEqual([0, 1], partitions)

      # Check the split on partition 0.
      # Expected weights/gains below follow the regularized leaf formulas:
      # weight = -(grad -/+ l1) / (hessian + l2), gain = weight * -(grad -/+ l1).
      # -(1.2 - 0.1) / (0.2 + 1)
      expected_left_weight = -0.91666

      # expected_left_weight * -(1.2 - 0.1)
      expected_left_gain = 1.0083333333333331

      # (-0.3 + 0.1) / (0.19 + 1)
      expected_right_weight = 0.1680672

      # expected_right_weight * -(-0.3 + 0.1)
      expected_right_gain = 0.033613445378151252

      # (-0.3 + 1.2 - 0.1) ** 2 / (0.19 + 0.2 + 1)
      expected_bias_gain = 0.46043165467625885

      split_info = split_info_pb2.SplitInfo()
      split_info.ParseFromString(splits[0])
      left_child = split_info.left_child.vector
      right_child = split_info.right_child.vector
      split_node = split_info.split_node.dense_float_binary_split
      self.assertAllClose(
          expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
          0.00001)
      self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
      self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
      self.assertEqual(0, split_node.feature_column)
      self.assertAllClose(0.3, split_node.threshold, 0.00001)

      # Check the split on partition 1.
      # (-4 + 0.1) / (0.13 + 1)
      expected_left_weight = -3.4513274336283186
      expected_right_weight = 0

      split_info = split_info_pb2.SplitInfo()
      split_info.ParseFromString(splits[1])
      left_child = split_info.left_child.vector
      right_child = split_info.right_child.vector
      split_node = split_info.split_node.dense_float_binary_split
      # There's only one active bucket here so zero gain is expected.
      self.assertAllClose(0.0, gains[1], 0.00001)
      self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
      self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
      self.assertEqual(0, split_node.feature_column)
      self.assertAllClose(0.52, split_node.threshold, 0.00001)
  def testMakeMulticlassDenseSplit(self):
    """Tests dense splits with the FULL_HESSIAN multiclass strategy."""
    with self.test_session() as sess:
      partition_ids = array_ops.constant([0, 0, 1], dtype=dtypes.int32)
      bucket_ids = array_ops.constant(
          [[0, 0], [1, 0], [1, 0]], dtype=dtypes.int64)
      # Per-example gradients are 2-vectors and hessians are 2x2 matrices
      # (one per example) since this is a 2-class FULL_HESSIAN setup.
      gradients = array_ops.constant([[2.4, 3.0], [-0.6, 0.1], [8.0, 1.0]])
      hessians = array_ops.constant([[[0.4, 1], [1, 1]], [[0.38, 1], [1, 1]],
                                     [[0.26, 1], [1, 1]]])
      bucket_boundaries = [0.3, 0.52]
      partitions, gains, splits = (
          split_handler_ops.build_dense_inequality_splits(
              num_minibatches=2,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0,
              l2_regularization=1,
              tree_complexity_regularization=0,
              min_node_weight=0,
              class_id=-1,
              feature_column_group_id=0,
              multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
      partitions, gains, splits = sess.run([partitions, gains, splits])
      self.assertAllEqual([0, 1], partitions)

      split_info = split_info_pb2.SplitInfo()
      split_info.ParseFromString(splits[0])
      left_child = split_info.left_child.vector
      right_child = split_info.right_child.vector
      split_node = split_info.split_node.dense_float_binary_split
      # Each leaf has 2 element vector.
      self.assertEqual(2, len(left_child.value))
      self.assertEqual(2, len(right_child.value))
      self.assertEqual(0, split_node.feature_column)
      self.assertAllClose(0.3, split_node.threshold, 1e-6)
  def testMakeDenseSplitEmptyInputs(self):
    """Tests that empty inputs yield no partitions, gains or splits."""
    with self.test_session() as sess:
      partition_ids = array_ops.constant([], dtype=dtypes.int32)
      bucket_ids = array_ops.constant([[]], dtype=dtypes.int64)
      gradients = array_ops.constant([])
      hessians = array_ops.constant([])
      bucket_boundaries = [0.3, 0.52]
      partitions, gains, splits = (
          split_handler_ops.build_dense_inequality_splits(
              num_minibatches=0,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0.1,
              l2_regularization=1,
              tree_complexity_regularization=0,
              min_node_weight=0,
              class_id=-1,
              feature_column_group_id=0,
              multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
      partitions, gains, splits = sess.run([partitions, gains, splits])
      # .assertEmpty doesn't exist on ubuntu-contrib
      self.assertEqual(0, len(partitions))
      self.assertEqual(0, len(gains))
      self.assertEqual(0, len(splits))
  def testMakeSparseSplit(self):
    """Tests inequality splits for a one-dimensional sparse feature column."""
    with self.test_session() as sess:
      # The data looks like the following after dividing by number of steps (2).
      # Gradients | Partition | bucket ID |
      # (0.9, 0.39) | 0 | -1 |
      # (1.2, 0.2) | 0 | 0 |
      # (0.2, 0.12) | 0 | 1 |
      # (4.0, 0.13) | 1 | -1 |
      # (4.0, 0.13) | 1 | 1 |
      # Bucket ID -1 is the bias bucket holding the per-partition totals.
      partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
      # We have only 1 dimension in our sparse feature column.
      bucket_ids = array_ops.constant([-1, 0, 1, -1, 1], dtype=dtypes.int64)
      dimension_ids = array_ops.constant([0, 0, 0, 0, 0], dtype=dtypes.int64)
      bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1)
      gradients = array_ops.constant([1.8, 2.4, 0.4, 8.0, 8.0])
      hessians = array_ops.constant([0.78, 0.4, 0.24, 0.26, 0.26])
      bucket_boundaries = array_ops.constant([0.3, 0.52])
      partitions, gains, splits = (
          split_handler_ops.build_sparse_inequality_splits(
              num_minibatches=2,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0,
              l2_regularization=2,
              tree_complexity_regularization=0,
              min_node_weight=0,
              feature_column_group_id=0,
              bias_feature_id=-1,
              class_id=-1,
              multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
      partitions, gains, splits = (sess.run([partitions, gains, splits]))
    self.assertAllEqual([0, 1], partitions)
    self.assertEqual(2, len(splits))
    # Check the split on partition 0.
    # -(0.2 + 1.2) / (0.12 + 0.2 + 2)
    expected_left_weight = -0.603448275862069

    # (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
    expected_left_gain = 0.8448275862068965

    # 0.5 / (0.07 + 2)
    expected_right_weight = 0.24154589371980678

    # 0.5 ** 2 / (0.07 + 2)
    expected_right_gain = 0.12077294685990339

    # (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
    expected_bias_gain = 0.3389121338912133

    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    # Default direction is right: missing values follow the right branch.
    split_node = split_info.split_node.sparse_float_binary_split_default_right
    self.assertAllClose(
        expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])
    self.assertAllClose([expected_left_weight], left_child.value)
    self.assertAllClose([expected_right_weight], right_child.value)
    self.assertEqual(0, split_node.split.feature_column)
    # Sparse is one dimensional.
    self.assertEqual(0, split_node.split.dimension_id)
    self.assertAllClose(0.52, split_node.split.threshold)

    # Check the split on partition 1.
    expected_left_weight = -1.8779342723004695
    expected_right_weight = 0

    # Verify candidate for partition 1, there's only one active bucket here
    # so zero gain is expected.
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.sparse_float_binary_split_default_left
    self.assertAllClose(0.0, gains[1])
    self.assertAllClose([expected_left_weight], left_child.value)
    self.assertAllClose([expected_right_weight], right_child.value)
    self.assertEqual(0, split_node.split.feature_column)
    # Sparse is one dimensional.
    self.assertEqual(0, split_node.split.dimension_id)
    self.assertAllClose(0.52, split_node.split.threshold)
  def testMakeSparseSplitAllEmptyDimensions(self):
    """Tests split handler op when all dimensions have only bias bucket id."""
    with self.test_session() as sess:
      # The data looks like the following after dividing by number of steps (2).
      # Gradients | Partition | Dimension | bucket ID |
      # (0.9, 0.39) | 0 | 0 | -1 |
      # (4.0, 0.13) | 1 | 0 | -1 |
      # Only the bias bucket (-1) is present, so no split can be built.
      partition_ids = array_ops.constant([0, 1], dtype=dtypes.int32)
      # We have only 1 dimension in our sparse feature column.
      bucket_ids = array_ops.constant([[-1, 0], [-1, 0]], dtype=dtypes.int64)
      gradients = array_ops.constant([1.8, 8.0])
      hessians = array_ops.constant([0.78, 0.26])
      bucket_boundaries = array_ops.constant([0.3, 0.52])
      partitions, gains, splits = (
          split_handler_ops.build_sparse_inequality_splits(
              num_minibatches=2,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0,
              l2_regularization=2,
              tree_complexity_regularization=0,
              min_node_weight=0,
              feature_column_group_id=0,
              bias_feature_id=-1,
              class_id=-1,
              multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
      partitions, gains, splits = (sess.run([partitions, gains, splits]))
    self.assertEqual(0, len(partitions))
    self.assertEqual(0, len(splits))
  def testMakeSparseMultidimensionalSplit(self):
    """Tests inequality splits for a multi-dimensional sparse column."""
    with self.test_session() as sess:
      # Num of steps is 2.
      # The feature column is three dimensional.
      # First dimension has bias bucket only, the second has bias bucket and
      # two valid buckets, the third has just one bias bucket and one valid
      # bucket.
      # Gradients | Partition | Dimension | bucket ID |
      # (0.9, 0.39) | 0 | 0 | -1 |
      # (1.2, 0.2) | 0 | 1 | 0 |
      # (0.2, 0.12) | 0 | 1 | 2 |
      # (0.1, 0.1) | 0 | 2 | 3 |
      # Now second node - nothing interesting there, just one dimension.
      # Second node has the same bucket ids for all dimensions.
      # (4.0, 0.13) | 1 | 0 | -1 |
      # (4.0, 0.13) | 1 | 2 | 3 |

      # Tree node ids.
      partition_ids = array_ops.constant([0, 0, 0, 0, 1, 1], dtype=dtypes.int32)

      dimension_ids = array_ops.constant([0, 1, 1, 2, 0, 2], dtype=dtypes.int64)
      bucket_ids = array_ops.constant([-1, 0, 2, 3, -1, 3], dtype=dtypes.int64)
      bucket_ids = array_ops.stack([bucket_ids, dimension_ids], axis=1)

      gradients = array_ops.constant([1.8, 2.4, 0.4, 0.2, 8.0, 8.0])
      hessians = array_ops.constant([0.78, 0.4, 0.24, 0.2, 0.26, 0.26])
      bucket_boundaries = array_ops.constant([0.3, 0.52, 0.58, 0.6])
      partitions, gains, splits = (
          split_handler_ops.build_sparse_inequality_splits(
              num_minibatches=2,
              partition_ids=partition_ids,
              bucket_ids=bucket_ids,
              gradients=gradients,
              hessians=hessians,
              bucket_boundaries=bucket_boundaries,
              l1_regularization=0,
              l2_regularization=2,
              tree_complexity_regularization=0,
              min_node_weight=0,
              feature_column_group_id=0,
              bias_feature_id=-1,
              class_id=-1,
              multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
      partitions, gains, splits = (sess.run([partitions, gains, splits]))
    self.assertAllEqual([0, 1], partitions)
    self.assertEqual(2, len(splits))
    # Check the split on node 0 - it should split on second dimension
    # -(0.2 + 1.2) / (0.12 + 0.2 + 2)
    expected_left_weight = -0.603448275862069

    # (0.2 + 1.2) ** 2 / (0.12 + 0.2 + 2)
    expected_left_gain = 0.8448275862068965

    # 0.5 / (0.07 + 2)
    expected_right_weight = 0.24154589371980678

    # 0.5 ** 2 / (0.07 + 2)
    expected_right_gain = 0.12077294685990339

    # (0.2 + 1.2 - 0.5) ** 2 / (0.12 + 0.2 + 0.07 + 2)
    expected_bias_gain = 0.3389121338912133

    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.sparse_float_binary_split_default_right

    self.assertAllClose(
        expected_left_gain + expected_right_gain - expected_bias_gain, gains[0])

    self.assertAllClose([expected_left_weight], left_child.value)
    self.assertAllClose([expected_right_weight], right_child.value)
    self.assertEqual(0, split_node.split.feature_column)
    # Split happened on second dimension.
    self.assertEqual(1, split_node.split.dimension_id)
    self.assertAllClose(0.58, split_node.split.threshold)

    # Check the split on partition 1.
    expected_left_weight = -1.8779342723004695
    expected_right_weight = 0

    # Verify candidate for partition 1, there's only one active bucket here
    # so zero gain is expected.
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.sparse_float_binary_split_default_left
    self.assertAllClose(0.0, gains[1])
    self.assertAllClose([expected_left_weight], left_child.value)
    self.assertAllClose([expected_right_weight], right_child.value)
    self.assertEqual(0, split_node.split.feature_column)
    self.assertEqual(2, split_node.split.dimension_id)
    self.assertAllClose(0.6, split_node.split.threshold)
def testMakeMulticlassSparseSplit(self):
  """Tests split handler op.

  Builds a sparse inequality split with FULL_HESSIAN multiclass strategy and
  verifies the resulting split proto: per-class leaf vectors, the split
  feature column, and the chosen threshold.
  """
  with self.test_session() as sess:
    partition_ids = array_ops.constant([0, 0, 0, 1, 1], dtype=dtypes.int32)
    # bucket_id -1 marks the bias feature (see bias_feature_id below).
    bucket_ids = array_ops.constant(
        [[-1, 0], [0, 0], [1, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
    # Two-class gradients: one gradient value per class per example.
    gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
                                    [8.0, 3.1], [8.0, 0.8]])
    # Full 2x2 hessian matrix per example (FULL_HESSIAN strategy).
    hessian_0 = [[0.78, 1], [12, 1]]
    hessian_1 = [[0.4, 1], [1, 1]]
    hessian_2 = [[0.24, 1], [1, 1]]
    hessian_3 = [[0.26, 1], [1, 1]]
    hessian_4 = [[0.26, 1], [1, 1]]
    hessians = array_ops.constant(
        [hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
    bucket_boundaries = array_ops.constant([0.3, 0.52])
    partitions, gains, splits = (
        split_handler_ops.build_sparse_inequality_splits(
            num_minibatches=2,
            partition_ids=partition_ids,
            bucket_ids=bucket_ids,
            gradients=gradients,
            hessians=hessians,
            bucket_boundaries=bucket_boundaries,
            l1_regularization=0,
            l2_regularization=2,
            tree_complexity_regularization=0,
            min_node_weight=0,
            feature_column_group_id=0,
            bias_feature_id=-1,
            class_id=-1,
            multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
    partitions, gains, splits = (sess.run([partitions, gains, splits]))
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.sparse_float_binary_split_default_right
    # Each leaf has 2 element vector.
    self.assertEqual(2, len(left_child.value))
    self.assertEqual(2, len(right_child.value))
    self.assertEqual(0, split_node.split.feature_column)
    # Threshold must match the second bucket boundary.
    self.assertAllClose(0.52, split_node.split.threshold)
def testMakeCategoricalEqualitySplit(self):
  """Tests split handler op for categorical equality split.

  Verifies both partitions: partition 0 gets a real split on feature id 2
  with hand-computed gains/weights; partition 1 has a single active feature
  and therefore zero gain.
  """
  with self.test_session() as sess:
    # The data looks like the following after dividing by number of steps (2).
    # Gradients | Partition | Feature ID |
    # (0.9, 0.39) | 0 | -1 |
    # (0.2, 0.12) | 0 | 1 |
    # (1.4, 0.32) | 0 | 2 |
    # (4.0, 0.13) | 1 | -1 |
    # (4.0, 0.13) | 1 | 1 |
    gradients = [1.8, 0.4, 2.8, 8.0, 8.0]
    hessians = [0.78, 0.24, 0.64, 0.26, 0.26]
    partition_ids = [0, 0, 0, 1, 1]
    # Feature id -1 is the bias feature (see bias_feature_id below).
    feature_ids = array_ops.constant(
        [[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
    partitions, gains, splits = (
        split_handler_ops.build_categorical_equality_splits(
            num_minibatches=2,
            partition_ids=partition_ids,
            feature_ids=feature_ids,
            gradients=gradients,
            hessians=hessians,
            l1_regularization=0.1,
            l2_regularization=1,
            tree_complexity_regularization=0,
            min_node_weight=0,
            feature_column_group_id=0,
            bias_feature_id=-1,
            class_id=-1,
            multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
    partitions, gains, splits = sess.run([partitions, gains, splits])
    self.assertAllEqual([0, 1], partitions)
    # Check the split on partition 0.
    # -(0.2 + 1.2 - 0.1) / (0.12 + 0.2 + 1)
    expected_left_weight = -0.9848484848484846
    # (0.2 + 1.2 - 0.1) ** 2 / (0.12 + 0.2 + 1)
    expected_left_gain = 1.2803030303030298
    # -(-0.5 + 0.1) / (0.07 + 1)
    expected_right_weight = 0.37383177570093457
    # (-0.5 + 0.1) ** 2 / (0.07 + 1)
    expected_right_gain = 0.14953271028037385
    # (0.2 + -0.5 + 1.2 - 0.1) ** 2 / (0.12 + 0.07 + 0.2 + 1)
    expected_bias_gain = 0.46043165467625885
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[0])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(2, split_node.feature_id)
    self.assertAllClose(
        expected_left_gain + expected_right_gain - expected_bias_gain, gains[0],
        0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    # Check the split on partition 1.
    # (-4 + 0.1) / (0.13 + 1)
    expected_left_weight = -3.4513274336283186
    # (-4 + 0.1) ** 2 / (0.13 + 1)
    expected_left_gain = 13.460176991150442
    expected_right_weight = 0
    expected_right_gain = 0
    # (-4 + 0.1) ** 2 / (0.13 + 1)
    expected_bias_gain = 13.460176991150442
    # Verify candidate for partition 1, there's only one active feature here
    # so zero gain is expected.
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    self.assertAllClose(0.0, gains[1], 0.00001)
    self.assertAllClose([expected_left_weight], left_child.value, 0.00001)
    self.assertAllClose([expected_right_weight], right_child.value, 0.00001)
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(1, split_node.feature_id)
def testMakeMulticlassCategoricalEqualitySplit(self):
  """Tests split handler op for categorical equality split in multiclass.

  Uses the FULL_HESSIAN strategy and checks only structural properties of
  the split for partition 1 (leaf vector sizes, feature column/id).
  """
  with self.test_session() as sess:
    # Two-class gradients: one gradient value per class per example.
    gradients = array_ops.constant([[1.8, 3.5], [2.4, 1.0], [0.4, 4.0],
                                    [9.0, 3.1], [3.0, 0.8]])
    # Full 2x2 hessian matrix per example (FULL_HESSIAN strategy).
    hessian_0 = [[0.78, 1], [12, 1]]
    hessian_1 = [[0.4, 1], [1, 1]]
    hessian_2 = [[0.24, 1], [1, 1]]
    hessian_3 = [[0.16, 2], [-1, 1]]
    hessian_4 = [[0.6, 1], [2, 1]]
    hessians = array_ops.constant(
        [hessian_0, hessian_1, hessian_2, hessian_3, hessian_4])
    partition_ids = [0, 0, 0, 1, 1]
    # Feature id -1 is the bias feature (see bias_feature_id below).
    feature_ids = array_ops.constant(
        [[-1, 0], [1, 0], [2, 0], [-1, 0], [1, 0]], dtype=dtypes.int64)
    partitions, gains, splits = (
        split_handler_ops.build_categorical_equality_splits(
            num_minibatches=2,
            partition_ids=partition_ids,
            feature_ids=feature_ids,
            gradients=gradients,
            hessians=hessians,
            l1_regularization=0.1,
            l2_regularization=1,
            tree_complexity_regularization=0,
            min_node_weight=0,
            feature_column_group_id=0,
            bias_feature_id=-1,
            class_id=-1,
            multiclass_strategy=learner_pb2.LearnerConfig.FULL_HESSIAN))
    partitions, gains, splits = sess.run([partitions, gains, splits])
    self.assertAllEqual([0, 1], partitions)
    split_info = split_info_pb2.SplitInfo()
    split_info.ParseFromString(splits[1])
    left_child = split_info.left_child.vector
    right_child = split_info.right_child.vector
    split_node = split_info.split_node.categorical_id_binary_split
    # Each leaf has 2 element vector.
    self.assertEqual(2, len(left_child.value))
    self.assertEqual(2, len(right_child.value))
    self.assertEqual(0, split_node.feature_column)
    self.assertEqual(1, split_node.feature_id)
def testMakeCategoricalEqualitySplitEmptyInput(self):
  """Tests that empty inputs yield empty partitions, gains and splits."""
  with self.test_session() as sess:
    # All inputs empty; num_minibatches=0 so no normalization occurs.
    gradients = []
    hessians = []
    partition_ids = []
    feature_ids = [[]]
    partitions, gains, splits = (
        split_handler_ops.build_categorical_equality_splits(
            num_minibatches=0,
            partition_ids=partition_ids,
            feature_ids=feature_ids,
            gradients=gradients,
            hessians=hessians,
            l1_regularization=0.1,
            l2_regularization=1,
            tree_complexity_regularization=0,
            min_node_weight=0,
            feature_column_group_id=0,
            bias_feature_id=-1,
            class_id=-1,
            multiclass_strategy=learner_pb2.LearnerConfig.TREE_PER_CLASS))
    partitions, gains, splits = (sess.run([partitions, gains, splits]))
    # Nothing to split on: every output must be empty.
    self.assertEqual(0, len(partitions))
    self.assertEqual(0, len(gains))
    self.assertEqual(0, len(splits))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
  googletest.main()
| apache-2.0 |
hassanibi/erpnext | erpnext/patches/v5_0/index_on_account_and_gl_entry.py | 60 | 1275 | from __future__ import unicode_literals
import frappe
def execute():
	"""Rebuild indexes for accounting doctypes.

	For each doctype in ``index_map``: drop indexes that exist on obsolete
	columns, then add single-column indexes for the listed columns that are
	not already indexed. Runs as a one-shot migration patch; no return value.
	"""
	index_map = {
		"Account": ["parent_account", "lft", "rgt"],
		"GL Entry": ["posting_date", "account", 'party', "voucher_no"],
		"Sales Invoice": ["posting_date", "debit_to", "customer"],
		"Purchase Invoice": ["posting_date", "credit_to", "supplier"]
	}

	# Columns whose indexes are no longer needed and should be dropped.
	obsolete_columns = ("parent", "group_or_ledger", "is_group", "is_pl_account", "debit_or_credit",
		"account_name", "company", "project", "voucher_date", "due_date", "bill_no",
		"bill_date", "is_opening", "fiscal_year", "outstanding_amount")

	def get_existing_indexes(dt):
		# Returns (Key_name, Column_name) pairs for all non-primary-key
		# indexes of `tab{dt}`. Factored out because it must be queried
		# both before and after the drop phase.
		return [(d.Key_name, d.Column_name) for d in frappe.db.sql("""show index from `tab{0}`
			where Column_name != 'name'""".format(dt), as_dict=1)]

	for dt, indexes in index_map.items():
		# Phase 1: drop indexes that cover obsolete columns.
		for key_name, column in get_existing_indexes(dt):
			if column in obsolete_columns:
				frappe.db.sql("alter table `tab{0}` drop index {1}".format(dt, key_name))

		# Phase 2: re-read the surviving indexes and add any missing ones.
		# Use a set for O(1) membership checks.
		existing_indexed_columns = {column for _key, column in get_existing_indexes(dt)}
		for new in indexes:
			if new not in existing_indexed_columns:
				frappe.db.sql("alter table `tab{0}` add index ({1})".format(dt, new))
jnerin/ansible | test/units/modules/network/ironware/test_ironware_command.py | 57 | 4066 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests.mock import patch
from ansible.modules.network.ironware import ironware_command
from units.modules.utils import set_module_args
from .ironware_module import TestIronwareModule, load_fixture
class TestIronwareCommandModule(TestIronwareModule):
    """Unit tests for the ironware_command Ansible module.

    ``run_commands`` is patched so each requested CLI command is served from
    a fixture file named after the command (spaces replaced by underscores).
    """

    module = ironware_command

    def setUp(self):
        # Patch run_commands for the lifetime of each test.
        super(TestIronwareCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.ironware.ironware_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestIronwareCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        # Wire the mocked run_commands to return fixture file contents.
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for command in commands:
                # Fixture file name mirrors the command, e.g. "show_version".
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_ironware_command_simple(self):
        # A single command produces a single stdout entry.
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('System Mode: MLX'))

    def test_ironware_command_multiple(self):
        # The same command twice yields two stdout entries.
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('System Mode: MLX'))

    def test_ironware_command_wait_for(self):
        # wait_for condition that matches: module succeeds.
        wait_for = 'result[0] contains "IronWare"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_ironware_command_wait_for_fails(self):
        # Unsatisfiable wait_for: module fails after the default 10 retries.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)

    def test_ironware_command_retries(self):
        # Explicit retries=2 limits the number of command attempts.
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_ironware_command_match_any(self):
        # match=any: one satisfied condition is enough.
        wait_for = ['result[0] contains "IronWare"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_ironware_command_match_all(self):
        # match=all: every condition must hold.
        wait_for = ['result[0] contains "IronWare"',
                    'result[0] contains "uptime is"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_ironware_command_match_all_failure(self):
        # match=all with one failing condition: module must fail.
        wait_for = ['result[0] contains "IronWare"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
akrizhevsky/cuda-convnet2 | shownet.py | 180 | 18206 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
from tarfile import TarFile, TarInfo
from matplotlib import pylab as pl
import numpy as n
import getopt as opt
from python_util.util import *
from math import sqrt, ceil, floor
from python_util.gpumodel import IGPUModel
import random as r
import numpy.random as nr
from convnet import ConvNet
from python_util.options import *
from PIL import Image
from time import sleep
class ShowNetError(Exception):
    """Raised when a requested layer, cost, or option is not defined by the loaded convnet."""
class ShowConvNet(ConvNet):
    """Visualization front-end for a trained ConvNet checkpoint.

    Depending on command-line options it plots the training/test cost curve,
    renders learned filters of a layer, or shows/saves model predictions on
    test batches. GPU model state is only initialized when predictions are
    requested (--show-preds). Python 2 code.
    """

    def __init__(self, op, load_dic):
        ConvNet.__init__(self, op, load_dic)

    def init_data_providers(self):
        """Set up data providers; use no-op dummies unless predictions need real data."""
        self.need_gpu = self.op.get_value('show_preds')
        class Dummy:
            # Stand-in provider so plotting-only modes avoid loading data.
            def advance_batch(self):
                pass
        if self.need_gpu:
            ConvNet.init_data_providers(self)
        else:
            self.train_data_provider = self.test_data_provider = Dummy()

    def import_model(self):
        # Only load the native GPU model library when predictions are needed.
        if self.need_gpu:
            ConvNet.import_model(self)

    def init_model_state(self):
        # Remember which softmax layer to read predictions from.
        if self.op.get_value('show_preds'):
            self.softmax_name = self.op.get_value('show_preds')

    def init_model_lib(self):
        if self.need_gpu:
            ConvNet.init_model_lib(self)

    def plot_cost(self):
        """Plot training and test error curves for the cost given by --show-cost.

        Test errors are tiled/stretched to align with per-batch training errors;
        raises ShowNetError if the named cost is unknown.
        """
        if self.show_cost not in self.train_outputs[0][0]:
            raise ShowNetError("Cost function with name '%s' not defined by given convnet." % self.show_cost)
#        print self.test_outputs
        # Apply the layer's output filter to each recorded output and pick cost_idx.
        train_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.train_outputs]
        test_errors = [eval(self.layers[self.show_cost]['outputFilter'])(o[0][self.show_cost], o[1])[self.cost_idx] for o in self.test_outputs]
        if self.smooth_test_errors:
            # Running average over the last len(test_batch_range) test points.
            test_errors = [sum(test_errors[max(0,i-len(self.test_batch_range)):i])/(i-max(0,i-len(self.test_batch_range))) for i in xrange(1,len(test_errors)+1)]
        numbatches = len(self.train_batch_range)
        # Stretch test errors so each value spans testing_freq training batches.
        test_errors = n.row_stack(test_errors)
        test_errors = n.tile(test_errors, (1, self.testing_freq))
        test_errors = list(test_errors.flatten())
        test_errors += [test_errors[-1]] * max(0,len(train_errors) - len(test_errors))
        test_errors = test_errors[:len(train_errors)]

        numepochs = len(train_errors) / float(numbatches)
        pl.figure(1)
        x = range(0, len(train_errors))
        pl.plot(x, train_errors, 'k-', label='Training set')
        pl.plot(x, test_errors, 'r-', label='Test set')
        pl.legend()
        # Label x ticks with epoch numbers, thinned out for readability.
        ticklocs = range(numbatches, len(train_errors) - len(train_errors) % numbatches + 1, numbatches)
        epoch_label_gran = int(ceil(numepochs / 20.))
        epoch_label_gran = int(ceil(float(epoch_label_gran) / 10) * 10) if numepochs >= 10 else epoch_label_gran
        ticklabels = map(lambda x: str((x[1] / numbatches)) if x[0] % epoch_label_gran == epoch_label_gran-1 else '', enumerate(ticklocs))

        pl.xticks(ticklocs, ticklabels)
        pl.xlabel('Epoch')
#        pl.ylabel(self.show_cost)
        pl.title('%s[%d]' % (self.show_cost, self.cost_idx))
#        print "plotted cost"

    def make_filter_fig(self, filters, filter_start, fignum, _title, num_filters, combine_chans, FILTERS_PER_ROW=16):
        """Render a grid of filters into one matplotlib figure.

        filters is (channels, pixels, num_filters); if combine_chans the three
        channels are shown as one RGB image, otherwise each channel is drawn
        side by side in grayscale.
        """
        MAX_ROWS = 24
        MAX_FILTERS = FILTERS_PER_ROW * MAX_ROWS
        num_colors = filters.shape[0]
        f_per_row = int(ceil(FILTERS_PER_ROW / float(1 if combine_chans else num_colors)))
        filter_end = min(filter_start+MAX_FILTERS, num_filters)
        filter_rows = int(ceil(float(filter_end - filter_start) / f_per_row))

        filter_pixels = filters.shape[1]
        filter_size = int(sqrt(filters.shape[1]))
        fig = pl.figure(fignum)
        fig.text(.5, .95, '%s %dx%d filters %d-%d' % (_title, filter_size, filter_size, filter_start, filter_end-1), horizontalalignment='center')
        num_filters = filter_end - filter_start
        # bigpic holds all filter tiles with a 1-pixel border between them.
        if not combine_chans:
            bigpic = n.zeros((filter_size * filter_rows + filter_rows + 1, filter_size*num_colors * f_per_row + f_per_row + 1), dtype=n.single)
        else:
            bigpic = n.zeros((3, filter_size * filter_rows + filter_rows + 1, filter_size * f_per_row + f_per_row + 1), dtype=n.single)

        for m in xrange(filter_start,filter_end ):
            filter = filters[:,:,m]
            y, x = (m - filter_start) / f_per_row, (m - filter_start) % f_per_row
            if not combine_chans:
                for c in xrange(num_colors):
                    filter_pic = filter[c,:].reshape((filter_size,filter_size))
                    bigpic[1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                           1 + (1 + filter_size*num_colors) * x + filter_size*c:1 + (1 + filter_size*num_colors) * x + filter_size*(c+1)] = filter_pic
            else:
                filter_pic = filter.reshape((3, filter_size,filter_size))
                bigpic[:,
                       1 + (1 + filter_size) * y:1 + (1 + filter_size) * y + filter_size,
                       1 + (1 + filter_size) * x:1 + (1 + filter_size) * x + filter_size] = filter_pic

        pl.xticks([])
        pl.yticks([])
        if not combine_chans:
            pl.imshow(bigpic, cmap=pl.cm.gray, interpolation='nearest')
        else:
            # Move the channel axis last so imshow treats it as RGB.
            bigpic = bigpic.swapaxes(0,2).swapaxes(0,1)
            pl.imshow(bigpic, interpolation='nearest')

    def plot_filters(self):
        """Plot learned filters of the layer named by --show-filters.

        Handles fc, conv and local layers; optionally converts YUV filters to
        RGB and normalizes filters before rendering via make_filter_fig.
        """
        FILTERS_PER_ROW = 16
        filter_start = 0 # First filter to show
        if self.show_filters not in self.layers:
            raise ShowNetError("Layer with name '%s' not defined by given convnet." % self.show_filters)
        layer = self.layers[self.show_filters]
        filters = layer['weights'][self.input_idx]
#        filters = filters - filters.min()
#        filters = filters / filters.max()
        if layer['type'] == 'fc': # Fully-connected layer
            num_filters = layer['outputs']
            channels = self.channels
            filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])
        elif layer['type'] in ('conv', 'local'): # Conv layer
            num_filters = layer['filters']
            channels = layer['filterChannels'][self.input_idx]
            if layer['type'] == 'local':
                filters = filters.reshape((layer['modules'], channels, layer['filterPixels'][self.input_idx], num_filters))
                filters = filters[:, :, :, self.local_plane] # first map for now (modules, channels, pixels)
                filters = filters.swapaxes(0,2).swapaxes(0,1)
                num_filters = layer['modules']
#                filters = filters.swapaxes(0,1).reshape(channels * layer['filterPixels'][self.input_idx], num_filters * layer['modules'])
#                num_filters *= layer['modules']
                FILTERS_PER_ROW = layer['modulesX']
            else:
                filters = filters.reshape(channels, filters.shape[0]/channels, filters.shape[1])

        # Convert YUV filters to RGB
        if self.yuv_to_rgb and channels == 3:
            R = filters[0,:,:] + 1.28033 * filters[2,:,:]
            G = filters[0,:,:] + -0.21482 * filters[1,:,:] + -0.38059 * filters[2,:,:]
            B = filters[0,:,:] + 2.12798 * filters[1,:,:]
            filters[0,:,:], filters[1,:,:], filters[2,:,:] = R, G, B
        combine_chans = not self.no_rgb and channels == 3

        # Make sure you don't modify the backing array itself here -- so no -= or /=
        if self.norm_filters:
            #print filters.shape
            filters = filters - n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).mean(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1))
            filters = filters / n.sqrt(n.tile(filters.reshape((filters.shape[0] * filters.shape[1], filters.shape[2])).var(axis=0).reshape(1, 1, filters.shape[2]), (filters.shape[0], filters.shape[1], 1)))
            #filters = filters - n.tile(filters.min(axis=0).min(axis=0), (3, filters.shape[1], 1))
            #filters = filters / n.tile(filters.max(axis=0).max(axis=0), (3, filters.shape[1], 1))
        #else:
        # NOTE: the global min/max rescale below runs unconditionally (the
        # else was commented out upstream), i.e. also after norm_filters.
        filters = filters - filters.min()
        filters = filters / filters.max()

        self.make_filter_fig(filters, filter_start, 2, 'Layer %s' % self.show_filters, num_filters, combine_chans, FILTERS_PER_ROW=FILTERS_PER_ROW)

    def plot_predictions(self):
        """Show or save model predictions on a test batch.

        With --save-preds, every image of the batch is classified and written
        as PNGs into a tar archive; otherwise a grid of random (or mistaken,
        with --only-errors) samples is plotted with their top class scores.
        """
        epoch, batch, data = self.get_next_batch(train=False) # get a test batch
        num_classes = self.test_data_provider.get_num_classes()
        NUM_ROWS = 2
        NUM_COLS = 4
        NUM_IMGS = NUM_ROWS * NUM_COLS if not self.save_preds else data[0].shape[1]
        NUM_TOP_CLASSES = min(num_classes, 5) # show this many top labels
        NUM_OUTPUTS = self.model_state['layers'][self.softmax_name]['outputs']
        PRED_IDX = 1

        label_names = [lab.split(',')[0] for lab in self.test_data_provider.batch_meta['label_names']]
        if self.only_errors:
            preds = n.zeros((data[0].shape[1], NUM_OUTPUTS), dtype=n.single)
        else:
            preds = n.zeros((NUM_IMGS, NUM_OUTPUTS), dtype=n.single)
            #rand_idx = nr.permutation(n.r_[n.arange(1), n.where(data[1] == 552)[1], n.where(data[1] == 795)[1], n.where(data[1] == 449)[1], n.where(data[1] == 274)[1]])[:NUM_IMGS]
            # Pick a random subset of the batch to display.
            rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
            if NUM_IMGS < data[0].shape[1]:
                data = [n.require(d[:,rand_idx], requirements='C') for d in data]
#        data += [preds]
        # Run the model
        print [d.shape for d in data], preds.shape
        self.libmodel.startFeatureWriter(data, [preds], [self.softmax_name])
        IGPUModel.finish_batch(self)
        print preds
        data[0] = self.test_data_provider.get_plottable_data(data[0])

        if self.save_preds:
            if not gfile.Exists(self.save_preds):
                gfile.MakeDirs(self.save_preds)
            preds_thresh = preds > 0.5 # Binarize predictions
            # Convert images back to uint8 [0, 255] for PNG export.
            data[0] = data[0] * 255.0
            data[0][data[0]<0] = 0
            data[0][data[0]>255] = 255
            data[0] = n.require(data[0], dtype=n.uint8)
            dir_name = '%s_predictions_batch_%d' % (os.path.basename(self.save_file), batch)
            tar_name = os.path.join(self.save_preds, '%s.tar' % dir_name)
            tfo = gfile.GFile(tar_name, "w")
            tf = TarFile(fileobj=tfo, mode='w')
            for img_idx in xrange(NUM_IMGS):
                img = data[0][img_idx,:,:,:]
                imsave = Image.fromarray(img)
                # Encode the outcome (correct / false positive / false negative)
                # and score into the file name.
                prefix = "CORRECT" if data[1][0,img_idx] == preds_thresh[img_idx,PRED_IDX] else "FALSE_POS" if preds_thresh[img_idx,PRED_IDX] == 1 else "FALSE_NEG"
                file_name = "%s_%.2f_%d_%05d_%d.png" % (prefix, preds[img_idx,PRED_IDX], batch, img_idx, data[1][0,img_idx])
#                gf = gfile.GFile(file_name, "w")
                file_string = StringIO()
                imsave.save(file_string, "PNG")
                tarinf = TarInfo(os.path.join(dir_name, file_name))
                tarinf.size = file_string.tell()
                file_string.seek(0)
                tf.addfile(tarinf, file_string)
            tf.close()
            tfo.close()
#            gf.close()
            print "Wrote %d prediction PNGs to %s" % (preds.shape[0], tar_name)
        else:
            fig = pl.figure(3, figsize=(12,9))
            fig.text(.4, .95, '%s test samples' % ('Mistaken' if self.only_errors else 'Random'))
            if self.only_errors:
                # what the net got wrong
                if NUM_OUTPUTS > 1:
                    err_idx = [i for i,p in enumerate(preds.argmax(axis=1)) if p not in n.where(data[2][:,i] > 0)[0]]
                else:
                    err_idx = n.where(data[1][0,:] != preds[:,0].T)[0]
                    print err_idx
                err_idx = r.sample(err_idx, min(len(err_idx), NUM_IMGS))
                data[0], data[1], preds = data[0][:,err_idx], data[1][:,err_idx], preds[err_idx,:]

            import matplotlib.gridspec as gridspec
            import matplotlib.colors as colors
            cconv = colors.ColorConverter()
            # Alternate image rows (height 2) with bar-chart rows (height 1).
            gs = gridspec.GridSpec(NUM_ROWS*2, NUM_COLS,
                                   width_ratios=[1]*NUM_COLS, height_ratios=[2,1]*NUM_ROWS )
            #print data[1]
            for row in xrange(NUM_ROWS):
                for col in xrange(NUM_COLS):
                    img_idx = row * NUM_COLS + col
                    if data[0].shape[0] <= img_idx:
                        break
                    pl.subplot(gs[(row * 2) * NUM_COLS + col])
#                    pl.subplot(NUM_ROWS*2, NUM_COLS, row * 2 * NUM_COLS + col + 1)
                    pl.xticks([])
                    pl.yticks([])
                    img = data[0][img_idx,:,:,:]
                    pl.imshow(img, interpolation='lanczos')
                    show_title = data[1].shape[0] == 1
                    true_label = [int(data[1][0,img_idx])] if show_title else n.where(data[1][:,img_idx]==1)[0]
                    #print true_label
                    #print preds[img_idx,:].shape
                    #print preds[img_idx,:].max()
                    true_label_names = [label_names[i] for i in true_label]
                    # Top-k (score, label) pairs sorted ascending by score.
                    img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
                    #print img_labels
                    axes = pl.subplot(gs[(row * 2 + 1) * NUM_COLS + col])
                    height = 0.5
                    ylocs = n.array(range(NUM_TOP_CLASSES))*height
                    pl.barh(ylocs, [l[0] for l in img_labels], height=height, \
                            color=['#ffaaaa' if l[1] in true_label_names else '#aaaaff' for l in img_labels])
                    #pl.title(", ".join(true_labels))
                    if show_title:
                        pl.title(", ".join(true_label_names), fontsize=15, fontweight='bold')
                    else:
                        print true_label_names
                    pl.yticks(ylocs + height/2, [l[1] for l in img_labels], x=1, backgroundcolor=cconv.to_rgba('0.65', alpha=0.5), weight='bold')
                    for line in enumerate(axes.get_yticklines()):
                        line[1].set_visible(False)
                    #pl.xticks([width], [''])
                    #pl.yticks([])
                    pl.xticks([])
                    pl.ylim(0, ylocs[-1] + height)
                    pl.xlim(0, 1)

    def start(self):
        """Dispatch to the requested visualization(s), then exit."""
        self.op.print_values()
#        print self.show_cost
        if self.show_cost:
            self.plot_cost()
        if self.show_filters:
            self.plot_filters()
        if self.show_preds:
            self.plot_predictions()
        if pl:
            pl.show()
        sys.exit(0)

    @classmethod
    def get_options_parser(cls):
        """Build the option parser: keep a few ConvNet options, add show/save flags."""
        op = ConvNet.get_options_parser()
        for option in list(op.options):
            if option not in ('gpu', 'load_file', 'inner_size', 'train_batch_range', 'test_batch_range', 'multiview_test', 'data_path', 'pca_noise', 'scalar_mean'):
                op.delete_option(option)
        op.add_option("show-cost", "show_cost", StringOptionParser, "Show specified objective function", default="")
        op.add_option("show-filters", "show_filters", StringOptionParser, "Show learned filters in specified layer", default="")
        op.add_option("norm-filters", "norm_filters", BooleanOptionParser, "Individually normalize filters shown with --show-filters", default=0)
        op.add_option("input-idx", "input_idx", IntegerOptionParser, "Input index for layer given to --show-filters", default=0)
        op.add_option("cost-idx", "cost_idx", IntegerOptionParser, "Cost function return value index for --show-cost", default=0)
        op.add_option("no-rgb", "no_rgb", BooleanOptionParser, "Don't combine filter channels into RGB in layer given to --show-filters", default=False)
        op.add_option("yuv-to-rgb", "yuv_to_rgb", BooleanOptionParser, "Convert RGB filters to YUV in layer given to --show-filters", default=False)
        op.add_option("channels", "channels", IntegerOptionParser, "Number of channels in layer given to --show-filters (fully-connected layers only)", default=0)
        op.add_option("show-preds", "show_preds", StringOptionParser, "Show predictions made by given softmax on test set", default="")
        op.add_option("save-preds", "save_preds", StringOptionParser, "Save predictions to given path instead of showing them", default="")
        op.add_option("only-errors", "only_errors", BooleanOptionParser, "Show only mistaken predictions (to be used with --show-preds)", default=False, requires=['show_preds'])
        op.add_option("local-plane", "local_plane", IntegerOptionParser, "Local plane to show", default=0)
        op.add_option("smooth-test-errors", "smooth_test_errors", BooleanOptionParser, "Use running average for test error plot?", default=1)

        op.options['load_file'].default = None
        return op
# Script entry point: parse options, load the checkpoint, run visualization.
if __name__ == "__main__":
    #nr.seed(6)
    try:
        op = ShowConvNet.get_options_parser()
        op, load_dic = IGPUModel.parse_options(op)
        model = ShowConvNet(op, load_dic)
        model.start()
    # Python 2 exception syntax; expected failures are reported, not re-raised.
    except (UnpickleError, ShowNetError, opt.GetoptError), e:
        print "----------------"
        print "Error:"
        print e
| apache-2.0 |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_0_0/report_job_run_broker.py | 14 | 51260 | from ..broker import Broker
class ReportJobRunBroker(Broker):
controller = "report_job_runs"
def index(self, **kwargs):
    """Lists the available report job runs.

    Any of the documented filter inputs may be used to narrow the list;
    unrecognized inputs are ignored. Of the various ways to query lists,
    this method is the most efficient.

    Supported keyword arguments include: id (report job identifier; scalar
    before API 2.5, array from 2.5), start (first record of the page,
    default 0), limit (page size, default 1000, max 10000), sort (field
    names, default id), dir ('asc'/'desc', default 'asc'), select (list of
    attributes to return; all if omitted), and, from API 2.8, goto_field /
    goto_value for NIOS GOTO row positioning.

    :return: Array of ReportJobRun objects matching the input criteria.
    """
    method_name = self._get_method_fullname("index")
    return self.api_list_request(method_name, kwargs)
def destroy(self, **kwargs):
    """Deletes the specified report job run from NetMRI.

    :param id: The internal NetMRI identifier for the report job (required).
    :return: None.
    """
    method_name = self._get_method_fullname("destroy")
    return self.api_request(method_name, kwargs)
def search(self, **kwargs):
    """List the available report job runs matching the input criteria.

    This method provides a more flexible search interface than the index
    method, but searching using this method is more demanding on the
    system and will not perform to the same level as the index method.
    The filter fields below are used as in the index method to filter
    the result, along with the optional query string and XML filter
    described below.

    Filterable fields (scalar values on api versions 2.1-2.4, arrays of
    the same type from api version 2.5 on):

    * auth_user_id (Integer) -- the internal NetMRI user id that created the Report Job
    * cancel_time (DateTime) -- when the report job was canceled
    * created_at (DateTime) -- when the report job was created
    * ext_job_id (Integer) -- the system process id for the report job
    * id (Integer) -- the internal NetMRI identifier for the report job
    * is_foreground (Integer) -- whether the report is being run in the NetMRI GUI
    * job_priority (Integer) -- lower priority jobs are processed first
    * last_checkin (DateTime) -- when the report job last changed status
    * report_id (Integer) -- the internal NetMRI identifier for a specific report
    * report_job_specification_id (Integer) -- the associated Report Job Specification
    * report_type (String) -- whether the report was scheduled or run on demand
    * size (Integer) -- the file size of a completed report
    * start_time (DateTime) -- when the report job started running
    * status (String) -- the report running status
    * updated_at (DateTime) -- when the report job was updated

    Additional options:

    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. Default 0.
    :type start: Integer
    :param limit: The size of the page of data, that is, the maximum number of records returned. The maximum limit is 10000. Default 1000.
    :type limit: Integer
    :param sort: The data field(s) to use for sorting the output. Default is id; valid values are the filterable fields above.
    :type sort: Array of String
    :param dir: The direction(s) in which to sort the data. Valid values are 'asc' and 'desc'. Default 'asc'.
    :type dir: Array of String
    :param select: The list of attributes to return for each ReportJobRun. If empty or omitted, all attributes will be returned.
    :type select: Array
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records (api version 2.8+).
    :type goto_field: String
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records (api version 2.8+).
    :type goto_value: String
    :param query: Matched against report job runs, looking to see if one or more of the listed attributes contain the passed value. Surround the value with '/' and '/' to perform a regular expression search rather than a containment operation.
    :type query: String
    :param xml_filter: A SetFilter XML structure to further refine the search. Applied AFTER any search query or field values, but before any limit options (api version 2.3+).
    :type xml_filter: String

    **Outputs**

    :return report_job_runs: An array of the ReportJobRun objects that match the specified input criteria.
    :rtype report_job_runs: Array of ReportJobRun
    """
    # Resolve the fully-qualified API method name, then issue the list request.
    endpoint = self._get_method_fullname("search")
    return self.api_list_request(endpoint, kwargs)
def find(self, **kwargs):
    """List the available report job runs matching the input specification.

    This provides the most flexible search specification of all the
    query mechanisms, enabling searching using comparison operations
    other than equality. However, it is more complex to use and will
    not perform as efficiently as the index or search methods.

    For each field listed below, three related inputs are accepted:

    * ``op_<field>`` (String) -- the operator to apply to the field.
      Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like,
      not like, is null, is not null, between. For the between operator
      the value will be treated as an Array if a comma delimited string
      is passed, and it must contain an even number of values.
    * ``val_f_<field>`` (String) -- the field named in this input will
      be compared to the value in <field> using the specified operator;
      the value is treated as another field name rather than a constant.
    * ``val_c_<field>`` (String) -- this value will be compared to the
      value in <field> using the specified operator; the value is
      treated as an explicit constant.

    Either ``val_f_<field>`` or ``val_c_<field>`` must be specified if
    ``op_<field>`` is specified. The available field names are:
    auth_user_id, cancel_time, created_at, ext_job_id, id,
    is_foreground, job_priority, last_checkin, report_id,
    report_job_specification_id, report_type, size, start_time,
    status, updated_at.

    Additional options:

    :param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. Default 0.
    :type start: Integer
    :param limit: The size of the page of data, that is, the maximum number of records returned. The maximum limit is 10000. Default 1000.
    :type limit: Integer
    :param sort: The data field(s) to use for sorting the output. Default is id; valid values are the field names above.
    :type sort: Array of String
    :param dir: The direction(s) in which to sort the data. Valid values are 'asc' and 'desc'. Default 'asc'.
    :type dir: Array of String
    :param select: The list of attributes to return for each ReportJobRun. If empty or omitted, all attributes will be returned.
    :type select: Array
    :param goto_field: The field name for NIOS GOTO that is used for locating a row position of records (api version 2.8+).
    :type goto_field: String
    :param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records (api version 2.8+).
    :type goto_value: String
    :param xml_filter: A SetFilter XML structure to further refine the search. Applied AFTER any search query or field values, but before any limit options (api version 2.3+).
    :type xml_filter: String

    **Outputs**

    :return report_job_runs: An array of the ReportJobRun objects that match the specified input criteria.
    :rtype report_job_runs: Array of ReportJobRun
    """
    # Resolve the fully-qualified API method name, then issue the list request.
    endpoint = self._get_method_fullname("find")
    return self.api_list_request(endpoint, kwargs)
def show_by_report_job_specification_id(self, **kwargs):
    """Shows the Report Job for the given report specification.

    (The previous summary read "Creates a new Report Job", which contradicts
    the ``show_by_report_job_specification_id`` broker method being invoked.)

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param report_job_specification_id: The internal NetMRI identifier of the report specification.
    :type report_job_specification_id: Integer

    **Outputs**

    """

    return self.api_request(self._get_method_fullname("show_by_report_job_specification_id"), kwargs)
def cancel(self, **kwargs):
    """Cancels running or pending reports.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: One or more ReportJobID values indicating the reports to cancel
    :type id: Array

    **Outputs**

    """

    # Resolve the broker-qualified method name once, then delegate.
    method_name = self._get_method_fullname("cancel")
    return self.api_request(method_name, kwargs)
def run_in_background(self, **kwargs):
    """Run reports in background.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: One or more ReportJobID values indicating the reports to run in the background.
    :type id: Array

    **Outputs**

    """

    # Resolve the broker-qualified method name once, then delegate.
    method_name = self._get_method_fullname("run_in_background")
    return self.api_request(method_name, kwargs)
def delete(self, **kwargs):
    """Deletes reports that have been canceled or completed.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: One or more ReportJobID values indicating the reports to delete
    :type id: Array

    **Outputs**

    """

    return self.api_request(self._get_method_fullname("delete"), kwargs)
def run_next(self, **kwargs):
    """Increases a report jobs priority to the highest pending job.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: One or more ReportJobID values indicating the reports to run next
    :type id: Array

    **Outputs**

    """

    # Resolve the broker-qualified method name once, then delegate.
    method_name = self._get_method_fullname("run_next")
    return self.api_request(method_name, kwargs)
def show(self, **kwargs):
    """Shows ReportJob details.

    **Inputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` True
    |  ``default:`` None

    :param id: ReportJobID value indicating the report
    :type id: Integer

    **Outputs**

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return job: A Report Job object that matches the specified input criteria.
    :rtype job: Array of ReportJobRun

    |  ``api version min:`` None
    |  ``api version max:`` None
    |  ``required:`` False
    |  ``default:`` None

    :return queue_size: Total number of active report jobs
    :rtype queue_size: Integer

    """

    # NOTE(review): the :rtype for ``job`` says "Array of ReportJobRun" while
    # its description says a single Report Job object -- confirm against the
    # NetMRI API schema before relying on either.
    return self.api_list_request(self._get_method_fullname("show"), kwargs)
| apache-2.0 |
lamby/python-social-auth | examples/pyramid_example/example/__init__.py | 54 | 1248 | import sys
sys.path.append('../..')
from pyramid.config import Configurator
from pyramid.session import UnencryptedCookieSessionFactoryConfig
from sqlalchemy import engine_from_config
from social.apps.pyramid_app.models import init_social
from .models import DBSession, Base
def main(global_config, **settings):
    """This function returns a Pyramid WSGI application."""
    # Bind the SQLAlchemy session and metadata to the engine described in
    # the deployment settings (keys prefixed with "sqlalchemy.").
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    Base.metadata.bind = engine

    # NOTE(review): the session secret is hard-coded and the cookie is
    # unencrypted -- acceptable for an example app, not for production use.
    session_factory = UnencryptedCookieSessionFactoryConfig('thisisasecret')

    config = Configurator(settings=settings,
                          session_factory=session_factory,
                          autocommit=True)
    config.include('pyramid_chameleon')

    config.add_static_view('static', 'static', cache_max_age=3600)
    # Expose the logged-in user as request.user, computed once per request.
    config.add_request_method('example.auth.get_user', 'user', reify=True)
    config.add_route('home', '/')
    config.add_route('done', '/done')
    config.include('example.settings')
    config.include('example.local_settings')
    config.include('social.apps.pyramid_app')
    # Create/bind python-social-auth's tables on this app's declarative Base.
    init_social(config, Base, DBSession)
    config.scan()
    config.scan('social.apps.pyramid_app')
    return config.make_wsgi_app()
| bsd-3-clause |
robbles/mortar-luigi | mortar/luigi/dynamodb.py | 1 | 13619 | # Copyright (c) 2013 Mortar Data
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import abc
import time
import boto.dynamodb2
from boto.dynamodb2.exceptions import DynamoDBError
from boto.dynamodb2.fields import HashKey, RangeKey, AllIndex
from boto.dynamodb2.table import Table
from boto.dynamodb2.types import STRING
import luigi
import logging
from mortar.luigi import target_factory
logger = logging.getLogger('luigi-interface')
class DynamoDBClient(object):
    """
    A boto-based client for interacting with DynamoDB from Luigi.

    seealso:: https://help.mortardata.com/technologies/luigi/dynamodb_tasks
    """

    # interval to wait between polls to DynamoDB API in seconds
    TABLE_OPERATION_RESULTS_POLLING_SECONDS = 5.0

    # timeout for DynamoDB table creation and ramp-up in seconds
    TABLE_OPERATION_RESULTS_TIMEOUT_SECONDS = 60.0 * 30.0

    def __init__(self, region='us-east-1', aws_access_key_id=None, aws_secret_access_key=None):
        """
        :type region: str
        :param region: AWS region where your DynamoDB instance is located. Default: us-east-1.

        :type aws_access_key_id: str
        :param aws_access_key_id: AWS Access Key ID. If not provided, will be looked up from luigi configuration in dynamodb.aws_access_key_id.

        :type aws_secret_access_key: str
        :param aws_secret_access_key: AWS Secret Access Key. If not provided, will be looked up from luigi configuration in dynamodb.aws_secret_access_key.
        """
        if not aws_access_key_id:
            aws_access_key_id = luigi.configuration.get_config().get('dynamodb', 'aws_access_key_id')
        if not aws_secret_access_key:
            aws_secret_access_key = luigi.configuration.get_config().get('dynamodb', 'aws_secret_access_key')
        self.dynamo_cx = boto.dynamodb2.connect_to_region(
            region,
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            is_secure=True)

    def create_table(self, table_name, schema, throughput, indexes=None):
        """
        Create a new DynamoDB table and block until it is ready to use.

        :type table_name: str
        :param table_name: Name for table

        :type schema: list of boto.dynamodb2.fields.HashKey
        :param schema: Table schema

        :type throughput: dict with {'read': read_throughput, 'write': write_throughput}
        :param throughput: Initial table throughput

        :type indexes: list of boto.dynamodb2.fields.AllIndex
        :param indexes: Initial indexes for the table. Default: no indexes.

        :rtype: boto.dynamodb2.table.Table:
        :returns: Newly created Table
        """
        table = Table.create(table_name,
                             schema=schema,
                             throughput=throughput,
                             connection=self.dynamo_cx,
                             indexes=indexes
        )
        logger.info('Created new dynamodb table %s with schema %s' % \
            (table_name, schema))
        # Block until DynamoDB reports the table as ACTIVE.
        return self._poll_until_table_active(table)

    def get_table(self, table_name):
        """
        Fetch a Table from DynamoDB.

        NOTE: this is a somewhat expensive operation,
        which must query dynamo for the current state
        of the table.

        :type table_name: str
        :param table_name: Name of Table to load

        :rtype: boto.dynamodb2.table.Table:
        :returns: Requested Table
        """
        table = Table(table_name, connection=self.dynamo_cx)
        # must describe the table, or it doesn't have the correct throughput values
        table.describe()
        return table

    def update_throughput(self, table_name, throughput):
        """
        Update a table's throughput, using the stepwise
        fashion of increasing throughput by 2X each iteration,
        until the table has reached desired throughput.

        note:: As of Oct 2014, stepwise update is no longer required for DynamoDB.

        :rtype: boto.dynamodb2.table.Table:
        :returns: Table with updated throughput
        """
        table = self.get_table(table_name)

        # can only go up by 2X at a time; can go as far down in one time as wanted
        i = 0
        while (table.throughput['read'] != throughput['read']) or \
              (table.throughput['write'] != throughput['write']):
            request_throughput = {'read': min(throughput['read'], 2 * table.throughput['read']),
                                  'write': min(throughput['write'], 2 * table.throughput['write'])}
            logger.info('Round %s: Updating table to throughput %s' % (i, request_throughput))
            table.update(request_throughput)
            table = self._poll_until_table_active(table)
            i += 1
        return table

    def _poll_until_table_active(self, table):
        """Poll table.describe() until status is ACTIVE, or raise on timeout."""
        start_time = time.time()
        is_table_ready = False
        while (not is_table_ready) and (time.time() - start_time < DynamoDBClient.TABLE_OPERATION_RESULTS_TIMEOUT_SECONDS):
            try:
                describe_result = table.describe()
                status = describe_result['Table']['TableStatus']
                if status == 'ACTIVE':
                    logger.info('Table %s is ACTIVE with throughput %s' % (table.table_name, table.throughput))
                    is_table_ready = True
                else:
                    logger.debug('Table %s is in status %s' % (table.table_name, status))
                    time.sleep(DynamoDBClient.TABLE_OPERATION_RESULTS_POLLING_SECONDS)
            # Bug fix: use the "except E as e" form (Python 2.6+), replacing
            # the Python-2-only "except E, e" syntax.
            except DynamoDBError as e:
                # Transient API errors are retried until the timeout expires.
                logger.error('Error querying DynamoDB for table status; retrying. Error: %s' % e)
        if not is_table_ready:
            raise RuntimeError('Timed out waiting for DynamoDB table %s to be ACTIVE' % table.table_name)
        return table
class DynamoDBTask(luigi.Task):
    """
    Superclass for Luigi Tasks interacting with DynamoDB.

    seealso:: https://help.mortardata.com/technologies/luigi/dynamodb_tasks
    """

    # NOTE(review): luigi.Task likely does not use ABCMeta as its metaclass,
    # so @abc.abstractmethod is probably not enforced at instantiation time;
    # the RuntimeErrors below act as the real guard. Confirm against luigi.

    @abc.abstractmethod
    def table_name(self):
        """
        Name of the table on which operation should be performed.

        :rtype: str:
        :returns: table_name for operation
        """
        raise RuntimeError("Please implement the table_name method")

    @abc.abstractmethod
    def output_token(self):
        """
        Luigi Target providing path to a token that indicates
        completion of this Task.

        :rtype: Target:
        :returns: Target for Task completion token
        """
        raise RuntimeError("Please implement the output_token method")

    def output(self):
        """
        The output for this Task. Returns the output token
        by default, so the task only runs if the token does not
        already exist.

        :rtype: Target:
        :returns: Target for Task completion token
        """
        return self.output_token()
class CreateDynamoDBTable(DynamoDBTask):
    """
    Luigi Task to create a new table in DynamoDB.

    This Task writes an output token to the location designated
    by the `output_token` method to indicate that the
    table has been successfully created. The Task will fail
    if the requested table name already exists.

    Table creation in DynamoDB takes between several seconds and several minutes; this Task will
    block until creation has finished.
    """

    # Initial read throughput of created table
    read_throughput = luigi.IntParameter()

    # Initial write throughput of created table
    write_throughput = luigi.IntParameter()

    # Name of the primary hash key for this table
    hash_key = luigi.Parameter()

    # Type of the primary hash key (boto.dynamodb2.types)
    hash_key_type = luigi.Parameter()

    # Name of the primary range key for this table, if it exists
    range_key = luigi.Parameter(None)

    # Type of the primary range key for this table, if it exists (boto.dynamodb2.types)
    range_key_type = luigi.Parameter(None)

    # Secondary indexes of the table, provided as a list of dictionaries
    # [ {'name': sec_index, 'range_key': range_key_name, 'data_type': NUMBER} ]
    indexes = luigi.Parameter(None)

    def _generate_indexes(self):
        """
        Create boto-friendly index data structure.
        """
        all_index = []
        for index in self.indexes:
            all_index.append(AllIndex(index['name'], parts=[
                # Bug fix: the index hash key must use the table's hash key
                # type (hash_key_type), not the range key's type as before.
                HashKey(self.hash_key, data_type=self.hash_key_type),
                RangeKey(index['range_key'], data_type=index['data_type'])]))
        return all_index

    def run(self):
        """
        Create the DynamoDB table.
        """
        dynamodb_client = DynamoDBClient()
        schema = [HashKey(self.hash_key, data_type=self.hash_key_type)]
        if self.range_key:
            schema.append(RangeKey(self.range_key, data_type=self.range_key_type))
        throughput={'read': self.read_throughput,
                    'write': self.write_throughput}
        if self.indexes:
            dynamodb_client.create_table(self.table_name(), schema, throughput, indexes=self._generate_indexes())
        else:
            dynamodb_client.create_table(self.table_name(), schema, throughput)

        # write token to note completion
        target_factory.write_file(self.output_token())
class UpdateDynamoDBThroughput(DynamoDBTask):
    """
    Luigi Task that adjusts the provisioned throughput of an existing
    DynamoDB table.

    On success an output token is written to the location designated by
    `output_token`, marking the Task as done. The Task fails if the table
    does not exist.
    """

    # Target read throughput
    read_throughput = luigi.IntParameter()

    # Target write throughput
    write_throughput = luigi.IntParameter()

    def run(self):
        """
        Apply the configured read/write throughput to the target table,
        then write the completion token.
        """
        client = DynamoDBClient()
        desired_throughput = {'read': self.read_throughput,
                              'write': self.write_throughput}
        client.update_throughput(self.table_name(), desired_throughput)

        # write token to note completion
        target_factory.write_file(self.output_token())
class SanityTestDynamoDBTable(DynamoDBTask):
    """
    Luigi Task to sanity check that a set of sentinel IDs
    exist in a DynamoDB table (usually after loading it with data).

    This Task writes an output token to the location designated
    by the `output_token` method to indicate that the
    Task has been successfully completed.
    """

    # Name of the primary hash key for this table
    hash_key = luigi.Parameter()

    # number of entries required to be in the table
    min_total_results = luigi.IntParameter(100)

    # when testing total entries, require that these field names not be null
    non_null_fields = luigi.Parameter([])

    # number of results required to be returned for each primary key
    result_length = luigi.IntParameter(5)

    # when testing specific ids, how many are allowed to fail
    failure_threshold = luigi.IntParameter(2)

    @abc.abstractmethod
    def ids(self):
        """
        List of sentinel IDs to sanity check.

        :rtype: list of str:
        :returns: list of IDs
        """
        # Bug fix: this previously *returned* the RuntimeError instead of
        # raising it; now consistent with the other abstract methods.
        raise RuntimeError("Must provide list of ids to sanity test")

    def run(self):
        """
        Run sanity check.
        """
        dynamodb_client = DynamoDBClient()
        table = dynamodb_client.get_table(self.table_name())

        # check that the table contains at least min_total_results entries
        limit = self.min_total_results
        kw = {'limit': limit}
        for field in self.non_null_fields:
            kw['%s__null' % field] = False
        results = [r for r in table.scan(**kw)]
        num_results = len(results)
        if num_results < limit:
            exception_string = 'Sanity check failed: only found %s / %s expected results in table %s' % \
                (num_results, limit, self.table_name())
            logger.warn(exception_string)
            raise DynamoDBTaskException(exception_string)

        # do a check on specific ids
        self._sanity_check_ids(table)

        # write token to note completion
        target_factory.write_file(self.output_token())

    def _sanity_check_ids(self, table):
        """Query each sentinel id; fail if too many return too few results."""
        failure_count = 0
        kw = {'limit': self.result_length}
        for id in self.ids():
            kw['%s__eq' % self.hash_key] = id
            # Bug fix: materialize the query result exactly once. The result
            # of table.query() is an iterator; the original called
            # len(list(results)) twice, exhausting it on the first call so
            # the logged count was always 0.
            num_results = len(list(table.query(**kw)))
            if num_results < self.result_length:
                failure_count += 1
                logger.info('Id %s only returned %s results.' % (id, num_results))
        if failure_count > self.failure_threshold:
            exception_string = 'Sanity check failed: %s ids in table %s failed to return sufficient results' % \
                (failure_count, self.table_name())
            logger.warn(exception_string)
            raise DynamoDBTaskException(exception_string)
class DynamoDBTaskException(Exception):
    """
    Exception thrown by DynamoDBTask subclasses.

    Raised when a sanity check or other DynamoDB task step fails.
    """
    pass
| apache-2.0 |
cgwalters/gnome-ostree | src/ostbuild/pyostbuild/ostbuildrc.py | 3 | 1838 | # Copyright (C) 2011 Colin Walters <walters@verbum.org>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os,sys,ConfigParser
_config = None
def get():
    """Return the [global] section of ~/.config/ostbuild.cfg as a dict.

    The parsed result is cached in the module-level ``_config`` variable,
    so the file is read only once per process. Keys and values are
    whitespace-stripped.
    """
    global _config
    if _config is None:
        configpath = os.path.expanduser('~/.config/ostbuild.cfg')
        # ConfigParser is the Python 2 module name (configparser in Python 3).
        parser = ConfigParser.RawConfigParser()
        parser.read([configpath])
        _config = {}
        for (k, v) in parser.items('global'):
            _config[k.strip()] = v.strip()
    return _config
# This hack is because we want people to be able to pass None
# for "default", but still distinguish default=None from default
# not passed.
_default_not_supplied = object()
def get_key(name, provided_args=None, default=_default_not_supplied):
    """Look up *name*, preferring an explicitly provided argument over the
    config file; fall back to *default* only when one was supplied.

    Raises KeyError when the key is missing and no default was given.
    """
    global _default_not_supplied
    settings = get()
    if provided_args:
        override = provided_args.get(name)
        if override is not None:
            return override
    if default is _default_not_supplied:
        # No default given: a missing key should raise KeyError.
        return settings[name]
    found = settings.get(name, _default_not_supplied)
    return default if found is _default_not_supplied else found
| lgpl-2.1 |
swayf/pyLoad | module/datatypes/PyFile.py | 1 | 8388 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright(c) 2008-2012 pyLoad Team
# http://www.pyload.org
#
# This file is part of pyLoad.
# pyLoad is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Subjected to the terms and conditions in LICENSE
#
# @author: RaNaN
###############################################################################
from time import sleep, time
from threading import RLock
from module.Api import ProgressInfo, DownloadProgress, FileInfo, DownloadInfo, DownloadStatus
from module.utils import format_size, format_time, lock
# Maps human-readable status names to the integer codes stored on
# PyFile.status (translated via setStatus / hasStatus below; the codes
# appear to mirror module.Api.DownloadStatus -- confirm against Api).
statusMap = {
    "none": 0,
    "offline": 1,
    "online": 2,
    "queued": 3,
    "paused": 4,
    "finished": 5,
    "skipped": 6,
    "failed": 7,
    "starting": 8,
    "waiting": 9,
    "downloading": 10,
    "temp. offline": 11,
    "aborted": 12,
    "decrypting": 13,
    "processing": 14,
    "custom": 15,
    "unknown": 16,
}
class PyFile(object):
    """
    Represents a file object at runtime
    """
    # __slots__ keeps per-instance memory small; a download queue can hold
    # many PyFile instances at once.
    __slots__ = ("m", "fid", "_name", "_size", "filestatus", "media", "added", "fileorder",
                 "url", "pluginname", "hash", "status", "error", "packageid", "ownerid",
                 "lock", "plugin", "waitUntil", "abort", "statusname",
                 "reconnected", "pluginclass")

    @staticmethod
    def fromInfoData(m, info):
        # Build a runtime PyFile from an Api FileInfo record. Download-related
        # fields are only filled in when the record carries download info.
        f = PyFile(m, info.fid, info.name, info.size, info.status, info.media, info.added, info.fileorder,
                   "", "", "", DownloadStatus.NA, "", info.package, info.owner)
        if info.download:
            f.url = info.download.url
            f.pluginname = info.download.plugin
            f.hash = info.download.hash
            f.status = info.download.status
            f.error = info.download.error
        return f

    def __init__(self, manager, fid, name, size, filestatus, media, added, fileorder,
                 url, pluginname, hash, status, error, package, owner):
        self.m = manager

        self.fid = int(fid)
        self._name = name
        self._size = size
        self.filestatus = filestatus
        self.media = media
        self.added = added
        self.fileorder = fileorder
        self.url = url
        self.pluginname = pluginname
        self.hash = hash
        self.status = status
        self.error = error
        self.ownerid = owner
        self.packageid = package #should not be used, use package() instead
        # database information ends here

        self.lock = RLock()

        self.plugin = None

        self.waitUntil = 0 # time() + time to wait

        # status attributes
        self.abort = False
        self.reconnected = False
        self.statusname = None

    @property
    def id(self):
        # Backwards-compatible alias for fid; logs a deprecation notice.
        self.m.core.log.debug("Deprecated attr .id, use .fid instead")
        return self.fid

    def setSize(self, value):
        self._size = int(value)

    # will convert all sizes to ints
    size = property(lambda self: self._size, setSize)

    def getName(self):
        # Prefer the (possibly server-provided) name from the active request;
        # the bare except is a deliberate best-effort fallback when plugin
        # or req does not exist yet.
        try:
            if self.plugin.req.name:
                return self.plugin.req.name
            else:
                return self._name
        except:
            return self._name

    def setName(self, name):
        """ Only set unicode or utf8 strings as name """
        # Python 2 semantics: decode byte strings to unicode before storing.
        if type(name) == str:
            name = name.decode("utf8")

        self._name = name

    name = property(getName, setName)

    def __repr__(self):
        return "<PyFile %s: %s@%s>" % (self.id, self.name, self.pluginname)

    @lock
    def initPlugin(self):
        """ inits plugin instance """
        if not self.plugin:
            self.pluginclass = self.m.core.pluginManager.getPlugin(self.pluginname)
            self.plugin = self.pluginclass(self)

    @lock
    def hasPlugin(self):
        """Thread safe way to determine this file has initialized plugin attribute"""
        return hasattr(self, "plugin") and self.plugin

    def package(self):
        """ return package instance"""
        return self.m.getPackage(self.packageid)

    def setStatus(self, status):
        # Translate the readable status name to its integer code.
        self.status = statusMap[status]
        # needs to sync so status is written to database
        self.sync()

    def setCustomStatus(self, msg, status="processing"):
        # Store a free-form status message, then apply the given status.
        self.statusname = msg
        self.setStatus(status)

    def getStatusName(self):
        # Codes 13 (decrypting) / 14 (processing) may carry a custom message.
        if self.status not in (13, 14) or not self.statusname:
            return self.m.statusMsg[self.status]
        else:
            return self.statusname

    def hasStatus(self, status):
        return statusMap[status] == self.status

    def sync(self):
        """sync PyFile instance with database"""
        self.m.updateFile(self)

    @lock
    def release(self):
        """sync and remove from cache"""
        if hasattr(self, "plugin") and self.plugin:
            self.plugin.clean()
            del self.plugin

        self.m.releaseFile(self.fid)

    def toInfoData(self):
        # Serialize back into the Api FileInfo / DownloadInfo structures.
        return FileInfo(self.fid, self.getName(), self.packageid, self.ownerid, self.getSize(), self.filestatus,
                        self.media, self.added, self.fileorder, DownloadInfo(
                self.url, self.pluginname, self.hash, self.status, self.getStatusName(), self.error
            )
        )

    def getPath(self):
        # Not implemented here.
        pass

    def move(self, pid):
        # Not implemented here.
        pass

    def abortDownload(self):
        """abort pyfile if possible"""
        # Keep signalling abort while a worker thread still processes this
        # file; the request's downloads are aborted on each pass.
        while self.id in self.m.core.threadManager.processingIds():
            self.abort = True
            if self.plugin and self.plugin.req:
                self.plugin.req.abortDownloads()

            sleep(0.1)

        self.abort = False
        if self.hasPlugin() and self.plugin.req:
            self.plugin.req.abortDownloads()

        self.release()

    def finishIfDone(self):
        """set status to finish and release file if every thread is finished with it"""

        if self.id in self.m.core.threadManager.processingIds():
            return False

        self.setStatus("finished")
        self.release()
        self.m.checkAllLinksFinished()
        return True

    def checkIfProcessed(self):
        self.m.checkAllLinksProcessed(self.id)

    def formatWait(self):
        """ formats and return wait time in humanreadable format """
        return format_time(self.waitUntil - time())

    def formatSize(self):
        """ formats size to readable format """
        return format_size(self.getSize())

    def formatETA(self):
        """ formats eta to readable format """
        return format_time(self.getETA())

    def getSpeed(self):
        """ calculates speed """
        # Best-effort: 0 when no active request exists.
        try:
            return self.plugin.req.speed
        except:
            return 0

    def getETA(self):
        """ gets established time of arrival"""
        # The bare except also swallows ZeroDivisionError when speed is 0.
        try:
            return self.getBytesLeft() / self.getSpeed()
        except:
            return 0

    def getBytesArrived(self):
        """ gets bytes arrived """
        try:
            return self.plugin.req.arrived
        except:
            return 0

    def getBytesLeft(self):
        """ gets bytes left """
        try:
            return self.plugin.req.size - self.plugin.req.arrived
        except:
            return 0

    def getPercent(self):
        """ get % of download """
        if self.status == DownloadStatus.Downloading:
            try:
                return self.plugin.req.percent
            except:
                return 0
        else:
            # NOTE(review): self.progress is not listed in __slots__ and is
            # never assigned in this class, so this branch looks like it
            # would raise AttributeError -- confirm where progress is meant
            # to come from.
            return self.progress

    def getSize(self):
        """ get size of download """
        # Prefer the size reported by the active request, fall back to the
        # database value.
        try:
            if self.plugin.req.size:
                return self.plugin.req.size
            else:
                return self.size
        except:
            return self.size

    def notifyChange(self):
        self.m.core.eventManager.dispatchEvent("linkUpdated", self.id, self.packageid)

    def getProgressInfo(self):
        return ProgressInfo(self.plugin, self.name, self.statusname, self.getETA(), self.formatETA(),
                            self.getBytesArrived(), self.getSize(),
                            DownloadProgress(self.fid, self.packageid, self.getSpeed(), self.status))
| agpl-3.0 |
bmander/dancecontraption | django/db/models/sql/datastructures.py | 396 | 1157 | """
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
    """Sentinel exception: the constructed query can return no results."""
    pass
class FullResultSet(Exception):
    """Sentinel exception, complementary to EmptyResultSet."""
    pass
class MultiJoin(Exception):
    """
    Used by join construction code to indicate the point at which a
    multi-valued join was attempted (if the caller wants to treat that
    exceptionally).
    """
    def __init__(self, level):
        # Join depth at which the multi-valued join was encountered.
        self.level = level
class Empty(object):
    """Empty placeholder class."""
    pass
class RawValue(object):
    """Simple wrapper holding a raw value."""
    def __init__(self, value):
        self.value = value
class Date(object):
    """
    Add a date selection column.

    ``col`` is either a plain column expression or an (alias, column)
    pair; ``lookup_type`` names the date granularity to truncate to.
    """
    def __init__(self, col, lookup_type):
        self.col = col
        self.lookup_type = lookup_type

    def relabel_aliases(self, change_map):
        # Rewrite the table alias when the column is an (alias, column) pair.
        if isinstance(self.col, (list, tuple)):
            alias, column = self.col
            self.col = (change_map.get(alias, alias), column)

    def as_sql(self, qn, connection):
        # Quote "alias"."column" pairs; pass plain expressions through as-is.
        col = self.col
        if isinstance(col, (list, tuple)):
            col = '%s.%s' % (qn(col[0]), qn(col[1]))
        return connection.ops.date_trunc_sql(self.lookup_type, col)
| bsd-3-clause |
avoinsystems/odoo | addons/sale_order_dates/__init__.py | 441 | 1071 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sale_order_dates
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vks/servo | tests/wpt/web-platform-tests/html/tools/update_html5lib_tests.py | 125 | 5358 | import sys
import os
import hashlib
import urllib
import itertools
import re
import json
import glob
import shutil
try:
import genshi
from genshi.template import MarkupTemplate
from html5lib.tests import support
except ImportError:
print """This script requires the Genshi templating library and html5lib source
It is recommended that these are installed in a virtualenv:
virtualenv venv
source venv/bin/activate
pip install genshi
cd venv
git clone git@github.com:html5lib/html5lib-python.git html5lib
cd html5lib
git submodule init
git submodule update
pip install -e ./
Then run this script again, with the virtual environment still active.
When you are done, type "deactivate" to deactivate the virtual environment.
"""
TESTS_PATH = "html/syntax/parsing/"
def get_paths():
    """Return (directory containing this script, output directory for tests)."""
    script_path = os.path.dirname(os.path.abspath(__file__))
    repo_base = get_repo_base(script_path)
    return script_path, os.path.join(repo_base, TESTS_PATH)
def get_repo_base(path):
    """Walk up from *path* until a directory containing ".git" is found.

    Returns the repository root directory, or None when *path* is not
    inside a git checkout.

    Bug fix: the original looped forever at the filesystem root, because
    os.path.split("/") yields ("/", "") so the path never became falsy.
    We now stop as soon as the parent directory no longer changes.
    """
    while path:
        if os.path.exists(os.path.join(path, ".git")):
            return path
        parent = os.path.split(path)[0]
        if parent == path:
            # Reached the filesystem root without finding a .git directory.
            return None
        path = parent
    return None
def get_expected(data):
    """Prefix the expected parse-tree dump with the "#document" root line."""
    return "#document\n" + data
def get_hash(data, container=None):
    """Return a stable sha1 hex id for a (container, data) test pair.

    NOTE: Python 2 only -- the sha1() argument is a (byte) str built from
    utf8-encoded parts; under Python 3 the final string would need an
    explicit encode before hashing.
    """
    if container == None:
        container = ""
    return hashlib.sha1("#container%s#data%s"%(container.encode("utf8"),
                                               data.encode("utf8"))).hexdigest()
def make_tests(script_dir, out_dir, input_file_name, test_data):
    """Convert one html5lib .dat file's tests into generated WPT test files.

    Produces up to two files: one for document-level parse tests and one
    for innerHTML (fragment) tests. Returns the pair of generated paths;
    either element is None when there were no tests of that flavor.
    (Python 2 code: uses print statements and urllib.quote.)
    """
    tests = []
    innerHTML_tests = []
    ids_seen = {}
    print input_file_name
    for test in test_data:
        if "script-off" in test:
            # Tests requiring scripting disabled are skipped entirely.
            continue
        is_innerHTML = "document-fragment" in test
        data = test["data"]
        container = test["document-fragment"] if is_innerHTML else None
        assert test["document"], test
        expected = get_expected(test["document"])
        test_list = innerHTML_tests if is_innerHTML else tests
        test_id = get_hash(data, container)
        if test_id in ids_seen:
            # Duplicate (container, data) pair within the same input file.
            print "WARNING: id %s seen multiple times in file %s this time for test (%s, %s) before for test %s, skipping"%(test_id, input_file_name, container, data, ids_seen[test_id])
            continue
        ids_seen[test_id] = (container, data)
        test_list.append({'string_uri_encoded_input':"\"%s\""%urllib.quote(data.encode("utf8")),
                          'input':data,
                          'expected':expected,
                          'string_escaped_expected':json.dumps(urllib.quote(expected.encode("utf8"))),
                          'id':test_id,
                          'container':container
                          })
    path_normal = None
    if tests:
        path_normal = write_test_file(script_dir, out_dir,
                                      tests, "html5lib_%s"%input_file_name,
                                      "html5lib_test.xml")
    path_innerHTML = None
    if innerHTML_tests:
        path_innerHTML = write_test_file(script_dir, out_dir,
                                         innerHTML_tests, "html5lib_innerHTML_%s"%input_file_name,
                                         "html5lib_test_fragment.xml")
    return path_normal, path_innerHTML
def write_test_file(script_dir, out_dir, tests, file_name, template_file_name):
    """Render *tests* through the given Genshi template into *out_dir*.

    Returns the full path of the generated .html file.
    """
    out_path = os.path.join(out_dir, file_name + ".html")
    short_name = os.path.split(out_path)[1]

    with open(os.path.join(script_dir, template_file_name)) as template_fp:
        template = MarkupTemplate(template_fp)

    stream = template.generate(file_name=short_name, tests=tests)
    with open(out_path, "w") as out_fp:
        out_fp.write(stream.render('html', doctype='html5',
                                   encoding="utf8"))
    return out_path
def escape_js_string(in_data):
    """Backslash-escape *in_data* for embedding in generated JavaScript.

    NOTE: Python 2 only -- relies on the "string-escape" codec, which was
    removed in Python 3.
    """
    return in_data.encode("utf8").encode("string-escape")
def serialize_filenames(test_filenames):
    """Render the filenames as a JS-style array literal, one entry per line."""
    quoted = ["\"%s\"" % name for name in test_filenames]
    return "[" + ",\n".join(quoted) + "]"
def main():
    """Generate WPT html5lib test files from html5lib-python test data.

    With an extra command-line argument, .dat files are taken from that
    directory; otherwise the tree-construction data bundled with html5lib
    (including the scripted subset) is used. (Python 2 code: itertools.izip.)
    """
    script_dir, out_dir = get_paths()
    test_files = []
    inner_html_files = []
    if len(sys.argv) > 2:
        test_iterator = itertools.izip(
            itertools.repeat(False),
            sorted(os.path.abspath(item) for item in
                   glob.glob(os.path.join(sys.argv[2], "*.dat"))))
    else:
        test_iterator = itertools.chain(
            itertools.izip(itertools.repeat(False),
                           sorted(support.get_data_files("tree-construction"))),
            itertools.izip(itertools.repeat(True),
                           sorted(support.get_data_files(
                               os.path.join("tree-construction", "scripted")))))

    for (scripted, test_file) in test_iterator:
        input_file_name = os.path.splitext(os.path.split(test_file)[1])[0]
        if scripted:
            # Distinguish scripted tests in the generated file names.
            input_file_name = "scripted_" + input_file_name
        test_data = support.TestData(test_file)
        test_filename, inner_html_file_name = make_tests(script_dir, out_dir,
                                                         input_file_name, test_data)
        # NOTE(review): the collected lists below are never used after the
        # loop -- possibly leftover from removed manifest generation.
        if test_filename is not None:
            test_files.append(test_filename)
        if inner_html_file_name is not None:
            inner_html_files.append(inner_html_file_name)

if __name__ == "__main__":
    main()
| mpl-2.0 |
5hawnknight/selenium | py/selenium/webdriver/common/actions/key_input.py | 48 | 1782 | # Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from . import interaction
from .input_device import InputDevice
from .interaction import (Interaction,
Pause)
class KeyInput(InputDevice):
    """A WebDriver "key" input source: a named device with a queue of
    key actions (keyDown / keyUp / pause) to be encoded for the wire."""

    def __init__(self, name):
        super(KeyInput, self).__init__()
        self.name = name
        self.type = interaction.KEY

    def encode(self):
        # Serialize this device and all queued actions to the wire format.
        return {"type": self.type, "id": self.name, "actions": [acts.encode() for acts in self.actions]}

    def create_key_down(self, key):
        # Queue a keyDown action for *key*.
        self.add_action(TypingInteraction(self, "keyDown", key))

    def create_key_up(self, key):
        # Queue a keyUp action for *key*.
        self.add_action(TypingInteraction(self, "keyUp", key))

    def create_pause(self, pause_duration=0):
        # Queue a pause of the given duration.
        self.add_action(Pause(self, pause_duration))
class TypingInteraction(Interaction):
    """A single keyDown/keyUp action bound to a key input source."""

    def __init__(self, source, type_, key):
        super(TypingInteraction, self).__init__(source)
        self.type = type_
        self.key = key

    def encode(self):
        # Serialize to the WebDriver action wire format.
        encoded = {"type": self.type}
        encoded["value"] = self.key
        return encoded
lmprice/ansible | lib/ansible/plugins/cache/redis.py | 16 | 4208 | # (c) 2014, Brian Coca, Josh Drake, et al
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: redis
short_description: Use Redis DB for cache
description:
- This cache uses JSON formatted, per host records saved in Redis.
version_added: "1.9"
requirements:
- redis (python lib)
options:
_uri:
description:
- A colon separated string of connection information for Redis.
required: True
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the DB entries
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import time
import json
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.plugins.cache import BaseCacheModule
try:
from redis import StrictRedis
except ImportError:
raise AnsibleError("The 'redis' python module is required for the redis fact cache, 'pip install redis'")
class CacheModule(BaseCacheModule):
    """
    A caching module backed by redis.
    Keys are maintained in a zset with their score being the timestamp
    when they are inserted. This allows for the usage of 'zremrangebyscore'
    to expire keys. This mechanism is used or a pattern matched 'scan' for
    performance.
    """
    def __init__(self, *args, **kwargs):
        # Connection info is a colon separated string (e.g. "host:port:db")
        # whose parts are handed positionally to StrictRedis().
        if C.CACHE_PLUGIN_CONNECTION:
            connection = C.CACHE_PLUGIN_CONNECTION.split(':')
        else:
            connection = []
        self._timeout = float(C.CACHE_PLUGIN_TIMEOUT)
        self._prefix = C.CACHE_PLUGIN_PREFIX
        self._cache = {}  # local per-run cache in front of redis
        self._db = StrictRedis(*connection)
        self._keys_set = 'ansible_cache_keys'  # zset tracking known keys

    def _make_key(self, key):
        """Return the redis key for *key*, with the configured prefix."""
        return self._prefix + key

    def get(self, key):
        """Return the cached value for *key*; raises KeyError if absent."""
        if key not in self._cache:
            value = self._db.get(self._make_key(key))
            # guard against the key not being removed from the zset;
            # this could happen in cases where the timeout value is changed
            # between invocations
            if value is None:
                self.delete(key)
                raise KeyError
            self._cache[key] = json.loads(value)
        return self._cache.get(key)

    def set(self, key, value):
        """Store *value* under *key* in redis (JSON-encoded) and locally."""
        value2 = json.dumps(value)
        if self._timeout > 0:  # a timeout of 0 is handled as meaning 'never expire'
            self._db.setex(self._make_key(key), int(self._timeout), value2)
        else:
            self._db.set(self._make_key(key), value2)
        self._db.zadd(self._keys_set, time.time(), key)
        self._cache[key] = value

    def _expire_keys(self):
        """Drop zset entries older than the configured timeout."""
        if self._timeout > 0:
            expiry_age = time.time() - self._timeout
            self._db.zremrangebyscore(self._keys_set, 0, expiry_age)

    def keys(self):
        """Return all non-expired cache keys."""
        self._expire_keys()
        return self._db.zrange(self._keys_set, 0, -1)

    def contains(self, key):
        """Return True if *key* is tracked in the (non-expired) key set."""
        self._expire_keys()
        return (self._db.zrank(self._keys_set, key) is not None)

    def delete(self, key):
        """Remove *key* from the local cache, redis, and the key zset."""
        # BUG FIX: the key may be absent from the local dict (e.g. when
        # called from get()'s stale-key guard or from flush(), whose keys
        # come from redis).  `del self._cache[key]` raised KeyError there
        # and skipped the redis cleanup below; pop() is safe either way.
        self._cache.pop(key, None)
        self._db.delete(self._make_key(key))
        self._db.zrem(self._keys_set, key)

    def flush(self):
        """Delete every tracked key."""
        for key in self.keys():
            self.delete(key)

    def copy(self):
        """Return a plain dict snapshot of all cached values."""
        # TODO: there is probably a better way to do this in redis
        ret = dict()
        for key in self.keys():
            ret[key] = self.get(key)
        return ret

    def __getstate__(self):
        # Redis connections are not picklable; re-create on unpickle.
        return dict()

    def __setstate__(self, data):
        self.__init__()
| gpl-3.0 |
spotify/annoy | test/holes_test.py | 2 | 2096 | # Copyright (c) 2013 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import numpy
import random
from common import TestCase
from annoy import AnnoyIndex
class HolesTest(TestCase):
    """Tests that indices with gaps (holes) in their item ids behave correctly."""

    def test_random_holes(self):
        """Queries must only ever return ids that were actually added."""
        f = 10
        index = AnnoyIndex(f, 'angular')
        valid_indices = random.sample(range(2000), 1000)  # leave holes
        for i in valid_indices:
            v = numpy.random.normal(size=(f,))
            index.add_item(i, v)
        index.build(10)
        for i in valid_indices:
            js = index.get_nns_by_item(i, 10000)
            for j in js:
                self.assertTrue(j in valid_indices)
        for i in range(1000):
            v = numpy.random.normal(size=(f,))
            js = index.get_nns_by_vector(v, 10000)
            for j in js:
                self.assertTrue(j in valid_indices)

    def _test_holes_base(self, n, f=100, base_i=100000):
        """Build an index whose n ids all start at base_i and verify retrieval."""
        annoy = AnnoyIndex(f, 'angular')
        for i in range(n):
            annoy.add_item(base_i + i, numpy.random.normal(size=(f,)))
        annoy.build(100)
        res = annoy.get_nns_by_item(base_i, n)
        # assertEqual: assertEquals is a deprecated alias that was removed
        # from unittest in Python 3.12.
        self.assertEqual(set(res), set([base_i + i for i in range(n)]))

    def test_root_one_child(self):
        # See https://github.com/spotify/annoy/issues/223
        self._test_holes_base(1)

    def test_root_two_children(self):
        self._test_holes_base(2)

    def test_root_some_children(self):
        # See https://github.com/spotify/annoy/issues/295
        self._test_holes_base(10)

    def test_root_many_children(self):
        self._test_holes_base(1000)
| apache-2.0 |
asottile/pushmanager | pushmanager/servlets/newpush.py | 2 | 3045 | import subprocess
import time
import pushmanager.core.db as db
from pushmanager.core.mail import MailQueue
from pushmanager.core.settings import Settings
from pushmanager.core.requesthandler import RequestHandler
import pushmanager.core.util
from pushmanager.core.xmppclient import XMPPQueue
def send_notifications(people, pushtype, pushurl):
    """Announce a newly opened push via XMPP, IRC and email.

    Args:
        people: iterable of usernames to ping directly, or None/empty for
            a broadcast-only announcement.
        pushtype: the push type string (e.g. 'morning', 'urgent').
        pushurl: server-relative URL of the push page.
    """
    server_name = Settings['main_app']['servername'].rstrip('/')
    port = Settings['main_app']['port']
    port_part = ':%d' % port if port != 443 else ''
    push_link = "https://%s/%s" % (server_name + port_part, pushurl.lstrip('/'))

    if people:
        msg = '%s: %s push starting! %s' % (', '.join(people), pushtype, push_link)
        XMPPQueue.enqueue_user_xmpp(people, 'Push starting! %s' % push_link)
    elif pushtype == 'morning':
        msg = 'Morning push opened. %s' % server_name
    else:
        msg = 'push starting. %s' % push_link

    # Broadcast to the IRC channel via the nodebot helper.
    subprocess.call([
        '/nail/sys/bin/nodebot',
        '-i',
        Settings['irc']['nickname'],
        Settings['irc']['channel'],
        msg
    ])

    # Also notify the configured mailing list.
    MailQueue.enqueue_user_email(
        Settings['mail']['notifyall'], msg, "New push notification")
class NewPushServlet(RequestHandler):
    """Request handler that creates a new push and notifies interested users."""

    def _arg(self, key):
        # Convenience wrapper: fetch a request argument as a string, '' if absent.
        return pushmanager.core.util.get_str_arg(self.request, key, '')

    def post(self):
        """Create a push row and select pending requests; finishes asynchronously
        in on_db_complete once the DB transaction returns."""
        if not self.current_user:
            return self.send_error(403)
        self.pushtype = self._arg('push-type')
        insert_query = db.push_pushes.insert({
            'title': self._arg('push-title'),
            'user': self.current_user,
            'branch': self._arg('push-branch'),
            'revision': "0"*40,  # placeholder revision until the push is built
            'created': time.time(),
            'modified': time.time(),
            'state': 'accepting',
            'pushtype': self.pushtype,
        })
        select_query = db.push_requests.select().where(
            db.push_requests.c.state == 'requested',
        )
        # Both statements run in one transaction; results arrive in order
        # (insert first, then select) in on_db_complete.
        db.execute_transaction_cb([insert_query, select_query], self.on_db_complete)
    # GET behaves identically to POST for this endpoint.
    get = post

    def on_db_complete(self, success, db_results):
        """DB callback: compute who to notify and redirect to the new push page."""
        self.check_db_results(success, db_results)
        insert_results, select_results = db_results
        pushurl = '/push?id=%s' % insert_results.lastrowid
        def users_involved(request):
            # A request involves its author plus any comma-separated watchers.
            if request['watchers']:
                return [request['user']] + request['watchers'].split(',')
            return [request['user']]
        if self.pushtype in ('private', 'morning'):
            # Private/morning pushes do not ping individuals directly.
            people = None
        elif self.pushtype == 'urgent':
            # Urgent pushes only ping people with 'urgent'-tagged requests.
            people = set(user for x in select_results for user in users_involved(x) if 'urgent' in x['tags'].split(','))
        else:
            people = set(user for x in select_results for user in users_involved(x))
        send_notifications(people, self.pushtype, pushurl)
        return self.redirect(pushurl)
| apache-2.0 |
emop/ClickMonitor | atest/lib/simplejson/decoder.py | 77 | 14281 | """Implementation of JSONDecoder
"""
from __future__ import absolute_import
import re
import sys
import struct
from .compat import fromhex, b, u, text_type, binary_type, PY3, unichr
from .scanner import make_scanner, JSONDecodeError
def _import_c_scanstring():
try:
from ._speedups import scanstring
return scanstring
except ImportError:
return None
c_scanstring = _import_c_scanstring()
# NOTE (3.1.0): JSONDecodeError may still be imported from this module for
# compatibility, but it was never in the __all__
__all__ = ['JSONDecoder']
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
def _floatconstants():
    """Return (nan, inf, -inf) built from their IEEE-754 big-endian bit patterns."""
    _BYTES = fromhex('7FF80000000000007FF0000000000000')
    # The struct module in Python 2.4 would get frexp() out of range here
    # when an endian is specified in the format string. Fixed in Python 2.5+
    # Deliberate workaround: byte-swap each 8-byte double by hand on
    # little-endian hosts and unpack with the *native* format, instead of
    # simply using '>dd'.  Do not "simplify" this.
    if sys.byteorder != 'big':
        _BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
    nan, inf = struct.unpack('dd', _BYTES)
    return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
_CONSTANTS = {
'-Infinity': NegInf,
'Infinity': PosInf,
'NaN': NaN,
}
STRINGCHUNK = re.compile(r'(.*?)(["\\\x00-\x1f])', FLAGS)
BACKSLASH = {
'"': u('"'), '\\': u('\u005c'), '/': u('/'),
'b': u('\b'), 'f': u('\f'), 'n': u('\n'), 'r': u('\r'), 't': u('\t'),
}
DEFAULT_ENCODING = "utf-8"
def py_scanstring(s, end, encoding=None, strict=True,
        _b=BACKSLASH, _m=STRINGCHUNK.match, _join=u('').join,
        _PY3=PY3, _maxunicode=sys.maxunicode):
    """Scan the string s for a JSON string. End is the index of the
    character in s after the quote that started the JSON string.
    Unescapes all valid JSON string escape sequences and raises ValueError
    on attempt to decode an invalid string. If strict is False then literal
    control characters are allowed in the string.
    Returns a tuple of the decoded string and the index of the character in s
    after the end quote."""
    if encoding is None:
        encoding = DEFAULT_ENCODING
    chunks = []
    # Bind the append method once; this loop is performance-critical.
    _append = chunks.append
    begin = end - 1  # index of the opening quote, for error reporting
    while 1:
        # STRINGCHUNK matches a run of plain characters followed by a
        # terminator: a quote, a backslash, or a literal control character.
        chunk = _m(s, end)
        if chunk is None:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        end = chunk.end()
        content, terminator = chunk.groups()
        # Content is contains zero or more unescaped string characters
        if content:
            if not _PY3 and not isinstance(content, text_type):
                content = text_type(content, encoding)
            _append(content)
        # Terminator is the end of string, a literal control character,
        # or a backslash denoting that an escape sequence follows
        if terminator == '"':
            break
        elif terminator != '\\':
            if strict:
                msg = "Invalid control character %r at"
                raise JSONDecodeError(msg, s, end)
            else:
                _append(terminator)
            continue
        try:
            esc = s[end]
        except IndexError:
            raise JSONDecodeError(
                "Unterminated string starting at", s, begin)
        # If not a unicode escape sequence, must be in the lookup table
        if esc != 'u':
            try:
                char = _b[esc]
            except KeyError:
                msg = "Invalid \\X escape sequence %r"
                raise JSONDecodeError(msg, s, end)
            end += 1
        else:
            # Unicode escape sequence
            msg = "Invalid \\uXXXX escape sequence"
            esc = s[end + 1:end + 5]
            escX = esc[1:2]
            # Reject \x / \X here so int(esc, 16) below cannot be fooled
            # by a "0x"-style prefix.
            if len(esc) != 4 or escX == 'x' or escX == 'X':
                raise JSONDecodeError(msg, s, end - 1)
            try:
                uni = int(esc, 16)
            except ValueError:
                raise JSONDecodeError(msg, s, end - 1)
            end += 5
            # Check for surrogate pair on UCS-4 systems
            # Note that this will join high/low surrogate pairs
            # but will also pass unpaired surrogates through
            if (_maxunicode > 65535 and
                uni & 0xfc00 == 0xd800 and
                s[end:end + 2] == '\\u'):
                esc2 = s[end + 2:end + 6]
                escX = esc2[1:2]
                if len(esc2) == 4 and not (escX == 'x' or escX == 'X'):
                    try:
                        uni2 = int(esc2, 16)
                    except ValueError:
                        raise JSONDecodeError(msg, s, end)
                    if uni2 & 0xfc00 == 0xdc00:
                        # Combine the surrogate pair into a single code point.
                        uni = 0x10000 + (((uni - 0xd800) << 10) |
                                         (uni2 - 0xdc00))
                        end += 6
            char = unichr(uni)
        # Append the unescaped character
        _append(char)
    return _join(chunks), end
# Use speedup if available
scanstring = c_scanstring or py_scanstring
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
def JSONObject(state, encoding, strict, scan_once, object_hook,
        object_pairs_hook, memo=None,
        _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON object starting just after its opening '{'.

    *state* is a ``(string, index)`` pair.  Returns ``(obj, end_index)``
    where *obj* is a dict (or the result of object_hook/object_pairs_hook).
    """
    (s, end) = state
    # Backwards compatibility
    if memo is None:
        memo = {}
    # memo interns repeated key strings so equal keys share one object.
    memo_get = memo.setdefault
    pairs = []
    # Use a slice to prevent IndexError from being raised, the following
    # check will raise a more specific ValueError if the string is empty
    nextchar = s[end:end + 1]
    # Normally we expect nextchar == '"'
    if nextchar != '"':
        if nextchar in _ws:
            end = _w(s, end).end()
            nextchar = s[end:end + 1]
        # Trivial empty object
        if nextchar == '}':
            if object_pairs_hook is not None:
                result = object_pairs_hook(pairs)
                return result, end + 1
            pairs = {}
            if object_hook is not None:
                pairs = object_hook(pairs)
            return pairs, end + 1
        elif nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end)
    end += 1
    while True:
        key, end = scanstring(s, end, encoding, strict)
        key = memo_get(key, key)
        # To skip some function call overhead we optimize the fast paths where
        # the JSON key separator is ": " or just ":".
        if s[end:end + 1] != ':':
            end = _w(s, end).end()
            if s[end:end + 1] != ':':
                raise JSONDecodeError("Expecting ':' delimiter", s, end)
        end += 1
        try:
            # Fast path: skip one or two whitespace chars inline before
            # falling back to the regex for longer runs.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
        value, end = scan_once(s, end)
        pairs.append((key, value))
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end = _w(s, end + 1).end()
                nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar == '}':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or '}'", s, end - 1)
        try:
            nextchar = s[end]
            if nextchar in _ws:
                end += 1
                nextchar = s[end]
                if nextchar in _ws:
                    end = _w(s, end + 1).end()
                    nextchar = s[end]
        except IndexError:
            nextchar = ''
        end += 1
        if nextchar != '"':
            raise JSONDecodeError(
                "Expecting property name enclosed in double quotes",
                s, end - 1)
    if object_pairs_hook is not None:
        result = object_pairs_hook(pairs)
        return result, end
    pairs = dict(pairs)
    if object_hook is not None:
        pairs = object_hook(pairs)
    return pairs, end
def JSONArray(state, scan_once, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
    """Parse a JSON array starting just after its opening '['.

    *state* is a ``(string, index)`` pair.  Returns ``(values, end_index)``.
    """
    (s, end) = state
    values = []
    nextchar = s[end:end + 1]
    if nextchar in _ws:
        end = _w(s, end + 1).end()
        nextchar = s[end:end + 1]
    # Look-ahead for trivial empty array
    if nextchar == ']':
        return values, end + 1
    elif nextchar == '':
        raise JSONDecodeError("Expecting value or ']'", s, end)
    # Bind append once; this loop is performance-critical.
    _append = values.append
    while True:
        value, end = scan_once(s, end)
        _append(value)
        nextchar = s[end:end + 1]
        if nextchar in _ws:
            end = _w(s, end + 1).end()
            nextchar = s[end:end + 1]
        end += 1
        if nextchar == ']':
            break
        elif nextchar != ',':
            raise JSONDecodeError("Expecting ',' delimiter or ']'", s, end - 1)
        try:
            # Fast path: skip one or two whitespace chars inline before
            # falling back to the regex for longer runs.
            if s[end] in _ws:
                end += 1
                if s[end] in _ws:
                    end = _w(s, end + 1).end()
        except IndexError:
            pass
    return values, end
class JSONDecoder(object):
    """Simple JSON <http://json.org> decoder
    Performs the following translations in decoding by default:
    +---------------+-------------------+
    | JSON          | Python            |
    +===============+===================+
    | object        | dict              |
    +---------------+-------------------+
    | array         | list              |
    +---------------+-------------------+
    | string        | unicode           |
    +---------------+-------------------+
    | number (int)  | int, long         |
    +---------------+-------------------+
    | number (real) | float             |
    +---------------+-------------------+
    | true          | True              |
    +---------------+-------------------+
    | false         | False             |
    +---------------+-------------------+
    | null          | None              |
    +---------------+-------------------+
    It also understands ``NaN``, ``Infinity``, and ``-Infinity`` as
    their corresponding ``float`` values, which is outside the JSON spec.
    """
    def __init__(self, encoding=None, object_hook=None, parse_float=None,
            parse_int=None, parse_constant=None, strict=True,
            object_pairs_hook=None):
        """
        *encoding* determines the encoding used to interpret any
        :class:`str` objects decoded by this instance (``'utf-8'`` by
        default).  It has no effect when decoding :class:`unicode` objects.
        Note that currently only encodings that are a superset of ASCII work,
        strings of other encodings should be passed in as :class:`unicode`.
        *object_hook*, if specified, will be called with the result of every
        JSON object decoded and its return value will be used in place of the
        given :class:`dict`.  This can be used to provide custom
        deserializations (e.g. to support JSON-RPC class hinting).
        *object_pairs_hook* is an optional function that will be called with
        the result of any object literal decode with an ordered list of pairs.
        The return value of *object_pairs_hook* will be used instead of the
        :class:`dict`.  This feature can be used to implement custom decoders
        that rely on the order that the key and value pairs are decoded (for
        example, :func:`collections.OrderedDict` will remember the order of
        insertion). If *object_hook* is also defined, the *object_pairs_hook*
        takes priority.
        *parse_float*, if specified, will be called with the string of every
        JSON float to be decoded.  By default, this is equivalent to
        ``float(num_str)``. This can be used to use another datatype or parser
        for JSON floats (e.g. :class:`decimal.Decimal`).
        *parse_int*, if specified, will be called with the string of every
        JSON int to be decoded.  By default, this is equivalent to
        ``int(num_str)``.  This can be used to use another datatype or parser
        for JSON integers (e.g. :class:`float`).
        *parse_constant*, if specified, will be called with one of the
        following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``.  This
        can be used to raise an exception if invalid JSON numbers are
        encountered.
        *strict* controls the parser's behavior when it encounters an
        invalid control character in a string. The default setting of
        ``True`` means that unescaped control characters are parse errors, if
        ``False`` then control characters will be allowed in strings.
        """
        if encoding is None:
            encoding = DEFAULT_ENCODING
        self.encoding = encoding
        self.object_hook = object_hook
        self.object_pairs_hook = object_pairs_hook
        self.parse_float = parse_float or float
        self.parse_int = parse_int or int
        self.parse_constant = parse_constant or _CONSTANTS.__getitem__
        self.strict = strict
        self.parse_object = JSONObject
        self.parse_array = JSONArray
        self.parse_string = scanstring
        # memo is shared across parses to intern repeated object keys.
        self.memo = {}
        # scan_once closes over the parse_* attributes set above, so it
        # must be created last.
        self.scan_once = make_scanner(self)

    def decode(self, s, _w=WHITESPACE.match, _PY3=PY3):
        """Return the Python representation of ``s`` (a ``str`` or ``unicode``
        instance containing a JSON document)
        """
        if _PY3 and isinstance(s, binary_type):
            s = s.decode(self.encoding)
        obj, end = self.raw_decode(s)
        end = _w(s, end).end()
        # Unlike raw_decode, decode() requires the whole input to be one
        # JSON document (only trailing whitespace is tolerated).
        if end != len(s):
            raise JSONDecodeError("Extra data", s, end, len(s))
        return obj

    def raw_decode(self, s, idx=0, _w=WHITESPACE.match, _PY3=PY3):
        """Decode a JSON document from ``s`` (a ``str`` or ``unicode``
        beginning with a JSON document) and return a 2-tuple of the Python
        representation and the index in ``s`` where the document ended.
        Optionally, ``idx`` can be used to specify an offset in ``s`` where
        the JSON document begins.
        This can be used to decode a JSON document from a string that may
        have extraneous data at the end.
        """
        if _PY3 and not isinstance(s, text_type):
            raise TypeError("Input string must be text, not bytes")
        return self.scan_once(s, idx=_w(s, idx).end())
| mit |
Jonekee/chromium.src | tools/auto_bisect/fetch_build.py | 9 | 12527 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""This module contains functions for fetching and extracting archived builds.
The builds may be stored in different places by different types of builders;
for example, builders on tryserver.chromium.perf stores builds in one place,
while builders on chromium.linux store builds in another.
This module can be either imported or run as a stand-alone script to download
and extract a build.
Usage: fetch_build.py <type> <revision> <output_dir> [options]
"""
import argparse
import errno
import logging
import os
import shutil
import sys
import zipfile
# Telemetry (src/tools/telemetry) is expected to be in the PYTHONPATH.
from telemetry.util import cloud_storage
import bisect_utils
# Possible builder types.
PERF_BUILDER = 'perf'
FULL_BUILDER = 'full'
def GetBucketAndRemotePath(revision, builder_type=PERF_BUILDER,
                           target_arch='ia32', target_platform='chromium',
                           deps_patch_sha=None):
  """Returns the location where a build archive is expected to be.

  Args:
    revision: Revision string, e.g. a git commit hash or SVN revision.
    builder_type: Type of build archive.
    target_arch: Architecture, e.g. "ia32".
    target_platform: Platform name, e.g. "chromium" or "android".
    deps_patch_sha: SHA1 hash which identifies a particular combination of
        custom revisions for dependency repositories.

  Returns:
    A pair of strings (bucket, path), where the archive is expected to be.
  """
  archive = BuildArchive.Create(
      builder_type, target_arch=target_arch, target_platform=target_platform)
  return (archive.BucketName(),
          archive.FilePath(revision, deps_patch_sha=deps_patch_sha))
class BuildArchive(object):
  """Represents a place where builds of some type are stored.
  There are two pieces of information required to locate a file in Google
  Cloud Storage, bucket name and file path. Subclasses of this class contain
  specific logic about which bucket names and paths should be used to fetch
  a build.
  """
  @staticmethod
  def Create(builder_type, target_arch='ia32', target_platform='chromium'):
    # Factory method mapping a builder-type constant to its subclass.
    if builder_type == PERF_BUILDER:
      return PerfBuildArchive(target_arch, target_platform)
    if builder_type == FULL_BUILDER:
      return FullBuildArchive(target_arch, target_platform)
    raise NotImplementedError('Builder type "%s" not supported.' % builder_type)
  def __init__(self, target_arch='ia32', target_platform='chromium'):
    # NOTE: the order of these checks is significant.  'android' must be
    # tested before the generic Linux case, and 64-bit Windows before the
    # generic Windows case, because the later predicates also hold for the
    # earlier platforms.
    if bisect_utils.IsLinuxHost() and target_platform == 'android':
      self._platform = 'android'
    elif bisect_utils.IsLinuxHost():
      self._platform = 'linux'
    elif bisect_utils.IsMacHost():
      self._platform = 'mac'
    elif bisect_utils.Is64BitWindows() and target_arch == 'x64':
      self._platform = 'win64'
    elif bisect_utils.IsWindowsHost():
      self._platform = 'win'
    else:
      raise NotImplementedError('Unknown platform "%s".' % sys.platform)
  def BucketName(self):
    # Subclasses return the Google Storage bucket holding their archives.
    raise NotImplementedError()
  def FilePath(self, revision, deps_patch_sha=None):
    """Returns the remote file path to download a build from.
    Args:
      revision: A Chromium revision; this could be a git commit hash or
          commit position or SVN revision number.
      deps_patch_sha: The SHA1 hash of a patch to the DEPS file, which
          uniquely identifies a change to use a particular revision of
          a dependency.
    Returns:
      A file path, which not does not include a bucket name.
    """
    raise NotImplementedError()
  def _ZipFileName(self, revision, deps_patch_sha=None):
    """Gets the file name of a zip archive for a particular revision.
    This returns a file name of the form full-build-<platform>_<revision>.zip,
    which is a format used by multiple types of builders that store archives.
    Args:
      revision: A git commit hash or other revision string.
      deps_patch_sha: SHA1 hash of a DEPS file patch.
    Returns:
      The archive file name.
    """
    base_name = 'full-build-%s' % self._PlatformName()
    if deps_patch_sha:
      revision = '%s_%s' % (revision, deps_patch_sha)
    return '%s_%s.zip' % (base_name, revision)
  def _PlatformName(self):
    """Return a string to be used in paths for the platform."""
    if self._platform in ('win', 'win64'):
      # Build archive for win64 is still stored with "win32" in the name.
      return 'win32'
    if self._platform in ('linux', 'android'):
      # Android builds are also stored with "linux" in the name.
      return 'linux'
    if self._platform == 'mac':
      return 'mac'
    raise NotImplementedError('Unknown platform "%s".' % sys.platform)
class PerfBuildArchive(BuildArchive):
  """Archive of builds produced by the perf builders."""

  def BucketName(self):
    """Google Storage bucket that holds perf builder archives."""
    return 'chrome-perf'

  def FilePath(self, revision, deps_patch_sha=None):
    """Remote path of the zip archive for |revision| within the bucket."""
    directory = self._ArchiveDirectory()
    file_name = self._ZipFileName(revision, deps_patch_sha)
    return '%s/%s' % (directory, file_name)

  def _ArchiveDirectory(self):
    """Returns the directory name to download builds from."""
    platform_to_directory = {
        'android': 'android_perf_rel',
        'linux': 'Linux Builder',
        'mac': 'Mac Builder',
        'win64': 'Win x64 Builder',
        'win': 'Win Builder',
    }
    assert self._platform in platform_to_directory
    return platform_to_directory[self._platform]
class FullBuildArchive(BuildArchive):
  """Archive of full builds produced by the main waterfall builders."""

  def BucketName(self):
    """Google Storage bucket for this platform's full-build archives."""
    platform_to_bucket = {
        'android': 'chromium-android',
        'linux': 'chromium-linux-archive',
        'mac': 'chromium-mac-archive',
        'win64': 'chromium-win-archive',
        'win': 'chromium-win-archive',
    }
    assert self._platform in platform_to_bucket
    return platform_to_bucket[self._platform]

  def FilePath(self, revision, deps_patch_sha=None):
    """Remote path of the zip archive for |revision| within the bucket."""
    directory = self._ArchiveDirectory()
    file_name = self._ZipFileName(revision, deps_patch_sha)
    return '%s/%s' % (directory, file_name)

  def _ArchiveDirectory(self):
    """Returns the remote directory to download builds from."""
    platform_to_directory = {
        'android': 'android_main_rel',
        'linux': 'chromium.linux/Linux Builder',
        'mac': 'chromium.mac/Mac Builder',
        'win64': 'chromium.win/Win x64 Builder',
        'win': 'chromium.win/Win Builder',
    }
    assert self._platform in platform_to_directory
    return platform_to_directory[self._platform]
def BuildIsAvailable(bucket_name, remote_path):
  """Checks whether a build is currently archived at some place.

  Args:
    bucket_name: Google Storage bucket name.
    remote_path: File path within the bucket.

  Returns:
    True if the file exists, False otherwise (including when the
    existence check itself fails).
  """
  # Use lazy %-style logging arguments (and fix the "existance" typo) so
  # the message is only formatted when this log level is enabled.
  logging.info('Checking existence: gs://%s/%s', bucket_name, remote_path)
  try:
    exists = cloud_storage.Exists(bucket_name, remote_path)
    logging.info('Exists? %s', exists)
    return exists
  except cloud_storage.CloudStorageError:
    return False
def FetchFromCloudStorage(bucket_name, source_path, destination_dir):
  """Fetches file(s) from the Google Cloud Storage.
  As a side-effect, this prints messages to stdout about what's happening.
  Args:
    bucket_name: Google Storage bucket name.
    source_path: Source file path.
    destination_dir: Destination file path.
  Returns:
    Local file path of downloaded file if it was downloaded. If the file does
    not exist in the given bucket, or if there was an error while downloading,
    None is returned.
  """
  target_file = os.path.join(destination_dir, os.path.basename(source_path))
  gs_url = 'gs://%s/%s' % (bucket_name, source_path)
  try:
    if cloud_storage.Exists(bucket_name, source_path):
      logging.info('Fetching file from %s...', gs_url)
      cloud_storage.Get(bucket_name, source_path, target_file)
      if os.path.exists(target_file):
        return target_file
    else:
      logging.info('File %s not found in cloud storage.', gs_url)
      return None
  except Exception as e:
    # Deliberate best-effort behavior: any failure during the fetch is
    # logged and reported as "not downloaded" rather than propagated.
    # logging.warning: logging.warn is a deprecated alias.
    logging.warning('Exception while fetching from cloud storage: %s', e)
    if os.path.exists(target_file):
      os.remove(target_file)
  return None
def Unzip(file_path, output_dir, verbose=True):
  """Extracts a zip archive's contents into the given output directory.
  This was based on ExtractZip from build/scripts/common/chromium_utils.py.
  Args:
    file_path: Path of the zip file to extract.
    output_dir: Path to the destination directory.
    verbose: Whether to print out what is being extracted.
  Raises:
    IOError: The unzip command had a non-zero exit code.
    RuntimeError: Failed to create the output directory.
  """
  _MakeDirectory(output_dir)
  # On Linux and Mac, we use the unzip command because it handles links and
  # file permissions bits, so achieving this behavior is easier than with
  # ZipInfo options.
  #
  # The Mac Version of unzip unfortunately does not support Zip64, whereas
  # the python module does, so we have to fall back to the python zip module
  # on Mac if the file size is greater than 4GB.
  mac_zip_size_limit = 2 ** 32  # 4GB
  if (bisect_utils.IsLinuxHost() or
      (bisect_utils.IsMacHost()
       and os.path.getsize(file_path) < mac_zip_size_limit)):
    unzip_command = ['unzip', '-o']  # -o: overwrite existing files silently
    _UnzipUsingCommand(unzip_command, file_path, output_dir)
    return
  # On Windows, try to use 7z if it is installed, otherwise fall back to the
  # Python zipfile module. If 7z is not installed, then this may fail if the
  # zip file is larger than 512MB.
  sevenzip_path = r'C:\Program Files\7-Zip\7z.exe'
  if bisect_utils.IsWindowsHost() and os.path.exists(sevenzip_path):
    unzip_command = [sevenzip_path, 'x', '-y']  # x: extract, -y: assume yes
    _UnzipUsingCommand(unzip_command, file_path, output_dir)
    return
  # Last resort: pure-Python extraction (Windows without 7z, or huge Mac zips).
  _UnzipUsingZipFile(file_path, output_dir, verbose)
def _UnzipUsingCommand(unzip_command, file_path, output_dir):
  """Extracts a zip file using an external command.
  Args:
    unzip_command: An unzipping command, as a string list, without the filename.
    file_path: Path to the zip file.
    output_dir: The directory which the contents should be extracted to.
  Raises:
    IOError: The command had a non-zero exit code.
  """
  command = unzip_command + [os.path.abspath(file_path)]
  exit_code = _RunCommandInDirectory(output_dir, command)
  if exit_code:
    # A partial extraction is worse than none at all; clean up before failing.
    _RemoveDirectoryTree(output_dir)
    raise IOError('Unzip failed: %s => %s' % (str(command), exit_code))
def _RunCommandInDirectory(directory, command):
  """Changes to a directory, runs a command, then changes back.

  Args:
    directory: Directory to run the command in.
    command: Command as a string list, suitable for bisect_utils.RunProcess.

  Returns:
    The command's return code.
  """
  saved_dir = os.getcwd()
  os.chdir(directory)
  try:
    return bisect_utils.RunProcess(command)
  finally:
    # Restore the previous working directory even if RunProcess raises,
    # so callers are never left running in |directory|.
    os.chdir(saved_dir)
def _UnzipUsingZipFile(file_path, output_dir, verbose=True):
  """Extracts a zip file using the Python zipfile module."""
  # Only used on Windows (no 7z) and Mac (zips over the 4GB unzip limit).
  assert bisect_utils.IsWindowsHost() or bisect_utils.IsMacHost()
  zf = zipfile.ZipFile(file_path)
  for name in zf.namelist():
    if verbose:
      print 'Extracting %s' % name
    zf.extract(name, output_dir)
    if bisect_utils.IsMacHost():
      # Restore file permission bits.
      # zipfile.extract discards the Unix mode stored in the upper 16 bits
      # of external_attr, so re-apply it by hand.
      mode = zf.getinfo(name).external_attr >> 16
      os.chmod(os.path.join(output_dir, name), mode)
def _MakeDirectory(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _RemoveDirectoryTree(path):
try:
if os.path.exists(path):
shutil.rmtree(path)
except OSError, e:
if e.errno != errno.ENOENT:
raise
def Main(argv):
  """Downloads and extracts a build based on the command line arguments.

  Returns 0 on success and 1 when the requested build is not archived.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('builder_type')
  parser.add_argument('revision')
  parser.add_argument('output_dir')
  parser.add_argument('--target-arch', default='ia32')
  parser.add_argument('--target-platform', default='chromium')
  parser.add_argument('--deps-patch-sha')
  args = parser.parse_args(argv[1:])
  bucket_name, remote_path = GetBucketAndRemotePath(
      args.revision, args.builder_type, target_arch=args.target_arch,
      target_platform=args.target_platform,
      deps_patch_sha=args.deps_patch_sha)
  print 'Bucket name: %s, remote path: %s' % (bucket_name, remote_path)
  if not BuildIsAvailable(bucket_name, remote_path):
    print 'Build is not available.'
    return 1
  FetchFromCloudStorage(bucket_name, remote_path, args.output_dir)
  print 'Build has been downloaded to and extracted in %s.' % args.output_dir
  return 0
if __name__ == '__main__':
  sys.exit(Main(sys.argv))
| bsd-3-clause |
smashwilson/ansible-modules-extras | system/kernel_blacklist.py | 71 | 3796 | #!/usr/bin/python
# encoding: utf-8 -*-
# (c) 2013, Matthias Vogelgesang <matthias.vogelgesang@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import re
DOCUMENTATION = '''
---
module: kernel_blacklist
author: Matthias Vogelgesang
version_added: 1.4
short_description: Blacklist kernel modules
description:
- Add or remove kernel modules from blacklist.
options:
name:
required: true
description:
- Name of kernel module to black- or whitelist.
state:
required: false
default: "present"
choices: [ present, absent ]
description:
- Whether the module should be present in the blacklist or absent.
blacklist_file:
required: false
description:
- If specified, use this blacklist file instead of
C(/etc/modprobe.d/blacklist-ansible.conf).
default: null
requirements: []
'''
EXAMPLES = '''
# Blacklist the nouveau driver module
- kernel_blacklist: name=nouveau state=present
'''
class Blacklist(object):
    """Manage the blacklist entry for one kernel module in a modprobe.d file.

    `module` is the kernel module name; `filename` is the blacklist file
    the entries live in.
    """

    def __init__(self, module, filename):
        # Create the blacklist file up front so later reads and appends
        # cannot fail with a missing-file error.
        if not os.path.exists(filename):
            open(filename, 'a').close()
        self.filename = filename
        self.module = module

    def get_pattern(self):
        """Return the regex matching this module's blacklist line.

        The module name is escaped so characters such as '.' are matched
        literally, and at least one whitespace character is required after
        the 'blacklist' keyword (previously 'blacklist<name>' with no
        separator also matched).
        """
        return r'^blacklist\s+' + re.escape(self.module) + '$'

    def readlines(self):
        """Return all lines of the blacklist file."""
        with open(self.filename, 'r') as f:
            return f.readlines()

    def module_listed(self):
        """Return True if a non-comment line blacklists this module."""
        pattern = self.get_pattern()
        for line in self.readlines():
            stripped = line.strip()
            # Comment lines never count as a blacklist entry.
            if stripped.startswith('#'):
                continue
            if re.match(pattern, stripped):
                return True
        return False

    def remove_module(self):
        """Rewrite the file, dropping every line that blacklists the module."""
        lines = self.readlines()
        pattern = self.get_pattern()
        with open(self.filename, 'w') as f:
            for line in lines:
                if not re.match(pattern, line.strip()):
                    f.write(line)

    def add_module(self):
        """Append a blacklist entry for the module.

        Uses a context manager so the handle is closed (the original
        leaked the open file object).
        """
        with open(self.filename, 'a') as f:
            f.write('blacklist %s\n' % self.module)
def main():
    """Ansible entry point: ensure a kernel-module blacklist entry is
    present in or absent from the configured blacklist file."""
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=True),
            state=dict(required=False, choices=['present', 'absent'],
                       default='present'),
            blacklist_file=dict(required=False, default=None)
        ),
        supports_check_mode=False,
    )

    result = dict(changed=False, failed=False,
                  name=module.params['name'],
                  state=module.params['state'])

    # Fall back to the conventional modprobe.d location unless the caller
    # supplied an explicit blacklist file.
    filename = (module.params['blacklist_file']
                or '/etc/modprobe.d/blacklist-ansible.conf')

    blacklist = Blacklist(result['name'], filename)
    listed = blacklist.module_listed()

    if listed and result['state'] == 'absent':
        blacklist.remove_module()
        result['changed'] = True
    elif not listed and result['state'] == 'present':
        blacklist.add_module()
        result['changed'] = True

    module.exit_json(**result)
# import module snippets
# NOTE(review): the bottom-of-file star import is the Ansible module
# convention; AnsibleModule used by main() is expected to come from it.
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
darcyfdu/findlicense | src/commoncode/ignore.py | 4 | 12580 | #
# Copyright (c) 2015 nexB Inc. and others. All rights reserved.
# http://nexb.com and https://github.com/nexB/scancode-toolkit/
# The ScanCode software is licensed under the Apache License version 2.0.
# Data generated with ScanCode require an acknowledgment.
# ScanCode is a trademark of nexB Inc.
#
# You may not use this software except in compliance with the License.
# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# When you publish or redistribute any data created with ScanCode or any ScanCode
# derivative work, you must accompany this data with the following acknowledgment:
#
# Generated with ScanCode and provided on an "AS IS" BASIS, WITHOUT WARRANTIES
# OR CONDITIONS OF ANY KIND, either express or implied. No content created from
# ScanCode should be considered or used as legal advice. Consult an Attorney
# for any legal advice.
# ScanCode is a free software code scanning tool from nexB Inc. and others.
# Visit https://github.com/nexB/scancode-toolkit/ for support and download.
from __future__ import absolute_import, print_function
from commoncode import fileset
from commoncode import filetype
from itertools import chain
from commoncode import fileutils
"""
Support for ignoring some file patterns such as .git or .svn directories, used
typically when walking file systems.
Also handle .ignore-like file and provide common default ignores.
"""
def is_ignored(location, ignores, unignores, skip_special=True):
    """
    Return a tuple of (pattern , message) if a file at location is ignored
    or False otherwise.
    `ignores` and `unignores` are mappings of patterns to a reason.

    NOTE(review): when `skip_special` is set and `filetype.is_special()`
    reports the location as special, this returns the bare boolean True
    rather than a (pattern, message) tuple -- callers that unpack the
    result must handle that case.
    """
    if skip_special and filetype.is_special(location):
        return True
    return fileset.match(location, includes=ignores, excludes=unignores)
def is_ignore_file(location):
    """
    Return True if the location is an ignore file.
    """
    if not filetype.is_file(location):
        return False
    return fileutils.file_name(location) == '.scancodeignore'
def get_ignores(location, include_defaults=True):
    """
    Return a ignores and unignores patterns mappings loaded from the
    file at `location`. Optionally include defaults patterns
    """
    # Seed with the built-in defaults when requested; entries loaded from
    # the file override any default with the same pattern.
    ignores = dict(default_ignores) if include_defaults else {}
    patterns = fileset.load(location)
    included, excluded = fileset.includes_excludes(patterns, location)
    ignores.update(included)
    unignores = dict(excluded)
    return ignores, unignores
#
# Default ignores
#
# Default ignore patterns for MacOSX filesystem/DMG artifacts.
ignores_MacOSX = {
    '.DS_Store': 'Default ignore: MacOSX artifact',
    '._.DS_Store': 'Default ignore: MacOSX artifact',
    '__MACOSX': 'Default ignore: MacOSX artifact',
    '.AppleDouble': 'Default ignore: MacOSX artifact',
    '.LSOverride': 'Default ignore: MacOSX artifact',
    '.DocumentRevisions-V100': 'Default ignore: MacOSX artifact',
    '.fseventsd': 'Default ignore: MacOSX artifact',
    '.Spotlight-V100': 'Default ignore: MacOSX artifact',
    '.VolumeIcon.icns': 'Default ignore: MacOSX artifact',
    '.journal': 'Default ignore: MacOSX DMG/HFS+ artifact',
    '.journal_info_block': 'Default ignore: MacOSX DMG/HFS+ artifact',
    '.Trashes': 'Default ignore: MacOSX DMG/HFS+ artifact',
    # Raw string: the backslash-escaped brackets must survive literally
    # ('\[' is an invalid escape sequence in a plain Python 3 string).
    r'\[HFS+ Private Data\]': 'Default ignore: MacOSX DMG/HFS+ artifact private data',
}
ignores_Windows = {
'Thumbs.db': 'Default ignore: Windows artifact',
'ehthumbs.db': 'Default ignore: Windows artifact',
'Desktop.ini': 'Default ignore: Windows artifact',
'$RECYCLE.BIN': 'Default ignore: Windows artifact',
'*.lnk': 'Default ignore: Windows artifact',
'System Volume Information': 'Default ignore: Windows FS artifact',
'NTUSER.DAT*': 'Default ignore: Windows FS artifact',
}
ignores_Linux = {
'.directory': 'Default ignore: KDE artifact',
'.Trash-*': 'Default ignore: Linux/Gome/KDE artifact',
}
# Default ignore patterns for IDE and editor project artifacts.
ignores_IDEs = {
    '*.el': 'Default ignore: EMACS Elisp artifact',
    '*.swp': 'Default ignore: VIM artifact',
    '.project': 'Default ignore: Eclipse IDE artifact',
    '.pydevproject': 'Default ignore: Eclipse IDE artifact',
    '.settings': 'Default ignore: Eclipse IDE artifact',
    '.eclipse': 'Default ignore: Eclipse IDE artifact',
    '.loadpath': 'Default ignore: Eclipse IDE artifact',
    '*.launch': 'Default ignore: Eclipse IDE artifact',
    '.cproject': 'Default ignore: Eclipse IDE artifact',
    '.cdtproject': 'Default ignore: Eclipse IDE artifact',
    '.classpath': 'Default ignore: Eclipse IDE artifact',
    '.buildpath': 'Default ignore: Eclipse IDE artifact',
    '.texlipse': 'Default ignore: Eclipse IDE artifact',
    '*.iml': 'Default ignore: JetBrains IDE artifact',
    '*.ipr': 'Default ignore: JetBrains IDE artifact',
    '*.iws': 'Default ignore: JetBrains IDE artifact',
    '.idea/': 'Default ignore: JetBrains IDE artifact',
    '.idea_modules/': 'Default ignore: JetBrains IDE artifact',
    '*.kdev4': 'Default ignore: Kdevelop artifact',
    '.kdev4/': 'Default ignore: Kdevelop artifact',
    '*.nib': 'Default ignore: Apple Xcode artifact',
    '*.plst': 'Default ignore: Apple Xcode plist artifact',
    '*.pbxuser': 'Default ignore: Apple Xcode artifact',
    '*.pbxproj': 'Default ignore: Apple Xcode artifact',
    'xcuserdata': 'Default ignore: Apple Xcode artifact',
    '*.xcuserstate': 'Default ignore: Apple Xcode artifact',
    '*.csproj': 'Default ignore: Microsoft VS project artifact',
    '*.unityproj': 'Default ignore: Microsoft VS project artifact',
    '*.sln': 'Default ignore: Microsoft VS project artifact',
    '*.sluo': 'Default ignore: Microsoft VS project artifact',
    '*.suo': 'Default ignore: Microsoft VS project artifact',
    '*.user': 'Default ignore: Microsoft VS project artifact',
    '*.sln.docstates': 'Default ignore: Microsoft VS project artifact',
    '*.dsw': 'Default ignore: Microsoft VS project artifact',
    '.editorconfig': 'Default ignore: Editor config artifact',
    # Fixed: this key previously had a stray leading space
    # (' Leiningen.gitignore'), so it could never match a real file name.
    'Leiningen.gitignore': 'Default ignore: Leiningen artifact',
    '.architect': 'Default ignore: ExtJS artifact',
    '*.tmproj': 'Default ignore: Textmate artifact',
    '*.tmproject': 'Default ignore: Textmate artifact',
}
ignores_web = {
'.htaccess': 'Default ignore: .htaccess file',
'robots.txt': 'Default ignore: robots file',
'humans.txt': 'Default ignore: robots file',
'web.config': 'Default ignore: web config',
'.htaccess.sample': 'Default ignore: .htaccess file',
}
ignores_Maven = {
'pom.xml.tag': 'Default ignore: Maven artifact',
'pom.xml.releaseBackup': 'Default ignore: Maven artifact',
'pom.xml.versionsBackup': 'Default ignore: Maven artifact',
'pom.xml.next': 'Default ignore: Maven artifact',
'release.properties': 'Default ignore: Maven artifact',
'dependency-reduced-pom.xml': 'Default ignore: Maven artifact',
'buildNumber.properties': 'Default ignore: Maven artifact',
}
ignores_VCS = {
'.bzr': 'Default ignore: Bazaar artifact',
'.bzrignore' : 'Default ignore: Bazaar config artifact',
'.git': 'Default ignore: Git artifact',
'.gitignore' : 'Default ignore: Git config artifact',
'.gitattributes': 'Default ignore: Git config artifact',
'.hg': 'Default ignore: Mercurial artifact',
'.hgignore' : 'Default ignore: Mercurial config artifact',
'.repo': 'Default ignore: Multiple Git repository artifact',
'.svn': 'Default ignore: SVN artifact',
'.svnignore': 'Default ignore: SVN config artifact',
'.tfignore': 'Default ignore: Microsft TFS config artifact',
'vssver.scc': 'Default ignore: Visual Source Safe artifact',
'CVS': 'Default ignore: CVS artifact',
'.cvsignore': 'Default ignore: CVS config artifact',
'*/RCS': 'Default ignore: CVS artifact',
'*/SCCS': 'Default ignore: CVS artifact',
'*/_MTN': 'Default ignore: Monotone artifact',
'*/_darcs': 'Default ignore: Darcs artifact',
'*/{arch}': 'Default ignore: GNU Arch artifact',
}
ignores_Medias = {
'pspbrwse.jbf': 'Default ignore: Paintshop browse file',
'Thumbs.db': 'Default ignore: Image thumbnails DB',
'Thumbs.db:encryptable': 'Default ignore: Image thumbnails DB',
'thumbs/': 'Default ignore: Image thumbnails DB',
'_thumbs/': 'Default ignore: Image thumbnails DB',
}
# Default ignore patterns for autotools/CMake build-system artifacts.
ignores_Build_scripts = {
    'Makefile.in': 'Default ignore: automake artifact',
    'Makefile.am': 'Default ignore: automake artifact',
    'autom4te.cache': 'Default ignore: autoconf artifact',
    '*.m4': 'Default ignore: autotools artifact',
    'configure': 'Default ignore: Configure script',
    'configure.bat': 'Default ignore: Configure script',
    'configure.sh': 'Default ignore: Configure script',
    'configure.ac': 'Default ignore: Configure script',
    'config.guess': 'Default ignore: Configure script',
    'config.sub': 'Default ignore: Configure script',
    'compile': 'Default ignore: autoconf artifact',
    'depcomp': 'Default ignore: autoconf artifact',
    'ltmain.sh': 'Default ignore: libtool autoconf artifact',
    'install-sh': 'Default ignore: autoconf artifact',
    'missing': 'Default ignore: autoconf artifact',
    'mkinstalldirs': 'Default ignore: autoconf artifact',
    'stamp-h1': 'Default ignore: autoconf artifact',
    'm4/': 'Default ignore: autoconf artifact',
    # Fixed: the literal previously listed 'autogen.sh' twice.
    'autogen.sh': 'Default ignore: autotools artifact',
    'CMakeCache.txt': 'Default ignore: CMake artifact',
    'cmake_install.cmake': 'Default ignore: CMake artifact',
    'install_manifest.txt': 'Default ignore: CMake artifact',
}
ignores_CI = {
'.travis.yml' : 'Default ignore: Travis config',
'.coveragerc' : 'Default ignore: Coverall config',
}
ignores_Python = {
'pip-selfcheck.json': 'Default ignore: Pip workfile',
'pytest.ini': 'Default ignore: Python pytest config',
'tox.ini': 'Default ignore: Python tox config',
'__pycache__/': 'Default ignore: Python bytecode cache',
'.installed.cfg': 'Default ignore: Python Buildout artifact',
'pip-log.txt': 'Default ignore: Python pip artifact',
'pip-delete-this-directory.txt': 'Default ignore: Python pip artifact',
'pyvenv.cfg': 'Default ignore: Python virtualenv artifact',
}
ignores_I18N = {
'*.mo': 'Default ignore: Translation file',
'*.pot': 'Default ignore: Translation file',
'.localized': 'Default ignore: localized file',
}
ignores_coverage_and_tests = {
'*.gcno': 'Default ignore: GCC coverage',
'*.gcda': 'Default ignore: GCC coverage',
'*.gcov': 'Default ignore: GCC coverage',
'.last_cover_stats': 'Default ignore: Perl coverage',
'htmlcov/': 'Default ignore: Python coverage',
'.tox/': 'Default ignore: Tox tem dir',
'.coverage': 'Default ignore: Python coverage',
'.coverage.*': 'Default ignore: Python coverage',
'nosetests.xml': 'Default ignore: Python nose tests',
'coverage.xml': 'Default ignore: Python coverage',
'/spec/reports/': 'Default ignore: Ruby Rails test report',
'/rdoc/': 'Default ignore: Ruby doc',
'.rvmrc': 'Default ignore: Ruby RVM',
'.sass-cache': 'Default ignore: Saas cache',
'*.css.map': 'Default ignore: Saas map',
'phpunit.xml': 'Default ignore: phpunit',
'*.VisualState.xml': 'Default ignore: Nunit',
'TestResult.xml': 'Default ignore: Nunit',
}
ignores_Misc = {
'pax_global_header': 'Default ignore: Pax header file',
'C++.gitignore': 'Default ignore: C++.gitignore',
'.gwt/': 'Default ignore: GWT compilation logs',
'.gwt-tmp/': 'Default ignore: GWT temp files',
'gradle-app.setting': 'Default ignore: Graddle app settings',
'hs_err_pid*': 'Default ignore: Java VM crash logs',
'.grunt': 'Default ignore: Grunt intermediate storage',
'.history': 'Default ignore: History file',
'.~lock.*#': 'Default ignore: LibreOffice locks',
'/.ssh': 'Default ignore: SSH configuration',
}
# Merged mapping of every default ignore pattern, built with a plain loop
# instead of update(chain(*[d.items() ...])). Later groups win on key
# collisions (e.g. 'Thumbs.db' appears in both the Windows and Medias
# groups). Fixed: ignores_Build_scripts was previously listed twice.
default_ignores = {}
for _ignore_group in (
    ignores_MacOSX,
    ignores_Windows,
    ignores_Linux,
    ignores_IDEs,
    ignores_web,
    ignores_Maven,
    ignores_VCS,
    ignores_Medias,
    ignores_Build_scripts,
    ignores_CI,
    ignores_Python,
    ignores_I18N,
    ignores_coverage_and_tests,
    ignores_Misc,
):
    default_ignores.update(_ignore_group)
del _ignore_group
| apache-2.0 |
nikhilsaraf/Twitter-Analytics | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/hebrewprober.py | 2929 | 13359 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Shy Shalom
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .constants import eNotMe, eDetecting
from .compat import wrap_ord
# This prober doesn't actually recognize a language or a charset.
# It is a helper prober for the use of the Hebrew model probers
### General ideas of the Hebrew charset recognition ###
#
# Four main charsets exist in Hebrew:
# "ISO-8859-8" - Visual Hebrew
# "windows-1255" - Logical Hebrew
# "ISO-8859-8-I" - Logical Hebrew
# "x-mac-hebrew" - ?? Logical Hebrew ??
#
# Both "ISO" charsets use a completely identical set of code points, whereas
# "windows-1255" and "x-mac-hebrew" are two different proper supersets of
# these code points. windows-1255 defines additional characters in the range
# 0x80-0x9F as some misc punctuation marks as well as some Hebrew-specific
# diacritics and additional 'Yiddish' ligature letters in the range 0xc0-0xd6.
# x-mac-hebrew defines similar additional code points but with a different
# mapping.
#
# As far as an average Hebrew text with no diacritics is concerned, all four
# charsets are identical with respect to code points. Meaning that for the
# main Hebrew alphabet, all four map the same values to all 27 Hebrew letters
# (including final letters).
#
# The dominant difference between these charsets is their directionality.
# "Visual" directionality means that the text is ordered as if the renderer is
# not aware of a BIDI rendering algorithm. The renderer sees the text and
# draws it from left to right. The text itself when ordered naturally is read
# backwards. A buffer of Visual Hebrew generally looks like so:
# "[last word of first line spelled backwards] [whole line ordered backwards
# and spelled backwards] [first word of first line spelled backwards]
# [end of line] [last word of second line] ... etc' "
# adding punctuation marks, numbers and English text to visual text is
# naturally also "visual" and from left to right.
#
# "Logical" directionality means the text is ordered "naturally" according to
# the order it is read. It is the responsibility of the renderer to display
# the text from right to left. A BIDI algorithm is used to place general
# punctuation marks, numbers and English text in the text.
#
# Texts in x-mac-hebrew are almost impossible to find on the Internet. From
# what little evidence I could find, it seems that its general directionality
# is Logical.
#
# To sum up all of the above, the Hebrew probing mechanism knows about two
# charsets:
# Visual Hebrew - "ISO-8859-8" - backwards text - Words and sentences are
# backwards while line order is natural. For charset recognition purposes
# the line order is unimportant (In fact, for this implementation, even
# word order is unimportant).
# Logical Hebrew - "windows-1255" - normal, naturally ordered text.
#
# "ISO-8859-8-I" is a subset of windows-1255 and doesn't need to be
# specifically identified.
# "x-mac-hebrew" is also identified as windows-1255. A text in x-mac-hebrew
# that contain special punctuation marks or diacritics is displayed with
# some unconverted characters showing as question marks. This problem might
# be corrected using another model prober for x-mac-hebrew. Due to the fact
# that x-mac-hebrew texts are so rare, writing another model prober isn't
# worth the effort and performance hit.
#
#### The Prober ####
#
# The prober is divided between two SBCharSetProbers and a HebrewProber,
# all of which are managed, created, fed data, inquired and deleted by the
# SBCSGroupProber. The two SBCharSetProbers identify that the text is in
# fact some kind of Hebrew, Logical or Visual. The final decision about which
# one is it is made by the HebrewProber by combining final-letter scores
# with the scores of the two SBCharSetProbers to produce a final answer.
#
# The SBCSGroupProber is responsible for stripping the original text of HTML
# tags, English characters, numbers, low-ASCII punctuation characters, spaces
# and new lines. It reduces any sequence of such characters to a single space.
# The buffer fed to each prober in the SBCS group prober is pure text in
# high-ASCII.
# The two SBCharSetProbers (model probers) share the same language model:
# Win1255Model.
# The first SBCharSetProber uses the model normally as any other
# SBCharSetProber does, to recognize windows-1255, upon which this model was
# built. The second SBCharSetProber is told to make the pair-of-letter
# lookup in the language model backwards. This in practice exactly simulates
# a visual Hebrew model using the windows-1255 logical Hebrew model.
#
# The HebrewProber is not using any language model. All it does is look for
# final-letter evidence suggesting the text is either logical Hebrew or visual
# Hebrew. Disjointed from the model probers, the results of the HebrewProber
# alone are meaningless. HebrewProber always returns 0.00 as confidence
# since it never identifies a charset by itself. Instead, the pointer to the
# HebrewProber is passed to the model probers as a helper "Name Prober".
# When the Group prober receives a positive identification from any prober,
# it asks for the name of the charset identified. If the prober queried is a
# Hebrew model prober, the model prober forwards the call to the
# HebrewProber to make the final decision. In the HebrewProber, the
# decision is made according to the final-letters scores maintained and Both
# model probers scores. The answer is returned in the form of the name of the
# charset identified, either "windows-1255" or "ISO-8859-8".
# windows-1255 / ISO-8859-8 code points of interest
FINAL_KAF = 0xea
NORMAL_KAF = 0xeb
FINAL_MEM = 0xed
NORMAL_MEM = 0xee
FINAL_NUN = 0xef
NORMAL_NUN = 0xf0
FINAL_PE = 0xf3
NORMAL_PE = 0xf4
FINAL_TSADI = 0xf5
NORMAL_TSADI = 0xf6
# Minimum Visual vs Logical final letter score difference.
# If the difference is below this, don't rely solely on the final letter score
# distance.
MIN_FINAL_CHAR_DISTANCE = 5
# Minimum Visual vs Logical model score difference.
# If the difference is below this, don't rely at all on the model score
# distance.
MIN_MODEL_DISTANCE = 0.01
VISUAL_HEBREW_NAME = "ISO-8859-8"
LOGICAL_HEBREW_NAME = "windows-1255"
class HebrewProber(CharSetProber):
    """Helper prober that decides between Logical (windows-1255) and Visual
    (ISO-8859-8) Hebrew by scoring final-letter positions in words.

    It never identifies a charset by itself (confidence stays 0.00); the
    two windows-1255 model probers forward the final decision here. See
    the long module comment above for the full design rationale.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # Set by set_model_probers(); owned by the group prober.
        self._mLogicalProber = None
        self._mVisualProber = None
        self.reset()

    def reset(self):
        """Reset the per-buffer scoring state."""
        self._mFinalCharLogicalScore = 0
        self._mFinalCharVisualScore = 0
        # The two last characters seen in the previous buffer,
        # mPrev and mBeforePrev are initialized to space in order to simulate
        # a word delimiter at the beginning of the data
        self._mPrev = ' '
        self._mBeforePrev = ' '

    # These probers are owned by the group prober.
    def set_model_probers(self, logicalProber, visualProber):
        """Register the normal and reverse windows-1255 model probers whose
        confidences are consulted in get_charset_name()."""
        self._mLogicalProber = logicalProber
        self._mVisualProber = visualProber

    def is_final(self, c):
        """Return True if `c` is one of the five Hebrew final letters."""
        return wrap_ord(c) in [FINAL_KAF, FINAL_MEM, FINAL_NUN, FINAL_PE,
                               FINAL_TSADI]

    def is_non_final(self, c):
        """Return True if `c` is a non-final letter that rarely ends a word
        (Tsadi is deliberately excluded -- see below)."""
        # The normal Tsadi is not a good Non-Final letter due to words like
        # 'lechotet' (to chat) containing an apostrophe after the tsadi. This
        # apostrophe is converted to a space in FilterWithoutEnglishLetters
        # causing the Non-Final tsadi to appear at an end of a word even
        # though this is not the case in the original text.
        # The letters Pe and Kaf rarely display a related behavior of not being
        # a good Non-Final letter. Words like 'Pop', 'Winamp' and 'Mubarak'
        # for example legally end with a Non-Final Pe or Kaf. However, the
        # benefit of these letters as Non-Final letters outweighs the damage
        # since these words are quite rare.
        return wrap_ord(c) in [NORMAL_KAF, NORMAL_MEM, NORMAL_NUN, NORMAL_PE]

    def feed(self, aBuf):
        """Accumulate logical/visual final-letter evidence from `aBuf` and
        return the detection state (eDetecting or eNotMe)."""
        # Final letter analysis for logical-visual decision.
        # Look for evidence that the received buffer is either logical Hebrew
        # or visual Hebrew.
        # The following cases are checked:
        # 1) A word longer than 1 letter, ending with a final letter. This is
        #    an indication that the text is laid out "naturally" since the
        #    final letter really appears at the end. +1 for logical score.
        # 2) A word longer than 1 letter, ending with a Non-Final letter. In
        #    normal Hebrew, words ending with Kaf, Mem, Nun, Pe or Tsadi,
        #    should not end with the Non-Final form of that letter. Exceptions
        #    to this rule are mentioned above in isNonFinal(). This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score
        # 3) A word longer than 1 letter, starting with a final letter. Final
        #    letters should not appear at the beginning of a word. This is an
        #    indication that the text is laid out backwards. +1 for visual
        #    score.
        #
        # The visual score and logical score are accumulated throughout the
        # text and are finally checked against each other in GetCharSetName().
        # No checking for final letters in the middle of words is done since
        # that case is not an indication for either Logical or Visual text.
        #
        # We automatically filter out all 7-bit characters (replace them with
        # spaces) so the word boundary detection works properly. [MAP]
        if self.get_state() == eNotMe:
            # Both model probers say it's not them. No reason to continue.
            return eNotMe
        aBuf = self.filter_high_bit_only(aBuf)
        for cur in aBuf:
            if cur == ' ':
                # We stand on a space - a word just ended
                if self._mBeforePrev != ' ':
                    # next-to-last char was not a space so self._mPrev is not a
                    # 1 letter word
                    if self.is_final(self._mPrev):
                        # case (1) [-2:not space][-1:final letter][cur:space]
                        self._mFinalCharLogicalScore += 1
                    elif self.is_non_final(self._mPrev):
                        # case (2) [-2:not space][-1:Non-Final letter][
                        # cur:space]
                        self._mFinalCharVisualScore += 1
            else:
                # Not standing on a space
                if ((self._mBeforePrev == ' ') and
                        (self.is_final(self._mPrev)) and (cur != ' ')):
                    # case (3) [-2:space][-1:final letter][cur:not space]
                    self._mFinalCharVisualScore += 1
            self._mBeforePrev = self._mPrev
            self._mPrev = cur
        # Forever detecting, till the end or until both model probers return
        # eNotMe (handled above)
        return eDetecting

    def get_charset_name(self):
        """Return the final verdict, LOGICAL_HEBREW_NAME or
        VISUAL_HEBREW_NAME, combining final-letter and model scores."""
        # Make the decision: is it Logical or Visual?
        # If the final letter score distance is dominant enough, rely on it.
        finalsub = self._mFinalCharLogicalScore - self._mFinalCharVisualScore
        if finalsub >= MIN_FINAL_CHAR_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if finalsub <= -MIN_FINAL_CHAR_DISTANCE:
            return VISUAL_HEBREW_NAME
        # It's not dominant enough, try to rely on the model scores instead.
        modelsub = (self._mLogicalProber.get_confidence()
                    - self._mVisualProber.get_confidence())
        if modelsub > MIN_MODEL_DISTANCE:
            return LOGICAL_HEBREW_NAME
        if modelsub < -MIN_MODEL_DISTANCE:
            return VISUAL_HEBREW_NAME
        # Still no good, back to final letter distance, maybe it'll save the
        # day.
        if finalsub < 0.0:
            return VISUAL_HEBREW_NAME
        # (finalsub > 0 - Logical) or (don't know what to do) default to
        # Logical.
        return LOGICAL_HEBREW_NAME

    def get_state(self):
        """Return eDetecting while either model prober is still active,
        eNotMe once both have given up."""
        # Remain active as long as any of the model probers are active.
        if (self._mLogicalProber.get_state() == eNotMe) and \
           (self._mVisualProber.get_state() == eNotMe):
            return eNotMe
        return eDetecting
| gpl-3.0 |
hectord/lettuce | tests/integration/lib/Django-1.3/django/core/management/validation.py | 103 | 19729 | import sys
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
from django.core.management.color import color_style
from django.utils.itercompat import is_iterable
try:
any
except NameError:
from django.utils.itercompat import any
class ModelErrorCollection:
    """Accumulate model-validation errors and echo each one, styled, to
    the given output stream as it is recorded."""

    def __init__(self, outfile=sys.stdout):
        self.errors = []
        self.outfile = outfile
        self.style = color_style()

    def add(self, context, error):
        """Record (context, error) and write a styled line to the output."""
        message = "%s: %s\n" % (context, error)
        self.errors.append((context, error))
        self.outfile.write(self.style.ERROR(message))
def get_validation_errors(outfile, app=None):
"""
Validates all models that are part of the specified app. If no app name is provided,
validates all models of all installed apps. Writes errors, if any, to outfile.
Returns number of errors.
"""
from django.conf import settings
from django.db import models, connection
from django.db.models.loading import get_app_errors
from django.db.models.fields.related import RelatedObject
from django.db.models.deletion import SET_NULL, SET_DEFAULT
e = ModelErrorCollection(outfile)
for (app_name, error) in get_app_errors().items():
e.add(app_name, error)
for cls in models.get_models(app):
opts = cls._meta
# Do field-specific validation.
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
e.add(opts, '"%s": Field names cannot end with underscores, because this would lead to ambiguous queryset filters.' % f.name)
if isinstance(f, models.CharField):
try:
max_length = int(f.max_length)
if max_length <= 0:
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
except (ValueError, TypeError):
e.add(opts, '"%s": CharFields require a "max_length" attribute that is a positive integer.' % f.name)
if isinstance(f, models.DecimalField):
decimalp_ok, mdigits_ok = False, False
decimalp_msg ='"%s": DecimalFields require a "decimal_places" attribute that is a non-negative integer.'
try:
decimal_places = int(f.decimal_places)
if decimal_places < 0:
e.add(opts, decimalp_msg % f.name)
else:
decimalp_ok = True
except (ValueError, TypeError):
e.add(opts, decimalp_msg % f.name)
mdigits_msg = '"%s": DecimalFields require a "max_digits" attribute that is a positive integer.'
try:
max_digits = int(f.max_digits)
if max_digits <= 0:
e.add(opts, mdigits_msg % f.name)
else:
mdigits_ok = True
except (ValueError, TypeError):
e.add(opts, mdigits_msg % f.name)
invalid_values_msg = '"%s": DecimalFields require a "max_digits" attribute value that is greater than the value of the "decimal_places" attribute.'
if decimalp_ok and mdigits_ok:
if decimal_places >= max_digits:
e.add(opts, invalid_values_msg % f.name)
if isinstance(f, models.FileField) and not f.upload_to:
e.add(opts, '"%s": FileFields require an "upload_to" attribute.' % f.name)
if isinstance(f, models.ImageField):
# Try to import PIL in either of the two ways it can end up installed.
try:
from PIL import Image
except ImportError:
try:
import Image
except ImportError:
e.add(opts, '"%s": To use ImageFields, you need to install the Python Imaging Library. Get it at http://www.pythonware.com/products/pil/ .' % f.name)
if isinstance(f, models.BooleanField) and getattr(f, 'null', False):
e.add(opts, '"%s": BooleanFields do not accept null values. Use a NullBooleanField instead.' % f.name)
if f.choices:
if isinstance(f.choices, basestring) or not is_iterable(f.choices):
e.add(opts, '"%s": "choices" should be iterable (e.g., a tuple or list).' % f.name)
else:
for c in f.choices:
if not isinstance(c, (list, tuple)) or len(c) != 2:
e.add(opts, '"%s": "choices" should be a sequence of two-tuples.' % f.name)
if f.db_index not in (None, True, False):
e.add(opts, '"%s": "db_index" should be either None, True or False.' % f.name)
# Perform any backend-specific field validation.
connection.validation.validate_field(e, opts, f)
# Check if the on_delete behavior is sane
if f.rel and hasattr(f.rel, 'on_delete'):
if f.rel.on_delete == SET_NULL and not f.null:
e.add(opts, "'%s' specifies on_delete=SET_NULL, but cannot be null." % f.name)
elif f.rel.on_delete == SET_DEFAULT and not f.has_default():
e.add(opts, "'%s' specifies on_delete=SET_DEFAULT, but has no default value." % f.name)
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has a relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Make sure the related field specified by a ForeignKey is unique
if not f.rel.to._meta.get_field(f.rel.field_name).unique:
e.add(opts, "Field '%s' under model '%s' must have a unique=True constraint." % (f.rel.field_name, f.rel.to.__name__))
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
if not f.rel.is_hidden():
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
seen_intermediary_signatures = []
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has an m2m relation with model %s, which has either not been installed or is abstract." % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
# so skip the next section
if isinstance(f.rel.to, (str, unicode)):
continue
# Check that the field is not set to unique. ManyToManyFields do not support unique.
if f.unique:
e.add(opts, "ManyToManyFields cannot be unique. Remove the unique argument on '%s'." % f.name)
if f.rel.through is not None and not isinstance(f.rel.through, basestring):
from_model, to_model = cls, f.rel.to
if from_model == to_model and f.rel.symmetrical and not f.rel.through._meta.auto_created:
e.add(opts, "Many-to-many fields with intermediate tables cannot be symmetrical.")
seen_from, seen_to, seen_self = False, False, 0
for inter_field in f.rel.through._meta.fields:
rel_to = getattr(inter_field.rel, 'to', None)
if from_model == to_model: # relation to self
if rel_to == from_model:
seen_self += 1
if seen_self > 2:
e.add(opts, "Intermediary model %s has more than "
"two foreign keys to %s, which is ambiguous "
"and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
if rel_to == from_model:
if seen_from:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
from_model._meta.object_name
)
)
else:
seen_from = True
elif rel_to == to_model:
if seen_to:
e.add(opts, "Intermediary model %s has more "
"than one foreign key to %s, which is "
"ambiguous and is not permitted." % (
f.rel.through._meta.object_name,
rel_to._meta.object_name
)
)
else:
seen_to = True
if f.rel.through not in models.get_models(include_auto_created=True):
e.add(opts, "'%s' specifies an m2m relation through model "
"%s, which has not been installed." % (f.name, f.rel.through)
)
signature = (f.rel.to, cls, f.rel.through)
if signature in seen_intermediary_signatures:
e.add(opts, "The model %s has two manually-defined m2m "
"relations through the model %s, which is not "
"permitted. Please consider using an extra field on "
"your intermediary model instead." % (
cls._meta.object_name,
f.rel.through._meta.object_name
)
)
else:
seen_intermediary_signatures.append(signature)
if not f.rel.through._meta.auto_created:
seen_related_fk, seen_this_fk = False, False
for field in f.rel.through._meta.fields:
if field.rel:
if not seen_related_fk and field.rel.to == f.rel.to:
seen_related_fk = True
elif field.rel.to == cls:
seen_this_fk = True
if not seen_related_fk or not seen_this_fk:
e.add(opts, "'%s' is a manually-defined m2m relation "
"through model %s, which does not have foreign keys "
"to %s and %s" % (f.name, f.rel.through._meta.object_name,
f.rel.to._meta.object_name, cls._meta.object_name)
)
elif isinstance(f.rel.through, basestring):
e.add(opts, "'%s' specifies an m2m relation through model %s, "
"which has not been installed" % (f.name, f.rel.through)
)
elif isinstance(f, GenericRelation):
if not any([isinstance(vfield, GenericForeignKey) for vfield in f.rel.to._meta.virtual_fields]):
e.add(opts, "Model '%s' must have a GenericForeignKey in "
"order to create a GenericRelation that points to it."
% f.rel.to.__name__
)
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.get_all_related_many_to_many_objects():
if r.field is not f:
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for r in rel_opts.get_all_related_objects():
if r.get_accessor_name() == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
# Check ordering attribute.
if opts.ordering:
for field_name in opts.ordering:
if field_name == '?': continue
if field_name.startswith('-'):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '__' in field_name:
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
e.add(opts, '"ordering" refers to "%s", a field that doesn\'t exist.' % field_name)
# Check unique_together.
for ut in opts.unique_together:
for field_name in ut:
try:
f = opts.get_field(field_name, many_to_many=True)
except models.FieldDoesNotExist:
e.add(opts, '"unique_together" refers to %s, a field that doesn\'t exist. Check your syntax.' % field_name)
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
| gpl-3.0 |
flavour/rgims_as_diff | modules/s3/pyvttbl/dictset.py | 11 | 22087 | # Copyright (c) 2011, Roger Lew [see LICENSE.txt]
# This software is funded in part by NIH Grant P20 RR016454.
"""This module contains the DictSet class"""
# Python 2 to 3 workarounds
import sys
if sys.version_info[0] == 2:
_xrange = xrange
elif sys.version_info[0] == 3:
from functools import reduce
_xrange = range
from copy import copy, deepcopy
import collections
# for unique_combinations method
def _rep_generator(A, times, each):
    """Like R's ``rep`` function, but returns a generator.

    The whole sequence ``A`` is produced ``times`` times over, with every
    element repeated ``each`` times consecutively.

    Examples:
      >>> g=_rep_generator([1,2,3],times=1,each=3)
      >>> [v for v in g]
      [1, 1, 1, 2, 2, 2, 3, 3, 3]

      >>> g=_rep_generator([1,2,3],times=3,each=1)
      >>> [v for v in g]
      [1, 2, 3, 1, 2, 3, 1, 2, 3]

      >>> g=_rep_generator([1,2,3],times=2,each=2)
      >>> [v for v in g]
      [1, 1, 2, 2, 3, 3, 1, 1, 2, 2, 3, 3]
    """
    for _pass in _xrange(times):
        for element in A:
            for _dup in _xrange(each):
                yield element
class DictSet(dict):
    """A dictionary of sets that behaves like a set.

    Values are always stored as ``set`` objects; the usual set operators
    (| & - ^ and their comparisons) are applied key-wise across the whole
    mapping.  Keys whose set is empty are treated as absent for the
    purposes of iteration, membership and equality.
    """

    def __init__(*args, **kwds): # args[0] -> 'self'
        """
        DictSet() -> new empty dictionary of sets
        DictSet(mapping) -> new dictionary of sets initialized from a
            mapping object's (key, value) pairs.
            Because the values become sets they must be iterable
        DictSet(iterable) -> new dictionary of sets initialized as if via:
            d = DictSet()
            for k, v in iterable:
                d[k] = set(v)
        DictSet(**kwargs) -> new dictionary of sets initialized with the
            name=value pairs in the keyword argument list.
            For example: DictSet(one=[1], two=[2])
        """
        # self is taken through *args so that 'self' remains usable as an
        # ordinary keyword argument, e.g. DictSet(self='abc', other='efg')

        # call update or complain about having too many arguments
        if len(args) == 1:
            args[0].update({}, **kwds)
        elif len(args) == 2:
            args[0].update(args[1], **kwds)
        elif len(args) > 2:
            raise TypeError(
                'DictSet expected at most 1 arguments, got %d' % (len(args) - 1))

    def update(*args, **kwds): # args[0] -> 'self'
        """
        DS.update(E, **F) -> None.

        Update DS from the union of DictSet/dict/iterable E and F.

        If E has a .keys() method, does:
            for k in E:
                DS[k] |= set(E[k])

        If E lacks .keys() method, does:
            for (k, v) in E:
                DS[k] |= set(v)

        In either case, this is followed by:
            for k in F:
                DS[k] |= set(F[k])

        DS|=E <==> DS.update(E)
        """
        # collections.Hashable became collections.abc.Hashable and the old
        # alias was removed in Python 3.10; fall back for Python 2.
        try:
            from collections.abc import Hashable
        except ImportError: # Python 2
            from collections import Hashable

        # check the length of args
        if len(args) > 2:
            raise TypeError(
                'DictSet expected at most 1 arguments, got %d' % (len(args) - 1))

        # Make sure args can be mapped to a DictSet before
        # we start adding them.
        elif len(args) == 2:
            obj = args[1]

            # if obj is a DictSet we can avoid checking
            # to make sure it is hashable and iterable
            if type(obj) == DictSet:
                pass

            # Check using duck typing
            elif hasattr(obj, '__getitem__'):
                # obj is dict or dict subclass
                if hasattr(obj, 'keys'):
                    for k, val in obj.items():
                        if not isinstance(k, Hashable):
                            raise TypeError(
                                "unhashable type: '%s'" % type(k).__name__)
                        if not hasattr(val, '__iter__'):
                            # Python 2 strings lack __iter__ but are
                            # iterable, so they are explicitly allowed
                            if not isinstance(val, str):
                                raise TypeError(
                                    "'%s' object is not iterable" % type(val).__name__)

                # obj is list/tuple or list/tuple subclass
                else:
                    for item in obj:
                        try:
                            (k, val) = item
                        except:
                            # deliberately broad: any failure to unpack a
                            # pair is reported uniformly
                            raise TypeError(
                                'could not unpack arg to key/value pairs')
                        if not isinstance(k, Hashable):
                            raise TypeError(
                                "unhashable type: '%s'" % type(k).__name__)
                        if not hasattr(val, '__iter__'):
                            if not isinstance(val, str):
                                raise TypeError(
                                    "'%s' object is not iterable" % type(val).__name__)

            # obj is not iterable, e.g. an int, float, etc.
            else:
                raise TypeError(
                    "'%s' object is not iterable" % type(obj).__name__)

        # check the keyword arguments
        for (k, val) in kwds.items():
            # unhashable keyword arguments can't make it to this point,
            # so we just need to check that the values are iterable
            if not hasattr(val, '__iter__'):
                if not isinstance(val, str):
                    raise TypeError(
                        "'%s' object is not iterable" % type(val).__name__)

        # At this point we can be fairly certain the args and kwds
        # will successfully initialize. Now we can go back through
        # args and kwds and add them to self
        if len(args) == 2:
            obj = args[1]

            # obj is dict or dict subclass
            if hasattr(obj, 'keys'):
                for k, val in obj.items():
                    if k not in args[0].keys():
                        args[0][k] = set(val)
                    args[0][k] |= set(val)

            # obj is list/tuple or list/tuple subclass
            else:
                for item in obj:
                    (k, val) = item
                    if k not in args[0].keys():
                        args[0][k] = set(val)
                    args[0][k] |= set(val)

        # Now add keyword arguments
        for (k, val) in kwds.items():
            if k not in args[0].keys():
                args[0][k] = set(val)
            args[0][k] |= set(val)

    def __ior__(self, E): # overloads |=
        """
        DS|=E <==> DS.update(E)

        NOTE(review): returns self.union(E) (a new DictSet) rather than
        mutating in place; 'DS |= E' rebinds the name, so other references
        to the old object are unaffected.
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        return self.union(E)

    def __eq__(self, E): # overloads ==
        """
        Returns the equality comparison of DS with E typed
        as a DictSet. If E cannot be broadcast into a DictSet
        returns False.

        DS==E <==> DS.__eq__(E)
        """
        # Fails if E is not mappable with iterable values
        try:
            E = DictSet(E)
        except:
            return False

        # check to see if self and E have the same (non-empty) keys;
        # if they don't we know they aren't equal and can return False
        if len(set(k for (k, v) in self.items() if len(v) != 0) ^
               set(k for (k, v) in E.items() if len(v) != 0)) > 0:
            return False

        # at this point we know they have the same keys;
        # if all the symmetric differences have 0 cardinality
        # the sets are equal
        s = 0
        for k in self.keys():
            s += len(self.get(k, []) ^ E.get(k, []))

        return s == 0

    def __ne__(self, E): # overloads !=
        """
        Returns the non-equality comparison of DS with E typed
        as a DictSet. If E cannot be broadcast into a DictSet
        returns True.

        DS!=E <==> DS.__ne__(E)
        """
        # Fails if E is not mappable with iterable values
        try:
            E = DictSet(E)
        except:
            return True

        # check to see if self and E have the same (non-empty) keys;
        # if they don't we know they aren't equal
        if len(set(k for (k, v) in self.items() if len(v) != 0) ^
               set(k for (k, v) in E.items() if len(v) != 0)) > 0:
            return True

        # at this point we know they have the same keys;
        # if all the symmetric differences have 0 cardinality
        # the sets are equal
        s = 0
        for k in self.keys():
            s += len(self.get(k, []) ^ E.get(k, []))

        return s != 0

    def issubset(self, E):
        """
        Report whether all the sets of this DictSet are subsets of the E.

        DS<=E <==> DS.issubset(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        if self == E == {}:
            return True

        b = True
        for k in set(self) | set(E):
            if not self.get(k, []) <= E.get(k, []):
                b = False

        return b

    def __le__(self, E): # overloads <=
        """
        Report whether all the sets of this DictSet are subsets of the E.

        DS<=E <==> DS.issubset(E)
        """
        return self.issubset(E)

    def issuperset(self, E):
        """
        Report whether all the sets of this DictSet are supersets of the E.

        DS>=E <==> DS.issuperset(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        if self == E == {}:
            return True

        b = True
        for k in set(self) | set(E):
            if not self.get(k, []) >= E.get(k, []):
                b = False

        return b

    def __ge__(self, E): # overloads >=
        """
        Report whether all the sets of this DictSet are supersets of the E.

        DS>=E <==> DS.issuperset(E)
        """
        return self.issuperset(E)

    def union(self, E):
        """
        Return the union of the sets of self with the sets of E.
        (i.e. all elements that are in either sets of the DictSets.)

        DS|E <==> DS.union(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        foo = deepcopy(self)
        for k in set(foo.keys()) | set(E.keys()):
            foo.setdefault(k, [])
            foo[k].update(E.get(k, []))
            if not foo[k]:
                del foo[k] # delete if empty set

        return foo

    def __or__(self, E): # overloads |
        """
        Return the union of the sets of self with the sets of E.
        (i.e. all elements that are in either sets of the DictSets.)

        DS|E <==> DS.union(E)
        """
        return self.union(E)

    def intersection(self, E):
        """
        Return the intersection of the sets of self with the sets of E.
        (i.e. elements that are common to all of the sets of the
        DictSets.)

        DS&E <==> DS.intersection(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        # handle case where E=={}
        if E == {}:
            return DictSet()

        foo = deepcopy(self)
        for k in set(foo.keys()) | set(E.keys()):
            foo.setdefault(k, [])
            foo[k].intersection_update(E.get(k, []))
            if not foo[k]:
                del foo[k] # delete if empty set

        return foo

    def __and__(self, E): # overloads &
        """
        Return the intersection of the sets of self with the sets of E.
        (i.e. elements that are common to all of the sets of the
        DictSets.)

        DS&E <==> DS.intersection(E)
        """
        return self.intersection(E)

    def difference(self, E):
        """
        Return the difference of the sets of self with the sets of E.
        (i.e. all elements that are in the sets of this DictSet but
        not the others.)

        DS-E <==> DS.difference(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        foo = deepcopy(self)
        for k in set(foo.keys()) | set(E.keys()):
            foo.setdefault(k, [])
            foo[k].difference_update(E.get(k, []))
            if not foo[k]:
                del foo[k] # delete if empty set

        return foo

    def __sub__(self, E): # overloads -
        """
        Return the difference of the sets of self with the sets of E.
        (i.e. all elements that are in the sets of this DictSet but
        not the others.)

        DS-E <==> DS.difference(E)
        """
        return self.difference(E)

    def symmetric_difference(self, E):
        """
        Return the symmetric difference of the sets of self with the
        sets of E.
        (i.e. for each DictSet all elements that are in exactly one
        of the sets .)

        DS^E <==> DS.symmetric_difference(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        foo = deepcopy(self)
        for k in set(foo.keys()) | set(E.keys()):
            foo.setdefault(k, [])
            foo[k].symmetric_difference_update(E.get(k, []))
            if not foo[k]:
                del foo[k] # delete if empty set

        return foo

    def __xor__(self, E): # overloads ^
        """
        Return the symmetric difference of the sets of self with the
        sets of E.
        (i.e. for each DictSet all elements that are in exactly one
        of the sets .)

        DS^E <==> DS.symmetric_difference(E)
        """
        return self.symmetric_difference(E)

    def intersection_update(self, E):
        """
        Update a DictSet with the intersection of itself and E.

        DS&=E <==> DS.intersection_update(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        for k in set(self) | set(E):
            self.setdefault(k, [])
            self[k].intersection_update(E.get(k, []))
            if len(self[k]) == 0:
                del self[k]

    def __iand__(self, E): # overloads &=
        """
        DS&=E <==> DS.intersection_update(E)

        NOTE(review): returns self.intersection(E) (a new DictSet) rather
        than mutating in place; 'DS &= E' rebinds the name.
        """
        return self.intersection(E)

    def difference_update(self, E):
        """
        Update a DictSet with the difference of itself and E.

        DS-=E <==> DS.difference_update(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        for k in set(self) | set(E):
            self.setdefault(k, [])
            self[k].difference_update(E.get(k, []))
            if len(self[k]) == 0:
                del self[k]

    def __isub__(self, E): # overloads -=
        """
        DS-=E <==> DS.difference_update(E)

        NOTE(review): returns self.difference(E) (a new DictSet) rather
        than mutating in place; 'DS -= E' rebinds the name.
        """
        return self.difference(E)

    def symmetric_difference_update(self, E):
        """
        Update a DictSet with the symmetric difference of
        itself and E.

        DS^=E <==> DS.symmetric_difference_update(E)
        """
        if not isinstance(E, DictSet):
            E = DictSet(copy(E))

        for k in set(self) | set(E):
            self.setdefault(k, [])
            self[k].symmetric_difference_update(E.get(k, []))
            if len(self[k]) == 0:
                del self[k]

    def __ixor__(self, E): # overloads ^=
        """
        DS^=E <==> DS.symmetric_difference_update(E)

        NOTE(review): returns self.symmetric_difference(E) (a new DictSet)
        rather than mutating in place; 'DS ^= E' rebinds the name.
        """
        return self.symmetric_difference(E)

    def add(self, k, v=None):
        """
        Add an element v to a set DS[k].
        This has no effect if the element v is already present in DS[k].
        When v is not supplied adds a new (empty) set at DS[k].
        Raises TypeError if k is not hashable.
        """
        if k not in self.keys():
            self[k] = set()

        # identity comparison: 'v != None' would invoke v's __eq__
        if v is not None:
            self[k].add(v)

    def __setitem__(self, k, v):
        """DS.__setitem__(k, v) <==> x[k]=set(v)"""
        if isinstance(v, set):
            super(DictSet, self).__setitem__(k, v)
        else:
            try:
                super(DictSet, self).__setitem__(k, set(v))
            except:
                raise

    def __contains__(self, k):
        """
        True if DS has a key k and len(DS[k])!=0, else False

        DS.__contains__(k) <==> k in DS
        """
        return k in [key for (key, val) in self.items() if len(val) > 0]

    def __iter__(self):
        """
        Iterate over keys with non-zero lengths.

        DS.__iter__(k) <==> for k in DS
        """
        for (key, val) in self.items():
            if len(val) > 0:
                yield key

    def get(self, k, v=None):
        """
        DS.get(k[,v]) -> DS[k] if k in DS, else set(v).
        v defaults to None (in which case None is returned).
        """
        if k in self:
            return self[k]

        if v is None:
            return

        try:
            return set(v)
        except:
            raise

    def setdefault(self, k, v=None):
        """
        DS.setdefault(k[,v]) -> DS.get(k, v), also set DS[k]=set(v)
        if k not in DS. v defaults to None.
        """
        if k in self:
            return self[k]

        if v is None:
            return
        else:
            try:
                # bypass our own __setitem__'s isinstance check
                super(DictSet, self).__setitem__(k, set(v))
            except:
                raise

        return self[k]

    def copy(self):
        """DS.copy() -> a shallow copy of DS."""
        return copy(self)

    def remove(self, k, v=None):
        """
        Remove element v from a set DS[k]; it must be a member.
        If the element v is not a member of DS[k], raise a KeyError.
        If v is not supplied removes DS[k]; it must be an item.
        If DS[k] is not an item, raise a KeyError.
        """
        if k not in self.keys():
            raise KeyError(k)

        if v is not None:
            self[k].remove(v)
        else:
            del self[k]

    def discard(self, k, v=None):
        """
        Remove element v from a set DS[k] if it is a member.
        If the element v is not a member of DS[k], do nothing.
        If v is not supplied removes DS[k].
        If DS[k] is not an item, do nothing.
        """
        if v is not None:
            try:
                self[k].discard(v)
            except:
                pass
        else:
            try:
                del self[k]
            except:
                pass

    # borrowed from the collections.OrderedDict in the standard library
    def __repr__(self):
        """DS.__repr__() <==> repr(DS)"""
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self.items()))

    def unique_combinations(self, keys=None):
        """
        Returns a generator yielding the unique combination of
        elements. Both the keys of DS and the elements of the
        sets are sorted.

        When a key list (the keys argument) is supplied only the
        unique combinations of the sets specified by the keys are
        yielded by the generator.

        The combinations are sorted by slowest repeating to fastest
        repeating.
        """
        # if the keys argument is not supplied assume the
        # user wants the unique combinations of all the
        # elements of all the sets
        if keys is None:
            keys = sorted(self.keys())

        # eliminate keys to sets that have zero cardinality
        try:
            keys = [k for k in keys if k in self]
        except:
            raise TypeError("'%s' object is not iterable"
                            % type(keys).__name__)

        # if the keys list is empty we can return an empty generator
        if len(keys) == 0:
            yield
        else:
            # the number of unique combinations is the product
            # of the cardinalities of the non-zero sets
            N = reduce(int.__mul__, (len(self[k]) for k in keys))

            # now we need to build a dict of generators so we
            # can build a generator of generators. To do this
            # we need to figure out the 'each' and 'times'
            # parameters to pass to _rep_generator
            gen_dict = {}
            each = 1
            times = 0
            prev_n = 0
            for i, k in enumerate(reversed(keys)):
                if i != 0:
                    each *= prev_n
                times = N / (len(self[k]) * each)
                prev_n = len(self[k])

                gen_dict[k] = _rep_generator(sorted(self[k]),
                                             int(times), int(each))

            # Now we just have to yield the results
            for i in _xrange(N):
                yield [next(gen_dict[k]) for k in keys]

    @classmethod
    def fromkeys(cls, seq, values=None):
        """
        Create a new DictSet with keys from seq and values set to
        set(values). When values is not supplied the values are
        initialized as empty sets.
        """
        d = cls()
        for key in seq:
            if values is None:
                d[key] = set()
            else:
                d[key] = set(values)
        return d
| mit |
leonardocsantoss/ehventos | lib/reportlab/graphics/charts/axes.py | 10 | 85627 | #Copyright ReportLab Europe Ltd. 2000-2010
#see license.txt for license details
__version__=''' $Id: axes.py 3748 2010-07-27 09:36:33Z rgbecker $ '''
__doc__="""Collection of axes for charts.
The current collection comprises axes for charts using cartesian
coordinate systems. All axes might have tick marks and labels.
There are two dichotomies for axes: one of X and Y flavours and
another of category and value flavours.
Category axes have an ordering but no metric. They are divided
into a number of equal-sized buckets. Their tick marks or labels,
if available, go BETWEEN the buckets, and the labels are placed
below to/left of the X/Y-axis, respectively.
Value axes have an ordering AND metric. They correspond to a nu-
meric quantity. Value axis have a real number quantity associated
with it. The chart tells it where to go.
The most basic axis divides the number line into equal spaces
and has tickmarks and labels associated with each; later we
will add variants where you can specify the sampling
interval.
The charts using axis tell them where the labels should be placed.
Axes of complementary X/Y flavours can be connected to each other
in various ways, i.e. with a specific reference point, like an
x/value axis to a y/value (or category) axis. In this case the
connection can be either at the top or bottom of the former or
at any absolute value (specified in points) or at some value of
the former axes in its own coordinate system.
"""
from reportlab.lib.validators import isNumber, isNumberOrNone, isListOfStringsOrNone, isListOfNumbers, \
isListOfNumbersOrNone, isColorOrNone, OneOf, isBoolean, SequenceOf, \
isString, EitherOr, Validator, _SequenceTypes, NoneOr, isInstanceOf, \
isNormalDate
from reportlab.lib.attrmap import *
from reportlab.lib import normalDate
from reportlab.graphics.shapes import Drawing, Line, PolyLine, Group, STATE_DEFAULTS, _textBoxLimits, _rotatedBoxLimits
from reportlab.graphics.widgetbase import Widget, TypedPropertyCollection
from reportlab.graphics.charts.textlabels import Label
from reportlab.graphics.charts.utils import nextRoundNumber
import copy
# Helpers.
def _findMinMaxValue(V, x, default, func, special=None):
    '''Apply func (typically min or max) over column x of the rows of V.

    V is a sequence of data rows.  When the row elements are themselves
    sequences (e.g. (x, y) pairs) the value at index x is extracted from
    each element first, or computed by special(element, x, func) when
    special is supplied.  None entries are discarded; rows left empty are
    ignored; default is returned when no values remain.

    Rewritten with list comprehensions: the original used py2 map/filter
    returning lists and then called len() on the result, which breaks
    under Python 3 where they return lazy iterators; it also shadowed the
    parameter x inside a nested lambda.
    '''
    if isinstance(V[0][0], _SequenceTypes):
        if special:
            extract = lambda T: special(T, x, func)
        else:
            extract = lambda T: T[x]
        V = [[extract(T) for T in row] for row in V]
    # drop None entries, then drop rows that became empty
    rows = [[v for v in row if v is not None] for row in V]
    rows = [row for row in rows if len(row)]
    if len(rows) == 0:
        return default
    # reduce each row with func, then reduce across rows
    return func([func(row) for row in rows])
def _findMin(V, x, default, special=None):
    '''Smallest value found over V[i][x]; default when nothing remains.'''
    return _findMinMaxValue(V, x, default, min, special=special)

def _findMax(V, x, default, special=None):
    '''Largest value found over V[i][x]; default when nothing remains.'''
    return _findMinMaxValue(V, x, default, max, special=special)
def _allInt(values):
'''true if all values are int'''
for v in values:
try:
if int(v)!=v: return 0
except:
return 0
return 1
class AxisLineAnnotation:
    '''Create a grid like line using the given user value to draw the line.

    Instances are callables: calling one with an axis returns a Line (or
    3d PolyLine) shape, or None when the value lies outside [lo, hi] and
    drawAtLimit is not set.

    kwds may contain
        start           grid start position (default: joinAxis grid dims or 0)
        end             grid end position (default: joinAxis grid dims or 0)
        scaleValue      True/not given --> scale the value
                        otherwise use the absolute value
        lo              lowest coordinate to draw default 0
        hi              highest coordinate to draw at default = length
        drawAtLimit     True draw line at appropriate limit if its coordinate
                        exceeds the lo, hi range
                        False ignore if it's outside the range
        parent          passed on to the axis line factory (enables the
                        3d variants when it carries _3d_dx/_3d_dy)
        all remaining Line keywords are applied as attributes of the
        resulting shape
    '''
    def __init__(self, v, **kwds):
        self._v = v
        self._kwds = kwds

    def __call__(self, axis):
        kwds = self._kwds.copy()
        scaleValue = kwds.pop('scaleValue', True)
        if axis.isYAxis:
            offs = axis._x
        else:
            offs = axis._y
        s = kwds.pop('start', None)
        e = kwds.pop('end', None)
        if s is None or e is None:
            # fall back on the joined axis' grid dimensions when available
            dim = getattr(getattr(axis, 'joinAxis', None), 'getGridDims', None)
            if dim and hasattr(dim, '__call__'):
                dim = dim()
            if dim:
                if s is None: s = dim[0]
                if e is None: e = dim[1]
            else:
                if s is None: s = 0
                if e is None: e = 0
        hi = kwds.pop('hi', axis._length)
        lo = kwds.pop('lo', 0)
        lo, hi = min(lo, hi), max(lo, hi)
        drawAtLimit = kwds.pop('drawAtLimit', False)

        # axis._get_line_pos may be temporarily replaced below (either to
        # bypass scaling or to clamp to a limit).  Save it up front and
        # restore whenever it was overridden: the original code restored
        # only when scaleValue was false, permanently clobbering the axis
        # after a drawAtLimit clamp.
        oaglp = axis._get_line_pos
        overridden = False
        if not scaleValue:
            axis._get_line_pos = lambda x: x
            overridden = True
        try:
            v = self._v
            func = axis._getLineFunc(s - offs, e - offs, kwds.pop('parent', None))
            if not hasattr(axis, '_tickValues'):
                axis._pseudo_configure()
            d = axis._get_line_pos(v)
            if d < lo or d > hi:
                if not drawAtLimit:
                    return None
                d = lo if d < lo else hi
                axis._get_line_pos = lambda x: d
                overridden = True
            L = func(v)
            # apply remaining keywords as shape attributes
            # (py2-only kwds.iteritems() replaced by the portable items())
            for k, val in kwds.items():
                setattr(L, k, val)
        finally:
            if overridden:
                axis._get_line_pos = oaglp
        return L
class TickLU:
    '''Approximate lookup table for special-cased tick values.

    Holds (position, value) pairs and matches a queried tick position
    against them within a small tolerance, so floating point round-off
    does not prevent a hit.
    '''
    def __init__(self, *T, **kwds):
        # tolerance within which two tick positions compare equal
        self.accuracy = kwds.pop('accuracy', 1e-8)
        self.T = T

    def __contains__(self, t):
        eps = self.accuracy
        return any(abs(pos - t) < eps for pos, _value in self.T)

    def __getitem__(self, t):
        eps = self.accuracy
        for pos, value in self.T:
            if abs(pos - t) < eps:
                return value
        raise IndexError('cannot locate index %r' % t)
class _AxisG(Widget):
    def _get_line_pos(self,v):
        # Map a user/data value to an axis drawing coordinate via scale().
        v = self.scale(v)
        try:
            # NOTE(review): assumes scale() returns either a plain number
            # or an indexable whose first item is the coordinate (e.g. a
            # (position, width) pair) -- confirm against the axis classes.
            v = v[0]
        except:
            # non-indexable result: use the scaled value as-is
            pass
        return v
    def _cxLine(self,x,start,end):
        # Vertical line at data value x on an X axis, spanning
        # [start, end] relative to the axis' y origin.
        x = self._get_line_pos(x)
        return Line(x, self._y + start, x, self._y + end)
    def _cyLine(self,y,start,end):
        # Horizontal line at data value y on a Y axis, spanning
        # [start, end] relative to the axis' x origin.
        y = self._get_line_pos(y)
        return Line(self._x + start, y, self._x + end, y)
def _cxLine3d(self,x,start,end,_3d_dx,_3d_dy):
x = self._get_line_pos(x)
y0 = self._y + start
y1 = self._y + end
y0, y1 = min(y0,y1),max(y0,y1)
x1 = x + _3d_dx
return PolyLine([x,y0,x1,y0+_3d_dy,x1,y1+_3d_dy],strokeLineJoin=1)
def _cyLine3d(self,y,start,end,_3d_dx,_3d_dy):
y = self._get_line_pos(y)
x0 = self._x + start
x1 = self._x + end
x0, x1 = min(x0,x1),max(x0,x1)
y1 = y + _3d_dy
return PolyLine([x0,y,x0+_3d_dx,y1,x1+_3d_dx,y1],strokeLineJoin=1)
def _getLineFunc(self, start, end, parent=None):
_3d_dx = getattr(parent,'_3d_dx',None)
if _3d_dx is not None:
_3d_dy = getattr(parent,'_3d_dy',None)
f = self.isYAxis and self._cyLine3d or self._cxLine3d
return lambda v, s=start, e=end, f=f,_3d_dx=_3d_dx,_3d_dy=_3d_dy: f(v,s,e,_3d_dx=_3d_dx,_3d_dy=_3d_dy)
else:
f = self.isYAxis and self._cyLine or self._cxLine
return lambda v, s=start, e=end, f=f: f(v,s,e)
def _makeLines(self,g,start,end,strokeColor,strokeWidth,strokeDashArray,strokeLineJoin,strokeLineCap,strokeMiterLimit,parent=None,exclude=[],specials={}):
func = self._getLineFunc(start,end,parent)
if not hasattr(self,'_tickValues'):
self._pseudo_configure()
if exclude:
exf = self.isYAxis and (lambda l: l.y1 in exclude) or (lambda l: l.x1 in exclude)
else:
exf = None
for t in self._tickValues:
L = func(t)
if exf and exf(L): continue
L.strokeColor = strokeColor
L.strokeWidth = strokeWidth
L.strokeDashArray = strokeDashArray
L.strokeLineJoin = strokeLineJoin
L.strokeLineCap = strokeLineCap
L.strokeMiterLimit = strokeMiterLimit
if t in specials:
for a,v in specials[t].iteritems():
setattr(L,a,v)
g.add(L)
def makeGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
c = self.gridStrokeColor
w = self.gridStrokeWidth or 0
if w and c and self.visibleGrid:
s = self.gridStart
e = self.gridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
self._makeLines(g,s-offs,e-offs,c,w,self.gridStrokeDashArray,self.gridStrokeLineJoin,self.gridStrokeLineCap,self.gridStrokeMiterLimit,parent=parent,exclude=exclude,specials=getattr(self,'_gridSpecials',{}))
self._makeSubGrid(g,dim,parent,exclude=[])
def _makeSubGrid(self,g,dim=None,parent=None,exclude=[]):
'''this is only called by a container object'''
if not (getattr(self,'visibleSubGrid',0) and self.subTickNum>0): return
c = self.subGridStrokeColor
w = self.subGridStrokeWidth or 0
if not(w and c): return
s = self.subGridStart
e = self.subGridEnd
if s is None or e is None:
if dim and hasattr(dim,'__call__'):
dim = dim()
if dim:
if s is None: s = dim[0]
if e is None: e = dim[1]
else:
if s is None: s = 0
if e is None: e = 0
if s or e:
if self.isYAxis: offs = self._x
else: offs = self._y
otv = self._calcSubTicks()
try:
self._makeLines(g,s-offs,e-offs,c,w,self.subGridStrokeDashArray,self.subGridStrokeLineJoin,self.subGridStrokeLineCap,self.subGridStrokeMiterLimit,parent=parent,exclude=exclude)
finally:
self._tickValues = otv
def getGridDims(self,start=None,end=None):
if start is None: start = (self._x,self._y)[self.isYAxis]
if end is None: end = start+self._length
return start,end
def isYAxis(self):
if getattr(self,'_dataIndex',None)==1: return True
acn = self.__class__.__name__
return acn[0]=='Y' or acn[:4]=='AdjY'
isYAxis = property(isYAxis)
def isXAxis(self):
if getattr(self,'_dataIndex',None)==0: return True
acn = self.__class__.__name__
return acn[0]=='X' or acn[:11]=='NormalDateX'
isXAxis = property(isXAxis)
def addAnnotations(self,g,A=None):
if A is None: getattr(self,'annotations',[])
for x in A:
g.add(x(self))
def _splitAnnotations(self):
A = getattr(self,'annotations',[])[:]
D = {}
for v in ('early','beforeAxis','afterAxis','beforeTicks',
'afterTicks','beforeTickLabels',
'afterTickLabels','late'):
R = [].append
P = [].append
for a in A:
if getattr(a,v,0):
R(a)
else:
P(a)
D[v] = R.__self__
A[:] = P.__self__
D['late'] += A
return D
def draw(self):
g = Group()
A = self._splitAnnotations()
self.addAnnotations(g,A['early'])
if self.visible:
self.addAnnotations(g,A['beforeAxis'])
g.add(self.makeAxis())
self.addAnnotations(g,A['afterAxis'])
self.addAnnotations(g,A['beforeTicks'])
g.add(self.makeTicks())
self.addAnnotations(g,A['afterTicks'])
self.addAnnotations(g,A['beforeTickLabels'])
g.add(self.makeTickLabels())
self.addAnnotations(g,A['afterTickLabels'])
self.addAnnotations(g,A['late'])
return g
class CALabel(Label):
    # Category-axis label: a Label that additionally records where inside
    # its category's [0,1] slot it should be anchored.
    _attrMap = AttrMap(BASE=Label,
        labelPosFrac = AttrMapValue(isNumber, desc='where in the category range [0,1] the labels should be anchored'),
        )
    def __init__(self, **kw):
        Label.__init__(self, **kw)
        # default anchor: the middle of the category slot
        self._setKeywords(
            labelPosFrac = 0.5,
            )
# Category axes.
class CategoryAxis(_AxisG):
    "Abstract category axis, unusable in itself."
    _nodoc = 1
    _attrMap = AttrMap(
        visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
        visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
        visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
        visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
        strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
        strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
        strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
        gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
        gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
        gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
        gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
        gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
        gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
        gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
        gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
        drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
        labels = AttrMapValue(None, desc='Handle of the axis labels.'),
        categoryNames = AttrMapValue(isListOfStringsOrNone, desc='List of category names.'),
        joinAxis = AttrMapValue(None, desc='Join both axes if true.'),
        joinAxisPos = AttrMapValue(isNumberOrNone, desc='Position at which to join with other axis.'),
        reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
        style = AttrMapValue(OneOf('parallel','stacked','parallel_3d'),"How common category bars are plotted"),
        labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
        tickShift = AttrMapValue(isBoolean, desc='Tick shift typically'),
        loPad = AttrMapValue(isNumber, desc='extra inner space before start of the axis'),
        hiPad = AttrMapValue(isNumber, desc='extra inner space after end of the axis'),
        annotations = AttrMapValue(None,desc='list of annotations'),
        loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
        hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
        )

    def __init__(self):
        assert self.__class__.__name__!='CategoryAxis', "Abstract Class CategoryAxis Instantiated"

        # private properties set by methods.  The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        self._x = 50
        self._y = 50
        self._length = 100
        self._catCount = 0

        # public properties
        self.visible = 1
        self.visibleAxis = 1
        self.visibleTicks = 1
        self.visibleLabels = 1
        self.visibleGrid = 0
        self.drawGridLast = False

        self.strokeWidth = 1
        self.strokeColor = STATE_DEFAULTS['strokeColor']
        self.strokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.gridStrokeLineJoin = self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
        self.gridStrokeLineCap = self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
        self.gridStrokeMiterLimit = self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
        self.gridStrokeWidth = 0.25
        self.gridStrokeColor = STATE_DEFAULTS['strokeColor']
        self.gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray']
        self.gridStart = self.gridEnd = None
        # NOTE(review): the three assignments below repeat values already
        # set a few lines up -- apparently redundant, kept as-is.
        self.strokeLineJoin = STATE_DEFAULTS['strokeLineJoin']
        self.strokeLineCap = STATE_DEFAULTS['strokeLineCap']
        self.strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit']
        self.labels = TypedPropertyCollection(CALabel)

        # if None, they don't get labels. If provided,
        # you need one name per data point and they are
        # used for label text.
        self.categoryNames = None
        self.joinAxis = None
        self.joinAxisPos = None
        self.joinAxisMode = None
        self.labelAxisMode = 'axis'
        self.reverseDirection = 0
        self.style = 'parallel'

        #various private things which need to be initialized
        self._labelTextFormat = None
        self.tickShift = 0
        self.loPad = 0
        self.hiPad = 0
        self.loLLen = 0
        self.hiLLen = 0

    def setPosition(self, x, y, length):
        # record axis origin and length; ensure floating point
        self._x = float(x)
        self._y = float(y)
        self._length = float(length)

    def configure(self, multiSeries, barWidth=None):
        # category count is the longest series; bar width divides the
        # usable length (minus padding) evenly unless given explicitly
        self._catCount = max(map(len,multiSeries))
        self._barWidth = barWidth or ((self._length-self.loPad-self.hiPad)/float(self._catCount or 1))
        self._calcTickmarkPositions()

    def _calcTickmarkPositions(self):
        # ticks either sit at category centres (tickShift) or at the
        # category boundaries (one extra tick to close the last slot)
        n = self._catCount
        if self.tickShift:
            self._tickValues = [t+0.5 for t in xrange(n)]
        else:
            if self.reverseDirection:
                self._tickValues = range(-1,n)
            else:
                self._tickValues = range(n+1)

    def _scale(self, idx):
        # map a category index to its drawing-order index
        if self.reverseDirection: idx = self._catCount-idx-1
        return idx
def _assertYAxis(axis):
    # guard used by joinToAxis: the partner must be a Y (value) axis
    assert axis.isYAxis, "Cannot connect to other axes (%s), but Y- ones." % axis.__class__.__name__
def _assertXAxis(axis):
    # guard used by joinToAxis: the partner must be an X (category) axis
    assert axis.isXAxis, "Cannot connect to other axes (%s), but X- ones." % axis.__class__.__name__
class _XTicks:
    # Tick-drawing mixin for horizontal axes; also hosts the sub-tick
    # computation shared (via inheritance) with _YTicks.
    _tickTweaks = 0 #try 0.25-0.5
    def _drawTicksInner(self, tU, tD, g):
        # draw ticks tU units above / tD units below the axis line into g
        if tU or tD:
            sW = self.strokeWidth
            tW = self._tickTweaks
            if tW:
                # extend one-sided ticks slightly past the axis line
                if tU and not tD:
                    tD = tW*sW
                elif tD and not tU:
                    tU = tW*sW
            self._makeLines(g,tU,-tD,self.strokeColor,sW,self.strokeDashArray,self.strokeLineJoin,self.strokeLineCap,self.strokeMiterLimit)

    def _drawTicks(self, tU, tD, g=None):
        # wrapper honouring visibleTicks; returns (possibly new) group
        g = g or Group()
        if self.visibleTicks:
            self._drawTicksInner(tU,tD,g)
        return g

    def _calcSubTicks(self):
        # Compute sub-tick positions, install them as self._tickValues
        # and return the ORIGINAL tick values so callers can restore them.
        # NOTE(review): uses self._valueMin/_valueMax, which value axes
        # set during configuration -- assumes a value-axis host; confirm.
        if not hasattr(self,'_tickValues'):
            self._pseudo_configure()
        otv = self._tickValues
        if not hasattr(self,'_subTickValues'):
            acn = self.__class__.__name__
            if acn[:11]=='NormalDateX':
                # date axes tick in whole days: no fuzz, integer steps
                iFuzz = 0
                dCnv = int
            else:
                iFuzz = 1e-8
                dCnv = lambda x:x
            # only ticks that opt in (default yes) get subdivided
            OTV = [tv for tv in otv if getattr(tv,'_doSubTicks',1)]
            T = [].append
            nst = int(self.subTickNum)
            i = len(OTV)
            if i<2:
                self._subTickValues = []
            else:
                # estimate the dominant inter-tick spacing
                if i==2:
                    dst = OTV[1]-OTV[0]
                elif i==3:
                    dst = max(OTV[1]-OTV[0],OTV[2]-OTV[1])
                else:
                    i >>= 1
                    dst = OTV[i+1] - OTV[i]
                fuzz = dst*iFuzz
                vn = self._valueMin+fuzz
                vx = self._valueMax-fuzz
                # pad with phantom ticks so edge intervals get sub-ticks
                if OTV[0]>vn: OTV.insert(0,OTV[0]-dst)
                if OTV[-1]<vx: OTV.append(OTV[-1]+dst)
                dst /= float(nst+1)
                for i,x in enumerate(OTV[:-1]):
                    for j in xrange(nst):
                        t = x+dCnv((j+1)*dst)
                        # drop sub-ticks falling outside the value range
                        if t<=vn or t>=vx: continue
                        T(t)
                self._subTickValues = T.__self__
        self._tickValues = self._subTickValues
        return otv

    def _drawSubTicks(self, tU, tD, g):
        # draw sub-ticks (if enabled), always restoring the main ticks
        if getattr(self,'visibleSubTicks',0) and self.subTickNum>0:
            otv = self._calcSubTicks()
            try:
                self._drawTicksInner(tU,tD,g)
            finally:
                self._tickValues = otv

    def makeTicks(self):
        # draw main + sub ticks at the label-axis position, restoring _y
        yold=self._y
        try:
            self._y = self._labelAxisPos(getattr(self,'tickAxisMode','axis'))
            g = self._drawTicks(self.tickUp,self.tickDown)
            self._drawSubTicks(getattr(self,'subTickHi',0),getattr(self,'subTickLo',0),g)
            return g
        finally:
            self._y = yold

    def _labelAxisPos(self, mode=None):
        # y coordinate at which ticks/labels anchor, possibly following
        # the joined axis' low or high end
        axis = self.joinAxis
        if axis:
            mode = mode or self.labelAxisMode
            if mode == 'low':
                return axis._y
            elif mode == 'high':
                return axis._y + axis._length
        return self._y
class _YTicks(_XTicks):
    # Tick-drawing mixin for vertical axes: identical machinery to
    # _XTicks, but anchored on the x coordinate and using the
    # tickRight/tickLeft lengths.

    def _labelAxisPos(self, mode=None):
        # x coordinate at which ticks/labels anchor; may follow the
        # joined axis' low or high end
        axis = self.joinAxis
        if not axis:
            return self._x
        mode = mode or self.labelAxisMode
        if mode == 'low':
            return axis._x
        if mode == 'high':
            return axis._x + axis._length
        return self._x

    def makeTicks(self):
        # draw main + sub ticks at the label-axis position, restoring _x
        saved_x = self._x
        try:
            self._x = self._labelAxisPos(getattr(self, 'tickAxisMode', 'axis'))
            group = self._drawTicks(self.tickRight, self.tickLeft)
            self._drawSubTicks(getattr(self, 'subTickHi', 0),
                               getattr(self, 'subTickLo', 0), group)
            return group
        finally:
            self._x = saved_x
class XCategoryAxis(_XTicks,CategoryAxis):
    "X/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        )

    _dataIndex = 0

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'n' #north - top edge
        self.labels.dy = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickUp = 0  # how far into chart does tick go?
        self.tickDown = 5  # how far below axis does tick go?

    def demo(self):
        # small self-contained example drawing for documentation tools
        self.setPosition(30, 70, 140)
        self.configure([(10,20,30,40,50)])

        self.categoryNames = ['One','Two','Three','Four','Five']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'n'
        self.labels[4].boxAnchor = 'e'
        self.labels[4].angle = 90

        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        "Join with y-axis using some mode."
        _assertYAxis(yAxis)
        if mode == 'bottom':
            self._x = yAxis._x
            self._y = yAxis._y
        elif mode == 'top':
            self._x = yAxis._x
            self._y = yAxis._y + yAxis._length
        elif mode == 'value':
            # place the x axis at a data value on the y axis
            self._x = yAxis._x
            self._y = yAxis.scale(pos)
        elif mode == 'points':
            # place the x axis at an absolute y coordinate
            self._x = yAxis._x
            self._y = pos

    def _joinToAxis(self):
        # apply the declarative joinAxis/joinAxisMode/joinAxisPos settings
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('bottom', 'top'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)

    def scale(self, idx):
        """returns the x position and width in drawing units of the slice"""
        return (self._x + self.loPad + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        # the axis line itself, extended by loLLen/hiLLen at either end
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g

        axis = Line(self._x-self.loLLen, self._y, self._x + self._length+self.hiLLen, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTickLabels(self):
        g = Group()
        if not self.visibleLabels: return g

        categoryNames = self.categoryNames
        if categoryNames is not None:
            catCount = self._catCount
            n = len(categoryNames)
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            _y = self._labelAxisPos()
            _x = self._x

            for i in xrange(catCount):
                if reverseDirection: ic = catCount-i-1
                else: ic = i
                if ic>=n: continue
                # a negative index may have been configured explicitly;
                # it takes precedence over the positive one
                label=i-catCount
                if label in self.labels:
                    label = self.labels[label]
                else:
                    label = self.labels[i]
                lpf = label.labelPosFrac
                x = _x + (i+lpf) * barWidth
                label.setOrigin(x, _y)
                label.setText(categoryNames[ic] or '')
                g.add(label)
        return g
class YCategoryAxis(_YTicks,CategoryAxis):
    "Y/category axis"

    _attrMap = AttrMap(BASE=CategoryAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        )

    _dataIndex = 1

    def __init__(self):
        CategoryAxis.__init__(self)
        self.labels.boxAnchor = 'e' #east - right edge
        self.labels.dx = -5
        # ultra-simple tick marks for now go between categories
        # and have same line style as axis - need more
        self.tickLeft = 5  # how far left of axis does tick go?
        self.tickRight = 0  # how far right of axis does tick go?

    def demo(self):
        # small self-contained example drawing for documentation tools
        self.setPosition(50, 10, 80)
        self.configure([(10,20,30)])

        self.categoryNames = ['One','Two','Three']
        # all labels top-centre aligned apart from the last
        self.labels.boxAnchor = 'e'
        self.labels[2].boxAnchor = 's'
        self.labels[2].angle = 90

        d = Drawing(200, 100)
        d.add(self)
        return d

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        _assertXAxis(xAxis)
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            # place the y axis at a data value on the x axis
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            # place the y axis at an absolute x coordinate
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def _joinToAxis(self):
        # apply the declarative joinAxis/joinAxisMode/joinAxisPos settings
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)

    def scale(self, idx):
        "Returns the y position and width in drawing units of the slice."
        return (self._y + self._scale(idx)*self._barWidth, self._barWidth)

    def makeAxis(self):
        # the axis line itself, extended by loLLen/hiLLen at either end
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis: return g

        axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g

    def makeTickLabels(self):
        g = Group()
        if not self.visibleLabels: return g

        categoryNames = self.categoryNames
        if categoryNames is not None:
            catCount = self._catCount
            n = len(categoryNames)
            reverseDirection = self.reverseDirection
            barWidth = self._barWidth
            labels = self.labels
            _x = self._labelAxisPos()
            _y = self._y

            for i in xrange(catCount):
                if reverseDirection: ic = catCount-i-1
                else: ic = i
                if ic>=n: continue
                # a negative index may have been configured explicitly;
                # it takes precedence over the positive one
                label=i-catCount
                if label in self.labels:
                    label = self.labels[label]
                else:
                    label = self.labels[i]
                lpf = label.labelPosFrac
                y = _y + (i+lpf) * barWidth
                label.setOrigin(_x, y)
                label.setText(categoryNames[ic] or '')
                g.add(label)
        return g
class TickLabeller:
    '''Abstract base class which may be used to indicate a change
    in the call signature for callable label formats
    '''
    def __call__(self, axis, value):
        # subclasses override this; the base merely advertises the
        # (axis, value) calling convention
        return 'Abstract class instance called'
# Value axes.
class ValueAxis(_AxisG):
    "Abstract value axis, unusable in itself."
    # Declarative attribute map: names, validators and descriptions for
    # every public property of a value axis.
    _attrMap = AttrMap(
        forceZero = AttrMapValue(EitherOr((isBoolean,OneOf('near'))), desc='Ensure zero in range if true.'),
        visible = AttrMapValue(isBoolean, desc='Display entire object, if true.'),
        visibleAxis = AttrMapValue(isBoolean, desc='Display axis line, if true.'),
        visibleLabels = AttrMapValue(isBoolean, desc='Display axis labels, if true.'),
        visibleTicks = AttrMapValue(isBoolean, desc='Display axis ticks, if true.'),
        visibleGrid = AttrMapValue(isBoolean, desc='Display axis grid, if true.'),
        strokeWidth = AttrMapValue(isNumber, desc='Width of axis line and ticks.'),
        strokeColor = AttrMapValue(isColorOrNone, desc='Color of axis line and ticks.'),
        strokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for axis line.'),
        strokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Line cap 0=butt, 1=round & 2=square"),
        strokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Line join 0=miter, 1=round & 2=bevel"),
        strokeMiterLimit = AttrMapValue(isNumber,desc="miter limit control miter line joins"),
        gridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
        gridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
        gridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
        gridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
        gridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
        gridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
        gridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
        gridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
        drawGridLast = AttrMapValue(isBoolean, desc='if true draw gridlines after everything else.'),
        minimumTickSpacing = AttrMapValue(isNumber, desc='Minimum value for distance between ticks.'),
        maximumTicks = AttrMapValue(isNumber, desc='Maximum number of ticks.'),
        labels = AttrMapValue(None, desc='Handle of the axis labels.'),
        labelAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the axis labels"),
        labelTextFormat = AttrMapValue(None, desc='Formatting string or function used for axis labels.'),
        labelTextPostFormat = AttrMapValue(None, desc='Extra Formatting string.'),
        labelTextScale = AttrMapValue(isNumberOrNone, desc='Scaling for label tick values.'),
        valueMin = AttrMapValue(isNumberOrNone, desc='Minimum value on axis.'),
        valueMax = AttrMapValue(isNumberOrNone, desc='Maximum value on axis.'),
        valueStep = AttrMapValue(isNumberOrNone, desc='Step size used between ticks.'),
        valueSteps = AttrMapValue(isListOfNumbersOrNone, desc='List of step sizes used between ticks.'),
        avoidBoundFrac = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Fraction of interval to allow above and below.'),
        avoidBoundSpace = AttrMapValue(EitherOr((isNumberOrNone,SequenceOf(isNumber,emptyOK=0,lo=2,hi=2))), desc='Space to allow above and below.'),
        abf_ignore_zero = AttrMapValue(EitherOr((NoneOr(isBoolean),SequenceOf(isBoolean,emptyOK=0,lo=2,hi=2))), desc='Set to True to make the avoidBoundFrac calculations treat zero as non-special'),
        rangeRound=AttrMapValue(OneOf('none','both','ceiling','floor'),'How to round the axis limits'),
        zrangePref = AttrMapValue(isNumberOrNone, desc='Zero range axis limit preference.'),
        style = AttrMapValue(OneOf('normal','stacked','parallel_3d'),"How values are plotted!"),
        skipEndL = AttrMapValue(OneOf('none','start','end','both'), desc='Skip high/low tick labels'),
        origShiftIPC = AttrMapValue(isNumberOrNone, desc='Lowest label shift interval ratio.'),
        origShiftMin = AttrMapValue(isNumberOrNone, desc='Minimum amount to shift.'),
        origShiftSpecialValue = AttrMapValue(isNumberOrNone, desc='special value for shift'),
        tickAxisMode = AttrMapValue(OneOf('high','low','axis'), desc="Like joinAxisMode, but for the ticks"),
        reverseDirection = AttrMapValue(isBoolean, desc='If true reverse category direction.'),
        annotations = AttrMapValue(None,desc='list of annotations'),
        loLLen = AttrMapValue(isNumber, desc='extra line length before start of the axis'),
        hiLLen = AttrMapValue(isNumber, desc='extra line length after end of the axis'),
        subTickNum = AttrMapValue(isNumber, desc='Number of axis sub ticks, if >0'),
        subTickLo = AttrMapValue(isNumber, desc='sub tick down or left'),
        subTickHi = AttrMapValue(isNumber, desc='sub tick up or right'),
        visibleSubTicks = AttrMapValue(isBoolean, desc='Display axis sub ticks, if true.'),
        visibleSubGrid = AttrMapValue(isBoolean, desc='Display axis sub grid, if true.'),
        subGridStrokeWidth = AttrMapValue(isNumber, desc='Width of grid lines.'),
        subGridStrokeColor = AttrMapValue(isColorOrNone, desc='Color of grid lines.'),
        subGridStrokeDashArray = AttrMapValue(isListOfNumbersOrNone, desc='Dash array used for grid lines.'),
        subGridStrokeLineCap = AttrMapValue(OneOf(0,1,2),desc="Grid Line cap 0=butt, 1=round & 2=square"),
        subGridStrokeLineJoin = AttrMapValue(OneOf(0,1,2),desc="Grid Line join 0=miter, 1=round & 2=bevel"),
        subGridStrokeMiterLimit = AttrMapValue(isNumber,desc="Grid miter limit control miter line joins"),
        subGridStart = AttrMapValue(isNumberOrNone, desc='Start of grid lines wrt axis origin'),
        subGridEnd = AttrMapValue(isNumberOrNone, desc='End of grid lines wrt axis origin'),
        keepTickLabelsInside = AttrMapValue(isBoolean, desc='Ensure tick labels do not project beyond bounds of axis if true'),
        )
def __init__(self, **kw):
    """Initialise all public/private attributes to their defaults;
    user keywords (applied first) may be overridden by the fixed
    defaults below only where both sets name the same attribute.
    """
    assert self.__class__.__name__!='ValueAxis', 'Abstract Class ValueAxis Instantiated'
    self._setKeywords(**kw)
    self._setKeywords(
        _configured = 0,
        # private properties set by methods.  The initial values
        # here are to make demos easy; they would always be
        # overridden in real life.
        _x = 50,
        _y = 50,
        _length = 100,

        # public properties
        visible = 1,
        visibleAxis = 1,
        visibleLabels = 1,
        visibleTicks = 1,
        visibleGrid = 0,
        forceZero = 0,

        strokeWidth = 1,
        strokeColor = STATE_DEFAULTS['strokeColor'],
        strokeDashArray = STATE_DEFAULTS['strokeDashArray'],
        strokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
        strokeLineCap = STATE_DEFAULTS['strokeLineCap'],
        strokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
        gridStrokeWidth = 0.25,
        gridStrokeColor = STATE_DEFAULTS['strokeColor'],
        gridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
        gridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
        gridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
        gridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
        gridStart = None,
        gridEnd = None,
        drawGridLast = False,
        visibleSubGrid = 0,
        visibleSubTicks = 0,
        subTickNum = 0,
        subTickLo = 0,
        subTickHi = 0,
        subGridStrokeLineJoin = STATE_DEFAULTS['strokeLineJoin'],
        subGridStrokeLineCap = STATE_DEFAULTS['strokeLineCap'],
        subGridStrokeMiterLimit = STATE_DEFAULTS['strokeMiterLimit'],
        subGridStrokeWidth = 0.25,
        subGridStrokeColor = STATE_DEFAULTS['strokeColor'],
        subGridStrokeDashArray = STATE_DEFAULTS['strokeDashArray'],
        subGridStart = None,
        subGridEnd = None,
        labels = TypedPropertyCollection(Label),
        keepTickLabelsInside = 0,

        # how close can the ticks be?
        minimumTickSpacing = 10,
        maximumTicks = 7,

        # a format string like '%0.2f'
        # or a function which takes the value as an argument and returns a string
        _labelTextFormat = None,
        labelAxisMode = 'axis',
        labelTextFormat = None,
        labelTextPostFormat = None,
        labelTextScale = None,

        # if set to None, these will be worked out for you.
        # if you override any or all of them, your values
        # will be used.
        valueMin = None,
        valueMax = None,
        valueStep = None,
        avoidBoundFrac = None,
        avoidBoundSpace = None,
        abf_ignore_zero = False,
        rangeRound = 'none',
        zrangePref = 0,
        style = 'normal',
        skipEndL='none',
        origShiftIPC = None,
        origShiftMin = None,
        origShiftSpecialValue = None,
        tickAxisMode = 'axis',
        reverseDirection=0,
        loLLen=0,
        hiLLen=0,
        )
    self.labels.angle = 0
def setPosition(self, x, y, length):
    """Record the axis origin and length, coerced to floats."""
    self._x, self._y, self._length = float(x), float(y), float(length)
def configure(self, dataSeries):
    """Let the axis configure its scale and range based on the data.

    Called after setPosition. Let it look at a list of lists of
    numbers determine the tick mark intervals.  If valueMin,
    valueMax and valueStep are configured then it
    will use them; if any of them are set to None it
    will look at the data and make some sensible decision.
    You may override this to build custom axes with
    irregular intervals.

    NOTE(review): an earlier version of this docstring claimed an
    internal self._values list is created here; the current code only
    sets the range, tick values and scale factor -- confirm.
    """
    self._setRange(dataSeries)
    self._configure_end()
def _configure_end(self):
    # common tail of configuration: compute tick positions and the
    # scale factor, then mark the axis as configured
    self._calcTickmarkPositions()
    self._calcScaleFactor()
    self._configured = 1
def _getValueStepAndTicks(self, valueMin, valueMax, cache=None):
    '''Return (valueStep, tickList, fuzz) for the given range.

    Results are memoised in *cache*, keyed by (valueMin, valueMax);
    callers (see _setRange) pass the same dict across iterations to
    avoid recomputation.  The original signature used a mutable
    default ``cache={}``, sharing one memo across every axis instance
    even though the result depends on per-axis settings; a fresh dict
    is now created per call when none is supplied.

    Side effect: updates self._valueMin/_valueMax on a cache miss.
    '''
    if cache is None:
        cache = {}
    K = (valueMin, valueMax)
    try:
        r = cache[K]
    except KeyError:  # was a bare except:; only a cache miss is expected
        self._valueMin = valueMin
        self._valueMax = valueMax
        valueStep, T = self._calcStepAndTickPositions()
        r = cache[K] = valueStep, T, valueStep * 1e-8
    return r
def _setRange(self, dataSeries):
    """Set minimum and maximum axis values.

    The dataSeries argument is assumed to be a list of data
    vectors. Each vector is itself a list or tuple of numbers.

    Computes self._valueMin / self._valueMax, applying forceZero,
    avoidBoundFrac/avoidBoundSpace padding, rangeRound rounding and
    the origShift* adjustments, then calls self._rangeAdjust().
    """
    oMin = valueMin = self.valueMin
    oMax = valueMax = self.valueMax
    rangeRound = self.rangeRound
    if valueMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0)
    if valueMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0)
    if valueMin == valueMax:
        # degenerate (zero-width) range: synthesise a usable one
        if valueMax==0:
            if oMin is None and oMax is None:
                zrp = getattr(self,'zrangePref',0)
                if zrp>0:
                    valueMax = zrp
                    valueMin = 0
                elif zrp<0:
                    valueMax = 0
                    valueMin = zrp
                else:
                    valueMax = 0.01
                    valueMin = -0.01
            elif self.valueMin is None:
                valueMin = -0.01
            else:
                valueMax = 0.01
        else:
            if valueMax>0:
                valueMax = 1.2*valueMax
                valueMin = 0.0
            else:
                valueMax = 0.0
                valueMin = 1.2*valueMin

    if getattr(self,'_bubblePlot',None):
        # widen the range so the largest bubble fits inside the axis
        bubbleMax = float(_findMax(dataSeries,2,0))
        frac=.25
        bubbleV=frac*(valueMax-valueMin)
        self._bubbleV = bubbleV
        self._bubbleMax = bubbleMax
        self._bubbleRadius = frac*self._length
        def special(T,x,func,bubbleV=bubbleV,bubbleMax=bubbleMax):
            try:
                v = T[2]
            except IndexError:
                # BUG FIX: was `bubbleMAx` (typo), a NameError whenever a
                # point lacked its bubble size
                v = bubbleMax*0.1
            bubbleV *= (v/bubbleMax)**0.5
            return func(T[x]+bubbleV,T[x]-bubbleV)
        if oMin is None: valueMin = self._cValueMin = _findMin(dataSeries,self._dataIndex,0,special=special)
        if oMax is None: valueMax = self._cValueMax = _findMax(dataSeries,self._dataIndex,0,special=special)

    cMin = valueMin
    cMax = valueMax
    forceZero = self.forceZero
    if forceZero:
        if forceZero=='near':
            # only force zero when it is reasonably close to the data
            forceZero = min(abs(valueMin),abs(valueMax)) <= 5*(valueMax-valueMin)
        if forceZero:
            if valueMax<0: valueMax=0
            elif valueMin>0: valueMin = 0

    abf = self.avoidBoundFrac
    do_rr = not getattr(self,'valueSteps',None)
    do_abf = abf and do_rr
    if not isinstance(abf,_SequenceTypes):
        abf = abf, abf
    abfiz = getattr(self,'abf_ignore_zero', False)
    if not isinstance(abfiz,_SequenceTypes):
        abfiz = abfiz, abfiz
    # BUG FIX: was `rangeRound is not 'none'` -- an identity comparison
    # against a string literal that only works by CPython interning
    do_rr = rangeRound != 'none' and do_rr
    if do_rr:
        rrn = rangeRound in ['both','floor']
        rrx = rangeRound in ['both','ceiling']
    else:
        rrn = rrx = 0
    abS = self.avoidBoundSpace
    do_abs = abS
    if do_abs:
        if not isinstance(abS,_SequenceTypes):
            abS = abS, abS
    aL = float(self._length)

    # iteratively pad / round the range until it stabilises (<=10 passes)
    go = do_rr or do_abf or do_abs
    cache = {}
    iter = 0
    while go and iter<=10:
        iter += 1
        go = 0
        if do_abf or do_abs:
            valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
            if do_abf:
                i0 = valueStep*abf[0]
                i1 = valueStep*abf[1]
            else:
                i0 = i1 = 0
            if do_abs:
                sf = (valueMax-valueMin)/aL
                i0 = max(i0,abS[0]*sf)
                i1 = max(i1,abS[1]*sf)
            if rrn: v = T[0]
            else: v = valueMin
            u = cMin-i0
            if (abfiz[0] or abs(v)>fuzz) and v>=u+fuzz:
                valueMin = u
                go = 1
            if rrx: v = T[-1]
            else: v = valueMax
            u = cMax+i1
            if (abfiz[1] or abs(v)>fuzz) and v<=u-fuzz:
                valueMax = u
                go = 1
        if do_rr:
            valueStep, T, fuzz = self._getValueStepAndTicks(valueMin, valueMax, cache)
            if rrn:
                if valueMin<T[0]-fuzz:
                    valueMin = T[0]-valueStep
                    go = 1
                else:
                    go = valueMin>=T[0]+fuzz
                    valueMin = T[0]
            if rrx:
                if valueMax>T[-1]+fuzz:
                    valueMax = T[-1]+valueStep
                    go = 1
                else:
                    go = valueMax<=T[-1]-fuzz
                    valueMax = T[-1]

    if iter and not go:
        # the loop converged; remember the step so it isn't recomputed
        self._computedValueStep = valueStep
    else:
        self._computedValueStep = None

    self._valueMin = valueMin
    self._valueMax = valueMax

    # optionally shift the origin downwards so the lowest label clears
    origShiftIPC = self.origShiftIPC
    origShiftMin = self.origShiftMin
    if origShiftMin is not None or origShiftIPC is not None:
        origShiftSpecialValue = self.origShiftSpecialValue
        self._calcValueStep()
        valueMax, valueMin = self._valueMax, self._valueMin
        if origShiftSpecialValue is None or abs(origShiftSpecialValue-valueMin)<1e-6:
            if origShiftIPC:
                m = origShiftIPC*self._valueStep
            else:
                m = 0
            if origShiftMin:
                m = max(m,(valueMax-valueMin)*origShiftMin/self._length)
            self._valueMin -= m

    self._rangeAdjust()
def _pseudo_configure(self):
    # minimal configuration path used when no data is available:
    # trust the user-supplied valueMin/valueMax verbatim
    self._valueMin = self.valueMin
    self._valueMax = self.valueMax
    self._configure_end()
def _rangeAdjust(self):
    """Override this if you want to alter the calculated range.

    E.g. if want a minimum range of 30% or don't want 100%
    as the first point.
    """
    pass
def _adjustAxisTicks(self):
    '''Override if you want to put slack at the ends of the axis
    eg if you don't want the last tick to be at the bottom etc
    '''
    pass
def _calcScaleFactor(self):
    """Calculate the axis' scale factor.
    This should be called only *after* the axis' range is set.
    Returns a number.
    """
    # drawing units per data unit; _setRange guarantees a non-zero
    # range, so the division is safe
    self._scaleFactor = self._length / float(self._valueMax - self._valueMin)
    return self._scaleFactor
def _calcStepAndTickPositions(self):
valueStep = getattr(self,'_computedValueStep',None)
if valueStep:
del self._computedValueStep
self._valueStep = valueStep
else:
self._calcValueStep()
valueStep = self._valueStep
valueMin = self._valueMin
valueMax = self._valueMax
fuzz = 1e-8*valueStep
rangeRound = self.rangeRound
i0 = int(float(valueMin)/valueStep)
v = i0*valueStep
if rangeRound in ('both','floor'):
if v>valueMin+fuzz: i0 -= 1
elif v<valueMin-fuzz: i0 += 1
i1 = int(float(valueMax)/valueStep)
v = i1*valueStep
if rangeRound in ('both','ceiling'):
if v<valueMax-fuzz: i1 += 1
elif v>valueMax+fuzz: i1 -= 1
return valueStep,[i*valueStep for i in xrange(i0,i1+1)]
def _calcTickPositions(self):
return self._calcStepAndTickPositions()[1]
def _calcTickmarkPositions(self):
"""Calculate a list of tick positions on the axis. Returns a list of numbers."""
self._tickValues = getattr(self,'valueSteps',None)
if self._tickValues: return self._tickValues
self._tickValues = self._calcTickPositions()
self._adjustAxisTicks()
return self._tickValues
def _calcValueStep(self):
'''Calculate _valueStep for the axis or get from valueStep.'''
if self.valueStep is None:
rawRange = self._valueMax - self._valueMin
rawInterval = rawRange / min(float(self.maximumTicks-1),(float(self._length)/self.minimumTickSpacing))
self._valueStep = nextRoundNumber(rawInterval)
else:
self._valueStep = self.valueStep
def _allIntTicks(self):
    # True when every computed tick value is a whole number; used by
    # makeTickLabels to pick a '%.0f' label format.  _allInt is a
    # module-level helper defined elsewhere in this file.
    return _allInt(self._tickValues)
def makeTickLabels(self):
    # Build and return a Group of positioned tick-label shapes for this axis.
    g = Group()
    if not self.visibleLabels: return g
    f = self._labelTextFormat # perhaps someone already set it
    if f is None:
        # default: '%.0f' for all-integer ticks, else plain str()
        f = self.labelTextFormat or (self._allIntTicks() and '%.0f' or str)
    elif f is str and self._allIntTicks(): f = '%.0f'
    elif hasattr(f,'calcPlaces'):
        # formatter wants to see all tick values up front (e.g. to pick
        # a common number of decimal places)
        f.calcPlaces(self._tickValues)
    post = self.labelTextPostFormat
    scl = self.labelTextScale
    pos = [self._x, self._y]
    d = self._dataIndex
    pos[1-d] = self._labelAxisPos()
    labels = self.labels
    # skipEndL: optionally suppress labels that land exactly on the axis
    # start and/or end coordinate ('start', 'end', or both)
    if self.skipEndL!='none':
        if self.isXAxis:
            sk = self._x
        else:
            sk = self._y
        if self.skipEndL=='start':
            sk = [sk]
        else:
            sk = [sk,sk+self._length]
            if self.skipEndL=='end':
                del sk[0]
    else:
        sk = []
    nticks = len(self._tickValues)
    nticks1 = nticks - 1
    for i,tick in enumerate(self._tickValues):
        # NOTE(review): labels is a property collection; negative keys
        # (i-nticks) appear to let callers address ticks counting from the
        # end, falling back to the positive index -- confirm against the
        # TypedPropertyCollection semantics.
        label = i-nticks
        if label in labels:
            label = labels[label]
        else:
            label = labels[i]
        if f and label.visible:
            v = self.scale(tick)
            if sk:
                # drop labels sitting (within 1e-6) on a skipped axis end
                for skv in sk:
                    if abs(skv-v)<1e-6:
                        v = None
                        break
            if v is not None:
                if scl is not None:
                    t = tick*scl
                else:
                    t = tick
                if type(f) is str: txt = f % t
                elif isinstance(f,_SequenceTypes):
                    #it's a list, use as many items as we get
                    if i < len(f):
                        txt = f[i]
                    else:
                        txt = ''
                elif hasattr(f,'__call__'):
                    if isinstance(f,TickLabeller):
                        txt = f(self,t)
                    else:
                        txt = f(t)
                else:
                    raise ValueError, 'Invalid labelTextFormat %s' % f
                if post: txt = post % txt
                pos[d] = v
                label.setOrigin(*pos)
                label.setText(txt)
                #special property to ensure a label doesn't project beyond the bounds of an x-axis
                if self.keepTickLabelsInside:
                    if isinstance(self, XValueAxis): #not done yet for y axes
                        a_x = self._x
                        if not i: #first one
                            x0, y0, x1, y1 = label.getBounds()
                            if x0 < a_x:
                                label = label.clone(dx=label.dx + a_x - x0)
                        if i==nticks1: #final one
                            a_x1 = a_x +self._length
                            x0, y0, x1, y1 = label.getBounds()
                            if x1 > a_x1:
                                label=label.clone(dx=label.dx-x1+a_x1)
                g.add(label)
    return g
def scale(self, value):
    """Convert a numeric data value to a plot-area coordinate.

    The chart configures the axis first; scaling before that is an error.
    A value of None is treated as 0.
    """
    assert self._configured, "Axis cannot scale numbers before it is configured"
    if value is None:
        value = 0
    # origin and scale factor could be cached at configure time
    origin = (self._x, self._y)[self._dataIndex]
    factor = self._scaleFactor
    if self.reverseDirection:
        factor = -factor
        origin = origin + self._length
    return origin + factor * (value - self._valueMin)
class XValueAxis(_XTicks,ValueAxis):
    "X/value axis"
    _attrMap = AttrMap(BASE=ValueAxis,
        tickUp = AttrMapValue(isNumber,
            desc='Tick length up the axis.'),
        tickDown = AttrMapValue(isNumber,
            desc='Tick length down the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf('bottom', 'top', 'value', 'points', None),
            desc="Mode used for connecting axis ('bottom', 'top', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 0

    def __init__(self,**kw):
        ValueAxis.__init__(self,**kw)
        # labels hang below the axis line by default
        labels = self.labels
        labels.boxAnchor = 'n'
        labels.dx = 0
        labels.dy = -5
        self.tickUp = 0
        self.tickDown = 5
        # no joined axis until the caller asks for one
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        self.setPosition(20, 50, 150)
        self.configure([(10,20,30,40,50)])
        drawing = Drawing(200, 100)
        drawing.add(self)
        return drawing

    def joinToAxis(self, yAxis, mode='bottom', pos=None):
        "Join with y-axis using some mode."
        _assertYAxis(yAxis)
        x = yAxis._x * 1.0
        if mode == 'bottom':
            y = yAxis._y * 1.0
        elif mode == 'top':
            y = (yAxis._y + yAxis._length) * 1.0
        elif mode == 'value':
            y = yAxis.scale(pos) * 1.0
        elif mode == 'points':
            y = pos * 1.0
        else:
            # unknown mode: leave position untouched, as before
            return
        self._x = x
        self._y = y

    def _joinToAxis(self):
        # apply the declarative joinAxis/joinAxisMode/joinAxisPos attributes
        other = self.joinAxis
        if other:
            mode = self.joinAxisMode
            if mode in ('bottom', 'top'):
                self.joinToAxis(other, mode=mode)
            elif mode in ('value', 'points'):
                self.joinToAxis(other, mode=mode, pos=self.joinAxisPos)

    def makeAxis(self):
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis:
            return g
        # horizontal line, optionally extended by loLLen/hiLLen at each end
        x0 = self._x - self.loLLen
        x1 = self._x + self._length + self.hiLLen
        axis = Line(x0, self._y, x1, self._y)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g
#additional utilities to help specify calendar dates on which tick marks
#are to be plotted. After some thought, when the magic algorithm fails,
#we can let them specify a number of days-of-the-year to tick in any given
#year.
#################################################################################
#
# Preliminary support objects/functions for the axis used in time series charts
#
#################################################################################
# Month abbreviations and the maximum day number for each month.
# February allows 29 so recurring dates may include leap-day.
_months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
_maxDays = [31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]

def parseDayAndMonth(dmstr):
    """This accepts and validates strings like "31-Dec" i.e. dates
    of no particular year.  29 Feb is allowed.  These can be used
    for recurring dates.  It returns a (dd, mm) pair where mm is the
    month integer.  If the text is not valid it raises an error.
    """
    dstr, mstr = dmstr.split('-')
    dd = int(dstr)
    mstr = mstr.lower()
    mm = _months.index(mstr) + 1  # raises ValueError for an unknown month
    # BUGFIX: the original only checked the upper bound, silently accepting
    # invalid days such as '0-Jan' or '-3-Jan'; validate the lower bound too.
    assert 1 <= dd <= _maxDays[mm-1]
    return (dd, mm)
class _isListOfDaysAndMonths(Validator):
    """This accepts and validates lists of strings like "31-Dec" i.e. dates
    of no particular year. 29 Feb is allowed. These can be used
    for recurring dates.
    """
    def test(self,x):
        # Accept only a sequence in which every element parses as "dd-Mon".
        if not isinstance(x,_SequenceTypes):
            return False
        for element in x:
            try:
                dd, mm = parseDayAndMonth(element)
            except Exception:
                # BUGFIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit; Exception is sufficient here.
                # Short-circuit: one bad element fails the whole list.
                return False
        return True
    def normalize(self,x):
        #we store them as presented, it's the most presentable way
        return x
isListOfDaysAndMonths = _isListOfDaysAndMonths()
class NormalDateXValueAxis(XValueAxis):
    """An X axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    displays normalDate values as nicely formatted dates.

    The client chart should have NormalDate X values.
    """
    _attrMap = AttrMap(BASE = XValueAxis,
        bottomAxisLabelSlack = AttrMapValue(isNumber, desc="Fractional amount used to adjust label spacing"),
        niceMonth = AttrMapValue(isBoolean, desc="Flag for displaying months 'nicely'."),
        forceEndDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of last date value.'),
        forceFirstDate = AttrMapValue(isBoolean, desc='Flag for enforced displaying of first date value.'),
        forceDatesEachYear = AttrMapValue(isListOfDaysAndMonths, desc='List of dates in format "31-Dec",' +
            '"1-Jan".  If present they will always be used for tick marks in the current year, rather ' +
            'than the dates chosen by the automatic algorithm. Hyphen compulsory, case of month optional.'),
        xLabelFormat = AttrMapValue(None, desc="Label format string (e.g. '{mm}/{yy}') or function."),
        dayOfWeekName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=7,hi=7), desc='Weekday names.'),
        monthName = AttrMapValue(SequenceOf(isString,emptyOK=0,lo=12,hi=12), desc='Month names.'),
        dailyFreq = AttrMapValue(isBoolean, desc='True if we are to assume daily data to be ticked at end of month.'),
        specifiedTickDates = AttrMapValue(NoneOr(SequenceOf(isNormalDate)), desc='Actual tick values to use; no calculations done'),
        specialTickClear = AttrMapValue(isBoolean, desc='clear rather than delete close ticks when forced first/end dates'),
        )

    # class used to wrap scalar x values into date objects
    _valueClass = normalDate.ND

    def __init__(self,**kw):
        XValueAxis.__init__(self,**kw)

        # some global variables still used...
        self.bottomAxisLabelSlack = 0.1
        self.niceMonth = 1
        self.forceEndDate = 0
        self.forceFirstDate = 0
        self.forceDatesEachYear = []
        self.dailyFreq = 0
        self.xLabelFormat = "{mm}/{yy}"
        self.dayOfWeekName = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        self.monthName = ['January', 'February', 'March', 'April', 'May', 'June', 'July',
                          'August', 'September', 'October', 'November', 'December']
        self.specialTickClear = 0
        self.valueSteps = self.specifiedTickDates = None

    def _scalar2ND(self, x):
        "Convert a scalar to a NormalDate value."
        d = self._valueClass()
        d.normalize(x)
        return d

    def _dateFormatter(self, v):
        "Create a formatted label for some value."
        if not isinstance(v,normalDate.NormalDate):
            v = self._scalar2ND(v)
        # temporarily swap this axis' name tables into the normalDate module
        # so formatMS picks them up, restoring the originals afterwards
        d, m = normalDate._dayOfWeekName, normalDate._monthName
        try:
            normalDate._dayOfWeekName, normalDate._monthName = self.dayOfWeekName, self.monthName
            return v.formatMS(self.xLabelFormat)
        finally:
            normalDate._dayOfWeekName, normalDate._monthName = d, m

    def _xAxisTicker(self, xVals):
        """Complex stuff...

        Needs explanation...

        Yes please says Andy :-(.  Modified on 19 June 2006 to attempt to allow
        a mode where one can specify recurring days and months.
        """
        axisLength = self._length
        formatter = self._dateFormatter
        if isinstance(formatter,TickLabeller):
            # wrap a TickLabeller so it can be called with a single tick arg
            def formatter(tick):
                return self._dateFormatter(self,tick)
        firstDate = xVals[0]
        endDate = xVals[-1]
        labels = self.labels
        fontName, fontSize, leading = labels.fontName, labels.fontSize, labels.leading
        textAnchor, boxAnchor, angle = labels.textAnchor, labels.boxAnchor, labels.angle
        # estimate the (rotated) bounding box of one label to derive the
        # minimum spacing W between ticks
        RBL = _textBoxLimits(formatter(firstDate).split('\n'),fontName,
                    fontSize,leading or 1.2*fontSize,textAnchor,boxAnchor)
        RBL = _rotatedBoxLimits(RBL[0],RBL[1],RBL[2],RBL[3], angle)
        xLabelW = RBL[1]-RBL[0]
        xLabelH = RBL[3]-RBL[2]
        w = max(xLabelW,labels.width,self.minimumTickSpacing)
        W = w+w*self.bottomAxisLabelSlack
        n = len(xVals)
        ticks = []
        labels = []
        maximumTicks = self.maximumTicks

        # mode 1: caller supplied the exact tick dates -- no calculations
        if self.specifiedTickDates:
            VC = self._valueClass
            ticks = [VC(x) for x in self.specifiedTickDates]
            labels = [formatter(d) for d in ticks]
            # if a forced first/last date crowds its neighbour, drop (or
            # blank, with specialTickClear) the neighbouring tick
            if self.forceFirstDate and firstDate==ticks[0] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
                if self.specialTickClear:
                    labels[1] = ''
                else:
                    del ticks[1], labels[1]
            if self.forceEndDate and endDate==ticks[-1] and (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
                if self.specialTickClear:
                    labels[-2] = ''
                else:
                    del ticks[-2], labels[-2]
            return ticks, labels

        # helper: prepend tick i (ticks are built back-to-front below)
        def addTick(i, xVals=xVals, formatter=formatter, ticks=ticks, labels=labels):
            ticks.insert(0,xVals[i])
            labels.insert(0,formatter(xVals[i]))

        #AR 20060619 - first we try the approach where the user has explicitly
        #specified the days of year to be ticked.  Other explicit routes may
        #be added.
        if self.forceDatesEachYear:
            forcedPartialDates = map(parseDayAndMonth, self.forceDatesEachYear)
            #generate the list of dates in the range.
            #print 'dates range from %s to %s' % (firstDate, endDate)
            firstYear = firstDate.year()
            lastYear = endDate.year()
            ticks = []
            labels = []
            yyyy = firstYear
            #generate all forced dates between the year it starts and the year it
            #ends, adding them if within range.
            while yyyy <= lastYear:
                for (dd, mm) in forcedPartialDates:
                    theDate = normalDate.ND((yyyy, mm, dd))
                    if theDate >= firstDate and theDate <= endDate:
                        ticks.append(theDate)
                        labels.append(formatter(theDate))
                yyyy += 1

            #first and last may still be forced in.
            if self.forceFirstDate and firstDate!=ticks[0]:
                ticks.insert(0, firstDate)
                labels.insert(0,formatter(firstDate))
                if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
                    if self.specialTickClear:
                        labels[1] = ''
                    else:
                        del ticks[1], labels[1]
            if self.forceEndDate and endDate!=ticks[-1]:
                ticks.append(endDate)
                labels.append(formatter(endDate))
                if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
                    if self.specialTickClear:
                        labels[-2] = ''
                    else:
                        del ticks[-2], labels[-2]

            #print 'xVals found on forced dates =', ticks
            return ticks, labels

        #otherwise, we apply the 'magic algorithm...' which looks for nice spacing
        #based on the size and separation of the labels.
        for d in (1,2,3,6,12,24,60,120):
            k = n/d
            if k<=maximumTicks and k*W <= axisLength:
                i = n-1
                if self.niceMonth:
                    # NOTE(review): j is only bound on this branch, yet it is
                    # read below guarded by `self.niceMonth and j` -- appears
                    # to rely on both tests sharing the niceMonth condition.
                    j = endDate.month() % (d<=12 and d or 12)
                    if j:
                        if self.forceEndDate:
                            addTick(i)
                            ticks[0]._doSubTicks=0
                        i -= j
                #weird first date ie not at end of month
                try:
                    wfd = firstDate.month() == xVals[1].month()
                except:
                    wfd = 0
                while i>=wfd:
                    addTick(i)
                    i -= d
                if self.forceFirstDate and ticks[0]!=firstDate:
                    addTick(0)
                    ticks[0]._doSubTicks=0
                    if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[1]-ticks[0])<=W:
                        if self.specialTickClear:
                            labels[1] = ''
                        else:
                            del ticks[1], labels[1]
                if self.forceEndDate and self.niceMonth and j:
                    if (axisLength/float(ticks[-1]-ticks[0]))*(ticks[-1]-ticks[-2])<=W:
                        if self.specialTickClear:
                            labels[-2] = ''
                        else:
                            del ticks[-2], labels[-2]
                try:
                    # drop a duplicated leading label (can happen with month snapping)
                    if labels[0] and labels[0]==labels[1]:
                        del ticks[1], labels[1]
                except IndexError:
                    pass
                return ticks, labels

    def _convertXV(self,data):
        '''Convert all XValues to a standard normalDate type'''
        VC = self._valueClass
        for D in data:
            for i in xrange(len(D)):
                x, y = D[i]
                if not isinstance(x,VC):
                    D[i] = (VC(x),y)

    def _getStepsAndLabels(self,xVals):
        if self.dailyFreq:
            # condense daily data to one tick per month (last day seen in
            # each month), then let the ticker work on that reduced list
            xEOM = []
            pm = 0
            px = xVals[0]
            for x in xVals:
                m = x.month()
                if pm!=m:
                    if pm: xEOM.append(px)
                    pm = m
                px = x
            px = xVals[-1]
            # NOTE(review): compares against the loop variable `x` (== px
            # after the loop) and raises IndexError for an empty xEOM --
            # looks like it intends xEOM[-1]!=px; confirm before changing.
            if xEOM[-1]!=x: xEOM.append(px)
            steps, labels = self._xAxisTicker(xEOM)
        else:
            steps, labels = self._xAxisTicker(xVals)
        return steps, labels

    def configure(self, data):
        # data is a sequence of series, each a sequence of (x, y) pairs
        self._convertXV(data)
        from reportlab.lib.set_ops import union
        # collect the union of all x values across series
        xVals = reduce(union,map(lambda x: map(lambda dv: dv[0],x),data),[])
        xVals.sort()
        steps,labels = self._getStepsAndLabels(xVals)
        valueMin, valueMax = self.valueMin, self.valueMax
        if valueMin is None: valueMin = xVals[0]
        if valueMax is None: valueMax = xVals[-1]
        self._valueMin, self._valueMax = valueMin, valueMax
        self._tickValues = steps
        self._labelTextFormat = labels
        self._scaleFactor = self._length / float(valueMax - valueMin)
        self._tickValues = steps
        self._configured = 1
class YValueAxis(_YTicks,ValueAxis):
    "Y/value axis"
    _attrMap = AttrMap(BASE=ValueAxis,
        tickLeft = AttrMapValue(isNumber,
            desc='Tick length left of the axis.'),
        tickRight = AttrMapValue(isNumber,
            desc='Tick length right of the axis.'),
        joinAxis = AttrMapValue(None,
            desc='Join both axes if true.'),
        joinAxisMode = AttrMapValue(OneOf(('left', 'right', 'value', 'points', None)),
            desc="Mode used for connecting axis ('left', 'right', 'value', 'points', None)."),
        joinAxisPos = AttrMapValue(isNumberOrNone,
            desc='Position at which to join with other axis.'),
        )

    # Indicate the dimension of the data we're interested in.
    _dataIndex = 1

    def __init__(self,**kw):
        # BUGFIX: accept **kw like XValueAxis does.  AdjYValueAxis.__init__
        # calls YValueAxis.__init__(self,**kw), which previously raised a
        # TypeError whenever keyword arguments were supplied.
        ValueAxis.__init__(self,**kw)
        # labels sit to the left of the axis line by default
        self.labels.boxAnchor = 'e'
        self.labels.dx = -5
        self.labels.dy = 0
        self.tickRight = 0
        self.tickLeft = 5
        self.joinAxis = None
        self.joinAxisMode = None
        self.joinAxisPos = None

    def demo(self):
        data = [(10, 20, 30, 42)]
        self.setPosition(100, 10, 80)
        self.configure(data)
        drawing = Drawing(200, 100)
        drawing.add(self)
        return drawing

    def joinToAxis(self, xAxis, mode='left', pos=None):
        "Join with x-axis using some mode."
        _assertXAxis(xAxis)
        if mode == 'left':
            self._x = xAxis._x * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'right':
            self._x = (xAxis._x + xAxis._length) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'value':
            self._x = xAxis.scale(pos) * 1.0
            self._y = xAxis._y * 1.0
        elif mode == 'points':
            self._x = pos * 1.0
            self._y = xAxis._y * 1.0

    def _joinToAxis(self):
        # apply the declarative joinAxis/joinAxisMode/joinAxisPos attributes
        ja = self.joinAxis
        if ja:
            jam = self.joinAxisMode
            jap = self.joinAxisPos
            jta = self.joinToAxis
            if jam in ('left', 'right'):
                jta(ja, mode=jam)
            elif jam in ('value', 'points'):
                jta(ja, mode=jam, pos=jap)

    def makeAxis(self):
        g = Group()
        self._joinToAxis()
        if not self.visibleAxis:
            return g
        # vertical line, optionally extended by loLLen/hiLLen at each end
        axis = Line(self._x, self._y-self.loLLen, self._x, self._y + self._length+self.hiLLen)
        axis.strokeColor = self.strokeColor
        axis.strokeWidth = self.strokeWidth
        axis.strokeDashArray = self.strokeDashArray
        g.add(axis)
        return g
class AdjYValueAxis(YValueAxis):
    """A Y-axis applying additional rules.

    Depending on the data and some built-in rules, the axis
    may choose to adjust its range and origin.
    """
    _attrMap = AttrMap(BASE = YValueAxis,
        requiredRange = AttrMapValue(isNumberOrNone, desc='Minimum required value range.'),
        leftAxisPercent = AttrMapValue(isBoolean, desc='When true add percent sign to label values.'),
        leftAxisOrigShiftIPC = AttrMapValue(isNumber, desc='Lowest label shift interval ratio.'),
        leftAxisOrigShiftMin = AttrMapValue(isNumber, desc='Minimum amount to shift.'),
        leftAxisSkipLL0 = AttrMapValue(EitherOr((isBoolean,isListOfNumbers)), desc='Skip/Keep lowest tick label when true/false.\nOr skiplist'),
        labelVOffset = AttrMapValue(isNumber, desc='add this to the labels'),
        )

    def __init__(self,**kw):
        YValueAxis.__init__(self,**kw)
        self.requiredRange = 30
        self.leftAxisPercent = 1
        self.leftAxisOrigShiftIPC = 0.15
        self.leftAxisOrigShiftMin = 12
        self.leftAxisSkipLL0 = self.labelVOffset = 0
        self.valueSteps = None

    def _rangeAdjust(self):
        "Adjusts the value range of the axis."
        from reportlab.graphics.charts.utils import find_good_grid, ticks
        y_min, y_max = self._valueMin, self._valueMax
        m = self.maximumTicks
        # candidate tick counts, capped by maximumTicks
        n = filter(lambda x,m=m: x<=m,[4,5,6,7,8,9])
        if not n: n = [m]
        valueStep, requiredRange = self.valueStep, self.requiredRange
        if requiredRange and y_max - y_min < requiredRange:
            # widen to the minimum span, centred on the grid midpoint
            y1, y2 = find_good_grid(y_min, y_max,n=n,grid=valueStep)[:2]
            if y2 - y1 < requiredRange:
                ym = (y1+y2)*0.5
                y1 = min(ym-requiredRange*0.5,y_min)
                y2 = max(ym+requiredRange*0.5,y_max)
                # don't let the widened range cross 100 (index data) or 0
                if y_min>=100 and y1<100:
                    y2 = y2 + 100 - y1
                    y1 = 100
                elif y_min>=0 and y1<0:
                    y2 = y2 - y1
                    y1 = 0
            self._valueMin, self._valueMax = y1, y2
        T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        # avoidBoundFrac: keep data away from the axis ends by a fraction
        # (or a (lo, hi) pair of fractions) of one tick interval
        abf = self.avoidBoundFrac
        if abf:
            i1 = (T[1]-T[0])
            if not isinstance(abf,_SequenceTypes):
                i0 = i1 = i1*abf
            else:
                i0 = i1*abf[0]
                i1 = i1*abf[1]
            _n = getattr(self,'_cValueMin',T[0])
            _x = getattr(self,'_cValueMax',T[-1])
            if _n - T[0] < i0: self._valueMin = self._valueMin - i0
            if T[-1]-_x < i1: self._valueMax = self._valueMax + i1
            # re-tick with the padded range
            T, L = ticks(self._valueMin, self._valueMax, split=1, n=n, percent=self.leftAxisPercent,grid=valueStep, labelVOffset=self.labelVOffset)
        self._valueMin = T[0]
        self._valueMax = T[-1]
        self._tickValues = T
        if self.labelTextFormat is None:
            self._labelTextFormat = L
        else:
            self._labelTextFormat = self.labelTextFormat
        if abs(self._valueMin-100)<1e-6:
            # axis starting exactly at 100 (index-style data): shift the
            # origin down a little so the 100 line isn't on the x axis
            self._calcValueStep()
            vMax, vMin = self._valueMax, self._valueMin
            m = max(self.leftAxisOrigShiftIPC*self._valueStep,
                    (vMax-vMin)*self.leftAxisOrigShiftMin/self._length)
            self._valueMin = self._valueMin - m
        if self.leftAxisSkipLL0:
            # blank the lowest label, and optionally any listed indices
            if isinstance(self.leftAxisSkipLL0,_SequenceTypes):
                for x in self.leftAxisSkipLL0:
                    try:
                        L[x] = ''
                    except IndexError:
                        pass
            L[0] = ''
# Sample functions.
def sample0a():
    "Sample drawing with one xcat axis and two buckets."
    data = [(10, 20)]
    ax = XCategoryAxis()
    ax.setPosition(75, 75, 300)
    ax.configure(data)
    ax.categoryNames = ['Ying', 'Yang']
    ax.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(ax)
    return d
def sample0b():
    "Sample drawing with one xcat axis and one bucket only."
    data = [(10,)]
    ax = XCategoryAxis()
    ax.setPosition(75, 75, 300)
    ax.configure(data)
    ax.categoryNames = ['Ying']
    ax.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(ax)
    return d
def sample1():
    "Sample drawing containing two unconnected axes."
    from reportlab.graphics.shapes import _baseGFontNameB
    data = [(10, 20, 30, 42)]
    xa = XCategoryAxis()
    xa.setPosition(75, 75, 300)
    xa.configure(data)
    xa.categoryNames = ['Beer','Wine','Meat','Cannelloni']
    xa.labels.boxAnchor = 'n'
    # style the final category label individually
    xa.labels[3].dy = -15
    xa.labels[3].angle = 30
    xa.labels[3].fontName = _baseGFontNameB
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'points'
    xa.joinAxisPos = 100
    xa.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'value'
    xa.joinAxisPos = 35
    xa.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4c():
    "Sample drawing, xvalue/yvalue axes, y connected to bottom of x."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4c1():
    "xvalue/yvalue axes, without drawing axis lines/ticks."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    ya.visibleAxis = 0
    ya.visibleTicks = 0
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.configure(data)
    xa.visibleAxis = 0
    xa.visibleTicks = 0
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample4d():
    "Sample drawing, xvalue/yvalue axes, y connected to top of x."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XValueAxis()
    xa._length = 300
    xa.joinAxis = ya
    xa.joinAxisMode = 'top'
    xa.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5a():
    "Sample drawing, xvalue/yvalue axes, y connected at 100 pts to x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(data)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'points'
    ya.joinAxisPos = 100
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5b():
    "Sample drawing, xvalue/yvalue axes, y connected at value 35 of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(data)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'value'
    ya.joinAxisPos = 35
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5c():
    "Sample drawing, xvalue/yvalue axes, y connected at right of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(data)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'right'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample5d():
    "Sample drawing, xvalue/yvalue axes, y connected at left of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa.setPosition(50, 50, 300)
    xa.configure(data)
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'left'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6a():
    "Sample drawing, xcat/yvalue axes, x connected at top of y."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(data)
    xa.joinAxis = ya
    xa.joinAxisMode = 'top'
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6b():
    "Sample drawing, xcat/yvalue axes, x connected at bottom of y."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(data)
    xa.joinAxis = ya
    xa.joinAxisMode = 'bottom'
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6c():
    "Sample drawing, xcat/yvalue axes, x connected at 100 pts to y."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(data)
    xa.joinAxis = ya
    xa.joinAxisMode = 'points'
    xa.joinAxisPos = 100
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample6d():
    "Sample drawing, xcat/yvalue axes, x connected at value 20 of y."
    data = [(10, 20, 30, 42)]
    ya = YValueAxis()
    ya.setPosition(50, 50, 125)
    ya.configure(data)
    xa = XCategoryAxis()
    xa._length = 300
    xa.configure(data)
    xa.joinAxis = ya
    xa.joinAxisMode = 'value'
    xa.joinAxisPos = 20
    xa.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    xa.labels.boxAnchor = 'n'
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7a():
    "Sample drawing, xvalue/ycat axes, y connected at right of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(data)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'right'
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7b():
    "Sample drawing, xvalue/ycat axes, y connected at left of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(data)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'left'
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7c():
    "Sample drawing, xvalue/ycat axes, y connected at value 30 of x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(data)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'value'
    ya.joinAxisPos = 30
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
def sample7d():
    "Sample drawing, xvalue/ycat axes, y connected at 200 pts to x."
    data = [(10, 20, 30, 42)]
    xa = XValueAxis()
    xa._length = 300
    xa.configure(data)
    ya = YCategoryAxis()
    ya.setPosition(50, 50, 125)
    ya.joinAxis = xa
    ya.joinAxisMode = 'points'
    ya.joinAxisPos = 200
    ya.categoryNames = ['Beer', 'Wine', 'Meat', 'Cannelloni']
    ya.labels.boxAnchor = 'e'
    ya.configure(data)
    d = Drawing(400, 200)
    d.add(xa)
    d.add(ya)
    return d
| gpl-2.0 |
itzzshirlayyy/Online_Ordering | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/__init__.py | 147 | 4222 | """
Copyright (c) Donald Stufft, pip, and individual contributors
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from __future__ import absolute_import
import sys
class VendorAlias(object):
    """sys.meta_path import hook (PEP 302 finder/loader) that resolves
    imports under this vendor package, falling back to the equivalently
    named top-level package when the vendored copy cannot be imported.
    """

    def __init__(self, package_names):
        # package_names: top-level names (e.g. "urllib3") that this alias
        # is allowed to serve from under the vendor namespace
        self._package_names = package_names
        self._vendor_name = __name__
        self._vendor_pkg = self._vendor_name + "."
        self._vendor_pkgs = [
            self._vendor_pkg + name for name in self._package_names
        ]

    def find_module(self, fullname, path=None):
        # Only claim modules under our vendor prefix; returning None lets
        # the normal import machinery handle everything else.
        if fullname.startswith(self._vendor_pkg):
            return self

    def load_module(self, name):
        # Import `name` (a dotted path under the vendor package), preferring
        # the vendored module and falling back to the real top-level one.
        # Ensure that this only works for the vendored name
        if not name.startswith(self._vendor_pkg):
            raise ImportError(
                "Cannot import %s, must be a subpackage of '%s'." % (
                    name, self._vendor_name,
                )
            )

        if not (name == self._vendor_name or
                any(name.startswith(pkg) for pkg in self._vendor_pkgs)):
            raise ImportError(
                "Cannot import %s, must be one of %s." % (
                    name, self._vendor_pkgs
                )
            )

        # Check to see if we already have this item in sys.modules, if we do
        # then simply return that.
        if name in sys.modules:
            return sys.modules[name]

        # Check to see if we can import the vendor name
        try:
            # We do this dance here because we want to try and import this
            # module without hitting a recursion error because of a bunch of
            # VendorAlias instances on sys.meta_path
            real_meta_path = sys.meta_path[:]
            try:
                sys.meta_path = [
                    m for m in sys.meta_path
                    if not isinstance(m, VendorAlias)
                ]
                __import__(name)
                module = sys.modules[name]
            finally:
                # Re-add any additions to sys.meta_path that were made while
                # during the import we just did, otherwise things like
                # requests.packages.urllib3.poolmanager will fail.
                for m in sys.meta_path:
                    if m not in real_meta_path:
                        real_meta_path.append(m)

                # Restore sys.meta_path with any new items.
                sys.meta_path = real_meta_path
        except ImportError:
            # We can't import the vendor name, so we'll try to import the
            # "real" name.
            real_name = name[len(self._vendor_pkg):]
            try:
                __import__(real_name)
                module = sys.modules[real_name]
            except ImportError:
                raise ImportError("No module named '%s'" % (name,))

        # If we've gotten here we've found the module we're looking for, either
        # as part of our vendored package, or as the real name, so we'll add
        # it to sys.modules as the vendored name so that we don't have to do
        # the lookup again.
        sys.modules[name] = module

        # Finally, return the loaded module
        return module
# Install the alias hook so `<this package>.urllib3` / `.chardet` imports
# resolve, with system-package fallback (see VendorAlias above).
sys.meta_path.append(VendorAlias(["urllib3", "chardet"]))
| mit |
getadcoin/adcoin | share/qt/make_spinner.py | 4415 | 1035 | #!/usr/bin/env python
# W.J. van der Laan, 2011
# Make spinning .mng animation from a .png
# Requires imagemagick 6.7+
from __future__ import division
from os import path
from PIL import Image
from subprocess import Popen

# --- configuration ------------------------------------------------------
SRC='img/reload_scaled.png'                        # input still image
DST='../../src/qt/res/movies/update_spinner.mng'   # output animation
TMPDIR='/tmp'                                      # where intermediate frames go
TMPNAME='tmp-%03i.png'                             # frame filename pattern
NUMFRAMES=35                                       # frames per full revolution
FRAMERATE=10.0                                     # passed to convert as -delay
CONVERT='convert'                                  # ImageMagick binary
CLOCKWISE=True                                     # spin direction
DSIZE=(16,16)                                      # final frame size in pixels

im_src = Image.open(SRC)

# Mirror the source first; combined with negating the rotation angle below
# this produces a clockwise spin.
if CLOCKWISE:
    im_src = im_src.transpose(Image.FLIP_LEFT_RIGHT)

def frame_to_filename(frame):
    # Temporary filename for frame number `frame`.
    return path.join(TMPDIR, TMPNAME % frame)

frame_files = []
for frame in xrange(NUMFRAMES):
    # sample the rotation at the centre of each frame's interval
    rotation = (frame + 0.5) / NUMFRAMES * 360.0
    if CLOCKWISE:
        rotation = -rotation
    im_new = im_src.rotate(rotation, Image.BICUBIC)
    im_new.thumbnail(DSIZE, Image.ANTIALIAS)
    outfile = frame_to_filename(frame)
    im_new.save(outfile, 'png')
    frame_files.append(outfile)

# Assemble the frames into the .mng animation with ImageMagick.
p = Popen([CONVERT, "-delay", str(FRAMERATE), "-dispose", "2"] + frame_files + [DST])
p.communicate()
| mit |
bvcms/bvcms | CmsWeb/Lib/lib2to3/fixes/fix_next.py | 327 | 3181 | """Fixer for it.next() -> next(it), per PEP 3114."""
# Author: Collin Winter
# Things that currently aren't covered:
# - listcomp "next" names aren't warned
# - "with" statement targets aren't checked
# Local imports
from ..pgen2 import token
from ..pygram import python_symbols as syms
from .. import fixer_base
from ..fixer_util import Name, Call, find_binding
bind_warning = "Calls to builtin next() possibly shadowed by global binding"
class FixNext(fixer_base.BaseFix):
    """Rewrite ``it.next()`` calls to ``next(it)`` (PEP 3114).

    Also renames ``next`` methods defined on classes to ``__next__`` and
    warns when a module-level binding named ``next`` may shadow the builtin.
    """
    BM_compatible = True
    PATTERN = """
    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
    |
    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
    |
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def'
                              name='next'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    |
    global=global_stmt< 'global' any* 'next' any* >
    """
    order = "pre" # Pre-order tree traversal

    def start_tree(self, tree, filename):
        """Record whether this module binds the name ``next`` anywhere."""
        super(FixNext, self).start_tree(tree, filename)
        n = find_binding(u'next', tree)
        if n:
            self.warning(n, bind_warning)
            self.shadowed_next = True
        else:
            self.shadowed_next = False

    def transform(self, node, results):
        """Apply the rewrite matching whichever PATTERN alternative fired."""
        assert results
        base = results.get("base")
        attr = results.get("attr")
        name = results.get("name")
        if base:
            # ``obj.next()`` call site.
            if self.shadowed_next:
                # Builtin next() may be shadowed here, so rename the method
                # reference instead of emitting a call to the builtin.
                attr.replace(Name(u"__next__", prefix=attr.prefix))
            else:
                base = [n.clone() for n in base]
                base[0].prefix = u""
                node.replace(Call(Name(u"next", prefix=node.prefix), base))
        elif name:
            # ``def next(self)`` method definition -> ``__next__``.
            n = Name(u"__next__", prefix=name.prefix)
            name.replace(n)
        elif attr:
            # Bare ``obj.next`` attribute access (no call).
            # We don't do this transformation if we're assigning to "x.next".
            # Unfortunately, it doesn't seem possible to do this in PATTERN,
            # so it's being done here.
            if is_assign_target(node):
                head = results["head"]
                if "".join([str(n) for n in head]).strip() == u'__builtin__':
                    self.warning(node, bind_warning)
                return
            attr.replace(Name(u"__next__"))
        elif "global" in results:
            # ``global next`` statement: can only warn, not rewrite.
            self.warning(node, bind_warning)
            self.shadowed_next = True
### The following functions help test if node is part of an assignment
### target.
def is_assign_target(node):
    """Return True if *node* sits on the left-hand side of an assignment."""
    stmt = find_assign(node)
    if stmt is None:
        return False
    # Walk the expr_stmt children left to right: anything that contains
    # *node* before the first '=' token is an assignment target.
    for part in stmt.children:
        if part.type == token.EQUAL:
            return False
        if is_subtree(part, node):
            return True
    return False
def find_assign(node):
    """Climb the ancestors of *node* looking for the enclosing expr_stmt.

    Returns None once a simple_stmt or the tree root is reached without
    finding one.
    """
    current = node
    while True:
        if current.type == syms.expr_stmt:
            return current
        if current.type == syms.simple_stmt or current.parent is None:
            return None
        current = current.parent
def is_subtree(root, node):
    """Return True if *node* is *root* itself or appears anywhere below it."""
    pending = [root]
    while pending:
        candidate = pending.pop()
        if candidate == node:
            return True
        pending.extend(candidate.children)
    return False
| gpl-2.0 |
Alexx-G/openface | evaluation/lfw-classification-unknown.py | 4 | 19062 | #!/usr/bin/env python2
#
# This files can be used to benchmark different classifiers
# on lfw dataset with known and unknown dataset.
# More info at: https://github.com/cmusatyalab/openface/issues/144
# Brandon Amos & Vijayenthiran Subramaniam
# 2016/06/28
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import shutil # For copy images
import errno
import sys
import operator
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from nolearn.dbn import DBN
import multiprocessing
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
sys.path.append('./util/')
align_dlib = __import__('align-dlib')
# The list of available classifiers. The list is used in train() and
# inferFromTest() functions.
clfChoices = [
'LinearSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN']
def train(args):
    """Train and pickle one classifier per entry in clfChoices.

    Reads args.workDir/labels.csv and args.workDir/reps.csv (produced by
    batch-represent), fits each classifier on the embeddings, and writes
    "<clfChoice>.pkl" files containing (LabelEncoder, classifier) back
    into args.workDir.

    NOTE(review): Python 2 code -- ``map`` must return a list here for the
    two LabelEncoder passes below to both see the labels.
    """
    start = time.time()
    for clfChoice in clfChoices:
        print("Loading embeddings.")
        fname = "{}/labels.csv".format(args.workDir)
        labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
        labels = map(itemgetter(1),
                     map(os.path.split,
                         map(os.path.dirname, labels)))  # Get the directory.
        fname = "{}/reps.csv".format(args.workDir)
        embeddings = pd.read_csv(fname, header=None).as_matrix()
        le = LabelEncoder().fit(labels)
        labelsNum = le.transform(labels)
        nClasses = len(le.classes_)
        print("Training for {} classes.".format(nClasses))
        if clfChoice == 'LinearSvm':
            clf = SVC(C=1, kernel='linear', probability=True)
        elif clfChoice == 'GMM':  # Doesn't work best
            clf = GMM(n_components=nClasses)
        # ref:
        # http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
        elif clfChoice == 'RadialSvm':  # Radial Basis Function kernel
            # works better with C = 1 and gamma = 2
            clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
        elif clfChoice == 'DecisionTree':  # Doesn't work best
            clf = DecisionTreeClassifier(max_depth=20)
        elif clfChoice == 'GaussianNB':
            clf = GaussianNB()
        # ref: https://jessesw.com/Deep-Learning/
        elif clfChoice == 'DBN':
            if args.verbose:
                verbose = 1
            else:
                verbose = 0
            clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1],  # i/p nodes, hidden nodes, o/p nodes
                      learn_rates=0.3,
                      # Smaller steps mean a possibly more accurate result, but the
                      # training will take longer
                      learn_rate_decays=0.9,
                      # a factor the initial learning rate will be multiplied by
                      # after each iteration of the training
                      epochs=300,  # no of iternation
                      # dropouts = 0.25, # Express the percentage of nodes that
                      # will be randomly dropped as a decimal.
                      verbose=verbose)
        if args.ldaDim > 0:
            # Optionally reduce dimensionality with LDA before classifying.
            clf_final = clf
            clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
                            ('clf', clf_final)])
        clf.fit(embeddings, labelsNum)
        fName = os.path.join(args.workDir, clfChoice + ".pkl")
        print("Saving classifier to '{}'".format(fName))
        with open(fName, 'w') as f:
            pickle.dump((le, clf), f)
    if args.verbose:
        print(
            "Training and saving the classifiers took {} seconds.".format(
                time.time() - start))
def getRep(imgPath):
    """Return the OpenFace embedding for the largest face in *imgPath*.

    Uses the module-level globals ``args`` (verbosity, imgDim), ``align``
    (dlib aligner) and ``net`` (Torch network) set up in __main__.
    Raises Exception if the image cannot be loaded, no face is found, or
    alignment fails.
    """
    start = time.time()
    bgrImg = cv2.imread(imgPath)
    if bgrImg is None:
        raise Exception("Unable to load image: {}".format(imgPath))
    # OpenCV loads BGR; the rest of the pipeline expects RGB.
    rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
    if args.verbose:
        print(" + Original size: {}".format(rgbImg.shape))
    if args.verbose:
        print("Loading the image took {} seconds.".format(time.time() - start))
    start = time.time()
    bb = align.getLargestFaceBoundingBox(rgbImg)
    if (bb is None):
        raise Exception("Unable to find a face: {}".format(imgPath))
    if args.verbose:
        print("Face detection took {} seconds.".format(time.time() - start))
    start = time.time()
    alignedFace = align.align(
        args.imgDim,
        rgbImg,
        bb,
        landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
    if alignedFace is None:
        raise Exception("Unable to align image: {}".format(imgPath))
    if args.verbose:
        print("Alignment took {} seconds.".format(time.time() - start))
    start = time.time()
    rep = net.forward(alignedFace)
    if args.verbose:
        print(
            "Neural network forward pass took {} seconds.".format(
                time.time() - start))
    return rep
def inferFromTest(args):
    """Evaluate every pickled classifier against a test image folder.

    For each classifier in clfChoices, loads "<clfChoice>.pkl" from
    args.featureFolder[0], predicts every image under args.testFolder[0]
    (one sub-folder per person), and prints accuracy / average confidence.

    NOTE(review): --threshold is declared with nargs='+' in the parser, so
    when supplied on the command line it arrives as a *list* of floats,
    while the default is the float 0.0 -- confirm the ``confidence <=
    args.threshold`` comparison below sees the type it expects.
    NOTE(review): a folder with zero usable images makes the accuracy
    division below raise ZeroDivisionError.
    """
    for clfChoice in clfChoices:
        print "==============="
        print "Using the classifier: " + clfChoice
        with open(os.path.join(args.featureFolder[0], clfChoice + ".pkl"), 'r') as f_clf:
            (le, clf) = pickle.load(f_clf)
        correctPrediction = 0
        inCorrectPrediction = 0
        sumConfidence = 0.0
        testSet = [
            os.path.join(
                args.testFolder[0], f) for f in os.listdir(
                args.testFolder[0]) if not f.endswith('.DS_Store')]
        for personSet in testSet:
            personImages = [os.path.join(personSet, f) for f in os.listdir(
                personSet) if not f.endswith('.DS_Store')]
            for img in personImages:
                if args.verbose:
                    print("\n=== {} ===".format(img.split('/')[-1:][0]))
                try:
                    rep = getRep(img).reshape(1, -1)
                except Exception as e:
                    # Unreadable / faceless images are skipped, not counted.
                    print e
                    continue
                start = time.time()
                predictions = clf.predict_proba(rep).ravel()
                maxI = np.argmax(predictions)
                person = le.inverse_transform(maxI)
                confidence = predictions[maxI]
                if args.verbose:
                    print(
                        "Prediction took {} seconds.".format(
                            time.time() - start))
                if args.verbose:
                    print(
                        "Predict {} with {:.2f} confidence.".format(
                            person, confidence))
                sumConfidence += confidence
                # Low-confidence predictions are remapped to "_unknown" when
                # evaluating against the unknown dataset.
                if confidence <= args.threshold and args.unknown:
                    person = "_unknown"
                # Ground truth is encoded in the file name: the name minus
                # its last five characters (e.g. "_0001") is the person.
                if (img.split('/')[-1:][0].split('.')[0][:-5] == person and not args.unknown) or (person == "_unknown" and args.unknown):
                    correctPrediction += 1
                else:
                    inCorrectPrediction += 1
                if isinstance(clf, GMM) and args.verbose:
                    dist = np.linalg.norm(rep - clf.means_[maxI])
                    print("  + Distance from the mean: {}".format(dist))
        print "Results for the classifier: " + clfChoice
        print "Correct Prediction :" + str(correctPrediction)
        print "In-correct Prediction: " + str(inCorrectPrediction)
        print "Accuracy :" + str(float(correctPrediction) / (correctPrediction + inCorrectPrediction))
        print "Avg Confidence: " + str(float(sumConfidence) / (correctPrediction + inCorrectPrediction))
def preprocess(args):
    """Split lfw into known/unknown train/test sets, align and featurize.

    Steps, all under args.featuresDir (destPath):
      1. Sort the people in args.lfwDir by image count (descending).
      2. Copy the args.rangeOfPeople slice into train_known_raw/.
      3. Move the last 10% of each person's images to test_known_raw/.
      4. Copy everyone after the range into test_unknown_raw/.
      5. Align train_known_raw with 8 parallel align-dlib workers.
      6. Run batch-represent (Lua) to extract train_known_features/.
    """
    start = time.time()
    lfwPath = args.lfwDir
    destPath = args.featuresDir
    fullFaceDirectory = [os.path.join(lfwPath, f) for f in os.listdir(
        lfwPath) if not f.endswith('.DS_Store')]  # .DS_Store for the OS X
    noOfImages = []
    folderName = []
    for folder in fullFaceDirectory:
        try:
            noOfImages.append(len(os.listdir(folder)))
            folderName.append(folder.split('/')[-1:][0])
            # print folder.split('/')[-1:][0] +": " +
            # str(len(os.listdir(folder)))
        except:
            # NOTE(review): bare except silently skips unreadable entries;
            # it also hides unrelated errors.
            pass
    # Sorting
    noOfImages_sorted, folderName_sorted = zip(
        *sorted(zip(noOfImages, folderName), key=operator.itemgetter(0), reverse=True))
    with open(os.path.join(destPath, "List_of_folders_and_number_of_images.txt"), "w") as text_file:
        for f, n in zip(folderName_sorted, noOfImages_sorted):
            text_file.write("{} : {} \n".format(f, n))
    if args.verbose:
        print "Sorting lfw dataset took {} seconds.".format(time.time() - start)
    start = time.time()
    # Copy known train dataset
    for i in range(int(args.rangeOfPeople.split(':')[0]), int(
            args.rangeOfPeople.split(':')[1])):
        src = os.path.join(lfwPath, folderName_sorted[i])
        try:
            destFolder = os.path.join(
                destPath, 'train_known_raw', folderName_sorted[i])
            shutil.copytree(src, destFolder)
        except OSError as e:
            # If the error was caused because the source wasn't a directory
            if e.errno == errno.ENOTDIR:
                shutil.copy(src, destFolder)
            else:
                if args.verbose:
                    print('Directory not copied. Error: %s' % e)
    if args.verbose:
        print "Copying train dataset from lfw took {} seconds.".format(time.time() - start)
    start = time.time()
    # Take 10% images from train dataset as test dataset for known
    train_known_raw = [
        os.path.join(
            os.path.join(
                destPath,
                'train_known_raw'),
            f) for f in os.listdir(
            os.path.join(
                destPath,
                'train_known_raw')) if not f.endswith('.DS_Store')]  # .DS_Store for the OS X
    for folder in train_known_raw:
        images = [os.path.join(folder, f) for f in os.listdir(
            folder) if not f.endswith('.DS_Store')]
        if not os.path.exists(os.path.join(
                destPath, 'test_known_raw', folder.split('/')[-1:][0])):
            os.makedirs(os.path.join(destPath, 'test_known_raw',
                                     folder.split('/')[-1:][0]))
            # print "Created {}".format(os.path.join(destPath,
            # 'test_known_raw', folder.split('/')[-1:][0]))
        for i in range(int(0.9 * len(images)), len(images)):
            destFile = os.path.join(destPath, 'test_known_raw', folder.split(
                '/')[-1:][0], images[i].split('/')[-1:][0])
            try:
                shutil.move(images[i], destFile)
            except:
                pass
    if args.verbose:
        print "Spliting lfw dataset took {} seconds.".format(time.time() - start)
    start = time.time()
    # Copy unknown test dataset
    for i in range(int(args.rangeOfPeople.split(':')
                       [1]), len(folderName_sorted)):
        src = os.path.join(lfwPath, folderName_sorted[i])
        try:
            destFolder = os.path.join(
                destPath, 'test_unknown_raw', folderName_sorted[i])
            shutil.copytree(src, destFolder)
        except OSError as e:
            # If the error was caused because the source wasn't a directory
            if e.errno == errno.ENOTDIR:
                shutil.copy(src, destFolder)
            else:
                if args.verbose:
                    print('Directory not copied. Error: %s' % e)
    if args.verbose:
        print "Copying test dataset from lfw took {} seconds.".format(time.time() - start)
    start = time.time()

    class Args():
        """
        This class is created to pass arguments to ./util/align-dlib.py
        """

        def __init__(self, inputDir, outputDir, verbose):
            self.inputDir = inputDir
            self.dlibFacePredictor = os.path.join(
                dlibModelDir, "shape_predictor_68_face_landmarks.dat")
            self.mode = 'align'
            self.landmarks = 'outerEyesAndNose'
            self.size = 96
            self.outputDir = outputDir
            self.skipMulti = True
            self.verbose = verbose
            self.fallbackLfw = False
    argsForAlign = Args(
        os.path.join(
            destPath,
            'train_known_raw'),
        os.path.join(
            destPath,
            'train_known_aligned'),
        args.verbose)
    jobs = []
    # Fan the alignment out over 8 worker processes; alignMain is assumed to
    # tolerate concurrent runs over the same tree (it skips existing output).
    for i in range(8):
        p = multiprocessing.Process(
            target=align_dlib.alignMain, args=(
                argsForAlign,))
        jobs.append(p)
        p.start()
    for p in jobs:
        p.join()
    if args.verbose:
        print "Aligning the raw train data took {} seconds.".format(time.time() - start)
    start = time.time()
    os.system(
        './batch-represent/main.lua -outDir ' +
        os.path.join(
            destPath,
            'train_known_features') +
        ' -data ' +
        os.path.join(
            destPath,
            'train_known_aligned'))
    if args.verbose:
        print "Extracting features from aligned train data took {} seconds.".format(time.time() - start)
    start = time.time()
if __name__ == '__main__':
    # Command-line front end: global options, then one sub-command among
    # train / infer / inferFromTest / preprocess.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--dlibFacePredictor',
        type=str,
        help="Path to dlib's face predictor.",
        default=os.path.join(
            dlibModelDir,
            "shape_predictor_68_face_landmarks.dat"))
    parser.add_argument(
        '--networkModel',
        type=str,
        help="Path to Torch network model.",
        default=os.path.join(
            openfaceModelDir,
            'nn4.small2.v1.t7'))
    parser.add_argument('--imgDim', type=int,
                        help="Default image dimension.", default=96)
    parser.add_argument('--cuda', action='store_true')
    parser.add_argument('--verbose', action='store_true')
    subparsers = parser.add_subparsers(dest='mode', help="Mode")
    trainParser = subparsers.add_parser('train',
                                        help="Train a new classifier.")
    trainParser.add_argument('--ldaDim', type=int, default=-1)
    trainParser.add_argument(
        '--classifier',
        type=str,
        choices=[
            'LinearSvm',
            'GMM',
            'RadialSvm',
            'DecisionTree'],
        help='The type of classifier to use.',
        default='LinearSvm')
    trainParser.add_argument(
        'workDir',
        type=str,
        help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
    inferParser = subparsers.add_parser(
        'infer', help='Predict who an image contains from a trained classifier.')
    inferParser.add_argument(
        'classifierModel',
        type=str,
        help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
    inferParser.add_argument('imgs', type=str, nargs='+',
                             help="Input image.")
    inferFromTestParser = subparsers.add_parser(
        'inferFromTest',
        help='Predict who an image contains from a trained classifier.')
    # inferFromTestParser.add_argument('--classifierModel', type=str,
    # help='The Python pickle representing the classifier. This is NOT the
    # Torch network model, which can be set with --networkModel.')
    inferFromTestParser.add_argument(
        'featureFolder',
        type=str,
        nargs='+',
        help="Input the fratures folder which has the classifiers.")
    inferFromTestParser.add_argument(
        'testFolder',
        type=str,
        nargs='+',
        help="Input the test folder. It can be either known test dataset or unknown test dataset.")
    # NOTE(review): nargs='+' makes a supplied --threshold a *list* while
    # the default is a float; inferFromTest compares confidence against it
    # directly -- confirm which type is intended.
    inferFromTestParser.add_argument(
        '--threshold',
        type=float,
        nargs='+',
        help="Threshold of the confidence to classify a prediction as unknown person. <threshold will be predicted as unknown person.",
        default=0.0)
    inferFromTestParser.add_argument(
        '--unknown',
        action='store_true',
        help="Use this flag if you are testing on unknown dataset. Make sure you set thresold value")
    preprocessParser = subparsers.add_parser(
        'preprocess',
        help='Before Benchmarking preprocess divides the dataset into train and test pairs. Also it will align the train dataset and extract the features from it.')
    preprocessParser.add_argument('--lfwDir', type=str,
                                  help="Enter the lfw face directory")
    preprocessParser.add_argument(
        '--rangeOfPeople',
        type=str,
        help="Range of the people you would like to take as known person group. Not that the input is a list starts with 0 and the people are sorted in decending order of number of images. Eg: 0:10 ")
    preprocessParser.add_argument(
        '--featuresDir',
        type=str,
        help="Enter the directory location where the aligned images, features, and classifer model will be saved.")
    args = parser.parse_args()
    if args.verbose:
        print("Argument parsing and import libraries took {} seconds.".format(
            time.time() - start))
    start = time.time()
    # These two globals are used by getRep() in every mode.
    align = openface.AlignDlib(args.dlibFacePredictor)
    net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
                                  cuda=args.cuda)
    if args.verbose:
        print("Loading the dlib and OpenFace models took {} seconds.".format(
            time.time() - start))
        start = time.time()
    if args.mode == 'train':
        train(args)
    elif args.mode == 'infer':
        # infer(args)
        raise Exception("Use ./demo/classifier.py")
    elif args.mode == 'inferFromTest':
        inferFromTest(args)
    elif args.mode == 'preprocess':
        preprocess(args)
| apache-2.0 |
aikaterna/aikaterna-cogs | youtube/youtube.py | 1 | 1805 | import aiohttp
import re
from redbot.core import commands
from redbot.core.utils.menus import menu, DEFAULT_CONTROLS
class YouTube(commands.Cog):
    """Search YouTube for videos."""

    async def red_delete_data_for_user(self, **kwargs):
        """ Nothing to delete """
        return

    def __init__(self, bot):
        self.bot = bot
        # NOTE(review): session created at cog-construction time; older
        # aiohttp versions expect a running event loop here -- confirm Red
        # constructs cogs with the loop running.
        self.session = aiohttp.ClientSession()

    async def _youtube_results(self, query: str):
        """Scrape the YouTube results page and return a list of video URLs.

        On any failure the returned list holds a single human-readable
        error string instead of URLs; the command handlers send it as-is.
        """
        try:
            headers = {"user-agent": "Red-cog/3.0"}
            async with self.session.get(
                "https://www.youtube.com/results", params={"search_query": query}, headers=headers
            ) as r:
                result = await r.text()
            # Pull the 11-character video ids out of the embedded JSON blob.
            yt_find = re.findall(r"{\"videoId\":\"(.{11})", result)
            url_list = []
            for track in yt_find:
                url = f"https://www.youtube.com/watch?v={track}"
                if url not in url_list:  # keep first occurrence order, dedupe
                    url_list.append(url)
        except Exception as e:
            url_list = [f"Something went terribly wrong! [{e}]"]
        return url_list

    @commands.command()
    async def youtube(self, ctx, *, query: str):
        """Search on Youtube."""
        result = await self._youtube_results(query)
        if result:
            await ctx.send(result[0])
        else:
            await ctx.send("Nothing found. Try again later.")

    @commands.command()
    async def ytsearch(self, ctx, *, query: str):
        """Search on Youtube, multiple results."""
        result = await self._youtube_results(query)
        if result:
            await menu(ctx, result, DEFAULT_CONTROLS)
        else:
            await ctx.send("Nothing found. Try again later.")

    def cog_unload(self):
        # Close the HTTP session in the background when the cog is unloaded.
        self.bot.loop.create_task(self.session.close())
| mit |
inessadl/kinect-2-libras | Kinect2Libras/KinectFingerTracking/Lib/unittest/runner.py | 109 | 6502 | """Running tests"""
import sys
import time
from . import result
from .signals import registerResult
__unittest = True
class _WritelnDecorator(object):
"""Used to decorate file-like objects with a handy 'writeln' method"""
def __init__(self,stream):
self.stream = stream
def __getattr__(self, attr):
if attr in ('stream', '__getstate__'):
raise AttributeError(attr)
return getattr(self.stream,attr)
def writeln(self, arg=None):
if arg:
self.write(arg)
self.write('\n') # text-mode streams translate to \r\n if needed
class TextTestResult(result.TestResult):
    """A test result class that can print formatted text results to a stream.

    Used by TextTestRunner.
    """
    separator1 = '=' * 70
    separator2 = '-' * 70

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__()
        self.stream = stream
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions

    def getDescription(self, test):
        """Return the test's name, joined with its short description if any."""
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        return str(test)

    def _report(self, verbose_text, dot_char):
        # Emit either the verbose outcome label or the one-character
        # progress marker, depending on the configured verbosity.
        if self.showAll:
            self.stream.writeln(verbose_text)
        elif self.dots:
            self.stream.write(dot_char)
            self.stream.flush()

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        self._report("ok", '.')

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        self._report("ERROR", 'E')

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        self._report("FAIL", 'F')

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        self._report("skipped {0!r}".format(reason), "s")

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        self._report("expected failure", "x")

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        self._report("unexpected success", "u")

    def printErrors(self):
        """Print details of all collected errors and failures."""
        if self.dots or self.showAll:
            self.stream.writeln()
        self.printErrorList('ERROR', self.errors)
        self.printErrorList('FAIL', self.failures)

    def printErrorList(self, flavour, errors):
        for test, err in errors:
            self.stream.writeln(self.separator1)
            self.stream.writeln("%s: %s" % (flavour, self.getDescription(test)))
            self.stream.writeln(self.separator2)
            self.stream.writeln("%s" % err)
class TextTestRunner(object):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult

    def __init__(self, stream=sys.stderr, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None):
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        if resultclass is not None:
            self.resultclass = resultclass

    def _makeResult(self):
        # Result objects are created per run; resultclass may be overridden
        # either on the class or per-instance via the constructor.
        return self.resultclass(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        startTime = time.time()
        # start/stopTestRun are looked up dynamically so third-party result
        # objects that predate those hooks still work.
        startTestRun = getattr(result, 'startTestRun', None)
        if startTestRun is not None:
            startTestRun()
        try:
            test(result)
        finally:
            stopTestRun = getattr(result, 'stopTestRun', None)
            if stopTestRun is not None:
                stopTestRun()
        stopTime = time.time()
        timeTaken = stopTime - startTime
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()
        # Older result classes may lack the skip/expected-failure lists;
        # fall back to zeros in that case. (Python 2: map returns a list,
        # so the tuple unpacking in the else-clause works.)
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results
        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = map(len, (result.failures, result.errors))
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        return result
| apache-2.0 |
smartfile/django-1.4 | django/core/cache/backends/base.py | 100 | 7927 | "Base Cache class."
import warnings
from django.core.exceptions import ImproperlyConfigured, DjangoRuntimeWarning
from django.utils.encoding import smart_str
from django.utils.importlib import import_module
class InvalidCacheBackendError(ImproperlyConfigured):
    """Raised when the configured cache backend cannot be found or loaded."""
    pass
class CacheKeyWarning(DjangoRuntimeWarning):
    """Warning for cache keys that would break on stricter backends (memcached)."""
    pass
# Memcached does not accept keys longer than this.
MEMCACHE_MAX_KEY_LENGTH = 250
def default_key_func(key, key_prefix, version):
    """Build the final cache key as ``<key_prefix>:<version>:<key>``.

    This is the stock key-construction strategy used by every cache
    method; projects can substitute their own via the KEY_FUNCTION
    setting (see get_key_func).
    """
    parts = (key_prefix, str(version), smart_str(key))
    return ':'.join(parts)
def get_key_func(key_func):
    """Resolve the KEY_FUNCTION setting into a callable.

    Accepts an actual callable (returned unchanged) or a dotted import
    path naming one; when nothing is configured, falls back to
    ``default_key_func``.
    """
    if key_func is None:
        return default_key_func
    if callable(key_func):
        return key_func
    module_path, func_name = key_func.rsplit('.', 1)
    module = import_module(module_path)
    return getattr(module, func_name)
class BaseCache(object):
    """Abstract base class defining Django's cache backend API.

    Concrete backends must implement add/get/set/delete/clear; the other
    methods have naive default implementations built on those primitives
    which backends may override with more efficient native operations.
    """

    def __init__(self, params):
        # params comes from the CACHES setting; the legacy lowercase keys
        # ('timeout', 'max_entries', 'cull_frequency') take precedence over
        # their uppercase successors, and unparsable values fall back to
        # the defaults (300 / 300 / 3).
        timeout = params.get('timeout', params.get('TIMEOUT', 300))
        try:
            timeout = int(timeout)
        except (ValueError, TypeError):
            timeout = 300
        self.default_timeout = timeout
        options = params.get('OPTIONS', {})
        max_entries = params.get('max_entries', options.get('MAX_ENTRIES', 300))
        try:
            self._max_entries = int(max_entries)
        except (ValueError, TypeError):
            self._max_entries = 300
        cull_frequency = params.get('cull_frequency', options.get('CULL_FREQUENCY', 3))
        try:
            self._cull_frequency = int(cull_frequency)
        except (ValueError, TypeError):
            self._cull_frequency = 3
        self.key_prefix = smart_str(params.get('KEY_PREFIX', ''))
        self.version = params.get('VERSION', 1)
        self.key_func = get_key_func(params.get('KEY_FUNCTION', None))

    def make_key(self, key, version=None):
        """Constructs the key used by all other methods. By default it
        uses the key_func to generate a key (which, by default,
        prepends the `key_prefix' and 'version'). An different key
        function can be provided at the time of cache construction;
        alternatively, you can subclass the cache backend to provide
        custom key making behavior.
        """
        if version is None:
            version = self.version
        new_key = self.key_func(key, self.key_prefix, version)
        return new_key

    def add(self, key, value, timeout=None, version=None):
        """
        Set a value in the cache if the key does not already exist. If
        timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.

        Returns True if the value was stored, False otherwise.
        """
        raise NotImplementedError

    def get(self, key, default=None, version=None):
        """
        Fetch a given key from the cache. If the key does not exist, return
        default, which itself defaults to None.
        """
        raise NotImplementedError

    def set(self, key, value, timeout=None, version=None):
        """
        Set a value in the cache. If timeout is given, that timeout will be
        used for the key; otherwise the default cache timeout will be used.
        """
        raise NotImplementedError

    def delete(self, key, version=None):
        """
        Delete a key from the cache, failing silently.
        """
        raise NotImplementedError

    def get_many(self, keys, version=None):
        """
        Fetch a bunch of keys from the cache. For certain backends (memcached,
        pgsql) this can be *much* faster when fetching multiple values.

        Returns a dict mapping each key in keys to its value. If the given
        key is missing, it will be missing from the response dict.
        """
        # Naive fallback: one get() per key. Note that keys whose stored
        # value is None are indistinguishable from missing keys here.
        d = {}
        for k in keys:
            val = self.get(k, version=version)
            if val is not None:
                d[k] = val
        return d

    def has_key(self, key, version=None):
        """
        Returns True if the key is in the cache and has not expired.
        """
        return self.get(key, version=version) is not None

    def incr(self, key, delta=1, version=None):
        """
        Add delta to value in the cache. If the key does not exist, raise a
        ValueError exception.
        """
        # Read-modify-write: not atomic. Backends with a native atomic
        # increment should override this.
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        new_value = value + delta
        self.set(key, new_value, version=version)
        return new_value

    def decr(self, key, delta=1, version=None):
        """
        Subtract delta from value in the cache. If the key does not exist, raise
        a ValueError exception.
        """
        return self.incr(key, -delta, version=version)

    def __contains__(self, key):
        """
        Returns True if the key is in the cache and has not expired.
        """
        # This is a separate method, rather than just a copy of has_key(),
        # so that it always has the same functionality as has_key(), even
        # if a subclass overrides it.
        return self.has_key(key)

    def set_many(self, data, timeout=None, version=None):
        """
        Set a bunch of values in the cache at once from a dict of key/value
        pairs. For certain backends (memcached), this is much more efficient
        than calling set() multiple times.

        If timeout is given, that timeout will be used for the key; otherwise
        the default cache timeout will be used.
        """
        for key, value in data.items():
            self.set(key, value, timeout=timeout, version=version)

    def delete_many(self, keys, version=None):
        """
        Set a bunch of values in the cache at once. For certain backends
        (memcached), this is much more efficient than calling delete() multiple
        times.
        """
        for key in keys:
            self.delete(key, version=version)

    def clear(self):
        """Remove *all* values from the cache at once."""
        raise NotImplementedError

    def validate_key(self, key):
        """
        Warn about keys that would not be portable to the memcached
        backend. This encourages (but does not force) writing backend-portable
        cache code.
        """
        if len(key) > MEMCACHE_MAX_KEY_LENGTH:
            warnings.warn('Cache key will cause errors if used with memcached: '
                    '%s (longer than %s)' % (key, MEMCACHE_MAX_KEY_LENGTH),
                    CacheKeyWarning)
        # Memcached rejects control characters and spaces; one warning is
        # issued per offending character.
        for char in key:
            if ord(char) < 33 or ord(char) == 127:
                warnings.warn('Cache key contains characters that will cause '
                        'errors if used with memcached: %r' % key,
                              CacheKeyWarning)

    def incr_version(self, key, delta=1, version=None):
        """Adds delta to the cache version for the supplied key. Returns the
        new version.
        """
        # Implemented as copy-to-new-version then delete-old; raises
        # ValueError when the key is absent at the source version.
        if version is None:
            version = self.version
        value = self.get(key, version=version)
        if value is None:
            raise ValueError("Key '%s' not found" % key)
        self.set(key, value, version=version+delta)
        self.delete(key, version=version)
        return version+delta

    def decr_version(self, key, delta=1, version=None):
        """Substracts delta from the cache version for the supplied key. Returns
        the new version.
        """
        return self.incr_version(key, -delta, version)
| bsd-3-clause |
MaximNevrov/neutron | neutron/tests/unit/extensions/extensionattribute.py | 35 | 3179 | # Copyright 2013 VMware, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutron.api import extensions
from neutron.api.v2 import base
from neutron import manager
from neutron.quota import resource_registry
# Attribute Map
# API attributes for the 'ext_test_resources' collection exposed by this test
# extension. Each entry follows the standard Neutron attribute-map schema:
# allow_post/allow_put control which HTTP verbs accept the field, 'validate'
# names the validator, and 'is_visible' controls inclusion in responses.
RESOURCE_ATTRIBUTE_MAP = {
    'ext_test_resources': {
        'id': {'allow_post': False, 'allow_put': False,
               'validate': {'type:uuid': None},
               'is_visible': True},
        'name': {'allow_post': True, 'allow_put': True,
                 'validate': {'type:string': None},
                 'is_visible': True, 'default': ''},
        'tenant_id': {'allow_post': True, 'allow_put': False,
                      'required_by_policy': True,
                      'validate': {'type:string': None},
                      'is_visible': True},
    }
}
class Extensionattribute(extensions.ExtensionDescriptor):
    """Descriptor for a minimal test extension exposing 'ext_test_resources'."""

    @classmethod
    def get_name(cls):
        return "Extension Test Resource"

    @classmethod
    def get_alias(cls):
        return "ext-obj-test"

    @classmethod
    def get_description(cls):
        return "Extension Test Resource"

    @classmethod
    def get_updated(cls):
        return "2013-02-05T10:00:00-00:00"

    def update_attributes_map(self, attributes):
        # Merge our attribute map into the caller-supplied one.
        super(Extensionattribute, self).update_attributes_map(
            attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)

    @classmethod
    def get_resources(cls):
        """Returns Ext Resources."""
        plugin = manager.NeutronManager.get_plugin()
        resource_name = 'ext_test_resource'
        collection_name = resource_name + "s"
        params = RESOURCE_ATTRIBUTE_MAP.get(collection_name, dict())
        # Register with the quota machinery before wiring up the controller.
        resource_registry.register_resource_by_name(resource_name)
        controller = base.create_resource(collection_name, resource_name,
                                          plugin, params, member_actions={})
        return [extensions.ResourceExtension(collection_name, controller,
                                             member_actions={})]

    def get_extended_resources(self, version):
        # Only API v2.0 carries these attributes.
        return RESOURCE_ATTRIBUTE_MAP if version == "2.0" else {}
class ExtensionObjectTestPluginBase(object):
    """Interface that plugins implement to support the test resource.

    NOTE(review): this class does not set ``abc.ABCMeta`` as its metaclass,
    so the ``@abc.abstractmethod`` decorators below are documentation only --
    instantiating an incomplete subclass is not actually prevented.
    """

    @abc.abstractmethod
    def create_ext_test_resource(self, context, router):
        # Create an ext_test_resource; must be provided by the plugin.
        pass

    @abc.abstractmethod
    def get_ext_test_resource(self, context, id, fields=None):
        # Fetch an ext_test_resource by id; must be provided by the plugin.
        pass
| apache-2.0 |
nwchandler/ansible | test/units/modules/network/nxos/test_nxos_vrf_af.py | 13 | 3252 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vrf_af
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVrfafModule(TestNxosModule):
    """Unit tests for the nxos_vrf_af module with device I/O mocked out."""

    module = nxos_vrf_af

    def setUp(self):
        # Patch every helper in nxos_vrf_af that would talk to a device.
        for name in ('run_commands', 'load_config', 'get_config'):
            patcher = patch('ansible.modules.network.nxos.nxos_vrf_af.' + name)
            setattr(self, 'mock_' + name, patcher)
            setattr(self, name, patcher.start())

    def tearDown(self):
        for name in ('run_commands', 'load_config', 'get_config'):
            getattr(self, 'mock_' + name).stop()

    def load_fixtures(self, commands=None, device=''):
        # No device involved; applying config is a no-op.
        self.load_config.return_value = None

    def test_nxos_vrf_af_present(self):
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', state='present'))
        result = self.execute_module(changed=True)
        expected = ['vrf context ntc',
                    'address-family ipv4 unicast',
                    'afi ipv4',
                    'vrf ntc',
                    'safi unicast']
        self.assertEqual(sorted(result['commands']), sorted(expected))

    def test_nxos_vrf_af_absent(self):
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', state='absent'))
        result = self.execute_module(changed=False)
        self.assertEqual(result['commands'], [])

    def test_nxos_vrf_af_route_target(self):
        set_module_args(dict(vrf='ntc', afi='ipv4', safi='unicast', route_target_both_auto_evpn=True))
        result = self.execute_module(changed=True)
        expected = ['vrf context ntc',
                    'address-family ipv4 unicast',
                    'afi ipv4',
                    'route-target both auto evpn',
                    'vrf ntc',
                    'safi unicast']
        self.assertEqual(sorted(result['commands']), sorted(expected))
| gpl-3.0 |
motoschifo/mame | 3rdparty/googletest/googletest/xcode/Scripts/versiongenerate.py | 3088 | 4536 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""A script to prepare version informtion for use the gtest Info.plist file.
This script extracts the version information from the configure.ac file and
uses it to generate a header file containing the same information. The
#defines in this header file will be included in during the generation of
the Info.plist of the framework, giving the correct value to the version
shown in the Finder.
This script makes the following assumptions (these are faults of the script,
not problems with the Autoconf):
1. The AC_INIT macro will be contained within the first 1024 characters
of configure.ac
2. The version string will be 3 integers separated by periods and will be
surrounded by squre brackets, "[" and "]" (e.g. [1.0.1]). The first
segment represents the major version, the second represents the minor
version and the third represents the fix version.
3. No ")" character exists between the opening "(" and closing ")" of
AC_INIT, including in comments and character strings.
"""
import sys
import re
# Read the command line argument (the output directory for Version.h)
if (len(sys.argv) < 3):
print "Usage: versiongenerate.py input_dir output_dir"
sys.exit(1)
else:
input_dir = sys.argv[1]
output_dir = sys.argv[2]
# Read the first 1024 characters of the configure.ac file
config_file = open("%s/configure.ac" % input_dir, 'r')
buffer_size = 1024
opening_string = config_file.read(buffer_size)
config_file.close()
# Extract the version string from the AC_INIT macro
# The following init_expression means:
# Extract three integers separated by periods and surrounded by squre
# brackets(e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy
# (*? is the non-greedy flag) since that would pull in everything between
# the first "(" and the last ")" in the file.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
re.DOTALL)
version_values = version_expression.search(opening_string)
major_version = version_values.group(1)
minor_version = version_values.group(2)
fix_version = version_values.group(3)
# Write the version information to a header file to be included in the
# Info.plist file.
file_data = """//
// DO NOT MODIFY THIS FILE (but you can delete it)
//
// This file is autogenerated by the versiongenerate.py script. This script
// is executed in a "Run Script" build phase when creating gtest.framework. This
// header file is not used during compilation of C-source. Rather, it simply
// defines some version strings for substitution in the Info.plist. Because of
// this, we are not not restricted to C-syntax nor are we using include guards.
//
#define GTEST_VERSIONINFO_SHORT %s.%s
#define GTEST_VERSIONINFO_LONG %s.%s.%s
""" % (major_version, minor_version, major_version, minor_version, fix_version)
version_file = open("%s/Version.h" % output_dir, 'w')
version_file.write(file_data)
version_file.close()
| gpl-2.0 |
ZhangXinNan/tensorflow | tensorflow/python/kernel_tests/sparse_tensor_dense_matmul_op_test.py | 61 | 15941 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sparse_ops.sparse_tensor_dense_matmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
import numpy as np
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import app
from tensorflow.python.platform import test
def _maybe_complex(x):
if x.dtype.kind == "c": # complex
return (x + 1j * x) / 2
return x
class SparseTensorDenseMatMulTest(test.TestCase):
  """Correctness tests for sparse_ops.sparse_tensor_dense_matmul."""

  def _testMatmul(self,
                  x,
                  y,
                  adjoint_a=False,
                  adjoint_b=False,
                  indices_dtype=np.int64):
    """Compares sparse*dense matmul against the numpy dense result."""
    x_mat = np.matrix(x)
    if adjoint_a:
      x_mat = x_mat.H
    y_mat = np.matrix(y)
    if adjoint_b:
      y_mat = y_mat.H
    np_ans = x_mat * y_mat
    x_indices = np.vstack(np.where(x)).astype(indices_dtype).T
    x_values = x[np.where(x)]
    x_shape = x.shape
    with self.test_session(use_gpu=True):
      sp_x_value = sparse_tensor.SparseTensorValue(
          indices=x_indices, values=x_values, dense_shape=x_shape)
      tf_value_ans = sparse_ops.sparse_tensor_dense_matmul(
          sp_x_value, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b)
      tf_tensor_ans = sparse_ops.sparse_tensor_dense_matmul(
          sparse_tensor.SparseTensor.from_value(sp_x_value),
          y,
          adjoint_a=adjoint_a,
          adjoint_b=adjoint_b)
      # Ensure that the RHS shape is known at least.
      self.assertEqual(tf_value_ans.get_shape()[1], np_ans.shape[1])
      self.assertEqual(tf_tensor_ans.get_shape()[1], np_ans.shape[1])
      for out in (tf_value_ans.eval(), tf_tensor_ans.eval()):
        # float64 accumulates less error, so it gets tighter tolerances.
        if x.dtype == np.float32:
          self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)
        elif x.dtype == np.float64:
          self.assertAllClose(np_ans, out, rtol=1e-6, atol=1e-6)
        else:
          self.assertAllClose(np_ans, out, rtol=1e-4, atol=1e-4)

  def _testBasic(self, value_dtype, indices_dtype=np.int64):
    x = _maybe_complex(np.random.rand(10, 10).astype(value_dtype))
    x[np.abs(x) < 0.5] = 0  # Make it sparse
    y = _maybe_complex(np.random.randn(10, 20).astype(value_dtype))
    self._testMatmul(x, y, indices_dtype=indices_dtype)

  def testBasic(self):
    np.random.seed(127)  # Repeatable results
    self._testBasic(np.int32)
    self._testBasic(np.float32)
    self._testBasic(np.float64)
    self._testBasic(np.complex64)
    self._testBasic(np.complex128)
    self._testBasic(np.int32, indices_dtype=np.int32)
    self._testBasic(np.float32, indices_dtype=np.int32)

  def testShapeInference(self):
    x = np.random.rand(10, 10)
    x[np.abs(x) < 0.5] = 0  # Make it sparse
    y = np.random.randn(10, 20)
    x_indices = np.vstack(np.where(x)).astype(np.int64).T
    x_values = x[np.where(x)]
    x_shape = x.shape
    x_st = sparse_tensor.SparseTensor(x_indices, x_values, x_shape)
    result = sparse_ops.sparse_tensor_dense_matmul(x_st, y)
    self.assertEqual(result.get_shape(), (10, 20))
    # With an unknown LHS shape only the RHS dimension can be inferred.
    x_shape_unknown = array_ops.placeholder(dtype=dtypes.int64, shape=None)
    x_st_shape_unknown = sparse_tensor.SparseTensor(x_indices, x_values,
                                                    x_shape_unknown)
    result_left_shape_unknown = sparse_ops.sparse_tensor_dense_matmul(
        x_st_shape_unknown, y)
    self.assertEqual(result_left_shape_unknown.get_shape().as_list(),
                     [None, 20])
    # Mismatched inner dimensions must be rejected at graph-build time.
    x_shape_inconsistent = [10, 15]
    x_st_shape_inconsistent = sparse_tensor.SparseTensor(x_indices, x_values,
                                                         x_shape_inconsistent)
    with self.assertRaisesRegexp(ValueError, "Dimensions must be equal"):
      sparse_ops.sparse_tensor_dense_matmul(x_st_shape_inconsistent, y)

  def testInvalidIndicesForSparseTensorDenseMatmul(self):
    # Note: use_gpu=False because nice errors are only returned from CPU kernel.
    with self.test_session(use_gpu=False):
      indices = np.matrix([[1, 10]]).astype(np.int64)
      values = np.array([10]).astype(np.float32)
      shape = [3, 2]
      sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
      # Test multiplying by both a small and large dense matrix, to hit
      # both cases in the kernel.
      dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
      with self.assertRaisesOpError(
          "k .10. from index.0,1. out of bounds .>=2."):
        sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
      dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
      with self.assertRaisesOpError(
          "k .10. from index.0,1. out of bounds .>=2."):
        sparse_ops.sparse_tensor_dense_matmul(sparse_t, dense_t).eval()
      # Repeat with adjoint_a, to get a different error.
      dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
      with self.assertRaisesOpError(
          "m .10. from index.0,1. out of bounds .>=2."):
        sparse_ops.sparse_tensor_dense_matmul(
            sparse_t, dense_t, adjoint_a=True).eval()
      dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
      with self.assertRaisesOpError(
          "m .10. from index.0,1. out of bounds .>=2."):
        sparse_ops.sparse_tensor_dense_matmul(
            sparse_t, dense_t, adjoint_a=True).eval()

  def testInvalidIndicesForSparseTensorDenseMatmulOnGPU(self):
    # Note: uses use_gpu=True. The GPU kernel does not validate indices, so
    # instead of nice errors we check the (NaN / dropped) values it produces.
    if not test.is_gpu_available():
      return
    with self.test_session(use_gpu=True):
      indices = np.array([[1, 10]]).astype(np.int64)
      values = np.array([10]).astype(np.float32)
      shape = [3, 2]
      sparse_t = sparse_tensor.SparseTensor(indices, values, shape)
      # Test multiplying by both a small and large dense matrix, to hit
      # both cases in the kernel.
      dense_t = np.matrix([[1] * 5, [2] * 5], dtype=np.float32)
      expected_t = np.array([[0] * 5, [np.nan] * 5, [0] * 5], dtype=np.float32)
      self.assertAllClose(expected_t,
                          sparse_ops.sparse_tensor_dense_matmul(
                              sparse_t, dense_t).eval())
      dense_t = np.matrix([[1] * 500, [2] * 500], dtype=np.float32)
      expected_t = np.array(
          [[0] * 500, [np.nan] * 500, [0] * 500], dtype=np.float32)
      self.assertAllClose(expected_t,
                          sparse_ops.sparse_tensor_dense_matmul(
                              sparse_t, dense_t).eval())
      # Repeat with adjoint_a, now the error is that the sparse index
      # is OOO w.r.t. the output.  The GPU kernel can't do much here,
      # so it just doesn't accumulate.
      dense_t = np.matrix([[1] * 5, [2] * 5, [3] * 5], dtype=np.float32)
      expected_t = np.array([[0] * 5, [0] * 5], dtype=np.float32)
      self.assertAllClose(expected_t,
                          sparse_ops.sparse_tensor_dense_matmul(
                              sparse_t, dense_t, adjoint_a=True).eval())
      dense_t = np.matrix([[1] * 500, [2] * 500, [3] * 500], dtype=np.float32)
      expected_t = np.array([[0] * 500, [0] * 500], dtype=np.float32)
      self.assertAllClose(expected_t,
                          sparse_ops.sparse_tensor_dense_matmul(
                              sparse_t, dense_t, adjoint_a=True).eval())

  # Tests setting one dimension to be a high value.
  def _testLarge(self, np_dtype):
    r1 = np.random.randint(6000, 20000)
    r2 = np.random.randint(1, 10)
    r3 = np.random.randint(1, 10)
    for m, k, n in [(r1, r2, r3),
                    (r2, r1, r3),
                    (r2, r3, r1)]:
      x = _maybe_complex(np.random.rand(m, k).astype(np_dtype))
      x[np.abs(x) < 0.8] = 0
      y = _maybe_complex(np.random.randn(k, n).astype(np_dtype))
      self._testMatmul(x, y, adjoint_a=False, adjoint_b=False)
      self._testMatmul(x.transpose(), y, adjoint_a=True, adjoint_b=False)
      self._testMatmul(x, y.transpose(), adjoint_a=False, adjoint_b=True)
      self._testMatmul(
          x.transpose(), y.transpose(), adjoint_a=True, adjoint_b=True)

  def testLarge(self):
    # BUG FIX: these calls previously sat at the end of _testLarge itself,
    # making _testLarge recurse unconditionally (infinite recursion) and
    # leaving no discoverable "testLarge" case for the runner. Wrapping them
    # in a proper test method restores the intended behavior.
    np.random.seed(127)  # Repeatable results
    self._testLarge(np.float32)
    self._testLarge(np.float64)
    self._testLarge(np.complex64)
    self._testLarge(np.complex128)

  # Tests random sized matrices.
  def testFloatRandom(self):
    np.random.seed(127)  # Repeatable results
    for _ in range(8):
      for adjoint_a in [True, False]:
        for adjoint_b in [True, False]:
          for thresh in [0.0, 0.2, 0.8, 1.0]:
            n, k, m = np.random.randint(1, 100, size=3)
            x = np.random.rand(n, k).astype(np.float32)
            x[x < thresh] = 0  # Make it sparse
            y = np.random.randn(k, m).astype(np.float32)
            x = x.transpose() if adjoint_a else x
            y = y.transpose() if adjoint_b else y
            self._testMatmul(x, y, adjoint_a, adjoint_b)
def _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(x, y, adjoint_a,
                                                         adjoint_b):
  """Builds the dense-matmul (a_is_sparse=True) half of the benchmark.

  Returns a function (iterations, session) -> [final tensor] that chains
  `iterations` matmuls through a while_loop so per-run overhead is amortized.
  """
  def body(t, prev):
    with ops.control_dependencies([prev]):
      # Each matmul depends on the previous result, serializing the loop so
      # the runtime cannot overlap (and thereby hide) the work being timed.
      return (t + 1, math_ops.matmul(
          x,
          y,
          transpose_a=adjoint_a,
          transpose_b=adjoint_b,
          a_is_sparse=True,
          b_is_sparse=False))
  t0 = constant_op.constant(0)
  v0 = constant_op.constant(0.0)
  def _timeit(iterations, _):
    (_, final) = control_flow_ops.while_loop(
        lambda t, _: t < iterations,
        body, (t0, v0),
        parallel_iterations=1,
        back_prop=False,
        shape_invariants=(tensor_shape.TensorShape(()),
                          tensor_shape.TensorShape(None)))
    return [final]
  return _timeit
def _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(x_ind, x_val, x_shape,
                                                          y, adjoint_a,
                                                          adjoint_b):
  """Builds the sparse_tensor_dense_matmul half of the benchmark.

  Returns a function (iterations, session) -> [final tensor] using the same
  serialized while_loop scheme as the dense variant.
  """
  sp_x = sparse_tensor.SparseTensor(
      indices=x_ind, values=x_val, dense_shape=x_shape)
  def body(t, prev):
    with ops.control_dependencies([prev]):
      # Chain iterations so the timed work cannot be overlapped.
      return (t + 1, sparse_ops.sparse_tensor_dense_matmul(
          sp_x, y, adjoint_a=adjoint_a, adjoint_b=adjoint_b))
  t0 = constant_op.constant(0)
  v0 = constant_op.constant(0.0)
  def _timeit(iterations, _):
    (_, final) = control_flow_ops.while_loop(
        lambda t, _: t < iterations,
        body, (t0, v0),
        parallel_iterations=1,
        back_prop=False,
        shape_invariants=(tensor_shape.TensorShape(()),
                          tensor_shape.TensorShape(None)))
    return [final]
  return _timeit
def sparse_tensor_dense_vs_dense_matmul_benchmark(thresh,
                                                  m,
                                                  k,
                                                  n,
                                                  adjoint_a,
                                                  adjoint_b,
                                                  use_gpu,
                                                  skip_dense=False):
  """Times dense matmul (a_is_sparse) vs sparse_tensor_dense_matmul.

  A is an [m, k] random matrix with entries below `thresh` zeroed out (so a
  higher thresh means a sparser matrix); B is dense [k, n]. Prints one
  tab-separated result row; when skip_dense is set the dense timing column
  is reported as NaN.
  """
  config = config_pb2.ConfigProto()
  config.allow_soft_placement = True
  # Configurable for benchmarking:
  # config.intra_op_parallelism_threads = 100
  # config.gpu_options.per_process_gpu_memory_fraction = 0.3
  np.random.seed([6, 117])  # Reproducibility
  x = np.random.rand(m, k).astype(np.float32)
  x[x < thresh] = 0
  y = np.random.randn(k, n).astype(np.float32)
  if adjoint_a:
    x = x.T
  if adjoint_b:
    y = y.T
  def _timer(sess, ops_fn, iterations):
    # Warm in
    sess.run(ops_fn(10, sess))
    # Timing run
    start = time.time()
    sess.run(ops_fn(iterations, sess))
    end = time.time()
    return (end - start) / (1.0 * iterations)  # Average runtime per iteration
  # Using regular matmul, marking one of the matrices as dense.
  if skip_dense:
    delta_dense = float("nan")
  else:
    # Fresh graph per measurement so the two timings don't share state.
    with session.Session(config=config, graph=ops.Graph()) as sess:
      if not use_gpu:
        with ops.device("/cpu:0"):
          x_t = constant_op.constant(x)
          y_t = constant_op.constant(y)
          ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
              x_t, y_t, adjoint_a, adjoint_b)
      else:
        with ops.device("/device:GPU:0"):
          x_t = constant_op.constant(x)
          y_t = constant_op.constant(y)
          ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_dense(
              x_t, y_t, adjoint_a, adjoint_b)
      delta_dense = _timer(sess, ops_fn, 200)
  # Using sparse_tensor_dense_matmul.
  with session.Session("", config=config, graph=ops.Graph()) as sess:
    if not use_gpu:
      with ops.device("/cpu:0"):
        x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
        x_val = constant_op.constant(x[np.where(x)])
        x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
        y_t = constant_op.constant(y)
        ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
            x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
    else:
      with ops.device("/device:GPU:0"):
        x_ind = constant_op.constant(np.vstack(np.where(x)).astype(np.int64).T)
        x_val = constant_op.constant(x[np.where(x)])
        x_shape = constant_op.constant(np.array(x.shape).astype(np.int64))
        y_t = constant_op.constant(y)
        ops_fn = _sparse_tensor_dense_vs_dense_matmul_benchmark_sparse(
            x_ind, x_val, x_shape, y_t, adjoint_a, adjoint_b)
    delta_sparse = _timer(sess, ops_fn, 200)
  # Column order matches the header printed by main().
  print("%g \t %d \t %s \t %d \t %d \t %g \t %g \t %g" %
        (1 - thresh, n, use_gpu, m, k, delta_dense, delta_sparse,
         delta_sparse / delta_dense))
def main(_):
  """Runs the benchmark sweep and prints a table of dense vs sparse timings."""
  print("DenseDense MatMul (w/ Sparse Flag) vs. SparseTensorDense MatMul")
  print("Matrix sizes:")
  print(" A sparse [m, k] with % nonzero values between 1% and 80%")
  print(" B dense [k, n]")
  print("")
  print("% nnz \t n \t gpu \t m \t k \t dt(dense) \t dt(sparse) "
        "\t dt(sparse)/dt(dense)")
  # Sweep sparsity and all size combinations, on GPU and CPU.
  for thresh in (0.99, 0.8, 0.5, 0.2):
    for n in (50, 100):
      for use_gpu in (True, False):
        for m in (100, 1000):
          for k in (100, 1000):
            sparse_tensor_dense_vs_dense_matmul_benchmark(
                thresh, m, k, n, False, False, use_gpu=use_gpu)
  # Enable for large scale benchmarks, these ones take a long time to run.
  #
  # for use_gpu in (True, False):
  #   sparse_tensor_dense_vs_dense_matmul_benchmark(
  #       thresh=0.99, m=1000000, k=1000, n=100, adjoint_a=False,
  #       adjoint_b=False, use_gpu=use_gpu, skip_dense=True)
if __name__ == "__main__":
if "--benchmarks" in sys.argv:
sys.argv.remove("--benchmarks")
app.run()
else:
test.main()
| apache-2.0 |
larrybradley/astropy | astropy/io/ascii/tests/test_types.py | 6 | 1582 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from io import StringIO
from astropy.io import ascii
from .common import assert_equal
def test_types_from_dat():
    """Column dtypes honor explicit converters and fall back to inference."""
    converters = {'a': [ascii.convert_numpy(float)],
                  'e': [ascii.convert_numpy(str)]}
    dat = ascii.read(['a b c d e', '1 1 cat 2.1 4.2'],
                     Reader=ascii.Basic,
                     converters=converters)
    expected_kinds = {'a': ('f',),        # forced to float
                      'b': ('i',),        # inferred integer
                      'c': ('S', 'U'),    # inferred string
                      'd': ('f',),        # inferred float
                      'e': ('S', 'U')}    # forced to string
    for name, kinds in expected_kinds.items():
        assert dat[name].dtype.kind in kinds
def test_rdb_write_types():
    """The RDB writer's type row marks numeric columns N and strings S."""
    dat = ascii.read(['a b c d', '1 1.0 cat 2.1'],
                     Reader=ascii.Basic)
    buf = StringIO()
    ascii.write(dat, buf, Writer=ascii.Rdb)
    # Line 1 of RDB output is the tab-separated column-type row.
    type_row = buf.getvalue().splitlines()[1]
    assert_equal(type_row, 'N\tN\tS\tN')
def test_ipac_read_types():
    """The IPAC reader maps header type strings onto ascii column types."""
    table = r"""\
| ra | dec | sai |-----v2---| sptype |
| real | float | l | real | char |
| unit | unit | unit | unit | ergs |
| null | null | null | null | -999 |
2.09708 2956 73765 2.06000 B8IVpMnHg
"""
    reader = ascii.get_reader(Reader=ascii.Ipac)
    reader.read(table)
    # real/float -> FloatType, l -> IntType, char -> StrType.
    expected_types = [ascii.FloatType,
                      ascii.FloatType,
                      ascii.IntType,
                      ascii.FloatType,
                      ascii.StrType]
    for col, expected in zip(reader.cols, expected_types):
        assert_equal(col.type, expected)
| bsd-3-clause |
scraplesh/delicious-alfredworkflow | requests/structures.py | 1160 | 2977 | # -*- coding: utf-8 -*-
"""
requests.structures
~~~~~~~~~~~~~~~~~~~
Data structures that power Requests.
"""
import collections

# MutableMapping/Mapping moved to collections.abc in Python 3.3, and the old
# aliases on the collections module were removed entirely in Python 3.10.
try:
    from collections.abc import Mapping, MutableMapping
except ImportError:  # Python 2
    from collections import Mapping, MutableMapping
class CaseInsensitiveDict(MutableMapping):
    """
    A case-insensitive ``dict``-like object.

    Implements all methods and operations of
    ``MutableMapping`` as well as dict's ``copy``. Also
    provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``,
    ``keys()``, ``items()``, ``iterkeys()``, and ``iteritems()``
    will contain case-sensitive keys. However, querying and contains
    testing is case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the
    value of a ``'Content-Encoding'`` response header, regardless
    of how the header name was originally stored.

    If the constructor, ``.update``, or equality comparison
    operations are given keys that have equal ``.lower()``s, the
    behavior is undefined.
    """
    # NOTE: inherits from collections.abc.MutableMapping (via the compat
    # import) rather than collections.MutableMapping, which was removed in
    # Python 3.10.

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original-cased key, value).
        self._store = dict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return (
            (lowerkey, keyval[1])
            for (lowerkey, keyval)
            in self._store.items()
        )

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required
    def copy(self):
        # _store.values() is an iterable of (key, value) pairs, which the
        # constructor's update() accepts directly.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
class LookupDict(dict):
    """Dictionary lookup object.

    Item access reads instance attributes (``__dict__``) rather than the
    underlying dict storage, and missing keys fall through to ``None``
    instead of raising ``KeyError``.
    """

    def __init__(self, name=None):
        super(LookupDict, self).__init__()
        # Informational label, used only by repr().
        self.name = name

    def __repr__(self):
        return "<lookup '%s'>" % self.name

    def __getitem__(self, key):
        # Fall through to None for unknown keys instead of raising.
        return self.__dict__.get(key)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
| mit |
DigiThinkIT/stem | stem/socket.py | 1 | 20729 | # Copyright 2011-2014, Damian Johnson and The Tor Project
# See LICENSE for licensing information
"""
Supports communication with sockets speaking the Tor control protocol. This
allows us to send messages as basic strings, and receive responses as
:class:`~stem.response.ControlMessage` instances.
**This module only consists of low level components, and is not intended for
users.** See our `tutorials <tutorials.html>`_ and `Control Module
<api/control.html>`_ if you're new to Stem and looking to get started.
With that aside, these can still be used for raw socket communication with
Tor...
::
import stem
import stem.connection
import stem.socket
if __name__ == '__main__':
try:
control_socket = stem.socket.ControlPort(port = 9051)
stem.connection.authenticate(control_socket)
except stem.SocketError as exc:
print 'Unable to connect to tor on port 9051: %s' % exc
sys.exit(1)
except stem.connection.AuthenticationFailure as exc:
print 'Unable to authenticate: %s' % exc
sys.exit(1)
print "Issuing 'GETINFO version' query...\\n"
control_socket.send('GETINFO version')
print control_socket.recv()
::
% python example.py
Issuing 'GETINFO version' query...
version=0.2.4.10-alpha-dev (git-8be6058d8f31e578)
OK
**Module Overview:**
::
ControlSocket - Socket wrapper that speaks the tor control protocol.
|- ControlPort - Control connection via a port.
| |- get_address - provides the ip address of our socket
| +- get_port - provides the port of our socket
|
|- ControlSocketFile - Control connection via a local file socket.
| +- get_socket_path - provides the path of the socket we connect to
|
|- send - sends a message to the socket
|- recv - receives a ControlMessage from the socket
|- is_alive - reports if the socket is known to be closed
|- is_localhost - returns if the socket is for the local system or not
|- connect - connects a new socket
|- close - shuts down the socket
+- __enter__ / __exit__ - manages socket connection
send_message - Writes a message to a control socket.
recv_message - Reads a ControlMessage from a control socket.
send_formatting - Performs the formatting expected from sent messages.
"""
from __future__ import absolute_import
import re
import socket
import threading
import stem.prereq
import stem.response
import stem.util.str_tools
from stem.util import log
class ControlSocket(object):
"""
Wrapper for a socket connection that speaks the Tor control protocol. To the
better part this transparently handles the formatting for sending and
receiving complete messages. All methods are thread safe.
Callers should not instantiate this class directly, but rather use subclasses
which are expected to implement the **_make_socket()** method.
"""
def __init__(self):
self._socket, self._socket_file = None, None
self._is_alive = False
# Tracks sending and receiving separately. This should be safe, and doing
# so prevents deadlock where we block writes because we're waiting to read
# a message that isn't coming.
self._send_lock = threading.RLock()
self._recv_lock = threading.RLock()
  def send(self, message, raw = False):
    """
    Formats and sends a message to the control socket. For more information see
    the :func:`~stem.socket.send_message` function.
    :param str message: message to be formatted and sent to the socket
    :param bool raw: leaves the message formatting untouched, passing it to the socket as-is
    :raises:
      * :class:`stem.SocketError` if a problem arises in using the socket
      * :class:`stem.SocketClosed` if the socket is known to be shut down
    """
    with self._send_lock:
      try:
        if not self.is_alive():
          raise stem.SocketClosed()
        send_message(self._socket_file, message, raw)
      except stem.SocketClosed as exc:
        # if send_message raises a SocketClosed then we should properly shut
        # everything down
        # Calling close() here is safe: it acquires _send_lock, which is a
        # reentrant lock we already hold, and the is_alive() guard prevents
        # double-closing.
        if self.is_alive():
          self.close()
        raise exc
  def recv(self):
    """
    Receives a message from the control socket, blocking until we've received
    one. For more information see the :func:`~stem.socket.recv_message` function.
    :returns: :class:`~stem.response.ControlMessage` for the message received
    :raises:
      * :class:`stem.ProtocolError` the content from the socket is malformed
      * :class:`stem.SocketClosed` if the socket closes before we receive a complete message
    """
    with self._recv_lock:
      try:
        # makes a temporary reference to the _socket_file because connect()
        # and close() may set or unset it
        socket_file = self._socket_file
        if not socket_file:
          raise stem.SocketClosed()
        return recv_message(socket_file)
      except stem.SocketClosed as exc:
        # If recv_message raises a SocketClosed then we should properly shut
        # everything down. However, there's a couple cases where this will
        # cause deadlock...
        #
        # * this socketClosed was *caused by* a close() call, which is joining
        #   on our thread
        #
        # * a send() call that's currently in flight is about to call close(),
        #   also attempting to join on us
        #
        # To resolve this we make a non-blocking call to acquire the send lock.
        # If we get it then great, we can close safely. If not then one of the
        # above are in progress and we leave the close to them.
        if self.is_alive():
          # acquire(False) is a non-blocking try-lock: a False result means
          # another thread holds the send lock and will handle the close.
          if self._send_lock.acquire(False):
            self.close()
            self._send_lock.release()
        raise exc
def is_alive(self):
"""
Checks if the socket is known to be closed. We won't be aware if it is
until we either use it or have explicitily shut it down.
In practice a socket derived from a port knows about its disconnection
after a failed :func:`~stem.socket.ControlSocket.recv` call. Socket file
derived connections know after either a
:func:`~stem.socket.ControlSocket.send` or
:func:`~stem.socket.ControlSocket.recv`.
This means that to have reliable detection for when we're disconnected
you need to continually pull from the socket (which is part of what the
:class:`~stem.control.BaseController` does).
:returns: **bool** that's **True** if our socket is connected and **False** otherwise
"""
return self._is_alive
def is_localhost(self):
  """
  Checks if this connection is to the local system. The base class cannot
  tell, so it conservatively answers **False**; subclasses override this
  when they know better.

  :returns: **bool** that's **True** if the connection is for the local host and **False** otherwise
  """

  return False
def connect(self):
  """
  Connects to a new socket, closing our previous one if we're already
  attached.

  :raises: :class:`stem.SocketError` if unable to make a socket
  """

  with self._send_lock:
    # Drop any existing connection first. Once we're no longer alive it's
    # safe to take the recv lock because recv() calls stop blocking (they
    # raise SocketClosed instead).

    if self.is_alive():
      self.close()

    with self._recv_lock:
      self._socket = self._make_socket()
      self._socket_file = self._socket.makefile(mode = 'rwb')
      self._is_alive = True

      # The post-connect handshake can fail transiently...
      #   SocketError: [Errno 4] Interrupted system call
      # ... and retrying is safe, so attempt it exactly twice before
      # letting the error propagate.

      for attempt in (1, 2):
        try:
          self._connect()
          break
        except stem.SocketError:
          if attempt == 2:
            raise
def close(self):
  """
  Shuts down the socket. If it's already closed then this is a no-op.
  """

  with self._send_lock:
    # Function is idempotent with one exception: we notify _close() if this
    # is causing our is_alive() state to change.

    is_change = self.is_alive()

    if self._socket:
      # if we haven't yet established a connection then this raises an error
      # socket.error: [Errno 107] Transport endpoint is not connected

      try:
        self._socket.shutdown(socket.SHUT_RDWR)
      except socket.error:
        pass

      # Suppressing unexpected exceptions from close. For instance, if the
      # socket's file has already been closed then with python 2.7 that raises
      # with...
      # error: [Errno 32] Broken pipe
      #
      # Using 'except Exception' rather than a bare 'except' so that
      # KeyboardInterrupt / SystemExit aren't swallowed while shutting down.

      try:
        self._socket.close()
      except Exception:
        pass

    if self._socket_file:
      try:
        self._socket_file.close()
      except Exception:
        pass

    self._socket = None
    self._socket_file = None
    self._is_alive = False

    # only notify subclasses when the alive state actually changed
    if is_change:
      self._close()
def _get_send_lock(self):
  """
  Provides the lock that governs sending, plus
  :func:`stem.socket.ControlSocket.connect` /
  :func:`stem.socket.ControlSocket.close` and by extension our
  :func:`stem.socket.ControlSocket.is_alive` state changes. Exposed for
  classes that need to coordinate with us at that level.

  :returns: **threading.RLock** that governs sending messages to our socket
    and state changes
  """

  return self._send_lock
def __enter__(self):
  # context manager support: `with socket_instance:` yields the socket itself
  return self
def __exit__(self, exit_type, value, traceback):
  # closes the socket when the `with` block is left, regardless of exceptions
  self.close()
def _connect(self):
  """
  Connection callback that can be overwritten by subclasses and wrappers.
  Invoked by connect() while the send lock is held, after the raw socket
  has been established.
  """

  pass
def _close(self):
  """
  Disconnection callback that can be overwritten by subclasses and wrappers.
  Only invoked by close() when the call actually changed our is_alive()
  state (repeated close() calls don't re-trigger it).
  """

  pass
def _make_socket(self):
  """
  Constructs and connects new socket. This is implemented by subclasses
  (ControlPort and ControlSocketFile below).

  :returns: **socket.socket** for our configuration

  :raises:
    * :class:`stem.SocketError` if unable to make a socket
    * **NotImplementedError** if not implemented by a subclass
  """

  raise NotImplementedError('Unsupported Operation: this should be implemented by the ControlSocket subclass')
class ControlPort(ControlSocket):
  """
  Control connection to tor. For more information see tor's ControlPort torrc
  option.
  """

  def __init__(self, address = '127.0.0.1', port = 9051, connect = True):
    """
    ControlPort constructor.

    :param str address: ip address of the controller
    :param int port: port number of the controller
    :param bool connect: connects to the socket if True, leaves it unconnected otherwise

    :raises: :class:`stem.SocketError` if connect is **True** and we're
      unable to establish a connection
    """

    super(ControlPort, self).__init__()
    self._control_addr = address
    self._control_port = port

    if connect:
      self.connect()

  def get_address(self):
    """
    Provides the ip address our socket connects to.

    :returns: str with the ip address of our socket
    """

    return self._control_addr

  def get_port(self):
    """
    Provides the port our socket connects to.

    :returns: int with the port of our socket
    """

    return self._control_port

  def is_localhost(self):
    # only the literal loopback address counts; named aliases such as
    # 'localhost' are not resolved here
    return self._control_addr == '127.0.0.1'

  def _make_socket(self):
    # TCP connection to the configured address/port, with socket.error
    # re-raised as stem's SocketError
    try:
      control_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
      control_socket.connect((self._control_addr, self._control_port))
      return control_socket
    except socket.error as exc:
      raise stem.SocketError(exc)
class ControlSocketFile(ControlSocket):
  """
  Control connection to tor. For more information see tor's ControlSocket torrc
  option.
  """

  def __init__(self, path = '/var/run/tor/control', connect = True):
    """
    ControlSocketFile constructor.

    :param str path: path where the control socket is located
    :param bool connect: connects to the socket if True, leaves it unconnected otherwise

    :raises: :class:`stem.SocketError` if connect is **True** and we're
      unable to establish a connection
    """

    super(ControlSocketFile, self).__init__()
    self._socket_path = path

    if connect:
      self.connect()

  def get_socket_path(self):
    """
    Provides the path our socket connects to.

    :returns: str with the path for our control socket
    """

    return self._socket_path

  def is_localhost(self):
    # unix domain sockets are inherently local
    return True

  def _make_socket(self):
    # AF_UNIX connection to the configured path, with socket.error re-raised
    # as stem's SocketError
    try:
      control_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
      control_socket.connect(self._socket_path)
      return control_socket
    except socket.error as exc:
      raise stem.SocketError(exc)
def send_message(control_file, message, raw = False):
  """
  Sends a message to the control socket, adding the expected formatting for
  single verses multi-line messages. Neither message type should contain an
  ending newline (if so it'll be treated as a multi-line message with a blank
  line at the end). If the message doesn't contain a newline then it's sent
  as...

  ::

    <message>\\r\\n

  and if it does contain newlines then it's split on ``\\n`` and sent as...

  ::

    +<line 1>\\r\\n
    <line 2>\\r\\n
    <line 3>\\r\\n
    .\\r\\n

  :param file control_file: file derived from the control socket (see the
    socket's makefile() method for more information)
  :param str message: message to be sent on the control socket
  :param bool raw: leaves the message formatting untouched, passing it to the
    socket as-is

  :raises:
    * :class:`stem.SocketError` if a problem arises in using the socket
    * :class:`stem.SocketClosed` if the socket is known to be shut down
  """

  if not raw:
    message = send_formatting(message)

  try:
    # the socket file is opened in binary mode, so encode before writing
    control_file.write(stem.util.str_tools._to_bytes(message))
    control_file.flush()

    log_message = message.replace('\r\n', '\n').rstrip()
    log.trace('Sent to tor:\n' + log_message)
  except socket.error as exc:
    log.info('Failed to send message: %s' % exc)

    # When sending there doesn't seem to be a reliable method for
    # distinguishing between failures from a disconnect verses other things.
    # Just accounting for known disconnection responses.

    if str(exc) == '[Errno 32] Broken pipe':
      raise stem.SocketClosed(exc)
    else:
      raise stem.SocketError(exc)
  except AttributeError:
    # if the control_file has been closed then flush will receive:
    # AttributeError: 'NoneType' object has no attribute 'sendall'

    log.info('Failed to send message: file has been closed')
    raise stem.SocketClosed('file has been closed')
def recv_message(control_file):
  """
  Pulls from a control socket until we either have a complete message or
  encounter a problem.

  :param file control_file: file derived from the control socket (see the
    socket's makefile() method for more information)

  :returns: :class:`~stem.response.ControlMessage` read from the socket

  :raises:
    * :class:`stem.ProtocolError` the content from the socket is malformed
    * :class:`stem.SocketClosed` if the socket closes before we receive
      a complete message
  """

  parsed_content, raw_content = [], b''
  logging_prefix = 'Error while receiving a control message (%s): '

  while True:
    try:
      # From a real socket readline() would always provide bytes, but during
      # tests we might be given a StringIO in which case it's unicode under
      # python 3.x.

      line = stem.util.str_tools._to_bytes(control_file.readline())
    except AttributeError:
      # if the control_file has been closed then we will receive:
      # AttributeError: 'NoneType' object has no attribute 'recv'

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'socket file has been closed')
      raise stem.SocketClosed('socket file has been closed')
    except (socket.error, ValueError) as exc:
      # When disconnected we get...
      #
      # Python 2:
      #   socket.error: [Errno 107] Transport endpoint is not connected
      #
      # Python 3:
      #   ValueError: I/O operation on closed file.

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'received exception "%s"' % exc)
      raise stem.SocketClosed(exc)

    raw_content += line

    # Parses the tor control lines. These are of the form...
    #   <status code><divider><content>\r\n

    if len(line) == 0:
      # if the socket is disconnected then the readline() method will provide
      # empty content

      prefix = logging_prefix % 'SocketClosed'
      log.info(prefix + 'empty socket content')
      raise stem.SocketClosed('Received empty socket content.')
    elif len(line) < 4:
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'line too short, "%s"' % log.escape(line))
      raise stem.ProtocolError('Badly formatted reply line: too short')
    elif not re.match(b'^[a-zA-Z0-9]{3}[-+ ]', line):
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'malformed status code/divider, "%s"' % log.escape(line))
      raise stem.ProtocolError('Badly formatted reply line: beginning is malformed')
    elif not line.endswith(b'\r\n'):
      prefix = logging_prefix % 'ProtocolError'
      log.info(prefix + 'no CRLF linebreak, "%s"' % log.escape(line))
      raise stem.ProtocolError('All lines should end with CRLF')

    line = line[:-2]  # strips off the CRLF
    status_code, divider, content = line[:3], line[3:4], line[4:]

    # the divider is compared against str constants below, so normalize the
    # status code and divider to unicode under python 3 (content stays bytes)
    if stem.prereq.is_python_3():
      status_code = stem.util.str_tools._to_unicode(status_code)
      divider = stem.util.str_tools._to_unicode(divider)

    if divider == '-':
      # mid-reply line, keep pulling for more content
      parsed_content.append((status_code, divider, content))
    elif divider == ' ':
      # end of the message, return the message
      parsed_content.append((status_code, divider, content))

      log_message = raw_content.replace(b'\r\n', b'\n').rstrip()
      log.trace('Received from tor:\n' + stem.util.str_tools._to_unicode(log_message))

      return stem.response.ControlMessage(parsed_content, raw_content)
    elif divider == '+':
      # data entry, all of the following lines belong to the content until we
      # get a line with just a period

      while True:
        try:
          line = stem.util.str_tools._to_bytes(control_file.readline())
        except socket.error as exc:
          prefix = logging_prefix % 'SocketClosed'
          log.info(prefix + 'received an exception while mid-way through a data reply (exception: "%s", read content: "%s")' % (exc, log.escape(raw_content)))
          raise stem.SocketClosed(exc)

        raw_content += line

        if not line.endswith(b'\r\n'):
          prefix = logging_prefix % 'ProtocolError'
          log.info(prefix + 'CRLF linebreaks missing from a data reply, "%s"' % log.escape(raw_content))
          raise stem.ProtocolError('All lines should end with CRLF')
        elif line == b'.\r\n':
          break  # data block termination

        line = line[:-2]  # strips off the CRLF

        # lines starting with a period are escaped by a second period (as per
        # section 2.4 of the control-spec)

        if line.startswith(b'..'):
          line = line[1:]

        # appends to previous content, using a newline rather than CRLF
        # separator (more conventional for multi-line string content outside
        # the windows world)

        content += b'\n' + line

      parsed_content.append((status_code, divider, content))
    else:
      # this should never be reached due to the prefix regex, but might as well
      # be safe...

      prefix = logging_prefix % 'ProtocolError'
      log.warn(prefix + "\"%s\" isn't a recognized divider type" % divider)
      raise stem.ProtocolError("Unrecognized divider type '%s': %s" % (divider, stem.util.str_tools._to_unicode(line)))
def send_formatting(message):
  """
  Performs the formatting expected from sent control messages. For more
  information see the :func:`~stem.socket.send_message` function.

  :param str message: message to be formatted

  :returns: **str** of the message wrapped by the formatting expected from
    controllers
  """

  # From control-spec section 2.2...
  #   Command = Keyword OptArguments CRLF / "+" Keyword OptArguments CRLF CmdData
  #
  # A single-line command is simply CRLF terminated. A multi-line command is
  # prefixed with '+' and terminated by a lone period on its own line.

  # normalize any CRLFs that are already present so the check below is uniform
  normalized = message.replace('\r\n', '\n')

  if '\n' not in normalized:
    return normalized + '\r\n'

  return '+' + normalized.replace('\n', '\r\n') + '\r\n.\r\n'
#coding:utf-8
import os, sys
def help():
    # Prints CLI usage (shadows the builtin `help`).
    #
    # NOTE(review): the usage text below is GBK-encoded Chinese that was
    # mojibake'd somewhere along the way; it is runtime output, so it is
    # preserved verbatim here. Roughly: "usage: DelComment srcpath [dstpath];
    # srcpath is a source dir/file; dstpath must already exist, defaults to
    # the current working directory" -- TODO confirm against the original.
    print """ÃüÁîÐиñʽ: DelComment srcpath [dstpath]
srcpathΪԴĿ¼»òÎļþµÄ·¾¶£»
detpath±ØÐëÊÇÒѾ´æÔڵģ¬ËüÊÇ´æ·Åת»»ºóµÄĿ¼µÄ·¾¶£¬Ä¬ÈÏΪµ±Ç°¹¤×÷Ŀ¼¡£
"""
def stripSlash(str):
    """Return the given path with any trailing path separators removed.

    Handles '/' and '\\' in any order, including mixed trailing runs such as
    'dir/\\' which the old two-pass ``rstrip('\\').rstrip('/')`` missed.

    :param str: path string (parameter name shadows the builtin ``str``;
        kept for backward compatibility with existing callers)
    :returns: path without trailing slashes or backslashes
    """
    # rstrip with a multi-character argument strips any combination of the
    # given characters, so one pass covers interleaved separators
    return str.rstrip('\\/')
def copyRemoveAllComment(srcpath, dstpath):
    # Copies the file at srcpath to dstpath while stripping C/C++/Java style
    # comments ('// ...' line comments and '/* ... */' block comments) with a
    # character-by-character state machine. (Python 2 code: read(1) on a 'rb'
    # file yields a 1-char str, so the '' / '\n' comparisons below are str.)
    #
    # NOTE(review): string and char literals are not tracked, so a '//' or
    # '/*' appearing inside a quoted literal is treated as a comment -- confirm
    # that inputs don't rely on that.

    # scanner states
    STATE_SCAN = 0              # plain code
    STATE_IS_COMMENT_START = 1  # just saw '/', may begin a comment
    STATE_IN_BLOCK_COMMENT = 2  # inside /* ... */
    STATE_IN_LINE_COMMENT = 3   # inside // ...
    STATE_IS_COMMENT_END = 4    # inside a block comment, just saw '*'

    # open the source for reading and the destination for writing
    srcfile = open(srcpath, 'rb')
    dstfile = open(dstpath, 'wb')

    # filter out the comments one character at a time
    readState = STATE_SCAN
    text = []
    while True:
        ch = srcfile.read(1)
        if ch == '':
            break;
        if readState == STATE_SCAN:
            if ch == '/':
                readState = STATE_IS_COMMENT_START
            else:
                text.append(ch)
        elif readState == STATE_IS_COMMENT_START:
            if ch == '/':
                readState = STATE_IN_LINE_COMMENT
            elif ch == '*':
                readState = STATE_IN_BLOCK_COMMENT
            else:
                # false alarm: emit the withheld '/' along with this char
                readState = STATE_SCAN
                text.append('/'+ch)
        elif readState == STATE_IN_LINE_COMMENT:
            # note: the newline terminating the comment is dropped as well
            if ch == '\n':
                readState = STATE_SCAN
        elif readState == STATE_IN_BLOCK_COMMENT:
            if ch == '*':
                readState = STATE_IS_COMMENT_END
        elif readState == STATE_IS_COMMENT_END:
            if ch == '/':
                readState = STATE_SCAN
                # swallow any line break(s) that directly follow the comment,
                # then step back one char so the next iteration re-reads it
                ch = srcfile.read(1)
                while ch == '\r' or ch == '\n':
                    ch = srcfile.read(1)
                srcfile.seek(-1, 1)
            else:
                # a lone '*' inside the block comment; rewind so this char is
                # re-examined from the IN_BLOCK_COMMENT state
                readState = STATE_IN_BLOCK_COMMENT
                srcfile.seek(-1, 1)

    # write out the filtered text
    dstfile.writelines(text)
    dstfile.flush()

    # close both files
    srcfile.close()
    dstfile.close()
def convert(srcpath, dstpath):
    """Recursively mirror srcpath into dstpath, stripping comments from
    every '.java' file via copyRemoveAllComment. Files found directly in a
    scanned directory are written into that directory's target; nested
    directories are recreated under dstpath. Non-java files are skipped.
    """
    if not os.path.isdir(srcpath):
        # single file: only .java files are processed
        if srcpath.endswith('.java'):
            target = os.path.join(dstpath, os.path.basename(srcpath))
            copyRemoveAllComment(srcpath, target)
        return

    for entry in os.listdir(srcpath):
        child = os.path.join(srcpath, entry)
        if os.path.isfile(child):
            convert(child, dstpath)
        else:
            # recreate the sub-directory before descending into it
            mirrored = os.path.join(dstpath, entry)
            if not os.path.exists(mirrored):
                os.mkdir(mirrored)
            convert(child, mirrored)
if __name__ == '__main__':
    # Entry point: DelComment srcpath [dstpath]
    # (Python 2 script; the print'ed error strings are GBK-mojibake'd Chinese
    # for "parameter error" and are preserved verbatim as runtime output.)
    argvlen = len(sys.argv)

    # expect one or two arguments beyond the program name
    if argvlen > 2+1 or argvlen <= 2-1:
        print '²ÎÊý´íÎó£¡:-(\n'
        help()
        sys.exit(1)
    elif not os.path.isfile(sys.argv[1]) and not os.path.isdir(sys.argv[1]) or argvlen == 1 + 2 and os.path.isfile(sys.argv[1]):
        # srcpath must exist; an explicit dstpath is only allowed when
        # srcpath is a directory
        print '²ÎÊý´íÎó£¡:-(\n'
        help()
        sys.exit(2)

    # resolve the source path
    srcpath = stripSlash(sys.argv[1])
    print 'srcpath: ' + srcpath

    # resolve the destination path (defaults to '<cwd>\delcomment')
    dstpath = ""
    if argvlen == 1 + 2:
        dstpath = stripSlash(sys.argv[2])
    else:
        dstpath = os.getcwd() + '\\delcomment'
        # NOTE(review): indentation was reconstructed; the mkdir is assumed
        # to apply only to the default path, matching help()'s statement
        # that a user-supplied dstpath must already exist -- confirm.
        if not os.path.exists(dstpath):
            os.mkdir(dstpath)

    # for a source directory, mirror into a '<name>delcomment' sub-directory
    if os.path.isdir(srcpath):
        dstpath = dstpath + '\\' + os.path.basename(srcpath) + 'delcomment'
        if not os.path.exists(dstpath):
            os.mkdir(dstpath)
    print 'dstpath: ' + dstpath

    # start the conversion
    convert(srcpath, dstpath)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.