repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
stefan-caraiman/cloudbase-init | cloudbaseinit/utils/crypt.py | 7 | 6427 | # Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import ctypes
import ctypes.util
import struct
import sys
# OpenSSL ships as "libeay32.dll" on Windows; elsewhere resolve the shared
# library through the platform linker search path.
if sys.platform == "win32":
    openssl_lib_path = "libeay32.dll"
else:
    openssl_lib_path = ctypes.util.find_library("ssl")
openssl = ctypes.CDLL(openssl_lib_path)
# libc handle; fopen/fclose are needed to hand FILE* pointers to OpenSSL
# PEM routines (see PEM_write_RSAPublicKey prototype below).
clib = ctypes.CDLL(ctypes.util.find_library("c"))
class RSA(ctypes.Structure):
    # ctypes mirror of OpenSSL's ``struct rsa_st``.  The field order and
    # types must match the C layout exactly, because ``e`` and ``n`` are
    # assigned directly through ``rsa_p.contents`` when a public key is
    # loaded.  Do not reorder or retype fields.
    #
    # NOTE(review): this layout corresponds to OpenSSL 0.9.8/1.0.x, where
    # the RSA struct is public.  OpenSSL 1.1+ made it opaque (RSA_set0_key
    # is required there) -- confirm the library version actually loaded.
    _fields_ = [
        ("pad", ctypes.c_int),
        ("version", ctypes.c_long),
        ("meth", ctypes.c_void_p),
        ("engine", ctypes.c_void_p),
        ("n", ctypes.c_void_p),
        ("e", ctypes.c_void_p),
        ("d", ctypes.c_void_p),
        ("p", ctypes.c_void_p),
        ("q", ctypes.c_void_p),
        ("dmp1", ctypes.c_void_p),
        ("dmq1", ctypes.c_void_p),
        ("iqmp", ctypes.c_void_p),
        ("sk", ctypes.c_void_p),
        ("dummy", ctypes.c_int),
        ("references", ctypes.c_int),
        ("flags", ctypes.c_int),
        ("_method_mod_n", ctypes.c_void_p),
        ("_method_mod_p", ctypes.c_void_p),
        ("_method_mod_q", ctypes.c_void_p),
        ("bignum_data", ctypes.c_char_p),
        ("blinding", ctypes.c_void_p),
        ("mt_blinding", ctypes.c_void_p)
    ]
# PKCS#1 v1.5 padding selector (RSA_PKCS1_PADDING in <openssl/rsa.h>).
# Stored as an attribute on the CDLL handle purely for namespacing.
openssl.RSA_PKCS1_PADDING = 1

# Declare argument/return types for the OpenSSL entry points we call, so
# ctypes marshals pointers correctly on both 32- and 64-bit platforms.
openssl.RSA_new.restype = ctypes.POINTER(RSA)
openssl.BN_bin2bn.restype = ctypes.c_void_p
openssl.BN_bin2bn.argtypes = [ctypes.c_char_p, ctypes.c_int, ctypes.c_void_p]
openssl.BN_new.restype = ctypes.c_void_p
openssl.RSA_size.restype = ctypes.c_int
openssl.RSA_size.argtypes = [ctypes.POINTER(RSA)]
openssl.RSA_public_encrypt.argtypes = [ctypes.c_int,
                                       ctypes.c_char_p,
                                       ctypes.c_char_p,
                                       ctypes.POINTER(RSA),
                                       ctypes.c_int]
openssl.RSA_public_encrypt.restype = ctypes.c_int
openssl.RSA_free.argtypes = [ctypes.POINTER(RSA)]
openssl.PEM_write_RSAPublicKey.restype = ctypes.c_int
openssl.PEM_write_RSAPublicKey.argtypes = [ctypes.c_void_p,
                                           ctypes.POINTER(RSA)]

# Error-queue helpers used to build OpenSSLException messages.
openssl.ERR_get_error.restype = ctypes.c_long
openssl.ERR_get_error.argtypes = []
openssl.ERR_error_string_n.restype = ctypes.c_void_p
openssl.ERR_error_string_n.argtypes = [ctypes.c_long,
                                       ctypes.c_char_p,
                                       ctypes.c_int]
openssl.ERR_load_crypto_strings.restype = ctypes.c_int
openssl.ERR_load_crypto_strings.argtypes = []

# libc FILE* helpers for APIs that take a C stream.
clib.fopen.restype = ctypes.c_void_p
clib.fopen.argtypes = [ctypes.c_char_p, ctypes.c_char_p]
clib.fclose.restype = ctypes.c_int
clib.fclose.argtypes = [ctypes.c_void_p]
class CryptException(Exception):
    """Base exception for crypto errors raised by this module."""
    pass
class OpenSSLException(CryptException):
    """Raised when an OpenSSL call fails.

    The message is taken from the most recent entry in OpenSSL's
    thread-local error queue.
    """

    def __init__(self):
        super(OpenSSLException, self).__init__(self._get_openssl_error_msg())

    def _get_openssl_error_msg(self):
        # Load the human-readable error strings, then format the earliest
        # queued error code into a fixed-size buffer.
        openssl.ERR_load_crypto_strings()
        err_code = openssl.ERR_get_error()
        buf = ctypes.create_string_buffer(1024)
        openssl.ERR_error_string_n(err_code, buf, 1024)
        return buf.value.decode("ascii")
class RSAWrapper(object):
    """Context-manager wrapper owning an OpenSSL ``RSA*`` pointer."""

    def __init__(self, rsa_p):
        self._rsa_p = rsa_p

    def __enter__(self):
        return self

    def __exit__(self, tp, value, tb):
        self.free()

    def free(self):
        """Release the underlying RSA structure back to OpenSSL."""
        openssl.RSA_free(self._rsa_p)

    def public_encrypt(self, clear_text):
        """Encrypt ``clear_text`` with the public key (PKCS#1 v1.5 padding).

        Returns the raw ciphertext bytes; raises OpenSSLException on failure.
        """
        out_buf = ctypes.create_string_buffer(openssl.RSA_size(self._rsa_p))
        out_len = openssl.RSA_public_encrypt(len(clear_text),
                                             clear_text,
                                             out_buf,
                                             self._rsa_p,
                                             openssl.RSA_PKCS1_PADDING)
        # RSA_public_encrypt returns -1 on error.
        if out_len == -1:
            raise OpenSSLException()
        return out_buf[:out_len]
class CryptManager(object):

    def load_ssh_rsa_public_key(self, ssh_pub_key):
        """Parse an OpenSSH "ssh-rsa ..." public key line.

        Returns an RSAWrapper holding the key; raises CryptException for
        malformed or non-RSA keys and OpenSSLException for library errors.
        """
        prefix = "ssh-rsa "
        if not ssh_pub_key.startswith(prefix):
            raise CryptException('Invalid SSH key')

        # The base64 payload is the first whitespace-separated token after
        # the prefix; anything after it (e.g. a comment) is ignored.
        b64_pub_key = ssh_pub_key[len(prefix):].split(' ', 1)[0]
        pub_key = base64.b64decode(b64_pub_key)

        def read_field(pos):
            # Wire format: 32-bit big-endian length, then that many bytes.
            # The declared length is returned alongside the slice so the
            # exact value can be passed straight to BN_bin2bn.
            (length,) = struct.unpack('>I', pub_key[pos:pos + 4])
            pos += 4
            return length, pub_key[pos:pos + length], pos + length

        _, key_type_raw, offset = read_field(0)
        key_type = key_type_raw.decode('utf-8')
        if key_type not in ['ssh-rsa', 'rsa', 'rsa1']:
            raise CryptException('Unsupported SSH key type "%s". '
                                 'Only RSA keys are currently supported'
                                 % key_type)

        rsa_p = openssl.RSA_new()
        try:
            rsa_p.contents.e = openssl.BN_new()
            rsa_p.contents.n = openssl.BN_new()

            e_len, e_key_bin, offset = read_field(offset)
            if not openssl.BN_bin2bn(e_key_bin, e_len, rsa_p.contents.e):
                raise OpenSSLException()

            n_len, n_key_bin, offset = read_field(offset)
            # Trailing bytes mean the blob was not a plain RSA key.
            if offset != len(pub_key):
                raise CryptException('Invalid SSH key')
            if not openssl.BN_bin2bn(n_key_bin, n_len, rsa_p.contents.n):
                raise OpenSSLException()

            return RSAWrapper(rsa_p)
        except Exception:
            # Don't leak the RSA structure on any parse/library failure.
            openssl.RSA_free(rsa_p)
            raise
| apache-2.0 |
sbbm/llvm | utils/lit/lit/TestRunner.py | 20 | 19809 | from __future__ import absolute_import
import os, signal, subprocess, sys
import re
import platform
import tempfile
import lit.ShUtil as ShUtil
import lit.Test as Test
import lit.util
from lit.util import to_bytes, to_string
class InternalShellError(Exception):
    """Error raised by the built-in shell for an unexecutable command.

    Carries the offending parsed command and a human-readable message.
    """

    def __init__(self, command, message):
        self.message = message
        self.command = command
# True when running on native Windows (not cygwin, which reports 'CYGWIN_*').
kIsWindows = platform.system() == 'Windows'

# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows

# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
def executeShCmd(cmd, cfg, cwd, results):
    """Recursively execute a parsed shell command tree.

    ``cmd`` is either a ShUtil.Seq ('&&', '||', ';' sequencing) or a
    ShUtil.Pipeline.  For every command actually run, a
    ``(command, stdout, stderr, exit_code)`` tuple is appended to
    ``results``.  Returns the overall exit code, negated (via ``not``)
    when the pipeline was prefixed with ``!``.
    """
    if isinstance(cmd, ShUtil.Seq):
        if cmd.op == ';':
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            return executeShCmd(cmd.rhs, cfg, cwd, results)

        if cmd.op == '&':
            raise InternalShellError(cmd,"unsupported shell operator: '&'")

        if cmd.op == '||':
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            if res != 0:
                res = executeShCmd(cmd.rhs, cfg, cwd, results)
            return res

        if cmd.op == '&&':
            res = executeShCmd(cmd.lhs, cfg, cwd, results)
            if res is None:
                return res

            if res == 0:
                res = executeShCmd(cmd.rhs, cfg, cwd, results)
            return res

        raise ValueError('Unknown shell command: %r' % cmd.op)

    assert isinstance(cmd, ShUtil.Pipeline)
    procs = []
    input = subprocess.PIPE
    stderrTempFiles = []
    opened_files = []
    named_temp_files = []
    # To avoid deadlock, we use a single stderr stream for piped
    # output. This is null until we have seen some output using
    # stderr.
    for i,j in enumerate(cmd.commands):
        # Apply the redirections, we use (N,) as a sentinel to indicate stdin,
        # stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
        # from a file are represented with a list [file, mode, file-object]
        # where file-object is initially None.
        redirects = [(0,), (1,), (2,)]
        for r in j.redirects:
            if r[0] == ('>',2):
                redirects[2] = [r[1], 'w', None]
            elif r[0] == ('>>',2):
                redirects[2] = [r[1], 'a', None]
            elif r[0] == ('>&',2) and r[1] in '012':
                redirects[2] = redirects[int(r[1])]
            elif r[0] == ('>&',) or r[0] == ('&>',):
                redirects[1] = redirects[2] = [r[1], 'w', None]
            elif r[0] == ('>',):
                redirects[1] = [r[1], 'w', None]
            elif r[0] == ('>>',):
                redirects[1] = [r[1], 'a', None]
            elif r[0] == ('<',):
                redirects[0] = [r[1], 'r', None]
            else:
                raise InternalShellError(j,"Unsupported redirect: %r" % (r,))

        # Map from the final redirections to something subprocess can handle.
        final_redirects = []
        for index,r in enumerate(redirects):
            if r == (0,):
                result = input
            elif r == (1,):
                if index == 0:
                    raise InternalShellError(j,"Unsupported redirect for stdin")
                elif index == 1:
                    result = subprocess.PIPE
                else:
                    result = subprocess.STDOUT
            elif r == (2,):
                if index != 2:
                    raise InternalShellError(j,"Unsupported redirect on stdout")
                result = subprocess.PIPE
            else:
                if r[2] is None:
                    if kAvoidDevNull and r[0] == '/dev/null':
                        r[2] = tempfile.TemporaryFile(mode=r[1])
                    else:
                        r[2] = open(r[0], r[1])
                    # Workaround a Win32 and/or subprocess bug when appending.
                    #
                    # FIXME: Actually, this is probably an instance of PR6753.
                    if r[1] == 'a':
                        r[2].seek(0, 2)
                opened_files.append(r[2])
                result = r[2]
            final_redirects.append(result)

        stdin, stdout, stderr = final_redirects

        # If stderr wants to come from stdout, but stdout isn't a pipe, then put
        # stderr on a pipe and treat it as stdout.
        if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
            stderr = subprocess.PIPE
            stderrIsStdout = True
        else:
            stderrIsStdout = False

            # Don't allow stderr on a PIPE except for the last
            # process, this could deadlock.
            #
            # FIXME: This is slow, but so is deadlock.
            if stderr == subprocess.PIPE and j != cmd.commands[-1]:
                stderr = tempfile.TemporaryFile(mode='w+b')
                stderrTempFiles.append((i, stderr))

        # Resolve the executable path ourselves.
        args = list(j.args)
        executable = lit.util.which(args[0], cfg.environment['PATH'])
        if not executable:
            raise InternalShellError(j, '%r: command not found' % j.args[0])

        # Replace uses of /dev/null with temporary files.
        # NOTE(review): this inner loop rebinds the outer loop variable ``i``;
        # harmless today because ``i`` is not read again within this
        # iteration, but fragile against future edits.
        if kAvoidDevNull:
            for i,arg in enumerate(args):
                if arg == "/dev/null":
                    f = tempfile.NamedTemporaryFile(delete=False)
                    f.close()
                    named_temp_files.append(f.name)
                    args[i] = f.name

        try:
            procs.append(subprocess.Popen(args, cwd=cwd,
                                          executable = executable,
                                          stdin = stdin,
                                          stdout = stdout,
                                          stderr = stderr,
                                          env = cfg.environment,
                                          close_fds = kUseCloseFDs))
        except OSError as e:
            raise InternalShellError(j, 'Could not create process due to {}'.format(e))

        # Immediately close stdin for any process taking stdin from us.
        if stdin == subprocess.PIPE:
            procs[-1].stdin.close()
            procs[-1].stdin = None

        # Update the current stdin source.
        if stdout == subprocess.PIPE:
            input = procs[-1].stdout
        elif stderrIsStdout:
            input = procs[-1].stderr
        else:
            input = subprocess.PIPE

    # Explicitly close any redirected files. We need to do this now because we
    # need to release any handles we may have on the temporary files (important
    # on Win32, for example). Since we have already spawned the subprocess, our
    # handles have already been transferred so we do not need them anymore.
    for f in opened_files:
        f.close()

    # FIXME: There is probably still deadlock potential here. Yawn.
    procData = [None] * len(procs)
    procData[-1] = procs[-1].communicate()

    for i in range(len(procs) - 1):
        if procs[i].stdout is not None:
            out = procs[i].stdout.read()
        else:
            out = ''
        if procs[i].stderr is not None:
            err = procs[i].stderr.read()
        else:
            err = ''
        procData[i] = (out,err)

    # Read stderr out of the temp files.
    for i,f in stderrTempFiles:
        f.seek(0, 0)
        procData[i] = (procData[i][0], f.read())

    # NOTE(review): this local helper shadows the module-level
    # ``lit.util.to_string`` import; since callers below pass it an
    # already-decoded str, the ``.encode()`` branch is effectively dead.
    def to_string(bytes):
        if isinstance(bytes, str):
            return bytes
        return bytes.encode('utf-8')

    exitCode = None
    for i,(out,err) in enumerate(procData):
        res = procs[i].wait()
        # Detect Ctrl-C in subprocess.
        if res == -signal.SIGINT:
            raise KeyboardInterrupt

        # Ensure the resulting output is always of string type.
        try:
            out = to_string(out.decode('utf-8'))
        except:
            out = str(out)
        try:
            err = to_string(err.decode('utf-8'))
        except:
            err = str(err)

        results.append((cmd.commands[i], out, err, res))
        if cmd.pipe_err:
            # Python treats the exit code as a signed char.
            if exitCode is None:
                exitCode = res
            elif res < 0:
                exitCode = min(exitCode, res)
            else:
                exitCode = max(exitCode, res)
        else:
            exitCode = res

    # Remove any named temporary files we created.
    for f in named_temp_files:
        try:
            os.remove(f)
        except OSError:
            pass

    if cmd.negate:
        exitCode = not exitCode

    return exitCode
def executeScriptInternal(test, litConfig, tmpBase, commands, cwd):
    """Run the RUN lines through lit's built-in shell (executeShCmd).

    Returns ``(out, err, exitCode)`` on success, or a ``lit.Test.Result``
    directly when a RUN line fails to parse.
    """
    cmds = []
    for ln in commands:
        try:
            cmds.append(ShUtil.ShParser(ln, litConfig.isWindows,
                                        test.config.pipefail).parse())
        # NOTE(review): bare except also swallows KeyboardInterrupt here;
        # kept as-is to preserve behavior.
        except:
            return lit.Test.Result(Test.FAIL, "shell parser error on: %r" % ln)

    # Join all RUN lines into a single '&&' sequence so a failure stops
    # the remaining commands.
    cmd = cmds[0]
    for c in cmds[1:]:
        cmd = ShUtil.Seq(cmd, '&&', c)

    results = []
    try:
        exitCode = executeShCmd(cmd, test.config, cwd, results)
    except InternalShellError:
        # sys.exc_info() form kept for old-Python compatibility.
        e = sys.exc_info()[1]
        exitCode = 127
        results.append((e.command, '', e.message, exitCode))

    # Render a per-command transcript; stderr of the pipeline is folded
    # into the per-command entries, so ``err`` stays empty.
    out = err = ''
    for i,(cmd, cmd_out,cmd_err,res) in enumerate(results):
        out += 'Command %d: %s\n' % (i, ' '.join('"%s"' % s for s in cmd.args))
        out += 'Command %d Result: %r\n' % (i, res)
        out += 'Command %d Output:\n%s\n\n' % (i, cmd_out)
        out += 'Command %d Stderr:\n%s\n\n' % (i, cmd_err)

    return out, err, exitCode
def executeScript(test, litConfig, tmpBase, commands, cwd):
    """Materialize the RUN lines as an external shell/batch script and run it.

    Uses bash when available, cmd.exe on Windows without bash, otherwise
    /bin/sh.  Returns whatever ``lit.util.executeCommand`` returns.
    """
    bashPath = litConfig.getBashPath()
    isWin32CMDEXE = litConfig.isWindows and not bashPath

    script = tmpBase + '.script'
    if isWin32CMDEXE:
        script += '.bat'

    # Binary mode avoids CRLF translation when bash will run the script
    # on Windows.
    mode = 'wb' if (litConfig.isWindows and not isWin32CMDEXE) else 'w'

    f = open(script, mode)
    if isWin32CMDEXE:
        # Batch semantics: bail out as soon as any command fails.
        f.write('\nif %ERRORLEVEL% NEQ 0 EXIT\n'.join(commands))
    else:
        # Bourne-shell semantics: chain commands with '&&' so a failure
        # short-circuits the rest.
        if test.config.pipefail:
            f.write('set -o pipefail;')
        f.write('{ ' + '; } &&\n{ '.join(commands) + '; }')
    f.write('\n')
    f.close()

    if isWin32CMDEXE:
        command = ['cmd', '/c', script]
    elif bashPath:
        command = [bashPath, script]
    else:
        command = ['/bin/sh', script]
    if litConfig.useValgrind:
        # FIXME: Running valgrind on sh is overkill. We probably could just
        # run on clang with no real loss.
        command = litConfig.valgrindArgs + command

    return lit.util.executeCommand(command, cwd=cwd,
                                   env=test.config.environment)
def parseIntegratedTestScriptCommands(source_path):
    """
    parseIntegratedTestScriptCommands(source_path) -> commands

    Parse the commands in an integrated test script file into a list of
    (line_number, command_type, line).
    """

    # This code is carefully written to be dual compatible with Python 2.5+ and
    # Python 3 without requiring input files to always have valid codings. The
    # trick we use is to open the file in binary mode and use the regular
    # expression library to find the commands, with it scanning strings in
    # Python2 and bytes in Python3.
    #
    # Once we find a match, we do require each script line to be decodable to
    # UTF-8, so we convert the outputs to UTF-8 before returning. This way the
    # remaining code can work with "strings" agnostic of the executing Python
    # version.

    keywords = ['RUN:', 'XFAIL:', 'REQUIRES:', 'UNSUPPORTED:', 'END.']
    keywords_re = re.compile(
        to_bytes("(%s)(.*)\n" % ("|".join(k for k in keywords),)))

    # try/finally rather than 'with' to retain Python 2.5 compatibility.
    f = open(source_path, 'rb')
    try:
        # Read the entire file contents.
        data = f.read()

        # Ensure the data ends with a newline.
        if not data.endswith(to_bytes('\n')):
            data = data + to_bytes('\n')

        # Iterate over the matches.
        line_number = 1
        last_match_position = 0
        for match in keywords_re.finditer(data):
            # Compute the updated line number by counting the intervening
            # newlines.
            match_position = match.start()
            line_number += data.count(to_bytes('\n'), last_match_position,
                                      match_position)
            last_match_position = match_position

            # Convert the keyword and line to UTF-8 strings and yield the
            # command. Note that we take care to return regular strings in
            # Python 2, to avoid other code having to differentiate between the
            # str and unicode types.
            keyword,ln = match.groups()
            yield (line_number, to_string(keyword[:-1].decode('utf-8')),
                   to_string(ln.decode('utf-8')))
    finally:
        f.close()
def parseIntegratedTestScript(test, normalize_slashes=False,
                              extra_substitutions=[], require_script=True):
    """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
    script and extract the lines to 'RUN' as well as 'XFAIL' and 'REQUIRES'
    and 'UNSUPPORTED' information. The RUN lines also will have variable
    substitution performed. If 'require_script' is False an empty script may be
    returned. This can be used for test formats where the actual script is
    optional or ignored.

    Returns ``(script, tmpBase, execdir)`` on success, or a
    ``lit.Test.Result`` when the test is unresolved/unsupported.

    NOTE(review): ``extra_substitutions`` uses a mutable default; it is only
    read here, never mutated, so this is safe as written.
    """
    # Get the temporary location, this is always relative to the test suite
    # root, not test source root.
    #
    # FIXME: This should not be here?
    sourcepath = test.getSourcePath()
    sourcedir = os.path.dirname(sourcepath)
    execpath = test.getExecPath()
    execdir,execbase = os.path.split(execpath)
    tmpDir = os.path.join(execdir, 'Output')
    tmpBase = os.path.join(tmpDir, execbase)

    # Normalize slashes, if requested.
    if normalize_slashes:
        sourcepath = sourcepath.replace('\\', '/')
        sourcedir = sourcedir.replace('\\', '/')
        tmpDir = tmpDir.replace('\\', '/')
        tmpBase = tmpBase.replace('\\', '/')

    # We use #_MARKER_# to hide %% while we do the other substitutions.
    # Order matters: '%%' is hidden first and restored last, so literal
    # percent signs survive the intermediate substitutions.
    substitutions = list(extra_substitutions)
    substitutions.extend([('%%', '#_MARKER_#')])
    substitutions.extend(test.config.substitutions)
    substitutions.extend([('%s', sourcepath),
                          ('%S', sourcedir),
                          ('%p', sourcedir),
                          ('%{pathsep}', os.pathsep),
                          ('%t', tmpBase + '.tmp'),
                          ('%T', tmpDir),
                          ('#_MARKER_#', '%')])

    # "%/[STpst]" should be normalized.
    substitutions.extend([
        ('%/s', sourcepath.replace('\\', '/')),
        ('%/S', sourcedir.replace('\\', '/')),
        ('%/p', sourcedir.replace('\\', '/')),
        ('%/t', tmpBase.replace('\\', '/') + '.tmp'),
        ('%/T', tmpDir.replace('\\', '/')),
    ])

    # Collect the test lines from the script.
    script = []
    requires = []
    unsupported = []
    for line_number, command_type, ln in \
            parseIntegratedTestScriptCommands(sourcepath):
        if command_type == 'RUN':
            # Trim trailing whitespace.
            ln = ln.rstrip()

            # Substitute line number expressions
            ln = re.sub('%\(line\)', str(line_number), ln)

            def replace_line_number(match):
                if match.group(1) == '+':
                    return str(line_number + int(match.group(2)))
                if match.group(1) == '-':
                    return str(line_number - int(match.group(2)))
            ln = re.sub('%\(line *([\+-]) *(\d+)\)', replace_line_number, ln)

            # Collapse lines with trailing '\\'.
            if script and script[-1][-1] == '\\':
                script[-1] = script[-1][:-1] + ln
            else:
                script.append(ln)
        elif command_type == 'XFAIL':
            test.xfails.extend([s.strip() for s in ln.split(',')])
        elif command_type == 'REQUIRES':
            requires.extend([s.strip() for s in ln.split(',')])
        elif command_type == 'UNSUPPORTED':
            unsupported.extend([s.strip() for s in ln.split(',')])
        elif command_type == 'END':
            # END commands are only honored if the rest of the line is empty.
            if not ln.strip():
                break
        else:
            raise ValueError("unknown script command type: %r" % (
                    command_type,))

    # Apply substitutions to the script.  Allow full regular
    # expression syntax.  Replace each matching occurrence of regular
    # expression pattern a with substitution b in line ln.
    def processLine(ln):
        # Apply substitutions
        for a,b in substitutions:
            if kIsWindows:
                b = b.replace("\\","\\\\")
            ln = re.sub(a, b, ln)

        # Strip the trailing newline and any extra whitespace.
        return ln.strip()
    script = [processLine(ln)
              for ln in script]

    # Verify the script contains a run line.
    if require_script and not script:
        return lit.Test.Result(Test.UNRESOLVED, "Test has no run line!")

    # Check for unterminated run lines.
    if script and script[-1][-1] == '\\':
        return lit.Test.Result(Test.UNRESOLVED,
                               "Test has unterminated run lines (with '\\')")

    # Check that we have the required features:
    missing_required_features = [f for f in requires
                                 if f not in test.config.available_features]
    if missing_required_features:
        msg = ', '.join(missing_required_features)
        return lit.Test.Result(Test.UNSUPPORTED,
                               "Test requires the following features: %s" % msg)

    unsupported_features = [f for f in unsupported
                            if f in test.config.available_features]
    if unsupported_features:
        msg = ', '.join(unsupported_features)
        return lit.Test.Result(Test.UNSUPPORTED,
                               "Test is unsupported with the following features: %s" % msg)

    return script,tmpBase,execdir
def _runShTest(test, litConfig, useExternalSh,
               script, tmpBase, execdir):
    """Execute a parsed test script and turn the outcome into a Test.Result."""
    # Create the output directory if it does not already exist.
    lit.util.mkdir_p(os.path.dirname(tmpBase))

    # Choose execution strategy: a real shell, or lit's internal one.
    runner = executeScript if useExternalSh else executeScriptInternal
    res = runner(test, litConfig, tmpBase, script, execdir)
    if isinstance(res, lit.Test.Result):
        # Execution already produced a final result (e.g. parse error).
        return res

    out, err, exitCode = res
    status = Test.PASS if exitCode == 0 else Test.FAIL

    # Form the output log.
    output = """Script:\n--\n%s\n--\nExit Code: %d\n\n""" % (
        '\n'.join(script), exitCode)

    # Append the outputs, if present.
    if out:
        output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
    if err:
        output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)

    return lit.Test.Result(status, output)
def executeShTest(test, litConfig, useExternalSh,
                  extra_substitutions=[]):
    """Run a shell-script style test and return a ``lit.Test.Result``.

    Parses the integrated test script, applies substitutions, and then
    executes it with either an external shell or lit's internal one.
    """
    if test.config.unsupported:
        # BUG FIX: previously returned a bare (code, message) tuple while
        # every other path returns a lit.Test.Result, forcing callers to
        # special-case this branch.  Return a Result for consistency.
        return lit.Test.Result(Test.UNSUPPORTED, 'Test is unsupported')

    res = parseIntegratedTestScript(test, useExternalSh, extra_substitutions)
    if isinstance(res, lit.Test.Result):
        # Parsing failed or determined the test cannot run.
        return res
    if litConfig.noExecute:
        return lit.Test.Result(Test.PASS)

    script, tmpBase, execdir = res
    return _runShTest(test, litConfig, useExternalSh, script, tmpBase, execdir)
| gpl-3.0 |
aktech/sympy | sympy/physics/quantum/gate.py | 30 | 41905 | """An implementation of gates that act on qubits.
Gates are unitary operators that act on the space of qubits.
Medium Term Todo:
* Optimize Gate._apply_operators_Qubit to remove the creation of many
intermediate Qubit objects.
* Add commutation relationships to all operators and use this in gate_sort.
* Fix gate_sort and gate_simp.
* Get multi-target UGates plotting properly.
* Get UGate to work with either sympy/numpy matrices and output either
format. This should also use the matrix slots.
"""
from __future__ import print_function, division
from itertools import chain
import random
from sympy import Add, I, Integer, Mul, Pow, sqrt, Tuple
from sympy.core.numbers import Number
from sympy.core.compatibility import is_sequence, unicode, range
from sympy.printing.pretty.stringpict import prettyForm, stringPict
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.operator import (UnitaryOperator, Operator,
HermitianOperator)
from sympy.physics.quantum.matrixutils import matrix_tensor_product, matrix_eye
from sympy.physics.quantum.matrixcache import matrix_cache
from sympy.matrices.matrices import MatrixBase
from sympy.utilities import default_sort_key
# Public API of this module.
__all__ = [
    'Gate',
    'CGate',
    'UGate',
    'OneQubitGate',
    'TwoQubitGate',
    'IdentityGate',
    'HadamardGate',
    'XGate',
    'YGate',
    'ZGate',
    'TGate',
    'PhaseGate',
    'SwapGate',
    'CNotGate',
    # Aliased gate names
    'CNOT',
    'SWAP',
    'H',
    'X',
    'Y',
    'Z',
    'T',
    'S',
    'Phase',
    'normalized',
    'gate_sort',
    'gate_simp',
    'random_circuit',
    'CPHASE',
    'CGateS',
]

#-----------------------------------------------------------------------------
# Gate Super-Classes
#-----------------------------------------------------------------------------

# Module-level flag toggled by normalized(); when True the Hadamard gate
# carries its 1/sqrt(2) normalization factor.
_normalized = True
def _max(*args, **kwargs):
if "key" not in kwargs:
kwargs["key"] = default_sort_key
return max(*args, **kwargs)
def _min(*args, **kwargs):
if "key" not in kwargs:
kwargs["key"] = default_sort_key
return min(*args, **kwargs)
def normalized(normalize):
    """Set flag controlling normalization of Hadamard gates by 1/sqrt(2).

    This is a global setting that can be used to simplify the look of various
    expressions, by leaving off the leading 1/sqrt(2) of the Hadamard gate.

    Parameters
    ----------
    normalize : bool
        Should the Hadamard gate include the 1/sqrt(2) normalization factor?
        When True, the Hadamard gate will have the 1/sqrt(2). When False, the
        Hadamard gate will not have this factor.
    """
    # Mutates the module-level _normalized flag read by gate classes.
    global _normalized
    _normalized = normalize
def _validate_targets_controls(tandc):
    """Validate a sequence of target/control qubit labels.

    Each entry must be a SymPy ``Integer`` or ``Symbol``, and no label may
    appear twice.

    Raises
    ------
    TypeError
        If an entry is neither an Integer nor a Symbol.
    QuantumError
        If a target/control qubit is duplicated.
    """
    tandc = list(tandc)
    # Check for integers
    for bit in tandc:
        if not bit.is_Integer and not bit.is_Symbol:
            # BUG FIX: was ``%% tandc[bit]`` -- indexing the list with the
            # (non-integer) element raised an unrelated TypeError and
            # masked this message.  Report the offending element itself.
            raise TypeError('Integer expected, got: %r' % (bit,))
    # Detect duplicates (set() alone suffices; no need to listify it).
    if len(set(tandc)) != len(tandc):
        raise QuantumError(
            'Target/control qubits in a gate cannot be duplicated'
        )
class Gate(UnitaryOperator):
    """Non-controlled unitary gate operator that acts on qubits.

    This is a general abstract gate that needs to be subclassed to do anything
    useful.

    Parameters
    ----------
    label : tuple, int
        A list of the target qubits (as ints) that the gate will apply to.

    Examples
    ========

    """

    _label_separator = ','

    # Display names used by the str/pretty/latex printers; subclasses
    # override these.
    gate_name = u'G'
    gate_name_latex = u'G'

    #-------------------------------------------------------------------------
    # Initialization/creation
    #-------------------------------------------------------------------------

    @classmethod
    def _eval_args(cls, args):
        # Canonicalize the label into a Tuple and reject non-Integer,
        # non-Symbol or duplicated target qubits.
        args = Tuple(*UnitaryOperator._eval_args(args))
        _validate_targets_controls(args)
        return args

    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**(_max(args) + 1)

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def nqubits(self):
        """The total number of qubits this gate acts on.

        For controlled gate subclasses this includes both target and control
        qubits, so that, for examples the CNOT gate acts on 2 qubits.
        """
        return len(self.targets)

    @property
    def min_qubits(self):
        """The minimum number of qubits this gate needs to act on."""
        return _max(self.targets) + 1

    @property
    def targets(self):
        """A tuple of target qubits."""
        return self.label

    @property
    def gate_name_plot(self):
        # Latex name wrapped in math-mode delimiters for matplotlib.
        return r'$%s$' % self.gate_name_latex

    #-------------------------------------------------------------------------
    # Gate methods
    #-------------------------------------------------------------------------

    def get_target_matrix(self, format='sympy'):
        """The matrix rep. of the target part of the gate.

        Parameters
        ----------
        format : str
            The format string ('sympy','numpy', etc.)
        """
        raise NotImplementedError(
            'get_target_matrix is not implemented in Gate.')

    #-------------------------------------------------------------------------
    # Apply
    #-------------------------------------------------------------------------

    def _apply_operator_IntQubit(self, qubits, **options):
        """Redirect an apply from IntQubit to Qubit"""
        return self._apply_operator_Qubit(qubits, **options)

    def _apply_operator_Qubit(self, qubits, **options):
        """Apply this gate to a Qubit."""

        # Check number of qubits this gate acts on.
        if qubits.nqubits < self.min_qubits:
            raise QuantumError(
                'Gate needs a minimum of %r qubits to act on, got: %r' %
                (self.min_qubits, qubits.nqubits)
            )

        # If the controls are not met, just return
        if isinstance(self, CGate):
            if not self.eval_controls(qubits):
                return qubits

        targets = self.targets
        target_matrix = self.get_target_matrix(format='sympy')

        # Find which column of the target matrix this applies to.
        # The target bits of the incoming state are read off as a little-
        # endian binary number selecting the column.
        column_index = 0
        n = 1
        for target in targets:
            column_index += n*qubits[target]
            n = n << 1
        column = target_matrix[:, int(column_index)]

        # Now apply each column element to the qubit.
        result = 0
        for index in range(column.rows):
            # TODO: This can be optimized to reduce the number of Qubit
            # creations. We should simply manipulate the raw list of qubit
            # values and then build the new Qubit object once.
            # Make a copy of the incoming qubits.
            new_qubit = qubits.__class__(*qubits.args)
            # Flip the bits that need to be flipped.
            for bit in range(len(targets)):
                if new_qubit[targets[bit]] != (index >> bit) & 1:
                    new_qubit = new_qubit.flip(targets[bit])
            # The value in that row and column times the flipped-bit qubit
            # is the result for that part.
            result += column[index]*new_qubit
        return result

    #-------------------------------------------------------------------------
    # Represent
    #-------------------------------------------------------------------------

    def _represent_default_basis(self, **options):
        return self._represent_ZGate(None, **options)

    def _represent_ZGate(self, basis, **options):
        # Build the full 2**nqubits matrix representation in the Z basis;
        # 'nqubits' must be supplied via options.
        format = options.get('format', 'sympy')
        nqubits = options.get('nqubits', 0)
        if nqubits == 0:
            raise QuantumError(
                'The number of qubits must be given as nqubits.')

        # Make sure we have enough qubits for the gate.
        if nqubits < self.min_qubits:
            raise QuantumError(
                'The number of qubits %r is too small for the gate.' % nqubits
            )

        target_matrix = self.get_target_matrix(format)
        targets = self.targets
        if isinstance(self, CGate):
            controls = self.controls
        else:
            controls = []
        # represent_zbasis is defined elsewhere in this module.
        m = represent_zbasis(
            controls, targets, target_matrix, nqubits, format
        )
        return m

    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------

    def _sympystr(self, printer, *args):
        label = self._print_label(printer, *args)
        return '%s(%s)' % (self.gate_name, label)

    def _pretty(self, printer, *args):
        a = stringPict(unicode(self.gate_name))
        b = self._print_label_pretty(printer, *args)
        return self._print_subscript_pretty(a, b)

    def _latex(self, printer, *args):
        label = self._print_label(printer, *args)
        return '%s_{%s}' % (self.gate_name_latex, label)

    def plot_gate(self, axes, gate_idx, gate_grid, wire_grid):
        # Subclasses that can be drawn in circuit plots override this.
        raise NotImplementedError('plot_gate is not implemented.')
class CGate(Gate):
"""A general unitary gate with control qubits.
A general control gate applies a target gate to a set of targets if all
of the control qubits have a particular values (set by
``CGate.control_value``).
Parameters
----------
label : tuple
The label in this case has the form (controls, gate), where controls
is a tuple/list of control qubits (as ints) and gate is a ``Gate``
instance that is the target operator.
Examples
========
"""
gate_name = u'C'
gate_name_latex = u'C'
# The values this class controls for.
control_value = Integer(1)
simplify_cgate=False
#-------------------------------------------------------------------------
# Initialization
#-------------------------------------------------------------------------
@classmethod
def _eval_args(cls, args):
# _eval_args has the right logic for the controls argument.
controls = args[0]
gate = args[1]
if not is_sequence(controls):
controls = (controls,)
controls = UnitaryOperator._eval_args(controls)
_validate_targets_controls(chain(controls, gate.targets))
return (Tuple(*controls), gate)
@classmethod
def _eval_hilbert_space(cls, args):
"""This returns the smallest possible Hilbert space."""
return ComplexSpace(2)**_max(_max(args[0]) + 1, args[1].min_qubits)
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def nqubits(self):
"""The total number of qubits this gate acts on.
For controlled gate subclasses this includes both target and control
qubits, so that, for examples the CNOT gate acts on 2 qubits.
"""
return len(self.targets) + len(self.controls)
@property
def min_qubits(self):
"""The minimum number of qubits this gate needs to act on."""
return _max(_max(self.controls), _max(self.targets)) + 1
@property
def targets(self):
"""A tuple of target qubits."""
return self.gate.targets
@property
def controls(self):
"""A tuple of control qubits."""
return tuple(self.label[0])
@property
def gate(self):
"""The non-controlled gate that will be applied to the targets."""
return self.label[1]
#-------------------------------------------------------------------------
# Gate methods
#-------------------------------------------------------------------------
def get_target_matrix(self, format='sympy'):
return self.gate.get_target_matrix(format)
def eval_controls(self, qubit):
"""Return True/False to indicate if the controls are satisfied."""
return all(qubit[bit] == self.control_value for bit in self.controls)
def decompose(self, **options):
"""Decompose the controlled gate into CNOT and single qubits gates."""
if len(self.controls) == 1:
c = self.controls[0]
t = self.gate.targets[0]
if isinstance(self.gate, YGate):
g1 = PhaseGate(t)
g2 = CNotGate(c, t)
g3 = PhaseGate(t)
g4 = ZGate(t)
return g1*g2*g3*g4
if isinstance(self.gate, ZGate):
g1 = HadamardGate(t)
g2 = CNotGate(c, t)
g3 = HadamardGate(t)
return g1*g2*g3
else:
return self
#-------------------------------------------------------------------------
# Print methods
#-------------------------------------------------------------------------
def _print_label(self, printer, *args):
controls = self._print_sequence(self.controls, ',', printer, *args)
gate = printer._print(self.gate, *args)
return '(%s),%s' % (controls, gate)
    def _pretty(self, printer, *args):
        """Pretty-print as the gate name subscripted by the controls, with
        the wrapped gate in parentheses."""
        controls = self._print_sequence_pretty(
            self.controls, ',', printer, *args)
        gate = printer._print(self.gate)
        # NOTE(review): ``unicode`` is a Python 2 builtin; on Python 3 this
        # raises NameError unless a compatibility shim defines it -- confirm.
        gate_name = stringPict(unicode(self.gate_name))
        first = self._print_subscript_pretty(gate_name, controls)
        gate = self._print_parens_pretty(gate)
        final = prettyForm(*first.right((gate)))
        return final
def _latex(self, printer, *args):
controls = self._print_sequence(self.controls, ',', printer, *args)
gate = printer._print(self.gate, *args)
return r'%s_{%s}{\left(%s\right)}' % \
(self.gate_name_latex, controls, gate)
    def plot_gate(self, circ_plot, gate_idx):
        """
        Plot the controlled gate. If *simplify_cgate* is true, simplify
        C-X and C-Z gates into their more familiar forms.
        """
        # Vertical line spanning all wires touched by controls or targets.
        min_wire = int(_min(chain(self.controls, self.targets)))
        max_wire = int(_max(chain(self.controls, self.targets)))
        circ_plot.control_line(gate_idx, min_wire, max_wire)
        # Dot on every control wire.
        for c in self.controls:
            circ_plot.control_point(gate_idx, int(c))
        if self.simplify_cgate:
            # C-X drawn as an oplus; C-Z drawn as a second control dot.
            if self.gate.gate_name == u'X':
                self.gate.plot_gate_plus(circ_plot, gate_idx)
            elif self.gate.gate_name == u'Z':
                circ_plot.control_point(gate_idx, self.targets[0])
            else:
                self.gate.plot_gate(circ_plot, gate_idx)
        else:
            self.gate.plot_gate(circ_plot, gate_idx)
#-------------------------------------------------------------------------
# Miscellaneous
#-------------------------------------------------------------------------
def _eval_dagger(self):
if isinstance(self.gate, HermitianOperator):
return self
else:
return Gate._eval_dagger(self)
def _eval_inverse(self):
if isinstance(self.gate, HermitianOperator):
return self
else:
return Gate._eval_inverse(self)
    def _eval_power(self, exp):
        """Reduce powers of a controlled Hermitian gate modulo 2."""
        if isinstance(self.gate, HermitianOperator):
            if exp == -1:
                # Inverse handled by the generic Gate machinery.
                return Gate._eval_power(self, exp)
            elif abs(exp) % 2 == 0:
                # Even power: a Hermitian (involutory) controlled gate squared
                # is the identity, expressed here as self times its inverse.
                return self*(Gate._eval_inverse(self))
            else:
                # Odd power reduces to a single application.
                return self
        else:
            return Gate._eval_power(self, exp)
class CGateS(CGate):
    """Version of CGate that allows gate simplifications.

    I.e. cnot looks like an oplus, cphase has dots, etc.
    """
    # PEP 8 spacing, consistent with ``simplify_cgate = True`` in CNotGate.
    simplify_cgate = True
class UGate(Gate):
    """General gate specified by a set of targets and a target matrix.
    Parameters
    ----------
    label : tuple
        A tuple of the form (targets, U), where targets is a tuple of the
        target qubits and U is a unitary matrix with dimension of
        len(targets).
    """
    gate_name = u'U'
    gate_name_latex = u'U'
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # Normalize a bare target into a one-element sequence.
        targets = args[0]
        if not is_sequence(targets):
            targets = (targets,)
        targets = Gate._eval_args(targets)
        _validate_targets_controls(targets)
        mat = args[1]
        if not isinstance(mat, MatrixBase):
            raise TypeError('Matrix expected, got: %r' % mat)
        # The matrix must be square with dimension 2**len(targets).
        dim = 2**len(targets)
        if not all(dim == shape for shape in mat.shape):
            raise IndexError(
                'Number of targets must match the matrix size: %r %r' %
                (targets, mat)
            )
        return (targets, mat)
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        # Large enough to contain the highest target index.
        return ComplexSpace(2)**(_max(args[0]) + 1)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def targets(self):
        """A tuple of target qubits."""
        return tuple(self.label[0])
    #-------------------------------------------------------------------------
    # Gate methods
    #-------------------------------------------------------------------------
    def get_target_matrix(self, format='sympy'):
        """The matrix rep. of the target part of the gate.
        Parameters
        ----------
        format : str
            The format string ('sympy','numpy', etc.)
        """
        return self.label[1]
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    def _pretty(self, printer, *args):
        targets = self._print_sequence_pretty(
            self.targets, ',', printer, *args)
        # NOTE(review): ``unicode`` is Python 2 only -- confirm a py3 shim.
        gate_name = stringPict(unicode(self.gate_name))
        return self._print_subscript_pretty(gate_name, targets)
    def _latex(self, printer, *args):
        targets = self._print_sequence(self.targets, ',', printer, *args)
        return r'%s_{%s}' % (self.gate_name_latex, targets)
    def plot_gate(self, circ_plot, gate_idx):
        # Draw a labelled box on the first target wire.
        circ_plot.one_qubit_box(
            self.gate_name_plot,
            gate_idx, int(self.targets[0])
        )
class OneQubitGate(Gate):
    """A single qubit unitary gate base class."""
    # Every subclass acts on exactly one qubit.
    nqubits = Integer(1)
    def plot_gate(self, circ_plot, gate_idx):
        # Default rendering: a labelled box on the single target wire.
        circ_plot.one_qubit_box(
            self.gate_name_plot,
            gate_idx, int(self.targets[0])
        )
    def _eval_commutator(self, other, **hints):
        # Gates on disjoint qubits commute, and a gate commutes with another
        # instance of its own class, so [A, B] = 0 in those cases.
        if isinstance(other, OneQubitGate):
            if self.targets != other.targets or self.__class__ == other.__class__:
                return Integer(0)
        return Operator._eval_commutator(self, other, **hints)
    def _eval_anticommutator(self, other, **hints):
        # In the commuting cases above, {A, B} = 2*A*B.
        if isinstance(other, OneQubitGate):
            if self.targets != other.targets or self.__class__ == other.__class__:
                return Integer(2)*self*other
        return Operator._eval_anticommutator(self, other, **hints)
class TwoQubitGate(Gate):
    """A two qubit unitary gate base class."""
    # Every subclass acts on exactly two qubits.
    nqubits = Integer(2)
#-----------------------------------------------------------------------------
# Single Qubit Gates
#-----------------------------------------------------------------------------
class IdentityGate(OneQubitGate):
    """The single qubit identity gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'1'
    gate_name_latex = u'1'
    def get_target_matrix(self, format='sympy'):
        # 2x2 identity from the shared matrix cache.
        return matrix_cache.get_matrix('eye2', format)
    def _eval_commutator(self, other, **hints):
        # The identity commutes with everything: [1, A] = 0.
        return Integer(0)
    def _eval_anticommutator(self, other, **hints):
        # {1, A} = 2*A.
        return Integer(2)*other
class HadamardGate(HermitianOperator, OneQubitGate):
    """The single qubit Hadamard gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    >>> from sympy import sqrt
    >>> from sympy.physics.quantum.qubit import Qubit
    >>> from sympy.physics.quantum.gate import HadamardGate
    >>> from sympy.physics.quantum.qapply import qapply
    >>> qapply(HadamardGate(0)*Qubit('1'))
    sqrt(2)*|0>/2 - sqrt(2)*|1>/2
    >>> # Hadamard on bell state, applied on 2 qubits.
    >>> psi = 1/sqrt(2)*(Qubit('00')+Qubit('11'))
    >>> qapply(HadamardGate(0)*HadamardGate(1)*psi)
    sqrt(2)*|00>/2 + sqrt(2)*|11>/2
    """
    gate_name = u'H'
    gate_name_latex = u'H'
    def get_target_matrix(self, format='sympy'):
        # _normalized is a module-level flag; when False the 1/sqrt(2)
        # normalization is left out of the cached matrix.
        if _normalized:
            return matrix_cache.get_matrix('H', format)
        else:
            return matrix_cache.get_matrix('Hsqrt2', format)
    def _eval_commutator_XGate(self, other, **hints):
        # [H, X] = I*sqrt(2)*Y
        return I*sqrt(2)*YGate(self.targets[0])
    def _eval_commutator_YGate(self, other, **hints):
        # [H, Y] = I*sqrt(2)*(Z - X)
        return I*sqrt(2)*(ZGate(self.targets[0]) - XGate(self.targets[0]))
    def _eval_commutator_ZGate(self, other, **hints):
        # [H, Z] = -I*sqrt(2)*Y
        return -I*sqrt(2)*YGate(self.targets[0])
    def _eval_anticommutator_XGate(self, other, **hints):
        # {H, X} = sqrt(2)*1
        return sqrt(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        # {H, Y} = 0
        return Integer(0)
    def _eval_anticommutator_ZGate(self, other, **hints):
        # {H, Z} = sqrt(2)*1
        return sqrt(2)*IdentityGate(self.targets[0])
class XGate(HermitianOperator, OneQubitGate):
    """The single qubit X, or NOT, gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'X'
    gate_name_latex = u'X'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('X', format)
    def plot_gate(self, circ_plot, gate_idx):
        # Ordinary boxed rendering; CGate uses plot_gate_plus for the
        # simplified oplus form instead.
        OneQubitGate.plot_gate(self,circ_plot,gate_idx)
    def plot_gate_plus(self, circ_plot, gate_idx):
        # Simplified CNOT-target rendering (the circled-plus symbol).
        circ_plot.not_point(
            gate_idx, int(self.label[0])
        )
    def _eval_commutator_YGate(self, other, **hints):
        # [X, Y] = 2*I*Z
        return Integer(2)*I*ZGate(self.targets[0])
    def _eval_anticommutator_XGate(self, other, **hints):
        # {X, X} = 2*1
        return Integer(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        # Distinct Pauli operators anticommute.
        return Integer(0)
    def _eval_anticommutator_ZGate(self, other, **hints):
        # Distinct Pauli operators anticommute.
        return Integer(0)
class YGate(HermitianOperator, OneQubitGate):
    """The single qubit Y gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'Y'
    gate_name_latex = u'Y'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('Y', format)
    def _eval_commutator_ZGate(self, other, **hints):
        # [Y, Z] = 2*I*X
        return Integer(2)*I*XGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        # {Y, Y} = 2*1
        return Integer(2)*IdentityGate(self.targets[0])
    def _eval_anticommutator_ZGate(self, other, **hints):
        # Distinct Pauli operators anticommute.
        return Integer(0)
class ZGate(HermitianOperator, OneQubitGate):
    """The single qubit Z gate.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'Z'
    gate_name_latex = u'Z'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('Z', format)
    def _eval_commutator_XGate(self, other, **hints):
        # [Z, X] = 2*I*Y
        return Integer(2)*I*YGate(self.targets[0])
    def _eval_anticommutator_YGate(self, other, **hints):
        # Distinct Pauli operators anticommute.
        return Integer(0)
class PhaseGate(OneQubitGate):
    """The single qubit phase, or S, gate.
    This gate rotates the phase of the state by pi/2 if the state is ``|1>`` and
    does nothing if the state is ``|0>``.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'S'
    gate_name_latex = u'S'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('S', format)
    def _eval_commutator_ZGate(self, other, **hints):
        # S and Z are both diagonal in the computational basis.
        return Integer(0)
    def _eval_commutator_TGate(self, other, **hints):
        # S and T are both diagonal in the computational basis.
        return Integer(0)
class TGate(OneQubitGate):
    """The single qubit pi/8 gate.
    This gate rotates the phase of the state by pi/4 if the state is ``|1>`` and
    does nothing if the state is ``|0>``.
    Parameters
    ----------
    target : int
        The target qubit this gate will apply to.
    Examples
    ========
    """
    gate_name = u'T'
    gate_name_latex = u'T'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('T', format)
    def _eval_commutator_ZGate(self, other, **hints):
        # T and Z are both diagonal in the computational basis.
        return Integer(0)
    def _eval_commutator_PhaseGate(self, other, **hints):
        # T and S are both diagonal in the computational basis.
        return Integer(0)
# Aliases for gate names.
H = HadamardGate
X = XGate
Y = YGate
Z = ZGate
T = TGate
# Both ``Phase`` and ``S`` refer to the same phase gate.
Phase = S = PhaseGate
#-----------------------------------------------------------------------------
# 2 Qubit Gates
#-----------------------------------------------------------------------------
class CNotGate(HermitianOperator, CGate, TwoQubitGate):
    """Two qubit controlled-NOT.
    This gate performs the NOT or X gate on the target qubit if the control
    qubits all have the value 1.
    Parameters
    ----------
    label : tuple
        A tuple of the form (control, target).
    Examples
    ========
    >>> from sympy.physics.quantum.gate import CNOT
    >>> from sympy.physics.quantum.qapply import qapply
    >>> from sympy.physics.quantum.qubit import Qubit
    >>> c = CNOT(1,0)
    >>> qapply(c*Qubit('10')) # note that qubits are indexed from right to left
    |11>
    """
    gate_name = 'CNOT'
    gate_name_latex = u'CNOT'
    simplify_cgate = True
    #-------------------------------------------------------------------------
    # Initialization
    #-------------------------------------------------------------------------
    @classmethod
    def _eval_args(cls, args):
        # Unlike CGate, the label is a flat (control, target) pair, so the
        # plain Gate validation applies.
        args = Gate._eval_args(args)
        return args
    @classmethod
    def _eval_hilbert_space(cls, args):
        """This returns the smallest possible Hilbert space."""
        return ComplexSpace(2)**(_max(args) + 1)
    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------
    @property
    def min_qubits(self):
        """The minimum number of qubits this gate needs to act on."""
        return _max(self.label) + 1
    @property
    def targets(self):
        """A tuple of target qubits."""
        return (self.label[1],)
    @property
    def controls(self):
        """A tuple of control qubits."""
        return (self.label[0],)
    @property
    def gate(self):
        """The non-controlled gate that will be applied to the targets."""
        return XGate(self.label[1])
    #-------------------------------------------------------------------------
    # Print methods
    #-------------------------------------------------------------------------
    # The default printing of Gate works better than those of CGate, so we
    # go around the overridden methods in CGate.
    def _print_label(self, printer, *args):
        return Gate._print_label(self, printer, *args)
    def _pretty(self, printer, *args):
        return Gate._pretty(self, printer, *args)
    def _latex(self, printer, *args):
        return Gate._latex(self, printer, *args)
    #-------------------------------------------------------------------------
    # Commutator/AntiCommutator
    #-------------------------------------------------------------------------
    def _eval_commutator_ZGate(self, other, **hints):
        """[CNOT(i, j), Z(i)] == 0."""
        # Only known to vanish when Z acts on the control wire.
        if self.controls[0] == other.targets[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
    def _eval_commutator_TGate(self, other, **hints):
        """[CNOT(i, j), T(i)] == 0."""
        return self._eval_commutator_ZGate(other, **hints)
    def _eval_commutator_PhaseGate(self, other, **hints):
        """[CNOT(i, j), S(i)] == 0."""
        return self._eval_commutator_ZGate(other, **hints)
    def _eval_commutator_XGate(self, other, **hints):
        """[CNOT(i, j), X(j)] == 0."""
        # Only known to vanish when X acts on the target wire.
        if self.targets[0] == other.targets[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
    def _eval_commutator_CNotGate(self, other, **hints):
        """[CNOT(i, j), CNOT(i,k)] == 0."""
        # CNOTs sharing the same control commute.
        if self.controls[0] == other.controls[0]:
            return Integer(0)
        else:
            raise NotImplementedError('Commutator not implemented: %r' % other)
class SwapGate(TwoQubitGate):
    """Two qubit SWAP gate.
    This gate swap the values of the two qubits.
    Parameters
    ----------
    label : tuple
        A tuple of the form (target1, target2).
    Examples
    ========
    """
    gate_name = 'SWAP'
    gate_name_latex = u'SWAP'
    def get_target_matrix(self, format='sympy'):
        return matrix_cache.get_matrix('SWAP', format)
    def decompose(self, **options):
        """Decompose the SWAP gate into CNOT gates."""
        # SWAP(i, j) = CNOT(i, j) * CNOT(j, i) * CNOT(i, j)
        i, j = self.targets[0], self.targets[1]
        g1 = CNotGate(i, j)
        g2 = CNotGate(j, i)
        return g1*g2*g1
    def plot_gate(self, circ_plot, gate_idx):
        # A vertical line connecting the two wires, with an X at each end.
        min_wire = int(_min(self.targets))
        max_wire = int(_max(self.targets))
        circ_plot.control_line(gate_idx, min_wire, max_wire)
        circ_plot.swap_point(gate_idx, min_wire)
        circ_plot.swap_point(gate_idx, max_wire)
    def _represent_ZGate(self, basis, **options):
        """Represent the SWAP gate in the computational basis.
        The following representation is used to compute this:
        SWAP = |1><1|x|1><1| + |0><0|x|0><0| + |1><0|x|0><1| + |0><1|x|1><0|
        """
        format = options.get('format', 'sympy')
        targets = [int(t) for t in self.targets]
        min_target = _min(targets)
        max_target = _max(targets)
        nqubits = options.get('nqubits', self.min_qubits)
        op01 = matrix_cache.get_matrix('op01', format)
        op10 = matrix_cache.get_matrix('op10', format)
        op11 = matrix_cache.get_matrix('op11', format)
        op00 = matrix_cache.get_matrix('op00', format)
        eye2 = matrix_cache.get_matrix('eye2', format)
        result = None
        # Sum the four tensor-product terms from the expansion above; all
        # wires other than the two targets carry the 2x2 identity.
        for i, j in ((op01, op10), (op10, op01), (op00, op00), (op11, op11)):
            product = nqubits*[eye2]
            product[nqubits - min_target - 1] = i
            product[nqubits - max_target - 1] = j
            new_result = matrix_tensor_product(*product)
            if result is None:
                result = new_result
            else:
                result = result + new_result
        return result
# Aliases for gate names.
CNOT = CNotGate
SWAP = SwapGate
def CPHASE(a,b): return CGateS((a,),Z(b))
#-----------------------------------------------------------------------------
# Represent
#-----------------------------------------------------------------------------
def represent_zbasis(controls, targets, target_matrix, nqubits, format='sympy'):
    """Represent a gate with controls, targets and target_matrix.
    This function does the low-level work of representing gates as matrices
    in the standard computational basis (ZGate). Currently, we support two
    main cases:
    1. One target qubit and no control qubits.
    2. One target qubits and multiple control qubits.
    For the base of multiple controls, we use the following expression [1]:
    1_{2**n} + (|1><1|)^{(n-1)} x (target-matrix - 1_{2})
    Parameters
    ----------
    controls : list, tuple
        A sequence of control qubits.
    targets : list, tuple
        A sequence of target qubits.
    target_matrix : sympy.Matrix, numpy.matrix, scipy.sparse
        The matrix form of the transformation to be performed on the target
        qubits. The format of this matrix must match that passed into
        the `format` argument.
    nqubits : int
        The total number of qubits used for the representation.
    format : str
        The format of the final matrix ('sympy', 'numpy', 'scipy.sparse').
    Examples
    ========
    References
    ----------
    [1] http://www.johnlapeyre.com/qinf/qinf_html/node6.html.
    """
    controls = [int(x) for x in controls]
    targets = [int(x) for x in targets]
    nqubits = int(nqubits)
    # This checks for the format as well.
    op11 = matrix_cache.get_matrix('op11', format)
    eye2 = matrix_cache.get_matrix('eye2', format)
    # Plain single qubit case
    if len(controls) == 0 and len(targets) == 1:
        product = []
        bit = targets[0]
        # Fill product with [I1,Gate,I2] such that the unitaries,
        # I, cause the gate to be applied to the correct Qubit
        if bit != nqubits - 1:
            product.append(matrix_eye(2**(nqubits - bit - 1), format=format))
        product.append(target_matrix)
        if bit != 0:
            product.append(matrix_eye(2**bit, format=format))
        return matrix_tensor_product(*product)
    # Single target, multiple controls.
    elif len(targets) == 1 and len(controls) >= 1:
        target = targets[0]
        # Build the non-trivial part.
        product2 = []
        for i in range(nqubits):
            product2.append(matrix_eye(2, format=format))
        # Qubit ordering is right-to-left, hence the nqubits-1-k indexing.
        for control in controls:
            product2[nqubits - 1 - control] = op11
        product2[nqubits - 1 - target] = target_matrix - eye2
        return matrix_eye(2**nqubits, format=format) + \
            matrix_tensor_product(*product2)
    # Multi-target, multi-control is not yet implemented.
    else:
        raise NotImplementedError(
            'The representation of multi-target, multi-control gates '
            'is not implemented.'
        )
#-----------------------------------------------------------------------------
# Gate manipulation functions.
#-----------------------------------------------------------------------------
def gate_simp(circuit):
    """Simplifies gates symbolically
    It first sorts gates using gate_sort. It then applies basic
    simplification rules to the circuit, e.g., XGate**2 = Identity
    """
    # Bubble sort out gates that commute.
    circuit = gate_sort(circuit)
    # Do simplifications by subing a simplification into the first element
    # which can be simplified. We recursively call gate_simp with new circuit
    # as input more simplifications exist.
    if isinstance(circuit, Add):
        return sum(gate_simp(t) for t in circuit.args)
    elif isinstance(circuit, Mul):
        circuit_args = circuit.args
    elif isinstance(circuit, Pow):
        b, e = circuit.as_base_exp()
        circuit_args = (gate_simp(b)**e,)
    else:
        return circuit
    # Iterate through each element in circuit, simplify if possible.
    for i in range(len(circuit_args)):
        # H,X,Y or Z squared is 1.
        # T**2 = S, S**2 = Z
        if isinstance(circuit_args[i], Pow):
            if isinstance(circuit_args[i].base,
                          (HadamardGate, XGate, YGate, ZGate)) \
                    and isinstance(circuit_args[i].exp, Number):
                # Build a new circuit taking replacing the
                # H,X,Y,Z squared with one.
                newargs = (circuit_args[:i] +
                           (circuit_args[i].base**(circuit_args[i].exp % 2),) +
                           circuit_args[i + 1:])
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
            elif isinstance(circuit_args[i].base, PhaseGate):
                # Build a new circuit taking old circuit but splicing
                # in simplification.
                newargs = circuit_args[:i]
                # Replace PhaseGate**2 with ZGate.
                newargs = newargs + (ZGate(circuit_args[i].base.args[0])**
                                     (Integer(circuit_args[i].exp/2)), circuit_args[i].base**
                                     (circuit_args[i].exp % 2))
                # Append the last elements.
                newargs = newargs + circuit_args[i + 1:]
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
            elif isinstance(circuit_args[i].base, TGate):
                # Build a new circuit taking all the old elements.
                newargs = circuit_args[:i]
                # Put an Phasegate in place of any TGate**2.
                newargs = newargs + (PhaseGate(circuit_args[i].base.args[0])**
                                     Integer(circuit_args[i].exp/2), circuit_args[i].base**
                                     (circuit_args[i].exp % 2))
                # Append the last elements.
                newargs = newargs + circuit_args[i + 1:]
                # Recursively simplify the new circuit.
                circuit = gate_simp(Mul(*newargs))
                break
    return circuit
def gate_sort(circuit):
    """Sorts the gates while keeping track of commutation relations
    This function uses a bubble sort to rearrange the order of gate
    application. Keeps track of Quantum computations special commutation
    relations (e.g. things that apply to the same Qubit do not commute with
    each other)
    circuit is the Mul of gates that are to be sorted.
    """
    # Make sure we have an Add or Mul.
    if isinstance(circuit, Add):
        return sum(gate_sort(t) for t in circuit.args)
    if isinstance(circuit, Pow):
        return gate_sort(circuit.base)**circuit.exp
    elif isinstance(circuit, Gate):
        return circuit
    if not isinstance(circuit, Mul):
        return circuit
    changes = True
    while changes:
        changes = False
        circ_array = circuit.args
        for i in range(len(circ_array) - 1):
            # Go through each element and switch ones that are in wrong order
            if isinstance(circ_array[i], (Gate, Pow)) and \
                    isinstance(circ_array[i + 1], (Gate, Pow)):
                # If we have a Pow object, look at only the base
                first_base, first_exp = circ_array[i].as_base_exp()
                second_base, second_exp = circ_array[i + 1].as_base_exp()
                # Use sympy's hash based sorting. This is not mathematical
                # sorting, but is rather based on comparing hashes of objects.
                # See Basic.compare for details.
                if first_base.compare(second_base) > 0:
                    # Commuting gates can simply be swapped.
                    if Commutator(first_base, second_base).doit() == 0:
                        new_args = (circuit.args[:i] + (circuit.args[i + 1],) +
                                    (circuit.args[i],) + circuit.args[i + 2:])
                        circuit = Mul(*new_args)
                        circ_array = circuit.args
                        changes = True
                        break
                    # Anticommuting gates pick up a sign on each swap.
                    if AntiCommutator(first_base, second_base).doit() == 0:
                        new_args = (circuit.args[:i] + (circuit.args[i + 1],) +
                                    (circuit.args[i],) + circuit.args[i + 2:])
                        sign = Integer(-1)**(first_exp*second_exp)
                        circuit = sign*Mul(*new_args)
                        circ_array = circuit.args
                        changes = True
                        break
    return circuit
#-----------------------------------------------------------------------------
# Utility functions
#-----------------------------------------------------------------------------
def random_circuit(ngates, nqubits, gate_space=(X, Y, Z, S, T, H, CNOT, SWAP)):
    """Return a random circuit of ngates and nqubits.

    This uses an equally weighted sample of (X, Y, Z, S, T, H, CNOT, SWAP)
    gates.

    Parameters
    ----------
    ngates : int
        The number of gates in the circuit.
    nqubits : int
        The number of qubits in the circuit.
    gate_space : tuple
        A tuple of the gate classes that will be used in the circuit.
        Repeating gate classes multiple times in this tuple will increase
        the frequency they appear in the random circuit.
    """
    wires = range(nqubits)
    chosen_gates = []
    for _ in range(ngates):
        gate_cls = random.choice(gate_space)
        if gate_cls == CNotGate or gate_cls == SwapGate:
            # Two-qubit gates need a pair of distinct wires.
            picked = random.sample(wires, 2)
            chosen_gates.append(gate_cls(*picked))
        else:
            chosen_gates.append(gate_cls(random.choice(wires)))
    return Mul(*chosen_gates)
def zx_basis_transform(self, format='sympy'):
    """Transformation matrix from Z to X basis."""
    # NOTE(review): this is a module-level function, yet it takes an unused
    # ``self`` parameter -- likely a leftover from a former method; confirm
    # before changing the signature, as callers must pass a placeholder.
    return matrix_cache.get_matrix('ZX', format)
def zy_basis_transform(self, format='sympy'):
    """Transformation matrix from Z to Y basis."""
    # NOTE(review): unused ``self`` parameter, same leftover pattern as
    # zx_basis_transform above -- confirm before changing the signature.
    return matrix_cache.get_matrix('ZY', format)
# ---- end of first file (license: bsd-3-clause) ----
# ---- second file: tensorflow/python/kernel_tests/pool_test.py ----
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for unified pooling functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import nn_ops
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
def pool_direct_single_axis(
    input,  # pylint: disable=redefined-builtin
    axis,
    window_size,
    pooling_type,
    padding,
    dilation_rate,
    stride):
  """Numpy reference implementation of pooling along a single axis.

  Intended for testing only; favors clarity over speed.

  Args:
    input: numpy array.
    axis: axis along which to perform pooling.
    window_size: int >= 1.  Size of the pooling window within `axis`.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: int >= 1.  Stride at which input samples are taken
      within the window.
    stride: int >= 1.  Stride at which output positions are generated.

  Returns:
    numpy array with the pooled axis resized accordingly.

  Raises:
    ValueError: if `padding` or `pooling_type` is invalid.
  """
  effective_window = (window_size - 1) * dilation_rate + 1
  axis_len = input.shape[axis]
  if padding == "SAME":
    out_len = int(math.ceil(axis_len / stride))
    pad_total = max(
        0, (out_len - 1) * stride + effective_window - axis_len)
    pad_before = pad_total // 2
  elif padding == "VALID":
    out_len = int(
        math.ceil((axis_len - effective_window + 1) / stride))
    pad_before = 0
  else:
    raise ValueError("Unsupported padding type: %r" % (padding,))
  out_shape = input.shape[:axis] + (out_len,) + input.shape[axis + 1:]
  out = np.zeros(out_shape, input.dtype)
  leading = tuple(np.s_[:] for _ in range(axis))
  if pooling_type == "MAX":
    reduce_fn = np.max
  elif pooling_type == "AVG":
    reduce_fn = np.mean
  else:
    raise ValueError("Unsupported pooling type: %r" % (pooling_type,))
  for pos in range(out_len):
    start = pos * stride - pad_before
    stop = min(start + effective_window, axis_len)
    if start < 0:
      # Skip the sample that falls into the (virtual) left padding.
      start += dilation_rate
    window_slice = np.s_[start:stop:dilation_rate]
    out[leading + (pos,)] = reduce_fn(
        input[leading + (window_slice,)], axis=axis)
  return out
def pool_direct(
    input,
    window_shape,
    pooling_type,
    padding,  # pylint: disable=redefined-builtin
    dilation_rate,
    strides,
    data_format=None):
  """Numpy reference implementation of N-dimensional pooling.

  Applies pool_direct_single_axis once per spatial dimension.  Intended for
  testing only; see tensorflow.nn.pool for argument semantics.

  Args:
    input: numpy array of rank N+2.
    window_shape: Sequence of N ints >= 1.
    pooling_type: either "MAX" or "AVG".
    padding: either "SAME" or "VALID".
    dilation_rate: Sequence of N ints >= 1.
    strides: Sequence of N ints >= 1.
    data_format: If specified and starting with "NC", the channel dimension
      comes second rather than last.

  Returns:
    pooling output array of rank N+2.

  Raises:
    ValueError: if arguments are invalid.
  """
  if data_format is not None and data_format.startswith("NC"):
    first_spatial_dim = 2
  else:
    first_spatial_dim = 1
  result = input
  for dim, extent in enumerate(window_shape):
    result = pool_direct_single_axis(
        input=result,
        axis=first_spatial_dim + dim,
        window_size=extent,
        pooling_type=pooling_type,
        padding=padding,
        dilation_rate=dilation_rate[dim],
        stride=strides[dim])
  return result
class PoolingTest(test.TestCase):
  def _test(self, input_shape, **kwargs):
    """Compare nn_ops.pool against the numpy reference for one config."""
    # Use negative numbers to make sure there isn't any zero padding getting
    # used.
    x = -np.arange(
        np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    y1 = pool_direct(input=x, **kwargs)
    y2 = nn_ops.pool(input=x, **kwargs)
    self.assertAllClose(y1, y2.eval(), rtol=1e-2, atol=1e-2)
def testPoolSimple(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["MAX", "AVG"]:
self._test(
input_shape=[1, 1, 10, 1],
window_shape=[1, 3],
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=[1, 2])
  def testPool1D(self):
    """Sweep 1-D pooling configs; dilation only where padding allows it."""
    with self.test_session():
      for padding in ["SAME", "VALID"]:
        for pooling_type in ["MAX", "AVG"]:
          for input_shape in [[2, 9, 2], [2, 10, 2]]:
            for window_shape in [[1], [2], [3]]:
              # Dilation > 1 is only exercised with VALID padding.
              if padding != "SAME":
                for dilation_rate in [[1], [2], [3]]:
                  self._test(
                      input_shape=input_shape,
                      window_shape=window_shape,
                      padding=padding,
                      pooling_type=pooling_type,
                      dilation_rate=dilation_rate,
                      strides=[1])
              for strides in [[1], [2], [3]]:
                if np.any(np.array(strides) > window_shape):
                  continue
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=[1],
                    strides=strides)
  def testPool2D(self):
    """Sweep 2-D pooling configs; dilation only where padding allows it."""
    with self.test_session():
      for padding in ["SAME", "VALID"]:
        for pooling_type in ["MAX", "AVG"]:
          for input_shape in [[2, 9, 10, 2], [2, 10, 9, 2]]:
            for window_shape in [[1, 1], [2, 1], [2, 3]]:
              # Dilation > 1 is only exercised with VALID padding.
              if padding != "SAME":
                for dilation_rate in [[1, 1], [2, 1], [1, 2], [2, 3]]:
                  self._test(
                      input_shape=input_shape,
                      window_shape=window_shape,
                      padding=padding,
                      pooling_type=pooling_type,
                      dilation_rate=dilation_rate,
                      strides=[1, 1])
              for strides in [[1, 1], [2, 1], [1, 2], [2, 3]]:
                if np.any(np.array(strides) > window_shape):
                  continue
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=[1, 1],
                    strides=strides)
  def testPool3D(self):
    """Sweep 3-D pooling configs; dilation only where padding allows it."""
    with self.test_session():
      for padding in ["SAME", "VALID"]:
        for pooling_type in ["MAX", "AVG"]:
          for input_shape in [[2, 9, 10, 11, 2], [2, 10, 9, 11, 2]]:
            for window_shape in [[1, 1, 1], [2, 1, 2], [2, 3, 2]]:
              # Dilation > 1 is only exercised with VALID padding.
              if padding != "SAME":
                for dilation_rate in [[1, 1, 1], [2, 1, 2], [1, 2, 2],
                                      [2, 3, 3]]:
                  self._test(
                      input_shape=input_shape,
                      window_shape=window_shape,
                      padding=padding,
                      pooling_type=pooling_type,
                      dilation_rate=dilation_rate,
                      strides=[1, 1, 1])
              for strides in [[1, 1, 1], [2, 1, 2], [1, 2, 2], [2, 3, 3]]:
                if np.any(np.array(strides) > window_shape):
                  continue
                self._test(
                    input_shape=input_shape,
                    window_shape=window_shape,
                    padding=padding,
                    pooling_type=pooling_type,
                    dilation_rate=[1, 1, 1],
                    strides=strides)
  def testPoolNC(self):
    """Check channels-first ("NC*") data formats in 1-D, 2-D and 3-D."""
    if test.is_gpu_available(cuda_only=True):
      # "NC*" format is currently only supported on CUDA.
      with self.test_session(use_gpu=True):
        for padding in ["SAME", "VALID"]:
          self._test(
              input_shape=[2, 2, 9],
              window_shape=[2],
              padding=padding,
              pooling_type="MAX",
              strides=[1],
              dilation_rate=[1],
              data_format="NCW")
          self._test(
              input_shape=[2, 2, 9],
              window_shape=[2],
              padding=padding,
              pooling_type="MAX",
              strides=[2],
              dilation_rate=[1],
              data_format="NCW")
          self._test(
              input_shape=[2, 2, 7, 9],
              window_shape=[2, 2],
              padding=padding,
              pooling_type="MAX",
              strides=[1, 2],
              dilation_rate=[1, 1],
              data_format="NCHW")
          self._test(
              input_shape=[2, 2, 7, 5, 3],
              window_shape=[2, 2, 2],
              padding=padding,
              pooling_type="MAX",
              strides=[1, 2, 1],
              dilation_rate=[1, 1, 1],
              data_format="NCDHW")
          # Dilated case: only valid padding supports dilation here.
          self._test(
              input_shape=[2, 2, 7, 9],
              window_shape=[2, 2],
              padding="VALID",
              pooling_type="MAX",
              strides=[1, 1],
              dilation_rate=[2, 2],
              data_format="NCHW")
  def _test_gradient(self, input_shape, **kwargs):
    """Numerically check the gradient of nn_ops.pool for one config."""
    x_val = -np.arange(
        np.prod(input_shape), dtype=np.float32).reshape(input_shape) - 1
    x = constant_op.constant(x_val, name="x", dtype=dtypes.float32)
    output = nn_ops.pool(input=x, **kwargs)
    y_shape = output.get_shape().as_list()
    # Compare the symbolic gradient against a finite-difference estimate.
    err = gradient_checker.compute_gradient_error(
        [x], [input_shape], output, y_shape, x_init_value=[x_val])
    err_tolerance = 1e-2
    self.assertLess(err, err_tolerance)
def testGradient1D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 5, 2], [1, 4, 1]]:
for window_shape in [[1], [2]]:
if padding != "SAME":
for dilation_rate in [[1], [2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1])
for strides in [[1], [2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1],
strides=strides)
def testGradient2D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[2, 4, 5, 2], [1, 5, 4, 1]]:
for window_shape in [[1, 1], [2, 1], [2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1], [2, 1], [2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1])
for strides in [[1, 1], [2, 1], [1, 2], [2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1],
strides=strides)
def testGradient3D(self):
with self.test_session():
for padding in ["SAME", "VALID"]:
for pooling_type in ["AVG", "MAX"]:
for input_shape in [[1, 3, 5, 4, 1], [1, 5, 4, 3, 1]]:
for window_shape in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if padding != "SAME":
for dilation_rate in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
self._test_gradient(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=dilation_rate,
strides=[1, 1, 1])
for strides in [[1, 1, 1], [2, 1, 2], [2, 2, 2]]:
if np.any(np.array(strides) > window_shape):
continue
self._test(
input_shape=input_shape,
window_shape=window_shape,
padding=padding,
pooling_type=pooling_type,
dilation_rate=[1, 1, 1],
strides=strides)
# Run the module's tests when executed directly as a script.
if __name__ == "__main__":
  test.main()
| apache-2.0 |
DTOcean/dtocean-core | dtocean_core/utils/database.py | 1 | 40368 | # -*- coding: utf-8 -*-
# Copyright (C) 2016-2018 Mathew Topper
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division, print_function
# Standard Library
import os
import re
import time
import shutil
import string
import logging
import datetime as dt
# External modules
import yaml
import argparse
import geoalchemy2
import numpy as np
import pandas as pd
from shapely import geos, wkb
from win32com.client import Dispatch
try:
from yaml import CLoader as Loader
except ImportError:
from yaml import Loader
# DTOcean modules
from aneris.utilities.database import PostgreSQL
from polite.paths import ObjDirectory, UserDataDirectory
from polite.configuration import ReadYAML
# Local modules
from . import SmartFormatter
from .files import onerror
# Set up logging
module_logger = logging.getLogger(__name__)
def bathy_records_to_strata(bathy_records=None,
                            pre_bathy=None,
                            has_mannings=False):
    """Convert the bathymetry layers table returned by the database into
    Strata structure raw input.

    Either raw database records or a pre-built (table, xi, yj) triple must
    be supplied. Returns a dict with "values" (3D depth and sediment
    arrays, stacked per sediment layer) and "coords" ([xi, yj, names]).
    """
    # NOTE(review): time.clock() was removed in Python 3.8 -- fine for
    # this Python 2 module, but needs changing on migration.
    loop_start_time = time.clock()
    # Allow a predefined bathymetry table and grid dimensions to be passed
    # instead of the DB records.
    if bathy_records is None and pre_bathy is None:
        errStr = "One of arguments bathy_records or pre_bathy must be given"
        raise ValueError(errStr)
    elif bathy_records is not None:
        bathy_table, xi, yj = init_bathy_records(bathy_records,
                                                 has_mannings)
    elif pre_bathy is not None:
        bathy_table, xi, yj = pre_bathy
    else:
        return None
    msg = "Building layers..."
    module_logger.debug(msg)
    # Rasterise each sediment layer in ascending "layer_order".
    layers = list(set(bathy_table["layer_order"]))
    layers.sort()
    bathy_layer_groups = bathy_table.groupby("layer_order")
    layer_depths = []
    layer_sediments = []
    for layer in layers:
        layer_table = bathy_layer_groups.get_group(layer)
        layer_depth, layer_sediment = build_bathy_layer(layer_table, xi, yj)
        layer_depths.append(layer_depth)
        layer_sediments.append(layer_sediment)
    # Stack the 2D per-layer grids into (x, y, layer) arrays.
    depth_array = np.dstack(layer_depths)
    sediment_array = np.dstack(layer_sediments)
    layer_names = ["layer {}".format(x) for x in layers]
    raw_strata = {"values": {"depth": depth_array,
                             "sediment": sediment_array},
                  "coords": [xi, yj, layer_names]}
    loop_end_time = time.clock()
    loop_time = loop_end_time - loop_start_time
    msg = ("Time elapsed building {} layer(s) was "
           "{} seconds").format(len(layers), loop_time)
    module_logger.debug(msg)
    return raw_strata
def bathy_records_to_mannings(bathy_records=None, pre_bathy=None):
    """Convert the bathymetry layers table returned by the database into
    mannings layer structure raw input.

    Either raw database records or a pre-built (table, xi, yj) triple
    must be supplied. Only the first layer ("layer_order" == 1) carries
    Manning's numbers. Returns {"values": 2D array, "coords": [xi, yj]}.
    """
    loop_start_time = time.clock()
    # Allow a predefined bathymetry table and grid dimensions to be passed
    # instead of the DB records.
    if bathy_records is None and pre_bathy is None:
        errStr = "One of arguments bathy_records or pre_bathy must be given"
        raise ValueError(errStr)
    elif bathy_records is not None:
        # has_mannings is forced True so the "mannings_no" column exists.
        bathy_table, xi, yj = init_bathy_records(bathy_records,
                                                 True)
    elif pre_bathy is not None:
        bathy_table, xi, yj = pre_bathy
    else:
        return None
    msg = "Building mannings..."
    module_logger.debug(msg)
    bathy_layer_groups = bathy_table.groupby("layer_order")
    layer_one_table = bathy_layer_groups.get_group(1)
    mannings_array = build_mannings_layer(layer_one_table, xi, yj)
    mannings_raw = {"values": mannings_array,
                    "coords": [xi, yj]}
    loop_end_time = time.clock()
    loop_time = loop_end_time - loop_start_time
    msg = ("Time elapsed building mannings number array was "
           "{} seconds").format(loop_time)
    module_logger.debug(msg)
    return mannings_raw
def tidal_series_records_to_xset(tidal_records):
    """Convert the tidal time series table returned by the database into
    tidal time series structure raw input.

    Returns {"values": {"U", "V", "SSH", "TI"} of (x, y, time) arrays,
    "coords": [xi, yj, datetimes]}, or None when no records are given.
    """
    loop_start_time = time.clock()
    msg = "Building DataFrame from {} records".format(len(tidal_records))
    module_logger.debug(msg)
    tidal_table = pd.DataFrame.from_records(tidal_records, columns=[
                                                    'utm_point',
                                                    'measure_date',
                                                    'measure_time',
                                                    'u',
                                                    'v',
                                                    'turbulence_intensity',
                                                    'ssh'])
    if tidal_table.empty: return None
    msg = "Converting PostGIS Point types to coordinates..."
    module_logger.debug(msg)
    tidal_table = point_to_xy(tidal_table)
    msg = "Getting grid extents..."
    module_logger.debug(msg)
    xi, yj = get_grid_coords(tidal_table)
    msg = "Joining dates and times..."
    module_logger.debug(msg)
    # Merge the separate date and time columns into single datetimes.
    tidal_table["datetime"] = [dt.datetime.combine(d, t) for
                               d, t in zip(tidal_table["measure_date"],
                                           tidal_table["measure_time"])]
    tidal_table = tidal_table.drop("measure_date", 1)
    tidal_table = tidal_table.drop("measure_time", 1)
    msg = "Building time steps..."
    module_logger.debug(msg)
    steps = list(set(tidal_table["datetime"]))
    steps.sort()
    tidal_table_groups = tidal_table.groupby("datetime")
    u_steps = []
    v_steps = []
    ssh_steps = []
    ti_steps = []
    # Rasterise each time step onto the grid.
    for step in steps:
        step_table = tidal_table_groups.get_group(step)
        (u_step,
         v_step,
         ssh_step,
         ti_step) = build_tidal_series_step(step_table, xi, yj)
        u_steps.append(u_step)
        v_steps.append(v_step)
        ssh_steps.append(ssh_step)
        ti_steps.append(ti_step)
    # Stack the per-step grids into (x, y, time) arrays.
    u_array = np.dstack(u_steps)
    v_array = np.dstack(v_steps)
    ssh_array = np.dstack(ssh_steps)
    ti_array = np.dstack(ti_steps)
    raw = {"values": {"U": u_array,
                      'V': v_array,
                      "SSH": ssh_array,
                      "TI": ti_array},
           "coords": [xi, yj, steps]}
    loop_end_time = time.clock()
    loop_time = loop_end_time - loop_start_time
    msg = ("Time elapsed building {} step(s) was "
           "{} seconds").format(len(steps), loop_time)
    module_logger.debug(msg)
    return raw
def init_bathy_records(bathy_records, has_mannings=False):
    """Build the bathymetry DataFrame and grid axes from raw DB records.

    Args:
        bathy_records: sequence of bathymetry record tuples from the DB.
        has_mannings: records include a "mannings_no" field.

    Returns:
        tuple: (bathy_table, xi, yj), or None when the record set is
        empty. NOTE(review): callers unpack three values directly, so a
        None return would raise TypeError at the call site -- confirm
        empty record sets cannot occur here.
    """
    msg = "Building DataFrame from {} records".format(len(bathy_records))
    module_logger.debug(msg)
    bathy_cols = ["utm_point", "depth"]
    if has_mannings: bathy_cols.append("mannings_no")
    bathy_cols.extend(["layer_order",
                       "initial_depth",
                       "sediment_type"])
    bathy_table = pd.DataFrame.from_records(bathy_records, columns=bathy_cols)
    if bathy_table.empty: return None
    msg = "Converting PostGIS Point types to coordinates..."
    module_logger.debug(msg)
    bathy_table = point_to_xy(bathy_table)
    msg = "Getting grid extents..."
    module_logger.debug(msg)
    xi, yj = get_grid_coords(bathy_table)
    return bathy_table, xi, yj
def point_to_xy(df,
                point_column="utm_point",
                decimals=2,
                drop_point_column=True):
    """Add rounded "x" and "y" columns decoded from a WKB point column.

    Args:
        df: DataFrame holding hex-encoded WKB points in point_column.
        point_column: name of the column to decode.
        decimals: decimal places the coordinates are rounded to.
        drop_point_column: remove the original point column afterwards.

    Returns:
        The DataFrame with the new coordinate columns added.
    """
    coords = [list(wkb.loads(point_hex, hex=True).coords)[0]
              for point_hex in df[point_column]]
    xs = np.array([c[0] for c in coords]).round(decimals)
    ys = np.array([c[1] for c in coords]).round(decimals)
    df["x"] = xs
    df["y"] = ys
    if drop_point_column:
        df = df.drop(point_column, 1)
    return df
def get_grid_coords(df, xlabel="x", ylabel="y"):
    """Extract sorted, unique grid axis coordinates from a points table.

    Each axis is checked for regular spacing. A single-coordinate axis is
    now accepted: previously an axis with one unique value produced an
    empty spacing array and raised a misleading "Distances ... are not
    equal" error with no lengths listed.

    Args:
        df: DataFrame with coordinate columns.
        xlabel, ylabel: names of the x and y coordinate columns.

    Returns:
        tuple: (xi, yj) sorted unique numpy arrays of axis coordinates.

    Raises:
        ValueError: if spacing along either axis is not uniform.
    """
    def _check_regular(coords, axis_name):
        # With fewer than two coordinates there is no spacing to validate.
        if len(coords) < 2:
            return
        dist = coords[1:] - coords[:-1]
        if len(np.unique(dist)) != 1:
            safe_dist = [str(d) for d in np.unique(dist)]
            dist_str = ", ".join(safe_dist)
            errStr = ("Distances in {}-direction are not equal. Unique "
                      "lengths are: {}").format(axis_name, dist_str)
            raise ValueError(errStr)
    xi = np.unique(df[xlabel])
    _check_regular(xi, "x")
    yj = np.unique(df[ylabel])
    _check_regular(yj, "y")
    return xi, yj
def build_bathy_layer(layer_table, xi, yj):
    """Rasterise one sediment layer onto the (xi, yj) grid.

    Returns (depth_array, sediment_array); cells without a matching
    record keep the NaN / None fill values. Depth is the record depth
    minus its initial depth.
    """
    shape = (len(xi), len(yj))
    depths = np.full(shape, np.nan)
    sediments = np.full(shape, None, dtype="object")
    def _index_of(axis, value):
        # Each coordinate must match exactly one grid line.
        matches = np.where(axis == value)[0]
        assert len(matches) == 1
        return matches[0]
    for rec in layer_table.itertuples():
        i = _index_of(xi, rec.x)
        j = _index_of(yj, rec.y)
        depths[i, j] = rec.depth - rec.initial_depth
        sediments[i, j] = rec.sediment_type
    return depths, sediments
def build_mannings_layer(layer_table, xi, yj):
    """Rasterise Manning's number records onto the (xi, yj) grid.

    Cells with no matching record remain NaN.
    """
    mannings = np.full((len(xi), len(yj)), np.nan)
    for rec in layer_table.itertuples():
        xmatch = np.where(xi == rec.x)[0]
        ymatch = np.where(yj == rec.y)[0]
        # Each record must map to exactly one grid cell.
        assert len(xmatch) == 1 and len(ymatch) == 1
        mannings[xmatch[0], ymatch[0]] = rec.mannings_no
    return mannings
def build_tidal_series_step(step_table, xi, yj):
    """Rasterise one time step of tidal records onto the (xi, yj) grid.

    Returns (u, v, ssh, ti) 2D arrays; unfilled cells remain NaN.
    """
    shape = (len(xi), len(yj))
    u = np.full(shape, np.nan)
    v = np.full(shape, np.nan)
    ssh = np.full(shape, np.nan)
    ti = np.full(shape, np.nan)
    for rec in step_table.itertuples():
        xmatch = np.where(xi == rec.x)[0]
        ymatch = np.where(yj == rec.y)[0]
        # Each record must map to exactly one grid cell.
        assert len(xmatch) == 1 and len(ymatch) == 1
        i, j = xmatch[0], ymatch[0]
        u[i, j] = rec.u
        v[i, j] = rec.v
        ssh[i, j] = rec.ssh
        ti[i, j] = rec.turbulence_intensity
    return u, v, ssh, ti
def get_table_df(db, schema, table, columns):
    """Read the given columns of schema.table into a DataFrame."""
    return pd.read_sql_table(table,
                             db._engine,
                             schema=schema,
                             columns=columns)
def get_one_from_column(db, schema, table, column):
    """Return the value of a column expected to hold at most one row.

    Returns None when the table is empty (one_or_none contract).
    """
    Table = db.safe_reflect_table(table, schema)
    result = db.session.query(Table.columns[column]).one_or_none()
    return result
def filter_one_from_column(db,
                           schema,
                           table,
                           result_column,
                           filter_column,
                           filter_value):
    """Return result_column from the single row matching filter_column.

    Uses one_or_none, so None is returned when no row matches.
    """
    reflected = db.safe_reflect_table(table, schema)
    filtered = db.session.query(reflected.columns[result_column]).filter(
        reflected.columns[filter_column] == filter_value)
    return filtered.one_or_none()
def get_all_from_columns(db, schema, table, columns):
    """Return a list of value lists, one per requested column."""
    reflected = db.safe_reflect_table(table, schema)
    results = []
    for name in columns:
        rows = db.session.query(reflected.columns[name]).all()
        # Each row is a one-element tuple; keep the bare value.
        results.append([row[0] for row in rows])
    return results
def database_to_files(root_path,
                      table_list,
                      database,
                      schema=None,
                      table_name_list=None,
                      pid_list=None,
                      fid_list=None,
                      where_list=None,
                      auto_child=False,
                      print_function=None):
    """Dump a hierarchy of database tables to a directory tree of files.

    Each table definition is written as an .xlsx file (or .csv above a
    million rows) under root_path; child tables recurse into
    sub-directories, one per parent row for "autokey" tables.

    Bug fix: the Excel column-autofit step used a bare ``except:`` which
    also trapped SystemExit/KeyboardInterrupt; it now catches only
    Exception, keeping the best-effort behaviour.

    Args:
        root_path: directory receiving the files.
        table_list: list of table definition dicts (see check_dict).
        database: PostgreSQL wrapper exposing the SQLAlchemy engine.
        schema: override schema name for all tables (optional).
        table_name_list, pid_list, fid_list, where_list: accumulated
            parent-table state used when recursing (internal).
        auto_child: True when handling one row of an autokey parent
            (internal).
        print_function: callable for progress messages (default print).
    """
    if print_function is None: print_function = print
    def _dump_child(root_path,
                    table_dict,
                    engine,
                    schema,
                    table_name_list=None,
                    pid_list=None,
                    fid_list=None,
                    where_list=None,
                    auto_val=None):
        # Create the child directory (suffixed with the parent key value
        # for autokey tables) and recurse into it.
        child_path = os.path.join(root_path, table_dict["table"])
        if auto_val is not None:
            child_path += str(auto_val)
            auto_child = True
        else:
            auto_child = False
        # dump a directory
        if os.path.exists(child_path):
            shutil.rmtree(child_path, onerror=onerror)
        os.makedirs(child_path)
        # Recurse for the children
        database_to_files(child_path,
                          table_dict["children"],
                          engine,
                          schema,
                          table_name_list,
                          pid_list,
                          fid_list,
                          where_list,
                          auto_child,
                          print_function)
        return
    def _autofit_columns(xlpath):
        # Use Excel COM automation to auto-size the written columns.
        excel = Dispatch('Excel.Application')
        wb = excel.Workbooks.Open(os.path.abspath(xlpath))
        #Activate second sheet
        excel.Worksheets(1).Activate()
        #Autofit column in active sheet
        excel.ActiveSheet.Columns.AutoFit()
        #Or simply save changes in a current file
        wb.Save()
        wb.Close()
        return
    for table_dict in table_list:
        table_df = None
        new_name_list = None
        new_pid_list = None
        new_fid_list = None
        new_where_list = None
        full_dict = check_dict(table_dict)
        # Set the schema
        if schema is None:
            var_schema = full_dict["schema"]
        else:
            var_schema = schema
        msg_str = "Dumping table: {}.{}".format(var_schema,
                                                table_dict["table"])
        print_function(msg_str)
        if not full_dict["dummy"]:
            # Accumulate the table name and foreign key chains used by
            # query_builder to join back to the parent tables.
            if table_name_list is None:
                table_idx = 0
                new_name_list = [full_dict["table"]]
            else:
                table_idx = len(table_name_list)
                new_name_list = table_name_list + [full_dict["table"]]
            if fid_list is None:
                new_fid_list = [full_dict["fkey"]]
            else:
                new_fid_list = fid_list + [full_dict["fkey"]]
            # Filter by the parent table if required
            query_str = query_builder(new_name_list,
                                      pid_list,
                                      new_fid_list,
                                      where_list,
                                      var_schema)
            msg_str = "Executing query: {}".format(query_str)
            print_function(msg_str)
            # Read the table first
            table_df = pd.read_sql(query_str, database._engine)
            if full_dict["stripf"] and auto_child:
                # The foreign key is implied by the directory name.
                msg_str = "Stripping column: {}".format(full_dict["fkey"])
                print_function(msg_str)
                table_df = table_df.drop(full_dict["fkey"], 1)
            # Serialise non-trivial column types to text forms.
            if full_dict["array"] is not None:
                array_str = ", ".join(full_dict["array"])
                msg_str = "Coverting array columns: {}".format(array_str)
                print_function(msg_str)
                table_df = convert_array(table_df, full_dict["array"])
            if full_dict["bool"] is not None:
                bool_str = ", ".join(full_dict["bool"])
                msg_str = "Coverting boolean columns: {}".format(bool_str)
                print_function(msg_str)
                table_df = convert_bool(table_df, full_dict["bool"])
            if full_dict["geo"] is not None:
                geo_str = ", ".join(full_dict["geo"])
                msg_str = "Coverting Geometry columns: {}".format(geo_str)
                print_function(msg_str)
                table_df = convert_geo(table_df, full_dict["geo"])
            if full_dict["time"] is not None:
                time_str = ", ".join(full_dict["time"])
                msg_str = "Coverting Time columns: {}".format(time_str)
                print_function(msg_str)
                table_df = convert_time(table_df, full_dict["time"])
            # Excel for small tables, CSV above a million rows.
            if len(table_df) < 1e6:
                table_fname = full_dict["table"] + ".xlsx"
                tab_path = os.path.join(root_path, table_fname)
                msg_str = "Writing to: {}".format(tab_path)
                print_function(msg_str)
                # Create a Pandas Excel writer using XlsxWriter as the engine.
                writer = pd.ExcelWriter(tab_path,
                                        engine='xlsxwriter')
                # Convert the dataframe to an XlsxWriter Excel object.
                table_df.to_excel(writer, index=False)
                # Close the Pandas Excel writer and output the Excel file.
                writer.save()
                # Fit the column widths (don't let failure be catastrophic)
                try:
                    _autofit_columns(tab_path)
                except Exception:
                    print_function("*** Column adjust failed. Skipping. ***")
            else:
                table_fname = full_dict["table"] + ".csv"
                tab_path = os.path.join(root_path, table_fname)
                msg_str = "Writing to: {}".format(tab_path)
                print_function(msg_str)
                table_df.to_csv(tab_path, index=False)
        if full_dict["children"] is not None:
            # Include pid in iteration
            if full_dict["pkey"] is not None:
                if pid_list is None:
                    new_pid_list = [full_dict["pkey"]]
                else:
                    new_pid_list = pid_list + [full_dict["pkey"]]
            # Check autokey
            if full_dict["autokey"]:
                # One child directory per parent row, filtered by pkey.
                pkids = table_df[full_dict["pkey"]]
                del(table_df)
                for pkid in pkids:
                    # Add a where
                    new_where = {"table#": table_idx,
                                 "value": pkid}
                    if where_list is None:
                        new_where_list = [new_where]
                    else:
                        new_where_list = where_list + [new_where]
                    _dump_child(root_path,
                                table_dict,
                                database,
                                var_schema,
                                new_name_list,
                                new_pid_list,
                                new_fid_list,
                                new_where_list,
                                pkid)
            else:
                del(table_df)
                if where_list is None:
                    new_where_list = None
                else:
                    new_where_list = where_list[:]
                _dump_child(root_path,
                            table_dict,
                            database,
                            var_schema,
                            new_name_list,
                            new_pid_list,
                            new_fid_list,
                            new_where_list)
    return
def database_from_files(root_path,
                        table_list,
                        database,
                        schema=None,
                        add_fid=None,
                        truncate=True,
                        drop_missing=True,
                        print_function=None):
    """Load a directory tree of .xlsx/.csv files back into the database.

    Mirrors database_to_files: each table definition is read from a file
    in root_path and appended to the matching table; child definitions
    recurse into sub-directories (one per parent row for autokey tables).

    Args:
        root_path: directory containing the files.
        table_list: list of table definition dicts (see check_dict).
        database: PostgreSQL wrapper exposing the SQLAlchemy engine.
        schema: override schema name for all tables (optional).
        add_fid: foreign key value added to every row (internal, set
            when recursing into autokey children).
        truncate: empty each table (CASCADE) before loading.
        drop_missing: drop file columns absent from the database table.
        print_function: callable for progress messages (default print).
    """
    if print_function is None: print_function = print
    def _list_dirs(path, tab_match):
        # Find sub-directories named "<table><number>", sorted ascending.
        dir_list = [item for item in os.listdir(path)
                    if os.path.isdir(os.path.join(path, item))]
        match_str = r'^{}[0-9]+'.format(tab_match)
        dir_list = [s for s in dir_list if re.search(match_str, s)]
        dir_list.sort()
        return dir_list
    for table_dict in table_list:
        full_dict = check_dict(table_dict)
        # Set the schema
        if schema is None:
            var_schema = full_dict["schema"]
        else:
            var_schema = schema
        if not full_dict["dummy"]:
            xlname = "{}.xlsx".format(full_dict["table"])
            xlpath = os.path.join(root_path, xlname)
            csvname = "{}.csv".format(full_dict["table"])
            csvpath = os.path.join(root_path, csvname)
            # Try to read the table as xl or csv
            if os.path.isfile(xlpath):
                msg_str = "Reading file: {}".format(xlpath)
                print_function(msg_str)
                xl = pd.ExcelFile(xlpath)
                df = xl.parse("Sheet1")
            elif os.path.isfile(csvpath):
                msg_str = "Reading file: {}".format(csvpath)
                print_function(msg_str)
                df = pd.read_csv(csvpath)
            else:
                errStr = ("Table {} could not be found in directory "
                          "{}").format(full_dict["table"], root_path)
                raise IOError(errStr)
            if add_fid is not None:
                # Restore the foreign key stripped on dump (stripf).
                msg_str = ("Adding foreign key '{}' with value: "
                           "{}").format(full_dict["fkey"], add_fid)
                print_function(msg_str)
                df[full_dict["fkey"]] = add_fid
            # Get the table name
            dbname = "{}.{}".format(var_schema, full_dict["table"])
            # Clean the table
            if truncate:
                msg_str = "Truncating table: {}".format(dbname)
                print_function(msg_str)
                query_str = "TRUNCATE TABLE {} CASCADE".format(dbname)
                database.execute_transaction(query_str)
            # Drop columns not in the recepting table
            if drop_missing:
                actual_tables = database.get_column_names(full_dict["table"],
                                                          var_schema)
                missing_set = set(df.columns) - set(actual_tables)
                if missing_set:
                    cols_str = ", ".join(missing_set)
                    msg_str = ("Dropping extraneous columns: "
                               "{}").format(cols_str)
                    print_function(msg_str)
                    df = df.drop(missing_set, 1)
            msg_str = "Writing to table: {}".format(dbname)
            print_function(msg_str)
            df.to_sql(full_dict["table"],
                      database._engine,
                      schema=var_schema,
                      if_exists="append",
                      index=False,
                      chunksize=50000)
            del(df)
        if full_dict["children"] is not None:
            if full_dict["autokey"]:
                # The numeric suffix of each directory name is the
                # foreign key value to re-attach on load.
                tab_dirs = _list_dirs(root_path, full_dict["table"])
                fids = [int(x.split(full_dict["table"])[1]) for x in tab_dirs]
                if not tab_dirs: continue
                first_dir = tab_dirs.pop(0)
                first_fid = fids.pop(0)
                child_path = os.path.join(root_path, first_dir)
                # NOTE(review): this first recursion uses the default
                # truncate=True even when the current call was made with
                # truncate=False -- confirm sibling directories sharing
                # child table names cannot clobber earlier loads.
                database_from_files(child_path,
                                    full_dict["children"],
                                    database,
                                    var_schema,
                                    first_fid,
                                    print_function=print_function)
                for next_tab_dir, next_fid in zip(tab_dirs, fids):
                    child_path = os.path.join(root_path, next_tab_dir)
                    # Subsequent siblings append (truncate=False).
                    database_from_files(child_path,
                                        full_dict["children"],
                                        database,
                                        var_schema,
                                        next_fid,
                                        False,
                                        print_function=print_function)
            else:
                child_path = os.path.join(root_path, full_dict["table"])
                database_from_files(child_path,
                                    full_dict["children"],
                                    database,
                                    var_schema,
                                    truncate=truncate,
                                    print_function=print_function)
    return
def query_builder(table_list,
                  pid_list=None,
                  fid_list=None,
                  where_list=None,
                  schema=None):
    """Build a SELECT query for the last table in a parent/child chain.

    Portability fix: uses range rather than the Python 2-only xrange
    (identical behaviour here; the lists involved are always small).

    Args:
        table_list: table names ordered root parent first, target last.
        pid_list: primary key column names of the parent tables; when
            None a plain "SELECT * FROM table;" is returned.
        fid_list: foreign key column names, one per table in table_list.
        where_list: dicts of {"table#": index into table_list, "value":
            primary key value} used to filter by parent rows.
        schema: optional schema name prefixed to every table.

    Returns:
        str: the SQL query text.
    """
    def _add_schema(table_name, schema=None):
        if schema is None:
            dbname = table_name
        else:
            dbname = "{}.{}".format(schema, table_name)
        return dbname
    consume_list = table_list[:]
    table_name = _add_schema(consume_list.pop(), schema)
    # No joins or wheres
    if pid_list is None:
        query_str = "SELECT * FROM {};".format(table_name)
        return query_str
    # One alias per table; the target table takes the last alias.
    table_shorts = ["t{}".format(i) for i in range(len(table_list))]
    consume_shorts = table_shorts[:]
    table_short = consume_shorts.pop()
    query_str = "SELECT {0}.*\nFROM {1} {0}".format(table_short, table_name)
    consume_pid = pid_list[:]
    # Add joins
    if fid_list is not None:
        consume_fid = fid_list[:]
        while consume_list:
            # Join each child to its parent: parent.pkey = child.fkey.
            table_fid = consume_fid.pop()
            join_table_pid = consume_pid.pop()
            join_table_name = _add_schema(consume_list.pop(), schema)
            join_table_short = consume_shorts.pop()
            query_str += ("\nJOIN {0} {1} ON {1}.{2} = "
                          "{3}.{4}").format(join_table_name,
                                            join_table_short,
                                            join_table_pid,
                                            table_short,
                                            table_fid)
            table_short = join_table_short
    # Add wheres
    if where_list is not None:
        where_str = None
        for where_dict in where_list:
            table_short = table_shorts[where_dict["table#"]]
            table_pid = pid_list[where_dict["table#"]]
            pid_value = where_dict["value"]
            eq_str = "{}.{} = {}".format(table_short,
                                         table_pid,
                                         pid_value)
            if where_str is None:
                where_str = "\nWHERE " + eq_str
            else:
                where_str += " AND " + eq_str
        query_str += where_str
    query_str += ";"
    return query_str
def convert_array(table_df, array_cols):
    """Convert list-valued columns to PostgreSQL array literal strings.

    e.g. [1, 2] -> "{1, 2}"; None values are passed through.

    Portability fix: replaces the Python 2-only ``string.maketrans``
    translation table with plain ``str.replace`` calls, which behave
    identically on Python 2 and 3.
    """
    def _safe_square2curly(value):
        if value is None:
            return None
        return str(value).replace("[", "{").replace("]", "}")
    for array_col in array_cols:
        table_df[array_col] = table_df[array_col].apply(_safe_square2curly)
    return table_df
def convert_bool(table_df, bool_cols):
    """Map boolean columns to "yes"/"no" strings (None stays None)."""
    def _as_yes_no(flag):
        if flag is None:
            return None
        return "yes" if flag else "no"
    for col in bool_cols:
        table_df[col] = table_df[col].apply(_as_yes_no)
    return table_df
def convert_geo(table_df, geo_cols):
    """Convert WKB hex geometry columns to (E)WKT strings.

    The SRID is prepended as "SRID=n;..." when the geometry carries one;
    None values are passed through.
    """
    def _wkb_hex_to_wkt(value):
        if value is None:
            return None
        shape = wkb.loads(value, hex=True)
        srid = geos.lgeos.GEOSGetSRID(shape._geom)
        if srid > 0:
            return "SRID={};{}".format(srid, shape.wkt)
        return shape.wkt
    for col in geo_cols:
        table_df[col] = table_df[col].apply(_wkb_hex_to_wkt)
    return table_df
def convert_time(table_df, time_cols):
    """Format time columns as "HH:MM:SS" strings (None stays None)."""
    def _time_to_str(value):
        return None if value is None else value.strftime("%H:%M:%S")
    for col in time_cols:
        table_df[col] = table_df[col].apply(_time_to_str)
    return table_df
def check_dict(table_dict):
    """Fill a table definition dict with defaults for all optional keys.

    Raises KeyError when the mandatory "table" key is missing.
    """
    defaults = {"array": None,
                "autokey": False,
                "bool": None,
                "children": None,
                "dummy": False,
                "fkey": None,
                "geo": None,
                "pkey": None,
                "schema": None,
                "stripf": False,
                "time": None}
    merged = dict(defaults)
    merged.update(table_dict)
    if "table" not in merged:
        raise KeyError("Each definition requires a table name under the "
                       "'table' key.")
    return merged
def get_table_map(map_name="table_map.yaml"):
    """Load the table hierarchy definition shipped in the package config.

    Args:
        map_name: file name of the YAML map inside the config directory.

    Returns:
        list: nested table definition dicts (see check_dict).
    """
    # Load the yaml files
    objdir = ObjDirectory(__name__, "..", "config")
    table_yaml = objdir.get_path(map_name)
    with open(table_yaml, "r") as f:
        table_list = yaml.load(f, Loader=Loader)
    return table_list
def filter_map(table_list, filter_name, parent=None):
    """Prune the table map to the branch containing *filter_name*.

    Depth-first search of table_list (and children) for a table whose
    name matches filter_name. Returns the matching definition, wrapped
    in its chain of parents when the match is nested, or None when no
    table matches.

    Note: matched parent dicts are modified in place -- their "children"
    list is replaced with the single matching branch.
    """
    copy_list = table_list[:]
    for table_dict in copy_list:
        full_dict = check_dict(table_dict)
        table_name = full_dict["table"]
        if filter_name == table_name:
            if parent is not None:
                parent["children"] = [full_dict]
                return parent
            else:
                return table_dict
        elif full_dict["children"] is not None:
            # Recurse with this table as the prospective parent.
            result = filter_map(full_dict["children"],
                                filter_name,
                                table_dict)
            if result is not None:
                if parent is not None:
                    parent["children"] = [result]
                    return parent
                else:
                    return result
    return None
def draw_map(table_list, level=0):
    """Render the table hierarchy as an indented ASCII tree string."""
    out = ""
    for entry in table_list:
        full = check_dict(entry)
        # Indentation grows by two spaces per nesting level.
        if level > 0:
            marker = " " + " " * (2 * level - 1) + "|-"
        else:
            marker = "|-"
        out += "{} {}\n".format(marker, full["table"])
        if full["children"] is not None:
            out += draw_map(full["children"], level + 1)
    return out
def get_database_config(db_config_name="database.yaml"):
    """Read the database credentials config, preferring user overrides.

    Falls back to the configuration shipped with the package when no
    file exists in the user data directory.

    Returns:
        tuple: (ReadYAML handle pointing at the user config location,
        parsed configuration dict).
    """
    userconfigdir = UserDataDirectory("dtocean_core", "DTOcean", "config")
    useryaml = ReadYAML(userconfigdir, db_config_name)
    if userconfigdir.isfile(db_config_name):
        configdir = userconfigdir
    else:
        configdir = ObjDirectory("dtocean_core", "config")
    configyaml = ReadYAML(configdir, db_config_name)
    config = configyaml.read()
    return useryaml, config
def get_database(credentials,
                 echo=False,
                 timeout=None,
                 db_adapter="psycopg2"):
    """Create and configure a PostgreSQL connection wrapper.

    Args:
        credentials: mapping with host/dbname/user/pwd entries.
        echo: echo SQL statements to the log.
        timeout: connection timeout (None for the driver default).
        db_adapter: DBAPI adapter name.

    Returns:
        PostgreSQL: the configured database wrapper.
    """
    database = PostgreSQL(db_adapter)
    # Settings must be applied before configure() builds the engine.
    database.set_credentials(credentials)
    database.set_echo(echo)
    database.set_timeout(timeout)
    database.configure()
    return database
def database_convert_parser():
    '''Command line parser for database_to_files and database_from_files.

    Returns a dict of the parsed options keyed for
    database_convert_interface.
    '''
    desStr = "Convert DTOcean database to and from structured files"
    epiStr = "Mathew Topper, Data Only Greater, (c) 2018"
    parser = argparse.ArgumentParser(description=desStr,
                                     epilog=epiStr,
                                     formatter_class=SmartFormatter)
    # The "R|" prefix tells SmartFormatter to keep the raw line breaks.
    parser.add_argument("action",
                        choices=['dump', 'load', 'list', 'view', 'dir'],
                        help="R|Select an action, where\n"
                             " dump = export database to files\n"
                             " load = import files into database\n"
                             " list = print stored credentials identifiers\n"
                             " view = print stored credentials (using -i "
                             "option)\n"
                             " dir = print table structure")
    parser.add_argument("-d", "--directory",
                        help=("directory to add or read files from. "
                              "Defaults to '.'"),
                        type=str,
                        default=".")
    parser.add_argument("-s", "--section",
                        choices=['device', 'site', 'other'],
                        help="R|Only operate on section from\n"
                             " device = tables related to the OEC\n"
                             " site = tables related to the deployment site\n"
                             " other = tables related to the reference data")
    parser.add_argument("-i", "--identifier",
                        help=("stored credentials identifier"),
                        type=str)
    # The following options override individual stored credential fields.
    parser.add_argument("--host",
                        help=("override database host"),
                        type=str)
    parser.add_argument("--name",
                        help=("override database name"),
                        type=str)
    parser.add_argument("--user",
                        help=("override database username"),
                        type=str)
    parser.add_argument("-p", "--pwd",
                        help=("override database password"),
                        type=str)
    args = parser.parse_args()
    result = {"action": args.action,
              "root_path": args.directory,
              "filter_table": args.section,
              "db_id": args.identifier,
              "db_host": args.host,
              "db_name": args.name,
              "db_user": args.user,
              "db_pwd": args.pwd}
    return result
def database_convert_interface():
    '''Command line interface for database_to_files and database_from_files.

    Bug fix: the -p/--pwd override previously assigned the literal string
    "postgres" to the credentials, silently discarding the password given
    on the command line; it now uses the supplied value.

    Example:

        To get help::

            $ dtocean-database -h

    '''
    request = database_convert_parser()
    _, config = get_database_config()
    # List the available database configurations
    if request["action"] == "list":
        id_str = ", ".join(config.keys())
        if id_str:
            msg_str = ("Available database configuration identifiers are: "
                       "{}").format(id_str)
        else:
            msg_str = "No database configurations are stored"
        print(msg_str)
        return
    if request["action"] == "view":
        if request["db_id"] is None:
            err_msg = "Option '-i' must be specified with 'view' action"
            raise ValueError(err_msg)
        cred = config[request["db_id"]]
        for k, v in cred.iteritems():
            print('{:>8} :: {}'.format(k, v))
        return
    table_list = get_table_map()
    # Filter the table if required
    if request["filter_table"] is not None:
        filtered_dict = filter_map(table_list, request["filter_table"])
        table_list = [filtered_dict]
    if request["action"] == "dir":
        print("\n" + draw_map(table_list))
        return
    # Set up the DB credentials: start from the stored configuration (if
    # requested) then apply any command line overrides.
    if request["db_id"] is not None:
        cred = config[request["db_id"]]
    else:
        cred = {"host": None,
                "dbname": None,
                "user": None,
                "pwd": None}
    if request["db_host"] is not None:
        cred["host"] = request["db_host"]
    if request["db_name"] is not None:
        cred["dbname"] = request["db_name"]
    if request["db_user"] is not None:
        cred["user"] = request["db_user"]
    if request["db_pwd"] is not None:
        cred["pwd"] = request["db_pwd"]
    db = get_database(cred, timeout=60)
    if request["action"] == "dump":
        # make a directory if required
        if not os.path.exists(request["root_path"]):
            os.makedirs(request["root_path"])
        database_to_files(request["root_path"],
                          table_list,
                          db)
        return
    if request["action"] == "load":
        database_from_files(request["root_path"],
                            table_list,
                            db)
        return
    # All parser choices are handled above; this is unreachable.
    raise RuntimeError("Highly illogical...")
| gpl-3.0 |
vitorio/NaNoGenMo2013 | ficly-scrape/bs4/testing.py | 3 | 22954 | """Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
    """Base class giving soup-building helpers to parser test cases."""

    @property
    def default_builder(self):
        # A fresh builder instance is created on every access.
        return default_builder()

    def soup(self, markup, **kwargs):
        """Parse markup into a BeautifulSoup tree.

        A 'builder' keyword argument overrides the default tree builder.
        """
        chosen_builder = kwargs.pop('builder', self.default_builder)
        return BeautifulSoup(markup, builder=chosen_builder, **kwargs)

    def document_for(self, markup):
        """Wrap an HTML fragment into a full document.

        How the fragment is wrapped depends on the active builder.
        """
        return self.default_builder.test_fragment_to_document(markup)

    def assertSoupEquals(self, to_parse, compare_parsed_to=None):
        """Assert parsing and re-serialising to_parse yields the document
        form of compare_parsed_to (defaults to the input itself)."""
        if compare_parsed_to is None:
            compare_parsed_to = to_parse
        parsed = BeautifulSoup(to_parse, builder=self.default_builder)
        self.assertEqual(parsed.decode(), self.document_for(compare_parsed_to))
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
    def assertDoctypeHandled(self, doctype_fragment):
        """Assert that a given doctype string is handled correctly."""
        doctype_str, soup = self._document_with_doctype(doctype_fragment)
        # Make sure a Doctype object was created.
        doctype = soup.contents[0]
        self.assertEqual(doctype.__class__, Doctype)
        self.assertEqual(doctype, doctype_fragment)
        # The serialised document must start with the doctype declaration.
        self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
        # Make sure that the doctype was correctly associated with the
        # parse tree and that the rest of the document parsed.
        self.assertEqual(soup.p.contents[0], 'foo')
    def _document_with_doctype(self, doctype_fragment):
        """Generate and parse a document with the given doctype.

        Returns the full doctype declaration string and the parsed soup.
        """
        doctype = '<!DOCTYPE %s>' % doctype_fragment
        markup = doctype + '\n<p>foo</p>'
        soup = self.soup(markup)
        return doctype, soup
    def test_normal_doctypes(self):
        """Make sure normal, everyday HTML doctypes are handled correctly."""
        # HTML5 and XHTML 1.0 Transitional declarations.
        self.assertDoctypeHandled("html")
        self.assertDoctypeHandled(
            'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
    def test_empty_doctype(self):
        """A doctype with no name parses to an empty Doctype node."""
        soup = self.soup("<!DOCTYPE>")
        doctype = soup.contents[0]
        self.assertEqual("", doctype.strip())
    def test_public_doctype_with_url(self):
        """A public doctype including a DTD URL round-trips cleanly."""
        doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
        self.assertDoctypeHandled(doctype)
    def test_system_doctype(self):
        """A SYSTEM doctype is handled like any other doctype."""
        self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
    def test_namespaced_system_doctype(self):
        """A namespaced doctype with a system ID parses cleanly."""
        # We can handle a namespaced doctype with a system ID.
        self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
    def test_namespaced_public_doctype(self):
        """A namespaced doctype with a public ID parses cleanly."""
        # Test a namespaced doctype with a public id.
        self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
    def test_real_xhtml_document(self):
        """A real XHTML document should come out more or less the same as it went in."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        # Compare with newlines stripped, since builders differ in how
        # they preserve whitespace between elements.
        self.assertEqual(
            soup.encode("utf-8").replace(b"\n", b""),
            markup.replace(b"\n", b""))
def test_deepcopy(self):
    """Make sure you can copy the tree builder.

    This is important because the builder is part of a
    BeautifulSoup object, and we want to be able to copy that.
    """
    # Raises if the builder holds anything uncopyable (e.g. open handles).
    copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
    """A <p> tag is never designated as an empty-element tag.

    Even if the markup shows it as an empty-element tag, it
    shouldn't be presented that way.
    """
    soup = self.soup("<p/>")
    paragraph = soup.p
    self.assertFalse(paragraph.is_empty_element)
    self.assertEqual(str(paragraph), "<p></p>")
def test_unclosed_tags_get_closed(self):
    """A tag that's not closed by the end of the document should be closed.

    This applies to all tags except empty-element tags.
    """
    self.assertSoupEquals("<p>", "<p></p>")
    self.assertSoupEquals("<b>", "<b></b>")
    # <br> is an empty-element tag, so it self-closes instead.
    self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
    """A <br> tag is designated as an empty-element tag.

    Some parsers treat <br></br> as one <br/> tag, some parsers as
    two tags, but it should always be an empty-element tag.
    """
    soup = self.soup("<br></br>")
    self.assertTrue(soup.br.is_empty_element)
    self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
    """Identical formatting tags may nest without being merged or dropped."""
    self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
    """Comments are represented as Comment objects and linked into the tree."""
    markup = "<p>foo<!--foobar-->baz</p>"
    self.assertSoupEquals(markup)
    soup = self.soup(markup)
    comment = soup.find(text="foobar")
    self.assertEqual(Comment, comment.__class__)
    # The comment sits between the "foo" and "baz" text nodes.
    self.assertEqual(soup.find(text="foo").next_element, comment)
    self.assertEqual(soup.find(text="baz").previous_element, comment)
def test_preserved_whitespace_in_pre_and_textarea(self):
    """Whitespace must be preserved in <pre> and <textarea> tags."""
    # The leading/trailing spaces must survive the round trip.
    self.assertSoupEquals("<pre> </pre>")
    self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
    """Inline elements can be nested indefinitely."""
    b_tag = "<b>Inside a B tag</b>"
    self.assertSoupEquals(b_tag)

    nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
    self.assertSoupEquals(nested_b_tag)

    double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
    # BUG FIX: this assertion previously re-checked nested_b_tag, so the
    # doubly-nested markup built on the line above was never tested.
    self.assertSoupEquals(double_nested_b_tag)
def test_nested_block_level_elements(self):
    """Block elements can be nested."""
    soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
    blockquote = soup.blockquote
    self.assertEqual(blockquote.p.b.string, 'Foo')
    # Tag lookup by name is recursive, so .b finds the nested <b> directly.
    self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
    """One table can go inside another one."""
    markup = ('<table id="1">'
              '<tr>'
              "<td>Here's another table:"
              '<table id="2">'
              '<tr><td>foo</td></tr>'
              '</table></td>')

    # The parser must close the dangling <td>/<tr>/<table> tags while
    # keeping table 2 nested inside table 1.
    self.assertSoupEquals(
        markup,
        '<table id="1"><tr><td>Here\'s another table:'
        '<table id="2"><tr><td>foo</td></tr></table>'
        '</td></tr></table>')

    # thead/tbody/tfoot sections must also survive a round trip.
    self.assertSoupEquals(
        "<table><thead><tr><td>Foo</td></tr></thead>"
        "<tbody><tr><td>Bar</td></tr></tbody>"
        "<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_deeply_nested_multivalued_attribute(self):
    # html5lib can set the attributes of the same tag many times
    # as it rearranges the tree. This has caused problems with
    # multivalued attributes.
    markup = '<table><div><div class="css"></div></div></table>'
    soup = self.soup(markup)
    # "class" must stay a one-element list, not accumulate duplicates.
    self.assertEqual(["css"], soup.div.div['class'])
def test_angle_brackets_in_attribute_values_are_escaped(self):
    """Angle brackets inside an attribute value are entity-escaped on output."""
    # FIX: the expected value must use &lt;/&gt; entities. It had been
    # decoded to match the input exactly, which asserted nothing.
    self.assertSoupEquals('<a b="<a>"></a>', '<a b="&lt;a&gt;"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
    """Named, decimal and hex entities in attribute values all decode."""
    # FIX: restore the three distinct entity encodings of n-tilde; they
    # had been decoded to the same literal character three times, which
    # made the three assertions identical.
    expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
    self.assertSoupEquals('<p id="pi&ntilde;ata"></p>', expect)
    self.assertSoupEquals('<p id="pi&#241;ata"></p>', expect)
    self.assertSoupEquals('<p id="pi&#xf1;ata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
    """Named, decimal and hex entities in text all decode to Unicode."""
    # FIX: restore the three distinct entity encodings of n-tilde (see
    # test_entities_in_attributes_converted_to_unicode).
    expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
    self.assertSoupEquals("<p>pi&ntilde;ata</p>", expect)
    self.assertSoupEquals("<p>pi&#241;ata</p>", expect)
    self.assertSoupEquals("<p>pi&#xf1;ata</p>", expect)
def test_quot_entity_converted_to_quotation_mark(self):
    """The &quot; entity becomes a literal double quote when parsed."""
    # FIX: restore the &quot; entities in the input. The decoded form was
    # a syntax error (unbalanced quotes inside a double-quoted literal).
    self.assertSoupEquals("<p>I said &quot;good day!&quot;</p>",
                          '<p>I said "good day!"</p>')
def test_out_of_range_entity(self):
    """Numeric entities outside the Unicode range become U+FFFD."""
    expect = u"\N{REPLACEMENT CHARACTER}"
    # FIX: restore the out-of-range numeric entity references; raw
    # replacement characters in the input made the test vacuous.
    self.assertSoupEquals("&#10000000000000;", expect)
    self.assertSoupEquals("&#x10000000000000;", expect)
    self.assertSoupEquals("&#1000000000;", expect)
def test_basic_namespaces(self):
    """Parsers don't need to *understand* namespaces, but at the
    very least they should not choke on namespaces or lose
    data."""
    markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
    soup = self.soup(markup)
    # The document must round-trip byte for byte.
    self.assertEqual(markup, soup.encode())
    # Use the <html> tag once instead of re-traversing the tree for each
    # attribute (the original assigned `html` but never used it).
    html = soup.html
    self.assertEqual('http://www.w3.org/1999/xhtml', html['xmlns'])
    self.assertEqual(
        'http://www.w3.org/1998/Math/MathML', html['xmlns:mathml'])
    self.assertEqual(
        'http://www.w3.org/2000/svg', html['xmlns:svg'])
def test_multivalued_attribute_value_becomes_list(self):
    """A whitespace-separated "class" value is parsed into a list."""
    markup = b'<a class="foo bar">'
    soup = self.soup(markup)
    self.assertEqual(['foo', 'bar'], soup.a['class'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_soupstrainer(self):
    """Parsers should be able to work with SoupStrainers."""
    # Only <b> tags survive a parse restricted by this strainer.
    strainer = SoupStrainer("b")
    soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
                     parse_only=strainer)
    self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
    """Single-quoted attribute values are normalized to double quotes."""
    self.assertSoupEquals("<foo attr='bar'></foo>",
                          '<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
    """Double quotes inside a single-quoted value need no escaping."""
    text = """<foo attr='bar "brawls" happen'>a</foo>"""
    self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
    """Setting a value that mixes both quote styles forces entity escaping."""
    text = """<foo attr='bar "brawls" happen'>a</foo>"""
    soup = self.soup(text)
    soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
    # FIX: the expected output must contain &quot; entities; they had
    # been decoded to raw quotes, which is not what the encoder emits.
    self.assertSoupEquals(
        soup.foo.decode(),
        """<foo attr="Brawls happen at &quot;Bob\'s Bar&quot;">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
    """A raw ampersand in an attribute value is output as &amp;."""
    # FIX: restore the &amp; entities in the expected strings; they had
    # been decoded to raw "&", making the assertions tautological.
    self.assertSoupEquals('<this is="really messed up & stuff"></this>',
                          '<this is="really messed up &amp; stuff"></this>')
    self.assertSoupEquals(
        '<a href="http://example.org?a=1&b=2;3">foo</a>',
        '<a href="http://example.org?a=1&amp;b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
    """An already-escaped ampersand must not be double-escaped."""
    # FIX: the input needs the literal &amp; entity for this test to mean
    # anything; it had been decoded to a raw "&".
    self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
    # Both XML and HTML entities are converted to Unicode characters
    # during parsing.
    # FIX: restore the entity references in the input; they had been
    # decoded, leaving input and expectation effectively identical.
    text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
    expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
    self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
    # Microsoft smart quotes are converted to Unicode characters during
    # parsing.
    # \x91 and \x92 are the windows-1252 left/right single curly quotes.
    quote = b"<p>\x91Foo\x92</p>"
    soup = self.soup(quote)
    self.assertEqual(
        soup.p.string,
        u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
    """&nbsp; entities parse into U+00A0 NO-BREAK SPACE characters."""
    # FIX: restore the &nbsp; entity references, which had been decoded
    # in the source and left the test asserting nothing about entities.
    soup = self.soup("<a>&nbsp;&nbsp;</a>")
    self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
    """On output, < and > are re-escaped while accented letters stay Unicode."""
    # FIX: restore the entity references. The parser decodes them all;
    # the encoder re-escapes only the markup-significant &lt;/&gt;.
    text = "<p>&lt;&lt;sacr&eacute; bleu!&gt;&gt;</p>"
    expected = u"<p>&lt;&lt;sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!&gt;&gt;</p>".encode("utf-8")
    soup = self.soup(text)
    self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
    """Round-trip a Latin-1 document to UTF-8, rewriting its META charset."""
    # Smoke test of interrelated functionality, using an
    # easy-to-understand document.

    # Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
    unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'

    # That's because we're going to encode it into ISO-Latin-1, and use
    # that to test.
    iso_latin_html = unicode_html.encode("iso-8859-1")

    # Parse the ISO-Latin-1 HTML.
    soup = self.soup(iso_latin_html)
    # Encode it to UTF-8.
    result = soup.encode("utf-8")

    # What do we expect the result to look like? Well, it would
    # look like unicode_html, except that the META tag would say
    # UTF-8 instead of ISO-Latin-1.
    expected = unicode_html.replace("ISO-Latin-1", "utf-8")

    # And, of course, it would be in UTF-8, not Unicode.
    expected = expected.encode("utf-8")

    # Ta-da!
    self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
    # Smoke test to make sure the parser can handle a document in
    # Shift-JIS encoding, without choking.
    shift_jis_html = (
        b'<html><head></head><body><pre>'
        b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
        b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
        b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
        b'</pre></body></html>')
    unicode_html = shift_jis_html.decode("shift-jis")
    soup = self.soup(unicode_html)

    # Make sure the parse tree is correctly encoded to various
    # encodings.
    self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
    self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
    # A real-world test to make sure we can convert ISO-8859-8 (a
    # Hebrew encoding) to UTF-8. (The comment previously said
    # ISO-8859-9, which is Turkish; the code uses iso8859-8 throughout.)
    hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
    soup = self.soup(
        hebrew_document, from_encoding="iso8859-8")
    self.assertEqual(soup.original_encoding, 'iso8859-8')
    self.assertEqual(
        soup.encode('utf-8'),
        hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
    """The charset in a <meta http-equiv> tag tracks the output encoding."""
    # Here's the <meta> tag saying that a document is
    # encoded in Shift-JIS.
    meta_tag = ('<meta content="text/html; charset=x-sjis" '
                'http-equiv="Content-type"/>')

    # Here's a document incorporating that meta tag.
    shift_jis_html = (
        '<html><head>\n%s\n'
        '<meta http-equiv="Content-language" content="ja"/>'
        '</head><body>Shift-JIS markup goes here.') % meta_tag
    soup = self.soup(shift_jis_html)

    # Parse the document, and the charset is seemingly unaffected.
    parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
    content = parsed_meta['content']
    self.assertEqual('text/html; charset=x-sjis', content)

    # But that value is actually a ContentMetaAttributeValue object.
    self.assertTrue(isinstance(content, ContentMetaAttributeValue))

    # And it will take on a value that reflects its current
    # encoding.
    self.assertEqual('text/html; charset=utf8', content.encode("utf8"))

    # For the rest of the story, see TestSubstitutions in
    # test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
    """Same as above, for the HTML5 <meta charset=...> form."""
    # Here's the <meta> tag saying that a document is
    # encoded in Shift-JIS.
    meta_tag = ('<meta id="encoding" charset="x-sjis" />')

    # Here's a document incorporating that meta tag.
    shift_jis_html = (
        '<html><head>\n%s\n'
        '<meta http-equiv="Content-language" content="ja"/>'
        '</head><body>Shift-JIS markup goes here.') % meta_tag
    soup = self.soup(shift_jis_html)

    # Parse the document, and the charset is seemingly unaffected.
    parsed_meta = soup.find('meta', id="encoding")
    charset = parsed_meta['charset']
    self.assertEqual('x-sjis', charset)

    # But that value is actually a CharsetMetaAttributeValue object.
    self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))

    # And it will take on a value that reflects its current
    # encoding.
    self.assertEqual('utf8', charset.encode("utf8"))
def test_tag_with_no_attributes_can_have_attributes_added(self):
    """Attributes can be set on a tag that was parsed without any."""
    data = self.soup("<a>text</a>")
    data.a['foo'] = 'bar'
    self.assertEqual('<a foo="bar">text</a>', data.a.decode())
class XMLTreeBuilderSmokeTest(object):
    """Smoke tests that any XML tree builder should pass."""

    def test_docstring_generated(self):
        """An XML declaration is generated for a bare document."""
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')

    def test_real_xhtml_document(self):
        """A real XHTML document should come out *exactly* the same as it went in."""
        markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
        soup = self.soup(markup)
        self.assertEqual(
            soup.encode("utf-8"), markup)

    def test_popping_namespaced_tag(self):
        """Namespaced tags and their contents round-trip unchanged."""
        markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
        soup = self.soup(markup)
        # NOTE: `unicode` is Python 2-only; this suite predates Python 3.
        self.assertEqual(
            unicode(soup.rss), markup)

    def test_docstring_includes_correct_encoding(self):
        """The XML declaration reflects the requested output encoding."""
        soup = self.soup("<root/>")
        self.assertEqual(
            soup.encode("latin1"),
            b'<?xml version="1.0" encoding="latin1"?>\n<root/>')

    def test_large_xml_document(self):
        """A large XML document should come out the same as it went in."""
        markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
                  + b'0' * (2**12)
                  + b'</root>')
        soup = self.soup(markup)
        self.assertEqual(soup.encode("utf-8"), markup)

    def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
        # Unlike HTML, XML has no fixed list of empty-element tags: any
        # tag without content is serialized in self-closing form.
        self.assertSoupEquals("<p>", "<p/>")
        self.assertSoupEquals("<p>foo</p>")

    def test_namespaces_are_preserved(self):
        """xmlns declarations survive parsing as plain attributes."""
        markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
        soup = self.soup(markup)
        root = soup.root
        self.assertEqual("http://example.com/", root['xmlns:a'])
        self.assertEqual("http://example.net/", root['xmlns:b'])

    def test_closing_namespaced_tag(self):
        markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.p), markup)

    def test_namespaced_attributes(self):
        markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.foo), markup)

    def test_namespaced_attributes_xml_namespace(self):
        # The implicit "xml" namespace needs no xmlns declaration.
        markup = '<foo xml:lang="fr">bar</foo>'
        soup = self.soup(markup)
        self.assertEqual(unicode(soup.foo), markup)
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
    """Smoke test for a tree builder that supports HTML5."""

    def test_real_xhtml_document(self):
        # Since XHTML is not HTML5, HTML5 parsers are not tested to handle
        # XHTML documents in any particular way.
        pass

    def test_html_tags_have_namespace(self):
        """HTML5 parsers assign ordinary tags the XHTML namespace."""
        markup = "<a>"
        soup = self.soup(markup)
        self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)

    def test_svg_tags_have_namespace(self):
        """Tags inside <svg> get the SVG namespace."""
        markup = '<svg><circle/></svg>'
        soup = self.soup(markup)
        namespace = "http://www.w3.org/2000/svg"
        self.assertEqual(namespace, soup.svg.namespace)
        self.assertEqual(namespace, soup.circle.namespace)

    def test_mathml_tags_have_namespace(self):
        """Tags inside <math> get the MathML namespace."""
        markup = '<math><msqrt>5</msqrt></math>'
        soup = self.soup(markup)
        namespace = 'http://www.w3.org/1998/Math/MathML'
        self.assertEqual(namespace, soup.math.namespace)
        self.assertEqual(namespace, soup.msqrt.namespace)

    def test_xml_declaration_becomes_comment(self):
        # HTML5 has no XML declaration, so parsers keep it as a comment.
        markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
        soup = self.soup(markup)
        self.assertTrue(isinstance(soup.contents[0], Comment))
        self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
        self.assertEqual("html", soup.contents[0].next_element.name)
def skipIf(condition, reason):
    """Poor man's unittest.skipIf: when `condition` is true, replace the
    decorated test with a no-op; otherwise return it untouched.

    `reason` is accepted for API compatibility but not reported.
    """
    def _skipped(test, *args, **kwargs):
        return None

    def decorator(test_item):
        return _skipped if condition else test_item
    return decorator
| cc0-1.0 |
neumerance/cloudloon2 | .venv/lib/python2.7/site-packages/django/contrib/localflavor/id/forms.py | 100 | 7179 | """
ID-specific Form helpers
"""
from __future__ import absolute_import, unicode_literals
import re
import time
from django.core.validators import EMPTY_VALUES
from django.forms import ValidationError
from django.forms.fields import Field, Select
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_text
# Five-digit Indonesian post codes; the first digit may not be 0.
postcode_re = re.compile(r'^[1-9]\d{4}$')
# Phone numbers: "+62" or "0" prefix, an area/operator digit 2-9, then
# 7-10 further digits.
phone_re = re.compile(r'^(\+62|0)[2-9]\d{7,10}$')
# Vehicle plates: 1-2 letter region prefix, 1-5 digit number and an
# optional suffix (up to 3 letters, or a 1-3 digit number).
plate_re = re.compile(r'^(?P<prefix>[A-Z]{1,2}) ' + \
    r'(?P<number>\d{1,5})( (?P<suffix>([A-Z]{1,3}|[1-9][0-9]{,2})))?$')
# NIK/KTP identity numbers are exactly 16 digits.
nik_re = re.compile(r'^\d{16}$')
class IDPostCodeField(Field):
    """
    An Indonesian post code field.

    Post codes are five digits, never start with 0, and the lowest
    assigned code is 10110; codes in the 1xxxx range always end in 0.

    http://id.wikipedia.org/wiki/Kode_pos
    """
    default_error_messages = {
        'invalid': _('Enter a valid post code'),
    }

    def clean(self, value):
        """Validate the post code; returns '' for empty input or the
        stripped code string, raising ValidationError otherwise."""
        super(IDPostCodeField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''

        value = value.strip()
        if not postcode_re.search(value):
            raise ValidationError(self.error_messages['invalid'])

        if int(value) < 10110:
            raise ValidationError(self.error_messages['invalid'])

        # 1xxx0
        if value[0] == '1' and value[4] != '0':
            raise ValidationError(self.error_messages['invalid'])

        # `value` is already a plain string; no need for '%s' formatting.
        return value
class IDProvinceSelect(Select):
    """
    A Select widget that uses a list of provinces of Indonesia as its
    choices.

    `attrs` is passed through to the base Select widget unchanged.
    """

    def __init__(self, attrs=None):
        # Load data in memory only when it is required, see also #17275
        from django.contrib.localflavor.id.id_choices import PROVINCE_CHOICES
        super(IDProvinceSelect, self).__init__(attrs, choices=PROVINCE_CHOICES)
class IDPhoneNumberField(Field):
    """
    An Indonesian telephone number field.

    http://id.wikipedia.org/wiki/Daftar_kode_telepon_di_Indonesia
    """
    default_error_messages = {
        'invalid': _('Enter a valid phone number'),
    }

    def clean(self, value):
        """Validate the number. Formatting characters are ignored for the
        check, but the caller's original string is returned unchanged."""
        super(IDPhoneNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''

        # Strip dashes, whitespace and parentheses before matching.
        phone_number = re.sub(r'[\-\s\(\)]', '', smart_text(value))

        if phone_re.search(phone_number):
            return smart_text(value)

        raise ValidationError(self.error_messages['invalid'])
class IDLicensePlatePrefixSelect(Select):
    """
    A Select widget that uses a list of vehicle license plate prefix code
    of Indonesia as its choices.

    `attrs` is passed through to the base Select widget unchanged.

    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor
    """

    def __init__(self, attrs=None):
        # Load data in memory only when it is required, see also #17275
        from django.contrib.localflavor.id.id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super(IDLicensePlatePrefixSelect, self).__init__(attrs,
            choices=LICENSE_PLATE_PREFIX_CHOICES)
class IDLicensePlateField(Field):
    """
    An Indonesian vehicle license plate field.

    http://id.wikipedia.org/wiki/Tanda_Nomor_Kendaraan_Bermotor

    Plus: "B 12345 12"
    """
    default_error_messages = {
        'invalid': _('Enter a valid vehicle license plate number'),
    }

    def clean(self, value):
        """Validate a plate number and return it normalized (upper-cased,
        single-spaced); returns '' for empty input."""
        # Load data in memory only when it is required, see also #17275
        from django.contrib.localflavor.id.id_choices import LICENSE_PLATE_PREFIX_CHOICES
        super(IDLicensePlateField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''

        # Collapse whitespace runs and upper-case before matching.
        plate_number = re.sub(r'\s+', ' ',
            smart_text(value.strip())).upper()

        matches = plate_re.search(plate_number)
        if matches is None:
            raise ValidationError(self.error_messages['invalid'])

        # Make sure prefix is in the list of known codes.
        prefix = matches.group('prefix')
        if prefix not in [choice[0] for choice in LICENSE_PLATE_PREFIX_CHOICES]:
            raise ValidationError(self.error_messages['invalid'])

        # Only Jakarta (prefix B) can have 3 letter suffix.
        suffix = matches.group('suffix')
        if suffix is not None and len(suffix) == 3 and prefix != 'B':
            raise ValidationError(self.error_messages['invalid'])

        # RI plates don't have suffix.
        if prefix == 'RI' and suffix is not None and suffix != '':
            raise ValidationError(self.error_messages['invalid'])

        # Number can't be zero.
        number = matches.group('number')
        if number == '0':
            raise ValidationError(self.error_messages['invalid'])

        # CD, CC and B 12345 12
        if len(number) == 5 or prefix in ('CD', 'CC'):
            # BUG FIX: a missing suffix used to reach re.match(..., None)
            # and crash with TypeError (e.g. input "CD 12"); treat the
            # absent suffix as invalid input instead.
            if suffix is None or re.match(r'^\d+$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])

            # Known codes range is 12-124
            if prefix in ('CD', 'CC') and not (12 <= int(number) <= 124):
                raise ValidationError(self.error_messages['invalid'])

            if len(number) == 5 and not (12 <= int(suffix) <= 124):
                raise ValidationError(self.error_messages['invalid'])
        else:
            # suffix must be non-numeric
            if suffix is not None and re.match(r'^[A-Z]{,3}$', suffix) is None:
                raise ValidationError(self.error_messages['invalid'])

        return plate_number
class IDNationalIdentityNumberField(Field):
    """
    An Indonesian national identity number (NIK/KTP#) field.

    http://id.wikipedia.org/wiki/Nomor_Induk_Kependudukan

    xx.xxxx.ddmmyy.xxxx - 16 digits (excl. dots)
    """
    default_error_messages = {
        'invalid': _('Enter a valid NIK/KTP number'),
    }

    def clean(self, value):
        """Validate a NIK and return it dotted as xx.xxxx.ddmmyy.xxxx,
        or '' for empty input."""
        super(IDNationalIdentityNumberField, self).clean(value)
        if value in EMPTY_VALUES:
            return ''

        # Drop whitespace and dots, then require exactly 16 digits.
        value = re.sub(r'[\s.]', '', smart_text(value))
        if not nik_re.search(value):
            raise ValidationError(self.error_messages['invalid'])

        if int(value) == 0:
            raise ValidationError(self.error_messages['invalid'])

        def valid_nik_date(year, month, day):
            # True if (year, month, day) is a real calendar date: the
            # mktime/localtime round trip catches e.g. Feb 30 rolling
            # over into March.
            try:
                t1 = (int(year), int(month), int(day), 0, 0, 0, 0, 0, -1)
                d = time.mktime(t1)
                t2 = time.localtime(d)
                if t1[:3] != t2[:3]:
                    return False
                else:
                    return True
            except (OverflowError, ValueError):
                return False

        # The birth date is embedded at digits 6-11 as ddmmyy.
        year = int(value[10:12])
        month = int(value[8:10])
        day = int(value[6:8])
        current_year = time.localtime().tm_year
        # Two-digit year heuristic: years below the current two-digit
        # year are assumed 20xx, the rest 19xx.
        if year < int(str(current_year)[-2:]):
            if not valid_nik_date(2000 + int(year), month, day):
                raise ValidationError(self.error_messages['invalid'])
        elif not valid_nik_date(1900 + int(year), month, day):
            raise ValidationError(self.error_messages['invalid'])
        # NOTE(review): real NIKs add 40 to the day-of-month for women;
        # such values are rejected here -- confirm whether that is intended.

        # Region and serial components may not be all zeros.
        if value[:6] == '000000' or value[12:] == '0000':
            raise ValidationError(self.error_messages['invalid'])

        return '%s.%s.%s.%s' % (value[:2], value[2:6], value[6:12], value[12:])
| apache-2.0 |
numbas/editor | editor/views/extension.py | 1 | 3583 | import os
from zipfile import ZipFile
from django import http
from django.views import generic
from django.urls import reverse
from django.shortcuts import redirect
from editor.models import Extension, ExtensionAccess
from editor import forms
from editor.views.generic import AuthorRequiredMixin, CanViewMixin
from editor.views import editable_package
class ExtensionViewMixin:
    # Shared configuration for extension views: the form used when a file
    # inside the packaged extension is replaced through the editor.
    upload_file_form_class = forms.ReplaceExtensionFileForm
class CreateView(generic.CreateView):
    """ Create an extension """
    model = Extension
    form_class = forms.CreateExtensionForm
    template_name = 'extension/create.html'

    def get_form_kwargs(self):
        # The form needs the requesting user so it can record authorship.
        kwargs = super(CreateView, self).get_form_kwargs()
        kwargs['author'] = self.request.user
        return kwargs

    def get_success_url(self):
        # Jump straight into the source editor for the new extension.
        return reverse('extension_edit_source', args=(self.object.pk,))
class UploadView(generic.CreateView):
    """ Upload an extension """
    model = Extension
    form_class = forms.UploadExtensionForm
    template_name = 'extension/upload.html'

    def get_form_kwargs(self):
        # The form needs the requesting user so it can record authorship.
        kwargs = super().get_form_kwargs()
        kwargs['author'] = self.request.user
        return kwargs

    def get_success_url(self):
        # Jump straight into the source editor for the uploaded extension.
        return reverse('extension_edit_source', args=(self.object.pk,))
class UpdateView(ExtensionViewMixin,editable_package.UpdateView):
    """Edit an extension's metadata."""
    model = Extension
    form_class = forms.UpdateExtensionForm

    def get_success_url(self):
        # Back to the requesting user's extension listing.
        return reverse('extension_list_profile', args=(self.request.user.pk,))
class EditView(ExtensionViewMixin,editable_package.EditView):
    """Edit the source files inside an extension package."""
    model = Extension
    form_class = forms.EditExtensionForm
    replace_form_class = forms.ReplaceExtensionFileForm
    success_view = 'extension_edit_source'
class ReplaceFileView(ExtensionViewMixin,editable_package.ReplaceFileView):
    """Replace a single file inside an extension package."""
    model = Extension
    form_class = forms.ReplaceExtensionFileForm
    success_view = 'extension_edit_source'
class DeleteFileView(ExtensionViewMixin,editable_package.DeleteFileView):
    """Delete a single file from an extension package."""
    model = Extension
    form_class = forms.ExtensionDeleteFileForm
    success_view = 'extension_edit_source'
class AccessView(ExtensionViewMixin,editable_package.AccessView):
    """Manage the list of users who may access an extension."""
    model = Extension
    form_class = forms.ExtensionAccessFormset
    single_form_class = forms.AddExtensionAccessForm
    success_view = 'extension_access'
class AddAccessView(ExtensionViewMixin,editable_package.AddAccessView):
    """Grant a single user access to an extension."""
    model = ExtensionAccess
    form_class = forms.AddExtensionAccessForm

    def get_package(self):
        # The target extension is identified by the URL kwarg, not the pk.
        return Extension.objects.get(pk=self.kwargs['extension_pk'])

    def get_success_url(self):
        return reverse('extension_access', args=(self.object.extension.pk,))
class DocumentationView(ExtensionViewMixin,editable_package.DocumentationView):
    """Render the documentation bundled with an extension."""
    model = Extension
class DownloadView(CanViewMixin, generic.DetailView):
    """Stream all of an extension's files to the user as a zip archive."""
    model = Extension

    def get(self, request, *args, **kwargs):
        """Build the zip in-place into the HTTP response body."""
        extension = self.get_object()
        response = http.HttpResponse(content_type='application/zip')
        response['Content-Disposition'] = 'attachment; filename="{}.zip"'.format(extension.location)
        # BUG FIX: the ZipFile must be closed so the central directory is
        # written; without it the downloaded archive is corrupt. Use a
        # context manager to guarantee the close.
        with ZipFile(response, 'w') as zf:
            for fname in extension.filenames():
                zf.write(os.path.join(extension.extracted_path, fname), fname)
        return response
class DeleteView(AuthorRequiredMixin, generic.DeleteView):
    """Delete an extension; restricted to its author."""
    model = Extension
    template_name = 'extension/delete.html'

    def get_success_url(self):
        return reverse('extension_list_profile', args=(self.request.user.pk,))
pratikmallya/hue | desktop/libs/liboozie/src/liboozie/submittion2_tests.py | 6 | 11700 | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from django.contrib.auth.models import User
from nose.plugins.attrib import attr
from nose.tools import assert_equal, assert_true, assert_not_equal
from hadoop import cluster, pseudo_hdfs4
from hadoop.conf import HDFS_CLUSTERS, MR_CLUSTERS, YARN_CLUSTERS
from desktop.lib.django_test_util import make_logged_in_client
from oozie.models2 import Node
from oozie.tests import OozieMockBase
from liboozie.submission2 import Submission
LOG = logging.getLogger(__name__)
@attr('requires_hadoop')
def test_copy_files():
  """End-to-end check that Submission._copy_files deploys workflow
  artifacts (workflow.xml, job.properties and node jars) into HDFS.

  Requires a running pseudo-HDFS cluster.
  """
  cluster = pseudo_hdfs4.shared_cluster()

  try:
    c = make_logged_in_client()
    user = User.objects.get(username='test')

    prefix = '/tmp/test_copy_files'

    if cluster.fs.exists(prefix):
      cluster.fs.rmtree(prefix)

    # Jars in various locations
    deployment_dir = '%s/workspace' % prefix
    external_deployment_dir = '%s/deployment' % prefix
    jar_1 = '%s/udf1.jar' % prefix
    jar_2 = '%s/lib/udf2.jar' % prefix
    jar_3 = '%s/udf3.jar' % deployment_dir
    jar_4 = '%s/lib/udf4.jar' % deployment_dir # Doesn't move
    jar_5 = 'udf5.jar'
    jar_6 = 'lib/udf6.jar' # Doesn't move

    cluster.fs.mkdir(prefix)
    cluster.fs.create(jar_1)
    cluster.fs.create(jar_2)
    cluster.fs.create(jar_3)
    cluster.fs.create(jar_4)
    cluster.fs.create(deployment_dir + '/' + jar_5)
    cluster.fs.create(deployment_dir + '/' + jar_6)

    class MockJob():
      # Fake workflow exposing one node per jar-location scenario.
      XML_FILE_NAME = 'workflow.xml'

      def __init__(self):
        self.deployment_dir = deployment_dir
        self.nodes = [
            Node({'id': '1', 'type': 'mapreduce', 'properties': {'jar_path': jar_1}}),
            Node({'id': '2', 'type': 'mapreduce', 'properties': {'jar_path': jar_2}}),
            Node({'id': '3', 'type': 'java', 'properties': {'jar_path': jar_3}}),
            Node({'id': '4', 'type': 'java', 'properties': {'jar_path': jar_4}}),
            # Workspace relative paths
            Node({'id': '5', 'type': 'java', 'properties': {'jar_path': jar_5}}),
            Node({'id': '6', 'type': 'java', 'properties': {'jar_path': jar_6}})
        ]

    submission = Submission(user, job=MockJob(), fs=cluster.fs, jt=cluster.jt)

    submission._copy_files(deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})
    submission._copy_files(external_deployment_dir, "<xml>My XML</xml>", {'prop1': 'val1'})

    # The workflow XML and job properties land in the deployment dir.
    assert_true(cluster.fs.exists(deployment_dir + '/workflow.xml'), deployment_dir)
    assert_true(cluster.fs.exists(deployment_dir + '/job.properties'), deployment_dir)

    # All sources still there
    assert_true(cluster.fs.exists(jar_1))
    assert_true(cluster.fs.exists(jar_2))
    assert_true(cluster.fs.exists(jar_3))
    assert_true(cluster.fs.exists(jar_4))
    assert_true(cluster.fs.exists(deployment_dir + '/' + jar_5))
    assert_true(cluster.fs.exists(deployment_dir + '/' + jar_6))

    # Lib
    deployment_dir = deployment_dir + '/lib'
    external_deployment_dir = external_deployment_dir + '/lib'

    list_dir_workspace = cluster.fs.listdir(deployment_dir)
    list_dir_deployement = cluster.fs.listdir(external_deployment_dir)

    # All destinations there
    assert_true(cluster.fs.exists(deployment_dir + '/udf1.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf2.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf3.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf4.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf5.jar'), list_dir_workspace)
    assert_true(cluster.fs.exists(deployment_dir + '/udf6.jar'), list_dir_workspace)

    assert_true(cluster.fs.exists(external_deployment_dir + '/udf1.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf2.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf3.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf4.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf5.jar'), list_dir_deployement)
    assert_true(cluster.fs.exists(external_deployment_dir + '/udf6.jar'), list_dir_deployement)

    stats_udf1 = cluster.fs.stats(deployment_dir + '/udf1.jar')
    stats_udf2 = cluster.fs.stats(deployment_dir + '/udf2.jar')
    stats_udf3 = cluster.fs.stats(deployment_dir + '/udf3.jar')
    stats_udf4 = cluster.fs.stats(deployment_dir + '/udf4.jar')
    stats_udf5 = cluster.fs.stats(deployment_dir + '/udf5.jar')
    stats_udf6 = cluster.fs.stats(deployment_dir + '/udf6.jar')

    submission._copy_files('%s/workspace' % prefix, "<xml>My XML</xml>", {'prop1': 'val1'})

    # Jars already in the workspace lib (udf4, udf6) are not re-copied,
    # so their fileIds stay the same; all others were overwritten.
    assert_not_equal(stats_udf1['fileId'], cluster.fs.stats(deployment_dir + '/udf1.jar')['fileId'])
    assert_not_equal(stats_udf2['fileId'], cluster.fs.stats(deployment_dir + '/udf2.jar')['fileId'])
    assert_not_equal(stats_udf3['fileId'], cluster.fs.stats(deployment_dir + '/udf3.jar')['fileId'])
    assert_equal(stats_udf4['fileId'], cluster.fs.stats(deployment_dir + '/udf4.jar')['fileId'])
    assert_not_equal(stats_udf5['fileId'], cluster.fs.stats(deployment_dir + '/udf5.jar')['fileId'])
    assert_equal(stats_udf6['fileId'], cluster.fs.stats(deployment_dir + '/udf6.jar')['fileId'])

    # Test _create_file()
    submission._create_file(deployment_dir, 'test.txt', data='Test data')
    assert_true(cluster.fs.exists(deployment_dir + '/test.txt'), list_dir_workspace)

  finally:
    try:
      cluster.fs.rmtree(prefix)
    except:
      LOG.exception('failed to remove %s' % prefix)
class MockFs():
  """Minimal stand-in for an HDFS client used by the Submission tests."""

  def __init__(self, logical_name=None):
    # Fixed default filesystem URI; the HA logical name defaults to ''.
    self.fs_defaultfs = 'hdfs://curacao:8020'
    self.logical_name = logical_name or ''
class MockJt():
  """Minimal stand-in for a JobTracker client used by the Submission tests."""

  def __init__(self, logical_name=None):
    # The HA logical name defaults to ''.
    self.logical_name = logical_name or ''
class TestSubmission(OozieMockBase):
def test_get_properties(self):
  """Without logical names, raw cluster addresses end up in the properties."""
  submission = Submission(self.user, fs=MockFs())

  assert_equal({'security_enabled': False}, submission.properties)

  submission._update_properties('curacao:8032', '/deployment_dir')

  assert_equal({
      'jobTracker': 'curacao:8032',
      'nameNode': 'hdfs://curacao:8020',
      'security_enabled': False
  }, submission.properties)
def test_get_logical_properties(self):
  """Configured HA logical names take precedence over concrete addresses."""
  submission = Submission(self.user, fs=MockFs(logical_name='fsname'), jt=MockJt(logical_name='jtname'))

  assert_equal({'security_enabled': False}, submission.properties)

  submission._update_properties('curacao:8032', '/deployment_dir')

  assert_equal({
      'jobTracker': 'jtname',
      'nameNode': 'fsname',
      'security_enabled': False
  }, submission.properties)
def test_update_properties(self):
finish = []
finish.append(MR_CLUSTERS.set_for_testing({'default': {}}))
finish.append(MR_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
finish.append(YARN_CLUSTERS.set_for_testing({'default': {}}))
finish.append(YARN_CLUSTERS['default'].SUBMIT_TO.set_for_testing(True))
try:
properties = {
'user.name': 'hue',
'test.1': 'http://localhost/test?test1=test&test2=test',
'nameNode': 'hdfs://curacao:8020',
'jobTracker': 'jtaddress',
'security_enabled': False
}
final_properties = properties.copy()
submission = Submission(None, properties=properties, oozie_id='test', fs=MockFs())
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
cluster.clear_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jtaddress',
'nameNode': fs.fs_defaultfs
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finish.append(HDFS_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('namenode'))
finish.append(MR_CLUSTERS['default'].LOGICAL_NAME.set_for_testing('jobtracker'))
cluster.clear_caches()
fs = cluster.get_hdfs()
jt = cluster.get_next_ha_mrcluster()[1]
final_properties = properties.copy()
final_properties.update({
'jobTracker': 'jobtracker',
'nameNode': 'namenode'
})
submission = Submission(None, properties=properties, oozie_id='test', fs=fs, jt=jt)
assert_equal(properties, submission.properties)
submission._update_properties('jtaddress', 'deployment-directory')
assert_equal(final_properties, submission.properties)
finally:
cluster.clear_caches()
for reset in finish:
reset()
def test_get_external_parameters(self):
xml = """
<workflow-app name="Pig" xmlns="uri:oozie:workflow:0.4">
<start to="Pig"/>
<action name="Pig">
<pig>
<job-tracker>${jobTracker}</job-tracker>
<name-node>${nameNode}</name-node>
<prepare>
<delete path="${output}"/>
</prepare>
<script>aggregate.pig</script>
<argument>-param</argument>
<argument>INPUT=${input}</argument>
<argument>-param</argument>
<argument>OUTPUT=${output}</argument>
<configuration>
<property>
<name>mapred.input.format.class</name>
<value>org.apache.hadoop.examples.SleepJob$SleepInputFormat</value>
</property>
</configuration>
</pig>
<ok to="end"/>
<error to="kill"/>
</action>
<kill name="kill">
<message>Action failed, error message[${wf:errorMessage(wf:lastErrorNode())}]</message>
</kill>
<end name="end"/>
</workflow-app>
"""
properties = """
#
# Licensed to the Hue
#
nameNode=hdfs://localhost:8020
jobTracker=localhost:8021
queueName=default
examplesRoot=examples
oozie.use.system.libpath=true
oozie.wf.application.path=${nameNode}/user/${user.name}/${examplesRoot}/apps/pig
"""
parameters = Submission(self.user)._get_external_parameters(xml, properties)
assert_equal({'oozie.use.system.libpath': 'true',
'input': '',
'jobTracker': 'localhost:8021',
'oozie.wf.application.path': '${nameNode}/user/${user.name}/${examplesRoot}/apps/pig',
'examplesRoot': 'examples',
'output': '',
'nameNode': 'hdfs://localhost:8020',
'queueName': 'default'
},
parameters)
| apache-2.0 |
Jumpscale/jumpscale_core8 | lib/JumpScale/data/idgenerator/IDGenerator.py | 1 | 2290 |
from JumpScale import j
import random
import sys
import string
class IDGenerator:
    """
    generic provider of id's

    lives at j.data.idgenerator
    """

    def __init__(self):
        self.__jslocation__ = "j.data.idgenerator"
        # Cryptographically strong RNG; used for password generation only.
        self.cryptogen = random.SystemRandom()

    def generateRandomInt(self, fromInt, toInt):
        """
        Return a pseudo-random integer N with fromInt <= N <= toInt
        (both bounds inclusive).

        how to use: j.data.idgenerator.generateRandomInt(0, 10)
        """
        return random.randint(fromInt, toInt)

    def generateIncrID(self, incrTypeId, reset=False):
        """
        Return the next value of a named counter stored in the central db.

        type is like agent, job, jobstep
        needs to be a unique type, can only work if application service is known
        how to use: j.data.idgenerator.generateIncrID("agent")
        @reset if True means restart from 1
        """
        key = "incrementor_%s" % incrTypeId
        if reset:
            j.core.db.delete(key)
        return j.core.db.incr(key)

    def getID(self, incrTypeId, objectUniqueSeedInfo, reset=False):
        """
        Get a unique id for an object uniquely identified by its seed info.

        Previously issued ids are remembered in the central db, so the
        same (type, seed) pair always maps to the same integer unless
        @reset is True.
        """
        key = "idint_%s_%s" % (incrTypeId, objectUniqueSeedInfo)
        if j.core.db.exists(key) and reset is False:
            return int(j.core.db.get(key))
        id = self.generateIncrID(incrTypeId)
        j.core.db.set(key, str(id))
        return id

    def generateGUID(self):
        """
        Generate a unique guid (random UUID4, as a string).

        how to use: j.data.idgenerator.generateGUID()
        """
        import uuid
        return str(uuid.uuid4())

    def generateXCharID(self, x):
        """Return a random identifier of length x over [0-9a-z]."""
        alphabet = "1234567890abcdefghijklmnopqrstuvwxyz"
        last = len(alphabet) - 1
        # randint is inclusive on both ends, so every character can occur.
        return "".join(alphabet[self.generateRandomInt(0, last)]
                       for _ in range(x))

    def generatePasswd(self, x, al=string.printable):
        """Return a cryptographically random password of length x drawn
        from the alphabet al.

        Bug fix: the old implementation used randrange(0, len(al) - 1),
        whose upper bound is *exclusive*, so the last character of the
        alphabet could never appear. SystemRandom.choice() samples the
        whole alphabet uniformly.
        """
        return "".join(self.cryptogen.choice(al) for _ in range(x))

    def generateCapnpID(self):
        """
        Generates a valid id for a capnp schema.
        """
        # Cap'n Proto ids are 64-bit values with the top bit set (the
        # bitwise OR is for validating the id, check capnp/parser.c++).
        # getrandbits(64) stays within 64 bits, whereas the previous
        # randint(0, 2 ** 64) could produce a 65-bit value.
        return hex(random.getrandbits(64) | 1 << 63)
| apache-2.0 |
KerkhoffTechnologies/shinken | test/test_star_in_hostgroups.py | 18 | 2032 | #!/usr/bin/env python
# Copyright (C) 2009-2014:
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
# This file is part of Shinken.
#
# Shinken is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Shinken is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
from shinken_test import *
class TestStarInGroups(ShinkenTest):
    """Regression test: '*' inside a hostgroup definition must only expand
    to registered hosts (register != 0), not to templates."""

    def setUp(self):
        # Loading this configuration is itself part of the test: it
        # contains a hostgroup whose members are '*'.
        self.setup_with_file('etc/shinken_star_in_hostgroups.cfg')

    # If we reach a good start, we are ok :)
    # the bug was that an * hostgroup expand get all host_name != ''
    # without looking at register 0 or not
    def test_star_in_groups(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        print "Get the hosts and services"
        now = time.time()
        host = self.sched.hosts.find_by_name("test_host_0")
        host.checks_in_progress = []
        host.act_depend_of = []  # ignore the router
        router = self.sched.hosts.find_by_name("test_router_0")
        router.checks_in_progress = []
        router.act_depend_of = []  # ignore the router

        # Both services applied through the '*' hostgroup must have been
        # created on the registered host.
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST")
        self.assertIsNot(svc, None)
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "TEST_HNAME_STAR")
        self.assertIsNot(svc, None)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| agpl-3.0 |
DaanHoogland/cloudstack | tools/apidoc/gen_toc.py | 1 | 9536 | #!/cygdrive/c/Python27
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import os.path
import sys
from xml.dom import minidom
from xml.parsers.expat import ExpatError
# The TOC is currently generated for the root-admin view only.
ROOT_ADMIN = 'r'

# Name of the generated Java method that populates the command set
# for each user level.
user_to_func = {
    ROOT_ADMIN: 'populateForApi',
}

# Name of the Java Set<String> field that holds the command names.
user_to_cns = {
    ROOT_ADMIN: 'allCommandNames',
}

# Which user level each input directory's XML files belong to.
dirname_to_user = {
    'apis': ROOT_ADMIN,
}

# Output directory for the generated HTML pages, keyed by input directory.
dirname_to_dirname = {
    'apis': 'apis',
}
# Maps a substring of an API command name to the TOC category it belongs
# to; choose_category() scans these keys in order against each file name.
# Fix: the original dict listed 'SystemVm' and 'UnmanagedInstance' twice
# with identical values; the duplicates are removed (no behavior change,
# since a later duplicate key simply overwrote the earlier one).
known_categories = {
    'Cisco': 'External Device',
    'SystemVm': 'System VM',
    'VirtualMachine': 'Virtual Machine',
    'VM': 'Virtual Machine',
    'Domain': 'Domain',
    'Template': 'Template',
    'Iso': 'ISO',
    'Volume': 'Volume',
    'Vlan': 'VLAN',
    'IpAddress': 'Address',
    'PortForwarding': 'Firewall',
    'Firewall': 'Firewall',
    'StaticNat': 'NAT',
    'IpForwarding': 'NAT',
    'Host': 'Host',
    'OutOfBand': 'Out-of-band Management',
    'Cluster': 'Cluster',
    'Account': 'Account',
    'Role': 'Role',
    'Snapshot': 'Snapshot',
    'User': 'User',
    'Os': 'Guest OS',
    'ServiceOffering': 'Service Offering',
    'DiskOffering': 'Disk Offering',
    'LoadBalancer': 'Load Balancer',
    'SslCert': 'Load Balancer',
    'Router': 'Router',
    'Configuration': 'Configuration',
    'Capabilities': 'Configuration',
    'Pod': 'Pod',
    'PublicIpRange': 'Network',
    'Zone': 'Zone',
    'Vmware': 'Zone',
    'NetworkOffering': 'Network Offering',
    'NetworkACL': 'Network ACL',
    'Network': 'Network',
    'CiscoNexus': 'Network',
    'OpenDaylight': 'Network',
    'createServiceInstance': 'Network',
    'addGloboDnsHost': 'Network',
    'Vpn': 'VPN',
    'Limit': 'Limit',
    'ResourceCount': 'Limit',
    'CloudIdentifier': 'Cloud Identifier',
    'InstanceGroup': 'VM Group',
    'StorageMaintenance': 'Storage Pool',
    'StoragePool': 'Storage Pool',
    'StorageProvider': 'Storage Pool',
    'SecurityGroup': 'Security Group',
    'SSH': 'SSH',
    'register': 'Registration',
    'AsyncJob': 'Async job',
    'Certificate': 'Certificate',
    'Hypervisor': 'Hypervisor',
    'Alert': 'Alert',
    'Event': 'Event',
    'login': 'Authentication',
    'logout': 'Authentication',
    'saml': 'Authentication',
    'getSPMetadata': 'Authentication',
    'listIdps': 'Authentication',
    'authorizeSamlSso': 'Authentication',
    'listSamlAuthorization': 'Authentication',
    'quota': 'Quota',
    'emailTemplate': 'Quota',
    'Capacity': 'System Capacity',
    'NetworkDevice': 'Network Device',
    'ExternalLoadBalancer': 'Ext Load Balancer',
    'ExternalFirewall': 'Ext Firewall',
    'Usage': 'Usage',
    'TrafficMonitor': 'Usage',
    'TrafficType': 'Usage',
    'Product': 'Product',
    'LB': 'Load Balancer',
    'ldap': 'LDAP',
    'Ldap': 'LDAP',
    'Swift': 'Swift',
    'S3': 'S3',
    'SecondaryStorage': 'Host',
    'Project': 'Project',
    'Lun': 'Storage',
    'Pool': 'Pool',
    'VPC': 'VPC',
    'PrivateGateway': 'VPC',
    'migrateVpc': 'VPC',
    'Simulator': 'simulator',
    'StaticRoute': 'VPC',
    'Tags': 'Resource tags',
    'NiciraNvpDevice': 'Nicira NVP',
    'BrocadeVcsDevice': 'Brocade VCS',
    'BigSwitchBcfDevice': 'BigSwitch BCF',
    'AutoScale': 'AutoScale',
    'Counter': 'AutoScale',
    'Condition': 'AutoScale',
    'Api': 'API Discovery',
    'Region': 'Region',
    'Detail': 'Resource metadata',
    'addIpToNic': 'Nic',
    'removeIpFromNic': 'Nic',
    'updateVmNicIp': 'Nic',
    'listNics': 'Nic',
    'AffinityGroup': 'Affinity Group',
    'addImageStore': 'Image Store',
    'listImageStore': 'Image Store',
    'deleteImageStore': 'Image Store',
    'createSecondaryStagingStore': 'Image Store',
    'deleteSecondaryStagingStore': 'Image Store',
    'listSecondaryStagingStores': 'Image Store',
    'InternalLoadBalancer': 'Internal LB',
    'DeploymentPlanners': 'Configuration',
    'ObjectStore': 'Image Store',
    'PortableIp': 'Portable IP',
    'dedicateHost': 'Dedicate Resources',
    'releaseDedicatedHost': 'Dedicate Resources',
    'Baremetal': 'Baremetal',
    'UCS': 'UCS',
    'Ucs': 'UCS',
    'CacheStores': 'Cache Stores',
    'CacheStore': 'Cache Store',
    'OvsElement': 'Ovs Element',
    'StratosphereSsp': ' Stratosphere SSP',
    'Metrics': 'Metrics',
    'Infrastructure': 'Metrics',
    'listNetscalerControlCenter': 'Load Balancer',
    'listRegisteredServicePackages': 'Load Balancer',
    'listNsVpx': 'Load Balancer',
    'destroyNsVPx': 'Load Balancer',
    'deployNetscalerVpx': 'Load Balancer',
    'deleteNetscalerControlCenter': 'Load Balancer',
    'stopNetScalerVpx': 'Load Balancer',
    'deleteServicePackageOffering': 'Load Balancer',
    'destroyNsVpx': 'Load Balancer',
    'startNsVpx': 'Load Balancer',
    'listAnnotations': 'Annotations',
    'addAnnotation': 'Annotations',
    'removeAnnotation': 'Annotations',
    'CA': 'Certificate',
    'listElastistorInterface': 'Misc',
    'cloudian': 'Cloudian',
    'Sioc': 'Sioc',
    'Diagnostics': 'Diagnostics',
    'Management': 'Management',
    'Backup': 'Backup and Recovery',
    'Restore': 'Backup and Recovery',
    'UnmanagedInstance': 'Virtual Machine',
    'KubernetesSupportedVersion': 'Kubernetes Service',
    'KubernetesCluster': 'Kubernetes Service',
    'Rolling': 'Rolling Maintenance'
}
# Accumulates {category_name: [command_info, ...]} while scanning argv.
categories = {}


def choose_category(fn):
    """Return the TOC category for API file name *fn*.

    The first known_categories key that occurs as a substring of *fn*
    wins; an unknown file name raises so new APIs must be categorized
    explicitly.

    Fix: the original had an unreachable `sys.exit(1)` after the raise;
    removed as dead code.
    """
    for k, v in known_categories.items():
        if k in fn:
            return v
    raise Exception('Need to add a category for %s to %s:known_categories' %
                    (fn, __file__))
# Scan every API XML file named on the command line and bucket the
# command it describes into `categories` by TOC category.
for f in sys.argv:
    dirname, fn = os.path.split(f)
    # Skip non-XML inputs and the generated summary/alert files.
    if not fn.endswith('.xml'):
        continue
    if fn.endswith('Summary.xml'):
        continue
    if fn.endswith('SummarySorted.xml'):
        continue
    if fn == 'alert_types.xml':
        continue
    if dirname.startswith('./'):
        dirname = dirname[2:]
    try:
        with open(f) as data:
            dom = minidom.parse(data)
        name = dom.getElementsByTagName('name')[0].firstChild.data
        isAsync = dom.getElementsByTagName('isAsync')[0].firstChild.data
        category = choose_category(fn)
        if category not in categories:
            categories[category] = []
        categories[category].append({
            'name': name,
            'dirname': dirname_to_dirname[dirname],
            'async': isAsync == 'true',
            'user': dirname_to_user[dirname],
        })
    except ExpatError as e:
        # Malformed XML input files are silently skipped.
        pass
    except IndexError as e:
        # <name>/<isAsync> missing: report the offending file and continue.
        print(fn)
def xml_for(command):
    """Return an XSLT <xsl:if> fragment linking to *command*'s HTML page.

    Asynchronous commands are marked with ' (A)' after the link text.

    Fix: the intermediate variable was literally named `async`, which has
    been a reserved keyword since Python 3.7 and made this file a
    SyntaxError on modern interpreters; renamed and the `% locals()`
    trick replaced with an explicit mapping. Output is unchanged.
    """
    name = command['name']
    async_marker = ' (A)' if command['async'] else ''
    dirname = command['dirname']
    return '''<xsl:if test="name=\'%(name)s\'">
    <li><a href="%(dirname)s/%(name)s.html"><xsl:value-of select="name"/>%(async)s</a></li>
</xsl:if>
''' % {'name': name, 'dirname': dirname, 'async': async_marker}
def write_xml(out, user):
    """Write the XSL table-of-contents include for *user* to file *out*.

    Categories are emitted largest-first, grouped four to a row inside
    <div class="apismallsections"> wrappers.
    """
    with open(out, 'w') as f:
        # (count, category, concatenated <xsl:if> fragments) per category
        # that has at least one command for this user.
        cat_strings = []
        for category in categories.keys():
            strings = []
            for command in categories[category]:
                if command['user'] == user:
                    strings.append(xml_for(command))
            if strings:
                all_strings = ''.join(strings)
                cat_strings.append((len(strings), category, all_strings))
        # Sort descending by command count (tuple order).
        cat_strings.sort(reverse=True)

        # `i` counts categories within the current row of four.
        i = 0
        for _1, category, all_strings in cat_strings:
            if i == 0:
                f.write('<div class="apismallsections">\n')
            f.write('''<div class="apismallbullet_box">
<h5>%(category)s</h5>
<ul>
<xsl:for-each select="commands/command">
%(all_strings)s
</xsl:for-each>
</ul>
</div>
''' % locals())
            if i == 3:
                # Row of four complete: close the wrapper div.
                f.write('</div>\n')
                i = 0
            else:
                i += 1
        # Close a trailing, partially-filled row.
        if i != 0:
            f.write('</div>\n')
def java_for(command, user):
    """Render the Java statement that adds *command*'s name to the
    command-name set belonging to *user*."""
    return '%s.add("%s");\n' % (user_to_cns[user], command['name'])
def java_for_user(user):
    """Build the Java method that registers every command visible to
    *user*, as one source-code string."""
    body = ''.join(
        java_for(command, user)
        for commands in categories.values()
        for command in commands
        if command['user'] == user
    )
    return '''
public void %(func)s() {
%(all_strings)s
}
''' % {'func': user_to_func[user], 'all_strings': body}
def write_java(out):
    """Write the generated XmlToHtmlConverterData Java class to *out*.

    The class holds the set of all command names and the populate method
    produced by java_for_user().
    """
    with open(out, 'w') as f:
        # Fixed class preamble; the populate method body is generated.
        f.write('''/* Generated using gen_toc.py. Do not edit. */

import java.util.HashSet;
import java.util.Set;

public class XmlToHtmlConverterData {
Set<String> allCommandNames = new HashSet<String>();
''')
        f.write(java_for_user(ROOT_ADMIN) + "\n")
        f.write('''
}
''')
# Emit both artifacts for the root-admin API set.
write_xml('generatetoc_include.xsl', ROOT_ADMIN)
write_java('XmlToHtmlConverterData.java')
| apache-2.0 |
firerszd/kbengine | kbe/src/lib/python/Lib/test/test_unicode.py | 67 | 116018 | """ Test script for the Unicode implementation.
Written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""#"
import _string
import codecs
import itertools
import struct
import sys
import unittest
import warnings
from test import support, string_tests
# Error handling (bad decoder return)
def search_function(encoding):
    """Codec search hook that serves two deliberately broken codecs.

    'test.unicode1' returns encode/decode callables that yield a bare
    int (not a tuple); 'test.unicode2' returns callables that yield a
    tuple without a str. Both are used to exercise bad-decoder error
    handling; anything else returns None.
    """
    def encode1(input, errors="strict"):
        return 42  # not a tuple

    def decode1(input, errors="strict"):
        return 42  # not a tuple

    def encode2(input, errors="strict"):
        return (42, 42)  # no unicode

    def decode2(input, errors="strict"):
        return (42, 42)  # no unicode

    if encoding == "test.unicode1":
        return (encode1, decode1, None, None)
    if encoding == "test.unicode2":
        return (encode2, decode2, None, None)
    return None

codecs.register(search_function)
def duplicate_string(text):
    """
    Try to get a fresh clone of the specified text:
    new object with a reference count of 1.

    This is a best-effort: latin1 single letters and the empty
    string ('') are singletons and cannot be cloned.
    """
    encoded = text.encode()
    return encoded.decode()
class UnicodeTest(string_tests.CommonTest,
string_tests.MixinStrUnicodeUserStringTest,
string_tests.MixinStrUnicodeTest,
unittest.TestCase):
type2test = str
    def checkequalnofix(self, result, object, methodname, *args):
        """Assert object.methodname(*args) == result with an exact type
        match, and verify that str subclasses never get the original
        object back from the method."""
        method = getattr(object, methodname)
        realresult = method(*args)
        self.assertEqual(realresult, result)
        self.assertTrue(type(realresult) is type(result))

        # if the original is returned make sure that
        # this doesn't happen with subclasses
        if realresult is object:
            class usub(str):
                def __repr__(self):
                    return 'usub(%r)' % str.__repr__(self)
            object = usub(object)
            method = getattr(object, methodname)
            realresult = method(*args)
            self.assertEqual(realresult, result)
            self.assertTrue(object is not realresult)
    def test_literals(self):
        """Escape sequences in literals, and rejection of out-of-range
        \\U escapes (max code point is U+10FFFF)."""
        self.assertEqual('\xff', '\u00ff')
        self.assertEqual('\uffff', '\U0000ffff')
        self.assertRaises(SyntaxError, eval, '\'\\Ufffffffe\'')
        self.assertRaises(SyntaxError, eval, '\'\\Uffffffff\'')
        self.assertRaises(SyntaxError, eval, '\'\\U%08x\'' % 0x110000)
        # raw strings should not have unicode escapes
        self.assertNotEqual(r"\u0020", " ")

    def test_ascii(self):
        """ascii() escapes all non-ASCII and non-printable characters."""
        if not sys.platform.startswith('java'):
            # Test basic sanity of repr()
            self.assertEqual(ascii('abc'), "'abc'")
            self.assertEqual(ascii('ab\\c'), "'ab\\\\c'")
            self.assertEqual(ascii('ab\\'), "'ab\\\\'")
            self.assertEqual(ascii('\\c'), "'\\\\c'")
            self.assertEqual(ascii('\\'), "'\\\\'")
            self.assertEqual(ascii('\n'), "'\\n'")
            self.assertEqual(ascii('\r'), "'\\r'")
            self.assertEqual(ascii('\t'), "'\\t'")
            self.assertEqual(ascii('\b'), "'\\x08'")
            self.assertEqual(ascii("'\""), """'\\'"'""")
            self.assertEqual(ascii("'\""), """'\\'"'""")
            self.assertEqual(ascii("'"), '''"'"''')
            self.assertEqual(ascii('"'), """'"'""")
            # Expected ascii() of chr(0)..chr(255): every non-ASCII byte
            # appears as a \xNN escape.
            latin1repr = (
                "'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
                "\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
                "\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
                "JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
                "\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
                "\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
                "\\x9c\\x9d\\x9e\\x9f\\xa0\\xa1\\xa2\\xa3\\xa4\\xa5\\xa6\\xa7\\xa8\\xa9"
                "\\xaa\\xab\\xac\\xad\\xae\\xaf\\xb0\\xb1\\xb2\\xb3\\xb4\\xb5\\xb6\\xb7"
                "\\xb8\\xb9\\xba\\xbb\\xbc\\xbd\\xbe\\xbf\\xc0\\xc1\\xc2\\xc3\\xc4\\xc5"
                "\\xc6\\xc7\\xc8\\xc9\\xca\\xcb\\xcc\\xcd\\xce\\xcf\\xd0\\xd1\\xd2\\xd3"
                "\\xd4\\xd5\\xd6\\xd7\\xd8\\xd9\\xda\\xdb\\xdc\\xdd\\xde\\xdf\\xe0\\xe1"
                "\\xe2\\xe3\\xe4\\xe5\\xe6\\xe7\\xe8\\xe9\\xea\\xeb\\xec\\xed\\xee\\xef"
                "\\xf0\\xf1\\xf2\\xf3\\xf4\\xf5\\xf6\\xf7\\xf8\\xf9\\xfa\\xfb\\xfc\\xfd"
                "\\xfe\\xff'")
            testrepr = ascii(''.join(map(chr, range(256))))
            self.assertEqual(testrepr, latin1repr)
            # Test ascii works on wide unicode escapes without overflow.
            self.assertEqual(ascii("\U00010000" * 39 + "\uffff" * 4096),
                             ascii("\U00010000" * 39 + "\uffff" * 4096))

        class WrongRepr:
            def __repr__(self):
                return b'byte-repr'
        self.assertRaises(TypeError, ascii, WrongRepr())
    def test_repr(self):
        """repr() keeps printable non-ASCII characters literal and only
        escapes the non-printable ones (unlike ascii())."""
        if not sys.platform.startswith('java'):
            # Test basic sanity of repr()
            self.assertEqual(repr('abc'), "'abc'")
            self.assertEqual(repr('ab\\c'), "'ab\\\\c'")
            self.assertEqual(repr('ab\\'), "'ab\\\\'")
            self.assertEqual(repr('\\c'), "'\\\\c'")
            self.assertEqual(repr('\\'), "'\\\\'")
            self.assertEqual(repr('\n'), "'\\n'")
            self.assertEqual(repr('\r'), "'\\r'")
            self.assertEqual(repr('\t'), "'\\t'")
            self.assertEqual(repr('\b'), "'\\x08'")
            self.assertEqual(repr("'\""), """'\\'"'""")
            self.assertEqual(repr("'\""), """'\\'"'""")
            self.assertEqual(repr("'"), '''"'"''')
            self.assertEqual(repr('"'), """'"'""")
            # Expected repr() of chr(0)..chr(255). Note the deliberate mix:
            # non-printable characters (e.g. U+00AD soft hyphen) appear as
            # escaped '\\xNN' text, printable Latin-1 ones as themselves.
            latin1repr = (
                "'\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x07\\x08\\t\\n\\x0b\\x0c\\r"
                "\\x0e\\x0f\\x10\\x11\\x12\\x13\\x14\\x15\\x16\\x17\\x18\\x19\\x1a"
                "\\x1b\\x1c\\x1d\\x1e\\x1f !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHI"
                "JKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\\x7f"
                "\\x80\\x81\\x82\\x83\\x84\\x85\\x86\\x87\\x88\\x89\\x8a\\x8b\\x8c\\x8d"
                "\\x8e\\x8f\\x90\\x91\\x92\\x93\\x94\\x95\\x96\\x97\\x98\\x99\\x9a\\x9b"
                "\\x9c\\x9d\\x9e\\x9f\\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9"
                "\xaa\xab\xac\\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
                "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf\xc0\xc1\xc2\xc3\xc4\xc5"
                "\xc6\xc7\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf\xd0\xd1\xd2\xd3"
                "\xd4\xd5\xd6\xd7\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf\xe0\xe1"
                "\xe2\xe3\xe4\xe5\xe6\xe7\xe8\xe9\xea\xeb\xec\xed\xee\xef"
                "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7\xf8\xf9\xfa\xfb\xfc\xfd"
                "\xfe\xff'")
            testrepr = repr(''.join(map(chr, range(256))))
            self.assertEqual(testrepr, latin1repr)
            # Test repr works on wide unicode escapes without overflow.
            self.assertEqual(repr("\U00010000" * 39 + "\uffff" * 4096),
                             repr("\U00010000" * 39 + "\uffff" * 4096))

        class WrongRepr:
            def __repr__(self):
                return b'byte-repr'
        self.assertRaises(TypeError, repr, WrongRepr())

    def test_iterators(self):
        # Make sure unicode objects have an __iter__ method
        it = "\u1111\u2222\u3333".__iter__()
        self.assertEqual(next(it), "\u1111")
        self.assertEqual(next(it), "\u2222")
        self.assertEqual(next(it), "\u3333")
        self.assertRaises(StopIteration, next, it)
    def test_count(self):
        """str.count with negative start/end offsets."""
        string_tests.CommonTest.test_count(self)
        # check mixed argument types
        self.checkequalnofix(3, 'aaa', 'count', 'a')
        self.checkequalnofix(0, 'aaa', 'count', 'b')
        self.checkequalnofix(3, 'aaa', 'count', 'a')
        self.checkequalnofix(0, 'aaa', 'count', 'b')
        self.checkequalnofix(0, 'aaa', 'count', 'b')
        self.checkequalnofix(1, 'aaa', 'count', 'a', -1)
        self.checkequalnofix(3, 'aaa', 'count', 'a', -10)
        self.checkequalnofix(2, 'aaa', 'count', 'a', 0, -1)
        self.checkequalnofix(0, 'aaa', 'count', 'a', 0, -10)

    def test_find(self):
        string_tests.CommonTest.test_find(self)
        # test implementation details of the memchr fast path:
        # needles whose low/high byte halves match a haystack character
        # must not produce false positives.
        self.checkequal(100, 'a' * 100 + '\u0102', 'find', '\u0102')
        self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0201')
        self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0120')
        self.checkequal(-1, 'a' * 100 + '\u0102', 'find', '\u0220')
        self.checkequal(100, 'a' * 100 + '\U00100304', 'find', '\U00100304')
        self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00100204')
        self.checkequal(-1, 'a' * 100 + '\U00100304', 'find', '\U00102004')
        # check mixed argument types
        self.checkequalnofix(0, 'abcdefghiabc', 'find', 'abc')
        self.checkequalnofix(9, 'abcdefghiabc', 'find', 'abc', 1)
        self.checkequalnofix(-1, 'abcdefghiabc', 'find', 'def', 4)

        self.assertRaises(TypeError, 'hello'.find)
        self.assertRaises(TypeError, 'hello'.find, 42)

    def test_rfind(self):
        string_tests.CommonTest.test_rfind(self)
        # test implementation details of the memrchr fast path
        self.checkequal(0, '\u0102' + 'a' * 100 , 'rfind', '\u0102')
        self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0201')
        self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0120')
        self.checkequal(-1, '\u0102' + 'a' * 100 , 'rfind', '\u0220')
        self.checkequal(0, '\U00100304' + 'a' * 100, 'rfind', '\U00100304')
        self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00100204')
        self.checkequal(-1, '\U00100304' + 'a' * 100, 'rfind', '\U00102004')
        # check mixed argument types
        self.checkequalnofix(9, 'abcdefghiabc', 'rfind', 'abc')
        self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')
        self.checkequalnofix(12, 'abcdefghiabc', 'rfind', '')

    def test_index(self):
        """Like find(), but a missing substring raises ValueError."""
        string_tests.CommonTest.test_index(self)
        self.checkequalnofix(0, 'abcdefghiabc', 'index', '')
        self.checkequalnofix(3, 'abcdefghiabc', 'index', 'def')
        self.checkequalnofix(0, 'abcdefghiabc', 'index', 'abc')
        self.checkequalnofix(9, 'abcdefghiabc', 'index', 'abc', 1)
        self.assertRaises(ValueError, 'abcdefghiabc'.index, 'hib')
        self.assertRaises(ValueError, 'abcdefghiab'.index, 'abc', 1)
        self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', 8)
        self.assertRaises(ValueError, 'abcdefghi'.index, 'ghi', -1)

    def test_rindex(self):
        string_tests.CommonTest.test_rindex(self)
        self.checkequalnofix(12, 'abcdefghiabc', 'rindex', '')
        self.checkequalnofix(3, 'abcdefghiabc', 'rindex', 'def')
        self.checkequalnofix(9, 'abcdefghiabc', 'rindex', 'abc')
        self.checkequalnofix(0, 'abcdefghiabc', 'rindex', 'abc', 0, -1)

        self.assertRaises(ValueError, 'abcdefghiabc'.rindex, 'hib')
        self.assertRaises(ValueError, 'defghiabc'.rindex, 'def', 1)
        self.assertRaises(ValueError, 'defghiabc'.rindex, 'abc', 0, -1)
        self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, 8)
        self.assertRaises(ValueError, 'abcdefghi'.rindex, 'ghi', 0, -1)
    def test_maketrans_translate(self):
        """str.translate() with raw mapping dicts and with tables built
        by str.maketrans(), plus maketrans() argument validation."""
        # these work with plain translate()
        self.checkequalnofix('bbbc', 'abababc', 'translate',
                             {ord('a'): None})
        self.checkequalnofix('iiic', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ord('i')})
        self.checkequalnofix('iiix', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ord('i'), ord('c'): 'x'})
        self.checkequalnofix('c', 'abababc', 'translate',
                             {ord('a'): None, ord('b'): ''})
        self.checkequalnofix('xyyx', 'xzx', 'translate',
                             {ord('z'): 'yy'})

        # this needs maketrans(): string keys are not accepted directly,
        # so translating with {'b': ...} leaves the input untouched.
        self.checkequalnofix('abababc', 'abababc', 'translate',
                             {'b': '<i>'})
        tbl = self.type2test.maketrans({'a': None, 'b': '<i>'})
        self.checkequalnofix('<i><i><i>c', 'abababc', 'translate', tbl)
        # test alternative way of calling maketrans()
        tbl = self.type2test.maketrans('abc', 'xyz', 'd')
        self.checkequalnofix('xyzzy', 'abdcdcbdddd', 'translate', tbl)

        self.assertRaises(TypeError, self.type2test.maketrans)
        self.assertRaises(ValueError, self.type2test.maketrans, 'abc', 'defg')
        self.assertRaises(TypeError, self.type2test.maketrans, 2, 'def')
        self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 2)
        self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def', 2)
        self.assertRaises(ValueError, self.type2test.maketrans, {'xy': 2})
        self.assertRaises(TypeError, self.type2test.maketrans, {(1,): 2})

        self.assertRaises(TypeError, 'hello'.translate)
        self.assertRaises(TypeError, 'abababc'.translate, 'abc', 'xyz')

    def test_split(self):
        string_tests.CommonTest.test_split(self)

        # Mixed arguments
        self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
        self.checkequalnofix(['a', 'b', 'c', 'd'], 'a//b//c//d', 'split', '//')
        self.checkequalnofix(['endcase ', ''], 'endcase test', 'split', 'test')

    def test_join(self):
        string_tests.MixinStrUnicodeUserStringTest.test_join(self)

        class MyWrapper:
            def __init__(self, sval): self.sval = sval
            def __str__(self): return self.sval

        # mixed arguments
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('a b c d', ' ', 'join', ['a', 'b', 'c', 'd'])
        self.checkequalnofix('abcd', '', 'join', ('a', 'b', 'c', 'd'))
        self.checkequalnofix('w x y z', ' ', 'join', string_tests.Sequence('wxyz'))
        # join() never falls back to str(): non-str items raise TypeError.
        self.checkraises(TypeError, ' ', 'join', ['1', '2', MyWrapper('foo')])
        self.checkraises(TypeError, ' ', 'join', ['1', '2', '3', bytes()])
        self.checkraises(TypeError, ' ', 'join', [1, 2, 3])
        self.checkraises(TypeError, ' ', 'join', ['1', '2', 3])
    def test_replace(self):
        string_tests.CommonTest.test_replace(self)

        # method call forwarded from str implementation because of unicode argument
        self.checkequalnofix('one@two!three!', 'one!two!three!', 'replace', '!', '@', 1)
        self.assertRaises(TypeError, 'replace'.replace, "r", 42)

    @support.cpython_only
    def test_replace_id(self):
        # CPython detail: replacing a pattern with itself returns the
        # original object unchanged.
        pattern = 'abc'
        text = 'abc def'
        self.assertIs(text.replace(pattern, pattern), text)

    def test_bytes_comparison(self):
        # str never compares equal to bytes/bytearray; the BytesWarning
        # raised under -b is silenced here.
        with support.check_warnings():
            warnings.simplefilter('ignore', BytesWarning)
            self.assertEqual('abc' == b'abc', False)
            self.assertEqual('abc' != b'abc', True)
            self.assertEqual('abc' == bytearray(b'abc'), False)
            self.assertEqual('abc' != bytearray(b'abc'), True)

    def test_comparison(self):
        # Comparisons:
        self.assertEqual('abc', 'abc')
        self.assertTrue('abcd' > 'abc')
        self.assertTrue('abc' < 'abcd')

        # Disabled block: UTF-16 code-point-order comparison tests kept
        # for reference only.
        if 0:
            # Move these tests to a Unicode collation module test...
            # Testing UTF-16 code point order comparisons...
            # No surrogates, no fixup required.
            self.assertTrue('\u0061' < '\u20ac')
            # Non surrogate below surrogate value, no fixup required
            self.assertTrue('\u0061' < '\ud800\udc02')

            # Non surrogate above surrogate value, fixup required
            def test_lecmp(s, s2):
                self.assertTrue(s < s2)

            def test_fixup(s):
                s2 = '\ud800\udc01'
                test_lecmp(s, s2)
                s2 = '\ud900\udc01'
                test_lecmp(s, s2)
                s2 = '\uda00\udc01'
                test_lecmp(s, s2)
                s2 = '\udb00\udc01'
                test_lecmp(s, s2)
                s2 = '\ud800\udd01'
                test_lecmp(s, s2)
                s2 = '\ud900\udd01'
                test_lecmp(s, s2)
                s2 = '\uda00\udd01'
                test_lecmp(s, s2)
                s2 = '\udb00\udd01'
                test_lecmp(s, s2)
                s2 = '\ud800\ude01'
                test_lecmp(s, s2)
                s2 = '\ud900\ude01'
                test_lecmp(s, s2)
                s2 = '\uda00\ude01'
                test_lecmp(s, s2)
                s2 = '\udb00\ude01'
                test_lecmp(s, s2)
                s2 = '\ud800\udfff'
                test_lecmp(s, s2)
                s2 = '\ud900\udfff'
                test_lecmp(s, s2)
                s2 = '\uda00\udfff'
                test_lecmp(s, s2)
                s2 = '\udb00\udfff'
                test_lecmp(s, s2)

            test_fixup('\ue000')
            test_fixup('\uff61')

            # Surrogates on both sides, no fixup required
            self.assertTrue('\ud800\udc02' < '\ud84d\udc56')
    def test_islower(self):
        """islower() on cased/uncased BMP and non-BMP characters."""
        string_tests.MixinStrUnicodeUserStringTest.test_islower(self)
        self.checkequalnofix(False, '\u1FFc', 'islower')
        self.assertFalse('\u2167'.islower())
        self.assertTrue('\u2177'.islower())
        # non-BMP, uppercase
        self.assertFalse('\U00010401'.islower())
        self.assertFalse('\U00010427'.islower())
        # non-BMP, lowercase
        self.assertTrue('\U00010429'.islower())
        self.assertTrue('\U0001044E'.islower())
        # non-BMP, non-cased
        self.assertFalse('\U0001F40D'.islower())
        self.assertFalse('\U0001F46F'.islower())

    def test_isupper(self):
        string_tests.MixinStrUnicodeUserStringTest.test_isupper(self)
        if not sys.platform.startswith('java'):
            self.checkequalnofix(False, '\u1FFc', 'isupper')
        self.assertTrue('\u2167'.isupper())
        self.assertFalse('\u2177'.isupper())
        # non-BMP, uppercase
        self.assertTrue('\U00010401'.isupper())
        self.assertTrue('\U00010427'.isupper())
        # non-BMP, lowercase
        self.assertFalse('\U00010429'.isupper())
        self.assertFalse('\U0001044E'.isupper())
        # non-BMP, non-cased
        self.assertFalse('\U0001F40D'.isupper())
        self.assertFalse('\U0001F46F'.isupper())

    def test_istitle(self):
        string_tests.MixinStrUnicodeUserStringTest.test_istitle(self)
        self.checkequalnofix(True, '\u1FFc', 'istitle')
        self.checkequalnofix(True, 'Greek \u1FFcitlecases ...', 'istitle')

        # non-BMP, uppercase + lowercase
        self.assertTrue('\U00010401\U00010429'.istitle())
        self.assertTrue('\U00010427\U0001044E'.istitle())
        # apparently there are no titlecased (Lt) non-BMP chars in Unicode 6
        for ch in ['\U00010429', '\U0001044E', '\U0001F40D', '\U0001F46F']:
            self.assertFalse(ch.istitle(), '{!a} is not title'.format(ch))

    def test_isspace(self):
        string_tests.MixinStrUnicodeUserStringTest.test_isspace(self)
        self.checkequalnofix(True, '\u2000', 'isspace')
        self.checkequalnofix(True, '\u200a', 'isspace')
        self.checkequalnofix(False, '\u2014', 'isspace')
        # apparently there are no non-BMP spaces chars in Unicode 6
        for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
                   '\U0001F40D', '\U0001F46F']:
            self.assertFalse(ch.isspace(), '{!a} is not space.'.format(ch))

    def test_isalnum(self):
        string_tests.MixinStrUnicodeUserStringTest.test_isalnum(self)
        for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
                   '\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
            self.assertTrue(ch.isalnum(), '{!a} is alnum.'.format(ch))

    def test_isalpha(self):
        string_tests.MixinStrUnicodeUserStringTest.test_isalpha(self)
        self.checkequalnofix(True, '\u1FFc', 'isalpha')
        # non-BMP, cased
        self.assertTrue('\U00010401'.isalpha())
        self.assertTrue('\U00010427'.isalpha())
        self.assertTrue('\U00010429'.isalpha())
        self.assertTrue('\U0001044E'.isalpha())
        # non-BMP, non-cased
        self.assertFalse('\U0001F40D'.isalpha())
        self.assertFalse('\U0001F46F'.isalpha())

    def test_isdecimal(self):
        """isdecimal() is true only for Nd (decimal digit) characters,
        a strict subset of isdigit()."""
        self.checkequalnofix(False, '', 'isdecimal')
        self.checkequalnofix(False, 'a', 'isdecimal')
        self.checkequalnofix(True, '0', 'isdecimal')
        self.checkequalnofix(False, '\u2460', 'isdecimal')  # CIRCLED DIGIT ONE
        self.checkequalnofix(False, '\xbc', 'isdecimal')  # VULGAR FRACTION ONE QUARTER
        self.checkequalnofix(True, '\u0660', 'isdecimal')  # ARABIC-INDIC DIGIT ZERO
        self.checkequalnofix(True, '0123456789', 'isdecimal')
        self.checkequalnofix(False, '0123456789a', 'isdecimal')

        self.checkraises(TypeError, 'abc', 'isdecimal', 42)

        for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
                   '\U0001F40D', '\U0001F46F', '\U00011065', '\U0001F107']:
            self.assertFalse(ch.isdecimal(), '{!a} is not decimal.'.format(ch))
        for ch in ['\U0001D7F6', '\U00011066', '\U000104A0']:
            self.assertTrue(ch.isdecimal(), '{!a} is decimal.'.format(ch))

    def test_isdigit(self):
        string_tests.MixinStrUnicodeUserStringTest.test_isdigit(self)
        self.checkequalnofix(True, '\u2460', 'isdigit')
        self.checkequalnofix(False, '\xbc', 'isdigit')
        self.checkequalnofix(True, '\u0660', 'isdigit')

        for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
                   '\U0001F40D', '\U0001F46F', '\U00011065']:
            self.assertFalse(ch.isdigit(), '{!a} is not a digit.'.format(ch))
        for ch in ['\U0001D7F6', '\U00011066', '\U000104A0', '\U0001F107']:
            self.assertTrue(ch.isdigit(), '{!a} is a digit.'.format(ch))
def test_isnumeric(self):
self.checkequalnofix(False, '', 'isnumeric')
self.checkequalnofix(False, 'a', 'isnumeric')
self.checkequalnofix(True, '0', 'isnumeric')
self.checkequalnofix(True, '\u2460', 'isnumeric')
self.checkequalnofix(True, '\xbc', 'isnumeric')
self.checkequalnofix(True, '\u0660', 'isnumeric')
self.checkequalnofix(True, '0123456789', 'isnumeric')
self.checkequalnofix(False, '0123456789a', 'isnumeric')
self.assertRaises(TypeError, "abc".isnumeric, 42)
for ch in ['\U00010401', '\U00010427', '\U00010429', '\U0001044E',
'\U0001F40D', '\U0001F46F']:
self.assertFalse(ch.isnumeric(), '{!a} is not numeric.'.format(ch))
for ch in ['\U00011065', '\U0001D7F6', '\U00011066',
'\U000104A0', '\U0001F107']:
self.assertTrue(ch.isnumeric(), '{!a} is numeric.'.format(ch))
def test_isidentifier(self):
self.assertTrue("a".isidentifier())
self.assertTrue("Z".isidentifier())
self.assertTrue("_".isidentifier())
self.assertTrue("b0".isidentifier())
self.assertTrue("bc".isidentifier())
self.assertTrue("b_".isidentifier())
self.assertTrue("µ".isidentifier())
self.assertTrue("𝔘𝔫𝔦𝔠𝔬𝔡𝔢".isidentifier())
self.assertFalse(" ".isidentifier())
self.assertFalse("[".isidentifier())
self.assertFalse("©".isidentifier())
self.assertFalse("0".isidentifier())
def test_isprintable(self):
self.assertTrue("".isprintable())
self.assertTrue(" ".isprintable())
self.assertTrue("abcdefg".isprintable())
self.assertFalse("abcdefg\n".isprintable())
# some defined Unicode character
self.assertTrue("\u0374".isprintable())
# undefined character
self.assertFalse("\u0378".isprintable())
# single surrogate character
self.assertFalse("\ud800".isprintable())
self.assertTrue('\U0001F46F'.isprintable())
self.assertFalse('\U000E0020'.isprintable())
def test_surrogates(self):
for s in ('a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertTrue(s.islower())
self.assertFalse(s.isupper())
self.assertFalse(s.istitle())
for s in ('A\uD800B\uDFFF', 'A\uDFFFB\uD800',
'A\uD800B\uDFFFA', 'A\uDFFFB\uD800A'):
self.assertFalse(s.islower())
self.assertTrue(s.isupper())
self.assertTrue(s.istitle())
for meth_name in ('islower', 'isupper', 'istitle'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
for meth_name in ('isalpha', 'isalnum', 'isdigit', 'isspace',
'isdecimal', 'isnumeric',
'isidentifier', 'isprintable'):
meth = getattr(str, meth_name)
for s in ('\uD800', '\uDFFF', '\uD800\uD800', '\uDFFF\uDFFF',
'a\uD800b\uDFFF', 'a\uDFFFb\uD800',
'a\uD800b\uDFFFa', 'a\uDFFFb\uD800a'):
self.assertFalse(meth(s), '%a.%s() is False' % (s, meth_name))
    def test_lower(self):
        """str.lower(), including astral-plane letters and the final-sigma
        special case for GREEK CAPITAL LETTER SIGMA (U+03A3)."""
        string_tests.CommonTest.test_lower(self)
        self.assertEqual('\U00010427'.lower(), '\U0001044F')
        self.assertEqual('\U00010427\U00010427'.lower(),
                         '\U0001044F\U0001044F')
        self.assertEqual('\U00010427\U0001044F'.lower(),
                         '\U0001044F\U0001044F')
        self.assertEqual('X\U00010427x\U0001044F'.lower(),
                         'x\U0001044Fx\U0001044F')
        self.assertEqual('fi'.lower(), 'fi')
        # Lowercasing U+0130 expands to two code points (i + U+0307).
        self.assertEqual('\u0130'.lower(), '\u0069\u0307')
        # Special case for GREEK CAPITAL LETTER SIGMA U+03A3
        self.assertEqual('\u03a3'.lower(), '\u03c3')
        self.assertEqual('\u0345\u03a3'.lower(), '\u0345\u03c3')
        self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
        self.assertEqual('A\u0345\u03a3a'.lower(), 'a\u0345\u03c3a')
        self.assertEqual('A\u0345\u03a3'.lower(), 'a\u0345\u03c2')
        self.assertEqual('A\u03a3\u0345'.lower(), 'a\u03c2\u0345')
        self.assertEqual('\u03a3\u0345 '.lower(), '\u03c3\u0345 ')
        # Code points with no lowercase mapping are left unchanged.
        self.assertEqual('\U0008fffe'.lower(), '\U0008fffe')
        self.assertEqual('\u2177'.lower(), '\u2177')
def test_casefold(self):
self.assertEqual('hello'.casefold(), 'hello')
self.assertEqual('hELlo'.casefold(), 'hello')
self.assertEqual('ß'.casefold(), 'ss')
self.assertEqual('fi'.casefold(), 'fi')
self.assertEqual('\u03a3'.casefold(), '\u03c3')
self.assertEqual('A\u0345\u03a3'.casefold(), 'a\u03b9\u03c3')
self.assertEqual('\u00b5'.casefold(), '\u03bc')
def test_upper(self):
string_tests.CommonTest.test_upper(self)
self.assertEqual('\U0001044F'.upper(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('\U00010427\U0001044F'.upper(),
'\U00010427\U00010427')
self.assertEqual('X\U00010427x\U0001044F'.upper(),
'X\U00010427X\U00010427')
self.assertEqual('fi'.upper(), 'FI')
self.assertEqual('\u0130'.upper(), '\u0130')
self.assertEqual('\u03a3'.upper(), '\u03a3')
self.assertEqual('ß'.upper(), 'SS')
self.assertEqual('\u1fd2'.upper(), '\u0399\u0308\u0300')
self.assertEqual('\U0008fffe'.upper(), '\U0008fffe')
self.assertEqual('\u2177'.upper(), '\u2167')
def test_capitalize(self):
string_tests.CommonTest.test_capitalize(self)
self.assertEqual('\U0001044F'.capitalize(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427'.capitalize(),
'\U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F'.capitalize(),
'X\U0001044Fx\U0001044F')
self.assertEqual('h\u0130'.capitalize(), 'H\u0069\u0307')
exp = '\u0399\u0308\u0300\u0069\u0307'
self.assertEqual('\u1fd2\u0130'.capitalize(), exp)
self.assertEqual('finnish'.capitalize(), 'FInnish')
self.assertEqual('A\u0345\u03a3'.capitalize(), 'A\u0345\u03c2')
def test_title(self):
string_tests.MixinStrUnicodeUserStringTest.test_title(self)
self.assertEqual('\U0001044F'.title(), '\U00010427')
self.assertEqual('\U0001044F\U0001044F'.title(),
'\U00010427\U0001044F')
self.assertEqual('\U0001044F\U0001044F \U0001044F\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U00010427\U0001044F \U00010427\U0001044F'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('\U0001044F\U00010427 \U0001044F\U00010427'.title(),
'\U00010427\U0001044F \U00010427\U0001044F')
self.assertEqual('X\U00010427x\U0001044F X\U00010427x\U0001044F'.title(),
'X\U0001044Fx\U0001044F X\U0001044Fx\U0001044F')
self.assertEqual('fiNNISH'.title(), 'Finnish')
self.assertEqual('A\u03a3 \u1fa1xy'.title(), 'A\u03c2 \u1fa9xy')
self.assertEqual('A\u03a3A'.title(), 'A\u03c3a')
    def test_swapcase(self):
        """str.swapcase(), including astral-plane letters, one-to-many
        mappings, and the final-sigma special case for U+03A3."""
        string_tests.CommonTest.test_swapcase(self)
        self.assertEqual('\U0001044F'.swapcase(), '\U00010427')
        self.assertEqual('\U00010427'.swapcase(), '\U0001044F')
        self.assertEqual('\U0001044F\U0001044F'.swapcase(),
                         '\U00010427\U00010427')
        self.assertEqual('\U00010427\U0001044F'.swapcase(),
                         '\U0001044F\U00010427')
        self.assertEqual('\U0001044F\U00010427'.swapcase(),
                         '\U00010427\U0001044F')
        self.assertEqual('X\U00010427x\U0001044F'.swapcase(),
                         'x\U0001044FX\U00010427')
        self.assertEqual('fi'.swapcase(), 'FI')
        # Swapping U+0130 lowercases it, expanding to two code points.
        self.assertEqual('\u0130'.swapcase(), '\u0069\u0307')
        # Special case for GREEK CAPITAL LETTER SIGMA U+03A3
        self.assertEqual('\u03a3'.swapcase(), '\u03c3')
        self.assertEqual('\u0345\u03a3'.swapcase(), '\u0399\u03c3')
        self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
        self.assertEqual('A\u0345\u03a3a'.swapcase(), 'a\u0399\u03c3A')
        self.assertEqual('A\u0345\u03a3'.swapcase(), 'a\u0399\u03c2')
        self.assertEqual('A\u03a3\u0345'.swapcase(), 'a\u03c2\u0399')
        self.assertEqual('\u03a3\u0345 '.swapcase(), '\u03c3\u0399 ')
        self.assertEqual('\u03a3'.swapcase(), '\u03c3')
        # One-to-many uppercase mappings apply when swapping too.
        self.assertEqual('ß'.swapcase(), 'SS')
        self.assertEqual('\u1fd2'.swapcase(), '\u0399\u0308\u0300')
def test_center(self):
string_tests.CommonTest.test_center(self)
self.assertEqual('x'.center(2, '\U0010FFFF'),
'x\U0010FFFF')
self.assertEqual('x'.center(3, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF')
self.assertEqual('x'.center(4, '\U0010FFFF'),
'\U0010FFFFx\U0010FFFF\U0010FFFF')
def test_contains(self):
# Testing Unicode contains method
self.assertIn('a', 'abdb')
self.assertIn('a', 'bdab')
self.assertIn('a', 'bdaba')
self.assertIn('a', 'bdba')
self.assertNotIn('a', 'bdb')
self.assertIn('a', 'bdba')
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertIn('a', ('a',1,None))
self.assertIn('a', (1,None,'a'))
self.assertNotIn('a', ('x',1,'y'))
self.assertNotIn('a', ('x',1,None))
self.assertNotIn('abcd', 'abcxxxx')
self.assertIn('ab', 'abcd')
self.assertIn('ab', 'abc')
self.assertIn('ab', (1,None,'ab'))
self.assertIn('', 'abc')
self.assertIn('', '')
self.assertIn('', 'abc')
self.assertNotIn('\0', 'abc')
self.assertIn('\0', '\0abc')
self.assertIn('\0', 'abc\0')
self.assertIn('a', '\0abc')
self.assertIn('asdf', 'asdf')
self.assertNotIn('asdf', 'asd')
self.assertNotIn('asdf', '')
self.assertRaises(TypeError, "abc".__contains__)
def test_issue18183(self):
'\U00010000\U00100000'.lower()
'\U00010000\U00100000'.casefold()
'\U00010000\U00100000'.upper()
'\U00010000\U00100000'.capitalize()
'\U00010000\U00100000'.title()
'\U00010000\U00100000'.swapcase()
'\U00100000'.center(3, '\U00010000')
'\U00100000'.ljust(3, '\U00010000')
'\U00100000'.rjust(3, '\U00010000')
    def test_format(self):
        """str.format(): replacement fields, !r/!s/!a conversions, format
        specs (alignment, fill, precision), computed specs, error cases,
        and the fallback to object.__format__."""
        self.assertEqual(''.format(), '')
        self.assertEqual('a'.format(), 'a')
        self.assertEqual('ab'.format(), 'ab')
        self.assertEqual('a{{'.format(), 'a{')
        self.assertEqual('a}}'.format(), 'a}')
        self.assertEqual('{{b'.format(), '{b')
        self.assertEqual('}}b'.format(), '}b')
        self.assertEqual('a{{b'.format(), 'a{b')
        # examples from the PEP:
        import datetime
        self.assertEqual("My name is {0}".format('Fred'), "My name is Fred")
        self.assertEqual("My name is {0[name]}".format(dict(name='Fred')),
                         "My name is Fred")
        self.assertEqual("My name is {0} :-{{}}".format('Fred'),
                         "My name is Fred :-{}")
        d = datetime.date(2007, 8, 18)
        self.assertEqual("The year is {0.year}".format(d),
                         "The year is 2007")
        # classes we'll use for testing
        class C:
            # __format__ echoes the format spec back verbatim.
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec
        class D:
            # __format__ ignores the spec and renders the stored value.
            def __init__(self, x):
                self.x = x
            def __format__(self, spec):
                return str(self.x)
        # class with __str__, but no __format__
        class E:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return 'E(' + self.x + ')'
        # class with __repr__, but no __format__ or __str__
        class F:
            def __init__(self, x):
                self.x = x
            def __repr__(self):
                return 'F(' + self.x + ')'
        # class with __format__ that forwards to string, for some format_spec's
        class G:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return "string is " + self.x
            def __format__(self, format_spec):
                if format_spec == 'd':
                    return 'G(' + self.x + ')'
                return object.__format__(self, format_spec)
        # date subclass: the format spec is interpreted as strftime format
        class I(datetime.date):
            def __format__(self, format_spec):
                return self.strftime(format_spec)
        # int subclass whose __format__ doubles the value first
        class J(int):
            def __format__(self, format_spec):
                return int.__format__(self * 2, format_spec)
        self.assertEqual(''.format(), '')
        self.assertEqual('abc'.format(), 'abc')
        self.assertEqual('{0}'.format('abc'), 'abc')
        self.assertEqual('{0:}'.format('abc'), 'abc')
        # self.assertEqual('{ 0 }'.format('abc'), 'abc')
        self.assertEqual('X{0}'.format('abc'), 'Xabc')
        self.assertEqual('{0}X'.format('abc'), 'abcX')
        self.assertEqual('X{0}Y'.format('abc'), 'XabcY')
        self.assertEqual('{1}'.format(1, 'abc'), 'abc')
        self.assertEqual('X{1}'.format(1, 'abc'), 'Xabc')
        self.assertEqual('{1}X'.format(1, 'abc'), 'abcX')
        self.assertEqual('X{1}Y'.format(1, 'abc'), 'XabcY')
        self.assertEqual('{0}'.format(-15), '-15')
        self.assertEqual('{0}{1}'.format(-15, 'abc'), '-15abc')
        self.assertEqual('{0}X{1}'.format(-15, 'abc'), '-15Xabc')
        self.assertEqual('{{'.format(), '{')
        self.assertEqual('}}'.format(), '}')
        self.assertEqual('{{}}'.format(), '{}')
        self.assertEqual('{{x}}'.format(), '{x}')
        self.assertEqual('{{{0}}}'.format(123), '{123}')
        self.assertEqual('{{{{0}}}}'.format(), '{{0}}')
        self.assertEqual('}}{{'.format(), '}{')
        self.assertEqual('}}x{{'.format(), '}x{')
        # weird field names
        self.assertEqual("{0[foo-bar]}".format({'foo-bar':'baz'}), 'baz')
        self.assertEqual("{0[foo bar]}".format({'foo bar':'baz'}), 'baz')
        self.assertEqual("{0[ ]}".format({' ':3}), '3')
        self.assertEqual('{foo._x}'.format(foo=C(20)), '20')
        self.assertEqual('{1}{0}'.format(D(10), D(20)), '2010')
        self.assertEqual('{0._x.x}'.format(C(D('abc'))), 'abc')
        self.assertEqual('{0[0]}'.format(['abc', 'def']), 'abc')
        self.assertEqual('{0[1]}'.format(['abc', 'def']), 'def')
        self.assertEqual('{0[1][0]}'.format(['abc', ['def']]), 'def')
        self.assertEqual('{0[1][0].x}'.format(['abc', [D('def')]]), 'def')
        # strings
        self.assertEqual('{0:.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:.3s}'.format('ab'), 'ab')
        self.assertEqual('{0:.3s}'.format('abcdef'), 'abc')
        self.assertEqual('{0:.0s}'.format('abcdef'), '')
        self.assertEqual('{0:3.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.3s}'.format('abc'), 'abc')
        self.assertEqual('{0:2.2s}'.format('abc'), 'ab')
        self.assertEqual('{0:3.2s}'.format('abc'), 'ab ')
        self.assertEqual('{0:x<0s}'.format('result'), 'result')
        self.assertEqual('{0:x<5s}'.format('result'), 'result')
        self.assertEqual('{0:x<6s}'.format('result'), 'result')
        self.assertEqual('{0:x<7s}'.format('result'), 'resultx')
        self.assertEqual('{0:x<8s}'.format('result'), 'resultxx')
        self.assertEqual('{0: <7s}'.format('result'), 'result ')
        self.assertEqual('{0:<7s}'.format('result'), 'result ')
        self.assertEqual('{0:>7s}'.format('result'), ' result')
        self.assertEqual('{0:>8s}'.format('result'), '  result')
        self.assertEqual('{0:^8s}'.format('result'), ' result ')
        self.assertEqual('{0:^9s}'.format('result'), ' result  ')
        self.assertEqual('{0:^10s}'.format('result'), '  result  ')
        self.assertEqual('{0:10000}'.format('a'), 'a' + ' ' * 9999)
        self.assertEqual('{0:10000}'.format(''), ' ' * 10000)
        self.assertEqual('{0:10000000}'.format(''), ' ' * 10000000)
        # issue 12546: use \x00 as a fill character
        self.assertEqual('{0:\x00<6s}'.format('foo'), 'foo\x00\x00\x00')
        self.assertEqual('{0:\x01<6s}'.format('foo'), 'foo\x01\x01\x01')
        self.assertEqual('{0:\x00^6s}'.format('foo'), '\x00foo\x00\x00')
        self.assertEqual('{0:^6s}'.format('foo'), ' foo  ')
        self.assertEqual('{0:\x00<6}'.format(3), '3\x00\x00\x00\x00\x00')
        self.assertEqual('{0:\x01<6}'.format(3), '3\x01\x01\x01\x01\x01')
        self.assertEqual('{0:\x00^6}'.format(3), '\x00\x003\x00\x00\x00')
        self.assertEqual('{0:<6}'.format(3), '3     ')
        self.assertEqual('{0:\x00<6}'.format(3.14), '3.14\x00\x00')
        self.assertEqual('{0:\x01<6}'.format(3.14), '3.14\x01\x01')
        self.assertEqual('{0:\x00^6}'.format(3.14), '\x003.14\x00')
        self.assertEqual('{0:^6}'.format(3.14), ' 3.14 ')
        self.assertEqual('{0:\x00<12}'.format(3+2.0j), '(3+2j)\x00\x00\x00\x00\x00\x00')
        self.assertEqual('{0:\x01<12}'.format(3+2.0j), '(3+2j)\x01\x01\x01\x01\x01\x01')
        self.assertEqual('{0:\x00^12}'.format(3+2.0j), '\x00\x00\x00(3+2j)\x00\x00\x00')
        self.assertEqual('{0:^12}'.format(3+2.0j), '   (3+2j)   ')
        # format specifiers for user defined type
        self.assertEqual('{0:abc}'.format(C()), 'abc')
        # !r, !s and !a coercions
        self.assertEqual('{0!s}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:}'.format('Hello'), 'Hello')
        self.assertEqual('{0!s:15}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!s:15s}'.format('Hello'), 'Hello          ')
        self.assertEqual('{0!r}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!r}'.format(F('Hello')), 'F(Hello)')
        self.assertEqual('{0!r}'.format('\u0378'), "'\\u0378'") # nonprintable
        self.assertEqual('{0!r}'.format('\u0374'), "'\u0374'")  # printable
        self.assertEqual('{0!r}'.format(F('\u0374')), 'F(\u0374)')
        self.assertEqual('{0!a}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!a}'.format('\u0378'), "'\\u0378'") # nonprintable
        self.assertEqual('{0!a}'.format('\u0374'), "'\\u0374'") # printable
        self.assertEqual('{0!a:}'.format('Hello'), "'Hello'")
        self.assertEqual('{0!a}'.format(F('Hello')), 'F(Hello)')
        self.assertEqual('{0!a}'.format(F('\u0374')), 'F(\\u0374)')
        # test fallback to object.__format__
        self.assertEqual('{0}'.format({}), '{}')
        self.assertEqual('{0}'.format([]), '[]')
        self.assertEqual('{0}'.format([1]), '[1]')
        self.assertEqual('{0:d}'.format(G('data')), 'G(data)')
        self.assertEqual('{0!s}'.format(G('data')), 'string is data')
        # object.__format__ rejects a non-empty format spec
        self.assertRaises(TypeError, '{0:^10}'.format, E('data'))
        self.assertRaises(TypeError, '{0:^10s}'.format, E('data'))
        self.assertRaises(TypeError, '{0:>15s}'.format, G('data'))
        self.assertEqual("{0:date: %Y-%m-%d}".format(I(year=2007,
                                                       month=8,
                                                       day=27)),
                         "date: 2007-08-27")
        # test deriving from a builtin type and overriding __format__
        self.assertEqual("{0}".format(J(10)), "20")
        # string format specifiers
        self.assertEqual('{0:}'.format('a'), 'a')
        # computed format specifiers
        self.assertEqual("{0:.{1}}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{1}s}".format('hello world', 5), 'hello')
        self.assertEqual("{0:.{precision}s}".format('hello world', precision=5), 'hello')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width=10, precision=5), 'hello     ')
        self.assertEqual("{0:{width}.{precision}s}".format('hello world', width='10', precision='5'), 'hello     ')
        # test various errors
        self.assertRaises(ValueError, '{'.format)
        self.assertRaises(ValueError, '}'.format)
        self.assertRaises(ValueError, 'a{'.format)
        self.assertRaises(ValueError, 'a}'.format)
        self.assertRaises(ValueError, '{a'.format)
        self.assertRaises(ValueError, '}a'.format)
        self.assertRaises(IndexError, '{0}'.format)
        self.assertRaises(IndexError, '{1}'.format, 'abc')
        self.assertRaises(KeyError, '{x}'.format)
        self.assertRaises(ValueError, "}{".format)
        self.assertRaises(ValueError, "abc{0:{}".format)
        self.assertRaises(ValueError, "{0".format)
        self.assertRaises(IndexError, "{0.}".format)
        self.assertRaises(ValueError, "{0.}".format, 0)
        self.assertRaises(ValueError, "{0[}".format)
        self.assertRaises(ValueError, "{0[}".format, [])
        self.assertRaises(KeyError, "{0]}".format)
        self.assertRaises(ValueError, "{0.[]}".format, 0)
        self.assertRaises(ValueError, "{0..foo}".format, 0)
        self.assertRaises(ValueError, "{0[0}".format, 0)
        self.assertRaises(ValueError, "{0[0:foo}".format, 0)
        self.assertRaises(KeyError, "{c]}".format)
        self.assertRaises(ValueError, "{{ {{{0}}".format, 0)
        self.assertRaises(ValueError, "{0}}".format, 0)
        self.assertRaises(KeyError, "{foo}".format, bar=3)
        self.assertRaises(ValueError, "{0!x}".format, 3)
        self.assertRaises(ValueError, "{0!}".format, 0)
        self.assertRaises(ValueError, "{0!rs}".format, 0)
        self.assertRaises(ValueError, "{!}".format)
        self.assertRaises(IndexError, "{:}".format)
        self.assertRaises(IndexError, "{:s}".format)
        self.assertRaises(IndexError, "{}".format)
        big = "23098475029384702983476098230754973209482573"
        self.assertRaises(ValueError, ("{" + big + "}").format)
        self.assertRaises(ValueError, ("{[" + big + "]}").format, [0])
        # issue 6089
        self.assertRaises(ValueError, "{0[0]x}".format, [None])
        self.assertRaises(ValueError, "{0[0](10)}".format, [None])
        # can't have a replacement on the field name portion
        self.assertRaises(TypeError, '{0[{1}]}'.format, 'abcdefg', 4)
        # exceed maximum recursion depth
        self.assertRaises(ValueError, "{0:{1:{2}}}".format, 'abc', 's', '')
        self.assertRaises(ValueError, "{0:{1:{2:{3:{4:{5:{6}}}}}}}".format,
                          0, 1, 2, 3, 4, 5, 6, 7)
        # string format spec errors
        self.assertRaises(ValueError, "{0:-s}".format, '')
        self.assertRaises(ValueError, format, "", "-")
        self.assertRaises(ValueError, "{0:=s}".format, '')
        # Alternate formatting is not supported
        self.assertRaises(ValueError, format, '', '#')
        self.assertRaises(ValueError, format, '', '#20')
        # Non-ASCII
        self.assertEqual("{0:s}{1:s}".format("ABC", "\u0410\u0411\u0412"),
                         'ABC\u0410\u0411\u0412')
        self.assertEqual("{0:.3s}".format("ABC\u0410\u0411\u0412"),
                         'ABC')
        self.assertEqual("{0:.0s}".format("ABC\u0410\u0411\u0412"),
                         '')
        # Braces inside an index string are taken literally.
        self.assertEqual("{[{}]}".format({"{}": 5}), "5")
        self.assertEqual("{[{}]}".format({"{}" : "a"}), "a")
        self.assertEqual("{[{]}".format({"{" : "a"}), "a")
        self.assertEqual("{[}]}".format({"}" : "a"}), "a")
        self.assertEqual("{[[]}".format({"[" : "a"}), "a")
        self.assertEqual("{[!]}".format({"!" : "a"}), "a")
        self.assertRaises(ValueError, "{a{}b}".format, 42)
        self.assertRaises(ValueError, "{a{b}".format, 42)
        self.assertRaises(ValueError, "{[}".format, 42)
        self.assertEqual("0x{:0{:d}X}".format(0x0,16), "0x0000000000000000")
    def test_format_map(self):
        """str.format_map(): like format(**mapping), but takes the mapping
        object itself, so dict subclasses with __missing__ work."""
        self.assertEqual(''.format_map({}), '')
        self.assertEqual('a'.format_map({}), 'a')
        self.assertEqual('ab'.format_map({}), 'ab')
        self.assertEqual('a{{'.format_map({}), 'a{')
        self.assertEqual('a}}'.format_map({}), 'a}')
        self.assertEqual('{{b'.format_map({}), '{b')
        self.assertEqual('}}b'.format_map({}), '}b')
        self.assertEqual('a{{b'.format_map({}), 'a{b')
        # using mappings
        class Mapping(dict):
            # Unknown keys fall back to the key itself.
            def __missing__(self, key):
                return key
        self.assertEqual('{hello}'.format_map(Mapping()), 'hello')
        self.assertEqual('{a} {world}'.format_map(Mapping(a='hello')), 'hello world')
        class InternalMapping:
            # Minimal non-dict mapping: only __getitem__ is required.
            def __init__(self):
                self.mapping = {'a': 'hello'}
            def __getitem__(self, key):
                return self.mapping[key]
        self.assertEqual('{a}'.format_map(InternalMapping()), 'hello')
        class C:
            # Helper exposing a private attribute for '{foo._x}' lookups.
            def __init__(self, x=100):
                self._x = x
            def __format__(self, spec):
                return spec
        self.assertEqual('{foo._x}'.format_map({'foo': C(20)}), '20')
        # test various errors
        self.assertRaises(TypeError, ''.format_map)
        self.assertRaises(TypeError, 'a'.format_map)
        self.assertRaises(ValueError, '{'.format_map, {})
        self.assertRaises(ValueError, '}'.format_map, {})
        self.assertRaises(ValueError, 'a{'.format_map, {})
        self.assertRaises(ValueError, 'a}'.format_map, {})
        self.assertRaises(ValueError, '{a'.format_map, {})
        self.assertRaises(ValueError, '}a'.format_map, {})
        # issue #12579: can't supply positional params to format_map
        self.assertRaises(ValueError, '{}'.format_map, {'a' : 2})
        self.assertRaises(ValueError, '{}'.format_map, 'a')
        self.assertRaises(ValueError, '{a} {}'.format_map, {"a" : 2, "b" : 1})
def test_format_huge_precision(self):
format_string = ".{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_width(self):
format_string = "{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format(2.34, format_string)
def test_format_huge_item_number(self):
format_string = "{{{}:.6f}}".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string.format(2.34)
def test_format_auto_numbering(self):
class C:
def __init__(self, x=100):
self._x = x
def __format__(self, spec):
return spec
self.assertEqual('{}'.format(10), '10')
self.assertEqual('{:5}'.format('s'), 's ')
self.assertEqual('{!r}'.format('s'), "'s'")
self.assertEqual('{._x}'.format(C(10)), '10')
self.assertEqual('{[1]}'.format([1, 2]), '2')
self.assertEqual('{[a]}'.format({'a':4, 'b':2}), '4')
self.assertEqual('a{}b{}c'.format(0, 1), 'a0b1c')
self.assertEqual('a{:{}}b'.format('x', '^10'), 'a x b')
self.assertEqual('a{:{}x}b'.format(20, '#'), 'a0x14b')
# can't mix and match numbering and auto-numbering
self.assertRaises(ValueError, '{}{1}'.format, 1, 2)
self.assertRaises(ValueError, '{1}{}'.format, 1, 2)
self.assertRaises(ValueError, '{:{1}}'.format, 1, 2)
self.assertRaises(ValueError, '{0:{}}'.format, 1, 2)
# can mix and match auto-numbering and named
self.assertEqual('{f}{}'.format(4, f='test'), 'test4')
self.assertEqual('{}{f}'.format(4, f='test'), '4test')
self.assertEqual('{:{f}}{g}{}'.format(1, 3, g='g', f=2), ' 1g3')
self.assertEqual('{f:{}}{}{g}'.format(2, 4, f=1, g='g'), ' 14g')
def test_formatting(self):
string_tests.MixinStrUnicodeUserStringTest.test_formatting(self)
# Testing Unicode formatting strings...
self.assertEqual("%s, %s" % ("abc", "abc"), 'abc, abc')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, 2, 3), 'abc, abc, 1, 2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", 1, -2, 3), 'abc, abc, 1, -2.000000, 3.00')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.5), 'abc, abc, -1, -2.000000, 3.50')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 3.57), 'abc, abc, -1, -2.000000, 3.57')
self.assertEqual("%s, %s, %i, %f, %5.2f" % ("abc", "abc", -1, -2, 1003.57), 'abc, abc, -1, -2.000000, 1003.57')
if not sys.platform.startswith('java'):
self.assertEqual("%r, %r" % (b"abc", "abc"), "b'abc', 'abc'")
self.assertEqual("%r" % ("\u1234",), "'\u1234'")
self.assertEqual("%a" % ("\u1234",), "'\\u1234'")
self.assertEqual("%(x)s, %(y)s" % {'x':"abc", 'y':"def"}, 'abc, def')
self.assertEqual("%(x)s, %(\xfc)s" % {'x':"abc", '\xfc':"def"}, 'abc, def')
self.assertEqual('%c' % 0x1234, '\u1234')
self.assertEqual('%c' % 0x21483, '\U00021483')
self.assertRaises(OverflowError, "%c".__mod__, (0x110000,))
self.assertEqual('%c' % '\U00021483', '\U00021483')
self.assertRaises(TypeError, "%c".__mod__, "aa")
self.assertRaises(ValueError, "%.1\u1032f".__mod__, (1.0/3))
self.assertRaises(TypeError, "%i".__mod__, "aa")
# formatting jobs delegated from the string implementation:
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc"}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%(foo)s...' % {'foo':"abc",'def':123}, '...abc...')
self.assertEqual('...%s...%s...%s...%s...' % (1,2,3,"abc"), '...1...2...3...abc...')
self.assertEqual('...%%...%%s...%s...%s...%s...%s...' % (1,2,3,"abc"), '...%...%s...1...2...3...abc...')
self.assertEqual('...%s...' % "abc", '...abc...')
self.assertEqual('%*s' % (5,'abc',), ' abc')
self.assertEqual('%*s' % (-5,'abc',), 'abc ')
self.assertEqual('%*.*s' % (5,2,'abc',), ' ab')
self.assertEqual('%*.*s' % (5,3,'abc',), ' abc')
self.assertEqual('%i %*.*s' % (10, 5,3,'abc',), '10 abc')
self.assertEqual('%i%s %*.*s' % (10, 3, 5, 3, 'abc',), '103 abc')
self.assertEqual('%c' % 'a', 'a')
class Wrapper:
def __str__(self):
return '\u1234'
self.assertEqual('%s' % Wrapper(), '\u1234')
# issue 3382
NAN = float('nan')
INF = float('inf')
self.assertEqual('%f' % NAN, 'nan')
self.assertEqual('%F' % NAN, 'NAN')
self.assertEqual('%f' % INF, 'inf')
self.assertEqual('%F' % INF, 'INF')
# PEP 393
self.assertEqual('%.1s' % "a\xe9\u20ac", 'a')
self.assertEqual('%.2s' % "a\xe9\u20ac", 'a\xe9')
#issue 19995
class PsuedoInt:
def __init__(self, value):
self.value = int(value)
def __int__(self):
return self.value
def __index__(self):
return self.value
class PsuedoFloat:
def __init__(self, value):
self.value = float(value)
def __int__(self):
return int(self.value)
pi = PsuedoFloat(3.1415)
letter_m = PsuedoInt(109)
self.assertEqual('%x' % 42, '2a')
self.assertEqual('%X' % 15, 'F')
self.assertEqual('%o' % 9, '11')
self.assertEqual('%c' % 109, 'm')
self.assertEqual('%x' % letter_m, '6d')
self.assertEqual('%X' % letter_m, '6D')
self.assertEqual('%o' % letter_m, '155')
self.assertEqual('%c' % letter_m, 'm')
self.assertWarns(DeprecationWarning, '%x'.__mod__, pi),
self.assertWarns(DeprecationWarning, '%x'.__mod__, 3.14),
self.assertWarns(DeprecationWarning, '%X'.__mod__, 2.11),
self.assertWarns(DeprecationWarning, '%o'.__mod__, 1.79),
self.assertWarns(DeprecationWarning, '%c'.__mod__, pi),
    def test_formatting_with_enum(self):
        """%-formatting of enum members (issue #18780).

        NOTE(review): the expected 'Str.ABC' / 'Int.IDES' strings depend on
        enum's __str__ behavior, which has changed across Python versions —
        verify against the interpreter this test targets.
        """
        # issue18780
        import enum
        # The class names below appear in the expected output, so they
        # must not be renamed.
        class Float(float, enum.Enum):
            PI = 3.1415926
        class Int(enum.IntEnum):
            IDES = 15
        class Str(str, enum.Enum):
            ABC = 'abc'
        # Testing Unicode formatting strings...
        self.assertEqual("%s, %s" % (Str.ABC, Str.ABC),
                         'Str.ABC, Str.ABC')
        self.assertEqual("%s, %s, %d, %i, %u, %f, %5.2f" %
                        (Str.ABC, Str.ABC,
                         Int.IDES, Int.IDES, Int.IDES,
                         Float.PI, Float.PI),
                        'Str.ABC, Str.ABC, 15, 15, 15, 3.141593, 3.14')
        # formatting jobs delegated from the string implementation:
        self.assertEqual('...%(foo)s...' % {'foo':Str.ABC},
                         '...Str.ABC...')
        self.assertEqual('...%(foo)s...' % {'foo':Int.IDES},
                         '...Int.IDES...')
        self.assertEqual('...%(foo)i...' % {'foo':Int.IDES},
                         '...15...')
        self.assertEqual('...%(foo)d...' % {'foo':Int.IDES},
                         '...15...')
        self.assertEqual('...%(foo)u...' % {'foo':Int.IDES, 'def':Float.PI},
                         '...15...')
        self.assertEqual('...%(foo)f...' % {'foo':Float.PI,'def':123},
                         '...3.141593...')
def test_formatting_huge_precision(self):
format_string = "%.{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
    @support.cpython_only
    def test_formatting_huge_precision_c_limits(self):
        """%-formatting precision just past C INT_MAX raises ValueError
        (CPython-only: reads the limit from _testcapi)."""
        from _testcapi import INT_MAX
        format_string = "%.{}f".format(INT_MAX + 1)
        with self.assertRaises(ValueError):
            result = format_string % 2.34
def test_formatting_huge_width(self):
format_string = "%{}f".format(sys.maxsize + 1)
with self.assertRaises(ValueError):
result = format_string % 2.34
def test_startswith_endswith_errors(self):
for meth in ('foo'.startswith, 'foo'.endswith):
with self.assertRaises(TypeError) as cm:
meth(['f'])
exc = str(cm.exception)
self.assertIn('str', exc)
self.assertIn('tuple', exc)
    @support.run_with_locale('LC_ALL', 'de_DE', 'fr_FR')
    def test_format_float(self):
        """%-formatting of floats always uses '.', even under locales
        whose decimal separator is a comma."""
        # should not format with a comma, but always with C locale
        self.assertEqual('1.0', '%.1f' % 1.0)
    def test_constructor(self):
        """str(obj) and str(bytes, encoding, errors) constructor behavior."""
        # unicode(obj) tests (this maps to PyObject_Unicode() at C level)
        self.assertEqual(
            str('unicode remains unicode'),
            'unicode remains unicode'
        )
        class UnicodeSubclass(str):
            pass
        # str() of a str subclass returns an equal plain value.
        for text in ('ascii', '\xe9', '\u20ac', '\U0010FFFF'):
            subclass = UnicodeSubclass(text)
            self.assertEqual(str(subclass), text)
            self.assertEqual(len(subclass), len(text))
            if text == 'ascii':
                self.assertEqual(subclass.encode('ascii'), b'ascii')
                self.assertEqual(subclass.encode('utf-8'), b'ascii')
        self.assertEqual(
            str('strings are converted to unicode'),
            'strings are converted to unicode'
        )
        class StringCompat:
            def __init__(self, x):
                self.x = x
            def __str__(self):
                return self.x
        self.assertEqual(
            str(StringCompat('__str__ compatible objects are recognized')),
            '__str__ compatible objects are recognized'
        )
        # unicode(obj) is compatible to str():
        o = StringCompat('unicode(obj) is compatible to str()')
        self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
        self.assertEqual(str(o), 'unicode(obj) is compatible to str()')
        for obj in (123, 123.45, 123):
            self.assertEqual(str(obj), str(str(obj)))
        # unicode(obj, encoding, error) tests (this maps to
        # PyUnicode_FromEncodedObject() at C level)
        if not sys.platform.startswith('java'):
            # Decoding a str (rather than bytes) is a TypeError.
            self.assertRaises(
                TypeError,
                str,
                'decoding unicode is not supported',
                'utf-8',
                'strict'
            )
        self.assertEqual(
            str(b'strings are decoded to unicode', 'utf-8', 'strict'),
            'strings are decoded to unicode'
        )
        if not sys.platform.startswith('java'):
            self.assertEqual(
                str(
                    memoryview(b'character buffers are decoded to unicode'),
                    'utf-8',
                    'strict'
                ),
                'character buffers are decoded to unicode'
            )
        self.assertRaises(TypeError, str, 42, 42, 42)
def test_constructor_keyword_args(self):
"""Pass various keyword argument combinations to the constructor."""
# The object argument can be passed as a keyword.
self.assertEqual(str(object='foo'), 'foo')
self.assertEqual(str(object=b'foo', encoding='utf-8'), 'foo')
# The errors argument without encoding triggers "decode" mode.
self.assertEqual(str(b'foo', errors='strict'), 'foo') # not "b'foo'"
self.assertEqual(str(object=b'foo', errors='strict'), 'foo')
def test_constructor_defaults(self):
"""Check the constructor argument defaults."""
# The object argument defaults to '' or b''.
self.assertEqual(str(), '')
self.assertEqual(str(errors='strict'), '')
utf8_cent = '¢'.encode('utf-8')
# The encoding argument defaults to utf-8.
self.assertEqual(str(utf8_cent, errors='strict'), '¢')
# The errors argument defaults to strict.
self.assertRaises(UnicodeDecodeError, str, utf8_cent, encoding='ascii')
def test_codecs_utf7(self):
utfTests = [
('A\u2262\u0391.', b'A+ImIDkQ.'), # RFC2152 example
('Hi Mom -\u263a-!', b'Hi Mom -+Jjo--!'), # RFC2152 example
('\u65E5\u672C\u8A9E', b'+ZeVnLIqe-'), # RFC2152 example
('Item 3 is \u00a31.', b'Item 3 is +AKM-1.'), # RFC2152 example
('+', b'+-'),
('+-', b'+--'),
('+?', b'+-?'),
('\?', b'+AFw?'),
('+?', b'+-?'),
(r'\\?', b'+AFwAXA?'),
(r'\\\?', b'+AFwAXABc?'),
(r'++--', b'+-+---'),
('\U000abcde', b'+2m/c3g-'), # surrogate pairs
('/', b'/'),
]
for (x, y) in utfTests:
self.assertEqual(x.encode('utf-7'), y)
# Unpaired surrogates are passed through
self.assertEqual('\uD801'.encode('utf-7'), b'+2AE-')
self.assertEqual('\uD801x'.encode('utf-7'), b'+2AE-x')
self.assertEqual('\uDC01'.encode('utf-7'), b'+3AE-')
self.assertEqual('\uDC01x'.encode('utf-7'), b'+3AE-x')
self.assertEqual(b'+2AE-'.decode('utf-7'), '\uD801')
self.assertEqual(b'+2AE-x'.decode('utf-7'), '\uD801x')
self.assertEqual(b'+3AE-'.decode('utf-7'), '\uDC01')
self.assertEqual(b'+3AE-x'.decode('utf-7'), '\uDC01x')
self.assertEqual('\uD801\U000abcde'.encode('utf-7'), b'+2AHab9ze-')
self.assertEqual(b'+2AHab9ze-'.decode('utf-7'), '\uD801\U000abcde')
# Issue #2242: crash on some Windows/MSVC versions
self.assertEqual(b'+\xc1'.decode('utf-7'), '\xc1')
# Direct encoded characters
set_d = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'(),-./:?"
# Optional direct characters
set_o = '!"#$%&*;<=>@[]^_`{|}'
for c in set_d:
self.assertEqual(c.encode('utf7'), c.encode('ascii'))
self.assertEqual(c.encode('ascii').decode('utf7'), c)
for c in set_o:
self.assertEqual(c.encode('ascii').decode('utf7'), c)
    def test_codecs_utf8(self):
        """Spot-check UTF-8 encoding (and a little decoding) of BMP,
        astral and surrogate code points."""
        self.assertEqual(''.encode('utf-8'), b'')
        self.assertEqual('\u20ac'.encode('utf-8'), b'\xe2\x82\xac')
        self.assertEqual('\U00010002'.encode('utf-8'), b'\xf0\x90\x80\x82')
        self.assertEqual('\U00023456'.encode('utf-8'), b'\xf0\xa3\x91\x96')
        # Lone surrogates are only encodable with the surrogatepass handler.
        self.assertEqual('\ud800'.encode('utf-8', 'surrogatepass'), b'\xed\xa0\x80')
        self.assertEqual('\udc00'.encode('utf-8', 'surrogatepass'), b'\xed\xb0\x80')
        self.assertEqual(('\U00010002'*10).encode('utf-8'),
                         b'\xf0\x90\x80\x82'*10)
        # A longer mixed Japanese/ASCII payload to exercise multi-byte runs.
        self.assertEqual(
            '\u6b63\u78ba\u306b\u8a00\u3046\u3068\u7ffb\u8a33\u306f'
            '\u3055\u308c\u3066\u3044\u307e\u305b\u3093\u3002\u4e00'
            '\u90e8\u306f\u30c9\u30a4\u30c4\u8a9e\u3067\u3059\u304c'
            '\u3001\u3042\u3068\u306f\u3067\u305f\u3089\u3081\u3067'
            '\u3059\u3002\u5b9f\u969b\u306b\u306f\u300cWenn ist das'
            ' Nunstuck git und'.encode('utf-8'),
            b'\xe6\xad\xa3\xe7\xa2\xba\xe3\x81\xab\xe8\xa8\x80\xe3\x81'
            b'\x86\xe3\x81\xa8\xe7\xbf\xbb\xe8\xa8\xb3\xe3\x81\xaf\xe3'
            b'\x81\x95\xe3\x82\x8c\xe3\x81\xa6\xe3\x81\x84\xe3\x81\xbe'
            b'\xe3\x81\x9b\xe3\x82\x93\xe3\x80\x82\xe4\xb8\x80\xe9\x83'
            b'\xa8\xe3\x81\xaf\xe3\x83\x89\xe3\x82\xa4\xe3\x83\x84\xe8'
            b'\xaa\x9e\xe3\x81\xa7\xe3\x81\x99\xe3\x81\x8c\xe3\x80\x81'
            b'\xe3\x81\x82\xe3\x81\xa8\xe3\x81\xaf\xe3\x81\xa7\xe3\x81'
            b'\x9f\xe3\x82\x89\xe3\x82\x81\xe3\x81\xa7\xe3\x81\x99\xe3'
            b'\x80\x82\xe5\xae\x9f\xe9\x9a\x9b\xe3\x81\xab\xe3\x81\xaf'
            b'\xe3\x80\x8cWenn ist das Nunstuck git und'
        )
        # UTF-8 specific decoding tests
        self.assertEqual(str(b'\xf0\xa3\x91\x96', 'utf-8'), '\U00023456' )
        self.assertEqual(str(b'\xf0\x90\x80\x82', 'utf-8'), '\U00010002' )
        self.assertEqual(str(b'\xe2\x82\xac', 'utf-8'), '\u20ac' )
        # Other possible utf-8 test cases:
        # * strict decoding testing for all of the
        #   UTF8_ERROR cases in PyUnicode_DecodeUTF8
def test_utf8_decode_valid_sequences(self):
sequences = [
# single byte
(b'\x00', '\x00'), (b'a', 'a'), (b'\x7f', '\x7f'),
# 2 bytes
(b'\xc2\x80', '\x80'), (b'\xdf\xbf', '\u07ff'),
# 3 bytes
(b'\xe0\xa0\x80', '\u0800'), (b'\xed\x9f\xbf', '\ud7ff'),
(b'\xee\x80\x80', '\uE000'), (b'\xef\xbf\xbf', '\uffff'),
# 4 bytes
(b'\xF0\x90\x80\x80', '\U00010000'),
(b'\xf4\x8f\xbf\xbf', '\U0010FFFF')
]
for seq, res in sequences:
self.assertEqual(seq.decode('utf-8'), res)
    def test_utf8_decode_invalid_sequences(self):
        """Strict UTF-8 decoding must reject malformed byte sequences:
        bad start bytes, truncated sequences, out-of-range continuation
        bytes, surrogate encodings and code points above U+10FFFF."""
        # continuation bytes in a sequence of 2, 3, or 4 bytes
        continuation_bytes = [bytes([x]) for x in range(0x80, 0xC0)]
        # start bytes of a 2-byte sequence equivalent to codepoints < 0x7F
        invalid_2B_seq_start_bytes = [bytes([x]) for x in range(0xC0, 0xC2)]
        # start bytes of a 4-byte sequence equivalent to codepoints > 0x10FFFF
        invalid_4B_seq_start_bytes = [bytes([x]) for x in range(0xF5, 0xF8)]
        # NOTE(review): range(0xF5, 0xF8) overlaps range(0xF7, 0x100) below,
        # so 0xF7 is tested twice — harmless redundancy.
        invalid_start_bytes = (
            continuation_bytes + invalid_2B_seq_start_bytes +
            invalid_4B_seq_start_bytes + [bytes([x]) for x in range(0xF7, 0x100)]
        )
        for byte in invalid_start_bytes:
            self.assertRaises(UnicodeDecodeError, byte.decode, 'utf-8')
        # Overlong 2-byte forms (C0/C1 + any continuation byte).
        for sb in invalid_2B_seq_start_bytes:
            for cb in continuation_bytes:
                self.assertRaises(UnicodeDecodeError, (sb+cb).decode, 'utf-8')
        # 4-byte sequences whose start byte maps beyond U+10FFFF.
        for sb in invalid_4B_seq_start_bytes:
            for cb1 in continuation_bytes[:3]:
                for cb3 in continuation_bytes[:3]:
                    self.assertRaises(UnicodeDecodeError,
                                      (sb+cb1+b'\x80'+cb3).decode, 'utf-8')
        # Overlong 3-byte forms: E0 requires the first cb in A0..BF.
        for cb in [bytes([x]) for x in range(0x80, 0xA0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xE0'+cb+b'\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xE0'+cb+b'\xBF').decode, 'utf-8')
        # surrogates
        for cb in [bytes([x]) for x in range(0xA0, 0xC0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xED'+cb+b'\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xED'+cb+b'\xBF').decode, 'utf-8')
        # Overlong 4-byte forms: F0 requires the first cb in 90..BF.
        for cb in [bytes([x]) for x in range(0x80, 0x90)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF0'+cb+b'\x80\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF0'+cb+b'\xBF\xBF').decode, 'utf-8')
        # F4 requires the first cb in 80..8F (else > U+10FFFF).
        for cb in [bytes([x]) for x in range(0x90, 0xC0)]:
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF4'+cb+b'\x80\x80').decode, 'utf-8')
            self.assertRaises(UnicodeDecodeError,
                              (b'\xF4'+cb+b'\xBF\xBF').decode, 'utf-8')
    def test_issue8271(self):
        """Check U+FFFD substitution boundaries when decoding invalid
        UTF-8 with the 'replace' and 'ignore' error handlers."""
        # Issue #8271: during the decoding of an invalid UTF-8 byte sequence,
        # only the start byte and the continuation byte(s) are now considered
        # invalid, instead of the number of bytes specified by the start byte.
        # See http://www.unicode.org/versions/Unicode5.2.0/ch03.pdf (page 95,
        # table 3-8, Row 2) for more information about the algorithm used.
        FFFD = '\ufffd'
        # (invalid byte sequence, expected 'replace' output) pairs.
        sequences = [
            # invalid start bytes
            (b'\x80', FFFD), # continuation byte
            (b'\x80\x80', FFFD*2), # 2 continuation bytes
            (b'\xc0', FFFD),
            (b'\xc0\xc0', FFFD*2),
            (b'\xc1', FFFD),
            (b'\xc1\xc0', FFFD*2),
            (b'\xc0\xc1', FFFD*2),
            # with start byte of a 2-byte sequence
            (b'\xc2', FFFD), # only the start byte
            (b'\xc2\xc2', FFFD*2), # 2 start bytes
            (b'\xc2\xc2\xc2', FFFD*3), # 3 start bytes
            (b'\xc2\x41', FFFD+'A'), # invalid continuation byte
            # with start byte of a 3-byte sequence
            (b'\xe1', FFFD), # only the start byte
            (b'\xe1\xe1', FFFD*2), # 2 start bytes
            (b'\xe1\xe1\xe1', FFFD*3), # 3 start bytes
            (b'\xe1\xe1\xe1\xe1', FFFD*4), # 4 start bytes
            (b'\xe1\x80', FFFD), # only 1 continuation byte
            (b'\xe1\x41', FFFD+'A'), # invalid continuation byte
            (b'\xe1\x41\x80', FFFD+'A'+FFFD), # invalid cb followed by valid cb
            (b'\xe1\x41\x41', FFFD+'AA'), # 2 invalid continuation bytes
            (b'\xe1\x80\x41', FFFD+'A'), # only 1 valid continuation byte
            (b'\xe1\x80\xe1\x41', FFFD*2+'A'), # 1 valid and the other invalid
            (b'\xe1\x41\xe1\x80', FFFD+'A'+FFFD), # 1 invalid and the other valid
            # with start byte of a 4-byte sequence
            (b'\xf1', FFFD), # only the start byte
            (b'\xf1\xf1', FFFD*2), # 2 start bytes
            (b'\xf1\xf1\xf1', FFFD*3), # 3 start bytes
            (b'\xf1\xf1\xf1\xf1', FFFD*4), # 4 start bytes
            (b'\xf1\xf1\xf1\xf1\xf1', FFFD*5), # 5 start bytes
            (b'\xf1\x80', FFFD), # only 1 continuation bytes
            (b'\xf1\x80\x80', FFFD), # only 2 continuation bytes
            (b'\xf1\x80\x41', FFFD+'A'), # 1 valid cb and 1 invalid
            (b'\xf1\x80\x41\x41', FFFD+'AA'), # 1 valid cb and 1 invalid
            (b'\xf1\x80\x80\x41', FFFD+'A'), # 2 valid cb and 1 invalid
            (b'\xf1\x41\x80', FFFD+'A'+FFFD), # 1 invalid cv and 1 valid
            (b'\xf1\x41\x80\x80', FFFD+'A'+FFFD*2), # 1 invalid cb and 2 invalid
            (b'\xf1\x41\x80\x41', FFFD+'A'+FFFD+'A'), # 2 invalid cb and 1 invalid
            (b'\xf1\x41\x41\x80', FFFD+'AA'+FFFD), # 1 valid cb and 1 invalid
            (b'\xf1\x41\xf1\x80', FFFD+'A'+FFFD),
            (b'\xf1\x41\x80\xf1', FFFD+'A'+FFFD*2),
            (b'\xf1\xf1\x80\x41', FFFD*2+'A'),
            (b'\xf1\x41\xf1\xf1', FFFD+'A'+FFFD*2),
            # with invalid start byte of a 4-byte sequence (rfc2279)
            (b'\xf5', FFFD), # only the start byte
            (b'\xf5\xf5', FFFD*2), # 2 start bytes
            (b'\xf5\x80', FFFD*2), # only 1 continuation byte
            (b'\xf5\x80\x80', FFFD*3), # only 2 continuation byte
            (b'\xf5\x80\x80\x80', FFFD*4), # 3 continuation bytes
            (b'\xf5\x80\x41', FFFD*2+'A'), # 1 valid cb and 1 invalid
            (b'\xf5\x80\x41\xf5', FFFD*2+'A'+FFFD),
            (b'\xf5\x41\x80\x80\x41', FFFD+'A'+FFFD*2+'A'),
            # with invalid start byte of a 5-byte sequence (rfc2279)
            (b'\xf8', FFFD), # only the start byte
            (b'\xf8\xf8', FFFD*2), # 2 start bytes
            (b'\xf8\x80', FFFD*2), # only one continuation byte
            (b'\xf8\x80\x41', FFFD*2 + 'A'), # 1 valid cb and 1 invalid
            (b'\xf8\x80\x80\x80\x80', FFFD*5), # invalid 5 bytes seq with 5 bytes
            # with invalid start byte of a 6-byte sequence (rfc2279)
            (b'\xfc', FFFD), # only the start byte
            (b'\xfc\xfc', FFFD*2), # 2 start bytes
            (b'\xfc\x80\x80', FFFD*3), # only 2 continuation bytes
            (b'\xfc\x80\x80\x80\x80\x80', FFFD*6), # 6 continuation bytes
            # invalid start byte
            (b'\xfe', FFFD),
            (b'\xfe\x80\x80', FFFD*3),
            # other sequences
            (b'\xf1\x80\x41\x42\x43', '\ufffd\x41\x42\x43'),
            (b'\xf1\x80\xff\x42\x43', '\ufffd\ufffd\x42\x43'),
            (b'\xf1\x80\xc2\x81\x43', '\ufffd\x81\x43'),
            (b'\x61\xF1\x80\x80\xE1\x80\xC2\x62\x80\x63\x80\xBF\x64',
             '\x61\uFFFD\uFFFD\uFFFD\x62\uFFFD\x63\uFFFD\uFFFD\x64'),
        ]
        # Each sequence must: raise under 'strict', substitute as listed
        # under 'replace' (even with a trailing valid byte), and vanish
        # under 'ignore'.
        for n, (seq, res) in enumerate(sequences):
            self.assertRaises(UnicodeDecodeError, seq.decode, 'utf-8', 'strict')
            self.assertEqual(seq.decode('utf-8', 'replace'), res)
            self.assertEqual((seq+b'b').decode('utf-8', 'replace'), res+'b')
            self.assertEqual(seq.decode('utf-8', 'ignore'),
                             res.replace('\uFFFD', ''))
def to_bytestring(self, seq):
return bytes(int(c, 16) for c in seq.split())
    def assertCorrectUTF8Decoding(self, seq, res, err):
        """
        Check invalid-UTF-8 handling across the three error handlers.

        The byte sequence *seq* must raise a UnicodeDecodeError whose
        message contains *err* under 'strict', decode to *res* under
        'replace', and decode to *res* with the U+FFFD markers removed
        under 'ignore' — both on its own and embedded between valid
        ASCII runs.
        """
        with self.assertRaises(UnicodeDecodeError) as cm:
            seq.decode('utf-8')
        exc = cm.exception
        self.assertIn(err, str(exc))
        self.assertEqual(seq.decode('utf-8', 'replace'), res)
        # Surrounding context must not change how the bad span is handled.
        self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'replace'),
                         'aaaa' + res + 'bbbb')
        res = res.replace('\ufffd', '')
        self.assertEqual(seq.decode('utf-8', 'ignore'), res)
        self.assertEqual((b'aaaa' + seq + b'bbbb').decode('utf-8', 'ignore'),
                         'aaaa' + res + 'bbbb')
def test_invalid_start_byte(self):
"""
Test that an 'invalid start byte' error is raised when the first byte
is not in the ASCII range or is not a valid start byte of a 2-, 3-, or
4-bytes sequence. The invalid start byte is replaced with a single
U+FFFD when errors='replace'.
E.g. <80> is a continuation byte and can appear only after a start byte.
"""
FFFD = '\ufffd'
for byte in b'\x80\xA0\x9F\xBF\xC0\xC1\xF5\xFF':
self.assertCorrectUTF8Decoding(bytes([byte]), '\ufffd',
'invalid start byte')
def test_unexpected_end_of_data(self):
"""
Test that an 'unexpected end of data' error is raised when the string
ends after a start byte of a 2-, 3-, or 4-bytes sequence without having
enough continuation bytes. The incomplete sequence is replaced with a
single U+FFFD when errors='replace'.
E.g. in the sequence <F3 80 80>, F3 is the start byte of a 4-bytes
sequence, but it's followed by only 2 valid continuation bytes and the
last continuation bytes is missing.
Note: the continuation bytes must be all valid, if one of them is
invalid another error will be raised.
"""
sequences = [
'C2', 'DF',
'E0 A0', 'E0 BF', 'E1 80', 'E1 BF', 'EC 80', 'EC BF',
'ED 80', 'ED 9F', 'EE 80', 'EE BF', 'EF 80', 'EF BF',
'F0 90', 'F0 BF', 'F0 90 80', 'F0 90 BF', 'F0 BF 80', 'F0 BF BF',
'F1 80', 'F1 BF', 'F1 80 80', 'F1 80 BF', 'F1 BF 80', 'F1 BF BF',
'F3 80', 'F3 BF', 'F3 80 80', 'F3 80 BF', 'F3 BF 80', 'F3 BF BF',
'F4 80', 'F4 8F', 'F4 80 80', 'F4 80 BF', 'F4 8F 80', 'F4 8F BF'
]
FFFD = '\ufffd'
for seq in sequences:
self.assertCorrectUTF8Decoding(self.to_bytestring(seq), '\ufffd',
'unexpected end of data')
def test_invalid_cb_for_2bytes_seq(self):
"""
Test that an 'invalid continuation byte' error is raised when the
continuation byte of a 2-bytes sequence is invalid. The start byte
is replaced by a single U+FFFD and the second byte is handled
separately when errors='replace'.
E.g. in the sequence <C2 41>, C2 is the start byte of a 2-bytes
sequence, but 41 is not a valid continuation byte because it's the
ASCII letter 'A'.
"""
FFFD = '\ufffd'
FFFDx2 = FFFD * 2
sequences = [
('C2 00', FFFD+'\x00'), ('C2 7F', FFFD+'\x7f'),
('C2 C0', FFFDx2), ('C2 FF', FFFDx2),
('DF 00', FFFD+'\x00'), ('DF 7F', FFFD+'\x7f'),
('DF C0', FFFDx2), ('DF FF', FFFDx2),
]
for seq, res in sequences:
self.assertCorrectUTF8Decoding(self.to_bytestring(seq), res,
'invalid continuation byte')
    def test_invalid_cb_for_3bytes_seq(self):
        """
        Test that an 'invalid continuation byte' error is raised when the
        continuation byte(s) of a 3-bytes sequence are invalid.  When
        errors='replace', if the first continuation byte is valid, the first
        two bytes (start byte + 1st cb) are replaced by a single U+FFFD and the
        third byte is handled separately, otherwise only the start byte is
        replaced with a U+FFFD and the other continuation bytes are handled
        separately.
        E.g. in the sequence <E1 80 41>, E1 is the start byte of a 3-bytes
        sequence, 80 is a valid continuation byte, but 41 is not a valid cb
        because it's the ASCII letter 'A'.
        Note: when the start byte is E0 or ED, the valid ranges for the first
        continuation byte are limited to A0..BF and 80..9F respectively.
        Python 2 used to consider all the bytes in range 80..BF valid when the
        start byte was ED. This is fixed in Python 3.
        """
        FFFD = '\ufffd'
        FFFDx2 = FFFD * 2
        # (space-separated hex sequence, expected 'replace' output) pairs.
        sequences = [
            ('E0 00', FFFD+'\x00'), ('E0 7F', FFFD+'\x7f'), ('E0 80', FFFDx2),
            ('E0 9F', FFFDx2), ('E0 C0', FFFDx2), ('E0 FF', FFFDx2),
            ('E0 A0 00', FFFD+'\x00'), ('E0 A0 7F', FFFD+'\x7f'),
            ('E0 A0 C0', FFFDx2), ('E0 A0 FF', FFFDx2),
            ('E0 BF 00', FFFD+'\x00'), ('E0 BF 7F', FFFD+'\x7f'),
            ('E0 BF C0', FFFDx2), ('E0 BF FF', FFFDx2), ('E1 00', FFFD+'\x00'),
            ('E1 7F', FFFD+'\x7f'), ('E1 C0', FFFDx2), ('E1 FF', FFFDx2),
            ('E1 80 00', FFFD+'\x00'), ('E1 80 7F', FFFD+'\x7f'),
            ('E1 80 C0', FFFDx2), ('E1 80 FF', FFFDx2),
            ('E1 BF 00', FFFD+'\x00'), ('E1 BF 7F', FFFD+'\x7f'),
            ('E1 BF C0', FFFDx2), ('E1 BF FF', FFFDx2), ('EC 00', FFFD+'\x00'),
            ('EC 7F', FFFD+'\x7f'), ('EC C0', FFFDx2), ('EC FF', FFFDx2),
            ('EC 80 00', FFFD+'\x00'), ('EC 80 7F', FFFD+'\x7f'),
            ('EC 80 C0', FFFDx2), ('EC 80 FF', FFFDx2),
            ('EC BF 00', FFFD+'\x00'), ('EC BF 7F', FFFD+'\x7f'),
            ('EC BF C0', FFFDx2), ('EC BF FF', FFFDx2), ('ED 00', FFFD+'\x00'),
            ('ED 7F', FFFD+'\x7f'),
            ('ED A0', FFFDx2), ('ED BF', FFFDx2), # see note ^
            ('ED C0', FFFDx2), ('ED FF', FFFDx2), ('ED 80 00', FFFD+'\x00'),
            ('ED 80 7F', FFFD+'\x7f'), ('ED 80 C0', FFFDx2),
            ('ED 80 FF', FFFDx2), ('ED 9F 00', FFFD+'\x00'),
            ('ED 9F 7F', FFFD+'\x7f'), ('ED 9F C0', FFFDx2),
            ('ED 9F FF', FFFDx2), ('EE 00', FFFD+'\x00'),
            ('EE 7F', FFFD+'\x7f'), ('EE C0', FFFDx2), ('EE FF', FFFDx2),
            ('EE 80 00', FFFD+'\x00'), ('EE 80 7F', FFFD+'\x7f'),
            ('EE 80 C0', FFFDx2), ('EE 80 FF', FFFDx2),
            ('EE BF 00', FFFD+'\x00'), ('EE BF 7F', FFFD+'\x7f'),
            ('EE BF C0', FFFDx2), ('EE BF FF', FFFDx2), ('EF 00', FFFD+'\x00'),
            ('EF 7F', FFFD+'\x7f'), ('EF C0', FFFDx2), ('EF FF', FFFDx2),
            ('EF 80 00', FFFD+'\x00'), ('EF 80 7F', FFFD+'\x7f'),
            ('EF 80 C0', FFFDx2), ('EF 80 FF', FFFDx2),
            ('EF BF 00', FFFD+'\x00'), ('EF BF 7F', FFFD+'\x7f'),
            ('EF BF C0', FFFDx2), ('EF BF FF', FFFDx2),
        ]
        for seq, res in sequences:
            self.assertCorrectUTF8Decoding(self.to_bytestring(seq), res,
                                           'invalid continuation byte')
    def test_invalid_cb_for_4bytes_seq(self):
        """
        Test that an 'invalid continuation byte' error is raised when the
        continuation byte(s) of a 4-bytes sequence are invalid.  When
        errors='replace', the start byte and all the following valid
        continuation bytes are replaced with a single U+FFFD, and all the bytes
        starting from the first invalid continuation byte (included) are
        handled separately.
        E.g. in the sequence <F1 80 41>, F1 is the start byte of a 4-bytes
        sequence, 80 is a valid continuation byte, but 41 is not a valid cb
        because it's the ASCII letter 'A'.
        Note: when the start byte is F0 or F4, the valid ranges for the first
        continuation byte are limited to 90..BF and 80..8F respectively.
        However, when the start byte is ED, Python 2 considers all the bytes
        in range 80..BF valid. This is fixed in Python 3.
        """
        FFFD = '\ufffd'
        FFFDx2 = FFFD * 2
        # (space-separated hex sequence, expected 'replace' output) pairs.
        sequences = [
            ('F0 00', FFFD+'\x00'), ('F0 7F', FFFD+'\x7f'), ('F0 80', FFFDx2),
            ('F0 8F', FFFDx2), ('F0 C0', FFFDx2), ('F0 FF', FFFDx2),
            ('F0 90 00', FFFD+'\x00'), ('F0 90 7F', FFFD+'\x7f'),
            ('F0 90 C0', FFFDx2), ('F0 90 FF', FFFDx2),
            ('F0 BF 00', FFFD+'\x00'), ('F0 BF 7F', FFFD+'\x7f'),
            ('F0 BF C0', FFFDx2), ('F0 BF FF', FFFDx2),
            ('F0 90 80 00', FFFD+'\x00'), ('F0 90 80 7F', FFFD+'\x7f'),
            ('F0 90 80 C0', FFFDx2), ('F0 90 80 FF', FFFDx2),
            ('F0 90 BF 00', FFFD+'\x00'), ('F0 90 BF 7F', FFFD+'\x7f'),
            ('F0 90 BF C0', FFFDx2), ('F0 90 BF FF', FFFDx2),
            ('F0 BF 80 00', FFFD+'\x00'), ('F0 BF 80 7F', FFFD+'\x7f'),
            ('F0 BF 80 C0', FFFDx2), ('F0 BF 80 FF', FFFDx2),
            ('F0 BF BF 00', FFFD+'\x00'), ('F0 BF BF 7F', FFFD+'\x7f'),
            ('F0 BF BF C0', FFFDx2), ('F0 BF BF FF', FFFDx2),
            ('F1 00', FFFD+'\x00'), ('F1 7F', FFFD+'\x7f'), ('F1 C0', FFFDx2),
            ('F1 FF', FFFDx2), ('F1 80 00', FFFD+'\x00'),
            ('F1 80 7F', FFFD+'\x7f'), ('F1 80 C0', FFFDx2),
            ('F1 80 FF', FFFDx2), ('F1 BF 00', FFFD+'\x00'),
            ('F1 BF 7F', FFFD+'\x7f'), ('F1 BF C0', FFFDx2),
            ('F1 BF FF', FFFDx2), ('F1 80 80 00', FFFD+'\x00'),
            ('F1 80 80 7F', FFFD+'\x7f'), ('F1 80 80 C0', FFFDx2),
            ('F1 80 80 FF', FFFDx2), ('F1 80 BF 00', FFFD+'\x00'),
            ('F1 80 BF 7F', FFFD+'\x7f'), ('F1 80 BF C0', FFFDx2),
            ('F1 80 BF FF', FFFDx2), ('F1 BF 80 00', FFFD+'\x00'),
            ('F1 BF 80 7F', FFFD+'\x7f'), ('F1 BF 80 C0', FFFDx2),
            ('F1 BF 80 FF', FFFDx2), ('F1 BF BF 00', FFFD+'\x00'),
            ('F1 BF BF 7F', FFFD+'\x7f'), ('F1 BF BF C0', FFFDx2),
            ('F1 BF BF FF', FFFDx2), ('F3 00', FFFD+'\x00'),
            ('F3 7F', FFFD+'\x7f'), ('F3 C0', FFFDx2), ('F3 FF', FFFDx2),
            ('F3 80 00', FFFD+'\x00'), ('F3 80 7F', FFFD+'\x7f'),
            ('F3 80 C0', FFFDx2), ('F3 80 FF', FFFDx2),
            ('F3 BF 00', FFFD+'\x00'), ('F3 BF 7F', FFFD+'\x7f'),
            ('F3 BF C0', FFFDx2), ('F3 BF FF', FFFDx2),
            ('F3 80 80 00', FFFD+'\x00'), ('F3 80 80 7F', FFFD+'\x7f'),
            ('F3 80 80 C0', FFFDx2), ('F3 80 80 FF', FFFDx2),
            ('F3 80 BF 00', FFFD+'\x00'), ('F3 80 BF 7F', FFFD+'\x7f'),
            ('F3 80 BF C0', FFFDx2), ('F3 80 BF FF', FFFDx2),
            ('F3 BF 80 00', FFFD+'\x00'), ('F3 BF 80 7F', FFFD+'\x7f'),
            ('F3 BF 80 C0', FFFDx2), ('F3 BF 80 FF', FFFDx2),
            ('F3 BF BF 00', FFFD+'\x00'), ('F3 BF BF 7F', FFFD+'\x7f'),
            ('F3 BF BF C0', FFFDx2), ('F3 BF BF FF', FFFDx2),
            ('F4 00', FFFD+'\x00'), ('F4 7F', FFFD+'\x7f'), ('F4 90', FFFDx2),
            ('F4 BF', FFFDx2), ('F4 C0', FFFDx2), ('F4 FF', FFFDx2),
            ('F4 80 00', FFFD+'\x00'), ('F4 80 7F', FFFD+'\x7f'),
            ('F4 80 C0', FFFDx2), ('F4 80 FF', FFFDx2),
            ('F4 8F 00', FFFD+'\x00'), ('F4 8F 7F', FFFD+'\x7f'),
            ('F4 8F C0', FFFDx2), ('F4 8F FF', FFFDx2),
            ('F4 80 80 00', FFFD+'\x00'), ('F4 80 80 7F', FFFD+'\x7f'),
            ('F4 80 80 C0', FFFDx2), ('F4 80 80 FF', FFFDx2),
            ('F4 80 BF 00', FFFD+'\x00'), ('F4 80 BF 7F', FFFD+'\x7f'),
            ('F4 80 BF C0', FFFDx2), ('F4 80 BF FF', FFFDx2),
            ('F4 8F 80 00', FFFD+'\x00'), ('F4 8F 80 7F', FFFD+'\x7f'),
            ('F4 8F 80 C0', FFFDx2), ('F4 8F 80 FF', FFFDx2),
            ('F4 8F BF 00', FFFD+'\x00'), ('F4 8F BF 7F', FFFD+'\x7f'),
            ('F4 8F BF C0', FFFDx2), ('F4 8F BF FF', FFFDx2)
        ]
        for seq, res in sequences:
            self.assertCorrectUTF8Decoding(self.to_bytestring(seq), res,
                                           'invalid continuation byte')
def test_codecs_idna(self):
# Test whether trailing dot is preserved
self.assertEqual("www.python.org.".encode("idna"), b"www.python.org.")
    def test_codecs_errors(self):
        """Exercise codec error handling for encode, decode, escape
        codecs, bogus codec results, bad arguments and lone surrogates."""
        # Error handling (encoding)
        self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii')
        self.assertRaises(UnicodeError, 'Andr\202 x'.encode, 'ascii','strict')
        self.assertEqual('Andr\202 x'.encode('ascii','ignore'), b"Andr x")
        self.assertEqual('Andr\202 x'.encode('ascii','replace'), b"Andr? x")
        # Positional and keyword forms of the errors argument must agree.
        self.assertEqual('Andr\202 x'.encode('ascii', 'replace'),
                         'Andr\202 x'.encode('ascii', errors='replace'))
        self.assertEqual('Andr\202 x'.encode('ascii', 'ignore'),
                         'Andr\202 x'.encode(encoding='ascii', errors='ignore'))
        # Error handling (decoding)
        self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii')
        self.assertRaises(UnicodeError, str, b'Andr\202 x', 'ascii', 'strict')
        self.assertEqual(str(b'Andr\202 x', 'ascii', 'ignore'), "Andr x")
        self.assertEqual(str(b'Andr\202 x', 'ascii', 'replace'), 'Andr\uFFFD x')
        # Error handling (unknown character names)
        self.assertEqual(b"\\N{foo}xx".decode("unicode-escape", "ignore"), "xx")
        # Error handling (truncated escape sequence)
        self.assertRaises(UnicodeError, b"\\".decode, "unicode-escape")
        # NOTE(review): 'test.unicode1'/'test.unicode2' appear to be bogus
        # codecs provided by a codec search function registered elsewhere in
        # this test suite, returning ill-formed results — TODO confirm.
        self.assertRaises(TypeError, b"hello".decode, "test.unicode1")
        self.assertRaises(TypeError, str, b"hello", "test.unicode2")
        self.assertRaises(TypeError, "hello".encode, "test.unicode1")
        self.assertRaises(TypeError, "hello".encode, "test.unicode2")
        # Error handling (wrong arguments)
        self.assertRaises(TypeError, "hello".encode, 42, 42, 42)
        # Error handling (lone surrogate in PyUnicode_TransformDecimalToASCII())
        self.assertRaises(UnicodeError, float, "\ud800")
        self.assertRaises(UnicodeError, float, "\udf00")
        self.assertRaises(UnicodeError, complex, "\ud800")
        self.assertRaises(UnicodeError, complex, "\udf00")
    def test_codecs(self):
        """Round-trip checks across the common codecs for progressively
        larger code-point ranges.

        NOTE(review): relies on the module-level ``warnings`` import, and
        on the 'unicode_internal' codec, which is deprecated (and removed
        in later Python versions) — this test targets the version the
        suite was written for.
        """
        # Encoding
        self.assertEqual('hello'.encode('ascii'), b'hello')
        self.assertEqual('hello'.encode('utf-7'), b'hello')
        self.assertEqual('hello'.encode('utf-8'), b'hello')
        self.assertEqual('hello'.encode('utf-8'), b'hello')
        self.assertEqual('hello'.encode('utf-16-le'), b'h\000e\000l\000l\000o\000')
        self.assertEqual('hello'.encode('utf-16-be'), b'\000h\000e\000l\000l\000o')
        self.assertEqual('hello'.encode('latin-1'), b'hello')
        # Default encoding is utf-8
        self.assertEqual('\u2603'.encode(), b'\xe2\x98\x83')
        # Roundtrip safety for BMP (just the first 1024 chars)
        for c in range(1024):
            u = chr(c)
            for encoding in ('utf-7', 'utf-8', 'utf-16', 'utf-16-le',
                             'utf-16-be', 'raw_unicode_escape',
                             'unicode_escape', 'unicode_internal'):
                with warnings.catch_warnings():
                    # unicode-internal has been deprecated
                    warnings.simplefilter("ignore", DeprecationWarning)
                    self.assertEqual(str(u.encode(encoding),encoding), u)
        # Roundtrip safety for BMP (just the first 256 chars)
        for c in range(256):
            u = chr(c)
            for encoding in ('latin-1',):
                self.assertEqual(str(u.encode(encoding),encoding), u)
        # Roundtrip safety for BMP (just the first 128 chars)
        for c in range(128):
            u = chr(c)
            for encoding in ('ascii',):
                self.assertEqual(str(u.encode(encoding),encoding), u)
        # Roundtrip safety for non-BMP (just a few chars)
        with warnings.catch_warnings():
            # unicode-internal has been deprecated
            warnings.simplefilter("ignore", DeprecationWarning)
            u = '\U00010001\U00020002\U00030003\U00040004\U00050005'
            for encoding in ('utf-8', 'utf-16', 'utf-16-le', 'utf-16-be',
                             'raw_unicode_escape',
                             'unicode_escape', 'unicode_internal'):
                self.assertEqual(str(u.encode(encoding),encoding), u)
        # UTF-8 must be roundtrip safe for all code points
        # (except surrogates, which are forbidden).
        u = ''.join(map(chr, list(range(0, 0xd800)) +
                             list(range(0xe000, 0x110000))))
        for encoding in ('utf-8',):
            self.assertEqual(str(u.encode(encoding),encoding), u)
    def test_codecs_charmap(self):
        """Round-trip bytes through the charmap-based codecs.

        Encodings listed as comments are excluded because they either have
        undefined byte positions or are known not to round-trip.
        """
        # 0-127
        s = bytes(range(128))
        for encoding in (
            'cp037', 'cp1026', 'cp273',
            'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
            'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
            'cp863', 'cp865', 'cp866', 'cp1125',
            'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
            'iso8859_2', 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6',
            'iso8859_7', 'iso8859_9', 'koi8_r', 'latin_1',
            'mac_cyrillic', 'mac_latin2',
            'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
            'cp1256', 'cp1257', 'cp1258',
            'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
            'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
            'cp1006', 'iso8859_8',
            ### These have undefined mappings:
            #'cp424',
            ### These fail the round-trip:
            #'cp875'
            ):
            self.assertEqual(str(s, encoding).encode(encoding), s)
        # 128-255
        s = bytes(range(128, 256))
        for encoding in (
            'cp037', 'cp1026', 'cp273',
            'cp437', 'cp500', 'cp720', 'cp737', 'cp775', 'cp850',
            'cp852', 'cp855', 'cp858', 'cp860', 'cp861', 'cp862',
            'cp863', 'cp865', 'cp866', 'cp1125',
            'iso8859_10', 'iso8859_13', 'iso8859_14', 'iso8859_15',
            'iso8859_2', 'iso8859_4', 'iso8859_5',
            'iso8859_9', 'koi8_r', 'latin_1',
            'mac_cyrillic', 'mac_latin2',
            ### These have undefined mappings:
            #'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254', 'cp1255',
            #'cp1256', 'cp1257', 'cp1258',
            #'cp424', 'cp856', 'cp857', 'cp864', 'cp869', 'cp874',
            #'iso8859_3', 'iso8859_6', 'iso8859_7',
            #'mac_greek', 'mac_iceland','mac_roman', 'mac_turkish',
            ### These fail the round-trip:
            #'cp1006', 'cp875', 'iso8859_8',
            ):
            self.assertEqual(str(s, encoding).encode(encoding), s)
def test_concatenation(self):
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def"), "abcdef")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
self.assertEqual(("abc" "def" "ghi"), "abcdefghi")
def test_printing(self):
class BitBucket:
def write(self, text):
pass
out = BitBucket()
print('abc', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc', 'def', file=out)
print('abc\n', file=out)
print('abc\n', end=' ', file=out)
print('abc\n', end=' ', file=out)
print('def\n', file=out)
print('def\n', file=out)
def test_ucs4(self):
x = '\U00100000'
y = x.encode("raw-unicode-escape").decode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00100000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
y = br'\U00010000'
x = y.decode("raw-unicode-escape").encode("raw-unicode-escape")
self.assertEqual(x, y)
try:
br'\U11111111'.decode("raw-unicode-escape")
except UnicodeDecodeError as e:
self.assertEqual(e.start, 0)
self.assertEqual(e.end, 10)
else:
self.fail("Should have raised UnicodeDecodeError")
    def test_conversion(self):
        """str() must honor __str__ overrides on plain classes and on str
        subclasses."""
        # Make sure __unicode__() works properly
        class Foo0:
            def __str__(self):
                return "foo"
        class Foo1:
            def __str__(self):
                return "foo"
        class Foo2(object):
            def __str__(self):
                return "foo"
        class Foo3(object):
            def __str__(self):
                return "foo"
        class Foo4(str):
            def __str__(self):
                return "foo"
        class Foo5(str):
            def __str__(self):
                return "foo"
        # NOTE(review): Foo6/Foo7 define __str__ twice — the second wins.
        # Looks like a 2to3 artifact of an original __str__/__unicode__
        # pair; the assertions below rely on "foou" — TODO confirm.
        class Foo6(str):
            def __str__(self):
                return "foos"
            def __str__(self):
                return "foou"
        class Foo7(str):
            def __str__(self):
                return "foos"
            def __str__(self):
                return "foou"
        class Foo8(str):
            def __new__(cls, content=""):
                return str.__new__(cls, 2*content)
            def __str__(self):
                return self
        class Foo9(str):
            def __str__(self):
                return "not unicode"
        self.assertEqual(str(Foo0()), "foo")
        self.assertEqual(str(Foo1()), "foo")
        self.assertEqual(str(Foo2()), "foo")
        self.assertEqual(str(Foo3()), "foo")
        self.assertEqual(str(Foo4("bar")), "foo")
        self.assertEqual(str(Foo5("bar")), "foo")
        self.assertEqual(str(Foo6("bar")), "foou")
        self.assertEqual(str(Foo7("bar")), "foou")
        self.assertEqual(str(Foo8("foo")), "foofoo")
        self.assertEqual(str(Foo9("foo")), "not unicode")
def test_unicode_repr(self):
class s1:
def __repr__(self):
return '\\n'
class s2:
def __repr__(self):
return '\\n'
self.assertEqual(repr(s1()), '\\n')
self.assertEqual(repr(s2()), '\\n')
def test_printable_repr(self):
self.assertEqual(repr('\U00010000'), "'%c'" % (0x10000,)) # printable
self.assertEqual(repr('\U00014000'), "'\\U00014000'") # nonprintable
    # This test only affects 32-bit platforms because expandtabs can only take
    # an int as the max value, not a 64-bit C long.  If expandtabs is changed
    # to take a 64-bit long, this test should apply to all platforms.
    @unittest.skipIf(sys.maxsize > (1 << 32) or struct.calcsize('P') != 4,
                     'only applies to 32-bit platforms')
    def test_expandtabs_overflows_gracefully(self):
        """expandtabs() must raise OverflowError, not crash, when the
        resulting string would exceed the platform int limit."""
        self.assertRaises(OverflowError, 't\tt\t'.expandtabs, sys.maxsize)
@support.cpython_only
def test_expandtabs_optimization(self):
s = 'abc'
self.assertIs(s.expandtabs(), s)
    def test_raiseMemError(self):
        """Allocating a maximal-length string must raise MemoryError.

        NOTE(review): the struct sizes below mirror CPython's internal
        string object layouts for this interpreter version — TODO confirm
        they match the running build.
        """
        if struct.calcsize('P') == 8:
            # 64 bits pointers
            ascii_struct_size = 48
            compact_struct_size = 72
        else:
            # 32 bits pointers
            ascii_struct_size = 24
            compact_struct_size = 36
        for char in ('a', '\xe9', '\u20ac', '\U0010ffff'):
            code = ord(char)
            if code < 0x100:
                char_size = 1 # sizeof(Py_UCS1)
                struct_size = ascii_struct_size
            elif code < 0x10000:
                char_size = 2 # sizeof(Py_UCS2)
                struct_size = compact_struct_size
            else:
                char_size = 4 # sizeof(Py_UCS4)
                struct_size = compact_struct_size
            # Note: sys.maxsize is half of the actual max allocation because of
            # the signedness of Py_ssize_t. Strings of maxlen-1 should in principle
            # be allocatable, given enough memory.
            maxlen = ((sys.maxsize - struct_size) // char_size)
            alloc = lambda: char * maxlen
            # NOTE(review): asserted twice — presumably to exercise the
            # allocation path a second time after the first failure; confirm.
            self.assertRaises(MemoryError, alloc)
            self.assertRaises(MemoryError, alloc)
def test_format_subclass(self):
class S(str):
def __str__(self):
return '__str__ overridden'
s = S('xxx')
self.assertEqual("%s" % s, '__str__ overridden')
self.assertEqual("{}".format(s), '__str__ overridden')
# Test PyUnicode_FromFormat()
def test_from_format(self):
    """Exercise the C-level PyUnicode_FromFormat() through ctypes.

    Covers the conversion specifiers (%U, %c, %s, %A, %R, %S, %V and the
    integer/pointer formats), width/precision modifiers, and rejection of
    non-ASCII format strings.
    """
    support.import_module('ctypes')
    from ctypes import (
        pythonapi, py_object, sizeof,
        c_int, c_long, c_longlong, c_ssize_t,
        c_uint, c_ulong, c_ulonglong, c_size_t, c_void_p)
    name = "PyUnicode_FromFormat"
    _PyUnicode_FromFormat = getattr(pythonapi, name)
    _PyUnicode_FromFormat.restype = py_object

    def PyUnicode_FromFormat(format, *args):
        # str arguments must be wrapped in py_object before the C call.
        cargs = tuple(
            py_object(arg) if isinstance(arg, str) else arg
            for arg in args)
        return _PyUnicode_FromFormat(format, *cargs)

    def check_format(expected, format, *args):
        text = PyUnicode_FromFormat(format, *args)
        self.assertEqual(expected, text)

    # ascii format, non-ascii argument
    check_format('ascii\x7f=unicode\xe9',
                 b'ascii\x7f=%U', 'unicode\xe9')

    # non-ascii format, ascii argument: ensure that PyUnicode_FromFormatV()
    # raises an error.
    # BUG FIX: the regex is now a raw string -- the original wrote '\(' in
    # a normal string literal, which only works because Python leaves
    # unknown escapes alone and raises DeprecationWarning (SyntaxWarning in
    # newer releases) for the invalid escape sequence.
    self.assertRaisesRegex(ValueError,
        r'^PyUnicode_FromFormatV\(\) expects an ASCII-encoded format '
        r'string, got a non-ASCII byte: 0xe9$',
        PyUnicode_FromFormat, b'unicode\xe9=%s', 'ascii')

    # test "%c"
    check_format('\uabcd', b'%c', c_int(0xabcd))
    check_format('\U0010ffff', b'%c', c_int(0x10ffff))
    with self.assertRaises(OverflowError):
        PyUnicode_FromFormat(b'%c', c_int(0x110000))
    # Issue #18183
    check_format('\U00010000\U00100000',
                 b'%c%c', c_int(0x10000), c_int(0x100000))

    # test "%" (escaped percent signs)
    check_format('%', b'%')
    check_format('%', b'%%')
    check_format('%s', b'%%s')
    check_format('[%]', b'[%%]')
    check_format('%abc', b'%%%s', b'abc')

    # truncated string
    check_format('abc', b'%.3s', b'abcdef')
    check_format('abc[\ufffd', b'%.5s', 'abc[\u20ac]'.encode('utf8'))
    check_format("'\\u20acABC'", b'%A', '\u20acABC')
    check_format("'\\u20", b'%.5A', '\u20acABCDEF')
    check_format("'\u20acABC'", b'%R', '\u20acABC')
    check_format("'\u20acA", b'%.3R', '\u20acABCDEF')
    check_format('\u20acAB', b'%.3S', '\u20acABCDEF')
    check_format('\u20acAB', b'%.3U', '\u20acABCDEF')
    check_format('\u20acAB', b'%.3V', '\u20acABCDEF', None)
    check_format('abc[\ufffd',
                 b'%.5V', None, 'abc[\u20ac]'.encode('utf8'))

    # following tests come from #7330
    # test width modifier and precision modifier with %S
    check_format("repr= abc", b'repr=%5S', 'abc')
    check_format("repr=ab", b'repr=%.2S', 'abc')
    check_format("repr= ab", b'repr=%5.2S', 'abc')

    # test width modifier and precision modifier with %R
    check_format("repr= 'abc'", b'repr=%8R', 'abc')
    check_format("repr='ab", b'repr=%.3R', 'abc')
    check_format("repr= 'ab", b'repr=%5.3R', 'abc')

    # test width modifier and precision modifier with %A
    check_format("repr= 'abc'", b'repr=%8A', 'abc')
    check_format("repr='ab", b'repr=%.3A', 'abc')
    check_format("repr= 'ab", b'repr=%5.3A', 'abc')

    # test width modifier and precision modifier with %s
    check_format("repr= abc", b'repr=%5s', b'abc')
    check_format("repr=ab", b'repr=%.2s', b'abc')
    check_format("repr= ab", b'repr=%5.2s', b'abc')

    # test width modifier and precision modifier with %U
    check_format("repr= abc", b'repr=%5U', 'abc')
    check_format("repr=ab", b'repr=%.2U', 'abc')
    check_format("repr= ab", b'repr=%5.2U', 'abc')

    # test width modifier and precision modifier with %V
    check_format("repr= abc", b'repr=%5V', 'abc', b'123')
    check_format("repr=ab", b'repr=%.2V', 'abc', b'123')
    check_format("repr= ab", b'repr=%5.2V', 'abc', b'123')
    check_format("repr= 123", b'repr=%5V', None, b'123')
    check_format("repr=12", b'repr=%.2V', None, b'123')
    check_format("repr= 12", b'repr=%5.2V', None, b'123')

    # test integer formats (%i, %d, %u)
    check_format('010', b'%03i', c_int(10))
    check_format('0010', b'%0.4i', c_int(10))
    check_format('-123', b'%i', c_int(-123))
    check_format('-123', b'%li', c_long(-123))
    check_format('-123', b'%lli', c_longlong(-123))
    check_format('-123', b'%zi', c_ssize_t(-123))
    check_format('-123', b'%d', c_int(-123))
    check_format('-123', b'%ld', c_long(-123))
    check_format('-123', b'%lld', c_longlong(-123))
    check_format('-123', b'%zd', c_ssize_t(-123))
    check_format('123', b'%u', c_uint(123))
    check_format('123', b'%lu', c_ulong(123))
    check_format('123', b'%llu', c_ulonglong(123))
    check_format('123', b'%zu', c_size_t(123))

    # test long output
    min_longlong = -(2 ** (8 * sizeof(c_longlong) - 1))
    max_longlong = -min_longlong - 1
    check_format(str(min_longlong), b'%lld', c_longlong(min_longlong))
    check_format(str(max_longlong), b'%lld', c_longlong(max_longlong))
    max_ulonglong = 2 ** (8 * sizeof(c_ulonglong)) - 1
    check_format(str(max_ulonglong), b'%llu', c_ulonglong(max_ulonglong))
    # %p output is platform dependent; only verify it does not crash.
    PyUnicode_FromFormat(b'%p', c_void_p(-1))

    # test padding (width and/or precision)
    check_format('123'.rjust(10, '0'), b'%010i', c_int(123))
    check_format('123'.rjust(100), b'%100i', c_int(123))
    check_format('123'.rjust(100, '0'), b'%.100i', c_int(123))
    check_format('123'.rjust(80, '0').rjust(100), b'%100.80i', c_int(123))
    check_format('123'.rjust(10, '0'), b'%010u', c_uint(123))
    check_format('123'.rjust(100), b'%100u', c_uint(123))
    check_format('123'.rjust(100, '0'), b'%.100u', c_uint(123))
    check_format('123'.rjust(80, '0').rjust(100), b'%100.80u', c_uint(123))
    check_format('123'.rjust(10, '0'), b'%010x', c_int(0x123))
    check_format('123'.rjust(100), b'%100x', c_int(0x123))
    check_format('123'.rjust(100, '0'), b'%.100x', c_int(0x123))
    check_format('123'.rjust(80, '0').rjust(100), b'%100.80x', c_int(0x123))

    # test %A
    check_format(r"%A:'abc\xe9\uabcd\U0010ffff'",
                 b'%%A:%A', 'abc\xe9\uabcd\U0010ffff')

    # test %V
    check_format('repr=abc', b'repr=%V', 'abc', b'xyz')

    # Test string decode from parameter of %s using utf-8.
    # b'\xe4\xba\xba\xe6\xb0\x91' is utf-8 encoded byte sequence of
    # '\u4eba\u6c11'
    check_format('repr=\u4eba\u6c11',
                 b'repr=%V', None, b'\xe4\xba\xba\xe6\xb0\x91')

    # Test replace error handler.
    check_format('repr=abc\ufffd', b'repr=%V', None, b'abc\xff')

    # not supported: copy the raw format string. these tests are just here
    # to check for crashes and should not be considered as specifications
    check_format('%s', b'%1%s', b'abc')
    check_format('%1abc', b'%1abc')
    check_format('%+i', b'%+i', c_int(10))
    check_format('%.%s', b'%.%s', b'abc')
# Test PyUnicode_AsWideChar()
@support.cpython_only
def test_aswidechar(self):
    """Exercise PyUnicode_AsWideChar() through the _testcapi helper."""
    from _testcapi import unicode_aswidechar
    support.import_module('ctypes')
    from ctypes import c_wchar, sizeof

    # (input, buffer length, expected size, expected wide string)
    for text, buflen, expected_size, expected in (
            ('abcdef', 2, 2, 'ab'),
            ('abc', 3, 3, 'abc'),
            ('abc', 4, 3, 'abc\0'),
            ('abc', 10, 3, 'abc\0'),
            ('abc\0def', 20, 7, 'abc\0def\0')):
        wchar, size = unicode_aswidechar(text, buflen)
        self.assertEqual(size, expected_size)
        self.assertEqual(wchar, expected)

    # A non-BMP character needs a surrogate pair with 16-bit wchar_t.
    nonbmp = chr(0x10ffff)
    if sizeof(c_wchar) == 2:
        buflen, nchar = 3, 2
    else:  # sizeof(c_wchar) == 4
        buflen, nchar = 2, 1
    wchar, size = unicode_aswidechar(nonbmp, buflen)
    self.assertEqual(size, nchar)
    self.assertEqual(wchar, nonbmp + '\0')
# Test PyUnicode_AsWideCharString()
@support.cpython_only
def test_aswidecharstring(self):
    """Exercise PyUnicode_AsWideCharString() through _testcapi."""
    from _testcapi import unicode_aswidecharstring
    support.import_module('ctypes')
    from ctypes import c_wchar, sizeof

    for text, expected_size, expected in (
            ('abc', 3, 'abc\0'),
            ('abc\0def', 7, 'abc\0def\0')):
        wchar, size = unicode_aswidecharstring(text)
        self.assertEqual(size, expected_size)
        self.assertEqual(wchar, expected)

    # A non-BMP character becomes a surrogate pair with 16-bit wchar_t.
    nonbmp = chr(0x10ffff)
    pair_len = 2 if sizeof(c_wchar) == 2 else 1  # 4-byte wchar_t: one unit
    wchar, size = unicode_aswidecharstring(nonbmp)
    self.assertEqual(size, pair_len)
    self.assertEqual(wchar, nonbmp + '\0')
def test_subclass_add(self):
class S(str):
def __add__(self, o):
return "3"
self.assertEqual(S("4") + S("5"), "3")
class S(str):
def __iadd__(self, o):
return "3"
s = S("1")
s += "4"
self.assertEqual(s, "3")
@support.cpython_only
def test_encode_decimal(self):
    """unicode_encodedecimal() maps Unicode digits and spaces to ASCII."""
    from _testcapi import unicode_encodedecimal

    for text, expected in (
            ('123', b'123'),
            ('\u0663.\u0661\u0664', b'3.14'),
            ("\N{EM SPACE}3.14\N{EN SPACE}", b' 3.14 ')):
        self.assertEqual(unicode_encodedecimal(text), expected)

    # Non-decimal characters fail in strict mode and are reported by the
    # codec even under the replace handler.
    self.assertRaises(UnicodeEncodeError,
                      unicode_encodedecimal, "123\u20ac", "strict")
    self.assertRaisesRegex(
        ValueError,
        "^'decimal' codec can't encode character",
        unicode_encodedecimal, "123\u20ac", "replace")
@support.cpython_only
def test_transform_decimal(self):
    """unicode_transformdecimaltoascii() only rewrites decimal digits."""
    from _testcapi import unicode_transformdecimaltoascii as transform_decimal

    for source, expected in (
            ('123', '123'),
            ('\u0663.\u0661\u0664', '3.14'),
            ("\N{EM SPACE}3.14\N{EN SPACE}", "\N{EM SPACE}3.14\N{EN SPACE}"),
            ('123\u20ac', '123\u20ac')):
        self.assertEqual(transform_decimal(source), expected)
def test_getnewargs(self):
text = 'abc'
args = text.__getnewargs__()
self.assertIsNot(args[0], text)
self.assertEqual(args[0], text)
self.assertEqual(len(args), 1)
def test_resize(self):
    # Resizing a unicode object whose wstr field has been materialized
    # (by encoding to 'unicode_internal') must clear and recompute it.
    # NOTE(review): the 'unicode_internal' codec was deprecated and later
    # removed (Python 3.8) -- this test only runs on older interpreters;
    # confirm the target interpreter version before relying on it.
    for length in range(1, 100, 7):
        # generate a fresh string (refcount=1)
        text = 'a' * length + 'b'
        with support.check_warnings(('unicode_internal codec has been '
                                     'deprecated', DeprecationWarning)):
            # fill wstr internal field
            abc = text.encode('unicode_internal')
            self.assertEqual(abc.decode('unicode_internal'), text)

            # resize text: wstr field must be cleared and then recomputed
            text += 'c'
            abcdef = text.encode('unicode_internal')
            self.assertNotEqual(abc, abcdef)
            self.assertEqual(abcdef.decode('unicode_internal'), text)
def test_compare(self):
    """Compare strings of every internal kind (ASCII/latin1/BMP/astral).

    Regression test for issue #17615: comparisons must work between
    operands whose PEP 393 representations (1, 2 or 4 bytes/char) differ.
    """
    N = 10
    ascii = 'a' * N
    ascii2 = 'z' * N
    latin = '\x80' * N
    latin2 = '\xff' * N
    bmp = '\u0100' * N
    bmp2 = '\uffff' * N
    astral = '\U00100000' * N
    astral2 = '\U0010ffff' * N
    strings = (
        ascii, ascii2,
        latin, latin2,
        bmp, bmp2,
        astral, astral2)
    # BUG FIX: the original iterated itertools.combinations(strings, 2),
    # which never yields a pair of identical objects, so the entire
    # "if equal:" branch below was dead code.  product(..., repeat=2)
    # yields each string paired with itself as well.
    for text1, text2 in itertools.product(strings, repeat=2):
        equal = (text1 is text2)
        self.assertEqual(text1 == text2, equal)
        self.assertEqual(text1 != text2, not equal)

        if equal:
            self.assertTrue(text1 <= text2)
            self.assertTrue(text1 >= text2)

            # text1 is text2: duplicate strings to skip the "str1 == str2"
            # optimization in unicode_compare_eq() and really compare
            # character per character
            copy1 = duplicate_string(text1)
            copy2 = duplicate_string(text2)
            self.assertIsNot(copy1, copy2)

            self.assertTrue(copy1 == copy2)
            self.assertFalse(copy1 != copy2)

            self.assertTrue(copy1 <= copy2)
            # BUG FIX: the original asserted "copy2 >= copy2", a vacuous
            # self-comparison; the symmetric check against copy1 is meant.
            self.assertTrue(copy2 >= copy1)

    self.assertTrue(ascii < ascii2)
    self.assertTrue(ascii < latin)
    self.assertTrue(ascii < bmp)
    self.assertTrue(ascii < astral)
    self.assertFalse(ascii >= ascii2)
    self.assertFalse(ascii >= latin)
    self.assertFalse(ascii >= bmp)
    self.assertFalse(ascii >= astral)

    self.assertFalse(latin < ascii)
    self.assertTrue(latin < latin2)
    self.assertTrue(latin < bmp)
    self.assertTrue(latin < astral)
    self.assertTrue(latin >= ascii)
    self.assertFalse(latin >= latin2)
    self.assertFalse(latin >= bmp)
    self.assertFalse(latin >= astral)

    self.assertFalse(bmp < ascii)
    self.assertFalse(bmp < latin)
    self.assertTrue(bmp < bmp2)
    self.assertTrue(bmp < astral)
    self.assertTrue(bmp >= ascii)
    self.assertTrue(bmp >= latin)
    self.assertFalse(bmp >= bmp2)
    self.assertFalse(bmp >= astral)

    self.assertFalse(astral < ascii)
    self.assertFalse(astral < latin)
    self.assertFalse(astral < bmp2)
    self.assertTrue(astral < astral2)
    self.assertTrue(astral >= ascii)
    self.assertTrue(astral >= latin)
    self.assertTrue(astral >= bmp2)
    self.assertFalse(astral >= astral2)
class StringModuleTest(unittest.TestCase):
def test_formatter_parser(self):
def parse(format):
return list(_string.formatter_parser(format))
formatter = parse("prefix {2!s}xxx{0:^+10.3f}{obj.attr!s} {z[0]!s:10}")
self.assertEqual(formatter, [
('prefix ', '2', '', 's'),
('xxx', '0', '^+10.3f', None),
('', 'obj.attr', '', 's'),
(' ', 'z[0]', '10', 's'),
])
formatter = parse("prefix {} suffix")
self.assertEqual(formatter, [
('prefix ', '', '', None),
(' suffix', None, None, None),
])
formatter = parse("str")
self.assertEqual(formatter, [
('str', None, None, None),
])
formatter = parse("")
self.assertEqual(formatter, [])
formatter = parse("{0}")
self.assertEqual(formatter, [
('', '0', '', None),
])
self.assertRaises(TypeError, _string.formatter_parser, 1)
def test_formatter_field_name_split(self):
def split(name):
items = list(_string.formatter_field_name_split(name))
items[1] = list(items[1])
return items
self.assertEqual(split("obj"), ["obj", []])
self.assertEqual(split("obj.arg"), ["obj", [(True, 'arg')]])
self.assertEqual(split("obj[key]"), ["obj", [(False, 'key')]])
self.assertEqual(split("obj.arg[key1][key2]"), [
"obj",
[(True, 'arg'),
(False, 'key1'),
(False, 'key2'),
]])
self.assertRaises(TypeError, _string.formatter_field_name_split, 1)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| lgpl-3.0 |
av8ramit/tensorflow | tensorflow/python/keras/_impl/keras/applications/inception_resnet_v2.py | 4 | 14897 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
# pylint: disable=unused-import
"""Inception-ResNet V2 model for Keras.
# Reference
- [Inception-v4, Inception-ResNet and the Impact of
Residual Connections on Learning](https://arxiv.org/abs/1602.07261)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.applications import imagenet_utils
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import _obtain_input_shape
from tensorflow.python.keras._impl.keras.applications.imagenet_utils import decode_predictions
from tensorflow.python.keras._impl.keras.engine.topology import get_source_inputs
from tensorflow.python.keras._impl.keras.layers import Activation
from tensorflow.python.keras._impl.keras.layers import AveragePooling2D
from tensorflow.python.keras._impl.keras.layers import BatchNormalization
from tensorflow.python.keras._impl.keras.layers import Concatenate
from tensorflow.python.keras._impl.keras.layers import Conv2D
from tensorflow.python.keras._impl.keras.layers import Dense
from tensorflow.python.keras._impl.keras.layers import GlobalAveragePooling2D
from tensorflow.python.keras._impl.keras.layers import GlobalMaxPooling2D
from tensorflow.python.keras._impl.keras.layers import Input
from tensorflow.python.keras._impl.keras.layers import Lambda
from tensorflow.python.keras._impl.keras.layers import MaxPooling2D
from tensorflow.python.keras._impl.keras.models import Model
from tensorflow.python.keras._impl.keras.utils.data_utils import get_file
from tensorflow.python.platform import tf_logging as logging
BASE_WEIGHT_URL = 'https://github.com/fchollet/deep-learning-models/releases/download/v0.7/'
def preprocess_input(x):
  """Preprocesses a numpy array encoding a batch of images.

  Arguments:
      x: a 4D numpy array consists of RGB values within [0, 255].

  Returns:
      Preprocessed array.
  """
  # Delegate to the shared ImageNet helper using TensorFlow-style scaling.
  preprocessed = imagenet_utils.preprocess_input(x, mode='tf')
  return preprocessed
def conv2d_bn(x,
              filters,
              kernel_size,
              strides=1,
              padding='same',
              activation='relu',
              use_bias=False,
              name=None):
  """Utility function to apply conv + BN.

  Arguments:
      x: input tensor.
      filters: filters in `Conv2D`.
      kernel_size: kernel size as in `Conv2D`.
      strides: strides in `Conv2D`.
      padding: padding mode in `Conv2D`.
      activation: activation in `Conv2D`.
      use_bias: whether to use a bias in `Conv2D`.
      name: name of the ops; will become `name + '_ac'` for the activation
          and `name + '_bn'` for the batch norm layer.

  Returns:
      Output tensor after applying `Conv2D` and `BatchNormalization`.
  """
  conv = Conv2D(filters,
                kernel_size,
                strides=strides,
                padding=padding,
                use_bias=use_bias,
                name=name)
  x = conv(x)
  if not use_bias:
    # Batch norm operates over the channel axis of the current data format.
    bn_axis = 1 if K.image_data_format() == 'channels_first' else 3
    bn_name = None if name is None else name + '_bn'
    x = BatchNormalization(axis=bn_axis, scale=False, name=bn_name)(x)
  if activation is not None:
    ac_name = None if name is None else name + '_ac'
    x = Activation(activation, name=ac_name)(x)
  return x
def inception_resnet_block(x, scale, block_type, block_idx, activation='relu'):
  """Adds a Inception-ResNet block.

  This function builds 3 types of Inception-ResNet blocks mentioned
  in the paper, controlled by the `block_type` argument (which is the
  block name used in the official TF-slim implementation):
      - Inception-ResNet-A: `block_type='block35'`
      - Inception-ResNet-B: `block_type='block17'`
      - Inception-ResNet-C: `block_type='block8'`

  Arguments:
      x: input tensor.
      scale: scaling factor to scale the residuals (i.e., the output of
          passing `x` through an inception module) before adding them
          to the shortcut branch. Let `r` be the output from the residual
          branch, the output of this block will be `x + scale * r`.
      block_type: `'block35'`, `'block17'` or `'block8'`, determines
          the network structure in the residual branch.
      block_idx: an `int` used for generating layer names. The
          Inception-ResNet blocks are repeated many times in this network.
          We use `block_idx` to identify each of the repetitions. For
          example, the first Inception-ResNet-A block will have
          `block_type='block35', block_idx=0`, and the layer names will
          have a common prefix `'block35_0'`.
      activation: activation function to use at the end of the block.
          When `activation=None`, no activation is applied
          (i.e., "linear" activation: `a(x) = x`).

  Returns:
      Output tensor for the block.

  Raises:
      ValueError: if `block_type` is not one of `'block35'`,
          `'block17'` or `'block8'`.
  """
  # Build the multi-branch residual tower for the requested block flavour.
  if block_type == 'block35':
    branch_0 = conv2d_bn(x, 32, 1)
    branch_1 = conv2d_bn(x, 32, 1)
    branch_1 = conv2d_bn(branch_1, 32, 3)
    branch_2 = conv2d_bn(x, 32, 1)
    branch_2 = conv2d_bn(branch_2, 48, 3)
    branch_2 = conv2d_bn(branch_2, 64, 3)
    branches = [branch_0, branch_1, branch_2]
  elif block_type == 'block17':
    branch_0 = conv2d_bn(x, 192, 1)
    branch_1 = conv2d_bn(x, 128, 1)
    # Factorized 7x7 convolution: 1x7 followed by 7x1.
    branch_1 = conv2d_bn(branch_1, 160, [1, 7])
    branch_1 = conv2d_bn(branch_1, 192, [7, 1])
    branches = [branch_0, branch_1]
  elif block_type == 'block8':
    branch_0 = conv2d_bn(x, 192, 1)
    branch_1 = conv2d_bn(x, 192, 1)
    # Factorized 3x3 convolution: 1x3 followed by 3x1.
    branch_1 = conv2d_bn(branch_1, 224, [1, 3])
    branch_1 = conv2d_bn(branch_1, 256, [3, 1])
    branches = [branch_0, branch_1]
  else:
    raise ValueError('Unknown Inception-ResNet block type. '
                     'Expects "block35", "block17" or "block8", '
                     'but got: ' + str(block_type))

  block_name = block_type + '_' + str(block_idx)
  channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
  mixed = Concatenate(axis=channel_axis, name=block_name + '_mixed')(branches)
  # Project the concatenated branches back to the input channel count so
  # the residual addition below is shape-compatible (no BN/activation).
  up = conv2d_bn(
      mixed,
      K.int_shape(x)[channel_axis],
      1,
      activation=None,
      use_bias=True,
      name=block_name + '_conv')

  # Residual connection: output = x + scale * residual.
  x = Lambda(
      lambda inputs, scale: inputs[0] + inputs[1] * scale,
      output_shape=K.int_shape(x)[1:],
      arguments={'scale': scale},
      name=block_name)([x, up])
  if activation is not None:
    x = Activation(activation, name=block_name + '_ac')(x)
  return x
def InceptionResNetV2(include_top=True,
                      weights='imagenet',
                      input_tensor=None,
                      input_shape=None,
                      pooling=None,
                      classes=1000):
  """Instantiates the Inception-ResNet v2 architecture.

  Optionally loads weights pre-trained on ImageNet.
  Note that when using TensorFlow, for best performance you should
  set `"image_data_format": "channels_last"` in your Keras config
  at `~/.keras/keras.json`.

  The model and the weights are compatible with TensorFlow, Theano and
  CNTK backends. The data format convention used by the model is
  the one specified in your Keras config file.

  Note that the default input image size for this model is 299x299, instead
  of 224x224 as in the VGG16 and ResNet models. Also, the input preprocessing
  function is different (i.e., do not use `imagenet_utils.preprocess_input()`
  with this model. Use `preprocess_input()` defined in this module instead).

  Arguments:
      include_top: whether to include the fully-connected
          layer at the top of the network.
      weights: one of `None` (random initialization),
          'imagenet' (pre-training on ImageNet),
          or the path to the weights file to be loaded.
      input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
          to use as image input for the model.
      input_shape: optional shape tuple, only to be specified
          if `include_top` is `False` (otherwise the input shape
          has to be `(299, 299, 3)` (with `'channels_last'` data format)
          or `(3, 299, 299)` (with `'channels_first'` data format).
          It should have exactly 3 inputs channels,
          and width and height should be no smaller than 139.
          E.g. `(150, 150, 3)` would be one valid value.
      pooling: Optional pooling mode for feature extraction
          when `include_top` is `False`.
          - `None` means that the output of the model will be
              the 4D tensor output of the last convolutional layer.
          - `'avg'` means that global average pooling
              will be applied to the output of the
              last convolutional layer, and thus
              the output of the model will be a 2D tensor.
          - `'max'` means that global max pooling will be applied.
      classes: optional number of classes to classify images
          into, only to be specified if `include_top` is `True`, and
          if no `weights` argument is specified.

  Returns:
      A Keras `Model` instance.

  Raises:
      ValueError: in case of invalid argument for `weights`,
          or invalid input shape.
  """
  # Validate the weights argument before building anything.
  if not (weights in {'imagenet', None} or os.path.exists(weights)):
    raise ValueError('The `weights` argument should be either '
                     '`None` (random initialization), `imagenet` '
                     '(pre-training on ImageNet), '
                     'or the path to the weights file to be loaded.')

  if weights == 'imagenet' and include_top and classes != 1000:
    raise ValueError('If using `weights` as imagenet with `include_top`'
                     ' as true, `classes` should be 1000')

  # Determine proper input shape
  input_shape = _obtain_input_shape(
      input_shape,
      default_size=299,
      min_size=139,
      data_format=K.image_data_format(),
      require_flatten=False,
      weights=weights)

  if input_tensor is None:
    img_input = Input(shape=input_shape)
  else:
    if not K.is_keras_tensor(input_tensor):
      img_input = Input(tensor=input_tensor, shape=input_shape)
    else:
      img_input = input_tensor

  # Stem block: 35 x 35 x 192
  x = conv2d_bn(img_input, 32, 3, strides=2, padding='valid')
  x = conv2d_bn(x, 32, 3, padding='valid')
  x = conv2d_bn(x, 64, 3)
  x = MaxPooling2D(3, strides=2)(x)
  x = conv2d_bn(x, 80, 1, padding='valid')
  x = conv2d_bn(x, 192, 3, padding='valid')
  x = MaxPooling2D(3, strides=2)(x)

  # Mixed 5b (Inception-A block): 35 x 35 x 320
  branch_0 = conv2d_bn(x, 96, 1)
  branch_1 = conv2d_bn(x, 48, 1)
  branch_1 = conv2d_bn(branch_1, 64, 5)
  branch_2 = conv2d_bn(x, 64, 1)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_2 = conv2d_bn(branch_2, 96, 3)
  branch_pool = AveragePooling2D(3, strides=1, padding='same')(x)
  branch_pool = conv2d_bn(branch_pool, 64, 1)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  channel_axis = 1 if K.image_data_format() == 'channels_first' else 3
  x = Concatenate(axis=channel_axis, name='mixed_5b')(branches)

  # 10x block35 (Inception-ResNet-A block): 35 x 35 x 320
  for block_idx in range(1, 11):
    x = inception_resnet_block(
        x, scale=0.17, block_type='block35', block_idx=block_idx)

  # Mixed 6a (Reduction-A block): 17 x 17 x 1088
  branch_0 = conv2d_bn(x, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 256, 3)
  branch_1 = conv2d_bn(branch_1, 384, 3, strides=2, padding='valid')
  branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_pool]
  x = Concatenate(axis=channel_axis, name='mixed_6a')(branches)

  # 20x block17 (Inception-ResNet-B block): 17 x 17 x 1088
  for block_idx in range(1, 21):
    x = inception_resnet_block(
        x, scale=0.1, block_type='block17', block_idx=block_idx)

  # Mixed 7a (Reduction-B block): 8 x 8 x 2080
  branch_0 = conv2d_bn(x, 256, 1)
  branch_0 = conv2d_bn(branch_0, 384, 3, strides=2, padding='valid')
  branch_1 = conv2d_bn(x, 256, 1)
  branch_1 = conv2d_bn(branch_1, 288, 3, strides=2, padding='valid')
  branch_2 = conv2d_bn(x, 256, 1)
  branch_2 = conv2d_bn(branch_2, 288, 3)
  branch_2 = conv2d_bn(branch_2, 320, 3, strides=2, padding='valid')
  branch_pool = MaxPooling2D(3, strides=2, padding='valid')(x)
  branches = [branch_0, branch_1, branch_2, branch_pool]
  x = Concatenate(axis=channel_axis, name='mixed_7a')(branches)

  # 10x block8 (Inception-ResNet-C block): 8 x 8 x 2080
  for block_idx in range(1, 10):
    x = inception_resnet_block(
        x, scale=0.2, block_type='block8', block_idx=block_idx)
  # Final Inception-ResNet-C block uses no activation and full scale.
  x = inception_resnet_block(
      x, scale=1., activation=None, block_type='block8', block_idx=10)

  # Final convolution block: 8 x 8 x 1536
  x = conv2d_bn(x, 1536, 1, name='conv_7b')

  if include_top:
    # Classification block
    x = GlobalAveragePooling2D(name='avg_pool')(x)
    x = Dense(classes, activation='softmax', name='predictions')(x)
  else:
    if pooling == 'avg':
      x = GlobalAveragePooling2D()(x)
    elif pooling == 'max':
      x = GlobalMaxPooling2D()(x)

  # Ensure that the model takes into account
  # any potential predecessors of `input_tensor`
  if input_tensor is not None:
    inputs = get_source_inputs(input_tensor)
  else:
    inputs = img_input

  # Create model
  model = Model(inputs, x, name='inception_resnet_v2')

  # Load weights
  if weights == 'imagenet':
    if include_top:
      fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels.h5'
      weights_path = get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='e693bd0210a403b3192acc6073ad2e96')
    else:
      fname = 'inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5'
      weights_path = get_file(
          fname,
          BASE_WEIGHT_URL + fname,
          cache_subdir='models',
          file_hash='d19885ff4a710c122648d3b5c3b684e4')
    model.load_weights(weights_path)
  elif weights is not None:
    # A filesystem path was supplied (validated above).
    model.load_weights(weights)

  return model
| apache-2.0 |
DepthDeluxe/ansible | lib/ansible/modules/network/lenovo/cnos_vlan.py | 59 | 11374 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send VLAN commands to Lenovo Switches
# Overloading aspect of vlan creation in a range is pending
# Lenovo Networking
#
# Standard Ansible module metadata consumed by ansible-doc tooling:
# declares the metadata schema version, maturity, and support channel.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
DOCUMENTATION = '''
---
module: cnos_vlan
author: "Dave Kasberg (@dkasberg)"
short_description: Manage VLAN resources and attributes on devices running Lenovo CNOS
description:
- This module allows you to work with VLAN related configurations. The
operators used are overloaded to ensure control over switch VLAN
configurations. The first level of VLAN configuration allows to set up the
VLAN range, the VLAN tag persistence, a VLAN access map and access map
filter. After passing this level, there are five VLAN arguments that will
perform further configurations. They are vlanArg1, vlanArg2, vlanArg3,
vlanArg4, and vlanArg5. The value of vlanArg1 will determine the way
following arguments will be evaluated. For more details on how to use these
arguments, see [Overloaded Variables].
This module uses SSH to manage network device configuration.
The results of the operation will be placed in a directory named 'results'
that must be created by the user in their local directory to where the playbook is run.
For more information about this module from Lenovo and customizing it usage for your
use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_vlan.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
vlanArg1:
description:
- This is an overloaded vlan first argument. Usage of this argument can be found in the User Guide referenced above.
required: true
default: null
choices: [access-map, dot1q, filter, <1-3999> VLAN ID 1-3999 or range]
vlanArg2:
description:
- This is an overloaded vlan second argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [VLAN Access Map name,egress-only,name, flood,state, ip]
vlanArg3:
description:
- This is an overloaded vlan third argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [action, match, statistics, enter VLAN id or range of vlan, ascii name for the VLAN, ipv4 or ipv6, active or suspend, fast-leave,
last-member-query-interval, mrouter, querier, querier-timeout, query-interval, query-max-response-time, report-suppression,
robustness-variable, startup-query-count, startup-query-interval, static-group]
vlanArg4:
description:
- This is an overloaded vlan fourth argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [drop or forward or redirect, ip or mac,Interval in seconds,ethernet, port-aggregation, Querier IP address,
Querier Timeout in seconds, Query Interval in seconds, Query Max Response Time in seconds, Robustness Variable value,
Number of queries sent at startup, Query Interval at startup]
vlanArg5:
description:
- This is an overloaded vlan fifth argument. Usage of this argument can be found in the User Guide referenced above.
required: false
default: null
choices: [access-list name, Slot/chassis number, Port Aggregation Number]
'''
EXAMPLES = '''
Tasks: The following are examples of using the module cnos_vlan. These are written in the main.yml file of the tasks directory.
---
- name: Test Vlan - Create a vlan, name it
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "name"
vlanArg3: "Anil"
- name: Test Vlan - Create a vlan, Flood configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "flood"
vlanArg3: "ipv4"
- name: Test Vlan - Create a vlan, State configuration
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "state"
vlanArg3: "active"
- name: Test Vlan - VLAN Access map1
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "statistics"
- name: Test Vlan - VLAN Access Map2
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: "access-map"
vlanArg2: "Anil"
vlanArg3: "action"
vlanArg4: "forward"
- name: Test Vlan - ip igmp snooping query interval
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "query-interval"
vlanArg4: 1313
- name: Test Vlan - ip igmp snooping mrouter interface port-aggregation 23
cnos_vlan:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_vlan_{{ inventory_hostname }}_output.txt"
vlanArg1: 13
vlanArg2: "ip"
vlanArg3: "mrouter"
vlanArg4: "port-aggregation"
vlanArg5: 23
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "VLAN configuration is accomplished"
'''
import sys
import paramiko
import time
import argparse
import socket
import array
import json
import time
import re
try:
from ansible.module_utils import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Entry point for the cnos_vlan Ansible module.

    Builds the module argument spec, opens an interactive SSH session to
    the Lenovo CNOS switch with paramiko, walks the device into
    enable/config mode, issues the VLAN command through the ``cnos``
    helper library, appends the captured CLI output to ``outputfile`` and
    reports success or failure back to Ansible via exit_json/fail_json.
    """
    #
    # Define parameters for vlan creation entry
    #
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            vlanArg1=dict(required=True),
            vlanArg2=dict(required=False),
            vlanArg3=dict(required=False),
            vlanArg4=dict(required=False),
            vlanArg5=dict(required=False),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    vlanArg1 = module.params['vlanArg1']
    vlanArg2 = module.params['vlanArg2']
    vlanArg3 = module.params['vlanArg3']
    vlanArg4 = module.params['vlanArg4']
    vlanArg5 = module.params['vlanArg5']
    outputfile = module.params['outputfile']
    hostIP = module.params['host']
    deviceType = module.params['deviceType']
    output = ""
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in
    # your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(hostIP, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal then send command
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + \
        cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    # Make terminal length = 0 (disable CLI paging so output is complete)
    output = output + \
        cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Go to config mode ("conf d" is the abbreviated CLI command; the
    # switch expands it, so the literal must stay unchanged)
    output = output + \
        cnos.waitForDeviceResponse("conf d\n", "(config)#", 2, remote_conn)
    # Send the CLI command
    output = output + \
        cnos.vlanConfig(
            remote_conn, deviceType, "(config)#", 2, vlanArg1, vlanArg2,
            vlanArg3, vlanArg4, vlanArg5)
    # Append the captured output. The context manager guarantees the file
    # is flushed and closed even if the write fails (the original leaked
    # the handle on error and shadowed the builtin name "file").
    with open(outputfile, "a") as out_fd:
        out_fd.write(output)
    # Close the SSH session explicitly; exit_json/fail_json terminate the
    # process, so this is the last chance to release it cleanly.
    remote_conn_pre.close()
    # need to add logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="VLAN configuration is accomplished")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
| gpl-3.0 |
NetApp/manila | manila/share/drivers/emc/plugins/vnx/connector.py | 2 | 6292 | # Copyright (c) 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pipes
from oslo_concurrency import processutils
from oslo_log import log
from oslo_utils import excutils
import six
from six.moves import http_cookiejar
from six.moves.urllib import error as url_error # pylint: disable=E0611
from six.moves.urllib import request as url_request # pylint: disable=E0611
from manila import exception
from manila.i18n import _
from manila.i18n import _LE
from manila.share.drivers.emc.plugins.vnx import constants
from manila import utils
LOG = log.getLogger(__name__)
class XMLAPIConnector(object):
    """HTTPS connector for the VNX Celerra XML management API.

    Logs in once at construction time; the session cookie is kept in a
    shared urllib opener so subsequent calls reuse the session.
    ``request`` transparently re-authenticates and retries once when the
    server reports the session is no longer authorized.
    """
    def __init__(self, configuration, debug=True):
        # configuration: manila config object providing emc_nas_* options.
        # debug: when True, requests/responses are traced via LOG.debug.
        super(XMLAPIConnector, self).__init__()
        self.storage_ip = configuration.emc_nas_server
        self.username = configuration.emc_nas_login
        self.password = configuration.emc_nas_password
        self.debug = debug
        self.auth_url = 'https://' + self.storage_ip + '/Login'
        self._url = ('https://' + self.storage_ip
                     + '/servlets/CelerraManagementServices')
        https_handler = url_request.HTTPSHandler()
        # The cookie processor stores the session cookie returned by /Login
        # so every later request through self.url_opener is authenticated.
        cookie_handler = url_request.HTTPCookieProcessor(
            http_cookiejar.CookieJar())
        self.url_opener = url_request.build_opener(https_handler,
                                                   cookie_handler)
        self._do_setup()
    def _do_setup(self):
        """Authenticate against the array; the session cookie lands in the
        opener's cookie jar as a side effect."""
        credential = ('user=' + self.username
                      + '&password=' + self.password
                      + '&Login=Login')
        req = url_request.Request(self.auth_url, credential,
                                  constants.CONTENT_TYPE_URLENCODE)
        resp = self.url_opener.open(req)
        resp_body = resp.read()
        self._http_log_resp(resp, resp_body)
    def _http_log_req(self, req):
        # Trace the outgoing request as an equivalent curl command line
        # (debug only).
        if not self.debug:
            return
        string_parts = ['curl -i']
        string_parts.append(' -X %s' % req.get_method())
        for k in req.headers:
            header = ' -H "%s: %s"' % (k, req.headers[k])
            string_parts.append(header)
        if req.data:
            string_parts.append(" -d '%s'" % req.data)
        string_parts.append(' ' + req.get_full_url())
        LOG.debug("\nREQ: %s.\n", "".join(string_parts))
    def _http_log_resp(self, resp, body):
        # Trace status code, headers and body of a response (debug only).
        if not self.debug:
            return
        headers = six.text_type(resp.headers).replace('\n', '\\n')
        LOG.debug(
            'RESP: [%(code)s] %(resp_hdrs)s\n'
            'RESP BODY: %(resp_b)s.\n',
            {
                'code': resp.getcode(),
                'resp_hdrs': headers,
                'resp_b': body,
            }
        )
    def _request(self, req_body=None, method=None,
                 header=constants.CONTENT_TYPE_URLENCODE):
        """Send one request to the management servlet and return the body.

        Raises NotAuthorized on HTTP 403 (expired session) so the caller
        can re-login; any other HTTP error becomes ManilaException.
        """
        req = url_request.Request(self._url, req_body, header)
        if method not in (None, 'GET', 'POST'):
            # py2 urllib2 Request has no method parameter; override the
            # accessor to force e.g. DELETE/PUT.
            req.get_method = lambda: method
        self._http_log_req(req)
        try:
            resp = self.url_opener.open(req)
            resp_body = resp.read()
            self._http_log_resp(resp, resp_body)
        except url_error.HTTPError as http_err:
            err = {'errorCode': -1,
                   'httpStatusCode': http_err.code,
                   'messages': six.text_type(http_err),
                   'request': req_body}
            msg = (_("The request is invalid. Reason: %(reason)s") %
                   {'reason': err})
            # http_err.code is an int; comparing its text form to '403'
            # is equivalent to http_err.code == 403 here.
            if '403' == six.text_type(http_err.code):
                raise exception.NotAuthorized()
            else:
                raise exception.ManilaException(message=msg)
        return resp_body
    def request(self, req_body=None, method=None,
                header=constants.CONTENT_TYPE_URLENCODE):
        """Public entry point: like _request, but on NotAuthorized it
        re-runs the login handshake once and retries the request."""
        try:
            resp_body = self._request(req_body, method, header)
        except exception.NotAuthorized:
            LOG.debug("Login again because client certification "
                      "may be expired.")
            self._do_setup()
            resp_body = self._request(req_body, method, header)
        return resp_body
class SSHConnector(object):
    """SSH connector for the VNX control station.

    Commands run through a pooled paramiko connection; each command and
    its output can be traced with ``log_request`` when ``debug`` is on.
    """
    def __init__(self, configuration, debug=True):
        # configuration: manila config object providing emc_nas_* options.
        super(SSHConnector, self).__init__()
        self.storage_ip = configuration.emc_nas_server
        self.username = configuration.emc_nas_login
        self.password = configuration.emc_nas_password
        self.debug = debug
        self.sshpool = utils.SSHPool(ip=self.storage_ip,
                                     port=22,
                                     conn_timeout=None,
                                     login=self.username,
                                     password=self.password)
    def run_ssh(self, cmd_list, check_exit_code=False):
        """Run *cmd_list* on the remote host and return (stdout, stderr).

        Each argument is shell-quoted individually, so metacharacters in
        arguments cannot be interpreted by the remote shell. On failure
        the error is logged and the original exception is re-raised.
        """
        command = ' '.join(pipes.quote(cmd_arg) for cmd_arg in cmd_list)
        with self.sshpool.item() as ssh:
            try:
                out, err = processutils.ssh_execute(
                    ssh, command, check_exit_code=check_exit_code)
                self.log_request(command, out, err)
                return out, err
            except processutils.ProcessExecutionError as e:
                with excutils.save_and_reraise_exception():
                    # BUG FIX: the original assigned a (format, args)
                    # tuple to `msg` and logged the tuple itself, so the
                    # placeholders were never interpolated. Pass the
                    # format string and its args to LOG.error directly
                    # (lazy %-style logging).
                    LOG.error(_LE('Error running SSH command: %(cmd)s. '
                                  'Error: %(excmsg)s.'),
                              {'cmd': command, 'excmsg': six.text_type(e)})
    def log_request(self, cmd, out, err):
        """Trace the command and its output when debug logging is on."""
        if not self.debug:
            return
        LOG.debug("\nSSH command: %s.\n", cmd)
        LOG.debug("SSH command output: out=%(out)s, err=%(err)s.\n",
                  {'out': out, 'err': err})
| apache-2.0 |
imk1/IMKTFBindingCode | personalGenome_to_vcf.py | 1 | 2846 | from optparse import OptionParser
import os
import math,re
from time import gmtime, strftime
import pysam
import pickle
import subprocess
import gzip
'''
Author:Oana Ursu
Modified by Irene Kaplow
'''
def main():
    """Convert a personal genome (maternal/paternal FASTA) into VCF genotypes.

    For every position in the input VCF, the base is looked up in the
    individual's maternal and paternal FASTA assemblies and written back
    out as a phased genotype column (e.g. ``0|1``) under the individual's
    name. Header lines (##) are copied through; DEL records are skipped.
    """
    parser = OptionParser()
    parser.add_option('--fasta_1m', dest='fa_1m',
                      help='Fasta file 1 maternal more or less',
                      default='/srv/gs1/projects/snyder/jzaugg/histoneQTL/ChIPseq_alignment/data/genomes/Fasta/NA19099.maternal.fa')
    parser.add_option('--fasta_1p', dest='fa_1p',
                      help='Fasta file 1 paternal more or less',
                      default='/srv/gs1/projects/snyder/jzaugg/histoneQTL/ChIPseq_alignment/data/genomes/Fasta/NA19099.paternal.fa')
    parser.add_option('--out', dest='out', help='Out')
    parser.add_option('--indiv', dest='indiv',
                      help='Name of individual (for column name in vcf)')
    parser.add_option('--vcf', dest='vcf',
                      help='vcf file with positions of interest')
    opts, args = parser.parse_args()
    fa1m = pysam.Fastafile(opts.fa_1m)
    fa1p = pysam.Fastafile(opts.fa_1p)
    # BUG FIX: the original never closed either file handle, so buffered
    # output could be lost if the interpreter exited uncleanly. The
    # context managers guarantee flush + close.
    with open(opts.vcf) as vcf, open(opts.out, 'w') as new_vcf:
        for line in vcf:
            if '##' in line:
                # Meta-information header: copy through unchanged.
                new_vcf.write(line)
                continue
            if 'DEL' in line:
                # Deletions cannot be genotyped by a single-base lookup.
                continue
            if '#CHROM' in line:
                # Column header: append the individual's sample column.
                items = line.strip().split('\t')
                new_vcf.write('\t'.join(items) + '\t' + opts.indiv + '\n')
                continue
            items = line.strip().split('\t')
            if len(items) < 5:
                continue
            chromo = items[0]
            if "chr" not in chromo:
                # The chr prefix needs to be added to the chromosome name
                chromo = 'chr' + items[0]
            # Reference and alternate alleles from the vcf record.
            alleles = [items[3], items[4]]
            # VCF positions are 1-based; pysam fetch is 0-based.
            i = int(items[1]) - 1
            r = chromo
            pos_base_1m = fa1m.fetch(r, i, i + 1)
            pos_base_1p = fa1p.fetch(r, i, i + 1)
            genotype_from_genome = [pos_base_1p.upper(), pos_base_1m.upper()]
            # Write down the 0|1-style genotype as learned from the genome;
            # '.' means the genome base matched neither allele.
            first = '.'
            second = '.'
            if genotype_from_genome[0] == alleles[0]:
                first = '0'
            elif genotype_from_genome[0] == alleles[1]:
                first = '1'
            if genotype_from_genome[1] == alleles[0]:
                second = '0'
            elif genotype_from_genome[1] == alleles[1]:
                second = '1'
            if len(items) < 9:
                # Not all of the vcf information is available: pad the
                # missing columns with NA. (Use a fresh loop variable so
                # the position index ``i`` is not clobbered.)
                for col in range(len(items), 9):
                    items.append("NA")
            new_vcf.write('\t'.join(items[0:9]) + '\t' + first + '|' + second + '\n')


main()
| mit |
yannickcr/Sick-Beard | lib/hachoir_parser/container/swf.py | 90 | 15001 | """
SWF (Macromedia/Adobe Flash) file parser.
Documentation:
- Alexis' SWF Reference:
http://www.m2osw.com/swf_alexref.html
- http://www.half-serious.com/swf/format/
- http://www.anotherbigidea.com/javaswf/
- http://www.gnu.org/software/gnash/
Author: Victor Stinner
Creation date: 29 october 2006
"""
from lib.hachoir_parser import Parser
from lib.hachoir_core.field import (FieldSet, ParserError,
Bit, Bits, UInt8, UInt32, UInt16, CString, Enum,
Bytes, RawBytes, NullBits, String, SubFile)
from lib.hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN
from lib.hachoir_core.text_handler import textHandler, filesizeHandler
from lib.hachoir_core.tools import paddingSize, humanFrequency
from lib.hachoir_parser.image.common import RGB
from lib.hachoir_parser.image.jpeg import JpegChunk, JpegFile
from lib.hachoir_core.stream import StringInputStream, ConcatStream
from lib.hachoir_parser.common.deflate import Deflate, has_deflate
from lib.hachoir_parser.container.action_script import parseActionScript
import math
# Maximum file size (50 MB): validate() rejects anything larger.
MAX_FILE_SIZE = 50 * 1024 * 1024
# SWF stores coordinates in twips: 20 twips = 1 pixel.
TWIPS = 20
class RECT(FieldSet):
    """SWF RECT record: a variable-bit-width bounding box stored in twips."""
    endian = BIG_ENDIAN

    def createFields(self):
        # The first 5 bits give the width (in bits) of each coordinate.
        yield Bits(self, "nbits", 5)
        coord_bits = self["nbits"].value
        if not coord_bits:
            raise ParserError("SWF parser: Invalid RECT field size (0)")
        for field_name, caption in (
                ("xmin", "X minimum in twips"),
                ("xmax", "X maximum in twips"),
                ("ymin", "Y minimum in twips"),
                ("ymax", "Y maximum in twips")):
            yield Bits(self, field_name, coord_bits, caption)
        # Byte-align the record.
        pad_bits = paddingSize(self.current_size, 8)
        if pad_bits:
            yield NullBits(self, "padding", pad_bits)

    def getWidth(self):
        """Stage width in pixels (twips rounded up)."""
        return math.ceil(self["xmax"].value / float(TWIPS))

    def getHeight(self):
        """Stage height in pixels (twips rounded up)."""
        return math.ceil(self["ymax"].value / float(TWIPS))

    def createDescription(self):
        return "Rectangle: %ux%u" % (self.getWidth(), self.getHeight())
class FixedFloat16(FieldSet):
    """8.8 fixed-point number: fractional byte first, integer byte second."""

    def createFields(self):
        yield UInt8(self, "float_part")
        yield UInt8(self, "int_part")

    def createValue(self):
        # Fraction is stored as 256ths.
        fraction = self["float_part"].value / 256.0
        return self["int_part"].value + fraction
def parseBackgroundColor(parent, size):
    # Tag 9 payload: a single RGB triple giving the movie background color.
    yield RGB(parent, "color")
def bit2hertz(field):
    # Sample-rate code (0..3) -> human-readable frequency: 5512.5 Hz * 2**code.
    return humanFrequency(5512.5 * (2 ** field.value))
# Codec identifiers used by sound tags; MP3 streams carry extra fields,
# so its code gets a named constant.
SOUND_CODEC_MP3 = 2
SOUND_CODEC = {
    0: "RAW",
    1: "ADPCM",
    SOUND_CODEC_MP3: "MP3",
    3: "Uncompressed",
    6: "Nellymoser",
}
class SoundEnvelope(FieldSet):
    """SOUNDENVELOPE record: a count byte followed by that many
    (mark44, level0, level1) point triples."""
    def createFields(self):
        yield UInt8(self, "count")
        for index in xrange(self["count"].value):
            yield UInt32(self, "mark44[]")
            yield UInt16(self, "level0[]")
            yield UInt16(self, "level1[]")
def parseSoundBlock(parent, size):
    """Parse a sound stream block (tag 19): optional MP3 prefix + raw data."""
    # TODO: Be able to get codec... Need to know last sound "def_sound[]" field
    # if not (...)sound_header:
    #     raise ParserError("Sound block without header")
    # NOTE(review): codec detection is unimplemented, so the MP3-specific
    # prefix fields are always read -- confirm against non-MP3 streams.
    if True: #sound_header == SOUND_CODEC_MP3:
        yield UInt16(parent, "samples")
        yield UInt16(parent, "left")
    size = (parent.size - parent.current_size) // 8
    if size:
        yield RawBytes(parent, "music_data", size)
def parseStartSound(parent, size):
    """Parse a start sound tag (15): sound id, flags, then the optional
    fields announced by the flag bits, in flag order."""
    yield UInt16(parent, "sound_id")
    yield Bit(parent, "has_in_point")
    yield Bit(parent, "has_out_point")
    yield Bit(parent, "has_loops")
    yield Bit(parent, "has_envelope")
    yield Bit(parent, "no_multiple")
    yield Bit(parent, "stop_playback")
    yield NullBits(parent, "reserved", 2)
    # Optional fields follow only when their flag bit is set.
    if parent["has_in_point"].value:
        yield UInt32(parent, "in_point")
    if parent["has_out_point"].value:
        yield UInt32(parent, "out_point")
    if parent["has_loops"].value:
        yield UInt16(parent, "loop_count")
    if parent["has_envelope"].value:
        yield SoundEnvelope(parent, "envelope")
def parseDefineSound(parent, size):
    """Parse a define sound tag (14): format bits, sample count, data."""
    yield UInt16(parent, "sound_id")
    yield Bit(parent, "is_stereo")
    yield Bit(parent, "is_16bit")
    yield textHandler(Bits(parent, "rate", 2), bit2hertz)
    yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
    yield UInt32(parent, "sample_count")
    # MP3 data is preceded by an extra 16-bit length field.
    if parent["codec"].value == SOUND_CODEC_MP3:
        yield UInt16(parent, "len")
    size = (parent.size - parent.current_size) // 8
    if size:
        yield RawBytes(parent, "music_data", size)
def parseSoundHeader(parent, size):
    """Parse a sound stream header (tags 18/45): playback format followed
    by the stream's own format, sample count and optional MP3 latency."""
    yield Bit(parent, "playback_is_stereo")
    yield Bit(parent, "playback_is_16bit")
    yield textHandler(Bits(parent, "playback_rate", 2), bit2hertz)
    yield NullBits(parent, "reserved", 4)
    yield Bit(parent, "sound_is_stereo")
    yield Bit(parent, "sound_is_16bit")
    yield textHandler(Bits(parent, "sound_rate", 2), bit2hertz)
    yield Enum(Bits(parent, "codec", 4), SOUND_CODEC)
    yield UInt16(parent, "sample_count")
    # Consistency fix: use the named constant instead of the magic number
    # 2, matching parseDefineSound. Only MP3 streams carry a latency
    # field (same value, same behavior).
    if parent["codec"].value == SOUND_CODEC_MP3:
        yield UInt16(parent, "latency_seek")
class JpegHeader(FieldSet):
    """Sequence of JPEG chunks preceding the image data inside an SWF tag."""
    endian = BIG_ENDIAN
    def createFields(self):
        count = 1
        while True:
            chunk = JpegChunk(self, "jpeg_chunk[]")
            yield chunk
            # Stop after the first chunk once an SOI or EOI marker is seen.
            # NOTE(review): stopping on SOI (start of image) as well as EOI
            # looks intentional for SWF's "header then image" split -- but
            # confirm against the JpegChunk tag semantics.
            if 1 < count and chunk["type"].value in (JpegChunk.TAG_SOI, JpegChunk.TAG_EOI):
                break
            count += 1
def parseJpeg(parent, size):
    """Parse the JPEG picture tags (6, 32, 35).

    Tag 6 (TAG_BITS) is raw picture data; tags 32/35 may carry a JPEG
    header before the image, and tag 35 (TAG_BITS_JPEG3) additionally
    carries an alpha channel after the image.
    """
    yield UInt16(parent, "char_id", "Character identifier")
    size -= 2
    code = parent["code"].value
    if code != Tag.TAG_BITS:
        if code == Tag.TAG_BITS_JPEG3:
            # Offset (from after char_id) where the alpha data begins.
            yield UInt32(parent, "alpha_offset", "Character identifier")
            size -= 4
        # Peek two bytes ahead to see whether a JPEG header (DQT or SOI
        # marker) is present before the image itself.
        addr = parent.absolute_address + parent.current_size + 16
        if parent.stream.readBytes(addr, 2) in ("\xff\xdb", "\xff\xd8"):
            header = JpegHeader(parent, "jpeg_header")
            yield header
            hdr_size = header.size // 8
            size -= hdr_size
        else:
            hdr_size = 0
        if code == Tag.TAG_BITS_JPEG3:
            # Image ends where the alpha data starts.
            img_size = parent["alpha_offset"].value - hdr_size
        else:
            img_size = size
    else:
        img_size = size
    yield SubFile(parent, "image", img_size, "JPEG picture", parser=JpegFile)
    if code == Tag.TAG_BITS_JPEG3:
        # Remaining bytes are the (zlib-compressed) alpha channel.
        size = (parent.size - parent.current_size) // 8
        yield RawBytes(parent, "alpha", size, "Image data")
def parseVideoFrame(parent, size):
    """Parse a video frame tag (61): stream id, frame number, frame data."""
    yield UInt16(parent, "stream_id")
    yield UInt16(parent, "frame_num")
    # Anything beyond the two 16-bit headers is the encoded frame payload.
    if 4 < size:
        yield RawBytes(parent, "video_data", size-4)
class Export(FieldSet):
    """One exported symbol: character id plus its NUL-terminated name."""
    def createFields(self):
        yield UInt16(self, "object_id")
        yield CString(self, "name")
def parseExport(parent, size):
    """Parse an export tag (56): a count followed by that many Export records."""
    yield UInt16(parent, "count")
    for index in xrange(parent["count"].value):
        yield Export(parent, "export[]")
class Tag(FieldSet):
    """One SWF tag: a short header (code + length) followed by a payload.

    TAG_INFO maps a tag code to (hachoir field name, description, payload
    parser); codes without a dedicated parser are exposed as raw bytes.
    """
    # Tag codes referenced by parseJpeg.
    TAG_BITS = 6
    TAG_BITS_JPEG2 = 32
    TAG_BITS_JPEG3 = 35
    TAG_INFO = {
        # SWF version 1.0
        0: ("end[]", "End", None),
        1: ("show_frame[]", "Show frame", None),
        2: ("def_shape[]", "Define shape", None),
        3: ("free_char[]", "Free character", None),
        4: ("place_obj[]", "Place object", None),
        5: ("remove_obj[]", "Remove object", None),
        6: ("def_bits[]", "Define bits", parseJpeg),
        7: ("def_but[]", "Define button", None),
        8: ("jpg_table", "JPEG tables", None),
        9: ("bkgd_color[]", "Set background color", parseBackgroundColor),
        10: ("def_font[]", "Define font", None),
        11: ("def_text[]", "Define text", None),
        12: ("action[]", "Action script", parseActionScript),
        13: ("def_font_info[]", "Define font info", None),
        # SWF version 2.0
        14: ("def_sound[]", "Define sound", parseDefineSound),
        15: ("start_sound[]", "Start sound", parseStartSound),
        16: ("stop_sound[]", "Stop sound", None),
        17: ("def_but_sound[]", "Define button sound", None),
        18: ("sound_hdr", "Sound stream header", parseSoundHeader),
        19: ("sound_blk[]", "Sound stream block", parseSoundBlock),
        20: ("def_bits_lossless[]", "Define bits lossless", None),
        21: ("def_bits_jpeg2[]", "Define bits JPEG 2", parseJpeg),
        22: ("def_shape2[]", "Define shape 2", None),
        23: ("def_but_cxform[]", "Define button CXFORM", None),
        24: ("protect", "File is protected", None),
        # SWF version 3.0
        25: ("path_are_ps[]", "Paths are Postscript", None),
        26: ("place_obj2[]", "Place object 2", None),
        28: ("remove_obj2[]", "Remove object 2", None),
        29: ("sync_frame[]", "Synchronize frame", None),
        31: ("free_all[]", "Free all", None),
        32: ("def_shape3[]", "Define shape 3", None),
        33: ("def_text2[]", "Define text 2", None),
        34: ("def_but2[]", "Define button2", None),
        35: ("def_bits_jpeg3[]", "Define bits JPEG 3", parseJpeg),
        36: ("def_bits_lossless2[]", "Define bits lossless 2", None),
        39: ("def_sprite[]", "Define sprite", None),
        40: ("name_character[]", "Name character", None),
        41: ("serial_number", "Serial number", None),
        42: ("generator_text[]", "Generator text", None),
        43: ("frame_label[]", "Frame label", None),
        45: ("sound_hdr2[]", "Sound stream header2", parseSoundHeader),
        46: ("def_morph_shape[]", "Define morph shape", None),
        47: ("gen_frame[]", "Generate frame", None),
        48: ("def_font2[]", "Define font 2", None),
        49: ("tpl_command[]", "Template command", None),
        # SWF version 4.0
        37: ("def_text_field[]", "Define text field", None),
        38: ("def_quicktime_movie[]", "Define QuickTime movie", None),
        # SWF version 5.0
        50: ("def_cmd_obj[]", "Define command object", None),
        51: ("flash_generator", "Flash generator", None),
        52: ("gen_ext_font[]", "Gen external font", None),
        56: ("export[]", "Export", parseExport),
        57: ("import[]", "Import", None),
        # NOTE(review): "ebnable_debug" looks like a typo for
        # "enable_debug"; the field name is kept for compatibility.
        58: ("ebnable_debug", "Enable debug", None),
        # SWF version 6.0
        59: ("do_init_action[]", "Do init action", None),
        60: ("video_str[]", "Video stream", None),
        61: ("video_frame[]", "Video frame", parseVideoFrame),
        62: ("def_font_info2[]", "Define font info 2", None),
        63: ("mx4[]", "MX4", None),
        64: ("enable_debug2", "Enable debugger 2", None),
        # SWF version 7.0
        65: ("script_limits[]", "Script limits", None),
        66: ("tab_index[]", "Set tab index", None),
        # SWF version 8.0
        69: ("file_attr[]", "File attributes", None),
        70: ("place_obj3[]", "Place object 3", None),
        71: ("import2[]", "Import a definition list from another movie", None),
        73: ("def_font_align[]", "Define font alignment zones", None),
        74: ("csm_txt_set[]", "CSM text settings", None),
        75: ("def_font3[]", "Define font text 3", None),
        77: ("metadata[]", "XML code describing the movie", None),
        78: ("def_scale_grid[]", "Define scaling factors", None),
        83: ("def_shape4[]", "Define shape 4", None),
        84: ("def_morph2[]", "Define a morphing shape 2", None),
    }
    def __init__(self, *args):
        FieldSet.__init__(self, *args)
        # Total tag size = header (2 or 6 bytes, depending on whether the
        # extended 32-bit length form was used) + payload length.
        size = self["length"].value
        if self[0].name == "length_ext":
            self._size = (6+size) * 8
        else:
            self._size = (2+size) * 8
        # Known codes get a friendly name, description and payload parser.
        code = self["code"].value
        if code in self.TAG_INFO:
            self._name, self._description, self.parser = self.TAG_INFO[code]
        else:
            self.parser = None
    def createFields(self):
        # A length field of all ones (63) signals the extended form: the
        # real payload length follows as a 32-bit integer.
        if self.stream.readBits(self.absolute_address, 6, self.endian) == 63:
            yield Bits(self, "length_ext", 6)
            yield Bits(self, "code", 10)
            yield filesizeHandler(UInt32(self, "length"))
        else:
            yield filesizeHandler(Bits(self, "length", 6))
            yield Bits(self, "code", 10)
        size = self["length"].value
        if 0 < size:
            if self.parser:
                # Delegate payload parsing to the code-specific parser.
                for field in self.parser(self, size):
                    yield field
            else:
                yield RawBytes(self, "data", size)
    def createDescription(self):
        return "Tag: %s (%s)" % (self["code"].display, self["length"].display)
class SwfFile(Parser):
    """Top-level SWF parser.

    Uncompressed files ("FWS" signature) are parsed tag by tag;
    zlib-compressed files ("CWS") expose the compressed body, and -- when
    deflate support is available -- re-run this parser on the inflated
    stream with a reconstructed FWS header.
    """
    VALID_VERSIONS = set(xrange(1, 9+1))
    PARSER_TAGS = {
        "id": "swf",
        "category": "container",
        "file_ext": ["swf"],
        "mime": (u"application/x-shockwave-flash",),
        "min_size": 64,
        "description": u"Macromedia Flash data"
    }
    # Register one magic string per supported version, for both the
    # uncompressed (FWS) and compressed (CWS) signatures.
    PARSER_TAGS["magic"] = []
    for version in VALID_VERSIONS:
        PARSER_TAGS["magic"].append(("FWS%c" % version, 0))
        PARSER_TAGS["magic"].append(("CWS%c" % version, 0))
    endian = LITTLE_ENDIAN
    SWF_SCALE_FACTOR = 1.0 / 20
    def validate(self):
        """Return True for a plausible SWF file, else an error string."""
        if self.stream.readBytes(0, 3) not in ("FWS", "CWS"):
            return "Wrong file signature"
        if self["version"].value not in self.VALID_VERSIONS:
            return "Unknown version"
        if MAX_FILE_SIZE < self["filesize"].value:
            return "File too big (%u)" % self["filesize"].value
        if self["signature"].value == "FWS":
            # Only an uncompressed header can be inspected this deep.
            if self["rect/padding"].value != 0:
                return "Unknown rectangle padding value"
        return True
    def createFields(self):
        yield String(self, "signature", 3, "SWF format signature", charset="ASCII")
        yield UInt8(self, "version")
        yield filesizeHandler(UInt32(self, "filesize"))
        if self["signature"].value != "CWS":
            # Uncompressed: stage rectangle, frame rate/count, then tags.
            yield RECT(self, "rect")
            yield FixedFloat16(self, "frame_rate")
            yield UInt16(self, "frame_count")
            while not self.eof:
                yield Tag(self, "tag[]")
        else:
            # Compressed: everything after the 8-byte header is deflated.
            size = (self.size - self.current_size) // 8
            if has_deflate:
                data = Deflate(Bytes(self, "compressed_data", size), False)
                def createInputStream(cis, source=None, **args):
                    # Re-parse the inflated body as an uncompressed SWF by
                    # prepending a synthetic "FWS" header built from the
                    # original version and filesize bytes.
                    stream = cis(source=source)
                    header = StringInputStream("FWS" + self.stream.readBytes(3*8, 5))
                    args.setdefault("tags",[]).append(("class", SwfFile))
                    return ConcatStream((header, stream), source=stream.source, **args)
                data.setSubIStream(createInputStream)
                yield data
            else:
                yield Bytes(self, "compressed_data", size)
    def createDescription(self):
        desc = ["version %u" % self["version"].value]
        if self["signature"].value == "CWS":
            desc.append("compressed")
        return u"Macromedia Flash data: %s" % (", ".join(desc))
    def createContentSize(self):
        # The header's filesize field covers the whole uncompressed file.
        if self["signature"].value == "FWS":
            return self["filesize"].value * 8
        else:
            # TODO: Size of compressed Flash?
            return None
| gpl-3.0 |
mathause/netCDF4p | test/tst_vars.py | 1 | 3352 | import sys
import unittest
import os
import tempfile
import numpy as NP
from numpy.random.mtrand import uniform
from numpy.testing import assert_array_equal, assert_array_almost_equal
import netCDF4p as netCDF4
# test variable creation.
# NOTE(review): tempfile.mktemp is race-prone and deprecated; mkstemp or
# NamedTemporaryFile would be safer -- kept as-is here.
FILE_NAME = tempfile.mktemp(".nc")
VAR_DOUBLE_NAME="dummy_var"
VAR_SHORT_NAME='dummy_var_short'
VARNAMES = sorted([VAR_DOUBLE_NAME,VAR_SHORT_NAME])
GROUP_NAME = "dummy_group"
# Dimension names/lengths shared by the root group and the subgroup.
DIM1_NAME="x"
DIM1_LEN=2
DIM2_NAME="y"
DIM2_LEN=3
DIM3_NAME="z"
DIM3_LEN=25
# Random payload written to both double variables and compared on read-back.
randomdata = uniform(size=(DIM1_LEN,DIM2_LEN,DIM3_LEN))
class VariablesTestCase(unittest.TestCase):
    """Round-trip test for netCDF variable creation.

    setUp writes identical variables into the root group and a subgroup;
    runTest reopens the file and checks dtype, dimensions, shape,
    attributes and the data itself.
    """
    def setUp(self):
        self.file = FILE_NAME
        f = netCDF4.Dataset(self.file, 'w')
        f.createDimension(DIM1_NAME, DIM1_LEN)
        f.createDimension(DIM2_NAME, DIM2_LEN)
        f.createDimension(DIM3_NAME, DIM3_LEN)
        v1 = f.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
        v2 = f.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME))
        v1.long_name = 'dummy data root'
        # Mirror the same structure inside a subgroup.
        g = f.createGroup(GROUP_NAME)
        g.createDimension(DIM1_NAME, DIM1_LEN)
        g.createDimension(DIM2_NAME, DIM2_LEN)
        g.createDimension(DIM3_NAME, DIM3_LEN)
        v1g = g.createVariable(VAR_DOUBLE_NAME, 'f8',(DIM1_NAME,DIM2_NAME,DIM3_NAME))
        v2g = g.createVariable(VAR_SHORT_NAME, 'i2',(DIM2_NAME,DIM3_NAME))
        v1g.long_name = 'dummy data subgroup'
        v1[:] = randomdata
        v1g[:] = randomdata
        f.close()
    def tearDown(self):
        # Remove the temporary files
        os.remove(self.file)
    def runTest(self):
        """testing primitive variables"""
        f = netCDF4.Dataset(self.file, 'r')
        # check variables in root group.
        varnames = sorted(f.variables.keys())
        v1 = f.variables[VAR_DOUBLE_NAME]
        v2 = f.variables[VAR_SHORT_NAME]
        assert varnames == VARNAMES
        assert v1.dtype.str[1:] == 'f8'
        assert v2.dtype.str[1:] == 'i2'
        assert v1.long_name == 'dummy data root'
        assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
        assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
        assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
        assert v2.shape == (DIM2_LEN,DIM3_LEN)
        assert v1.size == DIM1_LEN * DIM2_LEN * DIM3_LEN
        assert len(v1) == DIM1_LEN
        #assert NP.allclose(v1[:],randomdata)
        assert_array_almost_equal(v1[:],randomdata)
        # check variables in sub group.
        g = f.groups[GROUP_NAME]
        varnames = sorted(g.variables.keys())
        v1 = g.variables[VAR_DOUBLE_NAME]
        # test iterating over variable (should stop when
        # it gets to the end and raises IndexError, issue 121)
        for v in v1:
            pass
        v2 = g.variables[VAR_SHORT_NAME]
        assert varnames == VARNAMES
        assert v1.dtype.str[1:] == 'f8'
        assert v2.dtype.str[1:] == 'i2'
        assert v1.long_name == 'dummy data subgroup'
        assert v1.dimensions == (DIM1_NAME,DIM2_NAME,DIM3_NAME)
        assert v2.dimensions == (DIM2_NAME,DIM3_NAME)
        assert v1.shape == (DIM1_LEN,DIM2_LEN,DIM3_LEN)
        assert v2.shape == (DIM2_LEN,DIM3_LEN)
        #assert NP.allclose(v1[:],randomdata)
        assert_array_almost_equal(v1[:],randomdata)
        f.close()
if __name__ == '__main__':
    unittest.main()
| mit |
yamt/tempest | tempest/api/compute/admin/test_security_group_default_rules.py | 4 | 5530 | # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
import testtools
from tempest.api.compute import base
from tempest import config
from tempest import test
CONF = config.CONF
class SecurityGroupDefaultRulesTest(base.BaseV2ComputeAdminTest):
    """Admin API tests for nova security-group *default* rules:
    create/delete, default and blank CIDR handling, list, and show."""
    # Decorators apply bottom-up: skipIf wraps the function first, then
    # classmethod wraps the skip-aware function.
    @classmethod
    # TODO(GMann): Once Bug# 1311500 is fixed, these test can run
    # for Neutron also.
    @testtools.skipIf(CONF.service_available.neutron,
                      "Skip as this functionality is not yet "
                      "implemented in Neutron. Related Bug#1311500")
    def setup_credentials(cls):
        # A network and a subnet will be created for these tests
        cls.set_network_resources(network=True, subnet=True)
        super(SecurityGroupDefaultRulesTest, cls).setup_credentials()
    @classmethod
    def setup_clients(cls):
        super(SecurityGroupDefaultRulesTest, cls).setup_clients()
        cls.adm_client = cls.os_adm.security_group_default_rules_client
    def _create_security_group_default_rules(self, ip_protocol='tcp',
                                             from_port=22, to_port=22,
                                             cidr='10.10.0.0/24'):
        """Create a default rule and verify the echoed fields; returns the rule."""
        # Create Security Group default rule
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol,
            from_port,
            to_port,
            cidr=cidr)
        self.assertEqual(ip_protocol, rule['ip_protocol'])
        self.assertEqual(from_port, rule['from_port'])
        self.assertEqual(to_port, rule['to_port'])
        self.assertEqual(cidr, rule['ip_range']['cidr'])
        return rule
    @test.idempotent_id('6d880615-eec3-4d29-97c5-7a074dde239d')
    def test_create_delete_security_group_default_rules(self):
        # Create and delete Security Group default rule for each protocol.
        ip_protocols = ['tcp', 'udp', 'icmp']
        for ip_protocol in ip_protocols:
            rule = self._create_security_group_default_rules(ip_protocol)
            # Delete Security Group default rule
            self.adm_client.delete_security_group_default_rule(rule['id'])
            # Fetching a deleted rule must raise NotFound.
            self.assertRaises(lib_exc.NotFound,
                              self.adm_client.get_security_group_default_rule,
                              rule['id'])
    @test.idempotent_id('4d752e0a-33a1-4c3a-b498-ff8667ca22e5')
    def test_create_security_group_default_rule_without_cidr(self):
        # Omitting cidr should default the rule to 0.0.0.0/0.
        ip_protocol = 'udp'
        from_port = 80
        to_port = 80
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol,
            from_port,
            to_port)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @test.idempotent_id('29f2d218-69b0-4a95-8f3d-6bd0ef732b3a')
    def test_create_security_group_default_rule_with_blank_cidr(self):
        # An empty cidr string should also fall back to 0.0.0.0/0.
        ip_protocol = 'icmp'
        from_port = 10
        to_port = 10
        cidr = ''
        rule = self.adm_client.create_security_default_group_rule(
            ip_protocol,
            from_port,
            to_port,
            cidr=cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        self.assertNotEqual(0, rule['id'])
        self.assertEqual('0.0.0.0/0', rule['ip_range']['cidr'])
    @test.idempotent_id('6e6de55e-9146-4ae0-89f2-3569586e0b9b')
    def test_security_group_default_rules_list(self):
        # A freshly created rule must appear in the list response.
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        rules = self.adm_client.list_security_group_default_rules()
        self.assertNotEqual(0, len(rules))
        self.assertIn(rule, rules)
    @test.idempotent_id('15cbb349-86b4-4f71-a048-04b7ef3f150b')
    def test_default_security_group_default_rule_show(self):
        # Showing a rule by id must return exactly what create returned.
        ip_protocol = 'tcp'
        from_port = 22
        to_port = 22
        cidr = '10.10.0.0/24'
        rule = self._create_security_group_default_rules(ip_protocol,
                                                         from_port,
                                                         to_port,
                                                         cidr)
        self.addCleanup(self.adm_client.delete_security_group_default_rule,
                        rule['id'])
        fetched_rule = self.adm_client.get_security_group_default_rule(
            rule['id'])
        self.assertEqual(rule, fetched_rule)
| apache-2.0 |
ramadhane/odoo | addons/hr_timesheet_invoice/wizard/hr_timesheet_final_invoice_create.py | 337 | 3000 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
#
# Create an final invoice based on selected timesheet lines
#
#
# TODO: check unit of measure !!!
#
class final_invoice_create(osv.osv_memory):
    """Wizard: create the final invoice from the selected analytic accounts'
    not-yet-invoiced timesheet lines."""
    _name = 'hr.timesheet.invoice.create.final'
    _description = 'Create invoice from timesheet final'
    # Wizard options controlling what detail appears on the invoice lines.
    _columns = {
        'date': fields.boolean('Date', help='Display date in the history of works'),
        'time': fields.boolean('Time Spent', help='Display time in the history of works'),
        'name': fields.boolean('Log of Activity', help='Display detail of work in the invoice line.'),
        'price': fields.boolean('Cost', help='Display cost of the item you reinvoice'),
        'product': fields.many2one('product.product', 'Product', help='The product that will be used to invoice the remaining amount'),
    }
    def do_create(self, cr, uid, ids, context=None):
        """Invoice all uninvoiced billable analytic lines of the active
        accounts and return an act_window showing the created invoices."""
        data = self.read(cr, uid, ids, context=context)[0]
        # hack for fixing small issue (context should not propagate implicitly between actions)
        if 'default_type' in context:
            del context['default_type']
        # Lines to invoice: billable, not yet invoiced, on the selected accounts.
        ids = self.pool.get('account.analytic.line').search(cr, uid, [('invoice_id','=',False),('to_invoice','<>', False), ('account_id', 'in', context['active_ids'])], context=context)
        invs = self.pool.get('account.analytic.line').invoice_cost_create(cr, uid, ids, data, context=context)
        # Open the standard customer-invoice action filtered on what was created.
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        mod_ids = mod_obj.search(cr, uid, [('name', '=', 'action_invoice_tree1')], context=context)[0]
        res_id = mod_obj.read(cr, uid, mod_ids, ['res_id'], context=context)['res_id']
        act_win = act_obj.read(cr, uid, [res_id], context=context)[0]
        act_win['domain'] = [('id','in',invs),('type','=','out_invoice')]
        act_win['name'] = _('Invoices')
        return act_win
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Antiun/bank-payment | account_banking/migrations/7.0.0.4/pre-migration.py | 14 | 1881 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Akretion (http://www.akretion.com/)
# @author: Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
def table_exists(cr, table):
    """ Check whether a certain table or view exists """
    cr.execute(
        'SELECT count(relname) FROM pg_class WHERE relname = %s',
        (table,))
    count, = cr.fetchone()
    return count == 1
def migrate(cr, version):
    """
    Migration script for semantic changes in account_banking_payment_export.

    Putting the same script in this module for users migrating from 6.1,
    before the export module was refactored out.
    """
    # Nothing to do on a fresh install, or when the table is absent.
    if not version:
        return
    if not table_exists(cr, 'payment_line'):
        return
    # Move communication2 into communication for unprocessed payment lines.
    cr.execute(
        "UPDATE payment_line SET communication = communication2, "
        "communication2 = null "
        "FROM payment_order "
        "WHERE payment_line.order_id = payment_order.id "
        "AND payment_order.state in ('draft', 'open') "
        "AND payment_line.state = 'normal' "
        "AND communication2 is not null")
| agpl-3.0 |
timsnyder/bokeh | bokeh/server/views/autoload_js_handler.py | 2 | 3867 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide a request handler that returns a page displaying a document.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
from six.moves.urllib.parse import urlparse
from tornado import gen
# Bokeh imports
from bokeh.core.templates import AUTOLOAD_JS
from bokeh.util.string import encode_utf8
from bokeh.util.compiler import bundle_all_models
from bokeh.embed.elements import script_for_render_items
from bokeh.embed.util import RenderItem
from .session_handler import SessionHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'AutoloadJsHandler',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
class AutoloadJsHandler(SessionHandler):
    ''' Implements a custom Tornado handler for the autoload JS chunk

    '''
    @gen.coroutine
    def get(self, *args, **kwargs):
        session = yield self.get_session()

        element_id = self.get_argument("bokeh-autoload-element", default=None)
        if not element_id:
            self.send_error(status_code=400, reason='No bokeh-autoload-element query parameter')
            return

        app_path = self.get_argument("bokeh-app-path", default="/")
        absolute_url = self.get_argument("bokeh-absolute-url", default=None)

        # Derive the server URL from the absolute URL when one was supplied.
        server_url = None
        if absolute_url:
            server_url = '{uri.scheme}://{uri.netloc}/'.format(uri=urlparse(absolute_url))

        resources = self.application.resources(server_url)

        bundle = bundle_all_models() or ""

        render_items = [RenderItem(sessionid=session.id, elementid=element_id, use_for_title=False)]
        script = script_for_render_items(None, render_items, app_path=app_path, absolute_url=absolute_url)

        resources_param = self.get_argument("resources", "default")
        if resources_param == "none":
            # Caller opted out of resource loading entirely.
            js_urls, css_urls = [], []
        else:
            js_urls, css_urls = resources.js_files, resources.css_files

        js = AUTOLOAD_JS.render(
            js_urls=js_urls,
            css_urls=css_urls,
            js_raw=resources.js_raw + [bundle, script],
            css_raw=resources.css_raw_str,
            elementid=element_id,
        )

        self.set_header("Content-Type", 'application/javascript')
        self.write(encode_utf8(js))
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
| bsd-3-clause |
amenonsen/ansible | lib/ansible/modules/network/f5/bigip_profile_server_ssl.py | 38 | 20536 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_profile_server_ssl
short_description: Manages server SSL profiles on a BIG-IP
description:
- Manages server SSL profiles on a BIG-IP.
version_added: 2.8
options:
name:
description:
- Specifies the name of the profile.
type: str
required: True
parent:
description:
      - The parent profile of this server SSL profile. Once this value has
        been set, it cannot be changed.
type: str
default: /Common/serverssl
ciphers:
description:
- Specifies the list of ciphers that the system supports. When creating a new
profile, the default cipher list is provided by the parent profile.
type: str
secure_renegotiation:
description:
- Specifies the method of secure renegotiations for SSL connections. When
creating a new profile, the setting is provided by the parent profile.
    - When C(request) is set the system requests secure renegotiation of SSL
      connections.
- C(require) is a default setting and when set the system permits initial SSL
handshakes from clients but terminates renegotiations from unpatched clients.
- The C(require-strict) setting the system requires strict renegotiation of SSL
connections. In this mode the system refuses connections to insecure servers,
and terminates existing SSL connections to insecure servers.
type: str
choices:
- require
- require-strict
- request
server_name:
description:
- Specifies the fully qualified DNS hostname of the server used in Server Name
Indication communications. When creating a new profile, the setting is provided
by the parent profile.
type: str
sni_default:
description:
- Indicates that the system uses this profile as the default SSL profile when there
is no match to the server name, or when the client provides no SNI extension support.
- When creating a new profile, the setting is provided by the parent profile.
- There can be only one SSL profile with this setting enabled.
type: bool
sni_require:
description:
- Requires that the network peers also provide SNI support, setting only takes
effect when C(sni_default) is C(yes).
- When creating a new profile, the setting is provided by the parent profile.
type: bool
server_certificate:
description:
- Specifies the way the system handles server certificates.
- When C(ignore), specifies that the system ignores certificates from server systems.
- When C(require), specifies that the system requires a server to present a valid
certificate.
type: str
choices:
- ignore
- require
certificate:
description:
- Specifies the name of the certificate that the system uses for server-side SSL
processing.
type: str
key:
description:
- Specifies the file name of the SSL key.
type: str
chain:
description:
- Specifies the certificates-key chain to associate with the SSL profile.
type: str
passphrase:
description:
- Specifies a passphrase used to encrypt the key.
type: str
update_password:
description:
- C(always) will allow to update passwords if the user chooses to do so.
C(on_create) will only set the password for newly created profiles.
type: str
choices:
- always
- on_create
default: always
ocsp_profile:
description:
- Specifies the name of the OCSP profile for purpose of validating status
of server certificate.
type: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
state:
description:
- When C(present), ensures that the profile exists.
- When C(absent), ensures the profile is removed.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Create a new server SSL profile
bigip_profile_server_ssl:
name: foo
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
'''
RETURN = r'''
ciphers:
description: The ciphers applied to the profile.
returned: changed
type: str
sample: "!SSLv3:!SSLv2:ECDHE+AES-GCM+SHA256:ECDHE-RSA-AES128-CBC-SHA"
secure_renegotiation:
description: The method of secure SSL renegotiation.
returned: changed
type: str
sample: request
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import fq_name
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import flatten_boolean
from library.module_utils.network.f5.common import transform_name
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import fq_name
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import flatten_boolean
from ansible.module_utils.network.f5.common import transform_name
class Parameters(AnsibleF5Parameters):
    """Base parameter class.

    Maps REST attribute names to module option names and declares which
    attributes are sent to, returned from, and diffed against the API.
    """
    api_map = {
        'cert': 'certificate',
        'ocsp': 'ocsp_profile',
        'defaultsFrom': 'parent',
        'secureRenegotiation': 'secure_renegotiation',
        'sniDefault': 'sni_default',
        'sniRequire': 'sni_require',
        'serverName': 'server_name',
        'peerCertMode': 'server_certificate',
    }

    api_attributes = [
        'cert',
        'chain',
        'ciphers',
        'defaultsFrom',
        'key',
        'ocsp',
        'secureRenegotiation',
        'sniDefault',
        'sniRequire',
        'serverName',
        'peerCertMode',
    ]

    returnables = [
        'certificate',
        'chain',
        'ciphers',
        'key',
        'ocsp_profile',
        'secure_renegotiation',
        'parent',
        'sni_default',
        'sni_require',
        'server_name',
        'server_certificate',
    ]

    updatables = [
        'certificate',
        'chain',
        'ciphers',
        'key',
        'ocsp_profile',
        'secure_renegotiation',
        'sni_default',
        'sni_require',
        'server_name',
        'server_certificate',
    ]

    @property
    def sni_default(self):
        return flatten_boolean(self._values['sni_default'])

    def _fq_resource(self, key):
        """Return the fully-qualified (/Partition/name) form of the resource
        reference stored under ``key``.

        ``None`` means the option was not provided; ``''`` or ``'none'``
        means "explicitly unset the attribute on the device" and maps to
        ``''``. Shared by the four resource-reference properties below,
        which previously duplicated this logic.
        """
        value = self._values[key]
        if value is None:
            return None
        if value in ['', 'none']:
            return ''
        return fq_name(self.partition, value)

    @property
    def certificate(self):
        return self._fq_resource('certificate')

    @property
    def key(self):
        return self._fq_resource('key')

    @property
    def chain(self):
        return self._fq_resource('chain')

    @property
    def ocsp_profile(self):
        return self._fq_resource('ocsp_profile')
class ApiParameters(Parameters):
    """Parameters as returned by the BIG-IP REST API."""

    @property
    def sni_require(self):
        return flatten_boolean(self._values['sni_require'])

    @property
    def server_name(self):
        # The API reports 'none' for an unset server name; normalize to None.
        value = self._values['server_name']
        if value in [None, 'none']:
            return None
        return value
class ModuleParameters(Parameters):
    """Parameters as supplied by the user in the task."""

    @property
    def server_name(self):
        # None => not provided; ''/'none' => explicitly clear on the device.
        value = self._values['server_name']
        if value is None:
            return None
        if value in ['', 'none']:
            return ''
        return value

    @property
    def parent(self):
        parent = self._values['parent']
        if parent is None:
            return None
        if parent == 'serverssl':
            # Bare default name is always in /Common.
            return '/Common/serverssl'
        return fq_name(self.partition, parent)

    @property
    def sni_require(self):
        require = flatten_boolean(self._values['sni_require'])
        if require is None:
            return None
        # sni_require only makes sense when this profile is the SNI default.
        default = self.sni_default
        if default in [None, 'no'] and require == 'yes':
            raise F5ModuleError(
                "Cannot set 'sni_require' to {0} if 'sni_default' is set as {1}".format(require, default))
        return require
class Changes(Parameters):
    """Holds the computed set of changes; base for usable/reportable views."""

    def to_return(self):
        """Return the filtered dict of reportable values.

        Deliberately best-effort: any failure while gathering values
        yields whatever was collected so far.
        """
        result = {}
        try:
            for key in self.returnables:
                result[key] = getattr(self, key)
            result = self._filter_params(result)
        except Exception:
            pass
        return result
class UsableChanges(Changes):
    """Translates 'yes'/'no' flags into the 'true'/'false' strings the API expects."""

    @property
    def sni_default(self):
        value = self._values['sni_default']
        if value is None:
            return None
        return 'true' if value == 'yes' else 'false'

    @property
    def sni_require(self):
        value = self._values['sni_require']
        if value is None:
            return None
        return 'true' if value == 'yes' else 'false'
class ReportableChanges(Changes):
    """Normalizes API-side values back into 'yes'/'no' for module output."""

    @property
    def sni_default(self):
        return flatten_boolean(self._values['sni_default'])

    @property
    def sni_require(self):
        return flatten_boolean(self._values['sni_require'])
class Difference(object):
    """Computes per-parameter differences between the desired state
    (``want``) and the current state (``have``).
    """

    def __init__(self, want, have=None):
        self.want = want
        self.have = have

    def compare(self, param):
        """Return the changed value for *param*, or None when unchanged.

        Parameters with special diff rules are implemented as properties;
        everything else falls back to a plain inequality check.
        """
        try:
            return getattr(self, param)
        except AttributeError:
            return self.__default(param)

    @property
    def parent(self):
        # The parent profile is immutable once the profile exists.
        if self.want.parent != self.have.parent:
            raise F5ModuleError(
                "The parent profile cannot be changed"
            )

    @property
    def sni_require(self):
        if self.want.sni_require is None:
            return None
        if self.want.sni_require == 'no':
            # Cannot drop sni_require while the profile remains the SNI default.
            if self.have.sni_default == 'yes' and self.want.sni_default is None:
                raise F5ModuleError(
                    "Cannot set 'sni_require' to {0} if 'sni_default' is {1}".format(
                        self.want.sni_require, self.have.sni_default
                    )
                )
        if self.want.sni_require != self.have.sni_require:
            return self.want.sni_require

    @property
    def server_name(self):
        if self.want.server_name is None:
            return None
        # Clearing a name that is already unset is a no-op.
        if self.want.server_name == '' and self.have.server_name is None:
            return None
        if self.want.server_name != self.have.server_name:
            return self.want.server_name

    def __default(self, param):
        desired = getattr(self.want, param)
        try:
            current = getattr(self.have, param)
            if desired != current:
                return desired
        except AttributeError:
            # 'have' does not expose the attribute at all; report desired.
            return desired
class ModuleManager(object):
    """Drives the module: diffs the desired state (module params) against
    the current state (device API) and applies the result via the BIG-IP
    iControl REST API.
    """

    def __init__(self, *args, **kwargs):
        self.module = kwargs.get('module', None)
        self.client = F5RestClient(**self.module.params)
        self.want = ModuleParameters(params=self.module.params)
        # 'have' stays empty until read_current_from_device() populates it.
        self.have = ApiParameters()
        self.changes = UsableChanges()

    def _set_changed_options(self):
        # On create: every user-supplied (non-None) returnable is a change.
        changed = {}
        for key in Parameters.returnables:
            if getattr(self.want, key) is not None:
                changed[key] = getattr(self.want, key)
        if changed:
            self.changes = UsableChanges(params=changed)

    def _update_changed_options(self):
        # On update: diff desired vs current state. Returns True when
        # anything differs (and stores the diff in self.changes).
        diff = Difference(self.want, self.have)
        updatables = Parameters.updatables
        changed = dict()
        for k in updatables:
            change = diff.compare(k)
            if change is None:
                continue
            else:
                if isinstance(change, dict):
                    changed.update(change)
                else:
                    changed[k] = change
        if changed:
            self.changes = UsableChanges(params=changed)
            return True
        return False

    def should_update(self):
        result = self._update_changed_options()
        if result:
            return True
        return False

    def exec_module(self):
        """Entry point called by main(); returns the Ansible result dict."""
        changed = False
        result = dict()
        state = self.want.state

        if state == "present":
            changed = self.present()
        elif state == "absent":
            changed = self.absent()

        reportable = ReportableChanges(params=self.changes.to_return())
        changes = reportable.to_return()
        result.update(**changes)
        result.update(dict(changed=changed))
        self._announce_deprecations(result)
        return result

    def _announce_deprecations(self, result):
        # Surface deprecation warnings collected during parameter handling.
        warnings = result.pop('__warnings', [])
        for warning in warnings:
            self.client.module.deprecate(
                msg=warning['msg'],
                version=warning['version']
            )

    def present(self):
        """Ensure the profile exists, creating or updating as needed."""
        if self.exists():
            return self.update()
        else:
            return self.create()

    def exists(self):
        """Return True if the profile is present on the device."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError:
            # Non-JSON body: treat as "not found".
            return False
        if resp.status == 404 or 'code' in response and response['code'] == 404:
            return False
        return True

    def update(self):
        """Update the existing profile; returns True when a change was made."""
        self.have = self.read_current_from_device()
        if self.want.update_password == 'always':
            self.want.update({'passphrase': self.want.passphrase})
        else:
            # 'on_create': never overwrite the passphrase of an existing profile.
            if self.want.passphrase:
                del self.want._values['passphrase']
        if not self.should_update():
            return False
        if self.module.check_mode:
            return True
        self.update_on_device()
        return True

    def remove(self):
        """Delete the profile; raises if the device still reports it afterwards."""
        if self.module.check_mode:
            return True
        self.remove_from_device()
        if self.exists():
            raise F5ModuleError("Failed to delete the resource.")
        return True

    def create(self):
        """Create the profile from the desired options."""
        self._set_changed_options()
        if self.module.check_mode:
            return True
        self.create_on_device()
        return True

    def create_on_device(self):
        """POST the new profile to the device; raises F5ModuleError on failure."""
        params = self.changes.api_params()
        params['name'] = self.want.name
        params['partition'] = self.want.partition
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl/".format(
            self.client.provider['server'],
            self.client.provider['server_port']
        )
        resp = self.client.api.post(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 403, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def update_on_device(self):
        """PATCH the changed attributes onto the existing profile."""
        params = self.changes.api_params()
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.patch(uri, json=params)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] in [400, 404]:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)

    def absent(self):
        """Ensure the profile is removed; returns True when it was deleted."""
        if self.exists():
            return self.remove()
        return False

    def remove_from_device(self):
        """DELETE the profile on the device; raises F5ModuleError on failure."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        response = self.client.api.delete(uri)
        if response.status == 200:
            return True
        raise F5ModuleError(response.content)

    def read_current_from_device(self):
        """GET the profile's current attributes, wrapped in ApiParameters."""
        uri = "https://{0}:{1}/mgmt/tm/ltm/profile/server-ssl/{2}".format(
            self.client.provider['server'],
            self.client.provider['server_port'],
            transform_name(self.want.partition, self.want.name)
        )
        resp = self.client.api.get(uri)
        try:
            response = resp.json()
        except ValueError as ex:
            raise F5ModuleError(str(ex))
        if 'code' in response and response['code'] == 400:
            if 'message' in response:
                raise F5ModuleError(response['message'])
            else:
                raise F5ModuleError(resp.content)
        return ApiParameters(params=response)
class ArgumentSpec(object):
    """Assembles the AnsibleModule argument specification, combining the
    module-specific options with the common F5 provider options.
    """

    def __init__(self):
        self.supports_check_mode = True
        spec = dict(
            name=dict(required=True),
            certificate=dict(),
            chain=dict(),
            key=dict(),
            passphrase=dict(no_log=True),
            parent=dict(default='/Common/serverssl'),
            ciphers=dict(),
            secure_renegotiation=dict(
                choices=['require', 'require-strict', 'request']
            ),
            server_certificate=dict(
                choices=['ignore', 'require']
            ),
            state=dict(
                default='present',
                choices=['present', 'absent']
            ),
            update_password=dict(
                default='always',
                choices=['always', 'on_create']
            ),
            sni_default=dict(type='bool'),
            sni_require=dict(type='bool'),
            server_name=dict(),
            ocsp_profile=dict(),
            partition=dict(
                default='Common',
                fallback=(env_fallback, ['F5_PARTITION'])
            )
        )
        # Shared F5 options first, then the module-specific ones on top.
        self.argument_spec = dict(f5_argument_spec)
        self.argument_spec.update(spec)
        self.required_together = [
            ['certificate', 'key']
        ]
def main():
    """Module entry point: build the spec, run the manager, report results."""
    spec = ArgumentSpec()

    module = AnsibleModule(
        argument_spec=spec.argument_spec,
        supports_check_mode=spec.supports_check_mode,
        required_together=spec.required_together,
    )

    try:
        manager = ModuleManager(module=module)
        module.exit_json(**manager.exec_module())
    except F5ModuleError as ex:
        module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
vulcansteel/autorest | AutoRest/Generators/Python/Azure.Python.Tests/Expected/AcceptanceTests/AzureSpecials/auto_rest_azure_special_parameters_test_client/operations/api_version_default_operations.py | 1 | 7668 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
import uuid
from .. import models
class ApiVersionDefaultOperations(object):
    """Operations exercising an api-version modeled in the client's global
    settings (``self.config.api_version``) rather than per-method.

    All four public operations send the same GET request shape to different
    URLs, so the shared logic lives in ``_get_global_api_version``.

    Note: the ``custom_headers`` defaults were changed from the mutable
    ``{}`` to ``None`` to avoid one dict instance being shared across all
    calls; ``None`` is treated identically (falsy) by the header-merging
    logic, so callers are unaffected.
    """

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer

        self.config = config

    def _get_global_api_version(self, url, custom_headers, raw, operation_config):
        """Send a GET to *url* with the globally configured api-version.

        :param str url: request path
        :param dict custom_headers: extra headers merged into the request
        :param bool raw: when True, return the raw ClientRawResponse
        :param dict operation_config: per-operation configuration overrides
        :rtype: None or ClientRawResponse
        :raises models.ErrorException: on any non-200 status code
        """
        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.config.api_version", self.config.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, **operation_config)

        if response.status_code not in [200]:
            raise models.ErrorException(self._deserialize, response)

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def get_method_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        GET method with api-version modeled in global settings.

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        url = '/azurespecials/apiVersion/method/string/none/query/global/2015-07-01-preview'
        return self._get_global_api_version(url, custom_headers, raw, operation_config)

    def get_method_global_not_provided_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        GET method with api-version modeled in global settings.

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        url = '/azurespecials/apiVersion/method/string/none/query/globalNotProvided/2015-07-01-preview'
        return self._get_global_api_version(url, custom_headers, raw, operation_config)

    def get_path_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        GET method with api-version modeled in global settings.

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        url = '/azurespecials/apiVersion/path/string/none/query/global/2015-07-01-preview'
        return self._get_global_api_version(url, custom_headers, raw, operation_config)

    def get_swagger_global_valid(
            self, custom_headers=None, raw=False, **operation_config):
        """
        GET method with api-version modeled in global settings.

        :param dict custom_headers: headers that will be added to the request
        :param boolean raw: returns the direct response alongside the
         deserialized response
        :rtype: None or (None, requests.response) or concurrent.futures.Future
        """
        url = '/azurespecials/apiVersion/swagger/string/none/query/global/2015-07-01-preview'
        return self._get_global_api_version(url, custom_headers, raw, operation_config)
| mit |
40223219/w16_test | static/Brython3.1.1-20150328-091302/Lib/unittest/test/test_result.py | 788 | 19069 | import io
import sys
import textwrap
from test import support
import traceback
import unittest
class Test_TestResult(unittest.TestCase):
# Note: there are not separate tests for TestResult.wasSuccessful(),
# TestResult.errors, TestResult.failures, TestResult.testsRun or
# TestResult.shouldStop because these only have meaning in terms of
# other TestResult methods.
#
# Accordingly, tests for the aforenamed attributes are incorporated
# in with the tests for the defining methods.
################################################################
def test_init(self):
result = unittest.TestResult()
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 0)
self.assertEqual(result.shouldStop, False)
self.assertIsNone(result._stdout_buffer)
self.assertIsNone(result._stderr_buffer)
# "This method can be called to signal that the set of tests being
# run should be aborted by setting the TestResult's shouldStop
# attribute to True."
def test_stop(self):
result = unittest.TestResult()
result.stop()
self.assertEqual(result.shouldStop, True)
# "Called when the test case test is about to be run. The default
# implementation simply increments the instance's testsRun counter."
def test_startTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# "Called after the test case test has been executed, regardless of
# the outcome. The default implementation does nothing."
def test_stopTest(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
result.stopTest(test)
# Same tests as above; make sure nothing has changed
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "Called before and after tests are run. The default implementation does nothing."
def test_startTestRun_stopTestRun(self):
result = unittest.TestResult()
result.startTestRun()
result.stopTestRun()
# "addSuccess(test)"
# ...
# "Called when the test case test succeeds"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addSuccess(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
result = unittest.TestResult()
result.startTest(test)
result.addSuccess(test)
result.stopTest(test)
self.assertTrue(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
# "addFailure(test, err)"
# ...
# "Called when the test case test signals a failure. err is a tuple of
# the form returned by sys.exc_info(): (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addFailure(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
test.fail("foo")
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addFailure(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 0)
self.assertEqual(len(result.failures), 1)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.failures[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
# "addError(test, err)"
# ...
# "Called when the test case test raises an unexpected exception err
# is a tuple of the form returned by sys.exc_info():
# (type, value, traceback)"
# ...
# "wasSuccessful() - Returns True if all tests run so far have passed,
# otherwise returns False"
# ...
# "testsRun - The total number of tests run so far."
# ...
# "errors - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test which raised an
# unexpected exception. Contains formatted
# tracebacks instead of sys.exc_info() results."
# ...
# "failures - A list containing 2-tuples of TestCase instances and
# formatted tracebacks. Each tuple represents a test where a failure was
# explicitly signalled using the TestCase.fail*() or TestCase.assert*()
# methods. Contains formatted tracebacks instead
# of sys.exc_info() results."
def test_addError(self):
class Foo(unittest.TestCase):
def test_1(self):
pass
test = Foo('test_1')
try:
raise TypeError()
except:
exc_info_tuple = sys.exc_info()
result = unittest.TestResult()
result.startTest(test)
result.addError(test, exc_info_tuple)
result.stopTest(test)
self.assertFalse(result.wasSuccessful())
self.assertEqual(len(result.errors), 1)
self.assertEqual(len(result.failures), 0)
self.assertEqual(result.testsRun, 1)
self.assertEqual(result.shouldStop, False)
test_case, formatted_exc = result.errors[0]
self.assertTrue(test_case is test)
self.assertIsInstance(formatted_exc, str)
def testGetDescriptionWithoutDocstring(self):
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
'testGetDescriptionWithoutDocstring (' + __name__ +
'.Test_TestResult)')
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithOneLineDocstring(self):
"""Tests getDescription() for a method with a docstring."""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithOneLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a docstring.'))
@unittest.skipIf(sys.flags.optimize >= 2,
"Docstrings are omitted with -O2 and above")
def testGetDescriptionWithMultiLineDocstring(self):
"""Tests getDescription() for a method with a longer docstring.
The second line of the docstring.
"""
result = unittest.TextTestResult(None, True, 1)
self.assertEqual(
result.getDescription(self),
('testGetDescriptionWithMultiLineDocstring '
'(' + __name__ + '.Test_TestResult)\n'
'Tests getDescription() for a method with a longer '
'docstring.'))
def testStackFrameTrimming(self):
class Frame(object):
class tb_frame(object):
f_globals = {}
result = unittest.TestResult()
self.assertFalse(result._is_relevant_tb_level(Frame))
Frame.tb_frame.f_globals['__unittest'] = True
self.assertTrue(result._is_relevant_tb_level(Frame))
def testFailFast(self):
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addError(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addFailure(None, None)
self.assertTrue(result.shouldStop)
result = unittest.TestResult()
result._exc_info_to_string = lambda *_: ''
result.failfast = True
result.addUnexpectedSuccess(None)
self.assertTrue(result.shouldStop)
def testFailFastSetByRunner(self):
runner = unittest.TextTestRunner(stream=io.StringIO(), failfast=True)
def test(result):
self.assertTrue(result.failfast)
result = runner.run(test)
# Build an "old-style" TestResult class -- one that predates the
# addSkip/addExpectedFailure/addUnexpectedSuccess API -- by copying the
# modern class dict and stripping the newer methods.  Used by the tests
# below to verify that current runners still accept such result objects.
classDict = dict(unittest.TestResult.__dict__)
for m in ('addSkip', 'addExpectedFailure', 'addUnexpectedSuccess',
          '__init__'):
    del classDict[m]
def __init__(self, stream=None, descriptions=None, verbosity=None):
    # Minimal pre-2.7 TestResult state: no skipped/expected-failure
    # bookkeeping, and a plain ``buffer`` attribute instead of buffering
    # machinery.
    self.failures = []
    self.errors = []
    self.testsRun = 0
    self.shouldStop = False
    self.buffer = False
classDict['__init__'] = __init__
OldResult = type('OldResult', (object,), classDict)
class Test_OldTestResult(unittest.TestCase):
    """Verify TestCase.run() degrades gracefully with legacy results.

    Uses the module-level ``OldResult`` class (a TestResult stripped of
    the addSkip/addExpectedFailure/addUnexpectedSuccess methods) and
    checks that running against it emits a RuntimeWarning rather than
    crashing.
    """
    def assertOldResultWarning(self, test, failures):
        # Running against OldResult must warn about the missing add*
        # method and still record the expected number of failures.
        with support.check_warnings(("TestResult has no add.+ method,",
                                    RuntimeWarning)):
            result = OldResult()
            test.run(result)
            self.assertEqual(len(result.failures), failures)
    def testOldTestResult(self):
        # Skips and expected failures pass; an unexpected success is
        # downgraded to one recorded failure on an old result object.
        class Test(unittest.TestCase):
            def testSkip(self):
                self.skipTest('foobar')
            @unittest.expectedFailure
            def testExpectedFail(self):
                raise TypeError
            @unittest.expectedFailure
            def testUnexpectedSuccess(self):
                pass
        for test_name, should_pass in (('testSkip', True),
                                       ('testExpectedFail', True),
                                       ('testUnexpectedSuccess', False)):
            test = Test(test_name)
            self.assertOldResultWarning(test, int(not should_pass))
    # NOTE(review): method name has a typo ("Tesult"); left unchanged
    # since unittest discovers tests by their exact name.
    def testOldTestTesultSetup(self):
        # A skip raised from setUp() must also be tolerated by OldResult.
        class Test(unittest.TestCase):
            def setUp(self):
                self.skipTest('no reason')
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldTestResultClass(self):
        # A class-level @unittest.skip must also be tolerated.
        @unittest.skip('no reason')
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        self.assertOldResultWarning(Test('testFoo'), 0)
    def testOldResultWithRunner(self):
        class Test(unittest.TestCase):
            def testFoo(self):
                pass
        runner = unittest.TextTestRunner(resultclass=OldResult,
                                          stream=io.StringIO())
        # This will raise an exception if TextTestRunner can't handle old
        # test result objects
        runner.run(Test('testFoo'))
class MockTraceback(object):
    """Stand-in for the ``traceback`` module used by unittest.result.

    format_exception() ignores its arguments and returns a fixed,
    predictable string so tests can assert on formatted output.
    """
    @staticmethod
    def format_exception(*_):
        return ['A traceback']
def restore_traceback():
    # Undo the MockTraceback patch: point unittest.result back at the real
    # ``traceback`` module (imported at the top of this file).  Registered
    # via addCleanup() by tests that install the mock.
    unittest.result.traceback = traceback
class TestOutputBuffering(unittest.TestCase):
    """Tests for TestResult's output-buffering mode (result.buffer).

    When buffering is on, startTest() swaps sys.stdout/sys.stderr for
    StringIO objects; stopTest() restores them, and captured output is
    appended to failure/error messages but discarded on success.
    """
    def setUp(self):
        # Remember the real streams so tearDown can restore them even if a
        # test leaves the swapped StringIO objects installed.
        self._real_out = sys.stdout
        self._real_err = sys.stderr
    def tearDown(self):
        sys.stdout = self._real_out
        sys.stderr = self._real_err
    def testBufferOutputOff(self):
        # With buffer left False, startTest() must not touch the streams.
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
    def testBufferOutputStartTestAddSuccess(self):
        # With buffer True: startTest() swaps in two distinct StringIOs;
        # on success, captured output is discarded and the originals are
        # restored untouched.
        real_out = self._real_out
        real_err = self._real_err
        result = unittest.TestResult()
        self.assertFalse(result.buffer)
        result.buffer = True
        self.assertIs(real_out, sys.stdout)
        self.assertIs(real_err, sys.stderr)
        result.startTest(self)
        self.assertIsNot(real_out, sys.stdout)
        self.assertIsNot(real_err, sys.stderr)
        self.assertIsInstance(sys.stdout, io.StringIO)
        self.assertIsInstance(sys.stderr, io.StringIO)
        self.assertIsNot(sys.stdout, sys.stderr)
        out_stream = sys.stdout
        err_stream = sys.stderr
        # Replace the saved originals so the assertions below can inspect
        # exactly what (if anything) gets forwarded to them.
        result._original_stdout = io.StringIO()
        result._original_stderr = io.StringIO()
        print('foo')
        print('bar', file=sys.stderr)
        self.assertEqual(out_stream.getvalue(), 'foo\n')
        self.assertEqual(err_stream.getvalue(), 'bar\n')
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        result.addSuccess(self)
        result.stopTest(self)
        self.assertIs(sys.stdout, result._original_stdout)
        self.assertIs(sys.stderr, result._original_stderr)
        self.assertEqual(result._original_stdout.getvalue(), '')
        self.assertEqual(result._original_stderr.getvalue(), '')
        # The capture buffers are truncated after a successful test.
        self.assertEqual(out_stream.getvalue(), '')
        self.assertEqual(err_stream.getvalue(), '')
    def getStartedResult(self):
        # Helper: a buffering result with startTest() already called.
        result = unittest.TestResult()
        result.buffer = True
        result.startTest(self)
        return result
    def testBufferOutputAddErrorOrFailure(self):
        # On addError/addFailure, captured stdout (and stderr, when
        # present) must be forwarded to the original streams and appended
        # to the stored message after the (mocked) traceback text.
        unittest.result.traceback = MockTraceback
        self.addCleanup(restore_traceback)
        for message_attr, add_attr, include_error in [
            ('errors', 'addError', True),
            ('failures', 'addFailure', False),
            ('errors', 'addError', True),
            ('failures', 'addFailure', False)
        ]:
            result = self.getStartedResult()
            buffered_out = sys.stdout
            buffered_err = sys.stderr
            result._original_stdout = io.StringIO()
            result._original_stderr = io.StringIO()
            print('foo', file=sys.stdout)
            if include_error:
                print('bar', file=sys.stderr)
            addFunction = getattr(result, add_attr)
            addFunction(self, (None, None, None))
            result.stopTest(self)
            result_list = getattr(result, message_attr)
            self.assertEqual(len(result_list), 1)
            test, message = result_list[0]
            expectedOutMessage = textwrap.dedent("""
                Stdout:
                foo
            """)
            expectedErrMessage = ''
            if include_error:
                expectedErrMessage = textwrap.dedent("""
                Stderr:
                bar
            """)
            expectedFullMessage = 'A traceback%s%s' % (expectedOutMessage, expectedErrMessage)
            self.assertIs(test, self)
            self.assertEqual(result._original_stdout.getvalue(), expectedOutMessage)
            self.assertEqual(result._original_stderr.getvalue(), expectedErrMessage)
            self.assertMultiLineEqual(message, expectedFullMessage)
    def testBufferSetupClass(self):
        # An exception raised in setUpClass must be recorded as an error
        # even with buffering enabled.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def setUpClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownClass(self):
        # Same for tearDownClass.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            @classmethod
            def tearDownClass(cls):
                1/0
            def test_foo(self):
                pass
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferSetUpModule(self):
        # Same for a failing module-level setUpModule hook.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def setUpModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
    def testBufferTearDownModule(self):
        # Same for a failing module-level tearDownModule hook.
        result = unittest.TestResult()
        result.buffer = True
        class Foo(unittest.TestCase):
            def test_foo(self):
                pass
        class Module(object):
            @staticmethod
            def tearDownModule():
                1/0
        Foo.__module__ = 'Module'
        sys.modules['Module'] = Module
        self.addCleanup(sys.modules.pop, 'Module')
        suite = unittest.TestSuite([Foo('test_foo')])
        suite(result)
        self.assertEqual(len(result.errors), 1)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
# --- dataset concatenation artifact normalized ---
# license cell: agpl-3.0
# next file: ujdhesa/unisubs — apps/teams/migrations/0133_add_notify_interval.py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        # Adding field 'Team.notify_interval'
        # Single-character choice column with default 'D' — presumably
        # "daily"; TODO confirm against the Team model's notification
        # choices.  keep_default=False: South applies the default only to
        # existing rows, not to the schema.
        db.add_column('teams_team', 'notify_interval',
                      self.gf('django.db.models.fields.CharField')(default='D', max_length=1),
                      keep_default=False)
    def backwards(self, orm):
        # Deleting field 'Team.notify_interval'
        # Exact reverse of forwards(); any data in the column is lost on
        # rollback.
        db.delete_column('teams_team', 'notify_interval')
models = {
'accountlinker.thirdpartyaccount': {
'Meta': {'unique_together': "(('type', 'username'),)", 'object_name': 'ThirdPartyAccount'},
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'oauth_access_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'oauth_refresh_token': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'can_send_messages': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '63', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_partner': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'notify_by_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'notify_by_message': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Partner']", 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'subtitles.subtitlelanguage': {
'Meta': {'unique_together': "[('video', 'language_code')]", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'new_followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'official_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_expired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'pending_signoff_unexpired_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subtitles_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'unofficial_signoff_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitlelanguage_set'", 'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'writelocked_newlanguages'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'subtitles.subtitleversion': {
'Meta': {'unique_together': "[('video', 'subtitle_language', 'version_number'), ('video', 'language_code', 'version_number')]", 'object_name': 'SubtitleVersion'},
'author': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitleversion_set'", 'to': "orm['auth.CustomUser']"}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'meta_1_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_content': ('apps.videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'note': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '512', 'blank': 'True'}),
'origin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['subtitles.SubtitleVersion']", 'symmetrical': 'False', 'blank': 'True'}),
'rollback_of_version_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'serialized_lineage': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'serialized_subtitles': ('django.db.models.fields.TextField', [], {}),
'subtitle_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'version_number': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'newsubtitleversion_set'", 'to': "orm['videos.Video']"}),
'visibility': ('django.db.models.fields.CharField', [], {'default': "'public'", 'max_length': '10'}),
'visibility_override': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '10', 'blank': 'True'})
},
'teams.application': {
'Meta': {'unique_together': "(('team', 'user', 'status'),)", 'object_name': 'Application'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'history': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'applications'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_applications'", 'to': "orm['auth.CustomUser']"})
},
'teams.billingrecord': {
'Meta': {'unique_together': "(('video', 'new_subtitle_language'),)", 'object_name': 'BillingRecord'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'minutes': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'new_subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'teams.billingreport': {
'Meta': {'object_name': 'BillingReport'},
'csv_file': ('utils.amazon.fields.S3EnabledFileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'billing_reports'", 'symmetrical': 'False', 'to': "orm['teams.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '2'})
},
'teams.invite': {
'Meta': {'object_name': 'Invite'},
'approved': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'note': ('django.db.models.fields.TextField', [], {'max_length': '200', 'blank': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'invitations'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_invitations'", 'to': "orm['auth.CustomUser']"})
},
'teams.membershipnarrowing': {
'Meta': {'object_name': 'MembershipNarrowing'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'narrowing_includer'", 'null': 'True', 'to': "orm['teams.TeamMember']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '24', 'blank': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'narrowings'", 'to': "orm['teams.TeamMember']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'})
},
'teams.partner': {
'Meta': {'object_name': 'Partner'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'managed_partners'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['auth.CustomUser']"}),
'can_request_paid_captions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
},
'teams.project': {
'Meta': {'unique_together': "(('team', 'name'), ('team', 'slug'))", 'object_name': 'Project'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'guidelines': ('django.db.models.fields.TextField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'teams.setting': {
'Meta': {'unique_together': "(('key', 'team'),)", 'object_name': 'Setting'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.PositiveIntegerField', [], {}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'settings'", 'to': "orm['teams.Team']"})
},
'teams.task': {
'Meta': {'object_name': 'Task'},
'approved': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'assignee': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'body': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'completed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'expiration_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '16', 'blank': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'new_review_base_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tasks_based_on_new'", 'null': 'True', 'to': "orm['subtitles.SubtitleVersion']"}),
'new_subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['subtitles.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True', 'blank': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'review_base_version': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tasks_based_on'", 'null': 'True', 'to': "orm['videos.SubtitleVersion']"}),
'subtitle_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'teams.team': {
'Meta': {'ordering': "['name']", 'object_name': 'Team'},
'applicants': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'applicated_teams'", 'symmetrical': 'False', 'through': "orm['teams.Application']", 'to': "orm['auth.CustomUser']"}),
'application_text': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'auth_provider_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '24', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'header_html_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'highlight': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_moderated': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_visible': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'last_notification_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'logo': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'autocrop': True}", 'max_length': '100', 'blank': 'True'}),
'max_tasks_per_member': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'membership_policy': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '250'}),
'notify_interval': ('django.db.models.fields.CharField', [], {'default': "'D'", 'max_length': '1'}),
'page_content': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'partner': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'teams'", 'null': 'True', 'to': "orm['teams.Partner']"}),
'points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'projects_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'subtitle_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_assign_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'task_expiration': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'third_party_accounts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'to': "orm['accountlinker.ThirdPartyAccount']"}),
'translate_policy': ('django.db.models.fields.IntegerField', [], {'default': '10'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'teams'", 'symmetrical': 'False', 'through': "orm['teams.TeamMember']", 'to': "orm['auth.CustomUser']"}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'intro_for_teams'", 'null': 'True', 'to': "orm['videos.Video']"}),
'video_policy': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'through': "orm['teams.TeamVideo']", 'symmetrical': 'False'}),
'workflow_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'teams.teamlanguagepreference': {
'Meta': {'unique_together': "(('team', 'language_code'),)", 'object_name': 'TeamLanguagePreference'},
'allow_reads': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_writes': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'preferred': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'lang_preferences'", 'to': "orm['teams.Team']"})
},
'teams.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'default': "'contributor'", 'max_length': '16', 'db_index': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'members'", 'to': "orm['teams.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'team_members'", 'to': "orm['auth.CustomUser']"})
},
'teams.teamnotificationsetting': {
'Meta': {'object_name': 'TeamNotificationSetting'},
'basic_auth_password': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'basic_auth_username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'notification_class': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'partner': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'notification_settings'", 'unique': 'True', 'null': 'True', 'to': "orm['teams.Partner']"}),
'request_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'notification_settings'", 'unique': 'True', 'null': 'True', 'to': "orm['teams.Team']"})
},
'teams.teamvideo': {
'Meta': {'unique_together': "(('team', 'video'),)", 'object_name': 'TeamVideo'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'all_languages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'partner_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']"}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'null': 'True', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'video': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['videos.Video']", 'unique': 'True'})
},
'teams.teamvideomigration': {
'Meta': {'object_name': 'TeamVideoMigration'},
'datetime': ('django.db.models.fields.DateTimeField', [], {}),
'from_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['teams.Team']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'to_project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['teams.Project']"}),
'to_team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['teams.Team']"})
},
'teams.workflow': {
'Meta': {'unique_together': "(('team', 'project', 'team_video'),)", 'object_name': 'Workflow'},
'approve_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'autocreate_subtitle': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'autocreate_translate': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Project']", 'null': 'True', 'blank': 'True'}),
'review_allowed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.Team']"}),
'team_video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['teams.TeamVideo']", 'null': 'True', 'blank': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'needs_sync': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_subtitle_language': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'old_subtitle_version'", 'null': 'True', 'to': "orm['subtitles.SubtitleLanguage']"}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'ordering': "['-version_no']", 'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'forked_from': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'moderation_status': ('django.db.models.fields.CharField', [], {'default': "'not__under_moderation'", 'max_length': '32', 'db_index': 'True'}),
'needs_sync': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'new_subtitle_version': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'old_subtitle_version'", 'unique': 'True', 'null': 'True', 'to': "orm['subtitles.SubtitleVersion']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'result_of_rollback': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'featured': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'meta_1_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_1_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_2_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_2_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'meta_3_content': ('videos.metadata.MetadataContentField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'meta_3_type': ('videos.metadata.MetadataTypeField', [], {'null': 'True', 'blank': 'True'}),
'moderated_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'moderating'", 'null': 'True', 'to': "orm['teams.Team']"}),
'primary_audio_language_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'thumb_options': "{'upscale': True, 'crop': 'smart'}", 'max_length': '100', 'thumb_sizes': '((290, 165), (120, 90))', 'blank': 'True'}),
'small_thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
}
}
complete_apps = ['teams'] | agpl-3.0 |
tobiaswaldvogel/openwrt-ib42x0 | tools/b43-tools/files/b43-fwsquash.py | 494 | 4767 | #!/usr/bin/env python
#
# b43 firmware file squasher
# Removes unnecessary firmware files
#
# Copyright (c) 2009 Michael Buesch <mb@bu3sch.de>
#
# Licensed under the GNU/GPL version 2 or (at your option) any later version.
#
import sys
import os
def usage():
    """Print command-line usage information for this tool to stdout."""
    lines = [
        "Usage: %s PHYTYPES COREREVS /path/to/extracted/firmware" % sys.argv[0],
        "",
        "PHYTYPES is a comma separated list of:",
        "A => A-PHY",
        "AG => Dual A-PHY G-PHY",
        "G => G-PHY",
        "LP => LP-PHY",
        "N => N-PHY",
        "HT => HT-PHY",
        "LCN => LCN-PHY",
        "LCN40 => LCN40-PHY",
        "AC => AC-PHY",
        "",
        "COREREVS is a comma separated list of core revision numbers.",
    ]
    print("\n".join(lines))
# ---- Command-line parsing ----
if len(sys.argv) != 4:
    usage()
    sys.exit(1)

phytypes = sys.argv[1]
corerevs = sys.argv[2]
fwpath = sys.argv[3]

phytypes = phytypes.split(',')
try:
    # Build a real list (not a lazy map object): the revisions are scanned
    # repeatedly by revs_match() below, and a bad value must raise
    # ValueError right here. On Python 3, map() is lazy and would defer
    # the error (uncaught) to the first use of the result.
    corerevs = [int(r) for r in corerevs.split(',')]
except ValueError:
    print("ERROR: \"%s\" is not a valid COREREVS string\n" % corerevs)
    usage()
    sys.exit(1)

# Materialize the directory listing as a list as well: on Python 3 a
# filter object would make the emptiness check below always pass and
# would be exhausted by the first of the two iterations over it.
fwfiles = [name for name in os.listdir(fwpath) if name.endswith(".fw")]
if not fwfiles:
    print("ERROR: No firmware files found in %s" % fwpath)
    sys.exit(1)

# Firmware files that must be kept; filled in by the matching loop below.
required_fwfiles = []
def revs_match(revs_a, revs_b):
    """Return True when the two revision collections share any element."""
    return any(rev in revs_b for rev in revs_a)
def phytypes_match(types_a, types_b):
    """Return True when any entry of types_a, stripped of surrounding
    whitespace and upper-cased, appears in types_b."""
    return any(entry.strip().upper() in types_b for entry in types_a)
# Microcode/PCM firmware file -> (supported core revisions, PHY types).
# A file is required when both tuples intersect the user's request.
revmapping = {
    "ucode2.fw":        ((2, 3), ("G",)),
    "ucode4.fw":        ((4,), ("G",)),
    "ucode5.fw":        ((5, 6, 7, 8, 9, 10), ("G", "A", "AG")),
    "ucode11.fw":       ((11, 12), ("N",)),
    "ucode13.fw":       ((13,), ("LP", "G")),
    "ucode14.fw":       ((14,), ("LP",)),
    "ucode15.fw":       ((15,), ("LP",)),
    "ucode16_mimo.fw":  ((16, 17, 18, 19, 23), ("N",)),
    # "ucode16_lp.fw":  ((16, 17, 18, 19), ("LP",)),
    "ucode24_lcn.fw":   ((24,), ("LCN",)),
    "ucode25_mimo.fw":  ((25, 28), ("N",)),
    "ucode25_lcn.fw":   ((25, 28), ("LCN",)),
    "ucode26_mimo.fw":  ((26,), ("HT",)),
    "ucode29_mimo.fw":  ((29,), ("HT",)),
    "ucode30_mimo.fw":  ((30,), ("N",)),
    "ucode33_lcn40.fw": ((33,), ("LCN40",)),
    "ucode40.fw":       ((40,), ("AC",)),
    "ucode42.fw":       ((42,), ("AC",)),
    "pcm4.fw":          ((1, 2, 3, 4), ("G",)),
    "pcm5.fw":          ((5, 6, 7, 8, 9, 10), ("G", "A", "AG")),
}
# Initial-values firmware file -> (supported core revisions, PHY types).
# Same matching rule as revmapping; *bsinitvals* entries are the
# band-switch variants of the corresponding initvals files.
initvalmapping = {
    "a0g1initvals5.fw":      ((5, 6, 7, 8, 9, 10), ("AG",)),
    "a0g0initvals5.fw":      ((5, 6, 7, 8, 9, 10), ("A", "AG")),
    "b0g0initvals2.fw":      ((2, 4), ("G",)),
    "b0g0initvals5.fw":      ((5, 6, 7, 8, 9, 10), ("G",)),
    "b0g0initvals13.fw":     ((13,), ("G",)),
    "n0initvals11.fw":       ((11, 12), ("N",)),
    "n0initvals16.fw":       ((16, 17, 18, 23), ("N",)),
    "n0initvals24.fw":       ((24,), ("N",)),
    "n0initvals25.fw":       ((25, 28), ("N",)),
    "n16initvals30.fw":      ((30,), ("N",)),
    "lp0initvals13.fw":      ((13,), ("LP",)),
    "lp0initvals14.fw":      ((14,), ("LP",)),
    "lp0initvals15.fw":      ((15,), ("LP",)),
    # "lp0initvals16.fw":    ((16, 17, 18), ("LP",)),
    "lcn0initvals24.fw":     ((24,), ("LCN",)),
    "ht0initvals26.fw":      ((26,), ("HT",)),
    "ht0initvals29.fw":      ((29,), ("HT",)),
    "lcn400initvals33.fw":   ((33,), ("LCN40",)),
    "ac0initvals40.fw":      ((40,), ("AC",)),
    "ac1initvals42.fw":      ((42,), ("AC",)),
    "a0g1bsinitvals5.fw":    ((5, 6, 7, 8, 9, 10), ("AG",)),
    "a0g0bsinitvals5.fw":    ((5, 6, 7, 8, 9, 10), ("A", "AG")),
    "b0g0bsinitvals5.fw":    ((5, 6, 7, 8, 9, 10), ("G",)),
    "n0bsinitvals11.fw":     ((11, 12), ("N",)),
    "n0bsinitvals16.fw":     ((16, 17, 18, 23), ("N",)),
    "n0bsinitvals24.fw":     ((24,), ("N",)),
    "n0bsinitvals25.fw":     ((25, 28), ("N",)),
    "n16bsinitvals30.fw":    ((30,), ("N",)),
    "lp0bsinitvals13.fw":    ((13,), ("LP",)),
    "lp0bsinitvals14.fw":    ((14,), ("LP",)),
    "lp0bsinitvals15.fw":    ((15,), ("LP",)),
    # "lp0bsinitvals16.fw":  ((16, 17, 18), ("LP",)),
    "lcn0bsinitvals24.fw":   ((24,), ("LCN",)),
    "ht0bsinitvals26.fw":    ((26,), ("HT",)),
    "ht0bsinitvals29.fw":    ((29,), ("HT",)),
    "lcn400bsinitvals33.fw": ((33,), ("LCN40",)),
    "ac0bsinitvals40.fw":    ((40,), ("AC",)),
    "ac1bsinitvals42.fw":    ((42,), ("AC",)),
}
# Keep a firmware file when either mapping table lists it for one of the
# requested PHY types AND core revisions; warn about anything else.
for fwname in fwfiles:
    if fwname in revmapping:
        revs, phys = revmapping[fwname]
        if revs_match(corerevs, revs) and phytypes_match(phytypes, phys):
            required_fwfiles.append(fwname)
            continue
    if fwname in initvalmapping:
        revs, phys = initvalmapping[fwname]
        if revs_match(corerevs, revs) and phytypes_match(phytypes, phys):
            required_fwfiles.append(fwname)
            continue
    # Note: this warning is also emitted for files that ARE in a mapping
    # table but did not match the requested revisions/PHY types.
    print("WARNING: Firmware file %s not found in the mapping lists" % fwname)

# Remove every firmware file that is not needed.
for fwname in fwfiles:
    if fwname not in required_fwfiles:
        print("Deleting %s" % fwname)
        os.unlink(fwpath + '/' + fwname)
| gpl-2.0 |
petersn/money | symsplit.py | 1 | 3222 | #! /usr/bin/python
"""
Symmetric secret sharing.
"""
import hashlib, struct, os, subprocess
def choose(n, k, start=0):
    """Yield every k-element sorted index combination from range(start, n).

    Combinations are emitted in lexicographic order as lists of ints,
    e.g. choose(3, 2) -> [0, 1], [0, 2], [1, 2].  choose(n, 0) yields [].
    """
    if k:
        # range() instead of the Python-2-only xrange(); identical behavior.
        for i in range(start, n):
            for remaining in choose(n, k - 1, i + 1):
                yield [i] + remaining
    else:
        yield []
def hashalg(s):
    """Return the double SHA-512 digest (64 raw bytes) of *s*."""
    # range() instead of the Python-2-only xrange(); identical behavior.
    for _ in range(2):
        s = hashlib.sha512(s).digest()
    return s
def verify_share(s):
    """Check a share's trailing one-byte checksum against its payload."""
    payload, checksum = s[:-1], s[-1]
    return hashalg(payload)[0] == checksum
def ser(l):
    """Serialize a list of byte strings as colon-terminated hex fields,
    e.g. [b'\\x01', b'\\xff'] -> '01:ff:'."""
    # Local import; binascii works on both Python 2 and 3, unlike the
    # str.encode("hex") codec this replaces (Python 2 only).
    import binascii
    return "".join(binascii.hexlify(i).decode("ascii") + ":" for i in l)
def deser(s):
    """Parse colon-terminated hex fields back into a list of byte strings
    (inverse of ser())."""
    # Local import; replaces the Python-2-only str.decode("hex") codec.
    import binascii
    return [binascii.unhexlify(i) for i in s.split(":")[:-1]]
def symmetric(block, key, operation):
    """Encrypt or decrypt *block* with gpg (AES), keyed by *key*.

    operation is "enc" or "dec".  The hex-encoded key is handed to the
    gpg child process through a pipe via --passphrase-fd.

    NOTE(review): the read end of the pipe is never closed in the parent,
    and on Python 3 subprocess closes inherited fds by default (close_fds)
    -- confirm target runtime before porting.
    """
    read_end, write_end = os.pipe()
    os.write(write_end, key.encode("hex"))
    os.close(write_end)
    gpg_flag = {"enc": "--symmetric", "dec": "--decrypt"}[operation]
    proc = subprocess.Popen(
        ["gpg", "-q", gpg_flag, "--cipher-algo", "AES",
         "--passphrase-fd", str(read_end), "-"],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate(block)
    return stdout
def split_secret(secret, n, k):
    """split_secret(secret, n, k) -> shares, common

    Split *secret* into n shares such that join_shares(shares[:k], common)
    reconstructs the secret; any k of the n shares suffice.
    """
    # NOTE: length-hiding padding of the secret is currently disabled.
    # Each share is an index byte, 20 random bytes, then a one-byte
    # checksum over the preceding 21 bytes.
    shares = []
    for index in xrange(n):
        body = chr(index) + os.urandom(20)
        shares.append(body + hashalg(body)[0])
    # For every k-subset of shares, encrypt the secret under a key derived
    # from the sorted subset; "common" carries n, k and all ciphertexts.
    common = [chr(n), chr(k)]
    for ind in choose(n, k):
        subset = sorted(map(shares.__getitem__, ind))
        key = hashalg("".join(subset))
        common.append(symmetric(secret, key, "enc"))
    return shares, ser(common)
def join_shares(shares, common):
    """Recover the secret from *shares* plus the deserialized *common* data.

    NB: sorts *shares* in place (caller's list is mutated).
    """
    # Every share carries a trailing one-byte checksum; reject bad ones.
    assert all(verify_share(share) for share in shares), "Invalid checksum on share."
    shares.sort()
    # Key derivation uses the full (sorted) list of supplied shares.
    key = hashalg("".join(shares))
    # "common" starts with the n and k bytes, then the encrypted blocks.
    n, k = map(ord, common[:2])
    assert len(shares) >= k, "Too few shares."
    shares = shares[:k]  # tolerate being given extra shares
    # Locate which k-subset these share indexes correspond to.
    indexes = [ord(share[0]) for share in shares]
    block_index = list(choose(n, k)).index(indexes)
    block = common[block_index + 2]
    return symmetric(block, key, "dec")
if __name__ == "__main__":
	import sys, getpass
	# Split mode: symsplit <secret-file> <n> <k>
	# Writes share0 .. share{n-1} (hex encoded, one per file) plus the
	# "common" blob needed for reconstruction.
	if len(sys.argv) == 4:
		n, k = map(int, sys.argv[2:])
		#secret = getpass.getpass("Paste in secret data: ")
		secret = open(sys.argv[1]).read()
		shares, common = split_secret(secret, n, k)
		for i, share in enumerate(shares):
			fd = open("share%i" % i, "w")
			fd.write(share.encode("hex") + "\n")
			fd.close()
		fd = open("common", "w")
		fd.write(common+"\n")
		fd.close()
	# Join mode: symsplit <common-file> <output-file>
	# Prompts interactively for k hex-encoded shares, then writes the
	# recovered secret to the output file.
	elif len(sys.argv) == 3:
		common = deser(open(sys.argv[1]).read())
		shares = []
		n, k = map(ord, common[:2])
		for i in xrange(k):
			share = getpass.getpass("Share %i/%i: " % (i+1, k)).strip().decode("hex")
			assert verify_share(share), "Invalid share."
			shares.append(share)
		block = join_shares(shares, common)
		fd = open(sys.argv[2], "w")
		fd.write(block)
		fd.close()
	else:
		print "usage: symsplit file n k | symsplit common output"
		print "If 3 arguments:"
		print "  Writes out files share0, share1, ... share{n-1}, and common."
		print "If 2 argument:"
		print "  Writes the results to output."
		exit(2)
| unlicense |
imolainformatica/airnotifier | api/__init__.py | 1 | 22104 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012, Dongsheng Cai
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Dongsheng Cai nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL DONGSHENG CAI BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from hashlib import md5
try:
from httplib import BAD_REQUEST, LOCKED, FORBIDDEN, NOT_FOUND, \
INTERNAL_SERVER_ERROR, OK
except:
from http.client import BAD_REQUEST, LOCKED, FORBIDDEN, NOT_FOUND, \
INTERNAL_SERVER_ERROR, OK
import binascii
import json
import logging
import random
import time
import urllib
import uuid
from bson.objectid import ObjectId
from tornado.options import options
import requests
import tornado.web
from constants import DEVICE_TYPE_IOS, DEVICE_TYPE_ANDROID, DEVICE_TYPE_WNS, \
DEVICE_TYPE_MPNS
from pushservices.apns import PayLoad
from pushservices.gcm import GCMException, GCMInvalidRegistrationException, \
GCMNotRegisteredException, GCMUpdateRegIDsException
from pushservices.wns import WNSInvalidPushTypeException
from routes import route
from util import filter_alphabetanum, json_default, strip_tags
# Access-key permission bits.
# Maps permission name -> (bitmask value, human-readable description).
# A key document's integer ``permission`` field is AND-ed against these
# masks by APIBaseHandler.can().
API_PERMISSIONS = {
    'create_token': (0b00001, 'Create token'),
    'delete_token': (0b00010, 'Delete token'),
    'send_notification': (0b00100, 'Send notification'),
    'send_broadcast': (0b01000, 'Send broadcast'),
    'create_accesskey': (0b10000, 'Create access key')
}
class APIBaseHandler(tornado.web.RequestHandler):
    """Base class for AirNotifier REST API handlers.

    Resolves the target application from the ``X-An-App-Name`` header (or
    the ``appname`` query argument), validates the device token and the
    access key, and exposes the per-app / master MongoDB handles plus the
    running push-service connection pools to subclasses.
    """

    def initialize(self):
        # Subclasses may flip this off for endpoints that work without a key.
        self.accesskeyrequired = True
        self._time_start = time.time()

    def prepare(self):
        """Pre-process the HTTP request: resolve app, validate token/key."""
        # App name: header takes precedence over the query argument.
        # (dict.has_key() was replaced by `in`, which also works on Python 3.)
        self.appname = None
        if 'X-An-App-Name' in self.request.headers:
            self.appname = self.request.headers['X-An-App-Name']
        if not self.appname:
            self.appname = filter_alphabetanum(self.get_argument('appname'))

        # Access key, if supplied.
        self.appkey = None
        if 'X-An-App-Key' in self.request.headers:
            self.appkey = self.request.headers['X-An-App-Key']

        self.token = self.get_argument('token', None)
        self.device = self.get_argument('device', DEVICE_TYPE_IOS).lower()
        if self.device == DEVICE_TYPE_IOS:
            if self.token:
                # An APNs device token is a 64-character hex string.
                if len(self.token) != 64:
                    self.send_response(BAD_REQUEST, dict(error='Invalid token'))
                    # FIX: the response is finished; stop processing here
                    # instead of falling through and finishing twice.
                    return
                try:
                    # Validate that the token really is hex.
                    binascii.unhexlify(self.token)
                except Exception as ex:
                    self.send_response(BAD_REQUEST, dict(error='Invalid token: %s' % ex))
                    return
        else:
            # Any non-iOS device type is normalized to Android.
            self.device = DEVICE_TYPE_ANDROID

        self.app = self.masterdb.applications.find_one({'shortname': self.appname})
        if not self.app:
            self.send_response(BAD_REQUEST, dict(error='Invalid application name'))
            # FIX: without this return, check_blockediplist() below would
            # crash on self.app being None after the response was finished.
            return

        if not self.check_blockediplist(self.request.remote_ip, self.app):
            self.send_response(LOCKED, dict(error='Blocked IP'))
        else:
            key = self.db.keys.find_one({'key': self.appkey})
            if not key:
                self.permission = 0
                if self.accesskeyrequired:
                    self.send_response(BAD_REQUEST, dict(error='Invalid access key'))
            else:
                if 'permission' not in key:
                    key['permission'] = 0
                self.permission = int(key['permission'])

    def can(self, permissionname):
        """Return True if the current access key grants *permissionname*.

        Permission checks use the bitmasks in API_PERMISSIONS.
        """
        if permissionname not in API_PERMISSIONS:
            return False
        required = API_PERMISSIONS[permissionname][0]
        return (self.permission & required) == required

    def check_blockediplist(self, ip, app):
        """Return False when *ip* falls inside any CIDR of the app's
        ``blockediplist`` (one network per line); True otherwise."""
        if app.get('blockediplist'):
            from netaddr import IPNetwork, IPAddress
            iplist = app['blockediplist'].splitlines()
            for blockedip in iplist:
                if IPAddress(ip) in IPNetwork(blockedip):
                    return False
        return True

    @property
    def dbname(self):
        """Name of this application's database."""
        return options.appprefix + self.appname

    @property
    def db(self):
        """App DB; stores logs/objects/users etc."""
        return self.application.mongodb[self.dbname]

    @property
    def masterdb(self):
        """Master DB instance; stores AirNotifier data."""
        return self.application.masterdb

    @property
    def apnsconnections(self):
        """APNs connections."""
        return self.application.services['apns']

    @property
    def gcmconnections(self):
        """GCM connections."""
        return self.application.services['gcm']

    @property
    def wnsconnections(self):
        """WNS connections."""
        return self.application.services['wns']

    @property
    def mpnsconnections(self):
        """MPNS connections."""
        return self.application.services['mpns']

    @property
    def smsconnections(self):
        """SMS connections."""
        return self.application.services['sms']

    def set_default_headers(self):
        self.set_header('Content-Type', 'application/json; charset=utf-8')
        self.set_header('X-Powered-By', 'AirNotifier/1.0')

    def set_headers(self, headers):
        """Apply a dict of extra response headers."""
        for name in headers:
            self.set_header(name, headers[name])

    def send_response(self, status_code=200, data=None, headers=None):
        """Serialize *data* as JSON and finish the request."""
        self.set_status(status_code, None)
        if headers is not None:
            self.set_headers(headers)
        if data:
            data = json.dumps(data, default=json_default)
        else:
            data = ""
        self.finish(data)

    def finish(self, chunk=None):
        # Record the end time alongside _time_start for request timing.
        super(APIBaseHandler, self).finish(chunk)
        self._time_end = time.time()

    def add_to_log(self, action, info=None, level="info"):
        """Append a sanitized entry to the app's ``logs`` collection."""
        log = {}
        log['action'] = strip_tags(action)
        log['info'] = strip_tags(info)
        log['level'] = strip_tags(level)
        log['created'] = int(time.time())
        self.db.logs.insert(log, safe=True)

    def json_decode(self, text):
        """Decode a JSON body, retrying after URL-unquoting the payload.

        NOTE(review): the fallback uses urllib.unquote_plus, which exists
        only on Python 2 (urllib.parse.unquote_plus on Python 3) --
        confirm the target runtime before porting.
        """
        try:
            data = json.loads(text)
        except Exception:
            data = json.loads(urllib.unquote_plus(text))
        return data
class EntityBuilder(object):
    """Factory helpers for building MongoDB entity documents."""

    @staticmethod
    def build_token(token, device, appname, channel, created=None):
        """Build a token document for the ``tokens`` collection.

        :param token: device token string
        :param device: device type (e.g. "ios", "android")
        :param appname: application short name
        :param channel: subscription channel name
        :param created: creation timestamp; defaults to the current time.
            (FIX: the old default ``created=time.time()`` was evaluated
            once at import time, stamping every token with the same
            stale value.)
        """
        if created is None:
            created = time.time()
        tokenentity = {}
        tokenentity['device'] = device
        tokenentity['appname'] = appname
        tokenentity['token'] = token
        tokenentity['channel'] = channel
        tokenentity['created'] = created
        return tokenentity
@route(r"/tokens/([^/]+)")
class TokenV1Handler(APIBaseHandler):
    """CRUD endpoints for device tokens (``/tokens/<token>``)."""

    def delete(self, token):
        """Delete a token.

        Responds 404 when the token is unknown, 200 on success.
        """
        # Access-key permissions are checked via the API_PERMISSIONS bitmask.
        if not self.can("delete_token"):
            self.send_response(FORBIDDEN, dict(error="No permission to delete token"))
            return
        try:
            result = self.db.tokens.remove({'token': token}, safe=True)
            if result['n'] == 0:
                # NOTE(review): "does't" is a typo, kept as-is in case API
                # consumers match on the exact message.
                self.send_response(NOT_FOUND, dict(status='Token does\'t exist'))
            else:
                self.send_response(OK, dict(status='deleted'))
        except Exception as ex:
            self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))

    def post(self, devicetoken):
        """Register a new token (upsert on device/token/appname)."""
        if not self.can("create_token"):
            self.send_response(FORBIDDEN, dict(error="No permission to create token"))
            return
        device = self.get_argument('device', DEVICE_TYPE_IOS).lower()
        if device == DEVICE_TYPE_IOS:
            # An APNs device token is a 64-character hex string.
            if len(devicetoken) != 64:
                self.send_response(BAD_REQUEST, dict(error='Invalid token'))
                return
            try:
                binascii.unhexlify(devicetoken)
            except Exception as ex:
                self.send_response(BAD_REQUEST, dict(error='Invalid token'))
                # FIX: previously fell through here and upserted the
                # invalid token anyway after finishing the error response.
                return
        channel = self.get_argument('channel', 'default')
        token = EntityBuilder.build_token(devicetoken, device, self.appname, channel)
        try:
            result = self.db.tokens.update({'device': device, 'token': devicetoken, 'appname': self.appname}, token, safe=True, upsert=True)
            # Example result document:
            # {u'updatedExisting': True, u'connectionId': 47, u'ok': 1.0, u'err': None, u'n': 1}
            if result['updatedExisting']:
                self.send_response(OK, dict(status='token exists'))
                self.add_to_log('Token exists', devicetoken)
            else:
                self.send_response(OK, dict(status='ok'))
                self.add_to_log('Add token', devicetoken)
        except Exception as ex:
            self.add_to_log('Cannot add token', devicetoken, "warning")
            self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
@route(r"/notification/")
class NotificationHandler(APIBaseHandler):
    """v1 API endpoint for pushing a notification to a single device token."""

    def post(self):
        """ Send notifications """
        if not self.can("send_notification"):
            self.send_response(FORBIDDEN, dict(error="No permission to send notification"))
            return
        if not self.token:
            self.send_response(BAD_REQUEST, dict(error="No token provided"))
            return
        # iOS and Android shared params (splitlines() strips any line endings)
        alert = ''.join(self.get_argument('alert').splitlines())
        device = self.get_argument('device', DEVICE_TYPE_IOS).lower()
        channel = self.get_argument('channel', 'default')
        # Android-only
        collapse_key = self.get_argument('collapse_key', '')
        # iOS-only
        sound = self.get_argument('sound', None)
        badge = self.get_argument('badge', None)
        token = self.db.tokens.find_one({'token': self.token})
        if not token:
            # Unknown token: auto-register it if the access key is allowed to.
            token = EntityBuilder.build_token(self.token, device, self.appname, channel)
            if not self.can("create_token"):
                self.send_response(BAD_REQUEST, dict(error="Unknown token and you have no permission to create"))
                return
            try:
                # TODO check permission to insert
                self.db.tokens.insert(token, safe=True)
            except Exception as ex:
                self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
                # BUGFIX: abort instead of pushing after a failed insert
                # (the old code kept going and tried to respond twice).
                return
        knownparams = ['alert', 'sound', 'badge', 'token', 'device', 'collapse_key']
        # Everything not in knownparams is forwarded as a custom payload param.
        customparams = {}
        allparams = {}
        for name in self.request.arguments:
            allparams[name] = self.get_argument(name)
            if name not in knownparams:
                customparams[name] = self.get_argument(name)
        logmessage = 'Message length: %s, Access key: %s' % (len(alert), self.appkey)
        self.add_to_log('%s notification' % self.appname, logmessage)
        if device == DEVICE_TYPE_IOS:
            pl = PayLoad(alert=alert, sound=sound, badge=badge, identifier=0, expiry=None, customparams=customparams)
            # ``has_key`` replaced with ``in`` (works on Python 2 and 3).
            if self.app['shortname'] not in self.apnsconnections:
                # TODO: add message to queue in MongoDB
                self.send_response(INTERNAL_SERVER_ERROR, dict(error="APNs is offline"))
                return
            count = len(self.apnsconnections[self.app['shortname']])
            # Pick one of the pooled APNs connections at random.
            random.seed(time.time())
            instanceid = random.randint(0, count - 1)
            conn = self.apnsconnections[self.app['shortname']][instanceid]
            # do the job
            try:
                conn.send(self.token, pl)
                self.send_response(OK)
            except Exception as ex:
                self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
        elif device == DEVICE_TYPE_ANDROID:
            try:
                gcm = self.gcmconnections[self.app['shortname']][0]
                # Base payload is the alert; custom params override on clash
                # (same precedence as the old dict(items + items) idiom).
                data = {'message': alert}
                data.update(customparams)
                response = gcm.send([self.token], data=data, collapse_key=collapse_key, ttl=3600)
                responsedata = response.json()
                if responsedata['failure'] == 0:
                    self.send_response(OK)
                else:
                    # BUGFIX: the old code sent no response at all when GCM
                    # reported failures, leaving the HTTP request hanging.
                    self.send_response(INTERNAL_SERVER_ERROR, dict(error=responsedata))
            except GCMUpdateRegIDsException as ex:
                self.send_response(OK)
            except GCMInvalidRegistrationException as ex:
                self.send_response(BAD_REQUEST, dict(error=str(ex), regids=ex.regids))
            except GCMNotRegisteredException as ex:
                self.send_response(BAD_REQUEST, dict(error=str(ex), regids=ex.regids))
            except GCMException as ex:
                self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))
        else:
            self.send_response(BAD_REQUEST, dict(error='Invalid device type'))
@route(r"/users")
@route(r"/api/v2/users")
class UsersHandler(APIBaseHandler):
    """Handle users
    - Take application ID and secret
    - Create user
    """

    def post(self):
        """Register user
        """
        username = self.get_argument('username')
        password = self.get_argument('password')
        email = self.get_argument('email')
        now = int(time.time())
        # NOTE(review): the password is persisted in plain text here -- it
        # should be hashed (e.g. bcrypt/PBKDF2) before being stored.
        user = {
            'username': username,
            'password': password,
            'email': email,
            'created': now,
        }
        try:
            cursor = self.db.users.find_one({'username': username})
            if cursor:
                self.send_response(BAD_REQUEST, dict(error='Username already exists'))
            else:
                userid = self.db.users.insert(user, safe=True)
                self.add_to_log('Add user', username)
                self.send_response(OK, {'userid': str(userid)})
        except Exception as ex:
            self.send_response(INTERNAL_SERVER_ERROR, dict(error=str(ex)))

    def get(self):
        """Query users, optionally filtered by a JSON ``where`` clause.
        """
        where = self.get_argument('where', None)
        if not where:
            data = {}
        else:
            try:
                # unpack query conditions
                data = self.json_decode(where)
            except Exception as ex:
                self.send_response(BAD_REQUEST, dict(error=str(ex)))
                # BUGFIX: previously fell through with ``data`` unbound,
                # raising NameError after the error response was sent.
                return
        users = list(self.db.users.find(data))
        self.send_response(OK, users)
@route(r"/users/([^/]+)")
@route(r"/api/v2/users/([^/]+)")
class UserHandler(APIBaseHandler):
    """Single-user endpoint; delete/put are stubs and get is incomplete."""
    def delete(self, userId):
        """ Delete user """
        # Not implemented yet.
        pass
    def put(self, userId):
        """ Update """
        # Not implemented yet.
        pass
    def get(self, userId):
        """Get user details by ID
        """
        username = self.get_argument('username', None)
        email = self.get_argument('email', None)
        userid = self.get_argument('userid', None)
        # Build a MongoDB filter from whichever optional arguments were given.
        conditions = {}
        if username:
            conditions['username'] = username
        if email:
            conditions['email'] = email
        if userid:
            conditions['id'] = userid
        # NOTE(review): ``conditions`` is built but never used -- no query is
        # run and no response is sent, so this handler is incomplete as-is.
@route(r"/objects/([^/]+)/([^/]+)")
class ObjectHandler(APIBaseHandler):
    """Object Handler
    http://airnotifier.xxx/objects/cars/4f794f7329ddda1cb9000000
    """
    def get(self, classname, objectId):
        """Get object by ID
        """
        # ``classname`` selects the collection via the ``collection`` property.
        self.classname = classname
        # ObjectId() raises for malformed ids (surfaces as an unhandled error).
        self.objectid = ObjectId(objectId)
        doc = self.db[self.collection].find_one({'_id': self.objectid})
        self.send_response(OK, doc)
        return
    def delete(self, classname, objectId):
        """Delete a object
        """
        self.classname = classname
        self.objectid = ObjectId(objectId)
        result = self.db[self.collection].remove({'_id': self.objectid}, safe=True)
        self.send_response(OK, dict(result=result))
    def put(self, classname, objectId):
        """Update a object
        """
        self.classname = classname
        data = self.json_decode(self.request.body)
        self.objectid = ObjectId(objectId)
        result = self.db[self.collection].update({'_id': self.objectid}, data, safe=True)
        # NOTE(review): unlike get()/delete(), no response is sent here, so
        # the client request is left unanswered -- likely an omission.
    @property
    def collection(self):
        # Collection name is the request's class name with the configured prefix.
        collectionname = "%s%s" % (options.collectionprefix, self.classname)
        return collectionname
@route(r"/objects/([^/]+)")
@route(r"/api/v2/objects/([^/]+)")
class ClassHandler(APIBaseHandler):
    """Object Handler
    http://airnotifier.xxx/objects/cars
    """
    @property
    def collection(self):
        """Return the prefixed collection name, registering it on first use."""
        cursor = self.db.objects.find_one({'collection': self.classname})
        if not cursor:
            # First time this class is seen: record it in the registry.
            col = {}
            col['collection'] = self.classname
            col['created'] = int(time.time())
            self.add_to_log('Register collection', self.classname)
            self.db.objects.insert(col, safe=True)
        collectionname = "%s%s" % (options.collectionprefix, self.classname)
        return collectionname

    def get(self, classname):
        """Query a collection, optionally filtered by a JSON ``where`` clause.
        """
        self.classname = classname
        where = self.get_argument('where', None)
        if not where:
            data = {}
        else:
            try:
                # unpack query conditions
                data = self.json_decode(where)
            except Exception as ex:
                self.send_response(BAD_REQUEST, dict(error=str(ex)))
                # BUGFIX: previously fell through with ``data`` unbound,
                # raising NameError after the error response was sent.
                return
        results = list(self.db[self.collection].find(data))
        self.send_response(OK, results)

    def post(self, classname):
        """Insert an object into a collection.
        """
        self.classname = classname
        try:
            data = self.json_decode(self.request.body)
        except Exception as ex:
            # Consistent error payload (the old code passed the raw
            # exception object instead of dict(error=...)).
            self.send_response(BAD_REQUEST, dict(error=str(ex)))
            # BUGFIX: previously fell through with ``data`` unbound.
            return
        self.add_to_log('Add object to %s' % self.classname, data)
        objectId = self.db[self.collection].insert(data, safe=True)
        self.send_response(OK, dict(objectId=objectId))
@route(r"/accesskeys/")
class AccessKeysV1Handler(APIBaseHandler):
    """v1 API endpoint for creating access keys (moodle.net-verified sites)."""

    def initialize(self):
        # Key creation itself is not authenticated by an existing access key.
        self.accesskeyrequired = False
        self._time_start = time.time()

    def post(self):
        """Create access key
        """
        result = self.verify_request()
        if not result:
            self.send_response(FORBIDDEN, dict(error="Site not registered on moodle.net"))
            return
        if not self.can('create_accesskey'):
            self.send_response(FORBIDDEN, dict(error="No permission to create accesskey"))
            return
        key = {}
        key['contact'] = self.get_argument('contact', '')
        key['description'] = self.get_argument('description', '')
        key['created'] = int(time.time())
        # 0b1111: grant all four permissions to the new key.
        key['permission'] = API_PERMISSIONS['create_token'][0] | API_PERMISSIONS['delete_token'][0] \
                | API_PERMISSIONS['send_notification'][0] | API_PERMISSIONS['send_broadcast'][0]
        key['key'] = md5(str(uuid.uuid4())).hexdigest()
        self.db.keys.insert(key)
        self.send_response(OK, dict(accesskey=key['key']))

    def verify_request(self):
        """Check the site's registration on moodle.net; True if registered."""
        huburl = "http://moodle.net/local/sitecheck/check.php"
        mdlurl = self.get_argument('url', '')
        mdlsiteid = self.get_argument('siteid', '')
        params = {'siteid': mdlsiteid, 'url': mdlurl}
        response = requests.get(huburl, params=params)
        # The check endpoint returns "0" for unregistered sites.
        # NOTE(review): int() raises ValueError on a malformed response body;
        # that propagates as an unhandled error, same as before.
        return int(response.text) != 0
@route(r"/broadcast/")
class BroadcastV1Handler(APIBaseHandler):
    """v1 API endpoint: broadcast a message to every token on a channel."""
    def post(self):
        if not self.can('send_broadcast'):
            self.send_response(FORBIDDEN, dict(error="No permission to send broadcast"))
            return
        # the channel to be broadcast to
        channel = self.get_argument('channel', 'default')
        # iOS and Android shared params
        alert = ''.join(self.get_argument('alert').splitlines())
        # Android
        collapse_key = self.get_argument('collapse_key', '')
        # iOS
        sound = self.get_argument('sound', None)
        badge = self.get_argument('badge', None)
        # NOTE(review): collapse_key/sound/badge are parsed but never passed
        # to send_broadcast below -- presumably handled elsewhere; confirm.
        self.add_to_log('%s broadcast' % self.appname, alert, "important")
        self.application.send_broadcast(self.appname, self.db, channel, alert)
        # NOTE(review): self._time_start is assumed to be set by
        # APIBaseHandler; it is not initialized in this class -- confirm.
        delta_t = time.time() - self._time_start
        logging.info("Broadcast took time: %sms" % (delta_t * 1000))
        self.send_response(OK, dict(status='ok'))
| bsd-3-clause |
bigplus/rtv | rtv/docs.py | 1 | 2611 | from .__version__ import __version__
__all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE',
'SUBMISSION_FILE', 'COMMENT_EDIT_FILE']
AGENT = """\
desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\
""".format(__version__)
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
"""
AUTH = """\
Authenticating is required to vote and leave comments. If only a username is
given, the program will display a secure prompt to enter a password.
"""
CONTROLS = """
Controls
--------
RTV currently supports browsing both subreddits and individual submissions.
In each mode the controls are slightly different. In subreddit mode you can
browse through the top submissions on either the front page or a specific
subreddit. In submission mode you can view the self text for a submission and
browse comments.
"""
HELP = """
Basic Commands
`j/k` or `UP/DOWN` : Move the cursor up/down
`m/n` or `PgUp/PgDn`: Jump to the previous/next page
`o` or `ENTER` : Open the selected item as a webpage
`r` or `F5` : Refresh page content
`u` : Log in or switch accounts
`?` : Show the help screen
`q` : Quit
Authenticated Commands
`a/z` : Upvote/downvote
`c` : Compose a new post or comment
`e` : Edit an existing post or comment
`d` : Delete an existing post or comment
`i` : Display new messages prompt
`s` : Open/close subscribed subreddits list
Subreddit Mode
`l` or `RIGHT` : Enter the selected submission
`/` : Open a prompt to switch subreddits
`f` : Open a prompt to search the current subreddit
Submission Mode
`h` or `LEFT` : Return to subreddit mode
`SPACE` : Fold the selected comment, or load additional comments
"""
COMMENT_FILE = u"""
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Replying to {author}'s {type}
{content}
"""
COMMENT_EDIT_FILE = u"""{content}
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Editing your comment
"""
SUBMISSION_FILE = u"""{content}
# Please enter your submission. Lines starting with '#' will be ignored,
# and an empty field aborts the submission.
#
# The first line will be interpreted as the title
# The following lines will be interpreted as the content
#
# Posting to {name}
"""
| mit |
ZHAW-INES/rioxo-uClinux-dist | user/python/python-2.4.4/Lib/xml/dom/__init__.py | 327 | 3998 | """W3C Document Object Model implementation for Python.
The Python mapping of the Document Object Model is documented in the
Python Library Reference in the section on the xml.dom package.
This package contains the following modules:
minidom -- A simple implementation of the Level 1 DOM with namespace
support added (based on the Level 2 specification) and other
minor Level 2 functionality.
pulldom -- DOM builder supporting on-demand tree-building for selected
subtrees of the document.
"""
class Node:
    """Class giving the NodeType constants."""
    # DOM implementations may use this as a base class for their own
    # Node implementations. If they don't, the constants defined here
    # should still be used as the canonical definitions as they match
    # the values given in the W3C recommendation. Client code can
    # safely refer to these values in all tests of Node.nodeType
    # values.
    ELEMENT_NODE = 1
    ATTRIBUTE_NODE = 2
    TEXT_NODE = 3
    CDATA_SECTION_NODE = 4
    ENTITY_REFERENCE_NODE = 5
    ENTITY_NODE = 6
    PROCESSING_INSTRUCTION_NODE = 7
    COMMENT_NODE = 8
    DOCUMENT_NODE = 9
    DOCUMENT_TYPE_NODE = 10
    DOCUMENT_FRAGMENT_NODE = 11
    NOTATION_NODE = 12
#ExceptionCode
INDEX_SIZE_ERR = 1
DOMSTRING_SIZE_ERR = 2
HIERARCHY_REQUEST_ERR = 3
WRONG_DOCUMENT_ERR = 4
INVALID_CHARACTER_ERR = 5
NO_DATA_ALLOWED_ERR = 6
NO_MODIFICATION_ALLOWED_ERR = 7
NOT_FOUND_ERR = 8
NOT_SUPPORTED_ERR = 9
INUSE_ATTRIBUTE_ERR = 10
INVALID_STATE_ERR = 11
SYNTAX_ERR = 12
INVALID_MODIFICATION_ERR = 13
NAMESPACE_ERR = 14
INVALID_ACCESS_ERR = 15
VALIDATION_ERR = 16
class DOMException(Exception):
    """Abstract base class for DOM exceptions.
    Exceptions with specific codes are specializations of this class."""
    def __init__(self, *args, **kw):
        # The base class is abstract: only the code-specific subclasses
        # below may be instantiated.
        if self.__class__ is DOMException:
            raise RuntimeError(
                "DOMException should not be instantiated directly")
        Exception.__init__(self, *args, **kw)
    def _get_code(self):
        # Accessor for the class-level ``code`` set by each subclass.
        return self.code
# One concrete DOMException specialization per W3C DOM exception code;
# each binds the matching ExceptionCode constant as its ``code``.
class IndexSizeErr(DOMException):
    code = INDEX_SIZE_ERR
class DomstringSizeErr(DOMException):
    code = DOMSTRING_SIZE_ERR
class HierarchyRequestErr(DOMException):
    code = HIERARCHY_REQUEST_ERR
class WrongDocumentErr(DOMException):
    code = WRONG_DOCUMENT_ERR
class InvalidCharacterErr(DOMException):
    code = INVALID_CHARACTER_ERR
class NoDataAllowedErr(DOMException):
    code = NO_DATA_ALLOWED_ERR
class NoModificationAllowedErr(DOMException):
    code = NO_MODIFICATION_ALLOWED_ERR
class NotFoundErr(DOMException):
    code = NOT_FOUND_ERR
class NotSupportedErr(DOMException):
    code = NOT_SUPPORTED_ERR
class InuseAttributeErr(DOMException):
    code = INUSE_ATTRIBUTE_ERR
class InvalidStateErr(DOMException):
    code = INVALID_STATE_ERR
class SyntaxErr(DOMException):
    code = SYNTAX_ERR
class InvalidModificationErr(DOMException):
    code = INVALID_MODIFICATION_ERR
class NamespaceErr(DOMException):
    code = NAMESPACE_ERR
class InvalidAccessErr(DOMException):
    code = INVALID_ACCESS_ERR
class ValidationErr(DOMException):
    code = VALIDATION_ERR
class UserDataHandler:
    """Class giving the operation constants for UserDataHandler.handle()."""
    # Based on DOM Level 3 (WD 9 April 2002)
    # These values are passed as the operation argument of handle().
    NODE_CLONED = 1
    NODE_IMPORTED = 2
    NODE_DELETED = 3
    NODE_RENAMED = 4
XML_NAMESPACE = "http://www.w3.org/XML/1998/namespace"
XMLNS_NAMESPACE = "http://www.w3.org/2000/xmlns/"
XHTML_NAMESPACE = "http://www.w3.org/1999/xhtml"
EMPTY_NAMESPACE = None
EMPTY_PREFIX = None
from domreg import getDOMImplementation,registerDOMImplementation
| gpl-2.0 |
Johnzero/OE7 | openerp/addons-modules/product/partner.py | 52 | 1771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class res_partner(osv.osv):
    # Extends the core res.partner model (same _name with _inherit) to add
    # a per-partner sale pricelist as a company-dependent property field.
    _name = 'res.partner'
    _inherit = 'res.partner'
    _columns = {
        'property_product_pricelist': fields.property(
            'product.pricelist',
            type='many2one',
            relation='product.pricelist',
            domain=[('type','=','sale')],
            string="Sale Pricelist",
            view_load=True,
            help="This pricelist will be used, instead of the default one, for sales to the current partner"),
    }
    def _commercial_fields(self, cr, uid, context=None):
        # Propagate the pricelist to child contacts of the commercial entity
        # in addition to the fields handled by the parent implementation.
        return super(res_partner, self)._commercial_fields(cr, uid, context=context) + ['property_product_pricelist']
res_partner()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
gmacchi93/serverInfoParaguay | apps/venv/lib/python2.7/site-packages/django/contrib/gis/gdal/datasource.py | 82 | 4913 | """
DataSource is a wrapper for the OGR Data Source object, which provides
an interface for reading vector geometry data from many different file
formats (including ESRI shapefiles).
When instantiating a DataSource object, use the filename of a
GDAL-supported data source. For example, a SHP file or a
TIGER/Line file from the government.
The ds_driver keyword is used internally when a ctypes pointer
is passed in directly.
Example:
ds = DataSource('/home/foo/bar.shp')
for layer in ds:
for feature in layer:
# Getting the geometry for the feature.
g = feature.geom
# Getting the 'description' field for the feature.
desc = feature['description']
# We can also increment through all of the fields
# attached to this feature.
for field in feature:
# Get the name of the field (e.g. 'description')
nm = field.name
# Get the type (integer) of the field, e.g. 0 => OFTInteger
t = field.type
# Returns the value the field; OFTIntegers return ints,
# OFTReal returns floats, all else returns string.
val = field.value
"""
# ctypes prerequisites.
from ctypes import byref
# The GDAL C library, OGR exceptions, and the Layer object.
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.driver import Driver
from django.contrib.gis.gdal.error import GDALException, OGRIndexError
from django.contrib.gis.gdal.layer import Layer
# Getting the ctypes prototypes for the DataSource.
from django.contrib.gis.gdal.prototypes import ds as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
from django.utils.six.moves import range
# For more information, see the OGR C API source code:
# http://www.gdal.org/ogr/ogr__api_8h.html
#
# The OGR_DS_* routines are relevant here.
class DataSource(GDALBase):
    "Wraps an OGR Data Source object."
    def __init__(self, ds_input, ds_driver=False, write=False, encoding='utf-8'):
        # The write flag.
        if write:
            self._write = 1
        else:
            self._write = 0
        # See also http://trac.osgeo.org/gdal/wiki/rfc23_ogr_unicode
        self.encoding = encoding
        Driver.ensure_registered()
        if isinstance(ds_input, six.string_types):
            # The data source driver is a void pointer.
            ds_driver = Driver.ptr_type()
            try:
                # OGROpen will auto-detect the data source type.
                ds = capi.open_ds(force_bytes(ds_input), self._write, byref(ds_driver))
            except GDALException:
                # Making the error message more clear rather than something
                # like "Invalid pointer returned from OGROpen".
                raise GDALException('Could not open the datasource at "%s"' % ds_input)
        elif isinstance(ds_input, self.ptr_type) and isinstance(ds_driver, Driver.ptr_type):
            ds = ds_input
        else:
            raise GDALException('Invalid data source input type: %s' % type(ds_input))
        if ds:
            self.ptr = ds
            self.driver = Driver(ds_driver)
        else:
            # Raise an exception if the returned pointer is NULL
            raise GDALException('Invalid data source file "%s"' % ds_input)
    def __del__(self):
        "Destroys this DataSource object."
        # Guard against interpreter teardown, where module globals (capi)
        # may already have been cleared.
        if self._ptr and capi:
            capi.destroy_ds(self._ptr)
    def __iter__(self):
        "Allows for iteration over the layers in a data source."
        for i in range(self.layer_count):
            yield self[i]
    def __getitem__(self, index):
        "Allows use of the index [] operator to get a layer at the index."
        # Layers may be addressed by name (string) or by position (int).
        if isinstance(index, six.string_types):
            l = capi.get_layer_by_name(self.ptr, force_bytes(index))
            if not l:
                raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
        elif isinstance(index, int):
            if index < 0 or index >= self.layer_count:
                raise OGRIndexError('index out of range')
            l = capi.get_layer(self._ptr, index)
        else:
            raise TypeError('Invalid index type: %s' % type(index))
        return Layer(l, self)
    def __len__(self):
        "Returns the number of layers within the data source."
        return self.layer_count
    def __str__(self):
        "Returns OGR GetName and Driver for the Data Source."
        return '%s (%s)' % (self.name, str(self.driver))
    @property
    def layer_count(self):
        "Returns the number of layers in the data source."
        return capi.get_layer_count(self._ptr)
    @property
    def name(self):
        "Returns the name of the data source."
        name = capi.get_ds_name(self._ptr)
        return force_text(name, self.encoding, strings_only=True)
tum-pbs/PhiFlow | tests/commit/physics/test_flip.py | 1 | 2660 | from unittest import TestCase
from phi.flow import *
from phi.physics._boundaries import Domain, STICKY
def step(particles, domain, dt, accessible):
    """Advance the FLIP simulation by one time step; returns the new state dict."""
    # Scatter particle velocities onto a staggered grid.
    velocity = particles >> domain.staggered_grid()
    # Apply gravity, then project the grid velocity to be divergence-free.
    div_free_velocity, _, occupied = \
        flip.make_incompressible(velocity + dt * math.tensor([0, -9.81]), domain, particles, accessible)
    # Transfer the updated grid velocities back onto the particles.
    particles = flip.map_velocity_to_particles(particles, div_free_velocity, occupied, previous_velocity_grid=velocity, viscosity=0.9)
    particles = advect.runge_kutta_4(particles, div_free_velocity, dt, accessible=accessible, occupied=occupied)
    # Push particles out of obstacles and back inside the domain bounds.
    particles = flip.respect_boundaries(particles, domain, [])
    return dict(particles=particles, domain=domain, dt=dt, accessible=accessible)
class FlipTest(TestCase):
    def test_falling_block_short(self):
        """ Tests if a block of liquid has a constant shape during free fall for 4 steps. """
        DOMAIN = Domain(x=32, y=128, boundaries=STICKY, bounds=Box[0:32, 0:128])
        DT = 0.05
        ACCESSIBLE = DOMAIN.accessible_mask([], type=StaggeredGrid)
        PARTICLES = DOMAIN.distribute_points(union(Box[12:20, 110:120])) * (0, -10)
        # Bounding-box extent of the particle cloud (max - min per axis).
        extent = math.max(PARTICLES.points, dim='points') - math.min(PARTICLES.points, dim='points')
        state = dict(particles=PARTICLES, domain=DOMAIN, dt=DT, accessible=ACCESSIBLE)
        for i in range(4):
            state = step(**state)
            curr_extent = math.max(state['particles'].points, dim='points') - \
                          math.min(state['particles'].points, dim='points')
            math.assert_close(curr_extent, extent)  # shape of falling block stays the same
            assert math.max(state['particles'].points, dim='points')[1] < \
                   math.max(PARTICLES.points, dim='points')[1]  # block really falls
            extent = curr_extent
    def test_respect_boundaries(self):
        """ Tests if particles really get pushed outside of obstacles and domain boundaries. """
        SIZE = 64
        DOMAIN = Domain(x=SIZE, y=SIZE, boundaries=STICKY, bounds=Box[0:SIZE, 0:SIZE])
        OBSTACLE = Box[20:40, 10:30]
        PARTICLES = DOMAIN.distribute_points(union(Box[20:38, 20:50], Box[50:60, 10:50]), center=True) * (10, 0)
        # Advecting by dt=1 pushes some particles into the obstacle/outside.
        PARTICLES = advect.points(PARTICLES, PARTICLES, 1)
        assert math.any(OBSTACLE.lies_inside(PARTICLES.points))
        assert math.any((~DOMAIN.bounds).lies_inside(PARTICLES.points))
        PARTICLES = flip.respect_boundaries(PARTICLES, DOMAIN, [OBSTACLE], offset=0.1)
        assert math.all(~OBSTACLE.lies_inside(PARTICLES.points))
        assert math.all(~(~DOMAIN.bounds).lies_inside(PARTICLES.points))
| mit |
spthaolt/VTK | Examples/Infovis/Python/tables_adv.py | 8 | 4425 | """
This file contains Python code illustrating the creation and manipulation of
vtkTable objects.
"""
from vtk import *
#------------------------------------------------------------------------------
# Some Helper Functions
#------------------------------------------------------------------------------
def add_row_to_vtkTable(vtk_table, new_row=None):
    """Append ``new_row`` to ``vtk_table``, one value per column.

    Returns True on success; returns False (after printing a diagnostic)
    when no row is given or its length does not match the column count.
    """
    # Just a couple of sanity checks (``is None`` instead of ``== None``;
    # print() form works on both Python 2 and 3).
    if new_row is None:
        print("ERROR: No data provided for new table row.")
        return False
    if len(new_row) != vtk_table.GetNumberOfColumns():
        print("ERROR: Number of entries in new row does not match # of columns in table.")
        return False
    for i in range(vtk_table.GetNumberOfColumns()):
        vtk_table.GetColumn(i).InsertNextValue(new_row[i])
    return True
def get_vtkTableHeaders(vtk_table):
    """Return the column names of *vtk_table* as a Python list."""
    column_count = vtk_table.GetNumberOfColumns()
    return [vtk_table.GetColumn(index).GetName() for index in range(column_count)]
def get_vtkTableRow(vtk_table, row_number):
    """Return row *row_number* of *vtk_table* as a Python list."""
    column_count = vtk_table.GetNumberOfColumns()
    return [vtk_table.GetColumn(index).GetValue(row_number)
            for index in range(column_count)]
def get_vtkTableAsDelimitedText(vtk_table, sep="\t"):
    """Return a delimited-text rendering of *vtk_table* (tab-separated by default)."""
    def _join(values):
        # Append the separator after every value, then strip trailing
        # separators -- mirrors the original rstrip-based formatting exactly.
        return "".join("%s%s" % (value, sep) for value in values).rstrip(sep)
    lines = [_join(get_vtkTableHeaders(vtk_table))]
    for row_index in range(vtk_table.GetNumberOfRows()):
        row_values = get_vtkTableRow(vtk_table, row_index)
        lines.append(_join(str(value) for value in row_values))
    return "".join(line + "\n" for line in lines)
#------------------------------------------------------------------------------
# Script Entry Point (i.e., main() )
#------------------------------------------------------------------------------
# NOTE: Python 2 script -- uses print statements throughout.
if __name__ == "__main__":
    """ Main entry point of this python script """
    #----------------------------------------------------------
    # Create an empty table
    T = vtkTable()
    #----------------------------------------------------------
    # Create Column 1 (IDs)
    col1 = vtkIntArray()
    col1.SetName("ID")
    for i in range(1, 8):
        col1.InsertNextValue(i)
    T.AddColumn(col1)
    #----------------------------------------------------------
    # Create Column 2 (Names)
    namesList = ['Bob', 'Ann', 'Sue', 'Bill', 'Joe', 'Jill', 'Rick']
    col2 = vtkStringArray()
    col2.SetName("Name")
    for val in namesList:
        col2.InsertNextValue(val)
    T.AddColumn(col2)
    #----------------------------------------------------------
    # Create Column 3 (Ages)
    agesList = [12, 25, 72, 11, 31, 36, 32]
    col3 = vtkIntArray()
    col3.SetName("Age")
    for val in agesList:
        col3.InsertNextValue(val)
    T.AddColumn(col3)
    #----------------------------------------------------------
    # Add a row to the table
    new_row = [8, "Luis", 68]
    # we can't really use vtkTable.InsertNextRow() since it takes a vtkVariantArray
    # as its argument (and the SetValue, etc. methods on that are not wrapped into
    # Python) We can just append to each of the column arrays.
    if not add_row_to_vtkTable(T, new_row):
        print "Whoops!"
    #----------------------------------------------------------
    # Call PrintSelf() on a VTK object is done simply by printing the object
    print 25*"="
    print "Calling PrintSelf():"
    print T
    #----------------------------------------------------------
    # Here are a couple of ways to print out our table in Python using
    # the helper functions that appear earlier in this script.
    # The accessor methods used here can be adapted to do more interesting
    # things with a vtkTable from within Python.
    # print out our table
    print 25*"="
    print "Rows as lists:"
    print get_vtkTableHeaders(T)
    for i in range(T.GetNumberOfRows()):
        print get_vtkTableRow(T,i)
    print ""
    print 25*"="
    print "Delimited text:"
    print get_vtkTableAsDelimitedText(T)
    print "vtkTable Python Example Completed."
lintzc/gpdb | src/test/tinc/tincrepo/mpp/lib/PSQL.py | 3 | 21726 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gppylib.commands.base import Command
import inspect
import os
import time
import tinctest
from tinctest.main import TINCException
from tinctest.lib.timeout import Timeout
class PSQLException(TINCException):
    """Raised for psql invocation errors (e.g. a missing SQL input file)."""
    pass
class PSQL(Command):
"""This is a wrapper for running sql command."""
    def __init__(self, sql_file = None, sql_cmd = None, out_file = None, output_to_file = True,
                 dbname = None,host = None, port = None, username = None, password = None,
                 PGOPTIONS = None, flags = '-a', isODBC = None,
                 timeout = 900, background = False):
        """Build the shell command string that runs the given SQL file or
        SQL command through psql, then hand it to the Command base class."""
        # NOTE(review): this rebinds a *class* attribute (shared by all
        # PSQL instances), not an instance attribute.
        PSQL.propagate_env_map = {}
        if not dbname:
            dbname_option = ""
        else:
            dbname_option = "-d %s" % (dbname)
        if not username:
            username_option = ""
        else:
            username_option = "-U %s" % (username)
        if password:
            # Propagated into the subprocess environment (psql reads PGPASSWORD).
            PSQL.propagate_env_map = {'PGPASSWORD': password}
        if not PGOPTIONS:
            PGOPTIONS = ""
        else:
            PGOPTIONS = "PGOPTIONS='%s'" % PGOPTIONS
        if not host:
            hostname_option = ""
        else:
            hostname_option = "-h %s" % (host)
        if not port:
            port_option = ""
        else:
            port_option = "-p %s" % (port)
        if sql_file:
            if not os.path.exists(sql_file):
                raise PSQLException('SQL file %s does not exist. ' %sql_file)
            cmd_str = '%s psql --pset pager=off %s %s %s %s %s -f %s' \
                      % (PGOPTIONS, dbname_option, username_option, hostname_option, port_option,
                         flags, sql_file)
            if not out_file:
                # Default the output file to <name>.out next to the input file.
                out_file = sql_file.replace('.sql', '.out')
            if output_to_file:
                # NOTE(review): '&>' already redirects both streams; the
                # trailing '2>&1' is redundant but harmless.
                cmd_str = "%s &> %s 2>&1" % (cmd_str, out_file)
        else:
            assert sql_cmd is not None
            cmd_str = "%s psql --pset pager=off %s %s %s %s %s -c \"%s\"" \
                      % (PGOPTIONS,dbname_option,username_option,hostname_option,
                         port_option, flags, sql_cmd)
            if output_to_file and out_file:
                cmd_str = "%s &> %s 2>&1" % (cmd_str, out_file)
        if background:
            # Run psql asynchronously; the caller is responsible for it.
            cmd_str = "%s &" %cmd_str
        Command.__init__(self, 'run sql', cmd_str)
    @staticmethod
    def run_sql_file(sql_file, out_file = None, output_to_file = True,
                     dbname = None, host = None, port = None, username = None, password = None,
                     PGOPTIONS = None, flags = '-a', isODBC = None,
                     timeout = 900, background = False):
        """
        Run the given sql file using psql command line.
        @type sql_file: string
        @param sql_file: Complete path to the sql file.
        @type out_file: string
        @param out_file: Capture the output to a file if given.
        @type dbname: string
        @param dbname: Database against which the sql will be run.
        @type host: string
        @param host: Hostname for the connection
        @type port: integer
        @param port: Port number for the connection
        @type username: string
        @param username: Username for the connection
        @type password: string
        @param password: Password for the connection. Will be set through PGPASSWORD env variable for the connection.
        @type PGOPTIONS: string
        @param PGOPTIONS: Additional configurations for the connection.
        @type flags: string
        @param flags: PSQL flags to be used. Defaults to '-a'
        @type isODBC: boolean
        @param isODBC: Use ODBC for the connection if set.
        @type timeout: integer
        @param timeout: Timeout in seconds for the command line after which the command will be terminated and this will raise a L{PSQLException}
        @type background: boolean
        @param background: Run the command in the background and return immediately if set.
        @rtype: boolean
        @return: True if the command invocation was successful. False otherwise.
        @raise PSQLException: When the sql file cannot be located.
        """
        cmd = PSQL(sql_file = sql_file, out_file = out_file, output_to_file = output_to_file,
                   dbname = dbname, host = host, port = port, username = username, password = password,
                   PGOPTIONS = PGOPTIONS, flags = flags,
                   isODBC = isODBC, timeout = timeout, background = background)
        tinctest.logger.debug("Running sql file - %s" %cmd)
        cmd.run(validateAfter = False)
        result = cmd.get_results()
        tinctest.logger.debug("Output - %s" %result)
        # A non-zero psql exit status means the file did not run cleanly.
        if result.rc != 0:
            return False
        return True
@staticmethod
def run_sql_command(sql_cmd, out_file=None, dbname=None,
                    host=None, port=None, username=None, password=None,
                    PGOPTIONS=None, flags='-a', isODBC=None,
                    timeout=900, background=False, results=None):
    """
    Run the given sql command using psql command line.

    @type sql_cmd: string
    @param sql_cmd: SQL command to run through the PSQL command line
    @type out_file: string
    @param out_file: Capture the output to a file if given.
    @type dbname: string
    @param dbname: Database against which the sql will be run.
    @type host: string
    @param host: Hostname for the connection
    @type port: integer
    @param port: Port number for the connection
    @type username: string
    @param username: Username for the connection
    @type password: string
    @param password: Password for the connection. Will be set through PGPASSWORD
                     env variable for the connection.
    @type PGOPTIONS: string
    @param PGOPTIONS: Additional configurations for the connection.
    @type flags: string
    @param flags: PSQL flags to be used. Defaults to '-a'
    @type isODBC: boolean
    @param isODBC: Use ODBC for the connection if set.
    @type timeout: integer
    @param timeout: Timeout in seconds for the command line after which the
                    command will be terminated and this will raise a L{PSQLException}
    @type background: boolean
    @param background: Run the command in the background and return immediately if set.
    @type results: dict
    @param results: Optional out-parameter. On return it contains the keys
                    'rc', 'stdout' and 'stderr' of the psql invocation.
    @rtype: string
    @return: Output of the sql command
    """
    if results is None:
        # BUGFIX: the previous signature used a mutable default dict
        # (results={'rc': 0, ...}), which is shared across every call and
        # silently accumulates stale data.  Allocate a fresh dict per call.
        results = {}
    cmd = PSQL(sql_cmd=sql_cmd, out_file=out_file, dbname=dbname,
               host=host, port=port, username=username, password=password,
               PGOPTIONS=PGOPTIONS, flags=flags,
               isODBC=isODBC, timeout=timeout, background=background)
    tinctest.logger.debug("Running command - %s" % cmd)
    cmd.run(validateAfter=False)
    result = cmd.get_results()
    # Expose the raw invocation results to callers that passed a dict.
    results['rc'] = result.rc
    results['stdout'] = result.stdout
    results['stderr'] = result.stderr
    tinctest.logger.debug("Output - %s" % result)
    return result.stdout
@staticmethod
def run_sql_file_utility_mode(sql_file, out_file=None, dbname=None,
                              host=None, port=None, username=None, password=None,
                              PGOPTIONS=None, flags='-a', isODBC=None,
                              timeout=900, background=False, output_to_file=True):
    """
    Run the given sql file through psql in utility mode.

    Identical to L{run_sql_file} except that the connection is forced into
    utility mode by appending '-c gp_session_role=utility' to PGOPTIONS.
    See L{run_sql_file} for the meaning of the shared parameters.

    @rtype: boolean
    @return: True if the command invocation was successful. False otherwise.
    @raise PSQLException: When the sql file cannot be located.
    """
    utility_mode_opt = "-c gp_session_role=utility"
    if PGOPTIONS:
        PGOPTIONS = PGOPTIONS + " " + utility_mode_opt
    else:
        PGOPTIONS = utility_mode_opt
    return PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, dbname=dbname,
                             host=host, port=port,
                             username=username, password=password,
                             PGOPTIONS=PGOPTIONS, flags=flags,
                             isODBC=isODBC, timeout=timeout, background=background,
                             output_to_file=output_to_file)
@staticmethod
def run_sql_command_utility_mode(sql_cmd, out_file=None, dbname=None,
                                 host=None, port=None, username=None, password=None,
                                 PGOPTIONS=None, flags='-a', isODBC=None,
                                 timeout=900, background=False):
    """
    Run the given sql command through psql in utility mode.

    Identical to L{run_sql_command} except that the connection is forced
    into utility mode by appending '-c gp_session_role=utility' to
    PGOPTIONS.  See L{run_sql_command} for the meaning of the parameters.

    @rtype: string
    @return: Output of the sql command
    """
    utility_mode_opt = "-c gp_session_role=utility"
    PGOPTIONS = (PGOPTIONS + " " + utility_mode_opt) if PGOPTIONS else utility_mode_opt
    return PSQL.run_sql_command(sql_cmd=sql_cmd, out_file=out_file, dbname=dbname,
                                host=host, port=port, username=username, password=password,
                                PGOPTIONS=PGOPTIONS, flags=flags,
                                isODBC=isODBC, timeout=timeout, background=background)
@staticmethod
def run_sql_file_catalog_update(sql_file, out_file=None, dbname=None,
                                host=None, port=None, username=None, password=None,
                                PGOPTIONS=None, flags='-a', isODBC=None,
                                timeout=900, background=False):
    """
    Run the given sql file through psql with catalog update privilege.

    Identical to L{run_sql_file} except that the connection is opened in
    utility mode with system-table modifications allowed, by appending
    '-c gp_session_role=utility -c allow_system_table_mods=dml' to
    PGOPTIONS.  See L{run_sql_file} for the meaning of the parameters.

    NOTE(review): unlike L{run_sql_file_utility_mode}, this wrapper does
    not forward an output_to_file argument -- confirm whether that
    asymmetry is intentional.

    @rtype: boolean
    @return: True if the command invocation was successful. False otherwise.
    @raise PSQLException: When the sql file cannot be located.
    """
    catalog_opts = "-c gp_session_role=utility -c allow_system_table_mods=dml"
    if PGOPTIONS:
        PGOPTIONS = PGOPTIONS + " " + catalog_opts
    else:
        PGOPTIONS = catalog_opts
    return PSQL.run_sql_file(sql_file=sql_file, out_file=out_file, dbname=dbname,
                             host=host, port=port,
                             username=username, password=password,
                             PGOPTIONS=PGOPTIONS, flags=flags,
                             isODBC=isODBC, timeout=timeout, background=background)
@staticmethod
def run_sql_command_catalog_update(sql_cmd, out_file=None, dbname=None,
                                   host=None, port=None, username=None, password=None,
                                   PGOPTIONS=None, flags='-a', isODBC=None,
                                   timeout=900, background=False):
    """
    Run the given sql command through psql with catalog update privilege.

    Identical to L{run_sql_command} except that the connection is opened
    in utility mode with system-table modifications allowed, by appending
    '-c gp_session_role=utility -c allow_system_table_mods=dml' to
    PGOPTIONS.  See L{run_sql_command} for the meaning of the parameters.

    @rtype: string
    @return: Output of the sql command
    """
    catalog_opts = "-c gp_session_role=utility -c allow_system_table_mods=dml"
    PGOPTIONS = (PGOPTIONS + " " + catalog_opts) if PGOPTIONS else catalog_opts
    return PSQL.run_sql_command(sql_cmd=sql_cmd, out_file=out_file, dbname=dbname,
                                host=host, port=port, username=username, password=password,
                                PGOPTIONS=PGOPTIONS, flags=flags,
                                isODBC=isODBC, timeout=timeout, background=background)
@staticmethod
def drop_database(dbname, retries=5, sleep_interval=5):
    """
    Execute dropdb against the given database, retrying on failure.

    @type dbname: string
    @param dbname: Name of the database to be deleted
    @type retries: integer
    @param retries: Number of attempts to drop the database.
    @type sleep_interval: integer
    @param sleep_interval: Time in seconds between retry attempts
    @rtype: boolean
    @return: True if successful, False otherwise
    @raise PSQLException: When the database does not exist
    """
    # TBD: Use shell when available
    if not PSQL.database_exists(dbname):
        tinctest.logger.error("Database %s does not exist." % dbname)
        raise PSQLException('Database %s does not exist' % dbname)
    cmd = Command(name='drop database', cmdStr='dropdb %s' % (dbname))
    tinctest.logger.debug("Dropping database: %s" % cmd)
    for _attempt in range(retries):
        cmd.run(validateAfter=False)
        result = cmd.get_results()
        tinctest.logger.debug("Output - %s" % result)
        # Success only when dropdb exits cleanly AND prints nothing on stderr.
        if result.rc == 0 and not result.stderr:
            return True
        time.sleep(sleep_interval)
    return False
@staticmethod
def create_database(dbname):
    """
    Create a database with the given database name.

    @type dbname: string
    @param dbname: Name of the database to be created
    @rtype: boolean
    @return: True if successful, False otherwise
    @raise PSQLException: When the database already exists.
    """
    # TBD: Use shell when available
    if PSQL.database_exists(dbname):
        raise PSQLException("Database %s already exists" % dbname)
    # BUGFIX: the Command label previously said 'drop database' although
    # the command being run is createdb; the mislabel made debug logs and
    # error reports misleading.
    cmd = Command(name='create database', cmdStr='createdb %s' % (dbname))
    tinctest.logger.debug("Creating database: %s" % cmd)
    cmd.run(validateAfter=False)
    result = cmd.get_results()
    tinctest.logger.debug("Output - %s" % result)
    # createdb may exit 0 yet emit warnings on stderr; treat both as failure.
    if result.rc != 0 or result.stderr:
        return False
    return True
@staticmethod
def reset_database(dbname, retries=5, sleep_interval=5):
    """
    Drop and recreate the database with the given database name.

    @type dbname: string
    @param dbname: Name of the database
    @type retries: integer
    @param retries: Number of attempts to drop the database.
    @type sleep_interval: integer
    @param sleep_interval: Time in seconds between retry attempts
    @rtype: boolean
    @return: True if successful, False otherwise
    """
    if PSQL.database_exists(dbname):
        dropped = PSQL.drop_database(dbname, retries, sleep_interval)
        if not dropped:
            tinctest.logger.warning("Could not delete database %s" % dbname)
            return False
    return PSQL.create_database(dbname)
@staticmethod
def database_exists(dbname):
    """
    Check whether a database with the given name exists.

    @type dbname: string
    @param dbname: Name of the database
    @rtype: boolean
    @return: True if the database exists, False otherwise
    """
    # NOTE(review): dbname is interpolated directly into the SQL text;
    # callers must pass trusted database names only.
    sql_cmd = "select 'command_found_' || datname from pg_database where datname like '" + dbname + "'"
    output = PSQL.run_sql_command(sql_cmd=sql_cmd)
    # The marker prefix makes the match robust against psql echoing the
    # query itself in the output.
    return ('command_found_' + dbname) in output
@staticmethod
def wait_for_database_up():
    """
    Wait till the system is up, as master may take some time
    to come back after FI crash.

    Polls once per second for up to 60 seconds by dispatching a trivial
    query to every segment via gp_dist_random.

    @raise PSQLException: if the database has not come up within a minute.
    """
    results = {'rc': 0, 'stdout': '', 'stderr': ''}
    for _ in range(60):
        # The return value is irrelevant here; only the rc captured in
        # `results` matters (the previous code bound it to an unused local).
        PSQL.run_sql_command('select count(*) from gp_dist_random(\'gp_id\');',
                             results=results)
        if results['rc'] == 0:
            return
        time.sleep(1)
    raise PSQLException('database has not come up')
| apache-2.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-56/modules/sheets/lib/python2.7/site-packages/_markerlib/markers.py | 1769 | 3979 | # -*- coding: utf-8 -*-
"""Interpret PEP 345 environment markers.
EXPR [in|==|!=|not in] EXPR [or|and] ...
where EXPR belongs to any of those:
python_version = '%s.%s' % (sys.version_info[0], sys.version_info[1])
python_full_version = sys.version.split()[0]
os.name = os.name
sys.platform = sys.platform
platform.version = platform.version()
platform.machine = platform.machine()
platform.python_implementation = platform.python_implementation()
a free string, like '2.6', or 'win32'
"""
__all__ = ['default_environment', 'compile', 'interpret']
import ast
import os
import platform
import sys
import weakref
# Preserve a handle on the builtin compile() before this module defines its
# own compile() function below and shadows it.
_builtin_compile = compile

try:
    from platform import python_implementation
except ImportError:
    if os.name == "java":
        # Jython 2.5 has ast module, but not platform.python_implementation() function.
        def python_implementation():
            return "Jython"
    else:
        raise
# restricted set of variables
_VARS = {'sys.platform': sys.platform,
         'python_version': '%s.%s' % sys.version_info[:2],
         # FIXME parsing sys.platform is not reliable, but there is no other
         # way to get e.g. 2.7.2+, and the PEP is defined with sys.version
         'python_full_version': sys.version.split(' ', 1)[0],
         'os.name': os.name,
         'platform.version': platform.version(),
         'platform.machine': platform.machine(),
         'platform.python_implementation': python_implementation(),
         'extra': None  # wheel extension
         }

# Also expose each dotted name under an underscore alias
# (e.g. 'os.name' -> 'os_name'), since markers may use either spelling.
for var in list(_VARS.keys()):
    if '.' in var:
        _VARS[var.replace('.', '_')] = _VARS[var]


def default_environment():
    """Return a copy of the default PEP 345 marker-environment dictionary."""
    return dict(_VARS)
class ASTWhitelist(ast.NodeTransformer):
    """Reject any AST node that is not part of the marker mini-language."""

    # Nodes permitted inside an environment-marker expression.
    ALLOWED = (ast.Compare, ast.BoolOp, ast.Attribute, ast.Name, ast.Load, ast.Str)
    # Bool operations
    ALLOWED += (ast.And, ast.Or)
    # Comparison operations
    ALLOWED += (ast.Eq, ast.Gt, ast.GtE, ast.In, ast.Is, ast.IsNot,
                ast.Lt, ast.LtE, ast.NotEq, ast.NotIn)

    def __init__(self, statement):
        # Keep the original source text around purely for error reporting.
        self.statement = statement

    def visit(self, node):
        """Ensure statement only contains allowed nodes."""
        if isinstance(node, self.ALLOWED):
            return ast.NodeTransformer.visit(self, node)
        pointer = (' ' * node.col_offset) + '^'
        raise SyntaxError('Not allowed in environment markers.\n%s\n%s' %
                          (self.statement, pointer))

    def visit_Attribute(self, node):
        """Flatten one level of attribute access."""
        flattened = "%s.%s" % (node.value.id, node.attr)
        return ast.copy_location(ast.Name(flattened, node.ctx), node)
def parse_marker(marker):
    """Parse *marker* into an AST and validate it against the node whitelist."""
    return ASTWhitelist(marker).generic_visit(ast.parse(marker, mode='eval'))
def compile_marker(parsed_marker):
    """Byte-compile an already-parsed (and whitelisted) marker expression."""
    # dont_inherit=True keeps this module's __future__ flags from leaking
    # into the compiled marker code.
    return _builtin_compile(parsed_marker, '<environment marker>', 'eval',
                            dont_inherit=True)


# Weak cache of compiled marker functions: an entry vanishes as soon as no
# caller holds a reference to the cached function any more.
_cache = weakref.WeakValueDictionary()
def compile(marker):
    """Return compiled marker as a function accepting an environment dict."""
    try:
        # Fast path: reuse a previously compiled marker function.
        return _cache[marker]
    except KeyError:
        pass
    if not marker.strip():
        # An empty/whitespace-only marker is trivially true.
        def marker_fn(environment=None, override=None):
            """"""
            return True
    else:
        compiled_marker = compile_marker(parse_marker(marker))

        def marker_fn(environment=None, override=None):
            """override updates environment"""
            if override is None:
                override = {}
            if environment is None:
                environment = default_environment()
            environment.update(override)
            # eval is acceptable here: parse_marker() already rejected every
            # node outside the comparison/boolean whitelist.
            return eval(compiled_marker, environment)
    # Expose the marker text as the function's docstring, then cache it.
    marker_fn.__doc__ = marker
    _cache[marker] = marker_fn
    return _cache[marker]
def interpret(marker, environment=None):
    """Compile *marker* and evaluate it against *environment* (default if None)."""
    marker_fn = compile(marker)
    return marker_fn(environment)
| gpl-3.0 |
eeshangarg/oh-mainline | mysite/search/views.py | 14 | 10479 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2009, 2010, 2011 OpenHatch, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.http import HttpResponse, QueryDict, HttpResponseServerError, HttpResponseRedirect
from django.core import serializers
from django.shortcuts import get_object_or_404
from django.core.urlresolvers import reverse
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl # Python 2.5 on deployment
from mysite.search.models import Project
import mysite.search.view_helpers
import mysite.base.view_helpers
from mysite.base.view_helpers import render_response
import datetime
from dateutil import tz
import pytz
from django.utils import http
import json
import mysite.search.forms
import mysite.base.decorators
# Via http://www.djangosnippets.org/snippets/1435/
def encode_datetime(obj):
    """json.dumps `default` hook: render dates/datetimes as UTC ISO-8601 strings.

    Plain dates are promoted to midnight UTC; datetimes are converted to UTC
    with their time-of-day preserved.  Anything else raises TypeError, as the
    json module expects of a `default` callback.
    """
    # BUGFIX: datetime.datetime is a subclass of datetime.date, so the old
    # unguarded isinstance(obj, datetime.date) branch rebuilt datetimes from
    # (year, month, day) only, silently truncating their time to midnight.
    if isinstance(obj, datetime.date) and not isinstance(obj, datetime.datetime):
        obj = datetime.datetime(obj.year, obj.month, obj.day,
                                tzinfo=datetime.timezone.utc)
    if isinstance(obj, datetime.datetime):
        # stdlib timezone.utc replaces the pytz/dateutil pair; output format
        # is identical.
        return obj.astimezone(datetime.timezone.utc).strftime('%Y-%m-%dT%H:%M:%SZ')
    raise TypeError("%s" % type(obj) + repr(obj) + " is not JSON serializable")
def search_index(request, invalid_subscribe_to_alert_form=None):
    """Render the volunteer-opportunity (bug) search page, or its JSON feed.

    Builds a Query from the GET parameters, slices the matching bugs to the
    requested [start, end] window, prepares prev/next paging query strings,
    and on the last page of results attaches a bug-alert subscription form.

    NOTE(review): the un-guarded `query.*` accesses below assume
    Query.create_from_GET_data always returns a Query instance (possibly
    falsy when empty) -- confirm against view_helpers before relying on it.
    """
    # Make the query string keys lowercase using a redirect.
    if any([k.lower() != k for k in request.GET.keys()]):
        new_GET = {}
        for key in request.GET.keys():
            new_GET[key.lower()] = request.GET[key]
        return HttpResponseRedirect(reverse(search_index) + '?' + http.urlencode(new_GET))
    if request.user.is_authenticated():
        person = request.user.get_profile()
        suggestion_keys = person.get_recommended_search_terms()
    else:
        suggestion_keys = []
    suggestions = [(i, k, False) for i, k in enumerate(suggestion_keys)]
    format = request.GET.get('format', None)
    # Page window: 1-indexed, inclusive.
    start = int(request.GET.get('start', 1))
    end = int(request.GET.get('end', 10))
    total_bug_count = 0
    query = mysite.search.view_helpers.Query.create_from_GET_data(request.GET)
    if query:
        bugs = query.get_bugs_unordered()
        # Sort
        bugs = mysite.search.view_helpers.order_bugs(bugs)
        total_bug_count = bugs.count()
        bugs = bugs[start - 1:end]
    else:
        bugs = []
    data = {}
    data['query'] = query
    # QueryDict('') is immutable; .copy() yields a mutable instance we can
    # fill with paging parameters.
    prev_page_query_str = QueryDict('')
    prev_page_query_str = prev_page_query_str.copy()
    next_page_query_str = QueryDict('')
    next_page_query_str = next_page_query_str.copy()
    if query:
        prev_page_query_str['q'] = query.terms_string
        next_page_query_str['q'] = query.terms_string
    if format:
        prev_page_query_str['format'] = format
        next_page_query_str['format'] = format
    # Carry the current facet selections over to the paging links.
    for facet_name, selected_option in query.active_facet_options.items():
        prev_page_query_str[facet_name] = selected_option
        next_page_query_str[facet_name] = selected_option
    for facet_name in query.any_facet_options:
        prev_page_query_str[facet_name] = ''
        next_page_query_str[facet_name] = ''
    diff = end - start
    prev_page_query_str['start'] = start - diff - 1
    prev_page_query_str['end'] = start - 1
    next_page_query_str['start'] = end + 1
    next_page_query_str['end'] = end + diff + 1
    data['start'] = start
    data['end'] = min(end, total_bug_count)
    data['prev_page_url'] = '/search/?' + prev_page_query_str.urlencode()
    data['next_page_url'] = '/search/?' + next_page_query_str.urlencode()
    data['this_page_query_str'] = http.urlencode(request.GET)
    is_this_page_1 = (start <= 1)
    is_this_the_last_page = (end >= (total_bug_count - 1))
    data['show_prev_page_link'] = not is_this_page_1
    data['show_next_page_link'] = not is_this_the_last_page
    if request.GET.get('confirm_email_alert_signup', ''):
        data['confirm_email_alert_signup'] = 1
    # If this the last page of results, display a form allowing user to
    # subscribe to a Volunteer Opportunity search alert
    if query and is_this_the_last_page:
        if invalid_subscribe_to_alert_form:
            alert_form = invalid_subscribe_to_alert_form
        else:
            initial = {
                'query_string': request.META['QUERY_STRING'],
                'how_many_bugs_at_time_of_request': len(bugs)
            }
            if request.user.is_authenticated():
                initial['email'] = request.user.email
            alert_form = mysite.search.forms.BugAlertSubscriptionForm(
                initial=initial)
        data['subscribe_to_alert_form'] = alert_form
    # FIXME
    # The template has no way of grabbing what URLs to put in the [x]
    # So we help it out here by hacking around our fruity list-of-dicts
    # data structure.
    facet2any_query_string = {}
    for facet in query.active_facet_options:
        facet2any_query_string[facet] = query.get_facet_options(
            facet, [''])[0]['query_string']
    Bug = mysite.search.models.Bug  # NOTE(review): unused local; kept as-is.
    from django.db.models import Q, Count  # NOTE(review): Q appears unused.
    data['popular_projects'] = list(Project.objects.filter(
        name__in=['Miro', 'GnuCash', 'brasero', 'Evolution Exchange', 'songbird']).order_by('name').reverse())
    data['all_projects'] = Project.objects.values('pk', 'name').filter(
        bug__looks_closed=False).annotate(Count('bug')).order_by('name')
    Person = mysite.profile.models.Person
    import random
    # Sample two small windows of contributors from a pseudo-random offset.
    random_start = int(random.random() * 700)
    data['contributors'] = Person.objects.all()[random_start:random_start + 5]
    data['contributors2'] = Person.objects.all(
    )[random_start + 10:random_start + 15]
    data['languages'] = Project.objects.all().values_list(
        'language', flat=True).order_by('language').exclude(language='').distinct()[:4]
    if format == 'json':
        # FIXME: Why `alert`?
        return bugs_to_json_response(data, bugs, request.GET.get(
            'jsoncallback', 'alert'))
    else:
        data['user'] = request.user
        data['suggestions'] = suggestions
        data['bunch_of_bugs'] = bugs
        data['url'] = 'http://launchpad.net/'
        data['total_bug_count'] = total_bug_count
        data['facet2any_query_string'] = facet2any_query_string
        data['project_count'] = mysite.search.view_helpers.get_project_count()
    return mysite.base.decorators.as_view(request, 'search/search.html', data, slug=None)
def bugs_to_json_response(data, bunch_of_bugs, callback_function_name=''):
    """ The search results page accesses this view via jQuery's getJSON method,
    and loads its results into the DOM."""
    # NOTE(review): classic JSONP -- callback_function_name is concatenated
    # into the response body unvalidated; confirm callers sanitize it.
    # Purpose of this code: Serialize the list of bugs
    # Step 1: Pull the bugs out of the database, getting them back
    # as simple Python objects
    obj_serializer = serializers.get_serializer('python')()
    bugs = obj_serializer.serialize(bunch_of_bugs)
    # Step 2: With a (tragically) large number of database calls,
    # loop over these objects, replacing project primary keys with project
    # display names.
    for bug in bugs:
        project = Project.objects.get(pk=int(bug['fields']['project']))
        bug['fields']['project'] = project.display_name
    # Step 3: Create a JSON-happy list of key-value pairs
    data_list = [{'bugs': bugs}]
    # Step 4: Create the string form of the JSON
    json_as_string = json.dumps(data_list, default=encode_datetime)
    # Step 5: Prefix it with the desired callback function name
    json_string_with_callback = callback_function_name + \
        '(' + json_as_string + ')'
    # Step 6: Return that.
    return HttpResponse(json_string_with_callback)
def list_to_jquery_autocompletion_format(list):
    """Join *list* into the newline-separated string expected by
    jQuery's autocomplete plugin."""
    # NOTE(review): the parameter shadows the builtin `list`; the name is
    # kept for signature compatibility with existing callers.
    separator = "\n"
    return separator.join(list)
class SearchableField:
    """A field in the database you can search.

    Every instance registers itself in the class-level ``fields_by_prefix``
    map, keyed by its prefix.
    """
    fields_by_prefix = {}

    def __init__(self, _prefix):
        self.prefix = _prefix
        self.is_queried = False
        # Register this instance in the shared prefix registry.
        self.fields_by_prefix[self.prefix] = self
def subscribe_to_bug_alert_do(request):
    """POST handler: subscribe the requester to a bug-search email alert.

    On a valid form, saves the alert (attaching the logged-in user when
    present) and redirects back to the search page with a confirmation
    flag appended.  On an invalid form, re-renders the originating search
    page with the invalid form so its errors can be displayed.
    """
    confirmation_query_string_fragment = "&confirm_email_alert_signup=1"
    alert_form = mysite.search.forms.BugAlertSubscriptionForm(request.POST)
    query_string = request.POST.get('query_string', '')  # Lacks initial '?'
    # Strip any stale confirmation flag so it is not duplicated below.
    query_string = query_string.replace(confirmation_query_string_fragment, '')
    next = reverse(search_index) + '?' + query_string
    if alert_form.is_valid():
        alert = alert_form.save()
        if request.user.is_authenticated():
            alert.user = request.user
            alert.save()
        next += confirmation_query_string_fragment
        return HttpResponseRedirect(next)
    elif query_string:
        # We want search_index to get the right query string but we can't exactly
        # do that. What we *can* do is fiddle with the request obj we're about
        # to pass to search_index.
        # Commence fiddling.
        request.GET = dict(parse_qsl(query_string))
        return search_index(request, alert_form)
    else:
        # If user tries to do a different bug search after invalid form input
        return HttpResponseRedirect(next + request.META['QUERY_STRING'])
def project_has_icon(request, project_name):
    """Polling endpoint: report whether *project_name*'s icon is available.

    Returns the literal body "keep polling" until the project's icon-fetch
    timestamp is set, after which it returns the icon URL (or a generic
    fallback).  404s when the project does not exist.
    """
    p = get_object_or_404(Project, name=project_name)
    if p.date_icon_was_fetched_from_ohloh is None:
        return HttpResponse("keep polling")
    return HttpResponse(p.get_url_of_icon_or_generic())
"""
Ways we could do autocompletion:
Method 1.
Cache languages, search those first.
Ask server to give a list of projects beginning with "c"
Server returns list, cache that.
Method 2.
Ask server to give a list of projects and languages beginning with "c"
Add top 100 fulltext words to the mix.
"""
# vim: set ai ts=4 sw=4 et nu:
| agpl-3.0 |
mikewiebe-ansible/ansible | lib/ansible/modules/network/fortimanager/fmgr_secprof_ips.py | 38 | 21515 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Standard Ansible module metadata: this module is in 'preview' status and
# is supported by the community rather than the Ansible core team.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_secprof_ips
version_added: "2.8"
notes:
- Full Documentation at U(https://ftnt-ansible-docs.readthedocs.io/en/latest/).
author:
- Luke Weighall (@lweighall)
- Andrew Welsh (@Ghilli3)
- Jim Huber (@p4r4n0y1ng)
short_description: Managing IPS security profiles in FortiManager
description:
- Managing IPS security profiles in FortiManager
options:
adom:
description:
- The ADOM the configuration should belong to.
required: false
default: root
mode:
description:
- Sets one of three modes for managing the object.
- Allows use of soft-adds instead of overwriting existing values
choices: ['add', 'set', 'delete', 'update']
required: false
default: add
replacemsg_group:
description:
- Replacement message group.
required: false
name:
description:
- Sensor name.
required: false
extended_log:
description:
- Enable/disable extended logging.
required: false
choices:
- disable
- enable
comment:
description:
- Comment.
required: false
block_malicious_url:
description:
- Enable/disable malicious URL blocking.
required: false
choices:
- disable
- enable
entries:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
entries_action:
description:
- Action taken with traffic in which signatures are detected.
required: false
choices:
- pass
- block
- reset
- default
entries_application:
description:
- Applications to be protected. set application ? lists available applications. all includes
all applications. other includes all unlisted applications.
required: false
entries_location:
description:
- Protect client or server traffic.
required: false
entries_log:
description:
- Enable/disable logging of signatures included in filter.
required: false
choices:
- disable
- enable
entries_log_attack_context:
description:
- Enable/disable logging of attack context| URL buffer, header buffer, body buffer, packet buffer.
required: false
choices:
- disable
- enable
entries_log_packet:
description:
- Enable/disable packet logging. Enable to save the packet that triggers the filter. You can
download the packets in pcap format for diagnostic use.
required: false
choices:
- disable
- enable
entries_os:
description:
- Operating systems to be protected. all includes all operating systems. other includes all
unlisted operating systems.
required: false
entries_protocol:
description:
- Protocols to be examined. set protocol ? lists available protocols. all includes all protocols.
other includes all unlisted protocols.
required: false
entries_quarantine:
description:
- Quarantine method.
required: false
choices:
- none
- attacker
entries_quarantine_expiry:
description:
- Duration of quarantine.
required: false
entries_quarantine_log:
description:
- Enable/disable quarantine logging.
required: false
choices:
- disable
- enable
entries_rate_count:
description:
- Count of the rate.
required: false
entries_rate_duration:
description:
- Duration (sec) of the rate.
required: false
entries_rate_mode:
description:
- Rate limit mode.
required: false
choices:
- periodical
- continuous
entries_rate_track:
description:
- Track the packet protocol field.
required: false
choices:
- none
- src-ip
- dest-ip
- dhcp-client-mac
- dns-domain
entries_rule:
description:
- Identifies the predefined or custom IPS signatures to add to the sensor.
required: false
entries_severity:
description:
- Relative severity of the signature, from info to critical. Log messages generated by the signature
include the severity.
required: false
entries_status:
description:
- Status of the signatures included in filter. default enables the filter and only use filters
with default status of enable. Filters with default status of disable will not be used.
required: false
choices:
- disable
- enable
- default
entries_exempt_ip_dst_ip:
description:
- Destination IP address and netmask.
required: false
entries_exempt_ip_src_ip:
description:
- Source IP address and netmask.
required: false
filter:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
filter_action:
description:
- Action of selected rules.
required: false
choices:
- pass
- block
- default
- reset
filter_application:
description:
- Vulnerable application filter.
required: false
filter_location:
description:
- Vulnerability location filter.
required: false
filter_log:
description:
- Enable/disable logging of selected rules.
required: false
choices:
- disable
- enable
filter_log_packet:
description:
- Enable/disable packet logging of selected rules.
required: false
choices:
- disable
- enable
filter_name:
description:
- Filter name.
required: false
filter_os:
description:
- Vulnerable OS filter.
required: false
filter_protocol:
description:
- Vulnerable protocol filter.
required: false
filter_quarantine:
description:
- Quarantine IP or interface.
required: false
choices:
- none
- attacker
filter_quarantine_expiry:
description:
- Duration of quarantine in minute.
required: false
filter_quarantine_log:
description:
- Enable/disable logging of selected quarantine.
required: false
choices:
- disable
- enable
filter_severity:
description:
- Vulnerability severity filter.
required: false
filter_status:
description:
- Selected rules status.
required: false
choices:
- disable
- enable
- default
override:
description:
- EXPERTS ONLY! KNOWLEDGE OF FMGR JSON API IS REQUIRED!
- List of multiple child objects to be added. Expects a list of dictionaries.
- Dictionaries must use FortiManager API parameters, not the ansible ones listed below.
- If submitted, all other prefixed sub-parameters ARE IGNORED.
- This object is MUTUALLY EXCLUSIVE with its options.
- We expect that you know what you are doing with these list parameters, and are leveraging the JSON API Guide.
- WHEN IN DOUBT, USE THE SUB OPTIONS BELOW INSTEAD TO CREATE OBJECTS WITH MULTIPLE TASKS
required: false
override_action:
description:
- Action of override rule.
required: false
choices:
- pass
- block
- reset
override_log:
description:
- Enable/disable logging.
required: false
choices:
- disable
- enable
override_log_packet:
description:
- Enable/disable packet logging.
required: false
choices:
- disable
- enable
override_quarantine:
description:
- Quarantine IP or interface.
required: false
choices:
- none
- attacker
override_quarantine_expiry:
description:
- Duration of quarantine in minute.
required: false
override_quarantine_log:
description:
- Enable/disable logging of selected quarantine.
required: false
choices:
- disable
- enable
override_rule_id:
description:
- Override rule ID.
required: false
override_status:
description:
- Enable/disable status of override rule.
required: false
choices:
- disable
- enable
override_exempt_ip_dst_ip:
description:
- Destination IP address and netmask.
required: false
override_exempt_ip_src_ip:
description:
- Source IP address and netmask.
required: false
'''
EXAMPLES = '''
- name: DELETE Profile
fmgr_secprof_ips:
name: "Ansible_IPS_Profile"
comment: "Created by Ansible Module TEST"
mode: "delete"
- name: CREATE Profile
fmgr_secprof_ips:
name: "Ansible_IPS_Profile"
comment: "Created by Ansible Module TEST"
mode: "set"
block_malicious_url: "enable"
entries: [{severity: "high", action: "block", log-packet: "enable"}, {severity: "medium", action: "pass"}]
'''
RETURN = """
api_result:
description: full API response, includes status code and message
returned: always
type: str
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortimanager.fortimanager import FortiManagerHandler
from ansible.module_utils.network.fortimanager.common import FMGBaseException
from ansible.module_utils.network.fortimanager.common import FMGRCommon
from ansible.module_utils.network.fortimanager.common import DEFAULT_RESULT_OBJ
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
from ansible.module_utils.network.fortimanager.common import prepare_dict
from ansible.module_utils.network.fortimanager.common import scrub_dict
###############
# START METHODS
###############
def fmgr_ips_sensor_modify(fmgr, paramgram):
    """
    Build the IPS-sensor URL for the requested mode and submit the request.

    :param fmgr: The fmgr object instance from fortimanager.py
    :type fmgr: class object
    :param paramgram: The formatted dictionary of options to process
    :type paramgram: dict
    :return: The response from the FortiManager
    :rtype: dict
    """
    mode = paramgram["mode"]
    adom = paramgram["adom"]
    # Basic defaults; url/datagram are filled in per-mode below.
    response = DEFAULT_RESULT_OBJ
    url = ""
    datagram = {}
    if mode == "delete":
        # Deletes address the sensor by name and carry an empty payload.
        url = '/pm/config/adom/{adom}/obj/ips/sensor/{name}'.format(
            adom=adom, name=paramgram["name"])
    elif mode in ('set', 'add', 'update'):
        # Create/update requests post the scrubbed paramgram to the
        # collection URL.
        url = '/pm/config/adom/{adom}/obj/ips/sensor'.format(adom=adom)
        datagram = scrub_dict(prepare_dict(paramgram))
    response = fmgr.process_request(url, datagram, mode)
    return response
#############
# END METHODS
#############
def main():
    """Ansible module entry point.

    Declares the module argument spec, assembles the FortiManager-style
    "paramgram" (hyphenated API keys) from the ansible parameters, opens the
    httpapi connection, and dispatches the requested IPS sensor operation.
    """
    # Argument spec mirrors the DOCUMENTATION block: flat "entries_*",
    # "filter_*" and "override_*" options are sub-parameters of the
    # corresponding list objects (mutually exclusive with the raw lists).
    argument_spec = dict(
        adom=dict(type="str", default="root"),
        mode=dict(choices=["add", "set", "delete", "update"],
                  type="str", default="add"),
        replacemsg_group=dict(required=False, type="str"),
        name=dict(required=False, type="str"),
        extended_log=dict(required=False, type="str",
                          choices=["disable", "enable"]),
        comment=dict(required=False, type="str"),
        block_malicious_url=dict(required=False, type="str", choices=[
            "disable", "enable"]),
        entries=dict(required=False, type="list"),
        entries_action=dict(required=False, type="str", choices=[
            "pass", "block", "reset", "default"]),
        entries_application=dict(required=False, type="str"),
        entries_location=dict(required=False, type="str"),
        entries_log=dict(required=False, type="str",
                         choices=["disable", "enable"]),
        entries_log_attack_context=dict(
            required=False, type="str", choices=["disable", "enable"]),
        entries_log_packet=dict(required=False, type="str", choices=[
            "disable", "enable"]),
        entries_os=dict(required=False, type="str"),
        entries_protocol=dict(required=False, type="str"),
        entries_quarantine=dict(required=False, type="str", choices=[
            "none", "attacker"]),
        entries_quarantine_expiry=dict(required=False, type="str"),
        entries_quarantine_log=dict(
            required=False, type="str", choices=["disable", "enable"]),
        entries_rate_count=dict(required=False, type="int"),
        entries_rate_duration=dict(required=False, type="int"),
        entries_rate_mode=dict(required=False, type="str", choices=[
            "periodical", "continuous"]),
        entries_rate_track=dict(required=False, type="str",
                                choices=["none", "src-ip", "dest-ip", "dhcp-client-mac", "dns-domain"]),
        entries_rule=dict(required=False, type="str"),
        entries_severity=dict(required=False, type="str"),
        entries_status=dict(required=False, type="str", choices=[
            "disable", "enable", "default"]),
        entries_exempt_ip_dst_ip=dict(required=False, type="str"),
        entries_exempt_ip_src_ip=dict(required=False, type="str"),
        filter=dict(required=False, type="list"),
        filter_action=dict(required=False, type="str", choices=[
            "pass", "block", "default", "reset"]),
        filter_application=dict(required=False, type="str"),
        filter_location=dict(required=False, type="str"),
        filter_log=dict(required=False, type="str",
                        choices=["disable", "enable"]),
        filter_log_packet=dict(required=False, type="str",
                               choices=["disable", "enable"]),
        filter_name=dict(required=False, type="str"),
        filter_os=dict(required=False, type="str"),
        filter_protocol=dict(required=False, type="str"),
        filter_quarantine=dict(required=False, type="str",
                               choices=["none", "attacker"]),
        filter_quarantine_expiry=dict(required=False, type="int"),
        filter_quarantine_log=dict(required=False, type="str", choices=[
            "disable", "enable"]),
        filter_severity=dict(required=False, type="str"),
        filter_status=dict(required=False, type="str", choices=[
            "disable", "enable", "default"]),
        override=dict(required=False, type="list"),
        override_action=dict(required=False, type="str",
                             choices=["pass", "block", "reset"]),
        override_log=dict(required=False, type="str",
                          choices=["disable", "enable"]),
        override_log_packet=dict(required=False, type="str", choices=[
            "disable", "enable"]),
        override_quarantine=dict(required=False, type="str", choices=[
            "none", "attacker"]),
        override_quarantine_expiry=dict(required=False, type="int"),
        override_quarantine_log=dict(
            required=False, type="str", choices=["disable", "enable"]),
        override_rule_id=dict(required=False, type="str"),
        override_status=dict(required=False, type="str",
                             choices=["disable", "enable"]),
        override_exempt_ip_dst_ip=dict(required=False, type="str"),
        override_exempt_ip_src_ip=dict(required=False, type="str"),
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False, )
    # MODULE PARAMGRAM
    # Translate ansible snake_case options into the hyphenated keys the
    # FortiManager JSON API expects; nested dicts mirror the API structure.
    paramgram = {
        "mode": module.params["mode"],
        "adom": module.params["adom"],
        "replacemsg-group": module.params["replacemsg_group"],
        "name": module.params["name"],
        "extended-log": module.params["extended_log"],
        "comment": module.params["comment"],
        "block-malicious-url": module.params["block_malicious_url"],
        "entries": {
            "action": module.params["entries_action"],
            "application": module.params["entries_application"],
            "location": module.params["entries_location"],
            "log": module.params["entries_log"],
            "log-attack-context": module.params["entries_log_attack_context"],
            "log-packet": module.params["entries_log_packet"],
            "os": module.params["entries_os"],
            "protocol": module.params["entries_protocol"],
            "quarantine": module.params["entries_quarantine"],
            "quarantine-expiry": module.params["entries_quarantine_expiry"],
            "quarantine-log": module.params["entries_quarantine_log"],
            "rate-count": module.params["entries_rate_count"],
            "rate-duration": module.params["entries_rate_duration"],
            "rate-mode": module.params["entries_rate_mode"],
            "rate-track": module.params["entries_rate_track"],
            "rule": module.params["entries_rule"],
            "severity": module.params["entries_severity"],
            "status": module.params["entries_status"],
            "exempt-ip": {
                "dst-ip": module.params["entries_exempt_ip_dst_ip"],
                "src-ip": module.params["entries_exempt_ip_src_ip"],
            },
        },
        "filter": {
            "action": module.params["filter_action"],
            "application": module.params["filter_application"],
            "location": module.params["filter_location"],
            "log": module.params["filter_log"],
            "log-packet": module.params["filter_log_packet"],
            "name": module.params["filter_name"],
            "os": module.params["filter_os"],
            "protocol": module.params["filter_protocol"],
            "quarantine": module.params["filter_quarantine"],
            "quarantine-expiry": module.params["filter_quarantine_expiry"],
            "quarantine-log": module.params["filter_quarantine_log"],
            "severity": module.params["filter_severity"],
            "status": module.params["filter_status"],
        },
        "override": {
            "action": module.params["override_action"],
            "log": module.params["override_log"],
            "log-packet": module.params["override_log_packet"],
            "quarantine": module.params["override_quarantine"],
            "quarantine-expiry": module.params["override_quarantine_expiry"],
            "quarantine-log": module.params["override_quarantine_log"],
            "rule-id": module.params["override_rule_id"],
            "status": module.params["override_status"],
            "exempt-ip": {
                "dst-ip": module.params["override_exempt_ip_dst_ip"],
                "src-ip": module.params["override_exempt_ip_src_ip"],
            }
        }
    }
    module.paramgram = paramgram
    # The module only works over the httpapi connection plugin; fail fast
    # when no persistent socket is available.
    fmgr = None
    if module._socket_path:
        connection = Connection(module._socket_path)
        fmgr = FortiManagerHandler(connection, module)
        fmgr.tools = FMGRCommon()
    else:
        module.fail_json(**FAIL_SOCKET_MSG)
    # When the raw list parameters are supplied they replace the assembled
    # child dicts built above (the documented mutually-exclusive behavior).
    list_overrides = ['entries', 'filter', 'override']
    paramgram = fmgr.tools.paramgram_child_list_override(list_overrides=list_overrides,
                                                         paramgram=paramgram, module=module)
    results = DEFAULT_RESULT_OBJ
    try:
        results = fmgr_ips_sensor_modify(fmgr, paramgram)
        fmgr.govern_response(module=module, results=results,
                             ansible_facts=fmgr.construct_ansible_facts(results, module.params, paramgram))
    except Exception as err:
        # NOTE(review): wrapping loses the original traceback context.
        raise FMGBaseException(err)
    return module.exit_json(**results[1])
# Standard script entry point: run main() only when executed directly.
if __name__ == "__main__":
    main()
| gpl-3.0 |
MRod5/pyturb | deprecated/isentropic_gas_test.py | 1 | 1320 | """
pyturb
isa.py tests
M Rodriguez
"""
from pyturb.isentropic_gas import IsentropicGas
import numpy as np
# Construct with an unrecognized gamma model name ('aaa') -- exercises the
# constructor's model-selection path.  NOTE(review): presumably this falls
# back or warns rather than raising; confirm against IsentropicGas.
isentropic = IsentropicGas(selected_cp_air_model='ideal', selected_gamma_air_model='aaa')
# Valid constant-property ("ideal") models for both cp and gamma.
isentropic = IsentropicGas(selected_cp_air_model='ideal', selected_gamma_air_model='ideal')
print('...........')
# Switch to the NASA cp model; 'naca' is another gamma model identifier.
isentropic = IsentropicGas(selected_cp_air_model='nasa', selected_gamma_air_model='naca')
# Isentropic-relation spot checks at M=0.5, T=273 K (expected values are the
# classic air results: T0/T = 1.05, p0/p = 1.186).
np.testing.assert_almost_equal(isentropic.stagnation_static_temperature_relation(0.5, 273), 1.05)
np.testing.assert_almost_equal(isentropic.stagnation_static_pressure_relation(0.5, 273), 1.186, decimal=3)
# Energy/velocity round trips: dh = v^2/2 -> 5000 J/kg <-> 100 m/s.
np.testing.assert_almost_equal(isentropic.kinetic_energy_from_enthalpy(15000, 10000), 5000, decimal=1)
np.testing.assert_almost_equal(isentropic.velocity_from_enthalpy(15000, 10000), 100, decimal=1)
np.testing.assert_almost_equal(isentropic.stagnation_enthalpy(100, 273), 279600, decimal=-3)
# Stagnation quantities from Mach number and back again.
np.testing.assert_almost_equal(isentropic.stagnation_pressure_from_mach(0.5, 101325, 273), 101325*1.186, decimal=-3)
np.testing.assert_almost_equal(isentropic.mach_number(165.75, 273), 0.5, decimal=3)
np.testing.assert_almost_equal(isentropic.stagnation_temperature_from_mach(0.5, 273), 273*1.05, decimal=4)
np.testing.assert_almost_equal(isentropic.stagnation_temperature_from_vel(165.75, 273), 273*1.05, decimal=2)
| mit |
ablifedev/ABLIRC | ABLIRC/bin/Clip-Seq/ABLIFE/cal_nonintergenic_region.py | 1 | 10587 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
####################################################################################
### Copyright (C) 2015-2019 by ABLIFE
####################################################################################
####################################################################################
####################################################################################
# Date Version Author ChangeLog
#
#
#####################################################################################
"""
程序功能说明:
1.计算gene表达量
2.randCheck_gene
3.randCheck_mRNA
程序设计思路:
利用gffutils和HTSeq包进行统计
"""
import re, os, sys, logging, time, datetime
from optparse import OptionParser, OptionGroup
# Python 2 only: reload() restores sys.setdefaultencoding (removed from the
# sys module by site.py at startup); forcing UTF-8 lets the Chinese
# docstrings and log messages round-trip without explicit encodes.
reload(sys)
sys.setdefaultencoding('utf-8')
import subprocess
import threading
import gffutils
import HTSeq
import numpy
import multiprocessing
import signal
from matplotlib import pyplot
# Make the project-level ablib package importable relative to this script.
sys.path.insert(1, os.path.split(os.path.realpath(__file__))[0] + "/../../")
# print(sys.path)
from ablib.utils.tools import *
# Hard requirement: this script targets the Python 2.7 series.
if sys.version_info < (2, 7):
    print("Python Version error: please use python2.7")  # fixed typo: was "phthon2.7"
    sys.exit(-1)
_version = 'v0.1'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def configOpt():
    """Build the OptionParser, parse sys.argv, and return (parser, opt, args).

    Prints the help text and exits when no arguments at all are supplied.
    """
    usage = 'Usage: %prog [-f] [other option] [-h]'
    p = OptionParser(usage)
    ##basic options
    p.add_option('-g', '--gff', dest='gff', action='store', type='string', help='gff file,do not have to provide it if db is exited')
    p.add_option('-i', '--info', dest='info', action='store', type='string', help='chr length info')
    p.add_option('-d', '--db', dest='db', default='gffdb', action='store', type='string', help='the gff database file to create or use')
    p.add_option('-o', '--outfile', dest='outfile', default='intergenic.txt', action='store', type='string', help='intergenic.txt')
    group = OptionGroup(p, "Preset options")
    ##preset options (shared housekeeping flags: output/log dirs, email, temp retention)
    group.add_option('-O', '--outDir', dest='outDir', default='./', action='store', type='string', help='output directory', metavar="DIR")
    group.add_option('-L', '--logDir', dest='logDir', default='', action='store', type='string', help='log dir ,default is same as outDir')
    group.add_option('-P', '--logPrefix', dest='logPrefix', default='', action='store', type='string', help='log file prefix')
    group.add_option('-E', '--email', dest='email', default='none', action='store', type='string', help='email address, if you want get a email when this job is finished,default is no email', metavar="EMAIL")
    group.add_option('-Q', '--quiet', dest='quiet', default=False, action='store_true', help='do not print messages to stdout')
    group.add_option('-K', '--keepTemp', dest='keepTemp', default=False, action='store_true', help='keep temp dir')
    group.add_option('-T', '--test', dest='isTest', default=False, action='store_true', help='run this program for test')
    p.add_option_group(group)
    # No arguments at all: show usage and bail out.
    if len(sys.argv) == 1:
        p.print_help()
        sys.exit(1)
    opt, args = p.parse_args()
    return (p, opt, args)
def listToString(x):
    """Join the command-line argument list into a single string.

    Each element is followed by one space, so a non-empty input yields a
    trailing space (e.g. ['a', 'b'] -> 'a b ').  Uses str.join instead of
    repeated += concatenation, which is quadratic in the worst case.
    """
    return ''.join(a + ' ' for a in x)
# Parse the command line once at import time; all later module-level setup
# (paths, logging, teardown) reads these options.
opt_parser, opt, args = configOpt()
# Default the log directory to <outDir>/log/ when not given explicitly.
if opt.logDir == "":
    opt.logDir = opt.outDir + '/log/'
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# Resolve the working directories; output and log dirs are created eagerly,
# temp/result dirs are only named here (their mkdir calls are commented out).
scriptPath = os.path.abspath(os.path.dirname(__file__))  # absolute script path
binPath = scriptPath + '/bin'  # absolute bin path
outPath = os.path.abspath(opt.outDir)  # absolute output path
os.mkdir(outPath) if not os.path.isdir(outPath) else None
logPath = os.path.abspath(opt.logDir)
os.mkdir(logPath) if not os.path.isdir(logPath) else None
tempPath = outPath + '/temp/'  # absolute temp path (comment previously mislabeled it "bin path")
# os.mkdir(tempPath) if not os.path.isdir(tempPath) else None
resultPath = outPath + '/result/'
# os.mkdir(resultPath) if not os.path.isdir(resultPath) else None
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def initLogging(logFilename):
    """Configure root logging.

    All DEBUG-and-above records go to *logFilename* (truncated each run);
    unless the module-level ``opt.quiet`` flag is set, INFO-and-above records
    are additionally echoed to the console with the same format.
    """
    log_format = '[%(asctime)s : %(levelname)s] %(message)s'
    date_format = '%y-%m-%d %H:%M'
    logging.basicConfig(level=logging.DEBUG,
                        format=log_format,
                        datefmt=date_format,
                        filename=logFilename,
                        filemode='w')
    if opt.quiet:
        return
    # Mirror INFO-and-above records to the console.
    stream_handler = logging.StreamHandler()
    stream_handler.setLevel(logging.INFO)
    stream_handler.setFormatter(logging.Formatter(log_format, datefmt=date_format))
    logging.getLogger('').addHandler(stream_handler)
# Build a unique, timestamped log file name and initialise logging before any
# real work starts.
dt = datetime.datetime.now()
logFile = logPath + '/' + opt.logPrefix + 'log.' + str(dt.strftime('%Y%m%d.%H%M%S.%f')) + '.txt'
initLogging(logFile)
logging.debug(sys.modules[__name__].__doc__)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
logging.debug('Program version: %s' % _version)
logging.debug('Start the program with [%s]\n', listToString(sys.argv))
# Record the wall-clock start time; the matching end-time log runs at module exit.
startTime = datetime.datetime.now()
logging.debug("计时器:Program start at %s" % startTime)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### S
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
### E
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
def main():
    """Mark every stranded genic interval from the GFF on a genome-wide array,
    then write the non-intergenic regions of each chromosome as BED-like lines
    (chrom, 1-based start, end, 'noninter', 0, strand) to ``opt.outfile``.
    """
    print("Main procedure start...")
    # (Re)build the gffutils database only when a GFF file was supplied;
    # otherwise reuse the existing database file named by --db.
    if opt.gff:
        db = gffutils.create_db(opt.gff, opt.db, merge_strategy="create_unique", verbose=False, force=True)
    db = gffutils.FeatureDB(opt.db)
    # Stranded genomic array; positions covered by any gene-like feature
    # are flagged with 1, everything else stays 0 (intergenic).
    ga = HTSeq.GenomicArray("auto", stranded=True, typecode='i')
    genes = ('gene','lincRNA_gene','miRNA_gene','mt_gene','processed_pseudogene','pseudogene','rRNA_gene','snoRNA_gene','snRNA_gene')
    for gene in db.features_of_type(genes):
        # gene.start - 1 converts the 1-based GFF start to HTSeq's
        # 0-based, half-open interval convention.
        gu_iv = HTSeq.GenomicInterval(gene.seqid, gene.start - 1, gene.end, gene.strand)
        ga[gu_iv]=1
    with open(opt.outfile, 'w') as o:
        # opt.info lines: "<chrom>\t<length>" (comments/blank lines skipped).
        for line in open(opt.info):
            if line.startswith('#'): continue
            if line.startswith('\n'): continue
            line = line.strip().split('\t')
            # Walk each strand of the whole chromosome and emit runs flagged
            # genic (value == 1), converting back to 1-based starts.
            ext_iv = HTSeq.GenomicInterval(line[0], 0, int(line[1]), "+")
            for iv, value in ga[ext_iv].steps():
                if value == 1:
                    o.writelines(line[0] + '\t' + str(iv.start+1) + '\t' + str(iv.end) + '\tnoninter\t0\t' + '+' + '\n')
            ext_iv = HTSeq.GenomicInterval(line[0], 0, int(line[1]), "-")
            for iv, value in ga[ext_iv].steps():
                if value == 1:
                    o.writelines(line[0] + '\t' + str(iv.start+1) + '\t' + str(iv.end) + '\tnoninter\t0\t' + '-' + '\n')
# Run only when executed as a script (module-level teardown below still runs).
if __name__ == '__main__':
    main()
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# Remove the temp directory unless the user asked to keep it.
if not opt.keepTemp:
    # NOTE(review): tempPath is interpolated into a shell command; a crafted
    # --outDir value could inject shell syntax.  shutil.rmtree would be safer.
    os.system('rm -rf ' + tempPath)
    logging.debug("Temp folder is deleted..")
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# Final wall-clock timing summary (HH:MM:SS).
logging.debug("Program ended")
currentTime = datetime.datetime.now()
runningTime = (currentTime - startTime).seconds  # in seconds
logging.debug("计时器:Program start at %s" % startTime)
logging.debug("计时器:Program end at %s" % currentTime)
logging.debug("计时器:Program ran %.2d:%.2d:%.2d" % (runningTime / 3600, (runningTime % 3600) / 60, runningTime % 60))
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
# Optionally send a completion notice when --email was supplied.
if opt.email != "none":
    run_cmd = listToString(sys.argv)
    sendEmail(opt.email, str(startTime), str(currentTime), run_cmd, outPath)
    logging.info("发送邮件通知到 %s" % opt.email)
# -----------------------------------------------------------------------------------
# -----------------------------------------------------------------------------------
| mit |
glaubitz/fs-uae-debian | launcher/fsgs/ui/qwindow.py | 2 | 1674 | import os, sys
from fsui.qt import QUrl, QLibraryInfo
from fsui.qt import QMainWindow, QWidget, Qt
from PyQt5.QtQuick import QQuickView
# to make sure cxFreeze includes it
import PyQt5.QtNetwork
import PyQt5.QtQml
from fsbc.application import app
class GameCenterView(QQuickView):
    # QQuickView that loads the game-center QML scene and closes itself when
    # the QML engine emits its quit signal.
    def __init__(self, parent=None):
        QQuickView.__init__(self, parent)
        # Frozen (cx_Freeze) builds ship qml/ next to the executable; a
        # development run uses a hard-coded checkout path instead.
        # NOTE(review): the dev path is specific to one machine layout.
        if getattr(sys, "frozen", ""):
            qml_path = os.path.join(app.executable_dir(), "qml")
        else:
            qml_path = os.path.expanduser("~/git/fs-uae/fs-uae-launcher/qml")
        engine = self.engine()
        # Debug output of the engine's QML search paths.
        print(engine.importPathList())
        print(engine.pluginPathList())
        # Commented-out experiments with import/plugin path configuration,
        # kept for reference:
        # engine.setPluginPathList([qml_path, "."])
        # engine.addPluginPath(qml_path)
        # engine.addImportPath(qml_path)
        # engine.setPluginPathList([qml_path, "."])
        # engine.setImportPathList([qml_path])
        # engine.addPluginPath(qml_path)
        # print("setImportPathList", [QUrl.fromLocalFile(qml_path)])
        self.setSource(
            QUrl.fromLocalFile(
                os.path.join(qml_path, "ScaledUserInterface.qml")
            )
        )
        # self.game_center_view = GameCenterView()
        # Qt.quit() from QML closes this window (see on_quit).
        self.engine().quit.connect(self.on_quit)
        self.resize(960, 540)
        # Commented-out experiments with hosting the view in a widget
        # container and focus handling, kept for reference:
        # self.game_center_widget = QWidget.createWindowContainer(
        #     self.game_center_view, parent=self)
        # self.resize(960, 540)
        # self.game_center_widget.setFocus()
        # self.game_center_widget.setFocusPolicy(Qt.TabFocus)
        # self.game_center_view.requestActivate()
        # self.setFocus()
    def on_quit(self):
        # Slot for the QML engine's quit signal.
        self.close()
| gpl-2.0 |
thehajime/ns-3-dev | src/visualizer/visualizer/plugins/show_last_packets.py | 182 | 9460 | import gobject
import gtk
import ns.core
import ns.network
import ns.visualizer
from visualizer.base import InformationWindow
from visualizer.higcontainer import HIGContainer
from kiwi.ui.objectlist import ObjectList, Column
class ShowLastPackets(InformationWindow):
    # Dialog listing the last transmitted/received/dropped packets of one
    # node, with a per-header capture filter panel on the right.
    class PacketList(gtk.ScrolledWindow):
        # Scrollable four-column table (time, interface, size, contents)
        # showing one packet sample per row.
        (
            COLUMN_TIME,
            COLUMN_INTERFACE,
            COLUMN_SIZE,
            COLUMN_CONTENTS,
        ) = range(4)
        def __init__(self):
            super(ShowLastPackets.PacketList, self).__init__()
            self.set_properties(hscrollbar_policy=gtk.POLICY_AUTOMATIC,
                                vscrollbar_policy=gtk.POLICY_AUTOMATIC)
            self.table_model = gtk.ListStore(*([str]*4))
            treeview = gtk.TreeView(self.table_model)
            treeview.show()
            self.add(treeview)
            def add_column(descr, colid):
                # One text column per model column.
                column = gtk.TreeViewColumn(descr, gtk.CellRendererText(), text=colid)
                treeview.append_column(column)
            add_column("Time", self.COLUMN_TIME)
            add_column("Interface", self.COLUMN_INTERFACE)
            add_column("Size", self.COLUMN_SIZE)
            add_column("Contents", self.COLUMN_CONTENTS)
        def update(self, node, packet_list):
            # Replace the table contents with the given packet samples.
            self.table_model.clear()
            for sample in packet_list:
                tree_iter = self.table_model.append()
                if sample.device is None:
                    interface_name = "(unknown)"
                else:
                    # Prefer the registered Names entry; fall back to the
                    # numeric interface index.
                    interface_name = ns.core.Names.FindName(sample.device)
                    if not interface_name:
                        interface_name = "(interface %i)" % sample.device.GetIfIndex()
                self.table_model.set(tree_iter,
                                     self.COLUMN_TIME, str(sample.time.GetSeconds()),
                                     self.COLUMN_INTERFACE, interface_name,
                                     self.COLUMN_SIZE, str(sample.packet.GetSize ()),
                                     self.COLUMN_CONTENTS, str(sample.packet)
                                     )
    def __init__(self, visualizer, node_index):
        # Build the dialog: three expandable packet lists on the left,
        # header-filter controls on the right; registers itself with the
        # visualizer so update() is called on each refresh.
        InformationWindow.__init__(self)
        self.win = gtk.Dialog(parent=visualizer.window,
                              flags=gtk.DIALOG_DESTROY_WITH_PARENT|gtk.DIALOG_NO_SEPARATOR,
                              buttons=(gtk.STOCK_CLOSE, gtk.RESPONSE_CLOSE))
        self.win.connect("response", self._response_cb)
        self.win.set_title("Last packets for node %i" % node_index)
        self.visualizer = visualizer
        self.viz_node = visualizer.get_node(node_index)
        self.node = ns.network.NodeList.GetNode(node_index)
        def smart_expand(expander, vbox):
            # Give an expanded section all the spare vertical space;
            # collapsed sections shrink back to their natural size.
            if expander.get_expanded():
                vbox.set_child_packing(expander, expand=True, fill=True, padding=0, pack_type=gtk.PACK_START)
            else:
                vbox.set_child_packing(expander, expand=False, fill=False, padding=0, pack_type=gtk.PACK_START)
        main_hbox = gtk.HBox(False, 4)
        main_hbox.show()
        main_vbox = gtk.VBox(False, 4)
        main_vbox.show()
        self.win.vbox.add(main_hbox)
        main_hbox.add(main_vbox)
        # Three collapsible packet lists: transmitted, received, dropped.
        self.tx_list = self.PacketList()
        self.tx_list.show()
        group = gtk.Expander("Last transmitted packets")
        group.show()
        group.add(self.tx_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)
        self.rx_list = self.PacketList()
        self.rx_list.show()
        group = gtk.Expander("Last received packets")
        group.show()
        group.add(self.rx_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)
        self.drop_list = self.PacketList()
        self.drop_list.show()
        group = gtk.Expander("Last dropped packets")
        group.show()
        group.add(self.drop_list)
        main_vbox.pack_start(group, expand=False, fill=False)
        group.connect_after("activate", smart_expand, main_vbox)
        # Packet Filter
        # - options
        self.packet_capture_options = ns.visualizer.PyViz.PacketCaptureOptions()
        self.packet_capture_options.numLastPackets = 100
        packet_filter_vbox = gtk.VBox(False, 4)
        packet_filter_vbox.show()
        main_hbox.add(packet_filter_vbox)
        sel_buttons_box = gtk.HButtonBox()
        sel_buttons_box.show()
        packet_filter_vbox.pack_start(sel_buttons_box, False, False, 4)
        select_all_button = gobject.new(gtk.Button, label="Sel. All", visible=True)
        select_none_button = gobject.new(gtk.Button, label="Sel. None", visible=True)
        sel_buttons_box.add(select_all_button)
        sel_buttons_box.add(select_none_button)
        # Checkbox list of all known Header/Trailer TypeIds.
        self.packet_filter_widget = ObjectList([
            Column('selected', title="Sel.", data_type=bool, editable=True),
            Column('name', title="Header"),
        ], sortable=True)
        self.packet_filter_widget.show()
        packet_filter_vbox.pack_start(self.packet_filter_widget, True, True, 4)
        class TypeIdConfig(object):
            # Row model for the filter list: header name, checkbox state,
            # and the underlying ns-3 TypeId.
            __slots__ = ['name', 'selected', 'typeid']
        self.packet_filter_list = [] # list of TypeIdConfig instances
        # Collect every registered TypeId that derives from ns3::Header or
        # ns3::Trailer (excluding the two abstract bases themselves).
        Header = ns.core.TypeId.LookupByName("ns3::Header")
        Trailer = ns.core.TypeId.LookupByName("ns3::Trailer")
        for typeid_i in range(ns.core.TypeId.GetRegisteredN()):
            typeid = ns.core.TypeId.GetRegistered(typeid_i)
            # check if this is a header or trailer subtype
            typeid_tmp = typeid
            type_is_good = False
            while 1:
                if typeid_tmp == Header or typeid_tmp == Trailer:
                    type_is_good = True
                    break
                if typeid_tmp.HasParent():
                    typeid_tmp = typeid_tmp.GetParent()
                else:
                    break
            if not type_is_good:
                continue
            if typeid in [Header, Trailer]:
                continue
            c = TypeIdConfig()
            c.selected = True
            c.name = typeid.GetName()
            c.typeid = typeid
            self.packet_filter_list.append(c)
        self.packet_filter_widget.add_list(self.packet_filter_list)
        def update_capture_options():
            # Push the current AND/OR mode and selected headers into the
            # simulation's capture options (under the simulation lock).
            if self.op_AND_button.props.active:
                self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_AND
            else:
                self.packet_capture_options.mode = ns.visualizer.PyViz.PACKET_CAPTURE_FILTER_HEADERS_OR
            self.packet_capture_options.numLastPackets = 100
            self.packet_capture_options.headers = [c.typeid for c in self.packet_filter_list if c.selected]
            self.visualizer.simulation.lock.acquire()
            try:
                self.visualizer.simulation.sim_helper.SetPacketCaptureOptions(
                    self.node.GetId(), self.packet_capture_options)
            finally:
                self.visualizer.simulation.lock.release()
        def sel_all_cb(bt):
            for c in self.packet_filter_list:
                c.selected = True
            self.packet_filter_widget.refresh()
            update_capture_options()
        def sel_none_cb(bt):
            for c in self.packet_filter_list:
                c.selected = False
            self.packet_filter_widget.refresh()
            update_capture_options()
        select_all_button.connect("clicked", sel_all_cb)
        select_none_button.connect("clicked", sel_none_cb)
        # AND/OR radio buttons controlling how multiple header filters combine.
        op_buttons_box = gtk.HButtonBox()
        op_buttons_box.show()
        packet_filter_vbox.pack_start(op_buttons_box, False, False, 4)
        self.op_AND_button = gobject.new(gtk.RadioButton, label="AND", visible=True)
        self.op_OR_button = gobject.new(gtk.RadioButton, label="OR", visible=True, group=self.op_AND_button)
        op_buttons_box.add(self.op_AND_button)
        op_buttons_box.add(self.op_OR_button)
        self.op_OR_button.props.active = True
        self.op_AND_button.connect("toggled", lambda b: update_capture_options())
        def cell_edited(l, obj, attribute):
            # Any checkbox toggle in the list re-applies the filter.
            update_capture_options()
        self.packet_filter_widget.connect("cell-edited", cell_edited)
        update_capture_options()
        self.visualizer.add_information_window(self)
        self.win.set_default_size(600, 300)
        self.win.show()
    def _response_cb(self, win, response):
        # Dialog close button: tear down and deregister from the visualizer.
        self.win.destroy()
        self.visualizer.remove_information_window(self)
    def update(self):
        # Periodic refresh hook called by the visualizer: re-fetch the last
        # captured packets for this node and repopulate the three lists.
        last_packets = self.visualizer.simulation.sim_helper.GetLastPackets(self.node.GetId())
        self.tx_list.update(self.node, last_packets.lastTransmittedPackets)
        self.rx_list.update(self.node, last_packets.lastReceivedPackets)
        self.drop_list.update(self.node, last_packets.lastDroppedPackets)
def populate_node_menu(viz, node, menu):
    """Append a 'Show Last Packets' entry to a node's context menu."""
    def _on_activate(_menu_item):
        # The dialog registers itself with the visualizer on construction.
        ShowLastPackets(viz, node.node_index)

    item = gtk.MenuItem("Show Last Packets")
    item.show()
    item.connect("activate", _on_activate)
    menu.add(item)
def register(viz):
    """Plugin entry point: hook the node context-menu populator into the visualizer."""
    viz.connect("populate-node-menu", populate_node_menu)
| gpl-2.0 |
yannickcr/CouchPotatoServer | libs/pyutil/nummedobj.py | 106 | 2141 | # Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# mailto:zooko@zooko.com
# This file is part of pyutil; see README.rst for licensing terms.
import dictutil
class NummedObj(object):
    """
    Mixin that gives every instance a per-class sequence number.

    Instead of telling objects of the same class apart by memory address,
    each one is numbered in creation order ("the first Foo", "the second
    Foo", ...).  Debug output therefore stays identical between program runs
    that create objects in the same order, so run-to-run diffs show real
    changes instead of address noise.
    """
    # Maps class name -> highest object number handed out so far.
    objnums = dictutil.NumDict()

    def __init__(self, klass=None):
        """
        @param klass: the class under which this object is counted; defaults
            to self.__class__ when `None'.
        """
        if klass is None:
            klass = self.__class__
        self._classname = klass.__name__
        NummedObj.objnums.inc(self._classname)
        self._objid = NummedObj.objnums[self._classname]

    def _key(self):
        # Ordering/equality identity: (object number, class name).
        return (self._objid, self._classname)

    def __repr__(self):
        return "<%s #%d>" % (self._classname, self._objid,)

    def __lt__(self, other):
        return self._key() < other._key()

    def __le__(self, other):
        return self._key() <= other._key()

    def __eq__(self, other):
        return self._key() == other._key()

    def __ne__(self, other):
        return self._key() != other._key()

    def __gt__(self, other):
        return self._key() > other._key()

    def __ge__(self, other):
        return self._key() >= other._key()

    def __hash__(self):
        # Identity hash, as in the original: note equality compares by
        # (number, class name), so this relies on live instances of one
        # class never sharing a number (guaranteed by inc() above).
        return id(self)
| gpl-3.0 |
xfournet/intellij-community | python/lib/Lib/site-packages/django/utils/numberformat.py | 290 | 1632 | from django.conf import settings
from django.utils.safestring import mark_safe
def format(number, decimal_sep, decimal_pos, grouping=0, thousand_sep=''):
    """
    Gets a number (as a number or string), and returns it as a string,
    using formats defined as arguments:

    * decimal_sep: Decimal separator symbol (for example ".")
    * decimal_pos: Number of decimal positions
    * grouping: Number of digits in every group limited by thousand separator
    * thousand_sep: Thousand separator symbol (for example ",")
    """
    # Grouping only applies when localization and the thousand-separator
    # setting are both enabled AND a nonzero group size was requested.
    use_grouping = settings.USE_L10N and \
        settings.USE_THOUSAND_SEPARATOR and grouping
    # Make the common case fast:
    if isinstance(number, int) and not use_grouping and not decimal_pos:
        return mark_safe(unicode(number))
    # sign
    # NOTE(review): float(number) can raise OverflowError for very large
    # Decimal inputs; the sign could be read from str_number instead.
    if float(number) < 0:
        sign = '-'
    else:
        sign = ''
    str_number = unicode(number)
    if str_number[0] == '-':
        str_number = str_number[1:]
    # decimal part (truncated, not rounded, to decimal_pos digits)
    if '.' in str_number:
        int_part, dec_part = str_number.split('.')
        if decimal_pos:
            dec_part = dec_part[:decimal_pos]
    else:
        int_part, dec_part = str_number, ''
    if decimal_pos:
        # Zero-pad the fraction up to the requested number of positions.
        dec_part = dec_part + ('0' * (decimal_pos - len(dec_part)))
    if dec_part: dec_part = decimal_sep + dec_part
    # grouping: walk the reversed integer part inserting the separator
    # after every `grouping' digits, then reverse back.
    if use_grouping:
        int_part_gd = ''
        for cnt, digit in enumerate(int_part[::-1]):
            if cnt and not cnt % grouping:
                int_part_gd += thousand_sep
            int_part_gd += digit
        int_part = int_part_gd[::-1]
    return sign + int_part + dec_part
| apache-2.0 |
rajexp/stepMuzic | allauth/socialaccount/providers/untappd/views.py | 6 | 1782 | import requests
from allauth.socialaccount.providers.oauth2.views import (OAuth2Adapter,
OAuth2LoginView,
OAuth2CallbackView)
from .client import UntappdOAuth2Client
from .provider import UntappdProvider
class UntappdOAuth2Adapter(OAuth2Adapter):
    """OAuth2 adapter wiring django-allauth to the Untappd v4 API."""
    provider_id = UntappdProvider.id
    access_token_url = 'https://untappd.com/oauth/authorize/'
    access_token_method = 'GET'
    authorize_url = 'https://untappd.com/oauth/authenticate/'
    user_info_url = 'https://api.untappd.com/v4/user/info/'
    # Untappd does not echo the OAuth2 ``state`` parameter back.
    supports_state = False

    def complete_login(self, request, app, token, **kwargs):
        """Fetch the user's profile from Untappd and build a SocialLogin."""
        response = requests.get(self.user_info_url,
                                params={'access_token': token.token})
        extra_data = response.json()
        # TODO: get and store the email from the user info json
        provider = self.get_provider()
        return provider.sociallogin_from_response(request, extra_data)
class UntappdOAuth2CallbackView(OAuth2CallbackView):
    """Custom OAuth2CallbackView that returns an UntappdOAuth2Client."""

    def get_client(self, request, app):
        """Wrap the stock client in UntappdOAuth2Client, copying its config."""
        base = super(UntappdOAuth2CallbackView, self).get_client(request,
                                                                 app)
        return UntappdOAuth2Client(
            base.request, base.consumer_key, base.consumer_secret,
            base.access_token_method, base.access_token_url,
            base.callback_url, base.scope)
# Hook the adapter into allauth's generic login / callback view factories.
oauth2_login = OAuth2LoginView.adapter_view(UntappdOAuth2Adapter)
oauth2_callback = UntappdOAuth2CallbackView.adapter_view(UntappdOAuth2Adapter)
| unlicense |
infobloxopen/infoblox-netmri | infoblox_netmri/api/broker/v3_6_0/spm_devices_default_grid_broker.py | 14 | 6283 | from ..broker import Broker
class SpmDevicesDefaultGridBroker(Broker):
    """Broker for the NetMRI ``spm_devices_default_grids`` controller.

    Provides read access to the Switch Port Management "devices" default
    grid. All requests are dispatched through the :class:`Broker` base
    class helpers.
    """

    controller = "spm_devices_default_grids"

    def index(self, **kwargs):
        """List the available spm devices default grids.

        Any of the inputs listed may be used to narrow the list; other
        inputs will be ignored. Of the various ways to query lists, using
        this method is most efficient.

        :param starttime: (DateTime, default today) Lower boundary for the
            returned grid data; omit for the most recently collected data.
        :param endtime: (DateTime, default tomorrow) Upper boundary for the
            returned grid data; omit for the most recently collected data.
        :param start: (Integer, default 0) First record number of the
            selected page of data.
        :param limit: (Integer, default 1000, max 10000) Page size; with
            100 records, ``limit=10`` and ``start=10`` yields records 10-19.
        :param sort: (Array of String, default id) Field(s) to sort by.
            Valid values include id, VirtualNetworkID, DeviceID, DeviceName,
            DeviceIPDotted, DeviceIPNumeric, Network, DeviceDNSName,
            TotalPorts, UsedTrunkPorts, UsedAccessPorts, FreePorts,
            FreePortsPercentage, AvailPorts, AvailPortsPercentage, PoEPorts,
            DeviceSysLocation, DeviceVendor, DeviceModel, PhysicalSerialNum,
            DeviceSysDescr, DeviceType, DeviceAssurance, FirstSeen, LastSeen,
            LastChanged, PollDuration, SwitchingInd.
        :param dir: (Array of String, default 'asc') Sort direction(s);
            'asc' or 'desc'.
        :param select: (Array) Attributes to return for each
            SpmDevicesDefaultGrid (same valid values as ``sort``); empty or
            omitted returns all attributes.
        :param goto_field: (String, api >= 2.8) Field name for NIOS GOTO
            used to locate a row position of records.
        :param goto_value: (String, api >= 2.8) Value of ``goto_field`` for
            NIOS GOTO used to locate a row position of records.
        :param refresh_ind: (Boolean, default False) If true, regenerate
            the grid instead of using cached grid data.
        :param async_ind: (Boolean, default False) If true and grid data is
            not yet ready, return immediately with HTTP 202; retry later.

        :return spm_devices_default_grids: Array of SpmDevicesDefaultGrid
            objects matching the input criteria.
        :return summary: Hash summarizing calculations on selected columns,
            when applicable.
        """
        return self.api_list_request(self._get_method_fullname("index"), kwargs)

    def data_partitions(self, **kwargs):
        """Return data partitions with their statuses for the specified grid.

        Status codes: 0 - data not available for that date, 1 - data
        available but must be prepared, 2 - data prepared and immediately
        available.
        """
        return self.api_request(self._get_method_fullname("data_partitions"), kwargs)
| apache-2.0 |
OpenWinCon/OpenWinNet | web-gui/myvenv/lib/python3.4/site-packages/django/template/defaultfilters.py | 51 | 28341 | """Default variable filters."""
from __future__ import unicode_literals
import random as random_module
import re
import warnings
from decimal import ROUND_HALF_UP, Context, Decimal, InvalidOperation
from functools import wraps
from pprint import pformat
from django.conf import settings
from django.template.base import Library, Variable, VariableDoesNotExist
from django.utils import formats, six
from django.utils.dateformat import format, time_format
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import force_text, iri_to_uri
from django.utils.html import (
avoid_wrapping, conditional_escape, escape, escapejs, linebreaks,
remove_tags, strip_tags, urlize as _urlize,
)
from django.utils.http import urlquote
from django.utils.safestring import SafeData, mark_for_escaping, mark_safe
from django.utils.text import (
Truncator, normalize_newlines, phone2numeric, slugify as _slugify, wrap,
)
from django.utils.timesince import timesince, timeuntil
from django.utils.translation import ugettext, ungettext
register = Library()
#######################
# STRING DECORATOR #
#######################
def stringfilter(func):
    """
    Decorator for filters which should only receive unicode objects. The object
    passed as the first positional argument will be converted to a unicode
    object.
    """
    def _dec(*args, **kwargs):
        if args:
            args = list(args)
            # Coerce the filtered value to text before calling the filter.
            args[0] = force_text(args[0])
            # If the input was already safe and the wrapped filter declares
            # is_safe, the text result stays safe.
            if (isinstance(args[0], SafeData) and
                    getattr(_dec._decorated_function, 'is_safe', False)):
                return mark_safe(func(*args, **kwargs))
        return func(*args, **kwargs)

    # Include a reference to the real function (used to check original
    # arguments by the template parser, and to bear the 'is_safe' attribute
    # when multiple decorators are applied).
    _dec._decorated_function = getattr(func, '_decorated_function', func)

    return wraps(func)(_dec)
###################
# STRINGS #
###################
@register.filter(is_safe=True)
@stringfilter
def addslashes(value):
    """
    Prefix backslashes, double quotes and single quotes with a backslash.
    Useful for escaping strings in CSV, for example. Less useful for escaping
    JavaScript; use the ``escapejs`` filter instead.
    """
    # Backslash must be escaped first so the added slashes survive.
    for char in ('\\', '"', "'"):
        value = value.replace(char, '\\' + char)
    return value
@register.filter(is_safe=True)
@stringfilter
def capfirst(value):
    """Capitalize the first character of the value; empty input is returned as-is."""
    if not value:
        return value
    return value[0].upper() + value[1:]
@register.filter("escapejs")
@stringfilter
def escapejs_filter(value):
    """Hex-encode characters so the value is safe inside JavaScript strings."""
    return escapejs(value)
# Values for testing floatformat input against infinity and NaN representations,
# which differ across platforms and Python versions. Some (i.e. old Windows
# ones) are not recognized by Decimal but we want to return them unchanged vs.
# returning an empty string as we do for completely invalid input. Note these
# need to be built up from values that are not inf/nan, since inf/nan values do
# not reload properly from .pyc files on Windows prior to some level of Python 2.5
# (see Python Issue757815 and Issue1080440).
pos_inf = 1e200 * 1e200  # overflows to float('inf')
neg_inf = -1e200 * 1e200  # float('-inf')
nan = (1e200 * 1e200) // (1e200 * 1e200)  # inf // inf -> float('nan')
special_floats = [str(pos_inf), str(neg_inf), str(nan)]
@register.filter(is_safe=True)
def floatformat(text, arg=-1):
    """
    Displays a float to a specified number of decimal places.

    If called without an argument, it displays the floating point number with
    one decimal place -- but only if there's a decimal place to be displayed:

    * num1 = 34.23234
    * num2 = 34.00000
    * num3 = 34.26000
    * {{ num1|floatformat }} displays "34.2"
    * {{ num2|floatformat }} displays "34"
    * {{ num3|floatformat }} displays "34.3"

    If arg is positive, it will always display exactly arg number of decimal
    places:

    * {{ num1|floatformat:3 }} displays "34.232"
    * {{ num2|floatformat:3 }} displays "34.000"
    * {{ num3|floatformat:3 }} displays "34.260"

    If arg is negative, it will display arg number of decimal places -- but
    only if there are places to be displayed:

    * {{ num1|floatformat:"-3" }} displays "34.232"
    * {{ num2|floatformat:"-3" }} displays "34"
    * {{ num3|floatformat:"-3" }} displays "34.260"

    If the input float is infinity or NaN, the (platform-dependent) string
    representation of that value will be displayed.
    """
    try:
        input_val = force_text(text)
        d = Decimal(input_val)
    except UnicodeEncodeError:
        return ''
    except InvalidOperation:
        # Not directly parseable as a Decimal: pass inf/nan spellings
        # through unchanged, otherwise retry via float().
        if input_val in special_floats:
            return input_val
        try:
            d = Decimal(force_text(float(text)))
        except (ValueError, InvalidOperation, TypeError, UnicodeEncodeError):
            return ''
    try:
        p = int(arg)
    except ValueError:
        return input_val  # invalid precision argument: fail silently
    try:
        # m is the (negated) fractional part; zero iff d is integral.
        m = int(d) - d
    except (ValueError, OverflowError, InvalidOperation):
        return input_val
    # Negative precision on an integral value: show no decimals at all.
    if not m and p < 0:
        return mark_safe(formats.number_format('%d' % (int(d)), 0))
    if p == 0:
        exp = Decimal(1)
    else:
        # Quantization step, e.g. p=2 -> Decimal('0.01').
        exp = Decimal('1.0') / (Decimal(10) ** abs(p))
    try:
        # Set the precision high enough to avoid an exception, see #15789.
        tupl = d.as_tuple()
        units = len(tupl[1]) - tupl[2]
        prec = abs(p) + units + 1

        # Avoid conversion to scientific notation by accessing `sign`, `digits`
        # and `exponent` from `Decimal.as_tuple()` directly.
        sign, digits, exponent = d.quantize(exp, ROUND_HALF_UP,
                                            Context(prec=prec)).as_tuple()
        digits = [six.text_type(digit) for digit in reversed(digits)]
        # Pad with zeros so the decimal point has digits on both sides.
        while len(digits) <= abs(exponent):
            digits.append('0')
        digits.insert(-exponent, '.')
        if sign:
            digits.append('-')
        number = ''.join(reversed(digits))
        return mark_safe(formats.number_format(number, abs(p)))
    except InvalidOperation:
        return input_val
@register.filter(is_safe=True)
@stringfilter
def iriencode(value):
    """Escape an IRI value so it can be embedded in a URL."""
    return force_text(iri_to_uri(value))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linenumbers(value, autoescape=True):
    """Display text with 1-based, zero-padded line numbers prefixed to each line."""
    lines = value.split('\n')
    # Zero-pad every number to the width of the largest line number.
    width = six.text_type(len(six.text_type(len(lines))))
    template = "%0" + width + "d. %s"
    if autoescape and not isinstance(value, SafeData):
        numbered = [template % (num, escape(line))
                    for num, line in enumerate(lines, 1)]
    else:
        numbered = [template % (num, line)
                    for num, line in enumerate(lines, 1)]
    return mark_safe('\n'.join(numbered))
@register.filter(is_safe=True)
@stringfilter
def lower(value):
    """Convert the string to all lowercase."""
    return value.lower()
@register.filter(is_safe=False)
@stringfilter
def make_list(value):
    """
    Turn the value into a list.

    For an integer, it's a list of digits (the value arrives stringified
    via ``stringfilter``). For a string, it's a list of characters.
    """
    return list(value)
@register.filter(is_safe=True)
@stringfilter
def slugify(value):
    """
    Convert to ASCII and spaces to hyphens; remove characters that aren't
    alphanumerics, underscores, or hyphens; lowercase; strip surrounding
    whitespace. Delegates to django.utils.text.slugify.
    """
    return _slugify(value)
@register.filter(is_safe=True)
def stringformat(value, arg):
    """
    Format the variable according to *arg*, a string formatting specifier.

    The specifier uses Python %-formatting syntax, except that the leading
    "%" is dropped. See
    http://docs.python.org/lib/typesseq-strings.html for documentation of
    Python string formatting.
    """
    spec = "%" + six.text_type(arg)
    try:
        return spec % value
    except (ValueError, TypeError):
        return ""
@register.filter(is_safe=True)
@stringfilter
def title(value):
    """
    Convert a string into titlecase, then lowercase letters that
    ``str.title`` wrongly uppercased: the letter after an apostrophe
    (e.g. "don'T" -> "don't") and a letter following a digit ("1St" -> "1st").
    """
    # Raw strings for the regexes: "\d" is an invalid escape sequence in
    # a plain string literal (SyntaxWarning in modern Python, future error).
    t = re.sub(r"([a-z])'([A-Z])", lambda m: m.group(0).lower(), value.title())
    return re.sub(r"\d([A-Z])", lambda m: m.group(0).lower(), t)
@register.filter(is_safe=True)
@stringfilter
def truncatechars(value, arg):
    """
    Truncate a string after *arg* characters.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Not an integer argument: leave the value untouched.
        return value
    return Truncator(value).chars(limit)
@register.filter(is_safe=True)
@stringfilter
def truncatechars_html(value, arg):
    """
    Truncate HTML after *arg* characters, keeping tags balanced.
    Newlines in the HTML are preserved.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Not an integer argument: leave the value untouched.
        return value
    return Truncator(value).chars(limit, html=True)
@register.filter(is_safe=True)
@stringfilter
def truncatewords(value, arg):
    """
    Truncate a string after *arg* words, appending " ..." when truncated.
    Newlines within the string are removed.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Not an integer argument: leave the value untouched.
        return value
    return Truncator(value).words(limit, truncate=' ...')
@register.filter(is_safe=True)
@stringfilter
def truncatewords_html(value, arg):
    """
    Truncate HTML after *arg* words, keeping tags balanced and appending
    " ..." when truncated. Newlines in the HTML are preserved.
    """
    try:
        limit = int(arg)
    except ValueError:
        # Not an integer argument: leave the value untouched.
        return value
    return Truncator(value).words(limit, html=True, truncate=' ...')
@register.filter(is_safe=False)
@stringfilter
def upper(value):
    """Convert the string to all uppercase."""
    return value.upper()
@register.filter(is_safe=False)
@stringfilter
def urlencode(value, safe=None):
    """
    Escape a value for use in a URL.

    The optional ``safe`` parameter determines the characters which should
    not be escaped by Django's ``urlquote`` method. If not provided, the
    default safe characters are used (pass an empty string to escape *all*
    characters).
    """
    if safe is None:
        return urlquote(value)
    return urlquote(value, safe=safe)
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlize(value, autoescape=True):
    """Convert URLs in plain text into clickable rel=nofollow links."""
    return mark_safe(_urlize(value, nofollow=True, autoescape=autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def urlizetrunc(value, limit, autoescape=True):
    """
    Convert URLs into clickable links, truncating the visible URL text to
    *limit* characters and adding rel=nofollow to discourage spamming.
    """
    return mark_safe(_urlize(value, trim_url_limit=int(limit), nofollow=True,
                             autoescape=autoescape))
@register.filter(is_safe=False)
@stringfilter
def wordcount(value):
    """Return the number of whitespace-separated words."""
    return len(value.split())
@register.filter(is_safe=True)
@stringfilter
def wordwrap(value, arg):
    """Wrap words at the line length given by *arg* (number of characters)."""
    return wrap(value, int(arg))
@register.filter(is_safe=True)
@stringfilter
def ljust(value, arg):
    """Left-align the value in a field of width *arg*."""
    return value.ljust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def rjust(value, arg):
    """Right-align the value in a field of width *arg*."""
    return value.rjust(int(arg))
@register.filter(is_safe=True)
@stringfilter
def center(value, arg):
    """Center the value in a field of width *arg*."""
    return value.center(int(arg))
@register.filter
@stringfilter
def cut(value, arg):
    """
    Remove every occurrence of *arg* from the given string.
    """
    was_safe = isinstance(value, SafeData)
    result = value.replace(arg, '')
    # Cutting ';' could mangle escaped entities (e.g. "&amp;" -> "&amp"),
    # so the result is only re-marked safe for any other substring.
    if was_safe and arg != ';':
        return mark_safe(result)
    return result
###################
# HTML STRINGS #
###################
@register.filter("escape", is_safe=True)
@stringfilter
def escape_filter(value):
    """Mark the value as a string that should be auto-escaped."""
    return mark_for_escaping(value)
@register.filter(is_safe=True)
@stringfilter
def force_escape(value):
    """
    Escape a string's HTML immediately, returning a new string with the
    escaped characters (unlike "escape", which only marks the content for
    later possible escaping).
    """
    return escape(value)
@register.filter("linebreaks", is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaks_filter(value, autoescape=True):
    """
    Replace line breaks in plain text with appropriate HTML: a single
    newline becomes an HTML line break (``<br />``) and a newline followed
    by a blank line becomes a paragraph break (``</p>``).
    """
    # Already-safe input must not be escaped a second time.
    autoescape = autoescape and not isinstance(value, SafeData)
    return mark_safe(linebreaks(value, autoescape))
@register.filter(is_safe=True, needs_autoescape=True)
@stringfilter
def linebreaksbr(value, autoescape=True):
    """
    Convert all newlines in a piece of plain text to HTML line breaks
    (``<br />``).
    """
    # Already-safe input must not be escaped a second time.
    autoescape = autoescape and not isinstance(value, SafeData)
    value = normalize_newlines(value)
    if autoescape:
        value = escape(value)
    return mark_safe(value.replace('\n', '<br />'))
@register.filter(is_safe=True)
@stringfilter
def safe(value):
    """Mark the value as a string that should not be auto-escaped."""
    return mark_safe(value)
@register.filter(is_safe=True)
def safeseq(value):
    """
    A "safe" filter for sequences: convert each element to text, mark it
    safe individually, and return the results as a list.
    """
    return [mark_safe(force_text(item)) for item in value]
@register.filter(is_safe=True)
@stringfilter
def removetags(value, tags):
    """Remove a space-separated list of [X]HTML tags from the output."""
    return remove_tags(value, tags)
@register.filter(is_safe=True)
@stringfilter
def striptags(value):
    """Strip all [X]HTML tags."""
    return strip_tags(value)
###################
# LISTS #
###################
@register.filter(is_safe=False)
def dictsort(value, arg):
    """
    Take a list of dicts and return it sorted by the property named in *arg*
    (resolved with template-variable semantics, so dotted paths work).
    """
    try:
        return sorted(value, key=Variable(arg).resolve)
    except (TypeError, VariableDoesNotExist):
        # Unsortable input or unresolvable key: render nothing.
        return ''
@register.filter(is_safe=False)
def dictsortreversed(value, arg):
    """
    Take a list of dicts and return it sorted in reverse order by the
    property named in *arg* (resolved with template-variable semantics).
    """
    try:
        return sorted(value, key=Variable(arg).resolve, reverse=True)
    except (TypeError, VariableDoesNotExist):
        # Unsortable input or unresolvable key: render nothing.
        return ''
@register.filter(is_safe=False)
def first(value):
    """Return the first item in a list, or '' when it is empty."""
    try:
        return value[0]
    except IndexError:
        return ''
@register.filter(is_safe=True, needs_autoescape=True)
def join(value, arg, autoescape=True):
    """
    Join a list with a string, like Python's ``str.join(list)``.
    """
    items = map(force_text, value)
    if autoescape:
        items = [conditional_escape(item) for item in items]
    try:
        joined = conditional_escape(arg).join(items)
    except AttributeError:
        # arg is not string-like: fail silently but nicely.
        return items
    return mark_safe(joined)
@register.filter(is_safe=True)
def last(value):
    """Return the last item in a list, or '' when it is empty."""
    try:
        return value[-1]
    except IndexError:
        return ''
@register.filter(is_safe=False)
def length(value):
    """Return the length of the value — useful for lists; 0 for unsized input."""
    try:
        return len(value)
    except (ValueError, TypeError):
        return 0
@register.filter(is_safe=False)
def length_is(value, arg):
    """Return whether the value's length equals *arg*; '' for invalid input."""
    try:
        return len(value) == int(arg)
    except (ValueError, TypeError):
        return ''
@register.filter(is_safe=True)
def random(value):
    """Return a random item from the list."""
    return random_module.choice(value)
@register.filter("slice", is_safe=True)
def slice_filter(value, arg):
    """
    Return a slice of the list.

    Uses the same colon-separated syntax as Python's list slicing; see
    http://www.diveintopython3.net/native-datatypes.html#slicinglists
    for an introduction.
    """
    try:
        # '' between colons means "not given", i.e. None.
        parts = [int(piece) if piece else None for piece in arg.split(':')]
        return value[slice(*parts)]
    except (ValueError, TypeError):
        return value  # Fail silently.
@register.filter(is_safe=True, needs_autoescape=True)
def unordered_list(value, autoescape=True):
    """
    Recursively takes a self-nested list and returns an HTML unordered list --
    WITHOUT opening and closing <ul> tags.

    The list is assumed to be in the proper format. For example, if ``var``
    contains: ``['States', ['Kansas', ['Lawrence', 'Topeka'], 'Illinois']]``,
    then ``{{ var|unordered_list }}`` would return::

        <li>States
        <ul>
                <li>Kansas
                <ul>
                        <li>Lawrence</li>
                        <li>Topeka</li>
                </ul>
                </li>
                <li>Illinois</li>
        </ul>
        </li>
    """
    if autoescape:
        escaper = conditional_escape
    else:
        escaper = lambda x: x

    def convert_old_style_list(list_):
        """
        Converts old style lists to the new easier to understand format.

        The old list format looked like:
            ['Item 1', [['Item 1.1', []], ['Item 1.2', []]]

        And it is converted to:
            ['Item 1', ['Item 1.1', 'Item 1.2]]

        Returns a (converted_list, was_old_style) pair so callers can warn.
        """
        if not isinstance(list_, (tuple, list)) or len(list_) != 2:
            return list_, False
        first_item, second_item = list_
        if second_item == []:
            return [first_item], True
        try:
            # see if second item is iterable
            iter(second_item)
        except TypeError:
            return list_, False
        old_style_list = True
        new_second_item = []
        for sublist in second_item:
            item, old_style_list = convert_old_style_list(sublist)
            if not old_style_list:
                break
            new_second_item.extend(item)
        if old_style_list:
            second_item = new_second_item
        return [first_item, second_item], old_style_list

    def walk_items(item_list):
        # Yields (item, children) pairs: a non-string iterable following an
        # item is treated as that item's nested child list.
        item_iterator = iter(item_list)
        for item in item_iterator:
            try:
                next_item = next(item_iterator)
            except StopIteration:
                next_item = None
            if not isinstance(next_item, six.string_types):
                try:
                    iter(next_item)
                except TypeError:
                    pass
                else:
                    yield item, next_item
                    continue
            # next_item was a plain sibling, not a child list: emit both.
            yield item, None
            if next_item:
                yield next_item, None

    def list_formatter(item_list, tabs=1):
        # Renders one nesting level; `tabs` controls the indentation depth.
        indent = '\t' * tabs
        output = []
        for item, children in walk_items(item_list):
            sublist = ''
            if children:
                sublist = '\n%s<ul>\n%s\n%s</ul>\n%s' % (
                    indent, list_formatter(children, tabs + 1), indent, indent)
            output.append('%s<li>%s%s</li>' % (
                indent, escaper(force_text(item)), sublist))
        return '\n'.join(output)

    value, converted = convert_old_style_list(value)
    if converted:
        warnings.warn(
            "The old style syntax in `unordered_list` is deprecated and will "
            "be removed in Django 2.0. Use the the new format instead.",
            RemovedInDjango20Warning)
    return mark_safe(list_formatter(value))
###################
# INTEGERS #
###################
@register.filter(is_safe=False)
def add(value, arg):
    """Add *arg* to *value*: numerically when both coerce to int, else via ``+``."""
    try:
        return int(value) + int(arg)
    except (ValueError, TypeError):
        # Fall back to the objects' own + (string/list concatenation, ...).
        try:
            return value + arg
        except Exception:
            return ''
@register.filter(is_safe=False)
def get_digit(value, arg):
    """
    Given a whole number, return the requested digit, where 1 is the
    right-most digit, 2 the second-right-most, etc. Return the original
    value for invalid input (non-integer value or argument, or argument
    less than 1); otherwise the output is always an integer.
    """
    try:
        arg = int(arg)
        value = int(value)
    except ValueError:
        return value  # Fail silently for an invalid argument
    if arg < 1:
        return value
    try:
        return int(str(value)[-arg])
    except IndexError:
        # Asking for a digit beyond the number's length yields 0.
        return 0
###################
# DATES #
###################
@register.filter(expects_localtime=True, is_safe=False)
def date(value, arg=None):
    """Format a date according to the given format (default: settings.DATE_FORMAT)."""
    if value in (None, ''):
        return ''
    if arg is None:
        arg = settings.DATE_FORMAT
    try:
        return formats.date_format(value, arg)
    except AttributeError:
        # Not a recognized named format: try it as a raw format string.
        try:
            return format(value, arg)
        except AttributeError:
            return ''
@register.filter(expects_localtime=True, is_safe=False)
def time(value, arg=None):
    """Format a time according to the given format (default: settings.TIME_FORMAT)."""
    if value in (None, ''):
        return ''
    if arg is None:
        arg = settings.TIME_FORMAT
    try:
        return formats.time_format(value, arg)
    except AttributeError:
        # Not a recognized named format: try it as a raw format string.
        try:
            return time_format(value, arg)
        except AttributeError:
            return ''
@register.filter("timesince", is_safe=False)
def timesince_filter(value, arg=None):
    """Format a date as the time since that date (i.e. "4 days, 6 hours")."""
    if not value:
        return ''
    try:
        # With an argument, measure relative to that date instead of now.
        return timesince(value, arg) if arg else timesince(value)
    except (ValueError, TypeError):
        return ''
@register.filter("timeuntil", is_safe=False)
def timeuntil_filter(value, arg=None):
    """Format a date as the time until that date (i.e. "4 days, 6 hours")."""
    if not value:
        return ''
    try:
        return timeuntil(value, arg)
    except (ValueError, TypeError):
        return ''
###################
# LOGIC #
###################
@register.filter(is_safe=False)
def default(value, arg):
    """Use the given default when the value is falsy."""
    return value or arg
@register.filter(is_safe=False)
def default_if_none(value, arg):
    """Use the given default only when the value is None."""
    return arg if value is None else value
@register.filter(is_safe=False)
def divisibleby(value, arg):
    """Return True when the value is divisible by the argument."""
    return int(value) % int(arg) == 0
@register.filter(is_safe=False)
def yesno(value, arg=None):
    """
    Given a comma-separated mapping of strings for true, false and
    (optionally) None, return the one matching the value:

    ==========  ======================  ==================================
    Value       Argument                Outputs
    ==========  ======================  ==================================
    ``True``    ``"yeah,no,maybe"``     ``yeah``
    ``False``   ``"yeah,no,maybe"``     ``no``
    ``None``    ``"yeah,no,maybe"``     ``maybe``
    ``None``    ``"yeah,no"``           ``"no"`` (converts None to False
                                        if no mapping for None is given)
    ==========  ======================  ==================================
    """
    if arg is None:
        arg = ugettext('yes,no,maybe')
    mapping = arg.split(',')
    if len(mapping) < 2:
        return value  # Invalid arg: pass the value through.
    if len(mapping) == 3:
        yes, no, maybe = mapping
    else:
        # No distinct "maybe" (or too many parts): treat None like False.
        yes, no, maybe = mapping[0], mapping[1], mapping[1]
    if value is None:
        return maybe
    return yes if value else no
###################
# MISC #
###################
@register.filter(is_safe=True)
def filesizeformat(bytes):
    """
    Format the value like a 'human-readable' file size (i.e. 13 KB, 4.1 MB,
    102 bytes, etc).
    """
    try:
        bytes = float(bytes)
    except (TypeError, ValueError, UnicodeDecodeError):
        # Non-numeric input: render "0 bytes" instead of raising.
        value = ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
        return avoid_wrapping(value)

    def scaled(number):
        # One decimal place, locale-aware.
        return formats.number_format(round(number, 1), 1)

    KB = 1 << 10
    MB = 1 << 20
    GB = 1 << 30
    TB = 1 << 40
    PB = 1 << 50

    if bytes < KB:
        value = ungettext("%(size)d byte", "%(size)d bytes", bytes) % {'size': bytes}
    elif bytes < MB:
        value = ugettext("%s KB") % scaled(bytes / KB)
    elif bytes < GB:
        value = ugettext("%s MB") % scaled(bytes / MB)
    elif bytes < TB:
        value = ugettext("%s GB") % scaled(bytes / GB)
    elif bytes < PB:
        value = ugettext("%s TB") % scaled(bytes / TB)
    else:
        value = ugettext("%s PB") % scaled(bytes / PB)

    # Non-breaking spaces keep number and unit on one line.
    return avoid_wrapping(value)
@register.filter(is_safe=False)
def pluralize(value, arg='s'):
    """
    Return a plural suffix if the value is not 1. By default, 's' is used:

    * 0 -> vote{{ value|pluralize }} displays "0 votes".
    * 1 -> "1 vote"; 2 -> "2 votes".

    A plain argument replaces the suffix: {{ value|pluralize:"es" }} gives
    "class"/"classes". An argument containing a comma supplies both forms:
    {{ value|pluralize:"y,ies" }} gives "candy"/"candies".
    """
    if ',' not in arg:
        # Single suffix given: singular form has no suffix at all.
        arg = ',' + arg
    suffixes = arg.split(',')
    if len(suffixes) > 2:
        return ''  # Malformed argument.
    singular_suffix, plural_suffix = suffixes[:2]

    try:
        return plural_suffix if float(value) != 1 else singular_suffix
    except ValueError:
        # Non-numeric string: fall through to singular.
        pass
    except TypeError:
        # Not a string or number — maybe a sized object (list, queryset).
        try:
            return plural_suffix if len(value) != 1 else singular_suffix
        except TypeError:
            # len() of unsized object.
            pass
    return singular_suffix
@register.filter("phone2numeric", is_safe=True)
def phone2numeric_filter(value):
    """Convert a phone number with letters into its numeric equivalent."""
    return phone2numeric(value)
@register.filter(is_safe=True)
def pprint(value):
    """A wrapper around pprint.pformat -- for debugging, really."""
    try:
        return pformat(value)
    except Exception as exc:
        return "Error in formatting: %s: %s" % (exc.__class__.__name__, force_text(exc, errors="replace"))
| apache-2.0 |
RaghavPro/Runescape-Hiscores | hiscores/models.py | 1 | 6638 | from django.utils import timezone
from django.db import models
from django.db import connection
from django.conf import settings
from utils import skill_names
class Skills(models.Model):
    """
    One row per player. For every skill the model stores both the raw
    experience (``<skill>_exp``) and the derived level (``<skill>``).
    ``user_name`` is the primary key; ``creation_time`` is used as the
    tie-breaker when ranking players with equal experience.
    """
    # overall aggregates across all skills
    overall_exp = models.FloatField(default=0)
    overall = models.IntegerField(default=1)
    # combat skills
    attack_exp = models.FloatField(default=0)
    attack = models.IntegerField(default=1)
    defence_exp = models.FloatField(default=0)
    defence = models.IntegerField(default=1)
    strength_exp = models.FloatField(default=0)
    strength = models.IntegerField(default=1)
    constitution_exp = models.FloatField(default=0)
    constitution = models.IntegerField(default=1)
    ranged_exp = models.FloatField(default=0)
    ranged = models.IntegerField(default=1)
    prayer_exp = models.FloatField(default=0)
    prayer = models.IntegerField(default=1)
    magic_exp = models.FloatField(default=0)
    magic = models.IntegerField(default=1)
    # gathering / artisan skills
    cooking_exp = models.FloatField(default=0)
    cooking = models.IntegerField(default=1)
    woodcutting_exp = models.FloatField(default=0)
    woodcutting = models.IntegerField(default=1)
    fletching_exp = models.FloatField(default=0)
    fletching = models.IntegerField(default=1)
    fishing_exp = models.FloatField(default=0)
    fishing = models.IntegerField(default=1)
    firemaking_exp = models.FloatField(default=0)
    firemaking = models.IntegerField(default=1)
    crafting_exp = models.FloatField(default=0)
    crafting = models.IntegerField(default=1)
    smithing_exp = models.FloatField(default=0)
    smithing = models.IntegerField(default=1)
    mining_exp = models.FloatField(default=0)
    mining = models.IntegerField(default=1)
    herblore_exp = models.FloatField(default=0)
    herblore = models.IntegerField(default=1)
    agility_exp = models.FloatField(default=0)
    agility = models.IntegerField(default=1)
    thieving_exp = models.FloatField(default=0)
    thieving = models.IntegerField(default=1)
    slayer_exp = models.FloatField(default=0)
    slayer = models.IntegerField(default=1)
    farming_exp = models.FloatField(default=0)
    farming = models.IntegerField(default=1)
    runecrafting_exp = models.FloatField(default=0)
    runecrafting = models.IntegerField(default=1)
    hunter_exp = models.FloatField(default=0)
    hunter = models.IntegerField(default=1)
    construction_exp = models.FloatField(default=0)
    construction = models.IntegerField(default=1)
    summoning_exp = models.FloatField(default=0)
    summoning = models.IntegerField(default=1)
    dungeoneering_exp = models.FloatField(default=0)
    dungeoneering = models.IntegerField(default=1)
    # identity / bookkeeping
    user_name = models.CharField(max_length=30, primary_key=True)
    creation_time = models.DateTimeField(default=timezone.now)
    def get_skills(self):
        """
        Get a player's stats in a list by using Model's '_meta' field.
        Using row_number() to get level_rank of a player in a particular skill
        :return: a list of dictionaries containing skill, rank, level and exp for each skill
        """
        player_stats = []
        cursor = connection.cursor()
        db_name = settings.DATABASES['default']['NAME']  # I'm not sure where to put this actually.
        table_name = type(self).__name__
        # One ranking query per skill (len(skill_names) round-trips).
        for skill in skill_names:
            level_name = self._meta.get_field(skill).name
            exp_name = self._meta.get_field(skill + '_exp').name
            # Rank = row position when ordering by experience (ties broken by
            # account creation time).
            # NOTE(review): db_name/table_name/exp_name are interpolated into
            # the SQL rather than bound as parameters -- safe only because all
            # three come from trusted settings/model metadata, never from
            # user input. The table is assumed to be named "<db>_<ClassName>";
            # confirm against the actual schema.
            subquery = "select row_number() OVER(ORDER BY " + exp_name + " DESC, creation_time) as rank,user_name from %s_%s" % (db_name, table_name)
            query = "select row.rank from (" + subquery + ") as row where row.user_name=%s"
            cursor.execute(query, [self.user_name])
            level_rank = int(cursor.fetchone()[0])
            level = getattr(self, level_name)
            exp = getattr(self, exp_name)
            player_stats.append({
                'name': skill.title(),
                'rank': level_rank,
                'level': level,
                'exp': int(exp),
            })
        return player_stats
    def compare_skills(self, player2):
        """
        Does pretty much same what the above get_skills() does but for both the players at the cost of just one loop.
        Getting a rank using row_number() is a very costly operation. Instead of calling get_skills() twice it's a lot
        efficient to get level_rank of both the players in just one query by providing both usernames in where clause.
        :param player2: the other Skills instance to compare against
        :return: Two lists each containing skill, rank, level and exp in a dictionary for each skill of both players
        """
        player1_stats, player2_stats = [], []
        cursor = connection.cursor()
        db_name = settings.DATABASES['default']['NAME']  # I'm not sure where to put this actually.
        table_name = type(self).__name__
        for skill in skill_names:
            level_name = self._meta.get_field(skill).name
            exp_name = self._meta.get_field(skill + '_exp').name
            # Same ranking subquery as get_skills(), but the outer WHERE
            # matches both usernames so one query serves both players.
            subquery = "select row_number() OVER(ORDER BY " + exp_name + " DESC, creation_time) as rank,user_name from %s_%s" % (db_name, table_name)
            query = "select row.rank, row.user_name from (" + subquery + ") as row where row.user_name=%s or row.user_name=%s"
            cursor.execute(query, [self.user_name, player2.user_name])
            results = cursor.fetchall()
            # Our query returns [('rank', 'user_name'), ('rank', 'user_name')]
            # We identify user's rank by checking returned user_name with object's user_name field.
            # NOTE(review): assumes exactly two rows come back; a missing
            # player2 row would raise IndexError -- confirm callers guarantee
            # both usernames exist.
            if results[0][1] == self.user_name:
                player1_level_level_rank, player2_level_level_rank = int(results[0][0]), int(results[1][0])
            else:
                player2_level_level_rank, player1_level_level_rank = int(results[0][0]), int(results[1][0])
            player1_level = getattr(self, level_name)
            player1_exp = getattr(self, exp_name)
            player1_stats.append({
                'name': skill.title(),
                'rank': player1_level_level_rank,
                'level': player1_level,
                'exp': int(player1_exp),
            })
            player2_level = getattr(player2, level_name)
            player2_exp = getattr(player2, exp_name)
            player2_stats.append({
                'name': skill.title(),
                'rank': player2_level_level_rank,
                'level': player2_level,
                'exp': int(player2_exp),
            })
        return player1_stats, player2_stats
    def __str__(self):
        # Display players by their (primary-key) user name.
        return self.user_name
| gpl-2.0 |
jk1/intellij-community | python/lib/Lib/pwd.py | 93 | 2552 | """
This module provides access to the Unix password database.
Password database entries are reported as 7-tuples containing the
following items from the password database (see `<pwd.h>'), in order:
pw_name, pw_passwd, pw_uid, pw_gid, pw_gecos, pw_dir, pw_shell. The
uid and gid items are integers, all others are strings. An exception
is raised if the entry asked for cannot be found.
"""
__all__ = ['getpwuid', 'getpwnam', 'getpwall']
from os import _name, _posix_impl
from org.python.core.Py import newString
if _name == 'nt':
raise ImportError, 'pwd module not supported on Windows'
class struct_passwd(tuple):
    """
    pwd.struct_passwd: Results from getpw*() routines.

    Behaves both as the 7-tuple
    (pw_name,pw_passwd,pw_uid,pw_gid,pw_gecos,pw_dir,pw_shell)
    and as an object exposing those names as attributes.
    """
    # Attribute names, in the same order as the tuple slots.
    attrs = ['pw_name', 'pw_passwd', 'pw_uid', 'pw_gid', 'pw_gecos',
             'pw_dir', 'pw_shell']
    def __new__(cls, pwd):
        # Unpack the Java password entry into the canonical 7-tuple layout.
        fields = (newString(pwd.loginName), newString(pwd.password),
                  int(pwd.UID), int(pwd.GID), newString(pwd.GECOS),
                  newString(pwd.home), newString(pwd.shell))
        return tuple.__new__(cls, fields)
    def __getattr__(self, attr):
        # Map attribute access onto the corresponding tuple slot.
        if attr in self.attrs:
            return self[self.attrs.index(attr)]
        raise AttributeError
def getpwuid(uid):
    """
    getpwuid(uid) -> (pw_name,pw_passwd,pw_uid,
                      pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given numeric user ID.
    See pwd.__doc__ for more on password database entries.
    """
    record = _posix_impl.getpwuid(uid)
    if record:
        return struct_passwd(record)
    # No entry for this uid in the password database.
    raise KeyError(uid)
def getpwnam(name):
    """
    getpwnam(name) -> (pw_name,pw_passwd,pw_uid,
                       pw_gid,pw_gecos,pw_dir,pw_shell)
    Return the password database entry for the given user name.
    See pwd.__doc__ for more on password database entries.
    """
    record = _posix_impl.getpwnam(name)
    if record:
        return struct_passwd(record)
    # No entry for this login name in the password database.
    raise KeyError(name)
def getpwall():
    """
    getpwall() -> list_of_entries
    Return a list of all available password database entries,
    in arbitrary order.
    See pwd.__doc__ for more on password database entries.
    """
    entries = []
    # _posix_impl.getpwent() iterates the database; a falsy result marks the end.
    record = _posix_impl.getpwent()
    while record:
        entries.append(struct_passwd(record))
        record = _posix_impl.getpwent()
    return entries
| apache-2.0 |
paulmartel/voltdb | lib/python/vdm/server/Configuration.py | 1 | 56212 | # This file is part of VoltDB.
# Copyright (C) 2008-2016 VoltDB Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with VoltDB. If not, see <http://www.gnu.org/licenses/>.
import os
from collections import defaultdict
import json
import traceback
from xml.etree.ElementTree import Element, SubElement, tostring, XML
import sys
from flask import jsonify
import HTTPListener
import DeploymentConfig
from Validation import ServerInputs, DatabaseInputs, JsonInputs, UserInputs, ConfigValidation
from logging.handlers import RotatingFileHandler
import logging
import ast
import itertools
def convert_xml_to_json(config_path):
    """
    Method to get the json content from xml file.

    Parses the voltdeploy configuration XML at ``config_path`` and loads
    servers, databases, deployments and deployment users into the
    HTTPListener.Global in-memory maps. No validation is performed here
    (see validate_and_convert_xml_to_json for the validating variant).
    :param config_path (string): path of xml file
    """
    with open(config_path) as config_file:
        xml = config_file.read()
    config_content = XML(xml)
    xml_final = etree_to_dict(config_content)
    D2 = {}
    for (k, v) in zip(xml_final.keys(), xml_final.values()):
        D2[k] = v
    # NOTE: the code below reuses the loop variable `k` after the loop ends;
    # an XML document has a single root element, so D2 holds exactly one key.
    # To get the list of servers in case of old members[] (for backward compatible)
    if 'members' in D2[k] and 'member' in D2[k]['members'] and D2[k]['members']['member']:
        # etree_to_dict yields a dict for a single <member> and a list for many.
        if type(D2[k]['members']['member']) is dict:
            member_json = get_field_from_xml(D2[k]['members']['member'], 'dict')
            HTTPListener.Global.SERVERS[member_json[0]['id']] = member_json[0]
        else:
            member_json = get_field_from_xml(D2[k]['members']['member'], 'list')
            for member in member_json:
                HTTPListener.Global.SERVERS[member['id']] = member
    # Databases: same single-vs-many handling, keyed by database id.
    if type(D2[k]['databases']['database']) is dict:
        db_json = get_field_from_xml(D2[k]['databases']['database'],
                                     'dict', 'database')
        HTTPListener.Global.DATABASES[db_json[0]['id']] = db_json[0]
    else:
        db_json = get_field_from_xml(D2[k]['databases']['database'],
                                     'list', 'database')
        for database in db_json:
            HTTPListener.Global.DATABASES[database['id']] = database
    # Deployments, keyed by the database they belong to.
    if type(D2[k]['deployments']['deployment']) is dict:
        deployment_json = get_deployment_from_xml(D2[k]['deployments']
                                                  ['deployment'], 'dict')
        HTTPListener.Global.DEPLOYMENT[deployment_json[0]['databaseid']] = deployment_json[0]
    else:
        deployment_json = get_deployment_from_xml(D2[k]['deployments']
                                                  ['deployment'], 'list')
        for deployment in deployment_json:
            HTTPListener.Global.DEPLOYMENT[deployment['databaseid']] = deployment
    # Deployment users live under each deployment's <users> element.
    if D2[k]['deployments'] and 'deployment' in D2[k]['deployments']:
        if type(D2[k]['deployments']['deployment']) is dict:
            set_deployment_users(D2[k]['deployments']['deployment'])
        else:
            for deployment in D2[k]['deployments']['deployment']:
                set_deployment_users(deployment)
def set_deployment_users(deployment):
    """
    Load the users of one deployment into the in-memory user map
    (HTTPListener.Global.DEPLOYMENT_USERS), keyed by integer userid.
    :param deployment: a single deployment object parsed from XML
    """
    if 'users' not in deployment or deployment['users'] is None \
            or 'user' not in deployment['users']:
        return
    # NOTE(review): this inspects type(deployment), not the type of the nested
    # user entry -- presumably deployment is always a dict here, which would
    # make the 'list' branch unreachable; confirm against get_users_from_xml.
    shape = 'dict' if type(deployment) is dict else 'list'
    for user in get_users_from_xml(deployment, shape):
        HTTPListener.Global.DEPLOYMENT_USERS[int(user['userid'])] = user
def validate_and_convert_xml_to_json(config_path):
    """
    Validate the configuration xml file and, when valid, load its databases,
    servers and deployments into the HTTPListener.Global in-memory maps.
    Validation failures are written to the voltdeploy log.
    :param config_path (string): path of xml file
    """
    log_file = os.path.join(HTTPListener.Global.DATA_PATH, 'voltdeploy.log')
    handler = RotatingFileHandler(log_file)
    handler.setFormatter(logging.Formatter(
        "%(asctime)s|%(levelname)s|%(message)s"))
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.NOTSET)
    log.addHandler(handler)
    try:
        with open(config_path) as config_file:
            xml = config_file.read()
        config_content = XML(xml)
        xml_final = etree_to_dict(config_content)
        D2 = {}
        for (k, v) in zip(xml_final.keys(), xml_final.values()):
            D2[k] = v
        # An XML document has a single root, so `k` below is that root's key.
        populate_database(D2[k]['databases']['database'], log)
        if 'members' in D2[k] and 'member' in D2[k]['members'] and D2[k]['members']['member']:
            # BUG FIX: populate_server() already handles both a single member
            # (dict) and several members (list); the previous "is dict" guard
            # silently skipped server population whenever more than one
            # member was present in the file.
            populate_server(D2[k]['members']['member'], D2[k]['databases']['database'], log)
        populate_deployment(D2[k]['deployments']['deployment'], log)
    except Exception as err:
        # Log the actual failure so a bad configuration can be diagnosed.
        log.error("Error while reloading configuration: %s", str(err))
def populate_database(databases, log):
    """
    Validate database definitions parsed from XML and, when everything is
    valid, replace the in-memory database map (HTTPListener.Global.DATABASES).
    :param databases: raw database content (dict for one database, list for many)
    :param log: logger that receives validation failures
    """
    error_fmt = "Error while reloading configuration: %s"
    if type(databases) is dict:
        parsed = get_database_from_xml(databases, 'dict', log, 'database')
        request = HTTPListener.DictClass()
        request.json = {}
        request.json = parsed[0]
        checker = DatabaseInputs(request)
        valid = True
        if not checker.validate():
            valid = False
            sys.stdout.write(str(checker.errors))
            log.error(error_fmt, str(checker.errors))
        if valid is True:
            HTTPListener.Global.DATABASES = {parsed[0]['id']: parsed[0]}
    else:
        parsed = get_database_from_xml(databases, 'list', log, 'database')
        valid = True
        duplicate_error = check_duplicate_database(parsed)
        if duplicate_error != "":
            valid = False
            log.error(error_fmt, duplicate_error)
        else:
            # Names are unique; now validate each database individually.
            for database in parsed:
                request = HTTPListener.DictClass()
                request.json = {}
                request.json = database
                checker = DatabaseInputs(request)
                if not checker.validate():
                    valid = False
                    sys.stdout.write(str(checker.errors))
                    log.error(error_fmt, str(checker.errors))
        if valid is True:
            HTTPListener.Global.DATABASES = {}
            for database in parsed:
                HTTPListener.Global.DATABASES[database['id']] = database
def populate_server(servers, databases, log):
    """
    Validate server definitions parsed from XML and, when everything is
    valid, replace the in-memory server map (HTTPListener.Global.SERVERS).
    :param servers: raw member content (dict for one server, list for many)
    :param databases: raw database content, used for cross-server port checks
    :param log: logger that receives validation failures
    """
    error_fmt = "Error while reloading configuration: %s"
    if type(servers) is dict:
        parsed = get_field_from_xml(servers, 'dict')
        request = HTTPListener.DictClass()
        request.json = {}
        request.json = parsed[0]
        checker = ServerInputs(request)
        valid = True
        if not checker.validate():
            valid = False
            sys.stdout.write(str(checker.errors))
            log.error(error_fmt, str(checker.errors))
        else:
            # Port collision checks only run once field validation passed.
            port_error = validate_server_ports_dict(parsed[0], databases, True)
            if port_error is not None:
                valid = False
                log.error(error_fmt, port_error)
        if valid is True:
            HTTPListener.Global.SERVERS = {parsed[0]['id']: parsed[0]}
    else:
        parsed = get_field_from_xml(servers, 'list')
        valid = True
        for member in parsed:
            request = HTTPListener.DictClass()
            request.json = {}
            request.json = member
            checker = ServerInputs(request)
            if not checker.validate():
                valid = False
                sys.stdout.write(str(checker.errors))
                log.error(error_fmt, str(checker.errors))
        # Cross-server port validation runs regardless of field errors.
        port_error = validate_server_ports_list(parsed, databases, False)
        if port_error is not None:
            valid = False
            log.error(error_fmt, port_error)
        if valid is True:
            HTTPListener.Global.SERVERS = {}
            for member in parsed:
                HTTPListener.Global.SERVERS[member['id']] = member
def populate_deployment(deployments, log):
    """
    Validate deployment definitions parsed from XML and, when valid, replace
    the in-memory deployment map (HTTPListener.Global.DEPLOYMENT) and the
    deployment-user map (HTTPListener.Global.DEPLOYMENT_USERS).
    :param deployments: raw deployment content (dict for one, list for many)
    :param log: logger that receives validation failures
    """
    success = True
    # --- Phase 1: the deployments themselves -------------------------------
    if type(deployments) is dict:
        deployment_json = get_deployment_from_xml(deployments, 'dict')
        req = HTTPListener.DictClass()
        req.json = {}
        req.json = deployment_json[0]
        inputs = JsonInputs(req)
        if not inputs.validate():
            success = False
            sys.stdout.write(str(inputs.errors))
            log.error("Error while reloading configuration: %s", str(inputs.errors))
        if success is True:
            HTTPListener.Global.DEPLOYMENT = {deployment_json[0]['databaseid']: deployment_json[0]}
    else:
        deployment_json = get_deployment_from_xml(deployments, 'list')
        for deployment in deployment_json:
            req = HTTPListener.DictClass()
            req.json = {}
            req.json = deployment
            inputs = JsonInputs(req)
            if not inputs.validate():
                success = False
                sys.stdout.write(str(inputs.errors))
                log.error("Error while reloading configuration: %s", str(inputs.errors))
        # The global map is only replaced when every deployment validated.
        if success is True:
            HTTPListener.Global.DEPLOYMENT = {}
            for deployment in deployment_json:
                HTTPListener.Global.DEPLOYMENT[deployment['databaseid']] = deployment
    # --- Phase 2: the users embedded in each deployment --------------------
    success = True
    if type(deployments) is list:
        users = []
        for deployment in deployments:
            if 'users' in deployment and deployment['users'] is not None \
                    and 'user' in deployment['users']:
                # get_users_from_xml may return a single user (dict) or
                # several (list) depending on the XML shape.
                user_json = get_users_from_xml(deployment,
                                               'dict')
                if type(user_json) is dict:
                    req = HTTPListener.DictClass()
                    req.json = {}
                    user_json['plaintext'] = bool(user_json['plaintext'])
                    req.json = user_json
                    inputs = UserInputs(req)
                    if not inputs.validate():
                        success = False
                        sys.stdout.write(str(inputs.errors))
                        log.error("Error while reloading configuration: %s", str(inputs.errors))
                    if success is True:
                        users.append(user_json)
                elif type(user_json) is list:
                    for user in user_json:
                        req = HTTPListener.DictClass()
                        req.json = {}
                        user['plaintext'] = bool(user['plaintext'])
                        req.json = user
                        inputs = UserInputs(req)
                        if not inputs.validate():
                            success = False
                            sys.stdout.write(str(inputs.errors))
                            log.error("Error while reloading configuration: %s", str(inputs.errors))
                    if len(user_json)> 1:
                        result = check_duplicate_user(user_json)
                        if result != "":
                            success = False
                            log.error("Error while reloading configuration: %s", result)
                    if success is True:
                        for user in user_json:
                            users.append(user)
        if len(users) > 0:
            HTTPListener.Global.DEPLOYMENT_USERS = {}
            for user in users:
                HTTPListener.Global.DEPLOYMENT_USERS[int(user['userid'])] = user
    else:
        # Single-deployment case: users are read straight off `deployments`.
        user_json = get_users_from_xml(deployments,
                                       'dict')
        if type(user_json) is dict:
            req = HTTPListener.DictClass()
            req.json = {}
            user_json['plaintext'] = bool(user_json['plaintext'])
            req.json = user_json
            inputs = UserInputs(req)
            if not inputs.validate():
                success = False
                sys.stdout.write(str(inputs.errors))
                log.error("Error while reloading configuration: %s", str(inputs.errors))
            if success is True:
                HTTPListener.Global.DEPLOYMENT_USERS = {int(user_json['userid']): user_json}
        elif type(user_json) is list:
            for user in user_json:
                req = HTTPListener.DictClass()
                req.json = {}
                user['plaintext'] = bool(user['plaintext'])
                req.json = user
                inputs = UserInputs(req)
                if not inputs.validate():
                    success = False
                    sys.stdout.write(str(inputs.errors))
                    log.error("Error while reloading configuration: %s", str(inputs.errors))
            if len(user_json)> 1:
                result = check_duplicate_user(user_json)
                if result != "":
                    success = False
                    log.error("Error while reloading configuration: %s", result)
            if success is True:
                HTTPListener.Global.DEPLOYMENT_USERS = {}
                for user in user_json:
                    HTTPListener.Global.DEPLOYMENT_USERS[int(user['userid'])] = user
def validate_server_ports_dict(member, databases, isDict):
    """
    Check a single server for two listeners configured on the same port.
    :param member: one server dict holding the six *-listener entries
    :param databases: accepted for symmetry with validate_server_ports_list;
                      not used here
    :param isDict: accepted for symmetry; not used here
    :return: "Duplicate port" when two different listeners share a port,
             None otherwise
    """
    listener_keys = ["http-listener", "admin-listener", "internal-listener",
                     "replication-listener", "zookeeper-listener",
                     "client-listener"]
    ports = dict((key, HTTPListener.get_port(member[key]))
                 for key in listener_keys)
    for key_a in listener_keys:
        port = ports[key_a]
        if port is None:
            continue
        for key_b in ports:
            if key_b != key_a and ports[key_b] == port:
                return "Duplicate port"
def validate_server_ports_list(members, databases, isDict):
    """
    Validate listener ports across a list of servers.

    First checks each server for two of its own listeners sharing a port,
    then checks every database's member servers for cross-server conflicts
    on the same host (via check_port_valid).
    :param members: list of server dicts
    :param databases: raw database content (dict for one, list for many)
    :param isDict: accepted for symmetry with validate_server_ports_dict;
                   not used here
    :return: error message string on conflict, None otherwise
    """
    listener_keys = ["http-listener", "admin-listener", "internal-listener",
                     "replication-listener", "zookeeper-listener",
                     "client-listener"]
    for member in members:
        ports = dict((key, HTTPListener.get_port(member[key]))
                     for key in listener_keys)
        for key_a in listener_keys:
            port = ports[key_a]
            if port is None:
                continue
            for key_b in ports:
                if key_b != key_a and ports[key_b] == port:
                    return "Duplicate port"
    # Normalize single-database (dict) vs multi-database (list) layouts.
    if type(databases) is dict:
        database_list = [databases]
    elif type(databases) is list:
        database_list = databases
    else:
        database_list = []
    for database in database_list:
        for key in listener_keys:
            conflict = check_port_valid(key, database['members']['member'])
            if conflict is not None:
                return conflict
def check_port_valid(port_option, servers):
    """
    Check all pairs of servers that share a hostname for a collision on the
    given listener port.
    :param port_option: listener key, e.g. 'http-listener'
    :param servers: list of server dicts
    :return: collision message from compare(), or None when there is none
    """
    for idx, server_a in enumerate(servers):
        for server_b in servers[idx + 1:]:
            if server_a['hostname'] != server_b['hostname']:
                continue
            conflict = compare(port_option, server_a, server_b)
            if conflict is not None:
                return conflict
def check_duplicate_database(databases):
    """
    Scan every pair of databases for a duplicate name.
    :param databases: list of database dicts (each with a 'name' key)
    :return: duplicate-name message for the first duplicate found, or ""
             when all names are unique (callers test the result against "")
    """
    for first, second in itertools.combinations(databases, 2):
        message = compare_database(first, second)
        # BUG FIX: compare_database() returns "" for distinct names; the
        # previous code returned that "" immediately (it is not None), so
        # only the very first pair was ever checked. Keep scanning until a
        # real duplicate message shows up.
        if message:
            return message
    return ""
def check_duplicate_user(users):
    """
    Scan every pair of users for a duplicate name.
    :param users: list of user dicts (each with a 'name' key)
    :return: duplicate-name message for the first duplicate found, or ""
             when all names are unique (callers test the result against "")
    """
    for first, second in itertools.combinations(users, 2):
        message = compare_user(first, second)
        # BUG FIX: compare_user() returns "" for distinct names; the previous
        # code returned that "" immediately (it is not None), so only the
        # first pair was ever checked. Keep scanning for a real duplicate.
        if message:
            return message
    return ""
def get_servers_from_database_id(database_id):
    """
    Resolve a database id to the list of its member server objects.
    :param database_id: database id (anything int() accepts)
    :return: list of server dicts, or an error string when the id is unknown
    """
    database = HTTPListener.Global.DATABASES.get(int(database_id))
    if database is None:
        return 'No database found for id: %u' % int(database_id)
    return [HTTPListener.Global.SERVERS.get(server_id)
            for server_id in database['members']]
def compare(port_option, first, second):
    """
    Check whether two servers on the same host collide on a given listener
    port, applying the VoltDB default when a server leaves the port unset.
    :param port_option: listener key, e.g. 'http-listener'
    :param first: first server dict
    :param second: second server dict
    :return: error message string on collision, None otherwise
    """
    # Default VoltDB port per listener. BUG FIX: the original used a chain of
    # independent `if` statements, so an unrecognized port_option left
    # default_port unbound and raised NameError; a lookup table is both
    # clearer and safe for unknown options.
    default_ports = {
        "http-listener": "8080",
        "admin-listener": "21211",
        "zookeeper-listener": "7181",
        "replication-listener": "5555",
        "client-listener": "21212",
        "internal-listener": "3021",
    }
    default_port = default_ports.get(port_option)
    first_port = HTTPListener.get_port(first[port_option])
    if first_port is None or first_port == "":
        first_port = default_port
    second_port = HTTPListener.get_port(second[port_option])
    if second_port is None or second_port == "":
        second_port = default_port
    if first_port == second_port:
        return "Port %s for the same host is already used by server %s for %s" % \
               (first_port, first['hostname'], port_option)
def compare_database(first, second):
    """
    Compare two database dicts by name.
    :return: duplicate-name message when the names match, "" otherwise
    """
    if first['name'] != second['name']:
        return ""
    return 'Duplicate database name: %s' % first['name']
def compare_user(first, second):
    """
    Compare two user dicts by name.
    :return: duplicate-name message when the names match, "" otherwise
    """
    if first['name'] != second['name']:
        return ""
    return 'Duplicate user name: %s' % first['name']
def get_deployment_from_xml(deployment_xml, is_list):
    """
    Function to get deployment json from the xml content.
    :param deployment_xml: raw deployment object
    :param is_list: 'list' when deployment_xml holds several deployments,
                    anything else for a single one
    :return: list of deployment objects in required format
    """
    deployments = []
    # BUG FIX: the original compared with `is 'list'` (string identity),
    # which only works because of CPython small-string interning; use real
    # equality instead.
    if is_list == 'list':
        for deployment in deployment_xml:
            deployments.append(get_deployment(deployment))
    else:
        deployments.append(get_deployment(deployment_xml))
    return deployments
def get_deployment(deployment, is_upload=False):
    """
    Convert a raw deployment dict (parsed from XML) into the canonical
    deployment format.
    :param deployment: raw deployment object
    :param is_upload: when True, stop at the first field that fails to
                      convert and return handle_errors() for it
    :return: deployment object in required format (or the handle_errors()
             response when is_upload is True and a field fails)
    """
    # Fields that need dedicated conversion logic; everything else goes
    # through convert_field_required_format(). Replaces the original
    # 13-arm elif chain that repeated the same call-and-check pattern.
    field_setters = {
        'export': set_export_import_field,
        'import': set_export_import_field,
        'admin-mode': set_admin_mode_filed,
        'cluster': set_cluster_field,
        'commandlog': set_command_log_field,
        'heartbeat': set_heartbeat_field,
        'httpd': set_httpd_field,
        'partition-detection': set_partition_detection_field,
        'security': set_security_field,
        'snapshot': set_snapshot_field,
        'systemsettings': set_system_setting_field,
        'dr': set_dr_field,
        'users': set_users_field,
    }
    new_deployment = {}
    for field in deployment:
        setter = field_setters.get(field)
        if setter is not None:
            result = setter(deployment, field, new_deployment)
            # On upload, abort at the first conversion error and report it.
            if is_upload and 'success' not in result:
                return handle_errors(field, result)
        else:
            new_deployment[field] = convert_field_required_format(deployment, field)
    return new_deployment
def set_export_import_field(deployment, field, new_deployment):
    """
    Copy the export/import configuration into new_deployment[field]; the
    nested <configuration> may be a single dict or a list of dicts.
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        if deployment[field] is None:
            new_deployment[field] = None
        else:
            new_deployment[field] = {}
            configuration = deployment[field]['configuration']
            shape = 'list' if type(configuration) is list else 'dict'
            new_deployment[field]['configuration'] = get_field_from_xml(
                configuration, shape, 'export')
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_admin_mode_filed(deployment, field, new_deployment):
    """
    Copy the admin-mode settings (adminstartup flag and port) into
    new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        # `target` aliases the entry so partially converted state stays
        # visible on new_deployment when a later conversion fails.
        target = {}
        new_deployment[field] = target
        target['adminstartup'] = parse_bool_string(deployment[field]
                                                   ['adminstartup'])
        target['port'] = int(deployment[field]['port'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_cluster_field(deployment, field, new_deployment):
    """
    Copy the cluster topology settings (host count, k-factor, sites per
    host, elastic mode, schema mode) into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['hostcount'] = int(deployment[field]['hostcount'])
        target['kfactor'] = int(deployment[field]['kfactor'])
        target['sitesperhost'] = int(deployment[field]['sitesperhost'])
        target['elastic'] = str(deployment[field]['elastic'])
        target['schema'] = str(deployment[field]['schema'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_command_log_field(deployment, field, new_deployment):
    """
    Copy the command-log settings (enabled/synchronous flags, log size,
    fsync frequency) into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['enabled'] = parse_bool_string(deployment[field]['enabled'])
        target['synchronous'] = parse_bool_string(deployment[field]
                                                  ['synchronous'])
        target['logsize'] = int(deployment[field]['logsize'])
        target['frequency'] = {}
        target['frequency']['transactions'] = int(
            deployment[field]['frequency']['transactions'])
        target['frequency']['time'] = int(deployment[field]['frequency']
                                          ['time'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_heartbeat_field(deployment, field, new_deployment):
    """
    Copy the heartbeat timeout into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['timeout'] = int(deployment[field]['timeout'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_partition_detection_field(deployment, field, new_deployment):
    """
    Copy the partition-detection settings (enabled flag and snapshot
    prefix) into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['enabled'] = parse_bool_string(deployment[field]['enabled'])
        target['snapshot'] = {}
        target['snapshot']['prefix'] = deployment[field]['snapshot']['prefix']
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_httpd_field(deployment, field, new_deployment):
    """
    Copy the httpd settings (port, enabled flag, json-api flag) into
    new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['port'] = int(deployment[field]['port'])
        target['enabled'] = parse_bool_string(deployment[field]['enabled'])
        target['jsonapi'] = {}
        target['jsonapi']['enabled'] = parse_bool_string(
            deployment[field]['jsonapi']['enabled'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_security_field(deployment, field, new_deployment):
    """
    Copy the security settings (enabled flag and auth provider) into
    new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['enabled'] = parse_bool_string(deployment[field]['enabled'])
        target['provider'] = str(deployment[field]['provider'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_snapshot_field(deployment, field, new_deployment):
    """
    Copy the automatic-snapshot settings (enabled flag, frequency, file
    prefix, retain count) into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        target = {}
        new_deployment[field] = target
        target['enabled'] = parse_bool_string(deployment[field]['enabled'])
        target['frequency'] = str(deployment[field]['frequency'])
        target['prefix'] = str(deployment[field]['prefix'])
        target['retain'] = int(deployment[field]['retain'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_system_setting_field(deployment, field, new_deployment):
    """
    Copy the systemsettings block (elastic, query, snapshot, temptables and
    the optional resourcemonitor subtree) into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        new_deployment[field] = {}
        new_deployment[field]['elastic'] = {}
        new_deployment[field]['elastic']['duration'] = int(deployment[field]
                                                          ['elastic']
                                                          ['duration'])
        new_deployment[field]['elastic']['throughput'] = int(deployment[field]
                                                             ['elastic']['throughput'])
        new_deployment[field]['query'] = {}
        new_deployment[field]['query']['timeout'] = int(deployment[field]['query']
                                                        ['timeout'])
        new_deployment[field]['snapshot'] = {}
        new_deployment[field]['snapshot']['priority'] = int(deployment[field]
                                                            ['snapshot']['priority'])
        new_deployment[field]['temptables'] = {}
        new_deployment[field]['temptables']['maxsize'] = int(deployment[field]
                                                             ['temptables']['maxsize'])
        # resourcemonitor is optional. Three cases: absent (leave it out),
        # present-but-empty (keep an explicit None), or populated (copy the
        # optional memorylimit and disklimit subtrees).
        if 'resourcemonitor' not in deployment[field] or \
                deployment[field]['resourcemonitor'] is None:
            if 'resourcemonitor' in deployment[field]:
                new_deployment[field]['resourcemonitor'] = None
        else:
            new_deployment[field]['resourcemonitor'] = {}
            if 'memorylimit' in deployment[field]['resourcemonitor']:
                new_deployment[field]['resourcemonitor']['memorylimit'] = \
                    deployment[field]['resourcemonitor']['memorylimit']
            # disklimit <feature> may be a single dict or a list of dicts.
            if 'disklimit' in deployment[field]['resourcemonitor'] and 'feature' in \
                    deployment[field]['resourcemonitor']['disklimit']:
                if type(deployment[field]['resourcemonitor']['disklimit']['feature']) is \
                        list:
                    new_deployment[field]['resourcemonitor']['disklimit'] = {}
                    new_deployment[field]['resourcemonitor']['disklimit']['feature'] = \
                        get_field_from_xml(deployment[field]
                                           ['resourcemonitor']['disklimit']['feature'],
                                           'list', 'disklimit')
                else:
                    new_deployment[field]['resourcemonitor']['disklimit'] = {}
                    new_deployment[field]['resourcemonitor']['disklimit']['feature'] = \
                        get_field_from_xml(deployment[field]['resourcemonitor']
                                           ['disklimit']['feature'], 'dict', 'disklimit')
    except Exception, err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_dr_field(deployment, field, new_deployment):
    """
    Copy the DR (database replication) settings into new_deployment[field].
    :return: 'success' or the error text when conversion fails
    """
    result = 'success'
    try:
        # NOTE(review): this compares against the *string* 'None', not the
        # None singleton -- presumably etree_to_dict can yield that literal
        # string; confirm before "fixing" it to `is not None`.
        if deployment[field] != 'None':
            target = {}
            new_deployment[field] = target
            target['id'] = int(deployment[field]['id'])
            target['listen'] = parse_bool_string(deployment[field]['listen'])
            if 'port' in deployment[field]:
                target['port'] = int(deployment[field]['port'])
            has_source = ('connection' in deployment[field] and
                          deployment[field]['connection'] is not None and
                          'source' in deployment[field]['connection'])
            if has_source:
                target['connection'] = {}
                target['connection']['source'] = str(
                    deployment[field]['connection']['source'])
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        return result
def set_users_field(deployment, field, new_deployment):
    """
    Convert the 'users' section of a deployment to its required format.

    :param deployment: raw deployment dict parsed from XML
    :param field: name of the users field being converted
    :param new_deployment: target dict receiving the converted users
    :return: 'success', or the error message on failure
    """
    result = 'success'
    try:
        if deployment[field] is not None:
            new_deployment[field] = {}
            # A single <user> element parses to a dict, several to a list;
            # pass the matching marker so get_field_from_xml iterates right.
            # (The redundant pre-assignment of [] was a dead store.)
            if type(deployment[field]['user']) is list:
                shape = 'list'
            else:
                shape = 'dict'
            new_deployment[field]['user'] = get_field_from_xml(
                deployment[field]['user'], shape, 'user')
    except Exception as err:
        result = str(err)
        print_errors(field, result)
    finally:
        # Returns from finally so the error string reaches the caller.
        return result
def get_field_from_xml(xml_content, is_list, type_content=''):
    """
    Gets the deployment attribute value in required format.
    :param xml_content: deployment attribute value in raw format
    :param is_list: 'list' when xml_content holds several elements, else dict
    :param type_content: attribute type
    :return: list of converted attribute objects
    """
    final_property = []
    # '==' instead of 'is': identity comparison on string literals only
    # works by CPython interning and raises SyntaxWarning on Python 3.8+.
    if is_list == 'list':
        for content in xml_content:
            final_property.append(get_fields(content, type_content))
    else:
        final_property.append(get_fields(xml_content, type_content))
    return final_property
def get_database_from_xml(xml_content, is_list, log, type_content=''):
    """
    Gets the deployment attribute value in required format.
    :param xml_content: deployment attribute value in raw format
    :param is_list: 'list' when xml_content holds several elements, else dict
    :param log: logger forwarded for member-server validation errors
    :param type_content: attribute type
    :return: list of converted attribute objects
    """
    final_property = []
    # '==' instead of 'is': string-literal identity is an interning accident
    # and raises SyntaxWarning on Python 3.8+.
    if is_list == 'list':
        for content in xml_content:
            final_property.append(get_database_fields(content, type_content, log))
    else:
        final_property.append(get_database_fields(xml_content, type_content, log))
    return final_property
def get_fields(content, type_content):
    """
    Convert one deployment attribute dict into its required typed format.
    :param content: raw attribute dict parsed from XML
    :param type_content: attribute type ('user', 'export', ...)
    :return: converted attribute dict
    """
    converted = {}
    for key in content:
        if key == 'plaintext' and type_content == 'user':
            converted[key] = parse_bool_string(content[key])
        elif key == 'property' and type_content == 'export':
            shape = 'list' if type(content['property']) is list else 'dict'
            converted['property'] = get_field_from_xml(content['property'],
                                                       shape, 'export')
        elif key == 'enabled' and type_content == 'export':
            converted[key] = parse_bool_string(content[key])
        elif key == 'members':
            # New-style members arrive as a dict; legacy deployments stored
            # them as a serialized string (kept for backward compatibility).
            if type(content[key]) is dict:
                converted[key] = set_members_field(content[key])
            elif type(content[key]) is str:
                converted[key] = convert_field_required_format(content, key)
            else:
                converted[key] = []
        else:
            converted[key] = convert_field_required_format(content, key)
    return converted
def get_database_fields(content, type_content, log):
    """
    Convert one database attribute dict into its required typed format,
    validating member servers through populate_server().
    :param content: raw attribute dict parsed from XML
    :param type_content: attribute type ('user', 'export', ...)
    :param log: logger forwarded to populate_server for validation errors
    :return: converted attribute dict
    """
    converted = {}
    for key in content:
        if key == 'plaintext' and type_content == 'user':
            converted[key] = parse_bool_string(content[key])
        elif key == 'property' and type_content == 'export':
            shape = 'list' if type(content['property']) is list else 'dict'
            converted['property'] = get_field_from_xml(content['property'],
                                                       shape, 'export')
        elif key == 'enabled' and type_content == 'export':
            converted[key] = parse_bool_string(content[key])
        elif key == 'members':
            # New-style members arrive as a dict; legacy ones as a string
            # (backward compatibility).
            if type(content[key]) is dict:
                converted[key] = populate_server(content[key]['member'],
                                                 content, log)
            elif type(content[key]) is str:
                converted[key] = convert_field_required_format(content, key)
            else:
                converted[key] = []
        else:
            converted[key] = convert_field_required_format(content, key)
    return converted
def set_members_field(content):
    """
    Extract member server ids from a database's <members> section and
    register every member server in the global SERVERS map.
    :param content: parsed <members> dict (may be empty/None)
    :return: list of member server ids
    """
    if not (content and 'member' in content and content['member']):
        return []
    shape = 'dict' if type(content['member']) is dict else 'list'
    member_json = get_field_from_xml(content['member'], shape)
    for member in member_json:
        HTTPListener.Global.SERVERS[member['id']] = member
    return [member['id'] for member in member_json]
def populate_server(servers, databases, log):
    """
    Validate member servers parsed from XML and, on success, replace the
    global SERVERS map with them.
    :param servers: parsed <member> content (dict for one, list for many)
    :param databases: enclosing database dict, used for port validation
    :param log: logger receiving validation errors
    :return: list of member server ids (returned even when validation fails)
    """
    members = []
    success = True
    if type(servers) is dict:
        # Single member: validate its fields, then its ports.
        member_json = get_field_from_xml(servers, 'dict')
        req = HTTPListener.DictClass()
        req.json = {}
        req.json = member_json[0]
        inputs = ServerInputs(req)
        if not inputs.validate():
            success = False
            sys.stdout.write(str(inputs.errors))
            log.error("Error while reloading configuration: %s", str(inputs.errors))
        else:
            result = validate_server_ports_dict(member_json[0], databases, True)
            if result is not None:
                success = False
                log.error("Error while reloading configuration: %s", result)
        if success is True:
            # Only a fully valid member replaces the global server map.
            HTTPListener.Global.SERVERS = {member_json[0]['id']: member_json[0]}
    else:
        member_json = get_field_from_xml(servers, 'list')
        for member in member_json:
            req = HTTPListener.DictClass()
            req.json = {}
            req.json = member
            inputs = ServerInputs(req)
            if not inputs.validate():
                success = False
                sys.stdout.write(str(inputs.errors))
                log.error("Error while reloading configuration: %s", str(inputs.errors))
            # NOTE(review): the whole list is re-validated on every loop
            # iteration; looks like this was meant to run once after the
            # loop — confirm before changing.
            result = validate_server_ports_list(member_json, databases, False)
            if result is not None:
                success = False
                log.error("Error while reloading configuration: %s", result)
        if success is True:
            HTTPListener.Global.SERVERS = {}
            for member in member_json:
                HTTPListener.Global.SERVERS[member['id']] = member
    # Ids are collected regardless of validation outcome.
    for mem in member_json:
        members.append(mem['id'])
    return members
def get_users_from_xml(deployment_xml, is_list):
    """
    Gets the users from the json obtained from xml file.
    :param deployment_xml: parsed deployment dict, or list of deployments
    :param is_list: 'list' when deployment_xml is a list of deployments
    :return: users object
    """
    users = []
    # '==' instead of 'is': identity on string literals only works by
    # CPython interning and raises SyntaxWarning on Python 3.8+.
    if is_list == 'list':
        for deployment in deployment_xml:
            get_users(deployment, users)
    else:
        get_users(deployment_xml, users)
    return users
def get_users(deployment, users):
    """
    Append every user found in *deployment* to *users*.
    :param deployment: parsed deployment dict
    :param users: list accumulating converted user dicts (mutated in place)
    :return: the users list
    """
    if 'users' in deployment and deployment['users'] is not None:
        raw = deployment['users']['user']
        # One <user> element parses to a dict, several to a list.
        if type(raw) is list:
            for entry in raw:
                users.append(convert_user_required_format(entry))
        else:
            users.append(convert_user_required_format(raw))
    return users
def convert_user_required_format(user):
    """
    Convert the fields in user to required format.

    Only ``databaseid`` needs conversion (XML parsing yields strings); a
    direct membership test replaces the original scan over every key.
    The dict is modified in place.

    :param user: user dict parsed from the deployment XML
    :return: the same user dict with ``databaseid`` coerced to int
    """
    if 'databaseid' in user:
        user['databaseid'] = int(user['databaseid'])
    return user
def convert_field_required_format(type, field):
    """
    Convert a single deployment field to its required Python type.

    NOTE: the first parameter shadows the builtin ``type``; the name is
    kept because positional callers rely on the existing signature.

    :param type: dict holding the raw (string) field values
    :param field: name of the field to convert
    :return: the converted field value
    """
    raw_value = type[field]
    if field in ('databaseid', 'id'):
        return int(raw_value)
    if field == 'members':
        # Legacy serialized list, e.g. "[1, 2]".
        return ast.literal_eval(raw_value)
    return raw_value
def etree_to_dict(t):
    """
    Gets the json object from the xml content.

    Recursively converts an ElementTree element: attributes become dict
    keys, repeated child tags collapse into lists, and text is stored
    under 'value' when the element also has children or attributes.
    Python 2 only (uses dict.iteritems).

    :param t: xml content (ElementTree element)
    :return: object
    """
    d = {t.tag: {} if t.attrib else None}
    children = list(t)
    if children:
        dd = defaultdict(list)
        # Recurse into every child and group results by tag name.
        for dc in map(etree_to_dict, children):
            for k, v in dc.iteritems():
                dd[k].append(v)
        # d = {t.tag: {k:v[0] if len(v) == 1 else v for k, v in dd.iteritems()}}
        # A single occurrence stays a scalar; repeats become a list.
        aa = {}
        for k, v in dd.iteritems():
            aa[k] = v[0] if len(v) == 1 else v
        d = {t.tag: aa}
    if t.attrib:
        # XML attributes are merged in beside the child entries.
        d[t.tag].update((k, v) for k, v in t.attrib.iteritems())
    if t.text:
        text = t.text.strip()
        if children or t.attrib:
            if text:
                d[t.tag]['value'] = text
        else:
            # Leaf element with only text: collapse to the bare string.
            d[t.tag] = text
    return d
def parse_bool_string(bool_string):
    """
    Interpret *bool_string* as a boolean.
    Only a case-insensitive 'true' yields True; anything else is False.
    :param bool_string: string read from the deployment XML
    :return: boolean value
    """
    return bool_string.lower() == 'true'
def get_deployment_for_upload(deployment_xml):
    """
    Build the deployment json list used by the upload process.
    :param deployment_xml: deployment object in raw format
    :return: single-element list holding the converted deployment
    """
    return [get_deployment(deployment_xml, True)]
def get_configuration():
    """
    Assemble the voltdeploy json object from the in-memory global state.
    :return: voltdeploy json object
    """
    snapshot = {
        'databases': HTTPListener.Global.DATABASES,
        'members': HTTPListener.Global.SERVERS,
        'deployments': HTTPListener.Global.DEPLOYMENT,
        'deployment_users': HTTPListener.Global.DEPLOYMENT_USERS,
    }
    return {'voltdeploy': snapshot}
def write_configuration_file():
"""
Write the xml content to voltdeploy.xml
"""
main_header = make_configuration_file()
try:
path = os.path.join(HTTPListener.Global.CONFIG_PATH, 'voltdeploy.xml')
f = open(path, 'w')
f.write(main_header)
f.close()
except Exception, err:
print str(err)
def make_configuration_file():
    """
    Prepare the voltdeploy xml content from the in-memory global state.
    :return: xml content string
    """
    main_header = Element('voltdeploy')
    db_top = SubElement(main_header, 'databases')
    deployment_top = SubElement(main_header, 'deployments')
    for key, value in HTTPListener.Global.DATABASES.items():
        db_elem = SubElement(db_top, 'database')
        for k, val in value.items():
            if isinstance(val, bool):
                # BUGFIX: previously tested the enclosing dict ('value'),
                # which is always truthy, so every boolean serialized as
                # "true"; test the field's own value instead.
                db_elem.attrib[k] = "true" if val else "false"
            elif k == 'members':
                mem_elem = SubElement(db_elem, 'members')
                for mem_id in val:
                    server_info = HTTPListener.Global.SERVERS.get(mem_id)
                    if not server_info:
                        continue
                    mem_item = SubElement(mem_elem, 'member')
                    for field in server_info:
                        field_val = server_info[field]
                        if isinstance(field_val, bool):
                            # BUGFIX: same wrong-variable test as above.
                            mem_item.attrib[field] = "true" if field_val else "false"
                        else:
                            mem_item.attrib[field] = str(field_val)
            else:
                db_elem.attrib[k] = str(val)
    for key, value in HTTPListener.Global.DEPLOYMENT.items():
        # Rebuild the deployment's user list from DEPLOYMENT_USERS.
        HTTPListener.Global.DEPLOYMENT[key]['users'] = {'user': []}
        d = HTTPListener.Global.DEPLOYMENT_USERS
        # .items() instead of Python-2-only .iteritems(), consistent with
        # the other loops in this function.
        # NOTE(review): this inner loop runs once per deployment and
        # appends users to *their own* deployment each time, which looks
        # like it can duplicate users when several deployments exist —
        # behavior kept as-is, confirm before changing.
        for user_key, user_value in d.items():
            HTTPListener.Global.DEPLOYMENT[user_value['databaseid']]['users']['user'].append({
                'name': user_value['name'],
                'roles': user_value['roles'],
                'plaintext': user_value['plaintext'],
                'password': user_value['password'],
                'databaseid': user_value['databaseid'],
                'userid': user_value['userid']
            })
        deployment_elem = SubElement(deployment_top, 'deployment')
        for k, val in value.items():
            if k == 'users' and not val['user']:
                # Skip an empty users section entirely.
                pass
            elif type(val) is dict:
                DeploymentConfig.handle_deployment_dict(deployment_elem, k, val, False)
            elif type(val) is list:
                DeploymentConfig.handle_deployment_list(deployment_elem, k, val)
            else:
                if val is not None:
                    deployment_elem.attrib[k] = str(val)
    return tostring(main_header, encoding='UTF-8')
def handle_errors(config_type, error):
    """
    Build the error object for a failed deployment-attribute conversion.
    :param config_type: Attribute name like (export, import etc)
    :param error: Error obtained while converting the deployment attribute
    :return: error object
    """
    message = '%s: %s' % (config_type, str(error))
    return {'error': message}
def print_errors(config_type, error):
    """
    Print the error message while preparing required deployment object
    :param config_type: Attribute name like (export, import etc)
    :param error: Error obtained while converting the deployment attribute
    """
    # Python 2 print statements: emits the failing attribute name plus the
    # traceback of the currently handled exception to stdout.
    print 'error (' + config_type + '): ' + str(error)
    print traceback.format_exc()
def set_deployment_for_upload(database_id, request):
    """
    Read a deployment from an upload request and apply it to *database_id*.

    Accepts either a raw XML request body (text/xml or application/xml
    Content-Type) or an uploaded file under 'file'.

    :param database_id: id of the database the deployment belongs to
    :param request: incoming HTTP request object
    :return: status dict from read_content(), or a 401 dict on bad file type
    """
    content_type = request.headers['Content-Type']
    if 'text/xml' in content_type or 'application/xml' in content_type:
        return read_content(request.data, database_id)
    dep_file = request.files['file']
    if dep_file and HTTPListener.allowed_file(dep_file.filename):
        return read_content(dep_file.read(), database_id)
    return {'status': 401, 'statusString': 'Invalid file type.'}
    # (The original trailing "return {'status': 201, ...}" was unreachable:
    # every branch above already returns.)
def read_content(content, database_id):
    """
    Parse uploaded deployment XML, validate it, and apply it to the
    database identified by *database_id*.

    :param content: raw XML bytes/string from the upload
    :param database_id: id of the database being (re)configured
    :return: status dict; 200 on success, 401 on any validation failure
    """
    try:
        o = XML(content)
        # Round-trip through json to normalize the etree dict into plain types.
        xml_final = json.loads(json.dumps(etree_to_dict(o)))
        if 'deployment' in xml_final and type(xml_final['deployment']) is dict:
            deployment_data = get_deployment_for_upload(xml_final['deployment'])
            if type(deployment_data) is dict:
                # A dict result signals a conversion error.
                if 'error' in deployment_data:
                    return {'status': 'failure', 'error': deployment_data['error']}
            else:
                deployment_json = deployment_data[0]
                req = HTTPListener.DictClass()
                req.json = {}
                req.json = deployment_json
                # Validation pipeline: schema, limits, users, roles.
                inputs = JsonInputs(req)
                if not inputs.validate():
                    return {'status': 401, 'statusString': inputs.errors}
                result = check_validation_deployment(req)
                if 'status' in result and result['status'] == 401:
                    return {'status': 401, 'statusString': result['statusString']}
                is_duplicate_user = check_duplicate_users(req)
                if not is_duplicate_user:
                    return {'status': 401, 'statusString': 'Duplicate users not allowed.'}
                is_invalid_roles = check_invalid_roles(req)
                if not is_invalid_roles:
                    return {'status': 401, 'statusString': 'Invalid user roles.'}
                HTTPListener.map_deployment(req, database_id)
                # Drop the existing users of this database before re-adding.
                deployment_user = [v if type(v) is list else [v] for v in HTTPListener.Global.DEPLOYMENT_USERS.values()]
                if deployment_user is not None:
                    for user in deployment_user:
                        if user[0]['databaseid'] == database_id:
                            del HTTPListener.Global.DEPLOYMENT_USERS[int(user[0]['userid'])]
                if 'users' in req.json and 'user' in req.json['users']:
                    for user in req.json['users']['user']:
                        if not HTTPListener.Global.DEPLOYMENT_USERS:
                            user_id = 1
                        else:
                            # Python 2 only: keys() is a list here; relies on
                            # insertion order for the next free id.
                            user_id = HTTPListener.Global.DEPLOYMENT_USERS.keys()[-1] + 1
                        # De-duplicate roles (set() does not preserve order).
                        user_roles = ','.join(set(user['roles'].split(',')))
                        HTTPListener.Global.DEPLOYMENT_USERS[user_id] = {
                            'name': user['name'],
                            'roles': user_roles,
                            'password': user['password'],
                            'plaintext': user['plaintext'],
                            'databaseid': database_id,
                            'userid': user_id
                        }
                HTTPListener.sync_configuration()
                write_configuration_file()
            return {'status': 200, 'statusString': 'success'}
        else:
            return {'status': 401, 'statusString': 'Invalid file content.'}
    except Exception as err:
        # NOTE(review): the exception detail ('err') is swallowed here; every
        # failure is reported as generic invalid content.
        return {'status': 401, 'statusString': 'Invalid file content.'}
def check_duplicate_users(req):
    """
    Check that no two users in the uploaded deployment share a name.

    :param req: request-like object whose ``json`` holds the deployment
    :return: False if a duplicate user name is found, True otherwise
    """
    if 'users' in req.json and 'user' in req.json['users']:
        seen_names = set()
        for user in req.json['users']['user']:
            if user['name'] in seen_names:
                return False
            seen_names.add(user['name'])
    # BUGFIX: previously fell through returning None when the deployment had
    # no users; the caller treats any falsy result as "duplicates found".
    return True
def check_invalid_roles(req):
    """
    Check that every user's comma-separated role list has no empty entries.

    :param req: request-like object whose ``json`` holds the deployment
    :return: False when any role entry is blank, True otherwise
    """
    if 'users' in req.json and 'user' in req.json['users']:
        for user in req.json['users']['user']:
            for role in str(user['roles']).split(','):
                if role.strip() == '':
                    return False
    # BUGFIX: previously returned None implicitly when the deployment had no
    # users; the caller treats any falsy result as "invalid roles".
    return True
def check_validation_deployment(req):
    """
    Validate the semantic constraints of an uploaded deployment: resource
    monitor sizes, snapshot frequency format, and export/import properties.
    :param req: request-like object whose ``json`` holds the deployment
    :return: {'status': 200, ...} on success, {'status': 401, ...} on error
    """
    if 'systemsettings' in req.json and 'resourcemonitor' in req.json['systemsettings']:
        # Memory limit size must pass the shared size-format check.
        if 'memorylimit' in req.json['systemsettings']['resourcemonitor'] and \
                'size' in req.json['systemsettings']['resourcemonitor']['memorylimit']:
            size = str(req.json['systemsettings']['resourcemonitor']['memorylimit']['size'])
            response = json.loads(HTTPListener.check_size_value(size, 'memorylimit').data)
            if 'error' in response:
                return {'status': 401, 'statusString': response['error']}
        # Disk limit features must be unique and carry a valid size.
        disk_limit_arr = []
        if 'disklimit' in req.json['systemsettings']['resourcemonitor'] and \
                'feature' in req.json['systemsettings']['resourcemonitor']['disklimit']:
            for feature in req.json['systemsettings']['resourcemonitor']['disklimit']['feature']:
                size = feature['size']
                if feature['name'] in disk_limit_arr:
                    return {'status': 401, 'statusString': 'Duplicate items are not allowed.'}
                disk_limit_arr.append(feature['name'])
                response = json.loads(HTTPListener.check_size_value(size, 'disklimit').data)
                if 'error' in response:
                    return {'status': 401, 'statusString': response['error']}
    if 'snapshot' in req.json and 'frequency' in req.json['snapshot']:
        # Frequency must be '<int><unit>' with unit one of h/m/s, no spaces.
        frequency_unit = ['h', 'm', 's']
        frequency = str(req.json['snapshot']['frequency'])
        if ' ' in frequency:
            return {'status': 401, 'statusString': 'Snapshot: White spaces not allowed in frequency.'}
        last_char = frequency[len(frequency) - 1]
        if last_char not in frequency_unit:
            return {'status': 401, 'statusString': 'Snapshot: Invalid frequency value.'}
        frequency = frequency[:-1]
        try:
            # Conversion is only for validation; the value itself is unused.
            int_frequency = int(frequency)
        except Exception, exp:
            return {'status': 401, 'statusString': 'Snapshot: ' + str(exp)}
    if 'export' in req.json and 'configuration' in req.json['export']:
        for configuration in req.json['export']['configuration']:
            result = check_export_property(configuration['type'], configuration['property'])
            if 'status' in result and result['status'] == 401:
                return {'status': 401, 'statusString': 'Export: ' + result['statusString']}
    if 'import' in req.json and 'configuration' in req.json['import']:
        for configuration in req.json['import']['configuration']:
            result = check_export_property(configuration['type'], configuration['property'])
            if 'status' in result and result['status'] == 401:
                return {'status': 401, 'statusString': 'Import: ' + result['statusString']}
    return {'status': 200, 'statusString': 'success'}
def check_export_property(type, properties):
    """
    Validate the property list of an export/import configuration.

    Every property must carry a non-empty 'name' and 'value', names must
    be unique, and the mandatory properties for the given connector type
    (kafka, elasticsearch, file, http, jdbc, rabbitmq) must be present.

    NOTE: 'type' and 'property' shadow builtins; names kept so existing
    callers are unaffected.

    :param type: export/import connector type string
    :param properties: list of {'name': ..., 'value': ...} dicts
    :return: {'status': 200, 'statusString': 'success'} or a 401 error dict
    """
    property_list = []
    for property in properties:
        if 'name' in property and 'value' in property:
            if str(property['name']).strip() == '' or str(property['value']).strip() == '':
                return {'status': 401, 'statusString': 'Invalid property.'}
            if property['name'] in property_list:
                return {'status': 401, 'statusString': 'Duplicate properties are not allowed.'}
            property_list.append(property['name'])
        else:
            return {'status': 401, 'statusString': 'Invalid property.'}
    # Hoist the normalization done repeatedly in the original.
    connector = str(type).lower()
    if connector == 'kafka':
        if 'metadata.broker.list' not in property_list:
            return {'status': 401, 'statusString': 'Default property(metadata.broker.list) of kafka not present.'}
    if connector == 'elasticsearch':
        if 'endpoint' not in property_list:
            return {'status': 401, 'statusString': 'Default property(endpoint) of elasticsearch not present.'}
    if connector == 'file':
        if 'type' not in property_list or 'nonce' not in property_list \
                or 'outdir' not in property_list:
            return {'status': 401, 'statusString': 'Default properties(type, nonce, outdir) of file not present.'}
    if connector == 'http':
        if 'endpoint' not in property_list:
            return {'status': 401, 'statusString': 'Default property(endpoint) of http not present.'}
    if connector == 'jdbc':
        if 'jdbcdriver' not in property_list or 'jdbcurl' not in property_list:
            return {'status': 401, 'statusString': 'Default properties(jdbcdriver, jdbcurl) of jdbc not present.'}
    if connector == 'rabbitmq':
        if 'broker.host' not in property_list and 'amqp.uri' not in property_list:
            return {'status': 401, 'statusString': 'Default property(either amqp.uri or broker.host) of '
                                                   'rabbitmq not present.'}
        elif 'broker.host' in property_list and 'amqp.uri' in property_list:
            # BUGFIX: message previously misspelled 'rabbibmq'.
            return {'status': 401, 'statusString': 'Both broker.host and amqp.uri cannot be included as rabbitmq property.'}
    return {'status': 200, 'statusString': 'success'}
| agpl-3.0 |
maellak/invenio | modules/miscutil/lib/dbdump.py | 12 | 15232 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
Invenio DB dumper.
"""
import os
import re
import time
from invenio.config import CFG_LOGDIR, CFG_PATH_MYSQL, CFG_PATH_GZIP
from invenio.dbquery import CFG_DATABASE_HOST, \
CFG_DATABASE_USER, \
CFG_DATABASE_PASS, \
CFG_DATABASE_NAME, \
CFG_DATABASE_PORT, \
CFG_DATABASE_SLAVE, \
get_connection_for_dump_on_slave, \
run_sql
from invenio.bibtask import task_init, \
write_message, \
task_set_option, \
task_get_option, \
task_update_progress, \
task_get_task_param, \
task_low_level_submission
from invenio.shellutils import run_shell_command, \
escape_shell_arg
def get_table_names(value):
    """
    Get table names of the tables matching the given regular expression.
    @param value: regular expression string
    @return: list of matching table names
    """
    pattern = re.compile(value)
    tables = run_sql("SHOW TABLES")
    return [row[0] for row in tables if pattern.search(row[0])]
def _delete_old_dumps(dirname, filename, number_to_keep):
    """
    Look for files in DIRNAME directory starting with FILENAME
    pattern. Delete all but the NUMBER_TO_KEEP newest ones (alphabetical
    order equals chronological order for these timestamped names).
    Useful to prune old dump files.
    """
    files = sorted(x for x in os.listdir(dirname) if x.startswith(filename))
    # BUGFIX: with number_to_keep == 0, files[:-0] is an empty slice and
    # nothing would be deleted; handle that case explicitly.
    doomed = files[:-number_to_keep] if number_to_keep > 0 else files
    for afile in doomed:
        # os.path.join instead of manual separator concatenation.
        afile_path = os.path.join(dirname, afile)
        write_message("... deleting %s" % afile_path)
        os.remove(afile_path)
def check_slave_is_up(connection=None):
    """Raise an StandardError in case the slave is not correctly up."""
    if connection is None:
        connection = get_connection_for_dump_on_slave()
    status = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)[0]
    # Both replication threads must be running for the slave to be usable.
    for thread in ('Slave_IO_Running', 'Slave_SQL_Running'):
        if status[thread] != 'Yes':
            raise StandardError("%s is not set to 'Yes'" % thread)
def check_slave_is_down(connection=None):
    """Raise an StandardError in case the slave is not correctly down."""
    if connection is None:
        connection = get_connection_for_dump_on_slave()
    status = run_sql("SHOW SLAVE STATUS", with_dict=True, connection=connection)[0]
    # Only the SQL thread needs to be stopped for a consistent dump.
    if status['Slave_SQL_Running'] != 'No':
        raise StandardError("Slave_SQL_Running is not set to 'No'")
def detach_slave(connection=None):
    """Stop the slave SQL thread and verify it actually stopped."""
    if connection is None:
        connection = get_connection_for_dump_on_slave()
    run_sql("STOP SLAVE SQL_THREAD", connection=connection)
    # Fail loudly if the thread did not stop.
    check_slave_is_down(connection)
def attach_slave(connection=None):
    """Restart replication on the slave and verify it came back up."""
    if connection is None:
        connection = get_connection_for_dump_on_slave()
    run_sql("START SLAVE", connection=connection)
    # Fail loudly if either replication thread did not come back.
    check_slave_is_up(connection)
def check_slave_is_in_consistent_state(connection=None):
    """
    Check if the slave is already aware that dbdump task is running.
    dbdump being a monotask, guarantee that no other task is currently
    running and it's hence safe to detach the slave and start the
    actual dump.
    """
    if connection is None:
        connection = get_connection_for_dump_on_slave()
    i = 0
    ## Let's take the current status of dbdump (e.g. RUNNING, ABOUT TO STOP, etc.)...
    current_status = run_sql("SELECT status FROM schTASK WHERE id=%s", (task_get_task_param('task_id'), ))[0][0]
    # Poll the slave up to 10 times (3s apart, so ~30s total) until it
    # reports the same task status as the master.
    while True:
        if i == 10:
            ## Timeout!!
            raise StandardError("The slave seems not to pick up with the master")
        ## ...and let's see if it matches with what the slave sees.
        if run_sql("SELECT status FROM schTASK WHERE id=%s AND status=%s", (task_get_task_param('task_id'), current_status), connection=connection):
            ## Bingo!
            return
        time.sleep(3)
        i += 1
def dump_database(dump_path, host=CFG_DATABASE_HOST, port=CFG_DATABASE_PORT, \
                  user=CFG_DATABASE_USER, passw=CFG_DATABASE_PASS, \
                  name=CFG_DATABASE_NAME, params=None, compress=False, \
                  ignore_tables=None):
    """
    Dump Invenio database into SQL file located at DUMP_PATH.
    Will perform the command to mysqldump with the given host configuration
    and user credentials.
    Optional mysqldump parameters can also be passed. Otherwise, a default
    set of parameters will be used.
    @param dump_path: path on the filesystem to save the dump to.
    @type dump_path: string
    @param host: hostname of mysql database node to connect to.
    @type host: string
    @param port: port of mysql database node to connect to
    @type port: string
    @param user: username to connect with
    @type user: string
    @param passw: password to connect to with
    @type passw: string
    @param name: name of mysql database node to dump
    @type name: string
    @param params: command line parameters to pass to mysqldump. Optional.
    @type params: string
    @param compress: should the dump be compressed through gzip?
    @type compress: bool
    @param ignore_tables: list of tables to ignore in the dump
    @type ignore_tables: list of string
    """
    write_message("... writing %s" % (dump_path,))
    # Write to a temporary .part file; rename only on success so a failed
    # dump never masquerades as a complete one.
    partial_dump_path = dump_path + ".part"
    # Is mysqldump installed or in the right path?
    cmd_prefix = CFG_PATH_MYSQL + 'dump'
    if not os.path.exists(cmd_prefix):
        raise StandardError("%s is not installed." % (cmd_prefix))
    if not params:
        # No parameters set, lets use the default ones.
        params = " --skip-opt --add-drop-table --add-locks --create-options" \
                 " --quick --extended-insert --set-charset --disable-keys" \
                 " --lock-tables=false --max_allowed_packet=2G "
    if ignore_tables:
        # BUGFIX: prepend a separating space — caller-supplied params are
        # not guaranteed to end with one (the defaults happen to).
        params += " " + " ".join([escape_shell_arg("--ignore-table=%s.%s" % (CFG_DATABASE_NAME, table)) for table in ignore_tables])
    dump_cmd = "%s %s " \
               " --host=%s --port=%s --user=%s --password=%s %s" % \
               (cmd_prefix, \
                params, \
                escape_shell_arg(host), \
                escape_shell_arg(str(port)), \
                escape_shell_arg(user), \
                escape_shell_arg(passw), \
                escape_shell_arg(name))
    if compress:
        # PIPESTATUS propagates mysqldump's exit code through the gzip pipe.
        dump_cmd = "%s | %s -c; exit ${PIPESTATUS[0]}" % \
                   (dump_cmd, \
                    CFG_PATH_GZIP)
    dump_cmd = "bash -c %s" % (escape_shell_arg(dump_cmd),)
    write_message(dump_cmd, verbose=2)
    exit_code, stdout, stderr = run_shell_command(dump_cmd, None, partial_dump_path)
    if exit_code:
        raise StandardError("ERROR: mysqldump exit code is %s. stderr: %s stdout: %s" % \
                            (repr(exit_code), \
                             repr(stderr), \
                             repr(stdout)))
    else:
        os.rename(partial_dump_path, dump_path)
    write_message("... completed writing %s" % (dump_path,))
def _dbdump_elaborate_submit_param(key, value, dummyopts, dummyargs):
    """
    Elaborate task submission parameter. See bibtask's
    task_submit_elaborate_specific_parameter_fnc for help.
    Returns True when *key* was recognized and handled, False otherwise.
    """
    if key in ('-n', '--number'):
        try:
            task_set_option('number', int(value))
        except ValueError:
            raise StandardError("ERROR: Number '%s' is not integer." % (value,))
    elif key in ('-o', '--output'):
        if os.path.isdir(value):
            task_set_option('output', value)
        else:
            raise StandardError("ERROR: Output '%s' is not a directory." % \
                                (value,))
    elif key in ('--params',):
        task_set_option('params', value)
    elif key in ('--compress',):
        if not CFG_PATH_GZIP or (CFG_PATH_GZIP and not os.path.exists(CFG_PATH_GZIP)):
            raise StandardError("ERROR: No valid gzip path is defined.")
        task_set_option('compress', True)
    elif key in ('-S', '--slave'):
        if value:
            task_set_option('slave', value)
        else:
            # No explicit host given: fall back to the configured slave.
            if not CFG_DATABASE_SLAVE:
                raise StandardError("ERROR: No slave defined.")
            task_set_option('slave', CFG_DATABASE_SLAVE)
    elif key in ('--dump-on-slave-helper', ):
        task_set_option('dump_on_slave_helper_mode', True)
    elif key in ('--ignore-tables',):
        try:
            # Compile only to validate the pattern early; the string itself
            # is what gets stored.
            re.compile(value)
            task_set_option("ignore_tables", value)
        except re.error:
            # Call-form raise, consistent with the other raises in this
            # function (the comma form is Python-2-only syntax).
            raise StandardError("ERROR: Passed string: '%s' is not a valid regular expression." % value)
    else:
        return False
    return True
def _dbdump_run_task_core():
    """
    Run DB dumper core stuff.
    Note: do not use task_can_sleep() stuff here because we don't want
    other tasks to interrupt us while we are dumping the DB content.
    """
    # read params:
    host = CFG_DATABASE_HOST
    port = CFG_DATABASE_PORT
    connection = None
    try:
        # Coordinator mode: detach the slave, then schedule a helper task
        # that performs the actual dump against the detached slave.
        if task_get_option('slave') and not task_get_option('dump_on_slave_helper_mode'):
            connection = get_connection_for_dump_on_slave()
            write_message("Dump on slave requested")
            write_message("... checking if slave is well up...")
            check_slave_is_up(connection)
            write_message("... checking if slave is in consistent state...")
            check_slave_is_in_consistent_state(connection)
            write_message("... detaching slave database...")
            detach_slave(connection)
            write_message("... scheduling dump on slave helper...")
            # Forward the relevant CLI options to the helper task.
            helper_arguments = []
            if task_get_option("number"):
                helper_arguments += ["--number", str(task_get_option("number"))]
            if task_get_option("output"):
                helper_arguments += ["--output", str(task_get_option("output"))]
            if task_get_option("params"):
                helper_arguments += ["--params", str(task_get_option("params"))]
            if task_get_option("ignore_tables"):
                helper_arguments += ["--ignore-tables", str(task_get_option("ignore_tables"))]
            if task_get_option("compress"):
                helper_arguments += ["--compress"]
            if task_get_option("slave"):
                helper_arguments += ["--slave", str(task_get_option("slave"))]
            helper_arguments += ['-N', 'slavehelper', '--dump-on-slave-helper']
            task_id = task_low_level_submission('dbdump', task_get_task_param('user'), '-P4', *helper_arguments)
            write_message("Slave scheduled with ID %s" % task_id)
            task_update_progress("DONE")
            return True
        elif task_get_option('dump_on_slave_helper_mode'):
            # Helper mode: the slave is expected to be detached already.
            write_message("Dumping on slave mode")
            connection = get_connection_for_dump_on_slave()
            write_message("... checking if slave is well down...")
            check_slave_is_down(connection)
            host = CFG_DATABASE_SLAVE
        task_update_progress("Reading parameters")
        write_message("Reading parameters started")
        output_dir = task_get_option('output', CFG_LOGDIR)
        output_num = task_get_option('number', 5)
        params = task_get_option('params', None)
        compress = task_get_option('compress', False)
        slave = task_get_option('slave', False)
        ignore_tables = task_get_option('ignore_tables', None)
        if ignore_tables:
            # Expand the regex into the concrete list of matching tables.
            ignore_tables = get_table_names(ignore_tables)
        else:
            ignore_tables = None
        output_file_suffix = task_get_task_param('task_starting_time')
        output_file_suffix = output_file_suffix.replace(' ', '_') + '.sql'
        if compress:
            output_file_suffix = "%s.gz" % (output_file_suffix,)
        write_message("Reading parameters ended")
        # make dump:
        task_update_progress("Dumping database")
        write_message("Database dump started")
        if slave:
            output_file_prefix = 'slave-%s-dbdump-' % (CFG_DATABASE_NAME,)
        else:
            output_file_prefix = '%s-dbdump-' % (CFG_DATABASE_NAME,)
        output_file = output_file_prefix + output_file_suffix
        dump_path = output_dir + os.sep + output_file
        dump_database(dump_path, \
                      host=host,
                      port=port,
                      params=params, \
                      compress=compress, \
                      ignore_tables=ignore_tables)
        write_message("Database dump ended")
    finally:
        # Always reattach the slave in helper mode, even if the dump failed.
        if connection and task_get_option('dump_on_slave_helper_mode'):
            write_message("Reattaching slave")
            attach_slave(connection)
    # prune old dump files:
    task_update_progress("Pruning old dump files")
    write_message("Pruning old dump files started")
    _delete_old_dumps(output_dir, output_file_prefix, output_num)
    write_message("Pruning old dump files ended")
    # we are done:
    task_update_progress("Done.")
    return True
def main():
    """Main that construct all the bibtask."""
    # Declarative wiring: CLI options are parsed by
    # _dbdump_elaborate_submit_param and the work happens in
    # _dbdump_run_task_core.
    task_init(authorization_action='rundbdump',
              authorization_msg="DB Dump Task Submission",
              help_specific_usage="""\
  -o, --output=DIR Output directory. [default=%s]
  -n, --number=NUM Keep up to NUM previous dump files. [default=5]
  --params=PARAMS Specify your own mysqldump parameters. Optional.
  --compress Compress dump directly into gzip.
  -S, --slave=HOST Perform the dump from a slave, if no host use CFG_DATABASE_SLAVE.
  --ignore-tables=regex Ignore tables matching the given regular expression
Examples:
    $ dbdump --ignore-tables '^(idx|rnk)'
    $ dbdump -n3 -o/tmp -s1d -L 02:00-04:00
""" % CFG_LOGDIR,
              specific_params=("n:o:p:S:",
                               ["number=", "output=", "params=", "slave=", "compress", 'ignore-tables=', "dump-on-slave-helper"]),
              task_submit_elaborate_specific_parameter_fnc=_dbdump_elaborate_submit_param,
              task_run_fnc=_dbdump_run_task_core)
if __name__ == '__main__':
    main()
| gpl-2.0 |
derekchiang/keystone | keystone/contrib/ec2/routers.py | 8 | 1874 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystone.common import wsgi
from keystone.contrib.ec2 import controllers
class Ec2Extension(wsgi.ExtensionRouter):
    """Routes for the EC2 credential extension.

    Exposes EC2 token validation plus CRUD operations on a user's
    EC2 credentials.
    """

    # (path, controller action, HTTP method) for every exposed route.
    _ROUTES = (
        ('/ec2tokens', 'authenticate', 'POST'),
        ('/users/{user_id}/credentials/OS-EC2', 'create_credential', 'POST'),
        ('/users/{user_id}/credentials/OS-EC2', 'get_credentials', 'GET'),
        ('/users/{user_id}/credentials/OS-EC2/{credential_id}',
         'get_credential', 'GET'),
        ('/users/{user_id}/credentials/OS-EC2/{credential_id}',
         'delete_credential', 'DELETE'),
    )

    def add_routes(self, mapper):
        """Register all EC2 extension routes on the given mapper."""
        ec2_controller = controllers.Ec2Controller()
        for path, action, method in self._ROUTES:
            mapper.connect(path,
                           controller=ec2_controller,
                           action=action,
                           conditions=dict(method=[method]))
| apache-2.0 |
catapult-project/catapult-csm | telemetry/third_party/web-page-replay/proxyshaper.py | 39 | 3781 | #!/usr/bin/env python
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simulate network characteristics directly in Python.
Allows running replay without dummynet.
"""
import logging
import platformsettings
import re
import time
TIMER = platformsettings.timer
class ProxyShaperError(Exception):
    """Catch-all base error for this module."""
class BandwidthValueError(ProxyShaperError):
    """Raised when a dummynet-style bandwidth value cannot be parsed."""
class RateLimitedFile(object):
  """Wrap a file like object with rate limiting.

  TODO(slamm): Simulate slow-start.
      Each RateLimitedFile corresponds to one-direction of a
      bidirectional socket. Slow-start can be added here (algorithm needed).
      Will consider changing this class to take read and write files and
      corresponding bit rates for each.
  """
  # Write in TCP-payload-sized chunks so pacing approximates packets.
  BYTES_PER_WRITE = 1460

  def __init__(self, request_counter, f, bps):
    """Initialize a RateLimiter.

    Args:
      request_counter: callable to see how many requests share the limit.
      f: file-like object to wrap.
      bps: an integer of bits per second.
    """
    self.request_counter = request_counter
    self.original_file = f
    self.bps = bps

  def transfer_seconds(self, num_bytes):
    """Seconds to read/write |num_bytes| with |self.bps|."""
    return 8.0 * num_bytes / self.bps

  def write(self, data):
    """Write |data| to the wrapped file, sleeping to simulate bandwidth."""
    num_bytes = len(data)
    num_sent_bytes = 0
    while num_sent_bytes < num_bytes:
      num_write_bytes = min(self.BYTES_PER_WRITE, num_bytes - num_sent_bytes)
      # Concurrent requests share the bandwidth, so scale the delay.
      num_requests = self.request_counter()
      wait = self.transfer_seconds(num_write_bytes) * num_requests
      logging.debug('write sleep: %0.4fs (%d requests)', wait, num_requests)
      time.sleep(wait)
      self.original_file.write(
          data[num_sent_bytes:num_sent_bytes + num_write_bytes])
      num_sent_bytes += num_write_bytes

  def _read(self, read_func, size):
    """Call |read_func(size)|, then sleep off any remaining transfer time."""
    start = TIMER()
    data = read_func(size)
    read_seconds = TIMER() - start
    num_bytes = len(data)
    num_requests = self.request_counter()
    # Credit the wall-clock time the real read took against the simulated one.
    wait = self.transfer_seconds(num_bytes) * num_requests - read_seconds
    if wait > 0:
      # Fixed: the message previously read 'read sleep: %0.4fs %d requests)'
      # (missing opening parenthesis), inconsistent with the write-path log.
      logging.debug('read sleep: %0.4fs (%d requests)', wait, num_requests)
      time.sleep(wait)
    return data

  def readline(self, size=-1):
    return self._read(self.original_file.readline, size)

  def read(self, size=-1):
    return self._read(self.original_file.read, size)

  def __getattr__(self, name):
    """Forward any non-overriden calls."""
    return getattr(self.original_file, name)
def GetBitsPerSecond(bandwidth):
  """Return bits per second represented by dummynet bandwidth option.

  See ipfw/dummynet.c:read_bandwidth for how it is really done.

  Args:
    bandwidth: a dummynet-style bandwidth specification (e.g. "10Kbit/s")
  Raises:
    BandwidthValueError: if the specification does not parse.
  """
  if bandwidth == '0':
    return 0
  bw_re = r'^(\d+)(?:([KM])?(bit|Byte)/s)?$'
  match = re.match(bw_re, str(bandwidth))
  if match is None:
    raise BandwidthValueError('Value, "%s", does not match regex: %s' % (
        bandwidth, bw_re))
  value, prefix, unit = match.groups()
  bits = int(value)
  # Dummynet uses decimal (SI) multipliers for the K and M prefixes.
  bits *= {'K': 1000, 'M': 1000000, None: 1}[prefix]
  if unit == 'Byte':
    bits *= 8
  return bits
| bsd-3-clause |
ashang/calibre | src/cherrypy/_cpreqbody.py | 82 | 37768 | """Request body processing for CherryPy.
.. versionadded:: 3.2
Application authors have complete control over the parsing of HTTP request
entities. In short, :attr:`cherrypy.request.body<cherrypy._cprequest.Request.body>`
is now always set to an instance of :class:`RequestBody<cherrypy._cpreqbody.RequestBody>`,
and *that* class is a subclass of :class:`Entity<cherrypy._cpreqbody.Entity>`.
When an HTTP request includes an entity body, it is often desirable to
provide that information to applications in a form other than the raw bytes.
Different content types demand different approaches. Examples:
* For a GIF file, we want the raw bytes in a stream.
* An HTML form is better parsed into its component fields, and each text field
decoded from bytes to unicode.
* A JSON body should be deserialized into a Python dict or list.
When the request contains a Content-Type header, the media type is used as a
key to look up a value in the
:attr:`request.body.processors<cherrypy._cpreqbody.Entity.processors>` dict.
If the full media
type is not found, then the major type is tried; for example, if no processor
is found for the 'image/jpeg' type, then we look for a processor for the 'image'
types altogether. If neither the full type nor the major type has a matching
processor, then a default processor is used
(:func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>`). For most
types, this means no processing is done, and the body is left unread as a
raw byte stream. Processors are configurable in an 'on_start_resource' hook.
Some processors, especially those for the 'text' types, attempt to decode bytes
to unicode. If the Content-Type request header includes a 'charset' parameter,
this is used to decode the entity. Otherwise, one or more default charsets may
be attempted, although this decision is up to each processor. If a processor
successfully decodes an Entity or Part, it should set the
:attr:`charset<cherrypy._cpreqbody.Entity.charset>` attribute
on the Entity or Part to the name of the successful charset, so that
applications can easily re-encode or transcode the value if they wish.
If the Content-Type of the request entity is of major type 'multipart', then
the above parsing process, and possibly a decoding process, is performed for
each part.
For both the full entity and multipart parts, a Content-Disposition header may
be used to fill :attr:`name<cherrypy._cpreqbody.Entity.name>` and
:attr:`filename<cherrypy._cpreqbody.Entity.filename>` attributes on the
request.body or the Part.
.. _custombodyprocessors:
Custom Processors
=================
You can add your own processors for any specific or major MIME type. Simply add
it to the :attr:`processors<cherrypy._cprequest.Entity.processors>` dict in a
hook/tool that runs at ``on_start_resource`` or ``before_request_body``.
Here's the built-in JSON tool for an example::
def json_in(force=True, debug=False):
request = cherrypy.serving.request
def json_processor(entity):
\"""Read application/json data into request.json.\"""
if not entity.headers.get("Content-Length", ""):
raise cherrypy.HTTPError(411)
body = entity.fp.read()
try:
request.json = json_decode(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
if force:
request.body.processors.clear()
request.body.default_proc = cherrypy.HTTPError(
415, 'Expected an application/json content type')
request.body.processors['application/json'] = json_processor
We begin by defining a new ``json_processor`` function to stick in the ``processors``
dictionary. All processor functions take a single argument, the ``Entity`` instance
they are to process. It will be called whenever a request is received (for those
URI's where the tool is turned on) which has a ``Content-Type`` of
"application/json".
First, it checks for a valid ``Content-Length`` (raising 411 if not valid), then
reads the remaining bytes on the socket. The ``fp`` object knows its own length, so
it won't hang waiting for data that never arrives. It will return when all data
has been read. Then, we decode those bytes using Python's built-in ``json`` module,
and stick the decoded result onto ``request.json`` . If it cannot be decoded, we
raise 400.
If the "force" argument is True (the default), the ``Tool`` clears the ``processors``
dict so that request entities of other ``Content-Types`` aren't parsed at all. Since
there's no entry for those invalid MIME types, the ``default_proc`` method of ``cherrypy.request.body``
is called. But this does nothing by default (usually to provide the page handler an opportunity to handle it.)
But in our case, we want to raise 415, so we replace ``request.body.default_proc``
with the error (``HTTPError`` instances, when called, raise themselves).
If we were defining a custom processor, we can do so without making a ``Tool``. Just add the config entry::
request.body.processors = {'application/json': json_processor}
Note that you can only replace the ``processors`` dict wholesale this way, not update the existing one.
"""
try:
from io import DEFAULT_BUFFER_SIZE
except ImportError:
DEFAULT_BUFFER_SIZE = 8192
import re
import sys
import tempfile
# Python 2 exposes a bytes-capable unquote_plus in urllib; on Python 3 it
# moved to urllib.parse and operates on str, so provide a bytes fallback.
try:
    from urllib import unquote_plus
except ImportError:
    def unquote_plus(bs):
        """Bytes version of urllib.parse.unquote_plus."""
        bs = bs.replace(ntob('+'), ntob(' '))
        atoms = bs.split(ntob('%'))
        for i in range(1, len(atoms)):
            item = atoms[i]
            try:
                # Decode the two hex digits that follow each '%' escape.
                pct = int(item[:2], 16)
                atoms[i] = bytes([pct]) + item[2:]
            except ValueError:
                # NOTE(review): on an invalid escape the '%' separator is
                # dropped by the join below (split consumed it) -- confirm
                # whether that lossy behavior is intended before changing.
                pass
        return ntob('').join(atoms)
import cherrypy
from cherrypy._cpcompat import basestring, ntob, ntou
from cherrypy.lib import httputil
# -------------------------------- Processors -------------------------------- #
def process_urlencoded(entity):
    """Read application/x-www-form-urlencoded data into entity.params."""
    qs = entity.fp.read()
    # Try each candidate charset in order; the for/else falls through to a
    # 400 error only when every charset fails to decode the body.
    for charset in entity.attempt_charsets:
        try:
            params = {}
            for aparam in qs.split(ntob('&')):
                # Pairs may be separated by ';' as well as '&'.
                for pair in aparam.split(ntob(';')):
                    if not pair:
                        continue
                    atoms = pair.split(ntob('='), 1)
                    if len(atoms) == 1:
                        # A bare key with no '=' gets an empty value.
                        atoms.append(ntob(''))
                    key = unquote_plus(atoms[0]).decode(charset)
                    value = unquote_plus(atoms[1]).decode(charset)
                    if key in params:
                        # Repeated keys accumulate into a list.
                        if not isinstance(params[key], list):
                            params[key] = [params[key]]
                        params[key].append(value)
                    else:
                        params[key] = value
        except UnicodeDecodeError:
            pass
        else:
            # Record which charset succeeded so callers can transcode.
            entity.charset = charset
            break
    else:
        raise cherrypy.HTTPError(
            400, "The request entity could not be decoded. The following "
            "charsets were attempted: %s" % repr(entity.attempt_charsets))
    # Now that all values have been successfully parsed and decoded,
    # apply them to the entity.params dict.
    for key, value in params.items():
        if key in entity.params:
            if not isinstance(entity.params[key], list):
                entity.params[key] = [entity.params[key]]
            entity.params[key].append(value)
        else:
            entity.params[key] = value
def process_multipart(entity):
    """Read all multipart parts into entity.parts."""
    ib = ""
    if 'boundary' in entity.content_type.params:
        # http://tools.ietf.org/html/rfc2046#section-5.1.1
        # "The grammar for parameters on the Content-type field is such that it
        # is often necessary to enclose the boundary parameter values in quotes
        # on the Content-type line"
        ib = entity.content_type.params['boundary'].strip('"')
    # RFC 2046: 1-70 boundary chars, printable ASCII, no trailing space.
    if not re.match("^[ -~]{0,200}[!-~]$", ib):
        raise ValueError('Invalid boundary in multipart form: %r' % (ib,))
    ib = ('--' + ib).encode('ascii')
    # Find the first marker
    while True:
        b = entity.readline()
        if not b:
            # Body ended before any boundary was seen; nothing to parse.
            return
        b = b.strip()
        if b == ib:
            break
    # Read all parts
    while True:
        part = entity.part_class.from_fp(entity.fp, ib)
        entity.parts.append(part)
        part.process()
        if part.fp.done:
            # The terminating "--boundary--" marker was consumed; stop.
            break
def process_multipart_form_data(entity):
    """Read all multipart/form-data parts into entity.parts or entity.params."""
    process_multipart(entity)

    retained = []
    for part in entity.parts:
        if part.name is None:
            # Anonymous parts stay in entity.parts.
            retained.append(part)
            continue
        # Named parts become params; file uploads keep the whole Part so
        # consumer code has access to its .file and .filename attributes.
        value = part.fullvalue() if part.filename is None else part
        if part.name not in entity.params:
            entity.params[part.name] = value
        elif isinstance(entity.params[part.name], list):
            entity.params[part.name].append(value)
        else:
            entity.params[part.name] = [entity.params[part.name], value]
    entity.parts = retained
def _old_process_multipart(entity):
    """The behavior of 3.2 and lower. Deprecated and will be changed in 3.3."""
    process_multipart(entity)

    params = entity.params
    for part in entity.parts:
        # Anonymous parts are collected under the generic u'parts' key.
        key = ntou('parts') if part.name is None else part.name
        # File uploads keep the whole Part so consumer code has access to
        # its .file and .filename attributes; regular fields use the text.
        value = part.fullvalue() if part.filename is None else part
        if key not in params:
            params[key] = value
        elif isinstance(params[key], list):
            params[key].append(value)
        else:
            params[key] = [params[key], value]
# --------------------------------- Entities --------------------------------- #
class Entity(object):
    """An HTTP request body, or MIME multipart body.

    This class collects information about the HTTP request entity. When a
    given entity is of MIME type "multipart", each part is parsed into its own
    Entity instance, and the set of parts stored in
    :attr:`entity.parts<cherrypy._cpreqbody.Entity.parts>`.

    Between the ``before_request_body`` and ``before_handler`` tools, CherryPy
    tries to process the request body (if any) by calling
    :func:`request.body.process<cherrypy._cpreqbody.RequestBody.process>`.
    This uses the ``content_type`` of the Entity to look up a suitable processor
    in :attr:`Entity.processors<cherrypy._cpreqbody.Entity.processors>`, a dict.
    If a matching processor cannot be found for the complete Content-Type,
    it tries again using the major type. For example, if a request with an
    entity of type "image/jpeg" arrives, but no processor can be found for
    that complete type, then one is sought for the major type "image". If a
    processor is still not found, then the
    :func:`default_proc<cherrypy._cpreqbody.Entity.default_proc>` method of the
    Entity is called (which does nothing by default; you can override this too).

    CherryPy includes processors for the "application/x-www-form-urlencoded"
    type, the "multipart/form-data" type, and the "multipart" major type.
    CherryPy 3.2 processes these types almost exactly as older versions.
    Parts are passed as arguments to the page handler using their
    ``Content-Disposition.name`` if given, otherwise in a generic "parts"
    argument. Each such part is either a string, or the
    :class:`Part<cherrypy._cpreqbody.Part>` itself if it's a file. (In this
    case it will have ``file`` and ``filename`` attributes, or possibly a
    ``value`` attribute). Each Part is itself a subclass of
    Entity, and has its own ``process`` method and ``processors`` dict.

    There is a separate processor for the "multipart" major type which is more
    flexible, and simply stores all multipart parts in
    :attr:`request.body.parts<cherrypy._cpreqbody.Entity.parts>`. You can
    enable it with::

        cherrypy.request.body.processors['multipart'] = _cpreqbody.process_multipart

    in an ``on_start_resource`` tool.
    """

    # http://tools.ietf.org/html/rfc2046#section-4.1.2:
    # "The default character set, which must be assumed in the
    # absence of a charset parameter, is US-ASCII."
    # However, many browsers send data in utf-8 with no charset.
    attempt_charsets = ['utf-8']
    """A list of strings, each of which should be a known encoding.

    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """

    charset = None
    """The successful decoding; see "attempt_charsets" above."""

    content_type = None
    """The value of the Content-Type request header.

    If the Entity is part of a multipart payload, this will be the Content-Type
    given in the MIME headers for this part.
    """

    default_content_type = 'application/x-www-form-urlencoded'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """

    filename = None
    """The ``Content-Disposition.filename`` header, if available."""

    fp = None
    """The readable socket file object."""

    headers = None
    """A dict of request/multipart header names and values.

    This is a copy of the ``request.headers`` for the ``request.body``;
    for multipart parts, it is the set of headers for that part.
    """

    length = None
    """The value of the ``Content-Length`` header, if provided."""

    name = None
    """The "name" parameter of the ``Content-Disposition`` header, if any."""

    params = None
    """
    If the request Content-Type is 'application/x-www-form-urlencoded' or
    multipart, this will be a dict of the params pulled from the entity
    body; that is, it will be the portion of request.params that come
    from the message body (sometimes called "POST params", although they
    can be sent with various HTTP method verbs). This value is set between
    the 'before_request_body' and 'before_handler' hooks (assuming that
    process_request_body is True)."""

    processors = {'application/x-www-form-urlencoded': process_urlencoded,
                  'multipart/form-data': process_multipart_form_data,
                  'multipart': process_multipart,
                  }
    """A dict of Content-Type names to processor methods."""

    parts = None
    """A list of Part instances if ``Content-Type`` is of major type "multipart"."""

    part_class = None
    """The class used for multipart parts.

    You can replace this with custom subclasses to alter the processing of
    multipart parts.
    """

    def __init__(self, fp, headers, params=None, parts=None):
        # Make an instance-specific copy of the class processors
        # so Tools, etc. can replace them per-request.
        self.processors = self.processors.copy()
        self.fp = fp
        self.headers = headers
        if params is None:
            params = {}
        self.params = params
        if parts is None:
            parts = []
        self.parts = parts
        # Content-Type
        self.content_type = headers.elements('Content-Type')
        if self.content_type:
            self.content_type = self.content_type[0]
        else:
            # No header: fall back to the class default (may be '' for
            # RequestBody, which suppresses body parsing altogether).
            self.content_type = httputil.HeaderElement.from_str(
                self.default_content_type)
        # Copy the class 'attempt_charsets', prepending any Content-Type charset
        dec = self.content_type.params.get("charset", None)
        if dec:
            self.attempt_charsets = [dec] + [c for c in self.attempt_charsets
                                             if c != dec]
        else:
            self.attempt_charsets = self.attempt_charsets[:]
        # Length
        self.length = None
        clen = headers.get('Content-Length', None)
        # If Transfer-Encoding is 'chunked', ignore any Content-Length.
        if clen is not None and 'chunked' not in headers.get('Transfer-Encoding', ''):
            try:
                self.length = int(clen)
            except ValueError:
                pass
        # Content-Disposition
        self.name = None
        self.filename = None
        disp = headers.elements('Content-Disposition')
        if disp:
            disp = disp[0]
            if 'name' in disp.params:
                self.name = disp.params['name']
                # Strip optional surrounding quotes from the parameter value.
                if self.name.startswith('"') and self.name.endswith('"'):
                    self.name = self.name[1:-1]
            if 'filename' in disp.params:
                self.filename = disp.params['filename']
                if self.filename.startswith('"') and self.filename.endswith('"'):
                    self.filename = self.filename[1:-1]

    # The 'type' attribute is deprecated in 3.2; remove it in 3.3.
    type = property(lambda self: self.content_type,
        doc="""A deprecated alias for :attr:`content_type<cherrypy._cpreqbody.Entity.content_type>`.""")

    def read(self, size=None, fp_out=None):
        # Delegate to the (possibly SizedReader-wrapped) socket fp.
        return self.fp.read(size, fp_out)

    def readline(self, size=None):
        return self.fp.readline(size)

    def readlines(self, sizehint=None):
        return self.fp.readlines(sizehint)

    def __iter__(self):
        return self

    def __next__(self):
        line = self.readline()
        if not line:
            raise StopIteration
        return line

    def next(self):
        # Python 2 iterator protocol; delegates to __next__.
        return self.__next__()

    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None). Return fp_out."""
        if fp_out is None:
            fp_out = self.make_file()
        self.read(fp_out=fp_out)
        return fp_out

    def make_file(self):
        """Return a file-like object into which the request body will be read.

        By default, this will return a TemporaryFile. Override as needed.
        See also :attr:`cherrypy._cpreqbody.Part.maxrambytes`."""
        return tempfile.TemporaryFile()

    def fullvalue(self):
        """Return this entity as a string, whether stored in a file or not."""
        # NOTE(review): self.file/self.value are initialized by the Part
        # subclass; a bare Entity that never set them would raise
        # AttributeError here -- confirm callers only use this on Parts.
        if self.file:
            # It was stored in a tempfile. Read it.
            self.file.seek(0)
            value = self.file.read()
            self.file.seek(0)
        else:
            value = self.value
        return value

    def process(self):
        """Execute the best-match processor for the given media type."""
        proc = None
        ct = self.content_type.value
        try:
            proc = self.processors[ct]
        except KeyError:
            # Fall back from the full "type/subtype" to the major type alone.
            toptype = ct.split('/', 1)[0]
            try:
                proc = self.processors[toptype]
            except KeyError:
                pass
        if proc is None:
            self.default_proc()
        else:
            proc(self)

    def default_proc(self):
        """Called if a more-specific processor is not found for the ``Content-Type``."""
        # Leave the fp alone for someone else to read. This works fine
        # for request.body, but the Part subclasses need to override this
        # so they can move on to the next part.
        pass
class Part(Entity):
    """A MIME part entity, part of a multipart entity."""

    # "The default character set, which must be assumed in the absence of a
    # charset parameter, is US-ASCII."
    attempt_charsets = ['us-ascii', 'utf-8']
    """A list of strings, each of which should be a known encoding.

    When the Content-Type of the request body warrants it, each of the given
    encodings will be tried in order. The first one to successfully decode the
    entity without raising an error is stored as
    :attr:`entity.charset<cherrypy._cpreqbody.Entity.charset>`. This defaults
    to ``['utf-8']`` (plus 'ISO-8859-1' for "text/\*" types, as required by
    `HTTP/1.1 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1>`_),
    but ``['us-ascii', 'utf-8']`` for multipart parts.
    """

    boundary = None
    """The MIME multipart boundary."""

    default_content_type = 'text/plain'
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however (this class),
    the MIME spec declares that a part with no Content-Type defaults to
    "text/plain".
    """

    # This is the default in stdlib cgi. We may want to increase it.
    maxrambytes = 1000
    """The threshold of bytes after which point the ``Part`` will store its data
    in a file (generated by :func:`make_file<cherrypy._cprequest.Entity.make_file>`)
    instead of a string. Defaults to 1000, just like the :mod:`cgi` module in
    Python's standard library.
    """

    def __init__(self, fp, headers, boundary):
        Entity.__init__(self, fp, headers)
        self.boundary = boundary
        self.file = None
        self.value = None

    def from_fp(cls, fp, boundary):
        # Alternate constructor: consume this part's MIME headers from fp
        # before constructing the Part.
        headers = cls.read_headers(fp)
        return cls(fp, headers, boundary)
    from_fp = classmethod(from_fp)

    def read_headers(cls, fp):
        headers = httputil.HeaderMap()
        while True:
            line = fp.readline()
            if not line:
                # No more data--illegal end of headers
                raise EOFError("Illegal end of headers.")
            if line == ntob('\r\n'):
                # Normal end of headers
                break
            if not line.endswith(ntob('\r\n')):
                raise ValueError("MIME requires CRLF terminators: %r" % line)
            if line[0] in ntob(' \t'):
                # It's a continuation line.
                v = line.strip().decode('ISO-8859-1')
            else:
                k, v = line.split(ntob(":"), 1)
                k = k.strip().decode('ISO-8859-1')
                v = v.strip().decode('ISO-8859-1')
            # Repeated header names have their values joined with ", ".
            existing = headers.get(k)
            if existing:
                v = ", ".join((existing, v))
            headers[k] = v
        return headers
    read_headers = classmethod(read_headers)

    def read_lines_to_boundary(self, fp_out=None):
        """Read bytes from self.fp and return or write them to a file.

        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.

        If the 'fp_out' argument is not None, it must be a file-like object that
        supports the 'write' method; all bytes read will be written to the fp,
        and that fp is returned.
        """
        endmarker = self.boundary + ntob("--")
        delim = ntob("")
        prev_lf = True
        lines = []
        seen = 0
        while True:
            line = self.fp.readline(1<<16)
            if not line:
                raise EOFError("Illegal end of multipart body.")
            # A boundary is only valid at the start of a line (prev_lf).
            if line.startswith(ntob("--")) and prev_lf:
                strippedline = line.strip()
                if strippedline == self.boundary:
                    break
                if strippedline == endmarker:
                    # Final "--boundary--": mark the whole body as finished.
                    self.fp.finish()
                    break
            line = delim + line
            # Defer writing the trailing (CR)LF: it may belong to the
            # boundary delimiter rather than to the part's data.
            if line.endswith(ntob("\r\n")):
                delim = ntob("\r\n")
                line = line[:-2]
                prev_lf = True
            elif line.endswith(ntob("\n")):
                delim = ntob("\n")
                line = line[:-1]
                prev_lf = True
            else:
                delim = ntob("")
                prev_lf = False
            if fp_out is None:
                lines.append(line)
                seen += len(line)
                if seen > self.maxrambytes:
                    # Too large for RAM: spill everything read so far to disk.
                    fp_out = self.make_file()
                    for line in lines:
                        fp_out.write(line)
            else:
                fp_out.write(line)
        if fp_out is None:
            result = ntob('').join(lines)
            # Try each charset; for/else raises 400 only if all of them fail.
            for charset in self.attempt_charsets:
                try:
                    result = result.decode(charset)
                except UnicodeDecodeError:
                    pass
                else:
                    self.charset = charset
                    return result
            else:
                raise cherrypy.HTTPError(
                    400, "The request entity could not be decoded. The following "
                    "charsets were attempted: %s" % repr(self.attempt_charsets))
        else:
            fp_out.seek(0)
            return fp_out

    def default_proc(self):
        """Called if a more-specific processor is not found for the ``Content-Type``."""
        if self.filename:
            # Always read into a file if a .filename was given.
            self.file = self.read_into_file()
        else:
            result = self.read_lines_to_boundary()
            if isinstance(result, basestring):
                self.value = result
            else:
                self.file = result

    def read_into_file(self, fp_out=None):
        """Read the request body into fp_out (or make_file() if None). Return fp_out."""
        if fp_out is None:
            fp_out = self.make_file()
        self.read_lines_to_boundary(fp_out=fp_out)
        return fp_out

# Wire Part in as the default class used for multipart parts.
Entity.part_class = Part
# Sentinel meaning "no byte limit". On very old Pythons float('inf') could
# fail, so fall back to an object that compares greater than everything.
try:
    inf = float('inf')
except ValueError:
    # Python 2.4 and lower
    class Infinity(object):
        def __cmp__(self, other):
            # Always compare as "greater than" any other value.
            return 1
        def __sub__(self, other):
            # Infinity minus anything is still infinity.
            return self
    inf = Infinity()
# Header fields whose repeated values may be folded into a single
# comma-separated value (used when merging trailer headers in SizedReader).
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
    'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control', 'Connection',
    'Content-Encoding', 'Content-Language', 'Expect', 'If-Match',
    'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'Te', 'Trailer',
    'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning', 'Www-Authenticate']
class SizedReader:
    """Wrap a socket file object, enforcing Content-Length and max-byte limits.

    Tracks bytes consumed in self.bytes_read, raises HTTP 413 when
    self.maxbytes is exceeded, and parses any trailer headers (into
    self.trailers) once the body has been fully read.
    """

    def __init__(self, fp, length, maxbytes, bufsize=DEFAULT_BUFFER_SIZE, has_trailers=False):
        # Wrap our fp in a buffer so peek() works
        self.fp = fp
        self.length = length
        self.maxbytes = maxbytes
        self.buffer = ntob('')
        self.bufsize = bufsize
        self.bytes_read = 0
        self.done = False
        self.has_trailers = has_trailers

    def read(self, size=None, fp_out=None):
        """Read bytes from the request body and return or write them to a file.

        A number of bytes less than or equal to the 'size' argument are read
        off the socket. The actual number of bytes read are tracked in
        self.bytes_read. The number may be smaller than 'size' when 1) the
        client sends fewer bytes, 2) the 'Content-Length' request header
        specifies fewer bytes than requested, or 3) the number of bytes read
        exceeds self.maxbytes (in which case, 413 is raised).

        If the 'fp_out' argument is None (the default), all bytes read are
        returned in a single byte string.

        If the 'fp_out' argument is not None, it must be a file-like object that
        supports the 'write' method; all bytes read will be written to the fp,
        and None is returned.
        """
        if self.length is None:
            if size is None:
                remaining = inf
            else:
                remaining = size
        else:
            remaining = self.length - self.bytes_read
            if size and size < remaining:
                remaining = size
        if remaining == 0:
            self.finish()
            if fp_out is None:
                return ntob('')
            else:
                return None

        chunks = []

        # Read bytes from the buffer.
        if self.buffer:
            if remaining is inf:
                data = self.buffer
                self.buffer = ntob('')
            else:
                data = self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            datalen = len(data)
            remaining -= datalen

            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)

            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)

        # Read bytes from the socket.
        while remaining > 0:
            chunksize = min(remaining, self.bufsize)
            try:
                data = self.fp.read(chunksize)
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, "Maximum request length: %r" % e.args[1])
                else:
                    raise
            if not data:
                self.finish()
                break
            datalen = len(data)
            remaining -= datalen

            # Check lengths.
            self.bytes_read += datalen
            if self.maxbytes and self.bytes_read > self.maxbytes:
                raise cherrypy.HTTPError(413)

            # Store the data.
            if fp_out is None:
                chunks.append(data)
            else:
                fp_out.write(data)

        if fp_out is None:
            return ntob('').join(chunks)

    def readline(self, size=None):
        """Read a line from the request body and return it."""
        chunks = []
        while size is None or size > 0:
            chunksize = self.bufsize
            if size is not None and size < self.bufsize:
                chunksize = size
            data = self.read(chunksize)
            if not data:
                break
            pos = data.find(ntob('\n')) + 1
            if pos:
                # Keep up to and including the newline; push the remainder
                # back onto the buffer and un-count those bytes.
                chunks.append(data[:pos])
                remainder = data[pos:]
                self.buffer += remainder
                self.bytes_read -= len(remainder)
                break
            else:
                chunks.append(data)
        return ntob('').join(chunks)

    def readlines(self, sizehint=None):
        """Read lines from the request body and return them."""
        if self.length is not None:
            if sizehint is None:
                sizehint = self.length - self.bytes_read
            else:
                sizehint = min(sizehint, self.length - self.bytes_read)

        lines = []
        seen = 0
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
            seen += len(line)
            if seen >= sizehint:
                break
        return lines

    def finish(self):
        """Mark the body as fully read and parse any trailer headers."""
        self.done = True
        if self.has_trailers and hasattr(self.fp, 'read_trailer_lines'):
            self.trailers = {}

            try:
                for line in self.fp.read_trailer_lines():
                    if line[0] in ntob(' \t'):
                        # It's a continuation line.
                        v = line.strip()
                    else:
                        try:
                            k, v = line.split(ntob(":"), 1)
                        except ValueError:
                            raise ValueError("Illegal header line.")
                        k = k.strip().title()
                        v = v.strip()

                    if k in comma_separated_headers:
                        # Fixed: this previously called self.trailers.get(envname),
                        # where 'envname' is undefined anywhere in this class --
                        # a NameError whenever a comma-separated trailer header
                        # repeated. The intended lookup key is 'k'.
                        existing = self.trailers.get(k)
                        if existing:
                            v = ntob(", ").join((existing, v))
                    self.trailers[k] = v
            except Exception:
                e = sys.exc_info()[1]
                if e.__class__.__name__ == 'MaxSizeExceeded':
                    # Post data is too big
                    raise cherrypy.HTTPError(
                        413, "Maximum request length: %r" % e.args[1])
                else:
                    raise
class RequestBody(Entity):
    """The entity of the HTTP request."""

    bufsize = 8 * 1024
    """The buffer size used when reading the socket."""

    # Don't parse the request body at all if the client didn't provide
    # a Content-Type header. See http://www.cherrypy.org/ticket/790
    default_content_type = ''
    """This defines a default ``Content-Type`` to use if no Content-Type header
    is given. The empty string is used for RequestBody, which results in the
    request body not being read or parsed at all. This is by design; a missing
    ``Content-Type`` header in the HTTP request entity is an error at best,
    and a security hole at worst. For multipart parts, however, the MIME spec
    declares that a part with no Content-Type defaults to "text/plain"
    (see :class:`Part<cherrypy._cpreqbody.Part>`).
    """

    maxbytes = None
    """Raise ``MaxSizeExceeded`` if more bytes than this are read from the socket."""

    def __init__(self, fp, headers, params=None, request_params=None):
        Entity.__init__(self, fp, headers, params)

        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7.1
        # When no explicit charset parameter is provided by the
        # sender, media subtypes of the "text" type are defined
        # to have a default charset value of "ISO-8859-1" when
        # received via HTTP.
        if self.content_type.value.startswith('text/'):
            for c in ('ISO-8859-1', 'iso-8859-1', 'Latin-1', 'latin-1'):
                if c in self.attempt_charsets:
                    break
            else:
                self.attempt_charsets.append('ISO-8859-1')

        # Temporary fix while deprecating passing .parts as .params.
        self.processors['multipart'] = _old_process_multipart

        if request_params is None:
            request_params = {}
        self.request_params = request_params

    def process(self):
        """Process the request entity based on its Content-Type."""
        # "The presence of a message-body in a request is signaled by the
        # inclusion of a Content-Length or Transfer-Encoding header field in
        # the request's message-headers."
        # It is possible to send a POST request with no body, for example;
        # however, app developers are responsible in that case to set
        # cherrypy.request.process_body to False so this method isn't called.
        h = cherrypy.serving.request.headers
        if 'Content-Length' not in h and 'Transfer-Encoding' not in h:
            raise cherrypy.HTTPError(411)

        # Wrap the raw fp so all reads respect Content-Length/maxbytes and
        # trailer headers are parsed when the body is exhausted.
        self.fp = SizedReader(self.fp, self.length,
                              self.maxbytes, bufsize=self.bufsize,
                              has_trailers='Trailer' in h)
        super(RequestBody, self).process()

        # Body params should also be a part of the request_params
        # add them in here.
        request_params = self.request_params
        for key, value in self.params.items():
            # Python 2 only: keyword arguments must be byte strings (type 'str').
            if sys.version_info < (3, 0):
                if isinstance(key, unicode):
                    key = key.encode('ISO-8859-1')

            if key in request_params:
                if not isinstance(request_params[key], list):
                    request_params[key] = [request_params[key]]
                request_params[key].append(value)
            else:
                request_params[key] = value
| gpl-3.0 |
gongyicoin/gongyicoin | contrib/seeds/makeseeds.py | 29 | 3747 | #!/usr/bin/env python
#
# Generate seeds.txt from Pieter's DNS seeder
#
# Maximum number of seed entries to emit.
NSEEDS=512
# Cap on how many seeds may come from a single autonomous system.
MAX_SEEDS_PER_ASN=2
# Minimum block height a node must report to be considered current.
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = set([
    "130.211.129.106", "178.63.107.226",
    "83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
    "54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
    "54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
    "54.94.195.96", "54.94.200.247"
])
import re
import sys
import dns.resolver
# Matches "a.b.c.d:9333"; group 1 is the address, groups 2-5 the octets.
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):9333$")
# Accept only known client versions.
# NOTE(review): the dots in the version numbers are unescaped, so each
# matches any character; this only loosens the filter slightly.
PATTERN_AGENT = re.compile(r"^(\/Satoshi:0.8.6\/|\/Satoshi:0.9.(2|3)\/|\/Satoshi:0.10.\d{1,2}\/)$")
def parseline(line):
    """Parse one whitespace-separated line of the seeder dump.

    Returns a dict describing the node, or None for lines that are
    malformed, non-IPv4, or flagged bad by the seeder.  Fields read
    (0-based): 0 address:port, 1 good flag, 2 last-success timestamp,
    7 30-day uptime percentage (with trailing '%'), 8 block height,
    9 service bits (hex), 10 protocol version, 11 quoted user agent.
    """
    sline = line.split()
    # Fields up to index 11 are read below, so at least 12 columns are
    # required.  (The original guard was `< 11`, which let 11-column
    # lines through and crashed on sline[11] with IndexError.)
    if len(sline) < 12:
        return None
    # Match only IPv4
    m = PATTERN_IPV4.match(sline[0])
    if m is None:
        return None
    # Do IPv4 sanity check, building the numeric form along the way.
    ip = 0
    for i in range(0,4):
        if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
            return None
        ip = ip + (int(m.group(i+2)) << (8*(3-i)))
    if ip == 0:
        return None
    # Skip nodes the seeder marked as bad.  (The original compared the
    # string field to the int 0, which could never match, so bad
    # results were silently kept.)
    if int(sline[1]) == 0:
        return None
    # Extract uptime %.
    uptime30 = float(sline[7][:-1])
    # Extract Unix timestamp of last success.
    lastsuccess = int(sline[2])
    # Extract protocol version.
    version = int(sline[10])
    # Extract user agent (strip the surrounding quotes).
    agent = sline[11][1:-1]
    # Extract service flags.
    service = int(sline[9], 16)
    # Extract blocks.
    blocks = int(sline[8])
    # Construct result.
    return {
        'ip': m.group(1),
        'ipnum': ip,
        'uptime': uptime30,
        'lastsuccess': lastsuccess,
        'version': version,
        'agent': agent,
        'service': service,
        'blocks': blocks,
    }
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
    """Limit `ips` to `max_total` entries and `max_per_asn` per origin AS.

    The AS number for each address is looked up via the Team Cymru
    IP-to-ASN DNS service (reversed octets under origin.asn.cymru.com).
    Addresses whose ASN cannot be resolved are skipped with a note on
    stderr.  Returns the filtered list, preserving input order.
    """
    result = []
    asn_count = {}
    for ip in ips:
        if len(result) == max_total:
            break
        try:
            # e.g. 1.2.3.4 -> "4.3.2.1.origin.asn.cymru.com" TXT,
            # whose answer looks like '"12345 | 1.2.3.0/24 | ..."'.
            rev = '.'.join(reversed(ip['ip'].split('.')))
            answer = dns.resolver.query(rev + '.origin.asn.cymru.com',
                                        'TXT').response.answer
            asn = int([x.to_text() for x in answer][0].split('\"')[1].split(' ')[0])
            if asn not in asn_count:
                asn_count[asn] = 0
            if asn_count[asn] == max_per_asn:
                continue
            asn_count[asn] += 1
            result.append(ip)
        except Exception:
            # The original used a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only trap real errors.
            sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
    return result
def main():
    """Read a seeder dump from stdin and print up to NSEEDS good IPv4 seeds."""
    lines = sys.stdin.readlines()
    ips = [parseline(line) for line in lines]
    # Keep only entries with a valid IPv4 address (parseline returned a dict).
    ips = [ip for ip in ips if ip is not None]
    # Skip entries from suspicious hosts.
    ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
    # Enforce minimal number of blocks.
    ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
    # Require service bit 1.
    ips = [ip for ip in ips if (ip['service'] & 1) == 1]
    # Require at least 50% 30-day uptime.
    ips = [ip for ip in ips if ip['uptime'] > 50]
    # Require a known and recent user agent.
    ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
    # Sort by availability (and use last success as tie breaker)
    ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
    # Look up ASNs and limit results, both per ASN and globally.
    ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
    # Sort the results by IP address (for deterministic output).
    ips.sort(key=lambda x: (x['ipnum']))
    for ip in ips:
        print ip['ip']
if __name__ == '__main__':
    main()
| mit |
fjcapdevila/django-helpdesk | helpdesk/forms.py | 4 | 19247 | """
django-helpdesk - A Django powered ticket tracker for small enterprise.
(c) Copyright 2008 Jutda. All Rights Reserved. See LICENSE for details.
forms.py - Definitions of newforms-based forms for creating and maintaining
tickets.
"""
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from django import forms
from django.forms import extras
from django.core.files.storage import default_storage
from django.conf import settings
from django.utils.translation import ugettext as _
try:
from django.contrib.auth import get_user_model
User = get_user_model()
except ImportError:
from django.contrib.auth.models import User
try:
from django.utils import timezone
except ImportError:
from datetime import datetime as timezone
from helpdesk.lib import send_templated_mail, safe_template_context
from helpdesk.models import Ticket, Queue, FollowUp, Attachment, IgnoreEmail, TicketCC, CustomField, TicketCustomFieldValue, TicketDependency
from helpdesk import settings as helpdesk_settings
class CustomFieldMixin(object):
    """
    Mixin that provides a method to turn CustomFields into an actual field
    """
    def customfield_to_field(self, field, instanceargs):
        """Add a form field named 'custom_<name>' for a CustomField.

        `field` is a helpdesk CustomField instance; `instanceargs` is a
        dict of constructor keyword arguments (label, help_text, ...)
        that is augmented here with type-specific options before being
        passed to the chosen ``forms`` field class.
        """
        # Map each CustomField.data_type to the form-field class that
        # renders it.
        fieldclasses = {
            'varchar': forms.CharField,
            'text': forms.CharField,
            'integer': forms.IntegerField,
            'decimal': forms.DecimalField,
            'list': forms.ChoiceField,
            'boolean': forms.BooleanField,
            'date': forms.DateField,
            'time': forms.TimeField,
            'datetime': forms.DateTimeField,
            'email': forms.EmailField,
            'url': forms.URLField,
            'ipaddress': forms.IPAddressField,
            'slug': forms.SlugField,
        }
        try:
            fieldclass = fieldclasses[field.data_type]
        except KeyError:
            # The original if/elif chain left `fieldclass` unbound for
            # unknown types and crashed with an UnboundLocalError; raise
            # a descriptive error instead.  (UnboundLocalError subclasses
            # NameError, so existing NameError handlers still apply.)
            raise NameError('Unknown CustomField data_type: %r' % field.data_type)
        # Type-specific constructor arguments.
        if field.data_type == 'varchar':
            instanceargs['max_length'] = field.max_length
        elif field.data_type == 'text':
            instanceargs['widget'] = forms.Textarea
            instanceargs['max_length'] = field.max_length
        elif field.data_type == 'decimal':
            instanceargs['decimal_places'] = field.decimal_places
            instanceargs['max_digits'] = field.max_length
        elif field.data_type == 'list':
            choices = field.choices_as_array
            if field.empty_selection_list:
                # Offer a blank default choice at the top of the list.
                choices.insert(0, ('', '---------'))
            instanceargs['choices'] = choices
        self.fields['custom_%s' % field.name] = fieldclass(**instanceargs)
class EditTicketForm(CustomFieldMixin, forms.ModelForm):
    """Staff form for editing an existing Ticket, including custom fields."""
    class Meta:
        model = Ticket
        exclude = ('created', 'modified', 'status', 'on_hold', 'resolution', 'last_escalation', 'assigned_to')
    def __init__(self, *args, **kwargs):
        """
        Add any custom fields that are defined to the form
        """
        super(EditTicketForm, self).__init__(*args, **kwargs)
        for field in CustomField.objects.all():
            # Pre-populate with the ticket's current value, if any.
            try:
                current_value = TicketCustomFieldValue.objects.get(ticket=self.instance, field=field)
                initial_value = current_value.value
            except TicketCustomFieldValue.DoesNotExist:
                initial_value = None
            instanceargs = {
                'label': field.label,
                'help_text': field.help_text,
                'required': field.required,
                'initial': initial_value,
            }
            self.customfield_to_field(field, instanceargs)
    def save(self, *args, **kwargs):
        """Persist custom-field values, then save the ticket itself."""
        for field, value in self.cleaned_data.items():
            if field.startswith('custom_'):
                field_name = field.replace('custom_', '', 1)
                customfield = CustomField.objects.get(name=field_name)
                # Update the existing value record or create a new one.
                # (The original used a bare `except:` here, which hid any
                # unrelated database error; only a missing record should
                # trigger creation.)
                try:
                    cfv = TicketCustomFieldValue.objects.get(ticket=self.instance, field=customfield)
                except TicketCustomFieldValue.DoesNotExist:
                    cfv = TicketCustomFieldValue(ticket=self.instance, field=customfield)
                cfv.value = value
                cfv.save()
        return super(EditTicketForm, self).save(*args, **kwargs)
class EditFollowUpForm(forms.ModelForm):
    """Form for editing a FollowUp attached to an open ticket."""
    def __init__(self, *args, **kwargs):
        """Restrict the selectable tickets to open or reopened ones."""
        super(EditFollowUpForm, self).__init__(*args, **kwargs)
        self.fields["ticket"].queryset = Ticket.objects.filter(status__in=(Ticket.OPEN_STATUS, Ticket.REOPENED_STATUS))
    class Meta:
        model = FollowUp
        exclude = ('date', 'user',)
class TicketForm(CustomFieldMixin, forms.Form):
    """Form for opening a new ticket, with owner assignment.

    The ``user`` argument to :meth:`save` and the ``assigned_to`` field
    suggest this is the staff-facing counterpart of PublicTicketForm --
    confirm against the views that instantiate it.
    """
    queue = forms.ChoiceField(
        label=_('Queue'),
        required=True,
        # Choices are empty here; presumably populated by the view
        # before rendering -- verify against the caller.
        choices=()
    )
    title = forms.CharField(
        max_length=100,
        required=True,
        widget=forms.TextInput(attrs={'size':'60'}),
        label=_('Summary of the problem'),
    )
    submitter_email = forms.EmailField(
        required=False,
        label=_('Submitter E-Mail Address'),
        widget=forms.TextInput(attrs={'size':'60'}),
        help_text=_('This e-mail address will receive copies of all public '
            'updates to this ticket.'),
    )
    body = forms.CharField(
        widget=forms.Textarea(attrs={'cols': 47, 'rows': 15}),
        label=_('Description of Issue'),
        required=True,
    )
    assigned_to = forms.ChoiceField(
        # Choices are empty here; presumably filled by the view.
        choices=(),
        required=False,
        label=_('Case owner'),
        help_text=_('If you select an owner other than yourself, they\'ll be '
            'e-mailed details of this ticket immediately.'),
    )
    priority = forms.ChoiceField(
        choices=Ticket.PRIORITY_CHOICES,
        required=False,
        initial='3',
        label=_('Priority'),
        help_text=_('Please select a priority carefully. If unsure, leave it '
            'as \'3\'.'),
    )
    due_date = forms.DateTimeField(
        widget=extras.SelectDateWidget,
        required=False,
        label=_('Due on'),
    )
    def clean_due_date(self):
        """Currently a pass-through; reserved for future validation."""
        data = self.cleaned_data['due_date']
        #TODO: add Google calendar update hook
        #if not hasattr(self, 'instance') or self.instance.due_date != new_data:
        #    print "you changed!"
        return data
    attachment = forms.FileField(
        required=False,
        label=_('Attach File'),
        help_text=_('You can attach a file such as a document or screenshot to this ticket.'),
    )
    def __init__(self, *args, **kwargs):
        """
        Add any custom fields that are defined to the form
        """
        super(TicketForm, self).__init__(*args, **kwargs)
        for field in CustomField.objects.all():
            instanceargs = {
                'label': field.label,
                'help_text': field.help_text,
                'required': field.required,
            }
            self.customfield_to_field(field, instanceargs)
    def save(self, user):
        """
        Writes and returns a Ticket() object

        Also creates the opening FollowUp, stores custom-field values
        and any attachment, and sends notification e-mails to the
        submitter, the assignee and the queue CC addresses (each at
        most once, tracked via ``messages_sent_to``).
        """
        q = Queue.objects.get(id=int(self.cleaned_data['queue']))
        t = Ticket( title = self.cleaned_data['title'],
                    submitter_email = self.cleaned_data['submitter_email'],
                    created = timezone.now(),
                    status = Ticket.OPEN_STATUS,
                    queue = q,
                    description = self.cleaned_data['body'],
                    priority = self.cleaned_data['priority'],
                    due_date = self.cleaned_data['due_date'],
                    )
        # Resolve the chosen owner; silently leave unassigned if the
        # user id no longer exists.
        if self.cleaned_data['assigned_to']:
            try:
                u = User.objects.get(id=self.cleaned_data['assigned_to'])
                t.assigned_to = u
            except User.DoesNotExist:
                t.assigned_to = None
        t.save()
        # Persist any custom-field values entered on the form.
        for field, value in self.cleaned_data.items():
            if field.startswith('custom_'):
                field_name = field.replace('custom_', '', 1)
                customfield = CustomField.objects.get(name=field_name)
                cfv = TicketCustomFieldValue(ticket=t,
                            field=customfield,
                            value=value)
                cfv.save()
        # Opening follow-up; public so the submitter sees it.
        f = FollowUp(   ticket = t,
                        title = _('Ticket Opened'),
                        date = timezone.now(),
                        public = True,
                        comment = self.cleaned_data['body'],
                        user = user,
                        )
        if self.cleaned_data['assigned_to']:
            f.title = _('Ticket Opened & Assigned to %(name)s') % {
                'name': t.get_assigned_to
            }
        f.save()
        files = []
        if self.cleaned_data['attachment']:
            import mimetypes
            file = self.cleaned_data['attachment']
            filename = file.name.replace(' ', '_')
            a = Attachment(
                followup=f,
                filename=filename,
                mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',
                size=file.size,
                )
            a.file.save(file.name, file, save=False)
            a.save()
            if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):
                # Only files smaller than 512kb (or as defined in
                # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.
                try:
                    files.append([a.filename, a.file])
                except NotImplementedError:
                    pass
        context = safe_template_context(t)
        context['comment'] = f.comment
        # Track recipients so nobody gets the notification twice.
        messages_sent_to = []
        if t.submitter_email:
            send_templated_mail(
                'newticket_submitter',
                context,
                recipients=t.submitter_email,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
            messages_sent_to.append(t.submitter_email)
        if t.assigned_to and t.assigned_to != user and t.assigned_to.usersettings.settings.get('email_on_ticket_assign', False) and t.assigned_to.email and t.assigned_to.email not in messages_sent_to:
            send_templated_mail(
                'assigned_owner',
                context,
                recipients=t.assigned_to.email,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
            messages_sent_to.append(t.assigned_to.email)
        if q.new_ticket_cc and q.new_ticket_cc not in messages_sent_to:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=q.new_ticket_cc,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
            messages_sent_to.append(q.new_ticket_cc)
        if q.updated_ticket_cc and q.updated_ticket_cc != q.new_ticket_cc and q.updated_ticket_cc not in messages_sent_to:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=q.updated_ticket_cc,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
        return t
class PublicTicketForm(CustomFieldMixin, forms.Form):
    """Form for members of the public to open a ticket (no owner field,
    submitter e-mail required, only non-staff custom fields shown)."""
    queue = forms.ChoiceField(
        label=_('Queue'),
        required=True,
        # Choices are empty here; presumably populated by the view
        # before rendering -- verify against the caller.
        choices=()
    )
    title = forms.CharField(
        max_length=100,
        required=True,
        widget=forms.TextInput(),
        label=_('Summary of your query'),
    )
    submitter_email = forms.EmailField(
        required=True,
        label=_('Your E-Mail Address'),
        help_text=_('We will e-mail you when your ticket is updated.'),
    )
    body = forms.CharField(
        widget=forms.Textarea(),
        label=_('Description of your issue'),
        required=True,
        help_text=_('Please be as descriptive as possible, including any '
            'details we may need to address your query.'),
    )
    priority = forms.ChoiceField(
        choices=Ticket.PRIORITY_CHOICES,
        required=True,
        initial='3',
        label=_('Urgency'),
        help_text=_('Please select a priority carefully.'),
    )
    due_date = forms.DateTimeField(
        widget=extras.SelectDateWidget,
        required=False,
        label=_('Due on'),
    )
    attachment = forms.FileField(
        required=False,
        label=_('Attach File'),
        help_text=_('You can attach a file such as a document or screenshot to this ticket.'),
        max_length=1000,
    )
    def __init__(self, *args, **kwargs):
        """
        Add any custom fields that are defined to the form

        Only fields not marked staff-only are exposed publicly.
        """
        super(PublicTicketForm, self).__init__(*args, **kwargs)
        for field in CustomField.objects.filter(staff_only=False):
            instanceargs = {
                'label': field.label,
                'help_text': field.help_text,
                'required': field.required,
            }
            self.customfield_to_field(field, instanceargs)
    def save(self):
        """
        Writes and returns a Ticket() object

        Also creates the opening FollowUp, stores custom-field values
        and any attachment, and notifies the submitter plus the queue
        CC addresses (each at most once, via ``messages_sent_to``).
        """
        q = Queue.objects.get(id=int(self.cleaned_data['queue']))
        t = Ticket(
            title = self.cleaned_data['title'],
            submitter_email = self.cleaned_data['submitter_email'],
            created = timezone.now(),
            status = Ticket.OPEN_STATUS,
            queue = q,
            description = self.cleaned_data['body'],
            priority = self.cleaned_data['priority'],
            due_date = self.cleaned_data['due_date'],
            )
        t.save()
        # Persist any custom-field values entered on the form.
        for field, value in self.cleaned_data.items():
            if field.startswith('custom_'):
                field_name = field.replace('custom_', '', 1)
                customfield = CustomField.objects.get(name=field_name)
                cfv = TicketCustomFieldValue(ticket=t,
                            field=customfield,
                            value=value)
                cfv.save()
        # Opening follow-up; no user is recorded for public submissions.
        f = FollowUp(
            ticket = t,
            title = _('Ticket Opened Via Web'),
            date = timezone.now(),
            public = True,
            comment = self.cleaned_data['body'],
            )
        f.save()
        files = []
        if self.cleaned_data['attachment']:
            import mimetypes
            file = self.cleaned_data['attachment']
            filename = file.name.replace(' ', '_')
            a = Attachment(
                followup=f,
                filename=filename,
                mime_type=mimetypes.guess_type(filename)[0] or 'application/octet-stream',
                size=file.size,
                )
            a.file.save(file.name, file, save=False)
            a.save()
            if file.size < getattr(settings, 'MAX_EMAIL_ATTACHMENT_SIZE', 512000):
                # Only files smaller than 512kb (or as defined in
                # settings.MAX_EMAIL_ATTACHMENT_SIZE) are sent via email.
                files.append([a.filename, a.file])
        context = safe_template_context(t)
        # Track recipients so nobody gets the notification twice.
        messages_sent_to = []
        send_templated_mail(
            'newticket_submitter',
            context,
            recipients=t.submitter_email,
            sender=q.from_address,
            fail_silently=True,
            files=files,
            )
        messages_sent_to.append(t.submitter_email)
        if q.new_ticket_cc and q.new_ticket_cc not in messages_sent_to:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=q.new_ticket_cc,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
            messages_sent_to.append(q.new_ticket_cc)
        if q.updated_ticket_cc and q.updated_ticket_cc != q.new_ticket_cc and q.updated_ticket_cc not in messages_sent_to:
            send_templated_mail(
                'newticket_cc',
                context,
                recipients=q.updated_ticket_cc,
                sender=q.from_address,
                fail_silently=True,
                files=files,
                )
        return t
class UserSettingsForm(forms.Form):
    """Per-user helpdesk preferences (all optional boolean/int settings)."""
    login_view_ticketlist = forms.BooleanField(
        label=_('Show Ticket List on Login?'),
        help_text=_('Display the ticket list upon login? Otherwise, the dashboard is shown.'),
        required=False,
    )
    email_on_ticket_change = forms.BooleanField(
        label=_('E-mail me on ticket change?'),
        help_text=_('If you\'re the ticket owner and the ticket is changed via the web by somebody else, do you want to receive an e-mail?'),
        required=False,
    )
    email_on_ticket_assign = forms.BooleanField(
        label=_('E-mail me when assigned a ticket?'),
        help_text=_('If you are assigned a ticket via the web, do you want to receive an e-mail?'),
        required=False,
    )
    email_on_ticket_apichange = forms.BooleanField(
        label=_('E-mail me when a ticket is changed via the API?'),
        help_text=_('If a ticket is altered by the API, do you want to receive an e-mail?'),
        required=False,
    )
    tickets_per_page = forms.IntegerField(
        label=_('Number of tickets to show per page'),
        help_text=_('How many tickets do you want to see on the Ticket List page?'),
        required=False,
        min_value=1,
        max_value=1000,
    )
    use_email_as_submitter = forms.BooleanField(
        label=_('Use my e-mail address when submitting tickets?'),
        help_text=_('When you submit a ticket, do you want to automatically use your e-mail address as the submitter address? You can type a different e-mail address when entering the ticket if needed, this option only changes the default.'),
        required=False,
    )
class EmailIgnoreForm(forms.ModelForm):
    """Plain ModelForm exposing every field of IgnoreEmail."""
    class Meta:
        model = IgnoreEmail
        exclude = []
class TicketCCForm(forms.ModelForm):
    """Form for adding a user to a ticket's CC list."""
    def __init__(self, *args, **kwargs):
        """Limit the selectable users to active accounts, optionally staff-only."""
        super(TicketCCForm, self).__init__(*args, **kwargs)
        criteria = {'is_active': True}
        if helpdesk_settings.HELPDESK_STAFF_ONLY_TICKET_CC:
            criteria['is_staff'] = True
        queryset = User.objects.filter(**criteria).order_by(User.USERNAME_FIELD)
        self.fields['user'].queryset = queryset
    class Meta:
        model = TicketCC
        exclude = ('ticket',)
class TicketDependencyForm(forms.ModelForm):
    """Form for recording that one ticket depends on another."""
    class Meta:
        model = TicketDependency
        exclude = ('ticket',)
| bsd-3-clause |
FrederichRiver/neutrino | applications/spider/spider/tarantula.py | 1 | 8498 | #!/usr/bin/python3
import requests
class tarantula(object):
def __init__(self):
self.start_url = 'https://www.zhihu.com/signin?next=%2F'
# self.start_url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
self.session = requests.session()
self.headers = {
"user-agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
}
self.login_headers = {
"user-agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
"cookie": '_zap=5aaa77ee-0605-4d02-aebf-9720e4da4579; _xsrf=fdc145b4-2e24-48fb-a855-9d67e1a2eddc; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1583756360,1584202172,1584854816; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1584978905; KLBRSID=57358d62405ef24305120316801fd92a|1584978905|1584978902; d_c0="AHCeYl0aAhGPTiXL0hmySYEt5ntUI27vW4o=|1584978905"; capsion_ticket="2|1:0|10:1584978905|14:capsion_ticket|44:OGJjYTk3OWI3M2M1NDAzZDlkNmE2ZTM0Yjk0ZjZiY2E=|a234a3824f14660a32f10c9f33172ed52395d87cba879bde979d0e53985fd22a"; _ga=GA1.2.1605611667.1584978906; _gid=GA1.2.49021677.1584978906; _gat_gtag_UA_149949619_1=1',
"x-ab-param": 'se_click_club=0;tp_meta_card=0;se_sug=1;se_websearch=3;soc_adreadfilter=0;soc_adsort=0;zr_intervene=0;se_new_topic=0;se_ctx_rerank=1;se_webrs=1;li_tjys_ec_ab=0;zr_km_slot_style=event_card;se_cbert=0;soc_cardheight=2;top_hotcommerce=1;pf_fuceng=1;se_colorfultab=1;se_payconsult=0;zr_update_merge_size=1;se_timebox_up=0;tp_club_tab_feed=0;li_vip_no_ad_mon=0;zw_sameq_sorce=999;se_entity_model=0;zr_km_sku_thres=false;se_cate_l3=0;ug_newtag=0;soc_authormore=2;tsp_vote=2;top_test_4_liguangyi=1;ug_follow_answerer=0;se_billboardsearch=0;se_sug_term=0;tp_topic_tab=0;ug_goodcomment_0=1;ug_fw_answ_aut_1=0;se_zu_onebox=0;tp_qa_metacard_top=top;soc_update=1;zr_art_rec=base;zr_video_rank=new_rank;soc_adreadline=0;soc_iospinweight=0;pf_adjust=0;se_cbert_ab=1;soc_newfeed=2;li_se_heat=1;zr_answer_rec_cp=open;zr_search_sim=0;qap_article_like=1;se_topiclabel=1;tsp_hotlist_ui=1;soc_iosreadfilter=0;top_ebook=0;se_col_boost=0;se_p_slideshow=0;tp_club_feed=1;soc_special=0;top_universalebook=1;se_aa_base=0;se_relationship=1;se_college_cm=0;se_whitelist=0;se_rel_search=0;soc_adpinweight=0;zr_rel_search=base;se_expired_ob=0;se_college=default;se_spb309=0;tp_topic_rec=1;soc_ri_merge=0;li_query_match=0;li_se_across=0;se_highlight=0;ls_zvideo_rec=2;top_ydyq=X;zr_video_rank_nn=new_rank;li_catalog_card=1;zr_expslotpaid=1;tp_header_style=1;li_yxzl_new_style_a=1;tp_score_1=a;tp_club_qa_pic=1;qap_question_visitor= 
0;soc_brdcst3=0;top_new_feed=5;ug_zero_follow_0=0;sem_up_growth=in_app;li_paid_answer_exp=0;li_ebok_chap=0;se_pek_test3=1;ug_zero_follow=0;top_v_album=1;ls_zvideo_license=1;se_qanchor=0;se_wannasearch=0;soc_zcfw_shipinshiti=1;soc_zcfw_broadcast=0;se_cardrank_2=1;soc_notification=0;se_hotmore=2;tsp_videobillboard=1;li_answer_card=0;li_answers_link=0;zr_km_sku_mix=sku_20;li_se_media_icon=1;pf_creator_card=1;tp_discover_copy=0;ug_follow_answerer_0=0;se_ffzx_jushen1=0;tp_sft_v2=d;qap_thanks=1;zr_slotpaidexp=1;se_cardrank_1=0;zw_payc_qaedit=0;se_adxtest=1;se_presearch_ab=0;soc_wonderuser_recom=2;soc_zcfw_broadcast2=1;li_answer_label=0;li_qa_btn_text=0;se_lottery=0;se_ios_spb309=0;se_pek_test=1;soc_leave_recommend=2;zr_km_feed_nlp=old;zr_rec_answer_cp=close;zr_article_new=close;se_suggest_cache=0;pf_noti_entry_num=0;zr_slot_cold_start=aver;zr_training_first=false;soc_zuichangfangwen=0;top_quality=0;ug_follow_topic_1=2;qap_ques_invite=0;se_topicfeed=0;ls_recommend_test=0;li_purchase_test=0;se_ltr_cp_new=0;top_root=0;se_prf=0;se_zu_recommend=0;soc_brdcst4=3;li_svip_tab_search=0;li_sku_bottom_bar_re=0;se_hotsearch_2=1;li_ebook_audio=0;se_famous=1;tp_club_tab=0;li_se_edu=0;se_webtimebox=0;se_ltr_dnn_cp=0;li_salt_hot=1;li_hot_score_ab=0;tp_club_header=1;li_se_section=0;zr_ans_rec=gbrank;soc_bignew=1;se_club_post=5;soc_userrec=0;se_ltr_video=0;se_movietab=1;se_likebutton=0;se_webmajorob=0;li_video_section=0;tp_club_join=0;li_ebook_read=0;li_album_liutongab=0;se_agency= 
0;tp_club_pk=1;se_multianswer=0;tp_club_pic=0.6;soc_feed_intimacy=2;se_pek_test2=1;se_use_zitem=0;tp_topic_style=0;qap_question_author=0;tp_club_pic_swiper=0;soc_zcfw_badcase=0;soc_bigone=0;ls_videoad=2;li_assessment_show=1;li_pay_banner_type=6;zr_video_recall=current_recall;se_amovietab=1;soc_iosreadline=0;qap_payc_invite=0;se_entity_model_14=0;se_featured=1;se_subtext=0;soc_authormore2=2;li_android_vip=0;se_hotsearch_num=0;se_backsearch=0;tp_qa_metacard=1;tp_topic_head=0;li_search_v5=0;se_auto_syn=0;se_cardrank_3=0;tp_club_android_feed=old;tp_topic_tab_new=0-0-0;li_education_box=0;li_answer_right=0;zr_km_style=base;tp_club_qa=1;tp_discover=0;tp_sft=a;se_bert_comp=0;se_site_onebox=0;se_mobileweb=1;se_time_threshold=0;tp_topic_entry=0;li_qa_new_cover=1;se_sug_entrance=1;se_new_merger=1;se_hotsearch=0;se_search_feed=N;li_hot_voted=0;zr_slot_training=1;zr_slot_filter=false;tp_m_intro_re_topic=1;ls_fmp4=0;soc_iosintimacy=2;se_related_index=3;se_preset_tech=0;soc_stickypush=1;zr_test_aa1=0;tp_sticky_android=2;zr_km_answer=open_cvr;se_hot_timebox=0;soc_iossort=0;li_svip_cardshow=0;se_relation_1=0;se_waterfall=0;se_ad_index=10;tp_club_android_join=1;soc_yxzl_zcfw=0;pf_newguide_vertical=0;pf_foltopic_usernum=50;se_preset_label=1;se_cardrank_4=1;tp_qa_toast=1',
"x-zse-83": '3_2.0',
"x-zse-86": "1.0_a8Yyk69yo0OxghY8hG2qUbuBk_xfbLOyfLYBUgu0QX2p",
"x-requested-with": 'fetch',
"referer": 'https://www.zhihu.com/signin?next=%2F'
}
    def run(self):
        """Fetch the zhihu sign-in page, dump its headers/body to local
        files, extract the _xsrf token and the captchaNeeded flag, then
        request a captcha with the prepared login headers.

        NOTE(review): ``json`` is imported but unused, and ``result`` /
        ``form_data`` are assigned but never used -- left as-is here.
        """
        import re
        import json
        response = self.session.get(self.start_url, headers=self.headers)
        result = response.headers
        # The _xsrf anti-forgery token is delivered via a Set-Cookie
        # header; pull it out of the stringified headers.
        pattern = r'_xsrf=([a-z0-9\-]+)'
        xsrf = re.findall(pattern, str(response.headers))
        print(xsrf[0])
        # Dump the raw response for offline inspection.
        with open('zhihu.header', 'w') as f:
            f.write(str(response.headers))
        with open('zhihu.text', 'w') as f:
            f.write(str(response.text))
        form_data = {
            "name": ''
        }
        # The page body embeds whether a captcha is required.
        p2 = r'"captchaNeeded":([a-z]+)'
        cap_need = re.findall(p2, response.text)
        print(cap_need[0])
        self.captcha_url = "https://www.zhihu.com/api/v3/oauth/captcha?lang=cn"
        cap_result = self.session.post(self.captcha_url, headers=self.login_headers)
        print(cap_result)
        # self.login_url = "https://www.zhihu.com/api/v3/oauth/sign_in"
        # result = self.session.post(self.login_url)
        # print(result)
def _get_signature(self, timestamp):
ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
grant_type = self.login_data['grant_type']
client_id = self.login_data['client_id']
source = self.login_data['source']
ha.update(bytes((grant_type + client_id + source + timestamp), 'utf-8'))
return ha.hexdigest()
def _get_xsrf():
pass
"""
request_url = 'https://www.zhihu.com/api/v3/oauth/sign_in'
# "cookie": '_zap=9d5d1617-95e1-41b0-8fdc-9ebe08f532c7; d_c0="ANDXmq7i7xCPTlIkz1GioQD14MrkA-BdxWU=|1583756348"; _ga=GA1.2.750210372.1583756361; tst=r; _xsrf=lnmXFSI9tM6XUerDd72s7hBBVYaqGSUJ; Hm_lvt_98beee57fd2ef70ccdd5ca52b9740c49=1583756360,1584202172,1584854816; _gid=GA1.2.320287222.1584854817; q_c1=de90185a939644b3aaf4b4c396b285ad|1584875523000|1584875523000; Hm_lpvt_98beee57fd2ef70ccdd5ca52b9740c49=1584877986; capsion_ticket="2|1:0|10:1584878087|14:capsion_ticket|44:YjQzYjkzZDA3NDVlNDhhMWI5YzFkYjE1NDEyYjQyNjk=|aa08a44c5c450f7b8f3402df4fdd25ad69d88701c00ddf06d4c719cde48d6eb4"; KLBRSID=fb3eda1aa35a9ed9f88f346a7a3ebe83|1584878205|158487548',
headers = {
"x-xsrftoken": 'lnmXFSI9tM6XUerDd72s7hBBVYaqGSUJ',
"user-agent": 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36',
"content-type": 'application/x-www-form-urlencoded'
"x-zse-83": '3_2.0'
}
form_data = None
timestamp = str(int(time.time()*1000))
resp = self.session.get(api, headers=headers)
show_captcha = re.search(r'true', resp.text)
if show_captcha:
put_resp = self.session.put(api, headers=headers)
img_base64 = re.findall(
r'"img_base64":"(.+)"', put_resp.text, re.S)[0].replace(r'\n', '')
with open('./captcha.jpg', 'wb') as f:
f.write(base64.b64decode(img_base64))
img = Image.open('./captcha.jpg')
"""
if __name__ == "__main__":
    # Run the crawler when executed as a script.
    crawler = tarantula()
    crawler.run()
| bsd-3-clause |
jhildreth/falcon-jwt-checker | falcon_jwt_checker/test/test_jwt_checker.py | 1 | 6838 | from unittest.mock import MagicMock
import falcon
import pytest
from falcon_jwt_checker.falcon_jwt_checker import JwtChecker
class TestJwtChecker:
def test_rejects_unsupported_algorithm(self):
with pytest.raises(RuntimeError):
JwtChecker(algorithm='super_algo')
def test_raises_401_when_no_auth_header(self):
with pytest.raises(falcon.HTTPUnauthorized):
checker = JwtChecker(algorithm='HS256', secret='secret')
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
def test_raises_401_when_no_token_present(self):
with pytest.raises(falcon.HTTPUnauthorized):
checker = JwtChecker(algorithm='HS256', secret='secret')
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {'AUTHORIZATION': 'Something else'}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
def test_raises_401_when_bad_token_present(self):
with pytest.raises(falcon.HTTPUnauthorized):
checker = JwtChecker(algorithm='HS256', secret='secret')
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {'AUTHORIZATION': 'Bearer xxBadTokenHerexx'}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
def test_adds_claims_to_params_for_valid_token(self):
valid_token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJUZXN0IEF1dGggU3lzdGVtIiwiaWF0IjoxNDc2NTU1NzIyLCJleHAiOjcyNTE0NDUzNjYsImF1ZCI6ImZhbGNvbi1qd3QtY2hlY2tlciIsInN1YiI6InRlc3RfdXNlciIsInJvbGUiOiJhZG1pbiJ9.v3wtjNKnz0-lRCJWm4UdYSkuMZ075PgwBsDL4kET62I'
checker = JwtChecker(algorithm='HS256', secret='secret',
audience='falcon-jwt-checker')
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {'AUTHORIZATION': 'Bearer ' + valid_token}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
assert params['jwt_claims']['sub'] == 'test_user'
assert params['jwt_claims']['role'] == 'admin'
assert params['jwt_claims']['iss'] == 'Test Auth System'
assert params['jwt_claims']['aud'] == 'falcon-jwt-checker'
def test_jwt_checking_is_skipped_on_exempt_routes(self):
checker = JwtChecker(algorithm='HS256', secret='secret',
audience='falcon-jwt-checker',
exempt_routes=['/', '/login'])
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {}
req.path = '/login'
checker.process_resource(req, resp, resource, params)
# Test that only the specified route is exempt
with pytest.raises(falcon.HTTPUnauthorized):
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
req.headers = {}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
def test_jwt_checking_is_skipped_for_exempt_methods(self):
checker = JwtChecker(algorithm='HS256', secret='secret',
audience='falcon-jwt-checker',
exempt_methods=['OPTIONS'])
req = MagicMock(spec=falcon.request)
resp = MagicMock(spec=falcon.response)
resource = {}
params = {}
req.headers = {}
req.path = '/test'
req.method = 'OPTIONS'
checker.process_resource(req, resp, resource, params)
# Test that only the specified method is exempt
with pytest.raises(falcon.HTTPUnauthorized):
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
req.headers = {}
req.path = '/test'
req.method = 'GET'
checker.process_resource(req, resp, resource, params)
def test_raises_401_when_audience_is_wrong(self):
with pytest.raises(falcon.HTTPUnauthorized):
checker = JwtChecker(algorithm='HS256', secret='secret',
audience='urn:foo')
# Signature is good, but audience is 'wrong'
bad_aud_token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJUZXN0IEF1dGggU3lzdGVtIiwiaWF0IjoxNDc2NTU1NzIyLCJleHAiOjcyNTE0NDUzNjYsImF1ZCI6Indyb25nIiwic3ViIjoidGVzdF91c2VyIiwicm9sZSI6ImFkbWluIn0.3isrOoC_qtCoW13TCe-QhnMYb0z3gOd5VnxswLA_mFo'
req = MagicMock(spec=falcon.Request)
resp = MagicMock(spec=falcon.Response)
resource = {}
params = {}
req.headers = {'AUTHORIZATION': 'Bearer ' + bad_aud_token}
req.path = '/test'
checker.process_resource(req, resp, resource, params)
def test_raises_401_when_issuer_is_wrong(self):
    """A token whose ``iss`` claim does not match the checker is rejected."""
    # FIX: only the call under test belongs inside pytest.raises; the
    # original wrapped all setup too, so a setup failure would have been
    # mistaken for the expected 401.
    checker = JwtChecker(algorithm='HS256', secret='secret',
                         issuer='urn:foo')
    # Signature is good, but issuer is 'wrong'
    bad_iss_token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpc3MiOiJ3cm9uZyIsImlhdCI6MTQ3NjU1NTcyMiwiZXhwIjo3MjUxNDQ1MzY2LCJhdWQiOiJmYWxjb24tand0LWNoZWNrZXIiLCJzdWIiOiJ0ZXN0X3VzZXIiLCJyb2xlIjoiYWRtaW4ifQ.1KRxaQcX9I_ua2DFkZCd3nsnbopiE8-mNMfRt99Jmhk'
    req = MagicMock(spec=falcon.Request)
    resp = MagicMock(spec=falcon.Response)
    resource = {}
    params = {}
    req.headers = {'AUTHORIZATION': 'Bearer ' + bad_iss_token}
    req.path = '/test'
    with pytest.raises(falcon.HTTPUnauthorized):
        checker.process_resource(req, resp, resource, params)
def test_optional_claims_may_be_omitted_from_checker(self):
    """A checker built without issuer/audience accepts tokens lacking them."""
    # No iss or aud claims present
    minimal_token = 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJpYXQiOjE0NzY1NTU3MjIsImV4cCI6NzI1MTQ0NTM2Niwic3ViIjoidGVzdF91c2VyIiwicm9sZSI6ImFkbWluIn0.WuEQjLcBEt60suxcEMNLaYpN5PRxPhRUrmwqRvSDl-Y'
    checker = JwtChecker(algorithm='HS256', secret='secret')

    request = MagicMock(spec=falcon.Request)
    response = MagicMock(spec=falcon.Response)
    request.headers = {'AUTHORIZATION': 'Bearer ' + minimal_token}
    request.path = '/test'

    # Must complete without raising HTTPUnauthorized.
    checker.process_resource(request, response, {}, {})
| mit |
synsun/robotframework | src/robot/rebot.py | 3 | 22905 | #!/usr/bin/env python
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module implementing the command line entry point for post-processing outputs.
This module can be executed from the command line using the following
approaches::
python -m robot.rebot
python path/to/robot/rebot.py
Instead of ``python`` it is possible to use also other Python interpreters.
This module is also used by the installed ``rebot``, ``jyrebot`` and
``ipyrebot`` start-up scripts.
This module also provides :func:`rebot` and :func:`rebot_cli` functions
that can be used programmatically. Other code is for internal usage.
"""
import sys
# Allows running as a script. __name__ check needed with multiprocessing:
# https://github.com/robotframework/robotframework/issues/1137
if 'robot' not in sys.modules and __name__ == '__main__':
import pythonpathsetter
from robot.conf import RebotSettings
from robot.errors import DataError
from robot.reporting import ResultWriter
from robot.output import LOGGER
from robot.utils import Application
from robot.run import RobotFramework
USAGE = """Rebot -- Robot Framework report and log generator
Version: <VERSION>
Usage: rebot [options] robot_outputs
or: python -m robot.rebot [options] robot_outputs
or: python path/to/robot/rebot.py [options] robot_outputs
or: java -jar robotframework.jar rebot [options] robot_outputs
Rebot can be used to generate logs and reports in HTML format. It can also
produce new XML output files which can be further processed with Rebot or
other tools.
The easiest way to execute Rebot is using the `rebot` script created as part
of the normal installation. Alternatively it is possible to execute the
`robot.rebot` module directly using `python -m robot.rebot`, where `python`
can be replaced with any supported Python interpreter like `jython`, `ipy` or
`python3`. Yet another alternative is running the `robot/rebot.py` script like
`python path/to/robot`. Finally, there is a standalone JAR distribution.
Inputs to Rebot are XML output files generated by Robot Framework test runs or
earlier Rebot executions. When more than one input file is given, a new top
level test suite containing suites in the given files is created by default.
This allows combining multiple outputs together to create higher level reports.
An exception is that if --merge is used, results are combined by adding suites
and tests in subsequent outputs into the first suite structure. If same test
is found from multiple outputs, the last one replaces the earlier ones.
For more information about Rebot and other built-in tools, see
http://robotframework.org/robotframework/#built-in-tools. For more details
about Robot Framework in general, go to http://robotframework.org.
Options
=======
-R --merge When combining results, merge outputs together
instead of putting them under a new top level suite.
Example: rebot --merge orig.xml rerun.xml
-N --name name Set the name of the top level test suite. Underscores
in the name are converted to spaces. Default name is
created from the name of the executed data source.
-D --doc documentation Set the documentation of the top level test suite.
Underscores in the documentation are converted to
spaces and it may also contain simple HTML formatting
(e.g. *bold* and http://url/).
-M --metadata name:value * Set metadata of the top level suite. Underscores
in the name and value are converted to spaces. Value
can contain same HTML formatting as --doc.
Example: --metadata version:1.2
-G --settag tag * Sets given tag(s) to all executed test cases.
-t --test name * Select test cases by name or long name. Name is case
and space insensitive and it can also be a simple
pattern where `*` matches anything and `?` matches
any char. If using `*` and `?` in the console is
problematic, see --escape and --argumentfile.
-s --suite name * Select test suites by name. When this option is used
with --test, --include or --exclude, only test cases
in matching suites and also matching other filtering
criteria are selected. Given name can be a simple
pattern similarly as with --test.
-i --include tag * Select test cases to by tag. Similarly as name with
--test, tag is case and space insensitive and it is
possible to use patterns with `*` and `?` as
wildcards. Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT` operators.
Examples: --include foo --include bar*
--include fooANDbar*
-e --exclude tag * Select test cases not to be included by tag. These
tests are not selected even if included with
--include. Tags are matched using the rules explained
with --include.
--processemptysuite Processes output also if the top level test suite is
empty. Useful e.g. with --include/--exclude when it
is not an error that no test matches the condition.
-c --critical tag * Tests having given tag are considered critical. If no
critical tags are set, all tags are critical. Tags
can be given as a pattern like with --include.
-n --noncritical tag * Tests with given tag are not critical even if they
have a tag set with --critical. Tag can be a pattern.
-d --outputdir dir Where to create output files. The default is the
directory where Rebot is run from and the given path
is considered relative to that unless it is absolute.
-o --output file XML output file. Not created unless this option is
specified. Given path, similarly as paths given to
--log, --report and --xunit, is relative to
--outputdir unless given as an absolute path.
-l --log file HTML log file. Can be disabled by giving a special
name `NONE`. Default: log.html
Examples: `--log mylog.html`, `-l none`
-r --report file HTML report file. Can be disabled with `NONE`
similarly as --log. Default: report.html
-x --xunit file xUnit compatible result file. Not created unless this
option is specified.
--xunitskipnoncritical Mark non-critical tests on xUnit output as skipped.
-T --timestampoutputs When this option is used, timestamp in a format
`YYYYMMDD-hhmmss` is added to all generated output
files between their basename and extension. For
example `-T -o output.xml -r report.html -l none`
creates files like `output-20070503-154410.xml` and
`report-20070503-154410.html`.
--splitlog Split log file into smaller pieces that open in
browser transparently.
--logtitle title Title for the generated test log. The default title
is `<Name Of The Suite> Test Log`. Underscores in
the title are converted into spaces in all titles.
--reporttitle title Title for the generated test report. The default
title is `<Name Of The Suite> Test Report`.
--reportbackground colors Background colors to use in the report file.
Either `all_passed:critical_passed:failed` or
`passed:failed`. Both color names and codes work.
Examples: --reportbackground green:yellow:red
--reportbackground #00E:#E00
-L --loglevel level Threshold for selecting messages. Available levels:
TRACE (default), DEBUG, INFO, WARN, NONE (no msgs).
Use syntax `LOGLEVEL:DEFAULT` to define the default
visible log level in log files.
Examples: --loglevel DEBUG
--loglevel DEBUG:INFO
--suitestatlevel level How many levels to show in `Statistics by Suite`
in log and report. By default all suite levels are
shown. Example: --suitestatlevel 3
--tagstatinclude tag * Include only matching tags in `Statistics by Tag`
and `Test Details` in log and report. By default all
tags set in test cases are shown. Given `tag` can
also be a simple pattern (see e.g. --test).
--tagstatexclude tag * Exclude matching tags from `Statistics by Tag` and
`Test Details`. This option can be used with
--tagstatinclude similarly as --exclude is used with
--include.
--tagstatcombine tags:name * Create combined statistics based on tags.
These statistics are added into `Statistics by Tag`
and matching tests into `Test Details`. If optional
`name` is not given, name of the combined tag is got
from the specified tags. Tags are combined using the
rules explained in --include.
Examples: --tagstatcombine requirement-*
--tagstatcombine tag1ANDtag2:My_name
--tagdoc pattern:doc * Add documentation to tags matching given pattern.
Documentation is shown in `Test Details` and also as
a tooltip in `Statistics by Tag`. Pattern can contain
characters `*` (matches anything) and `?` (matches
any char). Documentation can contain formatting
similarly as with --doc option.
Examples: --tagdoc mytag:My_documentation
--tagdoc regression:*See*_http://info.html
--tagdoc owner-*:Original_author
--tagstatlink pattern:link:title * Add external links into `Statistics by
Tag`. Pattern can contain characters `*` (matches
anything) and `?` (matches any char). Characters
matching to wildcard expressions can be used in link
and title with syntax %N, where N is index of the
match (starting from 1). In title underscores are
automatically converted to spaces.
Examples: --tagstatlink mytag:http://my.domain:Link
--tagstatlink bug-*:http://tracker/id=%1:Bug_Tracker
--removekeywords all|passed|for|wuks|name:<pattern>|tag:<pattern> *
Remove keyword data from all generated outputs.
Keywords containing warnings are not removed except
in `all` mode.
all: remove data from all keywords
passed: remove data only from keywords in passed
test cases and suites
for: remove passed iterations from for loops
wuks: remove all but the last failing keyword
inside `BuiltIn.Wait Until Keyword Succeeds`
name:<pattern>: remove data from keywords that match
the given pattern. The pattern is matched
against the full name of the keyword (e.g.
'MyLib.Keyword', 'resource.Second Keyword'),
is case, space, and underscore insensitive,
and may contain `*` and `?` as wildcards.
Examples: --removekeywords name:Lib.HugeKw
--removekeywords name:myresource.*
tag:<pattern>: remove data from keywords that match
the given pattern. Tags are case and space
insensitive and it is possible to use
patterns with `*` and `?` as wildcards.
Tags and patterns can also be combined
together with `AND`, `OR`, and `NOT`
operators.
Examples: --removekeywords foo
--removekeywords fooANDbar*
--flattenkeywords for|foritem|name:<pattern>|tag:<pattern> *
Flattens matching keywords in all generated outputs.
Matching keywords get all log messages from their
child keywords and children are discarded otherwise.
for: flatten for loops fully
foritem: flatten individual for loop iterations
name:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords name:<pattern>`
tag:<pattern>: flatten matched keywords using same
matching rules as with
`--removekeywords tag:<pattern>`
--starttime timestamp Set starting time of test execution when creating
reports. Timestamp must be given in format
`2007-10-01 15:12:42.268` where all separators are
optional (e.g. `20071001151242268` is ok too) and
parts from milliseconds to hours can be omitted if
they are zero (e.g. `2007-10-01`). This can be used
to override starttime of the suite when reports are
created from a single suite or to set starttime for
combined suite, which is otherwise set to `N/A`.
--endtime timestamp Same as --starttime but for ending time. If both
options are used, elapsed time of the suite is
calculated based on them. For combined suites,
it is otherwise calculated by adding elapsed times
of combined test suites together.
--nostatusrc Sets the return code to zero regardless of failures
in test cases. Error codes are returned normally.
--prerebotmodifier class * Class to programmatically modify the result
model before creating outputs.
-C --consolecolors auto|on|ansi|off Use colors on console output or not.
auto: use colors when output not redirected (default)
on: always use colors
ansi: like `on` but use ANSI colors also on Windows
off: disable colors altogether
Note that colors do not work with Jython on Windows.
-P --pythonpath path * Additional locations to add to the module search path
that is used when importing Python based extensions.
-E --escape what:with * Escape characters which are problematic in console.
`what` is the name of the character to escape and
`with` is the string to escape it with. Note that
all given arguments, incl. data sources, are escaped
so escape characters ought to be selected carefully.
<---------------------ESCAPES----------------------->
Examples:
--escape space:_ --metadata X:Value_with_spaces
-E space:SP -E quot:Q -v var:QhelloSPworldQ
-A --argumentfile path * Text file to read more arguments from. File can have
both options and data sources one per line. Contents
do not need to be escaped but spaces in the beginning
and end of lines are removed. Empty lines and lines
starting with a hash character (#) are ignored.
Example file:
| --include regression
| --name Regression Tests
| # This is a comment line
| my_tests.html
| path/to/test/directory/
-h -? --help Print usage instructions.
--version Print version information.
Options that are marked with an asterisk (*) can be specified multiple times.
For example, `--test first --test third` selects test cases with name `first`
and `third`. If an option accepts a value but is not marked with an asterisk,
the last given value has precedence. For example, `--log A.html --log B.html`
creates log file `B.html`. Options accepting no values can be disabled by
using the same option again with `no` prefix added or dropped. The last option
has precedence regardless of how many times options are used. For example,
`--merge --merge --nomerge --nostatusrc --statusrc` would not activate the
merge mode and would return normal status rc.
Long option format is case-insensitive. For example, --SuiteStatLevel is
equivalent to but easier to read than --suitestatlevel. Long options can
also be shortened as long as they are unique. For example, `--logti Title`
works while `--lo log.html` does not because the former matches only --logtitle
but the latter matches both --log and --logtitle.
Environment Variables
=====================
REBOT_OPTIONS Space separated list of default options to be placed
in front of any explicit options on the command line.
ROBOT_SYSLOG_FILE Path to a file where Robot Framework writes internal
information about processed files. Can be useful when
debugging problems. If not set, or set to special
value `NONE`, writing to the syslog file is disabled.
ROBOT_SYSLOG_LEVEL Log level to use when writing to the syslog file.
Available levels are the same as for --loglevel
command line option and the default is INFO.
Examples
========
# Simple Rebot run that creates log and report with default names.
$ rebot output.xml
# Using options. Note that this is one long command split into multiple lines.
$ rebot --log smoke_log.html --report smoke_report.html --include smoke
--ReportTitle Smoke_Tests --ReportBackground green:yellow:red
--TagStatCombine tag1ANDtag2 path/to/myoutput.xml
# Executing `robot.rebot` module using Python and creating combined outputs.
$ python -m robot.rebot --name Combined outputs/*.xml
# Running `robot/rebot.py` script with Jython.
$ jython path/robot/rebot.py -N Project_X -l none -r x.html output.xml
"""
class Rebot(RobotFramework):
    """Application class for Rebot: post-processes Robot Framework outputs."""

    def __init__(self):
        # Deliberately calls Application.__init__ directly (not super()) so
        # Rebot's own usage text and argument limits replace RobotFramework's.
        Application.__init__(self, USAGE, arg_limits=(1,),
                             env_options='REBOT_OPTIONS', logger=LOGGER)

    def main(self, datasources, **options):
        """Create the configured outputs from *datasources* and return rc."""
        config = RebotSettings(options)
        LOGGER.register_console_logger(**config.console_output_config)
        LOGGER.disable_message_cache()
        return_code = ResultWriter(*datasources).write_results(config)
        if return_code < 0:
            raise DataError('No outputs created.')
        return return_code
def rebot_cli(arguments):
    """Command line execution entry point for running rebot.

    :param arguments: Command line arguments as a list of strings.

    This function ultimately calls :func:`sys.exit`, so for programmatic
    usage the :func:`rebot` function is typically the better choice.

    Example::

        from robot import rebot_cli

        rebot_cli(['--report', 'r.html', '--log', 'NONE', 'o1.xml', 'o2.xml'])
    """
    application = Rebot()
    application.execute_cli(arguments)
def rebot(*datasources, **options):
    """Create reports/logs from given Robot output files with given options.

    Input files are paths to Robot Framework output files, exactly as when
    running rebot on the command line. Options are given as keyword
    arguments whose names match the long command line options without
    hyphens.

    Options the command line accepts multiple times can be passed as lists,
    e.g. `include=['tag1', 'tag2']`; a single value may also be given as a
    plain string like `include='tag'`.

    To capture stdout and/or stderr streams, pass open file objects via the
    special keyword arguments `stdout` and `stderr`.

    The return code matches command line execution.

    Examples::

        from robot import rebot

        rebot('path/to/output.xml')
        with open('stdout.txt', 'w') as stdout:
            rebot('o1.xml', 'o2.xml', report='r.html', log='NONE', stdout=stdout)

    Equivalent command line usage::

        rebot path/to/output.xml
        rebot --report r.html --log NONE o1.xml o2.xml > stdout.txt
    """
    application = Rebot()
    return application.execute(*datasources, **options)
# Support direct script execution: `python path/to/robot/rebot.py [options]`.
if __name__ == '__main__':
    rebot_cli(sys.argv[1:])
| apache-2.0 |
betoesquivel/fil2014 | build/django/django/utils/dates.py | 488 | 2237 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
| mit |
tboyce021/home-assistant | homeassistant/components/iqvia/sensor.py | 7 | 6422 | """Support for IQVIA sensors."""
from statistics import mean
import numpy as np
from homeassistant.const import ATTR_STATE
from homeassistant.core import callback
from . import IQVIAEntity
from .const import (
DATA_COORDINATOR,
DOMAIN,
SENSORS,
TYPE_ALLERGY_FORECAST,
TYPE_ALLERGY_INDEX,
TYPE_ALLERGY_OUTLOOK,
TYPE_ALLERGY_TODAY,
TYPE_ALLERGY_TOMORROW,
TYPE_ASTHMA_FORECAST,
TYPE_ASTHMA_INDEX,
TYPE_ASTHMA_TODAY,
TYPE_ASTHMA_TOMORROW,
TYPE_DISEASE_FORECAST,
TYPE_DISEASE_INDEX,
TYPE_DISEASE_TODAY,
)
ATTR_ALLERGEN_AMOUNT = "allergen_amount"
ATTR_ALLERGEN_GENUS = "allergen_genus"
ATTR_ALLERGEN_NAME = "allergen_name"
ATTR_ALLERGEN_TYPE = "allergen_type"
ATTR_CITY = "city"
ATTR_OUTLOOK = "outlook"
ATTR_RATING = "rating"
ATTR_SEASON = "season"
ATTR_TREND = "trend"
ATTR_ZIP_CODE = "zip_code"
# Maps "today"/"tomorrow" sensor types to the index API category whose data
# coordinator feeds them.  FIX: the original literal listed the
# TYPE_ALLERGY_TOMORROW key twice (the duplicate was silently dropped by the
# dict literal); the dead entry has been removed.
API_CATEGORY_MAPPING = {
    TYPE_ALLERGY_TODAY: TYPE_ALLERGY_INDEX,
    TYPE_ALLERGY_TOMORROW: TYPE_ALLERGY_INDEX,
    TYPE_ASTHMA_TODAY: TYPE_ASTHMA_INDEX,
    TYPE_ASTHMA_TOMORROW: TYPE_ASTHMA_INDEX,
    TYPE_DISEASE_TODAY: TYPE_DISEASE_INDEX,
}
RATING_MAPPING = [
{"label": "Low", "minimum": 0.0, "maximum": 2.4},
{"label": "Low/Medium", "minimum": 2.5, "maximum": 4.8},
{"label": "Medium", "minimum": 4.9, "maximum": 7.2},
{"label": "Medium/High", "minimum": 7.3, "maximum": 9.6},
{"label": "High", "minimum": 9.7, "maximum": 12},
]
TREND_FLAT = "Flat"
TREND_INCREASING = "Increasing"
TREND_SUBSIDING = "Subsiding"
async def async_setup_entry(hass, entry, async_add_entities):
    """Set up IQVIA sensors based on a config entry."""
    # Which entity class implements each sensor type.
    entity_class_by_type = {
        TYPE_ALLERGY_FORECAST: ForecastSensor,
        TYPE_ALLERGY_TODAY: IndexSensor,
        TYPE_ALLERGY_TOMORROW: IndexSensor,
        TYPE_ASTHMA_FORECAST: ForecastSensor,
        TYPE_ASTHMA_TODAY: IndexSensor,
        TYPE_ASTHMA_TOMORROW: IndexSensor,
        TYPE_DISEASE_FORECAST: ForecastSensor,
        TYPE_DISEASE_TODAY: IndexSensor,
    }

    coordinators = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id]

    entities = []
    for sensor_type, (name, icon) in SENSORS.items():
        # "today"/"tomorrow" sensors share the coordinator of their index
        # category; everything else uses its own category.
        data_category = API_CATEGORY_MAPPING.get(sensor_type, sensor_type)
        entity_class = entity_class_by_type[sensor_type]
        entities.append(
            entity_class(
                coordinators[data_category], entry, sensor_type, name, icon
            )
        )

    async_add_entities(entities)
def calculate_trend(indices):
    """Classify the direction of a series of index values.

    Fits a first-degree (linear) least-squares polynomial to the values and
    classifies the series by the sign of the fitted slope (rounded to two
    decimals).  NOTE: the previous docstring called this a "moving average",
    which it is not - it is a linear-regression trend.
    """
    positions = np.arange(0, len(indices))
    samples = np.array(indices)
    slope = round(np.polyfit(positions, samples, 1)[0], 2)

    if slope > 0:
        return TREND_INCREASING
    if slope < 0:
        return TREND_SUBSIDING
    return TREND_FLAT
class ForecastSensor(IQVIAEntity):
    """Define sensor related to forecast data."""

    @callback
    def update_from_latest_data(self):
        """Update the sensor."""
        location = self.coordinator.data.get("Location")
        if not location or not location.get("periods"):
            return

        index_values = [period["Index"] for period in location["periods"]]
        average = round(mean(index_values), 1)

        # Exactly one rating band is expected to contain the average.
        [rating] = [
            band["label"]
            for band in RATING_MAPPING
            if band["minimum"] <= average <= band["maximum"]
        ]

        attrs = {
            ATTR_CITY: location["City"].title(),
            ATTR_RATING: rating,
            ATTR_STATE: location["State"],
            ATTR_TREND: calculate_trend(index_values),
            ATTR_ZIP_CODE: location["ZIP"],
        }
        self._attrs.update(attrs)

        if self._type == TYPE_ALLERGY_FORECAST:
            # The allergy forecast additionally exposes outlook/season data
            # supplied by a separate coordinator.
            outlook_coordinator = self.hass.data[DOMAIN][DATA_COORDINATOR][
                self._entry.entry_id
            ][TYPE_ALLERGY_OUTLOOK]
            self._attrs[ATTR_OUTLOOK] = outlook_coordinator.data.get("Outlook")
            self._attrs[ATTR_SEASON] = outlook_coordinator.data.get("Season")

        self._state = average
class IndexSensor(IQVIAEntity):
    """Define sensor related to indices."""

    @callback
    def update_from_latest_data(self):
        """Update the sensor."""
        if not self.coordinator.last_update_success:
            return

        # FIX: the original branched on self._type three times with byte
        # identical bodies (all read the "Location" payload) wrapped in a
        # dead `except KeyError` (dict.get never raises KeyError).  The
        # branches are consolidated and a missing payload is guarded
        # explicitly instead of failing later with a TypeError.
        data = self.coordinator.data.get("Location")
        if not data:
            return

        # e.g. "allergy_index_today" -> "Today"; matches the period "Type"
        # key used by the IQVIA API.
        key = self._type.split("_")[-1].title()

        try:
            [period] = [p for p in data["periods"] if p["Type"] == key]
        except ValueError:
            # Zero or multiple matching periods: nothing sensible to report.
            return

        [rating] = [
            i["label"]
            for i in RATING_MAPPING
            if i["minimum"] <= period["Index"] <= i["maximum"]
        ]

        self._attrs.update(
            {
                ATTR_CITY: data["City"].title(),
                ATTR_RATING: rating,
                ATTR_STATE: data["State"],
                ATTR_ZIP_CODE: data["ZIP"],
            }
        )

        if self._type in (TYPE_ALLERGY_TODAY, TYPE_ALLERGY_TOMORROW):
            for idx, attrs in enumerate(period["Triggers"]):
                index = idx + 1
                self._attrs.update(
                    {
                        f"{ATTR_ALLERGEN_GENUS}_{index}": attrs["Genus"],
                        f"{ATTR_ALLERGEN_NAME}_{index}": attrs["Name"],
                        f"{ATTR_ALLERGEN_TYPE}_{index}": attrs["PlantType"],
                    }
                )
        elif self._type in (TYPE_ASTHMA_TODAY, TYPE_ASTHMA_TOMORROW):
            for idx, attrs in enumerate(period["Triggers"]):
                index = idx + 1
                self._attrs.update(
                    {
                        f"{ATTR_ALLERGEN_NAME}_{index}": attrs["Name"],
                        f"{ATTR_ALLERGEN_AMOUNT}_{index}": attrs["PPM"],
                    }
                )
        elif self._type == TYPE_DISEASE_TODAY:
            for attrs in period["Triggers"]:
                self._attrs[f"{attrs['Name'].lower()}_index"] = attrs["Index"]

        self._state = period["Index"]
| apache-2.0 |
hkariti/ansible | lib/ansible/modules/database/proxysql/proxysql_query_rules.py | 42 | 21852 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = '''
---
module: proxysql_query_rules
version_added: "2.3"
author: "Ben Mildren (@bmildren)"
short_description: Modifies query rules using the proxysql admin interface.
description:
- The M(proxysql_query_rules) module modifies query rules using the
proxysql admin interface.
options:
rule_id:
description:
- The unique id of the rule. Rules are processed in rule_id order.
active:
description:
- A rule with I(active) set to C(False) will be tracked in the database,
but will be never loaded in the in-memory data structures.
username:
description:
- Filtering criteria matching username. If I(username) is non-NULL, a
query will match only if the connection is made with the correct
username.
schemaname:
description:
- Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a
query will match only if the connection uses schemaname as its default
schema.
flagIN:
description:
- Used in combination with I(flagOUT) and I(apply) to create chains of
rules.
client_addr:
description:
- Match traffic from a specific source.
proxy_addr:
description:
- Match incoming traffic on a specific local IP.
proxy_port:
description:
- Match incoming traffic on a specific local port.
digest:
description:
- Match queries with a specific digest, as returned by
stats_mysql_query_digest.digest.
match_digest:
description:
- Regular expression that matches the query digest. The dialect of
regular expressions used is that of re2 - https://github.com/google/re2
match_pattern:
description:
- Regular expression that matches the query text. The dialect of regular
expressions used is that of re2 - https://github.com/google/re2
negate_match_pattern:
description:
- If I(negate_match_pattern) is set to C(True), only queries not matching
the query text will be considered as a match. This acts as a NOT
operator in front of the regular expression matching against
match_pattern.
flagOUT:
description:
- Used in combination with I(flagIN) and apply to create chains of rules.
When set, I(flagOUT) signifies the I(flagIN) to be used in the next
chain of rules.
replace_pattern:
description:
- This is the pattern with which to replace the matched pattern. Note
that this is optional, and when omitted, the query processor will only
cache, route, or set other parameters without rewriting.
destination_hostgroup:
description:
- Route matched queries to this hostgroup. This happens unless there is a
started transaction and the logged in user has
I(transaction_persistent) set to C(True) (see M(proxysql_mysql_users)).
cache_ttl:
description:
- The number of milliseconds for which to cache the result of the query.
Note in ProxySQL 1.1 I(cache_ttl) was in seconds.
timeout:
description:
- The maximum timeout in milliseconds with which the matched or rewritten
query should be executed. If a query run for longer than the specific
threshold, the query is automatically killed. If timeout is not
specified, the global variable mysql-default_query_timeout applies.
retries:
description:
- The maximum number of times a query needs to be re-executed in case of
detected failure during the execution of the query. If retries is not
specified, the global variable mysql-query_retries_on_failure applies.
delay:
description:
- Number of milliseconds to delay the execution of the query. This is
essentially a throttling mechanism and QoS, and allows a way to give
priority to queries over others. This value is added to the
mysql-default_query_delay global variable that applies to all queries.
mirror_flagOUT:
description:
- Enables query mirroring. If set I(mirror_flagOUT) can be used to
evaluates the mirrored query against the specified chain of rules.
mirror_hostgroup:
description:
- Enables query mirroring. If set I(mirror_hostgroup) can be used to
mirror queries to the same or different hostgroup.
error_msg:
description:
- Query will be blocked, and the specified error_msg will be returned to
the client.
log:
description:
- Query will be logged.
apply:
description:
- Used in combination with I(flagIN) and I(flagOUT) to create chains of
rules. Setting apply to True signifies the last rule to be applied.
comment:
description:
- Free form text field, usable for a descriptive comment of the query
rule.
state:
description:
- When C(present) - adds the rule, when C(absent) - removes the rule.
choices: [ "present", "absent" ]
default: present
force_delete:
description:
- By default we avoid deleting more than one schedule in a single batch,
however if you need this behaviour and you're not concerned about the
schedules deleted, you can set I(force_delete) to C(True).
default: False
extends_documentation_fragment:
- proxysql.managing_config
- proxysql.connectivity
'''
# FIX: both examples named the wrong module ("proxysql_backend_servers"
# instead of this module, "proxysql_query_rules"); also fixed the
# "config config" typo.
EXAMPLES = '''
---
# This example adds a rule to redirect queries from a specific user to another
# hostgroup, it saves the mysql query rule config to disk, but avoids loading
# the mysql query rule config to runtime (this might be because several
# rules are being added and the user wants to push the config to runtime in a
# single batch using the M(proxysql_manage_config) module).  It uses supplied
# credentials to connect to the proxysql admin interface.

- proxysql_query_rules:
    login_user: admin
    login_password: admin
    username: 'guest_ro'
    destination_hostgroup: 1
    active: 1
    retries: 3
    state: present
    load_to_runtime: False

# This example removes all rules that use the username 'guest_ro', saves the
# mysql query rule config to disk, and dynamically loads the mysql query rule
# config to runtime.  It uses credentials in a supplied config file to connect
# to the proxysql admin interface.

- proxysql_query_rules:
    config_file: '~/proxysql.cnf'
    username: 'guest_ro'
    state: absent
    force_delete: true
'''
RETURN = '''
stdout:
description: The mysql user modified or removed from proxysql
returned: On create/update will return the newly modified rule, in all
other cases will return a list of rules that match the supplied
criteria.
type: dict
"sample": {
"changed": true,
"msg": "Added rule to mysql_query_rules",
"rules": [
{
"active": "0",
"apply": "0",
"cache_ttl": null,
"client_addr": null,
"comment": null,
"delay": null,
"destination_hostgroup": 1,
"digest": null,
"error_msg": null,
"flagIN": "0",
"flagOUT": null,
"log": null,
"match_digest": null,
"match_pattern": null,
"mirror_flagOUT": null,
"mirror_hostgroup": null,
"negate_match_pattern": "0",
"proxy_addr": null,
"proxy_port": null,
"reconnect": null,
"replace_pattern": null,
"retries": null,
"rule_id": "1",
"schemaname": null,
"timeout": null,
"username": "guest_ro"
}
],
"state": "present"
}
'''
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.mysql import mysql_connect
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native

# MySQLdb is an optional dependency; record its availability so that
# perform_checks() can fail the module with a clear message instead of
# letting an ImportError escape at import time.
try:
    import MySQLdb
    import MySQLdb.cursors
except ImportError:
    MYSQLDB_FOUND = False
else:
    MYSQLDB_FOUND = True
# ===========================================
# proxysql module specific support methods.
#
def perform_checks(module):
    """Validate prerequisites before talking to the ProxySQL admin interface.

    Fails the module run when the admin port lies outside the valid TCP
    port range, or when the MySQLdb client library is not importable.
    """
    port = module.params["login_port"]
    if not 0 <= port <= 65535:
        module.fail_json(
            msg="login_port must be a valid unix port number (0-65535)"
        )

    if not MYSQLDB_FOUND:
        module.fail_json(
            msg="the python mysqldb module is required"
        )
def save_config_to_disk(cursor):
    """Persist the in-memory mysql query rules to ProxySQL's on-disk config."""
    cursor.execute("SAVE MYSQL QUERY RULES TO DISK")
    return True
def load_config_to_runtime(cursor):
    """Activate the mysql query rules currently held in ProxySQL's memory."""
    cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME")
    return True
class ProxyQueryRule(object):
    """CRUD wrapper around one entry of ProxySQL's mysql_query_rules table.

    Provides existence checks, select, insert, update and delete operations
    plus the save-to-disk / load-to-runtime handling that follows a change.
    All SQL is built dynamically from the subset of columns the user set
    (values left as None are excluded from every generated statement).
    """

    def __init__(self, module):
        # Behavioural flags controlling how and whether changes are applied.
        self.state = module.params["state"]
        self.force_delete = module.params["force_delete"]
        self.save_to_disk = module.params["save_to_disk"]
        self.load_to_runtime = module.params["load_to_runtime"]

        # Columns of mysql_query_rules managed by this module.
        config_data_keys = ["rule_id",
                            "active",
                            "username",
                            "schemaname",
                            "flagIN",
                            "client_addr",
                            "proxy_addr",
                            "proxy_port",
                            "digest",
                            "match_digest",
                            "match_pattern",
                            "negate_match_pattern",
                            "flagOUT",
                            "replace_pattern",
                            "destination_hostgroup",
                            "cache_ttl",
                            "timeout",
                            "retries",
                            "delay",
                            "mirror_flagOUT",
                            "mirror_hostgroup",
                            "error_msg",
                            "log",
                            "apply",
                            "comment"]

        self.config_data = dict((k, module.params[k])
                                for k in config_data_keys)

    def check_rule_pk_exists(self, cursor):
        """Return True when a rule with this rule_id (the primary key) exists."""
        query_string = \
            """SELECT count(*) AS `rule_count`
               FROM mysql_query_rules
               WHERE rule_id = %s"""

        query_data = \
            [self.config_data["rule_id"]]

        cursor.execute(query_string, query_data)
        check_count = cursor.fetchone()
        return (int(check_count['rule_count']) > 0)

    def check_rule_cfg_exists(self, cursor):
        """Count rules matching every non-None column value supplied."""
        query_string = \
            """SELECT count(*) AS `rule_count`
               FROM mysql_query_rules"""

        cols = 0
        query_data = []

        # Build the WHERE clause from the columns the user actually set.
        for col, val in iteritems(self.config_data):
            if val is not None:
                cols += 1
                query_data.append(val)
                if cols == 1:
                    query_string += "\n  WHERE " + col + " = %s"
                else:
                    query_string += "\n    AND " + col + " = %s"

        if cols > 0:
            cursor.execute(query_string, query_data)
        else:
            cursor.execute(query_string)
        check_count = cursor.fetchone()
        return int(check_count['rule_count'])

    def get_rule_config(self, cursor, created_rule_id=None):
        """Fetch matching rule rows.

        Returns a single row (dict) when *created_rule_id* is given,
        otherwise a list of all rows matching the configured columns.
        """
        query_string = \
            """SELECT *
               FROM mysql_query_rules"""

        if created_rule_id:
            query_data = [created_rule_id, ]
            query_string += "\nWHERE rule_id = %s"

            cursor.execute(query_string, query_data)
            rule = cursor.fetchone()
        else:
            cols = 0
            query_data = []

            for col, val in iteritems(self.config_data):
                if val is not None:
                    cols += 1
                    query_data.append(val)
                    if cols == 1:
                        query_string += "\n  WHERE " + col + " = %s"
                    else:
                        query_string += "\n    AND " + col + " = %s"

            if cols > 0:
                cursor.execute(query_string, query_data)
            else:
                cursor.execute(query_string)
            rule = cursor.fetchall()

        return rule

    def create_rule_config(self, cursor):
        """INSERT a new rule built from the non-None columns; return its id."""
        query_string = \
            """INSERT INTO mysql_query_rules ("""

        cols = 0
        query_data = []

        for col, val in iteritems(self.config_data):
            if val is not None:
                cols += 1
                query_data.append(val)
                query_string += "\n" + col + ","

        # Drop the trailing comma after the last column name.
        query_string = query_string[:-1]

        query_string += \
            (")\n" +
             "VALUES (" +
             "%s ," * cols)

        # Drop the trailing ", " of the placeholder list and close the VALUES.
        query_string = query_string[:-2]
        query_string += ")"

        cursor.execute(query_string, query_data)
        new_rule_id = cursor.lastrowid
        return True, new_rule_id

    def update_rule_config(self, cursor):
        """UPDATE the rule identified by rule_id with the non-None columns."""
        query_string = """UPDATE mysql_query_rules"""

        cols = 0
        query_data = []

        for col, val in iteritems(self.config_data):
            if val is not None and col != "rule_id":
                cols += 1
                query_data.append(val)
                if cols == 1:
                    query_string += "\nSET " + col + "= %s,"
                else:
                    query_string += "\n    " + col + " = %s,"

        # Drop the trailing comma of the SET list.
        query_string = query_string[:-1]
        query_string += "\nWHERE rule_id = %s"

        query_data.append(self.config_data["rule_id"])

        cursor.execute(query_string, query_data)
        return True

    def delete_rule_config(self, cursor):
        """DELETE every rule matching the non-None columns; return row count."""
        query_string = \
            """DELETE FROM mysql_query_rules"""

        cols = 0
        query_data = []

        for col, val in iteritems(self.config_data):
            if val is not None:
                cols += 1
                query_data.append(val)
                if cols == 1:
                    query_string += "\n  WHERE " + col + " = %s"
                else:
                    query_string += "\n    AND " + col + " = %s"

        if cols > 0:
            cursor.execute(query_string, query_data)
        else:
            cursor.execute(query_string)
        check_count = cursor.rowcount
        return True, int(check_count)

    def manage_config(self, cursor, state):
        """After a change, optionally persist to disk and/or load to runtime."""
        if state:
            if self.save_to_disk:
                save_config_to_disk(cursor)
            if self.load_to_runtime:
                load_config_to_runtime(cursor)

    def create_rule(self, check_mode, result, cursor):
        """Create the rule (or report what would happen in check mode)."""
        if not check_mode:
            result['changed'], new_rule_id = \
                self.create_rule_config(cursor)
            result['msg'] = "Added rule to mysql_query_rules"
            self.manage_config(cursor,
                               result['changed'])
            result['rules'] = \
                self.get_rule_config(cursor, new_rule_id)
        else:
            result['changed'] = True
            result['msg'] = ("Rule would have been added to" +
                             " mysql_query_rules, however" +
                             " check_mode is enabled.")

    def update_rule(self, check_mode, result, cursor):
        """Update the rule (or report what would happen in check mode)."""
        if not check_mode:
            result['changed'] = \
                self.update_rule_config(cursor)
            result['msg'] = "Updated rule in mysql_query_rules"
            self.manage_config(cursor,
                               result['changed'])
            result['rules'] = \
                self.get_rule_config(cursor)
        else:
            result['changed'] = True
            result['msg'] = ("Rule would have been updated in" +
                             " mysql_query_rules, however" +
                             " check_mode is enabled.")

    def delete_rule(self, check_mode, result, cursor):
        """Delete matching rules (or report what would happen in check mode)."""
        if not check_mode:
            # Capture the rows before deleting so they can be reported back.
            result['rules'] = \
                self.get_rule_config(cursor)
            result['changed'], result['rows_affected'] = \
                self.delete_rule_config(cursor)
            result['msg'] = "Deleted rule from mysql_query_rules"
            self.manage_config(cursor,
                               result['changed'])
        else:
            result['changed'] = True
            result['msg'] = ("Rule would have been deleted from" +
                             " mysql_query_rules, however" +
                             " check_mode is enabled.")
# ===========================================
# Module execution.
#

def main():
    """Entry point: parse module arguments and converge to the desired state."""
    module = AnsibleModule(
        argument_spec=dict(
            login_user=dict(default=None, type='str'),
            login_password=dict(default=None, no_log=True, type='str'),
            login_host=dict(default="127.0.0.1"),
            login_unix_socket=dict(default=None),
            login_port=dict(default=6032, type='int'),
            config_file=dict(default="", type='path'),
            rule_id=dict(type='int'),
            active=dict(type='bool'),
            username=dict(type='str'),
            schemaname=dict(type='str'),
            flagIN=dict(type='int'),
            client_addr=dict(type='str'),
            proxy_addr=dict(type='str'),
            proxy_port=dict(type='int'),
            digest=dict(type='str'),
            match_digest=dict(type='str'),
            match_pattern=dict(type='str'),
            negate_match_pattern=dict(type='bool'),
            flagOUT=dict(type='int'),
            replace_pattern=dict(type='str'),
            destination_hostgroup=dict(type='int'),
            cache_ttl=dict(type='int'),
            timeout=dict(type='int'),
            retries=dict(type='int'),
            delay=dict(type='int'),
            mirror_flagOUT=dict(type='int'),
            mirror_hostgroup=dict(type='int'),
            error_msg=dict(type='str'),
            log=dict(type='bool'),
            apply=dict(type='bool'),
            comment=dict(type='str'),
            state=dict(default='present', choices=['present',
                                                   'absent']),
            force_delete=dict(default=False, type='bool'),
            save_to_disk=dict(default=True, type='bool'),
            load_to_runtime=dict(default=True, type='bool')
        ),
        supports_check_mode=True
    )

    perform_checks(module)

    login_user = module.params["login_user"]
    login_password = module.params["login_password"]
    config_file = module.params["config_file"]

    cursor = None
    try:
        # DictCursor so rows can be returned to the user keyed by column name.
        cursor = mysql_connect(module,
                               login_user,
                               login_password,
                               config_file,
                               cursor_class=MySQLdb.cursors.DictCursor)
    except MySQLdb.Error as e:
        module.fail_json(
            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
        )

    proxysql_query_rule = ProxyQueryRule(module)
    result = {}
    result['state'] = proxysql_query_rule.state

    if proxysql_query_rule.state == "present":
        try:
            if not proxysql_query_rule.check_rule_cfg_exists(cursor):
                # Update when the primary key already exists, create otherwise.
                if proxysql_query_rule.config_data["rule_id"] and \
                   proxysql_query_rule.check_rule_pk_exists(cursor):
                    proxysql_query_rule.update_rule(module.check_mode,
                                                    result,
                                                    cursor)
                else:
                    proxysql_query_rule.create_rule(module.check_mode,
                                                    result,
                                                    cursor)
            else:
                # An identical rule already exists: nothing to change.
                result['changed'] = False
                result['msg'] = ("The rule already exists in" +
                                 " mysql_query_rules and doesn't need to be" +
                                 " updated.")
                result['rules'] = \
                    proxysql_query_rule.get_rule_config(cursor)

        except MySQLdb.Error as e:
            module.fail_json(
                msg="unable to modify rule.. %s" % to_native(e)
            )

    elif proxysql_query_rule.state == "absent":
        try:
            existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor)
            if existing_rules > 0:
                # Refuse to delete more than one rule unless force_delete set.
                if existing_rules == 1 or \
                   proxysql_query_rule.force_delete:
                    proxysql_query_rule.delete_rule(module.check_mode,
                                                    result,
                                                    cursor)
                else:
                    module.fail_json(
                        msg=("Operation would delete multiple rules" +
                             " use force_delete to override this")
                    )
            else:
                result['changed'] = False
                result['msg'] = ("The rule is already absent from the" +
                                 " mysql_query_rules memory configuration")

        except MySQLdb.Error as e:
            module.fail_json(
                msg="unable to remove rule.. %s" % to_native(e)
            )

    module.exit_json(**result)
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
| gpl-3.0 |
runnersaw/Discrete-Hash-2015 | hash_function.py | 1 | 10801 | # By Sawyer, Griffin, and Rahil
# Custom hashing function for Olin College Discrete Math 2015
import sys
import math
import bitstring
from bitstring import BitArray, BitStream, ConstBitStream
from Crypto.Hash import MD5
from random import randint
import numpy
import matplotlib.pyplot as plt
def padBits(input, bitSize):
    """
    Pad ``input`` to a multiple of the block size ``bitSize``.

    Merkle-Damgaard style padding: a single '1' bit is appended, followed
    by as many '0' bits as needed to reach the next block boundary.  An
    empty input yields one full block of the form 100...0, and an input
    that already fills its last block gains one whole extra padding block.

    :param input: BitArray holding the message bits.
    :param bitSize: Block size in bits.
    :return: A new, padded BitArray.
    """
    # if 0 bits, just return a block with 1000000000
    if len(input.bin) == 0:
        command = 'bin:1=1, bin:' + str(bitSize-1) + '=' + '0'*(bitSize-1)
        return bitstring.pack(command)
    #if length of the input is a multiple of block size,
    length = len(input.bin)
    remainder = length % bitSize
    remainder = bitSize - remainder
    # NOTE(review): remainder is always in 1..bitSize at this point, so the
    # branch below is unreachable (kept for fidelity).
    if remainder == 0:
        remainder = bitSize
    zeroBits = remainder - 1
    zeros = '0'*zeroBits
    command = 'bin:'+str(length)+'='+input.bin+', bin:1=1, bin:' + str(zeroBits) + '=' + zeros
    return bitstring.pack(command)
def leftShift(x,n):
    """
    Rotate (circular shift) BitArray ``x`` left by ``n`` bits.

    Despite the original name this is not a logical shift: each step
    re-inserts the old most significant bit at the least significant
    position, so no bits are lost.
    """
    for i in range(n):
        bits = x.bin
        finalBit = bits[0]
        leftShiftedBits = (x<<1).bin
        # Replace the zero shifted in on the right with the old MSB.
        leftShiftedBits = leftShiftedBits[:-1] + finalBit
        x = BitArray('0b'+leftShiftedBits)
    return x
def rightShift(x,n):
    """
    Rotate (circular shift) BitArray ``x`` right by ``n`` bits.

    Despite the original name this is not a logical shift: each step
    re-inserts the old least significant bit at the most significant
    position, so no bits are lost.
    """
    for i in range(n):
        bits = x.bin
        finalBit = bits[len(bits)-1]
        rightShiftedBits = (x>>1).bin
        # Replace the zero shifted in on the left with the old LSB.
        rightShiftedBits = finalBit + rightShiftedBits[1:]
        x = BitArray('0b'+rightShiftedBits)
    return x
def add(x,y,bits):
    '''
    Add two BitArrays modulo 2**bits.

    Needed because the '+' operator on BitArrays concatenates them, so
    modular addition has to go through their integer values.
    '''
    a = (x.uint + y.uint) % (2**bits)
    return BitArray("uint:"+str(bits)+"="+str(a))
def truncate(x,n):
    '''
    Return the ``n`` least significant bits of BitArray ``x``.

    NOTE(review): for n == 0 the slice bits[-0:] yields the WHOLE string
    rather than an empty one - confirm callers never pass n == 0.
    '''
    bits = x.bin
    newBits = bits[-n:]
    return BitArray('0b'+newBits)
def j(x, y, z):
    """Round mixing primitive: (x OR y) XOR ((NOT z) AND y).

    Works on any operands that support Python's bitwise operators
    (BitArrays in this file, plain ints in tests).
    """
    return (x | y) ^ (~z & y)
def k(x, y, z):
    """Round mixing primitive: ((NOT x) XOR y) OR (z XOR (NOT x)).

    Works on any operands that support Python's bitwise operators
    (BitArrays in this file, plain ints in tests).
    """
    return (~x ^ y) | (z ^ ~x)
def compressionFunction(input1, input2, bitSize):
    """
    This function runs our compression function. The portion of code inside the
    main for loop is our round function e(), which is run on input2, which is the
    current block. The round function is looped through by the number of rounds
    specified in the function (64 in this case). The round function utilizes addition,
    shifts, and two functions j() and k(). At the end, the output of the round
    function() is XORed with input1, which is the hashed version of the previous
    block. The output of the XOR operation is returned by the function.
    The bitSize input is necessary to split each block into four sub-blocks of the
    correct size.
    """
    alpha = 'abcd'
    subBitSize = bitSize / 4  # Python 2 integer division; bitSize must be a multiple of 4
    rounds = 64
    for x in range(rounds):
        blocks = {}
        newBlocks = {}
        # Split the current state into four equally sized sub-blocks a..d.
        for y in range(4):
            blocks[alpha[y]] = input2[y*subBitSize:y*subBitSize+subBitSize]
        shiftSize = subBitSize / 2 - 1
        # Mix the sub-blocks with j(), k(), modular addition and a rotation.
        a_j = j(blocks['a'], blocks['b'], blocks['c'])
        a_k = k(blocks['a'], a_j, blocks['d'])
        newBlocks['a'] = add(a_k, blocks['b'], subBitSize)
        newBlocks['b'] = blocks['a']
        newBlocks['c'] = leftShift(blocks['d'], shiftSize)
        newBlocks['d'] = add(blocks['b'], blocks['c'], subBitSize)
        # Write the permuted sub-blocks back into the state in place.
        for z in range(4):
            input2[z*subBitSize:z*subBitSize+subBitSize] = newBlocks[alpha[z]]
    # Davies-Meyer style feed-forward with the previous chaining value.
    output = input1 ^ input2
    return output
def merkle(messageBlock, bitSize, initialValue, padFunction, compressionFunction):
    """
    Merkle-Damgaard construction: pad the message, then fold the
    compression function over each ``bitSize``-bit block, chaining the
    output of one call into the next.

    :param messageBlock: BitArray with the full message.
    :param bitSize: Block size in bits.
    :param initialValue: The IV used as the first chaining value.
    :param padFunction: Callable performing the padding (e.g. padBits).
    :param compressionFunction: Callable compressing (state, block) pairs.
    :return: The final chaining value, i.e. the hash.
    """
    # pad the bits
    messageBlock = padFunction(messageBlock, bitSize)
    #setup
    prevState = initialValue
    # loop through messages
    numMessages = len(messageBlock.bin)/bitSize  # Python 2 integer division
    for i in range(numMessages):
        shortMessage = messageBlock[bitSize*i:bitSize*(i+1)] # get current message
        prevState = compressionFunction(prevState, shortMessage, bitSize) # call compressionFunction
    return prevState
def runMerkle(hashInput):
    """
    This just runs the merkle given a certain input. It uses all of the global variables
    defined in main to run the merkle function
    """
    # NOTE(review): bitSize and iv are globals bound in the __main__ block;
    # calling this from an importing module would raise NameError.
    return merkle(hashInput, bitSize, iv, padBits, compressionFunction)
def percentSimilar(a, b):
    '''
    Return the fraction (0.0 - 1.0) of positions at which ``a`` and ``b``
    hold equal values.  Prints a warning and returns None when the two
    inputs differ in length.
    '''
    if len(a) != len(b):
        print("Input same size numbers")
        return
    matches = sum(1 for left, right in zip(a, b) if left == right)
    return float(matches) / len(a)
def avalanche_test_compression(iters, bitSize):
    """
    This function will test whether a given compression function produces good
    avalanche effect. To do this we'll change one bit at random, roughly 50%
    of the output bits should flip. In order to test this, we'll generate
    a bunch of random bitstrings, pick random bit to flip for each one,
    run the compression function, and do this many times in a row for each
    bitstring. At the end we'll monitor the average % of bits that flipped,
    as well as the minimum % and maximum % flipped
    Inputs: iters = number of iterations to run
            bitSize = the size of the input, please make this a power of 2
    """
    similarPercents = []
    # Random starting chaining value.
    prevState = BitArray('0b'+make_random_bitstring(bitSize))
    #short array will be the same every time
    shortMessage = BitArray('0b'+make_random_bitstring(bitSize))
    #however many iterations of compression we want to do
    for i in range(0,iters):
        #now run compression on it
        new_message = compressionFunction(prevState, shortMessage, bitSize)
        #check how similar they are
        percentSim = percentSimilar(new_message, prevState)
        #add the percent similar to our list
        similarPercents.append(percentSim)
        #make the prev state the new message
        prevState = new_message
    #print similarPercents
    print "compression avalanche percent for " + str(iters) + " tests is: "+str(numpy.mean(similarPercents))
    return
def avalanche_test_merkle(iters):
    """
    Run avalanche testing with our full merkle function, not just compression.

    For each iteration a random input and its one-bit-flipped twin are both
    hashed and the similarity of the two digests recorded; a histogram of
    the similarities is shown at the end.  Uses the module-level bitSize.
    """
    print "running merkle avalanche test"
    similarPercents = []
    for i in range(0,iters-1):
        first_bitstring = BitArray('0b'+make_random_bitstring(bitSize))
        flipped_first = flip_random_bit(first_bitstring)
        interim_percent = percentSimilar(runMerkle(first_bitstring), runMerkle(flipped_first))
        similarPercents.append(interim_percent)
    print "merkle avalanche overall percent similar for " + str(iters) + " tests (custom merkle) is: " + str(numpy.mean(similarPercents))
    print "merkle standard deviation for avalanche values (custom merkle) is: " + str(numpy.std(similarPercents))
    #make a histogram of the data
    plt.hist(similarPercents)
    plt.title("Histogram of custom hash avalanche values")
    plt.xlabel("Percent Similar")
    plt.ylabel("Frequency")
    plt.show()
    print "merkle avalanche testing done"
def md5_bitstring_run(input_bitstring):
    """Hash ``input_bitstring`` with MD5 and return the digest as a BitArray.

    Bug fix: the original called make_random_bitstring() and ignored its
    argument entirely, so two different inputs were hashed as two unrelated
    random strings - which made the MD5 avalanche comparison meaningless.
    We now feed the argument's binary representation to the digest.
    """
    md5test = MD5.new()
    md5test.update(input_bitstring.bin)
    md5_hex = md5test.hexdigest()
    md5_bitstring = BitArray('0x'+md5_hex)
    return md5_bitstring
def avalanche_test_md5(iters):
    """
    run the same avalanche test, but with md4 algorithm so that we can compare
    to our custom algorithm

    NOTE(review): the text above says md4 but MD5 is what is actually used
    (see md5_bitstring_run).  Uses the module-level bitSize.
    """
    print "running md5 avalanche test"
    similarPercents = []
    for i in range(0,iters-1):
        first_bitstring = BitArray('0b'+make_random_bitstring(bitSize))
        flipped_first = flip_random_bit(first_bitstring)
        interim_percent = percentSimilar(md5_bitstring_run(first_bitstring), md5_bitstring_run(flipped_first))
        similarPercents.append(interim_percent)
    print "merkle avalanche overall percent similar for " + str(iters) + " tests (md5) is: " + str(numpy.mean(similarPercents))
    print "merkle standard deviation for avalanche values (md5) is: " + str(numpy.std(similarPercents))
    #make a histogram of the data
    plt.hist(similarPercents)
    plt.title("Histogram of custom hash avalanche values (md5)")
    plt.xlabel("Percent Similar")
    plt.ylabel("Frequency")
    plt.show()
    print "merkle avalanche testing done"
def flip_random_bit(first_bitstring):
    """
    Return a copy of ``first_bitstring`` with one randomly chosen bit
    inverted; the input BitArray is left untouched.
    """
    bits = first_bitstring.bin
    idx = randint(0, len(bits) - 1)
    flipped = '1' if first_bitstring[idx] == 0 else '0'
    return BitArray('0b' + bits[:idx] + flipped + bits[idx + 1:])
def make_random_bitstring(length):
    """
    Return a random string of '0'/'1' characters of exactly ``length`` bits
    (convert to a BitArray yourself if needed).

    Fixes two defects in the original: the first character was always '0'
    (biasing every generated string and making length 0 return "0"), and
    the deprecated Python 2 backtick repr syntax was used instead of str().
    """
    return ''.join(str(randint(0, 1)) for _ in range(length))
def collisionTest(digits):
    '''
    Exhaustively hash every input of 1..digits bits with runMerkle and
    report any digests that occur more than once.

    collisionDict maps each digest (binary string) to
    [occurrence count, list of the input bit strings that produced it].
    Improvements: removed an unused local named ``bin`` that shadowed the
    builtin, and replaced a manual index loop with direct iteration.
    '''
    collisionDict = {}
    numCollisions = 0
    for i in range(digits):
        numDigits = i+1
        for j in range(2**numDigits):
            hashInput = BitArray('uint:'+str(numDigits)+'='+str(j))
            out = runMerkle(hashInput)
            key = out.bin
            if key in collisionDict:
                # Digest seen before: record the new preimage and report all.
                collisionDict[key][0] += 1
                collisionDict[key][1].append(hashInput.bin)
                print("COLLISION")
                numCollisions += 1
                for preimage in collisionDict[key][1]:
                    print(preimage)
            else:
                collisionDict[key] = [1, [hashInput.bin]]
    print("Number collisions: "+str(numCollisions))
if __name__=="__main__":
bitSize = 32
iv = BitArray('0x0d84fee0')
avalanche_test_compression(100, bitSize)
avalanche_test_merkle(100)
avalanche_test_md5(100)
hashInput = BitArray('0x446973637265746520697320617765736f6d6521')
| mit |
aperigault/ansible | lib/ansible/plugins/doc_fragments/iosxr.py | 23 | 2955 | # -*- coding: utf-8 -*-
# Copyright: (c) 2015, Peter Sprygada <psprygada@ansible.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
class ModuleDocFragment(object):
    """Reusable documentation fragment for Cisco IOS-XR network modules.

    Ansible merges the DOCUMENTATION string below into the docs of any
    module declaring ``extends_documentation_fragment: iosxr``.
    """

    # Standard files documentation fragment
    DOCUMENTATION = r'''
options:
  provider:
    description:
      - B(Deprecated)
      - "Starting with Ansible 2.5 we recommend using C(connection: network_cli)."
      - For more information please see the L(Network Guide, ../network/getting_started/network_differences.html#multiple-communication-protocols).
      - HORIZONTALLINE
      - A dict object containing connection details.
    type: dict
    suboptions:
      host:
        description:
          - Specifies the DNS host name or address for connecting to the remote
            device over the specified transport.  The value of host is used as
            the destination address for the transport.
        type: str
        required: true
      port:
        description:
          - Specifies the port to use when building the connection to the remote device.
        type: int
        default: 22
      username:
        description:
          - Configures the username to use to authenticate the connection to
            the remote device.  This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_USERNAME) will be used instead.
        type: str
      password:
        description:
          - Specifies the password to use to authenticate the connection to
            the remote device.   This value is used to authenticate
            the SSH session. If the value is not specified in the task, the
            value of environment variable C(ANSIBLE_NET_PASSWORD) will be used instead.
        type: str
      timeout:
        description:
          - Specifies the timeout in seconds for communicating with the network device
            for either connecting or sending commands.  If the timeout is
            exceeded before the operation is completed, the module will error.
        type: int
        default: 10
      ssh_keyfile:
        description:
          - Specifies the SSH key to use to authenticate the connection to
            the remote device.   This value is the path to the
            key used to authenticate the SSH session. If the value is not specified
            in the task, the value of environment variable C(ANSIBLE_NET_SSH_KEYFILE)
            will be used instead.
        type: path
requirements:
  - ncclient >= 0.5.3 when using netconf
  - lxml >= 4.1.1 when using netconf
notes:
  - For more information on using Ansible to manage network devices see the :ref:`Ansible Network Guide <network_guide>`
  - For more information on using Ansible to manage Cisco devices see the `Cisco integration page <https://www.ansible.com/integrations/networks/cisco>`_.
'''
| gpl-3.0 |
40223222/40223222 | static/Brython3.1.1-20150328-091302/Lib/xml/sax/__init__.py | 637 | 3505 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
    """Parse an XML document from a URL, file name, file object or InputSource.

    SAX events are delivered to *handler*.  *errorHandler* may be passed as
    None (a default ErrorHandler is then used) - this mirrors parseString,
    which already guarded against None; the two entry points now behave
    consistently.
    """
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)
    parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
    """Parse an XML document held in a string.

    *string* may be a bytes object (parsed as a byte stream, as before) or
    - as a backward-compatible generalization matching modern
    xml.sax.parseString - a str, which is fed to the parser as a character
    stream.  Events go to *handler*; *errorHandler* may be None.
    """
    from io import BytesIO, StringIO
    if errorHandler is None:
        errorHandler = ErrorHandler()
    parser = make_parser()
    parser.setContentHandler(handler)
    parser.setErrorHandler(errorHandler)

    inpsrc = InputSource()
    if isinstance(string, str):
        # Text input: hand it over undecoded as a character stream.
        inpsrc.setCharacterStream(StringIO(string))
    else:
        inpsrc.setByteStream(BytesIO(string))
    parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function

default_parser_list = ["xml.sax.expatreader"]

# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
    import xml.sax.expatreader

import os, sys
#if "PY_SAX_PARSER" in os.environ:
#    default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os

# On Jython an alternative parser list may be configured in the registry.
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
    default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
    """Creates and returns a SAX parser.

    Creates the first parser it is able to instantiate of the ones
    given in the list created by doing parser_list +
    default_parser_list.  The lists must contain the names of Python
    modules containing both a SAX parser and a create_parser function."""
    # NOTE(review): the mutable default [] is safe here because the list is
    # only read, never mutated.
    for parser_name in parser_list + default_parser_list:
        try:
            return _create_parser(parser_name)
        except ImportError as e:
            import sys
            if parser_name in sys.modules:
                # The parser module was found, but importing it
                # failed unexpectedly, pass this exception through
                raise
        except SAXReaderNotAvailable:
            # The parser module detected that it won't work properly,
            # so try the next one
            pass
    raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser

if sys.platform[ : 4] == "java":
    # On Jython, driver modules are loaded through the Java import machinery.
    def _create_parser(parser_name):
        from org.python.core import imp
        drv_module = imp.importName(parser_name, 0, globals())
        return drv_module.create_parser()

else:
    # On CPython, import the driver module by name and call its factory.
    def _create_parser(parser_name):
        drv_module = __import__(parser_name,{},{},['create_parser'])
        return drv_module.create_parser()

del sys
| gpl-3.0 |
aplanas/rally | tests/unit/plugins/openstack/scenarios/swift/test_utils.py | 6 | 8600 | # Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.plugins.openstack.scenarios.swift import utils
from tests.unit import test
SWIFT_UTILS = "rally.plugins.openstack.scenarios.swift.utils"
class SwiftScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the atomic actions of utils.SwiftScenario.

    Each test stubs the mocked swift client, invokes one scenario helper
    and verifies the client call arguments plus the atomic action timer.
    Consistency fix: the scenario is now always constructed with the
    ``context=`` keyword (test__upload_object previously passed it
    positionally, unlike every other test).
    """

    def test__list_containers(self):
        """_list_containers must request a full account listing."""
        headers_dict = mock.MagicMock()
        containers_list = mock.MagicMock()
        self.clients("swift").get_account.return_value = (headers_dict,
                                                          containers_list)
        scenario = utils.SwiftScenario(context=self.context)
        self.assertEqual((headers_dict, containers_list),
                         scenario._list_containers(fargs="f"))
        kw = {"full_listing": True, "fargs": "f"}
        self.clients("swift").get_account.assert_called_once_with(**kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.list_containers")

    def test__create_container(self):
        """_create_container must honour public/headers and generate names."""
        container_name = mock.MagicMock()
        scenario = utils.SwiftScenario()

        # name + public=True + kw
        self.assertEqual(container_name,
                         scenario._create_container(container_name,
                                                    public=True, fargs="f"))
        kw = {"headers": {"X-Container-Read": ".r:*,.rlistings"}, "fargs": "f"}
        self.clients("swift").put_container.assert_called_once_with(
            container_name,
            **kw)

        # name + public=True + additional header + kw
        self.clients("swift").put_container.reset_mock()
        self.assertEqual(container_name,
                         scenario._create_container(container_name,
                                                    public=True,
                                                    headers={"X-fake-name":
                                                             "fake-value"},
                                                    fargs="f"))
        kw = {"headers": {"X-Container-Read": ".r:*,.rlistings",
                          "X-fake-name": "fake-value"}, "fargs": "f"}
        self.clients("swift").put_container.assert_called_once_with(
            container_name,
            **kw)

        # name + public=False + additional header + kw
        self.clients("swift").put_container.reset_mock()
        self.assertEqual(container_name,
                         scenario._create_container(container_name,
                                                    public=False,
                                                    headers={"X-fake-name":
                                                             "fake-value"},
                                                    fargs="f"))
        kw = {"headers": {"X-fake-name": "fake-value"}, "fargs": "f"}
        self.clients("swift").put_container.assert_called_once_with(
            container_name,
            **kw)

        # name + kw
        self.clients("swift").put_container.reset_mock()
        self.assertEqual(container_name,
                         scenario._create_container(container_name, fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").put_container.assert_called_once_with(
            container_name,
            **kw)

        # kw
        scenario._generate_random_name = mock.MagicMock(
            return_value=container_name)
        self.clients("swift").put_container.reset_mock()
        self.assertEqual(container_name,
                         scenario._create_container(fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").put_container.assert_called_once_with(
            container_name,
            **kw)
        self.assertEqual(1, scenario._generate_random_name.call_count)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.create_container")

    def test__delete_container(self):
        """_delete_container must forward extra kwargs to the client."""
        container_name = mock.MagicMock()
        scenario = utils.SwiftScenario(context=self.context)
        scenario._delete_container(container_name, fargs="f")
        kw = {"fargs": "f"}
        self.clients("swift").delete_container.assert_called_once_with(
            container_name,
            **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.delete_container")

    def test__list_objects(self):
        """_list_objects must request a full container listing."""
        container_name = mock.MagicMock()
        headers_dict = mock.MagicMock()
        objects_list = mock.MagicMock()
        self.clients("swift").get_container.return_value = (headers_dict,
                                                            objects_list)
        scenario = utils.SwiftScenario(context=self.context)
        self.assertEqual((headers_dict, objects_list),
                         scenario._list_objects(container_name, fargs="f"))
        kw = {"full_listing": True, "fargs": "f"}
        self.clients("swift").get_container.assert_called_once_with(
            container_name,
            **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.list_objects")

    def test__upload_object(self):
        """_upload_object must return (etag, name) and generate names."""
        container_name = mock.MagicMock()
        object_name = mock.MagicMock()
        content = mock.MagicMock()
        etag = mock.MagicMock()
        self.clients("swift").put_object.return_value = etag
        scenario = utils.SwiftScenario(context=self.context)

        # container + content + name + kw
        self.assertEqual((etag, object_name),
                         scenario._upload_object(container_name, content,
                                                 object_name=object_name,
                                                 fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").put_object.assert_called_once_with(
            container_name, object_name,
            content, **kw)

        # container + content + kw
        scenario._generate_random_name = mock.MagicMock(
            return_value=object_name)
        self.clients("swift").put_object.reset_mock()
        self.assertEqual((etag, object_name),
                         scenario._upload_object(container_name, content,
                                                 fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").put_object.assert_called_once_with(
            container_name, object_name,
            content, **kw)
        self.assertEqual(1, scenario._generate_random_name.call_count)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.upload_object")

    def test__download_object(self):
        """_download_object must return the (headers, content) tuple."""
        container_name = mock.MagicMock()
        object_name = mock.MagicMock()
        headers_dict = mock.MagicMock()
        content = mock.MagicMock()
        self.clients("swift").get_object.return_value = (headers_dict, content)
        scenario = utils.SwiftScenario(context=self.context)
        self.assertEqual((headers_dict, content),
                         scenario._download_object(container_name, object_name,
                                                   fargs="f"))
        kw = {"fargs": "f"}
        self.clients("swift").get_object.assert_called_once_with(
            container_name, object_name,
            **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.download_object")

    def test__delete_object(self):
        """_delete_object must forward extra kwargs to the client."""
        container_name = mock.MagicMock()
        object_name = mock.MagicMock()
        scenario = utils.SwiftScenario(context=self.context)
        scenario._delete_object(container_name, object_name, fargs="f")
        kw = {"fargs": "f"}
        self.clients("swift").delete_object.assert_called_once_with(
            container_name, object_name,
            **kw)

        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "swift.delete_object")
| apache-2.0 |
WaveBlocks/WaveBlocks | src/WaveBlocks/IOM_plugin_wavepacket.py | 1 | 6577 | """The WaveBlocks Project
IOM plugin providing functions for handling
homogeneous Hagedorn wavepacket data.
@author: R. Bourquin
@copyright: Copyright (C) 2010, 2011 R. Bourquin
@license: Modified BSD License
"""
import numpy as np
def add_wavepacket(self, parameters, timeslots=None, blockid=0):
    r"""
    Add storage for the homogeneous wavepackets.

    :param parameters: An ``ParameterProvider`` instance with at least the keys ``basis_size`` and ``ncomponents``.
    :param timeslots: Number of timeslots to reserve; ``None`` selects event based (resizable) storage.
    :param blockid: The data block to which the datasets are added.
    """
    grp_wp = self._srf[self._prefixb+str(blockid)].require_group("wavepacket")

    # If we run with an adaptive basis size, then we must make the data tensor size maximal
    # NOTE(review): dict.has_key is Python 2 only.
    if parameters.has_key("max_basis_size"):
        bs = parameters["max_basis_size"]
    else:
        bs = np.max(parameters["basis_size"])

    # Create the dataset with appropriate parameters
    if timeslots is None:
        # This case is event based storing
        daset_tg = grp_wp.create_dataset("timegrid", (0,), dtype=np.integer, chunks=True, maxshape=(None,))
        daset_bs = grp_wp.create_dataset("basis_size", (0, parameters["ncomponents"]), dtype=np.integer, chunks=True, maxshape=(None,parameters["ncomponents"]))
        daset_pi = grp_wp.create_dataset("Pi", (0, 1, 5), dtype=np.complexfloating, chunks=True, maxshape=(None,1,5))
        daset_c = grp_wp.create_dataset("coefficients", (0, parameters["ncomponents"], bs), dtype=np.complexfloating, chunks=True, maxshape=(None,parameters["ncomponents"],bs))
    else:
        # User specified how much space is necessary.
        daset_tg = grp_wp.create_dataset("timegrid", (timeslots,), dtype=np.integer)
        daset_bs = grp_wp.create_dataset("basis_size", (timeslots, parameters["ncomponents"]), dtype=np.integer)
        daset_pi = grp_wp.create_dataset("Pi", (timeslots, 1, 5), dtype=np.complexfloating)
        daset_c = grp_wp.create_dataset("coefficients", (timeslots, parameters["ncomponents"], bs), dtype=np.complexfloating)

    # Attach pointer to data instead timegrid
    # Reason is that we have have two save functions but one timegrid
    #daset_bs.attrs["pointer"] = 0
    daset_pi.attrs["pointer"] = 0
    daset_c.attrs["pointer"] = 0
def delete_wavepacket(self, blockid=0):
    r"""
    Remove the stored wavepackets.

    :param blockid: The identifier of the data block whose wavepacket
                    storage gets removed.
    """
    # Removing a non-existent group is deliberately not an error.
    group_path = self._prefixb + str(blockid) + "/wavepacket"
    try:
        del self._srf[group_path]
    except KeyError:
        pass
def has_wavepacket(self, blockid=0):
    r"""
    Ask if the specified data block has the desired data tensor.

    :param blockid: The identifier of the data block to query.
    :return: ``True`` iff a ``wavepacket`` group exists in the block.
    """
    block = self._srf[self._prefixb + str(blockid)]
    return "wavepacket" in block.keys()
def save_wavepacket_parameters(self, parameters, timestep=None, blockid=0):
    r"""
    Save the parameters of the Hagedorn wavepacket to a file.

    :param parameters: The parameters of the Hagedorn wavepacket.
    :param timestep: The timestep the stored values belong to.
    :param blockid: The identifier of the data block to operate on.
    """
    prefix = "/" + self._prefixb + str(blockid) + "/wavepacket/"
    pathtg = prefix + "timegrid"
    pathd = prefix + "Pi"
    timeslot = self._srf[pathd].attrs["pointer"]

    # Write the parameter set into the current timeslot.
    self.must_resize(pathd, timeslot)
    self._srf[pathd][timeslot, 0, :] = np.squeeze(np.array(parameters))

    # Remember which timestep the stored values belong to.
    self.must_resize(pathtg, timeslot)
    self._srf[pathtg][timeslot] = timestep

    # Advance the pointer to the next free timeslot.
    self._srf[pathd].attrs["pointer"] += 1
def save_wavepacket_coefficients(self, coefficients, timestep=None, blockid=0):
    r"""
    Save the coefficients of the Hagedorn wavepacket to a file.

    :param coefficients: The coefficients of the Hagedorn wavepacket,
                         one vector per component.
    :param timestep: The timestep the stored values belong to.
    :param blockid: The identifier of the data block to operate on.
    """
    prefix = "/" + self._prefixb + str(blockid) + "/wavepacket/"
    pathtg = prefix + "timegrid"
    pathbs = prefix + "basis_size"
    pathd = prefix + "coefficients"
    timeslot = self._srf[pathd].attrs["pointer"]

    # Store each component's coefficient vector along with its basis size.
    self.must_resize(pathd, timeslot)
    self.must_resize(pathbs, timeslot)
    for index, item in enumerate(coefficients):
        size = item.shape[0]
        self._srf[pathbs][timeslot, index] = size
        self._srf[pathd][timeslot, index, :size] = np.squeeze(item)

    # Remember which timestep the stored values belong to.
    self.must_resize(pathtg, timeslot)
    self._srf[pathtg][timeslot] = timestep

    # Advance the pointer to the next free timeslot. The basis size
    # shares this pointer and is not advanced separately.
    self._srf[pathd].attrs["pointer"] += 1
# The basis size already gets stored when saving the coefficients!
# def save_wavepacket_basissize(self, basissize, timestep=None, blockid=0):
# r"""
# Save the basis size of the Hagedorn wavepacket to a file.
#
# :param basissize: The basis size of the Hagedorn wavepacket.
# """
# pathtg = "/"+self._prefixb+str(blockid)+"/wavepacket/timegrid"
# pathd = "/"+self._prefixb+str(blockid)+"/wavepacket/basis_size"
# timeslot = self._srf[pathd].attrs["pointer"]
#
# # Write the data
# self.must_resize(pathd, timeslot)
# self._srf[pathd][timeslot,:] = np.squeeze(np.array(basissize))
#
# # Write the timestep to which the stored values belong into the timegrid
# self.must_resize(pathtg, timeslot)
# self._srf[pathtg][timeslot] = timestep
#
# # Update the pointer
# self._srf[pathd].attrs["pointer"] += 1
def load_wavepacket_timegrid(self, blockid=0):
    r"""
    Load the timegrid of the stored wavepackets.

    :param blockid: The identifier of the data block to read from.
    :return: A copy of the whole ``timegrid`` dataset.
    """
    path = "/" + self._prefixb + str(blockid) + "/wavepacket/timegrid"
    return self._srf[path][:]
def load_wavepacket_parameters(self, timestep=None, blockid=0):
    r"""
    Load the parameters :math:`\Pi` of the stored wavepackets.

    :param timestep: If given, load only the data of this timestep,
                     otherwise the data of all timesteps.
    :param blockid: The identifier of the data block to read from.
    """
    prefix = "/" + self._prefixb + str(blockid) + "/wavepacket/"
    if timestep is None:
        return self._srf[prefix + "Pi"][..., 0, :]
    index = self.find_timestep_index(prefix + "timegrid", timestep)
    return self._srf[prefix + "Pi"][index, 0, :]
def load_wavepacket_coefficients(self, timestep=None, blockid=0):
    r"""
    Load the coefficients of the stored wavepackets.

    :param timestep: If given, load only the data of this timestep,
                     otherwise the data of all timesteps.
    :param blockid: The identifier of the data block to read from.
    """
    prefix = "/" + self._prefixb + str(blockid) + "/wavepacket/"
    if timestep is None:
        return self._srf[prefix + "coefficients"][...]
    index = self.find_timestep_index(prefix + "timegrid", timestep)
    return self._srf[prefix + "coefficients"][index, ...]
def load_wavepacket_basissize(self, timestep=None, blockid=0):
    r"""
    Load the basis sizes of the stored wavepackets.

    :param timestep: If given, load only the data of this timestep,
                     otherwise the data of all timesteps.
    :param blockid: The identifier of the data block to read from.
    """
    prefix = "/" + self._prefixb + str(blockid) + "/wavepacket/"
    if timestep is None:
        return self._srf[prefix + "basis_size"][..., :]
    index = self.find_timestep_index(prefix + "timegrid", timestep)
    return self._srf[prefix + "basis_size"][index, :]
| bsd-3-clause |
tlatzko/spmcluster | .tox/2.7-cover/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/codingstatemachine.py | 2931 | 2318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .constants import eStart
from .compat import wrap_ord
class CodingStateMachine:
    """Drive a byte-class / state-table encoding model over a byte stream.

    The model ``sm`` is a dict with the keys ``classTable``,
    ``classFactor``, ``stateTable``, ``charLenTable`` and ``name``.
    """

    def __init__(self, sm):
        self._mModel = sm
        self._mCurrentBytePos = 0
        self._mCurrentCharLen = 0
        self.reset()

    def reset(self):
        # Restart recognition from the initial state.
        self._mCurrentState = eStart

    def next_state(self, c):
        # Classify the incoming byte; on the first byte of a character we
        # also latch the expected character length.
        # PY3K: aBuf is a byte stream, so c is an int, not a byte.
        byte_class = self._mModel['classTable'][wrap_ord(c)]
        if self._mCurrentState == eStart:
            self._mCurrentBytePos = 0
            self._mCurrentCharLen = self._mModel['charLenTable'][byte_class]
        # Transition index is (current state, byte class) flattened.
        transition = (self._mCurrentState * self._mModel['classFactor']
                      + byte_class)
        self._mCurrentState = self._mModel['stateTable'][transition]
        self._mCurrentBytePos += 1
        return self._mCurrentState

    def get_current_charlen(self):
        # Byte length of the character currently being decoded.
        return self._mCurrentCharLen

    def get_coding_state_machine(self):
        # Human-readable name of the underlying model.
        return self._mModel['name']
| bsd-2-clause |
Beeblio/django | tests/many_to_one/tests.py | 34 | 21685 | from copy import deepcopy
import datetime
from django.core.exceptions import MultipleObjectsReturned, FieldError
from django.db import transaction
from django.test import TestCase
from django.utils import six
from django.utils.translation import ugettext_lazy
from .models import Article, Reporter
class ManyToOneTests(TestCase):
    """Tests for ForeignKey (many-to-one) relations between ``Article``
    and ``Reporter``: creation, assignment, forward and reverse querying,
    cascade deletion and related-manager semantics."""
    def setUp(self):
        """Create two Reporters and one Article shared by most tests."""
        # Create a few Reporters.
        self.r = Reporter(first_name='John', last_name='Smith', email='john@example.com')
        self.r.save()
        self.r2 = Reporter(first_name='Paul', last_name='Jones', email='paul@example.com')
        self.r2.save()
        # Create an Article.
        self.a = Article(id=None, headline="This is a test",
                         pub_date=datetime.date(2005, 7, 27), reporter=self.r)
        self.a.save()
    def test_get(self):
        """Forward access from an Article to its related Reporter."""
        # Article objects have access to their related Reporter objects.
        r = self.a.reporter
        self.assertEqual(r.id, self.r.id)
        # These are strings instead of unicode strings because that's what was used in
        # the creation of this reporter (and we haven't refreshed the data from the
        # database, which always returns unicode strings).
        self.assertEqual((r.first_name, self.r.last_name), ('John', 'Smith'))
    def test_create(self):
        """Articles can be created via a Reporter instance or a raw id."""
        # You can also instantiate an Article by passing the Reporter's ID
        # instead of a Reporter object.
        a3 = Article(id=None, headline="Third article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        a3.save()
        self.assertEqual(a3.reporter.id, self.r.id)
        # Similarly, the reporter ID can be a string.
        a4 = Article(id=None, headline="Fourth article",
                     pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        a4.save()
        self.assertEqual(repr(a4.reporter), "<Reporter: John Smith>")
    def test_add(self):
        """RelatedManager ``create()``/``add()`` attach articles to a reporter."""
        # Create an Article via the Reporter object.
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        self.assertEqual(repr(new_article), "<Article: John's second story>")
        self.assertEqual(new_article.reporter.id, self.r.id)
        # Create a new article, and add it to the article set.
        new_article2 = Article(headline="Paul's story", pub_date=datetime.date(2006, 1, 17))
        self.r.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # Add the same article to a different article set - check that it moves.
        self.r2.article_set.add(new_article2)
        self.assertEqual(new_article2.reporter.id, self.r2.id)
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Adding an object of the wrong type raises TypeError.
        with transaction.atomic():
            with six.assertRaisesRegex(self, TypeError,
                                       "'Article' instance expected, got <Reporter.*"):
                self.r.article_set.add(self.r2)
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
    def test_assign(self):
        """Direct FK descriptor assignment and reverse-manager assignment."""
        new_article = self.r.article_set.create(headline="John's second story",
                                                pub_date=datetime.date(2005, 7, 29))
        new_article2 = self.r2.article_set.create(headline="Paul's story",
                                                  pub_date=datetime.date(2006, 1, 17))
        # Assign the article to the reporter directly using the descriptor.
        new_article2.reporter = self.r
        new_article2.save()
        self.assertEqual(repr(new_article2.reporter), "<Reporter: John Smith>")
        self.assertEqual(new_article2.reporter.id, self.r.id)
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: Paul's story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r2.article_set.all(), [])
        # Set the article back again using set descriptor.
        self.r2.article_set = [new_article, new_article2]
        self.assertQuerysetEqual(self.r.article_set.all(), ["<Article: This is a test>"])
        self.assertQuerysetEqual(self.r2.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
            ])
        # Funny case - assignment notation can only go so far; because the
        # ForeignKey cannot be null, existing members of the set must remain.
        self.r.article_set = [new_article]
        self.assertQuerysetEqual(self.r.article_set.all(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(self.r2.article_set.all(), ["<Article: Paul's story>"])
        # Reporter cannot be null - there should not be a clear or remove method
        self.assertFalse(hasattr(self.r2.article_set, 'remove'))
        self.assertFalse(hasattr(self.r2.article_set, 'clear'))
    def test_selects(self):
        """Forward filtering and JOIN behavior across the FK relation."""
        self.r.article_set.create(headline="John's second story",
                                  pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story",
                                   pub_date=datetime.date(2006, 1, 17))
        # Reporter objects have access to their related Article objects.
        self.assertQuerysetEqual(self.r.article_set.all(), [
            "<Article: John's second story>",
            "<Article: This is a test>",
        ])
        self.assertQuerysetEqual(self.r.article_set.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        self.assertEqual(self.r.article_set.count(), 2)
        self.assertEqual(self.r2.article_set.count(), 1)
        # Get articles by id
        self.assertQuerysetEqual(Article.objects.filter(id__exact=self.a.id),
                                 ["<Article: This is a test>"])
        self.assertQuerysetEqual(Article.objects.filter(pk=self.a.id),
                                 ["<Article: This is a test>"])
        # Query on an article property
        self.assertQuerysetEqual(Article.objects.filter(headline__startswith='This'),
                                 ["<Article: This is a test>"])
        # The API automatically follows relationships as far as you need.
        # Use double underscores to separate relationships.
        # This works as many levels deep as you want. There's no limit.
        # Find all Articles for any Reporter whose first name is "John".
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name__exact='John'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Check that implied __exact also works
        self.assertQuerysetEqual(Article.objects.filter(reporter__first_name='John'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Query twice over the related field.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John',
                                   reporter__last_name__exact='Smith'),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # The underlying query only makes one join when a related table is referenced twice.
        queryset = Article.objects.filter(reporter__first_name__exact='John',
                                          reporter__last_name__exact='Smith')
        self.assertNumQueries(1, list, queryset)
        self.assertEqual(queryset.query.get_compiler(queryset.db).as_sql()[0].count('INNER JOIN'), 1)
        # The automatically joined table has a predictable name.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__first_name__exact='John').extra(
                where=["many_to_one_reporter.last_name='Smith'"]),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # ... and should work fine with the unicode that comes out of forms.Form.cleaned_data
        self.assertQuerysetEqual(
            (Article.objects
             .filter(reporter__first_name__exact='John')
             .extra(where=["many_to_one_reporter.last_name='%s'" % 'Smith'])),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        # Find all Articles for a Reporter.
        # Use direct ID check, pk check, and object comparison
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__id__exact=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__pk=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r.id),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter=self.r),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r.id, self.r2.id]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(
            Article.objects.filter(reporter__in=[self.r, self.r2]).distinct(),
            [
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: This is a test>",
            ])
        # You can also use a queryset instead of a literal list of instances.
        # The queryset must be reduced to a list of values using values(),
        # then converted into a query
        self.assertQuerysetEqual(
            Article.objects.filter(
                reporter__in=Reporter.objects.filter(first_name='John').values('pk').query
            ).distinct(),
            [
                "<Article: John's second story>",
                "<Article: This is a test>",
            ])
    def test_reverse_selects(self):
        """Reverse filtering from Reporter through ``article__*`` lookups."""
        a3 = Article.objects.create(id=None, headline="Third article",
                                    pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        Article.objects.create(id=None, headline="Fourth article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # Reporters can be queried
        self.assertQuerysetEqual(Reporter.objects.filter(id__exact=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(pk=self.r.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(first_name__startswith='John'),
                                 ["<Reporter: John Smith>"])
        # Reporters can query in opposite direction of ForeignKey definition
        self.assertQuerysetEqual(Reporter.objects.filter(article__id__exact=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article__pk=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a.id),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(Reporter.objects.filter(article=self.a),
                                 ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id, a3.id]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a.id, a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__in=[self.a, a3]).distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T'),
            ["<Reporter: John Smith>", "<Reporter: John Smith>"],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct(),
            ["<Reporter: John Smith>"])
        # Counting in the opposite direction works in conjunction with distinct()
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').count(), 2)
        self.assertEqual(
            Reporter.objects.filter(article__headline__startswith='T').distinct().count(), 1)
        # Queries can go round in circles.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John'),
            [
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
                "<Reporter: John Smith>",
            ],
            ordered=False
        )
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__first_name__startswith='John').distinct(),
            ["<Reporter: John Smith>"])
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter__exact=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # Check that implied __exact also works.
        self.assertQuerysetEqual(
            Reporter.objects.filter(article__reporter=self.r).distinct(),
            ["<Reporter: John Smith>"])
        # It's possible to use values() calls across many-to-one relations.
        # (Note, too, that we clear the ordering here so as not to drag the
        # 'headline' field into the columns being used to determine uniqueness)
        d = {'reporter__first_name': 'John', 'reporter__last_name': 'Smith'}
        self.assertEqual([d],
            list(Article.objects.filter(reporter=self.r).distinct().order_by()
                 .values('reporter__first_name', 'reporter__last_name')))
    def test_select_related(self):
        """``select_related()`` combined with ``dates()`` across the FK."""
        # Check that Article.objects.select_related().dates() works properly when
        # there are multiple Articles with the same date but different foreign-key
        # objects (Reporters).
        r1 = Reporter.objects.create(first_name='Mike', last_name='Royko', email='royko@suntimes.com')
        r2 = Reporter.objects.create(first_name='John', last_name='Kass', email='jkass@tribune.com')
        Article.objects.create(headline='First', pub_date=datetime.date(1980, 4, 23), reporter=r1)
        Article.objects.create(headline='Second', pub_date=datetime.date(1980, 4, 23), reporter=r2)
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'day')),
            [
                datetime.date(1980, 4, 23),
                datetime.date(2005, 7, 27),
            ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'month')),
            [
                datetime.date(1980, 4, 1),
                datetime.date(2005, 7, 1),
            ])
        self.assertEqual(list(Article.objects.select_related().dates('pub_date', 'year')),
            [
                datetime.date(1980, 1, 1),
                datetime.date(2005, 1, 1),
            ])
    def test_delete(self):
        """Deleting a Reporter cascades to its Articles; JOIN deletes work."""
        self.r.article_set.create(headline="John's second story",
                                  pub_date=datetime.date(2005, 7, 29))
        self.r2.article_set.create(headline="Paul's story",
                                   pub_date=datetime.date(2006, 1, 17))
        Article.objects.create(id=None, headline="Third article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=self.r.id)
        Article.objects.create(id=None, headline="Fourth article",
                               pub_date=datetime.date(2005, 7, 27), reporter_id=str(self.r.id))
        # If you delete a reporter, his articles will be deleted.
        self.assertQuerysetEqual(Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Paul's story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
            [
                "<Reporter: John Smith>",
                "<Reporter: Paul Jones>",
            ])
        self.r2.delete()
        self.assertQuerysetEqual(Article.objects.all(),
            [
                "<Article: Fourth article>",
                "<Article: John's second story>",
                "<Article: Third article>",
                "<Article: This is a test>",
            ])
        self.assertQuerysetEqual(Reporter.objects.order_by('first_name'),
                                 ["<Reporter: John Smith>"])
        # You can delete using a JOIN in the query.
        Reporter.objects.filter(article__headline__startswith='This').delete()
        self.assertQuerysetEqual(Reporter.objects.all(), [])
        self.assertQuerysetEqual(Article.objects.all(), [])
    def test_regression_12876(self):
        """deepcopy() of a model holding a recursive queryset must not recurse."""
        # Regression for #12876 -- Model methods that include queries that
        # recursive don't cause recursion depth problems under deepcopy.
        self.r.cached_query = Article.objects.filter(reporter=self.r)
        self.assertEqual(repr(deepcopy(self.r)), "<Reporter: John Smith>")
    def test_explicit_fk(self):
        """``reporter_id`` can be used explicitly in create/get/filter."""
        # Create a new Article with get_or_create using an explicit value
        # for a ForeignKey.
        a2, created = Article.objects.get_or_create(id=None,
                                                    headline="John's second test",
                                                    pub_date=datetime.date(2011, 5, 7),
                                                    reporter_id=self.r.id)
        self.assertTrue(created)
        self.assertEqual(a2.reporter.id, self.r.id)
        # You can specify filters containing the explicit FK value.
        self.assertQuerysetEqual(
            Article.objects.filter(reporter_id__exact=self.r.id),
            [
                "<Article: John's second test>",
                "<Article: This is a test>",
            ])
        # Create an Article by Paul for the same date.
        a3 = Article.objects.create(id=None, headline="Paul's commentary",
                                    pub_date=datetime.date(2011, 5, 7),
                                    reporter_id=self.r2.id)
        self.assertEqual(a3.reporter.id, self.r2.id)
        # Get should respect explicit foreign keys as well.
        self.assertRaises(MultipleObjectsReturned,
                          Article.objects.get, reporter_id=self.r.id)
        self.assertEqual(repr(a3),
                         repr(Article.objects.get(reporter_id=self.r2.id,
                                             pub_date=datetime.date(2011, 5, 7))))
    def test_manager_class_caching(self):
        """The dynamically built RelatedManager class is cached and shared."""
        r1 = Reporter.objects.create(first_name='Mike')
        r2 = Reporter.objects.create(first_name='John')
        # Same twice
        self.assertTrue(r1.article_set.__class__ is r1.article_set.__class__)
        # Same as each other
        self.assertTrue(r1.article_set.__class__ is r2.article_set.__class__)
    def test_create_relation_with_ugettext_lazy(self):
        """Lazy translation strings are usable as field values on create()."""
        reporter = Reporter.objects.create(first_name='John',
                                           last_name='Smith',
                                           email='john.smith@example.com')
        lazy = ugettext_lazy('test')
        reporter.article_set.create(headline=lazy,
                                    pub_date=datetime.date(2011, 6, 10))
        notlazy = six.text_type(lazy)
        article = reporter.article_set.get()
        self.assertEqual(article.headline, notlazy)
    def test_values_list_exception(self):
        """Bad lookups in values_list() raise FieldError listing valid fields."""
        expected_message = "Cannot resolve keyword 'notafield' into field. Choices are: %s"
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(Reporter._meta.get_all_field_names()),
                                 Article.objects.values_list,
                                 'reporter__notafield')
        self.assertRaisesMessage(FieldError,
                                 expected_message % ', '.join(['EXTRA'] + Article._meta.get_all_field_names()),
                                 Article.objects.extra(select={'EXTRA': 'EXTRA_SELECT'}).values_list,
                                 'notafield')
| bsd-3-clause |
qPCR4vir/orange3 | Orange/canvas/application/aboutdialog.py | 3 | 1613 | """
Orange canvas about dialog
"""
import sys
import pkg_resources
from PyQt4.QtGui import QDialog, QDialogButtonBox, QVBoxLayout, QLabel
from PyQt4.QtCore import Qt
from .. import config
ABOUT_TEMPLATE = """\
<center>
<h4>Orange</h4>
<p>Version: {version}</p>
<p>(git revision: {git_revision})</p>
</center>
"""
class AboutDialog(QDialog):
    """A simple "About Orange" dialog showing the splash image, the
    installed version/revision and a Close button."""

    def __init__(self, parent=None, **kwargs):
        QDialog.__init__(self, parent, **kwargs)
        if sys.platform == "darwin":
            self.setAttribute(Qt.WA_MacSmallSize, True)
        self.__setupUi()

    def __setupUi(self):
        vbox = QVBoxLayout()

        # Splash image at the top of the dialog.
        splash_label = QLabel(self)
        splash_pixmap, _ = config.splash_screen()
        splash_label.setPixmap(splash_pixmap)
        # NOTE(review): QVBoxLayout.addWidget's second positional argument
        # is the stretch factor; Qt.AlignCenter is coerced to an int here.
        # Kept as-is to preserve behavior -- confirm intended alignment.
        vbox.addWidget(splash_label, Qt.AlignCenter)

        # Version information; fall back to package metadata when the
        # Orange.version module is unavailable.
        try:
            from Orange.version import version
            from Orange.version import git_revision
        except ImportError:
            dist = pkg_resources.get_distribution("Orange3")
            version = dist.version
            git_revision = "Unknown"

        # TODO: Also list all known add-on versions.
        version_label = QLabel(ABOUT_TEMPLATE.format(version=version,
                                                     git_revision=git_revision[:7]))
        vbox.addWidget(version_label, Qt.AlignCenter)

        # A single 'Close' button that accepts the dialog.
        button_box = QDialogButtonBox(QDialogButtonBox.Close,
                                      Qt.Horizontal,
                                      self)
        vbox.addWidget(button_box)
        button_box.rejected.connect(self.accept)

        vbox.setSizeConstraint(QVBoxLayout.SetFixedSize)
        self.setLayout(vbox)
| bsd-2-clause |
neighborhoodhacker/kernel-prime | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Futex operation codes and flags (see futex(2)).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the average of *total* accumulated over *n* samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (seconds, nanoseconds) pair into a nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Return the whole-seconds part of a nanosecond count."""
    return nsecs / NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as 'SSSSS.NNNNNNNNN'.

    Bug fix: the original had a trailing comma after the formatting
    expression, which made this function return a 1-tuple containing the
    string instead of the string itself.
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Accumulate (min, max, running-average, count) stats in dict[key].

    The average is a simple smoothing of the previous average with the new
    value, as in the original implementation. The parameter name ``dict``
    shadows the builtin but is kept for caller compatibility.
    """
    # Bug fix: dict.has_key() only exists on Python 2; 'in' works on both.
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        min, max, avg, count = dict[key]
        if value < min:
            min = value
        if value > max:
            max = value
        avg = (avg + value) / 2
        dict[key] = (min, max, avg, count + 1)
def clear_term():
    """Clear the terminal via ANSI escapes (cursor home + erase display)."""
    home_then_erase = "\x1b[H" + "\x1b[2J"
    print(home_then_erase)
# Optional syscall-name support via the audit-libs-python bindings.
# When the 'audit' module (or this machine's mapping) is unavailable,
# syscall_name() degrades to returning numeric ids.
audit_package_warned = False
try:
    import audit
    # Map uname() machine names to the audit library machine constants.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha'	: audit.MACH_ALPHA,
        'ia64'	: audit.MACH_IA64,
        'ppc'	: audit.MACH_PPC,
        'ppc64'	: audit.MACH_PPC64,
        's390'	: audit.MACH_S390,
        's390x'	: audit.MACH_S390X,
        'i386'	: audit.MACH_X86,
        'i586'	: audit.MACH_X86,
        'i686'	: audit.MACH_X86,
    }
    try:
        # MACH_ARMEB is missing from older audit releases.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except:
    # audit is missing or this machine is unknown: warn once and fall
    # back to numeric syscall ids (Python 2 print statement).
    if not audit_package_warned:
        audit_package_warned = True
        print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
    """Return the symbolic name of syscall number *id*.

    Falls back to str(id) when the optional audit bindings (or the
    machine id mapping) are unavailable.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # propagate; a NameError from missing 'audit'/'machine_id' still
        # degrades gracefully to the numeric id.
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. 'ENOENT') for error number *nr*.

    Accepts negative kernel-style return values; unknown numbers yield
    'Unknown <nr> errno'.
    """
    try:
        return errno.errorcode[abs(nr)]
    except Exception:
        # Narrowed from a bare 'except:'; covers both unknown codes
        # (KeyError) and non-integer input (TypeError from abs()).
        return "Unknown %d errno" % nr
| gpl-2.0 |
sharady/xen-api | scripts/examples/python/fixpbds.py | 9 | 2398 | #!/usr/bin/env python
# Copyright (c) 2006-2007 XenSource, Inc.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# Update the PBDs of a particular SR.
# This is somewhat more convoluted than simple parameter changes, as PBDs
# are read-only. This is to ensure they are always consistent with the
# state of the world.
# The parameters to change are defined in the variable 'map'
import XenAPI, sys
def main(session,sr,map):
    """Recreate every PBD of the SR with uuid *sr*, merging *map* into each
    PBD's device_config, then plug them back in.

    PBDs are read-only in the XenAPI, so updating device_config requires
    the unplug -> destroy -> create -> plug cycle below.
    """
    # Get all the PBDs associated with the SR
    sr = session.xenapi.SR.get_by_uuid(sr)
    pbds = session.xenapi.SR.get_PBDs(sr)
    # Unplug them all
    for pbd in pbds:
        session.xenapi.PBD.unplug(pbd)
    # Now delete and recreate them one by one, updating the dconf
    for pbd in pbds:
        rec=session.xenapi.PBD.get_record(pbd)
        newdconf=rec['device_config']
        newdconf.update(map)
        session.xenapi.PBD.destroy(pbd)
        # Log the host/SR/new config triple (Python 2 print statement).
        print "host=",rec['host']," sr=",rec['SR'],"newdconf=",newdconf
        pbd=session.xenapi.PBD.create({'host':rec['host'],'SR':rec['SR'],'device_config':newdconf})
        session.xenapi.PBD.plug(pbd)
if __name__ == "__main__":
    # Basic CLI argument handling (Python 2 script).
    if len(sys.argv) < 5:
        print "Usage:"
        print sys.argv[0], "<url> <username> <password> <sr-uuid>"
        print "Note that the device-config parameters that are updated are located in the source file."
        sys.exit(1)
    url = sys.argv[1]
    username = sys.argv[2]
    password = sys.argv[3]
    sr = sys.argv[4]
    # This could be parsed from the command line.
    # Keys merged into every PBD's device_config by main().
    map = { "target":"127.0.0.2" }
    # First acquire a valid session by logging in:
    session = XenAPI.Session(url)
    session.xenapi.login_with_password(username, password)
    main(session,sr,map)
| lgpl-2.1 |
detiber/ansible | lib/ansible/modules/cloud/amazon/ec2_customer_gateway.py | 61 | 9038 | #!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.
# Metadata describing module maturity and support level to Ansible tooling.
ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}
# Module documentation in YAML, rendered by `ansible-doc`.
DOCUMENTATION = '''
---
module: ec2_customer_gateway
short_description: Manage an AWS customer gateway
description:
    - Manage an AWS customer gateway
version_added: "2.2"
author: Michael Baydoun (@MichaelBaydoun)
requirements: [ botocore, boto3 ]
notes:
    - You cannot create more than one customer gateway with the same IP address. If you run an identical request more than one time, the
      first request creates the customer gateway, and subsequent requests return information about the existing customer gateway. The subsequent
      requests do not create new customer gateway resources.
    - Return values contain customer_gateway and customer_gateways keys which are identical dicts. You should use
      customer_gateway. See U(https://github.com/ansible/ansible-modules-extras/issues/2773) for details.
options:
  bgp_asn:
    description:
      - Border Gateway Protocol (BGP) Autonomous System Number (ASN), required when state=present.
    required: false
    default: null
  ip_address:
    description:
      - Internet-routable IP address for customers gateway, must be a static address.
    required: true
  name:
    description:
      - Name of the customer gateway.
    required: true
  state:
    description:
      - Create or terminate the Customer Gateway.
    required: false
    default: present
    choices: [ 'present', 'absent' ]
extends_documentation_fragment:
    - aws
    - ec2
'''
# Usage examples, also rendered by `ansible-doc`.
EXAMPLES = '''
# Create Customer Gateway
- ec2_customer_gateway:
    bgp_asn: 12345
    ip_address: 1.2.3.4
    name: IndianapolisOffice
    region: us-east-1
  register: cgw

# Delete Customer Gateway
- ec2_customer_gateway:
    ip_address: 1.2.3.4
    name: IndianapolisOffice
    state: absent
    region: us-east-1
  register: cgw
'''
# NOTE(review): several entries below use 'state: available' where the
# 'returned:'/'sample:' keys appear to have been intended -- verify
# against the Ansible return-value documentation conventions.
RETURN = '''
gateway.customer_gateways:
    description: details about the gateway that was created.
    returned: success
    type: complex
    contains:
        bgp_asn:
            description: The Border Gateway Autonomous System Number.
            returned: when exists and gateway is available.
            sample: 65123
            type: string
        customer_gateway_id:
            description: gateway id assigned by amazon.
            returned: when exists and gateway is available.
            sample: cgw-cb6386a2
            type: string
        ip_address:
            description: ip address of your gateway device.
            returned: when exists and gateway is available.
            sample: 1.2.3.4
            type: string
        state:
            description: state of gateway.
            returned: when gateway exists and is available.
            state: available
            type: string
        tags:
            description: any tags on the gateway.
            returned: when gateway exists and is available, and when tags exist.
            state: available
            type: string
        type:
            description: encryption type.
            returned: when gateway exists and is available.
            sample: ipsec.1
            type: string
'''
# botocore/boto3 are optional imports; their availability is checked at
# runtime and the module fails gracefully when they are missing.
try:
    from botocore.exceptions import ClientError
    HAS_BOTOCORE = True
except ImportError:
    HAS_BOTOCORE = False
try:
    import boto3
    HAS_BOTO3 = True
except ImportError:
    HAS_BOTO3 = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ec2 import (boto3_conn, camel_dict_to_snake_dict,
ec2_argument_spec, get_aws_connection_info)
class Ec2CustomerGatewayManager:
    """Thin wrapper around the boto3 EC2 client for customer-gateway calls."""

    def __init__(self, module):
        """Build an EC2 client from the Ansible module's AWS parameters.

        Fails the module when no region is configured or the connection
        cannot be established.
        """
        self.module = module
        try:
            region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
            if not region:
                module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")
            self.ec2 = boto3_conn(module, conn_type='client', resource='ec2', region=region, endpoint=ec2_url, **aws_connect_kwargs)
        except ClientError as e:
            module.fail_json(msg=e.message)

    def ensure_cgw_absent(self, gw_id):
        """Delete the customer gateway identified by gw_id."""
        return self.ec2.delete_customer_gateway(
            DryRun=False,
            CustomerGatewayId=gw_id
        )

    def ensure_cgw_present(self, bgp_asn, ip_address):
        """Create an ipsec.1 customer gateway for the given BGP ASN and IP."""
        return self.ec2.create_customer_gateway(
            DryRun=False,
            Type='ipsec.1',
            PublicIp=ip_address,
            BgpAsn=bgp_asn,
        )

    def tag_cgw_name(self, gw_id, name):
        """Set (or overwrite) the 'Name' tag on the gateway."""
        return self.ec2.create_tags(
            DryRun=False,
            Resources=[gw_id],
            Tags=[{'Key': 'Name', 'Value': name}]
        )

    def describe_gateways(self, ip_address):
        """Look up gateways in state 'available' that match ip_address."""
        return self.ec2.describe_customer_gateways(
            DryRun=False,
            Filters=[
                {'Name': 'state', 'Values': ['available']},
                {'Name': 'ip-address', 'Values': [ip_address]}
            ]
        )
def main():
    """Ansible module entry point.

    Ensures an EC2 customer gateway exists (state=present) or is removed
    (state=absent) for the given ip_address, tagging it with `name`.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            bgp_asn=dict(required=False, type='int'),
            ip_address=dict(required=True),
            name=dict(required=True),
            state=dict(default='present', choices=['present', 'absent']),
        )
    )

    # bgp_asn is only meaningful (and required) when creating a gateway.
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           required_if=[
                               ('state', 'present', ['bgp_asn'])
                           ])

    if not HAS_BOTOCORE:
        module.fail_json(msg='botocore is required.')

    if not HAS_BOTO3:
        module.fail_json(msg='boto3 is required.')

    gw_mgr = Ec2CustomerGatewayManager(module)

    name = module.params.get('name')

    existing = gw_mgr.describe_gateways(module.params['ip_address'])
    # describe_gateways returns a key of CustomerGateways where as create_gateway
    # returns a key of CustomerGateway. For consistency, change it here.
    existing['CustomerGateway'] = existing['CustomerGateways']

    results = dict(changed=False)

    if module.params['state'] == 'present':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            # Fix: keep the rename inside the matching-tag branch so the
            # current name can never be referenced before assignment when the
            # gateway has tags but no 'Name' tag (the old
            # `for key, value in enumerate(...)` loop also misnamed the index).
            for tag in existing['CustomerGateway'][0].get('Tags', []):
                if tag.get('Key') == 'Name':
                    if tag.get('Value') != name:
                        # NOTE(review): this tagging call also runs in check
                        # mode; confirm whether check mode should skip it.
                        results['name'] = gw_mgr.tag_cgw_name(
                            results['gateway']['CustomerGateway'][0]['CustomerGatewayId'],
                            module.params['name'],
                        )
                        results['changed'] = True
                    break
        else:
            if not module.check_mode:
                results['gateway'] = gw_mgr.ensure_cgw_present(
                    module.params['bgp_asn'],
                    module.params['ip_address'],
                )
                results['name'] = gw_mgr.tag_cgw_name(
                    results['gateway']['CustomerGateway']['CustomerGatewayId'],
                    module.params['name'],
                )
                results['changed'] = True

    elif module.params['state'] == 'absent':
        if existing['CustomerGateway']:
            results['gateway'] = existing
            if not module.check_mode:
                results['gateway'] = gw_mgr.ensure_cgw_absent(
                    existing['CustomerGateway'][0]['CustomerGatewayId']
                )
                results['changed'] = True

    pretty_results = camel_dict_to_snake_dict(results)
    module.exit_json(**pretty_results)


if __name__ == '__main__':
    main()
| gpl-3.0 |
aksaxena80/test | tensorflow/models/rnn/linear_test.py | 5 | 1207 | # pylint: disable=g-bad-import-order,unused-import
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.models.rnn import linear
class LinearTest(tf.test.TestCase):
  """Tests for the rnn `linear` helper (pre-1.0 TensorFlow graph/session API)."""

  def testLinear(self):
    with self.test_session() as sess:
      with tf.variable_scope("root", initializer=tf.constant_initializer(1.0)):
        x = tf.zeros([1, 2])
        l = linear.linear([x], 2, False)
        sess.run([tf.variables.initialize_all_variables()])
        res = sess.run([l], {x.name: np.array([[1., 2.]])})
        # With the all-ones initializer and no bias, each output is 1 + 2 = 3.
        self.assertAllClose(res[0], [[3.0, 3.0]])

      # Checks prevent you from accidentally creating a shared function.
      with self.assertRaises(ValueError) as exc:
        l1 = linear.linear([x], 2, False)
      self.assertEqual(exc.exception.message[:12], "Over-sharing")

      # But you can create a new one in a new scope and share the variables.
      with tf.variable_scope("l1") as new_scope:
        l1 = linear.linear([x], 2, False)
      with tf.variable_scope(new_scope, reuse=True):
        linear.linear([l1], 2, False)
      # Reuse means the second linear in scope "l1" created no new variables:
      # only the matrices from "root" and "l1" exist.
      self.assertEqual(len(tf.trainable_variables()), 2)
if __name__ == "__main__":
tf.test.main()
| apache-2.0 |
SerCeMan/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/geos/coordseq.py | 411 | 5396 | """
This module houses the GEOSCoordSeq object, which is used internally
by GEOSGeometry to house the actual coordinates of the Point,
LineString, and LinearRing geometries.
"""
from ctypes import c_double, c_uint, byref
from django.contrib.gis.geos.base import GEOSBase, numpy
from django.contrib.gis.geos.error import GEOSException, GEOSIndexError
from django.contrib.gis.geos.libgeos import CS_PTR
from django.contrib.gis.geos import prototypes as capi
class GEOSCoordSeq(GEOSBase):
    "The internal representation of a list of coordinates inside a Geometry."
    ptr_type = CS_PTR

    #### Python 'magic' routines ####
    def __init__(self, ptr, z=False):
        "Initializes from a GEOS pointer."
        if not isinstance(ptr, CS_PTR):
            raise TypeError('Coordinate sequence should initialize with a CS_PTR.')
        self._ptr = ptr
        # z is inherited from the parent geometry; it decides whether
        # __getitem__/__setitem__ include a third (Z) ordinate.
        self._z = z

    def __iter__(self):
        "Iterates over each point in the coordinate sequence."
        for i in xrange(self.size):
            yield self[i]

    def __len__(self):
        "Returns the number of points in the coordinate sequence."
        return int(self.size)

    def __str__(self):
        "Returns the string representation of the coordinate sequence."
        return str(self.tuple)

    def __getitem__(self, index):
        "Returns the coordinate sequence value at the given index."
        coords = [self.getX(index), self.getY(index)]
        # Z is only exposed when the underlying sequence is 3-dimensional
        # AND the parent geometry is flagged 3D.
        if self.dims == 3 and self._z:
            coords.append(self.getZ(index))
        return tuple(coords)

    def __setitem__(self, index, value):
        "Sets the coordinate sequence value at the given index."
        # Checking the input value: list, tuple, or (if available) numpy array.
        if isinstance(value, (list, tuple)):
            pass
        elif numpy and isinstance(value, numpy.ndarray):
            pass
        else:
            raise TypeError('Must set coordinate with a sequence (list, tuple, or numpy array).')
        # Checking the dims of the input against this sequence's dimensionality.
        if self.dims == 3 and self._z:
            n_args = 3
            set_3d = True
        else:
            n_args = 2
            set_3d = False
        if len(value) != n_args:
            raise TypeError('Dimension of value does not match.')
        # Setting the X, Y, Z ordinates.
        self.setX(index, value[0])
        self.setY(index, value[1])
        if set_3d: self.setZ(index, value[2])

    #### Internal Routines ####
    def _checkindex(self, index):
        "Checks the given index."
        # Negative indexing is deliberately not supported here.
        sz = self.size
        if (sz < 1) or (index < 0) or (index >= sz):
            raise GEOSIndexError('invalid GEOS Geometry index: %s' % str(index))

    def _checkdim(self, dim):
        "Checks the given dimension."
        # Only ordinates 0 (X), 1 (Y), and 2 (Z) are valid.
        if dim < 0 or dim > 2:
            raise GEOSException('invalid ordinate dimension "%d"' % dim)

    #### Ordinate getting and setting routines ####
    def getOrdinate(self, dimension, index):
        "Returns the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        # NOTE(review): the double is passed as a byref out-parameter;
        # presumably the prototype's errcheck unwraps it into the return
        # value -- confirm in django.contrib.gis.geos.prototypes.
        return capi.cs_getordinate(self.ptr, index, dimension, byref(c_double()))

    def setOrdinate(self, dimension, index, value):
        "Sets the value for the given dimension and index."
        self._checkindex(index)
        self._checkdim(dimension)
        capi.cs_setordinate(self.ptr, index, dimension, value)

    def getX(self, index):
        "Get the X value at the index."
        return self.getOrdinate(0, index)

    def setX(self, index, value):
        "Set X with the value at the given index."
        self.setOrdinate(0, index, value)

    def getY(self, index):
        "Get the Y value at the given index."
        return self.getOrdinate(1, index)

    def setY(self, index, value):
        "Set Y with the value at the given index."
        self.setOrdinate(1, index, value)

    def getZ(self, index):
        "Get Z with the value at the given index."
        return self.getOrdinate(2, index)

    def setZ(self, index, value):
        "Set Z with the value at the given index."
        self.setOrdinate(2, index, value)

    ### Dimensions ###
    @property
    def size(self):
        "Returns the size of this coordinate sequence."
        return capi.cs_getsize(self.ptr, byref(c_uint()))

    @property
    def dims(self):
        "Returns the dimensions of this coordinate sequence."
        return capi.cs_getdims(self.ptr, byref(c_uint()))

    @property
    def hasz(self):
        """
        Returns whether this coordinate sequence is 3D. This property value is
        inherited from the parent Geometry.
        """
        return self._z

    ### Other Methods ###
    def clone(self):
        "Clones this coordinate sequence."
        return GEOSCoordSeq(capi.cs_clone(self.ptr), self.hasz)

    @property
    def kml(self):
        "Returns the KML representation for the coordinates."
        # Getting the substitution string depending on whether the coordinates have
        # a Z dimension.
        if self.hasz: substr = '%s,%s,%s '
        else: substr = '%s,%s,0 '
        return '<coordinates>%s</coordinates>' % \
            ''.join([substr % self[i] for i in xrange(len(self))]).strip()

    @property
    def tuple(self):
        "Returns a tuple version of this coordinate sequence."
        # A single point collapses to its own tuple rather than a 1-tuple of it.
        n = self.size
        if n == 1: return self[0]
        else: return tuple([self[i] for i in xrange(n)])
| apache-2.0 |
How2Compute/SmartHome | cli/py3env/lib/python3.4/site-packages/wheel/test/test_wheelfile.py | 327 | 4585 | import os
import wheel.install
import wheel.archive
import hashlib
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
import codecs
import zipfile
import pytest
import shutil
import tempfile
from contextlib import contextmanager
@contextmanager
def environ(key, value):
    """Temporarily set ``os.environ[key]`` to *value*.

    On exit the previous value is restored; if the variable did not exist
    before, it is removed again.
    """
    previous = os.environ.get(key)
    try:
        os.environ[key] = value
        yield
    finally:
        if previous is not None:
            os.environ[key] = previous
        else:
            del os.environ[key]
@contextmanager
def temporary_directory():
    """Yield a freshly created temp directory, deleting it on exit.

    Exists because tempfile.TemporaryDirectory is unavailable on Python 2.
    """
    path = tempfile.mkdtemp()
    try:
        yield path
    finally:
        shutil.rmtree(path)
@contextmanager
def readable_zipfile(path):
    """Open *path* as a read-only ZipFile and close it on exit.

    Exists because ZipFile is not a context manager on Python 2.
    """
    archive = zipfile.ZipFile(path, 'r')
    try:
        yield archive
    finally:
        archive.close()
def test_verifying_zipfile():
    """Reads through VerifyingZipFile must match hashes registered via
    set_expected_hash(); strict mode requires a hash for every member read."""
    if not hasattr(zipfile.ZipExtFile, '_update_crc'):
        pytest.skip('No ZIP verification. Missing ZipExtFile._update_crc.')

    sio = StringIO()
    zf = zipfile.ZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.writestr("three", b"third file")
    zf.close()

    # In default mode, VerifyingZipFile checks the hash of any read file
    # mentioned with set_expected_hash(). Files not mentioned with
    # set_expected_hash() are not checked.
    vzf = wheel.install.VerifyingZipFile(sio, 'r')
    vzf.set_expected_hash("one", hashlib.sha256(b"first file").digest())
    vzf.set_expected_hash("three", "blurble")  # deliberately wrong digest
    vzf.open("one").read()
    vzf.open("two").read()
    try:
        vzf.open("three").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")
    # In strict mode, VerifyingZipFile requires every read file to be
    # mentioned with set_expected_hash().
    vzf.strict = True
    try:
        vzf.open("two").read()
    except wheel.install.BadWheelFile:
        pass
    else:
        raise Exception("expected exception 'BadWheelFile()'")
    # A None hash explicitly marks the member as "no verification required".
    vzf.set_expected_hash("two", None)
    vzf.open("two").read()
def test_pop_zipfile():
    """pop() removes the last-written member, but only while the archive is
    still open for writing/appending."""
    sio = StringIO()
    zf = wheel.install.VerifyingZipFile(sio, 'w')
    zf.writestr("one", b"first file")
    zf.writestr("two", b"second file")
    zf.close()

    try:
        zf.pop()
    except RuntimeError:
        pass  # already closed
    else:
        raise Exception("expected RuntimeError")

    # Reopen in append mode: pop() drops "two", leaving a single member.
    zf = wheel.install.VerifyingZipFile(sio, 'a')
    zf.pop()
    zf.close()

    zf = wheel.install.VerifyingZipFile(sio, 'r')
    assert len(zf.infolist()) == 1
def test_zipfile_timestamp():
    """SOURCE_DATE_EPOCH should pin member timestamps (reproducible builds)."""
    # An environment variable can be used to influence the timestamp on
    # TarInfo objects inside the zip. See issue #143. TemporaryDirectory is
    # not a context manager under Python 2.
    with temporary_directory() as tempdir:
        for filename in ('one', 'two', 'three'):
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
        zip_base_name = os.path.join(tempdir, 'dummy')
        # The earliest date representable in TarInfos, 1980-01-01
        with environ('SOURCE_DATE_EPOCH', '315576060'):
            zip_filename = wheel.archive.make_wheelfile_inner(
                zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for info in zf.infolist():
                # Only year/month/day are asserted; time-of-day may vary
                # with the local timezone of the build host.
                assert info.date_time[:3] == (1980, 1, 1)
def test_zipfile_attributes():
    """Members written via writestr() must carry POSIX modes and compression."""
    # With the change from ZipFile.write() to .writestr(), we need to manually
    # set member attributes.
    with temporary_directory() as tempdir:
        files = (('foo', 0o644), ('bar', 0o755))
        for filename, mode in files:
            path = os.path.join(tempdir, filename)
            with codecs.open(path, 'w', encoding='utf-8') as fp:
                fp.write(filename + '\n')
            os.chmod(path, mode)
        zip_base_name = os.path.join(tempdir, 'dummy')
        zip_filename = wheel.archive.make_wheelfile_inner(
            zip_base_name, tempdir)
        with readable_zipfile(zip_filename) as zf:
            for filename, mode in files:
                info = zf.getinfo(os.path.join(tempdir, filename))
                # external_attr packs the POSIX mode (S_IFREG | mode) into
                # the high 16 bits of the member attributes.
                assert info.external_attr == (mode | 0o100000) << 16
                assert info.compress_type == zipfile.ZIP_DEFLATED
| mit |
mmnelemane/nova | nova/tests/unit/api/openstack/compute/test_floating_ip_pools.py | 35 | 3017 | # Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack.compute import floating_ip_pools \
as fipp_v21
from nova.api.openstack.compute.legacy_v2.contrib import floating_ip_pools \
as fipp_v2
from nova import context
from nova import exception
from nova import network
from nova import test
from nova.tests.unit.api.openstack import fakes
def fake_get_floating_ip_pools(self, context):
    """Stub for network.api.API.get_floating_ip_pools.

    Always returns the same two pool names so the controller tests are
    deterministic and need no real network backend.
    """
    return ['nova', 'other']
class FloatingIpPoolTestV21(test.NoDBTestCase):
    """Tests for the v2.1 os-floating-ip-pools API controller."""
    # Subclasses override this to exercise other controller implementations.
    floating_ip_pools = fipp_v21

    def setUp(self):
        super(FloatingIpPoolTestV21, self).setUp()
        # Stub the network API so no real backend is contacted.
        self.stubs.Set(network.api.API, "get_floating_ip_pools",
                       fake_get_floating_ip_pools)
        self.context = context.RequestContext('fake', 'fake')
        self.controller = self.floating_ip_pools.FloatingIPPoolsController()
        self.req = fakes.HTTPRequest.blank('')

    def test_translate_floating_ip_pools_view(self):
        """The view helper wraps each pool name in a {'name': ...} dict."""
        pools = fake_get_floating_ip_pools(None, self.context)
        view = self.floating_ip_pools._translate_floating_ip_pools_view(pools)
        self.assertIn('floating_ip_pools', view)
        self.assertEqual(view['floating_ip_pools'][0]['name'],
                         pools[0])
        self.assertEqual(view['floating_ip_pools'][1]['name'],
                         pools[1])

    def test_floating_ips_pools_list(self):
        """index() returns every pool reported by the (stubbed) network API."""
        res_dict = self.controller.index(self.req)
        pools = fake_get_floating_ip_pools(None, self.context)
        response = {'floating_ip_pools': [{'name': name} for name in pools]}
        self.assertEqual(res_dict, response)
class FloatingIpPoolTestV2(FloatingIpPoolTestV21):
    # Re-run the v2.1 test cases against the legacy v2 extension controller.
    floating_ip_pools = fipp_v2
class FloatingIPPoolsPolicyEnforcementV21(test.NoDBTestCase):
    """Verifies the policy rule guarding the floating-ip-pools API."""

    def setUp(self):
        super(FloatingIPPoolsPolicyEnforcementV21, self).setUp()
        self.controller = fipp_v21.FloatingIPPoolsController()
        self.req = fakes.HTTPRequest.blank('')

    def test_change_password_policy_failed(self):
        # NOTE(review): the method name says "change_password" but the body
        # exercises the floating-ip-pools index policy -- looks like a
        # copy/paste leftover from another test module.
        rule_name = "os_compute_api:os-floating-ip-pools"
        rule = {rule_name: "project:non_fake"}
        self.policy.set_rules(rule)
        exc = self.assertRaises(
            exception.PolicyNotAuthorized, self.controller.index, self.req)
        self.assertEqual(
            "Policy doesn't allow %s to be performed." %
            rule_name, exc.format_message())
| apache-2.0 |
PeterDing/shadowsocks | shadowsocks/crypto/util.py | 1032 | 4287 | #!/usr/bin/env python
#
# Copyright 2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import logging
def find_library_nt(name):
    """Return every file on %PATH% matching *name*, with or without '.dll'.

    Unlike ctypes.util.find_library, which returns only the first hit, this
    collects all candidates so callers can try them all (Windows users may
    have both 32-bit and 64-bit builds installed).
    """
    found = []
    for directory in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(directory, name)
        if os.path.isfile(candidate):
            found.append(candidate)
        # If the name did not already end in ".dll", also try with the
        # suffix appended.
        if not candidate.lower().endswith(".dll"):
            candidate = candidate + ".dll"
            if os.path.isfile(candidate):
                found.append(candidate)
    return found
def find_library(possible_lib_names, search_symbol, library_name):
    """Locate and load a shared library that exports *search_symbol*.

    possible_lib_names may be a single name or a list/tuple of names; each
    candidate is also tried with a 'lib' prefix. When ctypes.util.find_library
    yields nothing (e.g. insufficient privileges for its underlying tools on
    Linux), common install paths are globbed as a fallback. Returns the loaded
    ctypes.CDLL, or None when no candidate exports the symbol.
    """
    import ctypes.util
    from ctypes import CDLL

    paths = []

    if type(possible_lib_names) not in (list, tuple):
        possible_lib_names = [possible_lib_names]

    lib_names = []
    for lib_name in possible_lib_names:
        lib_names.append(lib_name)
        lib_names.append('lib' + lib_name)

    for name in lib_names:
        if os.name == "nt":
            paths.extend(find_library_nt(name))
        else:
            path = ctypes.util.find_library(name)
            if path:
                paths.append(path)

    if not paths:
        # We may get here when find_library fails because, for example,
        # the user does not have sufficient privileges to access those
        # tools underlying find_library on linux.
        import glob

        for name in lib_names:
            patterns = [
                '/usr/local/lib*/lib%s.*' % name,
                '/usr/lib*/lib%s.*' % name,
                'lib%s.*' % name,
                '%s.dll' % name]

            for pat in patterns:
                files = glob.glob(pat)
                if files:
                    paths.extend(files)
    for path in paths:
        try:
            lib = CDLL(path)
            if hasattr(lib, search_symbol):
                logging.info('loading %s from %s', library_name, path)
                return lib
            else:
                # Fix: logging.warn is a deprecated alias of logging.warning.
                logging.warning('can\'t find symbol %s in %s', search_symbol,
                                path)
        except Exception:
            # Candidates that fail to load are skipped; the next path is tried.
            pass
    return None
def run_cipher(cipher, decipher):
    """Benchmark a cipher/decipher pair on ~16MB of random data.

    Encrypts and then decrypts the data in randomly sized chunks, prints the
    throughput, and asserts the round trip reproduces the plaintext exactly.
    """
    from os import urandom
    import random
    import time

    BLOCK_SIZE = 16384
    rounds = 1 * 1024
    plain = urandom(BLOCK_SIZE * rounds)

    print('test start')
    start = time.time()

    # Encrypt in random-sized chunks to exercise arbitrary update() boundaries.
    encrypted_chunks = []
    offset = 0
    while offset < len(plain):
        step = random.randint(100, 32768)
        encrypted_chunks.append(cipher.update(plain[offset:offset + step]))
        offset += step
    encrypted = b''.join(encrypted_chunks)

    # Decrypt with a fresh, independent random chunking.
    decrypted_chunks = []
    offset = 0
    while offset < len(plain):
        step = random.randint(100, 32768)
        decrypted_chunks.append(decipher.update(encrypted[offset:offset + step]))
        offset += step

    end = time.time()
    print('speed: %d bytes/s' % (BLOCK_SIZE * rounds / (end - start)))
    assert b''.join(decrypted_chunks) == plain
def test_find_library():
    """Smoke tests: string/list/tuple name inputs, fallbacks, and misses."""
    assert find_library('c', 'strcpy', 'libc') is not None
    assert find_library(['c'], 'strcpy', 'libc') is not None
    assert find_library(('c',), 'strcpy', 'libc') is not None
    # 'eay32' covers the legacy Windows OpenSSL DLL name.
    assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate',
                        'libcrypto') is not None
    assert find_library('notexist', 'strcpy', 'libnotexist') is None
    assert find_library('c', 'symbol_not_exist', 'c') is None
    # Later tuple entries are still tried after earlier misses.
    assert find_library(('notexist', 'c', 'crypto', 'eay32'),
                        'EVP_CipherUpdate', 'libc') is not None


if __name__ == '__main__':
    test_find_library()
| apache-2.0 |
FluidReview/djangosaml2 | djangosaml2/urls.py | 4 | 1241 | # Copyright (C) 2010-2012 Yaco Sistemas (http://www.yaco.es)
# Copyright (C) 2009 Lorenzo Gil Sanchez <lorenzo.gil.sanchez@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from django.conf.urls import patterns, handler500, url
# Fallback for Django versions < 1.4
except ImportError:
from django.conf.urls.defaults import patterns, handler500, url
# SAML2 service-provider endpoints. String view names rely on the
# pre-Django-1.8 patterns() prefix style imported above.
urlpatterns = patterns(
    'djangosaml2.views',
    url(r'^login/$', 'login', name='saml2_login'),
    url(r'^acs/$', 'assertion_consumer_service', name='saml2_acs'),
    url(r'^logout/$', 'logout', name='saml2_logout'),
    url(r'^ls/$', 'logout_service', name='saml2_ls'),
    url(r'^metadata/$', 'metadata', name='saml2_metadata'),
)

# Self-assignment presumably re-exports the imported default 500 handler as
# this module's handler500 -- confirm against djangosaml2's public API.
handler500 = handler500
| apache-2.0 |
linuxdeepin/deepin-ui | dtk/ui/listview_preview_pixbuf.py | 1 | 2844 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2011 ~ 2012 Deepin, Inc.
# 2011 ~ 2012 Wang Yong
#
# Author: Wang Yong <lazycat.manatee@gmail.com>
# Maintainer: Wang Yong <lazycat.manatee@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from utils import get_content_size
import cairo
import gtk
import pango
import sys
# Below import must at end, otherwise will got ImportError
from draw import draw_vlinear, draw_text
def render_pixbuf(widget, event, input_args):
    '''
    Render and save pixbuf.

    Draws a vertical-gradient background with the selection number centered on
    top into the widget's window, captures the result as a PNG at `filepath`,
    then quits the GTK main loop.

    @param widget: Gtk.Widget instance.
    @param event: Expose event.
    @param input_args: Input arguments as format: (select_num, vlinear_color, text_color, filepath).
    '''
    # Init.
    (select_num, vlinear_color, text_color, filepath) = input_args
    cr = widget.window.cairo_create()
    rect = widget.allocation
    num_pixbuf = gtk.gdk.Pixbuf(gtk.gdk.COLORSPACE_RGB, True, 8, rect.width, rect.height)

    # Draw background.
    cr.set_operator(cairo.OPERATOR_OVER)
    # SECURITY NOTE(review): vlinear_color originates from sys.argv and is
    # passed to eval(), so a hostile caller can execute arbitrary code.
    # Consider ast.literal_eval if the value is always a literal color spec.
    draw_vlinear(cr, rect.x, rect.y, rect.width, rect.height, eval(vlinear_color))

    # Draw text.
    draw_text(cr, select_num, rect.x, rect.y, rect.width, rect.height, text_color=text_color,
              alignment=pango.ALIGN_CENTER)

    # Render pixbuf from drawing area and write it out as PNG.
    num_pixbuf.get_from_drawable(
        widget.window, widget.get_colormap(), 0, 0, 0, 0,
        rect.width, rect.height).save(filepath, "png")

    # Exit after generate png file.
    gtk.main_quit()
if __name__ == "__main__":
    # Get input arguments: (select_num, vlinear_color, text_color, filepath).
    input_args = sys.argv[1::]
    (select_num, vlinear_color, text_color, filepath) = input_args

    # Init: size the pixbuf to the rendered text plus padding.
    num_padding_x = 8
    num_padding_y = 1
    (num_width, num_height) = get_content_size(select_num)
    pixbuf_width = num_width + num_padding_x * 2
    pixbuf_height = num_height + num_padding_y * 2

    # Create an undecorated popup window; the RGBA colormap presumably keeps
    # alpha for the saved PNG -- confirm on the target GTK build.
    window = gtk.Window(gtk.WINDOW_POPUP)
    window.set_colormap(gtk.gdk.Screen().get_rgba_colormap())
    window.move(-pixbuf_width, -pixbuf_height) # move out of screen
    window.set_default_size(pixbuf_width, pixbuf_height)
    # The actual rendering and PNG export happen on the first expose event.
    window.connect(
        "expose-event",
        lambda w, e: render_pixbuf(w, e, input_args))
    window.show_all()
    gtk.main()
| gpl-3.0 |
ellisonbg/talk-2014-strata-sc | ipythonproject.py | 4 | 1139 | from IPython.display import HTML, display
# (name, photo filename) pairs for the contributor grid.
devs = [
    ('Fernando Perez', 'fperez.jpg'),
    ('Brian Granger', 'ellisonbg.jpg'),
    ('Min Ragan-Kelley', 'minrk.jpg'),
    ('Thomas Kluyver', 'takluyver.jpg'),
    ('Matthias Bussonnier', 'matthias.jpg'),
    ('Jonathan Frederic', 'jdfreder.jpg'),
    ('Paul Ivanov', 'ivanov.jpg'),
    ('Evan Patterson', 'epatters.jpg'),
    ('Damian Avila', 'damianavila.jpg'),
    ('Brad Froehle', 'brad.jpg'),
    ('Zach Sailer', 'zsailer.jpg'),
    ('Robert Kern', 'rkern.jpg'),
    ('Jorgen Stenarson', 'jorgen.jpg'),
    ('Jonathan March', 'jdmarch.jpg'),
    ('Kyle Kelley', 'rgbkrk.jpg')
]


def chunks(l, n):
    """Split l into consecutive pieces of length n (the last may be shorter)."""
    return [l[start:start + n] for start in range(0, len(l), n)]


# Build the HTML table: one <tr> per group of four contributors, one <td>
# (photo + caption) per person.
_rows = []
for row in chunks(devs, 4):
    cells = []
    for person in row:
        cell = "<td>"
        cell += '<img src="ipythonteam/{image}" style="height: 150px; text-align: center; margin-left: auto; margin-right: auto;"/>'.format(image=person[1])
        cell += '<h3 style="text-align: center;">{name}</h3>'.format(name=person[0])
        cell += "</td>"
        cells.append(cell)
    _rows.append("<tr>" + "".join(cells) + "</tr>")
s = "<table>" + "".join(_rows) + "</table>"
def core_devs():
    """Display the contributor grid (the module-level `s` HTML table) inline."""
    display(HTML(s))
google/vimdoc | tests/module_tests.py | 1 | 6020 | import unittest
import vimdoc
from vimdoc.block import Block
from vimdoc import error
from vimdoc import module
class TestVimModule(unittest.TestCase):
  """Tests for vimdoc.module: section merging, ordering, and validation."""

  def test_section(self):
    """A merged section is emitted by Chunks() after Close()."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    intro = Block(vimdoc.SECTION)
    intro.Local(name='Introduction', id='intro')
    main_module.Merge(intro)
    main_module.Close()
    self.assertEqual([intro], list(main_module.Chunks()))

  def test_duplicate_section(self):
    """Merging two sections with the same id raises DuplicateSection."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    intro = Block(vimdoc.SECTION)
    intro.Local(name='Introduction', id='intro')
    main_module.Merge(intro)
    intro2 = Block(vimdoc.SECTION)
    intro2.Local(name='Intro', id='intro')
    with self.assertRaises(error.DuplicateSection) as cm:
      main_module.Merge(intro2)
    self.assertEqual(('Duplicate section intro defined.',), cm.exception.args)

  def test_default_section_ordering(self):
    """Sections should be ordered according to documented built-in ordering."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    intro = Block(vimdoc.SECTION)
    intro.Local(name='Introduction', id='intro')
    commands = Block(vimdoc.SECTION)
    commands.Local(name='Commands', id='commands')
    about = Block(vimdoc.SECTION)
    about.Local(name='About', id='about')
    # Merge in arbitrary order.
    main_module.Merge(commands)
    main_module.Merge(about)
    main_module.Merge(intro)
    main_module.Close()
    self.assertEqual([intro, commands, about], list(main_module.Chunks()))

  def test_manual_section_ordering(self):
    """Sections should be ordered according to explicitly configured order."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    intro = Block(vimdoc.SECTION)
    intro.Local(name='Introduction', id='intro')
    # Configure explicit order.
    intro.Global(order=['commands', 'about', 'intro'])
    commands = Block(vimdoc.SECTION)
    commands.Local(name='Commands', id='commands')
    about = Block(vimdoc.SECTION)
    about.Local(name='About', id='about')
    # Merge in arbitrary order.
    main_module.Merge(commands)
    main_module.Merge(about)
    main_module.Merge(intro)
    main_module.Close()
    self.assertEqual([commands, about, intro], list(main_module.Chunks()))

  def test_child_sections(self):
    """Sections should be ordered after their parents."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    first = Block(vimdoc.SECTION)
    first.Local(name='Section 1', id='first')
    # Configure explicit order.
    first.Global(order=['first', 'second', 'third'])
    second = Block(vimdoc.SECTION)
    second.Local(name='Section 2', id='second')
    third = Block(vimdoc.SECTION)
    third.Local(name='Section 3', id='third')
    child11 = Block(vimdoc.SECTION)
    child11.Local(name='child11', id='child11', parent_id='first')
    child12 = Block(vimdoc.SECTION)
    child12.Local(name='child12', id='child12', parent_id='first')
    child21 = Block(vimdoc.SECTION)
    child21.Local(name='child21', id='child21', parent_id='second')
    # Merge in arbitrary order.
    for m in [second, child12, third, child11, first, child21]:
      main_module.Merge(m)
    main_module.Close()
    # Children follow their parent, in their own merge-independent order.
    self.assertEqual(
        [first, child11, child12, second, child21, third],
        list(main_module.Chunks()))

  def test_missing_parent(self):
    """Parent sections should exist."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    first = Block(vimdoc.SECTION)
    first.Local(name='Section 1', id='first')
    second = Block(vimdoc.SECTION)
    second.Local(name='Section 2', id='second', parent_id='missing')
    main_module.Merge(first)
    main_module.Merge(second)
    # The dangling parent reference is only detected at Close() time.
    with self.assertRaises(error.NoSuchParentSection) as cm:
      main_module.Close()
    expected = (
        'Section Section 2 has non-existent parent missing. '
        'Try setting the id of the parent section explicitly.')
    self.assertEqual((expected,), cm.exception.args)

  def test_ordered_child(self):
    """Child sections should not be included in @order."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    first = Block(vimdoc.SECTION)
    first.Local(name='Section 1', id='first')
    second = Block(vimdoc.SECTION)
    second.Local(name='Section 2', id='second', parent_id='first')
    first.Global(order=['first', 'second'])
    main_module.Merge(first)
    main_module.Merge(second)
    with self.assertRaises(error.OrderedChildSections) as cm:
      main_module.Close()
    self.assertEqual(("Child section second included in ordering ['first', 'second'].",), cm.exception.args)

  def test_partial_ordering(self):
    """Always respect explicit order and prefer built-in ordering.

    Undeclared built-in sections will be inserted into explicit order according
    to default built-in ordering. The about section should come after custom
    sections unless explicitly ordered."""
    plugin = module.VimPlugin('myplugin')
    main_module = module.Module('myplugin', plugin)
    intro = Block(vimdoc.SECTION)
    intro.Local(name='Introduction', id='intro')
    # Configure explicit order.
    intro.Global(order=['custom1', 'intro', 'custom2'])
    commands = Block(vimdoc.SECTION)
    commands.Local(name='Commands', id='commands')
    about = Block(vimdoc.SECTION)
    about.Local(name='About', id='about')
    custom1 = Block(vimdoc.SECTION)
    custom1.Local(name='Custom1', id='custom1')
    custom2 = Block(vimdoc.SECTION)
    custom2.Local(name='Custom2', id='custom2')
    # Merge in arbitrary order.
    for section in [commands, custom2, about, intro, custom1]:
      main_module.Merge(section)
    main_module.Close()
    self.assertEqual([custom1, intro, commands, custom2, about],
                     list(main_module.Chunks()))
| apache-2.0 |
zaccoz/odoo | addons/purchase_double_validation/__init__.py | 441 | 1090 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import purchase_double_validation_installer
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
MatthewWilkes/mw4068-packaging | src/melange/src/soc/modules/ghop/views/models/mentor.py | 1 | 3838 | #!/usr/bin/env python2.5
#
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GHOP specific views for Organization Mentors.
"""
__authors__ = [
'"Madhusudan.C.S" <madhusudancs@gmail.com>'
]
from soc.logic import dicts
from soc.views.helper import decorators
from soc.views.models import mentor
import soc.cache.logic
from soc.modules.ghop.logic.models import mentor as ghop_mentor_logic
from soc.modules.ghop.logic.models import organization as ghop_org_logic
from soc.modules.ghop.logic.models import org_admin as ghop_org_admin_logic
from soc.modules.ghop.logic.models import student as ghop_student_logic
from soc.modules.ghop.views.helper import access as ghop_access
from soc.modules.ghop.views.models import organization as ghop_org_view
import soc.modules.ghop.logic.models.mentor
class View(mentor.View):
"""View methods for the GHOP Mentor model.
"""
def __init__(self, params=None):
"""Defines the fields and methods required for the mentor View class
to provide the user with list, public, create, edit and delete views.
Params:
params: a dict with params for this View
"""
rights = ghop_access.GHOPChecker(params)
rights['create'] = ['checkIsDeveloper']
rights['edit'] = [('checkIsMyActiveRole', ghop_mentor_logic.logic)]
rights['delete'] = ['checkIsDeveloper']
rights['invite'] = [('checkHasRoleForScope',
ghop_org_admin_logic.logic)]
rights['accept_invite'] = [
('checkIsMyRequestWithStatus', [['group_accepted']]),
('checkIsNotStudentForProgramOfOrgInRequest',
[ghop_org_logic.logic, ghop_student_logic.logic])]
rights['request'] = [
('checkIsNotStudentForProgramOfOrg',
[ghop_org_logic.logic, ghop_student_logic.logic]),
('checkCanMakeRequestToGroup', ghop_org_logic.logic)]
rights['process_request'] = [
('checkCanProcessRequest', [[ghop_org_admin_logic.logic]])]
rights['manage'] = [
('checkIsAllowedToManageRole', [ghop_mentor_logic.logic,
ghop_org_admin_logic.logic])]
new_params = {}
new_params['logic'] = soc.modules.ghop.logic.models.mentor.logic
new_params['group_logic'] = ghop_org_logic.logic
new_params['group_view'] = ghop_org_view.view
new_params['rights'] = rights
new_params['scope_view'] = ghop_org_view
new_params['name'] = "GHOP Mentor"
new_params['module_name'] = "mentor"
new_params['sidebar_grouping'] = 'Organizations'
new_params['module_package'] = 'soc.modules.ghop.views.models'
new_params['url_name'] = 'ghop/mentor'
new_params['role'] = 'ghop/mentor'
params = dicts.merge(params, new_params, sub_merge=True)
super(View, self).__init__(params=params)
view = View()
accept_invite = decorators.view(view.acceptInvite)
admin = decorators.view(view.admin)
create = decorators.view(view.create)
delete = decorators.view(view.delete)
edit = decorators.view(view.edit)
invite = decorators.view(view.invite)
list = decorators.view(view.list)
manage = decorators.view(view.manage)
process_request = decorators.view(view.processRequest)
role_request = decorators.view(view.request)
public = decorators.view(view.public)
export = decorators.view(view.export)
pick = decorators.view(view.pick)
| apache-2.0 |
zhoupeng/spice4xen | tools/python/logging/logging-0.4.9.2/test/log_test10.py | 42 | 2847 | #!/usr/bin/env python
#
# Copyright 2001-2002 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file is part of the Python logging distribution. See
# http://www.red-dove.com/python_logging.html
#
"""Test harness for the logging module. Shows use of a user-defined Logger subclass.
Copyright (C) 2001-2002 Vinay Sajip. All Rights Reserved.
"""
import sys
import locale
locale.setlocale(locale.LC_ALL, '')
from logging import *
LOG_FORMAT = "%(asctime)s %(levelname)-5s %(message)s"
DATE_FORMAT = "%x %X"
class MyLogger(Logger):
"""
A simple example of a logger extension.
"""
def debug(self, msg, *args, **kwargs):
"""
This overridden method passes exception information for DEBUG level calls
"""
if self.manager.disable >= DEBUG:
return
if DEBUG >= self.getEffectiveLevel():
exc_info = kwargs.get("exc_info", 0)
ei = None
if exc_info:
ei = sys.exc_info()
if not ei[1]:
ei = None
self._log(DEBUG, msg, args, ei)
del ei
class NotALogger:
pass
def config():
try:
setLoggerClass(NotALogger)
except Exception, e:
sys.stderr.write("%s\n" % e)
setLoggerClass(MyLogger)
if __name__ == "__main__":
basicConfig()
if __name__ == "__main__":
getLogger("").handlers[0].setFormatter(Formatter(LOG_FORMAT, DATE_FORMAT))
def run():
getLogger("").setLevel(DEBUG)
logger = getLogger("mylogger")
logger.info("Starting...")
logger.debug("Debug message not in exception handler (no traceback)")
logger.info("About to throw exception...")
try:
print "7" + 4
except Exception, e:
logger.debug("Debug message inside exception handler (traceback)",exc_info=1)
logger.info("Done.")
def main():
config()
run()
if __name__ == "__main__":
main()
| gpl-2.0 |
stonebig/numba | numba/tests/test_analysis.py | 5 | 32162 | # Tests numba.analysis functions
import collections
import types as pytypes
import numpy as np
from numba.core.compiler import compile_isolated, run_frontend, Flags, StateDict
from numba import jit, njit
from numba.core import types, errors, ir, rewrites, ir_utils, utils, cpu
from numba.core import postproc
from numba.core.inline_closurecall import InlineClosureCallPass
from numba.tests.support import TestCase, MemoryLeakMixin, SerialMixin
from numba.core.analysis import dead_branch_prune, rewrite_semantic_constants
_GLOBAL = 123
enable_pyobj_flags = Flags()
enable_pyobj_flags.enable_pyobject = True
def compile_to_ir(func):
func_ir = run_frontend(func)
state = StateDict()
state.func_ir = func_ir
state.typemap = None
state.calltypes = None
# call this to get print etc rewrites
rewrites.rewrite_registry.apply('before-inference', state)
return func_ir
class TestBranchPruneBase(MemoryLeakMixin, TestCase):
"""
Tests branch pruning
"""
_DEBUG = False
# find *all* branches
def find_branches(self, the_ir):
branches = []
for blk in the_ir.blocks.values():
tmp = [_ for _ in blk.find_insts(cls=ir.Branch)]
branches.extend(tmp)
return branches
def assert_prune(self, func, args_tys, prune, *args, **kwargs):
# This checks that the expected pruned branches have indeed been pruned.
# func is a python function to assess
# args_tys is the numba types arguments tuple
# prune arg is a list, one entry per branch. The value in the entry is
# encoded as follows:
# True: using constant inference only, the True branch will be pruned
# False: using constant inference only, the False branch will be pruned
# None: under no circumstances should this branch be pruned
# *args: the argument instances to pass to the function to check
# execution is still valid post transform
# **kwargs:
# - flags: compiler.Flags instance to pass to `compile_isolated`,
# permits use of e.g. object mode
func_ir = compile_to_ir(func)
before = func_ir.copy()
if self._DEBUG:
print("=" * 80)
print("before inline")
func_ir.dump()
# run closure inlining to ensure that nonlocals in closures are visible
inline_pass = InlineClosureCallPass(func_ir,
cpu.ParallelOptions(False),)
inline_pass.run()
# Remove all Dels, and re-run postproc
post_proc = postproc.PostProcessor(func_ir)
post_proc.run()
rewrite_semantic_constants(func_ir, args_tys)
if self._DEBUG:
print("=" * 80)
print("before prune")
func_ir.dump()
dead_branch_prune(func_ir, args_tys)
after = func_ir
if self._DEBUG:
print("after prune")
func_ir.dump()
before_branches = self.find_branches(before)
self.assertEqual(len(before_branches), len(prune))
# what is expected to be pruned
expect_removed = []
for idx, prune in enumerate(prune):
branch = before_branches[idx]
if prune is True:
expect_removed.append(branch.truebr)
elif prune is False:
expect_removed.append(branch.falsebr)
elif prune is None:
pass # nothing should be removed!
elif prune == 'both':
expect_removed.append(branch.falsebr)
expect_removed.append(branch.truebr)
else:
assert 0, "unreachable"
# compare labels
original_labels = set([_ for _ in before.blocks.keys()])
new_labels = set([_ for _ in after.blocks.keys()])
# assert that the new labels are precisely the original less the
# expected pruned labels
try:
self.assertEqual(new_labels, original_labels - set(expect_removed))
except AssertionError as e:
print("new_labels", sorted(new_labels))
print("original_labels", sorted(original_labels))
print("expect_removed", sorted(expect_removed))
raise e
supplied_flags = kwargs.pop('flags', False)
compiler_kws = {'flags': supplied_flags} if supplied_flags else {}
cres = compile_isolated(func, args_tys, **compiler_kws)
if args is None:
res = cres.entry_point()
expected = func()
else:
res = cres.entry_point(*args)
expected = func(*args)
self.assertEqual(res, expected)
class TestBranchPrune(TestBranchPruneBase, SerialMixin):
def test_single_if(self):
def impl(x):
if 1 == 0:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
def impl(x):
if 1 == 1:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
def impl(x):
if x is None:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)
def impl(x):
if x == 10:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
def impl(x):
if x == 10:
z = 3.14159 # noqa: F841 # no effect
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
# TODO: cannot handle this without const prop
# def impl(x):
# z = None
# y = z
# if x == y:
# print("x is 10")
# self.assert_prune(impl, (types.NoneType('none'),), [None], None)
# self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
def test_single_if_else(self):
def impl(x):
if x is None:
return 3.14159
else:
return 1.61803
self.assert_prune(impl, (types.NoneType('none'),), [False], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True], 10)
def test_single_if_const_val(self):
def impl(x):
if x == 100:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def impl(x):
# switch the condition order
if 100 == x:
return 3.14159
self.assert_prune(impl, (types.NoneType('none'),), [True], None)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def test_single_if_else_two_const_val(self):
def impl(x, y):
if x == y:
return 3.14159
else:
return 1.61803
self.assert_prune(impl, (types.IntegerLiteral(100),) * 2, [None], 100,
100)
self.assert_prune(impl, (types.NoneType('none'),) * 2, [False], None,
None)
self.assert_prune(impl, (types.IntegerLiteral(100),
types.NoneType('none'),), [True], 100, None)
self.assert_prune(impl, (types.IntegerLiteral(100),
types.IntegerLiteral(1000)), [None], 100, 1000)
def test_single_if_else_w_following_undetermined(self):
def impl(x):
x_is_none_work = False
if x is None:
x_is_none_work = True
else:
dead = 7 # noqa: F841 # no effect
if x_is_none_work:
y = 10
else:
y = -3
return y
self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def impl(x):
x_is_none_work = False
if x is None:
x_is_none_work = True
else:
pass # force the True branch exit to be on backbone
if x_is_none_work:
y = 10
else:
y = -3
return y
self.assert_prune(impl, (types.NoneType('none'),), [None, None], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def test_double_if_else_rt_const(self):
def impl(x):
one_hundred = 100
x_is_none_work = 4
if x is None:
x_is_none_work = 100
else:
dead = 7 # noqa: F841 # no effect
if x_is_none_work == one_hundred:
y = 10
else:
y = -3
return y, x_is_none_work
self.assert_prune(impl, (types.NoneType('none'),), [False, None], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, None], 10)
def test_double_if_else_non_literal_const(self):
def impl(x):
one_hundred = 100
if x == one_hundred:
y = 3.14159
else:
y = 1.61803
return y
# no prune as compilation specialization on literal value not permitted
self.assert_prune(impl, (types.IntegerLiteral(10),), [None], 10)
self.assert_prune(impl, (types.IntegerLiteral(100),), [None], 100)
def test_single_two_branches_same_cond(self):
def impl(x):
if x is None:
y = 10
else:
y = 40
if x is not None:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)
def test_cond_is_kwarg_none(self):
def impl(x=None):
if x is None:
y = 10
else:
y = 40
if x is not None:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.Omitted(None),),
[False, True], None)
self.assert_prune(impl, (types.NoneType('none'),), [False, True], None)
self.assert_prune(impl, (types.IntegerLiteral(10),), [True, False], 10)
def test_cond_is_kwarg_value(self):
def impl(x=1000):
if x == 1000:
y = 10
else:
y = 40
if x != 1000:
z = 100
else:
z = 400
return z, y
self.assert_prune(impl, (types.Omitted(1000),), [None, None], 1000)
self.assert_prune(impl, (types.IntegerLiteral(1000),), [None, None],
1000)
self.assert_prune(impl, (types.IntegerLiteral(0),), [None, None], 0)
self.assert_prune(impl, (types.NoneType('none'),), [True, False], None)
def test_cond_rewrite_is_correct(self):
# this checks that when a condition is replaced, it is replace by a
# true/false bit that correctly represents the evaluated condition
def fn(x):
if x is None:
return 10
return 12
def check(func, arg_tys, bit_val):
func_ir = compile_to_ir(func)
# check there is 1 branch
before_branches = self.find_branches(func_ir)
self.assertEqual(len(before_branches), 1)
# check the condition in the branch is a binop
pred_var = before_branches[0].cond
pred_defn = ir_utils.get_definition(func_ir, pred_var)
self.assertEqual(pred_defn.op, 'call')
condition_var = pred_defn.args[0]
condition_op = ir_utils.get_definition(func_ir, condition_var)
self.assertEqual(condition_op.op, 'binop')
# do the prune, this should kill the dead branch and rewrite the
#'condition to a true/false const bit
if self._DEBUG:
print("=" * 80)
print("before prune")
func_ir.dump()
dead_branch_prune(func_ir, arg_tys)
if self._DEBUG:
print("=" * 80)
print("after prune")
func_ir.dump()
# after mutation, the condition should be a const value `bit_val`
new_condition_defn = ir_utils.get_definition(func_ir, condition_var)
self.assertTrue(isinstance(new_condition_defn, ir.Const))
self.assertEqual(new_condition_defn.value, bit_val)
check(fn, (types.NoneType('none'),), 1)
check(fn, (types.IntegerLiteral(10),), 0)
def test_obj_mode_fallback(self):
# see issue #3879, this checks that object mode fall back doesn't suffer
# from the IR mutation
@jit
def bug(a, b):
if a.ndim == 1:
if b is None:
return dict()
return 12
return []
self.assertEqual(bug(np.zeros(10), 4), 12)
self.assertEqual(bug(np.arange(10), None), dict())
self.assertEqual(bug(np.arange(10).reshape((2, 5)), 10), [])
self.assertEqual(bug(np.arange(10).reshape((2, 5)), None), [])
self.assertFalse(bug.nopython_signatures)
def test_global_bake_in(self):
def impl(x):
if _GLOBAL == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1)
global _GLOBAL
tmp = _GLOBAL
try:
_GLOBAL = 5
def impl(x):
if _GLOBAL == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1)
finally:
_GLOBAL = tmp
def test_freevar_bake_in(self):
_FREEVAR = 123
def impl(x):
if _FREEVAR == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [False], 1)
_FREEVAR = 12
def impl(x):
if _FREEVAR == 123:
return x
else:
return x + 10
self.assert_prune(impl, (types.IntegerLiteral(1),), [True], 1)
def test_redefined_variables_are_not_considered_in_prune(self):
# see issue #4163, checks that if a variable that is an argument is
# redefined in the user code it is not considered const
def impl(array, a=None):
if a is None:
a = 0
if a < 0:
return 10
return 30
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'),),
[None, None],
np.zeros((2, 3)), None)
def test_comparison_operators(self):
# see issue #4163, checks that a variable that is an argument and has
# value None survives TypeError from invalid comparison which should be
# dead
def impl(array, a=None):
x = 0
if a is None:
return 10 # dynamic exec would return here
# static analysis requires that this is executed with a=None,
# hence TypeError
if a < 0:
return 20
return x
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'),),
[False, 'both'],
np.zeros((2, 3)), None)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64,),
[None, None],
np.zeros((2, 3)), 12.)
def test_redefinition_analysis_same_block(self):
# checks that a redefinition in a block with prunable potential doesn't
# break
def impl(array, x, a=None):
b = 0
if x < 4:
b = 12
if a is None:
a = 0
else:
b = 12
if a < 0:
return 10
return 30 + b + a
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none'),),
[None, None, None],
np.zeros((2, 3)), 1., None)
def test_redefinition_analysis_different_block_can_exec(self):
# checks that a redefinition in a block that may be executed prevents
# pruning
def impl(array, x, a=None):
b = 0
if x > 5:
a = 11 # a redefined, cannot tell statically if this will exec
if x < 4:
b = 12
if a is None: # cannot prune, cannot determine if re-defn occurred
b += 5
else:
b += 7
if a < 0:
return 10
return 30 + b
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none'),),
[None, None, None, None],
np.zeros((2, 3)), 1., None)
def test_redefinition_analysis_different_block_cannot_exec(self):
# checks that a redefinition in a block guarded by something that
# has prune potential
def impl(array, x=None, a=None):
b = 0
if x is not None:
a = 11
if a is None:
b += 5
else:
b += 7
return 30 + b
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'), types.NoneType('none')),
[True, None],
np.zeros((2, 3)), None, None)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.NoneType('none'), types.float64),
[True, None],
np.zeros((2, 3)), None, 1.2)
self.assert_prune(impl,
(types.Array(types.float64, 2, 'C'),
types.float64, types.NoneType('none')),
[None, None],
np.zeros((2, 3)), 1.2, None)
def test_closure_and_nonlocal_can_prune(self):
# Closures must be inlined ahead of branch pruning in case nonlocal
# is used. See issue #6585.
def impl():
x = 1000
def closure():
nonlocal x
x = 0
closure()
if x == 0:
return True
else:
return False
self.assert_prune(impl, (), [False,],)
def test_closure_and_nonlocal_cannot_prune(self):
# Closures must be inlined ahead of branch pruning in case nonlocal
# is used. See issue #6585.
def impl(n):
x = 1000
def closure(t):
nonlocal x
x = t
closure(n)
if x == 0:
return True
else:
return False
self.assert_prune(impl, (types.int64,), [None,], 1)
class TestBranchPrunePredicates(TestBranchPruneBase, SerialMixin):
# Really important thing to remember... the branch on predicates end up as
# POP_JUMP_IF_<bool> and the targets are backwards compared to normal, i.e.
# the true condition is far jump and the false the near i.e. `if x` would
# end up in Numba IR as e.g. `branch x 10, 6`.
_TRUTHY = (1, "String", True, 7.4, 3j)
_FALSEY = (0, "", False, 0.0, 0j, None)
def _literal_const_sample_generator(self, pyfunc, consts):
"""
This takes a python function, pyfunc, and manipulates its co_const
__code__ member to create a new function with different co_consts as
supplied in argument consts.
consts is a dict {index: value} of co_const tuple index to constant
value used to update a pyfunc clone's co_const.
"""
pyfunc_code = pyfunc.__code__
# translate consts spec to update the constants
co_consts = {k: v for k, v in enumerate(pyfunc_code.co_consts)}
for k, v in consts.items():
co_consts[k] = v
new_consts = tuple([v for _, v in sorted(co_consts.items())])
# create new code parts
co_args = [pyfunc_code.co_argcount]
if utils.PYVERSION >= (3, 8):
co_args.append(pyfunc_code.co_posonlyargcount)
co_args.append(pyfunc_code.co_kwonlyargcount)
co_args.extend([pyfunc_code.co_nlocals,
pyfunc_code.co_stacksize,
pyfunc_code.co_flags,
pyfunc_code.co_code,
new_consts,
pyfunc_code.co_names,
pyfunc_code.co_varnames,
pyfunc_code.co_filename,
pyfunc_code.co_name,
pyfunc_code.co_firstlineno,
pyfunc_code.co_lnotab,
pyfunc_code.co_freevars,
pyfunc_code.co_cellvars
])
# create code object with mutation
new_code = pytypes.CodeType(*co_args)
# get function
return pytypes.FunctionType(new_code, globals())
def test_literal_const_code_gen(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
else:
_CONST2 = "PLACEHOLDER2"
return _CONST2 + 4
new = self._literal_const_sample_generator(impl, {1:0, 3:20})
iconst = impl.__code__.co_consts
nconst = new.__code__.co_consts
self.assertEqual(iconst, (None, "PLACEHOLDER1", 3.14159,
"PLACEHOLDER2", 4))
self.assertEqual(nconst, (None, 0, 3.14159, 20, 4))
self.assertEqual(impl(None), 3.14159)
self.assertEqual(new(None), 24)
def test_single_if_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
func = self._literal_const_sample_generator(impl, {1: const})
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if not _CONST1:
return 3.14159
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
func = self._literal_const_sample_generator(impl, {1: const})
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if _CONST1:
return 3.14159
else:
return 1.61803
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
func = self._literal_const_sample_generator(impl, {1: const})
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_const(self):
def impl(x):
_CONST1 = "PLACEHOLDER1"
if not _CONST1:
return 3.14159
else:
return 1.61803
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
func = self._literal_const_sample_generator(impl, {1: const})
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if const:
return 3.14159, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if not const:
return 3.14159, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if const:
return 3.14159, const
else:
return 1.61803, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_freevar(self):
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for const in c_inp:
def func(x):
if not const:
return 3.14159, const
else:
return 1.61803, const
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
# globals in this section have absurd names after their test usecase names
# so as to prevent collisions and permit tests to run in parallel
def test_single_if_global(self):
global c_test_single_if_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_global = c
def func(x):
if c_test_single_if_global:
return 3.14159, c_test_single_if_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_negate_global(self):
global c_test_single_if_negate_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_negate_global = c
def func(x):
if c_test_single_if_negate_global:
return 3.14159, c_test_single_if_negate_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_global(self):
global c_test_single_if_else_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_else_global = c
def func(x):
if c_test_single_if_else_global:
return 3.14159, c_test_single_if_else_global
else:
return 1.61803, c_test_single_if_else_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_single_if_else_negate_global(self):
global c_test_single_if_else_negate_global
for c_inp, prune in (self._TRUTHY, False), (self._FALSEY, True):
for c in c_inp:
c_test_single_if_else_negate_global = c
def func(x):
if not c_test_single_if_else_negate_global:
return 3.14159, c_test_single_if_else_negate_global
else:
return 1.61803, c_test_single_if_else_negate_global
self.assert_prune(func, (types.NoneType('none'),), [prune],
None)
def test_issue_5618(self):
@njit
def foo():
values = np.zeros(1)
tmp = 666
if tmp:
values[0] = tmp
return values
self.assertPreciseEqual(foo.py_func()[0], 666.)
self.assertPreciseEqual(foo()[0], 666.)
class TestBranchPrunePostSemanticConstRewrites(TestBranchPruneBase):
# Tests that semantic constants rewriting works by virtue of branch pruning
def test_array_ndim_attr(self):
def impl(array):
if array.ndim == 2:
if array.shape[1] == 2:
return 1
else:
return 10
self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False,
None],
np.zeros((2, 3)))
self.assert_prune(impl, (types.Array(types.float64, 1, 'C'),), [True,
'both'],
np.zeros((2,)))
def test_tuple_len(self):
def impl(tup):
if len(tup) == 3:
if tup[2] == 2:
return 1
else:
return 0
self.assert_prune(impl, (types.UniTuple(types.int64, 3),), [False,
None],
tuple([1, 2, 3]))
self.assert_prune(impl, (types.UniTuple(types.int64, 2),), [True,
'both'],
tuple([1, 2]))
def test_attr_not_len(self):
# The purpose of this test is to make sure that the conditions guarding
# the rewrite part do not themselves raise exceptions.
# This produces an `ir.Expr` call node for `float.as_integer_ratio`,
# which is a getattr() on `float`.
@njit
def test():
float.as_integer_ratio(1.23)
# this should raise a TypingError
with self.assertRaises(errors.TypingError) as e:
test()
self.assertIn("Unknown attribute 'as_integer_ratio'", str(e.exception))
def test_ndim_not_on_array(self):
FakeArray = collections.namedtuple('FakeArray', ['ndim'])
fa = FakeArray(ndim=2)
def impl(fa):
if fa.ndim == 2:
return fa.ndim
else:
object()
# check prune works for array ndim
self.assert_prune(impl, (types.Array(types.float64, 2, 'C'),), [False],
np.zeros((2, 3)))
# check prune fails for something with `ndim` attr that is not array
FakeArrayType = types.NamedUniTuple(types.int64, 1, FakeArray)
self.assert_prune(impl, (FakeArrayType,), [None], fa,
flags=enable_pyobj_flags)
def test_semantic_const_propagates_before_static_rewrites(self):
# see issue #5015, the ndim needs writing in as a const before
# the rewrite passes run to make e.g. getitems static where possible
@njit
def impl(a, b):
return a.shape[:b.ndim]
args = (np.zeros((5, 4, 3, 2)), np.zeros((1, 1)))
self.assertPreciseEqual(impl(*args), impl.py_func(*args))
| bsd-2-clause |
abgoyal/alcatel_ot_4020D_kernel | Documentation/target/tcm_mod_builder.py | 3119 | 42754 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: nab@kernel.org
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
	"""Generate the <fabric_mod_name>_base.h header for an iSCSI fabric module.

	Writes the fabric's nacl/tpg/tport struct definitions into
	<fabric_mod_dir_var>/<fabric_mod_name>_base.h.  Unlike the FC/SAS
	variants the structs carry ASCII names only (no binary WWPN fields).
	Records the iSCSI naming convention in the module-level globals.
	"""
	global fabric_mod_port
	global fabric_mod_init_port
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
	print("Writing file: " + f)
	buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
	buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
	buf += "\n"
	buf += "struct " + fabric_mod_name + "_nacl {\n"
	buf += "	/* ASCII formatted InitiatorName */\n"
	buf += "	char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
	buf += "	struct se_node_acl se_node_acl;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tpg {\n"
	buf += "	/* iSCSI target portal group tag for TCM */\n"
	buf += "	u16 tport_tpgt;\n"
	buf += "	/* Pointer back to " + fabric_mod_name + "_tport */\n"
	buf += "	struct " + fabric_mod_name + "_tport *tport;\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tpg() */\n"
	buf += "	struct se_portal_group se_tpg;\n"
	buf += "};\n\n"
	buf += "struct " + fabric_mod_name + "_tport {\n"
	buf += "	/* SCSI protocol the tport is providing */\n"
	buf += "	u8 tport_proto_id;\n"
	buf += "	/* ASCII formatted TargetName for IQN */\n"
	buf += "	char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
	buf += "	/* Returned by " + fabric_mod_name + "_make_tport() */\n"
	buf += "	struct se_wwn tport_wwn;\n"
	buf += "};\n"
	# file.write() returns None on Python 2, so the old "if ret:" check was
	# dead code; report real failures via the raised IOError instead.
	try:
		with open(f, 'w') as p:
			p.write(buf)
	except IOError:
		tcm_mod_err("Unable to write f: " + f)
	# iSCSI fabrics also use "tport"/"iport" naming.
	fabric_mod_port = "tport"
	fabric_mod_init_port = "iport"
	return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
	"""Generate the protocol-specific _base.h header for the new module.

	Dispatches on proto_ident ("FC", "SAS" or "iSCSI") to the matching
	header generator; any other protocol is fatal and exits the script.
	"""
	if proto_ident == "FC":
		tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "SAS":
		tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
	elif proto_ident == "iSCSI":
		tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
	else:
		# Unknown protocol: nothing sensible can be generated, so abort.
		print("Unsupported proto_ident: " + proto_ident)
		sys.exit(1)
	return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate <fabric_mod_name>_configfs.c for the new fabric module.

	Emits the configfs glue: make/drop callbacks for node ACLs, TPGs and
	WWN ports, the target_core_fabric_ops table wiring them up, and the
	module init/exit functions that register the fabric with TCM.  For
	"FC" and "SAS" protocols extra WWPN-handling lines are generated;
	"iSCSI" skips them.
	"""
	buf = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
	print "Writing file: " + f
	# NOTE(review): open() raises IOError on failure rather than returning
	# a falsy value, so this check can never trigger.
	p = open(f, 'w');
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	# Header includes for the generated source file.
	buf = "#include <linux/module.h>\n"
	buf += "#include <linux/moduleparam.h>\n"
	buf += "#include <linux/version.h>\n"
	buf += "#include <generated/utsrelease.h>\n"
	buf += "#include <linux/utsname.h>\n"
	buf += "#include <linux/init.h>\n"
	buf += "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/configfs.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_transport.h>\n"
	buf += "#include <target/target_core_fabric_ops.h>\n"
	buf += "#include <target/target_core_fabric_configfs.h>\n"
	buf += "#include <target/target_core_fabric_lib.h>\n"
	buf += "#include <target/target_core_device.h>\n"
	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/configfs_macros.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
	buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
	# <mod>_make_nodeacl(): configfs callback creating an initiator ACL.
	buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
	buf += "	struct se_portal_group *se_tpg,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct se_node_acl *se_nacl, *se_nacl_new;\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n"
	# FC/SAS initiators are identified by a binary WWPN; iSCSI is not.
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n"
	buf += "	u32 nexus_depth;\n\n"
	buf += "	/* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n"
	buf += "	se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
	buf += "	if (!(se_nacl_new))\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
	buf += "	nexus_depth = 1;\n"
	buf += "	/*\n"
	buf += "	 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
	buf += "	 * when converting a NodeACL from demo mode -> explict\n"
	buf += "	 */\n"
	buf += "	se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
	buf += "				name, nexus_depth);\n"
	buf += "	if (IS_ERR(se_nacl)) {\n"
	buf += "		" + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
	buf += "		return se_nacl;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
	buf += "	 */\n"
	buf += "	nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
		buf += "	/* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
	buf += "	return se_nacl;\n"
	buf += "}\n\n"
	# <mod>_drop_nodeacl(): tear down an initiator ACL.
	buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
	buf += "				struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
	buf += "	core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
	buf += "	kfree(nacl);\n"
	buf += "}\n\n"
	# <mod>_make_tpg(): parse "tpgt_<n>" and register a new portal group.
	buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
	buf += "	struct se_wwn *wwn,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "			struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg;\n"
	buf += "	unsigned long tpgt;\n"
	buf += "	int ret;\n\n"
	buf += "	if (strstr(name, \"tpgt_\") != name)\n"
	buf += "		return ERR_PTR(-EINVAL);\n"
	buf += "	if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
	buf += "		return ERR_PTR(-EINVAL);\n\n"
	buf += "	tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
	buf += "	if (!(tpg)) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	buf += "	tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
	buf += "	tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
	buf += "	ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
	buf += "				&tpg->se_tpg, (void *)tpg,\n"
	buf += "				TRANSPORT_TPG_TYPE_NORMAL);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		kfree(tpg);\n"
	buf += "		return NULL;\n"
	buf += "	}\n"
	buf += "	return &tpg->se_tpg;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
	buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
	buf += "	core_tpg_deregister(se_tpg);\n"
	buf += "	kfree(tpg);\n"
	buf += "}\n\n"
	# <mod>_make_<port>()/<mod>_drop_<port>(): WWN (target port) lifecycle.
	buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	struct config_group *group,\n"
	buf += "	const char *name)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	u64 wwpn = 0;\n\n"
	buf += "	/* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
	buf += "		return ERR_PTR(-EINVAL); */\n\n"
	buf += "	" + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
	buf += "	if (!(" + fabric_mod_port + ")) {\n"
	buf += "		printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
	buf += "		return ERR_PTR(-ENOMEM);\n"
	buf += "	}\n"
	if proto_ident == "FC" or proto_ident == "SAS":
		buf += "	" + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
		buf += "	/* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
	buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
	buf += "}\n\n"
	buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
	buf += "{\n"
	buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
	buf += "				struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
	buf += "	kfree(" + fabric_mod_port + ");\n"
	buf += "}\n\n"
	# Read-only "version" configfs attribute for the fabric's WWN group.
	buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
	buf += "	struct target_fabric_configfs *tf,\n"
	buf += "	char *page)\n"
	buf += "{\n"
	buf += "	return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "}\n\n"
	buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
	buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
	buf += "	&" + fabric_mod_name + "_wwn_version.attr,\n"
	buf += "	NULL,\n"
	buf += "};\n\n"
	# The fabric ops table: points every TCM callback at the stubs emitted
	# by tcm_mod_dump_fabric_ops() and the configfs handlers above.
	buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
	buf += "	.get_fabric_name		= " + fabric_mod_name + "_get_fabric_name,\n"
	buf += "	.get_fabric_proto_ident		= " + fabric_mod_name + "_get_fabric_proto_ident,\n"
	buf += "	.tpg_get_wwn			= " + fabric_mod_name + "_get_fabric_wwn,\n"
	buf += "	.tpg_get_tag			= " + fabric_mod_name + "_get_tag,\n"
	buf += "	.tpg_get_default_depth		= " + fabric_mod_name + "_get_default_depth,\n"
	buf += "	.tpg_get_pr_transport_id	= " + fabric_mod_name + "_get_pr_transport_id,\n"
	buf += "	.tpg_get_pr_transport_id_len	= " + fabric_mod_name + "_get_pr_transport_id_len,\n"
	buf += "	.tpg_parse_pr_out_transport_id	= " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
	buf += "	.tpg_check_demo_mode		= " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_check_demo_mode_cache	= " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
	buf += "	.tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
	buf += "	.tpg_alloc_fabric_acl		= " + fabric_mod_name + "_alloc_fabric_acl,\n"
	buf += "	.tpg_release_fabric_acl		= " + fabric_mod_name + "_release_fabric_acl,\n"
	buf += "	.tpg_get_inst_index		= " + fabric_mod_name + "_tpg_get_inst_index,\n"
	buf += "	.release_cmd_to_pool		= " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.release_cmd_direct		= " + fabric_mod_name + "_release_cmd,\n"
	buf += "	.shutdown_session		= " + fabric_mod_name + "_shutdown_session,\n"
	buf += "	.close_session			= " + fabric_mod_name + "_close_session,\n"
	buf += "	.stop_session			= " + fabric_mod_name + "_stop_session,\n"
	buf += "	.fall_back_to_erl0		= " + fabric_mod_name + "_reset_nexus,\n"
	buf += "	.sess_logged_in			= " + fabric_mod_name + "_sess_logged_in,\n"
	buf += "	.sess_get_index			= " + fabric_mod_name + "_sess_get_index,\n"
	buf += "	.sess_get_initiator_sid		= NULL,\n"
	buf += "	.write_pending			= " + fabric_mod_name + "_write_pending,\n"
	buf += "	.write_pending_status		= " + fabric_mod_name + "_write_pending_status,\n"
	buf += "	.set_default_node_attributes	= " + fabric_mod_name + "_set_default_node_attrs,\n"
	buf += "	.get_task_tag			= " + fabric_mod_name + "_get_task_tag,\n"
	buf += "	.get_cmd_state			= " + fabric_mod_name + "_get_cmd_state,\n"
	buf += "	.new_cmd_failure		= " + fabric_mod_name + "_new_cmd_failure,\n"
	buf += "	.queue_data_in			= " + fabric_mod_name + "_queue_data_in,\n"
	buf += "	.queue_status			= " + fabric_mod_name + "_queue_status,\n"
	buf += "	.queue_tm_rsp			= " + fabric_mod_name + "_queue_tm_rsp,\n"
	buf += "	.get_fabric_sense_len		= " + fabric_mod_name + "_get_fabric_sense_len,\n"
	buf += "	.set_fabric_sense_len		= " + fabric_mod_name + "_set_fabric_sense_len,\n"
	buf += "	.is_state_remove		= " + fabric_mod_name + "_is_state_remove,\n"
	buf += "	.pack_lun			= " + fabric_mod_name + "_pack_lun,\n"
	buf += "	/*\n"
	buf += "	 * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
	buf += "	 */\n"
	buf += "	.fabric_make_wwn		= " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
	buf += "	.fabric_drop_wwn		= " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
	buf += "	.fabric_make_tpg		= " + fabric_mod_name + "_make_tpg,\n"
	buf += "	.fabric_drop_tpg		= " + fabric_mod_name + "_drop_tpg,\n"
	buf += "	.fabric_post_link		= NULL,\n"
	buf += "	.fabric_pre_unlink		= NULL,\n"
	buf += "	.fabric_make_np			= NULL,\n"
	buf += "	.fabric_drop_np			= NULL,\n"
	buf += "	.fabric_make_nodeacl		= " + fabric_mod_name + "_make_nodeacl,\n"
	buf += "	.fabric_drop_nodeacl		= " + fabric_mod_name + "_drop_nodeacl,\n"
	buf += "};\n\n"
	# Module registration: fabric_mod_name[4:] strips the "tcm_" prefix for
	# the configfs fabric name.
	buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
	buf += "{\n"
	buf += "	struct target_fabric_configfs *fabric;\n"
	buf += "	int ret;\n\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
	buf += "		\" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
	buf += "		utsname()->machine);\n"
	buf += "	/*\n"
	buf += "	 * Register the top level struct config_item_type with TCM core\n"
	buf += "	 */\n"
	buf += "	fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
	buf += "	if (!(fabric)) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
	buf += "		return -ENOMEM;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
	buf += "	 */\n"
	buf += "	fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
	buf += "	/*\n"
	buf += "	 * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
	buf += "	 */\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
	buf += "	TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
	buf += "	/*\n"
	buf += "	 * Register the fabric for use within TCM\n"
	buf += "	 */\n"
	buf += "	ret = target_fabric_configfs_register(fabric);\n"
	buf += "	if (ret < 0) {\n"
	buf += "		printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
	buf += "				\" for " + fabric_mod_name.upper() + "\\n\");\n"
	buf += "		return ret;\n"
	buf += "	}\n"
	buf += "	/*\n"
	buf += "	 * Setup our local pointer to *fabric\n"
	buf += "	 */\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = fabric;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
	buf += "{\n"
	buf += "	if (!(" + fabric_mod_name + "_fabric_configfs))\n"
	buf += "		return;\n\n"
	buf += "	target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
	buf += "	" + fabric_mod_name + "_fabric_configfs = NULL;\n"
	buf += "	printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
	buf += "};\n\n"
	buf += "static int __init " + fabric_mod_name + "_init(void)\n"
	buf += "{\n"
	buf += "	int ret;\n\n"
	buf += "	ret = " + fabric_mod_name + "_register_configfs();\n"
	buf += "	if (ret < 0)\n"
	buf += "		return ret;\n\n"
	buf += "	return 0;\n"
	buf += "};\n\n"
	buf += "static void " + fabric_mod_name + "_exit(void)\n"
	buf += "{\n"
	buf += "	" + fabric_mod_name + "_deregister_configfs();\n"
	buf += "};\n\n"
	buf += "#ifdef MODULE\n"
	buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
	buf += "MODULE_LICENSE(\"GPL\");\n"
	buf += "module_init(" + fabric_mod_name + "_init);\n"
	buf += "module_exit(" + fabric_mod_name + "_exit);\n"
	buf += "#endif\n"
	ret = p.write(buf)
	# NOTE(review): file.write() returns None on Python 2, so this error
	# branch is dead code -- write failures surface as IOError instead.
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	return
def tcm_mod_scan_fabric_ops(tcm_dir):
	"""Collect function-pointer declarations from target_core_fabric_ops.h.

	Reads the kernel header line by line and appends every line containing
	a "(*" function-pointer declaration to the module-level fabric_ops
	list, which tcm_mod_dump_fabric_ops() later walks to emit stubs.
	"""
	# NOTE(review): no "/" is inserted here, so this relies on tcm_dir
	# already ending with a slash (main() builds it with "/../../").
	fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
	print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
	process_fo = 0;
	p = open(fabric_ops_api, 'r')
	line = p.readline()
	while line:
		# Skip the struct's opening line itself.
		if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
			line = p.readline()
			continue
		# NOTE(review): process_fo flips on the first line that is not the
		# struct opener, so collection effectively starts at the top of
		# the file rather than strictly inside the struct -- confirm this
		# is the intended behavior before relying on it.
		if process_fo == 0:
			process_fo = 1;
			line = p.readline()
			# Search for function pointer
			if not re.search('\(\*', line):
				continue
			fabric_ops.append(line.rstrip())
			continue
		line = p.readline()
		# Search for function pointer
		if not re.search('\(\*', line):
			continue
		fabric_ops.append(line.rstrip())
	p.close()
	return
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
	"""Generate skeleton fabric-op implementations and their prototypes.

	Emits <fabric_mod_name>_fabric.c (stub bodies, accumulated in buf) and
	<fabric_mod_name>_fabric.h (matching prototypes, accumulated in bufi)
	for every function-pointer declaration previously collected into the
	module-level fabric_ops list by tcm_mod_scan_fabric_ops().  The
	proto-dependent ops dispatch to the fc_/sas_/iscsi_ helpers chosen by
	proto_ident.
	"""
	buf = ""
	bufi = ""
	f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
	print "Writing file: " + f
	# NOTE(review): open() raises IOError on failure instead of returning a
	# falsy value, so these checks can never trigger.
	p = open(f, 'w')
	if not p:
		tcm_mod_err("Unable to open file: " + f)
	fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
	print "Writing file: " + fi
	pi = open(fi, 'w')
	if not pi:
		tcm_mod_err("Unable to open file: " + fi)
	# Header includes for the generated .c file.
	buf = "#include <linux/slab.h>\n"
	buf += "#include <linux/kthread.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/list.h>\n"
	buf += "#include <linux/types.h>\n"
	buf += "#include <linux/string.h>\n"
	buf += "#include <linux/ctype.h>\n"
	buf += "#include <asm/unaligned.h>\n"
	buf += "#include <scsi/scsi.h>\n"
	buf += "#include <scsi/scsi_host.h>\n"
	buf += "#include <scsi/scsi_device.h>\n"
	buf += "#include <scsi/scsi_cmnd.h>\n"
	buf += "#include <scsi/libfc.h>\n\n"
	buf += "#include <target/target_core_base.h>\n"
	buf += "#include <target/target_core_transport.h>\n"
	buf += "#include <target/target_core_fabric_ops.h>\n"
	buf += "#include <target/target_core_fabric_lib.h>\n"
	buf += "#include <target/target_core_device.h>\n"
	buf += "#include <target/target_core_tpg.h>\n"
	buf += "#include <target/target_core_configfs.h>\n\n"
	buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
	buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
	# The _check_true()/_check_false() helpers are always emitted; the ops
	# table generated by tcm_mod_build_configfs() points at them.
	buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 1;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
	buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
	buf += "{\n"
	buf += "	return 0;\n"
	buf += "}\n\n"
	bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
	# Walk each collected function-pointer declaration and emit a stub for
	# every op we recognize by name.
	total_fabric_ops = len(fabric_ops)
	i = 0
	while i < total_fabric_ops:
		fo = fabric_ops[i]
		i += 1
		# print "fabric_ops: " + fo
		if re.search('get_fabric_name', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
			buf += "{\n"
			# fabric_mod_name[4:] strips the "tcm_" prefix.
			buf += "	return \"" + fabric_mod_name[4:] + "\";\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
			continue
		if re.search('get_fabric_proto_ident', fo):
			buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	u8 proto_id;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return proto_id;\n"
			buf += "}\n\n"
			bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
		if re.search('get_wwn', fo):
			buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
			buf += "	return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
		if re.search('get_tag', fo):
			buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	return tpg->" + fabric_mod_port + "_tpgt;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
		if re.search('get_default_depth', fo):
			buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
		if re.search('get_pr_transport_id\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code,\n"
			buf += "	unsigned char *buf)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code, buf);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *, unsigned char *);\n"
		if re.search('get_pr_transport_id_len\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl,\n"
			buf += "	struct t10_pr_registration *pr_reg,\n"
			buf += "	int *format_code)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	int ret = 0;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
				buf += "					format_code);\n"
				buf += "		break;\n"
			buf += "	}\n\n"
			buf += "	return ret;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *, struct t10_pr_registration *,\n"
			bufi += "			int *);\n"
		if re.search('parse_pr_out_transport_id\)\(', fo):
			buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	const char *buf,\n"
			buf += "	u32 *out_tid_len,\n"
			buf += "	char **port_nexus_ptr)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
			buf += "				struct " + fabric_mod_name + "_tpg, se_tpg);\n"
			buf += "	struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
			buf += "	char *tid = NULL;\n\n"
			buf += "	switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
			if proto_ident == "FC":
				buf += "	case SCSI_PROTOCOL_FCP:\n"
				buf += "	default:\n"
				buf += "		tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "SAS":
				buf += "	case SCSI_PROTOCOL_SAS:\n"
				buf += "	default:\n"
				buf += "		tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			elif proto_ident == "iSCSI":
				buf += "	case SCSI_PROTOCOL_ISCSI:\n"
				buf += "	default:\n"
				buf += "		tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
				buf += "					port_nexus_ptr);\n"
			buf += "	}\n\n"
			buf += "	return tid;\n"
			buf += "}\n\n"
			bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
			bufi += "			const char *, u32 *, char **);\n"
		if re.search('alloc_fabric_acl\)\(', fo):
			buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl;\n\n"
			buf += "	nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
			buf += "	if (!(nacl)) {\n"
			buf += "		printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
			buf += "		return NULL;\n"
			buf += "	}\n\n"
			buf += "	return &nacl->se_node_acl;\n"
			buf += "}\n\n"
			bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
		if re.search('release_fabric_acl\)\(', fo):
			buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
			buf += "	struct se_portal_group *se_tpg,\n"
			buf += "	struct se_node_acl *se_nacl)\n"
			buf += "{\n"
			buf += "	struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
			buf += "			struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
			buf += "	kfree(nacl);\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
			bufi += "			struct se_node_acl *);\n"
		if re.search('tpg_get_inst_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
			buf += "{\n"
			buf += "	return 1;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
		if re.search('release_cmd_to_pool', fo):
			buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
		if re.search('shutdown_session\)\(', fo):
			buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
		if re.search('close_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
		if re.search('stop_session\)\(', fo):
			buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
		if re.search('fall_back_to_erl0\)\(', fo):
			buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
		if re.search('sess_logged_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
		if re.search('sess_get_index\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
		if re.search('write_pending\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
		if re.search('write_pending_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
		if re.search('set_default_node_attributes\)\(', fo):
			buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
		if re.search('get_task_tag\)\(', fo):
			buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
		if re.search('get_cmd_state\)\(', fo):
			buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
		if re.search('new_cmd_failure\)\(', fo):
			buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return;\n"
			buf += "}\n\n"
			bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
		if re.search('queue_data_in\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
		if re.search('queue_status\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
		if re.search('queue_tm_rsp\)\(', fo):
			buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
		if re.search('get_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
		if re.search('set_fabric_sense_len\)\(', fo):
			buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
		if re.search('is_state_remove\)\(', fo):
			buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
			buf += "{\n"
			buf += "	return 0;\n"
			buf += "}\n\n"
			bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
		if re.search('pack_lun\)\(', fo):
			buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
			buf += "{\n"
			buf += "	WARN_ON(lun >= 256);\n"
			buf += "	/* Caller wants this byte-swapped */\n"
			buf += "	return cpu_to_le64((lun & 0xff) << 8);\n"
			buf += "}\n\n"
			bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
	# NOTE(review): file.write() returns None on Python 2, so these error
	# branches are dead code -- write failures surface as IOError instead.
	ret = p.write(buf)
	if ret:
		tcm_mod_err("Unable to write f: " + f)
	p.close()
	ret = pi.write(bufi)
	if ret:
		tcm_mod_err("Unable to write fi: " + fi)
	pi.close()
	return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
    """Append the new fabric module's obj-$(CONFIG_...) line to
    drivers/target/Makefile so kbuild descends into its directory."""
    makefile = tcm_dir + "/drivers/target/Makefile"
    entry = ("obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " +
             fabric_mod_name.lower() + "/\n")
    out = open(makefile, 'a')
    out.write(entry)
    out.close()
    return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
    """Append a source line for the new fabric module's Kconfig to
    drivers/target/Kconfig."""
    kconfig_path = tcm_dir + "/drivers/target/Kconfig"
    entry = 'source "drivers/target/' + fabric_mod_name.lower() + '/Kconfig"\n'
    out = open(kconfig_path, 'a')
    out.write(entry)
    out.close()
    return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd();
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
# Command-line handling. Note this runs at import time, not under the
# __main__ guard: both options are parsed (and validated as mandatory)
# even when the file is merely imported.
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
                action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
                action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
# optparse has no "required" flag, so mandatory options are checked by hand.
mandatories = ['modname', 'protoident']
for m in mandatories:
    if not opts.__dict__[m]:
        print "mandatory option is missing\n"
        parser.print_help()
        exit(-1)
if __name__ == "__main__":
    main(str(opts.modname), opts.protoident)
| gpl-2.0 |
will-Do/avocado-vt | virttest/funcatexit.py | 12 | 3570 | """
funcatexit.py - allow programmer to define multiple exit functions to be
executed upon normal case termination. Can be used for the environment clean
up functions. The basic idea is like atexit from python libs.
"""
__all__ = ["register", "run_exitfuncs", "unregister"]
import traceback
from avocado.core import exceptions
def run_exitfuncs(env, test_type):
    """
    Run any registered exit functions.

    exithandlers is traversed in reverse order so functions are executed
    last in, first out.

    param env: the global objects used by tests
    param test_type: test type mark for exit functions
    :return: concatenation of "Error in <func>: <details>" messages from
             handlers that raised; "" when every handler succeeded.
    """
    error_message = ""
    exithandlers = env.data.get("exithandlers__%s" % test_type)
    if exithandlers:
        while exithandlers:
            func, targs, kargs = exithandlers.pop()
            try:
                func(*targs, **kargs)
            # "except E as e" (instead of the old "except E, e") and
            # func.__name__ (instead of func.func_name) behave identically
            # on Python 2.6+ and are also valid on Python 3.
            except Exception as details:
                error_message += "Error in %s:" % func.__name__
                error_message += " %s\n" % details
                traceback.print_exc()
    return error_message
def register(env, test_type, func, *targs, **kargs):
    """
    Register a function to be executed upon case termination.
    func is returned to facilitate usage as a decorator.

    param env: the global objects used by tests
    param test_type: test type mark for exit functions
    param func: function to be called at exit
    param targs: optional arguments to pass to func
    param kargs: optional keyword arguments to pass to func
    :raise exceptions.TestError: when func is not module-level or an
           argument does not support pickling.
    """
    # The registration lives in env.data, which gets pickled, so every
    # part of it must be picklable: the function must be declared at
    # module scope and the arguments must support pickling.
    # func.__name__ / func.__globals__ are the portable spellings of the
    # Python-2-only func_name / func_globals attributes (identical on 2.6+,
    # also valid on Python 3); likewise items() replaces iteritems().
    if func.__name__ not in func.__globals__:
        raise exceptions.TestError("Trying to register function '%s', which is not "
                                   "declared at module scope (not in globals). "
                                   "Please contact the test developer to fix it."
                                   % func)
    for arg in targs:
        # __slots__ without __getstate__ means pickle cannot capture state.
        if hasattr(arg, '__slots__') and not hasattr(arg, '__getstate__'):
            raise exceptions.TestError("Trying to register exitfunction '%s' with "
                                       "unpickable targument '%s'. Please contact "
                                       "the test developer to fix it."
                                       % (func, arg))
    for key, arg in kargs.items():
        if hasattr(arg, '__slots__') and not hasattr(arg, '__getstate__'):
            raise exceptions.TestError("Trying to register exitfunction '%s' with "
                                       "unpickable kargument '%s=%s'. Please "
                                       "contact the test developer to fix it."
                                       % (func, key, arg))
    exithandlers = "exithandlers__%s" % test_type
    if not env.data.get(exithandlers):
        env.data[exithandlers] = []
    env.data[exithandlers].append((func, targs, kargs))
    return func
def unregister(env, test_type, func, *targs, **kargs):
    """
    Unregister a function previously registered for case termination.
    func is returned to facilitate usage as a decorator.

    param env: the global objects used by tests
    param test_type: test type mark for exit functions
    param func: function that was to be called at exit
    param targs: positional arguments it was registered with
    param kargs: keyword arguments it was registered with
    """
    key = "exithandlers__%s" % test_type
    handlers = env.data.get(key)
    if handlers:
        # list.remove matches the exact (func, targs, kargs) triple.
        handlers.remove((func, targs, kargs))
    return func
| gpl-2.0 |
joshlk/blosc_store | blosc_store/blst.py | 1 | 4455 | #!/usr/bin/env python
import os
import shutil
import pandas as pd
import bloscpack as bp
try:
import ujson as json
except:
import json
"""
For reading and writing to the blosc store blst format. Nuances that users should take note:
* Column names are always saved as strings irrespective of there original data-type (e.g. could be int)
* DataFrame index is currently not preserved
* Datatypes currently supported: strings, numerical, datetime, categorical (with string and numeric)
"""
def to_blst(df, path):
    """
    Save a DataFrame using the blst on-disk format.

    The store is a directory: columns.txt holds the column/dtype table,
    string columns go to <col>.csv, categorical columns to <col>.bp plus
    a <col>.meta.json with their categories, and all other dtypes to a
    blosc-packed <col>.bp. Any existing store at *path* is replaced.

    :param df: DataFrame to save
    :type df: pandas.DataFrame
    :param path: destination directory; a '.blst' extension is suggested
    :type path: str
    """
    if isinstance(df.columns, pd.MultiIndex):
        raise NotImplementedError("MultiIndex columns not supported")
    # Start from a clean directory.
    if os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path)
    # Column/dtype metadata table.
    dtype_table = df.dtypes.reset_index()
    dtype_table.columns = ['col', 'dtype']
    dtype_table.to_csv(os.path.join(path, 'columns.txt'), sep='\t', index=False)
    # One file (or file pair) per column, chosen by dtype.
    for col in df.columns:
        kind = str(df.dtypes[col])
        if kind == 'object':  # strings -> headerless CSV
            df[[col]].to_csv(os.path.join(path, "{}.csv".format(col)),
                             index=False, header=False)
        elif kind == 'category':
            # The integer codes are packed; categories go to a JSON sidecar.
            meta = {
                'categories': df[col].cat.categories.tolist(),
                'ordered': df[col].cat.ordered
            }
            with open(os.path.join(path, "{}.meta.json".format(col)), 'w') as fh:
                json.dump(meta, fh)
            bp.pack_ndarray_file(df[col].cat.codes.values,
                                 os.path.join(path, "{}.bp".format(col)))
        else:  # numeric and datetime dtypes
            bp.pack_ndarray_file(df[col].values,
                                 os.path.join(path, "{}.bp".format(col)))
def read_blst_columns(path):
    """
    Read the column metadata (names and dtypes) of a blst store.

    :param path: Path to blst store
    :return: DataFrame with 'col' and 'dtype' columns
    """
    # columns.txt is the tab-separated table written by to_blst().
    return pd.read_table(os.path.join(path, 'columns.txt'))
def read_blst(path, columns='ALL'):
    """
    Read a blst data store and return a DataFrame

    :param path: Path to blst data store. Give the directory location
    :param columns: Which columns to read and in which order. Give 'ALL' to read all columns.
    :return: Read data
    :rtype: pandas.DataFrame
    :raises IOError: when *path* is not a directory
    :raises KeyError: when a requested column is not in the store
    """
    # Check path
    if not os.path.isdir(path):
        raise IOError("Folder does not exist: {}".format(path))
    # Read the column/dtype table written by to_blst(); the dtype drives
    # which file format each column is loaded from.
    column_meta = read_blst_columns(path)
    column_meta_dict = column_meta.set_index('col')['dtype'].to_dict()
    # Validate an explicit column selection, or default to every column
    # in stored order.
    if columns != 'ALL':
        for col in columns:
            if col not in column_meta_dict:
                raise KeyError("'{}' not a column".format(col))
    else:
        columns = column_meta['col']
    # Read each column
    # NOTE(review): if the store has zero columns the loop never runs and
    # the final `return df` raises NameError -- confirm whether empty
    # stores are expected.
    for i, col in enumerate(columns):
        dtype = column_meta_dict[col]
        if dtype == 'object':  # strings were saved as headerless CSV
            file = os.path.join(path, "{}.csv".format(col))
            col_df = pd.read_csv(file, header=None, names=[col])
        elif dtype == 'category':
            # Codes come from the blosc file; categories/ordered flag from
            # the JSON sidecar.
            meta_file = os.path.join(path, "{}.meta.json".format(col))
            file = os.path.join(path, "{}.bp".format(col))
            with open(meta_file, 'r') as f:
                meta = json.load(f)
            col_df = bp.unpack_ndarray_file(file)
            col_df = pd.Categorical.from_codes(col_df, meta['categories'],
                                               ordered=meta['ordered'])
            col_df = pd.Series(col_df, name=col).to_frame()
        else:  # numeric and datetime dtypes stored as packed ndarrays
            file = os.path.join(path, "{}.bp".format(col))
            col_df = bp.unpack_ndarray_file(file)
            col_df = pd.Series(col_df, name=col).to_frame()
        # First column seeds the frame; later ones are attached to it.
        # NOTE(review): `df[col] = col_df` assigns a one-column DataFrame,
        # which relies on pandas index alignment -- verify behaviour on
        # the pandas version in use.
        if i == 0:
            df = col_df
        else:
            df[col] = col_df
    return df
| mit |
is06/navitia | source/jormungandr/jormungandr/interfaces/argument.py | 14 | 1914 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
from flask.ext.restful import reqparse
import six
class ArgumentDoc(reqparse.Argument):
    """flask-restful request argument extended with documentation metadata.

    Adds two presentation-only attributes used when auto-generating API
    documentation:
      description -- human-readable explanation of the parameter
      hidden      -- when True, the argument is omitted from the docs
    Parsing behaviour is inherited unchanged from reqparse.Argument.
    """
    def __init__(self, name, default=None, dest=None, required=False,
                 ignore=False, type=six.text_type, location=('values',),
                 choices=(), action='store', help=None, operators=('=',),
                 case_sensitive=True, description=None, hidden=False):
        # Forward the reqparse.Argument parameters positionally, in the
        # exact order the parent constructor expects them.
        super(ArgumentDoc, self).__init__(name, default, dest, required,
                                          ignore, type, location, choices,
                                          action, help, operators,
                                          case_sensitive)
        self.description = description
        self.hidden = hidden
| agpl-3.0 |
privateip/ansible | lib/ansible/modules/network/nxos/nxos_snmp_host.py | 12 | 19738 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: nxos_snmp_host
version_added: "2.2"
short_description: Manages SNMP host configuration.
description:
- Manages SNMP host configuration parameters.
extends_documentation_fragment: nxos
author:
- Jason Edelman (@jedelman8)
- Gabriele Gerbino (@GGabriele)
notes:
- C(state=absent) removes the host configuration if it is configured.
options:
snmp_host:
description:
- IP address of hostname of target host.
required: true
version:
description:
- SNMP version.
required: false
default: v2c
choices: ['v2c', 'v3']
community:
description:
- Community string or v3 username.
required: false
default: null
udp:
description:
- UDP port number (0-65535).
required: false
default: null
type:
description:
- type of message to send to host.
required: false
        default: trap
choices: ['trap', 'inform']
vrf:
description:
- VRF to use to source traffic to source.
required: false
default: null
vrf_filter:
description:
- Name of VRF to filter.
required: false
default: null
src_intf:
description:
- Source interface.
required: false
default: null
state:
description:
- Manage the state of the resource.
required: true
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# ensure snmp host is configured
- nxos_snmp_host:
snmp_host: 3.3.3.3
community: TESTING
state: present
host: "{{ inventory_hostname }}"
username: "{{ un }}"
password: "{{ pwd }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"community": "TESTING", "snmp_host": "3.3.3.3",
"snmp_type": "trap", "version": "v2c", "vrf_filter": "one_more_vrf"}
existing:
description: k/v pairs of existing snmp host
type: dict
sample: {"community": "TESTING", "snmp_type": "trap",
"udp": "162", "v3": "noauth", "version": "v2c",
"vrf": "test_vrf", "vrf_filter": ["test_vrf",
"another_test_vrf"]}
end_state:
description: k/v pairs of switchport after module execution
returned: always
type: dict or null
sample: {"community": "TESTING", "snmp_type": "trap",
"udp": "162", "v3": "noauth", "version": "v2c",
"vrf": "test_vrf", "vrf_filter": ["test_vrf",
"another_test_vrf", "one_more_vrf"]}
updates:
description: commands sent to the device
returned: always
type: list
sample: ["snmp-server host 3.3.3.3 filter-vrf another_test_vrf"]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import json
# COMMON CODE FOR MIGRATION
import re
from ansible.module_utils.basic import get_exception
from ansible.module_utils.netcfg import NetworkConfig, ConfigLine
from ansible.module_utils.shell import ShellError
try:
from ansible.module_utils.nxos import get_module
except ImportError:
from ansible.module_utils.nxos import NetworkModule
def to_list(val):
    """Normalize *val* to a list: sequences are shallow-copied, None
    becomes an empty list, and any other value is wrapped singly."""
    if val is None:
        return list()
    if isinstance(val, (list, tuple)):
        return list(val)
    return [val]
class CustomNetworkConfig(NetworkConfig):
    """NetworkConfig extended with section-level helpers used by the
    legacy nxos modules: expanding a config object into its subtree,
    fetching a section as text, and adding lines under parent sections."""
    def expand_section(self, configobj, S=None):
        """Return configobj plus all of its descendants, depth-first,
        accumulated into S (a fresh list unless one is passed in)."""
        if S is None:
            S = list()
        S.append(configobj)
        for child in configobj.children:
            if child in S:
                continue
            self.expand_section(child, S)
        return S
    def get_object(self, path):
        """Return the ConfigLine whose text equals path[-1] and whose
        ancestry texts equal path[:-1]; implicitly None when absent."""
        for item in self.items:
            if item.text == path[-1]:
                parents = [p.text for p in item.parents]
                if parents == path[:-1]:
                    return item
    def to_block(self, section):
        """Join the raw lines of a section into one newline-separated string."""
        return '\n'.join([item.raw for item in section])
    def get_section(self, path):
        """Return the section at *path* as text, or an empty list when the
        path is not present (note the mixed str/list return types)."""
        try:
            section = self.get_section_objects(path)
            return self.to_block(section)
        except ValueError:
            return list()
    def get_section_objects(self, path):
        """Return the ConfigLine objects of the section at *path*.

        :raises ValueError: when the path is not in the configuration.
        """
        if not isinstance(path, list):
            path = [path]
        obj = self.get_object(path)
        if not obj:
            raise ValueError('path does not exist in config')
        return self.expand_section(obj)
    def add(self, lines, parents=None):
        """Add one or more configuration lines, optionally nested under
        the given *parents* hierarchy; missing parents are created and
        indented by self.indent per nesting level.
        """
        ancestors = list()
        offset = 0
        obj = None
        ## global config command
        if not parents:
            for line in to_list(lines):
                item = ConfigLine(line)
                item.raw = line
                if item not in self.items:
                    self.items.append(item)
        else:
            for index, p in enumerate(parents):
                try:
                    i = index + 1
                    obj = self.get_section_objects(parents[:i])[0]
                    ancestors.append(obj)
                except ValueError:
                    # add parent to config
                    offset = index * self.indent
                    obj = ConfigLine(p)
                    obj.raw = p.rjust(len(p) + offset)
                    if ancestors:
                        obj.parents = list(ancestors)
                        ancestors[-1].children.append(obj)
                    self.items.append(obj)
                    ancestors.append(obj)
            # add child objects
            for line in to_list(lines):
                # check if child already exists
                for child in ancestors[-1].children:
                    if child.text == line:
                        break
                else:
                    offset = len(parents) * self.indent
                    item = ConfigLine(line)
                    item.raw = line.rjust(len(line) + offset)
                    item.parents = ancestors
                    ancestors[-1].children.append(item)
                    self.items.append(item)
def get_network_module(**kwargs):
    """Build the platform network module, preferring the legacy
    get_module() factory; NameError means only the newer NetworkModule
    import succeeded at the top of the file, so fall back to it."""
    try:
        return get_module(**kwargs)
    except NameError:
        return NetworkModule(**kwargs)
def get_config(module, include_defaults=False):
    """Return the device running-config wrapped in a CustomNetworkConfig.

    Prefers a config supplied via the module's 'config' parameter;
    otherwise fetches it from the device, supporting both the legacy
    module.get_config() API and the newer module.config.get_config().

    NOTE(review): the *include_defaults* parameter is never read -- the
    value actually used comes from module.params['include_defaults'].
    """
    config = module.params['config']
    if not config:
        try:
            config = module.get_config()
        except AttributeError:
            defaults = module.params['include_defaults']
            config = module.config.get_config(include_defaults=defaults)
    return CustomNetworkConfig(indent=2, contents=config)
def load_config(module, candidate):
    """Push the difference between *candidate* and the running config.

    Returns a result dict with 'changed' and, when commands were needed,
    'updates' (the command list). In check mode nothing is sent but the
    result still reports changed=True.
    """
    config = get_config(module)
    # Only the lines missing from the device are sent.
    commands = candidate.difference(config)
    commands = [str(c).strip() for c in commands]
    save_config = module.params['save']
    result = dict(changed=False)
    if commands:
        if not module.check_mode:
            # Support both the legacy module.configure() API and the
            # newer module.config() one.
            try:
                module.configure(commands)
            except AttributeError:
                module.config(commands)
            if save_config:
                try:
                    module.config.save_config()
                except AttributeError:
                    module.execute(['copy running-config startup-config'])
        result['changed'] = True
        result['updates'] = commands
    return result
# END OF COMMON CODE
def execute_config_command(commands, module):
    """Send configuration commands to the device, failing the module on
    shell errors.

    AttributeError means the legacy module.configure() API is missing, so
    fall back to the newer CLI API (prepending 'configure' to enter config
    mode). NOTE: the fallback mutates the caller's *commands* list.
    """
    try:
        module.configure(commands)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending CLI commands',
                         error=str(clie), commands=commands)
    except AttributeError:
        try:
            commands.insert(0, 'configure')
            module.cli.add_commands(commands, output='config')
            module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending CLI commands',
                             error=str(clie), commands=commands)
def get_cli_body_ssh(command, response, module):
    """Interpret a CLI (ssh) response. This is kind of a hack, mainly
    needed because these modules were written for NX-API and not every
    command supports "| json" over cli/ssh.

    * An XML reply means the command was valid but the resource does not
      exist yet -> empty body.
    * 'show run' output is returned as raw text.
    * Anything else must parse as JSON, otherwise the module fails.
    """
    first = response[0]
    if 'xml' in first:
        return []
    if 'show run' in command:
        return response
    try:
        return [json.loads(first)]
    except ValueError:
        module.fail_json(msg='Command does not support JSON output',
                         command=command)
def execute_show(cmds, module, command_type=None):
    """Run show commands and return the raw response list.

    Tries the legacy module.execute() API first; AttributeError means
    only the newer CLI API exists, in which case the nxapi command_type
    ('cli_show'/'cli_show_ascii') is mapped to the CLI output format
    ('json'/'text'). Shell errors fail the module.
    """
    # nxapi command_type -> CLI output format for the fallback path.
    command_type_map = {
        'cli_show': 'json',
        'cli_show_ascii': 'text'
    }
    try:
        if command_type:
            response = module.execute(cmds, command_type=command_type)
        else:
            response = module.execute(cmds)
    except ShellError:
        clie = get_exception()
        module.fail_json(msg='Error sending {0}'.format(cmds),
                         error=str(clie))
    except AttributeError:
        try:
            if command_type:
                command_type = command_type_map.get(command_type)
                module.cli.add_commands(cmds, output=command_type)
                response = module.cli.run_commands()
            else:
                # No type given: send the commands unmodified.
                module.cli.add_commands(cmds, raw=True)
                response = module.cli.run_commands()
        except ShellError:
            clie = get_exception()
            module.fail_json(msg='Error sending {0}'.format(cmds),
                             error=str(clie))
    return response
def execute_show_command(command, module, command_type='cli_show'):
    """Dispatch a show command over the active transport and return the
    parsed body ('| json' is appended for cli transport, except for
    'show run' commands which are consumed as raw text)."""
    transport = module.params['transport']
    if transport == 'cli':
        if 'show run' not in command:
            command += ' | json'
        body = get_cli_body_ssh(command, execute_show([command], module), module)
    elif transport == 'nxapi':
        body = execute_show([command], module, command_type=command_type)
    return body
def apply_key_map(key_map, table):
    """Rename *table*'s keys via *key_map*, dropping unmapped keys.

    Truthy values are stringified; falsy values (0, '', None, ...) are
    kept as-is so callers can distinguish "unset" from "set".
    """
    renamed = {}
    for key, value in table.items():
        mapped = key_map.get(key)
        if not mapped:
            continue
        renamed[mapped] = str(value) if value else value
    return renamed
def get_snmp_host(host, module):
    """Return the configured SNMP settings for *host* as a dict, or {}.

    Parses 'show snmp host' (NX-API table format), renames the device's
    field names to this module's parameter names via apply_key_map, and
    splits the 'label: value' strings the device returns for source
    interface, vrf and vrf filters.
    """
    command = 'show snmp host'
    body = execute_show_command(command, module)
    # device field -> module parameter name
    host_map = {
        'port': 'udp',
        'version': 'version',
        'level': 'v3',
        'type': 'snmp_type',
        'secname': 'community'
    }
    resource = {}
    if body:
        try:
            resource_table = body[0]['TABLE_host']['ROW_host']
            # A single configured host comes back as a dict, several as a
            # list; normalize to a list.
            if isinstance(resource_table, dict):
                resource_table = [resource_table]
            for each in resource_table:
                key = str(each['host'])
                src = each.get('src_intf', None)
                host_resource = apply_key_map(host_map, each)
                if src:
                    # value looks like 'source interface: <intf>'
                    host_resource['src_intf'] = src.split(':')[1].strip()
                vrf_filt = each.get('TABLE_vrf_filters', None)
                if vrf_filt:
                    # 'vrf filters: a,b,c' -> ['a', 'b', 'c']
                    vrf_filter = vrf_filt['ROW_vrf_filters']['vrf_filter'].split(':')[1].split(',')
                    filters = [vrf.strip() for vrf in vrf_filter]
                    host_resource['vrf_filter'] = filters
                vrf = each.get('vrf', None)
                if vrf:
                    host_resource['vrf'] = vrf.split(':')[1].strip()
                resource[key] = host_resource
        except (KeyError, AttributeError, TypeError):
            # Unexpected table shape: return whatever was parsed so far.
            return resource
        find = resource.get(host, None)
        if find:
            # Strip stray whitespace from string values before returning.
            fix_find = {}
            for (key, value) in find.items():
                if isinstance(value, str):
                    fix_find[key] = value.strip()
                else:
                    fix_find[key] = value
            return fix_find
        else:
            return {}
    else:
        return {}
def remove_snmp_host(host, existing):
    """Build the CLI command list that removes *host* from the SNMP
    host table.

    NOTE: mutates existing['version'] in place from 'v2c'/'v3' to the
    CLI spelling '2c'/'3', matching the original behaviour relied on by
    the command format strings below.

    Returns a (possibly empty) list of commands.
    """
    commands = []
    # BUGFIX: 'command' was previously left unbound (NameError at the
    # 'if command' check) when the version was neither v3 nor v2c.
    command = None
    if existing['version'] == 'v3':
        existing['version'] = '3'
        # BUGFIX: single-line format string. The old code used a
        # backslash continuation inside the string literal, which
        # embedded the source indentation (a run of spaces) into the
        # generated CLI command.
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {v3} {community}'.format(host, **existing))
    elif existing['version'] == 'v2c':
        existing['version'] = '2c'
        command = ('no snmp-server host {0} {snmp_type} version '
                   '{version} {community}'.format(host, **existing))
    if command:
        commands.append(command)
    return commands
def config_snmp_host(delta, proposed, existing, module):
    """Build the CLI commands that apply *delta* (the proposed settings
    that differ from the device) for the host in *proposed*.

    Emits one base 'snmp-server host ...' command when type/version/
    v3-level/community changed, plus one command per changed option among
    vrf_filter, vrf, udp and src_intf.
    """
    host = proposed['snmp_host']
    commands = []
    snmp_type = delta.get('snmp_type', None)
    version = delta.get('version', None)
    level = delta.get('v3', None)
    community = delta.get('community', None)
    if any([snmp_type, version, level, community]):
        parts = ['snmp-server host {0}'.format(host)]
        # NOTE(review): get_snmp_host() stores the message type under the
        # key 'snmp_type', so existing.get('type') always misses here --
        # confirm whether 'snmp_type' was intended.
        type_token = snmp_type or existing.get('type')
        if type_token:
            parts.append(type_token)
        version = version or existing.get('version')
        if version:
            if version == 'v2c':
                short_version = '2c'
            elif version == 'v3':
                short_version = '3'
            parts.append('version {0}'.format(short_version))
        if level:
            parts.append(level or existing.get('v3'))
        if community:
            parts.append(community or existing.get('community'))
        commands.append(' '.join(parts))
    templates = {
        'vrf_filter': 'snmp-server host {0} filter-vrf {vrf_filter}',
        'vrf': 'snmp-server host {0} use-vrf {vrf}',
        'udp': 'snmp-server host {0} udp-port {udp}',
        'src_intf': 'snmp-server host {0} source-interface {src_intf}'
    }
    for key in delta:
        template = templates.get(key)
        if template:
            commands.append(template.format(host, **delta))
    return commands
def flatten_list(command_lists):
    """Flatten one level of nesting: list elements are spliced in place,
    non-list elements are kept as-is."""
    flattened = []
    for item in command_lists:
        flattened.extend(item if isinstance(item, list) else [item])
    return flattened
def main():
    """Ansible entry point: reconcile the desired SNMP host settings
    against the device state and push the minimal command set."""
    argument_spec = dict(
        snmp_host=dict(required=True, type='str'),
        community=dict(type='str'),
        udp=dict(type='str'),
        version=dict(choices=['v2c', 'v3'], default='v2c'),
        src_intf=dict(type='str'),
        v3=dict(choices=['noauth', 'auth', 'priv']),
        vrf_filter=dict(type='str'),
        vrf=dict(type='str'),
        snmp_type=dict(choices=['trap', 'inform'], default='trap'),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    module = get_network_module(argument_spec=argument_spec,
                                supports_check_mode=True)
    snmp_host = module.params['snmp_host']
    community = module.params['community']
    udp = module.params['udp']
    version = module.params['version']
    src_intf = module.params['src_intf']
    v3 = module.params['v3']
    vrf_filter = module.params['vrf_filter']
    vrf = module.params['vrf']
    snmp_type = module.params['snmp_type']
    state = module.params['state']
    # Cross-parameter validation: informs need v3, and the v3 level only
    # makes sense with version v3.
    if snmp_type == 'inform' and version != 'v3':
        module.fail_json(msg='inform requires snmp v3')
    if version == 'v2c' and v3:
        module.fail_json(msg='param: "v3" should not be used when '
                             'using version v2c')
    if not any([vrf_filter, vrf, udp, src_intf]):
        if not all([snmp_type, version, community]):
            module.fail_json(msg='when not configuring options like '
                                 'vrf_filter, vrf, udp, and src_intf,'
                                 'the following params are required: '
                                 'type, version, community')
    if version == 'v3' and v3 is None:
        module.fail_json(msg='when using version=v3, the param v3 '
                             '(options: auth, noauth, priv) is also required')
    existing = get_snmp_host(snmp_host, module)
    # existing returns the list of vrfs configured for a given host
    # checking to see if the proposed is in the list; the full list is
    # stashed in 'store' and restored before reporting results.
    store = existing.get('vrf_filter', None)
    if existing and store:
        if vrf_filter not in existing['vrf_filter']:
            existing['vrf_filter'] = None
        else:
            existing['vrf_filter'] = vrf_filter
    args = dict(
        community=community,
        snmp_host=snmp_host,
        udp=udp,
        version=version,
        src_intf=src_intf,
        vrf_filter=vrf_filter,
        v3=v3,
        vrf=vrf,
        snmp_type=snmp_type
    )
    # proposed = only the parameters the user actually supplied;
    # delta = those that differ from the device state.
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    delta = dict(set(proposed.items()).difference(existing.items()))
    changed = False
    commands = []
    end_state = existing
    if state == 'absent':
        if existing:
            command = remove_snmp_host(snmp_host, existing)
            commands.append(command)
    elif state == 'present':
        if delta:
            command = config_snmp_host(delta, proposed, existing, module)
            commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            # Check mode: report what would be sent, without executing.
            module.exit_json(changed=True, commands=cmds)
        else:
            changed = True
            execute_config_command(cmds, module)
            end_state = get_snmp_host(snmp_host, module)
            # execute_config_command's CLI fallback prepends 'configure';
            # drop it so the reported updates match the user-visible cmds.
            if 'configure' in cmds:
                cmds.pop(0)
    if store:
        # Restore the full vrf_filter list stashed above so the reported
        # 'existing' reflects the device's actual state.
        existing['vrf_filter'] = store
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['end_state'] = end_state
    results['updates'] = cmds
    results['changed'] = changed
    module.exit_json(**results)
if __name__ == "__main__":
    main()
| gpl-3.0 |
bigmonachus/Delaunay | site_scons/site_tools/scons_qt4/test/qrc/manual/sconstest-manual.py | 6 | 1557 | #!/usr/bin/env python
#
# Copyright (c) 2001-2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
"""
Basic test for the Qrc() builder, called explicitly.
"""
import TestSCons
# Stage the fixture project and drive a full SCons run: the image/
# directory supplies the sources, and the qt4 tool's __init__.py is
# copied into a local site_scons tree so the SConscript can load the
# tool without it being installed.
test = TestSCons.TestSCons()
test.dir_fixture("image")
test.file_fixture('SConscript')
test.file_fixture('../../qtenv.py')
test.file_fixture('../../../__init__.py','site_scons/site_tools/qt4/__init__.py')
test.run()
# pass_test() exits the process, marking the test as PASSED.
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| gpl-3.0 |
MER-GROUP/intellij-community | python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/context_amd64.py | 102 | 25137 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2014, Mario Vilas
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice,this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
CONTEXT structure for amd64.
"""
__revision__ = "$Id$"
from winappdbg.win32.defines import *
from winappdbg.win32.version import ARCH_AMD64
from winappdbg.win32 import context_i386
#==============================================================================
# This is used later on to calculate the list of exported symbols.
_all = None
_all = set(vars().keys())
#==============================================================================
#--- CONTEXT structures and constants -----------------------------------------
# The following values specify the type of access in the first parameter
# of the exception record when the exception code specifies an access
# violation.
EXCEPTION_READ_FAULT = 0 # exception caused by a read
EXCEPTION_WRITE_FAULT = 1 # exception caused by a write
EXCEPTION_EXECUTE_FAULT = 8 # exception caused by an instruction fetch
CONTEXT_AMD64 = 0x00100000
CONTEXT_CONTROL = (CONTEXT_AMD64 | long(0x1))
CONTEXT_INTEGER = (CONTEXT_AMD64 | long(0x2))
CONTEXT_SEGMENTS = (CONTEXT_AMD64 | long(0x4))
CONTEXT_FLOATING_POINT = (CONTEXT_AMD64 | long(0x8))
CONTEXT_DEBUG_REGISTERS = (CONTEXT_AMD64 | long(0x10))
CONTEXT_MMX_REGISTERS = CONTEXT_FLOATING_POINT
CONTEXT_FULL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_FLOATING_POINT)
CONTEXT_ALL = (CONTEXT_CONTROL | CONTEXT_INTEGER | CONTEXT_SEGMENTS | \
CONTEXT_FLOATING_POINT | CONTEXT_DEBUG_REGISTERS)
CONTEXT_EXCEPTION_ACTIVE = 0x8000000
CONTEXT_SERVICE_ACTIVE = 0x10000000
CONTEXT_EXCEPTION_REQUEST = 0x40000000
CONTEXT_EXCEPTION_REPORTING = 0x80000000
INITIAL_MXCSR = 0x1f80 # initial MXCSR value
INITIAL_FPCSR = 0x027f # initial FPCSR value
# typedef struct _XMM_SAVE_AREA32 {
# WORD ControlWord;
# WORD StatusWord;
# BYTE TagWord;
# BYTE Reserved1;
# WORD ErrorOpcode;
# DWORD ErrorOffset;
# WORD ErrorSelector;
# WORD Reserved2;
# DWORD DataOffset;
# WORD DataSelector;
# WORD Reserved3;
# DWORD MxCsr;
# DWORD MxCsr_Mask;
# M128A FloatRegisters[8];
# M128A XmmRegisters[16];
# BYTE Reserved4[96];
# } XMM_SAVE_AREA32, *PXMM_SAVE_AREA32;
class XMM_SAVE_AREA32(Structure):
    """ctypes mirror of the Win32 XMM_SAVE_AREA32 structure: the legacy
    FXSAVE area holding the x87/SSE state of an amd64 thread."""
    _pack_ = 1
    _fields_ = [
        ('ControlWord', WORD),
        ('StatusWord', WORD),
        ('TagWord', BYTE),
        ('Reserved1', BYTE),
        ('ErrorOpcode', WORD),
        ('ErrorOffset', DWORD),
        ('ErrorSelector', WORD),
        ('Reserved2', WORD),
        ('DataOffset', DWORD),
        ('DataSelector', WORD),
        ('Reserved3', WORD),
        ('MxCsr', DWORD),
        ('MxCsr_Mask', DWORD),
        ('FloatRegisters', M128A * 8),
        ('XmmRegisters', M128A * 16),
        ('Reserved4', BYTE * 96),
    ]

    def from_dict(self):
        """Not supported for this structure."""
        raise NotImplementedError()

    def to_dict(self):
        """Convert the structure into a plain Python dictionary.

        128-bit registers become Python integers, the reserved byte
        area becomes a tuple of one-character strings.
        """
        d = {}
        for field_name, _field_type in self._fields_:
            value = getattr(self, field_name)
            if field_name in ('FloatRegisters', 'XmmRegisters'):
                # NOTE(review): this accesses M128A.LowPart/.HighPart while
                # _CONTEXT_FLTSAVE_STRUCT.to_dict below uses .Low/.High —
                # one of the two presumably mismatches the M128A definition
                # in winappdbg.win32.defines; confirm against that module.
                d[field_name] = tuple(x.LowPart + (x.HighPart << 64)
                                      for x in value)
            elif field_name == 'Reserved4':
                d[field_name] = tuple(chr(x) for x in value)
            else:
                d[field_name] = value
        return d
# Size in bytes of the legacy FXSAVE area, and pointer aliases for it.
LEGACY_SAVE_AREA_LENGTH = sizeof(XMM_SAVE_AREA32)
PXMM_SAVE_AREA32 = ctypes.POINTER(XMM_SAVE_AREA32)
LPXMM_SAVE_AREA32 = PXMM_SAVE_AREA32
# //
# // Context Frame
# //
# // This frame has a several purposes: 1) it is used as an argument to
# // NtContinue, 2) is is used to constuct a call frame for APC delivery,
# // and 3) it is used in the user level thread creation routines.
# //
# //
# // The flags field within this record controls the contents of a CONTEXT
# // record.
# //
# // If the context record is used as an input parameter, then for each
# // portion of the context record controlled by a flag whose value is
# // set, it is assumed that that portion of the context record contains
# // valid context. If the context record is being used to modify a threads
# // context, then only that portion of the threads context is modified.
# //
# // If the context record is used as an output parameter to capture the
# // context of a thread, then only those portions of the thread's context
# // corresponding to set flags will be returned.
# //
# // CONTEXT_CONTROL specifies SegSs, Rsp, SegCs, Rip, and EFlags.
# //
# // CONTEXT_INTEGER specifies Rax, Rcx, Rdx, Rbx, Rbp, Rsi, Rdi, and R8-R15.
# //
# // CONTEXT_SEGMENTS specifies SegDs, SegEs, SegFs, and SegGs.
# //
# // CONTEXT_DEBUG_REGISTERS specifies Dr0-Dr3 and Dr6-Dr7.
# //
# // CONTEXT_MMX_REGISTERS specifies the floating point and extended registers
# // Mm0/St0-Mm7/St7 and Xmm0-Xmm15).
# //
#
# typedef struct DECLSPEC_ALIGN(16) _CONTEXT {
#
# //
# // Register parameter home addresses.
# //
# // N.B. These fields are for convience - they could be used to extend the
# // context record in the future.
# //
#
# DWORD64 P1Home;
# DWORD64 P2Home;
# DWORD64 P3Home;
# DWORD64 P4Home;
# DWORD64 P5Home;
# DWORD64 P6Home;
#
# //
# // Control flags.
# //
#
# DWORD ContextFlags;
# DWORD MxCsr;
#
# //
# // Segment Registers and processor flags.
# //
#
# WORD SegCs;
# WORD SegDs;
# WORD SegEs;
# WORD SegFs;
# WORD SegGs;
# WORD SegSs;
# DWORD EFlags;
#
# //
# // Debug registers
# //
#
# DWORD64 Dr0;
# DWORD64 Dr1;
# DWORD64 Dr2;
# DWORD64 Dr3;
# DWORD64 Dr6;
# DWORD64 Dr7;
#
# //
# // Integer registers.
# //
#
# DWORD64 Rax;
# DWORD64 Rcx;
# DWORD64 Rdx;
# DWORD64 Rbx;
# DWORD64 Rsp;
# DWORD64 Rbp;
# DWORD64 Rsi;
# DWORD64 Rdi;
# DWORD64 R8;
# DWORD64 R9;
# DWORD64 R10;
# DWORD64 R11;
# DWORD64 R12;
# DWORD64 R13;
# DWORD64 R14;
# DWORD64 R15;
#
# //
# // Program counter.
# //
#
# DWORD64 Rip;
#
# //
# // Floating point state.
# //
#
# union {
# XMM_SAVE_AREA32 FltSave;
# struct {
# M128A Header[2];
# M128A Legacy[8];
# M128A Xmm0;
# M128A Xmm1;
# M128A Xmm2;
# M128A Xmm3;
# M128A Xmm4;
# M128A Xmm5;
# M128A Xmm6;
# M128A Xmm7;
# M128A Xmm8;
# M128A Xmm9;
# M128A Xmm10;
# M128A Xmm11;
# M128A Xmm12;
# M128A Xmm13;
# M128A Xmm14;
# M128A Xmm15;
# };
# };
#
# //
# // Vector registers.
# //
#
# M128A VectorRegister[26];
# DWORD64 VectorControl;
#
# //
# // Special debug control registers.
# //
#
# DWORD64 DebugControl;
# DWORD64 LastBranchToRip;
# DWORD64 LastBranchFromRip;
# DWORD64 LastExceptionToRip;
# DWORD64 LastExceptionFromRip;
# } CONTEXT, *PCONTEXT;
class _CONTEXT_FLTSAVE_STRUCT(Structure):
    """Anonymous-struct view of the CONTEXT floating point save area,
    exposing the Xmm0-Xmm15 registers individually."""
    _fields_ = [
        ('Header', M128A * 2),
        ('Legacy', M128A * 8),
        ('Xmm0', M128A),
        ('Xmm1', M128A),
        ('Xmm2', M128A),
        ('Xmm3', M128A),
        ('Xmm4', M128A),
        ('Xmm5', M128A),
        ('Xmm6', M128A),
        ('Xmm7', M128A),
        ('Xmm8', M128A),
        ('Xmm9', M128A),
        ('Xmm10', M128A),
        ('Xmm11', M128A),
        ('Xmm12', M128A),
        ('Xmm13', M128A),
        ('Xmm14', M128A),
        ('Xmm15', M128A),
    ]

    def from_dict(self):
        """Not supported for this structure."""
        raise NotImplementedError()

    def to_dict(self):
        """Convert the structure into a plain Python dictionary, turning
        each 128-bit M128A value into a single Python integer."""
        as_int = lambda reg: reg.Low + (reg.High << 64)
        d = {}
        for field_name, _field_type in self._fields_:
            value = getattr(self, field_name)
            if field_name in ('Header', 'Legacy'):
                d[field_name] = tuple(as_int(reg) for reg in value)
            else:
                d[field_name] = as_int(value)
        return d
class _CONTEXT_FLTSAVE_UNION(Union):
    """Union over the two views of the CONTEXT floating point area:
    the legacy FXSAVE layout ('flt') and the per-register layout ('xmm')."""
    _fields_ = [
        ('flt', XMM_SAVE_AREA32),
        ('xmm', _CONTEXT_FLTSAVE_STRUCT),
    ]

    def from_dict(self):
        """Not supported for this union."""
        raise NotImplementedError()

    def to_dict(self):
        """Convert both views of the union into Python dictionaries."""
        return {
            'flt': self.flt.to_dict(),
            'xmm': self.xmm.to_dict(),
        }
class CONTEXT(Structure):
    """ctypes mirror of the amd64 CONTEXT structure (winnt.h).

    Used as the in/out parameter of GetThreadContext/SetThreadContext.
    The ContextFlags bitmask selects which register groups are valid;
    from_dict()/to_dict() convert between this structure and a plain
    Context dictionary, honoring that bitmask.
    """
    arch = ARCH_AMD64

    _pack_ = 16
    _fields_ = [
        # Register parameter home addresses.
        ('P1Home', DWORD64),
        ('P2Home', DWORD64),
        ('P3Home', DWORD64),
        ('P4Home', DWORD64),
        ('P5Home', DWORD64),
        ('P6Home', DWORD64),
        # Control flags.
        ('ContextFlags', DWORD),
        ('MxCsr', DWORD),
        # Segment Registers and processor flags.
        ('SegCs', WORD),
        ('SegDs', WORD),
        ('SegEs', WORD),
        ('SegFs', WORD),
        ('SegGs', WORD),
        ('SegSs', WORD),
        ('EFlags', DWORD),
        # Debug registers.
        ('Dr0', DWORD64),
        ('Dr1', DWORD64),
        ('Dr2', DWORD64),
        ('Dr3', DWORD64),
        ('Dr6', DWORD64),
        ('Dr7', DWORD64),
        # Integer registers.
        ('Rax', DWORD64),
        ('Rcx', DWORD64),
        ('Rdx', DWORD64),
        ('Rbx', DWORD64),
        ('Rsp', DWORD64),
        ('Rbp', DWORD64),
        ('Rsi', DWORD64),
        ('Rdi', DWORD64),
        ('R8', DWORD64),
        ('R9', DWORD64),
        ('R10', DWORD64),
        ('R11', DWORD64),
        ('R12', DWORD64),
        ('R13', DWORD64),
        ('R14', DWORD64),
        ('R15', DWORD64),
        # Program counter.
        ('Rip', DWORD64),
        # Floating point state.
        ('FltSave', _CONTEXT_FLTSAVE_UNION),
        # Vector registers.
        ('VectorRegister', M128A * 26),
        ('VectorControl', DWORD64),
        # Special debug control registers.
        ('DebugControl', DWORD64),
        ('LastBranchToRip', DWORD64),
        ('LastBranchFromRip', DWORD64),
        ('LastExceptionToRip', DWORD64),
        ('LastExceptionFromRip', DWORD64),
    ]

    # Register names grouped by the ContextFlags bit that guards them.
    _others = ('P1Home', 'P2Home', 'P3Home', 'P4Home', 'P5Home', 'P6Home', \
               'MxCsr', 'VectorRegister', 'VectorControl')
    _control = ('SegSs', 'Rsp', 'SegCs', 'Rip', 'EFlags')
    _integer = ('Rax', 'Rcx', 'Rdx', 'Rbx', 'Rsp', 'Rbp', 'Rsi', 'Rdi', \
                'R8', 'R9', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15')
    _segments = ('SegDs', 'SegEs', 'SegFs', 'SegGs')
    _debug = ('Dr0', 'Dr1', 'Dr2', 'Dr3', 'Dr6', 'Dr7', \
              'DebugControl', 'LastBranchToRip', 'LastBranchFromRip', \
              'LastExceptionToRip', 'LastExceptionFromRip')
    _mmx = ('Xmm0', 'Xmm1', 'Xmm2', 'Xmm3', 'Xmm4', 'Xmm5', 'Xmm6', 'Xmm7', \
            'Xmm8', 'Xmm9', 'Xmm10', 'Xmm11', 'Xmm12', 'Xmm13', 'Xmm14', 'Xmm15')

    # Mask extracting the 64 low-order bits of a 128-bit register value.
    _LOW64_MASK = 0xFFFFFFFFFFFFFFFF

    # XXX TODO
    # Convert VectorRegister and Xmm0-Xmm15 to pure Python types!

    @classmethod
    def from_dict(cls, ctx):
        'Instance a new structure from a Python native type.'
        ctx = Context(ctx)
        s = cls()
        ContextFlags = ctx['ContextFlags']
        s.ContextFlags = ContextFlags
        for key in cls._others:
            if key != 'VectorRegister':
                setattr(s, key, ctx[key])
            else:
                # Rebuild the M128A array from plain 128-bit integers.
                w = ctx[key]
                v = (M128A * len(w))()
                for i, value in enumerate(w):
                    y = M128A()
                    y.High = value >> 64
                    # BUG FIX: the low qword is the 64 low-order bits.
                    # The original computed "value - (value >> 64)", which
                    # corrupts the register on round trips.
                    y.Low = value & cls._LOW64_MASK
                    v[i] = y
                setattr(s, key, v)
        if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
            for key in cls._control:
                setattr(s, key, ctx[key])
        if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
            for key in cls._integer:
                setattr(s, key, ctx[key])
        if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
            for key in cls._segments:
                setattr(s, key, ctx[key])
        if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
            for key in cls._debug:
                setattr(s, key, ctx[key])
        if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
            xmm = s.FltSave.xmm
            for key in cls._mmx:
                # BUG FIX: the original read the stale loop variable 'x'
                # left over from the VectorRegister loop above, so every
                # Xmm register was set from the wrong value.
                value = ctx[key]
                y = M128A()
                y.High = value >> 64
                y.Low = value & cls._LOW64_MASK
                setattr(xmm, key, y)
        return s

    def to_dict(self):
        'Convert a structure into a Python dictionary.'
        ctx = Context()
        ContextFlags = self.ContextFlags
        ctx['ContextFlags'] = ContextFlags
        for key in self._others:
            if key != 'VectorRegister':
                ctx[key] = getattr(self, key)
            else:
                # Flatten each M128A into a single 128-bit integer.
                ctx[key] = tuple([ (x.Low + (x.High << 64)) for x in getattr(self, key) ])
        if (ContextFlags & CONTEXT_CONTROL) == CONTEXT_CONTROL:
            for key in self._control:
                ctx[key] = getattr(self, key)
        if (ContextFlags & CONTEXT_INTEGER) == CONTEXT_INTEGER:
            for key in self._integer:
                ctx[key] = getattr(self, key)
        if (ContextFlags & CONTEXT_SEGMENTS) == CONTEXT_SEGMENTS:
            for key in self._segments:
                ctx[key] = getattr(self, key)
        if (ContextFlags & CONTEXT_DEBUG_REGISTERS) == CONTEXT_DEBUG_REGISTERS:
            for key in self._debug:
                ctx[key] = getattr(self, key)
        if (ContextFlags & CONTEXT_MMX_REGISTERS) == CONTEXT_MMX_REGISTERS:
            xmm = self.FltSave.xmm.to_dict()
            for key in self._mmx:
                ctx[key] = xmm.get(key)
        return ctx
# Pointer aliases for the CONTEXT structure.
PCONTEXT = ctypes.POINTER(CONTEXT)
LPCONTEXT = PCONTEXT
class Context(dict):
    """
    Register context dictionary for the amd64 architecture.

    Exposes the program counter, stack pointer and frame pointer through
    the architecture-independent properties ``pc``, ``sp`` and ``fp``.
    """
    arch = CONTEXT.arch

    @property
    def pc(self):
        """Program counter register (Rip)."""
        return self['Rip']

    @pc.setter
    def pc(self, value):
        self['Rip'] = value

    @property
    def sp(self):
        """Stack pointer register (Rsp)."""
        return self['Rsp']

    @sp.setter
    def sp(self, value):
        self['Rsp'] = value

    @property
    def fp(self):
        """Frame pointer register (Rbp)."""
        return self['Rbp']

    @fp.setter
    def fp(self, value):
        self['Rbp'] = value
#--- LDT_ENTRY structure ------------------------------------------------------
# typedef struct _LDT_ENTRY {
# WORD LimitLow;
# WORD BaseLow;
# union {
# struct {
# BYTE BaseMid;
# BYTE Flags1;
# BYTE Flags2;
# BYTE BaseHi;
# } Bytes;
# struct {
# DWORD BaseMid :8;
# DWORD Type :5;
# DWORD Dpl :2;
# DWORD Pres :1;
# DWORD LimitHi :4;
# DWORD Sys :1;
# DWORD Reserved_0 :1;
# DWORD Default_Big :1;
# DWORD Granularity :1;
# DWORD BaseHi :8;
# } Bits;
# } HighWord;
# } LDT_ENTRY,
# *PLDT_ENTRY;
class _LDT_ENTRY_BYTES_(Structure):
    # Byte-wise view of the high dword of an LDT entry.
    _pack_ = 1
    _fields_ = [
        ('BaseMid', BYTE),
        ('Flags1', BYTE),
        ('Flags2', BYTE),
        ('BaseHi', BYTE),
    ]
class _LDT_ENTRY_BITS_(Structure):
    # Bit-field view of the high dword of an LDT entry
    # (segment descriptor attribute bits).
    _pack_ = 1
    _fields_ = [
        ('BaseMid', DWORD, 8),
        ('Type', DWORD, 5),
        ('Dpl', DWORD, 2),
        ('Pres', DWORD, 1),
        ('LimitHi', DWORD, 4),
        ('Sys', DWORD, 1),
        ('Reserved_0', DWORD, 1),
        ('Default_Big', DWORD, 1),
        ('Granularity', DWORD, 1),
        ('BaseHi', DWORD, 8),
    ]
class _LDT_ENTRY_HIGHWORD_(Union):
    # Union of the byte-wise and bit-field views of the high dword.
    _pack_ = 1
    _fields_ = [
        ('Bytes', _LDT_ENTRY_BYTES_),
        ('Bits', _LDT_ENTRY_BITS_),
    ]
class LDT_ENTRY(Structure):
    # ctypes mirror of the Win32 LDT_ENTRY structure (a segment
    # descriptor), as returned by GetThreadSelectorEntry.
    _pack_ = 1
    _fields_ = [
        ('LimitLow', WORD),
        ('BaseLow', WORD),
        ('HighWord', _LDT_ENTRY_HIGHWORD_),
    ]
# Pointer aliases for the LDT_ENTRY structure.
PLDT_ENTRY = POINTER(LDT_ENTRY)
LPLDT_ENTRY = PLDT_ENTRY
#--- WOW64 CONTEXT structure and constants ------------------------------------
# Value of SegCs in a Wow64 thread when running in 32 bits mode
WOW64_CS32 = 0x23
# WOW64 ContextFlags bits mirror the i386 CONTEXT_* flags; long() keeps
# the masks as Python 2 longs, matching the rest of this module.
WOW64_CONTEXT_i386 = long(0x00010000)
WOW64_CONTEXT_i486 = long(0x00010000)
WOW64_CONTEXT_CONTROL = (WOW64_CONTEXT_i386 | long(0x00000001))
WOW64_CONTEXT_INTEGER = (WOW64_CONTEXT_i386 | long(0x00000002))
WOW64_CONTEXT_SEGMENTS = (WOW64_CONTEXT_i386 | long(0x00000004))
WOW64_CONTEXT_FLOATING_POINT = (WOW64_CONTEXT_i386 | long(0x00000008))
WOW64_CONTEXT_DEBUG_REGISTERS = (WOW64_CONTEXT_i386 | long(0x00000010))
WOW64_CONTEXT_EXTENDED_REGISTERS = (WOW64_CONTEXT_i386 | long(0x00000020))
WOW64_CONTEXT_FULL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS)
WOW64_CONTEXT_ALL = (WOW64_CONTEXT_CONTROL | WOW64_CONTEXT_INTEGER | WOW64_CONTEXT_SEGMENTS | WOW64_CONTEXT_FLOATING_POINT | WOW64_CONTEXT_DEBUG_REGISTERS | WOW64_CONTEXT_EXTENDED_REGISTERS)
WOW64_SIZE_OF_80387_REGISTERS = 80
WOW64_MAXIMUM_SUPPORTED_EXTENSION = 512
# The WOW64 structures are byte-identical to their i386 counterparts;
# subclassing just gives them the Win32 WOW64_* names.
class WOW64_FLOATING_SAVE_AREA (context_i386.FLOATING_SAVE_AREA):
    pass
class WOW64_CONTEXT (context_i386.CONTEXT):
    pass
class WOW64_LDT_ENTRY (context_i386.LDT_ENTRY):
    pass
# Pointer aliases for the WOW64 structures.
PWOW64_FLOATING_SAVE_AREA = POINTER(WOW64_FLOATING_SAVE_AREA)
PWOW64_CONTEXT = POINTER(WOW64_CONTEXT)
PWOW64_LDT_ENTRY = POINTER(WOW64_LDT_ENTRY)
###############################################################################
# BOOL WINAPI GetThreadSelectorEntry(
# __in HANDLE hThread,
# __in DWORD dwSelector,
# __out LPLDT_ENTRY lpSelectorEntry
# );
def GetThreadSelectorEntry(hThread, dwSelector):
    """Wrap kernel32!GetThreadSelectorEntry.

    Returns the LDT_ENTRY (segment descriptor) for the given selector
    of the given thread.  Raises WindowsError on failure.
    """
    _api = windll.kernel32.GetThreadSelectorEntry
    _api.argtypes = [HANDLE, DWORD, LPLDT_ENTRY]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    entry = LDT_ENTRY()
    _api(hThread, dwSelector, byref(entry))
    return entry
# BOOL WINAPI GetThreadContext(
# __in HANDLE hThread,
# __inout LPCONTEXT lpContext
# );
def GetThreadContext(hThread, ContextFlags = None, raw = False):
    """Wrap kernel32!GetThreadContext.

    Retrieves the context of the given thread.  By default all register
    groups are requested and a Context dictionary is returned; pass
    raw=True to get the CONTEXT ctypes structure instead.
    """
    _api = windll.kernel32.GetThreadContext
    _api.argtypes = [HANDLE, LPCONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    if ContextFlags is None:
        ContextFlags = CONTEXT_ALL | CONTEXT_AMD64
    lpContext = CONTEXT()
    lpContext.ContextFlags = ContextFlags
    _api(hThread, byref(lpContext))
    if raw:
        return lpContext
    return lpContext.to_dict()
# BOOL WINAPI SetThreadContext(
# __in HANDLE hThread,
# __in const CONTEXT* lpContext
# );
def SetThreadContext(hThread, lpContext):
    """Wrap kernel32!SetThreadContext.

    Sets the context of the given thread.  Accepts either a CONTEXT
    structure or a Context dictionary (converted automatically).
    """
    _api = windll.kernel32.SetThreadContext
    _api.argtypes = [HANDLE, LPCONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    if isinstance(lpContext, dict):
        lpContext = CONTEXT.from_dict(lpContext)
    _api(hThread, byref(lpContext))
# BOOL Wow64GetThreadSelectorEntry(
# __in HANDLE hThread,
# __in DWORD dwSelector,
# __out PWOW64_LDT_ENTRY lpSelectorEntry
# );
def Wow64GetThreadSelectorEntry(hThread, dwSelector):
    """Wrap kernel32!Wow64GetThreadSelectorEntry.

    Returns the WOW64_LDT_ENTRY for the given selector of a Wow64
    (32-bit) thread.  Raises WindowsError on failure.
    """
    _api = windll.kernel32.Wow64GetThreadSelectorEntry
    _api.argtypes = [HANDLE, DWORD, PWOW64_LDT_ENTRY]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    entry = WOW64_LDT_ENTRY()
    _api(hThread, dwSelector, byref(entry))
    return entry
# DWORD WINAPI Wow64ResumeThread(
# __in HANDLE hThread
# );
def Wow64ResumeThread(hThread):
    """Wrap kernel32!Wow64ResumeThread.

    Resumes a Wow64 thread and returns its previous suspend count.
    The API signals failure by returning (DWORD)-1.
    """
    _api = windll.kernel32.Wow64ResumeThread
    _api.argtypes = [HANDLE]
    _api.restype = DWORD
    suspend_count = _api(hThread)
    if suspend_count == DWORD(-1).value:
        raise ctypes.WinError()
    return suspend_count
# DWORD WINAPI Wow64SuspendThread(
# __in HANDLE hThread
# );
def Wow64SuspendThread(hThread):
    """Wrap kernel32!Wow64SuspendThread.

    Suspends a Wow64 thread and returns its previous suspend count.
    The API signals failure by returning (DWORD)-1.
    """
    _api = windll.kernel32.Wow64SuspendThread
    _api.argtypes = [HANDLE]
    _api.restype = DWORD
    suspend_count = _api(hThread)
    if suspend_count == DWORD(-1).value:
        raise ctypes.WinError()
    return suspend_count
# XXX TODO Use this http://www.nynaeve.net/Code/GetThreadWow64Context.cpp
# Also see http://www.woodmann.com/forum/archive/index.php/t-11162.html
# BOOL WINAPI Wow64GetThreadContext(
# __in HANDLE hThread,
# __inout PWOW64_CONTEXT lpContext
# );
def Wow64GetThreadContext(hThread, ContextFlags = None):
    """Wrap kernel32!Wow64GetThreadContext.

    Retrieves the 32-bit context of a Wow64 thread as a dictionary.
    By default all WOW64 register groups are requested.
    """
    _api = windll.kernel32.Wow64GetThreadContext
    _api.argtypes = [HANDLE, PWOW64_CONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    # XXX doesn't exist in XP 64 bits
    lpContext = WOW64_CONTEXT()
    if ContextFlags is None:
        lpContext.ContextFlags = WOW64_CONTEXT_ALL | WOW64_CONTEXT_i386
    else:
        lpContext.ContextFlags = ContextFlags
    _api(hThread, byref(lpContext))
    return lpContext.to_dict()
# BOOL WINAPI Wow64SetThreadContext(
# __in HANDLE hThread,
# __in const WOW64_CONTEXT *lpContext
# );
def Wow64SetThreadContext(hThread, lpContext):
    """Wrap kernel32!Wow64SetThreadContext.

    Sets the 32-bit context of a Wow64 thread.  Accepts either a
    WOW64_CONTEXT structure or a dictionary (converted automatically).
    """
    _api = windll.kernel32.Wow64SetThreadContext
    _api.argtypes = [HANDLE, PWOW64_CONTEXT]
    _api.restype = bool
    _api.errcheck = RaiseIfZero
    # XXX doesn't exist in XP 64 bits
    if isinstance(lpContext, dict):
        lpContext = WOW64_CONTEXT.from_dict(lpContext)
    _api(hThread, byref(lpContext))
#==============================================================================
# This calculates the list of exported symbols.
# _all was snapshotted near the top of the module; the difference is
# everything defined in between, minus private names.
_all = set(vars().keys()).difference(_all)
__all__ = [_x for _x in _all if not _x.startswith('_')]
__all__.sort()
#==============================================================================
| apache-2.0 |
HewlettPackard/oneview-ansible | library/oneview_managed_san_facts.py | 1 | 5617 | #!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2019) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: oneview_managed_san_facts
short_description: Retrieve facts about the OneView Managed SANs.
description:
- Retrieve facts about the OneView Managed SANs.
version_added: "2.3"
requirements:
- "python >= 2.7.9"
- "hpeOneView >= 5.4.0"
author:
- "Mariana Kreisig (@marikrg)"
- "Abilio Parada (@abiliogp)"
options:
name:
description:
- Name of the Managed SAN.
required: false
options:
description:
- "List with options to gather additional facts about Managed SAN.
Options allowed:
C(endpoints) gets the list of endpoints in the SAN identified by name.
C(wwn) gets the list of Managed SANs associated with an informed WWN C(locate)."
required: false
params:
description:
- List of params to delimit, filter and sort the list of resources.
- "params allowed:
C(start): The first item to return, using 0-based indexing.
C(count): The number of resources to return.
C(query): A general query string to narrow the list of resources returned.
C(sort): The sort order of the returned data set."
required: false
extends_documentation_fragment:
- oneview
'''
EXAMPLES = '''
- name: Gather facts about all Managed SANs
oneview_managed_san_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
delegate_to: localhost
- debug: var=managed_sans
- name: Gather paginated, filtered and sorted facts about Managed SANs
oneview_managed_san_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
params:
start: 0
count: 3
sort: name:ascending
query: imported eq true
delegate_to: localhost
- debug: var=managed_sans
- name: Gather facts about a Managed SAN by name
oneview_managed_san_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
name: "SAN1_0"
delegate_to: localhost
- debug: var=managed_sans
- name: Gather facts about the endpoints in the SAN identified by name
oneview_managed_san_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
name: "SAN1_0"
options:
- endpoints
delegate_to: localhost
- debug: var=managed_sans
- debug: var=managed_san_endpoints
- name: Gather facts about Managed SANs for an associated WWN
oneview_managed_san_facts:
hostname: 172.16.101.48
username: administrator
password: my_password
api_version: 800
options:
- wwn:
locate: "20:00:4A:2B:21:E0:00:01"
delegate_to: localhost
- debug: var=wwn_associated_sans
'''
RETURN = '''
managed_sans:
description: The list of Managed SANs.
returned: Always, but can be null.
type: list
managed_san_endpoints:
description: The list of endpoints in the SAN identified by name.
returned: When requested, but can be null.
type: dict
wwn_associated_sans:
description: The list of associations between provided WWNs and the SANs.
returned: When requested, but can be null.
type: dict
'''
from ansible.module_utils.oneview import OneViewModule
class ManagedSanFactsModule(OneViewModule):
    """Ansible fact module: retrieve facts about OneView Managed SANs."""

    argument_spec = dict(
        name=dict(required=False, type='str'),
        options=dict(required=False, type='list'),
        params=dict(required=False, type='dict')
    )

    def __init__(self):
        super(ManagedSanFactsModule, self).__init__(additional_arg_spec=self.argument_spec)
        self.set_resource_object(self.oneview_client.managed_sans)

    def execute_module(self):
        """Gather the requested facts and return them unchanged-flagged."""
        facts = {}
        name = self.module.params['name']
        if not name:
            # No name given: list all Managed SANs (honoring pagination params).
            facts['managed_sans'] = self.resource_client.get_all(**self.facts_params)
        else:
            if self.current_resource:
                facts['managed_sans'] = [self.current_resource.data]
            # Endpoints are only fetched when the named SAN was found.
            if facts.get('managed_sans') and 'endpoints' in self.options:
                named_san = facts['managed_sans'][0]
                if named_san:
                    facts['managed_san_endpoints'] = self.current_resource.get_endpoints()
        if self.options:
            if self.options.get('wwn'):
                wwn_option = self.__get_sub_options(self.options['wwn'])
                facts['wwn_associated_sans'] = self.resource_client.get_wwn(wwn_option['locate'])
        return dict(changed=False, ansible_facts=facts)

    def __get_sub_options(self, option):
        """Return *option* when it is already a dict, else an empty dict."""
        if isinstance(option, dict):
            return option
        return {}
def main():
    """Module entry point: instantiate the fact module and run it."""
    ManagedSanFactsModule().run()


if __name__ == '__main__':
    main()
| apache-2.0 |
python-rope/ropevim | ropevim.py | 1 | 18331 | """ropevim, a vim mode for using rope refactoring library"""
from __future__ import print_function
import os
import re
import sys
import tempfile
import ropemode.decorators
import ropemode.environment
import ropemode.interface
import vim
# Vim command prefix used to run Python code from Vim script; kept in a
# variable so another interpreter (e.g. pypy) could be substituted.
python_cmd = 'python3'
class VimUtils(ropemode.environment.Environment):
    """ropemode Environment implementation backed by Vim's ``vim`` module.

    Every user interaction (prompts, messages) and every buffer access
    goes through ``vim.eval``/``vim.command`` or ``vim.current``.
    """

    def ask(self, prompt, default=None, starting=None):
        """Prompt for free-form input; an empty answer yields *default*."""
        if starting is None:
            starting = ''
        if default is not None:
            prompt = prompt + ('[%s] ' % default)
        result = call('input("%s", "%s")' % (prompt, starting))
        if default is not None and result == '':
            return default
        return result

    def ask_values(self, prompt, values, default=None,
                   starting=None, show_values=None):
        """Prompt for one of *values*; a numeric answer indexes the list."""
        if show_values or (show_values is None and len(values) < 14):
            self._print_values(values)
        if default is not None:
            prompt = prompt + ('[%s] ' % default)
        starting = starting or ''
        # The module-level _completer feeds Vim's customlist completion.
        _completer.values = values
        answer = call('input("%s", "%s", "customlist,RopeValueCompleter")' %
                      (prompt, starting))
        if answer is None:
            if 'cancel' in values:
                return 'cancel'
            return
        if default is not None and not answer:
            return default
        if answer.isdigit() and 0 <= int(answer) < len(values):
            return values[int(answer)]
        return answer

    def _print_values(self, values):
        # Show a numbered menu so the user may answer with an index.
        numbered = []
        for index, value in enumerate(values):
            numbered.append('%s. %s' % (index, str(value)))
        echo('\n'.join(numbered) + '\n')

    def ask_directory(self, prompt, default=None, starting=None):
        """Prompt for a directory with Vim's built-in 'dir' completion."""
        return call('input("%s", ".", "dir")' % prompt)

    def ask_completion(self, prompt, values, starting=None):
        """Offer code-completion proposals.

        In insert mode with g:ropevim_vim_completion set, this feeds
        Vim's popup menu via complete(); otherwise it falls back to a
        plain prompt through ask_values().
        """
        if self.get('vim_completion') and 'i' in call('mode()'):
            if not self.get('extended_complete', False):
                # NOTE(review): _completion_text is not defined in this
                # class — presumably inherited from
                # ropemode.environment.Environment; confirm there.
                proposals = u','.join(u"'%s'" % self._completion_text(proposal)
                                      for proposal in values)
            else:
                proposals = u','.join(self._extended_completion(proposal)
                                      for proposal in values)
            col = int(call('col(".")'))
            if starting:
                col -= len(starting)
            command = u'call complete(%s, [%s])' % (col, proposals)
            vim.command(command.encode(self._get_encoding()))
            return None
        return self.ask_values(prompt, values, starting=starting,
                               show_values=False)

    def message(self, message):
        """Show a message to the user."""
        echo(message)

    def yes_or_no(self, prompt):
        """Ask a yes/no question; accepts yes/y/no/n in any case."""
        return self.ask_values(prompt, ['yes', 'y', 'no', 'n']).lower() \
            in ['yes', 'y']

    def y_or_n(self, prompt):
        """Alias of yes_or_no() (ropemode expects both entry points)."""
        return self.yes_or_no(prompt)

    def get(self, name, default=None):
        """Read the g:ropevim_<name> Vim variable, or *default* if unset.

        Numeric-looking string values are converted to int.
        """
        vimname = 'g:ropevim_%s' % name
        if str(vim.eval('exists("%s")' % vimname)) == '0':
            return default
        result = vim.eval(vimname)
        if isinstance(result, str) and result.isdigit():
            return int(result)
        return result

    def get_offset(self):
        """Return the cursor position as a character offset in the buffer."""
        result = self._position_to_offset(*self.cursor)
        return result

    def _get_encoding(self):
        # Vim's active buffer encoding (&encoding option).
        return vim.eval('&encoding')

    def _encode_line(self, line):
        return line.encode(self._get_encoding())

    def _decode_line(self, line):
        # Under Python 3 buffer lines may already be str (no decode()).
        if hasattr(line, 'decode'):
            return line.decode(self._get_encoding())
        else:
            return line

    def _position_to_offset(self, lineno, colno):
        # 1-based (line, col) -> 0-based character offset; each preceding
        # line contributes its length plus one newline.
        result = min(colno, len(self.buffer[lineno - 1]) + 1)
        for line in self.buffer[:lineno-1]:
            line = self._decode_line(line)
            result += len(line) + 1
        return result

    def get_text(self):
        """Return the whole buffer contents as unicode, newline-terminated."""
        return self._decode_line('\n'.join(self.buffer)) + u'\n'

    def get_region(self):
        """Return the last visual selection as (start, end) offsets."""
        beg_mark = self.buffer.mark('<')
        end_mark = self.buffer.mark('>')
        if beg_mark and end_mark:
            start = self._position_to_offset(*beg_mark)
            end = self._position_to_offset(*end_mark)
            return start, end
        else:
            return 0, 0

    @property
    def buffer(self):
        # The currently edited Vim buffer.
        return vim.current.buffer

    def _get_cursor(self):
        # Vim reports a byte column; convert to a character column.
        lineno, col = vim.current.window.cursor
        line = self._decode_line(vim.current.line[:col])
        col = len(line)
        return (lineno, col)

    def _set_cursor(self, cursor):
        # Convert the character column back into a byte column for Vim.
        lineno, col = cursor
        line = self._decode_line(vim.current.line)
        line = self._encode_line(line[:col])
        col = len(line)
        vim.current.window.cursor = (lineno, col)

    # Cursor position as a 1-based (line, character-column) pair.
    cursor = property(_get_cursor, _set_cursor)

    def filename(self):
        """Return the current buffer's file name (may be None)."""
        return self.buffer.name

    def is_modified(self):
        """Return Vim's &modified flag for the current buffer."""
        return vim.eval('&modified')

    def goto_line(self, lineno):
        """Move the cursor to the beginning of the given line."""
        self.cursor = (lineno, 0)

    def insert_line(self, line, lineno):
        """Insert *line* before the given 1-based line number."""
        self.buffer[lineno - 1:lineno - 1] = [line]

    def insert(self, text):
        """Insert *text* at the cursor and advance the cursor past it."""
        lineno, colno = self.cursor
        line = self.buffer[lineno - 1]
        self.buffer[lineno - 1] = line[:colno] + text + line[colno:]
        self.cursor = (lineno, colno + len(text))

    def delete(self, start, end):
        """Delete the character range [start, end) from the buffer.

        NOTE(review): only same-line ranges are handled; a range that
        spans lines is silently ignored — presumably sufficient for
        ropemode's callers, but confirm.
        """
        lineno1, colno1 = self._offset_to_position(start - 1)
        lineno2, colno2 = self._offset_to_position(end - 1)
        lineno, colno = self.cursor
        if lineno1 == lineno2:
            line = self.buffer[lineno1 - 1]
            self.buffer[lineno1 - 1] = line[:colno1] + line[colno2:]
            if lineno == lineno1 and colno >= colno1:
                diff = colno2 - colno1
                self.cursor = (lineno, max(0, colno - diff))

    def _offset_to_position(self, offset):
        # 0-based character offset -> 1-based (line, col).
        text = self.get_text()
        lineno = text.count('\n', 0, offset) + 1
        try:
            colno = offset - text.rindex('\n', 0, offset) - 1
        except ValueError:
            # No newline before the offset: it is on the first line.
            colno = offset
        return lineno, colno

    def filenames(self):
        """Return the file names of all named buffers."""
        result = []
        for buffer in vim.buffers:
            if buffer.name:
                result.append(buffer.name)
        return result

    def save_files(self, filenames):
        """Save modified buffers (':wall' saves all, ignoring *filenames*)."""
        vim.command('wall')

    def reload_files(self, filenames, moves={}):
        """Re-read the given files from disk, following renames in *moves*.

        NOTE(review): mutable default argument; harmless here because
        'moves' is only read, never mutated.
        """
        initial = self.filename()
        for filename in filenames:
            self.find_file(moves.get(filename, filename), force=True)
        if initial:
            self.find_file(initial)

    def _open_file(self, filename, new=False):
        # TODO deprecated ... for now it is just an equivalent to
        # g:ropevim_goto_def_newwin == 'tabnew'
        if int(vim.eval('g:ropevim_open_files_in_tabs')):
            new = 'tabnew'
        if new in ('new', 'vnew', 'tabnew'):
            vim.command(new)
        vim.command('edit! %s' % filename)

    @staticmethod
    def _samefile(file1, file2):
        # Breaks under Jython and other platforms, but I guess it should
        # be enough.
        if os.name == 'posix':
            return os.path.samefile(file1, file2)
        elif os.name == 'nt':
            # it is a way more complicated, the following does not deal
            # with hard links on Windows
            # for better discussion see
            # http://stackoverflow.com/q/8892831/164233
            return os.path.normcase(os.path.normpath(file1)) == \
                os.path.normcase(os.path.normpath(file2))

    def find_file(self, filename, readonly=False, other=False, force=False):
        """
        Originally coming from Emacs, so the definition is the same as
        the Emacs Lisp function find-file ... "
        (find-file FILENAME &optional WILDCARDS)
        Edit file FILENAME.
        Switch to a buffer visiting file FILENAME,
        creating one if none already exists.
        """
        if filename not in self.filenames() or force:
            self._open_file(filename, new=other)
        else:
            # The file is already open somewhere: jump to its window.
            found = False
            for tab in vim.tabpages:
                for win in tab.windows:
                    if self._samefile(win.buffer.name, filename):
                        vim.current.tabpage = tab
                        vim.current.window = win
                        vim.current.buffer = win.buffer
                        found = True
                        break
            if not found:
                self._open_file(filename, new=other)
        if readonly:
            vim.command('set nomodifiable')

    def create_progress(self, name):
        """Return a progress reporter for the named long-running task."""
        return VimProgress(name)

    def current_word(self):
        """Return the word under the cursor."""
        return vim.eval('expand("<cword>")')

    def push_mark(self):
        """Record the current position in the ` mark (jump-back point)."""
        vim.command('mark `')

    def prefix_value(self, prefix):
        """Pass the prefix argument through unchanged."""
        return prefix

    def show_occurrences(self, locations):
        """Display occurrence locations in the quickfix window."""
        self._quickfixdefs(locations)

    def _quickfixdefs(self, locations):
        # Load the locations into the quickfix list via a temporary
        # errorfile, restoring the user's errorformat/errorfile after.
        # NOTE(review): joining gettempdir() with mktemp() is redundant
        # (mktemp already returns an absolute path), and mktemp itself
        # is race-prone; harmless here but worth revisiting.
        filename = os.path.join(tempfile.gettempdir(), tempfile.mktemp())
        try:
            self._writedefs(locations, filename)
            vim.command('let old_errorfile = &errorfile')
            vim.command('let old_errorformat = &errorformat')
            vim.command('set errorformat=%f:%l:\ %m')
            vim.command('cfile ' + filename)
            vim.command('let &errorformat = old_errorformat')
            vim.command('let &errorfile = old_errorfile')
        finally:
            os.remove(filename)

    def _writedefs(self, locations, filename):
        # Write the locations in errorformat ("file:line: message") form.
        tofile = open(filename, 'w')
        try:
            for location in locations:
                # FIXME seems suspicious lineno = location.lineno
                err = '%s:%d: %s %s\n' % (
                    os.path.relpath(location.filename), location.lineno,
                    location.note, location.line_content)
                echo(err)
                tofile.write(err)
        finally:
            tofile.close()

    def show_doc(self, docs, altview=False):
        """Show documentation text to the user (altview is ignored)."""
        if docs:
            echo(docs)

    def preview_changes(self, diffs):
        """Show a diff preview and ask for confirmation."""
        echo(diffs)
        return self.y_or_n('Do the changes? ')

    def local_command(self, name, callback, key=None, prefix=False):
        """Register a buffer-local rope command under the local prefix key."""
        self._add_command(name, callback, key, prefix,
                          prekey=self.get('local_prefix'))

    def global_command(self, name, callback, key=None, prefix=False):
        """Register a global rope command under the global prefix key."""
        self._add_command(name, callback, key, prefix,
                          prekey=self.get('global_prefix'))

    def add_hook(self, name, callback, hook):
        """Hook *callback* into a Vim autocommand event for *.py files."""
        mapping = {'before_save': 'FileWritePre,BufWritePre',
                   'after_save': 'FileWritePost,BufWritePost',
                   'exit': 'VimLeave'}
        self._add_function(name, callback)
        vim.command('autocmd %s *.py call %s()' %
                    (mapping[hook], _vim_name(name)))

    def _add_command(self, name, callback, key, prefix, prekey):
        # Define a Vim :command and an optional key mapping for it.
        self._add_function(name, callback, prefix)
        vim.command('command! -range %s call %s()' %
                    (_vim_name(name), _vim_name(name)))
        if key is not None:
            key = prekey + key.replace(' ', '')
            vim.command('noremap %s :call %s()<cr>' % (key, _vim_name(name)))

    def _add_function(self, name, callback, prefix=False):
        # Expose the Python callback at module level and wrap it in a
        # Vim function so it can be called from Vim script.
        globals()[name] = callback
        arg = 'None' if prefix else ''
        vim.command('function! %s() range\n' % _vim_name(name) +
                    '%s ropevim.%s(%s)\n' % (python_cmd, name, arg) +
                    'endfunction\n')

    def _completion_data(self, proposal):
        # Plain completion: the proposal itself is the inserted text.
        return proposal

    # Captures the first non-empty line of a docstring.
    _docstring_re = re.compile('^[\s\t\n]*([^\n]*)')

    def _extended_completion(self, proposal):
        # we are using extended complete and return dicts instead of strings.
        # `ci` means "completion item". see `:help complete-items`
        ci = {'word': proposal.name}
        scope = proposal.scope[0].upper()
        type_ = proposal.type
        info = None
        if proposal.scope == 'parameter_keyword':
            scope = ' '
            type_ = 'param'
            if not hasattr(proposal, 'get_default'):
                # old version of rope
                pass
            else:
                default = proposal.get_default()
                if default is None:
                    info = '*'
                else:
                    info = '= %s' % default
        elif proposal.scope == 'keyword':
            scope = ' '
            type_ = 'keywd'
        elif proposal.scope == 'attribute':
            scope = 'M'
            if proposal.type == 'function':
                type_ = 'meth'
            elif proposal.type == 'instance':
                type_ = 'prop'
        elif proposal.type == 'function':
            type_ = 'func'
        elif proposal.type == 'instance':
            type_ = 'inst'
        elif proposal.type == 'module':
            type_ = 'mod'
        if info is None:
            # Use the first docstring line as the menu info text.
            obj_doc = proposal.get_doc()
            if obj_doc:
                info = self._docstring_re.match(obj_doc).group(1)
            else:
                info = ''
        if type_ is None:
            type_ = ' '
        else:
            type_ = type_.ljust(5)[:5]
        ci['menu'] = ' '.join((scope, type_, info))
        # Render the dict as a Vim dictionary literal.
        ret = u'{%s}' % \
            u','.join(u'"%s":"%s"' %
                      (key, value.replace('"', '\\"'))
                      for (key, value) in ci.items())
        return ret
def _vim_name(name):
tokens = name.split('_')
newtokens = ['Rope'] + [token.title() for token in tokens]
return ''.join(newtokens)
class VimProgress(object):
    """Reports long-running task progress through Vim's message area."""

    def __init__(self, name):
        self.name = name
        self.last = 0
        echo('%s ... ' % self.name)

    def update(self, percent):
        """Show progress, letting a pending keypress interrupt the task."""
        try:
            vim.eval('getchar(0)')
        except vim.error:
            raise KeyboardInterrupt('Task %s was interrupted!' % self.name)
        # Throttle output: only report in steps of more than 4 percent.
        if percent <= self.last + 4:
            return
        echo('%s ... %s%%%%' % (self.name, percent))
        self.last = percent

    def done(self):
        """Report task completion."""
        echo('%s ... done' % self.name)
def echo(message):
    """Show `message` to the user (stdout ends up in Vim's message area)."""
    print(message)
def call(command):
    # Evaluate `command` as a Vim expression and return its result.
    return vim.eval(command)
class _ValueCompleter(object):
    """Command-line completer backing the Vim RopeValueCompleter function.

    `self.values` is filled elsewhere with the current candidates (either
    plain strings or rope proposal objects); __call__ filters them by the
    typed prefix and hands the result back to Vim via s:completions.
    """
    def __init__(self):
        self.values = []
        vim.command('%s import vim' % python_cmd)
        # RopeValueCompleter forwards Vim's (ArgLead, CmdLine, CursorPos)
        # completion arguments to this object.
        vim.command('function! RopeValueCompleter(A, L, P)\n'
                    '%s args = [vim.eval("a:" + p) for p in "ALP"]\n'
                    '%s ropevim._completer(*args)\n'
                    'return s:completions\n'
                    'endfunction\n' % (python_cmd, python_cmd))
    def __call__(self, arg_lead, cmd_line, cursor_pos):
        # don't know if self.values can be empty but better safe than sorry
        if self.values:
            # values may be rope proposal objects or bare strings
            if not isinstance(self.values[0], basestring):
                result = [proposal.name for proposal in self.values
                          if proposal.name.startswith(arg_lead)]
            else:
                result = [proposal for proposal in self.values
                          if proposal.startswith(arg_lead)]
            vim.command('let s:completions = %s' % result)
# Defaults for the g:ropevim_* settings; _init_variables() applies each one
# only when the user has not already set it in their vimrc.
variables = {'ropevim_enable_autoimport': 1,
             'ropevim_autoimport_underlineds': 0,
             'ropevim_codeassist_maxfixes': 1,
             'ropevim_enable_shortcuts': 1,
             'ropevim_open_files_in_tabs': 0,
             'ropevim_autoimport_modules': '[]',
             'ropevim_confirm_saving': 0,
             'ropevim_local_prefix': '"<C-c>r"',
             'ropevim_global_prefix': '"<C-x>p"',
             'ropevim_vim_completion': 0,
             'ropevim_guess_project': 0}
# Normal-mode key bindings installed by _enable_shortcuts(), mapping ropevim
# command names to their keystrokes.
shortcuts = {'code_assist': '<M-/>',
             'lucky_assist': '<M-?>',
             'goto_definition': '<C-c>g',
             'show_doc': '<C-c>d',
             'find_occurrences': '<C-c>f'}
# Subset of the above that is additionally bound in insert mode.
insert_shortcuts = {'code_assist': '<M-/>',
                    'lucky_assist': '<M-?>'}
# Entries of the &Ropevim GUI menu, in display order; None inserts a
# separator line (see _add_menu()).
menu_structure = (
    'open_project',
    'close_project',
    'find_file',
    'undo',
    'redo',
    None, # separator
    'rename',
    'extract_variable',
    'extract_method',
    'inline',
    'move',
    'restructure',
    'use_function',
    'introduce_factory',
    'change_signature',
    'rename_current_module',
    'move_current_module',
    'module_to_package',
    None, # separator
    'code_assist',
    'goto_definition',
    'show_doc',
    'find_occurrences',
    'lucky_assist',
    'jump_to_global',
    'show_calltip',
)
def _init_variables():
    """Give every g:ropevim_* setting its default unless the user set it.

    Each Vim-side assignment is guarded by an exists() check so user
    configuration from the vimrc always wins.
    """
    for variable, default in variables.items():
        # The generated script must close the :if with :endif; without it
        # Vim aborts with "E171: Missing :endif".
        vim.command('if !exists("g:%s")\n' % variable +
                    '  let g:%s = %s\n' % (variable, default) +
                    'endif')
def _enable_shortcuts(env):
    """Install the key bindings from `shortcuts`/`insert_shortcuts`.

    Honors the g:ropevim_enable_shortcuts setting (read through `env`).
    """
    if env.get('enable_shortcuts'):
        for command, shortcut in shortcuts.items():
            vim.command('noremap %s :call %s()<cr>' %
                        (shortcut, _vim_name(command)))
        for command, shortcut in insert_shortcuts.items():
            # Insert-mode maps go through a helper Vim function that
            # returns "" so <C-R>= inserts nothing into the buffer.
            command_name = _vim_name(command) + 'InsertMode'
            vim.command('func! %s()\n' % command_name +
                        'call %s()\n' % _vim_name(command) +
                        'return ""\n'
                        'endfunc')
            vim.command('imap %s <C-R>=%s()<cr>' % (shortcut, command_name))
def _add_menu(env, root_node='&Ropevim'):
    """(Re)build the ropevim GUI menu under `root_node`.

    Entries come from `menu_structure`; None entries become separators.
    Each command is added for both normal (amenu) and visual (vmenu) mode.
    """
    cmd_tmpl = '%s <silent> %s.%s :call %s()<cr>'
    # drop any previously-installed menu so reloading does not duplicate it
    vim.command('silent! aunmenu %s' % root_node)
    for i, cb in enumerate(menu_structure):
        if cb is None:
            vim.command('amenu <silent> %s.-SEP%s- :' % (root_node, i))
            continue
        # use_function -> Use\ Function
        name = cb.replace('_', '\ ').title()
        for cmd in ('amenu', 'vmenu'):
            vim.command(cmd_tmpl % (cmd, root_node, name, _vim_name(cb)))
# --- module initialization: wire ropemode into the running Vim instance ---
ropemode.decorators.logger.message = echo
ropemode.decorators.logger.only_short = True
_completer = _ValueCompleter()
_init_variables()
_env = VimUtils()
_interface = ropemode.interface.RopeMode(env=_env)
_interface.init()
_enable_shortcuts(_env)
_add_menu(_env)
_add_menu(_env, 'PopUp.&Ropevim') # menu weight can also be added
| gpl-2.0 |
guorendong/iridium-browser-ubuntu | third_party/pyftpdlib/src/pyftpdlib/contrib/authorizers.py | 5 | 25014 | #!/usr/bin/env python
# $Id$
# pyftpdlib is released under the MIT license, reproduced below:
# ======================================================================
# Copyright (C) 2007-2012 Giampaolo Rodola' <g.rodola@gmail.com>
#
# All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# ======================================================================
"""An "authorizer" is a class handling authentications and permissions
of the FTP server. It is used by pyftpdlib.ftpserver.FTPHandler
class for:
- verifying user password
- getting user home directory
- checking user permissions when a filesystem read/write event occurs
- changing user when accessing the filesystem
This module contains two classes which implements such functionalities
in a system-specific way for both Unix and Windows.
"""
__all__ = []
import os
import errno
from pyftpdlib.ftpserver import DummyAuthorizer, AuthorizerError
def replace_anonymous(callable):
    """Decorator substituting the configured anonymous account.

    Authorizer methods receive the literal username "anonymous"; this
    wrapper swaps it for the system account configured to handle
    anonymous sessions (self.anonymous_user) before delegating, when
    such an account is set.
    """
    def wrapper(self, username, *args, **kwargs):
        if username == 'anonymous' and self.anonymous_user:
            username = self.anonymous_user
        return callable(self, username, *args, **kwargs)
    return wrapper
class _Base(object):
    """Methods common to both Unix and Windows authorizers.
    Not supposed to be used directly.

    Subclasses are expected to provide: allowed_users, rejected_users,
    anonymous_user, global_perm, msg_login, msg_quit and a
    _dummy_authorizer (DummyAuthorizer) holding per-user overrides.
    """
    def __init__(self):
        """Check for errors in the constructor."""
        if self.rejected_users and self.allowed_users:
            raise ValueError("rejected_users and allowed_users options are "
                             "mutually exclusive")
        users = self._get_system_users()
        # validate whichever of the two (mutually exclusive) lists is in use
        for user in (self.allowed_users or self.rejected_users):
            if user == 'anonymous':
                raise ValueError('invalid username "anonymous"')
            if user not in users:
                raise ValueError('unknown user %s' % user)
        if self.anonymous_user is not None:
            if not self.has_user(self.anonymous_user):
                raise ValueError('no such user %s' % self.anonymous_user)
            home = self.get_home_dir(self.anonymous_user)
            if not os.path.isdir(home):
                raise ValueError('no valid home set for user %s'
                                 % self.anonymous_user)
    def override_user(self, username, password=None, homedir=None, perm=None,
                      msg_login=None, msg_quit=None):
        """Overrides the options specified in the class constructor
        for a specific user.

        Overrides are stored in the internal DummyAuthorizer user table.
        """
        if not password and not homedir and not perm and not msg_login \
                and not msg_quit:
            raise ValueError("at least one keyword argument must be specified")
        if self.allowed_users and username not in self.allowed_users:
            raise ValueError('%s is not an allowed user' % username)
        if self.rejected_users and username in self.rejected_users:
            raise ValueError('%s is not an allowed user' % username)
        if username == "anonymous" and password:
            raise ValueError("can't assign password to anonymous user")
        if not self.has_user(username):
            raise ValueError('no such user %s' % username)
        if username in self._dummy_authorizer.user_table:
            # re-set parameters
            del self._dummy_authorizer.user_table[username]
        self._dummy_authorizer.add_user(username, password or "",
                                        homedir or os.getcwd(),
                                        perm or "",
                                        msg_login or "",
                                        msg_quit or "")
        if homedir is None:
            # empty home means "no override": fall back to the system home
            self._dummy_authorizer.user_table[username]['home'] = ""
    def get_msg_login(self, username):
        return self._get_key(username, 'msg_login') or self.msg_login
    def get_msg_quit(self, username):
        return self._get_key(username, 'msg_quit') or self.msg_quit
    def get_perms(self, username):
        overridden_perms = self._get_key(username, 'perm')
        if overridden_perms:
            return overridden_perms
        if username == 'anonymous':
            # anonymous sessions are read-only by default
            return 'elr'
        return self.global_perm
    def has_perm(self, username, perm, path=None):
        return perm in self.get_perms(username)
    def _get_key(self, username, key):
        # Returns None implicitly when the user has no override entry.
        if self._dummy_authorizer.has_user(username):
            return self._dummy_authorizer.user_table[username][key]
    def _is_rejected_user(self, username):
        """Return True if the user has been black listed via
        allowed_users or rejected_users options.
        """
        if self.allowed_users and username not in self.allowed_users:
            return True
        if self.rejected_users and username in self.rejected_users:
            return True
        return False
# Note: requires python >= 2.5
# The Unix authorizers below are defined only when the POSIX account
# modules (pwd/spwd/crypt) are importable; on other platforms this
# section is silently skipped.
try:
    import pwd, spwd, crypt
except ImportError:
    pass
else:
    __all__.extend(['BaseUnixAuthorizer', 'UnixAuthorizer'])
    # the uid/gid the server runs under
    PROCESS_UID = os.getuid()
    PROCESS_GID = os.getgid()
    class BaseUnixAuthorizer(object):
        """An authorizer compatible with Unix user account and password
        database.
        This class should not be used directly unless for subclassing.
        Use higher-level UnixAuthorizer class instead.
        """
        def __init__(self, anonymous_user=None):
            # reading /etc/shadow (spwd) requires root privileges
            if os.geteuid() != 0 or not spwd.getspall():
                raise AuthorizerError("super user privileges are required")
            self.anonymous_user = anonymous_user
            if self.anonymous_user is not None:
                if not self.anonymous_user in self._get_system_users():
                    raise ValueError('no such user %s' % self.anonymous_user)
                try:
                    pwd.getpwnam(self.anonymous_user).pw_dir
                except KeyError:
                    raise ValueError('no such user %s' % anonymous_user)
        # --- overridden / private API
        def validate_authentication(self, username, password):
            """Authenticates against shadow password db; return
            True on success.
            """
            if username == "anonymous":
                return self.anonymous_user is not None
            try:
                # crypt() re-hashes the candidate password using the salt
                # embedded in the stored hash; equal output means a match
                pw1 = spwd.getspnam(username).sp_pwd
                pw2 = crypt.crypt(password, pw1)
            except KeyError: # no such username
                return False
            else:
                return pw1 == pw2
        @replace_anonymous
        def impersonate_user(self, username, password):
            """Change process effective user/group ids to reflect
            logged in user.
            """
            try:
                pwdstruct = pwd.getpwnam(username)
            except KeyError:
                raise AuthorizerError('no such user %s' % username)
            else:
                os.setegid(pwdstruct.pw_gid)
                os.seteuid(pwdstruct.pw_uid)
        def terminate_impersonation(self, username):
            """Revert process effective user/group IDs."""
            os.setegid(PROCESS_GID)
            os.seteuid(PROCESS_UID)
        @replace_anonymous
        def has_user(self, username):
            """Return True if user exists on the Unix system.
            If the user has been black listed via allowed_users or
            rejected_users options always return False.
            """
            return username in self._get_system_users()
        @replace_anonymous
        def get_home_dir(self, username):
            """Return user home directory."""
            try:
                return pwd.getpwnam(username).pw_dir
            except KeyError:
                raise AuthorizerError('no such user %s' % username)
        @staticmethod
        def _get_system_users():
            """Return all users defined on the UNIX system."""
            return [entry.pw_name for entry in pwd.getpwall()]
        def get_msg_login(self, username):
            return "Login successful."
        def get_msg_quit(self, username):
            return "Goodbye."
        def get_perms(self, username):
            # full read/write permission set; see _Base.get_perms for the
            # restricted variant used by the wrapper classes
            return "elradfmw"
        def has_perm(self, username, perm, path=None):
            return perm in self.get_perms(username)
    class UnixAuthorizer(_Base, BaseUnixAuthorizer):
        """A wrapper on top of BaseUnixAuthorizer providing options
        to specify what users should be allowed to login, per-user
        options, etc.
        Example usages:
        >>> from pyftpdlib.contrib.authorizers import UnixAuthorizer
        >>> # accept all except root
        >>> auth = UnixAuthorizer(rejected_users=["root"])
        >>>
        >>> # accept some users only
        >>> auth = UnixAuthorizer(allowed_users=["matt", "jay"])
        >>>
        >>> # accept everybody and don't care if they have not a valid shell
        >>> auth = UnixAuthorizer(require_valid_shell=False)
        >>>
        >>> # set specific options for a user
        >>> auth.override_user("matt", password="foo", perm="elr")
        """
        # --- public API
        def __init__(self, global_perm="elradfmw",
                     allowed_users=[],
                     rejected_users=[],
                     require_valid_shell=True,
                     anonymous_user=None,
                     msg_login="Login successful.",
                     msg_quit="Goodbye."):
            """Parameters:
             - (string) global_perm:
                a series of letters referencing the users permissions;
                defaults to "elradfmw" which means full read and write
                access for everybody (except anonymous).
             - (list) allowed_users:
                a list of users which are accepted for authenticating
                against the FTP server; defaults to [] (no restrictions).
             - (list) rejected_users:
                a list of users which are not accepted for authenticating
                against the FTP server; defaults to [] (no restrictions).
             - (bool) require_valid_shell:
                Deny access for those users which do not have a valid shell
                binary listed in /etc/shells.
                If /etc/shells cannot be found this is a no-op.
                Anonymous user is not subject to this option, and is free
                to not have a valid shell defined.
                Defaults to True (a valid shell is required for login).
             - (string) anonymous_user:
                specify it if you intend to provide anonymous access.
                The value expected is a string representing the system user
                to use for managing anonymous sessions; defaults to None
                (anonymous access disabled).
             - (string) msg_login:
                the string sent when client logs in.
             - (string) msg_quit:
                the string sent when client quits.
            """
            BaseUnixAuthorizer.__init__(self, anonymous_user)
            self.global_perm = global_perm
            self.allowed_users = allowed_users
            self.rejected_users = rejected_users
            self.anonymous_user = anonymous_user
            self.require_valid_shell = require_valid_shell
            self.msg_login = msg_login
            self.msg_quit = msg_quit
            # per-user overrides (override_user) are stored in here
            self._dummy_authorizer = DummyAuthorizer()
            self._dummy_authorizer._check_permissions('', global_perm)
            _Base.__init__(self)
            if require_valid_shell:
                for username in self.allowed_users:
                    if not self._has_valid_shell(username):
                        raise ValueError("user %s has not a valid shell"
                                         % username)
        def override_user(self, username, password=None, homedir=None, perm=None,
                          msg_login=None, msg_quit=None):
            """Overrides the options specified in the class constructor
            for a specific user.
            """
            if self.require_valid_shell and username != 'anonymous':
                if not self._has_valid_shell(username):
                    raise ValueError("user %s has not a valid shell"
                                     % username)
            _Base.override_user(self, username, password, homedir, perm,
                                msg_login, msg_quit)
        # --- overridden / private API
        def validate_authentication(self, username, password):
            if username == "anonymous":
                return self.anonymous_user is not None
            if self._is_rejected_user(username):
                return False
            if self.require_valid_shell and username != 'anonymous':
                if not self._has_valid_shell(username):
                    return False
            # an overridden password (override_user) takes precedence over
            # the system shadow database
            overridden_password = self._get_key(username, 'pwd')
            if overridden_password:
                return overridden_password == password
            return BaseUnixAuthorizer.validate_authentication(self, username, password)
        @replace_anonymous
        def has_user(self, username):
            if self._is_rejected_user(username):
                return False
            return username in self._get_system_users()
        @replace_anonymous
        def get_home_dir(self, username):
            overridden_home = self._get_key(username, 'home')
            if overridden_home:
                return overridden_home
            return BaseUnixAuthorizer.get_home_dir(self, username)
        @staticmethod
        def _has_valid_shell(username):
            """Return True if the user has a valid shell binary listed
            in /etc/shells. If /etc/shells can't be found return True.
            """
            try:
                file = open('/etc/shells', 'r')
            except IOError, err:
                if err.errno == errno.ENOENT:
                    return True
                raise
            else:
                try:
                    try:
                        shell = pwd.getpwnam(username).pw_shell
                    except KeyError: # invalid user
                        return False
                    # comment lines in /etc/shells start with '#'
                    for line in file:
                        if line.startswith('#'):
                            continue
                        line = line.strip()
                        if line == shell:
                            return True
                    return False
                finally:
                    file.close()
# Note: requires pywin32 extension
# The Windows authorizers below are defined only when pywin32 is
# installed; elsewhere this section is silently skipped.
try:
    import _winreg
    import win32security, win32net, pywintypes, win32con, win32api
except ImportError:
    pass
else:
    __all__.extend(['BaseWindowsAuthorizer', 'WindowsAuthorizer'])
class BaseWindowsAuthorizer(object):
"""An authorizer compatible with Windows user account and
password database.
This class should not be used directly unless for subclassing.
Use higher-level WinowsAuthorizer class instead.
"""
def __init__(self, anonymous_user=None, anonymous_password=None):
# actually try to impersonate the user
self.anonymous_user = anonymous_user
self.anonymous_password = anonymous_password
if self.anonymous_user is not None:
self.impersonate_user(self.anonymous_user,
self.anonymous_password)
self.terminate_impersonation()
def validate_authentication(self, username, password):
if username == "anonymous":
return self.anonymous_user is not None
try:
win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
except pywintypes.error:
return False
else:
return True
@replace_anonymous
def impersonate_user(self, username, password):
"""Impersonate the security context of another user."""
handler = win32security.LogonUser(username, None, password,
win32con.LOGON32_LOGON_INTERACTIVE,
win32con.LOGON32_PROVIDER_DEFAULT)
win32security.ImpersonateLoggedOnUser(handler)
handler.Close()
def terminate_impersonation(self, username):
"""Terminate the impersonation of another user."""
win32security.RevertToSelf()
@replace_anonymous
def has_user(self, username):
return username in self._get_system_users()
@replace_anonymous
def get_home_dir(self, username):
"""Return the user's profile directory, the closest thing
to a user home directory we have on Windows.
"""
try:
sid = win32security.ConvertSidToStringSid(
win32security.LookupAccountName(None, username)[0])
except pywintypes.error, err:
raise AuthorizerError(err)
path = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\ProfileList" + \
"\\" + sid
try:
key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, path)
except WindowsError:
raise AuthorizerError("No profile directory defined for user %s"
% username)
value = _winreg.QueryValueEx(key, "ProfileImagePath")[0]
return win32api.ExpandEnvironmentStrings(value)
@classmethod
def _get_system_users(cls):
"""Return all users defined on the Windows system."""
return [entry['name'] for entry in win32net.NetUserEnum(None, 0)[0]]
def get_msg_login(self, username):
return "Login successful."
def get_msg_quit(self, username):
return "Goodbye."
def get_perms(self, username):
return "elradfmw"
def has_perm(self, username, perm, path=None):
return perm in self.get_perms(username)
    class WindowsAuthorizer(_Base, BaseWindowsAuthorizer):
        """A wrapper on top of BaseWindowsAuthorizer providing options
        to specify what users should be allowed to login, per-user
        options, etc.
        Example usages:
        >>> from pyftpdlib.contrib.authorizers import WindowsAuthorizer
        >>> # accept all except Administrator
        >>> auth = WindowsAuthorizer(rejected_users=["Administrator"])
        >>>
        >>> # accept some users only
        >>> auth = WindowsAuthorizer(allowed_users=["matt", "jay"])
        >>>
        >>> # set specific options for a user
        >>> auth.override_user("matt", password="foo", perm="elr")
        """
        # --- public API
        def __init__(self, global_perm="elradfmw",
                     allowed_users=[],
                     rejected_users=[],
                     anonymous_user=None,
                     anonymous_password=None,
                     msg_login="Login successful.",
                     msg_quit="Goodbye."):
            """Parameters:
             - (string) global_perm:
                a series of letters referencing the users permissions;
                defaults to "elradfmw" which means full read and write
                access for everybody (except anonymous).
             - (list) allowed_users:
                a list of users which are accepted for authenticating
                against the FTP server; defaults to [] (no restrictions).
             - (list) rejected_users:
                a list of users which are not accepted for authenticating
                against the FTP server; defaults to [] (no restrictions).
             - (string) anonymous_user:
                specify it if you intend to provide anonymous access.
                The value expected is a string representing the system user
                to use for managing anonymous sessions.
                As for IIS, it is recommended to use Guest account.
                The common practice is to first enable the Guest user, which
                is disabled by default and then assign an empty password.
                Defaults to None (anonymous access disabled).
             - (string) anonymous_password:
                the password of the user who has been chosen to manage the
                anonymous sessions. Defaults to None (empty password).
             - (string) msg_login:
                the string sent when client logs in.
             - (string) msg_quit:
                the string sent when client quits.
            """
            self.global_perm = global_perm
            self.allowed_users = allowed_users
            self.rejected_users = rejected_users
            self.anonymous_user = anonymous_user
            self.anonymous_password = anonymous_password
            self.msg_login = msg_login
            self.msg_quit = msg_quit
            # per-user overrides (override_user) are stored in here
            self._dummy_authorizer = DummyAuthorizer()
            self._dummy_authorizer._check_permissions('', global_perm)
            _Base.__init__(self)
            # actually try to impersonate the user
            if self.anonymous_user is not None:
                self.impersonate_user(self.anonymous_user,
                                      self.anonymous_password)
                self.terminate_impersonation()
        def override_user(self, username, password=None, homedir=None, perm=None,
                          msg_login=None, msg_quit=None):
            """Overrides the options specified in the class constructor
            for a specific user.
            """
            _Base.override_user(self, username, password, homedir, perm,
                                msg_login, msg_quit)
        # --- overridden / private API
        def validate_authentication(self, username, password):
            """Authenticates against Windows user database; return
            True on success.
            """
            if username == "anonymous":
                return self.anonymous_user is not None
            if self.allowed_users and username not in self.allowed_users:
                return False
            if self.rejected_users and username in self.rejected_users:
                return False
            # an overridden password (override_user) takes precedence over
            # the Windows account database
            overridden_password = self._get_key(username, 'pwd')
            if overridden_password:
                return overridden_password == password
            else:
                return BaseWindowsAuthorizer.validate_authentication(self,
                                                                     username, password)
        def impersonate_user(self, username, password):
            """Impersonate the security context of another user."""
            if username == "anonymous":
                username = self.anonymous_user or ""
                password = self.anonymous_password or ""
            return BaseWindowsAuthorizer.impersonate_user(self, username, password)
        @replace_anonymous
        def has_user(self, username):
            if self._is_rejected_user(username):
                return False
            return username in self._get_system_users()
        @replace_anonymous
        def get_home_dir(self, username):
            overridden_home = self._get_key(username, 'home')
            if overridden_home:
                return overridden_home
            return BaseWindowsAuthorizer.get_home_dir(self, username)
| bsd-3-clause |
broferek/ansible | test/units/modules/network/routeros/test_routeros_command.py | 68 | 4556 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.routeros import routeros_command
from units.modules.utils import set_module_args
from .routeros_module import TestRouterosModule, load_fixture
class TestRouterosCommandModule(TestRouterosModule):
    """Unit tests for the routeros_command module.

    run_commands is patched so each command is answered from a fixture
    file named after the command (spaces -> underscores, slashes dropped).
    """
    module = routeros_command
    def setUp(self):
        super(TestRouterosCommandModule, self).setUp()
        self.mock_run_commands = patch('ansible.modules.network.routeros.routeros_command.run_commands')
        self.run_commands = self.mock_run_commands.start()
    def tearDown(self):
        super(TestRouterosCommandModule, self).tearDown()
        self.mock_run_commands.stop()
    def load_fixtures(self, commands=None):
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    # commands may arrive as JSON-encoded dicts or raw strings
                    obj = json.loads(item)
                    command = obj
                except ValueError:
                    command = item
                filename = str(command).replace(' ', '_').replace('/', '')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file
    def test_routeros_command_simple(self):
        set_module_args(dict(commands=['/system resource print']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue('platform: "MikroTik"' in result['stdout'][0])
    def test_routeros_command_multiple(self):
        set_module_args(dict(commands=['/system resource print', '/system resource print']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue('platform: "MikroTik"' in result['stdout'][0])
    def test_routeros_command_wait_for(self):
        wait_for = 'result[0] contains "MikroTik"'
        set_module_args(dict(commands=['/system resource print'], wait_for=wait_for))
        self.execute_module()
    def test_routeros_command_wait_for_fails(self):
        # unmet wait_for condition: module retries 10 times (the default)
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['/system resource print'], wait_for=wait_for))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 10)
    def test_routeros_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['/system resource print'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)
    def test_routeros_command_match_any(self):
        wait_for = ['result[0] contains "MikroTik"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['/system resource print'], wait_for=wait_for, match='any'))
        self.execute_module()
    def test_routeros_command_match_all(self):
        wait_for = ['result[0] contains "MikroTik"',
                    'result[0] contains "RB1100"']
        set_module_args(dict(commands=['/system resource print'], wait_for=wait_for, match='all'))
        self.execute_module()
    def test_routeros_command_match_all_failure(self):
        wait_for = ['result[0] contains "MikroTik"',
                    'result[0] contains "test string"']
        commands = ['/system resource print', '/system resource print']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
    def test_routeros_command_wait_for_2(self):
        wait_for = 'result[0] contains "wireless"'
        set_module_args(dict(commands=['/system package print'], wait_for=wait_for))
        self.execute_module()
| gpl-3.0 |
navotsil/Open-Knesset | video/management/commands/sub_commands/tests/AddVideo.py | 14 | 7647 | #encoding: utf-8
from django.test import TestCase
from video.management.commands.sub_commands.AddVideo import AddVideo
class AddVideo_test(AddVideo):
    """AddVideo subclass with all external collaborators stubbed out.

    Each *Return dict maps an expected-arguments tuple to the canned
    return value; the overridden hooks assert (via the owning TestCase)
    that they were called with expected arguments. Saved videos are also
    recorded in saveVideoLog for later inspection.
    """
    def __init__(
        self, options, testCase, getMemberObjectReturn, getYoutubeVideosReturn,
        getIsVideoExistsReturn, saveVideoReturn
    ):
        self._testCase=testCase
        self._getMemberObjectReturn=getMemberObjectReturn
        self._getYoutubeVideosReturn=getYoutubeVideosReturn
        self._getIsVideoExistsReturn=getIsVideoExistsReturn
        self._saveVideoReturn=saveVideoReturn
        self.saveVideoLog=[]
        AddVideo.__init__(self,options)
    def _getMemberObject(self,**kwargs):
        params=(kwargs['id'],)
        self._testCase.assertIn(params,self._getMemberObjectReturn)
        return self._getMemberObjectReturn[params]
    def _getYoutubeVideos(self,**kwargs):
        params=(kwargs['youtube_id_url'],)
        self._testCase.assertIn(params,self._getYoutubeVideosReturn)
        return self._getYoutubeVideosReturn[params]
    def _isVideoExists(self,video):
        params=(video['source_id'],)
        self._testCase.assertIn(params,self._getIsVideoExistsReturn)
        return self._getIsVideoExistsReturn[params]
    def _saveVideo(self,videoFields):
        params=(videoFields['source_id'],)
        self._testCase.assertIn(params,self._saveVideoReturn)
        self.saveVideoLog.append(videoFields)
        return self._saveVideoReturn[params]
class Options_test():
    """Stub for the management-command options object.

    `get(name, default)` is verified (via the owning TestCase) against
    the expected (name, default) pairs and answered from `opts`.
    """
    def __init__(self, testCase, opts):
        self._opts = opts
        self._testCase = testCase
    def get(self, varname, default):
        key = (varname, default)
        self._testCase.assertIn(key, self._opts)
        return self._opts[key]
class Member_test():
    """Bare stub standing in for a Member model instance."""
class Video_test():
    """Stub video object exposing just an `id` and a no-op `save()`."""
    def __init__(self, id):
        self.id = id
    def save(self):
        # Nothing to persist in tests.
        pass
class testAddVideo(TestCase):
    """Tests for the AddVideo sub-command, driven through AddVideo_test.

    Each test wires an Options_test with the expected option lookups plus
    the canned returns for the stubbed collaborators, then checks run()'s
    success flag and (on failure) the accumulated `ans` messages.
    """
    testAddVideo=True
    def testInvalidParams(self):
        # missing link/type/id must be rejected before any lookup happens
        options=Options_test(self,{
            ('video-link',None):None,
            ('object-type',None):None,
            ('object-id',None):None,
            ('is_sticky',False):False,
            })
        getMemberObjectReturn={}
        getYoutubeVideosReturn={}
        getIsVideoExistsReturn={}
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn('you must specify a video link, object type and object id',av.ans)
    def testUnsupportedObjectType(self):
        options=Options_test(self,{
            ('video-link',None):'yyy',
            ('object-type',None):'xxx',
            ('object-id',None):'xxx',
            ('is_sticky',False):False,
            })
        getMemberObjectReturn={}
        getYoutubeVideosReturn={}
        getIsVideoExistsReturn={}
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn('unsupported object type',av.ans)
    def testInvalidYoutubeLink(self):
        # a youtube URL without a v= parameter cannot be resolved
        options=Options_test(self,{
            ('video-link',None):'http://youtube/video1',
            ('object-type',None):'member',
            ('object-id',None):'1',
            ('is_sticky',False):True,
            })
        getMemberObjectReturn={
            ('1',):Member_test()
        }
        getYoutubeVideosReturn={}
        getIsVideoExistsReturn={}
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn("unable to determine source type from url",av.ans)
    def testCantFindYoutubeVideo(self):
        # the gdata feed returns no entries for the requested id
        options=Options_test(self,{
            ('video-link',None):'http://youtube/video?v=03aA',
            ('object-type',None):'member',
            ('object-id',None):'1',
            ('is_sticky',False):True,
            })
        getMemberObjectReturn={
            ('1',):Member_test()
        }
        getYoutubeVideosReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):[]
        }
        getIsVideoExistsReturn={}
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn("failed to add the video",av.ans)
    def testVideoAlreadyExists(self):
        options=Options_test(self,{
            ('video-link',None):'http://youtube/video?v=03aA',
            ('object-type',None):'member',
            ('object-id',None):'1',
            ('is_sticky',False):True,
            })
        getMemberObjectReturn={
            ('1',):Member_test()
        }
        getYoutubeVideosReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):[{},{},]
        }
        getIsVideoExistsReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):True
        }
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn("failed to add the video",av.ans)
    def testInvalidYoutubeVideoData(self):
        # entries exist but lack the required fields (empty dicts)
        options=Options_test(self,{
            ('video-link',None):'http://youtube/video?v=03aA',
            ('object-type',None):'member',
            ('object-id',None):'1',
            ('is_sticky',False):True,
            })
        getMemberObjectReturn={
            ('1',):Member_test()
        }
        getYoutubeVideosReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):[{},{},]
        }
        getIsVideoExistsReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):False
        }
        saveVideoReturn={}
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertFalse(av.run())
        self.assertIn("failed to add the video",av.ans)
    def testAddYoutubeVideo(self):
        # happy path: first entry carries all fields and gets saved
        options=Options_test(self,{
            ('video-link',None):'http://youtube/video?v=03aA',
            ('object-type',None):'member',
            ('object-id',None):'1',
            ('is_sticky',False):True,
            })
        getMemberObjectReturn={
            ('1',):Member_test()
        }
        getYoutubeVideosReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):[
                {
                    'embed_url_autoplay':'',
                    'thumbnail480x360':'',
                    'thumbnail90x120':'',
                    'title':'',
                    'description':'',
                    'link':'',
                    'id':'http://gdata.youtube.com/feeds/api/videos/03aA',
                    'published':'',
                },
                {},
            ]
        }
        getIsVideoExistsReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):False
        }
        saveVideoReturn={
            ('http://gdata.youtube.com/feeds/api/videos/03aA',):Video_test(1)
        }
        av=AddVideo_test(
            options, self, getMemberObjectReturn, getYoutubeVideosReturn,
            getIsVideoExistsReturn, saveVideoReturn
        )
        self.assertTrue(av.run())
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.